This commit is contained in:
krahets 2024-09-28 09:26:54 +08:00
parent 03a6cd27ca
commit 4ac4f94628
25 changed files with 119 additions and 162 deletions

View file

@ -1443,8 +1443,8 @@ comments: true
/* 双向链表节点类 */
class ListNode {
int val; // 节点值
ListNode next; // 指向后继节点的引用
ListNode prev; // 指向前驱节点的引用
ListNode? next; // 指向后继节点的引用
ListNode? prev; // 指向前驱节点的引用
ListNode(this.val, [this.next, this.prev]); // 构造函数
}
```

View file

@ -1721,7 +1721,7 @@ comments: true
remove(index) {
if (index < 0 || index >= this.#size) throw new Error('索引越界');
let num = this.#arr[index];
// 将索引 index 之后的元素都向前移动一位
// 将索引 index 之后的元素都向前移动一位
for (let j = index; j < this.#size - 1; j++) {
this.#arr[j] = this.#arr[j + 1];
}

View file

@ -532,11 +532,7 @@ comments: true
) {
// 当放置完所有行时,记录解
if row == n {
let mut copy_state: Vec<Vec<String>> = Vec::new();
for s_row in state.clone() {
copy_state.push(s_row);
}
res.push(copy_state);
res.push(state.clone());
return;
}
// 遍历所有列
@ -547,12 +543,12 @@ comments: true
// 剪枝:不允许该格子所在列、主对角线、次对角线上存在皇后
if !cols[col] && !diags1[diag1] && !diags2[diag2] {
// 尝试:将皇后放置在该格子
state.get_mut(row).unwrap()[col] = "Q".into();
state[row][col] = "Q".into();
(cols[col], diags1[diag1], diags2[diag2]) = (true, true, true);
// 放置下一行
backtrack(row + 1, n, state, res, cols, diags1, diags2);
// 回退:将该格子恢复为空位
state.get_mut(row).unwrap()[col] = "#".into();
state[row][col] = "#".into();
(cols[col], diags1[diag1], diags2[diag2]) = (false, false, false);
}
}
@ -561,14 +557,7 @@ comments: true
/* 求解 n 皇后 */
fn n_queens(n: usize) -> Vec<Vec<Vec<String>>> {
// 初始化 n*n 大小的棋盘,其中 'Q' 代表皇后,'#' 代表空位
let mut state: Vec<Vec<String>> = Vec::new();
for _ in 0..n {
let mut row: Vec<String> = Vec::new();
for _ in 0..n {
row.push("#".into());
}
state.push(row);
}
let mut state: Vec<Vec<String>> = vec![vec!["#".to_string(); n]; n];
let mut cols = vec![false; n]; // 记录列是否有皇后
let mut diags1 = vec![false; 2 * n - 1]; // 记录主对角线上是否有皇后
let mut diags2 = vec![false; 2 * n - 1]; // 记录次对角线上是否有皇后

View file

@ -355,7 +355,7 @@ comments: true
```rust title="subset_sum_i_naive.rs"
/* 回溯算法:子集和 I */
fn backtrack(
mut state: Vec<i32>,
state: &mut Vec<i32>,
target: i32,
total: i32,
choices: &[i32],
@ -363,7 +363,7 @@ comments: true
) {
// 子集和等于 target 时,记录解
if total == target {
res.push(state);
res.push(state.clone());
return;
}
// 遍历所有选择
@ -375,7 +375,7 @@ comments: true
// 尝试:做出选择,更新元素和 total
state.push(choices[i]);
// 进行下一轮选择
backtrack(state.clone(), target, total + choices[i], choices, res);
backtrack(state, target, total + choices[i], choices, res);
// 回退:撤销选择,恢复到之前的状态
state.pop();
}
@ -383,10 +383,10 @@ comments: true
/* 求解子集和 I（包含重复子集） */
fn subset_sum_i_naive(nums: &[i32], target: i32) -> Vec<Vec<i32>> {
let state = Vec::new(); // 状态(子集)
let mut state = Vec::new(); // 状态(子集)
let total = 0; // 子集和
let mut res = Vec::new(); // 结果列表(子集列表)
backtrack(state, target, total, nums, &mut res);
backtrack(&mut state, target, total, nums, &mut res);
res
}
```
@ -912,7 +912,7 @@ comments: true
```rust title="subset_sum_i.rs"
/* 回溯算法:子集和 I */
fn backtrack(
mut state: Vec<i32>,
state: &mut Vec<i32>,
target: i32,
choices: &[i32],
start: usize,
@ -920,7 +920,7 @@ comments: true
) {
// 子集和等于 target 时,记录解
if target == 0 {
res.push(state);
res.push(state.clone());
return;
}
// 遍历所有选择
@ -934,7 +934,7 @@ comments: true
// 尝试:做出选择,更新 target, start
state.push(choices[i]);
// 进行下一轮选择
backtrack(state.clone(), target - choices[i], choices, i, res);
backtrack(state, target - choices[i], choices, i, res);
// 回退:撤销选择,恢复到之前的状态
state.pop();
}
@ -942,11 +942,11 @@ comments: true
/* 求解子集和 I */
fn subset_sum_i(nums: &mut [i32], target: i32) -> Vec<Vec<i32>> {
let state = Vec::new(); // 状态(子集)
let mut state = Vec::new(); // 状态(子集)
nums.sort(); // 对 nums 进行排序
let start = 0; // 遍历起始点
let mut res = Vec::new(); // 结果列表(子集列表)
backtrack(state, target, nums, start, &mut res);
backtrack(&mut state, target, nums, start, &mut res);
res
}
```
@ -1512,7 +1512,7 @@ comments: true
```rust title="subset_sum_ii.rs"
/* 回溯算法:子集和 II */
fn backtrack(
mut state: Vec<i32>,
state: &mut Vec<i32>,
target: i32,
choices: &[i32],
start: usize,
@ -1520,7 +1520,7 @@ comments: true
) {
// 子集和等于 target 时,记录解
if target == 0 {
res.push(state);
res.push(state.clone());
return;
}
// 遍历所有选择
@ -1539,7 +1539,7 @@ comments: true
// 尝试:做出选择,更新 target, start
state.push(choices[i]);
// 进行下一轮选择
backtrack(state.clone(), target - choices[i], choices, i, res);
backtrack(state, target - choices[i], choices, i + 1, res);
// 回退:撤销选择,恢复到之前的状态
state.pop();
}
@ -1547,11 +1547,11 @@ comments: true
/* 求解子集和 II */
fn subset_sum_ii(nums: &mut [i32], target: i32) -> Vec<Vec<i32>> {
let state = Vec::new(); // 状态(子集)
let mut state = Vec::new(); // 状态(子集)
nums.sort(); // 对 nums 进行排序
let start = 0; // 遍历起始点
let mut res = Vec::new(); // 结果列表(子集列表)
backtrack(state, target, nums, start, &mut res);
backtrack(&mut state, target, nums, start, &mut res);
res
}
```

View file

@ -69,13 +69,6 @@ comments: true
=== "C++"
```cpp title="quick_sort.cpp"
/* 元素交换 */
void swap(vector<int> &nums, int i, int j) {
int tmp = nums[i];
nums[i] = nums[j];
nums[j] = tmp;
}
/* 哨兵划分 */
int partition(vector<int> &nums, int left, int right) {
// 以 nums[left] 为基准数
@ -85,9 +78,9 @@ comments: true
j--; // 从右向左找首个小于基准数的元素
while (i < j && nums[i] <= nums[left])
i++; // 从左向右找首个大于基准数的元素
swap(nums, i, j); // 交换这两个元素
swap(nums[i], nums[j]); // 交换这两个元素
}
swap(nums, i, left); // 将基准数交换至两子数组的分界线
swap(nums[i], nums[left]); // 将基准数交换至两子数组的分界线
return i; // 返回基准数的索引
}
```
@ -721,7 +714,7 @@ comments: true
// 选取三个候选元素的中位数
int med = medianThree(nums, left, (left + right) / 2, right);
// 将中位数交换至数组最左端
swap(nums, left, med);
swap(nums[left], nums[med]);
// 以 nums[left] 为基准数
int i = left, j = right;
while (i < j) {
@ -729,9 +722,9 @@ comments: true
j--; // 从右向左找首个小于基准数的元素
while (i < j && nums[i] <= nums[left])
i++; // 从左向右找首个大于基准数的元素
swap(nums, i, j); // 交换这两个元素
swap(nums[i], nums[j]); // 交换这两个元素
}
swap(nums, i, left); // 将基准数交换至两子数组的分界线
swap(nums[i], nums[left]); // 将基准数交换至两子数组的分界线
return i; // 返回基准数的索引
}
```

View file

@ -665,7 +665,7 @@ comments: true
### 2. &nbsp; 完全二叉树
如图 7-5 所示,<u>完全二叉树（complete binary tree）</u>只有最底层的节点未被填满,且最底层节点尽量靠左填充。
如图 7-5 所示,<u>完全二叉树（complete binary tree）</u>只有最底层的节点未被填满,且最底层节点尽量靠左填充。请注意,完美二叉树也是一棵完全二叉树。
![完全二叉树](binary_tree.assets/complete_binary_tree.png){ class="animation-figure" }

View file

@ -4,7 +4,7 @@ comments: true
# 2.3 &nbsp; Time complexity
Time complexity is a concept used to measure how the run time of an algorithm increases with the size of the input data. Understanding time complexity is crucial for accurately assessing the efficiency of an algorithm.
The runtime can intuitively assess the efficiency of an algorithm. How can we accurately estimate the runtime of a piece of an algorithm?
1. **Determining the Running Platform**: This includes hardware configuration, programming language, system environment, etc., all of which can affect the efficiency of code execution.
2. **Evaluating the Run Time for Various Computational Operations**: For instance, an addition operation `+` might take 1 ns, a multiplication operation `*` might take 10 ns, a print operation `print()` might take 5 ns, etc.

View file

@ -4,7 +4,7 @@ comments: true
# 9.2 &nbsp; Basic operations on graphs
The basic operations on graphs can be divided into operations on "edges" and operations on "vertices". Under the two representation methods of "adjacency matrix" and "adjacency list", the implementation methods are different.
The basic operations on graphs can be divided into operations on "edges" and operations on "vertices". Under the two representation methods of "adjacency matrix" and "adjacency list", the implementations are different.
## 9.2.1 &nbsp; Implementation based on adjacency matrix

View file

@ -4,7 +4,7 @@ comments: true
# 9.3 &nbsp; Graph traversal
Trees represent a "one-to-many" relationship, while graphs have a higher degree of freedom and can represent any "many-to-many" relationship. Therefore, we can consider trees as a special case of graphs. Clearly, **tree traversal operations are also a special case of graph traversal operations**.
Trees represent a "one-to-many" relationship, while graphs have a higher degree of freedom and can represent any "many-to-many" relationship. Therefore, we can consider tree as a special case of graph. Clearly, **tree traversal operations are also a special case of graph traversal operations**.
Both graphs and trees require the application of search algorithms to implement traversal operations. Graph traversal can be divided into two types: <u>Breadth-First Search (BFS)</u> and <u>Depth-First Search (DFS)</u>.
@ -18,7 +18,7 @@ Both graphs and trees require the application of search algorithms to implement
### 1. &nbsp; Algorithm implementation
BFS is usually implemented with the help of a queue, as shown in the code below. The queue has a "first in, first out" property, which aligns with the BFS idea of traversing "from near to far".
BFS is usually implemented with the help of a queue, as shown in the code below. The queue is "first in, first out", which aligns with the BFS idea of traversing "from near to far".
1. Add the starting vertex `startVet` to the queue and start the loop.
2. In each iteration of the loop, pop the vertex at the front of the queue and record it as visited, then add all adjacent vertices of that vertex to the back of the queue.
@ -184,7 +184,7 @@ To prevent revisiting vertices, we use a hash set `visited` to record which node
[class]{}-[func]{graphBFS}
```
The code is relatively abstract, it is suggested to compare with Figure 9-10 to deepen the understanding.
The code is relatively abstract, you can compare it with Figure 9-10 to get a better understanding.
=== "<1>"
![Steps of breadth-first search of a graph](graph_traversal.assets/graph_bfs_step1.png){ class="animation-figure" }
@ -223,7 +223,7 @@ The code is relatively abstract, it is suggested to compare with Figure 9-10 to
!!! question "Is the sequence of breadth-first traversal unique?"
Not unique. Breadth-first traversal only requires traversing in a "from near to far" order, **and the traversal order of multiple vertices at the same distance can be arbitrarily shuffled**. For example, in Figure 9-10, the visitation order of vertices $1$ and $3$ can be switched, as can the order of vertices $2$, $4$, and $6$.
Not unique. Breadth-first traversal only requires traversing in a "near to far" order, **and the traversal order of the vertices with the same distance can be arbitrary**. For example, in Figure 9-10, the visit order of vertices $1$ and $3$ can be swapped, as can the order of vertices $2$, $4$, and $6$.
### 2. &nbsp; Complexity analysis
@ -233,7 +233,7 @@ The code is relatively abstract, it is suggested to compare with Figure 9-10 to
## 9.3.2 &nbsp; Depth-first search
**Depth-first search is a traversal method that prioritizes going as far as possible and then backtracks when no further paths are available**. As shown in Figure 9-11, starting from the top left vertex, visit some adjacent vertex of the current vertex until no further path is available, then return and continue until all vertices are traversed.
**Depth-first search is a traversal method that prioritizes going as far as possible and then backtracks when no further path is available**. As shown in Figure 9-11, starting from the top left vertex, visit some adjacent vertex of the current vertex until no further path is available, then return and continue until all vertices are traversed.
![Depth-first traversal of a graph](graph_traversal.assets/graph_dfs.png){ class="animation-figure" }

View file

@ -4,7 +4,7 @@ comments: true
# 6.1 &nbsp; Hash table
A <u>hash table</u>, also known as a <u>hash map</u>, is a data structure that establishes a mapping between keys and values, enabling efficient element retrieval. Specifically, when we input a `key` into the hash table, we can retrive the corresponding `value` in $O(1)$ time complexity.
A <u>hash table</u>, also known as a <u>hash map</u>, is a data structure that establishes a mapping between keys and values, enabling efficient element retrieval. Specifically, when we input a `key` into the hash table, we can retrieve the corresponding `value` in $O(1)$ time complexity.
As shown in Figure 6-1, given $n$ students, each student has two data fields: "Name" and "Student ID". If we want to implement a query function that takes a student ID as input and returns the corresponding name, we can use the hash table shown in Figure 6-1.
@ -14,9 +14,9 @@ As shown in Figure 6-1, given $n$ students, each student has two data fields: "N
In addition to hash tables, arrays and linked lists can also be used to implement query functionality, but the time complexity is different. Their efficiency is compared in Table 6-1:
- **Inserting elements**: Simply append the element to the tail of the array (or linked list). The time complexity of this operation is $O(1)$.
- **Searching for elements**: As the array (or linked list) is unsorted, searching for an element requires traversing through all of the elements. The time complexity of this operation is $O(n)$.
- **Deleting elements**: To remove an element, we first need to locate it. Then, we delete it from the array (or linked list). The time complexity of this operation is $O(n)$.
- **Inserting an element**: Simply append the element to the tail of the array (or linked list). The time complexity of this operation is $O(1)$.
- **Searching for an element**: As the array (or linked list) is unsorted, searching for an element requires traversing through all of the elements. The time complexity of this operation is $O(n)$.
- **Deleting an element**: To remove an element, we first need to locate it. Then, we delete it from the array (or linked list). The time complexity of this operation is $O(n)$.
<p align="center"> Table 6-1 &nbsp; Comparison of time efficiency for common operations </p>
@ -30,7 +30,7 @@ In addition to hash tables, arrays and linked lists can also be used to implemen
</div>
It can be seen that **the time complexity for operations (insertion, deletion, searching, and modification) in a hash table is $O(1)$**, which is highly efficient.
As observed, **the time complexity for operations (insertion, deletion, searching, and modification) in a hash table is $O(1)$**, which is highly efficient.
## 6.1.1 &nbsp; Common operations of hash table
@ -66,7 +66,7 @@ Common operations of a hash table include: initialization, querying, adding key-
unordered_map<int, string> map;
/* Add operation */
// Add key-value pair (key, value) to the hash table
// Add key-value pair (key, value) to hash table
map[12836] = "Xiao Ha";
map[15937] = "Xiao Luo";
map[16750] = "Xiao Suan";
@ -89,7 +89,7 @@ Common operations of a hash table include: initialization, querying, adding key-
Map<Integer, String> map = new HashMap<>();
/* Add operation */
// Add key-value pair (key, value) to the hash table
// Add key-value pair (key, value) to hash table
map.put(12836, "Xiao Ha");
map.put(15937, "Xiao Luo");
map.put(16750, "Xiao Suan");
@ -111,7 +111,7 @@ Common operations of a hash table include: initialization, querying, adding key-
/* Initialize hash table */
Dictionary<int, string> map = new() {
/* Add operation */
// Add key-value pair (key, value) to the hash table
// Add key-value pair (key, value) to hash table
{ 12836, "Xiao Ha" },
{ 15937, "Xiao Luo" },
{ 16750, "Xiao Suan" },
@ -135,7 +135,7 @@ Common operations of a hash table include: initialization, querying, adding key-
hmap := make(map[int]string)
/* Add operation */
// Add key-value pair (key, value) to the hash table
// Add key-value pair (key, value) to hash table
hmap[12836] = "Xiao Ha"
hmap[15937] = "Xiao Luo"
hmap[16750] = "Xiao Suan"
@ -158,7 +158,7 @@ Common operations of a hash table include: initialization, querying, adding key-
var map: [Int: String] = [:]
/* Add operation */
// Add key-value pair (key, value) to the hash table
// Add key-value pair (key, value) to hash table
map[12836] = "Xiao Ha"
map[15937] = "Xiao Luo"
map[16750] = "Xiao Suan"
@ -202,7 +202,7 @@ Common operations of a hash table include: initialization, querying, adding key-
/* Initialize hash table */
const map = new Map<number, string>();
/* Add operation */
// Add key-value pair (key, value) to the hash table
// Add key-value pair (key, value) to hash table
map.set(12836, 'Xiao Ha');
map.set(15937, 'Xiao Luo');
map.set(16750, 'Xiao Suan');
@ -230,7 +230,7 @@ Common operations of a hash table include: initialization, querying, adding key-
Map<int, String> map = {};
/* Add operation */
// Add key-value pair (key, value) to the hash table
// Add key-value pair (key, value) to hash table
map[12836] = "Xiao Ha";
map[15937] = "Xiao Luo";
map[16750] = "Xiao Suan";
@ -255,7 +255,7 @@ Common operations of a hash table include: initialization, querying, adding key-
let mut map: HashMap<i32, String> = HashMap::new();
/* Add operation */
// Add key-value pair (key, value) to the hash table
// Add key-value pair (key, value) to hash table
map.insert(12836, "Xiao Ha".to_string());
map.insert(15937, "Xiao Luo".to_string());
map.insert(16750, "Xiao Suan".to_string());
@ -502,10 +502,10 @@ First, let's consider the simplest case: **implementing a hash table using only
So, how do we locate the corresponding bucket based on the `key`? This is achieved through a <u>hash function</u>. The role of the hash function is to map a larger input space to a smaller output space. In a hash table, the input space consists of all the keys, and the output space consists of all the buckets (array indices). In other words, given a `key`, **we can use the hash function to determine the storage location of the corresponding key-value pair in the array**.
When given a `key`, the calculation process of the hash function consists of the following two steps:
With a given `key`, the calculation of the hash function consists of two steps:
1. Calculate the hash value by using a certain hash algorithm `hash()`.
2. Take the modulus of the hash value with the bucket count (array length) `capacity` to obtain the array `index` corresponding to that key.
2. Take the modulus of the hash value with the bucket count (array length) `capacity` to obtain the array `index` corresponding to the key.
```shell
index = hash(key) % capacity

View file

@ -69,13 +69,6 @@ After the pivot partitioning, the original array is divided into three parts: le
=== "C++"
```cpp title="quick_sort.cpp"
/* Swap elements */
void swap(vector<int> &nums, int i, int j) {
int tmp = nums[i];
nums[i] = nums[j];
nums[j] = tmp;
}
/* Partition */
int partition(vector<int> &nums, int left, int right) {
// Use nums[left] as the pivot

View file

@ -15,15 +15,15 @@ As shown in Figure 7-16, a <u>binary search tree</u> satisfies the following con
## 7.4.1 &nbsp; Operations on a binary search tree
We encapsulate the binary search tree as a class `BinarySearchTree` and declare a member variable `root`, pointing to the tree's root node.
We encapsulate the binary search tree as a class `BinarySearchTree` and declare a member variable `root` pointing to the tree's root node.
### 1. &nbsp; Searching for a node
Given a target node value `num`, one can search according to the properties of the binary search tree. As shown in Figure 7-17, we declare a node `cur` and start from the binary tree's root node `root`, looping to compare the size relationship between the node value `cur.val` and `num`.
Given a target node value `num`, one can search according to the properties of the binary search tree. As shown in Figure 7-17, we declare a node `cur`, start from the binary tree's root node `root`, and loop to compare the size between the node value `cur.val` and `num`.
- If `cur.val < num`, it means the target node is in `cur`'s right subtree, thus execute `cur = cur.right`.
- If `cur.val > num`, it means the target node is in `cur`'s left subtree, thus execute `cur = cur.left`.
- If `cur.val = num`, it means the target node is found, exit the loop and return the node.
- If `cur.val = num`, it means the target node is found, exit the loop, and return the node.
=== "<1>"
![Example of searching for a node in a binary search tree](binary_search_tree.assets/bst_search_step1.png){ class="animation-figure" }
@ -39,7 +39,7 @@ Given a target node value `num`, one can search according to the properties of t
<p align="center"> Figure 7-17 &nbsp; Example of searching for a node in a binary search tree </p>
The search operation in a binary search tree works on the same principle as the binary search algorithm, eliminating half of the possibilities in each round. The number of loops is at most the height of the binary tree. When the binary tree is balanced, it uses $O(\log n)$ time. Example code is as follows:
The search operation in a binary search tree works on the same principle as the binary search algorithm, eliminating half of the cases in each round. The number of loops is at most the height of the binary tree. When the binary tree is balanced, it uses $O(\log n)$ time. The example code is as follows:
=== "Python"
@ -177,8 +177,8 @@ The search operation in a binary search tree works on the same principle as the
Given an element `num` to be inserted, to maintain the property of the binary search tree "left subtree < root node < right subtree," the insertion operation proceeds as shown in Figure 7-18.
1. **Finding the insertion position**: Similar to the search operation, start from the root node and loop downwards according to the size relationship between the current node value and `num` until passing through the leaf node (traversing to `None`) then exit the loop.
2. **Insert the node at that position**: Initialize the node `num` and place it where `None` was.
1. **Finding insertion position**: Similar to the search operation, start from the root node, loop downwards according to the size relationship between the current node value and `num`, until the leaf node is passed (traversed to `None`), then exit the loop.
2. **Insert the node at this position**: Initialize the node `num` and place it where `None` was.
![Inserting a node into a binary search tree](binary_search_tree.assets/bst_insert.png){ class="animation-figure" }
@ -186,8 +186,8 @@ Given an element `num` to be inserted, to maintain the property of the binary se
In the code implementation, note the following two points.
- The binary search tree does not allow duplicate nodes; otherwise, it will violate its definition. Therefore, if the node to be inserted already exists in the tree, the insertion is not performed, and it directly returns.
- To perform the insertion operation, we need to use the node `pre` to save the node from the last loop. This way, when traversing to `None`, we can get its parent node, thus completing the node insertion operation.
- The binary search tree does not allow duplicate nodes to exist; otherwise, its definition would be violated. Therefore, if the node to be inserted already exists in the tree, the insertion is not performed, and the node returns directly.
- To perform the insertion operation, we need to use the node `pre` to save the node from the previous loop. This way, when traversing to `None`, we can get its parent node, thus completing the node insertion operation.
=== "Python"
@ -355,9 +355,9 @@ Similar to searching for a node, inserting a node uses $O(\log n)$ time.
### 3. &nbsp; Removing a node
First, find the target node in the binary tree, then remove it. Similar to inserting a node, we need to ensure that after the removal operation is completed, the property of the binary search tree "left subtree < root node < right subtree" is still satisfied. Therefore, based on the number of child nodes of the target node, we divide it into 0, 1, and 2 cases, performing the corresponding node removal operations.
First, find the target node in the binary tree, then remove it. Similar to inserting a node, we need to ensure that after the removal operation is completed, the property of the binary search tree "left subtree < root node < right subtree" is still satisfied. Therefore, based on the number of child nodes of the target node, we divide it into three cases: 0, 1, and 2, and perform the corresponding node removal operations.
As shown in Figure 7-19, when the degree of the node to be removed is $0$, it means the node is a leaf node, and it can be directly removed.
As shown in Figure 7-19, when the degree of the node to be removed is $0$, it means the node is a leaf node and can be directly removed.
![Removing a node in a binary search tree (degree 0)](binary_search_tree.assets/bst_remove_case1.png){ class="animation-figure" }
@ -623,9 +623,9 @@ The operation of removing a node also uses $O(\log n)$ time, where finding the n
### 4. &nbsp; In-order traversal is ordered
As shown in Figure 7-22, the in-order traversal of a binary tree follows the "left $\rightarrow$ root $\rightarrow$ right" traversal order, and a binary search tree satisfies the size relationship "left child node $<$ root node $<$ right child node".
As shown in Figure 7-22, the in-order traversal of a binary tree follows the traversal order of "left $\rightarrow$ root $\rightarrow$ right," and a binary search tree satisfies the size relationship of "left child node $<$ root node $<$ right child node."
This means that in-order traversal in a binary search tree always traverses the next smallest node first, thus deriving an important property: **The in-order traversal sequence of a binary search tree is ascending**.
This means that when performing in-order traversal in a binary search tree, the next smallest node will always be traversed first, thus leading to an important property: **The sequence of in-order traversal in a binary search tree is ascending**.
Using the ascending property of in-order traversal, obtaining ordered data in a binary search tree requires only $O(n)$ time, without the need for additional sorting operations, which is very efficient.
@ -635,7 +635,7 @@ Using the ascending property of in-order traversal, obtaining ordered data in a
## 7.4.2 &nbsp; Efficiency of binary search trees
Given a set of data, we consider using an array or a binary search tree for storage. Observing Table 7-2, the operations on a binary search tree all have logarithmic time complexity, which is stable and efficient. Only in scenarios of high-frequency addition and low-frequency search and removal, arrays are more efficient than binary search trees.
Given a set of data, we consider using an array or a binary search tree for storage. Observing Table 7-2, the operations on a binary search tree all have logarithmic time complexity, which is stable and efficient. Arrays are more efficient than binary search trees only in scenarios involving frequent additions and infrequent searches or removals.
<p align="center"> Table 7-2 &nbsp; Efficiency comparison between arrays and search trees </p>
@ -649,9 +649,9 @@ Given a set of data, we consider using an array or a binary search tree for stor
</div>
In ideal conditions, the binary search tree is "balanced," thus any node can be found within $\log n$ loops.
Ideally, the binary search tree is "balanced," allowing any node can be found within $\log n$ loops.
However, continuously inserting and removing nodes in a binary search tree may lead to the binary tree degenerating into a chain list as shown in Figure 7-23, at which point the time complexity of various operations also degrades to $O(n)$.
However, if we continuously insert and remove nodes in a binary search tree, it may degenerate into a linked list as shown in Figure 7-23, where the time complexity of various operations also degrades to $O(n)$.
![Degradation of a binary search tree](binary_search_tree.assets/bst_degradation.png){ class="animation-figure" }

View file

@ -4,15 +4,15 @@ comments: true
# 7.2 &nbsp; Binary tree traversal
From the perspective of physical structure, a tree is a data structure based on linked lists, hence its traversal method involves accessing nodes one by one through pointers. However, a tree is a non-linear data structure, which makes traversing a tree more complex than traversing a linked list, requiring the assistance of search algorithms to achieve.
From a physical structure perspective, a tree is a data structure based on linked lists. Hence, its traversal method involves accessing nodes one by one through pointers. However, a tree is a non-linear data structure, which makes traversing a tree more complex than traversing a linked list, requiring the assistance of search algorithms.
Common traversal methods for binary trees include level-order traversal, pre-order traversal, in-order traversal, and post-order traversal, among others.
The common traversal methods for binary trees include level-order traversal, pre-order traversal, in-order traversal, and post-order traversal.
## 7.2.1 &nbsp; Level-order traversal
As shown in Figure 7-9, <u>level-order traversal</u> traverses the binary tree from top to bottom, layer by layer, and accesses nodes in each layer in a left-to-right order.
As shown in Figure 7-9, <u>level-order traversal</u> traverses the binary tree from top to bottom, layer by layer. Within each level, it visits nodes from left to right.
Level-order traversal essentially belongs to <u>breadth-first traversal</u>, also known as <u>breadth-first search (BFS)</u>, which embodies a "circumferentially outward expanding" layer-by-layer traversal method.
Level-order traversal is essentially a type of <u>breadth-first traversal</u>, also known as <u>breadth-first search (BFS)</u>, which embodies a "circumferentially outward expanding" layer-by-layer traversal method.
![Level-order traversal of a binary tree](binary_tree_traversal.assets/binary_tree_bfs.png){ class="animation-figure" }
@ -155,14 +155,14 @@ Breadth-first traversal is usually implemented with the help of a "queue". The q
### 2. &nbsp; Complexity analysis
- **Time complexity is $O(n)$**: All nodes are visited once, using $O(n)$ time, where $n$ is the number of nodes.
- **Space complexity is $O(n)$**: In the worst case, i.e., a full binary tree, before traversing to the lowest level, the queue can contain at most $(n + 1) / 2$ nodes at the same time, occupying $O(n)$ space.
- **Time complexity is $O(n)$**: All nodes are visited once, taking $O(n)$ time, where $n$ is the number of nodes.
- **Space complexity is $O(n)$**: In the worst case, i.e., a full binary tree, before traversing to the bottom level, the queue can contain at most $(n + 1) / 2$ nodes simultaneously, occupying $O(n)$ space.
## 7.2.2 &nbsp; Preorder, in-order, and post-order traversal
Correspondingly, pre-order, in-order, and post-order traversal all belong to <u>depth-first traversal</u>, also known as <u>depth-first search (DFS)</u>, which embodies a "proceed to the end first, then backtrack and continue" traversal method.
Figure 7-10 shows the working principle of performing a depth-first traversal on a binary tree. **Depth-first traversal is like walking around the perimeter of the entire binary tree**, encountering three positions at each node, corresponding to pre-order traversal, in-order traversal, and post-order traversal.
Figure 7-10 shows the working principle of performing a depth-first traversal on a binary tree. **Depth-first traversal is like "walking" around the entire binary tree**, encountering three positions at each node, corresponding to pre-order, in-order, and post-order traversal.
![Preorder, in-order, and post-order traversal of a binary search tree](binary_tree_traversal.assets/binary_tree_dfs.png){ class="animation-figure" }
@ -428,4 +428,4 @@ Figure 7-11 shows the recursive process of pre-order traversal of a binary tree,
### 2. &nbsp; Complexity analysis
- **Time complexity is $O(n)$**: All nodes are visited once, using $O(n)$ time.
- **Space complexity is $O(n)$**: In the worst case, i.e., the tree degrades into a linked list, the recursion depth reaches $n$, the system occupies $O(n)$ stack frame space.
- **Space complexity is $O(n)$**: In the worst case, i.e., the tree degenerates into a linked list, the recursion depth reaches $n$, and the system occupies $O(n)$ stack frame space.

View file

@ -1443,8 +1443,8 @@ comments: true
/* 雙向鏈結串列節點類別 */
class ListNode {
int val; // 節點值
ListNode next; // 指向後繼節點的引用
ListNode prev; // 指向前驅節點的引用
ListNode? next; // 指向後繼節點的引用
ListNode? prev; // 指向前驅節點的引用
ListNode(this.val, [this.next, this.prev]); // 建構子
}
```

View file

@ -1721,7 +1721,7 @@ comments: true
remove(index) {
if (index < 0 || index >= this.#size) throw new Error('索引越界');
let num = this.#arr[index];
// 將索引 index 之後的元素都向前移動一位
// 將索引 index 之後的元素都向前移動一位
for (let j = index; j < this.#size - 1; j++) {
this.#arr[j] = this.#arr[j + 1];
}

View file

@ -532,11 +532,7 @@ comments: true
) {
// 當放置完所有行時,記錄解
if row == n {
let mut copy_state: Vec<Vec<String>> = Vec::new();
for s_row in state.clone() {
copy_state.push(s_row);
}
res.push(copy_state);
res.push(state.clone());
return;
}
// 走訪所有列
@ -547,12 +543,12 @@ comments: true
// 剪枝:不允許該格子所在列、主對角線、次對角線上存在皇后
if !cols[col] && !diags1[diag1] && !diags2[diag2] {
// 嘗試:將皇后放置在該格子
state.get_mut(row).unwrap()[col] = "Q".into();
state[row][col] = "Q".into();
(cols[col], diags1[diag1], diags2[diag2]) = (true, true, true);
// 放置下一行
backtrack(row + 1, n, state, res, cols, diags1, diags2);
// 回退:將該格子恢復為空位
state.get_mut(row).unwrap()[col] = "#".into();
state[row][col] = "#".into();
(cols[col], diags1[diag1], diags2[diag2]) = (false, false, false);
}
}
@ -561,14 +557,7 @@ comments: true
/* 求解 n 皇后 */
fn n_queens(n: usize) -> Vec<Vec<Vec<String>>> {
// 初始化 n*n 大小的棋盤,其中 'Q' 代表皇后,'#' 代表空位
let mut state: Vec<Vec<String>> = Vec::new();
for _ in 0..n {
let mut row: Vec<String> = Vec::new();
for _ in 0..n {
row.push("#".into());
}
state.push(row);
}
let mut state: Vec<Vec<String>> = vec![vec!["#".to_string(); n]; n];
let mut cols = vec![false; n]; // 記錄列是否有皇后
let mut diags1 = vec![false; 2 * n - 1]; // 記錄主對角線上是否有皇后
let mut diags2 = vec![false; 2 * n - 1]; // 記錄次對角線上是否有皇后

View file

@ -355,7 +355,7 @@ comments: true
```rust title="subset_sum_i_naive.rs"
/* 回溯演算法:子集和 I */
fn backtrack(
mut state: Vec<i32>,
state: &mut Vec<i32>,
target: i32,
total: i32,
choices: &[i32],
@ -363,7 +363,7 @@ comments: true
) {
// 子集和等於 target 時,記錄解
if total == target {
res.push(state);
res.push(state.clone());
return;
}
// 走訪所有選擇
@ -375,7 +375,7 @@ comments: true
// 嘗試:做出選擇,更新元素和 total
state.push(choices[i]);
// 進行下一輪選擇
backtrack(state.clone(), target, total + choices[i], choices, res);
backtrack(state, target, total + choices[i], choices, res);
// 回退:撤銷選擇,恢復到之前的狀態
state.pop();
}
@ -383,10 +383,10 @@ comments: true
/* 求解子集和 I包含重複子集 */
fn subset_sum_i_naive(nums: &[i32], target: i32) -> Vec<Vec<i32>> {
let state = Vec::new(); // 狀態(子集)
let mut state = Vec::new(); // 狀態(子集)
let total = 0; // 子集和
let mut res = Vec::new(); // 結果串列(子集串列)
backtrack(state, target, total, nums, &mut res);
backtrack(&mut state, target, total, nums, &mut res);
res
}
```
@ -912,7 +912,7 @@ comments: true
```rust title="subset_sum_i.rs"
/* 回溯演算法:子集和 I */
fn backtrack(
mut state: Vec<i32>,
state: &mut Vec<i32>,
target: i32,
choices: &[i32],
start: usize,
@ -920,7 +920,7 @@ comments: true
) {
// 子集和等於 target 時,記錄解
if target == 0 {
res.push(state);
res.push(state.clone());
return;
}
// 走訪所有選擇
@ -934,7 +934,7 @@ comments: true
// 嘗試:做出選擇,更新 target, start
state.push(choices[i]);
// 進行下一輪選擇
backtrack(state.clone(), target - choices[i], choices, i, res);
backtrack(state, target - choices[i], choices, i, res);
// 回退:撤銷選擇,恢復到之前的狀態
state.pop();
}
@ -942,11 +942,11 @@ comments: true
/* 求解子集和 I */
fn subset_sum_i(nums: &mut [i32], target: i32) -> Vec<Vec<i32>> {
let state = Vec::new(); // 狀態(子集)
let mut state = Vec::new(); // 狀態(子集)
nums.sort(); // 對 nums 進行排序
let start = 0; // 走訪起始點
let mut res = Vec::new(); // 結果串列(子集串列)
backtrack(state, target, nums, start, &mut res);
backtrack(&mut state, target, nums, start, &mut res);
res
}
```
@ -1512,7 +1512,7 @@ comments: true
```rust title="subset_sum_ii.rs"
/* 回溯演算法:子集和 II */
fn backtrack(
mut state: Vec<i32>,
state: &mut Vec<i32>,
target: i32,
choices: &[i32],
start: usize,
@ -1520,7 +1520,7 @@ comments: true
) {
// 子集和等於 target 時,記錄解
if target == 0 {
res.push(state);
res.push(state.clone());
return;
}
// 走訪所有選擇
@ -1539,7 +1539,7 @@ comments: true
// 嘗試:做出選擇,更新 target, start
state.push(choices[i]);
// 進行下一輪選擇
backtrack(state.clone(), target - choices[i], choices, i, res);
backtrack(state, target - choices[i], choices, i + 1, res);
// 回退:撤銷選擇,恢復到之前的狀態
state.pop();
}
@ -1547,11 +1547,11 @@ comments: true
/* 求解子集和 II */
fn subset_sum_ii(nums: &mut [i32], target: i32) -> Vec<Vec<i32>> {
let state = Vec::new(); // 狀態(子集)
let mut state = Vec::new(); // 狀態(子集)
nums.sort(); // 對 nums 進行排序
let start = 0; // 走訪起始點
let mut res = Vec::new(); // 結果串列(子集串列)
backtrack(state, target, nums, start, &mut res);
backtrack(&mut state, target, nums, start, &mut res);
res
}
```

View file

@ -69,13 +69,6 @@ comments: true
=== "C++"
```cpp title="quick_sort.cpp"
/* 元素交換 */
void swap(vector<int> &nums, int i, int j) {
int tmp = nums[i];
nums[i] = nums[j];
nums[j] = tmp;
}
/* 哨兵劃分 */
int partition(vector<int> &nums, int left, int right) {
// 以 nums[left] 為基準數
@ -85,9 +78,9 @@ comments: true
j--; // 從右向左找首個小於基準數的元素
while (i < j && nums[i] <= nums[left])
i++; // 從左向右找首個大於基準數的元素
swap(nums, i, j); // 交換這兩個元素
swap(nums[i], nums[j]); // 交換這兩個元素
}
swap(nums, i, left); // 將基準數交換至兩子陣列的分界線
swap(nums[i], nums[left]); // 將基準數交換至兩子陣列的分界線
return i; // 返回基準數的索引
}
```
@ -721,7 +714,7 @@ comments: true
// 選取三個候選元素的中位數
int med = medianThree(nums, left, (left + right) / 2, right);
// 將中位數交換至陣列最左端
swap(nums, left, med);
swap(nums[left], nums[med]);
// 以 nums[left] 為基準數
int i = left, j = right;
while (i < j) {
@ -729,9 +722,9 @@ comments: true
j--; // 從右向左找首個小於基準數的元素
while (i < j && nums[i] <= nums[left])
i++; // 從左向右找首個大於基準數的元素
swap(nums, i, j); // 交換這兩個元素
swap(nums[i], nums[j]); // 交換這兩個元素
}
swap(nums, i, left); // 將基準數交換至兩子陣列的分界線
swap(nums[i], nums[left]); // 將基準數交換至兩子陣列的分界線
return i; // 返回基準數的索引
}
```

View file

@ -665,7 +665,7 @@ comments: true
### 2. &nbsp; 完全二元樹
如圖 7-5 所示,<u>完全二元樹complete binary tree</u>只有最底層的節點未被填滿,且最底層節點儘量靠左填充。
如圖 7-5 所示,<u>完全二元樹complete binary tree</u>只有最底層的節點未被填滿,且最底層節點儘量靠左填充。請注意,完美二元樹也是一棵完全二元樹。
![完全二元樹](binary_tree.assets/complete_binary_tree.png){ class="animation-figure" }