diff --git a/docs-en/chapter_array_and_linkedlist/array.md b/docs-en/chapter_array_and_linkedlist/array.md
index e5e69e27c..5857b780d 100755
--- a/docs-en/chapter_array_and_linkedlist/array.md
+++ b/docs-en/chapter_array_and_linkedlist/array.md
@@ -290,7 +290,7 @@ Accessing elements in an array is highly efficient, allowing us to randomly acce
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
### 3. Inserting Elements
@@ -472,7 +472,7 @@ It's important to note that since the length of an array is fixed, inserting an
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
### 4. Deleting Elements
@@ -631,7 +631,7 @@ Note that after deletion, the last element becomes "meaningless", so we do not n
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
Overall, the insertion and deletion operations in arrays have the following disadvantages:
@@ -855,7 +855,7 @@ In most programming languages, we can traverse an array either by indices or by
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
### 6. Finding Elements
@@ -1023,7 +1023,7 @@ Since arrays are linear data structures, this operation is known as "linear sear
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
### 7. Expanding Arrays
@@ -1233,7 +1233,7 @@ To expand an array, we need to create a larger array and then copy the elements
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
## 4.1.2 Advantages and Limitations of Arrays
diff --git a/docs-en/chapter_array_and_linkedlist/linked_list.md b/docs-en/chapter_array_and_linkedlist/linked_list.md
index 9f1ce26d7..64cbb151b 100755
--- a/docs-en/chapter_array_and_linkedlist/linked_list.md
+++ b/docs-en/chapter_array_and_linkedlist/linked_list.md
@@ -543,7 +543,7 @@ In contrast, the time complexity of inserting an element in an array is $O(n)$,
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
### 3. Deleting a Node
@@ -733,7 +733,7 @@ Note that although node `P` still points to `n1` after the deletion operation is
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
### 4. Accessing Nodes
@@ -912,7 +912,7 @@ Note that although node `P` still points to `n1` after the deletion operation is
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
### 5. Finding Nodes
@@ -1114,7 +1114,7 @@ Traverse the linked list to find a node with a value equal to `target`, and outp
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
## 4.2.2 Arrays vs. Linked Lists
diff --git a/docs-en/chapter_array_and_linkedlist/list.md b/docs-en/chapter_array_and_linkedlist/list.md
index 7e951aab4..342cbafde 100755
--- a/docs-en/chapter_array_and_linkedlist/list.md
+++ b/docs-en/chapter_array_and_linkedlist/list.md
@@ -930,7 +930,7 @@ To deepen the understanding of how lists work, let's try implementing a simple v
if index < 0 or index >= self._size:
raise IndexError("索引越界")
num = self._arr[index]
- # 索引 i 之后的元素都向前移动一位
+ # 将索引 index 之后的元素都向前移动一位
for j in range(index, self._size - 1):
self._arr[j] = self._arr[j + 1]
# 更新元素数量
@@ -1028,7 +1028,7 @@ To deepen the understanding of how lists work, let's try implementing a simple v
if (index < 0 || index >= size())
throw out_of_range("索引越界");
int num = arr[index];
- // 索引 i 之后的元素都向前移动一位
+ // 将索引 index 之后的元素都向前移动一位
for (int j = index; j < size() - 1; j++) {
arr[j] = arr[j + 1];
}
@@ -1136,7 +1136,7 @@ To deepen the understanding of how lists work, let's try implementing a simple v
if (index < 0 || index >= size)
throw new IndexOutOfBoundsException("索引越界");
int num = arr[index];
- // 将索引 index 之后的元素都向前移动一位
+ 将索引 index 之后的元素都向前移动一位
for (int j = index; j < size - 1; j++) {
arr[j] = arr[j + 1];
}
@@ -1238,7 +1238,7 @@ To deepen the understanding of how lists work, let's try implementing a simple v
if (index < 0 || index >= arrSize)
throw new IndexOutOfRangeException("索引越界");
int num = arr[index];
- // 将索引 index 之后的元素都向前移动一位
+ 将索引 index 之后的元素都向前移动一位
for (int j = index; j < arrSize - 1; j++) {
arr[j] = arr[j + 1];
}
@@ -1351,7 +1351,7 @@ To deepen the understanding of how lists work, let's try implementing a simple v
panic("索引越界")
}
num := l.arr[index]
- // 索引 i 之后的元素都向前移动一位
+ // 将索引 index 之后的元素都向前移动一位
for j := index; j < l.arrSize-1; j++ {
l.arr[j] = l.arr[j+1]
}
@@ -1454,7 +1454,7 @@ To deepen the understanding of how lists work, let's try implementing a simple v
fatalError("索引越界")
}
let num = arr[index]
- // 将索引 index 之后的元素都向前移动一位
+ 将索引 index 之后的元素都向前移动一位
for j in index ..< (_size - 1) {
arr[j] = arr[j + 1]
}
@@ -1552,7 +1552,7 @@ To deepen the understanding of how lists work, let's try implementing a simple v
remove(index) {
if (index < 0 || index >= this.#size) throw new Error('索引越界');
let num = this.#arr[index];
- // 将索引 index 之后的元素都向前移动一位
+ 将索引 index 之后的元素都向前移动一位
for (let j = index; j < this.#size - 1; j++) {
this.#arr[j] = this.#arr[j + 1];
}
@@ -1652,7 +1652,7 @@ To deepen the understanding of how lists work, let's try implementing a simple v
public remove(index: number): number {
if (index < 0 || index >= this._size) throw new Error('索引越界');
let num = this.arr[index];
- // 将索引 index 之后的元素都向前移动一位
+ 将索引 index 之后的元素都向前移动一位
for (let j = index; j < this._size - 1; j++) {
this.arr[j] = this.arr[j + 1];
}
@@ -1745,7 +1745,7 @@ To deepen the understanding of how lists work, let's try implementing a simple v
int remove(int index) {
if (index >= _size) throw RangeError('索引越界');
int _num = _arr[index];
- // 将索引 index 之后的元素都向前移动一位
+ 将索引 index 之后的元素都向前移动一位
for (var j = index; j < _size - 1; j++) {
_arr[j] = _arr[j + 1];
}
@@ -1858,7 +1858,7 @@ To deepen the understanding of how lists work, let's try implementing a simple v
pub fn remove(&mut self, index: usize) -> i32 {
if index >= self.size() {panic!("索引越界")};
let num = self.arr[index];
- // 将索引 index 之后的元素都向前移动一位
+ 将索引 index 之后的元素都向前移动一位
for j in (index..self.size - 1) {
self.arr[j] = self.arr[j + 1];
}
@@ -2081,7 +2081,7 @@ To deepen the understanding of how lists work, let's try implementing a simple v
pub fn remove(self: *Self, index: usize) T {
if (index < 0 or index >= self.size()) @panic("索引越界");
var num = self.arr[index];
- // 索引 i 之后的元素都向前移动一位
+ // 将索引 index 之后的元素都向前移动一位
var j = index;
while (j < self.size() - 1) : (j += 1) {
self.arr[j] = self.arr[j + 1];
@@ -2118,3 +2118,8 @@ To deepen the understanding of how lists work, let's try implementing a simple v
};
}
```
+
+??? pythontutor "Visualizing Code"
+
+
+ 全屏观看 >
diff --git a/docs-en/chapter_computational_complexity/iteration_and_recursion.md b/docs-en/chapter_computational_complexity/iteration_and_recursion.md
index b2c466fc7..e20437a9c 100644
--- a/docs-en/chapter_computational_complexity/iteration_and_recursion.md
+++ b/docs-en/chapter_computational_complexity/iteration_and_recursion.md
@@ -185,7 +185,7 @@ The following function implements the sum $1 + 2 + \dots + n$ using a `for` loop
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
The flowchart below represents this sum function.
@@ -396,7 +396,7 @@ Below we use a `while` loop to implement the sum $1 + 2 + \dots + n$:
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
**The `while` loop is more flexible than the `for` loop**. In a `while` loop, we can freely design the initialization and update steps of the condition variable.
@@ -620,7 +620,7 @@ For example, in the following code, the condition variable $i$ is updated twice
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
Overall, **`for` loops are more concise, while `while` loops are more flexible**. Both can implement iterative structures. Which one to use should be determined based on the specific requirements of the problem.
@@ -839,7 +839,7 @@ We can nest one loop structure within another. Below is an example using `for` l
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
The flowchart below represents this nested loop.
@@ -1049,7 +1049,7 @@ Observe the following code, where calling the function `recur(n)` completes the
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
The Figure 2-3 shows the recursive process of this function.
@@ -1250,7 +1250,7 @@ For example, in calculating $1 + 2 + \dots + n$, we can make the result variable
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
The execution process of tail recursion is shown in the following figure. Comparing regular recursion and tail recursion, the point of the summation operation is different.
@@ -1463,7 +1463,7 @@ Using the recursive relation, and considering the first two numbers as terminati
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
Observing the above code, we see that it recursively calls two functions within itself, **meaning that one call generates two branching calls**. As illustrated below, this continuous recursive calling eventually creates a "recursion tree" with a depth of $n$.
@@ -1786,7 +1786,7 @@ Therefore, **we can use an explicit stack to simulate the behavior of the call s
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
Observing the above code, when recursion is transformed into iteration, the code becomes more complex. Although iteration and recursion can often be transformed into each other, it's not always advisable to do so for two reasons:
diff --git a/docs-en/chapter_computational_complexity/space_complexity.md b/docs-en/chapter_computational_complexity/space_complexity.md
index f803786a5..3d292ed69 100644
--- a/docs-en/chapter_computational_complexity/space_complexity.md
+++ b/docs-en/chapter_computational_complexity/space_complexity.md
@@ -1065,7 +1065,7 @@ Note that memory occupied by initializing variables or calling functions in a lo
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
### 2. Linear Order $O(n)$
@@ -1334,7 +1334,7 @@ Linear order is common in arrays, linked lists, stacks, queues, etc., where the
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
As shown below, this function's recursive depth is $n$, meaning there are $n$ instances of unreturned `linear_recur()` function, using $O(n)$ size of stack frame space:
@@ -1480,7 +1480,7 @@ As shown below, this function's recursive depth is $n$, meaning there are $n$ in
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
![Recursive Function Generating Linear Order Space Complexity](space_complexity.assets/space_complexity_recursive_linear.png){ class="animation-figure" }
@@ -1705,7 +1705,7 @@ Quadratic order is common in matrices and graphs, where the number of elements i
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
As shown below, the recursive depth of this function is $n$, and in each recursive call, an array is initialized with lengths $n$, $n-1$, $\dots$, $2$, $1$, averaging $n/2$, thus overall occupying $O(n^2)$ space:
@@ -1869,7 +1869,7 @@ As shown below, the recursive depth of this function is $n$, and in each recursi
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
![Recursive Function Generating Quadratic Order Space Complexity](space_complexity.assets/space_complexity_recursive_quadratic.png){ class="animation-figure" }
@@ -2047,7 +2047,7 @@ Exponential order is common in binary trees. Observe the below image, a "full bi
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
![Full Binary Tree Generating Exponential Order Space Complexity](space_complexity.assets/space_complexity_exponential.png){ class="animation-figure" }
diff --git a/docs-en/chapter_computational_complexity/time_complexity.md b/docs-en/chapter_computational_complexity/time_complexity.md
index 1e81ee035..78b43110c 100644
--- a/docs-en/chapter_computational_complexity/time_complexity.md
+++ b/docs-en/chapter_computational_complexity/time_complexity.md
@@ -1122,7 +1122,7 @@ Constant order means the number of operations is independent of the input data s
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
### 2. Linear Order $O(n)$
@@ -1279,7 +1279,7 @@ Linear order indicates the number of operations grows linearly with the input da
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
Operations like array traversal and linked list traversal have a time complexity of $O(n)$, where $n$ is the length of the array or list:
@@ -1452,7 +1452,7 @@ Operations like array traversal and linked list traversal have a time complexity
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
It's important to note that **the input data size $n$ should be determined based on the type of input data**. For example, in the first example, $n$ represents the input data size, while in the second example, the length of the array $n$ is the data size.
@@ -1654,7 +1654,7 @@ Quadratic order means the number of operations grows quadratically with the inpu
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
The following image compares constant order, linear order, and quadratic order time complexities.
@@ -1939,7 +1939,7 @@ For instance, in bubble sort, the outer loop runs $n - 1$ times, and the inner l
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
### 4. Exponential Order $O(2^n)$
@@ -2172,7 +2172,7 @@ The following image and code simulate the cell division process, with a time com
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
![Exponential Order Time Complexity](time_complexity.assets/time_complexity_exponential.png){ class="animation-figure" }
@@ -2312,7 +2312,7 @@ In practice, exponential order often appears in recursive functions. For example
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
Exponential order growth is extremely rapid and is commonly seen in exhaustive search methods (brute force, backtracking, etc.). For large-scale problems, exponential order is unacceptable, often requiring dynamic programming or greedy algorithms as solutions.
@@ -2494,7 +2494,7 @@ The following image and code simulate the "halving each round" process, with a t
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
![Logarithmic Order Time Complexity](time_complexity.assets/time_complexity_logarithmic.png){ class="animation-figure" }
@@ -2634,7 +2634,7 @@ Like exponential order, logarithmic order also frequently appears in recursive f
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
Logarithmic order is typical in algorithms based on the divide-and-conquer strategy, embodying the "split into many" and "simplify complex problems" approach. It's slow-growing and is the most ideal time complexity after constant order.
@@ -2832,7 +2832,7 @@ Linear-logarithmic order often appears in nested loops, with the complexities of
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
The image below demonstrates how linear-logarithmic order is generated. Each level of a binary tree has $n$ operations, and the tree has $\log_2 n + 1$ levels, resulting in a time complexity of $O(n \log n)$.
@@ -3043,7 +3043,7 @@ Factorials are typically implemented using recursion. As shown in the image and
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
![Factorial Order Time Complexity](time_complexity.assets/time_complexity_factorial.png){ class="animation-figure" }
@@ -3410,7 +3410,7 @@ The "worst-case time complexity" corresponds to the asymptotic upper bound, deno
??? pythontutor "Visualizing Code"
- 全屏观看 >
+ 全屏观看 >
It's important to note that the best-case time complexity is rarely used in practice, as it is usually only achievable under very low probabilities and might be misleading. **The worst-case time complexity is more practical as it provides a safety value for efficiency**, allowing us to confidently use the algorithm.
diff --git a/docs/chapter_array_and_linkedlist/array.md b/docs/chapter_array_and_linkedlist/array.md
index 015c5415c..92b139fd9 100755
--- a/docs/chapter_array_and_linkedlist/array.md
+++ b/docs/chapter_array_and_linkedlist/array.md
@@ -294,7 +294,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
### 3. 插入元素
@@ -476,7 +476,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
### 4. 删除元素
@@ -635,7 +635,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
总的来看,数组的插入与删除操作有以下缺点。
@@ -859,7 +859,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
### 6. 查找元素
@@ -1027,7 +1027,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
### 7. 扩容数组
@@ -1237,7 +1237,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
## 4.1.2 数组的优点与局限性
diff --git a/docs/chapter_array_and_linkedlist/linked_list.md b/docs/chapter_array_and_linkedlist/linked_list.md
index 6bf4eb49d..f2b5bbc35 100755
--- a/docs/chapter_array_and_linkedlist/linked_list.md
+++ b/docs/chapter_array_and_linkedlist/linked_list.md
@@ -547,7 +547,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
### 3. 删除节点
@@ -737,7 +737,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
### 4. 访问节点
@@ -916,7 +916,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
### 5. 查找节点
@@ -1118,7 +1118,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
## 4.2.2 数组 vs. 链表
diff --git a/docs/chapter_array_and_linkedlist/list.md b/docs/chapter_array_and_linkedlist/list.md
index 2c42e5f5a..4c4fd4c64 100755
--- a/docs/chapter_array_and_linkedlist/list.md
+++ b/docs/chapter_array_and_linkedlist/list.md
@@ -954,7 +954,7 @@ comments: true
if index < 0 or index >= self._size:
raise IndexError("索引越界")
num = self._arr[index]
- # 索引 i 之后的元素都向前移动一位
+ # 将索引 index 之后的元素都向前移动一位
for j in range(index, self._size - 1):
self._arr[j] = self._arr[j + 1]
# 更新元素数量
@@ -1052,7 +1052,7 @@ comments: true
if (index < 0 || index >= size())
throw out_of_range("索引越界");
int num = arr[index];
- // 索引 i 之后的元素都向前移动一位
+ // 将索引 index 之后的元素都向前移动一位
for (int j = index; j < size() - 1; j++) {
arr[j] = arr[j + 1];
}
@@ -1160,7 +1160,7 @@ comments: true
if (index < 0 || index >= size)
throw new IndexOutOfBoundsException("索引越界");
int num = arr[index];
- // 将索引 index 之后的元素都向前移动一位
+ 将索引 index 之后的元素都向前移动一位
for (int j = index; j < size - 1; j++) {
arr[j] = arr[j + 1];
}
@@ -1262,7 +1262,7 @@ comments: true
if (index < 0 || index >= arrSize)
throw new IndexOutOfRangeException("索引越界");
int num = arr[index];
- // 将索引 index 之后的元素都向前移动一位
+ 将索引 index 之后的元素都向前移动一位
for (int j = index; j < arrSize - 1; j++) {
arr[j] = arr[j + 1];
}
@@ -1375,7 +1375,7 @@ comments: true
panic("索引越界")
}
num := l.arr[index]
- // 索引 i 之后的元素都向前移动一位
+ // 将索引 index 之后的元素都向前移动一位
for j := index; j < l.arrSize-1; j++ {
l.arr[j] = l.arr[j+1]
}
@@ -1478,7 +1478,7 @@ comments: true
fatalError("索引越界")
}
let num = arr[index]
- // 将索引 index 之后的元素都向前移动一位
+ 将索引 index 之后的元素都向前移动一位
for j in index ..< (_size - 1) {
arr[j] = arr[j + 1]
}
@@ -1576,7 +1576,7 @@ comments: true
remove(index) {
if (index < 0 || index >= this.#size) throw new Error('索引越界');
let num = this.#arr[index];
- // 将索引 index 之后的元素都向前移动一位
+ 将索引 index 之后的元素都向前移动一位
for (let j = index; j < this.#size - 1; j++) {
this.#arr[j] = this.#arr[j + 1];
}
@@ -1676,7 +1676,7 @@ comments: true
public remove(index: number): number {
if (index < 0 || index >= this._size) throw new Error('索引越界');
let num = this.arr[index];
- // 将索引 index 之后的元素都向前移动一位
+ 将索引 index 之后的元素都向前移动一位
for (let j = index; j < this._size - 1; j++) {
this.arr[j] = this.arr[j + 1];
}
@@ -1769,7 +1769,7 @@ comments: true
int remove(int index) {
if (index >= _size) throw RangeError('索引越界');
int _num = _arr[index];
- // 将索引 index 之后的元素都向前移动一位
+ 将索引 index 之后的元素都向前移动一位
for (var j = index; j < _size - 1; j++) {
_arr[j] = _arr[j + 1];
}
@@ -1882,7 +1882,7 @@ comments: true
pub fn remove(&mut self, index: usize) -> i32 {
if index >= self.size() {panic!("索引越界")};
let num = self.arr[index];
- // 将索引 index 之后的元素都向前移动一位
+ 将索引 index 之后的元素都向前移动一位
for j in (index..self.size - 1) {
self.arr[j] = self.arr[j + 1];
}
@@ -2105,7 +2105,7 @@ comments: true
pub fn remove(self: *Self, index: usize) T {
if (index < 0 or index >= self.size()) @panic("索引越界");
var num = self.arr[index];
- // 索引 i 之后的元素都向前移动一位
+ // 将索引 index 之后的元素都向前移动一位
var j = index;
while (j < self.size() - 1) : (j += 1) {
self.arr[j] = self.arr[j + 1];
@@ -2142,3 +2142,8 @@ comments: true
};
}
```
+
+??? pythontutor "可视化运行"
+
+
+ 全屏观看 >
diff --git a/docs/chapter_backtracking/backtracking_algorithm.md b/docs/chapter_backtracking/backtracking_algorithm.md
index 2bd52777e..d9e6c39c0 100644
--- a/docs/chapter_backtracking/backtracking_algorithm.md
+++ b/docs/chapter_backtracking/backtracking_algorithm.md
@@ -209,7 +209,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
![在前序遍历中搜索节点](backtracking_algorithm.assets/preorder_find_nodes.png){ class="animation-figure" }
@@ -480,7 +480,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
在每次“尝试”中,我们通过将当前节点添加进 `path` 来记录路径;而在“回退”前,我们需要将该节点从 `path` 中弹出,**以恢复本次尝试之前的状态**。
@@ -792,7 +792,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
“剪枝”是一个非常形象的名词。如图 13-3 所示,在搜索过程中,**我们“剪掉”了不满足约束条件的搜索分支**,避免许多无意义的尝试,从而提高了搜索效率。
diff --git a/docs/chapter_backtracking/n_queens_problem.md b/docs/chapter_backtracking/n_queens_problem.md
index eda66d5bf..16f467e26 100644
--- a/docs/chapter_backtracking/n_queens_problem.md
+++ b/docs/chapter_backtracking/n_queens_problem.md
@@ -665,7 +665,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
逐行放置 $n$ 次,考虑列约束,则从第一行到最后一行分别有 $n$、$n-1$、$\dots$、$2$、$1$ 个选择,**因此时间复杂度为 $O(n!)$** 。实际上,根据对角线约束的剪枝也能够大幅缩小搜索空间,因而搜索效率往往优于以上时间复杂度。
diff --git a/docs/chapter_backtracking/permutations_problem.md b/docs/chapter_backtracking/permutations_problem.md
index b72c481ca..6a1574d71 100644
--- a/docs/chapter_backtracking/permutations_problem.md
+++ b/docs/chapter_backtracking/permutations_problem.md
@@ -474,7 +474,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
## 13.2.2 考虑相等元素的情况
@@ -950,7 +950,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
假设元素两两之间互不相同,则 $n$ 个元素共有 $n!$ 种排列(阶乘);在记录结果时,需要复制长度为 $n$ 的列表,使用 $O(n)$ 时间。**因此时间复杂度为 $O(n!n)$** 。
diff --git a/docs/chapter_backtracking/subset_sum_problem.md b/docs/chapter_backtracking/subset_sum_problem.md
index 3064e1310..90ab8ed99 100644
--- a/docs/chapter_backtracking/subset_sum_problem.md
+++ b/docs/chapter_backtracking/subset_sum_problem.md
@@ -431,7 +431,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
向以上代码输入数组 $[3, 4, 5]$ 和目标元素 $9$ ,输出结果为 $[3, 3, 3], [4, 5], [5, 4]$ 。**虽然成功找出了所有和为 $9$ 的子集,但其中存在重复的子集 $[4, 5]$ 和 $[5, 4]$** 。
@@ -914,7 +914,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
图 13-12 所示为将数组 $[3, 4, 5]$ 和目标元素 $9$ 输入以上代码后的整体回溯过程。
@@ -1438,7 +1438,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
图 13-14 展示了数组 $[4, 4, 5]$ 和目标元素 $9$ 的回溯过程,共包含四种剪枝操作。请你将图示与代码注释相结合,理解整个搜索过程,以及每种剪枝操作是如何工作的。
diff --git a/docs/chapter_computational_complexity/iteration_and_recursion.md b/docs/chapter_computational_complexity/iteration_and_recursion.md
index 6098f3640..6c5f03f0d 100644
--- a/docs/chapter_computational_complexity/iteration_and_recursion.md
+++ b/docs/chapter_computational_complexity/iteration_and_recursion.md
@@ -185,7 +185,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
图 2-1 是该求和函数的流程框图。
@@ -396,7 +396,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
**`while` 循环比 `for` 循环的自由度更高**。在 `while` 循环中,我们可以自由地设计条件变量的初始化和更新步骤。
@@ -620,7 +620,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
总的来说,**`for` 循环的代码更加紧凑,`while` 循环更加灵活**,两者都可以实现迭代结构。选择使用哪一个应该根据特定问题的需求来决定。
@@ -839,7 +839,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
图 2-2 是该嵌套循环的流程框图。
@@ -1049,7 +1049,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
图 2-3 展示了该函数的递归过程。
@@ -1250,7 +1250,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
尾递归的执行过程如图 2-5 所示。对比普通递归和尾递归,两者的求和操作的执行点是不同的。
@@ -1463,7 +1463,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
观察以上代码,我们在函数内递归调用了两个函数,**这意味着从一个调用产生了两个调用分支**。如图 2-6 所示,这样不断递归调用下去,最终将产生一棵层数为 $n$ 的「递归树 recursion tree」。
@@ -1786,7 +1786,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
观察以上代码,当递归转化为迭代后,代码变得更加复杂了。尽管迭代和递归在很多情况下可以互相转化,但不一定值得这样做,有以下两点原因。
diff --git a/docs/chapter_computational_complexity/space_complexity.md b/docs/chapter_computational_complexity/space_complexity.md
index 4d169e94b..b707a4322 100755
--- a/docs/chapter_computational_complexity/space_complexity.md
+++ b/docs/chapter_computational_complexity/space_complexity.md
@@ -1064,7 +1064,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
### 2. 线性阶 $O(n)$
@@ -1333,7 +1333,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
如图 2-17 所示,此函数的递归深度为 $n$ ,即同时存在 $n$ 个未返回的 `linear_recur()` 函数,使用 $O(n)$ 大小的栈帧空间:
@@ -1479,7 +1479,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
![递归函数产生的线性阶空间复杂度](space_complexity.assets/space_complexity_recursive_linear.png){ class="animation-figure" }
@@ -1704,7 +1704,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
如图 2-18 所示,该函数的递归深度为 $n$ ,在每个递归函数中都初始化了一个数组,长度分别为 $n$、$n-1$、$\dots$、$2$、$1$ ,平均长度为 $n / 2$ ,因此总体占用 $O(n^2)$ 空间:
@@ -1868,7 +1868,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
![递归函数产生的平方阶空间复杂度](space_complexity.assets/space_complexity_recursive_quadratic.png){ class="animation-figure" }
@@ -2046,7 +2046,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
![满二叉树产生的指数阶空间复杂度](space_complexity.assets/space_complexity_exponential.png){ class="animation-figure" }
diff --git a/docs/chapter_computational_complexity/time_complexity.md b/docs/chapter_computational_complexity/time_complexity.md
index 4dd371666..4242619c1 100755
--- a/docs/chapter_computational_complexity/time_complexity.md
+++ b/docs/chapter_computational_complexity/time_complexity.md
@@ -1126,7 +1126,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
### 2. 线性阶 $O(n)$
@@ -1283,7 +1283,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
遍历数组和遍历链表等操作的时间复杂度均为 $O(n)$ ,其中 $n$ 为数组或链表的长度:
@@ -1456,7 +1456,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
值得注意的是,**输入数据大小 $n$ 需根据输入数据的类型来具体确定**。比如在第一个示例中,变量 $n$ 为输入数据大小;在第二个示例中,数组长度 $n$ 为数据大小。
@@ -1658,7 +1658,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
图 2-10 对比了常数阶、线性阶和平方阶三种时间复杂度。
@@ -1943,7 +1943,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
### 4. 指数阶 $O(2^n)$
@@ -2176,7 +2176,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
![指数阶的时间复杂度](time_complexity.assets/time_complexity_exponential.png){ class="animation-figure" }
@@ -2316,7 +2316,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
指数阶增长非常迅速,在穷举法(暴力搜索、回溯等)中比较常见。对于数据规模较大的问题,指数阶是不可接受的,通常需要使用动态规划或贪心算法等来解决。
@@ -2498,7 +2498,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
![对数阶的时间复杂度](time_complexity.assets/time_complexity_logarithmic.png){ class="animation-figure" }
@@ -2638,7 +2638,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
对数阶常出现于基于分治策略的算法中,体现了“一分为多”和“化繁为简”的算法思想。它增长缓慢,是仅次于常数阶的理想的时间复杂度。
@@ -2836,7 +2836,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
图 2-13 展示了线性对数阶的生成方式。二叉树的每一层的操作总数都为 $n$ ,树共有 $\log_2 n + 1$ 层,因此时间复杂度为 $O(n \log n)$ 。
@@ -3047,7 +3047,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
![阶乘阶的时间复杂度](time_complexity.assets/time_complexity_factorial.png){ class="animation-figure" }
@@ -3414,7 +3414,7 @@ $$
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
值得说明的是,我们在实际中很少使用最佳时间复杂度,因为通常只有在很小概率下才能达到,可能会带来一定的误导性。**而最差时间复杂度更为实用,因为它给出了一个效率安全值**,让我们可以放心地使用算法。
diff --git a/docs/chapter_hashing/hash_algorithm.md b/docs/chapter_hashing/hash_algorithm.md
index fad5185b1..d5e64e30e 100644
--- a/docs/chapter_hashing/hash_algorithm.md
+++ b/docs/chapter_hashing/hash_algorithm.md
@@ -569,7 +569,7 @@ index = hash(key) % capacity
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
观察发现,每种哈希算法的最后一步都是对大质数 $1000000007$ 取模,以确保哈希值在合适的范围内。值得思考的是,为什么要强调对质数取模,或者说对合数取模的弊端是什么?这是一个有趣的问题。
diff --git a/docs/chapter_hashing/hash_collision.md b/docs/chapter_hashing/hash_collision.md
index e7693d7bb..25f49437d 100644
--- a/docs/chapter_hashing/hash_collision.md
+++ b/docs/chapter_hashing/hash_collision.md
@@ -1319,7 +1319,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
值得注意的是,当链表很长时,查询效率 $O(n)$ 很差。**此时可以将链表转换为“AVL 树”或“红黑树”**,从而将查询操作的时间复杂度优化至 $O(\log n)$ 。
diff --git a/docs/chapter_hashing/hash_map.md b/docs/chapter_hashing/hash_map.md
index b401387ff..f8301c2a7 100755
--- a/docs/chapter_hashing/hash_map.md
+++ b/docs/chapter_hashing/hash_map.md
@@ -1644,7 +1644,7 @@ index = hash(key) % capacity
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
## 6.1.3 哈希冲突与扩容
diff --git a/docs/chapter_stack_and_queue/queue.md b/docs/chapter_stack_and_queue/queue.md
index 0cd9ab0c5..ba4a793d8 100755
--- a/docs/chapter_stack_and_queue/queue.md
+++ b/docs/chapter_stack_and_queue/queue.md
@@ -1214,7 +1214,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
### 2. 基于数组的实现
@@ -2130,7 +2130,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
以上实现的队列仍然具有局限性:其长度不可变。然而,这个问题不难解决,我们可以将数组替换为动态数组,从而引入扩容机制。有兴趣的读者可以尝试自行实现。
diff --git a/docs/chapter_stack_and_queue/stack.md b/docs/chapter_stack_and_queue/stack.md
index 61f788648..cbd67e692 100755
--- a/docs/chapter_stack_and_queue/stack.md
+++ b/docs/chapter_stack_and_queue/stack.md
@@ -1086,7 +1086,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
### 2. 基于数组的实现
@@ -1697,7 +1697,7 @@ comments: true
??? pythontutor "可视化运行"
- 全屏观看 >
+ 全屏观看 >
## 5.1.3 两种实现对比
diff --git a/docs/chapter_tree/array_representation_of_tree.md b/docs/chapter_tree/array_representation_of_tree.md
index 7606e931f..b57bbc4e2 100644
--- a/docs/chapter_tree/array_representation_of_tree.md
+++ b/docs/chapter_tree/array_representation_of_tree.md
@@ -154,7 +154,7 @@ comments: true
self._tree = list(arr)
def size(self):
- """数组长度"""
+ """列表容量"""
return len(self._tree)
def val(self, i: int) -> int:
@@ -231,7 +231,7 @@ comments: true
tree = arr;
}
- /* 数组长度 */
+ /* 列表容量 */
int size() {
return tree.size();
}
@@ -326,7 +326,7 @@ comments: true
tree = new ArrayList<>(arr);
}
- /* 数组长度 */
+ /* 列表容量 */
public int size() {
return tree.size();
}
@@ -413,7 +413,7 @@ comments: true
class ArrayBinaryTree(List arr) {
List tree = new(arr);
- /* 数组长度 */
+ /* 列表容量 */
public int Size() {
return tree.Count;
}
@@ -508,7 +508,7 @@ comments: true
}
}
- /* 数组长度 */
+ /* 列表容量 */
func (abt *arrayBinaryTree) size() int {
return len(abt.tree)
}
@@ -605,7 +605,7 @@ comments: true
tree = arr
}
- /* 数组长度 */
+ /* 列表容量 */
func size() -> Int {
tree.count
}
@@ -703,7 +703,7 @@ comments: true
this.#tree = arr;
}
- /* 数组长度 */
+ /* 列表容量 */
size() {
return this.#tree.length;
}
@@ -789,7 +789,7 @@ comments: true
this.#tree = arr;
}
- /* 数组长度 */
+ /* 列表容量 */
size(): number {
return this.#tree.length;
}
@@ -873,7 +873,7 @@ comments: true
/* 构造方法 */
ArrayBinaryTree(this._tree);
- /* 数组长度 */
+ /* 列表容量 */
int size() {
return _tree.length;
}
@@ -972,7 +972,7 @@ comments: true
Self { tree: arr }
}
- /* 数组长度 */
+ /* 列表容量 */
fn size(&self) -> i32 {
self.tree.len() as i32
}
@@ -1083,7 +1083,7 @@ comments: true
free(abt);
}
- /* 数组长度 */
+ /* 列表容量 */
int size(ArrayBinaryTree *abt) {
return abt->size;
}