| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463 |
- #!/usr/bin/env python3
- """编译单日课程(周六 10:00 批处理)"""
- import argparse
- import os
- import sys
- from pathlib import Path
- from jinja2 import Template
- import yaml
# Add the project root to sys.path so project-local imports resolve
- PROJECT_ROOT = Path(__file__).resolve().parent.parent.parent.parent
- sys.path.insert(0, str(PROJECT_ROOT))
def load_config() -> dict:
    """Load and return the mathlab skill configuration from its YAML file."""
    config_file = PROJECT_ROOT / "skills" / "mathlab" / "config.yaml"
    # read_text + safe_load replaces the explicit open/close dance
    return yaml.safe_load(config_file.read_text(encoding="utf-8"))
def generate_symbol_decoder(topic: str) -> str:
    """Return the HTML "symbol decoder" fragment for *topic*.

    Each row maps a math symbol used in the lesson to the corresponding
    code identifier.

    Args:
        topic: Chinese topic name.

    Returns:
        An HTML fragment of ``<div class="symbol-map">`` rows; unknown
        topics fall back to the "感知机" (perceptron) table so the template
        always has content.
    """
    # NOTE: ``\sign`` is not a KaTeX/LaTeX command (it would raise a KaTeX
    # parse error in the rendered page); ``\operatorname{sign}`` is the
    # supported spelling.
    decoders = {
        "感知机": r"""
<div class="symbol-map"><strong>$w$</strong> → <code>self.weights</code> (权重向量,shape: [d])</div>
<div class="symbol-map"><strong>$b$</strong> → <code>self.bias</code> (偏置标量,shape: [])</div>
<div class="symbol-map"><strong>$x$</strong> → <code>input_tensor</code> (输入样本,shape: [d])</div>
<div class="symbol-map"><strong>$y$</strong> → <code>label</code> (标签,值域:{-1, +1})</div>
<div class="symbol-map"><strong>$\operatorname{sign}(\cdot)$</strong> → <code>np.sign()</code> (符号函数)</div>
<div class="symbol-map"><strong>$\xi$</strong> → <code>margin</code> (间隔,用于更新步长)</div>
""",
        "K 近邻": r"""
<div class="symbol-map"><strong>$x$</strong> → <code>input_tensor</code> (待预测样本,shape: [d])</div>
<div class="symbol-map"><strong>$X$</strong> → <code>self.X_train</code> (训练集,shape: [N, d])</div>
<div class="symbol-map"><strong>$y$</strong> → <code>label</code> (待预测标签)</div>
<div class="symbol-map"><strong>$Y$</strong> → <code>self.y_train</code> (训练标签,shape: [N])</div>
<div class="symbol-map"><strong>$k$</strong> → <code>self.k</code> (近邻数量)</div>
<div class="symbol-map"><strong>$p$</strong> → <code>p</code> (Lp 范数,p=2 为欧氏距离)</div>
""",
        "朴素贝叶斯": r"""
<div class="symbol-map"><strong>$P(c_k)$</strong> → <code>self.class_priors_[k]</code> (类别先验概率)</div>
<div class="symbol-map"><strong>$P(x^{(j)} | c_k)$</strong> → <code>self.feature_probs_[k, j]</code> (条件概率)</div>
<div class="symbol-map"><strong>$P(c_k | x)$</strong> → <code>posterior</code> (后验概率)</div>
<div class="symbol-map"><strong>$\alpha$</strong> → <code>alpha</code> (拉普拉斯平滑系数)</div>
""",
        "EM 算法": r"""
<div class="symbol-map"><strong>$\theta$</strong> → <code>self.params</code> (模型参数)</div>
<div class="symbol-map"><strong>$z$</strong> → <code>latent_vars</code> (隐变量)</div>
<div class="symbol-map"><strong>$Q(\theta | \theta^{(t)})$</strong> → <code>Q_function</code> (期望下界)</div>
<div class="symbol-map"><strong>$\mathcal{L}(\theta)$</strong> → <code>log_likelihood</code> (对数似然)</div>
""",
        "隐马尔可夫模型": r"""
<div class="symbol-map"><strong>$\pi$</strong> → <code>self.initial_probs</code> (初始状态概率)</div>
<div class="symbol-map"><strong>$A$</strong> → <code>self.transition_probs</code> (状态转移矩阵)</div>
<div class="symbol-map"><strong>$B$</strong> → <code>self.emission_probs</code> (观测发射矩阵)</div>
<div class="symbol-map"><strong>$O$</strong> → <code>observations</code> (观测序列)</div>
<div class="symbol-map"><strong>$Q$</strong> → <code>states</code> (隐藏状态序列)</div>
""",
        "计算图": r"""
<div class="symbol-map"><strong>$x$</strong> → <code>input_tensor</code> (输入张量)</div>
<div class="symbol-map"><strong>$y$</strong> → <code>output_tensor</code> (输出张量)</div>
<div class="symbol-map"><strong>$\frac{\partial y}{\partial x}$</strong> → <code>gradient</code> (梯度)</div>
<div class="symbol-map"><strong>$\mathcal{G}$</strong> → <code>computation_graph</code> (计算图结构)</div>
""",
        "卷积神经网络": r"""
<div class="symbol-map"><strong>$X$</strong> → <code>input_tensor</code> (输入特征图,shape: [C, H, W])</div>
<div class="symbol-map"><strong>$W$</strong> → <code>self.weight</code> (卷积核,shape: [C_out, C_in, k, k])</div>
<div class="symbol-map"><strong>$b$</strong> → <code>self.bias</code> (偏置,shape: [C_out])</div>
<div class="symbol-map"><strong>$Y$</strong> → <code>output_tensor</code> (输出特征图)</div>
<div class="symbol-map"><strong>$s$</strong> → <code>stride</code> (步长)</div>
<div class="symbol-map"><strong>$p$</strong> → <code>padding</code> (填充)</div>
""",
    }
    return decoders.get(topic, decoders["感知机"])
def generate_math_derivation(topic: str) -> str:
    """Return the markdown/LaTeX derivation block for *topic*.

    Unknown topics fall back to the "感知机" (perceptron) derivation so the
    template always renders something.
    """
    derivation_map = {
        "感知机": r"""
### 感知机预测函数
$$f(x) = w \cdot x + b$$
其中 $\cdot$ 表示向量点积。
### 损失函数(hinge loss 的简化形式)
$$L(w, b) = -\sum_{x_i \in M} y_i (w \cdot x_i + b)$$
$M$ 是误分类点集合。
### 梯度下降更新规则
$$w \leftarrow w + \eta \cdot y_i \cdot x_i$$
$$b \leftarrow b + \eta \cdot y_i$$
其中 $\eta$ 是学习率。
""",
        "K 近邻": r"""
### 距离计算(Lp 范数)
$$d(x, x^{(i)}) = \left( \sum_{j=1}^{d} |x^{(j)} - x^{(i)(j)}|^p \right)^{1/p}$$
当 $p=2$ 时为欧氏距离:
$$d(x, x^{(i)}) = \sqrt{\sum_{j=1}^{d} (x^{(j)} - x^{(i)(j)})^2}$$
### 投票规则
$$\hat{y} = \text{argmode}_{c} \sum_{i=1}^{k} \mathbb{I}(y^{(i)} = c)$$
其中 $\mathbb{I}(\cdot)$ 是指示函数,统计 k 个近邻中各类别出现次数。
""",
        "朴素贝叶斯": r"""
### 贝叶斯公式
$$P(c_k | x) = \frac{P(x | c_k) P(c_k)}{P(x)}$$
### 朴素条件独立假设
$$P(x | c_k) = P(x^{(1)}, x^{(2)}, ..., x^{(d)} | c_k) = \prod_{j=1}^{d} P(x^{(j)} | c_k)$$
### 后验概率最大化
$$\hat{y} = \text{argmax}_{c_k} P(c_k) \prod_{j=1}^{d} P(x^{(j)} | c_k)$$
### 拉普拉斯平滑
$$P(x^{(j)} = v | c_k) = \frac{\sum_{i=1}^{N} \mathbb{I}(x^{(i)(j)} = v, y^{(i)} = c_k) + \alpha}{\sum_{i=1}^{N} \mathbb{I}(y^{(i)} = c_k) + \alpha \cdot V}$$
其中 $V$ 是特征取值数量,$\alpha$ 是平滑系数。
""",
        "EM 算法": r"""
### 对数似然函数
$$\mathcal{L}(\theta) = \log P(X | \theta) = \log \sum_{Z} P(X, Z | \theta)$$
### E 步:构造下界
$$\mathcal{L}(\theta) \geq Q(\theta | \theta^{(t)}) = \sum_{Z} P(Z | X, \theta^{(t)}) \log \frac{P(X, Z | \theta)}{P(Z | X, \theta^{(t)})}$$
### M 步:最大化下界
$$\theta^{(t+1)} = \text{argmax}_{\theta} Q(\theta | \theta^{(t)})$$
### 迭代收敛
$$\mathcal{L}(\theta^{(t+1)}) \geq \mathcal{L}(\theta^{(t)})$$
""",
        "隐马尔可夫模型": r"""
### 三大基本问题
**1. 概率计算问题**(前向算法)
$$\alpha_t(i) = P(o_1, o_2, ..., o_t, q_t = S_i | \lambda)$$
递推公式:
$$\alpha_{t+1}(i) = \left[ \sum_{j=1}^{N} \alpha_t(j) a_{ji} \right] b_i(o_{t+1})$$
**2. 学习问题**(Baum-Welch 算法)
$$\gamma_t(i) = P(q_t = S_i | O, \lambda)$$
$$\xi_t(i, j) = P(q_t = S_i, q_{t+1} = S_j | O, \lambda)$$
**3. 预测问题**(Viterbi 算法)
$$\delta_t(i) = \max_{q_1, ..., q_{t-1}} P(q_1, ..., q_t=i, o_1, ..., o_t | \lambda)$$
""",
        "计算图": r"""
### 计算图结构
$$y = f(x) = f_L(f_{L-1}(\cdots f_1(x)\cdots))$$
### 链式法则
$$\frac{\partial y}{\partial x} = \frac{\partial f_L}{\partial u_{L-1}} \cdot \frac{\partial f_{L-1}}{\partial u_{L-2}} \cdots \frac{\partial f_1}{\partial x}$$
### 反向传播
前向:计算每一层的输出 $u_l = f_l(u_{l-1})$
反向:从后往前计算梯度
$$\frac{\partial L}{\partial u_l} = \frac{\partial L}{\partial u_{l+1}} \cdot \frac{\partial u_{l+1}}{\partial u_l}$$
""",
        "卷积神经网络": r"""
### 卷积运算
$$(X * W)_{i,j} = \sum_{m=0}^{k-1} \sum_{n=0}^{k-1} W_{m,n} \cdot X_{i+m, j+n}$$
### 输出尺寸计算
$$H_{out} = \left\lfloor \frac{H_{in} + 2p - k}{s} \right\rfloor + 1$$
$$W_{out} = \left\lfloor \frac{W_{in} + 2p - k}{s} \right\rfloor + 1$$
其中 $k$ 是卷积核尺寸,$p$ 是填充,$s$ 是步长。
### 梯度反向传播
$$\frac{\partial L}{\partial W} = X_{rotated} * \frac{\partial L}{\partial Y}$$
$$\frac{\partial L}{\partial X} = \frac{\partial L}{\partial Y} * W_{rotated}$$
其中 $rotated$ 表示 180 度旋转。
""",
    }
    # EAFP lookup with explicit fallback to the default topic.
    try:
        return derivation_map[topic]
    except KeyError:
        return derivation_map["感知机"]
def generate_html(day: int, topic: str, config: dict) -> str:
    """Render the course HTML page for one day/topic via the Jinja2 template.

    Args:
        day: Lesson day number (1-7).
        topic: Chinese topic name; unmapped topics fall back to "感知机".
        config: Loaded configuration dict. Currently unused here; the
            parameter is kept so callers' signatures stay stable.

    Returns:
        The fully rendered HTML document as a string.
    """
    template_path = PROJECT_ROOT / "skills" / "mathlab" / "templates" / "course_template.html"
    # pathlib read_text replaces the explicit open/read/close sequence.
    template = Template(template_path.read_text(encoding="utf-8"))

    # Per-topic prose blocks injected into the template.
    topic_params = {
        "感知机": {
            "technical_debt": "之前的算法无法处理线性不可分数据,导致泛化能力差。感知机通过引入决策边界,将分类问题转化为优化问题。",
            "visual_intuition": "想象一条直线(或超平面)将两类点分开。权重向量 $w$ 决定直线的方向,偏置 $b$ 决定直线的位置。",
            "bilibili_keyword": "感知机 直观解释",
            "optimization_bottleneck": r"全量梯度下降需要遍历所有样本,复杂度 $O(N \cdot d)$。现代框架使用 mini-batch SGD 加速。",
            "oj_mission": "实现感知机的 forward 和 update 函数,在 XOR 数据集上验证无法收敛(线性不可分)。"
        },
        "K 近邻": {
            "technical_debt": "基于模型的算法需要训练过程,无法利用新样本。K 近邻是实例学习(惰性学习),直接存储训练数据用于预测。",
            "visual_intuition": "想象在一个平面上,新点周围的 k 个最近邻居多数是红色,那它也被预测为红色。距离越近影响越大。",
            "bilibili_keyword": "K 近邻算法 直观解释",
            "optimization_bottleneck": r"预测时需要计算到所有训练样本的距离,复杂度 $O(N \cdot d)$。使用 KD 树或 Ball 树加速到 $O(\log N)$。",
            "oj_mission": "实现 K 近邻的 distance 和 predict 函数,在手写数字数据集上验证 k 值对准确率的影响。"
        },
        "朴素贝叶斯": {
            "technical_debt": "传统分类器需要估计联合概率分布,参数多且易过拟合。朴素贝叶斯通过条件独立假设简化模型。",
            "visual_intuition": "想象判断一封邮件是否是垃圾邮件:根据关键词出现的概率,结合先验经验,计算后验概率。",
            "bilibili_keyword": "朴素贝叶斯 直观解释",
            "optimization_bottleneck": r"需要统计所有特征 - 类别组合的频率,高维数据下计算量大。使用哈希表或稀疏矩阵优化。",
            "oj_mission": "实现高斯朴素贝叶斯的 fit 和 predict 函数,在鸢尾花数据集上验证分类效果。"
        },
        "EM 算法": {
            "technical_debt": "监督学习需要完整标注数据,但现实中很多数据缺失或隐变量未知。EM 算法通过迭代估计隐变量和参数。",
            "visual_intuition": "想象混合高斯模型:E 步猜测每个点属于哪个高斯,M 步根据猜测更新高斯参数,循环迭代直到收敛。",
            "bilibili_keyword": "EM 算法 直观解释",
            "optimization_bottleneck": r"每次迭代需要遍历所有样本计算期望,复杂度 $O(N \cdot K \cdot I)$,K 为隐变量数,I 为迭代次数。",
            "oj_mission": "实现 EM 算法拟合混合高斯分布,在双峰数据上验证参数收敛过程。"
        },
        "隐马尔可夫模型": {
            "technical_debt": "传统序列模型无法处理隐藏状态。HMM 引入不可观测的状态序列,通过观测序列推断隐藏状态。",
            "visual_intuition": "想象通过天气(晴/雨)推断草地干湿状态:天气是隐藏状态,草地干湿是观测,转移概率决定状态变化。",
            "bilibili_keyword": "隐马尔可夫模型 直观解释",
            "optimization_bottleneck": r"前向 - 后向算法需要 $O(N^2 \cdot T)$ 计算,Viterbi 解码需要 $O(N^2 \cdot T)$ 动态规划。",
            "oj_mission": "实现 HMM 的前向算法计算观测概率,在中文分词数据集上验证 Viterbi 解码效果。"
        },
        "计算图": {
            "technical_debt": "手动推导梯度复杂易错,深度学习模型嵌套多层。计算图自动记录前向过程,反向自动求导。",
            "visual_intuition": "想象一个流程图:数据从左到右经过各种运算节点,每个节点记录输入输出,反向时从右到左传递梯度。",
            "bilibili_keyword": "计算图 自动求导 原理",
            "optimization_bottleneck": r"计算图占用内存存储中间结果,大模型下显存爆炸。使用梯度检查点(checkpoint)节省内存。",
            "oj_mission": "实现简易计算图的 forward 和 backward,验证链式法则的正确性。"
        },
        "卷积神经网络": {
            "technical_debt": "全连接网络处理图像时参数爆炸,无法捕捉空间局部性。CNN 通过卷积核共享参数,提取局部特征。",
            "visual_intuition": "想象一个滤波器在图像上滑动,每个位置计算局部区域的加权和,提取边缘、纹理等特征。",
            "bilibili_keyword": "卷积神经网络 CNN 直观解释",
            "optimization_bottleneck": r"卷积运算复杂度 $O(C_{in} \cdot C_{out} \cdot k^2 \cdot H \cdot W)$。使用深度可分离卷积(Depthwise Separable)加速。",
            "oj_mission": "实现 2D 卷积层的 forward 和 backward,在 MNIST 数据集上验证卷积特征提取效果。"
        }
    }

    # Unknown topics fall back to the perceptron copy, mirroring the
    # fallback in generate_symbol_decoder / generate_math_derivation.
    params = topic_params.get(topic, topic_params["感知机"])

    html_content = template.render(
        day=day,
        topic=topic,
        technical_debt=params["technical_debt"],
        visual_intuition=params["visual_intuition"],
        bilibili_keyword=params["bilibili_keyword"],
        symbol_decoder=generate_symbol_decoder(topic),
        math_derivation=generate_math_derivation(topic),
        optimization_bottleneck=params["optimization_bottleneck"],
        oj_mission=params["oj_mission"],
        katex_css="https://cdn.jsdelivr.net/npm/katex@0.16.9/dist/katex.min.css",
        katex_js="https://cdn.jsdelivr.net/npm/katex@0.16.9/dist/katex.min.js"
    )

    return html_content
def generate_exercise(day: int, topic: str) -> str:
    """Generate the day's exercise skeleton as Python source text.

    The returned source defines a TODO-stubbed model class named after the
    topic plus a plot_concept() visualization helper.

    Args:
        day: Lesson day number, interpolated into names and file paths.
        topic: Chinese topic name; unmapped topics get class name "Model".

    Returns:
        Python source code destined for exercises/day{day}_task.py.
    """
    # Map the Chinese topic name to an English class name for the skeleton.
    topic_en_map = {
        "感知机": "Perceptron",
        "K 近邻": "KNearestNeighbor",
        "朴素贝叶斯": "NaiveBayes",
        "EM 算法": "ExpectationMaximization",
        "隐马尔可夫模型": "HiddenMarkovModel",
        "计算图": "ComputationalGraph",
        "卷积神经网络": "ConvolutionalNeuralNetwork",
        "注意力机制": "AttentionMechanism",
        "Bellman 方程": "BellmanEquation",
        "价值迭代": "ValueIteration",
        "策略迭代": "PolicyIteration"
    }
    class_name = topic_en_map.get(topic, "Model")

    # NOTE: literal braces inside this f-string template must be doubled
    # ({{-1, +1}}); the previous single-brace form was evaluated as a tuple
    # expression at generation time and rendered "(-1, 1)" in the output.
    # The generated script also creates ./plots before savefig, which used
    # to crash when the directory was missing.
    return f'''"""
Day {day} - {topic} 练习
任务:实现 {topic} 的核心算法
"""
import os

import numpy as np
import matplotlib.pyplot as plt


class {class_name}:
    """{topic} 类"""

    def __init__(self, learning_rate: float = 0.01):
        self.learning_rate = learning_rate
        self.weights = None
        self.bias = None

    def forward(self, X: np.ndarray) -> np.ndarray:
        """前向传播

        Args:
            X: 输入数据,shape: [n_samples, n_features]

        Returns:
            预测结果,shape: [n_samples]
        """
        # TODO: 实现 f(x) = sign(w · x + b)
        raise NotImplementedError

    def compute_loss(self, X: np.ndarray, y: np.ndarray) -> float:
        """计算损失"""
        # TODO: 实现损失函数
        raise NotImplementedError

    def update(self, X_i: np.ndarray, y_i: int):
        """更新参数

        Args:
            X_i: 单个样本,shape: [n_features]
            y_i: 标签,值域 {{-1, +1}}
        """
        # TODO: 实现梯度下降更新
        raise NotImplementedError

    def fit(self, X: np.ndarray, y: np.ndarray, max_iter: int = 100):
        """训练模型"""
        # TODO: 实现训练循环
        raise NotImplementedError


def plot_concept():
    """可视化概念"""
    # 生成二维数据
    np.random.seed(42)
    X = np.random.randn(100, 2)
    y = np.sign(X[:, 0] + X[:, 1] - 0.5) * 1

    # 绘制散点图
    plt.figure(figsize=(8, 6))
    scatter = plt.scatter(X[:, 0], X[:, 1], c=y, cmap="bwr", s=100, edgecolors="black")
    plt.xlabel("x1")
    plt.ylabel("x2")
    plt.title("Day {day} - {topic} 可视化")
    plt.colorbar(scatter)
    plt.grid(True, alpha=0.3)
    os.makedirs("./plots", exist_ok=True)
    plt.savefig("./plots/day{day}_concept.png", dpi=150)
    print(f"✅ 可视化已保存:plots/day{day}_concept.png")


if __name__ == "__main__":
    plot_concept()
'''
- def generate_test(day: int, topic: str) -> str:
- """生成测试用例"""
- return f'''"""
- Day {day} - {topic} 测试用例
- """
- import numpy as np
- import sys
- sys.path.append("../exercises")
- # TODO: 导入对应的类
- # from day{day}_task import *
- def test_forward_shape():
- """测试前向传播输出形状"""
- # TODO: 实现测试
- assert True
- def test_loss_computation():
- """测试损失计算"""
- # TODO: 实现测试
- assert True
- def test_update_rule():
- """测试参数更新规则"""
- # TODO: 实现测试
- assert True
- def test_convergence():
- """测试收敛性"""
- # TODO: 实现测试
- assert True
- '''
def main():
    """CLI entry point: compile one day's course materials into staging/.

    Writes three artifacts under the configured staging directory:
    the course HTML page, the exercise skeleton, and the test skeleton.
    """
    parser = argparse.ArgumentParser(description="编译单日课程")
    parser.add_argument("--day", type=int, required=True, help="天数 (1-7)")
    parser.add_argument("--topic", type=str, required=True, help="主题名称")
    args = parser.parse_args()

    config = load_config()

    # parents=True: the configured staging path may be nested, and a bare
    # mkdir(exist_ok=True) fails when intermediate directories are missing.
    staging_dir = PROJECT_ROOT / config["output"]["staging"]
    staging_dir.mkdir(parents=True, exist_ok=True)

    exercises_dir = staging_dir / "exercises"
    tests_dir = staging_dir / "tests"
    exercises_dir.mkdir(exist_ok=True)
    tests_dir.mkdir(exist_ok=True)

    # Course HTML page.
    html_path = staging_dir / f"course_day{args.day}.html"
    html_path.write_text(generate_html(args.day, args.topic, config), encoding="utf-8")
    print(f"✅ 生成课程 HTML: {html_path}")

    # Exercise skeleton.
    exercise_path = exercises_dir / f"day{args.day}_task.py"
    exercise_path.write_text(generate_exercise(args.day, args.topic), encoding="utf-8")
    print(f"✅ 生成练习题:{exercise_path}")

    # Test skeleton.
    test_path = tests_dir / f"test_day{args.day}.py"
    test_path.write_text(generate_test(args.day, args.topic), encoding="utf-8")
    print(f"✅ 生成测试用例:{test_path}")

    print(f"\n🎉 Day {args.day} 编译完成!文件已保存到 staging/")
|