# %% md
# ## 1. Import dependencies
# %%
import os
from datetime import datetime
from typing import List

import polars as pl

from src.factors import FactorEngine
from src.training import (
    DateSplitter,
    LightGBMModel,
    STFilter,
    StandardScaler,
    # StockFilterConfig,  # removed; replaced by StockPoolManager + filter_func
    StockPoolManager,
    Trainer,
    Winsorizer,
    NullFiller,
    check_data_quality,
)
from src.training.config import TrainingConfig

# %% md
# ## 2. Define helper functions
# %%
def register_factors(
    engine: FactorEngine,
    selected_factors: List[str],
    factor_definitions: dict,
    label_factor: dict,
) -> List[str]:
    """Register factors with the engine.

    ``selected_factors`` are looked up from metadata by name;
    ``factor_definitions`` are registered via DSL expressions (not yet in
    metadata); ``label_factor`` holds the single label expression.

    Returns the full list of feature column names
    (selected + expression-defined); the label is NOT included.
    """
    print("=" * 80)
    print("注册因子")
    print("=" * 80)
    # Factors already present in metadata — registered by name only.
    print("\n注册特征因子(从 metadata):")
    for name in selected_factors:
        engine.add_factor(name)
        print(f" - {name}")
    # Factors not yet in metadata — registered with name + DSL expression.
    print("\n注册特征因子(表达式):")
    for name, expr in factor_definitions.items():
        engine.add_factor(name, expr)
        print(f" - {name}: {expr}")
    # Label factor (expression): used as the training target, never a feature.
    print("\n注册 Label 因子(表达式):")
    for name, expr in label_factor.items():
        engine.add_factor(name, expr)
        print(f" - {name}: {expr}")
    # Feature columns = SELECTED_FACTORS + keys of FACTOR_DEFINITIONS.
    feature_cols = selected_factors + list(factor_definitions.keys())
    print(f"\n特征因子数: {len(feature_cols)}")
    print(f" - 来自 metadata: {len(selected_factors)}")
    print(f" - 来自表达式: {len(factor_definitions)}")
    print(f"Label: {list(label_factor.keys())[0]}")
    print(f"已注册因子总数: {len(engine.list_registered())}")
    return feature_cols


def prepare_data(
    engine: FactorEngine,
    feature_cols: List[str],
    start_date: str,
    end_date: str,
) -> pl.DataFrame:
    """Compute all feature factors plus the label over [start_date, end_date].

    NOTE(review): reads the module-level LABEL_NAME defined further below;
    this works because module globals are resolved at call time, not at
    definition time.
    """
    print("\n" + "=" * 80)
    print("准备数据")
    print("=" * 80)
    # Compute factors over the full market universe.
    print(f"\n计算因子: {start_date} - {end_date}")
    factor_names = feature_cols + [LABEL_NAME]  # include the label column
    data = engine.compute(
        factor_names=factor_names,
        start_date=start_date,
        end_date=end_date,
    )
    print(f"数据形状: {data.shape}")
    print(f"数据列: {data.columns}")
    print(f"\n前5行预览:")
    print(data.head())
    return data


# %% md
# ## 3. Configuration
#
# ### 3.1 Factor definitions
# %%
# Feature-factor definition dict: to add a factor, add one line here.
LABEL_NAME = "future_return_5"

# Currently selected factors (subset of FACTOR_DEFINITIONS to actually use).
SELECTED_FACTORS = [
    # ================= 1. Price, trend & path dependency =================
    "ma_5",
    "ma_20",
    "ma_ratio_5_20",
    "bias_10",
    "high_low_ratio",
    "bbi_ratio",
    "return_5",
    "return_20",
    "kaufman_ER_20",
    "mom_acceleration_10_20",
    "drawdown_from_high_60",
    "up_days_ratio_20",
    # ================= 2. Volatility, risk adjustment & higher moments =================
    "volatility_5",
    "volatility_20",
    "volatility_ratio",
    "std_return_20",
    "sharpe_ratio_20",
    "min_ret_20",
    "volatility_squeeze_5_60",
    # ================= 3. Intraday microstructure & anomalies =================
    "overnight_intraday_diff",
    "upper_shadow_ratio",
    "capital_retention_20",
    "max_ret_20",
    # ================= 4. Volume, liquidity & price-volume divergence =================
    "volume_ratio_5_20",
    "turnover_rate_mean_5",
    "turnover_deviation",
    "amihud_illiq_20",
    "turnover_cv_20",
    "pv_corr_20",
    "close_vwap_deviation",
    # ================= 5. Fundamental financial features =================
    "roe",
    "roa",
    "profit_margin",
    "debt_to_equity",
    "current_ratio",
    "net_profit_yoy",
    "revenue_yoy",
    "healthy_expansion_velocity",
    # ================= 6. Valuation & cross-sectional momentum =================
    "EP",
    "BP",
    "CP",
    "market_cap_rank",
    "turnover_rank",
    "return_5_rank",
    "EP_rank",
    "pe_expansion_trend",
    "value_price_divergence",
    "active_market_cap",
    "ebit_rank",
]

# Factor definition dict (full factor library).
FACTOR_DEFINITIONS = {
    # ================= 1. Trend, Momentum & Path Dependency =================
    "ma_5": "ts_mean(close, 5)",
    "ma_20": "ts_mean(close, 20)",
    "ma_ratio_5_20": "ts_mean(close, 5) / (ts_mean(close, 20) + 1e-8) - 1",  # MA divergence
    "bias_10": "close / (ts_mean(close, 10) + 1e-8) - 1",  # 10-day bias ratio
    "high_low_ratio": "(close - ts_min(low, 20)) / (ts_max(high, 20) - ts_min(low, 20) + 1e-8)",  # Williams %R variant
    "bbi_ratio": "(ts_mean(close, 3) + ts_mean(close, 6) + ts_mean(close, 12) + ts_mean(close, 24)) / (4 * close + 1e-8)",  # BBI (bull/bear index) ratio
    "return_5": "(close / (ts_delay(close, 5) + 1e-8)) - 1",  # 5-day momentum
    "return_20": "(close / (ts_delay(close, 20) + 1e-8)) - 1",  # 20-day momentum
    # [Advanced] Kaufman efficiency ratio — trend smoothness vs. noise
    "kaufman_ER_20": "abs(close - ts_delay(close, 20)) / (ts_sum(abs(close - ts_delay(close, 1)), 20) + 1e-8)",
    # [Advanced] Momentum acceleration — positive second derivative of price
    "mom_acceleration_10_20": "(close / (ts_delay(close, 10) + 1e-8) - 1) - (ts_delay(close, 10) / (ts_delay(close, 20) + 1e-8) - 1)",
    # [Advanced] Drawdown from 60-day high — overhead-supply pressure
    "drawdown_from_high_60": "close / (ts_max(high, 60) + 1e-8) - 1",
    # [Advanced] Trend consistency — share of up days in the last 20
    "up_days_ratio_20": "ts_sum(close > ts_delay(close, 1), 20) / 20",
    # ================= 2. Volatility & Risk-Adjusted Returns =================
    "volatility_5": "ts_std(close, 5)",
    "volatility_20": "ts_std(close, 20)",
    "volatility_ratio": "ts_std(close, 5) / (ts_std(close, 20) + 1e-8)",  # volatility term structure
    "std_return_20": "ts_std((close / (ts_delay(close, 1) + 1e-8)) - 1, 20)",  # realized return volatility
    # [Advanced] Sharpe-style trend ratio — rewards steady climbs
    "sharpe_ratio_20": "ts_mean(close / (ts_delay(close, 1) + 1e-8) - 1, 20) / (ts_std(close / (ts_delay(close, 1) + 1e-8) - 1, 20) + 1e-8)",
    # [Advanced] Tail crash risk — worst single-day return in a month
    "min_ret_20": "ts_min(close / (ts_delay(close, 1) + 1e-8) - 1, 20)",
    # [Advanced] Volatility squeeze — Bollinger-band contraction
    "volatility_squeeze_5_60": "ts_std(close, 5) / (ts_std(close, 60) + 1e-8)",
    # ================= 3. Intraday Microstructure & Anomalies =================
    # [Advanced] Overnight vs intraday return divergence
    "overnight_intraday_diff": "(open / (ts_delay(close, 1) + 1e-8) - 1) - (close / (open + 1e-8) - 1)",
    # [Advanced] Upper-shadow selling pressure
    "upper_shadow_ratio": "(high - ((open + close + abs(open - close)) / 2)) / (high - low + 1e-8)",
    # [Advanced] Capital retention — intensity of intraday shakeouts
    "capital_retention_20": "ts_sum(abs(close - open), 20) / (ts_sum(high - low, 20) + 1e-8)",
    # [Advanced] MAX lottery effect — reversal factor for recent spikes
    "max_ret_20": "ts_max(close / (ts_delay(close, 1) + 1e-8) - 1, 20)",
    # ================= 4. Volume, Liquidity & Divergence =================
    "volume_ratio_5_20": "ts_mean(vol, 5) / (ts_mean(vol, 20) + 1e-8)",  # relative volume surge
    "turnover_rate_mean_5": "ts_mean(turnover_rate, 5)",  # activity level
    "turnover_deviation": "(turnover_rate - ts_mean(turnover_rate, 10)) / (ts_std(turnover_rate, 10) + 1e-8)",  # turnover z-score
    # [Advanced] Amihud illiquidity — price-impact per traded amount
    "amihud_illiq_20": "ts_mean(abs(close / (ts_delay(close, 1) + 1e-8) - 1) / (amount + 1e-8), 20)",
    # [Advanced] Turnover coefficient of variation — unstable hot-money flow
    "turnover_cv_20": "ts_std(turnover_rate, 20) / (ts_mean(turnover_rate, 20) + 1e-8)",
    # [Advanced] Price-volume correlation — healthy up-on-volume pattern
    "pv_corr_20": "ts_corr(close / (ts_delay(close, 1) + 1e-8) - 1, vol, 20)",
    # [Advanced] Close vs VWAP deviation — detects late-session markups
    "close_vwap_deviation": "close / (amount / (vol * 100 + 1e-8) + 1e-8) - 1",
    # ================= 5. Fundamental Quality & Structure =================
    "roe": "n_income / (total_hldr_eqy_exc_min_int + 1e-8)",  # return on equity
    "roa": "n_income / (total_assets + 1e-8)",  # return on assets
    "profit_margin": "n_income / (revenue + 1e-8)",  # net profit margin
    "debt_to_equity": "total_liab / (total_hldr_eqy_exc_min_int + 1e-8)",  # leverage
    "current_ratio": "total_cur_assets / (total_cur_liab + 1e-8)",  # short-term solvency
    # [Advanced] Net-profit YoY growth (daily data lagged 252 days ≈ one year)
    "net_profit_yoy": "(n_income / (ts_delay(n_income, 252) + 1e-8)) - 1",
    # [Advanced] Revenue YoY growth
    "revenue_yoy": "(revenue / (ts_delay(revenue, 252) + 1e-8)) - 1",
    # [Advanced] Balance-sheet expansion slope — excludes debt-fueled growth
    "healthy_expansion_velocity": "(total_assets / (ts_delay(total_assets, 252) + 1e-8) - 1) - (total_liab / (ts_delay(total_liab, 252) + 1e-8) - 1)",
    # ================= 6. Valuation & Cross-Sectional Ranking =================
    # Absolute valuation (Tushare market cap is in 万元, hence * 10000)
    "EP": "n_income / (total_mv * 10000 + 1e-8)",  # earnings yield (1/PE)
    "BP": "total_hldr_eqy_exc_min_int / (total_mv * 10000 + 1e-8)",  # book-to-market (1/PB)
    "CP": "n_cashflow_act / (total_mv * 10000 + 1e-8)",  # operating cash-flow yield (1/PCF)
    # Market-wide cross-sectional ranks
    "market_cap_rank": "cs_rank(total_mv)",  # size factor
    "turnover_rank": "cs_rank(turnover_rate)",
    "return_5_rank": "cs_rank((close / (ts_delay(close, 5) + 1e-8)) - 1)",
    "EP_rank": "cs_rank(n_income / (total_mv + 1e-8))",  # cheapest names
    # [Advanced] Davis double-play momentum — is the PE multiple expanding?
    "pe_expansion_trend": "(total_mv / (n_income + 1e-8)) / (ts_delay(total_mv, 60) / (ts_delay(n_income, 60) + 1e-8) + 1e-8) - 1",
    # [Advanced] Earnings-vs-price rank divergence — finds oversold improvers
    "value_price_divergence": "cs_rank((n_income - ts_delay(n_income, 252)) / (abs(ts_delay(n_income, 252)) + 1e-8)) - cs_rank(close / (ts_delay(close, 20) + 1e-8))",
    # [Advanced] Liquidity-adjusted market cap — zombie large caps vs hot micro caps
    "active_market_cap": "total_mv * ts_mean(turnover_rate, 20)",
    "ebit_rank": "cs_rank(ebit)",
}

# Label factor (training target only, never used as a feature).
LABEL_FACTOR = {
    LABEL_NAME: "(ts_delay(close, -5) / ts_delay(open, -1)) - 1",  # forward 5-day return
}

# %% md
# ### 3.2 Training parameters
# %%
# Date-range configuration (proper train/val/test three-way split)
# Train: fits model parameters
# Val: validation / early stopping / tuning (after train, before test)
# Test: final evaluation only — fully independent of training
TRAIN_START = "20200101"
TRAIN_END = "20231231"
VAL_START = "20240101"
VAL_END = "20241231"
TEST_START = "20250101"
TEST_END = "20261231"

# LightGBM hyper-parameters.
MODEL_PARAMS = {
    "objective": "regression",
    "metric": "mae",  # MAE is more robust to outliers than MSE
    # Tree-structure controls (main anti-overfitting guards), currently off:
    # "num_leaves": 20,
    # "max_depth": 16,
    # "min_child_samples": 50,
    # "min_child_weight": 0.001,
    # Learning schedule
    "learning_rate": 0.01,  # low LR, compensated by more trees
    "n_estimators": 1000,  # many trees, paired with early stopping
    # Sampling strategy (key anti-overfitting levers)
    "subsample": 0.8,  # row subsampling per tree
    "subsample_freq": 5,  # resample every 5 iterations
    "colsample_bytree": 0.8,  # column subsampling per tree
    # Regularization
    "reg_alpha": 0.1,  # L1, encourages sparsity
    "reg_lambda": 1.0,  # L2, smooths weights
    # Numerical stability / reproducibility
    "verbose": -1,
    "random_state": 42,
}

# Daily stock-pool size.
# FIX: the original code used min(1000, ...) while its docstring and log
# messages all claimed 500; the magic number is now a single named constant
# (kept at 1000, the actual prior behavior) so code and docs cannot drift.
STOCK_POOL_SIZE = 1000


def stock_pool_filter(df: pl.DataFrame) -> pl.Series:
    """Stock-pool filter applied to a single trading day's DataFrame.

    Rules:
      1. Exclude ChiNext (codes starting with 30).
      2. Exclude STAR Market (codes starting with 68).
      3. Exclude Beijing Stock Exchange (codes starting with 8, 9 or 4).
      4. Keep the STOCK_POOL_SIZE smallest stocks by market cap that day.

    Returns a boolean Series aligned with ``df`` (True = keep).
    ST filtering is handled separately by the STFilter component.
    """
    # Board/exchange exclusion by code prefix.
    code_filter = (
        ~df["ts_code"].str.starts_with("30")  # ChiNext
        & ~df["ts_code"].str.starts_with("68")  # STAR Market
        & ~df["ts_code"].str.starts_with("8")  # BSE
        & ~df["ts_code"].str.starts_with("9")  # BSE
        & ~df["ts_code"].str.starts_with("4")  # BSE
    )
    # Among the surviving codes, keep the smallest-cap names (ascending sort).
    valid_df = df.filter(code_filter)
    n = min(STOCK_POOL_SIZE, len(valid_df))
    small_cap_codes = valid_df.sort("total_mv").head(n)["ts_code"]
    # Boolean membership mask over the original frame.
    return df["ts_code"].is_in(small_cap_codes)


# Extra base columns the filter needs (ST filtering lives in STFilter).
STOCK_FILTER_REQUIRED_COLUMNS = ["total_mv"]
# Optional: factors required by the filter, e.g.
# STOCK_FILTER_REQUIRED_FACTORS = {
#     "market_cap_rank": "cs_rank(total_mv)",
# }

# Output configuration (relative to this file's directory).
OUTPUT_DIR = "output"
SAVE_PREDICTIONS = True
PERSIST_MODEL = False

# Top-N: number of stocks recommended per day.
TOP_N = 5  # tune as needed (10, 20, ...)

# %% md
# ## 4. Training pipeline
#
# ### 4.1 Initialize components
# %%
print("\n" + "=" * 80)
print("LightGBM 回归模型训练")
print("=" * 80)

# 1. FactorEngine with metadata support enabled.
print("\n[1] 创建 FactorEngine")
engine = FactorEngine(metadata_path="data/factors.jsonl")

# 2. Register all factors; returns the feature column list.
print("\n[2] 定义因子(从 metadata 注册)")
feature_cols = register_factors(
    engine, SELECTED_FACTORS, FACTOR_DEFINITIONS, LABEL_FACTOR
)
target_col = LABEL_NAME

# 3. Compute data over the whole train..test window in one pass.
print("\n[3] 准备数据")
data = prepare_data(
    engine=engine,
    feature_cols=feature_cols,
    start_date=TRAIN_START,
    end_date=TEST_END,
)

# 4. Echo the configuration.
print(f"\n[配置] 训练期: {TRAIN_START} - {TRAIN_END}")
print(f"[配置] 验证期: {VAL_START} - {VAL_END}")
print(f"[配置] 测试期: {TEST_START} - {TEST_END}")
print(f"[配置] 特征数: {len(feature_cols)}")
print(f"[配置] 目标变量: {target_col}")

# 5. Model wrapper.
model = LightGBMModel(params=MODEL_PARAMS)

# 6. Data processors (fitted on train, later reused on test).
processors = [
    NullFiller(feature_cols=feature_cols, strategy="mean"),
    Winsorizer(feature_cols=feature_cols, lower=0.01, upper=0.99),
    StandardScaler(feature_cols=feature_cols),
]
# 7. Data splitter (proper train/val/test three-way split).
# Train: fit parameters | Val: validation / early stopping | Test: final evaluation
splitter = DateSplitter(
    train_start=TRAIN_START,
    train_end=TRAIN_END,
    val_start=VAL_START,
    val_end=VAL_END,
    test_start=TEST_START,
    test_end=TEST_END,
)

# 8. Stock-pool manager: custom filter function + required columns.
pool_manager = StockPoolManager(
    filter_func=stock_pool_filter,
    required_columns=STOCK_FILTER_REQUIRED_COLUMNS,  # extra columns the filter needs
    # required_factors=STOCK_FILTER_REQUIRED_FACTORS,  # optional factor inputs
    data_router=engine.router,
)
print("[股票池筛选] 使用自定义函数进行股票池筛选")
print(f"[股票池筛选] 所需基础列: {STOCK_FILTER_REQUIRED_COLUMNS}")
# FIX: this message used to say 500 while the filter actually keeps 1000.
print("[股票池筛选] 筛选逻辑: 排除创业板/科创板/北交所后,每日选市值最小的1000只")
# print(f"[股票池筛选] 所需因子: {list(STOCK_FILTER_REQUIRED_FACTORS.keys())}")

# 9. ST-stock filter component.
st_filter = STFilter(
    data_router=engine.router,
)

# 10. Trainer.
trainer = Trainer(
    model=model,
    pool_manager=pool_manager,
    processors=processors,
    filters=[st_filter],  # drop ST stocks
    splitter=splitter,
    target_col=target_col,
    feature_cols=feature_cols,
    persist_model=PERSIST_MODEL,
)

# %% md
# ### 4.2 Run training
# %%
print("\n" + "=" * 80)
print("开始训练")
print("=" * 80)

# Step 1: stock-pool filtering.
# FIX: step counters unified to /7 (later steps already said 3/7..7/7).
print("\n[步骤 1/7] 股票池筛选")
print("-" * 60)
if pool_manager:
    print(" 执行每日独立筛选股票池...")
    filtered_data = pool_manager.filter_and_select_daily(data)
    print(f" 筛选前数据规模: {data.shape}")
    print(f" 筛选后数据规模: {filtered_data.shape}")
    print(f" 筛选前股票数: {data['ts_code'].n_unique()}")
    print(f" 筛选后股票数: {filtered_data['ts_code'].n_unique()}")
    print(f" 删除记录数: {len(data) - len(filtered_data)}")
else:
    filtered_data = data
    print(" 未配置股票池管理器,跳过筛选")

# %%
# Step 2: split into train / validation / test sets.
print("\n[步骤 2/7] 划分训练集、验证集和测试集")
print("-" * 60)
if splitter:
    # Three-way split: train fits, val validates/early-stops, test evaluates.
    train_data, val_data, test_data = splitter.split(filtered_data)
    print(f" 训练集数据规模: {train_data.shape}")
    print(f" 验证集数据规模: {val_data.shape}")
    print(f" 测试集数据规模: {test_data.shape}")
    print(f" 训练集股票数: {train_data['ts_code'].n_unique()}")
    print(f" 验证集股票数: {val_data['ts_code'].n_unique()}")
    print(f" 测试集股票数: {test_data['ts_code'].n_unique()}")
    print(
        f" 训练集日期范围: {train_data['trade_date'].min()} - {train_data['trade_date'].max()}"
    )
    print(
        f" 验证集日期范围: {val_data['trade_date'].min()} - {val_data['trade_date'].max()}"
    )
    print(
        f" 测试集日期范围: {test_data['trade_date'].min()} - {test_data['trade_date'].max()}"
    )
    print("\n 训练集前5行预览:")
    print(train_data.head())
    print("\n 验证集前5行预览:")
    print(val_data.head())
    print("\n 测试集前5行预览:")
    print(test_data.head())
else:
    # FIX: bind val_data in this branch too so downstream
    # `val_data is not None` checks cannot raise NameError.
    train_data = filtered_data
    val_data = None
    test_data = filtered_data
    print(" 未配置划分器,全部作为训练集")

# %%
# Step 3: data-quality check (must run BEFORE any preprocessing/fillna).
print("\n[步骤 3/7] 数据质量检查")
print("-" * 60)
print(" [说明] 此检查在 fillna 等处理之前执行,用于发现数据问题")
print("\n 检查训练集...")
check_data_quality(train_data, feature_cols, raise_on_error=True)
# val_data is always bound now (None when no splitter), so the old
# `"val_data" in locals()` guard is no longer needed.
if val_data is not None:
    print("\n 检查验证集...")
    check_data_quality(val_data, feature_cols, raise_on_error=True)
print("\n 检查测试集...")
check_data_quality(test_data, feature_cols, raise_on_error=True)
print(" [成功] 数据质量检查通过,未发现异常")

# %%
# Step 4: fit-transform the processors on the training set.
print("\n[步骤 4/7] 训练集数据处理")
print("-" * 60)
fitted_processors = []
if processors:
    for i, processor in enumerate(processors, 1):
        print(f" [{i}/{len(processors)}] 应用处理器: {processor.__class__.__name__}")
        train_data_before = len(train_data)
        train_data = processor.fit_transform(train_data)
        train_data_after = len(train_data)
        fitted_processors.append(processor)
        print(f" 处理前记录数: {train_data_before}")
        print(f" 处理后记录数: {train_data_after}")
        if train_data_before != train_data_after:
            print(f" 删除记录数: {train_data_before - train_data_after}")
print("\n 训练集处理后前5行预览:")
print(train_data.head())
print(f"\n 训练集特征统计:")
print(f" 特征数: {len(feature_cols)}")
print(f" 样本数: {len(train_data)}")
print(f" 缺失值统计:")
for col in feature_cols[:5]:  # only report the first 5 features
    null_count = train_data[col].null_count()
    if null_count > 0:
        print(f" {col}: {null_count} ({null_count / len(train_data) * 100:.2f}%)")

# %%
# Step 5: train the model.  (FIX: the old comment mislabeled this "步骤 4".)
print("\n[步骤 5/7] 训练模型")
print("-" * 60)
print(f" 模型类型: LightGBM")
print(f" 训练样本数: {len(train_data)}")
print(f" 特征数: {len(feature_cols)}")
print(f" 目标变量: {target_col}")
X_train = train_data.select(feature_cols)
y_train = train_data.select(target_col).to_series()
print(f"\n 目标变量统计:")
print(f" 均值: {y_train.mean():.6f}")
print(f" 标准差: {y_train.std():.6f}")
print(f" 最小值: {y_train.min():.6f}")
print(f" 最大值: {y_train.max():.6f}")
print(f" 缺失值: {y_train.null_count()}")
print("\n 开始训练...")
model.fit(X_train, y_train)
print(" 训练完成!")

# %%
# Step 6: apply the processors fitted on train to the test set
# (transform only — no refitting, to avoid leakage).
print("\n[步骤 6/7] 测试集数据处理")
print("-" * 60)
if processors and test_data is not train_data:
    for i, processor in enumerate(fitted_processors, 1):
        print(
            f" [{i}/{len(fitted_processors)}] 应用处理器: {processor.__class__.__name__}"
        )
        test_data_before = len(test_data)
        test_data = processor.transform(test_data)
        test_data_after = len(test_data)
        print(f" 处理前记录数: {test_data_before}")
        print(f" 处理后记录数: {test_data_after}")
else:
    print(" 跳过测试集处理")

# %%
# Step 7: generate predictions on the test set.
print("\n[步骤 7/7] 生成预测")
print("-" * 60)
X_test = test_data.select(feature_cols)
print(f" 测试样本数: {len(X_test)}")
print(" 预测中...")
predictions = model.predict(X_test)
print(f" 预测完成!")
print(f"\n 预测结果统计:")
print(f" 均值: {predictions.mean():.6f}")
print(f" 标准差: {predictions.std():.6f}")
print(f" 最小值: {predictions.min():.6f}")
print(f" 最大值: {predictions.max():.6f}")
# Attach predictions to the test frame and stash them on the trainer.
trainer.results = test_data.with_columns([pl.Series("prediction", predictions)])

# %% md
# ### 4.3 Training metric curves
# %%
print("\n" + "=" * 80)
print("训练指标曲线")
print("=" * 80)
# Retrain to collect per-iteration metrics (the first fit did not record
# evaluation results).
print("\n重新训练模型以收集训练指标...")
import lightgbm as lgb

# Use val for validation; test never participates in the training process.
X_train_np = X_train.to_numpy()
y_train_np = y_train.to_numpy()
X_val_np = val_data.select(feature_cols).to_numpy()
y_val_np = val_data.select(target_col).to_series().to_numpy()
# Build LightGBM datasets (val references train for binning consistency).
train_dataset = lgb.Dataset(X_train_np, label=y_train_np)
val_dataset = lgb.Dataset(X_val_np, label=y_val_np, reference=train_dataset)
# Per-iteration evaluation results are recorded here.
evals_result = {}
# Same params as the main model; early stopping after 100 stagnant rounds.
# NOTE(review): MODEL_PARAMS carries the sklearn-style key "n_estimators",
# which lgb.train itself does not consume — num_boost_round below is what
# controls the round count here; confirm no double-configuration intended.
booster_with_eval = lgb.train(
    MODEL_PARAMS,
    train_dataset,
    num_boost_round=MODEL_PARAMS.get("n_estimators", 100),
    valid_sets=[train_dataset, val_dataset],
    valid_names=["train", "val"],
    callbacks=[
        lgb.record_evaluation(evals_result),
        lgb.early_stopping(stopping_rounds=100, verbose=True),
    ],
)
print("训练完成,指标已收集")
# First (and only) configured metric name, e.g. "l1" for MAE.
metric_name = list(evals_result["train"].keys())[0]
print(f"\n评估指标: {metric_name}")
# Per-iteration metric curves for train and validation.
train_metric = evals_result["train"][metric_name]
val_metric = evals_result["val"][metric_name]
# Early-stopping report: fewer recorded rounds than configured => triggered.
actual_rounds = len(train_metric)
expected_rounds = MODEL_PARAMS.get("n_estimators", 100)
print(f"\n[早停信息]")
print(f" 配置的最大轮数: {expected_rounds}")
print(f" 实际训练轮数: {actual_rounds}")
if actual_rounds < expected_rounds:
    print(f" 早停状态: 已触发(连续100轮验证指标未改善)")
else:
    print(f" 早停状态: 未触发(达到最大轮数)")
print(f"\n最终指标:")
print(f" 训练 {metric_name}: {train_metric[-1]:.6f}")
print(f" 验证 {metric_name}: {val_metric[-1]:.6f}")

# %%
# Plot the train/validation metric curves.
import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(12, 6))
# val is for validation; test never appears in these curves.
iterations = range(1, len(train_metric) + 1)
ax.plot(
    iterations, train_metric, label=f"Train {metric_name}", linewidth=2, color="blue"
)
ax.plot(
    iterations, val_metric, label=f"Validation {metric_name}", linewidth=2, color="red"
)
ax.set_xlabel("Iteration", fontsize=12)
ax.set_ylabel(metric_name.upper(), fontsize=12)
ax.set_title(
    f"Training and Validation {metric_name.upper()} Curve",
    fontsize=14,
    fontweight="bold",
)
ax.legend(fontsize=10)
ax.grid(True, alpha=0.3)
# Mark the best validation point (the early-stopping decision point).
best_iter = val_metric.index(min(val_metric))
best_metric = min(val_metric)
ax.axvline(
    x=best_iter + 1,
    color="green",
    linestyle="--",
    alpha=0.7,
    label=f"Best Iteration ({best_iter + 1})",
)
ax.scatter([best_iter + 1], [best_metric], color="green", s=100, zorder=5)
ax.annotate(
    f"Best: {best_metric:.6f}\nIter: {best_iter + 1}",
    xy=(best_iter + 1, best_metric),
    xytext=(best_iter + 1 + len(iterations) * 0.1, best_metric),
    fontsize=9,
    arrowprops=dict(arrowstyle="->", color="green", alpha=0.7),
)
plt.tight_layout()
plt.show()

print(f"\n[指标分析]")
print(f" 最佳验证 {metric_name}: {best_metric:.6f}")
print(f" 最佳迭代轮数: {best_iter + 1}")
print(f" 早停建议: 如果验证指标连续10轮不下降,建议在第 {best_iter + 1} 轮停止训练")
print(f"\n[重要提醒] 验证集仅用于早停/调参,测试集完全独立于训练过程!")

# %% md
# ### 4.4 Inspect results
# %%
print("\n" + "=" * 80)
print("训练结果")
print("=" * 80)
results = trainer.results
print(f"\n结果数据形状: {results.shape}")
print(f"结果列: {results.columns}")
print(f"\n结果前10行预览:")
print(results.head(10))
print(f"\n结果后5行预览:")
print(results.tail())
print(f"\n每日预测样本数统计:")
daily_counts = results.group_by("trade_date").agg(pl.len()).sort("trade_date")
print(f" 最小: {daily_counts['len'].min()}")
print(f" 最大: {daily_counts['len'].max()}")
print(f" 平均: {daily_counts['len'].mean():.2f}")
# Show one day's first 10 predictions as a sanity check.
sample_date = results["trade_date"][0]
sample_data = results.filter(results["trade_date"] == sample_date).head(10)
print(f"\n示例日期 {sample_date} 的前10条预测:")
print(sample_data.select(["ts_code", "trade_date", target_col, "prediction"]))

# %% md
# ### 4.5 Save results
# (FIX: this section was also numbered 4.4; renumbered 4.5/4.6.)
# %%
print("\n" + "=" * 80)
print("保存预测结果")
print("=" * 80)
# Make sure the output directory exists.
os.makedirs(OUTPUT_DIR, exist_ok=True)
# Test-window stamp (also validates the configured date strings).
# NOTE(review): date_str is not used in the filename below — presumably it
# was meant to be; kept as-is so the output path does not silently change.
start_dt = datetime.strptime(TEST_START, "%Y%m%d")
end_dt = datetime.strptime(TEST_END, "%Y%m%d")
date_str = f"{start_dt.strftime('%Y%m%d')}_{end_dt.strftime('%Y%m%d')}"

# Save the daily Top-N picks.
print(f"\n[1/1] 保存每日 Top {TOP_N} 股票...")
# FIX: dropped the pointless f-prefix on a placeholder-free string.
topn_output_path = os.path.join(OUTPUT_DIR, "regression_output.csv")
# Per day, keep the TOP_N rows with the highest prediction score.
topn_by_date = []
unique_dates = results["trade_date"].unique().sort()
for date in unique_dates:
    day_data = results.filter(results["trade_date"] == date)
    topn = day_data.sort("prediction", descending=True).head(TOP_N)
    topn_by_date.append(topn)
topn_results = pl.concat(topn_by_date)
# Reformat the date and order columns as: date, score, ts_code.
# FIX: .alias("date") previously bound only to the last str.slice expression,
# so the concatenated column kept the auto-derived name "trade_date"; the
# alias must wrap the whole concatenation.
topn_to_save = topn_results.select(
    [
        (
            pl.col("trade_date").str.slice(0, 4)
            + "-"
            + pl.col("trade_date").str.slice(4, 2)
            + "-"
            + pl.col("trade_date").str.slice(6, 2)
        ).alias("date"),
        pl.col("prediction").alias("score"),
        pl.col("ts_code"),
    ]
)
topn_to_save.write_csv(topn_output_path, include_header=True)
print(f" 保存路径: {topn_output_path}")
print(
    f" 保存行数: {len(topn_to_save)}({len(unique_dates)}个交易日 × 每日top{TOP_N})"
)
print(f"\n 预览(前15行):")
print(topn_to_save.head(15))

# %% md
# ### 4.6 Feature importance
# %%
importance = model.feature_importance()
if importance is not None:
    print("\n特征重要性:")
    print(importance.sort_values(ascending=False))
print("\n" + "=" * 80)
print("训练完成!")
print("=" * 80)

# %% md
# ## 5. Visualization
#
# Plot directly from the trained model.
# - **Feature importance plot**: aids feature selection
# - **Tree plot**: understand the decision logic
# %%
# Visualization libraries.
import matplotlib.pyplot as plt
import lightgbm as lgb
import pandas as pd

# Unwrap the underlying Booster from the model wrapper.
booster = model.model
print(f"模型类型: {type(booster)}")
print(f"特征数量: {len(feature_cols)}")

# %% md
# ### 5.1 Feature importance (for feature selection)
#
# Interpretation:
# - High-importance features contribute most to the model.
# - Zero-importance features are candidates for removal.
# - Helps identify which factors are actually effective.
# %%
print("绘制特征重要性...")
fig, ax = plt.subplots(figsize=(10, 8))
lgb.plot_importance(
    booster,
    max_num_features=20,
    importance_type="gain",
    title="Feature Importance (Gain)",
    ax=ax,
)
ax.set_xlabel("Importance (Gain)")
plt.tight_layout()
plt.show()
# Print the gain-based importance ranking.
importance_gain = pd.Series(
    booster.feature_importance(importance_type="gain"), index=feature_cols
).sort_values(ascending=False)
print("\n[特征重要性排名 - Gain]")
print(importance_gain)
# Flag features with zero importance (removal candidates).
zero_importance = importance_gain[importance_gain == 0].index.tolist()
if zero_importance:
    print(f"\n[低重要性特征] 以下{len(zero_importance)}个特征重要性为0,可考虑删除:")
    for feat in zero_importance:
        print(f" - {feat}")
else:
    print("\n所有特征都有一定重要性")