{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "79a7758178bafdd3", "metadata": { "ExecuteTime": { "end_time": "2025-04-03T12:46:06.987506Z", "start_time": "2025-04-03T12:46:06.259551Z" }, "jupyter": { "source_hidden": true } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "e:\\PyProject\\NewStock\\main\\train\n" ] } ], "source": [ "%load_ext autoreload\n", "%autoreload 2\n", "\n", "import gc\n", "import os\n", "import sys\n", "sys.path.append('../../')\n", "print(os.getcwd())\n", "import pandas as pd\n", "from main.factor.factor import get_rolling_factor, get_simple_factor\n", "from main.utils.factor import read_industry_data\n", "from main.utils.factor_processor import calculate_score\n", "from main.utils.utils import read_and_merge_h5_data, merge_with_industry_data\n", "\n", "import warnings\n", "\n", "warnings.filterwarnings(\"ignore\")" ] }, { "cell_type": "code", "execution_count": 2, "id": "a79cafb06a7e0e43", "metadata": { "ExecuteTime": { "end_time": "2025-04-03T12:47:00.212859Z", "start_time": "2025-04-03T12:46:06.998047Z" }, "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "daily data\n", "daily basic\n", "inner merge on ['ts_code', 'trade_date']\n", "stk limit\n", "left merge on ['ts_code', 'trade_date']\n", "money flow\n", "left merge on ['ts_code', 'trade_date']\n", "cyq perf\n", "left merge on ['ts_code', 'trade_date']\n", "\n", "RangeIndex: 8665405 entries, 0 to 8665404\n", "Data columns (total 32 columns):\n", " # Column Dtype \n", "--- ------ ----- \n", " 0 ts_code object \n", " 1 trade_date datetime64[ns]\n", " 2 open float64 \n", " 3 close float64 \n", " 4 high float64 \n", " 5 low float64 \n", " 6 vol float64 \n", " 7 pct_chg float64 \n", " 8 turnover_rate float64 \n", " 9 pe_ttm float64 \n", " 10 circ_mv float64 \n", " 11 total_mv float64 \n", " 12 volume_ratio float64 \n", " 13 is_st bool \n", " 14 up_limit float64 \n", " 15 down_limit float64 \n", " 16 buy_sm_vol float64 \n", " 17 sell_sm_vol float64 \n", " 18 buy_lg_vol float64 \n", " 19 sell_lg_vol float64 \n", " 20 buy_elg_vol float64 \n", " 21 sell_elg_vol float64 \n", " 22 net_mf_vol float64 \n", " 23 his_low float64 \n", " 24 his_high float64 \n", " 25 cost_5pct float64 \n", " 26 cost_15pct float64 \n", " 27 cost_50pct float64 \n", " 28 cost_85pct float64 \n", " 29 cost_95pct float64 \n", " 30 weight_avg float64 \n", " 31 winner_rate float64 \n", "dtypes: bool(1), datetime64[ns](1), float64(29), object(1)\n", "memory usage: 2.0+ GB\n", "None\n" ] } ], "source": [ "from main.utils.utils import read_and_merge_h5_data\n", "\n", "print('daily data')\n", "df = read_and_merge_h5_data('../../data/daily_data.h5', key='daily_data',\n", " columns=['ts_code', 'trade_date', 'open', 'close', 'high', 'low', 'vol', 'pct_chg'],\n", " df=None)\n", "\n", "print('daily basic')\n", "df = read_and_merge_h5_data('../../data/daily_basic.h5', key='daily_basic',\n", " columns=['ts_code', 'trade_date', 'turnover_rate', 'pe_ttm', 'circ_mv', 'total_mv', 'volume_ratio',\n", " 'is_st'], df=df, join='inner')\n", "\n", "print('stk limit')\n", "df = read_and_merge_h5_data('../../data/stk_limit.h5', key='stk_limit',\n", " columns=['ts_code', 'trade_date', 'pre_close', 'up_limit', 'down_limit'],\n", " df=df)\n", "print('money flow')\n", "df = read_and_merge_h5_data('../../data/money_flow.h5', key='money_flow',\n", " columns=['ts_code', 'trade_date', 'buy_sm_vol', 'sell_sm_vol', 'buy_lg_vol', 'sell_lg_vol',\n", " 'buy_elg_vol', 'sell_elg_vol', 'net_mf_vol'],\n", " 
df=df)\n", "print('cyq perf')\n", "df = read_and_merge_h5_data('../../data/cyq_perf.h5', key='cyq_perf',\n", " columns=['ts_code', 'trade_date', 'his_low', 'his_high', 'cost_5pct', 'cost_15pct',\n", " 'cost_50pct',\n", " 'cost_85pct', 'cost_95pct', 'weight_avg', 'winner_rate'],\n", " df=df)\n", "print(df.info())" ] }, { "cell_type": "code", "execution_count": 3, "id": "cac01788dac10678", "metadata": { "ExecuteTime": { "end_time": "2025-04-03T12:47:10.527104Z", "start_time": "2025-04-03T12:47:00.488715Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "industry\n" ] } ], "source": [ "print('industry')\n", "industry_df = read_and_merge_h5_data('../../data/industry_data.h5', key='industry_data',\n", " columns=['ts_code', 'l2_code', 'in_date'],\n", " df=None, on=['ts_code'], join='left')\n", "\n", "\n", "def merge_with_industry_data(df, industry_df):\n", " # 确保日期字段是 datetime 类型\n", " df['trade_date'] = pd.to_datetime(df['trade_date'])\n", " industry_df['in_date'] = pd.to_datetime(industry_df['in_date'])\n", "\n", " # 对 industry_df 按 ts_code 和 in_date 排序\n", " industry_df_sorted = industry_df.sort_values(['in_date', 'ts_code'])\n", "\n", " # 对原始 df 按 ts_code 和 trade_date 排序\n", " df_sorted = df.sort_values(['trade_date', 'ts_code'])\n", "\n", " # 使用 merge_asof 进行向后合并\n", " merged = pd.merge_asof(\n", " df_sorted,\n", " industry_df_sorted,\n", " by='ts_code', # 按 ts_code 分组\n", " left_on='trade_date',\n", " right_on='in_date',\n", " direction='backward'\n", " )\n", "\n", " # 获取每个 ts_code 的最早 in_date 记录\n", " min_in_date_per_ts = (industry_df_sorted\n", " .groupby('ts_code')\n", " .first()\n", " .reset_index()[['ts_code', 'l2_code']])\n", "\n", " # 填充未匹配到的记录(trade_date 早于所有 in_date 的情况)\n", " merged['l2_code'] = merged['l2_code'].fillna(\n", " merged['ts_code'].map(min_in_date_per_ts.set_index('ts_code')['l2_code'])\n", " )\n", "\n", " # 保留需要的列并重置索引\n", " result = merged.reset_index(drop=True)\n", " return result\n", "\n", "\n", "# 使用示例\n", "df = merge_with_industry_data(df, industry_df)\n", "# print(mdf[mdf['ts_code'] == '600751.SH'][['ts_code', 'trade_date', 'l2_code']])" ] }, { "cell_type": "code", "execution_count": 4, "id": "c4e9e1d31da6dba6", "metadata": { "ExecuteTime": { "end_time": "2025-04-03T12:47:10.719252Z", "start_time": "2025-04-03T12:47:10.541247Z" }, "jupyter": { "source_hidden": true } }, "outputs": [], "source": [ "from main.factor.factor import *\n", "\n", "def calculate_indicators(df):\n", " \"\"\"\n", " 计算四个指标:当日涨跌幅、5日移动平均、RSI、MACD。\n", " \"\"\"\n", " df = df.sort_values('trade_date')\n", " df['daily_return'] = (df['close'] - df['pre_close']) / df['pre_close'] * 100\n", " # df['5_day_ma'] = df['close'].rolling(window=5).mean()\n", " delta = df['close'].diff()\n", " gain = delta.where(delta > 0, 0)\n", " loss = -delta.where(delta < 0, 0)\n", " avg_gain = gain.rolling(window=14).mean()\n", " avg_loss = loss.rolling(window=14).mean()\n", " rs = avg_gain / avg_loss\n", " df['RSI'] = 100 - (100 / (1 + rs))\n", "\n", " # 计算MACD\n", " ema12 = df['close'].ewm(span=12, adjust=False).mean()\n", " ema26 = df['close'].ewm(span=26, adjust=False).mean()\n", " df['MACD'] = ema12 - ema26\n", " df['Signal_line'] = df['MACD'].ewm(span=9, adjust=False).mean()\n", " df['MACD_hist'] = df['MACD'] - df['Signal_line']\n", "\n", " # 4. 情绪因子1:市场上涨比例(Up Ratio)\n", " df['up_ratio'] = df['daily_return'].apply(lambda x: 1 if x > 0 else 0)\n", " df['up_ratio_20d'] = df['up_ratio'].rolling(window=20).mean() # 过去20天上涨比例\n", "\n", " # 5. 
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "c4e9e1d31da6dba6",
   "metadata": { "ExecuteTime": { "end_time": "2025-04-03T12:47:10.719252Z", "start_time": "2025-04-03T12:47:10.541247Z" }, "jupyter": { "source_hidden": true } },
   "outputs": [],
   "source": [
    "from main.factor.factor import *\n",
    "\n",
    "def calculate_indicators(df):\n",
    "    \"\"\"\n",
    "    Compute per-index indicators: daily return, RSI, MACD, and rolling\n",
    "    sentiment factors (up ratio, volume/amount change rate, volatility).\n",
    "    \"\"\"\n",
    "    df = df.sort_values('trade_date')\n",
    "    df['daily_return'] = (df['close'] - df['pre_close']) / df['pre_close'] * 100\n",
    "    # df['5_day_ma'] = df['close'].rolling(window=5).mean()\n",
    "\n",
    "    # RSI(14): ratio of average gains to average losses over 14 days\n",
    "    delta = df['close'].diff()\n",
    "    gain = delta.where(delta > 0, 0)\n",
    "    loss = -delta.where(delta < 0, 0)\n",
    "    avg_gain = gain.rolling(window=14).mean()\n",
    "    avg_loss = loss.rolling(window=14).mean()\n",
    "    rs = avg_gain / avg_loss\n",
    "    df['RSI'] = 100 - (100 / (1 + rs))\n",
    "\n",
    "    # MACD: EMA(12) - EMA(26); signal line is a 9-day EMA of MACD\n",
    "    ema12 = df['close'].ewm(span=12, adjust=False).mean()\n",
    "    ema26 = df['close'].ewm(span=26, adjust=False).mean()\n",
    "    df['MACD'] = ema12 - ema26\n",
    "    df['Signal_line'] = df['MACD'].ewm(span=9, adjust=False).mean()\n",
    "    df['MACD_hist'] = df['MACD'] - df['Signal_line']\n",
    "\n",
    "    # Sentiment factor 1: share of up days over the past 20 days (up ratio)\n",
    "    df['up_ratio'] = df['daily_return'].apply(lambda x: 1 if x > 0 else 0)\n",
    "    df['up_ratio_20d'] = df['up_ratio'].rolling(window=20).mean()\n",
    "\n",
    "    # Sentiment factor 2: volume change rate vs. the 20-day average volume\n",
    "    df['volume_mean'] = df['vol'].rolling(window=20).mean()\n",
    "    df['volume_change_rate'] = (df['vol'] - df['volume_mean']) / df['volume_mean'] * 100\n",
    "\n",
    "    # Sentiment factor 3: volatility (20-day std of daily returns)\n",
    "    df['volatility'] = df['daily_return'].rolling(window=20).std()\n",
    "\n",
    "    # Sentiment factor 4: amount change rate vs. the 20-day average amount\n",
    "    df['amount_mean'] = df['amount'].rolling(window=20).mean()\n",
    "    df['amount_change_rate'] = (df['amount'] - df['amount_mean']) / df['amount_mean'] * 100\n",
    "\n",
    "    # df = sentiment_panic_greed_index(df)\n",
    "    # df = sentiment_market_breadth_proxy(df)\n",
    "    # df = sentiment_reversal_indicator(df)\n",
    "\n",
    "    return df\n",
    "\n",
    "\n",
    "def generate_index_indicators(h5_filename):\n",
    "    df = pd.read_hdf(h5_filename, key='index_data')\n",
    "    df['trade_date'] = pd.to_datetime(df['trade_date'], format='%Y%m%d')\n",
    "    df = df.sort_values('trade_date')\n",
    "\n",
    "    # Compute the indicators for each ts_code separately\n",
    "    df_indicators = []\n",
    "    for ts_code in df['ts_code'].unique():\n",
    "        df_index = df[df['ts_code'] == ts_code].copy()\n",
    "        df_index = calculate_indicators(df_index)\n",
    "        df_indicators.append(df_index)\n",
    "\n",
    "    # Concatenate the results for all indexes\n",
    "    df_all_indicators = pd.concat(df_indicators, ignore_index=True)\n",
    "\n",
    "    # Keep trade_date and pivot so each day's per-index values become one row\n",
    "    df_final = df_all_indicators.pivot_table(\n",
    "        index='trade_date',\n",
    "        columns='ts_code',\n",
    "        values=['daily_return',\n",
    "                'RSI', 'MACD', 'Signal_line', 'MACD_hist',\n",
    "                # 'sentiment_panic_greed_index',\n",
    "                'up_ratio_20d', 'volume_change_rate', 'volatility',\n",
    "                'amount_change_rate', 'amount_mean'],\n",
    "        aggfunc='last'\n",
    "    )\n",
    "\n",
    "    df_final.columns = [f\"{col[1]}_{col[0]}\" for col in df_final.columns]\n",
    "    df_final = df_final.reset_index()\n",
    "\n",
    "    return df_final\n",
    "\n",
    "\n",
    "# Build the index-level feature table\n",
    "h5_filename = '../../data/index_data.h5'\n",
    "index_data = generate_index_indicators(h5_filename)\n",
    "index_data = index_data.dropna()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "a735bc02ceb4d872",
   "metadata": { "ExecuteTime": { "end_time": "2025-04-03T12:47:10.821169Z", "start_time": "2025-04-03T12:47:10.751831Z" } },
   "outputs": [],
   "source": [
    "import talib\n",
    "import numpy as np"
   ]
  },
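  {
   "cell_type": "code",
   "execution_count": null,
   "id": "rsi-sanity-check",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity-check sketch (synthetic prices, safe to delete): the RSI variant in\n",
    "# calculate_indicators uses simple 14-day rolling means of gains and losses,\n",
    "# i.e. RSI = 100 - 100 / (1 + avg_gain / avg_loss) (Cutler-style, not Wilder's\n",
    "# smoothed version).\n",
    "close = pd.Series([10, 10.5, 10.2, 10.8, 11.0, 10.7, 10.9, 11.2,\n",
    "                   11.1, 11.5, 11.4, 11.8, 12.0, 11.9, 12.3])\n",
    "delta = close.diff()\n",
    "avg_gain = delta.where(delta > 0, 0).rolling(14).mean()\n",
    "avg_loss = (-delta.where(delta < 0, 0)).rolling(14).mean()\n",
    "print((100 - 100 / (1 + avg_gain / avg_loss)).iloc[-1])  # defined once 14 diffs exist\n",
    "del close, delta, avg_gain, avg_loss"
   ]
  },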
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "53f86ddc0677a6d7",
   "metadata": { "ExecuteTime": { "end_time": "2025-04-03T12:47:15.944254Z", "start_time": "2025-04-03T12:47:10.826179Z" }, "jupyter": { "source_hidden": true }, "scrolled": true },
   "outputs": [],
   "source": [
    "from main.utils.factor import get_act_factor\n",
    "\n",
    "\n",
    "def read_industry_data(h5_filename):\n",
    "    # Read all industry bars from the H5 file (stored under key 'sw_daily')\n",
    "    industry_data = pd.read_hdf(h5_filename, key='sw_daily', columns=[\n",
    "        'ts_code', 'trade_date', 'open', 'close', 'high', 'low', 'pe', 'pb', 'vol'\n",
    "    ])\n",
    "    industry_data = industry_data.sort_values(by=['ts_code', 'trade_date'])\n",
    "    industry_data = industry_data.reset_index(drop=True)  # was a no-op reindex(); a clean unique index is needed below\n",
    "    industry_data['trade_date'] = pd.to_datetime(industry_data['trade_date'], format='%Y%m%d')\n",
    "\n",
    "    grouped = industry_data.groupby('ts_code', group_keys=False)\n",
    "    industry_data['obv'] = grouped.apply(\n",
    "        lambda x: pd.Series(talib.OBV(x['close'].values, x['vol'].values), index=x.index)\n",
    "    )\n",
    "    industry_data['return_5'] = grouped['close'].apply(lambda x: x / x.shift(5) - 1)\n",
    "    industry_data['return_20'] = grouped['close'].apply(lambda x: x / x.shift(20) - 1)\n",
    "\n",
    "    industry_data = get_act_factor(industry_data, cat=False)\n",
    "    industry_data = industry_data.sort_values(by=['trade_date', 'ts_code'])\n",
    "\n",
    "    # # Per-day deviation of each ts_code's factor from the cross-sectional mean\n",
    "    # factor_columns = ['obv', 'return_5', 'return_20', 'act_factor1', 'act_factor2', 'act_factor3', 'act_factor4']\n",
    "    #\n",
    "    # for factor in factor_columns:\n",
    "    #     if factor in industry_data.columns:\n",
    "    #         industry_data[f'{factor}_deviation'] = industry_data.groupby('trade_date')[factor].transform(\n",
    "    #             lambda x: x - x.mean())\n",
    "\n",
    "    industry_data['return_5_percentile'] = industry_data.groupby('trade_date')['return_5'].transform(\n",
    "        lambda x: x.rank(pct=True))\n",
    "    industry_data['return_20_percentile'] = industry_data.groupby('trade_date')['return_20'].transform(\n",
    "        lambda x: x.rank(pct=True))\n",
    "\n",
    "    # cs_rank_intraday_range(industry_data)\n",
    "    # cs_rank_close_pos_in_range(industry_data)\n",
    "\n",
    "    industry_data = industry_data.drop(columns=['open', 'close', 'high', 'low', 'pe', 'pb', 'vol'])\n",
    "\n",
    "    industry_data = industry_data.rename(\n",
    "        columns={col: f'industry_{col}' for col in industry_data.columns if col not in ['ts_code', 'trade_date']})\n",
    "\n",
    "    industry_data = industry_data.rename(columns={'ts_code': 'cat_l2_code'})\n",
    "    return industry_data\n",
    "\n",
    "\n",
    "industry_df = read_industry_data('../../data/sw_daily.h5')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "dbe2fd8021b9417f",
   "metadata": { "ExecuteTime": { "end_time": "2025-04-03T12:47:15.969344Z", "start_time": "2025-04-03T12:47:15.963327Z" } },
   "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "['ts_code', 'open', 'close', 'high', 'low', 'circ_mv', 'total_mv', 'is_st', 'up_limit', 'down_limit', 'buy_sm_vol', 'sell_sm_vol', 'buy_lg_vol', 'sell_lg_vol', 'buy_elg_vol', 'sell_elg_vol', 'net_mf_vol', 'his_low', 'his_high', 'cost_5pct', 'cost_15pct', 'cost_50pct', 'cost_85pct', 'cost_95pct', 'weight_avg', 'in_date']\n" ] } ],
   "source": [
    "origin_columns = df.columns.tolist()\n",
    "origin_columns = [col for col in origin_columns if\n",
    "                  col not in ['turnover_rate', 'pe_ttm', 'volume_ratio', 'vol', 'pct_chg', 'l2_code', 'winner_rate']]\n",
    "origin_columns = [col for col in origin_columns if col not in index_data.columns]\n",
    "origin_columns = [col for col in origin_columns if 'cyq' not in col]\n",
    "print(origin_columns)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "85c3e3d0235ffffa",
   "metadata": { "ExecuteTime": { "end_time": "2025-04-03T12:47:16.089879Z", "start_time": "2025-04-03T12:47:15.990101Z" } },
   "outputs": [],
   "source": [
    "fina_indicator_df = read_and_merge_h5_data('../../data/fina_indicator.h5', key='fina_indicator',\n",
    "                                           columns=['ts_code', 'ann_date', 'undist_profit_ps', 'ocfps', 'bps'],\n",
    "                                           df=None)\n",
    "cashflow_df = read_and_merge_h5_data('../../data/cashflow.h5', key='cashflow',\n",
    "                                     columns=['ts_code', 'ann_date', 'n_cashflow_act'],\n",
    "                                     df=None)\n",
    "balancesheet_df = read_and_merge_h5_data('../../data/balancesheet.h5', key='balancesheet',\n",
    "                                         columns=['ts_code', 'ann_date', 'money_cap', 'total_liab'],\n",
    "                                         df=None)\n",
    "top_list_df = read_and_merge_h5_data('../../data/top_list.h5', key='top_list',\n",
    "                                     columns=['ts_code', 'trade_date', 'reason'],\n",
    "                                     df=None)\n",
    "\n",
    "# Keep one row per (ts_code, trade_date): sort descending, keep the first, then restore date order\n",
    "top_list_df = top_list_df.sort_values(by='trade_date', ascending=False).drop_duplicates(subset=['ts_code', 'trade_date'], keep='first').sort_values(by='trade_date')\n"
   ]
  },
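  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dedup-demo",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tiny synthetic illustration (not used downstream) of the dedup idiom applied\n",
    "# to top_list_df above: sort descending, keep the first occurrence per\n",
    "# (ts_code, trade_date), then restore chronological order.\n",
    "dup = pd.DataFrame({'ts_code': ['A', 'A', 'B'],\n",
    "                    'trade_date': ['20240102', '20240102', '20240101'],\n",
    "                    'reason': ['r1', 'r2', 'r3']})\n",
    "print(dup.sort_values(by='trade_date', ascending=False)\n",
    "         .drop_duplicates(subset=['ts_code', 'trade_date'], keep='first')\n",
    "         .sort_values(by='trade_date'))  # 'A' keeps a single 20240102 row\n",
    "del dup"
   ]
  },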
"metadata": { "ExecuteTime": { "end_time": "2025-04-03T13:08:01.612695Z", "start_time": "2025-04-03T12:47:16.121802Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "使用 'ann_date' 作为财务数据生效日期。\n", "警告: 从 financial_data_subset 中移除了 366 行,因为其 'ts_code' 或 'ann_date' 列存在空值。\n", "使用 'ann_date' 作为财务数据生效日期。\n", "警告: 从 financial_data_subset 中移除了 366 行,因为其 'ts_code' 或 'ann_date' 列存在空值。\n", "开始计算因子: AR, BR (原地修改)...\n", "因子 AR, BR 计算成功。\n", "因子 AR, BR 计算流程结束。\n", "使用 'ann_date' 作为财务数据生效日期。\n", "使用 'ann_date' 作为财务数据生效日期。\n", "使用 'ann_date' 作为财务数据生效日期。\n", "使用 'ann_date' 作为财务数据生效日期。\n", "警告: 从 financial_data_subset 中移除了 366 行,因为其 'ts_code' 或 'ann_date' 列存在空值。\n", "计算 BBI...\n", "--- 计算日级别偏离度 (使用 pct_chg) ---\n", "--- 计算日级别动量基准 (使用 pct_chg) ---\n", "日级别动量基准计算完成 (使用 pct_chg)。\n", "日级别偏离度计算完成 (使用 pct_chg)。\n", "--- 计算日级别行业偏离度 (使用 pct_chg 和行业基准) ---\n", "--- 计算日级别行业动量基准 (使用 pct_chg 和 cat_l2_code) ---\n", "错误: 计算日级别行业动量基准需要以下列: ['pct_chg', 'cat_l2_code', 'trade_date', 'ts_code']。\n", "错误: 计算日级别行业偏离度需要以下列: ['pct_chg', 'daily_industry_positive_benchmark', 'daily_industry_negative_benchmark']。请先运行 daily_industry_momentum_benchmark(df)。\n", "Index(['ts_code', 'trade_date', 'open', 'close', 'high', 'low', 'vol',\n", " 'pct_chg', 'turnover_rate', 'pe_ttm', 'circ_mv', 'total_mv',\n", " 'volume_ratio', 'is_st', 'up_limit', 'down_limit', 'buy_sm_vol',\n", " 'sell_sm_vol', 'buy_lg_vol', 'sell_lg_vol', 'buy_elg_vol',\n", " 'sell_elg_vol', 'net_mf_vol', 'his_low', 'his_high', 'cost_5pct',\n", " 'cost_15pct', 'cost_50pct', 'cost_85pct', 'cost_95pct', 'weight_avg',\n", " 'winner_rate', 'l2_code', 'undist_profit_ps', 'ocfps', 'AR', 'BR',\n", " 'AR_BR', 'log_circ_mv', 'cashflow_to_ev_factor', 'book_to_price_ratio',\n", " 'turnover_rate_mean_5', 'variance_20', 'bbi_ratio_factor',\n", " 'daily_deviation', 'lg_elg_net_buy_vol', 'flow_lg_elg_intensity',\n", " 'sm_net_buy_vol', 'flow_divergence_diff', 'flow_divergence_ratio',\n", " 'total_buy_vol', 'lg_elg_buy_prop', 'flow_struct_buy_change',\n", " 'lg_elg_net_buy_vol_change', 'flow_lg_elg_accel',\n", " 'chip_concentration_range', 'chip_skewness', 'floating_chip_proxy',\n", " 'cost_support_15pct_change', 'cat_winner_price_zone',\n", " 'flow_chip_consistency', 'profit_taking_vs_absorb', '_is_positive',\n", " '_is_negative', 'cat_is_positive', '_pos_returns', '_neg_returns',\n", " '_pos_returns_sq', '_neg_returns_sq', 'upside_vol', 'downside_vol',\n", " 'vol_ratio', 'return_skew', 'return_kurtosis', 'volume_change_rate',\n", " 'cat_volume_breakout', 'turnover_deviation', 'cat_turnover_spike',\n", " 'avg_volume_ratio', 'cat_volume_ratio_breakout', 'vol_spike',\n", " 'vol_std_5', 'atr_14', 'atr_6', 'obv'],\n", " dtype='object')\n" ] }, { "ename": "KeyboardInterrupt", "evalue": "", "output_type": "error", "traceback": [ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", "Cell \u001b[1;32mIn[9], line 39\u001b[0m\n\u001b[0;32m 37\u001b[0m df \u001b[38;5;241m=\u001b[39m daily_deviation(df)\n\u001b[0;32m 38\u001b[0m df \u001b[38;5;241m=\u001b[39m daily_industry_deviation(df)\n\u001b[1;32m---> 39\u001b[0m df, _ \u001b[38;5;241m=\u001b[39m get_rolling_factor(df)\n\u001b[0;32m 40\u001b[0m df, _ \u001b[38;5;241m=\u001b[39m get_simple_factor(df)\n\u001b[0;32m 42\u001b[0m df \u001b[38;5;241m=\u001b[39m 
df\u001b[38;5;241m.\u001b[39mrename(columns\u001b[38;5;241m=\u001b[39m{\u001b[38;5;124m'\u001b[39m\u001b[38;5;124ml1_code\u001b[39m\u001b[38;5;124m'\u001b[39m: \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mcat_l1_code\u001b[39m\u001b[38;5;124m'\u001b[39m})\n", "File \u001b[1;32me:\\PyProject\\NewStock\\main\\train\\../..\\main\\factor\\factor.py:340\u001b[0m, in \u001b[0;36mget_rolling_factor\u001b[1;34m(df)\u001b[0m\n\u001b[0;32m 336\u001b[0m \u001b[38;5;66;03m# 计算 act_factor1, act_factor2, act_factor3, act_factor4\u001b[39;00m\n\u001b[0;32m 337\u001b[0m df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mact_factor1\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m grouped[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_ema_5\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mapply(\n\u001b[0;32m 338\u001b[0m \u001b[38;5;28;01mlambda\u001b[39;00m x: np\u001b[38;5;241m.\u001b[39marctan((x \u001b[38;5;241m/\u001b[39m x\u001b[38;5;241m.\u001b[39mshift(\u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m-\u001b[39m \u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m100\u001b[39m) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m57.3\u001b[39m \u001b[38;5;241m/\u001b[39m \u001b[38;5;241m50\u001b[39m\n\u001b[0;32m 339\u001b[0m )\n\u001b[1;32m--> 340\u001b[0m df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mact_factor2\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m grouped[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_ema_13\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mapply(\n\u001b[0;32m 341\u001b[0m \u001b[38;5;28;01mlambda\u001b[39;00m x: np\u001b[38;5;241m.\u001b[39marctan((x \u001b[38;5;241m/\u001b[39m x\u001b[38;5;241m.\u001b[39mshift(\u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m-\u001b[39m \u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m100\u001b[39m) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m57.3\u001b[39m \u001b[38;5;241m/\u001b[39m \u001b[38;5;241m40\u001b[39m\n\u001b[0;32m 342\u001b[0m )\n\u001b[0;32m 343\u001b[0m df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mact_factor3\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m grouped[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_ema_20\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mapply(\n\u001b[0;32m 344\u001b[0m \u001b[38;5;28;01mlambda\u001b[39;00m x: np\u001b[38;5;241m.\u001b[39marctan((x \u001b[38;5;241m/\u001b[39m x\u001b[38;5;241m.\u001b[39mshift(\u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m-\u001b[39m \u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m100\u001b[39m) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m57.3\u001b[39m \u001b[38;5;241m/\u001b[39m \u001b[38;5;241m21\u001b[39m\n\u001b[0;32m 345\u001b[0m )\n\u001b[0;32m 346\u001b[0m df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mact_factor4\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m grouped[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_ema_60\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mapply(\n\u001b[0;32m 347\u001b[0m \u001b[38;5;28;01mlambda\u001b[39;00m x: np\u001b[38;5;241m.\u001b[39marctan((x \u001b[38;5;241m/\u001b[39m x\u001b[38;5;241m.\u001b[39mshift(\u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m-\u001b[39m \u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m100\u001b[39m) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m57.3\u001b[39m \u001b[38;5;241m/\u001b[39m \u001b[38;5;241m10\u001b[39m\n\u001b[0;32m 348\u001b[0m )\n", "File 
\u001b[1;32me:\\Python\\anaconda\\envs\\new_trader\\Lib\\site-packages\\pandas\\core\\groupby\\generic.py:230\u001b[0m, in \u001b[0;36mSeriesGroupBy.apply\u001b[1;34m(self, func, *args, **kwargs)\u001b[0m\n\u001b[0;32m 224\u001b[0m \u001b[38;5;129m@Appender\u001b[39m(\n\u001b[0;32m 225\u001b[0m _apply_docs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtemplate\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mformat(\n\u001b[0;32m 226\u001b[0m \u001b[38;5;28minput\u001b[39m\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mseries\u001b[39m\u001b[38;5;124m\"\u001b[39m, examples\u001b[38;5;241m=\u001b[39m_apply_docs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mseries_examples\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n\u001b[0;32m 227\u001b[0m )\n\u001b[0;32m 228\u001b[0m )\n\u001b[0;32m 229\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mapply\u001b[39m(\u001b[38;5;28mself\u001b[39m, func, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Series:\n\u001b[1;32m--> 230\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39mapply(func, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n", "File \u001b[1;32me:\\Python\\anaconda\\envs\\new_trader\\Lib\\site-packages\\pandas\\core\\groupby\\groupby.py:1824\u001b[0m, in \u001b[0;36mGroupBy.apply\u001b[1;34m(self, func, include_groups, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1822\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m option_context(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmode.chained_assignment\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m):\n\u001b[0;32m 1823\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m-> 1824\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_python_apply_general(f, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_selected_obj)\n\u001b[0;32m 1825\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (\n\u001b[0;32m 1826\u001b[0m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mobj, Series)\n\u001b[0;32m 1827\u001b[0m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_selection \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m 1828\u001b[0m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_selected_obj\u001b[38;5;241m.\u001b[39mshape \u001b[38;5;241m!=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_obj_with_exclusions\u001b[38;5;241m.\u001b[39mshape\n\u001b[0;32m 1829\u001b[0m ):\n\u001b[0;32m 1830\u001b[0m warnings\u001b[38;5;241m.\u001b[39mwarn(\n\u001b[0;32m 1831\u001b[0m message\u001b[38;5;241m=\u001b[39m_apply_groupings_depr\u001b[38;5;241m.\u001b[39mformat(\n\u001b[0;32m 1832\u001b[0m \u001b[38;5;28mtype\u001b[39m(\u001b[38;5;28mself\u001b[39m)\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mapply\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 1835\u001b[0m stacklevel\u001b[38;5;241m=\u001b[39mfind_stack_level(),\n\u001b[0;32m 1836\u001b[0m )\n", "File \u001b[1;32me:\\Python\\anaconda\\envs\\new_trader\\Lib\\site-packages\\pandas\\core\\groupby\\groupby.py:1885\u001b[0m, in 
\u001b[0;36mGroupBy._python_apply_general\u001b[1;34m(self, f, data, not_indexed_same, is_transform, is_agg)\u001b[0m\n\u001b[0;32m 1850\u001b[0m \u001b[38;5;129m@final\u001b[39m\n\u001b[0;32m 1851\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_python_apply_general\u001b[39m(\n\u001b[0;32m 1852\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 1857\u001b[0m is_agg: \u001b[38;5;28mbool\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[0;32m 1858\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m NDFrameT:\n\u001b[0;32m 1859\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m 1860\u001b[0m \u001b[38;5;124;03m Apply function f in python space\u001b[39;00m\n\u001b[0;32m 1861\u001b[0m \n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 1883\u001b[0m \u001b[38;5;124;03m data after applying f\u001b[39;00m\n\u001b[0;32m 1884\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[1;32m-> 1885\u001b[0m values, mutated \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_grouper\u001b[38;5;241m.\u001b[39mapply_groupwise(f, data, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39maxis)\n\u001b[0;32m 1886\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m not_indexed_same \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m 1887\u001b[0m not_indexed_same \u001b[38;5;241m=\u001b[39m mutated\n", "File \u001b[1;32me:\\Python\\anaconda\\envs\\new_trader\\Lib\\site-packages\\pandas\\core\\groupby\\ops.py:919\u001b[0m, in \u001b[0;36mBaseGrouper.apply_groupwise\u001b[1;34m(self, f, data, axis)\u001b[0m\n\u001b[0;32m 917\u001b[0m \u001b[38;5;66;03m# group might be modified\u001b[39;00m\n\u001b[0;32m 918\u001b[0m group_axes \u001b[38;5;241m=\u001b[39m group\u001b[38;5;241m.\u001b[39maxes\n\u001b[1;32m--> 919\u001b[0m res \u001b[38;5;241m=\u001b[39m f(group)\n\u001b[0;32m 920\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m mutated \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m _is_indexed_like(res, group_axes, axis):\n\u001b[0;32m 921\u001b[0m mutated \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n", "File \u001b[1;32me:\\PyProject\\NewStock\\main\\train\\../..\\main\\factor\\factor.py:341\u001b[0m, in \u001b[0;36mget_rolling_factor..\u001b[1;34m(x)\u001b[0m\n\u001b[0;32m 336\u001b[0m \u001b[38;5;66;03m# 计算 act_factor1, act_factor2, act_factor3, act_factor4\u001b[39;00m\n\u001b[0;32m 337\u001b[0m df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mact_factor1\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m grouped[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_ema_5\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mapply(\n\u001b[0;32m 338\u001b[0m \u001b[38;5;28;01mlambda\u001b[39;00m x: np\u001b[38;5;241m.\u001b[39marctan((x \u001b[38;5;241m/\u001b[39m x\u001b[38;5;241m.\u001b[39mshift(\u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m-\u001b[39m \u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m100\u001b[39m) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m57.3\u001b[39m \u001b[38;5;241m/\u001b[39m \u001b[38;5;241m50\u001b[39m\n\u001b[0;32m 339\u001b[0m )\n\u001b[0;32m 340\u001b[0m df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mact_factor2\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m 
grouped[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_ema_13\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mapply(\n\u001b[1;32m--> 341\u001b[0m \u001b[38;5;28;01mlambda\u001b[39;00m x: np\u001b[38;5;241m.\u001b[39marctan((x \u001b[38;5;241m/\u001b[39m x\u001b[38;5;241m.\u001b[39mshift(\u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m-\u001b[39m \u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m100\u001b[39m) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m57.3\u001b[39m \u001b[38;5;241m/\u001b[39m \u001b[38;5;241m40\u001b[39m\n\u001b[0;32m 342\u001b[0m )\n\u001b[0;32m 343\u001b[0m df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mact_factor3\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m grouped[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_ema_20\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mapply(\n\u001b[0;32m 344\u001b[0m \u001b[38;5;28;01mlambda\u001b[39;00m x: np\u001b[38;5;241m.\u001b[39marctan((x \u001b[38;5;241m/\u001b[39m x\u001b[38;5;241m.\u001b[39mshift(\u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m-\u001b[39m \u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m100\u001b[39m) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m57.3\u001b[39m \u001b[38;5;241m/\u001b[39m \u001b[38;5;241m21\u001b[39m\n\u001b[0;32m 345\u001b[0m )\n\u001b[0;32m 346\u001b[0m df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mact_factor4\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m grouped[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_ema_60\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mapply(\n\u001b[0;32m 347\u001b[0m \u001b[38;5;28;01mlambda\u001b[39;00m x: np\u001b[38;5;241m.\u001b[39marctan((x \u001b[38;5;241m/\u001b[39m x\u001b[38;5;241m.\u001b[39mshift(\u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m-\u001b[39m \u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m100\u001b[39m) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m57.3\u001b[39m \u001b[38;5;241m/\u001b[39m \u001b[38;5;241m10\u001b[39m\n\u001b[0;32m 348\u001b[0m )\n", "File \u001b[1;32me:\\Python\\anaconda\\envs\\new_trader\\Lib\\site-packages\\pandas\\core\\ops\\common.py:76\u001b[0m, in \u001b[0;36m_unpack_zerodim_and_defer..new_method\u001b[1;34m(self, other)\u001b[0m\n\u001b[0;32m 72\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mNotImplemented\u001b[39m\n\u001b[0;32m 74\u001b[0m other \u001b[38;5;241m=\u001b[39m item_from_zerodim(other)\n\u001b[1;32m---> 76\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m method(\u001b[38;5;28mself\u001b[39m, other)\n", "File \u001b[1;32me:\\Python\\anaconda\\envs\\new_trader\\Lib\\site-packages\\pandas\\core\\arraylike.py:194\u001b[0m, in \u001b[0;36mOpsMixin.__sub__\u001b[1;34m(self, other)\u001b[0m\n\u001b[0;32m 192\u001b[0m \u001b[38;5;129m@unpack_zerodim_and_defer\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m__sub__\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m 193\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__sub__\u001b[39m(\u001b[38;5;28mself\u001b[39m, other):\n\u001b[1;32m--> 194\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_arith_method(other, operator\u001b[38;5;241m.\u001b[39msub)\n", "File \u001b[1;32me:\\Python\\anaconda\\envs\\new_trader\\Lib\\site-packages\\pandas\\core\\series.py:6135\u001b[0m, in \u001b[0;36mSeries._arith_method\u001b[1;34m(self, other, op)\u001b[0m\n\u001b[0;32m 6133\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m 
\u001b[38;5;21m_arith_method\u001b[39m(\u001b[38;5;28mself\u001b[39m, other, op):\n\u001b[0;32m 6134\u001b[0m \u001b[38;5;28mself\u001b[39m, other \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_align_for_op(other)\n\u001b[1;32m-> 6135\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m base\u001b[38;5;241m.\u001b[39mIndexOpsMixin\u001b[38;5;241m.\u001b[39m_arith_method(\u001b[38;5;28mself\u001b[39m, other, op)\n", "File \u001b[1;32me:\\Python\\anaconda\\envs\\new_trader\\Lib\\site-packages\\pandas\\core\\base.py:1382\u001b[0m, in \u001b[0;36mIndexOpsMixin._arith_method\u001b[1;34m(self, other, op)\u001b[0m\n\u001b[0;32m 1379\u001b[0m rvalues \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39marange(rvalues\u001b[38;5;241m.\u001b[39mstart, rvalues\u001b[38;5;241m.\u001b[39mstop, rvalues\u001b[38;5;241m.\u001b[39mstep)\n\u001b[0;32m 1381\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m np\u001b[38;5;241m.\u001b[39merrstate(\u001b[38;5;28mall\u001b[39m\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mignore\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[1;32m-> 1382\u001b[0m result \u001b[38;5;241m=\u001b[39m ops\u001b[38;5;241m.\u001b[39marithmetic_op(lvalues, rvalues, op)\n\u001b[0;32m 1384\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_construct_result(result, name\u001b[38;5;241m=\u001b[39mres_name)\n", "File \u001b[1;32me:\\Python\\anaconda\\envs\\new_trader\\Lib\\site-packages\\pandas\\core\\ops\\array_ops.py:283\u001b[0m, in \u001b[0;36marithmetic_op\u001b[1;34m(left, right, op)\u001b[0m\n\u001b[0;32m 279\u001b[0m _bool_arith_check(op, left, right) \u001b[38;5;66;03m# type: ignore[arg-type]\u001b[39;00m\n\u001b[0;32m 281\u001b[0m \u001b[38;5;66;03m# error: Argument 1 to \"_na_arithmetic_op\" has incompatible type\u001b[39;00m\n\u001b[0;32m 282\u001b[0m \u001b[38;5;66;03m# \"Union[ExtensionArray, ndarray[Any, Any]]\"; expected \"ndarray[Any, Any]\"\u001b[39;00m\n\u001b[1;32m--> 283\u001b[0m res_values \u001b[38;5;241m=\u001b[39m _na_arithmetic_op(left, right, op) \u001b[38;5;66;03m# type: ignore[arg-type]\u001b[39;00m\n\u001b[0;32m 285\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m res_values\n", "File \u001b[1;32me:\\Python\\anaconda\\envs\\new_trader\\Lib\\site-packages\\pandas\\core\\ops\\array_ops.py:218\u001b[0m, in \u001b[0;36m_na_arithmetic_op\u001b[1;34m(left, right, op, is_cmp)\u001b[0m\n\u001b[0;32m 215\u001b[0m func \u001b[38;5;241m=\u001b[39m partial(expressions\u001b[38;5;241m.\u001b[39mevaluate, op)\n\u001b[0;32m 217\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 218\u001b[0m result \u001b[38;5;241m=\u001b[39m func(left, right)\n\u001b[0;32m 219\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m:\n\u001b[0;32m 220\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m is_cmp \u001b[38;5;129;01mand\u001b[39;00m (\n\u001b[0;32m 221\u001b[0m left\u001b[38;5;241m.\u001b[39mdtype \u001b[38;5;241m==\u001b[39m \u001b[38;5;28mobject\u001b[39m \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mgetattr\u001b[39m(right, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdtype\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m) \u001b[38;5;241m==\u001b[39m \u001b[38;5;28mobject\u001b[39m\n\u001b[0;32m 222\u001b[0m ):\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 225\u001b[0m \u001b[38;5;66;03m# Don't do this for comparisons, as that will handle complex 
numbers\u001b[39;00m\n\u001b[0;32m 226\u001b[0m \u001b[38;5;66;03m# incorrectly, see GH#32047\u001b[39;00m\n", "File \u001b[1;32me:\\Python\\anaconda\\envs\\new_trader\\Lib\\site-packages\\pandas\\core\\computation\\expressions.py:242\u001b[0m, in \u001b[0;36mevaluate\u001b[1;34m(op, a, b, use_numexpr)\u001b[0m\n\u001b[0;32m 239\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m op_str \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m 240\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m use_numexpr:\n\u001b[0;32m 241\u001b[0m \u001b[38;5;66;03m# error: \"None\" not callable\u001b[39;00m\n\u001b[1;32m--> 242\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m _evaluate(op, op_str, a, b) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[0;32m 243\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m _evaluate_standard(op, op_str, a, b)\n", "File \u001b[1;32me:\\Python\\anaconda\\envs\\new_trader\\Lib\\site-packages\\pandas\\core\\computation\\expressions.py:131\u001b[0m, in \u001b[0;36m_evaluate_numexpr\u001b[1;34m(op, op_str, a, b)\u001b[0m\n\u001b[0;32m 128\u001b[0m _store_test_result(result \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m)\n\u001b[0;32m 130\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m result \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m--> 131\u001b[0m result \u001b[38;5;241m=\u001b[39m _evaluate_standard(op, op_str, a, b)\n\u001b[0;32m 133\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m result\n", "File \u001b[1;32me:\\Python\\anaconda\\envs\\new_trader\\Lib\\site-packages\\pandas\\core\\computation\\expressions.py:73\u001b[0m, in \u001b[0;36m_evaluate_standard\u001b[1;34m(op, op_str, a, b)\u001b[0m\n\u001b[0;32m 71\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m _TEST_MODE:\n\u001b[0;32m 72\u001b[0m _store_test_result(\u001b[38;5;28;01mFalse\u001b[39;00m)\n\u001b[1;32m---> 73\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m op(a, b)\n", "\u001b[1;31mKeyboardInterrupt\u001b[0m: " ] } ], "source": [ "\n", "import numpy as np\n", "from main.factor.factor import *\n", "\n", "def filter_data(df):\n", " # df = df.groupby('trade_date').apply(lambda x: x.nlargest(1000, 'act_factor1'))\n", " df = df[~df['is_st']]\n", " df = df[~df['ts_code'].str.endswith('BJ')]\n", " df = df[~df['ts_code'].str.startswith('30')]\n", " df = df[~df['ts_code'].str.startswith('68')]\n", " df = df[~df['ts_code'].str.startswith('8')]\n", " df = df[df['trade_date'] >= '2019-01-01']\n", " if 'in_date' in df.columns:\n", " df = df.drop(columns=['in_date'])\n", " df = df.reset_index(drop=True)\n", " return df\n", "\n", "gc.collect()\n", "\n", "df = filter_data(df)\n", "df = df.sort_values(by=['ts_code', 'trade_date'])\n", "\n", "# df = price_minus_deduction_price(df, n=120)\n", "# df = price_deduction_price_diff_ratio_to_sma(df, n=120)\n", "# df = cat_price_vs_sma_vs_deduction_price(df, n=120)\n", "# df = cat_reason(df, top_list_df)\n", "# df = cat_is_on_top_list(df, top_list_df)\n", "\n", "df = add_financial_factor(df, fina_indicator_df, factor_value_col='undist_profit_ps')\n", "df = add_financial_factor(df, fina_indicator_df, factor_value_col='ocfps')\n", "calculate_arbr(df, N=26)\n", "df['log_circ_mv'] = np.log(df['circ_mv'])\n", "df = calculate_cashflow_to_ev_factor(df, cashflow_df, balancesheet_df)\n", "df = caculate_book_to_price_ratio(df, fina_indicator_df)\n", "df = turnover_rate_n(df, n=5)\n", "df = variance_n(df, n=20)\n", "df = bbi_ratio_factor(df)\n", 
"df = daily_deviation(df)\n", "df = daily_industry_deviation(df)\n", "df, _ = get_rolling_factor(df)\n", "df, _ = get_simple_factor(df)\n", "\n", "df = df.rename(columns={'l1_code': 'cat_l1_code'})\n", "df = df.rename(columns={'l2_code': 'cat_l2_code'})\n", "\n", "lg_flow_mom_corr(df, N=20, M=60)\n", "lg_flow_accel(df)\n", "profit_pressure(df)\n", "underwater_resistance(df)\n", "cost_conc_std(df, N=20)\n", "profit_decay(df, N=20)\n", "vol_amp_loss(df, N=20)\n", "vol_drop_profit_cnt(df, N=20, M=5)\n", "lg_flow_vol_interact(df, N=20)\n", "cost_break_confirm_cnt(df, M=5)\n", "atr_norm_channel_pos(df, N=14)\n", "turnover_diff_skew(df, N=20)\n", "lg_sm_flow_diverge(df, N=20)\n", "pullback_strong(df, N=20, M=20)\n", "vol_wgt_hist_pos(df, N=20)\n", "vol_adj_roc(df, N=20)\n", "\n", "cs_rank_net_lg_flow_val(df)\n", "cs_rank_flow_divergence(df)\n", "cs_rank_industry_adj_lg_flow(df) # Needs cat_l2_code\n", "cs_rank_elg_buy_ratio(df)\n", "cs_rank_rel_profit_margin(df)\n", "cs_rank_cost_breadth(df)\n", "cs_rank_dist_to_upper_cost(df)\n", "cs_rank_winner_rate(df)\n", "cs_rank_intraday_range(df)\n", "cs_rank_close_pos_in_range(df)\n", "cs_rank_opening_gap(df) # Needs pre_close\n", "cs_rank_pos_in_hist_range(df) # Needs his_low, his_high\n", "cs_rank_vol_x_profit_margin(df)\n", "cs_rank_lg_flow_price_concordance(df)\n", "cs_rank_turnover_per_winner(df)\n", "cs_rank_ind_cap_neutral_pe(df) # Placeholder - needs external libraries\n", "cs_rank_volume_ratio(df) # Needs volume_ratio\n", "cs_rank_elg_buy_sell_sm_ratio(df)\n", "cs_rank_cost_dist_vol_ratio(df) # Needs volume_ratio\n", "cs_rank_size(df) # Needs circ_mv\n", "\n", "\n", "# df = df.merge(index_data, on='trade_date', how='left')\n", "\n", "print(df.info())\n", "print(df.columns.tolist())" ] }, { "cell_type": "code", "execution_count": null, "id": "b87b938028afa206", "metadata": { "ExecuteTime": { "end_time": "2025-04-03T13:08:03.658725Z", "start_time": "2025-04-03T13:08:02.469611Z" } }, "outputs": [], "source": [ "from scipy.stats import ks_2samp, wasserstein_distance\n", "\n", "\n", "def remove_shifted_features(train_data, test_data, feature_columns, ks_threshold=0.05, wasserstein_threshold=0.1,\n", " importance_threshold=0.05):\n", " dropped_features = []\n", "\n", " # **统计数据漂移**\n", " numeric_columns = train_data.select_dtypes(include=['float64', 'int64']).columns\n", " numeric_columns = [col for col in numeric_columns if col in feature_columns]\n", " for feature in numeric_columns:\n", " ks_stat, p_value = ks_2samp(train_data[feature], test_data[feature])\n", " wasserstein_dist = wasserstein_distance(train_data[feature], test_data[feature])\n", "\n", " if p_value < ks_threshold or wasserstein_dist > wasserstein_threshold:\n", " dropped_features.append(feature)\n", "\n", " print(f\"检测到 {len(dropped_features)} 个可能漂移的特征: {dropped_features}\")\n", "\n", " # **应用阈值进行最终筛选**\n", " filtered_features = [f for f in feature_columns if f not in dropped_features]\n", "\n", " return filtered_features, dropped_features\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "id": "f4f16d63ad18d1bc", "metadata": { "ExecuteTime": { "end_time": "2025-04-03T13:08:03.670700Z", "start_time": "2025-04-03T13:08:03.665739Z" } }, "outputs": [], "source": [ "import pandas as pd\n", "import numpy as np\n", "import statsmodels.api as sm # 用于中性化回归\n", "from tqdm import tqdm # 可选,用于显示进度条\n", "\n", "# --- 常量 ---\n", "epsilon = 1e-10 # 防止除零\n", "\n", "# --- 1. 
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f4f16d63ad18d1bc",
   "metadata": { "ExecuteTime": { "end_time": "2025-04-03T13:08:03.670700Z", "start_time": "2025-04-03T13:08:03.665739Z" } },
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import statsmodels.api as sm  # for the neutralization regressions\n",
    "from tqdm import tqdm  # optional, progress bars\n",
    "\n",
    "# --- Constants ---\n",
    "epsilon = 1e-10  # guards against division by zero\n",
    "\n",
    "# --- 1. Median de-extreming (MAD winsorization) ---\n",
    "\n",
    "def cs_mad_filter(df: pd.DataFrame,\n",
    "                  features: list,\n",
    "                  k: float = 3.0,\n",
    "                  scale_factor: float = 1.4826):\n",
    "    \"\"\"\n",
    "    Cross-sectional MAD winsorization of the given feature columns (in place).\n",
    "\n",
    "    Method: per trading day, compute the median and the MAD, then clip values\n",
    "    outside [median - k * scale * MAD, median + k * scale * MAD] to the bounds.\n",
    "    scale_factor=1.4826 makes the MAD comparable to a normal std.\n",
    "\n",
    "    Args:\n",
    "        df (pd.DataFrame): input frame with 'trade_date' and the feature columns.\n",
    "        features (list): feature column names to process.\n",
    "        k (float): number of (scaled) MADs defining the bounds. Default 3.0.\n",
    "        scale_factor (float): MAD scaling factor. Default 1.4826.\n",
    "\n",
    "    WARNING: this function modifies the input DataFrame 'df' in place.\n",
    "    \"\"\"\n",
    "    print(f\"Starting cross-sectional MAD winsorization (k={k})...\")\n",
    "    if not all(col in df.columns for col in features):\n",
    "        missing = [col for col in features if col not in df.columns]\n",
    "        print(f\"Error: DataFrame is missing feature columns: {missing}. Skipping winsorization.\")\n",
    "        return\n",
    "\n",
    "    grouped = df.groupby('trade_date')\n",
    "\n",
    "    for col in tqdm(features, desc=\"MAD Filtering\"):\n",
    "        try:\n",
    "            # cross-sectional median\n",
    "            median = grouped[col].transform('median')\n",
    "            # cross-sectional MAD (median absolute deviation from the median)\n",
    "            mad = (df[col] - median).abs().groupby(df['trade_date']).transform('median')\n",
    "\n",
    "            # lower and upper bounds\n",
    "            lower_bound = median - k * scale_factor * mad\n",
    "            upper_bound = median + k * scale_factor * mad\n",
    "\n",
    "            # clip in place\n",
    "            df[col] = np.clip(df[col], lower_bound, upper_bound)\n",
    "\n",
    "        except KeyError:\n",
    "            print(f\"Warning: column '{col}' is missing or failed in the groupby; skipping its MAD step.\")\n",
    "        except Exception as e:\n",
    "            print(f\"Warning: error while processing column '{col}': {e}; skipping its MAD step.\")\n",
    "\n",
    "    print(\"Cross-sectional MAD winsorization done.\")\n",
    "\n",
    "\n",
    "# --- 2. Industry and market-cap neutralization ---\n",
    "\n",
    "def cs_neutralize_industry_cap(df: pd.DataFrame,\n",
    "                               features: list,\n",
    "                               industry_col: str = 'cat_l2_code',\n",
    "                               market_cap_col: str = 'circ_mv'):\n",
    "    \"\"\"\n",
    "    Cross-sectional industry and log-market-cap neutralization (in place).\n",
    "    Runs the OLS regression feature ~ 1 + log(market_cap) + C(industry)\n",
    "    and writes the residuals back into the original feature columns.\n",
    "\n",
    "    Args:\n",
    "        df (pd.DataFrame): input frame with 'trade_date', the feature columns,\n",
    "                           industry_col and market_cap_col.\n",
    "        features (list): feature column names to process.\n",
    "        industry_col (str): industry classification column.\n",
    "        market_cap_col (str): float market-cap column.\n",
    "\n",
    "    WARNING: modifies the feature columns of 'df' in place.\n",
    "             Computationally heavy; can take a while.\n",
    "             Requires statsmodels (pip install statsmodels).\n",
    "    \"\"\"\n",
    "    print(\"Starting cross-sectional industry/market-cap neutralization...\")\n",
    "    required_cols = features + ['trade_date', industry_col, market_cap_col]\n",
    "    if not all(col in df.columns for col in required_cols):\n",
    "        missing = [col for col in required_cols if col not in df.columns]\n",
    "        print(f\"Error: DataFrame is missing required columns: {missing}. Cannot neutralize.\")\n",
    "        return\n",
    "\n",
    "    # Preprocess: log market cap; industry NaNs could be filled here if needed\n",
    "    log_cap_col = '_log_market_cap'\n",
    "    df[log_cap_col] = np.log1p(df[market_cap_col])  # log1p handles zeros\n",
    "    # df[industry_col] = df[industry_col].cat.add_categories('UnknownIndustry')\n",
    "    # df[industry_col] = df[industry_col].fillna('UnknownIndustry')\n",
    "    # df[industry_col] = df[industry_col].astype('category')\n",
    "\n",
    "    dates = df['trade_date'].unique()\n",
    "    all_residuals = []  # residuals collected across dates\n",
    "\n",
    "    for date in tqdm(dates, desc=\"Neutralizing\"):\n",
    "        daily_data = df.loc[df['trade_date'] == date, features + [log_cap_col, industry_col]].copy()  # .loc copy\n",
    "\n",
    "        # Regressors X: constant + log market cap + industry dummies\n",
    "        X = daily_data[[log_cap_col]]\n",
    "        X = sm.add_constant(X, prepend=True)\n",
    "        # drop_first=True avoids the dummy trap (perfect collinearity)\n",
    "        industry_dummies = pd.get_dummies(daily_data[industry_col], prefix=industry_col, drop_first=True)\n",
    "        industry_dummies = industry_dummies.astype(int)\n",
    "        X = pd.concat([X, industry_dummies], axis=1)\n",
    "\n",
    "        daily_residuals = daily_data[[col for col in features]].copy()  # holder for the residuals\n",
    "\n",
    "        for col in features:\n",
    "            Y = daily_data[col]\n",
    "\n",
    "            # Keep only rows where both X and Y are valid\n",
    "            valid_mask = Y.notna() & X.notna().all(axis=1)\n",
    "            if valid_mask.sum() < (X.shape[1] + 1):  # not enough points to fit the model\n",
    "                print(f\"Warning: date {date}, feature {col} has too few valid rows ({valid_mask.sum()}); filling NaN.\")\n",
    "                daily_residuals[col] = np.nan\n",
    "                continue\n",
    "\n",
    "            Y_valid = Y[valid_mask]\n",
    "            X_valid = X[valid_mask]\n",
    "\n",
    "            # Fit OLS and keep the residuals\n",
    "            try:\n",
    "                model = sm.OLS(Y_valid.to_numpy(), X_valid.to_numpy())\n",
    "                results = model.fit()\n",
    "                daily_residuals.loc[valid_mask, col] = results.resid\n",
    "                daily_residuals.loc[~valid_mask, col] = np.nan  # invalid rows stay NaN\n",
    "            except Exception as e:\n",
    "                print(f\"Warning: date {date}, feature {col} regression failed: {e}; filling NaN.\")\n",
    "                daily_residuals[col] = np.nan\n",
    "                break\n",
    "\n",
    "        all_residuals.append(daily_residuals)\n",
    "\n",
    "    # Merge the per-date residuals back\n",
    "    if all_residuals:\n",
    "        residuals_df = pd.concat(all_residuals)\n",
    "        # update() suits an index-aligned in-place write better than merge;\n",
    "        # residuals_df keeps the row index of the matching slice of df\n",
    "        df.update(residuals_df)\n",
    "    else:\n",
    "        print(\"No residuals to merge back.\")\n",
    "\n",
    "    # Drop the temporary column\n",
    "    df.drop(columns=[log_cap_col], inplace=True)\n",
    "    print(\"Cross-sectional industry/market-cap neutralization done.\")\n",
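    "\n",
    "# A common ordering for these cross-sectional steps (a convention, not enforced\n",
    "# anywhere in this notebook): winsorize first (cs_mad_filter), then neutralize\n",
    "# (cs_neutralize_industry_cap), then standardize (cs_zscore_standardize below).\n",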
    "\n",
    "\n",
    "# --- 3. Z-score standardization ---\n",
    "\n",
    "def cs_zscore_standardize(df: pd.DataFrame, features: list, epsilon: float = 1e-10):\n",
    "    \"\"\"\n",
    "    Cross-sectional z-score standardization of the given features (in place).\n",
    "    Method: Z = (value - cross_sectional_mean) / (cross_sectional_std + epsilon)\n",
    "\n",
    "    Args:\n",
    "        df (pd.DataFrame): input frame with 'trade_date' and the feature columns.\n",
    "        features (list): feature column names to process.\n",
    "        epsilon (float): small constant guarding against division by zero.\n",
    "\n",
    "    WARNING: this function modifies the input DataFrame 'df' in place.\n",
    "    \"\"\"\n",
    "    print(\"Starting cross-sectional z-score standardization...\")\n",
    "    if not all(col in df.columns for col in features):\n",
    "        missing = [col for col in features if col not in df.columns]\n",
    "        print(f\"Error: DataFrame is missing feature columns: {missing}. Skipping standardization.\")\n",
    "        return\n",
    "\n",
    "    grouped = df.groupby('trade_date')\n",
    "\n",
    "    for col in tqdm(features, desc=\"Standardizing\"):\n",
    "        try:\n",
    "            # cross-sectional mean and std via transform\n",
    "            mean = grouped[col].transform('mean')\n",
    "            std = grouped[col].transform('std')\n",
    "\n",
    "            # z-score, assigned back in place\n",
    "            df[col] = (df[col] - mean) / (std + epsilon)\n",
    "\n",
    "        except KeyError:\n",
    "            print(f\"Warning: column '{col}' is missing or failed in the groupby; skipping its standardization.\")\n",
    "        except Exception as e:\n",
    "            print(f\"Warning: error while processing column '{col}': {e}; skipping its standardization.\")\n",
    "\n",
    "    print(\"Cross-sectional z-score standardization done.\")\n",
    "\n",
    "def fill_nan_with_daily_median(df: pd.DataFrame, feature_columns: list[str]) -> pd.DataFrame:\n",
    "    \"\"\"\n",
    "    Fill NaNs in the given feature columns with each day's cross-sectional median.\n",
    "\n",
    "    Args:\n",
    "        df (pd.DataFrame): multi-day frame with 'trade_date' and the feature columns.\n",
    "        feature_columns (list[str]): feature columns to fill.\n",
    "\n",
    "    Returns:\n",
    "        pd.DataFrame: frame with the filled features; operates on a copy of the input.\n",
    "    \"\"\"\n",
    "    processed_df = df.copy()  # work on a copy, keep the original intact\n",
    "\n",
    "    # Make trade_date datetime so the grouping is correct\n",
    "    processed_df['trade_date'] = pd.to_datetime(processed_df['trade_date'])\n",
    "\n",
    "    def _fill_daily_nan(group):\n",
    "        # 'group' is the DataFrame of a single trading day\n",
    "\n",
    "        # Walk the requested feature columns\n",
    "        for feature_col in feature_columns:\n",
    "            # skip columns missing from this group\n",
    "            if feature_col in group.columns:\n",
    "                # that day's median of the feature\n",
    "                median_val = group[feature_col].median()\n",
    "\n",
    "                # fill the day's NaNs with the median\n",
    "                # (inplace=True modifies the group DataFrame directly)\n",
    "                group[feature_col].fillna(median_val, inplace=True)\n",
    "            # else:\n",
    "            #     print(f\"Warning: feature column '{feature_col}' not found in daily group for {group['trade_date'].iloc[0]}. Skipping.\")\n",
    "\n",
    "        return group\n",
    "\n",
    "    # Group by trading day and apply the daily fill;\n",
    "    # group_keys=False keeps the group key out of the result index\n",
    "    filled_df = processed_df.groupby('trade_date', group_keys=False).apply(_fill_daily_nan)\n",
    "\n",
    "    return filled_df"
   ]
  },
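  {
   "cell_type": "code",
   "execution_count": null,
   "id": "mad-filter-demo",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Toy run (synthetic one-day cross-section, safe to delete) of cs_mad_filter\n",
    "# from the previous cell: the lone outlier is clipped to roughly\n",
    "# median + 3 * 1.4826 * MAD while ordinary values pass through unchanged.\n",
    "toy = pd.DataFrame({'trade_date': ['2024-01-02'] * 5,\n",
    "                    'f': [1.0, 1.1, 0.9, 1.05, 50.0]})  # 50.0 is the outlier\n",
    "cs_mad_filter(toy, ['f'], k=3.0)\n",
    "print(toy['f'].max())  # about 1.27, far below 50\n",
    "del toy"
   ]
  },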
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "40e6b68a91b30c79",
   "metadata": { "ExecuteTime": { "end_time": "2025-04-03T13:08:04.694262Z", "start_time": "2025-04-03T13:08:03.694904Z" } },
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "\n",
    "\n",
    "def remove_outliers_label_percentile(label: pd.Series, lower_percentile: float = 0.01, upper_percentile: float = 0.99,\n",
    "                                     log=True):\n",
    "    if not (0 <= lower_percentile < upper_percentile <= 1):\n",
    "        raise ValueError(\"Percentile values must satisfy 0 <= lower_percentile < upper_percentile <= 1.\")\n",
    "\n",
    "    # Calculate lower and upper bounds based on percentiles\n",
    "    lower_bound = label.quantile(lower_percentile)\n",
    "    upper_bound = label.quantile(upper_percentile)\n",
    "\n",
    "    # Filter out values outside the bounds\n",
    "    filtered_label = label[(label >= lower_bound) & (label <= upper_bound)]\n",
    "\n",
    "    # Print the number of removed outliers\n",
    "    if log:\n",
    "        print(f\"Removed {len(label) - len(filtered_label)} outliers.\")\n",
    "    return filtered_label\n",
    "\n",
    "\n",
    "def calculate_risk_adjusted_target(df, days=5):\n",
    "    df = df.sort_values(by=['ts_code', 'trade_date'])\n",
    "\n",
    "    df['future_close'] = df.groupby('ts_code')['close'].shift(-days)\n",
    "    df['future_open'] = df.groupby('ts_code')['open'].shift(-1)\n",
    "    df['future_return'] = (df['future_close'] - df['future_open']) / df['future_open']\n",
    "\n",
    "    df['future_volatility'] = df.groupby('ts_code')['future_return'].rolling(days, min_periods=1).std().reset_index(\n",
    "        level=0, drop=True)\n",
    "    # Risk-adjusted return: divide by volatility (the original multiplied, which\n",
    "    # inverts the intent); infinities from zero volatility become NaN below\n",
    "    sharpe_ratio = df['future_return'] / df['future_volatility']\n",
    "    sharpe_ratio.replace([np.inf, -np.inf], np.nan, inplace=True)\n",
    "\n",
    "    return sharpe_ratio\n",
    "\n",
    "\n",
    "def calculate_score(df, days=5, lambda_param=1.0):\n",
    "    def calculate_max_drawdown(prices):\n",
    "        peak = prices.iloc[0]  # running peak\n",
    "        max_drawdown = 0  # running max drawdown\n",
    "\n",
    "        for price in prices:\n",
    "            if price > peak:\n",
    "                peak = price  # new peak\n",
    "            else:\n",
    "                drawdown = (peak - price) / peak  # current drawdown\n",
    "                max_drawdown = max(max_drawdown, drawdown)  # update the max drawdown\n",
    "\n",
    "        return max_drawdown\n",
    "\n",
    "    def compute_stock_score(stock_df):\n",
    "        stock_df = stock_df.sort_values(by=['trade_date'])\n",
    "        future_return = stock_df['future_return']\n",
    "        # Volatility from the existing pct_chg column (currently unused in the score)\n",
    "        volatility = stock_df['pct_chg'].rolling(days).std().shift(-days)\n",
    "        max_drawdown = stock_df['close'].rolling(days).apply(calculate_max_drawdown, raw=False).shift(-days)\n",
    "        score = future_return - lambda_param * max_drawdown\n",
    "        return score\n",
    "\n",
    "    # # Make sure the frame is sorted by ts_code and trade_date\n",
    "    # df = df.sort_values(by=['ts_code', 'trade_date'])\n",
    "\n",
    "    # Compute the score for each stock separately\n",
    "    df['score'] = df.groupby('ts_code').apply(compute_stock_score).reset_index(level=0, drop=True)\n",
    "\n",
    "    return df['score']\n",
    "\n",
    "\n",
    "def remove_highly_correlated_features(df, feature_columns, threshold=0.9):\n",
    "    numeric_features = df[feature_columns].select_dtypes(include=[np.number]).columns.tolist()\n",
    "    if not numeric_features:\n",
    "        raise ValueError(\"No numeric features found in the provided data.\")\n",
    "\n",
    "    corr_matrix = df[numeric_features].corr().abs()\n",
    "    # Upper triangle of the correlation matrix (k=1 excludes the diagonal)\n",
    "    upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))\n",
    "    to_drop = [column for column in upper.columns if any(upper[column] > threshold)]\n",
    "    remaining_features = [col for col in feature_columns if col not in to_drop\n",
    "                          or 'act' in col or 'af' in col]\n",
    "    return remaining_features\n",
    "\n",
    "\n",
    "from sklearn.preprocessing import StandardScaler  # was missing; used below\n",
    "\n",
    "\n",
    "def cross_sectional_standardization(df, features):\n",
    "    df_sorted = df.sort_values(by='trade_date')  # sort by time\n",
    "    df_standardized = df_sorted.copy()\n",
    "\n",
    "    for date in df_sorted['trade_date'].unique():\n",
    "        # slice of the current trading day\n",
    "        current_data = df_standardized[df_standardized['trade_date'] == date]\n",
    "\n",
    "        # standardize only the requested features\n",
    "        scaler = StandardScaler()\n",
    "        standardized_values = scaler.fit_transform(current_data[features])\n",
    "\n",
    "        # write the standardized values back\n",
    "        df_standardized.loc[df_standardized['trade_date'] == date, features] = standardized_values\n",
    "\n",
    "    return df_standardized\n",
    "\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "\n",
    "def neutralize_manual_revised(df: pd.DataFrame, features: list, industry_col: str, mkt_cap_col: str) -> pd.DataFrame:\n",
    "    \"\"\"\n",
    "    Hand-rolled simple regression for speed; builds Series so the indexes stay aligned.\n",
    "    Market-cap-neutralizes each feature within its industry.\n",
    "\n",
    "    Args:\n",
    "        df: input DataFrame with the features, industry and market-cap columns.\n",
    "        features: feature column names to neutralize.\n",
    "        industry_col: industry classification column name.\n",
    "        mkt_cap_col: market-cap column name.\n",
    "\n",
    "    Returns:\n",
    "        The neutralized DataFrame.\n",
    "    \"\"\"\n",
    "\n",
    "    df[mkt_cap_col] = pd.to_numeric(df[mkt_cap_col], errors='coerce')\n",
    "    df_cleaned = df.dropna(subset=[mkt_cap_col]).copy()\n",
    "    df_cleaned = df_cleaned[df_cleaned[mkt_cap_col] > 0].copy()\n",
    "\n",
    "    if df_cleaned.empty:\n",
    "        print(\"Warning: DataFrame is empty after dropping bad market-cap rows.\")\n",
    "        return df  # return the original (or empty) frame, depending on the input\n",
    "\n",
    "    processed_df = df\n",
    "\n",
    "    for col in features:\n",
    "        if col not in df_cleaned.columns:\n",
    "            print(f\"Warning: feature column '{col}' is missing from the cleaned DataFrame; skipped.\")\n",
    "            # columns absent from the original df stay as-is (possibly all NaN)\n",
    "            processed_df[col] = df[col] if col in df.columns else np.nan\n",
    "            continue\n",
    "\n",
    "        # never neutralize the control variables themselves\n",
    "        if col == mkt_cap_col or col == industry_col:\n",
    "            print(f\"Warning: feature column '{col}' is a control or internal column; skipping neutralization.\")\n",
    "            processed_df[col] = df[col] if col in df.columns else np.nan\n",
    "            continue\n",
    "\n",
    "        residual_series = pd.Series(index=df_cleaned.index, dtype=float)\n",
    "\n",
    "        # Drop the feature's NaNs before grouping: only rows with a factor value enter the regression\n",
    "        df_subset_factor = df_cleaned.dropna(subset=[col]).copy()\n",
    "\n",
    "        if not df_subset_factor.empty:\n",
    "            for industry, group in df_subset_factor.groupby(industry_col):\n",
    "                x = group[mkt_cap_col]  # market cap (note: no log is taken here)\n",
    "                y = group[col]  # factor value\n",
    "\n",
    "                # Need more than one point and non-degenerate variance to regress;\n",
    "                # compare np.var against a small positive number to dodge float noise\n",
    "                if len(group) > 1 and np.var(x) > 1e-9:\n",
    "                    try:\n",
    "                        beta = np.cov(y, x)[0, 1] / np.var(x)\n",
    "                        alpha = np.mean(y) - beta * np.mean(x)\n",
    "\n",
    "                        # residuals\n",
    "                        resid = y - (alpha + beta * x)\n",
    "\n",
    "                        # store the residuals; the index aligns them automatically\n",
    "                        residual_series.loc[resid.index] = resid\n",
    "\n",
    "                    except Exception as e:\n",
    "                        # np.cov / np.var can fail on extreme data\n",
    "                        print(f\"Warning: regression failed in industry {industry}: {e}. Residuals stay NaN (or could keep the raw values).\")\n",
    "                        # this group's residuals keep their initial NaN;\n",
    "                        # alternatively: residual_series.loc[group.index] = group[col]\n",
    "\n",
    "                else:\n",
    "                    residual_series.loc[group.index] = group[col]  # keep the raw factor value\n",
    "            processed_df.loc[residual_series.index, col] = residual_series\n",
    "\n",
    "        else:\n",
    "            processed_df[col] = np.nan  # or: df[col] if col in df.columns else np.nan\n",
    "\n",
    "    return processed_df\n",
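    "\n",
    "# Note: neutralize_manual_revised fits a separate slope per industry on raw\n",
    "# market cap, whereas cs_neutralize_industry_cap (defined earlier) pools each\n",
    "# day into one OLS on log market cap plus industry dummies; the per-group\n",
    "# version trades statistical pooling for speed.\n",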
    "\n",
    "\n",
    "import gc\n",
    "\n",
    "gc.collect()\n",
    "\n",
    "\n",
    "def mad_filter(df, features, n=3):\n",
    "    for col in features:\n",
    "        median = df[col].median()\n",
    "        mad = np.median(np.abs(df[col] - median))\n",
    "        upper = median + n * mad\n",
    "        lower = median - n * mad\n",
    "        df[col] = np.clip(df[col], lower, upper)  # clip the extremes\n",
    "    return df\n",
    "\n",
    "\n",
    "def percentile_filter(df, features, lower_percentile=0.01, upper_percentile=0.99):\n",
    "    for col in features:\n",
    "        # per-day lower and upper percentiles\n",
    "        lower_bound = df.groupby('trade_date')[col].transform(\n",
    "            lambda x: x.quantile(lower_percentile)\n",
    "        )\n",
    "        upper_bound = df.groupby('trade_date')[col].transform(\n",
    "            lambda x: x.quantile(upper_percentile)\n",
    "        )\n",
    "        # clip values outside the range\n",
    "        df[col] = np.clip(df[col], lower_bound, upper_bound)\n",
    "    return df\n",
    "\n",
    "\n",
    "from scipy.stats import iqr\n",
    "\n",
    "\n",
    "def iqr_filter(df, features):\n",
    "    for col in features:\n",
    "        df[col] = df.groupby('trade_date')[col].transform(\n",
    "            lambda x: (x - x.median()) / iqr(x) if iqr(x) != 0 else x\n",
    "        )\n",
    "    return df\n",
    "\n",
    "\n",
    "def quantile_filter(df, features, lower_quantile=0.01, upper_quantile=0.99, window=60):\n",
    "    df = df.copy()\n",
    "    for col in features:\n",
    "        # rolling quantiles, computed within each trading day via groupby\n",
    "        rolling_lower = df.groupby('trade_date')[col].transform(lambda x: x.rolling(window=min(len(x), window)).quantile(lower_quantile))\n",
    "        rolling_upper = df.groupby('trade_date')[col].transform(lambda x: x.rolling(window=min(len(x), window)).quantile(upper_quantile))\n",
    "\n",
    "        # clip the data to the rolling bounds\n",
    "        df[col] = np.clip(df[col], rolling_lower, rolling_upper)\n",
    "\n",
    "    return df\n",
    "\n",
    "def select_top_features_by_rankic(df: pd.DataFrame, feature_columns: list, n: int, target_column: str = 'future_return') -> list:\n",
    "    \"\"\"\n",
    "    Compute each feature's RankIC against the target column and return the n\n",
    "    features with the highest absolute RankIC.\n",
    "\n",
    "    Args:\n",
    "        df: pandas DataFrame holding the feature columns and the target column.\n",
    "        feature_columns: names of all candidate feature columns.\n",
    "        n: how many top-|RankIC| features to keep.\n",
    "        target_column: target column used for the RankIC. Defaults to 'future_return'.\n",
    "\n",
    "    Returns:\n",
    "        List of the n feature names with the highest absolute RankIC.\n",
    "    \"\"\"\n",
    "    numeric_columns = df.select_dtypes(include=['float64', 'int64']).columns\n",
    "    numeric_columns = [col for col in numeric_columns if col in feature_columns]\n",
    "    if target_column not in df.columns:\n",
    "        raise ValueError(f\"Target column '{target_column}' is not in the DataFrame.\")\n",
    "\n",
    "    rankic_scores = {}\n",
    "    for feature in numeric_columns:\n",
    "        if feature not in df.columns:\n",
    "            print(f\"Warning: feature column '{feature}' is not in the DataFrame; skipped.\")\n",
    "            continue\n",
    "\n",
    "        # RankIC = Spearman correlation between the feature and the target;\n",
    "        # dropna() keeps the correlation from failing on missing values\n",
    "        valid_data = df[[feature, target_column]].dropna()\n",
    "        if len(valid_data) > 1:  # need enough points for a correlation\n",
    "            correlation = valid_data[feature].corr(valid_data[target_column], method='spearman')\n",
    "            rankic_scores[feature] = abs(correlation)  # absolute value measures strength\n",
    "        else:\n",
    "            rankic_scores[feature] = 0  # too little data: treat the RankIC as 0\n",
    "\n",
    "    # A Series makes the sorting easy\n",
    "    rankic_series = pd.Series(rankic_scores)\n",
    "\n",
    "    # Sort by |RankIC| descending and take the top n\n",
    "    # (guard against n exceeding the number of available features)\n",
    "    n_actual = min(n, len(rankic_series))\n",
    "    top_features = rankic_series.sort_values(ascending=False).head(n_actual).index.tolist()\n",
    "    # non-numeric columns pass through untouched\n",
    "    top_features = [col for col in feature_columns if col in top_features or col not in numeric_columns]\n",
    "    return top_features\n",
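    "\n",
    "# Hedged usage sketch (commented out; candidate_factors is a hypothetical list\n",
    "# of factor column names, and 'future_return' is created a few cells below):\n",
    "# top_50 = select_top_features_by_rankic(df, candidate_factors, n=50)\n",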
rankic_series.sort_values(ascending=False).head(n_actual).index.tolist()\n",
"    # keep the original column order, and pass non-numeric columns through untouched\n",
"    top_features = [col for col in feature_columns if col in top_features or col not in numeric_columns]\n",
"    return top_features\n",
"\n",
"def create_deviation_within_dates(df, feature_columns):\n",
"    groupby_col = 'cat_l2_code'  # group within industry (SW level-2 code) on each trade_date\n",
"    new_columns = {}\n",
"    ret_feature_columns = feature_columns[:]\n",
"\n",
"    # automatically select the numeric features\n",
"    num_features = [col for col in feature_columns if 'cat' not in col and 'index' not in col]\n",
"\n",
"    # num_features = ['vol', 'pct_chg', 'turnover_rate', 'volume_ratio', 'cat_vol_spike', 'obv', 'maobv_6', 'return_5', 'return_10', 'return_20', 'std_return_5', 'std_return_15', 'std_return_90', 'std_return_90_2', 'act_factor1', 'act_factor2', 'act_factor3', 'act_factor4', 'act_factor5', 'act_factor6', 'rank_act_factor1', 'rank_act_factor2', 'rank_act_factor3', 'active_buy_volume_large', 'active_buy_volume_big', 'active_buy_volume_small', 'alpha_022', 'alpha_003', 'alpha_007', 'alpha_013']\n",
"    num_features = [col for col in num_features if 'cat' not in col and 'industry' not in col]\n",
"    num_features = [col for col in num_features if 'limit' not in col]\n",
"    num_features = [col for col in num_features if 'cyq' not in col]\n",
"\n",
"    # loop over the numeric features\n",
"    for feature in num_features:\n",
"        if feature == 'trade_date':  # no deviation needed for 'trade_date'\n",
"            continue\n",
"\n",
"        # grouped_mean = df.groupby(['trade_date'])[feature].transform('mean')\n",
"        # deviation_col_name = f'deviation_mean_{feature}'\n",
"        # new_columns[deviation_col_name] = df[feature] - grouped_mean\n",
"        # ret_feature_columns.append(deviation_col_name)\n",
"\n",
"        grouped_mean = df.groupby(['trade_date', groupby_col])[feature].transform('mean')\n",
"        deviation_col_name = f'deviation_mean_{feature}'\n",
"        new_columns[deviation_col_name] = df[feature] - grouped_mean\n",
"        ret_feature_columns.append(deviation_col_name)\n",
"\n",
"    # attach the new deviation features to the original DataFrame\n",
"    df = pd.concat([df, pd.DataFrame(new_columns)], axis=1)\n",
"\n",
"    # for feature in ['obv', 'return_20', 'act_factor1', 'act_factor2', 'act_factor3', 'act_factor4']:\n",
"    #     df[f'deviation_industry_{feature}'] = df[feature] - df[f'industry_{feature}']\n",
"\n",
"    return df, ret_feature_columns\n"
] }, { "cell_type": "code", "execution_count": null, "id": "47c12bb34062ae7a", "metadata": { "ExecuteTime": { "end_time": "2025-04-03T14:57:50.841165Z", "start_time": "2025-04-03T14:49:25.889057Z" } }, "outputs": [], "source": [
"days = 5\n",
"validation_days = 120\n",
"\n",
"import gc\n",
"\n",
"gc.collect()\n",
"\n",
"df = df.sort_values(by=['ts_code', 'trade_date'])\n",
"df['future_return'] = df.groupby('ts_code', group_keys=False)['close'].apply(lambda x: x.shift(-days) / x - 1)\n",
"# df['future_return'] = (df.groupby('ts_code')['close'].shift(-days) - df.groupby('ts_code')['open'].shift(-1)) / \\\n",
"#                       df.groupby('ts_code')['open'].shift(-1)\n",
"\n",
"df['cat_up_limit'] = df['pct_chg'] > 5\n",
"# df['label'] = (df.groupby('ts_code')['cat_up_limit']\n",
"#                .rolling(window=5, min_periods=1).sum()\n",
"#                .groupby('ts_code')  # group by ts_code again\n",
"#                .shift(-5)\n",
"#                .fillna(0)  # fill the trailing NaN of each stock group\n",
"#                .astype(int)\n",
"#                .reset_index(level=0, drop=True))\n",
"df['label'] = df.groupby('trade_date', group_keys=False)['future_return'].transform(\n",
"    lambda x: pd.qcut(x, q=20, labels=False, duplicates='drop')\n",
")\n",
"filter_index = df['future_return'].between(df['future_return'].quantile(0.01), df['future_return'].quantile(0.99))\n",
"\n",
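"# Note: the qcut labels above are cross-sectional vigintiles (0-19) of the 5-day\n",
"# forward return within each trade_date; they act as graded relevance levels for the\n",
"# LambdaRank / QueryRMSE rankers trained below (higher label = larger forward return).\n",
"# for col 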
in [col for col in df.columns]:\n", "# train_data[col] = train_data[col].astype('str')\n", "# test_data[col] = test_data[col].astype('str')" ] }, { "cell_type": "code", "execution_count": null, "id": "8f4dc587", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ " ts_code trade_date open close high low vol pct_chg \\\n", "976171 002193.SZ 2019-01-02 18.02 17.82 18.21 17.73 22513.62 -1.00 \n", "976172 002193.SZ 2019-01-03 17.82 17.78 17.94 17.65 19523.21 -0.22 \n", "976173 002193.SZ 2019-01-04 17.47 18.04 18.10 17.34 28094.69 1.46 \n", "976174 002193.SZ 2019-01-07 18.17 18.31 18.39 18.00 23866.88 1.50 \n", "976175 002193.SZ 2019-01-08 18.25 18.52 18.66 18.13 22853.46 1.15 \n", "... ... ... ... ... ... ... ... ... \n", "977704 002193.SZ 2025-05-19 11.74 11.95 11.97 11.61 94249.32 2.49 \n", "977705 002193.SZ 2025-05-20 11.99 12.20 12.22 11.84 103040.57 2.09 \n", "977706 002193.SZ 2025-05-21 12.20 11.97 12.28 11.82 83112.00 -1.89 \n", "977707 002193.SZ 2025-05-22 11.95 11.80 12.17 11.72 88811.00 -1.42 \n", "977708 002193.SZ 2025-05-23 11.74 11.51 11.90 11.51 93013.00 -2.46 \n", "\n", " turnover_rate pe_ttm ... cs_rank_lg_flow_price_concordance \\\n", "976171 0.9771 23.0129 ... 0.621118 \n", "976172 0.8473 22.9598 ... 0.230095 \n", "976173 1.2193 23.3049 ... 0.507119 \n", "976174 1.0358 23.6499 ... 0.315078 \n", "976175 0.9918 23.9154 ... 0.248631 \n", "... ... ... ... ... \n", "977704 3.6012 NaN ... 0.224402 \n", "977705 3.9371 NaN ... 0.124834 \n", "977706 3.1757 NaN ... 0.803720 \n", "977707 3.3934 NaN ... 0.667220 \n", "977708 3.5540 NaN ... 0.308867 \n", "\n", " cs_rank_turnover_per_winner cs_rank_ind_cap_neutral_pe \\\n", "976171 0.990869 NaN \n", "976172 0.934283 NaN \n", "976173 0.925182 NaN \n", "976174 0.843796 NaN \n", "976175 0.834672 NaN \n", "... ... ... \n", "977704 0.624335 NaN \n", "977705 0.641102 NaN \n", "977706 0.624709 NaN \n", "977707 0.563268 NaN \n", "977708 0.661242 NaN \n", "\n", " cs_rank_volume_ratio cs_rank_elg_buy_sell_sm_ratio \\\n", "976171 0.710344 0.341855 \n", "976172 0.444444 0.318912 \n", "976173 0.489226 0.260036 \n", "976174 0.250000 0.251095 \n", "976175 0.510588 0.286679 \n", "... ... ... \n", "977704 0.234043 0.397274 \n", "977705 0.344124 0.116534 \n", "977706 0.221189 0.126370 \n", "977707 0.412155 0.130521 \n", "977708 0.534540 0.134175 \n", "\n", " cs_rank_cost_dist_vol_ratio cs_rank_size future_return \\\n", "976171 0.261603 0.262235 0.034231 \n", "976172 0.185342 0.264695 0.029809 \n", "976173 0.211959 0.259489 0.025499 \n", "976174 0.145266 0.255474 0.014746 \n", "976175 0.202299 0.259854 0.003240 \n", "... ... ... ... \n", "977704 0.299535 0.031250 NaN \n", "977705 0.339641 0.032205 NaN \n", "977706 0.291597 0.030555 NaN \n", "977707 0.372634 0.030555 NaN \n", "977708 0.430090 0.027898 NaN \n", "\n", " cat_up_limit label \n", "976171 False 8.0 \n", "976172 False 7.0 \n", "976173 False 10.0 \n", "976174 False 13.0 \n", "976175 False 6.0 \n", "... ... ... 
\n", "977704 False NaN \n", "977705 False NaN \n", "977706 False NaN \n", "977707 False NaN \n", "977708 False NaN \n", "\n", "[1538 rows x 181 columns]\n" ] } ], "source": [ "print(df[df['ts_code'] == '002193.SZ'])" ] }, { "cell_type": "code", "execution_count": null, "id": "29221dde", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "192\n" ] } ], "source": [ "feature_columns = [col for col in df.head(10)\n", " .merge(industry_df, on=['cat_l2_code', 'trade_date'], how='left')\n", " .merge(index_data, on='trade_date', how='left')\n", " .columns\n", " ]\n", "feature_columns = [col for col in feature_columns if col not in ['trade_date',\n", " 'ts_code',\n", " 'label']]\n", "feature_columns = [col for col in feature_columns if 'future' not in col]\n", "feature_columns = [col for col in feature_columns if 'label' not in col]\n", "feature_columns = [col for col in feature_columns if 'score' not in col]\n", "feature_columns = [col for col in feature_columns if 'gen' not in col]\n", "feature_columns = [col for col in feature_columns if 'is_st' not in col]\n", "feature_columns = [col for col in feature_columns if 'pe_ttm' not in col]\n", "# feature_columns = [col for col in feature_columns if 'volatility' not in col]\n", "# feature_columns = [col for col in feature_columns if 'circ_mv' not in col]\n", "feature_columns = [col for col in feature_columns if 'code' not in col]\n", "feature_columns = [col for col in feature_columns if col not in origin_columns]\n", "feature_columns = [col for col in feature_columns if not col.startswith('_')]\n", "# feature_columns = [col for col in feature_columns if col not in ['ts_code', 'trade_date', 'vol_std_5', 'cov', 'delta_cov', 'alpha_22_improved', 'alpha_007', 'consecutive_up_limit', 'mv_volatility', 'volume_growth', 'mv_growth', 'arbr']]\n", "feature_columns = [col for col in feature_columns if col not in ['intraday_lg_flow_corr_20', \n", " 'cap_neutral_cost_metric', \n", " 'hurst_net_mf_vol_60', \n", " 'complex_factor_deap_1', \n", " 'lg_buy_consolidation_20',\n", " 'cs_rank_ind_cap_neutral_pe',\n", " 'cs_rank_opening_gap',\n", " 'cs_rank_ind_adj_lg_flow']]\n", "feature_columns = [col for col in feature_columns if col not in ['cat_reason', 'cat_is_on_top_list']]\n", "print(len(feature_columns))" ] }, { "cell_type": "code", "execution_count": null, "id": "03ee5daf", "metadata": {}, "outputs": [], "source": [ "# df = fill_nan_with_daily_median(df, feature_columns)\n", "for feature_col in [col for col in feature_columns if col in df.columns]:\n", " # median_val = df[feature_col].median()\n", " df[feature_col].fillna(0, inplace=True)" ] }, { "cell_type": "code", "execution_count": null, "id": "b76ea08a", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ " ts_code trade_date log_circ_mv\n", "0 000001.SZ 2019-01-02 16.574219\n", "1 000001.SZ 2019-01-03 16.583965\n", "2 000001.SZ 2019-01-04 16.633371\n", "['vol', 'pct_chg', 'turnover_rate', 'volume_ratio', 'winner_rate', 'undist_profit_ps', 'ocfps', 'AR', 'BR', 'AR_BR', 'log_circ_mv', 'cashflow_to_ev_factor', 'book_to_price_ratio', 'turnover_rate_mean_5', 'variance_20', 'bbi_ratio_factor', 'daily_deviation', 'lg_elg_net_buy_vol', 'flow_lg_elg_intensity', 'sm_net_buy_vol', 'total_buy_vol', 'lg_elg_buy_prop', 'flow_struct_buy_change', 'lg_elg_net_buy_vol_change', 'flow_lg_elg_accel', 'chip_concentration_range', 'chip_skewness', 'floating_chip_proxy', 'cost_support_15pct_change', 'cat_winner_price_zone', 'flow_chip_consistency', 
'profit_taking_vs_absorb', 'cat_is_positive', 'upside_vol', 'downside_vol', 'vol_ratio', 'return_skew', 'return_kurtosis', 'volume_change_rate', 'cat_volume_breakout', 'turnover_deviation', 'cat_turnover_spike', 'avg_volume_ratio', 'cat_volume_ratio_breakout', 'vol_spike', 'vol_std_5', 'atr_14', 'atr_6', 'obv', 'maobv_6', 'rsi_3', 'return_5', 'return_20', 'std_return_5', 'std_return_90', 'std_return_90_2', 'act_factor1', 'act_factor2', 'act_factor3', 'act_factor4', 'rank_act_factor1', 'rank_act_factor2', 'rank_act_factor3', 'cov', 'delta_cov', 'alpha_22_improved', 'alpha_003', 'alpha_007', 'alpha_013', 'vol_break', 'weight_roc5', 'smallcap_concentration', 'cost_stability', 'high_cost_break_days', 'liquidity_risk', 'turnover_std', 'mv_volatility', 'volume_growth', 'mv_growth', 'momentum_factor', 'resonance_factor', 'log_close', 'cat_vol_spike', 'up', 'down', 'obv_maobv_6', 'std_return_5_over_std_return_90', 'std_return_90_minus_std_return_90_2', 'cat_af2', 'cat_af3', 'cat_af4', 'act_factor5', 'act_factor6', 'active_buy_volume_large', 'active_buy_volume_big', 'active_buy_volume_small', 'buy_lg_vol_minus_sell_lg_vol', 'buy_elg_vol_minus_sell_elg_vol', 'ctrl_strength', 'low_cost_dev', 'asymmetry', 'lock_factor', 'cat_vol_break', 'cost_atr_adj', 'cat_golden_resonance', 'mv_turnover_ratio', 'mv_adjusted_volume', 'mv_weighted_turnover', 'nonlinear_mv_volume', 'mv_volume_ratio', 'mv_momentum', 'lg_flow_mom_corr_20_60', 'lg_flow_accel', 'profit_pressure', 'underwater_resistance', 'cost_conc_std_20', 'profit_decay_20', 'vol_amp_loss_20', 'vol_drop_profit_cnt_5', 'lg_flow_vol_interact_20', 'cost_break_confirm_cnt_5', 'atr_norm_channel_pos_14', 'turnover_diff_skew_20', 'lg_sm_flow_diverge_20', 'pullback_strong_20_20', 'vol_wgt_hist_pos_20', 'vol_adj_roc_20', 'cs_rank_net_lg_flow_val', 'cs_rank_elg_buy_ratio', 'cs_rank_rel_profit_margin', 'cs_rank_cost_breadth', 'cs_rank_dist_to_upper_cost', 'cs_rank_winner_rate', 'cs_rank_intraday_range', 'cs_rank_close_pos_in_range', 'cs_rank_pos_in_hist_range', 'cs_rank_vol_x_profit_margin', 'cs_rank_lg_flow_price_concordance', 'cs_rank_turnover_per_winner', 'cs_rank_volume_ratio', 'cs_rank_elg_buy_sell_sm_ratio', 'cs_rank_cost_dist_vol_ratio', 'cs_rank_size', 'cat_up_limit', 'industry_obv', 'industry_return_5', 'industry_return_20', 'industry__ema_5', 'industry__ema_13', 'industry__ema_20', 'industry__ema_60', 'industry_act_factor1', 'industry_act_factor2', 'industry_act_factor3', 'industry_act_factor4', 'industry_act_factor5', 'industry_act_factor6', 'industry_rank_act_factor1', 'industry_rank_act_factor2', 'industry_rank_act_factor3', 'industry_return_5_percentile', 'industry_return_20_percentile', '000852.SH_MACD', '000905.SH_MACD', '399006.SZ_MACD', '000852.SH_MACD_hist', '000905.SH_MACD_hist', '399006.SZ_MACD_hist', '000852.SH_RSI', '000905.SH_RSI', '399006.SZ_RSI', '000852.SH_Signal_line', '000905.SH_Signal_line', '399006.SZ_Signal_line', '000852.SH_amount_change_rate', '000905.SH_amount_change_rate', '399006.SZ_amount_change_rate', '000852.SH_amount_mean', '000905.SH_amount_mean', '399006.SZ_amount_mean', '000852.SH_daily_return', '000905.SH_daily_return', '399006.SZ_daily_return', '000852.SH_up_ratio_20d', '000905.SH_up_ratio_20d', '399006.SZ_up_ratio_20d', '000852.SH_volatility', '000905.SH_volatility', '399006.SZ_volatility', '000852.SH_volume_change_rate', '000905.SH_volume_change_rate', '399006.SZ_volume_change_rate']\n", "去除极值\n", "开始截面 MAD 去极值处理 (k=3.0)...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "MAD Filtering: 100%|██████████| 
132/132 [00:28<00:00, 4.64it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "截面 MAD 去极值处理完成。\n", "开始截面 MAD 去极值处理 (k=3.0)...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "MAD Filtering: 100%|██████████| 132/132 [00:23<00:00, 5.57it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "截面 MAD 去极值处理完成。\n", "开始截面 MAD 去极值处理 (k=3.0)...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "MAD Filtering: 0it [00:00, ?it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "截面 MAD 去极值处理完成。\n", "开始截面 MAD 去极值处理 (k=3.0)...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "MAD Filtering: 0it [00:00, ?it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "截面 MAD 去极值处理完成。\n", "feature_columns: ['vol', 'pct_chg', 'turnover_rate', 'volume_ratio', 'winner_rate', 'undist_profit_ps', 'ocfps', 'AR', 'BR', 'AR_BR', 'log_circ_mv', 'cashflow_to_ev_factor', 'book_to_price_ratio', 'turnover_rate_mean_5', 'variance_20', 'bbi_ratio_factor', 'daily_deviation', 'lg_elg_net_buy_vol', 'flow_lg_elg_intensity', 'sm_net_buy_vol', 'total_buy_vol', 'lg_elg_buy_prop', 'flow_struct_buy_change', 'lg_elg_net_buy_vol_change', 'flow_lg_elg_accel', 'chip_concentration_range', 'chip_skewness', 'floating_chip_proxy', 'cost_support_15pct_change', 'cat_winner_price_zone', 'flow_chip_consistency', 'profit_taking_vs_absorb', 'cat_is_positive', 'upside_vol', 'downside_vol', 'vol_ratio', 'return_skew', 'return_kurtosis', 'volume_change_rate', 'cat_volume_breakout', 'turnover_deviation', 'cat_turnover_spike', 'avg_volume_ratio', 'cat_volume_ratio_breakout', 'vol_spike', 'vol_std_5', 'atr_14', 'atr_6', 'obv', 'maobv_6', 'rsi_3', 'return_5', 'return_20', 'std_return_5', 'std_return_90', 'std_return_90_2', 'act_factor1', 'act_factor2', 'act_factor3', 'act_factor4', 'rank_act_factor1', 'rank_act_factor2', 'rank_act_factor3', 'cov', 'delta_cov', 'alpha_22_improved', 'alpha_003', 'alpha_007', 'alpha_013', 'vol_break', 'weight_roc5', 'smallcap_concentration', 'cost_stability', 'high_cost_break_days', 'liquidity_risk', 'turnover_std', 'mv_volatility', 'volume_growth', 'mv_growth', 'momentum_factor', 'resonance_factor', 'log_close', 'cat_vol_spike', 'up', 'down', 'obv_maobv_6', 'std_return_5_over_std_return_90', 'std_return_90_minus_std_return_90_2', 'cat_af2', 'cat_af3', 'cat_af4', 'act_factor5', 'act_factor6', 'active_buy_volume_large', 'active_buy_volume_big', 'active_buy_volume_small', 'buy_lg_vol_minus_sell_lg_vol', 'buy_elg_vol_minus_sell_elg_vol', 'ctrl_strength', 'low_cost_dev', 'asymmetry', 'lock_factor', 'cat_vol_break', 'cost_atr_adj', 'cat_golden_resonance', 'mv_turnover_ratio', 'mv_adjusted_volume', 'mv_weighted_turnover', 'nonlinear_mv_volume', 'mv_volume_ratio', 'mv_momentum', 'lg_flow_mom_corr_20_60', 'lg_flow_accel', 'profit_pressure', 'underwater_resistance', 'cost_conc_std_20', 'profit_decay_20', 'vol_amp_loss_20', 'vol_drop_profit_cnt_5', 'lg_flow_vol_interact_20', 'cost_break_confirm_cnt_5', 'atr_norm_channel_pos_14', 'turnover_diff_skew_20', 'lg_sm_flow_diverge_20', 'pullback_strong_20_20', 'vol_wgt_hist_pos_20', 'vol_adj_roc_20', 'cs_rank_net_lg_flow_val', 'cs_rank_elg_buy_ratio', 'cs_rank_rel_profit_margin', 'cs_rank_cost_breadth', 'cs_rank_dist_to_upper_cost', 'cs_rank_winner_rate', 'cs_rank_intraday_range', 'cs_rank_close_pos_in_range', 'cs_rank_pos_in_hist_range', 'cs_rank_vol_x_profit_margin', 'cs_rank_lg_flow_price_concordance', 'cs_rank_turnover_per_winner', 'cs_rank_volume_ratio', 'cs_rank_elg_buy_sell_sm_ratio', 
'cs_rank_cost_dist_vol_ratio', 'cs_rank_size', 'cat_up_limit', 'industry_obv', 'industry_return_5', 'industry_return_20', 'industry__ema_5', 'industry__ema_13', 'industry__ema_20', 'industry__ema_60', 'industry_act_factor1', 'industry_act_factor2', 'industry_act_factor3', 'industry_act_factor4', 'industry_act_factor5', 'industry_act_factor6', 'industry_rank_act_factor1', 'industry_rank_act_factor2', 'industry_rank_act_factor3', 'industry_return_5_percentile', 'industry_return_20_percentile', '000852.SH_MACD', '000905.SH_MACD', '399006.SZ_MACD', '000852.SH_MACD_hist', '000905.SH_MACD_hist', '399006.SZ_MACD_hist', '000852.SH_RSI', '000905.SH_RSI', '399006.SZ_RSI', '000852.SH_Signal_line', '000905.SH_Signal_line', '399006.SZ_Signal_line', '000852.SH_amount_change_rate', '000905.SH_amount_change_rate', '399006.SZ_amount_change_rate', '000852.SH_amount_mean', '000905.SH_amount_mean', '399006.SZ_amount_mean', '000852.SH_daily_return', '000905.SH_daily_return', '399006.SZ_daily_return', '000852.SH_up_ratio_20d', '000905.SH_up_ratio_20d', '399006.SZ_up_ratio_20d', '000852.SH_volatility', '000905.SH_volatility', '399006.SZ_volatility', '000852.SH_volume_change_rate', '000905.SH_volume_change_rate', '399006.SZ_volume_change_rate']\n", "df最小日期: 2019-01-02\n", "df最大日期: 2025-05-23\n", "2057539\n", "train_data最小日期: 2020-01-02\n", "train_data最大日期: 2022-12-30\n", "1766694\n", "test_data最小日期: 2023-01-03\n", "test_data最大日期: 2025-05-23\n", " ts_code trade_date log_circ_mv\n", "0 000001.SZ 2019-01-02 16.574219\n", "1 000001.SZ 2019-01-03 16.583965\n", "2 000001.SZ 2019-01-04 16.633371\n" ] } ], "source": [ "split_date = '2023-01-01'\n", "train_data = df[filter_index & (df['trade_date'] <= split_date) & (df['trade_date'] >= '2020-01-01')]\n", "test_data = df[(df['trade_date'] >= split_date)]\n", "\n", "print(df[['ts_code', 'trade_date', 'log_circ_mv']].head(3))\n", "\n", "industry_df = industry_df.sort_values(by=['trade_date'])\n", "index_data = index_data.sort_values(by=['trade_date'])\n", "\n", "# train_data = train_data.merge(industry_df, on=['cat_l2_code', 'trade_date'], how='left')\n", "# train_data = train_data.merge(index_data, on='trade_date', how='left')\n", "# test_data = test_data.merge(industry_df, on=['cat_l2_code', 'trade_date'], how='left')\n", "# test_data = test_data.merge(index_data, on='trade_date', how='left')\n", "\n", "train_data, test_data = train_data.replace([np.inf, -np.inf], np.nan), test_data.replace([np.inf, -np.inf], np.nan)\n", "\n", "# feature_columns_new = feature_columns[:]\n", "# train_data, _ = create_deviation_within_dates(train_data, [col for col in feature_columns if col in train_data.columns])\n", "# test_data, _ = create_deviation_within_dates(test_data, [col for col in feature_columns if col in train_data.columns])\n", "\n", "# feature_columns = [\n", "# 'undist_profit_ps', \n", "# 'AR_BR',\n", "# 'pe_ttm',\n", "# 'alpha_22_improved', \n", "# 'alpha_003', \n", "# 'alpha_007', \n", "# 'alpha_013', \n", "# 'cat_up_limit', \n", "# 'cat_down_limit', \n", "# 'up_limit_count_10d', \n", "# 'down_limit_count_10d', \n", "# 'consecutive_up_limit', \n", "# 'vol_break', \n", "# 'weight_roc5', \n", "# 'price_cost_divergence', \n", "# 'smallcap_concentration', \n", "# 'cost_stability', \n", "# 'high_cost_break_days', \n", "# 'liquidity_risk', \n", "# 'turnover_std', \n", "# 'mv_volatility', \n", "# 'volume_growth', \n", "# 'mv_growth', \n", "# 'lg_flow_mom_corr_20_60', \n", "# 'lg_flow_accel', \n", "# 'profit_pressure', \n", "# 'underwater_resistance', \n", "# 'cost_conc_std_20', 
\n", "# 'profit_decay_20', \n", "# 'vol_amp_loss_20', \n", "# 'vol_drop_profit_cnt_5', \n", "# 'lg_flow_vol_interact_20', \n", "# 'cost_break_confirm_cnt_5', \n", "# 'atr_norm_channel_pos_14', \n", "# 'turnover_diff_skew_20', \n", "# 'lg_sm_flow_diverge_20', \n", "# 'pullback_strong_20_20', \n", "# 'vol_wgt_hist_pos_20', \n", "# 'vol_adj_roc_20',\n", "# 'cashflow_to_ev_factor',\n", "# 'ocfps',\n", "# 'book_to_price_ratio',\n", "# 'turnover_rate_mean_5',\n", "# 'variance_20',\n", "# 'bbi_ratio_factor'\n", "# ]\n", "# feature_columns = [col for col in feature_columns if col in train_data.columns]\n", "# feature_columns = [col for col in feature_columns if not col.startswith('_')]\n", "\n", "numeric_columns = df.select_dtypes(include=['float64', 'int64']).columns\n", "numeric_columns = [col for col in numeric_columns if col in feature_columns]\n", "# feature_columns = select_top_features_by_rankic(df, numeric_columns, n=10)\n", "print(feature_columns)\n", "\n", "# train_data = fill_nan_with_daily_median(train_data, feature_columns)\n", "# test_data = fill_nan_with_daily_median(test_data, feature_columns)\n", "\n", "train_data = train_data.dropna(subset=[col for col in feature_columns if col in train_data.columns])\n", "train_data = train_data.dropna(subset=['label'])\n", "train_data = train_data.reset_index(drop=True)\n", "# print(test_data.tail())\n", "test_data = test_data.dropna(subset=[col for col in feature_columns if col in train_data.columns])\n", "# test_data = test_data.dropna(subset=['label'])\n", "test_data = test_data.reset_index(drop=True)\n", "\n", "transform_feature_columns = feature_columns\n", "transform_feature_columns = [col for col in transform_feature_columns if col in feature_columns and not col.startswith('cat') and col in train_data.columns]\n", "# transform_feature_columns.remove('undist_profit_ps')\n", "print('去除极值')\n", "cs_mad_filter(train_data, transform_feature_columns)\n", "# print('中性化')\n", "# cs_neutralize_industry_cap(train_data, transform_feature_columns)\n", "# print('标准化')\n", "# cs_zscore_standardize(train_data, transform_feature_columns)\n", "\n", "cs_mad_filter(test_data, transform_feature_columns)\n", "# cs_neutralize_industry_cap(test_data, transform_feature_columns)\n", "# cs_zscore_standardize(test_data, transform_feature_columns)\n", "\n", "mad_filter_feature_columns = [col for col in feature_columns if col not in transform_feature_columns and not col.startswith('cat') and col in train_data.columns]\n", "cs_mad_filter(train_data, mad_filter_feature_columns)\n", "cs_mad_filter(test_data, mad_filter_feature_columns)\n", "\n", "\n", "print(f'feature_columns: {feature_columns}')\n", "\n", "\n", "print(f\"df最小日期: {df['trade_date'].min().strftime('%Y-%m-%d')}\")\n", "print(f\"df最大日期: {df['trade_date'].max().strftime('%Y-%m-%d')}\")\n", "print(len(train_data))\n", "print(f\"train_data最小日期: {train_data['trade_date'].min().strftime('%Y-%m-%d')}\")\n", "print(f\"train_data最大日期: {train_data['trade_date'].max().strftime('%Y-%m-%d')}\")\n", "print(len(test_data))\n", "print(f\"test_data最小日期: {test_data['trade_date'].min().strftime('%Y-%m-%d')}\")\n", "print(f\"test_data最大日期: {test_data['trade_date'].max().strftime('%Y-%m-%d')}\")\n", "\n", "cat_columns = [col for col in feature_columns if col.startswith('cat')]\n", "for col in cat_columns:\n", " train_data[col] = train_data[col].astype('category')\n", " test_data[col] = test_data[col].astype('category')\n", "\n", "print(df[['ts_code', 'trade_date', 'log_circ_mv']].head(3))\n" ] }, { "cell_type": "code", 
"execution_count": null, "id": "2e4b027e", "metadata": {}, "outputs": [], "source": [ "class RmseObjective(object):\n", "\n", " def is_max_optimal(self):\n", " return False\n", "\n", " def get_final_error(self, error, weight):\n", " return np.sqrt(error / (weight + 1e-38))\n", "\n", "\n", " def evaluate(self, approxes, target, weight):\n", " assert len(approxes) == 1\n", " assert len(target) == len(approxes[0])\n", "\n", " approx = approxes[0]\n", "\n", " error_sum = 0.0\n", " weight_sum = 0.0\n", "\n", " for i in range(len(approx)):\n", " w = 1.0 if weight is None else weight[i]\n", " weight_sum += w\n", " error_sum += w * ((approx[i] - target[i])**2)\n", "\n", " return error_sum, weight_sum\n", "\n", " def calc_ders_range(self, approxes, targets, weights):\n", " assert len(approxes) == len(targets)\n", " if weights is not None:\n", " assert len(weights) == len(approxes)\n", "\n", " result = []\n", " for index in range(len(targets)):\n", " der1 = targets[index] - approxes[index]\n", " der2 = -1\n", "\n", " if weights is not None:\n", " der1 *= weights[index]\n", " der2 *= weights[index]\n", "\n", " result.append((der1, der2))\n", " return result\n" ] }, { "cell_type": "code", "execution_count": null, "id": "3ff2d1c5", "metadata": {}, "outputs": [], "source": [ "from sklearn.preprocessing import StandardScaler\n", "from sklearn.linear_model import LogisticRegression\n", "import matplotlib.pyplot as plt # 保持 matplotlib 导入,尽管LightGBM的绘图功能已移除\n", "from sklearn.decomposition import PCA\n", "import pandas as pd\n", "import numpy as np\n", "import datetime # 用于日期计算\n", "from catboost import CatBoostClassifier, CatBoostRanker, CatBoostRegressor\n", "from catboost import Pool\n", "import lightgbm as lgb\n", "from lightgbm import LGBMRanker, LGBMRegressor\n", "\n", "def train_model(train_data_df, feature_columns,\n", " print_info=True, # 调整参数名,更通用\n", " validation_days=180, use_pca=False, split_date=None,\n", " target_column='label', type='light'): # 增加目标列参数\n", "\n", " print('train data size: ', len(train_data_df))\n", " print(train_data_df[['ts_code', 'trade_date', 'log_circ_mv']])\n", " # 确保数据按时间排序\n", " train_data_df = train_data_df.sort_values(by='trade_date')\n", "\n", " # 识别数值型特征列\n", " numeric_feature_columns = train_data_df[feature_columns].select_dtypes(include=['float64', 'int64']).columns.tolist()\n", "\n", " # 去除标签为空的样本\n", " initial_len = len(train_data_df)\n", " train_data_df = train_data_df.dropna(subset=[target_column])\n", "\n", " if print_info:\n", " print(f'原始样本数: {initial_len}, 去除标签为空后样本数: {len(train_data_df)}')\n", "\n", " # 提取特征和标签,只取数值型特征用于线性回归\n", " \n", " if split_date is None:\n", " all_dates = train_data_df['trade_date'].unique() # 获取所有唯一的 trade_date\n", " split_date = all_dates[-validation_days] # 划分点为倒数第 validation_days 天\n", " train_data_split = train_data_df[train_data_df['trade_date'] < split_date] # 训练集\n", " val_data_split = train_data_df[train_data_df['trade_date'] >= split_date] # 验证集\n", "\n", " train_data_split = train_data_split.sort_values('trade_date')\n", " val_data_split = val_data_split.sort_values('trade_date')\n", "\n", " \n", " X_train = train_data_split[feature_columns]\n", " y_train = train_data_split[target_column]\n", " \n", " X_val = val_data_split[feature_columns]\n", " y_val = val_data_split[target_column]\n", "\n", "\n", " # # 标准化数值特征 (使用 StandardScaler 对训练集fit并transform, 对验证集只transform)\n", " scaler = StandardScaler()\n", " # X_train = scaler.fit_transform(X_train)\n", "\n", " # 训练线性回归模型\n", " # model = LogisticRegression(random_state=42)\n", 
" \n", " # # 使用处理后的特征和样本权重进行训练\n", " # model.fit(X_train, y_train)\n", "\n", "\n", " if type == 'cat':\n", " params = {\n", " 'loss_function': 'QueryRMSE', # 适用于二分类\n", " 'eval_metric': 'NDCG', # 评估指标\n", " 'iterations': 1500,\n", " 'learning_rate': 0.01,\n", " 'depth': 10, # 控制模型复杂度\n", " # 'l2_leaf_reg': 0.1, # L2 正则化\n", " 'verbose': 5000,\n", " 'early_stopping_rounds': 300,\n", " 'one_hot_max_size': 50,\n", " # 'class_weights': [0.6, 1.2],\n", " 'task_type': 'GPU',\n", " 'has_time': True,\n", " 'random_seed': 7\n", " }\n", " cat_features = [i for i, col in enumerate(feature_columns) if col.startswith('cat')]\n", " group_train = train_data_split['trade_date'].factorize()[0]\n", " group_val = val_data_split['trade_date'].factorize()[0]\n", " train_pool = Pool(\n", " data=X_train,\n", " label=y_train,\n", " group_id=group_train,\n", " cat_features=cat_features\n", " )\n", " val_pool = Pool(\n", " data=X_val,\n", " label=y_val,\n", " group_id=group_val,\n", " cat_features=cat_features\n", " )\n", "\n", "\n", " model = CatBoostRanker(**params)\n", " model.fit(train_pool,\n", " eval_set=val_pool, \n", " plot=True, \n", " use_best_model=True\n", " )\n", " elif type == 'light':\n", " label_gain = list(range(len(train_data_split['label'].unique())))\n", " params = {\n", " 'label_gain': [gain * gain for gain in label_gain],\n", " 'objective': 'lambdarank',\n", " 'metric': 'rank_xendcg',\n", " 'learning_rate': 0.01,\n", " 'num_leaves': 1024,\n", " # 'min_data_in_leaf': 128,\n", " 'max_depth': 10,\n", " 'max_bin': 1024,\n", " 'feature_fraction': 0.7,\n", " 'bagging_fraction': 0.7,\n", " 'bagging_freq': 5,\n", " 'lambda_l1': 1,\n", " 'lambda_l2': 1,\n", " 'boosting': 'gbdt',\n", " 'verbosity': -1,\n", " 'extra_trees': True,\n", " # 'max_position': 5,\n", " # 'ndcg_at': 1,\n", " 'quant_train_renew_leaf': True,\n", " # 'lambdarank_truncation_level': 1,\n", " # 'lambdarank_position_bias_regularization': 1,\n", " 'seed': 7\n", " }\n", " train_groups = train_data_split.groupby('trade_date').size().tolist()\n", " val_groups = val_data_split.groupby('trade_date').size().tolist()\n", "\n", " categorical_feature = [col for col in feature_columns if 'cat' in col]\n", " train_dataset = lgb.Dataset(\n", " X_train, label=y_train, \n", " group=train_groups,\n", " categorical_feature=categorical_feature\n", " )\n", " val_dataset = lgb.Dataset(\n", " X_val, label=y_val, \n", " group=val_groups,\n", " categorical_feature=categorical_feature\n", " )\n", "\n", " evals = {}\n", " callbacks = [lgb.log_evaluation(period=1000),\n", " lgb.callback.record_evaluation(evals),\n", " lgb.early_stopping(100, first_metric_only=True)\n", " ]\n", " # # 训练模型\n", " # model = lgb.train(\n", " # params, train_dataset, num_boost_round=1000,\n", " # valid_sets=[train_dataset, val_dataset], valid_names=['train', 'valid'],\n", " # callbacks=callbacks\n", " # )\n", "\n", " # # 打印特征重要性(如果需要)\n", " # if True:\n", " # lgb.plot_metric(evals)\n", " # lgb.plot_importance(model, importance_type='split', max_num_features=20)\n", " # plt.show()\n", "\n", " from flaml import AutoML\n", " from sklearn.datasets import fetch_california_housing\n", "\n", " # Initialize an AutoML instance\n", " model = AutoML()\n", " # Specify automl goal and constraint\n", " automl_settings = {\n", " \"time_budget\": 600, # in seconds\n", " \"metric\": \"ndcg@1\",\n", " \"task\": \"rank\",\n", " \"estimator_list\": [\n", " \"catboost\",\n", " \"lgbm\",\n", " \"xgboost\"\n", " ], \n", " \"ensemble\": {\n", " \"final_estimator\": LGBMRanker(),\n", " \"passthrough\": 
False,\n", " },\n", " }\n", " model.fit(X_train=X_train, y_train=y_train, groups=train_groups,\n", " X_val=X_val, y_val=y_val,groups_val=val_groups,\n", " mlflow_logging=False, **automl_settings)\n", "\n", "\n", " return model, scaler, None # 返回训练好的模型、scaler 和 pca 对象" ] }, { "cell_type": "code", "execution_count": null, "id": "c6eb5cd4-e714-420a-ac48-39af3e11ee81", "metadata": { "ExecuteTime": { "end_time": "2025-04-03T15:03:18.426481Z", "start_time": "2025-04-03T15:02:19.926352Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "train data size: 728000\n", " ts_code trade_date log_circ_mv\n", "0 600306.SH 2020-01-02 11.552040\n", "1 603269.SH 2020-01-02 11.324801\n", "2 002633.SZ 2020-01-02 11.759023\n", "3 603991.SH 2020-01-02 11.181150\n", "4 000691.SZ 2020-01-02 11.677910\n", "... ... ... ...\n", "727995 002235.SZ 2022-12-30 12.914708\n", "727996 605598.SH 2022-12-30 11.783580\n", "727997 002613.SZ 2022-12-30 12.489464\n", "727998 600800.SH 2022-12-30 12.571911\n", "727999 603068.SH 2022-12-30 12.967134\n", "\n", "[728000 rows x 3 columns]\n", "原始样本数: 728000, 去除标签为空后样本数: 728000\n", "[flaml.automl.logger: 05-26 20:16:56] {1728} INFO - task = rank\n", "[flaml.automl.logger: 05-26 20:16:56] {1736} INFO - Data split method: group\n", "[flaml.automl.logger: 05-26 20:16:56] {1739} INFO - Evaluation method: holdout\n", "[flaml.automl.logger: 05-26 20:16:56] {1838} INFO - Minimizing error metric: 1-ndcg@1\n", "[flaml.automl.logger: 05-26 20:16:56] {1955} INFO - List of ML learners in AutoML Run: ['catboost', 'lgbm', 'xgboost']\n", "[flaml.automl.logger: 05-26 20:16:56] {2258} INFO - iteration 0, current learner catboost\n", "[flaml.automl.logger: 05-26 20:17:16] {2393} INFO - Estimated sufficient time budget=203147s. Estimated necessary time budget=203s.\n", "[flaml.automl.logger: 05-26 20:17:16] {2442} INFO - at 44.5s,\testimator catboost's best error=0.3789,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:17:16] {2258} INFO - iteration 1, current learner lgbm\n", "[flaml.automl.logger: 05-26 20:17:20] {2442} INFO - at 48.3s,\testimator lgbm's best error=0.4831,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:17:20] {2258} INFO - iteration 2, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:17:25] {2442} INFO - at 53.2s,\testimator xgboost's best error=0.5269,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:17:25] {2258} INFO - iteration 3, current learner lgbm\n", "[flaml.automl.logger: 05-26 20:17:29] {2442} INFO - at 57.0s,\testimator lgbm's best error=0.4831,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:17:29] {2258} INFO - iteration 4, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:17:34] {2442} INFO - at 61.6s,\testimator xgboost's best error=0.5269,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:17:34] {2258} INFO - iteration 5, current learner catboost\n", "[flaml.automl.logger: 05-26 20:19:53] {2442} INFO - at 200.7s,\testimator catboost's best error=0.3789,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:19:53] {2258} INFO - iteration 6, current learner lgbm\n", "[flaml.automl.logger: 05-26 20:19:56] {2442} INFO - at 204.4s,\testimator lgbm's best error=0.4831,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:19:56] {2258} INFO - iteration 7, current learner lgbm\n", "[flaml.automl.logger: 05-26 20:20:01] 
{2442} INFO - at 209.4s,\testimator lgbm's best error=0.4831,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:20:01] {2258} INFO - iteration 8, current learner lgbm\n", "[flaml.automl.logger: 05-26 20:20:05] {2442} INFO - at 212.9s,\testimator lgbm's best error=0.4831,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:20:05] {2258} INFO - iteration 9, current learner lgbm\n", "[flaml.automl.logger: 05-26 20:20:09] {2442} INFO - at 216.6s,\testimator lgbm's best error=0.4831,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:20:09] {2258} INFO - iteration 10, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:20:13] {2442} INFO - at 221.4s,\testimator xgboost's best error=0.5266,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:20:13] {2258} INFO - iteration 11, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:20:18] {2442} INFO - at 226.2s,\testimator xgboost's best error=0.4896,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:20:18] {2258} INFO - iteration 12, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:20:23] {2442} INFO - at 231.0s,\testimator xgboost's best error=0.4896,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:20:23] {2258} INFO - iteration 13, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:20:28] {2442} INFO - at 235.7s,\testimator xgboost's best error=0.4896,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:20:28] {2258} INFO - iteration 14, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:20:34] {2442} INFO - at 242.2s,\testimator xgboost's best error=0.4896,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:20:34] {2258} INFO - iteration 15, current learner lgbm\n", "[flaml.automl.logger: 05-26 20:20:38] {2442} INFO - at 246.3s,\testimator lgbm's best error=0.4831,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:20:38] {2258} INFO - iteration 16, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:20:44] {2442} INFO - at 251.8s,\testimator xgboost's best error=0.4896,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:20:44] {2258} INFO - iteration 17, current learner catboost\n", "[flaml.automl.logger: 05-26 20:20:52] {2442} INFO - at 260.4s,\testimator catboost's best error=0.3789,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:20:52] {2258} INFO - iteration 18, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:20:58] {2442} INFO - at 265.8s,\testimator xgboost's best error=0.4722,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:20:58] {2258} INFO - iteration 19, current learner lgbm\n", "[flaml.automl.logger: 05-26 20:21:02] {2442} INFO - at 269.7s,\testimator lgbm's best error=0.4831,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:21:02] {2258} INFO - iteration 20, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:21:07] {2442} INFO - at 275.0s,\testimator xgboost's best error=0.4722,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:21:07] {2258} INFO - iteration 21, current learner lgbm\n", "[flaml.automl.logger: 05-26 20:21:12] {2442} INFO - at 279.9s,\testimator lgbm's best error=0.4831,\tbest estimator catboost's best error=0.3789\n", 
"[flaml.automl.logger: 05-26 20:21:12] {2258} INFO - iteration 22, current learner lgbm\n", "[flaml.automl.logger: 05-26 20:21:15] {2442} INFO - at 283.5s,\testimator lgbm's best error=0.4831,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:21:15] {2258} INFO - iteration 23, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:21:20] {2442} INFO - at 288.1s,\testimator xgboost's best error=0.4722,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:21:20] {2258} INFO - iteration 24, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:21:25] {2442} INFO - at 292.8s,\testimator xgboost's best error=0.4722,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:21:25] {2258} INFO - iteration 25, current learner lgbm\n", "[flaml.automl.logger: 05-26 20:21:30] {2442} INFO - at 298.0s,\testimator lgbm's best error=0.4831,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:21:30] {2258} INFO - iteration 26, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:21:36] {2442} INFO - at 304.0s,\testimator xgboost's best error=0.4722,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:21:36] {2258} INFO - iteration 27, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:21:41] {2442} INFO - at 309.0s,\testimator xgboost's best error=0.4722,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:21:41] {2258} INFO - iteration 28, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:21:48] {2442} INFO - at 316.1s,\testimator xgboost's best error=0.4722,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:21:48] {2258} INFO - iteration 29, current learner catboost\n", "[flaml.automl.logger: 05-26 20:24:08] {2442} INFO - at 456.2s,\testimator catboost's best error=0.3789,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:24:08] {2258} INFO - iteration 30, current learner catboost\n", "[flaml.automl.logger: 05-26 20:24:17] {2442} INFO - at 465.0s,\testimator catboost's best error=0.3789,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:24:17] {2258} INFO - iteration 31, current learner lgbm\n", "[flaml.automl.logger: 05-26 20:24:22] {2442} INFO - at 469.9s,\testimator lgbm's best error=0.4831,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:24:22] {2258} INFO - iteration 32, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:24:27] {2442} INFO - at 474.9s,\testimator xgboost's best error=0.4722,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:24:27] {2258} INFO - iteration 33, current learner lgbm\n", "[flaml.automl.logger: 05-26 20:24:30] {2442} INFO - at 478.6s,\testimator lgbm's best error=0.4831,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:24:30] {2258} INFO - iteration 34, current learner lgbm\n", "[flaml.automl.logger: 05-26 20:24:35] {2442} INFO - at 483.1s,\testimator lgbm's best error=0.4831,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:24:35] {2258} INFO - iteration 35, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:24:41] {2442} INFO - at 489.2s,\testimator xgboost's best error=0.4722,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:24:41] {2258} INFO - iteration 36, current learner xgboost\n", 
"[flaml.automl.logger: 05-26 20:24:46] {2442} INFO - at 493.8s,\testimator xgboost's best error=0.4722,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:24:46] {2258} INFO - iteration 37, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:24:52] {2442} INFO - at 500.5s,\testimator xgboost's best error=0.4722,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:24:52] {2258} INFO - iteration 38, current learner lgbm\n", "[flaml.automl.logger: 05-26 20:24:56] {2442} INFO - at 504.3s,\testimator lgbm's best error=0.4831,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:24:56] {2258} INFO - iteration 39, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:25:01] {2442} INFO - at 509.0s,\testimator xgboost's best error=0.4722,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:25:01] {2258} INFO - iteration 40, current learner xgboost\n", "[flaml.automl.logger: 05-26 20:25:09] {2442} INFO - at 516.7s,\testimator xgboost's best error=0.4722,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:25:09] {2258} INFO - iteration 41, current learner catboost\n", "[flaml.automl.logger: 05-26 20:26:37] {2442} INFO - at 605.2s,\testimator catboost's best error=0.3789,\tbest estimator catboost's best error=0.3789\n", "[flaml.automl.logger: 05-26 20:26:37] {2550} INFO - selected model: \n", "[flaml.automl.logger: 05-26 20:26:37] {1985} INFO - fit succeeded\n", "[flaml.automl.logger: 05-26 20:26:37] {1986} INFO - Time taken to find the best model: 44.485583543777466\n" ] } ], "source": [ "\n", "gc.collect()\n", "\n", "use_pca = False\n", "type = 'light'\n", "# feature_contri = [2 if feat.startswith('act_factor') or 'buy' in feat or 'sell' in feat else 1 for feat in feature_columns]\n", "# light_params['feature_contri'] = feature_contri\n", "# print(f'feature_contri: {feature_contri}')\n", "model, scaler, pca = train_model(train_data\n", " .dropna(subset=['label']).groupby('trade_date', group_keys=False)\n", " .apply(lambda x: x.nsmallest(1000, 'total_mv'))\n", " .merge(industry_df, on=['cat_l2_code', 'trade_date'], how='left')\n", " .merge(index_data, on='trade_date', how='left'), \n", " feature_columns, type=type, target_column='label')\n" ] }, { "cell_type": "code", "execution_count": null, "id": "5d1522a7538db91b", "metadata": { "ExecuteTime": { "end_time": "2025-04-03T15:04:39.656944Z", "start_time": "2025-04-03T15:04:39.298483Z" } }, "outputs": [], "source": [ "score_df = test_data.groupby('trade_date', group_keys=False).apply(lambda x: x.nsmallest(1000, 'total_mv'))\n", "# score_df = fill_nan_with_daily_median(score_df, ['pe_ttm'])\n", "# score_df = score_df[score_df['pe_ttm'] > 0]\n", "score_df = score_df.merge(industry_df, on=['cat_l2_code', 'trade_date'], how='left')\n", "score_df = score_df.merge(index_data, on='trade_date', how='left')\n", "# score_df = score_df.groupby('trade_date', group_keys=False).apply(lambda x: x.nsmallest(50, 'total_mv')).reset_index()\n", "numeric_columns = score_df.select_dtypes(include=['float64', 'int64']).columns\n", "numeric_columns = [col for col in feature_columns if col in numeric_columns]\n", "\n", "if type == 'cat':\n", " score_df['score'] = model.predict(score_df[feature_columns])\n", "elif type == 'light':\n", " score_df['score'] = model.predict(score_df[feature_columns])\n", "score_df['score_ranks'] = score_df.groupby('trade_date')['score'].rank(ascending=True)\n", "\n", "score_df = 
score_df.groupby('trade_date', group_keys=False).apply(\n", " lambda x: \n", " x[\n", " # (x['score'] <= x['score'].quantile(0.99)) & \n", " (x['score'] >= x['score'].quantile(0.90))\n", " ] # 计算90%分位数作为阈值,筛选分数>=阈值的行\n", ").reset_index(drop=True) # drop=True 避免添加旧索引列\n", "# df_to_drop = score_df.loc[score_df.groupby('trade_date')['score'].idxmax()]\n", "# score_df = score_df.drop(df_to_drop.index)\n", "save_df = score_df.groupby('trade_date', group_keys=False).apply(lambda x: x.nlargest(2, 'score')).reset_index()\n", "# save_df = score_df.groupby('trade_date', group_keys=False).apply(lambda x: x.nsmallest(2, 'total_mv')).reset_index()\n", "save_df = save_df.sort_values(['trade_date', 'score'])\n", "save_df[['trade_date', 'score', 'ts_code']].to_csv('predictions_test.tsv', index=False)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "09b1799e", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "192\n", "['vol', 'pct_chg', 'turnover_rate', 'volume_ratio', 'winner_rate', 'undist_profit_ps', 'ocfps', 'AR', 'BR', 'AR_BR', 'log_circ_mv', 'cashflow_to_ev_factor', 'book_to_price_ratio', 'turnover_rate_mean_5', 'variance_20', 'bbi_ratio_factor', 'daily_deviation', 'lg_elg_net_buy_vol', 'flow_lg_elg_intensity', 'sm_net_buy_vol', 'total_buy_vol', 'lg_elg_buy_prop', 'flow_struct_buy_change', 'lg_elg_net_buy_vol_change', 'flow_lg_elg_accel', 'chip_concentration_range', 'chip_skewness', 'floating_chip_proxy', 'cost_support_15pct_change', 'cat_winner_price_zone', 'flow_chip_consistency', 'profit_taking_vs_absorb', 'cat_is_positive', 'upside_vol', 'downside_vol', 'vol_ratio', 'return_skew', 'return_kurtosis', 'volume_change_rate', 'cat_volume_breakout', 'turnover_deviation', 'cat_turnover_spike', 'avg_volume_ratio', 'cat_volume_ratio_breakout', 'vol_spike', 'vol_std_5', 'atr_14', 'atr_6', 'obv', 'maobv_6', 'rsi_3', 'return_5', 'return_20', 'std_return_5', 'std_return_90', 'std_return_90_2', 'act_factor1', 'act_factor2', 'act_factor3', 'act_factor4', 'rank_act_factor1', 'rank_act_factor2', 'rank_act_factor3', 'cov', 'delta_cov', 'alpha_22_improved', 'alpha_003', 'alpha_007', 'alpha_013', 'vol_break', 'weight_roc5', 'smallcap_concentration', 'cost_stability', 'high_cost_break_days', 'liquidity_risk', 'turnover_std', 'mv_volatility', 'volume_growth', 'mv_growth', 'momentum_factor', 'resonance_factor', 'log_close', 'cat_vol_spike', 'up', 'down', 'obv_maobv_6', 'std_return_5_over_std_return_90', 'std_return_90_minus_std_return_90_2', 'cat_af2', 'cat_af3', 'cat_af4', 'act_factor5', 'act_factor6', 'active_buy_volume_large', 'active_buy_volume_big', 'active_buy_volume_small', 'buy_lg_vol_minus_sell_lg_vol', 'buy_elg_vol_minus_sell_elg_vol', 'ctrl_strength', 'low_cost_dev', 'asymmetry', 'lock_factor', 'cat_vol_break', 'cost_atr_adj', 'cat_golden_resonance', 'mv_turnover_ratio', 'mv_adjusted_volume', 'mv_weighted_turnover', 'nonlinear_mv_volume', 'mv_volume_ratio', 'mv_momentum', 'lg_flow_mom_corr_20_60', 'lg_flow_accel', 'profit_pressure', 'underwater_resistance', 'cost_conc_std_20', 'profit_decay_20', 'vol_amp_loss_20', 'vol_drop_profit_cnt_5', 'lg_flow_vol_interact_20', 'cost_break_confirm_cnt_5', 'atr_norm_channel_pos_14', 'turnover_diff_skew_20', 'lg_sm_flow_diverge_20', 'pullback_strong_20_20', 'vol_wgt_hist_pos_20', 'vol_adj_roc_20', 'cs_rank_net_lg_flow_val', 'cs_rank_elg_buy_ratio', 'cs_rank_rel_profit_margin', 'cs_rank_cost_breadth', 'cs_rank_dist_to_upper_cost', 'cs_rank_winner_rate', 'cs_rank_intraday_range', 'cs_rank_close_pos_in_range', 
'cs_rank_pos_in_hist_range', 'cs_rank_vol_x_profit_margin', 'cs_rank_lg_flow_price_concordance', 'cs_rank_turnover_per_winner', 'cs_rank_volume_ratio', 'cs_rank_elg_buy_sell_sm_ratio', 'cs_rank_cost_dist_vol_ratio', 'cs_rank_size', 'cat_up_limit', 'industry_obv', 'industry_return_5', 'industry_return_20', 'industry__ema_5', 'industry__ema_13', 'industry__ema_20', 'industry__ema_60', 'industry_act_factor1', 'industry_act_factor2', 'industry_act_factor3', 'industry_act_factor4', 'industry_act_factor5', 'industry_act_factor6', 'industry_rank_act_factor1', 'industry_rank_act_factor2', 'industry_rank_act_factor3', 'industry_return_5_percentile', 'industry_return_20_percentile', '000852.SH_MACD', '000905.SH_MACD', '399006.SZ_MACD', '000852.SH_MACD_hist', '000905.SH_MACD_hist', '399006.SZ_MACD_hist', '000852.SH_RSI', '000905.SH_RSI', '399006.SZ_RSI', '000852.SH_Signal_line', '000905.SH_Signal_line', '399006.SZ_Signal_line', '000852.SH_amount_change_rate', '000905.SH_amount_change_rate', '399006.SZ_amount_change_rate', '000852.SH_amount_mean', '000905.SH_amount_mean', '399006.SZ_amount_mean', '000852.SH_daily_return', '000905.SH_daily_return', '399006.SZ_daily_return', '000852.SH_up_ratio_20d', '000905.SH_up_ratio_20d', '399006.SZ_up_ratio_20d', '000852.SH_volatility', '000905.SH_volatility', '399006.SZ_volatility', '000852.SH_volume_change_rate', '000905.SH_volume_change_rate', '399006.SZ_volume_change_rate']\n" ] } ], "source": [ "print(len(feature_columns))\n", "print(feature_columns)" ] }, { "cell_type": "code", "execution_count": null, "id": "bceabd1f", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "警告: DataFrame 中没有 'group_id' 列。假设整个 DataFrame 是一个需要排序的组。\n", "\n", "NDCG 结果\n", "{'ndcg@1': 0.16666666666666666, 'ndcg@3': 0.12073315639204953, 'ndcg@5': 0.16262598210797247}\n" ] } ], "source": [
"import pandas as pd\n",
"import numpy as np\n",
"\n",
"def calculate_ndcg(df: pd.DataFrame, score_col: str, label_col: str, group_id: str = 'trade_date', k_values: list = [1, 3, 5, 10]):\n",
"    \"\"\"\n",
"    Compute NDCG between a score column and a label column of a DataFrame.\n",
"\n",
"    Args:\n",
"        df (pd.DataFrame): DataFrame holding the score (learning-to-rank prediction)\n",
"            and label (relevance) columns. Each group to be ranked (e.g. one trading\n",
"            day of stocks) is assumed to be contiguous in the DataFrame.\n",
"        score_col (str): name of the model-score column.\n",
"        label_col (str): name of the relevance-label column; higher means more relevant.\n",
"        group_id (str): name of the grouping column; pass None (or a missing column)\n",
"            to treat the whole DataFrame as a single group.\n",
"        k_values (list): list of integer cut-offs for top-k NDCG,\n",
"            e.g. [1, 3, 5] computes NDCG@1, NDCG@3 and NDCG@5.\n",
"\n",
"    Returns:\n",
"        dict: average NDCG per k value,\n",
"        e.g. {'ndcg@1': 0.85, 'ndcg@3': 0.78, 'ndcg@5': 0.72, 'ndcg@10': 0.65}\n",
"    \"\"\"\n",
"    ndcg_scores = {f'ndcg@{k}': [] for k in k_values}\n",
"\n",
"    def dcg_at_k(r, k):\n",
"        # np.asfarray was removed in NumPy 2.0; use an explicit float cast instead\n",
"        r = np.asarray(r, dtype=float)[:k] if len(r) > 0 else np.zeros(k)\n",
"        return np.sum(r / np.log2(np.arange(2, r.size + 2)))\n",
"\n",
"    def ndcg_at_k(r, k):\n",
"        dcg_max = dcg_at_k(sorted(r, reverse=True), k)\n",
"        if not dcg_max:\n",
"            return 0.\n",
"        return dcg_at_k(r, k) / dcg_max\n",
"\n",
"    # The DataFrame is assumed to already be organized by the ranking unit (e.g.\n",
"    # 'trade_date'); the order within a group does not matter, NDCG is computed per\n",
"    # group. If group_id is None or not a column, the whole frame is one group.\n",
"    if group_id not in df.columns:\n",
"        print(\"警告: DataFrame 中没有 'group_id' 列。假设整个 DataFrame 是一个需要排序的组。\")\n",
"        group_df = df.sort_values(by=score_col, ascending=False)\n",
"        relevant_labels = group_df[label_col].values\n",
"        for k in k_values:\n",
"            ndcg_scores[f'ndcg@{k}'].append(ndcg_at_k(relevant_labels, k))\n",
"    else:\n",
"        for _, group_df in df.groupby(group_id):\n",
"            group_df_sorted = group_df.sort_values(by=score_col, ascending=False)\n",
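"            # labels are read off in model-score order; NDCG then measures how well the\n",
"            # predicted ranking agrees with the graded qcut relevance labels\n",
"            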
relevant_labels = group_df_sorted[label_col].values\n", " for k in k_values:\n", " ndcg_scores[f'ndcg@{k}'].append(ndcg_at_k(relevant_labels, k))\n", "\n", " avg_ndcg = {k: np.mean(v) if v else np.nan for k, v in ndcg_scores.items()}\n", " return avg_ndcg\n", "\n", "\n", "ndcg_results_single_group = calculate_ndcg(score_df, score_col='score', label_col='label', k_values=[1, 3, 5], group_id=None)\n", "print(\"\\nNDCG 结果\")\n", "print(ndcg_results_single_group)\n" ] } ], "metadata": { "kernelspec": { "display_name": "new_trader", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.11" } }, "nbformat": 4, "nbformat_minor": 5 }