1985 lines
223 KiB
Plaintext
1985 lines
223 KiB
Plaintext
|
|
{
|
|||
|
|
"cells": [
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 1,
|
|||
|
|
"id": "79a7758178bafdd3",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T12:46:06.987506Z",
|
|||
|
|
"start_time": "2025-04-03T12:46:06.259551Z"
|
|||
|
|
},
|
|||
|
|
"jupyter": {
|
|||
|
|
"source_hidden": true
|
|||
|
|
}
|
|||
|
|
},
|
|||
|
|
"outputs": [
|
|||
|
|
{
|
|||
|
|
"name": "stdout",
|
|||
|
|
"output_type": "stream",
|
|||
|
|
"text": [
|
|||
|
|
"e:\\PyProject\\NewStock\\main\\train\n"
|
|||
|
|
]
|
|||
|
|
}
|
|||
|
|
],
|
|||
|
|
"source": [
|
|||
|
|
"# %load_ext autoreload\n",
|
|||
|
|
"# %autoreload 2\n",
|
|||
|
|
"\n",
|
|||
|
|
"import gc\n",
|
|||
|
|
"import os\n",
|
|||
|
|
"import sys\n",
|
|||
|
|
"sys.path.append('../../')\n",
|
|||
|
|
"print(os.getcwd())\n",
|
|||
|
|
"import pandas as pd\n",
|
|||
|
|
"from main.factor.factor import get_rolling_factor, get_simple_factor\n",
|
|||
|
|
"from main.utils.factor import read_industry_data\n",
|
|||
|
|
"from main.utils.factor_processor import calculate_score\n",
|
|||
|
|
"from main.utils.utils import read_and_merge_h5_data, merge_with_industry_data\n",
|
|||
|
|
"\n",
|
|||
|
|
"import warnings\n",
|
|||
|
|
"\n",
|
|||
|
|
"warnings.filterwarnings(\"ignore\")"
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 2,
|
|||
|
|
"id": "a79cafb06a7e0e43",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T12:47:00.212859Z",
|
|||
|
|
"start_time": "2025-04-03T12:46:06.998047Z"
|
|||
|
|
},
|
|||
|
|
"scrolled": true
|
|||
|
|
},
|
|||
|
|
"outputs": [
|
|||
|
|
{
|
|||
|
|
"name": "stdout",
|
|||
|
|
"output_type": "stream",
|
|||
|
|
"text": [
|
|||
|
|
"daily data\n",
|
|||
|
|
"daily basic\n",
|
|||
|
|
"inner merge on ['ts_code', 'trade_date']\n",
|
|||
|
|
"stk limit\n",
|
|||
|
|
"left merge on ['ts_code', 'trade_date']\n",
|
|||
|
|
"money flow\n",
|
|||
|
|
"left merge on ['ts_code', 'trade_date']\n",
|
|||
|
|
"cyq perf\n",
|
|||
|
|
"left merge on ['ts_code', 'trade_date']\n",
|
|||
|
|
"<class 'pandas.core.frame.DataFrame'>\n",
|
|||
|
|
"RangeIndex: 8514978 entries, 0 to 8514977\n",
|
|||
|
|
"Data columns (total 31 columns):\n",
|
|||
|
|
" # Column Dtype \n",
|
|||
|
|
"--- ------ ----- \n",
|
|||
|
|
" 0 ts_code object \n",
|
|||
|
|
" 1 trade_date datetime64[ns]\n",
|
|||
|
|
" 2 open float64 \n",
|
|||
|
|
" 3 close float64 \n",
|
|||
|
|
" 4 high float64 \n",
|
|||
|
|
" 5 low float64 \n",
|
|||
|
|
" 6 vol float64 \n",
|
|||
|
|
" 7 pct_chg float64 \n",
|
|||
|
|
" 8 turnover_rate float64 \n",
|
|||
|
|
" 9 pe_ttm float64 \n",
|
|||
|
|
" 10 circ_mv float64 \n",
|
|||
|
|
" 11 volume_ratio float64 \n",
|
|||
|
|
" 12 is_st bool \n",
|
|||
|
|
" 13 up_limit float64 \n",
|
|||
|
|
" 14 down_limit float64 \n",
|
|||
|
|
" 15 buy_sm_vol float64 \n",
|
|||
|
|
" 16 sell_sm_vol float64 \n",
|
|||
|
|
" 17 buy_lg_vol float64 \n",
|
|||
|
|
" 18 sell_lg_vol float64 \n",
|
|||
|
|
" 19 buy_elg_vol float64 \n",
|
|||
|
|
" 20 sell_elg_vol float64 \n",
|
|||
|
|
" 21 net_mf_vol float64 \n",
|
|||
|
|
" 22 his_low float64 \n",
|
|||
|
|
" 23 his_high float64 \n",
|
|||
|
|
" 24 cost_5pct float64 \n",
|
|||
|
|
" 25 cost_15pct float64 \n",
|
|||
|
|
" 26 cost_50pct float64 \n",
|
|||
|
|
" 27 cost_85pct float64 \n",
|
|||
|
|
" 28 cost_95pct float64 \n",
|
|||
|
|
" 29 weight_avg float64 \n",
|
|||
|
|
" 30 winner_rate float64 \n",
|
|||
|
|
"dtypes: bool(1), datetime64[ns](1), float64(28), object(1)\n",
|
|||
|
|
"memory usage: 1.9+ GB\n",
|
|||
|
|
"None\n"
|
|||
|
|
]
|
|||
|
|
}
|
|||
|
|
],
|
|||
|
|
"source": [
|
|||
|
|
# NOTE(review): read_and_merge_h5_data was already imported in the first cell;
# this re-import is redundant but harmless.
from main.utils.utils import read_and_merge_h5_data

# Build the master per-stock daily frame by successively merging the
# per-topic HDF5 stores on ['ts_code', 'trade_date'] (the helper prints
# the join keys/strategy it uses, as seen in the cell output).
print('daily data')
df = read_and_merge_h5_data('../../data/daily_data.h5', key='daily_data',
                            columns=['ts_code', 'trade_date', 'open', 'close', 'high', 'low', 'vol', 'pct_chg'],
                            df=None)

# Valuation / liquidity basics; inner join drops rows missing on either side.
print('daily basic')
df = read_and_merge_h5_data('../../data/daily_basic.h5', key='daily_basic',
                            columns=['ts_code', 'trade_date', 'turnover_rate', 'pe_ttm', 'circ_mv', 'volume_ratio',
                                     'is_st'], df=df, join='inner')

# Daily price-limit bands (left merge keeps every existing row).
print('stk limit')
df = read_and_merge_h5_data('../../data/stk_limit.h5', key='stk_limit',
                            columns=['ts_code', 'trade_date', 'pre_close', 'up_limit', 'down_limit'],
                            df=df)
# Money-flow volumes broken down by order size (small / large / extra-large).
print('money flow')
df = read_and_merge_h5_data('../../data/money_flow.h5', key='money_flow',
                            columns=['ts_code', 'trade_date', 'buy_sm_vol', 'sell_sm_vol', 'buy_lg_vol', 'sell_lg_vol',
                                     'buy_elg_vol', 'sell_elg_vol', 'net_mf_vol'],
                            df=df)
# Chip-distribution (holder cost-basis) percentiles and winner rate.
print('cyq perf')
df = read_and_merge_h5_data('../../data/cyq_perf.h5', key='cyq_perf',
                            columns=['ts_code', 'trade_date', 'his_low', 'his_high', 'cost_5pct', 'cost_15pct',
                                     'cost_50pct',
                                     'cost_85pct', 'cost_95pct', 'weight_avg', 'winner_rate'],
                            df=df)
# NOTE(review): df.info() writes to stdout and returns None, so this also
# prints a trailing "None" (visible in the recorded output).
print(df.info())
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 3,
|
|||
|
|
"id": "cac01788dac10678",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T12:47:10.527104Z",
|
|||
|
|
"start_time": "2025-04-03T12:47:00.488715Z"
|
|||
|
|
}
|
|||
|
|
},
|
|||
|
|
"outputs": [
|
|||
|
|
{
|
|||
|
|
"name": "stdout",
|
|||
|
|
"output_type": "stream",
|
|||
|
|
"text": [
|
|||
|
|
"industry\n"
|
|||
|
|
]
|
|||
|
|
}
|
|||
|
|
],
|
|||
|
|
"source": [
|
|||
|
|
# Load the industry-membership table: one row per (ts_code, in_date)
# membership change; l2_code is the level-2 industry classification.
print('industry')
industry_df = read_and_merge_h5_data('../../data/industry_data.h5', key='industry_data',
                                     columns=['ts_code', 'l2_code', 'in_date'],
                                     df=None, on=['ts_code'], join='left')
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def merge_with_industry_data(df, industry_df):
    """
    Attach each trading row's industry code (l2_code) as of its trade date.

    Uses a backward as-of merge: every (ts_code, trade_date) row receives the
    l2_code from the latest industry record whose in_date <= trade_date.
    Rows dated before the stock's first industry record fall back to that
    stock's earliest known l2_code.

    Note: mutates both inputs in place by coercing their date columns to
    datetime (matching the original behavior).

    Parameters:
        df: frame with at least ts_code and trade_date columns.
        industry_df: frame with ts_code, l2_code and in_date columns.

    Returns:
        df sorted by (trade_date, ts_code) with l2_code / in_date attached
        and a fresh RangeIndex.
    """
    # Normalize date dtypes so merge_asof can compare them.
    df['trade_date'] = pd.to_datetime(df['trade_date'])
    industry_df['in_date'] = pd.to_datetime(industry_df['in_date'])

    # merge_asof requires both sides sorted on their respective keys.
    industry_by_date = industry_df.sort_values(['in_date', 'ts_code'])
    trades_by_date = df.sort_values(['trade_date', 'ts_code'])

    # Backward as-of join per stock: latest in_date not after trade_date.
    merged = pd.merge_asof(
        trades_by_date,
        industry_by_date,
        by='ts_code',
        left_on='trade_date',
        right_on='in_date',
        direction='backward'
    )

    # Earliest industry record per stock, used as the pre-listing fallback.
    earliest_industry = (industry_by_date
                         .groupby('ts_code')
                         .first()
                         .reset_index()[['ts_code', 'l2_code']])

    # Fill rows whose trade_date precedes every in_date for that stock.
    fallback_codes = merged['ts_code'].map(
        earliest_industry.set_index('ts_code')['l2_code'])
    merged['l2_code'] = merged['l2_code'].fillna(fallback_codes)

    return merged.reset_index(drop=True)
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
# Usage example: attach as-of industry codes to the master frame.
# NOTE(review): this calls the merge_with_industry_data defined just above,
# which shadows the version imported from main.utils.utils in the first
# cell — confirm that is intentional.
df = merge_with_industry_data(df, industry_df)
# print(mdf[mdf['ts_code'] == '600751.SH'][['ts_code', 'trade_date', 'l2_code']])
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 4,
|
|||
|
|
"id": "c4e9e1d31da6dba6",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T12:47:10.719252Z",
|
|||
|
|
"start_time": "2025-04-03T12:47:10.541247Z"
|
|||
|
|
},
|
|||
|
|
"jupyter": {
|
|||
|
|
"source_hidden": true
|
|||
|
|
}
|
|||
|
|
},
|
|||
|
|
"outputs": [],
|
|||
|
|
"source": [
|
|||
|
|
"def calculate_indicators(df):\n",
|
|||
|
|
" \"\"\"\n",
|
|||
|
|
" 计算四个指标:当日涨跌幅、5日移动平均、RSI、MACD。\n",
|
|||
|
|
" \"\"\"\n",
|
|||
|
|
" df = df.sort_values('trade_date')\n",
|
|||
|
|
" df['daily_return'] = (df['close'] - df['pre_close']) / df['pre_close'] * 100\n",
|
|||
|
|
" # df['5_day_ma'] = df['close'].rolling(window=5).mean()\n",
|
|||
|
|
" delta = df['close'].diff()\n",
|
|||
|
|
" gain = delta.where(delta > 0, 0)\n",
|
|||
|
|
" loss = -delta.where(delta < 0, 0)\n",
|
|||
|
|
" avg_gain = gain.rolling(window=14).mean()\n",
|
|||
|
|
" avg_loss = loss.rolling(window=14).mean()\n",
|
|||
|
|
" rs = avg_gain / avg_loss\n",
|
|||
|
|
" df['RSI'] = 100 - (100 / (1 + rs))\n",
|
|||
|
|
"\n",
|
|||
|
|
" # 计算MACD\n",
|
|||
|
|
" ema12 = df['close'].ewm(span=12, adjust=False).mean()\n",
|
|||
|
|
" ema26 = df['close'].ewm(span=26, adjust=False).mean()\n",
|
|||
|
|
" df['MACD'] = ema12 - ema26\n",
|
|||
|
|
" df['Signal_line'] = df['MACD'].ewm(span=9, adjust=False).mean()\n",
|
|||
|
|
" df['MACD_hist'] = df['MACD'] - df['Signal_line']\n",
|
|||
|
|
"\n",
|
|||
|
|
" # 4. 情绪因子1:市场上涨比例(Up Ratio)\n",
|
|||
|
|
" df['up_ratio'] = df['daily_return'].apply(lambda x: 1 if x > 0 else 0)\n",
|
|||
|
|
" df['up_ratio_20d'] = df['up_ratio'].rolling(window=20).mean() # 过去20天上涨比例\n",
|
|||
|
|
"\n",
|
|||
|
|
" # 5. 情绪因子2:成交量变化率(Volume Change Rate)\n",
|
|||
|
|
" df['volume_mean'] = df['vol'].rolling(window=20).mean() # 过去20天的平均成交量\n",
|
|||
|
|
" df['volume_change_rate'] = (df['vol'] - df['volume_mean']) / df['volume_mean'] * 100 # 成交量变化率\n",
|
|||
|
|
"\n",
|
|||
|
|
" # 6. 情绪因子3:波动率(Volatility)\n",
|
|||
|
|
" df['volatility'] = df['daily_return'].rolling(window=20).std() # 过去20天的日收益率标准差\n",
|
|||
|
|
"\n",
|
|||
|
|
" # 7. 情绪因子4:成交额变化率(Amount Change Rate)\n",
|
|||
|
|
" df['amount_mean'] = df['amount'].rolling(window=20).mean() # 过去20天的平均成交额\n",
|
|||
|
|
" df['amount_change_rate'] = (df['amount'] - df['amount_mean']) / df['amount_mean'] * 100 # 成交额变化率\n",
|
|||
|
|
"\n",
|
|||
|
|
" return df\n",
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def generate_index_indicators(h5_filename):
    """
    Load index-level daily data from an HDF5 store, compute indicators per
    index via calculate_indicators, and pivot to one row per trade_date with
    columns named "<ts_code>_<indicator>".

    Parameters:
        h5_filename: path to an HDF5 file with key 'index_data' containing at
            least ts_code and trade_date (formatted %Y%m%d), plus the columns
            calculate_indicators expects (close, pre_close, vol, amount).

    Returns:
        A wide DataFrame with a trade_date column and one column per
        (index code, indicator) pair, RangeIndexed.
    """
    df = pd.read_hdf(h5_filename, key='index_data')
    df['trade_date'] = pd.to_datetime(df['trade_date'], format='%Y%m%d')
    df = df.sort_values('trade_date')

    # Compute the indicators independently for each index code.
    df_indicators = []
    for ts_code in df['ts_code'].unique():
        df_index = df[df['ts_code'] == ts_code].copy()
        df_index = calculate_indicators(df_index)
        df_indicators.append(df_index)

    # Stack the per-index results back into one long frame.
    df_all_indicators = pd.concat(df_indicators, ignore_index=True)

    # Keep trade_date and widen: one row per day, one column per
    # (metric, code). aggfunc='last' resolves any duplicate
    # (date, code) pairs by keeping the last occurrence.
    df_final = df_all_indicators.pivot_table(
        index='trade_date',
        columns='ts_code',
        values=['daily_return', 'RSI', 'MACD', 'Signal_line',
                'MACD_hist', 'up_ratio_20d', 'volume_change_rate', 'volatility',
                'amount_change_rate', 'amount_mean'],
        aggfunc='last'
    )

    # Flatten the (metric, code) MultiIndex into "<code>_<metric>" names.
    df_final.columns = [f"{col[1]}_{col[0]}" for col in df_final.columns]
    df_final = df_final.reset_index()

    return df_final
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
# Run the pipeline on the index file; dropna() removes the warm-up rows that
# contain NaNs from the rolling/RSI windows (so all indices align on dates
# where every indicator is defined).
h5_filename = '../../data/index_data.h5'
index_data = generate_index_indicators(h5_filename)
index_data = index_data.dropna()
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 5,
|
|||
|
|
"id": "a735bc02ceb4d872",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T12:47:10.821169Z",
|
|||
|
|
"start_time": "2025-04-03T12:47:10.751831Z"
|
|||
|
|
}
|
|||
|
|
},
|
|||
|
|
"outputs": [],
|
|||
|
|
"source": [
|
|||
|
|
"\n",
|
|||
|
|
"import talib\n",
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def get_rolling_factor(df):
    """
    Append a large set of rolling money-flow, chip-distribution, volatility,
    TA-Lib technical and cross-sectional-rank factors to the per-stock daily
    frame.

    Parameters:
        df: DataFrame with one row per (ts_code, trade_date) containing the
            price/volume, money-flow and chip-cost columns referenced below.

    Returns:
        (df, new_columns): the factor-augmented frame (sorted by ts_code,
        trade_date) and the list of column names this function added.

    NOTE(review): this body uses `np` and `talib`, but only `import talib`
    is visible in this notebook cell and no visible cell imports numpy —
    confirm `import numpy as np` is in scope at runtime.
    NOTE(review): `grouped` is created before most of the columns it later
    selects exist; this relies on the GroupBy object holding a live
    reference to the same frame that the in-place column assignments
    mutate — confirm this holds for the pandas version in use.
    """
    old_columns = df.columns.tolist()[:]

    # Sort by stock and date (in case the input is not already sorted).
    df = df.sort_values(by=['ts_code', 'trade_date'])

    grouped = df.groupby('ts_code', group_keys=False)

    # Small constant to avoid division by zero.
    epsilon = 1e-8

    # 1. Large + extra-large order net buy volume and its intensity
    #    relative to total volume.
    df['lg_elg_net_buy_vol'] = df['buy_lg_vol'] + df['buy_elg_vol'] - df['sell_lg_vol'] - df['sell_elg_vol']
    df['flow_lg_elg_intensity'] = df['lg_elg_net_buy_vol'] / (df['vol'] + epsilon)

    # 2. Retail vs institutional divergence: difference/ratio between small-
    #    order net inflow and (large + extra-large) net inflow.
    df['sm_net_buy_vol'] = df['buy_sm_vol'] - df['sell_sm_vol']
    df['flow_divergence_diff'] = df['sm_net_buy_vol'] - df['lg_elg_net_buy_vol']
    # Ratio form may be more stable; the sign-aware epsilon avoids 0/0.
    df['flow_divergence_ratio'] = df['sm_net_buy_vol'] / (df['lg_elg_net_buy_vol'] + np.sign(df['lg_elg_net_buy_vol']) * epsilon + epsilon)

    # 3. Flow structure change: 1-day change in the share of buy volume
    #    coming from large + extra-large orders.
    df['total_buy_vol'] = df['buy_sm_vol'] + df['buy_lg_vol'] + df['buy_elg_vol']
    df['lg_elg_buy_prop'] = (df['buy_lg_vol'] + df['buy_elg_vol']) / (df['total_buy_vol'] + epsilon)
    df['flow_struct_buy_change'] = grouped['lg_elg_buy_prop'].diff(1)

    # 4. Flow acceleration: second difference of the net institutional flow.
    df['lg_elg_net_buy_vol_change'] = grouped['lg_elg_net_buy_vol'].diff(1)
    df['flow_lg_elg_accel'] = grouped['lg_elg_net_buy_vol_change'].diff(1)

    # # 5. Extreme flow event (categorical): flag whether flow intensity sits
    # #    above/below its rolling 90th/10th percentile. Currently disabled.
    # rolling_window = 20  # tunable window
    # rolling_high = grouped['flow_lg_elg_intensity'].rolling(rolling_window, min_periods=1).quantile(0.9)
    # rolling_low = grouped['flow_lg_elg_intensity'].rolling(rolling_window, min_periods=1).quantile(0.1)
    # df['flow_lg_elg_intensity_rolling_high'] = rolling_high
    # df['flow_lg_elg_intensity_rolling_low'] = rolling_low
    # conditions_flow = [
    #     df['flow_lg_elg_intensity'] > df['flow_lg_elg_intensity_rolling_high'],
    #     df['flow_lg_elg_intensity'] < df['flow_lg_elg_intensity_rolling_low']
    # ]
    # choices_flow = [1, -1]  # 1: extreme inflow, -1: extreme outflow
    # df['cat_extreme_flow'] = np.select(conditions_flow, choices_flow, default=0)

    # --- Chip-distribution factors ---

    # 6. Chip concentration: spread between 95% and 5% cost percentiles,
    #    normalized by the current close.
    df['chip_concentration_range'] = (df['cost_95pct'] - df['cost_5pct']) / (df['close'] + epsilon)

    # 7. Chip-distribution skewness proxy: weighted-average cost vs median
    #    cost (weight_avg > cost_50pct suggests more chips at high cost).
    df['chip_skewness'] = (df['weight_avg'] - df['cost_50pct']) / (df['cost_50pct'] + epsilon)

    # 8. Floating-chips proxy: winner rate scaled by how far the close sits
    #    above the 15%-cost line (clamped at zero below it).
    price_dist_cost15 = (df['close'] - df['cost_15pct']) / (df['close'] + epsilon)
    df['floating_chip_proxy'] = df['winner_rate'] * np.maximum(0, price_dist_cost15)

    # 9. Cost-support strength change: daily percent change of the 15%-cost
    #    percentile (support level drifting up or down).
    df['cost_support_15pct_change'] = grouped['cost_15pct'].pct_change(1) * 100

    # 10. Winner-rate / price-position zone (categorical).
    conditions_winner = [
        (df['close'] > df['cost_85pct']) & (df['winner_rate'] > 0.8),  # high price & high winner rate
        (df['close'] < df['cost_15pct']) & (df['winner_rate'] < 0.2),  # low price & low winner rate
        (df['close'] > df['cost_50pct']) & (df['winner_rate'] > 0.5),  # mid-high & majority winning
        (df['close'] < df['cost_50pct']) & (df['winner_rate'] < 0.5),  # mid-low & majority losing
    ]
    # 1: distribution-risk zone, 2: accumulation-potential zone,
    # 3: mid-high winning zone, 4: mid-low losing zone, 0: other.
    choices_winner = [1, 2, 3, 4]
    df['cat_winner_price_zone'] = np.select(conditions_winner, choices_winner, default=0)

    # --- Combined factors ---

    # 11. Flow/chip consistency: institutional net buying that occurs while
    #     price sits inside the lower chip band (cost_15pct..cost_50pct).
    price_near_low_support = (df['close'] > df['cost_15pct']) & (df['close'] < df['cost_50pct'])
    df['flow_chip_consistency'] = df['lg_elg_net_buy_vol'] * price_near_low_support.astype(int)

    # 12. Profit-taking vs absorption: with winner_rate > 0.7, positive means
    #     institutions still buying (absorbing), negative means selling.
    high_winner_rate_flag = (df['winner_rate'] > 0.7).astype(int)
    df['profit_taking_vs_absorb'] = df['lg_elg_net_buy_vol'] * high_winner_rate_flag

    # NOTE(review): cols_to_drop lists two rolling-quantile columns that are
    # only created in the disabled block 5 above, and the drop itself is
    # commented out — this list is currently dead code.
    cols_to_drop = ['lg_elg_net_buy_vol', 'sm_net_buy_vol', 'total_buy_vol', 'lg_elg_buy_prop',
                    'lg_elg_net_buy_vol_change', 'flow_lg_elg_intensity_rolling_high',
                    'flow_lg_elg_intensity_rolling_low']
    # df = df.drop(columns=cols_to_drop)

    # --- Upside/downside volatility over a 20-day window ---
    window = 20
    df['_is_positive'] = (df['pct_chg'] > 0).astype(int)
    df['_is_negative'] = (df['pct_chg'] < 0).astype(int)
    df['cat_is_positive'] = (df['pct_chg'] > 0).astype(int)

    # Separate positive/negative returns (kept as values, zeroed elsewhere,
    # so rolling sums can be taken directly).
    df['_pos_returns'] = df['pct_chg'].where(df['pct_chg'] > 0, 0)
    df['_neg_returns'] = df['pct_chg'].where(df['pct_chg'] < 0, 0)

    # Squared returns, used for E[X^2] in the variance formula.
    df['_pos_returns_sq'] = np.square(df['_pos_returns'])
    df['_neg_returns_sq'] = np.square(df['_neg_returns'])

    # Rolling counts/sums per stock (built-in rolling ops for speed).
    rolling_pos_count = grouped['_is_positive'].rolling(window, min_periods=max(1, window // 2)).sum()
    rolling_pos_sum = grouped['_pos_returns'].rolling(window, min_periods=max(1, window // 2)).sum()
    rolling_pos_sum_sq = grouped['_pos_returns_sq'].rolling(window, min_periods=max(1, window // 2)).sum()

    rolling_neg_count = grouped['_is_negative'].rolling(window, min_periods=max(1, window // 2)).sum()
    rolling_neg_sum = grouped['_neg_returns'].rolling(window, min_periods=max(1, window // 2)).sum()
    rolling_neg_sum_sq = grouped['_neg_returns_sq'].rolling(window, min_periods=max(1, window // 2)).sum()

    # Variance via E[X^2] - E[X]^2, requiring at least 2 observations;
    # clip(lower=0) guards against negative values from float error.
    pos_mean_sq = rolling_pos_sum_sq / rolling_pos_count
    pos_mean = rolling_pos_sum / rolling_pos_count
    pos_var = pos_mean_sq - np.square(pos_mean)
    pos_var = pos_var.where(rolling_pos_count >= 2, np.nan).clip(lower=0)
    upside_vol = np.sqrt(pos_var)

    neg_mean_sq = rolling_neg_sum_sq / rolling_neg_count
    neg_mean = rolling_neg_sum / rolling_neg_count  # note: neg_mean is negative
    neg_var = neg_mean_sq - np.square(neg_mean)
    neg_var = neg_var.where(rolling_neg_count >= 2, np.nan).clip(lower=0)
    downside_vol = np.sqrt(neg_var)

    # Grouped rolling results carry a (ts_code, idx) MultiIndex; drop the
    # group level so the values align with df's index on assignment.
    df['upside_vol'] = upside_vol.reset_index(level=0, drop=True)
    df['downside_vol'] = downside_vol.reset_index(level=0, drop=True)

    df['vol_ratio'] = df['upside_vol'] / df['downside_vol']
    df['vol_ratio'] = df['vol_ratio'].replace([np.inf, -np.inf], np.nan).fillna(0)

    # 5-day rolling skew/kurtosis of daily returns.
    df['return_skew'] = grouped['pct_chg'].rolling(window=5).skew().reset_index(0, drop=True)
    df['return_kurtosis'] = grouped['pct_chg'].rolling(window=5).kurt().reset_index(0, drop=True)

    # Factor 1: short-term volume change rate (2-day vs 10-day mean volume).
    df['volume_change_rate'] = (
        grouped['vol'].rolling(window=2).mean() /
        grouped['vol'].rolling(window=10).mean() - 1
    ).reset_index(level=0, drop=True)

    # Factor 2: volume breakout flag vs the trailing 5-day max.
    max_volume = grouped['vol'].rolling(window=5).max().reset_index(level=0, drop=True)
    df['cat_volume_breakout'] = (df['vol'] > max_volume)

    # Factor 3: turnover z-score against its 3-day rolling mean/std.
    mean_turnover = grouped['turnover_rate'].rolling(window=3).mean().reset_index(level=0, drop=True)
    std_turnover = grouped['turnover_rate'].rolling(window=3).std().reset_index(level=0, drop=True)
    df['turnover_deviation'] = (df['turnover_rate'] - mean_turnover) / std_turnover

    # Factor 4: turnover spike flag (> mean + 2 std).
    df['cat_turnover_spike'] = (df['turnover_rate'] > mean_turnover + 2 * std_turnover)

    # Factor 5: 3-day mean of the volume ratio.
    df['avg_volume_ratio'] = grouped['volume_ratio'].rolling(window=3).mean().reset_index(level=0, drop=True)

    # Factor 6: volume-ratio breakout vs the trailing 5-day max.
    max_volume_ratio = grouped['volume_ratio'].rolling(window=5).max().reset_index(level=0, drop=True)
    df['cat_volume_ratio_breakout'] = (df['volume_ratio'] > max_volume_ratio)

    # 20-day mean volume per stock (group_keys=False keeps the original index).
    df['vol_spike'] = grouped.apply(
        lambda x: pd.Series(x['vol'].rolling(20).mean(), index=x.index)
    )
    df['vol_std_5'] = grouped['vol'].pct_change().rolling(window=5).std()

    # ATR over 14 and 6 sessions (TA-Lib, computed per stock).
    df['atr_14'] = grouped.apply(
        lambda x: pd.Series(talib.ATR(x['high'].values, x['low'].values, x['close'].values, timeperiod=14),
                            index=x.index)
    )
    df['atr_6'] = grouped.apply(
        lambda x: pd.Series(talib.ATR(x['high'].values, x['low'].values, x['close'].values, timeperiod=6),
                            index=x.index)
    )

    # OBV and its 6-day SMA.
    df['obv'] = grouped.apply(
        lambda x: pd.Series(talib.OBV(x['close'].values, x['vol'].values), index=x.index)
    )
    # NOTE(review): leftover debug output — consider removing.
    print(df.columns)
    df['maobv_6'] = grouped.apply(
        lambda x: pd.Series(talib.SMA(x['obv'].values, timeperiod=6), index=x.index)
    )

    # Short-period RSI.
    df['rsi_3'] = grouped.apply(
        lambda x: pd.Series(talib.RSI(x['close'].values, timeperiod=3), index=x.index)
    )
    # df['rsi_6'] = grouped.apply(
    #     lambda x: pd.Series(talib.RSI(x['close'].values, timeperiod=6), index=x.index)
    # )
    # df['rsi_9'] = grouped.apply(
    #     lambda x: pd.Series(talib.RSI(x['close'].values, timeperiod=9), index=x.index)
    # )

    # Trailing 5-day and 20-day simple returns.
    df['return_5'] = grouped['close'].apply(lambda x: x / x.shift(5) - 1)
    # df['return_10'] = grouped['close'].apply(lambda x: x / x.shift(10) - 1)
    df['return_20'] = grouped['close'].apply(lambda x: x / x.shift(20) - 1)

    # df['avg_close_5'] = grouped['close'].apply(lambda x: x.rolling(window=5).mean() / x)

    # Rolling standard deviation of daily close-to-close returns.
    df['std_return_5'] = grouped['close'].apply(lambda x: x.pct_change().rolling(window=5).std())
    # df['std_return_15'] = grouped['close'].apply(lambda x: x.pct_change().rolling(window=15).std())
    # df['std_return_25'] = grouped['close'].apply(lambda x: x.pct_change().rolling(window=25).std())
    df['std_return_90'] = grouped['close'].apply(lambda x: x.pct_change().rolling(window=90).std())
    # Same 90-day std but on the series lagged 10 days.
    df['std_return_90_2'] = grouped['close'].apply(lambda x: x.shift(10).pct_change().rolling(window=90).std())

    # EMAs of the close at several horizons (internal inputs; underscore
    # prefix marks them as temporaries but they are not dropped below).
    df['_ema_5'] = grouped['close'].apply(
        lambda x: pd.Series(talib.EMA(x.values, timeperiod=5), index=x.index)
    )
    df['_ema_13'] = grouped['close'].apply(
        lambda x: pd.Series(talib.EMA(x.values, timeperiod=13), index=x.index)
    )
    df['_ema_20'] = grouped['close'].apply(
        lambda x: pd.Series(talib.EMA(x.values, timeperiod=20), index=x.index)
    )
    df['_ema_60'] = grouped['close'].apply(
        lambda x: pd.Series(talib.EMA(x.values, timeperiod=60), index=x.index)
    )

    # act_factor1..4: EMA slope expressed as an angle (arctan of the percent
    # change, 57.3 converts radians to degrees), scaled per horizon.
    df['act_factor1'] = grouped['_ema_5'].apply(
        lambda x: np.arctan((x / x.shift(1) - 1) * 100) * 57.3 / 50
    )
    df['act_factor2'] = grouped['_ema_13'].apply(
        lambda x: np.arctan((x / x.shift(1) - 1) * 100) * 57.3 / 40
    )
    df['act_factor3'] = grouped['_ema_20'].apply(
        lambda x: np.arctan((x / x.shift(1) - 1) * 100) * 57.3 / 21
    )
    df['act_factor4'] = grouped['_ema_60'].apply(
        lambda x: np.arctan((x / x.shift(1) - 1) * 100) * 57.3 / 10
    )

    # Cross-sectional percentile ranks per trade_date.
    df['rank_act_factor1'] = df.groupby('trade_date', group_keys=False)['act_factor1'].rank(ascending=False, pct=True)
    df['rank_act_factor2'] = df.groupby('trade_date', group_keys=False)['act_factor2'].rank(ascending=False, pct=True)
    df['rank_act_factor3'] = df.groupby('trade_date', group_keys=False)['act_factor3'].rank(ascending=False, pct=True)

    df['log(circ_mv)'] = np.log(df['circ_mv'])

    window_high_volume = 5
    window_close_stddev = 20
    period_delta = 5

    # Rolling covariance of high price vs volume, per stock.
    def calculate_rolling_cov(group):
        return group['high'].rolling(window_high_volume).cov(group['vol'])

    df['cov'] = grouped.apply(calculate_rolling_cov)

    # 5-day difference of that covariance, per stock.
    def calculate_delta_cov(group):
        return group['cov'].diff(period_delta)

    df['delta_cov'] = grouped.apply(calculate_delta_cov)

    # 20-day rolling std of the close, per stock.
    def calculate_stddev_close(group):
        return group['close'].rolling(window_close_stddev).std()

    df['_stddev_close'] = grouped.apply(calculate_stddev_close)
    df['_rank_stddev'] = df.groupby('trade_date')['_stddev_close'].rank(pct=True)
    # Alpha#22-style factor: negative delta-cov weighted by the volatility rank.
    df['alpha_22_improved'] = -1 * df['delta_cov'] * df['_rank_stddev']

    # Intraday close position within the day's range (0 when high == low).
    df['alpha_003'] = np.where(df['high'] != df['low'],
                               (df['close'] - df['open']) / (df['high'] - df['low']),
                               0)

    # 5-day close/volume correlation, then cross-sectional rank per day.
    df['alpha_007'] = grouped.apply(lambda x: x['close'].rolling(5).corr(x['vol']))
    df['alpha_007'] = df.groupby('trade_date', group_keys=False)['alpha_007'].rank(ascending=True, pct=True)

    # 5-day minus 20-day close sum, then cross-sectional rank per day.
    df['alpha_013'] = grouped['close'].transform(lambda x: x.rolling(5).sum() - x.rolling(20).sum())
    df['alpha_013'] = df.groupby('trade_date', group_keys=False)['alpha_013'].rank(ascending=True, pct=True)

    # Limit-up / limit-down flags and their 10-day counts.
    df['cat_up_limit'] = (df['close'] == df['up_limit'])
    df['cat_down_limit'] = (df['close'] == df['down_limit'])
    df['up_limit_count_10d'] = grouped['cat_up_limit'].rolling(window=10, min_periods=1).sum().reset_index(level=0,
                                                                                                           drop=True)
    df['down_limit_count_10d'] = grouped['cat_down_limit'].rolling(window=10, min_periods=1).sum().reset_index(level=0,
                                                                                                               drop=True)

    # 3. Consecutive limit-up/down day counts.
    def calculate_consecutive_limits(series):
        """
        Length of the current run of True values in a boolean flag series.

        NOTE(review): consecutive_up and consecutive_down are computed with
        identical expressions (apparent copy-paste); only [0] is used below.
        """
        consecutive_up = series * (series.groupby((series != series.shift()).cumsum()).cumcount() + 1)
        consecutive_down = series * (series.groupby((series != series.shift()).cumsum()).cumcount() + 1)
        return consecutive_up, consecutive_down

    # Consecutive limit-up day count.
    df['consecutive_up_limit'] = grouped['cat_up_limit'].apply(
        lambda x: calculate_consecutive_limits(x)[0]
    )

    # Volume-backed breakout above the 85%-cost line.
    df['vol_break'] = np.where((df['close'] > df['cost_85pct']) & (df['volume_ratio'] > 2), 1, 0)

    # 5-day rate of change of the weighted-average cost.
    df['weight_roc5'] = grouped['weight_avg'].apply(lambda x: x.pct_change(5))

    # 10-day correlation between price returns and cost-base returns.
    def rolling_corr(group):
        roc_close = group['close'].pct_change()
        roc_weight = group['weight_avg'].pct_change()
        return roc_close.rolling(10).corr(roc_weight)

    df['price_cost_divergence'] = grouped.apply(rolling_corr)

    # Small-cap chip concentration: cost spread scaled by inverse log market cap.
    df['smallcap_concentration'] = (1 / df['log(circ_mv)']) * (df['cost_85pct'] - df['cost_15pct'])

    # 16. Chip stability index: 20-day coefficient of variation of weight_avg.
    df['weight_std20'] = grouped['weight_avg'].apply(lambda x: x.rolling(20).std())
    df['cost_stability'] = df['weight_std20'] / grouped['weight_avg'].transform(lambda x: x.rolling(20).mean())

    # 17. Cost-band breakout: days in the last 5 with close above cost_95pct.
    df['high_cost_break_days'] = grouped.apply(lambda g: g['close'].gt(g['cost_95pct']).rolling(5).sum())

    # 20. Chip/liquidity risk: cost spread scaled by inverse 10-day mean volume.
    df['liquidity_risk'] = (df['cost_95pct'] - df['cost_5pct']) * (
            1 / grouped['vol'].transform(lambda x: x.rolling(10).mean()))

    # 7. Market-cap-adjusted turnover volatility.
    df['turnover_std'] = grouped['turnover_rate'].transform(lambda x: x.rolling(window=20).std())
    df['mv_volatility'] = grouped.apply(lambda x: x['turnover_std'] / x['log(circ_mv)'])

    # 8. Market-cap-adjusted volume growth.
    df['volume_growth'] = grouped['vol'].pct_change(periods=20)
    df['mv_growth'] = df['volume_growth'] / df['log(circ_mv)']

    # AR indicator: 3-day buying pressure vs selling pressure around the open.
    df["ar"] = grouped.apply(
        lambda x: (x["high"].div(x["open"]).rolling(3).sum()) / (x["open"].div(x["low"]).rolling(3).sum()) * 100)

    # BR indicator, built from the previous close.
    # NOTE(review): this overwrites any pre-existing 'pre_close' column and
    # then drops it below — if the frame carried a pre_close from upstream
    # merges it is clobbered and removed here; confirm intended.
    df["pre_close"] = grouped["close"].shift(1)
    df["br_up"] = (df["high"] - df["pre_close"]).clip(lower=0)
    df["br_down"] = (df["pre_close"] - df["low"]).clip(lower=0)
    df["br"] = grouped.apply(lambda x: (x["br_up"].rolling(3).sum()) / (x["br_down"].rolling(3).sum()) * 100)

    # ARBR spread; intermediates dropped afterwards.
    df['arbr'] = df['ar'] - df['br']
    df.drop(columns=["pre_close", "br_up", "br_down", 'ar', 'br'], inplace=True)

    # Drop remaining temporaries (underscore EMA columns are intentionally kept
    # — NOTE(review): confirm whether _ema_* / _stddev_close / _rank_stddev
    # should also be dropped before training).
    df.drop(columns=['weight_std20'], inplace=True, errors='ignore')
    df.drop(
        columns=['_is_positive', '_is_negative', '_pos_returns', '_neg_returns', '_pos_returns_sq', '_neg_returns_sq'],
        inplace=True, errors='ignore')
    # Everything not present on entry is reported as a newly created factor.
    new_columns = [col for col in df.columns.tolist()[:] if col not in old_columns]

    return df, new_columns
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def get_simple_factor(df):
    """Append row-wise (non-rolling) derived factor columns to ``df``.

    Expects daily bar, money-flow and chip-distribution (cost percentile)
    columns to already be present on the frame.

    Parameters
    ----------
    df : pd.DataFrame
        Per-stock daily data with at least: OHLC, ``vol``, ``volume_ratio``,
        ``pct_chg``, ``turnover_rate``, money-flow buy/sell volumes and
        ``net_mf_vol``, ``circ_mv``, chip fields (``cost_*pct``,
        ``weight_avg``, ``winner_rate``, ``his_high``/``his_low``) and the
        precomputed helpers ``vol_spike``, ``obv``, ``maobv_6``,
        ``std_return_*``, ``act_factor1..4`` and ``atr_14``.

    Returns
    -------
    (pd.DataFrame, list[str])
        The (ts_code, trade_date)-sorted frame with the new columns, and the
        list of newly added column names.
    """
    # Snapshot incoming columns so the new ones can be reported at the end.
    old_columns = df.columns.tolist()[:]
    df = df.sort_values(by=['ts_code', 'trade_date'])

    # Volume momentum blended with turnover deviation (alpha = blend weight).
    alpha = 0.5
    df['momentum_factor'] = df['volume_change_rate'] + alpha * df['turnover_deviation']
    # Price/volume resonance: volume expansion on a strong move.
    df['resonance_factor'] = df['volume_ratio'] * df['pct_chg']
    df['log_close'] = np.log(df['close'])

    # Flag: today's volume more than twice the spike baseline.
    df['cat_vol_spike'] = df['vol'] > 2 * df['vol_spike']

    # Upper / lower candle shadows, normalized by close.
    df['up'] = (df['high'] - df[['close', 'open']].max(axis=1)) / df['close']
    df['down'] = (df[['close', 'open']].min(axis=1) - df['low']) / df['close']

    # OBV minus its 6-day moving average.
    df['obv-maobv_6'] = df['obv'] - df['maobv_6']

    # Short- vs long-horizon volatility ratio.
    df['std_return_5 / std_return_90'] = df['std_return_5'] / df['std_return_90']
    # df['std_return_5 / std_return_25'] = df['std_return_5'] / df['std_return_25']

    # Volatility term-structure difference.
    df['std_return_90 - std_return_90_2'] = df['std_return_90'] - df['std_return_90_2']

    # Ordering flags between successive activity factors.
    # df['cat_af1'] = df['act_factor1'] > 0
    df['cat_af2'] = df['act_factor2'] > df['act_factor1']
    df['cat_af3'] = df['act_factor3'] > df['act_factor2']
    df['cat_af4'] = df['act_factor4'] > df['act_factor3']

    # Compute act_factor5 (sum) and act_factor6 (normalized difference).
    # NOTE(review): act_factor6 is NaN when both inputs are zero — confirm
    # downstream handling.
    df['act_factor5'] = df['act_factor1'] + df['act_factor2'] + df['act_factor3'] + df['act_factor4']
    df['act_factor6'] = (df['act_factor1'] - df['act_factor2']) / np.sqrt(
        df['act_factor1'] ** 2 + df['act_factor2'] ** 2)

    # Buy-volume shares of net money-flow.  NOTE(review): net_mf_vol can be
    # zero or negative, so these ratios may be inf/NaN or sign-flipped —
    # confirm intent.
    df['active_buy_volume_large'] = df['buy_lg_vol'] / df['net_mf_vol']
    df['active_buy_volume_big'] = df['buy_elg_vol'] / df['net_mf_vol']
    df['active_buy_volume_small'] = df['buy_sm_vol'] / df['net_mf_vol']

    # Net large / extra-large order flow relative to total net flow.
    df['buy_lg_vol_minus_sell_lg_vol'] = (df['buy_lg_vol'] - df['sell_lg_vol']) / df['net_mf_vol']
    df['buy_elg_vol_minus_sell_elg_vol'] = (df['buy_elg_vol'] - df['sell_elg_vol']) / df['net_mf_vol']

    # Log circulating market cap, reused by the size-adjusted factors below.
    df['log(circ_mv)'] = np.log(df['circ_mv'])

    # Chip-control strength: middle cost band vs historical price range.
    df['ctrl_strength'] = (df['cost_85pct'] - df['cost_15pct']) / (df['his_high'] - df['his_low'])

    # Position of close within the lower half of the cost distribution.
    df['low_cost_dev'] = (df['close'] - df['cost_5pct']) / (df['cost_50pct'] - df['cost_5pct'])

    # Skew of the cost distribution (upper tail vs lower tail width).
    df['asymmetry'] = (df['cost_95pct'] - df['cost_50pct']) / (df['cost_50pct'] - df['cost_5pct'])

    # Turnover weighted by how tightly the chips are locked.
    df['lock_factor'] = df['turnover_rate'] * (
            1 - (df['cost_95pct'] - df['cost_5pct']) / (df['his_high'] - df['his_low']))

    # Breakout above the 85% cost level on heavy relative volume.
    df['cat_vol_break'] = (df['close'] > df['cost_85pct']) & (df['volume_ratio'] > 2)

    # Cost-band width normalized by ATR.
    df['cost_atr_adj'] = (df['cost_95pct'] - df['cost_5pct']) / df['atr_14']

    # 12. Small-cap chip concentration.
    df['smallcap_concentration'] = (1 / df['log(circ_mv)']) * (df['cost_85pct'] - df['cost_15pct'])

    # "Golden resonance": above average cost, high volume ratio, mostly
    # profitable holders.
    df['cat_golden_resonance'] = ((df['close'] > df['weight_avg']) &
                                  (df['volume_ratio'] > 1.5) &
                                  (df['winner_rate'] > 0.7))

    # Size-adjusted liquidity / momentum variants.
    # NOTE(review): 'mv_adjusted_volume' and 'nonlinear_mv_volume' use the
    # identical formula, as do 'mv_turnover_ratio' and
    # 'mv_weighted_turnover' — likely copy/paste duplication; kept as-is for
    # backward compatibility of the emitted column set.
    df['mv_turnover_ratio'] = df['turnover_rate'] / df['log(circ_mv)']

    df['mv_adjusted_volume'] = df['vol'] / df['log(circ_mv)']

    df['mv_weighted_turnover'] = df['turnover_rate'] * (1 / df['log(circ_mv)'])

    df['nonlinear_mv_volume'] = df['vol'] / df['log(circ_mv)']

    df['mv_volume_ratio'] = df['volume_ratio'] / df['log(circ_mv)']

    df['mv_momentum'] = df['turnover_rate'] * df['volume_ratio'] / df['log(circ_mv)']

    # Drop private scratch columns (underscore-prefixed).
    drop_columns = [col for col in df.columns if col.startswith('_')]
    df.drop(columns=drop_columns, inplace=True, errors='ignore')

    new_columns = [col for col in df.columns.tolist()[:] if col not in old_columns]
    return df, new_columns
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 6,
|
|||
|
|
"id": "53f86ddc0677a6d7",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T12:47:15.944254Z",
|
|||
|
|
"start_time": "2025-04-03T12:47:10.826179Z"
|
|||
|
|
},
|
|||
|
|
"jupyter": {
|
|||
|
|
"source_hidden": true
|
|||
|
|
},
|
|||
|
|
"scrolled": true
|
|||
|
|
},
|
|||
|
|
"outputs": [],
|
|||
|
|
"source": [
|
|||
|
|
"from main.utils.factor import get_act_factor\n",
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def read_industry_data(h5_filename):
    """Load SW industry daily bars and derive industry-level factor columns.

    Reads the ``sw_daily`` key from the given HDF5 file, computes OBV,
    5/20-day returns and activity factors per industry index, then daily
    cross-sectional return percentiles.  Output factor columns are prefixed
    with ``industry_`` and the industry code is renamed to ``cat_l2_code``
    so the frame can be merged onto the stock-level data.

    NOTE(review): this cell-level definition shadows the
    ``read_industry_data`` imported from ``main.utils.factor`` at the top
    of the notebook.
    """
    industry_data = pd.read_hdf(h5_filename, key='sw_daily', columns=[
        'ts_code', 'trade_date', 'open', 'close', 'high', 'low', 'pe', 'pb', 'vol'
    ])
    industry_data = industry_data.sort_values(by=['ts_code', 'trade_date'])
    # Renumber rows after the sort.  (The original called ``reindex()`` with
    # no arguments, which is a no-op that returns the frame unchanged.)
    industry_data = industry_data.reset_index(drop=True)
    industry_data['trade_date'] = pd.to_datetime(industry_data['trade_date'], format='%Y%m%d')

    grouped = industry_data.groupby('ts_code', group_keys=False)
    # On-balance volume per industry index (index preserved for alignment).
    industry_data['obv'] = grouped.apply(
        lambda x: pd.Series(talib.OBV(x['close'].values, x['vol'].values), index=x.index)
    )
    industry_data['return_5'] = grouped['close'].apply(lambda x: x / x.shift(5) - 1)
    industry_data['return_20'] = grouped['close'].apply(lambda x: x / x.shift(20) - 1)

    industry_data = get_act_factor(industry_data, cat=False)
    industry_data = industry_data.sort_values(by=['trade_date', 'ts_code'])

    # Daily cross-sectional percentile rank of industry returns.
    industry_data['return_5_percentile'] = industry_data.groupby('trade_date')['return_5'].transform(
        lambda x: x.rank(pct=True))
    industry_data['return_20_percentile'] = industry_data.groupby('trade_date')['return_20'].transform(
        lambda x: x.rank(pct=True))
    industry_data = industry_data.drop(columns=['open', 'close', 'high', 'low', 'pe', 'pb', 'vol'])

    # Prefix the factor columns and rename the key for the stock-level merge.
    industry_data = industry_data.rename(
        columns={col: f'industry_{col}' for col in industry_data.columns if col not in ['ts_code', 'trade_date']})
    industry_data = industry_data.rename(columns={'ts_code': 'cat_l2_code'})
    return industry_data


industry_df = read_industry_data('../../data/sw_daily.h5')
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 7,
|
|||
|
|
"id": "dbe2fd8021b9417f",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T12:47:15.969344Z",
|
|||
|
|
"start_time": "2025-04-03T12:47:15.963327Z"
|
|||
|
|
}
|
|||
|
|
},
|
|||
|
|
"outputs": [
|
|||
|
|
{
|
|||
|
|
"name": "stdout",
|
|||
|
|
"output_type": "stream",
|
|||
|
|
"text": [
|
|||
|
|
"['ts_code', 'open', 'close', 'high', 'low', 'circ_mv', 'is_st', 'up_limit', 'down_limit', 'buy_sm_vol', 'sell_sm_vol', 'buy_lg_vol', 'sell_lg_vol', 'buy_elg_vol', 'sell_elg_vol', 'net_mf_vol', 'his_low', 'his_high', 'cost_5pct', 'cost_15pct', 'cost_50pct', 'cost_85pct', 'cost_95pct', 'weight_avg', 'in_date']\n"
|
|||
|
|
]
|
|||
|
|
}
|
|||
|
|
],
|
|||
|
|
"source": [
|
|||
|
|
# Columns carried over from the raw inputs: drop tuned feature inputs,
# anything already present in the index data, and chip ('cyq') fields.
origin_columns = [
    col for col in df.columns.tolist()
    if col not in ['turnover_rate', 'pe_ttm', 'volume_ratio', 'vol', 'pct_chg', 'l2_code', 'winner_rate']
    and col not in index_data.columns
    and 'cyq' not in col
]
print(origin_columns)
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 8,
|
|||
|
|
"id": "85c3e3d0235ffffa",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T12:47:16.089879Z",
|
|||
|
|
"start_time": "2025-04-03T12:47:15.990101Z"
|
|||
|
|
}
|
|||
|
|
},
|
|||
|
|
"outputs": [
|
|||
|
|
{
|
|||
|
|
"name": "stdout",
|
|||
|
|
"output_type": "stream",
|
|||
|
|
"text": [
|
|||
|
|
" ts_code trade_date is_st\n",
|
|||
|
|
"29 000037.SZ 2017-01-03 True\n",
|
|||
|
|
"72 000408.SZ 2017-01-03 True\n",
|
|||
|
|
"95 000504.SZ 2017-01-03 True\n",
|
|||
|
|
"96 000505.SZ 2017-01-03 True\n",
|
|||
|
|
"101 000511.SZ 2017-01-03 True\n",
|
|||
|
|
"... ... ... ...\n",
|
|||
|
|
"8513971 603869.SH 2025-04-09 True\n",
|
|||
|
|
"8513976 603879.SH 2025-04-09 True\n",
|
|||
|
|
"8514023 603959.SH 2025-04-09 True\n",
|
|||
|
|
"8514406 688282.SH 2025-04-09 True\n",
|
|||
|
|
"8514410 688287.SH 2025-04-09 True\n",
|
|||
|
|
"\n",
|
|||
|
|
"[193456 rows x 3 columns]\n"
|
|||
|
|
]
|
|||
|
|
}
|
|||
|
|
],
|
|||
|
|
"source": [
|
|||
|
|
# Inspect the rows currently flagged as ST (special treatment).
print(df.loc[df['is_st'], ['ts_code', 'trade_date', 'is_st']])
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 9,
|
|||
|
|
"id": "92d84ce15a562ec6",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T13:08:01.612695Z",
|
|||
|
|
"start_time": "2025-04-03T12:47:16.121802Z"
|
|||
|
|
}
|
|||
|
|
},
|
|||
|
|
"outputs": [
|
|||
|
|
{
|
|||
|
|
"name": "stdout",
|
|||
|
|
"output_type": "stream",
|
|||
|
|
"text": [
|
|||
|
|
"Index(['ts_code', 'trade_date', 'open', 'close', 'high', 'low', 'vol',\n",
|
|||
|
|
" 'pct_chg', 'turnover_rate', 'pe_ttm', 'circ_mv', 'volume_ratio',\n",
|
|||
|
|
" 'is_st', 'up_limit', 'down_limit', 'buy_sm_vol', 'sell_sm_vol',\n",
|
|||
|
|
" 'buy_lg_vol', 'sell_lg_vol', 'buy_elg_vol', 'sell_elg_vol',\n",
|
|||
|
|
" 'net_mf_vol', 'his_low', 'his_high', 'cost_5pct', 'cost_15pct',\n",
|
|||
|
|
" 'cost_50pct', 'cost_85pct', 'cost_95pct', 'weight_avg', 'winner_rate',\n",
|
|||
|
|
" 'l2_code', 'lg_elg_net_buy_vol', 'flow_lg_elg_intensity',\n",
|
|||
|
|
" 'sm_net_buy_vol', 'flow_divergence_diff', 'flow_divergence_ratio',\n",
|
|||
|
|
" 'total_buy_vol', 'lg_elg_buy_prop', 'flow_struct_buy_change',\n",
|
|||
|
|
" 'lg_elg_net_buy_vol_change', 'flow_lg_elg_accel',\n",
|
|||
|
|
" 'chip_concentration_range', 'chip_skewness', 'floating_chip_proxy',\n",
|
|||
|
|
" 'cost_support_15pct_change', 'cat_winner_price_zone',\n",
|
|||
|
|
" 'flow_chip_consistency', 'profit_taking_vs_absorb', '_is_positive',\n",
|
|||
|
|
" '_is_negative', 'cat_is_positive', '_pos_returns', '_neg_returns',\n",
|
|||
|
|
" '_pos_returns_sq', '_neg_returns_sq', 'upside_vol', 'downside_vol',\n",
|
|||
|
|
" 'vol_ratio', 'return_skew', 'return_kurtosis', 'volume_change_rate',\n",
|
|||
|
|
" 'cat_volume_breakout', 'turnover_deviation', 'cat_turnover_spike',\n",
|
|||
|
|
" 'avg_volume_ratio', 'cat_volume_ratio_breakout', 'vol_spike',\n",
|
|||
|
|
" 'vol_std_5', 'atr_14', 'atr_6', 'obv'],\n",
|
|||
|
|
" dtype='object')\n",
|
|||
|
|
"Calculating lg_flow_mom_corr_20_60...\n",
|
|||
|
|
"Finished lg_flow_mom_corr_20_60.\n",
|
|||
|
|
"Calculating lg_buy_consolidation_20...\n",
|
|||
|
|
"Finished lg_buy_consolidation_20.\n",
|
|||
|
|
"Calculating lg_flow_accel...\n",
|
|||
|
|
"Finished lg_flow_accel.\n",
|
|||
|
|
"Calculating profit_pressure...\n",
|
|||
|
|
"Finished profit_pressure.\n",
|
|||
|
|
"Calculating underwater_resistance...\n",
|
|||
|
|
"Finished underwater_resistance.\n",
|
|||
|
|
"Calculating cost_conc_std_20...\n",
|
|||
|
|
"Finished cost_conc_std_20.\n",
|
|||
|
|
"Calculating profit_decay_20...\n",
|
|||
|
|
"Finished profit_decay_20.\n",
|
|||
|
|
"Calculating vol_amp_loss_20...\n",
|
|||
|
|
"Finished vol_amp_loss_20.\n",
|
|||
|
|
"Calculating vol_drop_profit_cnt_5...\n",
|
|||
|
|
"Finished vol_drop_profit_cnt_5.\n",
|
|||
|
|
"Calculating lg_flow_vol_interact_20...\n",
|
|||
|
|
"Finished lg_flow_vol_interact_20.\n",
|
|||
|
|
"Calculating cost_break_confirm_cnt_5...\n",
|
|||
|
|
"Finished cost_break_confirm_cnt_5.\n",
|
|||
|
|
"Calculating atr_norm_channel_pos_14...\n",
|
|||
|
|
"Finished atr_norm_channel_pos_14.\n",
|
|||
|
|
"Calculating turnover_diff_skew_20...\n",
|
|||
|
|
"Finished turnover_diff_skew_20.\n",
|
|||
|
|
"Calculating lg_sm_flow_diverge_20...\n",
|
|||
|
|
"Finished lg_sm_flow_diverge_20.\n",
|
|||
|
|
"Calculating pullback_strong_20_20...\n",
|
|||
|
|
"Finished pullback_strong_20_20.\n",
|
|||
|
|
"Calculating vol_wgt_hist_pos_20...\n",
|
|||
|
|
"Finished vol_wgt_hist_pos_20.\n",
|
|||
|
|
"Calculating vol_adj_roc_20...\n",
|
|||
|
|
"Finished vol_adj_roc_20.\n",
|
|||
|
|
"Calculating intraday_lg_flow_corr_20 (Placeholder - complex implementation)...\n",
|
|||
|
|
"Finished intraday_lg_flow_corr_20 (Placeholder).\n",
|
|||
|
|
"Calculating cap_neutral_cost_metric (Placeholder - requires statsmodels)...\n",
|
|||
|
|
"Finished cap_neutral_cost_metric (Placeholder).\n",
|
|||
|
|
"Calculating hurst_net_mf_vol_60 (Placeholder - requires hurst library)...\n",
|
|||
|
|
"Error: 'hurst' library not installed. Cannot calculate factor.\n",
|
|||
|
|
"Finished hurst_net_mf_vol_60 (Placeholder).\n",
|
|||
|
|
"Calculating cs_rank_net_lg_flow_val...\n",
|
|||
|
|
"Finished cs_rank_net_lg_flow_val.\n",
|
|||
|
|
"Calculating cs_rank_flow_divergence...\n",
|
|||
|
|
"Finished cs_rank_flow_divergence.\n",
|
|||
|
|
"Calculating cs_rank_ind_adj_lg_flow...\n",
|
|||
|
|
"Error calculating cs_rank_ind_adj_lg_flow: Missing 'cat_l2_code' column. Assigning NaN.\n",
|
|||
|
|
"Calculating cs_rank_elg_buy_ratio...\n",
|
|||
|
|
"Finished cs_rank_elg_buy_ratio.\n",
|
|||
|
|
"Calculating cs_rank_rel_profit_margin...\n",
|
|||
|
|
"Finished cs_rank_rel_profit_margin.\n",
|
|||
|
|
"Calculating cs_rank_cost_breadth...\n",
|
|||
|
|
"Finished cs_rank_cost_breadth.\n",
|
|||
|
|
"Calculating cs_rank_dist_to_upper_cost...\n",
|
|||
|
|
"Finished cs_rank_dist_to_upper_cost.\n",
|
|||
|
|
"Calculating cs_rank_winner_rate...\n",
|
|||
|
|
"Finished cs_rank_winner_rate.\n",
|
|||
|
|
"Calculating cs_rank_intraday_range...\n",
|
|||
|
|
"Finished cs_rank_intraday_range.\n",
|
|||
|
|
"Calculating cs_rank_close_pos_in_range...\n",
|
|||
|
|
"Finished cs_rank_close_pos_in_range.\n",
|
|||
|
|
"Calculating cs_rank_opening_gap...\n",
|
|||
|
|
"Error calculating cs_rank_opening_gap: Missing 'pre_close' column. Assigning NaN.\n",
|
|||
|
|
"Calculating cs_rank_pos_in_hist_range...\n",
|
|||
|
|
"Finished cs_rank_pos_in_hist_range.\n",
|
|||
|
|
"Calculating cs_rank_vol_x_profit_margin...\n",
|
|||
|
|
"Finished cs_rank_vol_x_profit_margin.\n",
|
|||
|
|
"Calculating cs_rank_lg_flow_price_concordance...\n",
|
|||
|
|
"Finished cs_rank_lg_flow_price_concordance.\n",
|
|||
|
|
"Calculating cs_rank_turnover_per_winner...\n",
|
|||
|
|
"Finished cs_rank_turnover_per_winner.\n",
|
|||
|
|
"Calculating cs_rank_ind_cap_neutral_pe (Placeholder - requires statsmodels)...\n",
|
|||
|
|
"Finished cs_rank_ind_cap_neutral_pe (Placeholder).\n",
|
|||
|
|
"Calculating cs_rank_volume_ratio...\n",
|
|||
|
|
"Finished cs_rank_volume_ratio.\n",
|
|||
|
|
"Calculating cs_rank_elg_buy_sell_sm_ratio...\n",
|
|||
|
|
"Finished cs_rank_elg_buy_sell_sm_ratio.\n",
|
|||
|
|
"Calculating cs_rank_cost_dist_vol_ratio...\n",
|
|||
|
|
"Finished cs_rank_cost_dist_vol_ratio.\n",
|
|||
|
|
"Calculating cs_rank_size...\n",
|
|||
|
|
"Finished cs_rank_size.\n",
|
|||
|
|
"<class 'pandas.core.frame.DataFrame'>\n",
|
|||
|
|
"Index: 3830570 entries, 0 to 3830569\n",
|
|||
|
|
"Columns: 176 entries, ts_code to cs_rank_size\n",
|
|||
|
|
"dtypes: bool(12), datetime64[ns](1), float64(157), int32(3), int64(1), object(2)\n",
|
|||
|
|
"memory usage: 4.7+ GB\n",
|
|||
|
|
"None\n"
|
|||
|
|
]
|
|||
|
|
}
|
|||
|
|
],
|
|||
|
|
"source": [
|
|||
|
|
"import numpy as np\n",
|
|||
|
|
"\n",
|
|||
|
|
def filter_data(df):
    """Keep non-ST main-board stocks traded from 2020 onwards.

    Excludes ST names, Beijing-exchange tickers (suffix ``BJ`` / prefix
    ``8``), ChiNext (prefix ``30``) and STAR Market (prefix ``68``), then
    drops the bookkeeping ``in_date`` column if present.  Returns a fresh,
    re-indexed frame; the input is not modified.
    """
    code = df['ts_code']
    keep = (
        ~df['is_st']
        & ~code.str.endswith('BJ')
        & ~code.str.startswith('30')
        & ~code.str.startswith('68')
        & ~code.str.startswith('8')
        & (df['trade_date'] >= '2020-01-01')
    )
    df = df[keep]
    if 'in_date' in df.columns:
        df = df.drop(columns=['in_date'])
    return df.reset_index(drop=True)
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
# --- Build the full per-stock factor table --------------------------------
df = filter_data(df)
# df = get_technical_factor(df)
# df = get_act_factor(df)
# df = get_money_flow_factor(df)
# df = get_alpha_factor(df)
# df = get_limit_factor(df)
# df = get_cyp_perf_factor(df)
# df = get_mv_factors(df)
df, _ = get_rolling_factor(df)
df, _ = get_simple_factor(df)

# Rename the industry columns BEFORE the cross-sectional factor calls:
# cs_rank_industry_adj_lg_flow needs 'cat_l2_code', and with the rename at
# the end of this cell it logged "Missing 'cat_l2_code' column" and left
# that factor all-NaN.
df = df.rename(columns={'l1_code': 'cat_l1_code'})
df = df.rename(columns={'l2_code': 'cat_l2_code'})

from main.factor.factor import *  # NOTE(review): star import — prefer explicit names

# Time-series / interaction factors (each mutates df in place).
lg_flow_mom_corr(df, N=20, M=60)
lg_buy_consolidation(df, N=20)
lg_flow_accel(df)
profit_pressure(df)
underwater_resistance(df)
cost_conc_std(df, N=20)
profit_decay(df, N=20)
vol_amp_loss(df, N=20)
vol_drop_profit_cnt(df, N=20, M=5)
lg_flow_vol_interact(df, N=20)
cost_break_confirm_cnt(df, M=5)
atr_norm_channel_pos(df, N=14)
turnover_diff_skew(df, N=20)
lg_sm_flow_diverge(df, N=20)
pullback_strong(df, N=20, M=20)
vol_wgt_hist_pos(df, N=20)
vol_adj_roc(df, N=20)
intraday_lg_flow_corr(df, N=20)  # Placeholder
cap_neutral_cost_metric(df)  # Placeholder
hurst_exponent_flow(df, N=60)  # Placeholder
# calculate_complex_factor(df)

# Cross-sectional (per-day rank) factors.
cs_rank_net_lg_flow_val(df)
cs_rank_flow_divergence(df)
cs_rank_industry_adj_lg_flow(df)  # Needs cat_l2_code (renamed above)
cs_rank_elg_buy_ratio(df)
cs_rank_rel_profit_margin(df)
cs_rank_cost_breadth(df)
cs_rank_dist_to_upper_cost(df)
cs_rank_winner_rate(df)
cs_rank_intraday_range(df)
cs_rank_close_pos_in_range(df)
# NOTE(review): still logs "Missing 'pre_close'" — get_rolling_factor drops
# its temporary 'pre_close' column before this point, so this factor stays
# NaN; confirm where pre_close should come from.
cs_rank_opening_gap(df)  # Needs pre_close
cs_rank_pos_in_hist_range(df)  # Needs his_low, his_high
cs_rank_vol_x_profit_margin(df)
cs_rank_lg_flow_price_concordance(df)
cs_rank_turnover_per_winner(df)
cs_rank_ind_cap_neutral_pe(df)  # Placeholder - needs external libraries
cs_rank_volume_ratio(df)  # Needs volume_ratio
cs_rank_elg_buy_sell_sm_ratio(df)
cs_rank_cost_dist_vol_ratio(df)  # Needs volume_ratio
cs_rank_size(df)  # Needs circ_mv

# df = df.merge(industry_df, on=['cat_l2_code', 'trade_date'], how='left')
# df = df.merge(index_data, on='trade_date', how='left')

print(df.info())
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 10,
|
|||
|
|
"id": "b87b938028afa206",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T13:08:03.658725Z",
|
|||
|
|
"start_time": "2025-04-03T13:08:02.469611Z"
|
|||
|
|
}
|
|||
|
|
},
|
|||
|
|
"outputs": [],
|
|||
|
|
"source": [
|
|||
|
|
"from scipy.stats import ks_2samp, wasserstein_distance\n",
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def remove_shifted_features(train_data, test_data, feature_columns, ks_threshold=0.05, wasserstein_threshold=0.1,
                            importance_threshold=0.05):
    """Flag numeric features whose train/test distributions drift apart.

    A feature is dropped when the two-sample KS test rejects equality
    (p-value below ``ks_threshold``) OR the Wasserstein distance exceeds
    ``wasserstein_threshold``.  ``importance_threshold`` is currently unused.

    Returns ``(kept_features, dropped_features)``.
    """
    numeric = [
        col for col in train_data.select_dtypes(include=['float64', 'int64']).columns
        if col in feature_columns
    ]

    dropped_features = []
    for feature in numeric:
        _, p_value = ks_2samp(train_data[feature], test_data[feature])
        distance = wasserstein_distance(train_data[feature], test_data[feature])
        if p_value < ks_threshold or distance > wasserstein_threshold:
            dropped_features.append(feature)

    print(f"检测到 {len(dropped_features)} 个可能漂移的特征: {dropped_features}")

    # Final selection: everything that did not drift.
    filtered_features = [f for f in feature_columns if f not in dropped_features]

    return filtered_features, dropped_features
|
|||
|
|
"\n"
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 11,
|
|||
|
|
"id": "f4f16d63ad18d1bc",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T13:08:03.670700Z",
|
|||
|
|
"start_time": "2025-04-03T13:08:03.665739Z"
|
|||
|
|
}
|
|||
|
|
},
|
|||
|
|
"outputs": [],
|
|||
|
|
"source": [
|
|||
|
|
def create_deviation_within_dates(df, feature_columns):
    """Add per-(trade_date, industry) mean-deviation columns.

    For each eligible numeric feature, appends
    ``deviation_mean_<feature> = feature - mean(feature | trade_date, cat_l2_code)``.

    Returns the augmented frame and the extended feature-name list; the
    input ``feature_columns`` list is left unmodified.
    """
    groupby_col = 'cat_l2_code'  # industry grouping key
    ret_feature_columns = list(feature_columns)

    # Numeric features only: skip the date key plus categorical ('cat'),
    # index-level, industry-level, limit-related and chip ('cyq') columns.
    skip_tokens = ('cat', 'index', 'industry', 'limit', 'cyq')
    num_features = [
        col for col in feature_columns
        if col != 'trade_date' and not any(tok in col for tok in skip_tokens)
    ]

    new_columns = {}
    for feature in num_features:
        group_mean = df.groupby(['trade_date', groupby_col])[feature].transform('mean')
        deviation_col_name = f'deviation_mean_{feature}'
        new_columns[deviation_col_name] = df[feature] - group_mean
        ret_feature_columns.append(deviation_col_name)

    # Attach all deviation columns in a single concat (avoids fragmentation).
    df = pd.concat([df, pd.DataFrame(new_columns)], axis=1)

    return df, ret_feature_columns
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 12,
|
|||
|
|
"id": "40e6b68a91b30c79",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T13:08:04.694262Z",
|
|||
|
|
"start_time": "2025-04-03T13:08:03.694904Z"
|
|||
|
|
}
|
|||
|
|
},
|
|||
|
|
"outputs": [],
|
|||
|
|
"source": [
|
|||
|
|
"import pandas as pd\n",
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def remove_outliers_label_percentile(label: pd.Series, lower_percentile: float = 0.01, upper_percentile: float = 0.99,
                                     log=True):
    """Return ``label`` restricted to its [lower, upper] percentile band.

    Raises ``ValueError`` for an invalid percentile pair; when ``log`` is
    true, prints how many observations were removed.
    """
    if not (0 <= lower_percentile < upper_percentile <= 1):
        raise ValueError("Percentile values must satisfy 0 <= lower_percentile < upper_percentile <= 1.")

    lower_bound = label.quantile(lower_percentile)
    upper_bound = label.quantile(upper_percentile)

    # between() is inclusive on both ends — identical to >= / <= filtering.
    filtered_label = label[label.between(lower_bound, upper_bound)]

    if log:
        print(f"Removed {len(label) - len(filtered_label)} outliers.")
    return filtered_label
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def calculate_risk_adjusted_target(df, days=5):
    """Per-stock forward return scaled by forward volatility.

    Entry is next day's open, exit is the close ``days`` ahead; the result
    is that return multiplied by the rolling std of forward returns, with
    +/-inf mapped to NaN.

    NOTE(review): the local name suggests a Sharpe-style ratio, but the
    code MULTIPLIES return by volatility rather than dividing — and the
    inf-scrubbing step only makes sense for a division.  Confirm intent.
    """
    df = df.sort_values(by=['ts_code', 'trade_date'])

    # Forward exit close and next-day entry open, per stock.
    df['future_close'] = df.groupby('ts_code')['close'].shift(-days)
    df['future_open'] = df.groupby('ts_code')['open'].shift(-1)
    df['future_return'] = df['future_close'].sub(df['future_open']).div(df['future_open'])

    # Rolling volatility of the forward return within each stock.
    rolling_std = (
        df.groupby('ts_code')['future_return']
        .rolling(days, min_periods=1)
        .std()
        .reset_index(level=0, drop=True)
    )
    df['future_volatility'] = rolling_std

    target = df['future_return'] * df['future_volatility']
    return target.replace([np.inf, -np.inf], np.nan)
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def calculate_score(df, days=5, lambda_param=1.0):
    """Drawdown-penalized forward-return score per stock.

    score = future_return - lambda_param * max_drawdown, where the drawdown
    is taken over a ``days``-wide close window shifted to look forward.
    Requires a precomputed ``future_return`` column (presumably from
    calculate_risk_adjusted_target — TODO confirm) plus ``pct_chg`` and
    ``close``.  Adds a ``score`` column to ``df`` as a side effect and
    returns that column.
    """
    def calculate_max_drawdown(prices):
        # Classic single-pass max drawdown over a price window.
        peak = prices.iloc[0]  # running high-water mark
        max_drawdown = 0  # worst peak-to-trough decline seen so far

        for price in prices:
            if price > peak:
                peak = price  # new peak
            else:
                drawdown = (peak - price) / peak  # current decline from peak
                max_drawdown = max(max_drawdown, drawdown)  # keep the worst

        return max_drawdown

    def compute_stock_score(stock_df):
        stock_df = stock_df.sort_values(by=['trade_date'])
        future_return = stock_df['future_return']
        # Volatility from the existing pct_chg field.
        # NOTE(review): computed but never used in the score below —
        # confirm whether it was meant to enter the formula.
        volatility = stock_df['pct_chg'].rolling(days).std().shift(-days)
        # Max drawdown over the NEXT `days` closes (rolling window shifted
        # back so each row looks forward).
        max_drawdown = stock_df['close'].rolling(days).apply(calculate_max_drawdown, raw=False).shift(-days)
        score = future_return - lambda_param * max_drawdown
        return score

    # # Ensure the DataFrame is sorted by stock code and trade date
    # df = df.sort_values(by=['ts_code', 'trade_date'])

    # Compute the score independently for each stock, realigned to df's rows.
    df['score'] = df.groupby('ts_code').apply(compute_stock_score).reset_index(level=0, drop=True)

    return df['score']
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def remove_highly_correlated_features(df, feature_columns, threshold=0.9):
    """Drop one feature of every pair with |correlation| above ``threshold``.

    Activity factors (names containing ``'act'`` or ``'af'``) are always
    kept, even when correlated.  Returns the surviving feature names.
    """
    numeric_features = df[feature_columns].select_dtypes(include=[np.number]).columns.tolist()
    if not numeric_features:
        raise ValueError("No numeric features found in the provided data.")

    # Upper triangle of the absolute correlation matrix (k=1 skips the
    # diagonal so a feature is never compared with itself).
    corr_matrix = df[numeric_features].corr().abs()
    upper_mask = np.triu(np.ones(corr_matrix.shape), k=1).astype(bool)
    upper = corr_matrix.where(upper_mask)

    to_drop = {column for column in upper.columns if (upper[column] > threshold).any()}

    return [
        col for col in feature_columns
        if col not in to_drop or 'act' in col or 'af' in col
    ]
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def cross_sectional_standardization(df, features):
    """Z-score ``features`` within each trade_date cross-section.

    Fits a fresh sklearn ``StandardScaler`` on each day's cross-section and
    writes the standardized values back.  Returns a date-sorted copy of
    ``df``; the input frame is not modified.
    """
    result = df.sort_values(by='trade_date').copy()

    for date in result['trade_date'].unique():
        day_mask = result['trade_date'] == date

        # Fit and transform this day's cross-section only.
        scaler = StandardScaler()
        standardized = scaler.fit_transform(result.loc[day_mask, features])

        result.loc[day_mask, features] = standardized

    return result
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
"import numpy as np\n",
|
|||
|
|
"import pandas as pd\n",
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def neutralize_manual(df, features, industry_col, mkt_cap_col):
    """Neutralize each factor column against log market cap within industries.

    For every industry group, fits a simple OLS of the factor on
    log(market cap) and replaces the factor with the regression residual.
    Single-row groups keep their original value; zero-variance groups are
    merely demeaned (slope forced to 0).

    Bug fixes versus the previous version:
      * residuals are now carried with their row index and re-aligned to
        `df` on assignment — the old code extended a plain list in groupby
        order and assigned it positionally, scrambling rows whenever `df`
        was not already sorted by `industry_col`;
      * the slope uses a consistent ddof (population cov / population var);
        the old mix of np.cov (ddof=1) with np.var (ddof=0) inflated beta
        by n / (n - 1);
      * a zero-variance market cap no longer divides by zero.

    Mutates `df` in place and returns it.

    Args:
        df: DataFrame with the factor columns, industry and market-cap columns.
        features: factor column names to neutralize.
        industry_col: grouping column (industry code).
        mkt_cap_col: market-capitalization column (log is taken internally).
    """
    for col in features:
        parts = []
        for _, group in df.groupby(industry_col):
            if len(group) > 1:
                x = np.log(group[mkt_cap_col].to_numpy(dtype=float))
                y = group[col].to_numpy(dtype=float)
                x_var = np.var(x)
                # OLS slope with matching (population) degrees of freedom.
                beta = np.cov(y, x, ddof=0)[0, 1] / x_var if x_var > 0 else 0.0
                alpha = y.mean() - beta * x.mean()
                resid = y - (alpha + beta * x)
                parts.append(pd.Series(resid, index=group.index))
            else:
                parts.append(group[col])  # too few samples: keep the raw value

        # pandas aligns on the index during column assignment, restoring
        # the original row order regardless of groupby ordering.
        df[col] = pd.concat(parts)

    return df
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
"import gc\n",
|
|||
|
|
"\n",
|
|||
|
|
"gc.collect()\n",
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def mad_filter(df, features, n=3):
    """Winsorize each feature at median +/- n * MAD (median absolute deviation).

    Bounds are computed over the whole column (not per date). Mutates `df`
    in place and returns it.

    Args:
        df: DataFrame holding the feature columns.
        features: column names to clip.
        n: number of MADs defining the clipping band.
    """
    for column in features:
        center = df[column].median()
        spread = (df[column] - center).abs().median()
        low, high = center - n * spread, center + n * spread
        df[column] = df[column].clip(lower=low, upper=high)
    return df
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def percentile_filter(df, features, lower_percentile=0.01, upper_percentile=0.99):
    """Clip each feature to its per-trade-date percentile band.

    For every column, the lower/upper bounds are that date's cross-sectional
    quantiles; values outside the band are truncated to it. Mutates `df`
    in place and returns it.

    Args:
        df: DataFrame with a 'trade_date' column and the feature columns.
        features: column names to winsorize.
        lower_percentile: lower quantile of the clipping band.
        upper_percentile: upper quantile of the clipping band.
    """
    per_date = df.groupby('trade_date')
    for column in features:
        low = per_date[column].transform(lambda s: s.quantile(lower_percentile))
        high = per_date[column].transform(lambda s: s.quantile(upper_percentile))
        # Series bounds align on the index, clipping each row to its own date's band.
        df[column] = df[column].clip(lower=low, upper=high)
    return df
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
"from scipy.stats import iqr\n",
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def iqr_filter(df, features):
    """Robust-scale each feature within every trade_date cross-section.

    Per date group, maps x -> (x - median(x)) / IQR(x); groups whose IQR is
    zero are left unchanged. Despite the name, this standardizes values
    rather than dropping rows. Mutates `df` in place and returns it.

    Fix: the previous lambda evaluated `iqr(x)` twice per group; the helper
    computes it once.

    Args:
        df: DataFrame with a 'trade_date' column and the feature columns.
        features: column names to scale.
    """

    def _robust_scale(x):
        spread = iqr(x)  # single evaluation per group
        return (x - x.median()) / spread if spread != 0 else x

    for col in features:
        df[col] = df.groupby('trade_date')[col].transform(_robust_scale)
    return df
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
"def quantile_filter(df, features, lower_quantile=0.01, upper_quantile=0.99, window=60):\n",
|
|||
|
|
" df = df.copy()\n",
|
|||
|
|
" for col in features:\n",
|
|||
|
|
" # 计算 rolling 统计量,需要按日期进行 groupby\n",
|
|||
|
|
" rolling_lower = df.groupby('trade_date')[col].transform(lambda x: x.rolling(window=min(len(x), window)).quantile(lower_quantile))\n",
|
|||
|
|
" rolling_upper = df.groupby('trade_date')[col].transform(lambda x: x.rolling(window=min(len(x), window)).quantile(upper_quantile))\n",
|
|||
|
|
"\n",
|
|||
|
|
" # 对数据进行裁剪\n",
|
|||
|
|
" df[col] = np.clip(df[col], rolling_lower, rolling_upper)\n",
|
|||
|
|
" \n",
|
|||
|
|
" return df\n"
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 18,
|
|||
|
|
"id": "47c12bb34062ae7a",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T14:57:50.841165Z",
|
|||
|
|
"start_time": "2025-04-03T14:49:25.889057Z"
|
|||
|
|
}
|
|||
|
|
},
|
|||
|
|
"outputs": [
|
|||
|
|
{
|
|||
|
|
"name": "stdout",
|
|||
|
|
"output_type": "stream",
|
|||
|
|
"text": [
|
|||
|
|
"['vol', 'pct_chg', 'turnover_rate', 'volume_ratio', 'winner_rate', 'lg_elg_net_buy_vol', 'flow_lg_elg_intensity', 'sm_net_buy_vol', 'total_buy_vol', 'lg_elg_buy_prop', 'flow_struct_buy_change', 'lg_elg_net_buy_vol_change', 'flow_lg_elg_accel', 'chip_concentration_range', 'chip_skewness', 'floating_chip_proxy', 'cost_support_15pct_change', 'cat_winner_price_zone', 'flow_chip_consistency', 'profit_taking_vs_absorb', 'cat_is_positive', 'upside_vol', 'downside_vol', 'vol_ratio', 'return_skew', 'return_kurtosis', 'volume_change_rate', 'cat_volume_breakout', 'turnover_deviation', 'cat_turnover_spike', 'avg_volume_ratio', 'cat_volume_ratio_breakout', 'vol_spike', 'vol_std_5', 'atr_14', 'atr_6', 'obv', 'maobv_6', 'rsi_3', 'return_5', 'return_20', 'std_return_5', 'std_return_90', 'std_return_90_2', 'act_factor1', 'act_factor2', 'act_factor3', 'act_factor4', 'rank_act_factor1', 'rank_act_factor2', 'rank_act_factor3', 'cov', 'delta_cov', 'alpha_22_improved', 'alpha_003', 'alpha_007', 'alpha_013', 'cat_up_limit', 'cat_down_limit', 'up_limit_count_10d', 'down_limit_count_10d', 'consecutive_up_limit', 'vol_break', 'weight_roc5', 'smallcap_concentration', 'cost_stability', 'high_cost_break_days', 'liquidity_risk', 'turnover_std', 'mv_volatility', 'volume_growth', 'mv_growth', 'arbr', 'momentum_factor', 'resonance_factor', 'log_close', 'cat_vol_spike', 'up', 'down', 'obv-maobv_6', 'std_return_5 / std_return_90', 'std_return_90 - std_return_90_2', 'cat_af2', 'cat_af3', 'cat_af4', 'act_factor5', 'act_factor6', 'active_buy_volume_large', 'active_buy_volume_big', 'active_buy_volume_small', 'buy_lg_vol_minus_sell_lg_vol', 'buy_elg_vol_minus_sell_elg_vol', 'ctrl_strength', 'low_cost_dev', 'asymmetry', 'lock_factor', 'cat_vol_break', 'cost_atr_adj', 'cat_golden_resonance', 'mv_turnover_ratio', 'mv_adjusted_volume', 'mv_weighted_turnover', 'nonlinear_mv_volume', 'mv_volume_ratio', 'mv_momentum', 'lg_flow_mom_corr_20_60', 'lg_flow_accel', 'profit_pressure', 'underwater_resistance', 
'cost_conc_std_20', 'profit_decay_20', 'vol_amp_loss_20', 'vol_drop_profit_cnt_5', 'lg_flow_vol_interact_20', 'cost_break_confirm_cnt_5', 'atr_norm_channel_pos_14', 'turnover_diff_skew_20', 'lg_sm_flow_diverge_20', 'pullback_strong_20_20', 'vol_wgt_hist_pos_20', 'vol_adj_roc_20', 'cs_rank_net_lg_flow_val', 'cs_rank_elg_buy_ratio', 'cs_rank_rel_profit_margin', 'cs_rank_cost_breadth', 'cs_rank_dist_to_upper_cost', 'cs_rank_winner_rate', 'cs_rank_intraday_range', 'cs_rank_close_pos_in_range', 'cs_rank_pos_in_hist_range', 'cs_rank_vol_x_profit_margin', 'cs_rank_lg_flow_price_concordance', 'cs_rank_turnover_per_winner', 'cs_rank_volume_ratio', 'cs_rank_elg_buy_sell_sm_ratio', 'cs_rank_cost_dist_vol_ratio', 'cs_rank_size']\n",
|
|||
|
|
"去极值\n",
|
|||
|
|
"去极值\n",
|
|||
|
|
"检测到 36 个可能漂移的特征: ['vol', 'pct_chg', 'turnover_rate', 'volume_ratio', 'vol_ratio', 'obv', 'alpha_003', 'resonance_factor', 'log_close', 'up', 'down', 'mv_turnover_ratio', 'mv_adjusted_volume', 'mv_weighted_turnover', 'nonlinear_mv_volume', 'mv_volume_ratio', 'mv_momentum', 'profit_decay_20', 'vol_drop_profit_cnt_5', 'cost_break_confirm_cnt_5', 'atr_norm_channel_pos_14', 'pullback_strong_20_20', 'vol_wgt_hist_pos_20', 'vol_adj_roc_20', 'cs_rank_elg_buy_ratio', 'cs_rank_rel_profit_margin', 'cs_rank_cost_breadth', 'cs_rank_dist_to_upper_cost', 'cs_rank_intraday_range', 'cs_rank_close_pos_in_range', 'cs_rank_pos_in_hist_range', 'cs_rank_vol_x_profit_margin', 'cs_rank_turnover_per_winner', 'cs_rank_volume_ratio', 'cs_rank_elg_buy_sell_sm_ratio', 'cs_rank_size']\n",
|
|||
|
|
"feature_columns: ['winner_rate', 'lg_elg_net_buy_vol', 'flow_lg_elg_intensity', 'total_buy_vol', 'lg_elg_buy_prop', 'flow_struct_buy_change', 'lg_elg_net_buy_vol_change', 'flow_lg_elg_accel', 'chip_concentration_range', 'chip_skewness', 'floating_chip_proxy', 'cost_support_15pct_change', 'cat_winner_price_zone', 'flow_chip_consistency', 'cat_is_positive', 'upside_vol', 'downside_vol', 'return_skew', 'return_kurtosis', 'volume_change_rate', 'cat_volume_breakout', 'turnover_deviation', 'cat_turnover_spike', 'avg_volume_ratio', 'cat_volume_ratio_breakout', 'vol_spike', 'vol_std_5', 'atr_14', 'maobv_6', 'rsi_3', 'return_5', 'return_20', 'std_return_5', 'std_return_90', 'act_factor1', 'act_factor2', 'act_factor3', 'act_factor4', 'rank_act_factor1', 'rank_act_factor2', 'rank_act_factor3', 'cov', 'delta_cov', 'alpha_007', 'alpha_013', 'cat_up_limit', 'cat_down_limit', 'up_limit_count_10d', 'down_limit_count_10d', 'consecutive_up_limit', 'vol_break', 'weight_roc5', 'smallcap_concentration', 'cost_stability', 'high_cost_break_days', 'liquidity_risk', 'turnover_std', 'volume_growth', 'arbr', 'momentum_factor', 'cat_vol_spike', 'obv-maobv_6', 'std_return_5 / std_return_90', 'std_return_90 - std_return_90_2', 'cat_af2', 'cat_af3', 'cat_af4', 'act_factor5', 'act_factor6', 'active_buy_volume_large', 'active_buy_volume_big', 'active_buy_volume_small', 'buy_lg_vol_minus_sell_lg_vol', 'buy_elg_vol_minus_sell_elg_vol', 'ctrl_strength', 'low_cost_dev', 'asymmetry', 'lock_factor', 'cat_vol_break', 'cost_atr_adj', 'cat_golden_resonance', 'lg_flow_mom_corr_20_60', 'profit_pressure', 'underwater_resistance', 'cost_conc_std_20', 'vol_amp_loss_20', 'lg_flow_vol_interact_20', 'turnover_diff_skew_20', 'lg_sm_flow_diverge_20', 'cs_rank_net_lg_flow_val', 'cs_rank_winner_rate', 'cs_rank_lg_flow_price_concordance', 'cs_rank_cost_dist_vol_ratio']\n",
|
|||
|
|
"739366\n",
|
|||
|
|
"最小日期: 2020-06-04\n",
|
|||
|
|
"最大日期: 2023-08-01\n",
|
|||
|
|
"402667\n",
|
|||
|
|
"最小日期: 2023-08-01\n",
|
|||
|
|
"最大日期: 2025-04-09\n"
|
|||
|
|
]
|
|||
|
|
}
|
|||
|
|
],
|
|||
|
|
"source": [
|
|||
|
|
# ============================================================
# Label construction, outlier filtering and train/test split.
# NOTE(review): relies on earlier cells for `df`, `industry_df`,
# `index_data`, `origin_columns` and `remove_shifted_features`,
# and mutates `df` / `train_data` / `test_data` in place, so this
# cell is not idempotent across re-runs — confirm before Run All.
# ============================================================
days = 1  # forward horizon (trading days) for the label
validation_days = 120  # most recent trade dates held out for validation

import gc

gc.collect()  # free memory from earlier cells before the heavy merges below

df = df.sort_values(by=['ts_code', 'trade_date'])
# df['future_return'] = df.groupby('ts_code', group_keys=False)['close'].apply(lambda x: x.shift(-days) / x - 1)
# Forward return: buy at the next day's open, sell at the close `days` later.
df['future_return'] = (df.groupby('ts_code')['close'].shift(-days) - df.groupby('ts_code')['open'].shift(-1)) / \
                      df.groupby('ts_code')['open'].shift(-1)
# Forward volatility: std of pct_chg over the next `days` days, per stock.
# NOTE(review): with days=1, rolling(1).std() is NaN for every row — confirm intent.
df['future_volatility'] = (
    df.groupby('ts_code')['pct_chg']
    .transform(lambda x: x.rolling(days).std().shift(-days))
)

# Composite target: 70% forward return + 30% forward volatility.
df['future_score'] = (
    0.7 * df['future_return']
    + 0.3 * df['future_volatility']
)
# df['future_score'] = calculate_score(df, days=2, lambda_param=0.3)

# Row filter over the 1%-99% bands of the forward metrics.
# NOTE(review): the second line ORs the two masks, keeping rows inside EITHER
# band; an AND would be the stricter "both in range" filter — confirm intent.
filter_index = df['future_return'].between(df['future_return'].quantile(0.01), df['future_return'].quantile(0.99))
filter_index = df['future_volatility'].between(df['future_volatility'].quantile(0.01),
                                               df['future_volatility'].quantile(0.99)) | filter_index

# df['label'] = df.groupby('trade_date', group_keys=False)['future_volatility'].transform(
#     lambda x: pd.qcut(x, q=30, labels=False, duplicates='drop')
# )

# Ranking label: 20 cross-sectional quantile buckets of forward return per date.
df['label'] = df.groupby('trade_date', group_keys=False)['future_return'].transform(
    lambda x: pd.qcut(x, q=20, labels=False, duplicates='drop')
)


# df['1_score'] = df.groupby('ts_code', group_keys=False)['future_score'].shift(days)
# df['2_score'] = df.groupby('ts_code', group_keys=False)['future_score'].shift(1 + days)
# df['3_score'] = df.groupby('ts_code', group_keys=False)['future_score'].shift(3 + days - 1)

def symmetric_log_transform(values):
    """Sign-preserving log1p: compresses heavy tails symmetrically around zero."""
    return np.sign(values) * np.log1p(np.abs(values))


# Chronological split. NOTE(review): the boundary date '2023-08-01' is included
# in BOTH train and test — confirm this one-day overlap is intended.
train_data = df[filter_index & (df['trade_date'] <= '2023-08-01') & (df['trade_date'] >= '2000-01-01')]
test_data = df[(df['trade_date'] >= '2023-08-01')]


def select_pre_zt_stocks_dynamic(stock_df):
    """Per trade date, keep the 1000 stocks with the highest 20-day return."""
    # Sort the data for a deterministic nlargest tie order
    stock_df = stock_df.sort_values(by=['trade_date', 'ts_code'])
    stock_df = stock_df.groupby('trade_date', group_keys=False).apply(
        lambda x: x.nlargest(1000, 'return_20')
    )

    return stock_df


train_data = select_pre_zt_stocks_dynamic(train_data)
test_data = select_pre_zt_stocks_dynamic(test_data)

# train_data['label'] = train_data.groupby('trade_date', group_keys=False)['future_score'].transform(
#     lambda x: pd.qcut(x, q=50, labels=False, duplicates='drop')
# )
# test_data['label'] = test_data.groupby('trade_date', group_keys=False)['future_score'].transform(
#     lambda x: pd.qcut(x, q=50, labels=False, duplicates='drop')
# )

industry_df = industry_df.sort_values(by=['trade_date'])
index_data = index_data.sort_values(by=['trade_date'])

# Attach industry-level features keyed by (industry code, date).
train_data = train_data.merge(industry_df, on=['cat_l2_code', 'trade_date'], how='left')
# train_data = train_data.merge(index_data, on='trade_date', how='left')
test_data = test_data.merge(industry_df, on=['cat_l2_code', 'trade_date'], how='left')
# test_data = test_data.merge(index_data, on='trade_date', how='left')

# Infinities (from ratio features) become NaN so dropna can remove them later.
train_data, test_data = train_data.replace([np.inf, -np.inf], np.nan), test_data.replace([np.inf, -np.inf], np.nan)

# feature_columns_new = feature_columns[:]
# train_data, _ = create_deviation_within_dates(train_data, feature_columns)
# test_data, _ = create_deviation_within_dates(test_data, feature_columns)

# Build the candidate feature list by exclusion.
# NOTE(review): the first comprehension is a tautology — equivalent to list(df.columns).
feature_columns = [col for col in df.columns if col in df.columns]
feature_columns = [col for col in feature_columns if col not in ['trade_date',
                                                                 'ts_code',
                                                                 'label']]
feature_columns = [col for col in feature_columns if 'future' not in col]
feature_columns = [col for col in feature_columns if 'label' not in col]
feature_columns = [col for col in feature_columns if 'score' not in col]
feature_columns = [col for col in feature_columns if 'gen' not in col]
feature_columns = [col for col in feature_columns if 'is_st' not in col]
feature_columns = [col for col in feature_columns if 'pe_ttm' not in col]
# feature_columns = [col for col in feature_columns if 'volatility' not in col]
feature_columns = [col for col in feature_columns if 'circ_mv' not in col]
feature_columns = [col for col in feature_columns if 'cat_l2_code' not in col]
feature_columns = [col for col in feature_columns if col not in origin_columns]
feature_columns = [col for col in feature_columns if not col.startswith('_')]
# feature_columns = [col for col in feature_columns if col not in ['ts_code', 'trade_date', 'vol_std_5', 'cov', 'delta_cov', 'alpha_22_improved', 'alpha_007', 'consecutive_up_limit', 'mv_volatility', 'volume_growth', 'mv_growth', 'arbr']]
# Hand-picked exclusions (previously found unstable/leaky).
feature_columns = [col for col in feature_columns if col not in ['intraday_lg_flow_corr_20',
                                                                 'cap_neutral_cost_metric',
                                                                 'hurst_net_mf_vol_60',
                                                                 'complex_factor_deap_1',
                                                                 'lg_buy_consolidation_20',
                                                                 'cs_rank_ind_cap_neutral_pe',
                                                                 'cs_rank_opening_gap',
                                                                 'cs_rank_ind_adj_lg_flow']]
print(feature_columns)
numeric_columns = df.select_dtypes(include=['float64', 'int64']).columns
numeric_columns = [col for col in numeric_columns if col in feature_columns]
print('去极值')
train_data = quantile_filter(train_data, numeric_columns)  # winsorize (de-extreme)
# print('中性化')
# train_data = neutralize_manual(train_data, numeric_columns, industry_col='cat_l2_code', mkt_cap_col='log(circ_mv)') # neutralize
print('去极值')
test_data = quantile_filter(test_data, numeric_columns)  # winsorize (de-extreme)
# print('中性化')
# test_data = neutralize_manual(test_data, numeric_columns, industry_col='cat_l2_code', mkt_cap_col='log(circ_mv)')
all_dates = train_data['trade_date'].unique()  # all unique trade dates
split_date = all_dates[-validation_days]  # split at the validation_days-th date from the end
train_data_split = train_data[train_data['trade_date'] < split_date]  # training subset
val_data_split = train_data[train_data['trade_date'] >= split_date]  # validation subset

# Drop features whose distribution drifts between the two time windows.
feature_columns, _ = remove_shifted_features(
    train_data_split,
    val_data_split,
    feature_columns)

feature_columns = remove_highly_correlated_features(train_data,
                                                    feature_columns)
# NOTE(review): keep_columns is computed but the projection below is commented out.
keep_columns = [col for col in train_data.columns if
                col in feature_columns or col in ['ts_code', 'trade_date', 'label', 'future_return',
                                                  'future_score', 'future_volatility']]
# train_data = train_data[keep_columns]
print(f'feature_columns: {feature_columns}')

train_data = train_data.dropna(subset=feature_columns)
train_data = train_data.dropna(subset=['label'])
train_data = train_data.reset_index(drop=True)

# print(test_data.tail())
test_data = test_data.dropna(subset=feature_columns)
# test_data = test_data.dropna(subset=['label'])
test_data = test_data.reset_index(drop=True)

print(len(train_data))
print(f"最小日期: {train_data['trade_date'].min().strftime('%Y-%m-%d')}")
print(f"最大日期: {train_data['trade_date'].max().strftime('%Y-%m-%d')}")
print(len(test_data))
print(f"最小日期: {test_data['trade_date'].min().strftime('%Y-%m-%d')}")
print(f"最大日期: {test_data['trade_date'].max().strftime('%Y-%m-%d')}")

# Columns named 'cat*' are treated as categorical by LightGBM/CatBoost later.
cat_columns = [col for col in feature_columns if col.startswith('cat')]
for col in cat_columns:
    train_data[col] = train_data[col].astype('category')
    test_data[col] = test_data[col].astype('category')



# feature_columns_new.remove('cat_l2_code')
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 19,
|
|||
|
|
"id": "8f134d435f71e9e2",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T14:57:51.050696Z",
|
|||
|
|
"start_time": "2025-04-03T14:57:51.034030Z"
|
|||
|
|
},
|
|||
|
|
"jupyter": {
|
|||
|
|
"source_hidden": true
|
|||
|
|
}
|
|||
|
|
},
|
|||
|
|
"outputs": [],
|
|||
|
|
"source": [
|
|||
|
|
"from sklearn.preprocessing import StandardScaler\n",
|
|||
|
|
"import lightgbm as lgb\n",
|
|||
|
|
"import matplotlib.pyplot as plt\n",
|
|||
|
|
"from sklearn.decomposition import PCA\n",
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def train_light_model(train_data_df, params, feature_columns, callbacks, evals,
                      print_feature_importance=True, num_boost_round=100,
                      validation_days=180, use_pca=False, split_date=None):  # new parameter: validation_days
    """Train a LightGBM learning-to-rank model on a chronological split.

    Steps: per-date cross-sectional standardization of numeric features,
    time-ordered train/validation split, optional PCA on numeric features,
    time-increasing sample weights, then lgb.train with per-date groups.

    Args:
        train_data_df: frame with 'trade_date', 'label' and the feature columns.
        params: LightGBM parameter dict (mutated: a 'weight' key is added below).
        feature_columns: feature names used for training.
        callbacks: LightGBM callbacks (early stopping, eval recording, ...).
        evals: eval-results record passed to lgb.plot_metric.
        print_feature_importance: whether to plot metric / importance figures.
        num_boost_round: max boosting rounds.
        validation_days: number of trailing trade dates used for validation
            when `split_date` is not given.
        use_pca: if True, project numeric features with PCA (95% variance).
        split_date: explicit split date; overrides `validation_days`.

    Returns:
        (model, scaler, pca) — NOTE(review): `scaler` is never fitted (see below).
    """
    # Ensure the data is ordered by time before splitting
    train_data_df = train_data_df.sort_values(by='trade_date')

    numeric_columns = train_data_df.select_dtypes(include=['float64', 'int64']).columns
    numeric_columns = [col for col in numeric_columns if col in feature_columns]
    # X_train.loc[:, numeric_columns] = scaler.fit_transform(X_train[numeric_columns])
    # X_val.loc[:, numeric_columns] = scaler.transform(X_val[numeric_columns])
    # Per-date z-scoring (defined in an earlier cell); no temporal leakage since
    # each date is standardized only against itself.
    train_data_df = cross_sectional_standardization(train_data_df, numeric_columns)

    # Drop samples with missing labels
    train_data_df = train_data_df.dropna(subset=['label'])
    print('原始训练集大小: ', len(train_data_df))

    # Chronological train/validation split
    if split_date is None:
        all_dates = train_data_df['trade_date'].unique()  # all unique trade dates
        split_date = all_dates[-validation_days]  # split at the validation_days-th date from the end
    train_data_split = train_data_df[train_data_df['trade_date'] < split_date]  # training set
    val_data_split = train_data_df[train_data_df['trade_date'] >= split_date]  # validation set

    # Report the split sizes
    print(f"划分后的训练集大小: {len(train_data_split)}, 验证集大小: {len(val_data_split)}")

    # Extract features and labels
    X_train = train_data_split[feature_columns]
    y_train = train_data_split['label']

    X_val = val_data_split[feature_columns]
    y_val = val_data_split['label']

    # NOTE(review): this scaler is created but never fitted (standardization
    # happened cross-sectionally above) and is returned as-is — confirm that
    # callers do not expect a fitted scaler.
    scaler = StandardScaler()

    # Per-trade_date sample counts (learning-to-rank needs group sizes)
    train_groups = train_data_split.groupby('trade_date').size().tolist()
    val_groups = val_data_split.groupby('trade_date').size().tolist()

    # Categorical features (substring match on 'cat')
    categorical_feature = [col for col in feature_columns if 'cat' in col]

    pca = None
    if use_pca:
        pca = PCA(n_components=0.95)  # or a fixed n_components (e.g. 10)
        numeric_features = [col for col in feature_columns if col not in categorical_feature]
        numeric_pca = pca.fit_transform(X_train[numeric_features])
        X_train = pd.concat([pd.DataFrame(numeric_pca, index=X_train.index), X_train[categorical_feature]], axis=1)

        # Validation uses the transform fitted on the training window only.
        numeric_pca = pca.transform(X_val[numeric_features])
        X_val = pd.concat([pd.DataFrame(numeric_pca, index=X_val.index), X_val[categorical_feature]], axis=1)

    # Time-based sample weights (later dates weigh more, quadratically 1..100)
    # trade_date = train_data_split['trade_date']  # trade dates
    # weights = (trade_date - trade_date.min()).dt.days / (trade_date.max() - trade_date.min()).days + 1
    # weights = train_data_split.groupby('trade_date')['std_return_5'].transform(
    #     lambda x: x / x.mean()
    # )
    ud = sorted(train_data_split["trade_date"].unique().tolist())
    date_weights = {date: weight * weight for date, weight in zip(ud, np.linspace(1, 10, len(ud)))}
    # NOTE(review): sample weights are normally passed to lgb.Dataset(weight=...),
    # not placed in the booster params dict — confirm LightGBM actually consumes
    # params['weight'] here; otherwise these weights are silently ignored.
    params['weight'] = train_data_split["trade_date"].map(date_weights).tolist()

    print('feature_columns size: ', len(X_train.columns.tolist()))

    train_dataset = lgb.Dataset(
        X_train, label=y_train, group=train_groups,
        categorical_feature=categorical_feature
    )

    # weights = val_data_split.groupby('trade_date')['std_return_5'].transform(
    #     lambda x: x / x.mean()
    # )
    val_dataset = lgb.Dataset(
        X_val, label=y_val, group=val_groups,
        categorical_feature=categorical_feature
    )

    # Train the ranking model
    model = lgb.train(
        params, train_dataset, num_boost_round=num_boost_round,
        valid_sets=[train_dataset, val_dataset], valid_names=['train', 'valid'],
        callbacks=callbacks
    )

    # Plot the eval curve and feature importances (if requested)
    if print_feature_importance:
        lgb.plot_metric(evals)
        lgb.plot_importance(model, importance_type='split', max_num_features=20)
        plt.show()

    return model, scaler, pca
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
"from catboost import CatBoostRanker, Pool\n",
|
|||
|
|
"import numpy as np\n",
|
|||
|
|
"\n",
|
|||
|
|
"\n",
|
|||
|
|
def train_catboost(train_data_df, test_data_df, feature_columns, params=None, plot=False):
    """Fit a CatBoostRanker with one ranking group per trade date.

    Numeric features are standardized with a StandardScaler fitted on the
    training frame only; 'cat*' columns are passed to CatBoost as categorical.

    Fixes versus the previous version:
      * `params=None` used to crash with TypeError at CatBoostRanker(**params);
        an empty dict now falls back to CatBoost defaults;
      * the feature frames are copied before in-place scaling, avoiding
        SettingWithCopyWarning and any accidental mutation of the callers'
        frames.

    Args:
        train_data_df: training frame with 'trade_date', 'label' and features.
        test_data_df: evaluation frame with the same columns.
        feature_columns: feature names used for training.
        params: keyword arguments for CatBoostRanker (None -> defaults).
        plot: forwarded to CatBoost's fit(plot=...).

    Returns:
        (model, scaler) — the fitted ranker and the fitted StandardScaler.
    """
    if params is None:
        params = {}

    # Copies: the .loc scaling below must not touch the callers' frames.
    X_train = train_data_df[feature_columns].copy()
    y_train = train_data_df['label']

    X_val = test_data_df[feature_columns].copy()
    y_val = test_data_df['label']

    # Standardize numeric columns: fit on train only, apply to validation.
    scaler = StandardScaler()
    numeric_columns = X_train.select_dtypes(include=['float64', 'int64']).columns
    X_train.loc[:, numeric_columns] = scaler.fit_transform(X_train[numeric_columns])
    X_val.loc[:, numeric_columns] = scaler.transform(X_val[numeric_columns])

    # One ranking group per trade date (factorize yields a dense int id per date).
    group_train = train_data_df['trade_date'].factorize()[0]
    group_val = test_data_df['trade_date'].factorize()[0]

    # CatBoost expects categorical features as column indices here.
    cat_features = [i for i, col in enumerate(feature_columns) if col.startswith('cat')]
    print(f'cat_features: {cat_features}')

    train_pool = Pool(
        data=X_train,
        label=y_train,
        group_id=group_train,
        cat_features=cat_features
    )

    val_pool = Pool(
        data=X_val,
        label=y_val,
        group_id=group_val,
        cat_features=cat_features
    )

    # CatBoost learning-to-rank model
    model = CatBoostRanker(**params)
    model.fit(train_pool, eval_set=val_pool, plot=plot, use_best_model=True)

    return model, scaler
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 20,
|
|||
|
|
"id": "c6eb5cd4-e714-420a-ac48-39af3e11ee81",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T15:03:18.426481Z",
|
|||
|
|
"start_time": "2025-04-03T15:02:19.926352Z"
|
|||
|
|
}
|
|||
|
|
},
|
|||
|
|
"outputs": [
|
|||
|
|
{
|
|||
|
|
"name": "stdout",
|
|||
|
|
"output_type": "stream",
|
|||
|
|
"text": [
|
|||
|
|
"train data size: 739366\n",
|
|||
|
|
"原始训练集大小: 739366\n",
|
|||
|
|
"划分后的训练集大小: 621433, 验证集大小: 117933\n",
|
|||
|
|
"feature_columns size: 93\n",
|
|||
|
|
"Training until validation scores don't improve for 100 rounds\n",
|
|||
|
|
"[100]\ttrain's ndcg@1: 0.790171\tvalid's ndcg@1: 0.396168\n",
|
|||
|
|
"Early stopping, best iteration is:\n",
|
|||
|
|
"[22]\ttrain's ndcg@1: 0.653157\tvalid's ndcg@1: 0.479455\n",
|
|||
|
|
"Evaluated only: ndcg@1\n"
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"data": {
|
|||
|
|
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAjwAAAHHCAYAAAC7soLdAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAi21JREFUeJzs3Xd4lMXawOHf7qY3ElJJCITeayhSpCiIoihio0jziAU46uHjqFgoNvSoiB0boiiKImJBwBCadAi9hZ5QUkhCetvsvt8fk2wI6Y1Nee7r2ivZt84OS/bZmWdmdJqmaQghhBBC1GF6axdACCGEEKK6ScAjhBBCiDpPAh4hhBBC1HkS8AghhBCizpOARwghhBB1ngQ8QgghhKjzJOARQgghRJ0nAY8QQggh6jwJeIQQQghR50nAI4RgyZIl6HQ6zp8/X233mDt3LjqdrtZc19rOnz+PTqdjyZIlFTpfp9Mxd+7cKi2TELWZBDxC3EB5gYVOp2Pr1q2F9muaRmBgIDqdjrvuuqtC9/jkk08q/CEpymfZsmUsXLjQ2sUQQpSBBDxCWIGDgwPLli0rtH3z5s1cvHgRe3v7Cl+7IgHP+PHjycjIoGnTphW+r7W89NJLZGRkWOXe1RnwNG3alIyMDMaPH1+h8zMyMnjppZequFRC1F4S8AhhBcOHD+fnn38mJyenwPZly5YRHByMn5/fDSlHWloaAAaDAQcHh1rVNZRXdhsbGxwcHKxcmtJlZmZiNpvLfLxOp8PBwQGDwVCh+zk4OGBjY1Ohc4WoiyTgEcIKxowZQ3x8PCEhIZZt2dnZrFixgrFjxxZ5jtlsZuHChXTo0AEHBwd8fX15/PHHuXr1quWYoKAgjh49yubNmy1dZ4MGDQLyu9M2b97M1KlT8fHxoXHjxgX2XZ/Ds2bNGgYOHIirqytubm707NmzyJap623dupWePXvi4OBAixYt+OyzzwodU1KOyvX5J3l5OseOHWPs2LF4eHjQv3//AvuuP3/69OmsWrWKjh07Ym9vT4cOHVi7dm2he23atIkePXoUKGtZ8oIGDRrE6tWriYiIsNR1UFCQ5Zo6nY4ff/yRl156iYCAAJycnEhOTiYhIYGZM2fSqVMnXFxccHNz44477uDgwYOl1s+kSZNwcXHh0qVLjBw5EhcXF7y9vZk5cyYmk6lMdXj69GkmTZqEu7s7DRo0YPLkyaSnpxc4NyMjg6eeegovLy9cXV25++67uXTpkuQFiVpNwn8hrCAoKIg+ffrwww8/cMcddwAquEhKSmL06NF88MEHhc55/PHHWbJkCZMnT+app57i3LlzfPTRR+zfv59t27Zha2vLwoUL+fe//42LiwsvvvgiAL6+vgWuM3XqVLy9vZk9e7allaQoS5Ys4ZFHHqFDhw7MmjULd3d39u/fz9q1a4sNygAOHz7Mbbfdhre3N3PnziUnJ4c5c+YUKkdFPPDAA7Rq1Yo33ngDTdNKPHbr1q2sXLmSqVOn4urqygcffMB9991HZGQknp6eAOzfv5/bb7+dRo0aMW/ePEwmE6+88gre3t6lluXFF18kKSmJixcv8t577wHg4uJS4JhXX30VOzs7Zs6cSVZWFnZ2dhw7doxVq1bxwAMP0KxZM2JiYvjss88YOHAgx44dw9/fv8T7mkwmhg0bRu/evXnnnXdYv3497777Li1atODJJ58stdwPPvggzZo1Y/78+ezbt48vv/wSHx8f3nrrLcsxkyZN4qeffmL8+PHcdNNNbN68mTvvvLPUawtRo2lCiBvm66+/1gBtz5492kcffaS5urpq6enpmqZp2gMPPKANHjxY0zRNa9q0qXbnnXdazvvnn380QPv+++8LXG/t2rWFtnfo0EEbOHBgsffu37+/lpOTU+S+c+fOaZqmaYmJiZqrq6vWu3dvLSMjo8CxZrO5xNc4cuRIzcHBQYuIiLBsO3bsmGYwGLRr/+ScO3dOA7Svv/660DUAbc
6cOZbnc+bM0QBtzJgxhY7N23f9+XZ2dtrp06ct2w4ePKgB2ocffmjZNmLECM3JyUm7dOmSZdupU6c0GxubQtcsyp133qk1bdq00PaNGzdqgNa8eXPLv2+ezMxMzWQyFdh27tw5zd7eXnvllVcKbLu+fiZOnKgBBY7TNE3r1q2bFhwcXKgOiqrDRx55pMBx9957r+bp6Wl5HhYWpgHaM888U+C4SZMmFbqmELWJdGkJYSUPPvggGRkZ/Pnnn6SkpPDnn38W23Ly888/06BBA4YOHUpcXJzlERwcjIuLCxs3bizzfadMmVJqXkhISAgpKSk8//zzhfJjSurqMZlMrFu3jpEjR9KkSRPL9nbt2jFs2LAyl7E4TzzxRJmPHTJkCC1atLA879y5M25ubpw9e9ZS1vXr1zNy5MgCrSotW7a0tLpV1sSJE3F0dCywzd7eHr1ebylDfHw8Li4utGnThn379pXputfXw80332x5XRU5Nz4+nuTkZABLt9/UqVMLHPfvf/+7TNcXoqaSLi0hrMTb25shQ4awbNky0tPTMZlM3H///UUee+rUKZKSkvDx8Slyf2xsbJnv26xZs1KPOXPmDAAdO3Ys83UBrly5QkZGBq1atSq0r02bNvz111/lut71ylL2PNcGXHk8PDwsOU+xsbFkZGTQsmXLQscVta0iiiqv2Wzm/fff55NPPuHcuXMFcm/yutpK4uDgUKjL7drXVZrr68XDwwOAq1ev4ubmRkREBHq9vlDZq6pOhLAWCXiEsKKxY8cyZcoUoqOjueOOO3B3dy/yOLPZjI+PD99//32R+8uSc5Ln+hYHaymupej65NtrlafsxbViaaXk/lSlosr7xhtv8PLLL/PII4/w6quv0rBhQ/R6Pc8880yZRnFVdNRWaeffyHoRwhok4BHCiu69914ef/xxdu7cyfLly4s9rkWLFqxfv55+/fqV+qFfFUPL87qCjhw5Uq5v9t7e3jg6OnLq1KlC+8LDwws8z2tZSExMLLA9IiKinKWtGB8fHxwcHDh9+nShfUVtK0pF6nrFihUMHjyYr776qsD2xMREvLy8yn29qta0aVPMZjPnzp0r0FJX1joRoqaSHB4hrMjFxYVPP/2UuXPnMmLEiGKPe/DBBzGZTLz66quF9uXk5BQIGpydnQsFEeV122234erqyvz588nMzCywr6SWAIPBwLBhw1i1ahWRkZGW7cePH2fdunUFjnVzc8PLy4stW7YU2P7JJ59UquxlZTAYGDJkCKtWreLy5cuW7adPn2bNmjVluoazszNJSUnlvu/1dfjzzz9z6dKlcl2nuuTlWl3/7/Dhhx9aozhCVBlp4RHCyiZOnFjqMQMHDuTxxx9n/vz5HDhwgNtuuw1bW1tOnTrFzz//zPvvv2/J/wkODubTTz/ltddeo2XLlvj4+HDLLbeUq0xubm689957PProo/Ts2dMy983BgwdJT0/nm2++KfbcefPmsXbtWm6++WamTp1KTk4OH374IR06dODQoUMFjn300Ud58803efTRR+nRowdbtmzh5MmT5SprZcydO5e///6bfv368eSTT2Iymfjoo4/o2LEjBw4cKPX84OBgli9fzowZM+jZsycuLi4lBq4Ad911F6+88gqTJ0+mb9++HD58mO+//57mzZtX0auqnODgYO677z4WLlxIfHy8ZVh63r9LbZqcUohrScAjRC2xaNEigoOD+eyzz3jhhRewsbEhKCiIhx9+mH79+lmOmz17NhEREfzvf/8jJSWFgQMHljvgAfjXv/6Fj48Pb775Jq+++iq2tra0bduW//znPyWe17lzZ9atW8eMGTOYPXs2jRs3Zt68eURFRRUKeGbPns2VK1dYsWIFP/30E3fccQdr1qwpNjm7qgUHB7NmzRpmzpzJyy+/TGBgIK+88grHjx/nxIkTpZ4/depUDhw4wNdff817771H06ZNSw14XnjhBdLS0li2bBnLly+ne/furF69mueff76qXl
alffvtt/j5+fHDDz/w66+/MmTIEJYvX06bNm1qxazWQhRFp0mmmhBCFDBy5EiOHj1aZC5SfXXgwAG6devGd999x7h
|
|||
|
|
"text/plain": [
|
|||
|
|
"<Figure size 640x480 with 1 Axes>"
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
"metadata": {},
|
|||
|
|
"output_type": "display_data"
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"data": {
|
|||
|
|
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAwYAAAHHCAYAAAAF7U/sAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAA8NBJREFUeJzs3XdUFFf7wPHv0jsKooCiWECxIXZjww4oBhsGfaMkGmMhSmxoLAF7x66JvoJJRFNU9I0VjWhEY9fYFSNiEnsUBCJS5veHh/25UgSkKc/nnD2yM3fuPDMPsnvn3rmjUhRFQQghhBBCCFGqaRV3AEIIIYQQQojiJw0DIYQQQgghhDQMhBBCCCGEENIwEEIIIYQQQiANAyGEEEIIIQTSMBBCCCGEEEIgDQMhhBBCCCEE0jAQQgghhBBCIA0DIYQQQgghBNIwEEIIId4ZoaGhqFQqYmJiijsUIcRbSBoGQggh3loZX4Szek2YMKFQ9nnkyBECAwN58uRJodRfmiUlJREYGEhkZGRxhyJEqaRT3AEIIYQQb2ratGlUrVpVY1ndunULZV9HjhwhKCgIX19fypQpUyj7yK8PP/yQDz74AH19/eIOJV+SkpIICgoCwNXVtXiDEaIUkoaBEEKIt567uzuNGzcu7jDeSGJiIsbGxm9Uh7a2Ntra2gUUUdFJT0/n+fPnxR2GEKWeDCUSQgjxztu1axetW7fG2NgYU1NTunbtysWLFzXK/P777/j6+lKtWjUMDAywtrbm448/5tGjR+oygYGBjBs3DoCqVauqhy3FxMQQExODSqUiNDQ00/5VKhWBgYEa9ahUKi5dukS/fv0oW7YsrVq1Uq//7rvvaNSoEYaGhlhYWPDBBx9w+/bt1x5nVvcY2Nvb061bNyIjI2ncuDGGhobUq1dPPVxny5Yt1KtXDwMDAxo1asSZM2c06vT19cXExIQ//viDLl26YGxsjK2tLdOmTUNRFI2yiYmJjBkzBjs7O/T19alZsyYLFizIVE6lUuHn58eGDRuoU6cO+vr6rF69GisrKwCCgoLU5zbjvOUmPy+f2+joaHWvjrm5OR999BFJSUmZztl3331H06ZNMTIyomzZsrRp04a9e/dqlMnN748Q7wLpMRBCCPHWi4uL4+HDhxrLypUrB8C3337LwIED6dKlC3PnziUpKYlVq1bRqlUrzpw5g729PQARERH88ccffPTRR1hbW3Px4kW+/vprLl68yG+//YZKpaJnz55cu3aNjRs3EhwcrN6HlZUVDx48yHPcffr0wcHBgVmzZqm/PM+cOZMpU6bg7e3N4MGDefDgAcuWLaNNmzacOXMmX8OXoqOj6devH59++in/+c9/WLBgAZ6enqxevZovvviC4cOHAzB79my8vb25evUqWlr/f+0wLS0NNzc3mjdvzrx589i9ezdffvklqampTJs2DQBFUejevTsHDhxg0KBBNGjQgD179jBu3Dj++usvgoODNWL65Zdf+OGHH/Dz86NcuXI4OzuzatUqhg0bRo8ePejZsycA9evXB3KXn5d5e3tTtWpVZs+ezenTp1m7di3ly5dn7ty56jJBQUEEBgby3nvvMW3aNPT09Dh27Bi//PILnTt3BnL/+yPEO0ERQggh3lIhISEKkOVLURTl6dOnSpkyZZRPPvlEY7u7d+8q5ubmGsuTkpIy1b9x40YFUA4dOqReNn/+fAVQbt68qVH25s2bCqCEhIRkqgdQvvzyS/X7L7/8UgEUHx8fjXIxMTGKtra2MnPmTI3l58+fV3R0dDItz+58vBxblSpVFEA5cuSIetmePXsUQDE0NFRu3bqlXv7VV18pgHLgwAH1soEDByqA8tlnn6mXpaenK127dlX09PSUBw8eKIqiKOHh4QqgzJgxQyOm3r17KyqVSomOjtY4H1paWsrFixc1yj548CDTucqQ2/xknNuPP/5Yo2yPHj0US0tL9fvr168rWlpaSo8ePZS0tD
SNsunp6Yqi5O33R4h3gQwlEkII8dZbsWIFERERGi94cZX5yZMn+Pj48PDhQ/VLW1ubZs2aceDAAXUdhoaG6p+fPXvGw4cPad68OQCnT58ulLiHDh2q8X7Lli2kp6fj7e2tEa+1tTUODg4a8eZF7dq1adGihfp9s2bNAGjfvj2VK1fOtPyPP/7IVIefn5/654yhQM+fP2ffvn0A7Ny5E21tbUaOHKmx3ZgxY1AUhV27dmksb9u2LbVr1871MeQ1P6+e29atW/Po0SPi4+MBCA8PJz09nalTp2r0jmQcH+Tt90eId4EMJRJCCPHWa9q0aZY3H1+/fh148QU4K2ZmZuqf//nnH4KCgti0aRP379/XKBcXF1eA0f6/V2dSun79Ooqi4ODgkGV5XV3dfO3n5S//AObm5gDY2dllufzx48cay7W0tKhWrZrGMkdHRwD1/Qy3bt3C1tYWU1NTjXJOTk7q9S979dhfJ6/5efWYy5YtC7w4NjMzM27cuIGWllaOjZO8/P4I8S6QhoEQQoh3Vnp6OvBinLi1tXWm9To6//8x6O3tzZEjRxg3bhwNGjTAxMSE9PR03Nzc1PXk5NUx7hnS0tKy3eblq+AZ8apUKnbt2pXl7EImJiavjSMr2c1UlN1y5ZWbhQvDq8f+OnnNT0EcW15+f4R4F8hvtBBCiHdW9erVAShfvjwdO3bMttzjx4/Zv38/QUFBTJ06Vb0844rxy7JrAGRckX71wWevXil/XbyKolC1alX1FfmSID09nT/++EMjpmvXrgGob76tUqUK+/bt4+nTpxq9BleuXFGvf53szm1e8pNb1atXJz09nUuXLtGgQYNsy8Drf3+EeFfIPQZCCCHeWV26dMHMzIxZs2aRkpKSaX3GTEIZV5dfvZq8ePHiTNtkPGvg1QaAmZkZ5cqV49ChQxrLV65cmet4e/bsiba2NkFBQZliURQl09ScRWn58uUasSxfvhxdXV06dOgAgIeHB2lpaRrlAIKDg1GpVLi7u792H0ZGRkDmc5uX/OSWl5cXWlpaTJs2LVOPQ8Z+cvv7I8S7QnoMhBBCvLPMzMxYtWoVH374IQ0bNuSDDz7AysqK2NhYduzYQcuWLVm+fDlmZma0adOGefPmkZKSQsWKFdm7dy83b97MVGejRo0AmDRpEh988AG6urp4enpibGzM4MGDmTNnDoMHD6Zx48YcOnRIfWU9N6pXr86MGTOYOHEiMTExeHl5YWpqys2bN9m6dStDhgxh7NixBXZ+csvAwIDdu3czcOBAmjVrxq5du9ixYwdffPGF+tkDnp6etGvXjkmTJhETE4OzszN79+5l27Zt+Pv7q6++58TQ0JDatWvz/fff4+joiIWFBXXr1qVu3bq5zk9u1ahRg0mTJjF9+nRat25Nz5490dfX58SJE9ja2jJ79uxc//4I8c4optmQhBBCiDeWMT3niRMncix34MABpUuXLoq5ubliYGCgVK9eXfH19VVOnjypLvPnn38qPXr0UMqUKaOYm5srffr0Uf7+++8sp8+cPn26UrFiRUVLS0tjetCkpCRl0KBBirm5uWJqaqp4e3sr9+/fz3a60oypPl+1efNmpVWrVoqxsbFibGys1KpVSxkxYoRy9erVXJ2PV6cr7dq1a6aygDJixAiNZRlTrs6fP1+9bODAgYqxsbFy48YNpXPnzoqRkZFSoUIF5csvv8w0zefTp0+Vzz//XLG1tVV0dXUVBwcHZf78+erpP3Pad4YjR44ojRo1UvT09DTOW27zk925zercKIqirFu3TnFxcVH09fWVsmXLKm3btlUiIiI0yuTm90eId4FKUYrgDiMhhBBCvJV8fX356aefSEhIKO5QhBCFTO4xEEIIIYQQQkjDQAghhBBCCCENAyGEEEIIIQQg9xgIIYQQQgghpMdACCGEEEIIIQ0DIYQQQgghBPKAMyFEDtLT0/n7778xNTVFpVIVdzhCCCGEyAVFUXj69Cm2trZoaeW+H0AaBk
KIbP3999/Y2dkVdxhCCCGEyIfbt29TqVKlXJeXhoEQIlumpqYA3Lx5EwsLi2KOpvRKSUlh7969dO7cGV1d3eIOp9S
|
|||
|
|
"text/plain": [
|
|||
|
|
"<Figure size 640x480 with 1 Axes>"
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
"metadata": {},
|
|||
|
|
"output_type": "display_data"
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"name": "stdout",
|
|||
|
|
"output_type": "stream",
|
|||
|
|
"text": [
|
|||
|
|
"train data size: 739366\n"
|
|||
|
|
]
|
|||
|
|
}
|
|||
|
|
],
|
|||
|
|
"source": [
|
|||
|
|
"print('train data size: ', len(train_data))\n",
|
|||
|
|
"\n",
|
|||
|
|
"label_gain = list(range(len(train_data['label'].unique())))\n",
|
|||
|
|
"label_gain = [gain * gain for gain in label_gain]\n",
|
|||
|
|
"light_params = {\n",
|
|||
|
|
" 'label_gain': label_gain,\n",
|
|||
|
|
" 'objective': 'lambdarank',\n",
|
|||
|
|
" 'metric': 'ndcg',\n",
|
|||
|
|
" 'learning_rate': 0.03,\n",
|
|||
|
|
" 'num_leaves': 32,\n",
|
|||
|
|
" # 'min_data_in_leaf': 128,\n",
|
|||
|
|
" 'max_depth': 8,\n",
|
|||
|
|
" 'max_bin': 32,\n",
|
|||
|
|
" 'feature_fraction': 0.7,\n",
|
|||
|
|
" 'bagging_fraction': 0.7,\n",
|
|||
|
|
" 'bagging_freq': 5,\n",
|
|||
|
|
" 'lambda_l1': 0.1,\n",
|
|||
|
|
" 'lambda_l2': 0.1,\n",
|
|||
|
|
" 'boosting': 'gbdt',\n",
|
|||
|
|
" 'verbosity': -1,\n",
|
|||
|
|
" 'extra_trees': True,\n",
|
|||
|
|
" 'max_position': 5,\n",
|
|||
|
|
" 'ndcg_at': 1,\n",
|
|||
|
|
" 'quant_train_renew_leaf': True,\n",
|
|||
|
|
" 'lambdarank_truncation_level': 1,\n",
|
|||
|
|
" 'lambdarank_position_bias_regularization': 1,\n",
|
|||
|
|
" 'seed': 7\n",
|
|||
|
|
"}\n",
|
|||
|
|
"evals = {}\n",
|
|||
|
|
"\n",
|
|||
|
|
"gc.collect()\n",
|
|||
|
|
"\n",
|
|||
|
|
"use_pca = False\n",
|
|||
|
|
"# feature_contri = [2 if feat.startswith('act_factor') or 'buy' in feat or 'sell' in feat else 1 for feat in feature_columns]\n",
|
|||
|
|
"# light_params['feature_contri'] = feature_contri\n",
|
|||
|
|
"# print(f'feature_contri: {feature_contri}')\n",
|
|||
|
|
"model, scaler, pca = train_light_model(train_data.dropna(subset=['label']),\n",
|
|||
|
|
" light_params, feature_columns,\n",
|
|||
|
|
" [lgb.log_evaluation(period=100),\n",
|
|||
|
|
" lgb.callback.record_evaluation(evals),\n",
|
|||
|
|
" lgb.early_stopping(100, first_metric_only=True)\n",
|
|||
|
|
" ], evals,\n",
|
|||
|
|
" num_boost_round=1000, validation_days=120,\n",
|
|||
|
|
" print_feature_importance=True, use_pca=use_pca)\n",
|
|||
|
|
"\n",
|
|||
|
|
"print('train data size: ', len(train_data))"
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 21,
|
|||
|
|
"id": "5d1522a7538db91b",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T15:04:39.656944Z",
|
|||
|
|
"start_time": "2025-04-03T15:04:39.298483Z"
|
|||
|
|
}
|
|||
|
|
},
|
|||
|
|
"outputs": [
|
|||
|
|
{
|
|||
|
|
"name": "stdout",
|
|||
|
|
"output_type": "stream",
|
|||
|
|
"text": [
|
|||
|
|
"9.663390663390663\n",
|
|||
|
|
" trade_date ts_code future_return future_score label\n",
|
|||
|
|
"29 2023-08-01 605188.SH 0.046809 NaN 19.0\n",
|
|||
|
|
"1017 2023-08-02 000014.SZ 0.062638 NaN 19.0\n",
|
|||
|
|
"1988 2023-08-03 000890.SZ 0.132200 NaN 19.0\n",
|
|||
|
|
"2958 2023-08-04 603536.SH -0.062907 NaN 0.0\n",
|
|||
|
|
"3921 2023-08-07 603828.SH -0.016221 NaN 2.0\n",
|
|||
|
|
"4948 2023-08-08 002336.SZ -0.032209 NaN 0.0\n",
|
|||
|
|
"5902 2023-08-09 600155.SH 0.027813 NaN 19.0\n",
|
|||
|
|
"6964 2023-08-10 002787.SZ 0.085946 NaN 19.0\n",
|
|||
|
|
"7842 2023-08-11 601136.SH 0.103558 NaN 19.0\n",
|
|||
|
|
"8841 2023-08-14 603536.SH -0.028928 NaN 0.0\n"
|
|||
|
|
]
|
|||
|
|
}
|
|||
|
|
],
|
|||
|
|
"source": [
|
|||
|
|
"# train_data = train_data.sort_values(by='trade_date')\n",
|
|||
|
|
"# all_dates = train_data['trade_date'].unique() # 获取所有唯一的 trade_date\n",
|
|||
|
|
"# split_date = all_dates[-120] # 划分点为倒数第 validation_days 天\n",
|
|||
|
|
"# print(split_date)\n",
|
|||
|
|
"# print(all_dates)\n",
|
|||
|
|
"# val_data_split = train_data[train_data['trade_date'] >= split_date] # 验证集\n",
|
|||
|
|
"\n",
|
|||
|
|
"score_df = test_data\n",
|
|||
|
|
"numeric_columns = score_df.select_dtypes(include=['float64', 'int64']).columns\n",
|
|||
|
|
"numeric_columns = [col for col in numeric_columns if col in feature_columns]\n",
|
|||
|
|
"# score_df.loc[:, numeric_columns] = scaler.transform(score_df[numeric_columns])\n",
|
|||
|
|
"score_df = cross_sectional_standardization(score_df, numeric_columns)\n",
|
|||
|
|
"\n",
|
|||
|
|
"if use_pca and pca is not None:\n",
|
|||
|
|
" categorical_feature = [col for col in feature_columns if 'cat' in col]\n",
|
|||
|
|
" numeric_features = [col for col in feature_columns if col not in categorical_feature]\n",
|
|||
|
|
" numeric_pca = pca.transform(score_df[numeric_features])\n",
|
|||
|
|
" score_df = pd.concat([pd.DataFrame(numeric_pca), score_df[categorical_feature],\n",
|
|||
|
|
" score_df[['trade_date', 'ts_code', 'future_return', 'future_score', 'label']]], axis=1)\n",
|
|||
|
|
" score_df['score'] = model.predict(score_df[[col for col in score_df.columns if\n",
|
|||
|
|
" col not in ['trade_date', 'ts_code', 'future_return', 'future_score',\n",
|
|||
|
|
" 'label']]])\n",
|
|||
|
|
"else:\n",
|
|||
|
|
" score_df['score'] = model.predict(score_df[feature_columns])\n",
|
|||
|
|
"# train_data['score'] = catboost_model.predict(train_data[feature_columns])\n",
|
|||
|
|
"score_df = score_df.loc[score_df.groupby('trade_date')['score'].idxmax()]\n",
|
|||
|
|
"# score_df = score_df[score_df['score'] > 0]\n",
|
|||
|
|
"score_df[['trade_date', 'score', 'ts_code']].to_csv('predictions_test.tsv', index=False)\n",
|
|||
|
|
"print(score_df['label'].mean())\n",
|
|||
|
|
"print(score_df[['trade_date', 'ts_code', 'future_return', 'future_score', 'label']].head(10))"
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 22,
|
|||
|
|
"id": "d86af99d15cb3bdd",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T15:03:25.791021Z",
|
|||
|
|
"start_time": "2025-04-03T15:03:25.537833Z"
|
|||
|
|
}
|
|||
|
|
},
|
|||
|
|
"outputs": [
|
|||
|
|
{
|
|||
|
|
"name": "stdout",
|
|||
|
|
"output_type": "stream",
|
|||
|
|
"text": [
|
|||
|
|
" trade_date ts_code close open future_return\n",
|
|||
|
|
"2563 2020-01-02 603577.SH 16.43 16.34 0.001819\n",
|
|||
|
|
"5354 2020-01-03 603577.SH 16.52 16.49 -0.007251\n",
|
|||
|
|
"8146 2020-01-06 603577.SH 16.43 16.55 0.007299\n",
|
|||
|
|
"10936 2020-01-07 603577.SH 16.56 16.44 -0.012689\n",
|
|||
|
|
"13725 2020-01-08 603577.SH 16.34 16.55 0.003636\n",
|
|||
|
|
"... ... ... ... ... ...\n",
|
|||
|
|
"3817882 2025-04-02 603577.SH 19.74 19.78 0.003579\n",
|
|||
|
|
"3820967 2025-04-03 603577.SH 19.63 19.56 -0.066596\n",
|
|||
|
|
"3824052 2025-04-07 603577.SH 17.66 18.92 -0.078398\n",
|
|||
|
|
"3827137 2025-04-08 603577.SH 16.34 17.73 0.047170\n",
|
|||
|
|
"3830225 2025-04-09 603577.SH 16.65 15.90 NaN\n",
|
|||
|
|
"\n",
|
|||
|
|
"[1275 rows x 5 columns]\n"
|
|||
|
|
]
|
|||
|
|
}
|
|||
|
|
],
|
|||
|
|
"source": [
|
|||
|
|
"print(df[(df['ts_code'] == '603577.SH') & (df['trade_date'] >= '2018-06-04')][\n",
|
|||
|
|
" ['trade_date', 'ts_code', 'close', 'open', 'future_return']])"
|
|||
|
|
]
|
|||
|
|
},
|
|||
|
|
{
|
|||
|
|
"cell_type": "code",
|
|||
|
|
"execution_count": 23,
|
|||
|
|
"id": "ef9d068e-67f7-412c-bbd8-cdee7492dbc9",
|
|||
|
|
"metadata": {
|
|||
|
|
"ExecuteTime": {
|
|||
|
|
"end_time": "2025-04-03T15:03:25.893508Z",
|
|||
|
|
"start_time": "2025-04-03T15:03:25.878525Z"
|
|||
|
|
}
|
|||
|
|
},
|
|||
|
|
"outputs": [
|
|||
|
|
{
|
|||
|
|
"name": "stdout",
|
|||
|
|
"output_type": "stream",
|
|||
|
|
"text": [
|
|||
|
|
"nan\n",
|
|||
|
|
"nan\n"
|
|||
|
|
]
|
|||
|
|
}
|
|||
|
|
],
|
|||
|
|
"source": [
|
|||
|
|
"print(train_data[\"future_score\"].corr(train_data[\"label\"]))\n",
|
|||
|
|
"print(test_data[\"future_score\"].corr(test_data[\"label\"]))\n"
|
|||
|
|
]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "new_trader",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
|