{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "79a7758178bafdd3",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-03T12:46:06.987506Z",
"start_time": "2025-04-03T12:46:06.259551Z"
},
"jupyter": {
"source_hidden": true
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"e:\\PyProject\\NewStock\\main\\train\n"
]
}
],
"source": [
"%load_ext autoreload\n",
"%autoreload 2\n",
"\n",
"import gc\n",
"import os\n",
"import sys\n",
"sys.path.append('../../')\n",
"print(os.getcwd())\n",
"import pandas as pd\n",
"from main.factor.factor import get_rolling_factor, get_simple_factor\n",
"from main.utils.factor import read_industry_data\n",
"from main.utils.factor_processor import calculate_score\n",
"from main.utils.utils import read_and_merge_h5_data, merge_with_industry_data\n",
"\n",
"import warnings\n",
"\n",
"warnings.filterwarnings(\"ignore\")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "a79cafb06a7e0e43",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-03T12:47:00.212859Z",
"start_time": "2025-04-03T12:46:06.998047Z"
},
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"daily data\n",
"daily basic\n",
"inner merge on ['ts_code', 'trade_date']\n",
"stk limit\n",
"left merge on ['ts_code', 'trade_date']\n",
"money flow\n",
"left merge on ['ts_code', 'trade_date']\n",
"cyq perf\n",
"left merge on ['ts_code', 'trade_date']\n",
"<class 'pandas.core.frame.DataFrame'>\n",
"RangeIndex: 8509852 entries, 0 to 8509851\n",
"Data columns (total 32 columns):\n",
" #   Column         Dtype         \n",
"---  ------         -----         \n",
" 0   ts_code        object        \n",
" 1   trade_date     datetime64[ns]\n",
" 2   open           float64       \n",
" 3   close          float64       \n",
" 4   high           float64       \n",
" 5   low            float64       \n",
" 6   vol            float64       \n",
" 7   pct_chg        float64       \n",
" 8   turnover_rate  float64       \n",
" 9   pe_ttm         float64       \n",
" 10  circ_mv        float64       \n",
" 11  total_mv       float64       \n",
" 12  volume_ratio   float64       \n",
" 13  is_st          bool          \n",
" 14  up_limit       float64       \n",
" 15  down_limit     float64       \n",
" 16  buy_sm_vol     float64       \n",
" 17  sell_sm_vol    float64       \n",
" 18  buy_lg_vol     float64       \n",
" 19  sell_lg_vol    float64       \n",
" 20  buy_elg_vol    float64       \n",
" 21  sell_elg_vol   float64       \n",
" 22  net_mf_vol     float64       \n",
" 23  his_low        float64       \n",
" 24  his_high       float64       \n",
" 25  cost_5pct      float64       \n",
" 26  cost_15pct     float64       \n",
" 27  cost_50pct     float64       \n",
" 28  cost_85pct     float64       \n",
" 29  cost_95pct     float64       \n",
" 30  weight_avg     float64       \n",
" 31  winner_rate    float64       \n",
"dtypes: bool(1), datetime64[ns](1), float64(29), object(1)\n",
"memory usage: 2.0+ GB\n",
"None\n"
]
}
],
"source": [
"from main.utils.utils import read_and_merge_h5_data\n",
"\n",
"print('daily data')\n",
"df = read_and_merge_h5_data('../../data/daily_data.h5', key='daily_data',\n",
"                            columns=['ts_code', 'trade_date', 'open', 'close', 'high', 'low', 'vol', 'pct_chg'],\n",
"                            df=None)\n",
"\n",
"print('daily basic')\n",
"df = read_and_merge_h5_data('../../data/daily_basic.h5', key='daily_basic',\n",
"                            columns=['ts_code', 'trade_date', 'turnover_rate', 'pe_ttm', 'circ_mv', 'total_mv', 'volume_ratio',\n",
"                                     'is_st'], df=df, join='inner')\n",
"\n",
"print('stk limit')\n",
"df = read_and_merge_h5_data('../../data/stk_limit.h5', key='stk_limit',\n",
"                            columns=['ts_code', 'trade_date', 'pre_close', 'up_limit', 'down_limit'],\n",
"                            df=df)\n",
"print('money flow')\n",
"df = read_and_merge_h5_data('../../data/money_flow.h5', key='money_flow',\n",
"                            columns=['ts_code', 'trade_date', 'buy_sm_vol', 'sell_sm_vol', 'buy_lg_vol', 'sell_lg_vol',\n",
"                                     'buy_elg_vol', 'sell_elg_vol', 'net_mf_vol'],\n",
"                            df=df)\n",
"print('cyq perf')\n",
"df = read_and_merge_h5_data('../../data/cyq_perf.h5', key='cyq_perf',\n",
"                            columns=['ts_code', 'trade_date', 'his_low', 'his_high', 'cost_5pct', 'cost_15pct',\n",
"                                     'cost_50pct',\n",
"                                     'cost_85pct', 'cost_95pct', 'weight_avg', 'winner_rate'],\n",
"                            df=df)\n",
"print(df.info())"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "cac01788dac10678",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-03T12:47:10.527104Z",
"start_time": "2025-04-03T12:47:00.488715Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"industry\n"
]
}
],
"source": [
"print('industry')\n",
"industry_df = read_and_merge_h5_data('../../data/industry_data.h5', key='industry_data',\n",
"                                     columns=['ts_code', 'l2_code', 'in_date'],\n",
"                                     df=None, on=['ts_code'], join='left')\n",
"\n",
"\n",
"def merge_with_industry_data(df, industry_df):\n",
"    # Make sure both date fields are datetime\n",
"    df['trade_date'] = pd.to_datetime(df['trade_date'])\n",
"    industry_df['in_date'] = pd.to_datetime(industry_df['in_date'])\n",
"\n",
"    # Sort industry_df by in_date, then ts_code\n",
"    industry_df_sorted = industry_df.sort_values(['in_date', 'ts_code'])\n",
"\n",
"    # Sort the original df by trade_date, then ts_code\n",
"    df_sorted = df.sort_values(['trade_date', 'ts_code'])\n",
"\n",
"    # merge_asof with direction='backward': pick the latest industry record\n",
"    # whose in_date falls on or before each trade_date\n",
"    merged = pd.merge_asof(\n",
"        df_sorted,\n",
"        industry_df_sorted,\n",
"        by='ts_code',  # match within each ts_code\n",
"        left_on='trade_date',\n",
"        right_on='in_date',\n",
"        direction='backward'\n",
"    )\n",
"\n",
"    # Earliest in_date record for each ts_code\n",
"    min_in_date_per_ts = (industry_df_sorted\n",
"                          .groupby('ts_code')\n",
"                          .first()\n",
"                          .reset_index()[['ts_code', 'l2_code']])\n",
"\n",
"    # Backfill rows that found no match (trade_date earlier than every in_date)\n",
"    merged['l2_code'] = merged['l2_code'].fillna(\n",
"        merged['ts_code'].map(min_in_date_per_ts.set_index('ts_code')['l2_code'])\n",
"    )\n",
"\n",
"    # Keep the needed columns and reset the index\n",
"    result = merged.reset_index(drop=True)\n",
"    return result\n",
"\n",
"\n",
"# Example usage\n",
"df = merge_with_industry_data(df, industry_df)\n",
"# print(mdf[mdf['ts_code'] == '600751.SH'][['ts_code', 'trade_date', 'l2_code']])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "c4e9e1d31da6dba6",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-03T12:47:10.719252Z",
"start_time": "2025-04-03T12:47:10.541247Z"
},
"jupyter": {
"source_hidden": true
}
},
"outputs": [],
"source": [
"def calculate_indicators(df):\n",
"    \"\"\"\n",
"    Compute per-index indicators: daily return, RSI(14), MACD, plus four\n",
"    sentiment factors (up ratio, volume change rate, volatility, amount change rate).\n",
"    \"\"\"\n",
"    df = df.sort_values('trade_date')\n",
"    df['daily_return'] = (df['close'] - df['pre_close']) / df['pre_close'] * 100\n",
" # df['5_day_ma'] = df['close'].rolling(window=5).mean()\n",
"    delta = df['close'].diff()\n",
"    gain = delta.where(delta > 0, 0)\n",
"    loss = -delta.where(delta < 0, 0)\n",
"    avg_gain = gain.rolling(window=14).mean()\n",
"    avg_loss = loss.rolling(window=14).mean()\n",
"    rs = avg_gain / avg_loss\n",
"    df['RSI'] = 100 - (100 / (1 + rs))\n",
"\n",
"    # MACD\n",
"    ema12 = df['close'].ewm(span=12, adjust=False).mean()\n",
"    ema26 = df['close'].ewm(span=26, adjust=False).mean()\n",
"    df['MACD'] = ema12 - ema26\n",
"    df['Signal_line'] = df['MACD'].ewm(span=9, adjust=False).mean()\n",
"    df['MACD_hist'] = df['MACD'] - df['Signal_line']\n",
"\n",
"    # 4. Sentiment factor 1: up ratio\n",
"    df['up_ratio'] = df['daily_return'].apply(lambda x: 1 if x > 0 else 0)\n",
"    df['up_ratio_20d'] = df['up_ratio'].rolling(window=20).mean()  # share of up days over the past 20 days\n",
"\n",
"    # 5. Sentiment factor 2: volume change rate\n",
"    df['volume_mean'] = df['vol'].rolling(window=20).mean()  # 20-day average volume\n",
"    df['volume_change_rate'] = (df['vol'] - df['volume_mean']) / df['volume_mean'] * 100  # volume change rate\n",
"\n",
"    # 6. Sentiment factor 3: volatility\n",
"    df['volatility'] = df['daily_return'].rolling(window=20).std()  # 20-day std of daily returns\n",
"\n",
"    # 7. Sentiment factor 4: amount change rate\n",
"    df['amount_mean'] = df['amount'].rolling(window=20).mean()  # 20-day average turnover amount\n",
"    df['amount_change_rate'] = (df['amount'] - df['amount_mean']) / df['amount_mean'] * 100  # amount change rate\n",
"\n",
"    return df\n",
"\n",
"\n",
"def generate_index_indicators(h5_filename):\n",
"    df = pd.read_hdf(h5_filename, key='index_data')\n",
"    df['trade_date'] = pd.to_datetime(df['trade_date'], format='%Y%m%d')\n",
"    df = df.sort_values('trade_date')\n",
"\n",
"    # Compute the indicators for each ts_code\n",
"    df_indicators = []\n",
"    for ts_code in df['ts_code'].unique():\n",
"        df_index = df[df['ts_code'] == ts_code].copy()\n",
"        df_index = calculate_indicators(df_index)\n",
"        df_indicators.append(df_index)\n",
"\n",
"    # Concatenate the results for all indexes\n",
"    df_all_indicators = pd.concat(df_indicators, ignore_index=True)\n",
"\n",
"    # Keep trade_date and collapse each day's rows into one row per ts_code\n",
"    df_final = df_all_indicators.pivot_table(\n",
"        index='trade_date',\n",
"        columns='ts_code',\n",
"        values=['daily_return', 'RSI', 'MACD', 'Signal_line',\n",
"                'MACD_hist', 'up_ratio_20d', 'volume_change_rate', 'volatility',\n",
"                'amount_change_rate', 'amount_mean'],\n",
"        aggfunc='last'\n",
"    )\n",
"\n",
"    df_final.columns = [f\"{col[1]}_{col[0]}\" for col in df_final.columns]\n",
"    df_final = df_final.reset_index()\n",
"\n",
"    return df_final\n",
"\n",
"\n",
"# Run it\n",
"h5_filename = '../../data/index_data.h5'\n",
"index_data = generate_index_indicators(h5_filename)\n",
"index_data = index_data.dropna()\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "a735bc02ceb4d872",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-03T12:47:10.821169Z",
"start_time": "2025-04-03T12:47:10.751831Z"
}
},
"outputs": [],
"source": [
"import talib\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "53f86ddc0677a6d7",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-03T12:47:15.944254Z",
"start_time": "2025-04-03T12:47:10.826179Z"
},
"jupyter": {
"source_hidden": true
},
"scrolled": true
},
"outputs": [],
"source": [
"from main.utils.factor import get_act_factor\n",
"\n",
"\n",
"def read_industry_data(h5_filename):\n",
"    # Read all industry rows from the H5 file (stored under the 'sw_daily' key)\n",
"    industry_data = pd.read_hdf(h5_filename, key='sw_daily', columns=[\n",
"        'ts_code', 'trade_date', 'open', 'close', 'high', 'low', 'pe', 'pb', 'vol'\n",
"    ])\n",
"    industry_data = industry_data.sort_values(by=['ts_code', 'trade_date'])\n",
"    industry_data = industry_data.reset_index(drop=True)\n",
"    industry_data['trade_date'] = pd.to_datetime(industry_data['trade_date'], format='%Y%m%d')\n",
"\n",
"    grouped = industry_data.groupby('ts_code', group_keys=False)\n",
"    industry_data['obv'] = grouped.apply(\n",
"        lambda x: pd.Series(talib.OBV(x['close'].values, x['vol'].values), index=x.index)\n",
"    )\n",
"    industry_data['return_5'] = grouped['close'].apply(lambda x: x / x.shift(5) - 1)\n",
"    industry_data['return_20'] = grouped['close'].apply(lambda x: x / x.shift(20) - 1)\n",
"\n",
"    industry_data = get_act_factor(industry_data, cat=False)\n",
"    industry_data = industry_data.sort_values(by=['trade_date', 'ts_code'])\n",
"\n",
"    # # Deviation of each ts_code's factor from the daily cross-sectional mean\n",
"    # factor_columns = ['obv', 'return_5', 'return_20', 'act_factor1', 'act_factor2', 'act_factor3', 'act_factor4']\n",
"    # \n",
"    # for factor in factor_columns:\n",
"    #     if factor in industry_data.columns:\n",
"    #         industry_data[f'{factor}_deviation'] = industry_data.groupby('trade_date')[factor].transform(\n",
"    #             lambda x: x - x.mean())\n",
"\n",
"    industry_data['return_5_percentile'] = industry_data.groupby('trade_date')['return_5'].transform(\n",
"        lambda x: x.rank(pct=True))\n",
"    industry_data['return_20_percentile'] = industry_data.groupby('trade_date')['return_20'].transform(\n",
"        lambda x: x.rank(pct=True))\n",
"    industry_data = industry_data.drop(columns=['open', 'close', 'high', 'low', 'pe', 'pb', 'vol'])\n",
"\n",
"    industry_data = industry_data.rename(\n",
"        columns={col: f'industry_{col}' for col in industry_data.columns if col not in ['ts_code', 'trade_date']})\n",
"\n",
"    industry_data = industry_data.rename(columns={'ts_code': 'cat_l2_code'})\n",
"    return industry_data\n",
"\n",
"\n",
"industry_df = read_industry_data('../../data/sw_daily.h5')\n"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "dbe2fd8021b9417f",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-03T12:47:15.969344Z",
"start_time": "2025-04-03T12:47:15.963327Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['ts_code', 'open', 'close', 'high', 'low', 'circ_mv', 'total_mv', 'is_st', 'up_limit', 'down_limit', 'buy_sm_vol', 'sell_sm_vol', 'buy_lg_vol', 'sell_lg_vol', 'buy_elg_vol', 'sell_elg_vol', 'net_mf_vol', 'his_low', 'his_high', 'cost_5pct', 'cost_15pct', 'cost_50pct', 'cost_85pct', 'cost_95pct', 'weight_avg', 'in_date']\n"
]
}
],
"source": [
"origin_columns = df.columns.tolist()\n",
"origin_columns = [col for col in origin_columns if\n",
"                  col not in ['turnover_rate', 'pe_ttm', 'volume_ratio', 'vol', 'pct_chg', 'l2_code', 'winner_rate']]\n",
"origin_columns = [col for col in origin_columns if col not in index_data.columns]\n",
"origin_columns = [col for col in origin_columns if 'cyq' not in col]\n",
"print(origin_columns)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "85c3e3d0235ffffa",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-03T12:47:16.089879Z",
"start_time": "2025-04-03T12:47:15.990101Z"
}
},
"outputs": [],
"source": [
"fina_indicator_df = read_and_merge_h5_data('../../data/fina_indicator.h5', key='fina_indicator',\n",
"                                           columns=['ts_code', 'ann_date', 'undist_profit_ps', 'ocfps', 'bps'],\n",
"                                           df=None)\n",
"cashflow_df = read_and_merge_h5_data('../../data/cashflow.h5', key='cashflow',\n",
"                                     columns=['ts_code', 'ann_date', 'n_cashflow_act'],\n",
"                                     df=None)\n",
"balancesheet_df = read_and_merge_h5_data('../../data/balancesheet.h5', key='balancesheet',\n",
"                                         columns=['ts_code', 'ann_date', 'money_cap', 'total_liab'],\n",
"                                         df=None)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "a1c5858e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Index(['ts_code', 'ann_date', 'money_cap', 'total_liab'], dtype='object')\n"
]
}
],
"source": [
"print(balancesheet_df.columns)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "92d84ce15a562ec6",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-03T13:08:01.612695Z",
"start_time": "2025-04-03T12:47:16.121802Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Computing factors: AR, BR (in place)...\n",
"Factors AR, BR computed successfully.\n",
"Factor AR, BR computation finished.\n",
"Using 'ann_date' as the financial-data effective date.\n",
"Using 'ann_date' as the financial-data effective date.\n",
"Using 'ann_date' as the financial-data effective date.\n",
"Using 'ann_date' as the financial-data effective date.\n",
"Warning: removed 366 rows from financial_data_subset because 'ts_code' or 'ann_date' contained nulls.\n",
"Computing BBI...\n",
"Index(['ts_code', 'trade_date', 'open', 'close', 'high', 'low', 'vol',\n",
"       'pct_chg', 'turnover_rate', 'pe_ttm', 'circ_mv', 'total_mv',\n",
"       'volume_ratio', 'is_st', 'up_limit', 'down_limit', 'buy_sm_vol',\n",
"       'sell_sm_vol', 'buy_lg_vol', 'sell_lg_vol', 'buy_elg_vol',\n",
"       'sell_elg_vol', 'net_mf_vol', 'his_low', 'his_high', 'cost_5pct',\n",
"       'cost_15pct', 'cost_50pct', 'cost_85pct', 'cost_95pct', 'weight_avg',\n",
"       'winner_rate', 'cat_l2_code', 'undist_profit_ps', 'ocfps', 'AR', 'BR',\n",
"       'AR_BR', 'log_circ_mv', 'cashflow_to_ev_factor', 'book_to_price_ratio',\n",
"       'turnover_rate_mean_5', 'variance_20', 'bbi_ratio_factor',\n",
"       'future_return', 'future_volatility', 'label', 'lg_elg_net_buy_vol',\n",
"       'flow_lg_elg_intensity', 'sm_net_buy_vol', 'flow_divergence_diff',\n",
"       'flow_divergence_ratio', 'total_buy_vol', 'lg_elg_buy_prop',\n",
"       'flow_struct_buy_change', 'lg_elg_net_buy_vol_change',\n",
"       'flow_lg_elg_accel', 'chip_concentration_range', 'chip_skewness',\n",
"       'floating_chip_proxy', 'cost_support_15pct_change',\n",
"       'cat_winner_price_zone', 'flow_chip_consistency',\n",
"       'profit_taking_vs_absorb', '_is_positive', '_is_negative',\n",
"       'cat_is_positive', '_pos_returns', '_neg_returns', '_pos_returns_sq',\n",
"       '_neg_returns_sq', 'upside_vol', 'downside_vol', 'vol_ratio',\n",
"       'return_skew', 'return_kurtosis', 'volume_change_rate',\n",
"       'cat_volume_breakout', 'turnover_deviation', 'cat_turnover_spike',\n",
"       'avg_volume_ratio', 'cat_volume_ratio_breakout', 'vol_spike',\n",
"       'vol_std_5', 'atr_14', 'atr_6', 'obv'],\n",
"      dtype='object')\n",
"<class 'pandas.core.frame.DataFrame'>\n",
"Index: 4450396 entries, 0 to 4450395\n",
"Columns: 123 entries, ts_code to mv_growth\n",
"dtypes: bool(6), datetime64[ns](1), float64(110), int32(3), int64(1), object(2)\n",
"memory usage: 3.9+ GB\n",
"None\n",
"['ts_code', 'trade_date', 'open', 'close', 'high', 'low', 'vol', 'pct_chg', 'turnover_rate', 'pe_ttm', 'circ_mv', 'total_mv', 'volume_ratio', 'is_st', 'up_limit', 'down_limit', 'buy_sm_vol', 'sell_sm_vol', 'buy_lg_vol', 'sell_lg_vol', 'buy_elg_vol', 'sell_elg_vol', 'net_mf_vol', 'his_low', 'his_high', 'cost_5pct', 'cost_15pct', 'cost_50pct', 'cost_85pct', 'cost_95pct', 'weight_avg', 'winner_rate', 'cat_l2_code', 'undist_profit_ps', 'ocfps', 'AR', 'BR', 'AR_BR', 'log_circ_mv', 'cashflow_to_ev_factor', 'book_to_price_ratio', 'turnover_rate_mean_5', 'variance_20', 'bbi_ratio_factor', 'future_return', 'future_volatility', 'label', 'lg_elg_net_buy_vol', 'flow_lg_elg_intensity', 'sm_net_buy_vol', 'flow_divergence_diff', 'flow_divergence_ratio', 'total_buy_vol', 'lg_elg_buy_prop', 'flow_struct_buy_change', 'lg_elg_net_buy_vol_change', 'flow_lg_elg_accel', 'chip_concentration_range', 'chip_skewness', 'floating_chip_proxy', 'cost_support_15pct_change', 'cat_winner_price_zone', 'flow_chip_consistency', 'profit_taking_vs_absorb', 'cat_is_positive', 'upside_vol', 'downside_vol', 'vol_ratio', 'return_skew', 'return_kurtosis', 'volume_change_rate', 'cat_volume_breakout', 'turnover_deviation', 'cat_turnover_spike', 'avg_volume_ratio', 'cat_volume_ratio_breakout', 'vol_spike', 'vol_std_5', 'atr_14', 'atr_6', 'obv', 'maobv_6', 'rsi_3', 'return_5', 'return_20', 'std_return_5', 'std_return_90', 'std_return_90_2', '_ema_5', '_ema_13', '_ema_20', '_ema_60', 'act_factor1', 'act_factor2', 'act_factor3', 'act_factor4', 'rank_act_factor1', 'rank_act_factor2', 'rank_act_factor3', 'cov', 'delta_cov', '_stddev_close', '_rank_stddev', 'alpha_22_improved', 'alpha_003', 'alpha_007', 'alpha_013', 'cat_up_limit', 'cat_down_limit', 'up_limit_count_10d', 'down_limit_count_10d', 'consecutive_up_limit', 'vol_break', 'weight_roc5', 'price_cost_divergence', 'smallcap_concentration', 'cost_stability', 'high_cost_break_days', 'liquidity_risk', 'turnover_std', 'mv_volatility', 'volume_growth', 'mv_growth']\n"
]
}
],
"source": [
"\n",
"import numpy as np\n",
"from main.factor.factor import *\n",
"\n",
"def filter_data(df):\n",
" # df = df.groupby('trade_date').apply(lambda x: x.nlargest(1000, 'act_factor1'))\n",
"    df = df[~df['is_st']]\n",
"    df = df[~df['ts_code'].str.endswith('BJ')]\n",
"    df = df[~df['ts_code'].str.startswith('30')]\n",
"    df = df[~df['ts_code'].str.startswith('68')]\n",
"    df = df[~df['ts_code'].str.startswith('8')]\n",
"    df = df[df['trade_date'] >= '2019-01-01']\n",
"    if 'in_date' in df.columns:\n",
"        df = df.drop(columns=['in_date'])\n",
"    df = df.reset_index(drop=True)\n",
"    return df\n",
"\n",
"gc.collect()\n",
"\n",
"df = filter_data(df)\n",
"df = df.sort_values(by=['ts_code', 'trade_date'])\n",
"df = add_financial_factor(df, fina_indicator_df, factor_value_col='undist_profit_ps')\n",
"df = add_financial_factor(df, fina_indicator_df, factor_value_col='ocfps')\n",
"calculate_arbr(df, N=26)\n",
"df['log_circ_mv'] = np.log(df['circ_mv'])\n",
"df = calculate_cashflow_to_ev_factor(df, cashflow_df, balancesheet_df)\n",
"df = caculate_book_to_price_ratio(df, fina_indicator_df)\n",
"df = turnover_rate_n(df, n=5)\n",
"df = variance_n(df, n=20)\n",
"df = bbi_ratio_factor(df)\n",
"# df, _ = get_rolling_factor(df)\n",
"\n",
"# lg_flow_mom_corr(df, N=20, M=60)\n",
"# lg_flow_accel(df)\n",
"# profit_pressure(df)\n",
"# underwater_resistance(df)\n",
"# cost_conc_std(df, N=20)\n",
"# profit_decay(df, N=20)\n",
"# vol_amp_loss(df, N=20)\n",
"# vol_drop_profit_cnt(df, N=20, M=5)\n",
"# lg_flow_vol_interact(df, N=20)\n",
"# cost_break_confirm_cnt(df, M=5)\n",
"# atr_norm_channel_pos(df, N=14)\n",
"# turnover_diff_skew(df, N=20)\n",
"# lg_sm_flow_diverge(df, N=20)\n",
"# pullback_strong(df, N=20, M=20)\n",
"# vol_wgt_hist_pos(df, N=20)\n",
"# vol_adj_roc(df, N=20)\n",
"\n",
"df = df.rename(columns={'l1_code': 'cat_l1_code'})\n",
"df = df.rename(columns={'l2_code': 'cat_l2_code'})\n",
"\n",
"# df = df.merge(index_data, on='trade_date', how='left')\n",
"\n",
"print(df.info())\n",
"print(df.columns.tolist())"
]
},
{
"cell_type": "code",
"execution_count": 90,
"id": "b87b938028afa206",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-03T13:08:03.658725Z",
"start_time": "2025-04-03T13:08:02.469611Z"
}
},
"outputs": [],
"source": [
"from scipy.stats import ks_2samp, wasserstein_distance\n",
"\n",
"\n",
"def remove_shifted_features(train_data, test_data, feature_columns, ks_threshold=0.05, wasserstein_threshold=0.1,\n",
" importance_threshold=0.05):\n",
"    dropped_features = []\n",
"\n",
"    # Measure distribution drift\n",
"    numeric_columns = train_data.select_dtypes(include=['float64', 'int64']).columns\n",
"    numeric_columns = [col for col in numeric_columns if col in feature_columns]\n",
"    for feature in numeric_columns:\n",
"        ks_stat, p_value = ks_2samp(train_data[feature], test_data[feature])\n",
"        wasserstein_dist = wasserstein_distance(train_data[feature], test_data[feature])\n",
"\n",
"        if p_value < ks_threshold or wasserstein_dist > wasserstein_threshold:\n",
"            dropped_features.append(feature)\n",
"\n",
"    print(f\"Detected {len(dropped_features)} potentially shifted features: {dropped_features}\")\n",
"\n",
"    # Apply the thresholds for the final selection\n",
"    filtered_features = [f for f in feature_columns if f not in dropped_features]\n",
"\n",
"    return filtered_features, dropped_features\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 91,
"id": "f4f16d63ad18d1bc",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-03T13:08:03.670700Z",
"start_time": "2025-04-03T13:08:03.665739Z"
}
},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"import statsmodels.api as sm  # for the neutralization regressions\n",
"from tqdm import tqdm  # optional, progress bars\n",
"\n",
"# --- Constants ---\n",
"epsilon = 1e-10  # guards against division by zero\n",
"\n",
"# --- 1. Median absolute deviation (MAD) winsorization ---\n",
"\n",
"def cs_mad_filter(df: pd.DataFrame,\n",
"                  features: list,\n",
"                  k: float = 3.0,\n",
"                  scale_factor: float = 1.4826):\n",
"    \"\"\"\n",
"    Cross-sectional MAD winsorization of the given feature columns (in place).\n",
"\n",
"    Method: for each daily cross-section, compute the median and the MAD, then\n",
"    clip values outside [median - k * scale * MAD, median + k * scale * MAD]\n",
"    to the boundary (winsorization).\n",
"    scale_factor=1.4826 makes the MAD comparable to a normal-distribution std.\n",
"\n",
"    Args:\n",
"        df (pd.DataFrame): input frame; must contain 'trade_date' and the features.\n",
"        features (list): names of the feature columns to process.\n",
"        k (float): number of MADs defining the boundary. Default 3.0.\n",
"        scale_factor (float): MAD scaling factor. Default 1.4826.\n",
"\n",
"    WARNING: this function modifies the input DataFrame 'df' in place.\n",
"    \"\"\"\n",
"    print(f\"Starting cross-sectional MAD winsorization (k={k})...\")\n",
"    if not all(col in df.columns for col in features):\n",
"        missing = [col for col in features if col not in df.columns]\n",
"        print(f\"Error: DataFrame is missing feature columns: {missing}. Skipping winsorization.\")\n",
"        return\n",
"\n",
"    grouped = df.groupby('trade_date')\n",
"\n",
"    for col in tqdm(features, desc=\"MAD Filtering\"):\n",
"        try:\n",
"            # Cross-sectional median\n",
"            median = grouped[col].transform('median')\n",
"            # Cross-sectional MAD (median absolute deviation from the median)\n",
"            mad = (df[col] - median).abs().groupby(df['trade_date']).transform('median')\n",
"\n",
"            # Upper and lower boundaries\n",
"            lower_bound = median - k * scale_factor * mad\n",
"            upper_bound = median + k * scale_factor * mad\n",
"\n",
"            # Clip in place\n",
"            df[col] = np.clip(df[col], lower_bound, upper_bound)\n",
"\n",
"        except KeyError:\n",
"            print(f\"Warning: column '{col}' is missing or failed in the groupby; skipping its MAD step.\")\n",
"        except Exception as e:\n",
"            print(f\"Warning: error while processing column '{col}': {e}; skipping its MAD step.\")\n",
"\n",
"    print(\"Cross-sectional MAD winsorization finished.\")\n",
"\n",
"\n",
"# --- 2. Industry and market-cap neutralization ---\n",
"\n",
"def cs_neutralize_industry_cap(df: pd.DataFrame,\n",
"                               features: list,\n",
"                               industry_col: str = 'cat_l2_code',\n",
"                               market_cap_col: str = 'circ_mv'):\n",
"    \"\"\"\n",
"    Cross-sectional industry and log-market-cap neutralization (in place).\n",
"    Runs the OLS regression: feature ~ 1 + log(market_cap) + C(industry)\n",
"    and writes the residuals back into the original feature columns.\n",
"\n",
"    Args:\n",
"        df (pd.DataFrame): input frame; must contain 'trade_date', the features,\n",
"                           industry_col and market_cap_col.\n",
"        features (list): names of the feature columns to process.\n",
"        industry_col (str): industry classification column.\n",
"        market_cap_col (str): free-float market-cap column.\n",
"\n",
"    WARNING: this function modifies the feature columns of 'df' in place.\n",
"             It is computationally heavy and may take a while.\n",
"             Requires statsmodels (pip install statsmodels).\n",
"    \"\"\"\n",
"    print(\"Starting cross-sectional industry/market-cap neutralization...\")\n",
"    required_cols = features + ['trade_date', industry_col, market_cap_col]\n",
"    if not all(col in df.columns for col in required_cols):\n",
"        missing = [col for col in required_cols if col not in df.columns]\n",
"        print(f\"Error: DataFrame is missing required columns: {missing}. Cannot neutralize.\")\n",
"        return\n",
"\n",
"    # Preprocessing: log market cap; industry-code NaNs could be handled here\n",
"    log_cap_col = '_log_market_cap'\n",
"    df[log_cap_col] = np.log1p(df[market_cap_col])  # log1p copes with zeros\n",
"    # df[industry_col] = df[industry_col].cat.add_categories('UnknownIndustry')\n",
"    # df[industry_col] = df[industry_col].fillna('UnknownIndustry')  # fill industry NaN\n",
"    # df[industry_col] = df[industry_col].astype('category')  # OLS handles categories\n",
"\n",
"    dates = df['trade_date'].unique()\n",
"    all_residuals = []  # residuals collected across all dates\n",
"\n",
"    for date in tqdm(dates, desc=\"Neutralizing\"):\n",
"        daily_data = df.loc[df['trade_date'] == date, features + [log_cap_col, industry_col]].copy()  # .loc gives a copy\n",
"\n",
"        # Regressors X: constant + log market cap + industry dummies\n",
"        X = daily_data[[log_cap_col]]\n",
"        X = sm.add_constant(X, prepend=True)  # add the intercept\n",
"        # Industry dummies (drop_first=True avoids collinearity)\n",
"        industry_dummies = pd.get_dummies(daily_data[industry_col], prefix=industry_col, drop_first=True)\n",
"        industry_dummies = industry_dummies.astype(int)\n",
"        X = pd.concat([X, industry_dummies], axis=1)\n",
"\n",
"        daily_residuals = daily_data[[col for col in features]].copy()  # frame to hold the residuals\n",
"\n",
"        for col in features:\n",
"            Y = daily_data[col]\n",
"\n",
"            # Handle NaNs so X and Y are valid on the same rows\n",
"            valid_mask = Y.notna() & X.notna().all(axis=1)\n",
"            if valid_mask.sum() < (X.shape[1] + 1):  # too few points to fit the model\n",
"                print(f\"Warning: date {date}, feature {col} has too few valid rows ({valid_mask.sum()}); cannot neutralize, filling NaN.\")\n",
"                daily_residuals[col] = np.nan\n",
"                continue\n",
"\n",
"            Y_valid = Y[valid_mask]\n",
"            X_valid = X[valid_mask]\n",
"\n",
"            # Run the OLS regression\n",
"            try:\n",
"                model = sm.OLS(Y_valid.to_numpy(), X_valid.to_numpy())\n",
"                results = model.fit()\n",
"                # Write the residuals back to the matching rows\n",
"                daily_residuals.loc[valid_mask, col] = results.resid\n",
"                daily_residuals.loc[~valid_mask, col] = np.nan  # originally invalid rows stay NaN\n",
"            except Exception as e:\n",
"                print(f\"Warning: date {date}, feature {col} regression failed: {e}; filling NaN.\")\n",
"                daily_residuals[col] = np.nan\n",
"                continue\n",
"\n",
"        all_residuals.append(daily_residuals)\n",
"\n",
"    # Combine the residuals across all dates\n",
"    if all_residuals:\n",
"        residuals_df = pd.concat(all_residuals)\n",
"        # Write the residuals back into the original df (in place).\n",
"        # update() suits an index-aligned in-place write better than merge;\n",
"        # residuals_df keeps the indices of the corresponding rows of df.\n",
"        df.update(residuals_df)\n",
"    else:\n",
"        print(\"No valid residuals to merge.\")\n",
"\n",
"\n",
"    # Drop the temporary column\n",
"    df.drop(columns=[log_cap_col], inplace=True)\n",
"    print(\"Cross-sectional industry/market-cap neutralization finished.\")\n",
"\n",
"\n",
"# --- 3. Z-score standardization ---\n",
"\n",
"def cs_zscore_standardize(df: pd.DataFrame, features: list, epsilon: float = 1e-10):\n",
"    \"\"\"\n",
"    Cross-sectional z-score standardization of the given features (in place).\n",
"    Method: Z = (value - cross_sectional_mean) / (cross_sectional_std + epsilon)\n",
"\n",
"    Args:\n",
"        df (pd.DataFrame): input frame; must contain 'trade_date' and the features.\n",
"        features (list): names of the feature columns to process.\n",
"        epsilon (float): small constant guarding against division by zero.\n",
"\n",
"    WARNING: this function modifies the input DataFrame 'df' in place.\n",
"    \"\"\"\n",
"    print(\"Starting cross-sectional z-score standardization...\")\n",
"    if not all(col in df.columns for col in features):\n",
"        missing = [col for col in features if col not in df.columns]\n",
"        print(f\"Error: DataFrame is missing feature columns: {missing}. Skipping standardization.\")\n",
"        return\n",
"\n",
"    grouped = df.groupby('trade_date')\n",
"\n",
"    for col in tqdm(features, desc=\"Standardizing\"):\n",
"        try:\n",
"            # Cross-sectional mean and std via transform\n",
"            mean = grouped[col].transform('mean')\n",
"            std = grouped[col].transform('std')\n",
"\n",
"            # Z-score, assigned in place\n",
"            df[col] = (df[col] - mean) / (std + epsilon)\n",
"\n",
"        except KeyError:\n",
"            print(f\"Warning: column '{col}' is missing or failed in the groupby; skipping its standardization.\")\n",
"        except Exception as e:\n",
"            print(f\"Warning: error while processing column '{col}': {e}; skipping its standardization.\")\n",
"\n",
"    print(\"Cross-sectional z-score standardization finished.\")\n",
"\n",
"def fill_nan_with_daily_median(df: pd.DataFrame, feature_columns: list[str]) -> pd.DataFrame:\n",
"    \"\"\"\n",
"    Fill NaNs in the given feature columns with each day's cross-sectional median.\n",
"\n",
"    Args:\n",
"        df (pd.DataFrame): multi-day frame containing 'trade_date' and the feature columns.\n",
"        feature_columns (list[str]): names of the columns to fill.\n",
"\n",
"    Returns:\n",
"        pd.DataFrame: frame with the filled feature columns; operates on a copy of the input.\n",
"    \"\"\"\n",
"    processed_df = df.copy()  # work on a copy, keep the original intact\n",
"\n",
"    # Make trade_date a datetime so the grouping is correct\n",
"    processed_df['trade_date'] = pd.to_datetime(processed_df['trade_date'])\n",
"\n",
"    def _fill_daily_nan(group):\n",
"        # group is the DataFrame of a single trading day\n",
"\n",
"        # Loop over the requested feature columns\n",
"        for feature_col in feature_columns:\n",
"            # Check the column exists in this group\n",
"            if feature_col in group.columns:\n",
"                # That day's median of the feature\n",
"                median_val = group[feature_col].median()\n",
"\n",
"                # Fill the column's NaNs with the daily median\n",
"                # (assignment avoids the chained-indexing pitfalls of inplace fillna)\n",
"                group[feature_col] = group[feature_col].fillna(median_val)\n",
"            else:\n",
"                print(f\"Warning: Feature column '{feature_col}' not found in daily group for {group['trade_date'].iloc[0]}. Skipping.\")\n",
"\n",
"        return group\n",
"\n",
"    # Group by trading day and apply the daily fill\n",
"    # group_keys=False keeps the group key out of the result index\n",
"    filled_df = processed_df.groupby('trade_date', group_keys=False).apply(_fill_daily_nan)\n",
"\n",
"    return filled_df"
]
},
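{
"cell_type": "code",
"execution_count": null,
"id": "madfilterdemo0001",
"metadata": {},
"outputs": [],
"source": [
"# Minimal sanity check for cs_mad_filter (illustrative only, not part of the\n",
"# original pipeline). On a single-date cross-section the gross outlier should\n",
"# be clipped to median + k * 1.4826 * MAD while ordinary values pass through:\n",
"# median([1, 2, 3, 4, 100]) = 3 and MAD = 1, so the upper bound is 3 + 3 * 1.4826.\n",
"demo = pd.DataFrame({\n",
"    'trade_date': pd.to_datetime(['2024-01-02'] * 5),\n",
"    'f': [1.0, 2.0, 3.0, 4.0, 100.0],\n",
"})\n",
"cs_mad_filter(demo, ['f'], k=3.0)\n",
"print(demo['f'].tolist())  # the 100.0 is winsorized to ~7.45\n"
]
},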
{
"cell_type": "code",
"execution_count": 92,
"id": "40e6b68a91b30c79",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-03T13:08:04.694262Z",
"start_time": "2025-04-03T13:08:03.694904Z"
}
},
"outputs": [],
"source": [
"import pandas as pd\n",
"\n",
"\n",
"def remove_outliers_label_percentile(label: pd.Series, lower_percentile: float = 0.01, upper_percentile: float = 0.99,\n",
"                                     log=True):\n",
"    if not (0 <= lower_percentile < upper_percentile <= 1):\n",
"        raise ValueError(\"Percentile values must satisfy 0 <= lower_percentile < upper_percentile <= 1.\")\n",
"\n",
"    # Calculate lower and upper bounds based on percentiles\n",
"    lower_bound = label.quantile(lower_percentile)\n",
"    upper_bound = label.quantile(upper_percentile)\n",
"\n",
"    # Filter out values outside the bounds\n",
"    filtered_label = label[(label >= lower_bound) & (label <= upper_bound)]\n",
"\n",
"    # Print the number of removed outliers\n",
"    if log:\n",
"        print(f\"Removed {len(label) - len(filtered_label)} outliers.\")\n",
"    return filtered_label\n",
"\n",
"\n",
"def calculate_risk_adjusted_target(df, days=5):\n",
"    df = df.sort_values(by=['ts_code', 'trade_date'])\n",
"\n",
"    df['future_close'] = df.groupby('ts_code')['close'].shift(-days)\n",
"    df['future_open'] = df.groupby('ts_code')['open'].shift(-1)\n",
"    df['future_return'] = (df['future_close'] - df['future_open']) / df['future_open']\n",
"\n",
"    df['future_volatility'] = df.groupby('ts_code')['future_return'].rolling(days, min_periods=1).std().reset_index(\n",
"        level=0, drop=True)\n",
"    # Sharpe-style ratio: forward return scaled by forward volatility\n",
"    sharpe_ratio = df['future_return'] / df['future_volatility']\n",
"    sharpe_ratio.replace([np.inf, -np.inf], np.nan, inplace=True)\n",
"\n",
"    return sharpe_ratio\n",
"\n",
"\n",
"def calculate_score(df, days=5, lambda_param=1.0):\n",
"    def calculate_max_drawdown(prices):\n",
"        peak = prices.iloc[0]  # initialize the peak\n",
"        max_drawdown = 0  # initialize the max drawdown\n",
"\n",
"        for price in prices:\n",
"            if price > peak:\n",
"                peak = price  # new peak\n",
"            else:\n",
"                drawdown = (peak - price) / peak  # current drawdown\n",
"                max_drawdown = max(max_drawdown, drawdown)  # update the max drawdown\n",
"\n",
"        return max_drawdown\n",
"\n",
"    def compute_stock_score(stock_df):\n",
"        stock_df = stock_df.sort_values(by=['trade_date'])\n",
"        future_return = stock_df['future_return']\n",
"        # Volatility from the existing pct_chg field (not used in the score below)\n",
"        volatility = stock_df['pct_chg'].rolling(days).std().shift(-days)\n",
"        max_drawdown = stock_df['close'].rolling(days).apply(calculate_max_drawdown, raw=False).shift(-days)\n",
"        score = future_return - lambda_param * max_drawdown\n",
"        return score\n",
"\n",
"    # # Make sure the DataFrame is sorted by stock code and trading date\n",
"    # df = df.sort_values(by=['ts_code', 'trade_date'])\n",
"\n",
"    # Compute the score per stock\n",
"    df['score'] = df.groupby('ts_code').apply(compute_stock_score).reset_index(level=0, drop=True)\n",
"\n",
"    return df['score']\n",
"\n",
"\n",
"def remove_highly_correlated_features(df, feature_columns, threshold=0.9):\n",
"    numeric_features = df[feature_columns].select_dtypes(include=[np.number]).columns.tolist()\n",
"    if not numeric_features:\n",
"        raise ValueError(\"No numeric features found in the provided data.\")\n",
"\n",
"    corr_matrix = df[numeric_features].corr().abs()\n",
"    upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))\n",
"    to_drop = [column for column in upper.columns if any(upper[column] > threshold)]\n",
"    remaining_features = [col for col in feature_columns if col not in to_drop\n",
"                          or 'act' in col or 'af' in col]\n",
"    return remaining_features\n",
"\n",
"\n",
"from sklearn.preprocessing import StandardScaler\n",
"\n",
"\n",
"def cross_sectional_standardization(df, features):\n",
"    df_sorted = df.sort_values(by='trade_date')  # sort by date\n",
"    df_standardized = df_sorted.copy()\n",
"\n",
"    for date in df_sorted['trade_date'].unique():\n",
"        # Rows of the current date\n",
"        current_data = df_standardized[df_standardized['trade_date'] == date]\n",
"\n",
"        # Standardize only the requested features\n",
"        scaler = StandardScaler()\n",
"        standardized_values = scaler.fit_transform(current_data[features])\n",
"\n",
"        # Write the standardized values back\n",
"        df_standardized.loc[df_standardized['trade_date'] == date, features] = standardized_values\n",
"\n",
"    return df_standardized\n",
"\n",
"\n",
"import numpy as np\n",
"import pandas as pd\n",
"\n",
"\n",
"def neutralize_manual_revised(df: pd.DataFrame, features: list, industry_col: str, mkt_cap_col: str) -> pd.DataFrame:\n",
"    \"\"\"\n",
"    Hand-rolled simple regression for speed; index-aligned Series keep rows matched.\n",
"    Neutralizes each feature against market cap within each industry.\n",
"\n",
"    Args:\n",
"        df: input DataFrame containing the features, industry classification and market cap.\n",
"        features: names of the feature columns to neutralize.\n",
"        industry_col: name of the industry classification column.\n",
"        mkt_cap_col: name of the market-cap column.\n",
"\n",
"    Returns:\n",
"        The neutralized DataFrame.\n",
"    \"\"\"\n",
"\n",
"    df[mkt_cap_col] = pd.to_numeric(df[mkt_cap_col], errors='coerce')\n",
"    df_cleaned = df.dropna(subset=[mkt_cap_col]).copy()\n",
"    df_cleaned = df_cleaned[df_cleaned[mkt_cap_col] > 0].copy()\n",
"\n",
"    if df_cleaned.empty:\n",
"        print(\"Warning: DataFrame is empty after cleaning bad market-cap values.\")\n",
"        return df  # return the original (or empty) frame, depending on the pre-clean state\n",
"\n",
"    processed_df = df\n",
"\n",
"    for col in features:\n",
"        if col not in df_cleaned.columns:\n",
"            print(f\"Warning: feature column '{col}' is missing from the cleaned DataFrame; skipped.\")\n",
"            # Columns absent from the original df stay as they were (possibly all NaN)\n",
"            processed_df[col] = df[col] if col in df.columns else np.nan\n",
"            continue\n",
"\n",
"        # Do not neutralize the control variables themselves\n",
"        if col == mkt_cap_col or col == industry_col:\n",
"            print(f\"Warning: feature column '{col}' is a control or internal column; skipping neutralization.\")\n",
"            # Keep it unchanged in the result\n",
"            processed_df[col] = df[col] if col in df.columns else np.nan\n",
"            continue\n",
"\n",
"        residual_series = pd.Series(index=df_cleaned.index, dtype=float)\n",
"\n",
"        # Drop NaNs of this feature before grouping: regress only rows with factor values\n",
"        df_subset_factor = df_cleaned.dropna(subset=[col]).copy()\n",
"\n",
"        if not df_subset_factor.empty:\n",
"            for industry, group in df_subset_factor.groupby(industry_col):\n",
"                x = group[mkt_cap_col]  # (log) market cap\n",
"                y = group[col]  # factor value\n",
"\n",
"                # Need more than one data point and non-degenerate variance to regress;\n",
"                # compare np.var against a small positive number to dodge float noise\n",
"                if len(group) > 1 and np.var(x) > 1e-9:\n",
"                    try:\n",
"                        beta = np.cov(y, x)[0, 1] / np.var(x)\n",
"                        alpha = np.mean(y) - beta * np.mean(x)\n",
"\n",
"                        # Residuals\n",
"                        resid = y - (alpha + beta * x)\n",
"\n",
"                        # Store them in residual_series; the index keeps rows aligned\n",
"                        residual_series.loc[resid.index] = resid\n",
"\n",
"                    except Exception as e:\n",
"                        # Catch numeric failures from np.cov / np.var on extreme data\n",
"                        print(f\"Warning: regression failed in industry {industry}: {e}. Residuals left as NaN.\")\n",
"                        # The group's residuals keep the NaN from initialization;\n",
"                        # alternatively keep raw values: residual_series.loc[group.index] = group[col]\n",
"\n",
"                else:\n",
"                    residual_series.loc[group.index] = group[col]  # keep the raw factor value\n",
"            processed_df.loc[residual_series.index, col] = residual_series\n",
"\n",
"\n",
"        else:\n",
"            processed_df[col] = np.nan  # or: df[col] if col in df.columns else np.nan\n",
"\n",
"    return processed_df\n",
"\n",
"\n",
"import gc\n",
"\n",
"gc.collect()\n",
"\n",
"\n",
"def mad_filter(df, features, n=3):\n",
"    for col in features:\n",
"        median = df[col].median()\n",
"        mad = np.median(np.abs(df[col] - median))\n",
"        upper = median + n * mad\n",
"        lower = median - n * mad\n",
"        df[col] = np.clip(df[col], lower, upper)  # clip the extremes\n",
"    return df\n",
"\n",
"\n",
"def percentile_filter(df, features, lower_percentile=0.01, upper_percentile=0.99):\n",
"    for col in features:\n",
"        # Per-date lower and upper percentiles\n",
"        lower_bound = df.groupby('trade_date')[col].transform(\n",
"            lambda x: x.quantile(lower_percentile)\n",
"        )\n",
"        upper_bound = df.groupby('trade_date')[col].transform(\n",
"            lambda x: x.quantile(upper_percentile)\n",
"        )\n",
"        # Clip values outside the range\n",
"        df[col] = np.clip(df[col], lower_bound, upper_bound)\n",
"    return df\n",
"\n",
"\n",
"from scipy.stats import iqr\n",
"\n",
"\n",
"def iqr_filter(df, features):\n",
"    for col in features:\n",
"        df[col] = df.groupby('trade_date')[col].transform(\n",
"            lambda x: (x - x.median()) / iqr(x) if iqr(x) != 0 else x\n",
"        )\n",
"    return df\n",
"\n",
"\n",
"def quantile_filter(df, features, lower_quantile=0.01, upper_quantile=0.99, window=60):\n",
"    df = df.copy()\n",
"    for col in features:\n",
"        # Rolling quantiles, computed per date group\n",
"        rolling_lower = df.groupby('trade_date')[col].transform(lambda x: x.rolling(window=min(len(x), window)).quantile(lower_quantile))\n",
"        rolling_upper = df.groupby('trade_date')[col].transform(lambda x: x.rolling(window=min(len(x), window)).quantile(upper_quantile))\n",
"\n",
"        # Clip the data\n",
"        df[col] = np.clip(df[col], rolling_lower, rolling_upper)\n",
"\n",
"    return df\n",
"\n",
"def select_top_features_by_rankic(df: pd.DataFrame, feature_columns: list, n: int, target_column: str = 'future_return') -> list:\n",
"    \"\"\"\n",
"    Compute the RankIC of each feature against the target column and return the\n",
"    n features with the highest absolute RankIC.\n",
"\n",
"    Args:\n",
"        df: Pandas DataFrame containing the feature columns and the target column.\n",
"        feature_columns: list of all candidate feature column names.\n",
"        n: number of features to keep, ranked by absolute RankIC.\n",
"        target_column: target column used for the RankIC. Default 'future_return'.\n",
"\n",
"    Returns:\n",
"        List of the n feature column names with the highest absolute RankIC.\n",
"    \"\"\"\n",
"    numeric_columns = df.select_dtypes(include=['float64', 'int64']).columns\n",
"    numeric_columns = [col for col in numeric_columns if col in feature_columns]\n",
"    if target_column not in df.columns:\n",
"        raise ValueError(f\"Target column '{target_column}' does not exist in the DataFrame.\")\n",
"\n",
"    rankic_scores = {}\n",
"    for feature in numeric_columns:\n",
"        if feature not in df.columns:\n",
"            print(f\"Warning: feature column '{feature}' does not exist in the DataFrame; skipped.\")\n",
"            continue\n",
"\n",
"        # RankIC of the feature vs. the target (Spearman correlation)\n",
"        # dropna() keeps the correlation from failing on missing values\n",
"        valid_data = df[[feature, target_column]].dropna()\n",
"        if len(valid_data) > 1:  # need enough points for a correlation\n",
"            # Spearman correlation\n",
"            correlation = valid_data[feature].corr(valid_data[target_column], method='spearman')\n",
"            rankic_scores[feature] = abs(correlation)  # absolute value measures strength\n",
"        else:\n",
"            rankic_scores[feature] = 0  # too little data: RankIC set to 0\n",
"\n",
"    # Turn the RankIC scores into a Series for sorting\n",
"    rankic_series = pd.Series(rankic_scores)\n",
"\n",
"    # Sort by absolute RankIC, descending, and take the top n\n",
"    # handle case where n might be larger than available features\n",
"    n_actual = min(n, len(rankic_series))\n",
"    top_features = rankic_series.sort_values(ascending=False).head(n_actual).index.tolist()\n",
"    top_features = [col for col in feature_columns if col in top_features or col not in numeric_columns]\n",
"    return top_features"
]
},
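{
"cell_type": "code",
"execution_count": null,
"id": "rankicdemo000001",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative check of select_top_features_by_rankic on synthetic data (not\n",
"# part of the original pipeline): 'signal' is rank-correlated with the target\n",
"# by construction, 'noise' is independent, so 'signal' should be selected.\n",
"rng = np.random.default_rng(0)\n",
"demo = pd.DataFrame({'signal': rng.normal(size=500), 'noise': rng.normal(size=500)})\n",
"demo['future_return'] = demo['signal'].rank() + rng.normal(scale=50.0, size=500)\n",
"print(select_top_features_by_rankic(demo, ['signal', 'noise'], n=1))  # -> ['signal']\n"
]
},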
{
"cell_type": "code",
"execution_count": 93,
"id": "dd3018e3",
"metadata": {},
"outputs": [],
"source": [
"# transform_feature_columns = ['ARBR']\n",
"# df = df[df['trade_date'] >= '2020-01-01']\n",
"# print('Winsorizing outliers')\n",
"# cs_mad_filter(df, transform_feature_columns)\n",
"# print('Neutralizing')\n",
"# cs_neutralize_industry_cap(df, transform_feature_columns)\n",
"# print('Standardizing')\n",
"# cs_zscore_standardize(df, transform_feature_columns)\n",
"\n",
"# cs_mad_filter(test_data, transform_feature_columns)\n",
"# cs_neutralize_industry_cap(test_data, transform_feature_columns)\n",
"# cs_zscore_standardize(test_data, transform_feature_columns)\n",
"\n",
"# print(f'feature_columns: {feature_columns}')\n"
]
},
{
"cell_type": "code",
"execution_count": 94,
"id": "47c12bb34062ae7a",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-03T14:57:50.841165Z",
"start_time": "2025-04-03T14:49:25.889057Z"
}
},
"outputs": [],
"source": [
"days = 5\n",
"validation_days = 120\n",
"\n",
"import gc\n",
"\n",
"gc.collect()\n",
"\n",
"df = df.sort_values(by=['ts_code', 'trade_date'])\n",
"df['future_return'] = df.groupby('ts_code', group_keys=False)['close'].apply(lambda x: x.shift(-days) / x - 1)\n",
"# df['future_return'] = (df.groupby('ts_code')['close'].shift(-days) - df.groupby('ts_code')['open'].shift(-1)) / \\\n",
"#                       df.groupby('ts_code')['open'].shift(-1)\n",
"df['future_volatility'] = (\n",
"    df.groupby('ts_code')['pct_chg']\n",
"    .transform(lambda x: x.rolling(days).std().shift(-days))\n",
")\n",
"\n",
"# df['future_score'] = calculate_score(df, days=2, lambda_param=0.3)\n",
"\n",
"filter_index = df['future_return'].between(df['future_return'].quantile(0.01), df['future_return'].quantile(0.99))\n",
"\n",
"# df['label'] = df.groupby('trade_date', group_keys=False)['future_volatility'].transform(\n",
"#     lambda x: pd.qcut(x, q=30, labels=False, duplicates='drop')\n",
"# )\n",
"\n",
"df['label'] = df['future_return']\n",
"\n",
"def symmetric_log_transform(values):\n",
"    return np.sign(values) * np.log1p(np.abs(values))\n",
"\n",
"# for col in [col for col in df.columns]:\n",
"#     train_data[col] = train_data[col].astype('str')\n",
"#     test_data[col] = test_data[col].astype('str')"
]
},
{
"cell_type": "code",
"execution_count": 95,
"id": "b76ea08a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"        ts_code trade_date  log_circ_mv\n",
"0     000001.SZ 2019-01-02    16.574219\n",
"2738  000001.SZ 2019-01-03    16.583965\n",
"5477  000001.SZ 2019-01-04    16.633371\n",
"['undist_profit_ps', 'AR_BR', 'pe_ttm', 'alpha_22_improved', 'alpha_003', 'alpha_007', 'alpha_013', 'cat_up_limit', 'cat_down_limit', 'up_limit_count_10d', 'down_limit_count_10d', 'consecutive_up_limit', 'vol_break', 'weight_roc5', 'price_cost_divergence', 'smallcap_concentration', 'cost_stability', 'high_cost_break_days', 'liquidity_risk', 'turnover_std', 'mv_volatility', 'volume_growth', 'mv_growth', 'cashflow_to_ev_factor', 'ocfps', 'book_to_price_ratio', 'turnover_rate_mean_5', 'variance_20', 'bbi_ratio_factor']\n",
"Winsorizing outliers\n",
"Starting cross-sectional MAD winsorization (k=3.0)...\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"MAD Filtering: 100%|██████████| 27/27 [00:05<00:00, 4.91it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Cross-sectional MAD winsorization finished.\n",
"Starting cross-sectional MAD winsorization (k=3.0)...\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"MAD Filtering: 100%|██████████| 27/27 [00:04<00:00, 6.18it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Cross-sectional MAD winsorization finished.\n",
"Starting cross-sectional MAD winsorization (k=3.0)...\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"MAD Filtering: 0it [00:00, ?it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Cross-sectional MAD winsorization finished.\n",
"Starting cross-sectional MAD winsorization (k=3.0)...\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"MAD Filtering: 0it [00:00, ?it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Cross-sectional MAD winsorization finished.\n",
"feature_columns: ['undist_profit_ps', 'AR_BR', 'pe_ttm', 'alpha_22_improved', 'alpha_003', 'alpha_007', 'alpha_013', 'cat_up_limit', 'cat_down_limit', 'up_limit_count_10d', 'down_limit_count_10d', 'consecutive_up_limit', 'vol_break', 'weight_roc5', 'price_cost_divergence', 'smallcap_concentration', 'cost_stability', 'high_cost_break_days', 'liquidity_risk', 'turnover_std', 'mv_volatility', 'volume_growth', 'mv_growth', 'cashflow_to_ev_factor', 'ocfps', 'book_to_price_ratio', 'turnover_rate_mean_5', 'variance_20', 'bbi_ratio_factor']\n",
"df min date: 2019-01-02\n",
"df max date: 2025-04-09\n",
"2060133\n",
"train_data min date: 2020-01-02\n",
"train_data max date: 2022-12-30\n",
"1678529\n",
"test_data min date: 2023-01-03\n",
"test_data max date: 2025-04-09\n",
"        ts_code trade_date  log_circ_mv\n",
"0     000001.SZ 2019-01-02    16.574219\n",
"2738  000001.SZ 2019-01-03    16.583965\n",
"5477  000001.SZ 2019-01-04    16.633371\n"
]
}
],
"source": [
"train_data = df[filter_index & (df['trade_date'] <= '2023-01-01') & (df['trade_date'] >= '2020-01-01')]\n",
"test_data = df[(df['trade_date'] >= '2023-01-01')]\n",
"\n",
"print(df[['ts_code', 'trade_date', 'log_circ_mv']].head(3))\n",
"\n",
"industry_df = industry_df.sort_values(by=['trade_date'])\n",
"index_data = index_data.sort_values(by=['trade_date'])\n",
"\n",
"# train_data = train_data.merge(industry_df, on=['cat_l2_code', 'trade_date'], how='left')\n",
"# train_data = train_data.merge(index_data, on='trade_date', how='left')\n",
"# test_data = test_data.merge(industry_df, on=['cat_l2_code', 'trade_date'], how='left')\n",
"# test_data = test_data.merge(index_data, on='trade_date', how='left')\n",
"\n",
"train_data, test_data = train_data.replace([np.inf, -np.inf], np.nan), test_data.replace([np.inf, -np.inf], np.nan)\n",
"\n",
"# feature_columns_new = feature_columns[:]\n",
"# train_data, _ = create_deviation_within_dates(train_data, feature_columns)\n",
"# test_data, _ = create_deviation_within_dates(test_data, feature_columns)\n",
"\n",
"feature_columns = [\n",
"    'undist_profit_ps',\n",
"    'AR_BR',\n",
"    'pe_ttm',\n",
"    'alpha_22_improved',\n",
"    'alpha_003',\n",
"    'alpha_007',\n",
"    'alpha_013',\n",
"    'cat_up_limit',\n",
"    'cat_down_limit',\n",
"    'up_limit_count_10d',\n",
"    'down_limit_count_10d',\n",
"    'consecutive_up_limit',\n",
"    'vol_break',\n",
"    'weight_roc5',\n",
"    'price_cost_divergence',\n",
"    'smallcap_concentration',\n",
"    'cost_stability',\n",
"    'high_cost_break_days',\n",
"    'liquidity_risk',\n",
"    'turnover_std',\n",
"    'mv_volatility',\n",
"    'volume_growth',\n",
"    'mv_growth',\n",
"    'lg_flow_mom_corr_20_60',\n",
"    'lg_flow_accel',\n",
"    'profit_pressure',\n",
"    'underwater_resistance',\n",
"    'cost_conc_std_20',\n",
"    'profit_decay_20',\n",
"    'vol_amp_loss_20',\n",
"    'vol_drop_profit_cnt_5',\n",
"    'lg_flow_vol_interact_20',\n",
"    'cost_break_confirm_cnt_5',\n",
"    'atr_norm_channel_pos_14',\n",
"    'turnover_diff_skew_20',\n",
"    'lg_sm_flow_diverge_20',\n",
"    'pullback_strong_20_20',\n",
"    'vol_wgt_hist_pos_20',\n",
"    'vol_adj_roc_20',\n",
"    'cashflow_to_ev_factor',\n",
"    'ocfps',\n",
"    'book_to_price_ratio',\n",
"    'turnover_rate_mean_5',\n",
"    'variance_20',\n",
"    'bbi_ratio_factor'\n",
"]\n",
"feature_columns = [col for col in feature_columns if col in train_data.columns]\n",
"feature_columns = [col for col in feature_columns if not col.startswith('_')]\n",
"numeric_columns = df.select_dtypes(include=['float64', 'int64']).columns\n",
"numeric_columns = [col for col in numeric_columns if col in feature_columns]\n",
"# feature_columns = select_top_features_by_rankic(df, numeric_columns, n=10)\n",
"print(feature_columns)\n",
"\n",
"train_data = fill_nan_with_daily_median(train_data, feature_columns)\n",
"test_data = fill_nan_with_daily_median(test_data, feature_columns)\n",
"\n",
"train_data = train_data.dropna(subset=feature_columns)\n",
"train_data = train_data.dropna(subset=['label'])\n",
"train_data = train_data.reset_index(drop=True)\n",
"# print(test_data.tail())\n",
"test_data = test_data.dropna(subset=feature_columns)\n",
"# test_data = test_data.dropna(subset=['label'])\n",
"test_data = test_data.reset_index(drop=True)\n",
"\n",
"transform_feature_columns = feature_columns\n",
"transform_feature_columns = [col for col in transform_feature_columns if col in feature_columns and not col.startswith('cat')]\n",
"# transform_feature_columns.remove('undist_profit_ps')\n",
"print('Winsorizing outliers')\n",
"cs_mad_filter(train_data, transform_feature_columns)\n",
"# print('Neutralizing')\n",
"# cs_neutralize_industry_cap(train_data, transform_feature_columns)\n",
"# print('Standardizing')\n",
"# cs_zscore_standardize(train_data, transform_feature_columns)\n",
"\n",
"cs_mad_filter(test_data, transform_feature_columns)\n",
"# cs_neutralize_industry_cap(test_data, transform_feature_columns)\n",
"# cs_zscore_standardize(test_data, transform_feature_columns)\n",
"\n",
"mad_filter_feature_columns = [col for col in feature_columns if col not in transform_feature_columns and not col.startswith('cat')]\n",
"cs_mad_filter(train_data, mad_filter_feature_columns)\n",
"cs_mad_filter(test_data, mad_filter_feature_columns)\n",
"\n",
"\n",
"print(f'feature_columns: {feature_columns}')\n",
"\n",
"\n",
"print(f\"df min date: {df['trade_date'].min().strftime('%Y-%m-%d')}\")\n",
"print(f\"df max date: {df['trade_date'].max().strftime('%Y-%m-%d')}\")\n",
"print(len(train_data))\n",
"print(f\"train_data min date: {train_data['trade_date'].min().strftime('%Y-%m-%d')}\")\n",
"print(f\"train_data max date: {train_data['trade_date'].max().strftime('%Y-%m-%d')}\")\n",
"print(len(test_data))\n",
"print(f\"test_data min date: {test_data['trade_date'].min().strftime('%Y-%m-%d')}\")\n",
"print(f\"test_data max date: {test_data['trade_date'].max().strftime('%Y-%m-%d')}\")\n",
"\n",
"cat_columns = [col for col in feature_columns if col.startswith('cat')]\n",
"for col in cat_columns:\n",
"    train_data[col] = train_data[col].astype('category')\n",
"    test_data[col] = test_data[col].astype('category')\n",
"\n",
"print(df[['ts_code', 'trade_date', 'log_circ_mv']].head(3))\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e23d1759",
"metadata": {},
"outputs": [],
"source": [
"feature_columns = [\n",
"    'undist_profit_ps',\n",
"    'AR_BR',\n",
"    # 'pe_ttm',\n",
"    # 'alpha_22_improved',\n",
"    # 'alpha_003',\n",
"    # 'alpha_007',\n",
"    # 'alpha_013',\n",
"    # 'cat_up_limit',\n",
"    # 'cat_down_limit',\n",
"    # 'up_limit_count_10d',\n",
"    # 'down_limit_count_10d',\n",
"    # 'consecutive_up_limit',\n",
"    # 'vol_break',\n",
"    # 'weight_roc5',\n",
"    # 'price_cost_divergence',\n",
"    # 'smallcap_concentration',\n",
"    # 'cost_stability',\n",
"    # 'high_cost_break_days',\n",
"    # 'liquidity_risk',\n",
"    # 'turnover_std',\n",
"    # 'mv_volatility',\n",
"    # 'volume_growth',\n",
"    # 'mv_growth',\n",
"    # 'lg_flow_mom_corr_20_60',\n",
"    # 'lg_flow_accel',\n",
"    # 'profit_pressure',\n",
"    # 'underwater_resistance',\n",
"    # 'cost_conc_std_20',\n",
"    # 'profit_decay_20',\n",
"    # 'vol_amp_loss_20',\n",
"    # 'vol_drop_profit_cnt_5',\n",
"    # 'lg_flow_vol_interact_20',\n",
"    # 'cost_break_confirm_cnt_5',\n",
"    # 'atr_norm_channel_pos_14',\n",
"    # 'turnover_diff_skew_20',\n",
"    # 'lg_sm_flow_diverge_20',\n",
"    # 'pullback_strong_20_20',\n",
"    # 'vol_wgt_hist_pos_20',\n",
"    # 'vol_adj_roc_20',\n",
"    'cashflow_to_ev_factor',\n",
"    'ocfps',\n",
"    'book_to_price_ratio',\n",
"    'turnover_rate_mean_5',\n",
"    'variance_20',\n",
"    'bbi_ratio_factor'\n",
"]\n",
"feature_columns = [col for col in feature_columns if col in train_data.columns]"
]
},
|
||
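{
"cell_type": "code",
"execution_count": null,
"id": "rankic-select-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Hedged sketch of select_top_features_by_rankic, which is referenced\n",
"# (commented out) in the preprocessing cell but defined elsewhere in the\n",
"# project. Assumption: it scores each candidate feature by the absolute mean\n",
"# daily Spearman rank IC against the label and keeps the top n.\n",
"import pandas as pd\n",
"\n",
"\n",
"def select_top_features_by_rankic_sketch(data, candidate_columns, n=10, label_col='label'):\n",
"    daily_ic = data.groupby('trade_date').apply(\n",
"        lambda day: pd.Series({\n",
"            col: day[col].corr(day[label_col], method='spearman')\n",
"            for col in candidate_columns\n",
"        })\n",
"    )\n",
"    # Rank by the magnitude of the average daily IC.\n",
"    mean_abs_ic = daily_ic.mean().abs().sort_values(ascending=False)\n",
"    return mean_abs_ic.head(n).index.tolist()"
]
},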
{
"cell_type": "code",
"execution_count": 103,
"id": "8f134d435f71e9e2",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-03T14:57:51.050696Z",
"start_time": "2025-04-03T14:57:51.034030Z"
},
"jupyter": {
"source_hidden": true
}
},
"outputs": [],
"source": [
"from sklearn.preprocessing import StandardScaler\n",
|
||
"from sklearn.linear_model import LinearRegression, BayesianRidge\n",
|
||
"import matplotlib.pyplot as plt # 保持 matplotlib 导入,尽管LightGBM的绘图功能已移除\n",
|
||
"from sklearn.decomposition import PCA\n",
|
||
"import pandas as pd\n",
|
||
"import numpy as np\n",
|
||
"import datetime # 用于日期计算\n",
|
||
"\n",
|
||
"\n",
|
||
"def train_model(train_data_df, feature_columns,\n",
|
||
" print_info=True, # 调整参数名,更通用\n",
|
||
" validation_days=180, use_pca=False, split_date=None,\n",
|
||
" target_column='label'): # 增加目标列参数\n",
|
||
"\n",
|
||
" print('train data size: ', len(train_data_df))\n",
|
||
"\n",
|
||
" # 确保数据按时间排序\n",
|
||
" train_data_df = train_data_df.sort_values(by='trade_date')\n",
|
||
"\n",
|
||
" # 识别数值型特征列\n",
|
||
" numeric_feature_columns = train_data_df[feature_columns].select_dtypes(include=['float64', 'int64']).columns.tolist()\n",
|
||
" \n",
|
||
" # 对数值特征进行横截面标准化(如果您的函数是这样实现的)\n",
|
||
" # 假设 cross_sectional_standardization 接受 DataFrame 和 列名列表\n",
|
||
" # if numeric_feature_columns: # 只对存在数值特征的列进行标准化\n",
|
||
" # train_data_df = cross_sectional_standardization(train_data_df, numeric_feature_columns)\n",
|
||
" # elif print_info:\n",
|
||
" # print(\"警告: 没有找到数值型特征列用于标准化。\")\n",
|
||
"\n",
|
||
"\n",
|
||
" # 去除标签为空的样本\n",
|
||
" initial_len = len(train_data_df)\n",
|
||
" train_data_df = train_data_df.dropna(subset=[target_column])\n",
|
||
"\n",
|
||
" if print_info:\n",
|
||
" print(f'原始样本数: {initial_len}, 去除标签为空后样本数: {len(train_data_df)}')\n",
|
||
"\n",
|
||
" train_data_split = train_data_df\n",
|
||
"\n",
|
||
" # 提取特征和标签,只取数值型特征用于线性回归\n",
|
||
" X_train = train_data_split[numeric_feature_columns]\n",
|
||
" y_train = train_data_split[target_column]\n",
|
||
"\n",
|
||
" # # 标准化数值特征 (使用 StandardScaler 对训练集fit并transform, 对验证集只transform)\n",
|
||
" scaler = StandardScaler()\n",
|
||
" # X_train = scaler.fit_transform(X_train)\n",
|
||
"\n",
|
||
" # 训练线性回归模型\n",
|
||
" # model = LinearRegression()\n",
|
||
" model = BayesianRidge()\n",
|
||
" \n",
|
||
" # 使用处理后的特征和样本权重进行训练\n",
|
||
" model.fit(X_train, y_train)\n",
|
||
"\n",
|
||
"\n",
|
||
" return model, scaler, None # 返回训练好的模型、scaler 和 pca 对象"
|
||
]
},
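{
"cell_type": "code",
"execution_count": null,
"id": "time-split-sketch",
"metadata": {},
"outputs": [],
"source": [
"# train_model accepts validation_days and split_date but currently ignores\n",
"# both. A minimal sketch of the date-based split those parameters imply,\n",
"# consistent with the commented-out split code in the scoring cell below;\n",
"# an illustration, not the project's actual validation logic.\n",
"def time_split_sketch(data, validation_days=180, split_date=None, date_col='trade_date'):\n",
"    data = data.sort_values(by=date_col)\n",
"    all_dates = data[date_col].unique()\n",
"    if split_date is None:\n",
"        # Fall back to the date validation_days trading days from the end.\n",
"        split_date = all_dates[-min(validation_days, len(all_dates))]\n",
"    train_part = data[data[date_col] < split_date]\n",
"    val_part = data[data[date_col] >= split_date]\n",
"    return train_part, val_part"
]
},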
{
"cell_type": "code",
"execution_count": 104,
"id": "c6eb5cd4-e714-420a-ac48-39af3e11ee81",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-03T15:03:18.426481Z",
"start_time": "2025-04-03T15:02:19.926352Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"train data size: 36400\n",
|
||
"原始样本数: 36400, 去除标签为空后样本数: 36400\n"
|
||
]
}
],
"source": [
"\n",
"gc.collect()\n",
"\n",
"use_pca = False\n",
"# feature_contri = [2 if feat.startswith('act_factor') or 'buy' in feat or 'sell' in feat else 1 for feat in feature_columns]\n",
"# light_params['feature_contri'] = feature_contri\n",
"# print(f'feature_contri: {feature_contri}')\n",
"model, scaler, pca = train_model(train_data.dropna(subset=['label']).groupby('trade_date', group_keys=False).apply(lambda x: x.nsmallest(50, 'total_mv')), feature_columns)\n"
]
},
{
"cell_type": "code",
"execution_count": 105,
"id": "5d1522a7538db91b",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-03T15:04:39.656944Z",
"start_time": "2025-04-03T15:04:39.298483Z"
}
},
"outputs": [],
"source": [
"# train_data = train_data.sort_values(by='trade_date')\n",
|
||
"# all_dates = train_data['trade_date'].unique() # 获取所有唯一的 trade_date\n",
|
||
"# split_date = all_dates[-120] # 划分点为倒数第 validation_days 天\n",
|
||
"# print(split_date)\n",
|
||
"# print(all_dates)\n",
|
||
"# val_data_split = train_data[train_data['trade_date'] >= split_date] # 验证集\n",
|
||
"\n",
|
||
"score_df = test_data\n",
|
||
"score_df = fill_nan_with_daily_median(score_df, ['pe_ttm'])\n",
|
||
"score_df = score_df[score_df['pe_ttm'] > 0]\n",
|
||
"# score_df = score_df.groupby('trade_date', group_keys=False).apply(lambda x: x.nsmallest(50, 'total_mv')).reset_index()\n",
|
||
"numeric_columns = score_df.select_dtypes(include=['float64', 'int64']).columns\n",
|
||
"numeric_columns = [col for col in feature_columns if col in numeric_columns]\n",
|
||
"# score_df.loc[:, numeric_columns] = scaler.transform(score_df[numeric_columns])\n",
|
||
"# score_df = cross_sectional_standardization(score_df, numeric_columns)\n",
|
||
"\n",
|
||
"score_df['score'] = model.predict(score_df[numeric_columns])\n",
|
||
"score_df['score_ranks'] = score_df.groupby('trade_date')['score'].rank(ascending=True)\n",
|
||
"\n",
|
||
"score_df = score_df.groupby('trade_date', group_keys=False).apply(\n",
|
||
" lambda x: x[x['score'] >= x['score'].quantile(0.90)] # 计算90%分位数作为阈值,筛选分数>=阈值的行\n",
|
||
").reset_index(drop=True) # drop=True 避免添加旧索引列\n",
|
||
"# save_df = score_df.groupby('trade_date', group_keys=False).apply(lambda x: x.nlargest(1, 'score')).reset_index()\n",
|
||
"save_df = score_df.groupby('trade_date', group_keys=False).apply(lambda x: x.nsmallest(1, 'total_mv')).reset_index()\n",
|
||
"save_df[['trade_date', 'score', 'ts_code']].to_csv('predictions_test.tsv', index=False)\n",
|
||
"# "
|
||
]
|
||
},
|
||
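{
"cell_type": "code",
"execution_count": null,
"id": "rankic-eval-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Quick sanity check on the model: per-day Spearman rank IC between the\n",
"# predicted score and the realized label on the test set. A hedged sketch --\n",
"# it assumes test_data still carries a usable 'label' column (labels were\n",
"# kept, possibly with NaNs, when test_data was built above).\n",
"eval_df = test_data.dropna(subset=['label']).copy()\n",
"eval_df['score'] = model.predict(eval_df[numeric_columns])\n",
"daily_rank_ic = eval_df.groupby('trade_date').apply(\n",
"    lambda day: day['score'].corr(day['label'], method='spearman')\n",
")\n",
"print('mean daily rank IC:', daily_rank_ic.mean())\n",
"print('ICIR:', daily_rank_ic.mean() / daily_rank_ic.std())"
]
},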
{
"cell_type": "code",
"execution_count": 106,
"id": "340a82b9",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"          ts_code trade_date      AR_BR  undist_profit_ps     score  \\\n",
"0       000100.SZ 2023-01-03  11.934320            1.4785  0.010065   \n",
"1       000166.SZ 2023-01-03   1.216701            1.3199  0.009441   \n",
"2       000518.SZ 2023-01-03 -15.537010           -0.5179  0.009575   \n",
"3       000533.SZ 2023-01-03   6.648137           -0.1828  0.009428   \n",
"4       000536.SZ 2023-01-03  -8.622002           -2.6896  0.016490   \n",
"...           ...        ...        ...               ...       ...   \n",
"168082  605208.SH 2025-04-09  89.845359            2.7720  0.026423   \n",
"168083  605228.SH 2025-04-09   9.234756            0.9922  0.027368   \n",
"168084  605488.SH 2025-04-09  38.082408            1.7606  0.028328   \n",
"168085  605555.SH 2025-04-09  47.834082            2.9659  0.030038   \n",
"168086  605577.SH 2025-04-09  30.978777            4.7919  0.029345   \n",
"\n",
"                AR          BR  \n",
"0        67.759563   55.825243  \n",
"1        92.622951   91.406250  \n",
"2        90.694444  106.231454  \n",
"3       101.730104   95.081967  \n",
"4        70.720721   79.342723  \n",
"...            ...         ...  \n",
"168082  177.835588   87.375312  \n",
"168083   94.088176   84.853420  \n",
"168084  107.369795   69.287387  \n",
"168085   98.648649   50.814566  \n",
"168086  114.000000   83.021223  \n",
"\n",
"[168087 rows x 7 columns]\n"
]
}
],
"source": [
"print(score_df[['ts_code', 'trade_date', 'AR_BR', 'undist_profit_ps', 'score', 'AR', 'BR']])"
]
},
{
"cell_type": "code",
"execution_count": 107,
"id": "fb2263ce",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"线性回归模型截距 (Intercept):\n",
|
||
"-0.0821873954475609\n",
|
||
"\n",
|
||
"线性回归模型系数 (Coefficients):\n",
|
||
"[-6.62598705e-05 -2.63806648e-05 -2.57090964e-05 1.14891132e-08\n",
|
||
" 1.14960472e-03 2.26826386e-03 7.33817048e-03 4.86240923e-15\n",
|
||
" 2.33841866e-15 -2.88389337e-15 -2.37944451e-01 -1.22219539e-04\n",
|
||
" -2.67783556e-02 9.18323390e-02 -4.07814304e-15 -7.28200346e-06\n",
|
||
" -2.50383369e-02 2.66757338e-01 -1.89189144e-02 2.41573603e-01\n",
|
||
" 4.69728616e-02 -1.97148056e-03 -1.14382817e-03 -8.75750113e-04\n",
|
||
" 4.21106078e-04 8.52579791e-02]\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"print(\"线性回归模型截距 (Intercept):\")\n",
|
||
"print(model.intercept_)\n",
|
||
"\n",
|
||
"print(\"\\n线性回归模型系数 (Coefficients):\")\n",
|
||
"# 打印系数数组\n",
|
||
"print(model.coef_)"
|
||
]
|
||
},
|
||
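{
"cell_type": "code",
"execution_count": null,
"id": "coef-names-sketch",
"metadata": {},
"outputs": [],
"source": [
"# The raw coefficient array is hard to read on its own; pairing it with the\n",
"# feature names makes it interpretable. Assumption: numeric_columns (built in\n",
"# the scoring cell) has the same order as the columns passed to model.fit,\n",
"# which holds here because both are derived from feature_columns.\n",
"coef_series = pd.Series(model.coef_, index=numeric_columns).sort_values()\n",
"print(coef_series)"
]
},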
{
"cell_type": "code",
"execution_count": 108,
"id": "7e9023cc",
"metadata": {},
"outputs": [],
"source": [
"def analyze_factors(\n",
|
||
" df: pd.DataFrame,\n",
|
||
" feature_columns: list[str],\n",
|
||
" target_column: str = 'target', # 假设目标列默认为 'target'\n",
|
||
" trade_date_col: str = 'trade_date', # 假设日期列默认为 'trade_date'\n",
|
||
" mcap_col: str = 'total_mv', # 新增: 市值列名称\n",
|
||
" mcap_bins: int = 5 # 新增: 市值分位数的数量 (例如 5 表示五分位数)\n",
|
||
") -> pd.DataFrame:\n",
|
||
" \"\"\"\n",
|
||
" 分析DataFrame中指定特征列的各种指标,包括基本统计、相关性、日间IC、ICIR以及在不同市值分位数上的IC。\n",
|
||
"\n",
|
||
" Args:\n",
|
||
" df (pd.DataFrame): 包含日期、目标列、特征列和市值列的DataFrame。\n",
|
||
" 需要包含 trade_date_col, target_column, feature_columns 和 mcap_col 中的所有列。\n",
|
||
" feature_columns (list[str]): 需要分析的特征列名称列表。\n",
|
||
" target_column (str): 目标变量列的名称。\n",
|
||
" trade_date_col (str): 交易日期列的名称。\n",
|
||
" mcap_col (str): 市值列的名称。\n",
|
||
" mcap_bins (int): 市值分位数的数量 (例如 5 表示五分位数)。\n",
|
||
"\n",
|
||
" Returns:\n",
|
||
" pd.DataFrame: 包含各个因子分析指标的汇总DataFrame。\n",
|
||
" 同时打印因子在不同市值分位数上的平均IC表格。\n",
|
||
" 如果输入数据或列有问题,可能返回空或包含NaN的DataFrame。\n",
|
||
" \"\"\"\n",
|
||
"\n",
|
||
" # --- 数据校验 ---\n",
|
||
" required_cols = [trade_date_col, target_column, mcap_col] + feature_columns\n",
|
||
" if not all(col in df.columns for col in required_cols):\n",
|
||
" missing = [col for col in required_cols if col not in df.columns]\n",
|
||
" print(f\"错误: 输入DataFrame缺少必需的列: {missing}\")\n",
|
||
" return pd.DataFrame() # 返回空DataFrame\n",
|
||
"\n",
|
||
" # 确保日期列是 datetime 类型\n",
|
||
" df = df.copy() # 在副本上操作\n",
|
||
" df[trade_date_col] = pd.to_datetime(df[trade_date_col], errors='coerce')\n",
|
||
" df.dropna(subset=[trade_date_col], inplace=True) # 移除日期转换失败的行\n",
|
||
"\n",
|
||
" # 过滤掉那些在 feature_columns, target_column, mcap_col 上有 NaN 的行,以确保后续计算是在完整数据上\n",
|
||
" # 直接在 df 副本上进行清洗\n",
|
||
" initial_rows_before_clean = len(df)\n",
|
||
" df.dropna(subset=feature_columns + [target_column, mcap_col], inplace=True)\n",
|
||
" rows_dropped_clean = initial_rows_before_clean - len(df)\n",
|
||
" if rows_dropped_clean > 0:\n",
|
||
" print(f\"警告: 移除了 {rows_dropped_clean} 行,因为其特征、目标或市值列存在空值。\")\n",
|
||
"\n",
|
||
" if df.empty:\n",
|
||
" print(\"错误: 清理缺失值后数据为空,无法进行因子分析。\")\n",
|
||
" return pd.DataFrame() # 返回空DataFrame\n",
|
||
"\n",
|
||
"\n",
|
||
" print(f\"开始分析 {len(feature_columns)} 个因子指标...\")\n",
|
||
"\n",
|
||
" # --- 1. 基本因子统计量 ---\n",
|
||
" basic_stats = df[feature_columns].describe().T\n",
|
||
"\n",
|
||
" print(\"\\n--- 基本因子统计量 ---\")\n",
|
||
" print(basic_stats)\n",
|
||
"\n",
|
||
" # --- 2. 因子与目标变量的整体相关性 ---\n",
|
||
" overall_correlation = {}\n",
|
||
" for feature in feature_columns:\n",
|
||
" # 在清理后的 df 上计算相关性\n",
|
||
" if df[[feature, target_column]].dropna().shape[0] > 1: # 确保至少有两个有效数据点\n",
|
||
" overall_correlation[feature] = {\n",
|
||
" 'Pearson_Correlation_with_Target': df[feature].corr(df[target_column], method='pearson'),\n",
|
||
" 'Spearman_Correlation_with_Target': df[feature].corr(df[target_column], method='spearman')\n",
|
||
" }\n",
|
||
" else:\n",
|
||
" overall_correlation[feature] = {\n",
|
||
" 'Pearson_Correlation_with_Target': np.nan,\n",
|
||
" 'Spearman_Correlation_with_Target': np.nan\n",
|
||
" }\n",
|
||
" overall_corr_df = pd.DataFrame.from_dict(overall_correlation, orient='index')\n",
|
||
"\n",
|
||
" print(\"\\n--- 因子与目标变量的整体相关性 ---\")\n",
|
||
" print(overall_corr_df)\n",
|
||
"\n",
|
||
" # --- 3. 因子之间的相关性矩阵 ---\n",
|
||
" # 在清理后的 df 上计算相关性\n",
|
||
" factor_correlation_matrix = df[feature_columns].corr(method='spearman') # 改回 Spearman\n",
|
||
"\n",
|
||
" print(\"\\n--- 因子之间的相关性矩阵 (Spearman) ---\") # 修正打印信息\n",
|
||
" print(factor_correlation_matrix)\n",
|
||
"\n",
|
||
" # --- 4. 日间 IC 和 ICIR ---\n",
|
||
" print(\"\\n--- 计算日间 IC (Spearman 相关性) 和 ICIR ---\")\n",
|
||
"\n",
|
||
" # 直接在清理后的 df 上计算每日 IC\n",
|
||
" if df.empty: # 理论上上面已经检查过,这里再检查一次更安全\n",
|
||
" daily_ic_series = pd.Series(dtype=float) # 空 Series\n",
|
||
" ic_stats = pd.DataFrame({\n",
|
||
" 'Mean_IC (Spearman)': np.nan, 'Std_Dev_IC': np.nan, 'ICIR': np.nan\n",
|
||
" }, index=feature_columns)\n",
|
||
" else:\n",
|
||
" daily_ic_series = df.groupby(trade_date_col).apply(\n",
|
||
" lambda day_group: {\n",
|
||
" feature: day_group[feature].corr(day_group[target_column], method='spearman')\n",
|
||
" for feature in feature_columns if day_group.shape[0] > 1 # 确保每日数据点多于1才能计算相关性\n",
|
||
" }\n",
|
||
" ).apply(pd.Series) # 将字典结果转换为 DataFrame\n",
|
||
"\n",
|
||
" # 计算 IC 的统计量\n",
|
||
" if not daily_ic_series.empty:\n",
|
||
" ic_mean = daily_ic_series.mean()\n",
|
||
" ic_std = daily_ic_series.std()\n",
|
||
" # 避免除以零\n",
|
||
" ic_ir = ic_mean / ic_std.replace(0, np.nan) # 使用 replace 0 为 NaN\n",
|
||
"\n",
|
||
" ic_stats = pd.DataFrame({\n",
|
||
" 'Mean_IC (Spearman)': ic_mean,\n",
|
||
" 'Std_Dev_IC': ic_std,\n",
|
||
" 'ICIR': ic_ir\n",
|
||
" })\n",
|
||
" print(\"\\n--- 日间 IC 和 ICIR (Spearman) ---\")\n",
|
||
" print(ic_stats)\n",
|
||
" else:\n",
|
||
" ic_stats = pd.DataFrame({\n",
|
||
" 'Mean_IC (Spearman)': np.nan, 'Std_Dev_IC': np.nan, 'ICIR': np.nan\n",
|
||
" }, index=feature_columns)\n",
|
||
"\n",
|
||
"\n",
|
||
" # --- 5. 因子在不同市值分位数上的平均 IC ---\n",
|
||
" print(f\"\\n--- 计算因子在 {mcap_bins} 个市值分位数上的平均 IC (Spearman) ---\")\n",
|
||
"\n",
|
||
" # 在清理后的 df 上计算每日市值分位数,直接添加到 df 中\n",
|
||
" # 使用 transform() 和 qcut() 在每个日期分组内计算分位数\n",
|
||
" # labels=False 返回整数 0 to mcap_bins-1\n",
|
||
" # duplicates='drop' 处理在某些日期股票数量少于 bins 导致分位数边缘重复的情况,会返回 NaN\n",
|
||
" # 添加一个临时列来存储分位数\n",
|
||
" mcap_bin_col_name = f'_mcap_bin_{mcap_bins}'\n",
|
||
" df[mcap_bin_col_name] = df.groupby(trade_date_col)[mcap_col].transform(\n",
|
||
" lambda x: pd.qcut(x, q=mcap_bins, labels=False, duplicates='drop') if len(x) >= mcap_bins else np.nan # 确保股票数量足够进行分位数划分\n",
|
||
" )\n",
|
||
"\n",
|
||
" # 过滤掉无法划分分位数 (NaN) 的行,进行分位数 IC 计算\n",
|
||
" # 创建一个临时 DataFrame df_binned_analysis\n",
|
||
" df_binned_analysis = df.dropna(subset=[mcap_bin_col_name]).copy()\n",
|
||
"\n",
|
||
" if df_binned_analysis.empty:\n",
|
||
" print(\"错误: 划分市值分位数后数据为空,无法计算分位数上的 IC。\")\n",
|
||
" avg_ic_by_bin = pd.DataFrame(index=range(mcap_bins), columns=feature_columns) # Placeholder\n",
|
||
" else:\n",
|
||
" # 按日期和市值分位数分组,计算每个分组内的因子与目标变量的截面相关性 (分位数IC)\n",
|
||
" binned_ic_by_day = df_binned_analysis.groupby([trade_date_col, mcap_bin_col_name]).apply(\n",
|
||
" lambda group: {\n",
|
||
" feature: group[feature].corr(group[target_column], method='spearman')\n",
|
||
" for feature in feature_columns if group.shape[0] > 1 # 确保分位数组内数据点多于1\n",
|
||
" }\n",
|
||
" ).apply(pd.Series) # 将嵌套结果转为 DataFrame\n",
|
||
"\n",
|
||
" # 对每个分位数组的每日 IC 求平均\n",
|
||
" # unstack(level=mcap_bin_col_name) 将 mcap_bin 作为列\n",
|
||
" # mean(axis=0) 对日期索引求平均\n",
|
||
" avg_ic_by_bin = binned_ic_by_day.unstack(level=mcap_bin_col_name).mean(axis=0).unstack()\n",
|
||
"\n",
|
||
" # 重命名索引和列,使表格更清晰\n",
|
||
" if not avg_ic_by_bin.empty:\n",
|
||
" # Index name will be the original column name used for grouping ('_mcap_bin_X')\n",
|
||
" # Rename the index name explicitly\n",
|
||
" avg_ic_by_bin.index.name = 'MarketCap_Bin'\n",
|
||
" avg_ic_by_bin.columns.name = 'Feature'\n",
|
||
" # 可以根据需要对分位数 bin 索引进行排序 (虽然 pd.qcut labels=False usually sorts)\n",
|
||
" avg_ic_by_bin = avg_ic_by_bin.sort_index()\n",
|
||
"\n",
|
||
" print(avg_ic_by_bin)\n",
|
||
"\n",
|
||
"\n",
|
||
" # --- 6. 汇总所有指标 ---\n",
|
||
" # 将基本统计、整体相关性、IC/ICIR 合并到一个 DataFrame\n",
|
||
" # 注意:合并时需要根据索引进行对齐 (因子名称)\n",
|
||
" summary_df = basic_stats\n",
|
||
" summary_df = summary_df.merge(overall_corr_df, left_index=True, right_index=True, how='left')\n",
|
||
" summary_df = summary_df.merge(ic_stats, left_index=True, right_index=True, how='left')\n",
|
||
"\n",
|
||
" # print(\"\\n--- 因子分析汇总报告 ---\")\n",
|
||
" # print(summary_df)\n",
|
||
"\n",
|
||
" # --- 清理临时列 'mcap_bin' ---\n",
|
||
" # 修正:在函数结束时从我们一直在操作的 df 副本中删除临时列\n",
|
||
" if mcap_bin_col_name in df.columns:\n",
|
||
" df.drop(columns=[mcap_bin_col_name], inplace=True)\n",
|
||
"\n",
|
||
"\n",
|
||
" return summary_df # 主要返回汇总报告,分位数IC单独打印\n",
|
||
"\n",
|
||
"# # 运行分析函数\n",
|
||
"# factor_analysis_report = analyze_factors(test_data.copy(), feature_columns, 'future_return')\n",
|
||
"\n",
|
||
"# print(\"\\n--- 最终汇总报告 DataFrame ---\")\n",
|
||
"# print(factor_analysis_report)"
|
||
]
|
||
}
|
||
],
"metadata": {
"kernelspec": {
"display_name": "new_trader",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}