{ "cells": [ { "cell_type": "code", "id": "79a7758178bafdd3", "metadata": { "jupyter": { "source_hidden": true }, "ExecuteTime": { "end_time": "2025-04-04T15:35:12.553694Z", "start_time": "2025-04-04T15:35:12.549140Z" } }, "source": [ "# %load_ext autoreload\n", "# %autoreload 2\n", "\n", "import pandas as pd\n", "import warnings\n", "\n", "warnings.filterwarnings(\"ignore\")\n", "\n", "pd.set_option('display.max_columns', None)\n" ], "outputs": [], "execution_count": 30 }, { "cell_type": "code", "id": "a79cafb06a7e0e43", "metadata": { "scrolled": true, "ExecuteTime": { "end_time": "2025-04-04T15:36:03.894111Z", "start_time": "2025-04-04T15:35:12.573328Z" } }, "source": [ "from utils.utils import read_and_merge_h5_data\n", "\n", "print('daily data')\n", "df = read_and_merge_h5_data('../../data/daily_data.h5', key='daily_data',\n", " columns=['ts_code', 'trade_date', 'open', 'close', 'high', 'low', 'vol', 'pct_chg'],\n", " df=None)\n", "\n", "print('daily basic')\n", "df = read_and_merge_h5_data('../../data/daily_basic.h5', key='daily_basic',\n", " columns=['ts_code', 'trade_date', 'turnover_rate', 'pe_ttm', 'circ_mv', 'volume_ratio',\n", " 'is_st'], df=df, join='inner')\n", "\n", "print('stk limit')\n", "df = read_and_merge_h5_data('../../data/stk_limit.h5', key='stk_limit',\n", " columns=['ts_code', 'trade_date', 'pre_close', 'up_limit', 'down_limit'],\n", " df=df)\n", "print('money flow')\n", "df = read_and_merge_h5_data('../../data/money_flow.h5', key='money_flow',\n", " columns=['ts_code', 'trade_date', 'buy_sm_vol', 'sell_sm_vol', 'buy_lg_vol', 'sell_lg_vol',\n", " 'buy_elg_vol', 'sell_elg_vol', 'net_mf_vol'],\n", " df=df)\n", "print('cyq perf')\n", "df = read_and_merge_h5_data('../../data/cyq_perf.h5', key='cyq_perf',\n", " columns=['ts_code', 'trade_date', 'his_low', 'his_high', 'cost_5pct', 'cost_15pct',\n", " 'cost_50pct',\n", " 'cost_85pct', 'cost_95pct', 'weight_avg', 'winner_rate'],\n", " df=df)\n", "print(df.info())" ], "outputs": [ { "name": "stdout", 
"output_type": "stream", "text": [ "daily data\n", "daily basic\n", "inner merge on ['ts_code', 'trade_date']\n", "stk limit\n", "left merge on ['ts_code', 'trade_date']\n", "money flow\n", "left merge on ['ts_code', 'trade_date']\n", "cyq perf\n", "left merge on ['ts_code', 'trade_date']\n", "\n", "RangeIndex: 8477357 entries, 0 to 8477356\n", "Data columns (total 31 columns):\n", " # Column Dtype \n", "--- ------ ----- \n", " 0 ts_code object \n", " 1 trade_date datetime64[ns]\n", " 2 open float64 \n", " 3 close float64 \n", " 4 high float64 \n", " 5 low float64 \n", " 6 vol float64 \n", " 7 pct_chg float64 \n", " 8 turnover_rate float64 \n", " 9 pe_ttm float64 \n", " 10 circ_mv float64 \n", " 11 volume_ratio float64 \n", " 12 is_st bool \n", " 13 up_limit float64 \n", " 14 down_limit float64 \n", " 15 buy_sm_vol float64 \n", " 16 sell_sm_vol float64 \n", " 17 buy_lg_vol float64 \n", " 18 sell_lg_vol float64 \n", " 19 buy_elg_vol float64 \n", " 20 sell_elg_vol float64 \n", " 21 net_mf_vol float64 \n", " 22 his_low float64 \n", " 23 his_high float64 \n", " 24 cost_5pct float64 \n", " 25 cost_15pct float64 \n", " 26 cost_50pct float64 \n", " 27 cost_85pct float64 \n", " 28 cost_95pct float64 \n", " 29 weight_avg float64 \n", " 30 winner_rate float64 \n", "dtypes: bool(1), datetime64[ns](1), float64(28), object(1)\n", "memory usage: 1.9+ GB\n", "None\n" ] } ], "execution_count": 31 }, { "cell_type": "code", "id": "cac01788dac10678", "metadata": { "jupyter": { "source_hidden": true }, "ExecuteTime": { "end_time": "2025-04-04T15:36:18.904551Z", "start_time": "2025-04-04T15:36:04.059865Z" } }, "source": [ "print('industry')\n", "industry_df = read_and_merge_h5_data('../../data/industry_data.h5', key='industry_data',\n", " columns=['ts_code', 'l2_code', 'in_date'],\n", " df=None, on=['ts_code'], join='left')\n", "\n", "\n", "def merge_with_industry_data(df, industry_df):\n", " # 确保日期字段是 datetime 类型\n", " df['trade_date'] = pd.to_datetime(df['trade_date'])\n", " 
industry_df['in_date'] = pd.to_datetime(industry_df['in_date'])\n", "\n", " # 对 industry_df 按 ts_code 和 in_date 排序\n", " industry_df_sorted = industry_df.sort_values(['in_date', 'ts_code'])\n", "\n", " # 对原始 df 按 ts_code 和 trade_date 排序\n", " df_sorted = df.sort_values(['trade_date', 'ts_code'])\n", "\n", " # 使用 merge_asof 进行向后合并\n", " merged = pd.merge_asof(\n", " df_sorted,\n", " industry_df_sorted,\n", " by='ts_code', # 按 ts_code 分组\n", " left_on='trade_date',\n", " right_on='in_date',\n", " direction='backward'\n", " )\n", "\n", " # 获取每个 ts_code 的最早 in_date 记录\n", " min_in_date_per_ts = (industry_df_sorted\n", " .groupby('ts_code')\n", " .first()\n", " .reset_index()[['ts_code', 'l2_code']])\n", "\n", " # 填充未匹配到的记录(trade_date 早于所有 in_date 的情况)\n", " merged['l2_code'] = merged['l2_code'].fillna(\n", " merged['ts_code'].map(min_in_date_per_ts.set_index('ts_code')['l2_code'])\n", " )\n", "\n", " # 保留需要的列并重置索引\n", " result = merged.reset_index(drop=True)\n", " return result\n", "\n", "\n", "# 使用示例\n", "df = merge_with_industry_data(df, industry_df)\n", "# print(mdf[mdf['ts_code'] == '600751.SH'][['ts_code', 'trade_date', 'l2_code']])" ], "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "industry\n" ] } ], "execution_count": 32 }, { "cell_type": "code", "id": "c4e9e1d31da6dba6", "metadata": { "ExecuteTime": { "end_time": "2025-04-04T15:36:19.231919Z", "start_time": "2025-04-04T15:36:19.058570Z" } }, "source": [ "def calculate_indicators(df):\n", " \"\"\"\n", " 计算四个指标:当日涨跌幅、5日移动平均、RSI、MACD。\n", " \"\"\"\n", " df = df.sort_values('trade_date')\n", " df['daily_return'] = (df['close'] - df['pre_close']) / df['pre_close'] * 100\n", " # df['5_day_ma'] = df['close'].rolling(window=5).mean()\n", " delta = df['close'].diff()\n", " gain = delta.where(delta > 0, 0)\n", " loss = -delta.where(delta < 0, 0)\n", " avg_gain = gain.rolling(window=14).mean()\n", " avg_loss = loss.rolling(window=14).mean()\n", " rs = avg_gain / avg_loss\n", " df['RSI'] = 100 - (100 
/ (1 + rs))\n", "\n", " # 计算MACD\n", " ema12 = df['close'].ewm(span=12, adjust=False).mean()\n", " ema26 = df['close'].ewm(span=26, adjust=False).mean()\n", " df['MACD'] = ema12 - ema26\n", " df['Signal_line'] = df['MACD'].ewm(span=9, adjust=False).mean()\n", " df['MACD_hist'] = df['MACD'] - df['Signal_line']\n", "\n", " # 4. 情绪因子1:市场上涨比例(Up Ratio)\n", " df['up_ratio'] = df['daily_return'].apply(lambda x: 1 if x > 0 else 0)\n", " df['up_ratio_20d'] = df['up_ratio'].rolling(window=20).mean() # 过去20天上涨比例\n", "\n", " # 5. 情绪因子2:成交量变化率(Volume Change Rate)\n", " df['volume_mean'] = df['vol'].rolling(window=20).mean() # 过去20天的平均成交量\n", " df['volume_change_rate'] = (df['vol'] - df['volume_mean']) / df['volume_mean'] * 100 # 成交量变化率\n", "\n", " # 6. 情绪因子3:波动率(Volatility)\n", " df['volatility'] = df['daily_return'].rolling(window=20).std() # 过去20天的日收益率标准差\n", "\n", " # 7. 情绪因子4:成交额变化率(Amount Change Rate)\n", " df['amount_mean'] = df['amount'].rolling(window=20).mean() # 过去20天的平均成交额\n", " df['amount_change_rate'] = (df['amount'] - df['amount_mean']) / df['amount_mean'] * 100 # 成交额变化率\n", "\n", " return df\n", "\n", "\n", "def generate_index_indicators(h5_filename):\n", " df = pd.read_hdf(h5_filename, key='index_data')\n", " df['trade_date'] = pd.to_datetime(df['trade_date'], format='%Y%m%d')\n", " df = df.sort_values('trade_date')\n", "\n", " # 计算每个ts_code的相关指标\n", " df_indicators = []\n", " for ts_code in df['ts_code'].unique():\n", " df_index = df[df['ts_code'] == ts_code].copy()\n", " df_index = calculate_indicators(df_index)\n", " df_indicators.append(df_index)\n", "\n", " # 合并所有指数的结果\n", " df_all_indicators = pd.concat(df_indicators, ignore_index=True)\n", "\n", " # 保留trade_date列,并将同一天的数据按ts_code合并成一行\n", " df_final = df_all_indicators.pivot_table(\n", " index='trade_date',\n", " columns='ts_code',\n", " values=['daily_return', 'RSI', 'MACD', 'Signal_line',\n", " 'MACD_hist', 'up_ratio_20d', 'volume_change_rate', 'volatility',\n", " 'amount_change_rate', 
'amount_mean'],\n", " aggfunc='last'\n", " )\n", "\n", " df_final.columns = [f\"{col[1]}_{col[0]}\" for col in df_final.columns]\n", " df_final = df_final.reset_index()\n", "\n", " return df_final\n", "\n", "\n", "# 使用函数\n", "h5_filename = '../../data/index_data.h5'\n", "index_data = generate_index_indicators(h5_filename)\n", "index_data = index_data.dropna()\n" ], "outputs": [], "execution_count": 33 }, { "cell_type": "code", "id": "a735bc02ceb4d872", "metadata": { "jupyter": { "source_hidden": true }, "ExecuteTime": { "end_time": "2025-04-04T15:36:19.280300Z", "start_time": "2025-04-04T15:36:19.231919Z" } }, "source": [ "import numpy as np\n", "import talib\n", "\n", "\n", "def get_rolling_factor(df):\n", " old_columns = df.columns.tolist()[:]\n", "\n", " # 按股票和日期排序(如果尚未排序)\n", " df = df.sort_values(by=['ts_code', 'trade_date'])\n", "\n", " grouped = df.groupby('ts_code', group_keys=False)\n", "\n", " # 提前计算布尔掩码\n", " window = 5\n", " return_threshold = 0.0\n", "\n", " df['_is_upside'] = df['pct_chg'] > return_threshold\n", " df['_is_downside'] = df['pct_chg'] < -return_threshold\n", "\n", " # # 1. 上行波动率 (20日)\n", " # def rolling_upside_volatility(series, _is_upside, window):\n", " # # 提取正收益\n", " # positive_returns = series.where(_is_upside, np.nan)\n", " # # 计算滚动窗口标准差\n", " # return positive_returns.rolling(window=window, min_periods=2).std()\n", " #\n", " # df[f'upside_volatility_{window}'] = grouped.apply(\n", " # lambda x: rolling_upside_volatility(x['pct_chg'], x['_is_upside'], window)\n", " # ).reset_index(level=0, drop=True)\n", " #\n", " # # 2. 
下行波动率 (20日)\n", " # def rolling_downside_volatility(series, _is_downside, window):\n", " # # 提取负收益\n", " # negative_returns = series.where(_is_downside, np.nan)\n", " # # 计算滚动窗口标准差\n", " # return negative_returns.rolling(window=window, min_periods=2).std()\n", " #\n", " # df[f'downside_volatility_{window}'] = grouped.apply(\n", " # lambda x: rolling_downside_volatility(x['pct_chg'], x['_is_downside'], window)\n", " # ).reset_index(level=0, drop=True)\n", " #\n", " # # 3. 上行/下行波动率比率 (20日)\n", " # df[f'volatility_ratio_{window}'] = df[f'upside_volatility_{window}'] / (df[f'downside_volatility_{window}'] + 1e-8)\n", " #\n", " # # 4. 上行半方差 (20日)\n", " # def rolling_upside_semi_variance(series, _is_upside, window, threshold):\n", " # # 提取正收益\n", " # positive_returns = series.where(_is_upside, np.nan)\n", " # # 计算平方偏差\n", " # squared_deviation = (positive_returns - threshold) ** 2\n", " # # 计算滚动窗口均值\n", " # return squared_deviation.rolling(window=window, min_periods=2).mean()\n", " #\n", " # df[f'upside_semi_variance_{window}'] = grouped.apply(\n", " # lambda x: rolling_upside_semi_variance(x['pct_chg'], x['_is_upside'], window, return_threshold)\n", " # ).reset_index(level=0, drop=True)\n", " #\n", " # # 5. 下行半方差 (20日)\n", " # def rolling_downside_semi_variance(series, _is_downside, window, threshold):\n", " # # 提取负收益\n", " # negative_returns = series.where(_is_downside, np.nan)\n", " # # 计算平方偏差\n", " # squared_deviation = (negative_returns - (-threshold)) ** 2\n", " # # 计算滚动窗口均值\n", " # return squared_deviation.rolling(window=window, min_periods=2).mean()\n", " #\n", " # df[f'downside_semi_variance_{window}'] = grouped.apply(\n", " # lambda x: rolling_downside_semi_variance(x['pct_chg'], x['_is_downside'], window, return_threshold)\n", " # ).reset_index(level=0, drop=True)\n", " #\n", " # # 8. 
正负收益天数比率 (20日)\n", " # df[f'positive_negative_days_ratio_{window}'] = grouped['pct_chg'].rolling(window=window, min_periods=2).apply(\n", " # lambda x: np.sum(x > 0) / (np.sum(x < 0) + 1e-8)).reset_index(level=0, drop=True)\n", " #\n", " # # 9. 正收益幅度均值 (20日)\n", " # def average_positive_return_magnitude(series):\n", " # positive_returns = series[series > return_threshold]\n", " # if positive_returns.empty:\n", " # return 0\n", " # return positive_returns.mean()\n", " #\n", " # df[f'avg_positive_return_magnitude_{window}'] = grouped['pct_chg'].rolling(window=window, min_periods=2).apply(\n", " # average_positive_return_magnitude).reset_index(level=0, drop=True)\n", " #\n", " # # 10. 负收益幅度均值 (20日)\n", " # def average_negative_return_magnitude(series):\n", " # negative_returns = series[series < -return_threshold]\n", " # if negative_returns.empty:\n", " # return 0\n", " # return np.abs(negative_returns.mean())\n", " #\n", " # df[f'avg_negative_return_magnitude_{window}'] = grouped['pct_chg'].rolling(window=window, min_periods=2).apply(\n", " # average_negative_return_magnitude).reset_index(level=0, drop=True)\n", "\n", " # df[\"gap_next_open\"] = (df[\"open\"].shift(-1) - df[\"close\"]) / df[\"close\"]\n", "\n", " df['return_skew'] = grouped['pct_chg'].rolling(window=5).skew().reset_index(0, drop=True)\n", " df['return_kurtosis'] = grouped['pct_chg'].rolling(window=5).kurt().reset_index(0, drop=True)\n", "\n", " # 因子 1:短期成交量变化率\n", " df['volume_change_rate'] = (\n", " grouped['vol'].rolling(window=2).mean() /\n", " grouped['vol'].rolling(window=10).mean() - 1\n", " ).reset_index(level=0, drop=True) # 确保索引对齐\n", "\n", " # 因子 2:成交量突破信号\n", " max_volume = grouped['vol'].rolling(window=5).max().reset_index(level=0, drop=True) # 确保索引对齐\n", " df['cat_volume_breakout'] = (df['vol'] > max_volume)\n", "\n", " # 因子 3:换手率均线偏离度\n", " mean_turnover = grouped['turnover_rate'].rolling(window=3).mean().reset_index(level=0, drop=True)\n", " std_turnover = 
grouped['turnover_rate'].rolling(window=3).std().reset_index(level=0, drop=True)\n", " df['turnover_deviation'] = (df['turnover_rate'] - mean_turnover) / std_turnover\n", "\n", " # 因子 4:换手率激增信号\n", " df['cat_turnover_spike'] = (df['turnover_rate'] > mean_turnover + 2 * std_turnover)\n", "\n", " # 因子 5:量比均值\n", " df['avg_volume_ratio'] = grouped['volume_ratio'].rolling(window=3).mean().reset_index(level=0, drop=True)\n", "\n", " # 因子 6:量比突破信号\n", " max_volume_ratio = grouped['volume_ratio'].rolling(window=5).max().reset_index(level=0, drop=True)\n", " df['cat_volume_ratio_breakout'] = (df['volume_ratio'] > max_volume_ratio)\n", "\n", " df['vol_spike'] = grouped.apply(\n", " lambda x: pd.Series(x['vol'].rolling(20).mean(), index=x.index)\n", " )\n", " df['vol_std_5'] = df['vol'].pct_change().rolling(5).std()\n", "\n", " # 计算 ATR\n", " df['atr_14'] = grouped.apply(\n", " lambda x: pd.Series(talib.ATR(x['high'].values, x['low'].values, x['close'].values, timeperiod=14),\n", " index=x.index)\n", " )\n", " df['atr_6'] = grouped.apply(\n", " lambda x: pd.Series(talib.ATR(x['high'].values, x['low'].values, x['close'].values, timeperiod=6),\n", " index=x.index)\n", " )\n", "\n", " # 计算 OBV 及其均线\n", " df['obv'] = grouped.apply(\n", " lambda x: pd.Series(talib.OBV(x['close'].values, x['vol'].values), index=x.index)\n", " )\n", " df['maobv_6'] = grouped.apply(\n", " lambda x: pd.Series(talib.SMA(x['obv'].values, timeperiod=6), index=x.index)\n", " )\n", "\n", " df['rsi_3'] = grouped.apply(\n", " lambda x: pd.Series(talib.RSI(x['close'].values, timeperiod=3), index=x.index)\n", " )\n", " # df['rsi_6'] = grouped.apply(\n", " # lambda x: pd.Series(talib.RSI(x['close'].values, timeperiod=6), index=x.index)\n", " # )\n", " # df['rsi_9'] = grouped.apply(\n", " # lambda x: pd.Series(talib.RSI(x['close'].values, timeperiod=9), index=x.index)\n", " # )\n", "\n", " # 计算 return_10 和 return_20\n", " df['return_5'] = grouped['close'].apply(lambda x: x / x.shift(5) - 1)\n", " # 
df['return_10'] = grouped['close'].apply(lambda x: x / x.shift(10) - 1)\n", " df['return_20'] = grouped['close'].apply(lambda x: x / x.shift(20) - 1)\n", "\n", " # df['avg_close_5'] = grouped['close'].apply(lambda x: x.rolling(window=5).mean() / x)\n", "\n", " # 计算标准差指标\n", " df['std_return_5'] = grouped['close'].apply(lambda x: x.pct_change().rolling(window=5).std())\n", " # df['std_return_15'] = grouped['close'].apply(lambda x: x.pct_change().rolling(window=15).std())\n", " # df['std_return_25'] = grouped['close'].apply(lambda x: x.pct_change().rolling(window=25).std())\n", " df['std_return_90'] = grouped['close'].apply(lambda x: x.pct_change().rolling(window=90).std())\n", " df['std_return_90_2'] = grouped['close'].apply(lambda x: x.shift(10).pct_change().rolling(window=90).std())\n", "\n", " # 计算 EMA 指标\n", " df['_ema_5'] = grouped['close'].apply(\n", " lambda x: pd.Series(talib.EMA(x.values, timeperiod=5), index=x.index)\n", " )\n", " df['_ema_13'] = grouped['close'].apply(\n", " lambda x: pd.Series(talib.EMA(x.values, timeperiod=13), index=x.index)\n", " )\n", " df['_ema_20'] = grouped['close'].apply(\n", " lambda x: pd.Series(talib.EMA(x.values, timeperiod=20), index=x.index)\n", " )\n", " df['_ema_60'] = grouped['close'].apply(\n", " lambda x: pd.Series(talib.EMA(x.values, timeperiod=60), index=x.index)\n", " )\n", "\n", " # 计算 act_factor1, act_factor2, act_factor3, act_factor4\n", " df['act_factor1'] = grouped['_ema_5'].apply(\n", " lambda x: np.arctan((x / x.shift(1) - 1) * 100) * 57.3 / 50\n", " )\n", " df['act_factor2'] = grouped['_ema_13'].apply(\n", " lambda x: np.arctan((x / x.shift(1) - 1) * 100) * 57.3 / 40\n", " )\n", " df['act_factor3'] = grouped['_ema_20'].apply(\n", " lambda x: np.arctan((x / x.shift(1) - 1) * 100) * 57.3 / 21\n", " )\n", " df['act_factor4'] = grouped['_ema_60'].apply(\n", " lambda x: np.arctan((x / x.shift(1) - 1) * 100) * 57.3 / 10\n", " )\n", "\n", " # 根据 trade_date 截面计算排名\n", " df['rank_act_factor1'] = 
df.groupby('trade_date', group_keys=False)['act_factor1'].rank(ascending=False, pct=True)\n", " df['rank_act_factor2'] = df.groupby('trade_date', group_keys=False)['act_factor2'].rank(ascending=False, pct=True)\n", " df['rank_act_factor3'] = df.groupby('trade_date', group_keys=False)['act_factor3'].rank(ascending=False, pct=True)\n", "\n", " df['log(circ_mv)'] = np.log(df['circ_mv'])\n", "\n", " def rolling_covariance(x, y, window):\n", " return x.rolling(window).cov(y)\n", "\n", " def delta(series, period):\n", " return series.diff(period)\n", "\n", " def rank(series):\n", " return series.rank(pct=True)\n", "\n", " def stddev(series, window):\n", " return series.rolling(window).std()\n", "\n", " window_high_volume = 5\n", " window_close_stddev = 20\n", " period_delta = 5\n", " df['cov'] = rolling_covariance(df['high'], df['vol'], window_high_volume)\n", " df['delta_cov'] = delta(df['cov'], period_delta)\n", " df['_rank_stddev'] = rank(stddev(df['close'], window_close_stddev))\n", " df['alpha_22_improved'] = -1 * df['delta_cov'] * df['_rank_stddev']\n", "\n", " df['alpha_003'] = np.where(df['high'] != df['low'],\n", " (df['close'] - df['open']) / (df['high'] - df['low']),\n", " 0)\n", "\n", " df['alpha_007'] = grouped.apply(lambda x: x['close'].rolling(5).corr(x['vol'])).reset_index(level=0, drop=True)\n", " df['alpha_007'] = df.groupby('trade_date', group_keys=False)['alpha_007'].rank(ascending=True, pct=True)\n", "\n", " df['alpha_013'] = grouped['close'].transform(lambda x: x.rolling(5).sum() - x.rolling(20).sum())\n", " df['alpha_013'] = df.groupby('trade_date', group_keys=False)['alpha_013'].rank(ascending=True, pct=True)\n", "\n", " df['cat_up_limit'] = (df['close'] == df['up_limit']) # 是否涨停(1表示涨停,0表示未涨停)\n", " df['cat_down_limit'] = (df['close'] == df['down_limit']) # 是否跌停(1表示跌停,0表示未跌停)\n", " df['up_limit_count_10d'] = grouped['cat_up_limit'].rolling(window=10, min_periods=1).sum().reset_index(level=0,\n", " drop=True)\n", " df['down_limit_count_10d'] = 
grouped['cat_down_limit'].rolling(window=10, min_periods=1).sum().reset_index(level=0,\n", " drop=True)\n", "\n", " # 3. 最近连续涨跌停天数\n", " def calculate_consecutive_limits(series):\n", " \"\"\"\n", " 计算连续涨停/跌停天数。\n", " \"\"\"\n", " consecutive_up = series * (series.groupby((series != series.shift()).cumsum()).cumcount() + 1)\n", " consecutive_down = series * (series.groupby((series != series.shift()).cumsum()).cumcount() + 1)\n", " return consecutive_up, consecutive_down\n", "\n", " # 连续涨停天数\n", " df['consecutive_up_limit'] = grouped['cat_up_limit'].apply(\n", " lambda x: calculate_consecutive_limits(x)[0]\n", " ).reset_index(level=0, drop=True)\n", "\n", " df['vol_break'] = np.where((df['close'] > df['cost_85pct']) & (df['volume_ratio'] > 2), 1, 0)\n", "\n", " df['weight_roc5'] = grouped['weight_avg'].apply(lambda x: x.pct_change(5))\n", "\n", " def rolling_corr(group):\n", " roc_close = group['close'].pct_change()\n", " roc_weight = group['weight_avg'].pct_change()\n", " return roc_close.rolling(10).corr(roc_weight)\n", "\n", " df['price_cost_divergence'] = grouped.apply(rolling_corr)\n", "\n", " df['smallcap_concentration'] = (1 / df['log(circ_mv)']) * (df['cost_85pct'] - df['cost_15pct'])\n", "\n", " # 16. 筹码稳定性指数 (20日波动率)\n", " df['weight_std20'] = grouped['weight_avg'].apply(lambda x: x.rolling(20).std())\n", " df['cost_stability'] = df['weight_std20'] / grouped['weight_avg'].transform(lambda x: x.rolling(20).mean())\n", "\n", " # 17. 成本区间突破标记\n", " df['high_cost_break_days'] = grouped.apply(lambda g: g['close'].gt(g['cost_95pct']).rolling(5).sum())\n", "\n", " # 20. 筹码-流动性风险\n", " df['liquidity_risk'] = (df['cost_95pct'] - df['cost_5pct']) * (\n", " 1 / grouped['vol'].transform(lambda x: x.rolling(10).mean()))\n", "\n", " # 7. 
市值波动率因子\n", " df['turnover_std'] = grouped['turnover_rate'].rolling(window=20).std().reset_index(level=0, drop=True)\n", " df['mv_volatility'] = grouped.apply(lambda x: x['turnover_std'] / x['log(circ_mv)']).reset_index(level=0, drop=True)\n", "\n", " # 8. 市值成长性因子\n", " df['volume_growth'] = grouped['vol'].pct_change(periods=20).reset_index(level=0, drop=True)\n", " df['mv_growth'] = grouped.apply(lambda x: x['volume_growth'] / x['log(circ_mv)']).reset_index(level=0, drop=True)\n", "\n", " df[\"ar\"] = df[\"high\"].div(df[\"open\"]).rolling(3).sum() / df[\"open\"].div(df[\"low\"]).rolling(3).sum() * 100\n", " # 计算 BR 指标\n", " df[\"pre_close\"] = df[\"close\"].shift(1)\n", " df[\"br_up\"] = (df[\"high\"] - df[\"pre_close\"]).clip(lower=0)\n", " df[\"br_down\"] = (df[\"pre_close\"] - df[\"low\"]).clip(lower=0)\n", " df[\"br\"] = df[\"br_up\"].rolling(3).sum() / df[\"br_down\"].rolling(3).sum() * 100\n", " df['arbr'] = df['ar'] - df['br']\n", " df.drop(columns=[\"pre_close\", \"br_up\", \"br_down\", 'ar', 'br'], inplace=True)\n", "\n", " df.drop(columns=['weight_std20'], inplace=True, errors='ignore')\n", " new_columns = [col for col in df.columns.tolist()[:] if col not in old_columns]\n", "\n", " return df, new_columns\n", "\n", "\n", "def get_simple_factor(df):\n", " old_columns = df.columns.tolist()[:]\n", " df = df.sort_values(by=['ts_code', 'trade_date'])\n", "\n", " alpha = 0.5\n", " df['momentum_factor'] = df['volume_change_rate'] + alpha * df['turnover_deviation']\n", " df['resonance_factor'] = df['volume_ratio'] * df['pct_chg']\n", " df['log_close'] = np.log(df['close'])\n", "\n", " df['cat_vol_spike'] = df['vol'] > 2 * df['vol_spike']\n", "\n", " df['up'] = (df['high'] - df[['close', 'open']].max(axis=1)) / df['close']\n", " df['down'] = (df[['close', 'open']].min(axis=1) - df['low']) / df['close']\n", "\n", " df['obv-maobv_6'] = df['obv'] - df['maobv_6']\n", "\n", " # 计算比值指标\n", " df['std_return_5 / std_return_90'] = df['std_return_5'] / 
df['std_return_90']\n", " # df['std_return_5 / std_return_25'] = df['std_return_5'] / df['std_return_25']\n", "\n", " # 计算标准差差值\n", " df['std_return_90 - std_return_90_2'] = df['std_return_90'] - df['std_return_90_2']\n", "\n", " # df['cat_af1'] = df['act_factor1'] > 0\n", " df['cat_af2'] = df['act_factor2'] > df['act_factor1']\n", " df['cat_af3'] = df['act_factor3'] > df['act_factor2']\n", " df['cat_af4'] = df['act_factor4'] > df['act_factor3']\n", "\n", " # 计算 act_factor5 和 act_factor6\n", " df['act_factor5'] = df['act_factor1'] + df['act_factor2'] + df['act_factor3'] + df['act_factor4']\n", " df['act_factor6'] = (df['act_factor1'] - df['act_factor2']) / np.sqrt(\n", " df['act_factor1'] ** 2 + df['act_factor2'] ** 2)\n", "\n", " df['active_buy_volume_large'] = df['buy_lg_vol'] / df['net_mf_vol']\n", " df['active_buy_volume_big'] = df['buy_elg_vol'] / df['net_mf_vol']\n", " df['active_buy_volume_small'] = df['buy_sm_vol'] / df['net_mf_vol']\n", "\n", " df['buy_lg_vol_minus_sell_lg_vol'] = (df['buy_lg_vol'] - df['sell_lg_vol']) / df['net_mf_vol']\n", " df['buy_elg_vol_minus_sell_elg_vol'] = (df['buy_elg_vol'] - df['sell_elg_vol']) / df['net_mf_vol']\n", "\n", " df['log(circ_mv)'] = np.log(df['circ_mv'])\n", "\n", " df['ctrl_strength'] = (df['cost_85pct'] - df['cost_15pct']) / (df['his_high'] - df['his_low'])\n", "\n", " df['low_cost_dev'] = (df['close'] - df['cost_5pct']) / (df['cost_50pct'] - df['cost_5pct'])\n", "\n", " df['asymmetry'] = (df['cost_95pct'] - df['cost_50pct']) / (df['cost_50pct'] - df['cost_5pct'])\n", "\n", " df['lock_factor'] = df['turnover_rate'] * (\n", " 1 - (df['cost_95pct'] - df['cost_5pct']) / (df['his_high'] - df['his_low']))\n", "\n", " df['cat_vol_break'] = (df['close'] > df['cost_85pct']) & (df['volume_ratio'] > 2)\n", "\n", " df['cost_atr_adj'] = (df['cost_95pct'] - df['cost_5pct']) / df['atr_14']\n", "\n", " # 12. 
小盘股筹码集中度\n", " df['smallcap_concentration'] = (1 / df['log(circ_mv)']) * (df['cost_85pct'] - df['cost_15pct'])\n", "\n", " df['cat_golden_resonance'] = ((df['close'] > df['weight_avg']) &\n", " (df['volume_ratio'] > 1.5) &\n", " (df['winner_rate'] > 0.7))\n", "\n", " df['mv_turnover_ratio'] = df['turnover_rate'] / df['log(circ_mv)']\n", "\n", " df['mv_adjusted_volume'] = df['vol'] / df['log(circ_mv)']\n", "\n", " df['mv_weighted_turnover'] = df['turnover_rate'] * (1 / df['log(circ_mv)'])\n", "\n", " df['nonlinear_mv_volume'] = df['vol'] / df['log(circ_mv)']\n", "\n", " df['mv_volume_ratio'] = df['volume_ratio'] / df['log(circ_mv)']\n", "\n", " df['mv_momentum'] = df['turnover_rate'] * df['volume_ratio'] / df['log(circ_mv)']\n", "\n", " drop_columns = [col for col in df.columns if col.startswith('_')]\n", " df.drop(columns=drop_columns, inplace=True, errors='ignore')\n", "\n", " new_columns = [col for col in df.columns.tolist()[:] if col not in old_columns]\n", " return df, new_columns\n" ], "outputs": [], "execution_count": 34 }, { "cell_type": "code", "id": "53f86ddc0677a6d7", "metadata": { "jupyter": { "source_hidden": true }, "scrolled": true, "ExecuteTime": { "end_time": "2025-04-04T15:36:25.455672Z", "start_time": "2025-04-04T15:36:19.322099Z" } }, "source": [ "from utils.factor import get_act_factor\n", "\n", "\n", "def read_industry_data(h5_filename):\n", " # 读取 H5 文件中所有的行业数据\n", " industry_data = pd.read_hdf(h5_filename, key='sw_daily', columns=[\n", " 'ts_code', 'trade_date', 'open', 'close', 'high', 'low', 'pe', 'pb', 'vol'\n", " ]) # 假设 H5 文件的键是 'industry_data'\n", " industry_data = industry_data.sort_values(by=['ts_code', 'trade_date'])\n", " industry_data = industry_data.reindex()\n", " industry_data['trade_date'] = pd.to_datetime(industry_data['trade_date'], format='%Y%m%d')\n", "\n", " grouped = industry_data.groupby('ts_code', group_keys=False)\n", " industry_data['obv'] = grouped.apply(\n", " lambda x: pd.Series(talib.OBV(x['close'].values, 
x['vol'].values), index=x.index)\n", " )\n", " industry_data['return_5'] = grouped['close'].apply(lambda x: x / x.shift(5) - 1)\n", " industry_data['return_20'] = grouped['close'].apply(lambda x: x / x.shift(20) - 1)\n", "\n", " industry_data = get_act_factor(industry_data, cat=False)\n", " industry_data = industry_data.sort_values(by=['trade_date', 'ts_code'])\n", "\n", " # # 计算每天每个 ts_code 的因子和当天所有 ts_code 的中位数的偏差\n", " # factor_columns = ['obv', 'return_5', 'return_20', 'act_factor1', 'act_factor2', 'act_factor3', 'act_factor4'] # 因子列\n", " # \n", " # for factor in factor_columns:\n", " # if factor in industry_data.columns:\n", " # # 计算每天每个 ts_code 的因子值与当天所有 ts_code 的中位数的偏差\n", " # industry_data[f'{factor}_deviation'] = industry_data.groupby('trade_date')[factor].transform(\n", " # lambda x: x - x.mean())\n", "\n", " industry_data['return_5_percentile'] = industry_data.groupby('trade_date')['return_5'].transform(\n", " lambda x: x.rank(pct=True))\n", " industry_data['return_20_percentile'] = industry_data.groupby('trade_date')['return_20'].transform(\n", " lambda x: x.rank(pct=True))\n", " industry_data = industry_data.drop(columns=['open', 'close', 'high', 'low', 'pe', 'pb', 'vol'])\n", "\n", " industry_data = industry_data.rename(\n", " columns={col: f'industry_{col}' for col in industry_data.columns if col not in ['ts_code', 'trade_date']})\n", "\n", " industry_data = industry_data.rename(columns={'ts_code': 'cat_l2_code'})\n", " return industry_data\n", "\n", "\n", "industry_df = read_industry_data('../../data/sw_daily.h5')\n" ], "outputs": [], "execution_count": 35 }, { "cell_type": "code", "id": "dbe2fd8021b9417f", "metadata": { "ExecuteTime": { "end_time": "2025-04-04T15:36:25.503415Z", "start_time": "2025-04-04T15:36:25.498021Z" } }, "source": [ "origin_columns = df.columns.tolist()\n", "origin_columns = [col for col in origin_columns if\n", " col not in ['turnover_rate', 'pe_ttm', 'volume_ratio', 'vol', 'pct_chg', 'l2_code', 'winner_rate']]\n", 
"origin_columns = [col for col in origin_columns if col not in index_data.columns]\n", "origin_columns = [col for col in origin_columns if 'cyq' not in col]\n", "print(origin_columns)" ], "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "['ts_code', 'open', 'close', 'high', 'low', 'circ_mv', 'is_st', 'up_limit', 'down_limit', 'buy_sm_vol', 'sell_sm_vol', 'buy_lg_vol', 'sell_lg_vol', 'buy_elg_vol', 'sell_elg_vol', 'net_mf_vol', 'his_low', 'his_high', 'cost_5pct', 'cost_15pct', 'cost_50pct', 'cost_85pct', 'cost_95pct', 'weight_avg', 'in_date']\n" ] } ], "execution_count": 36 }, { "cell_type": "code", "id": "85c3e3d0235ffffa", "metadata": { "ExecuteTime": { "end_time": "2025-04-04T15:38:25.829052Z", "start_time": "2025-04-04T15:36:25.591562Z" } }, "source": [ "def filter_data(df):\n", " # df = df.groupby('trade_date').apply(lambda x: x.nlargest(1000, 'act_factor1'))\n", " df = df[~df['is_st']]\n", " df = df[~df['ts_code'].str.endswith('BJ')]\n", " df = df[~df['ts_code'].str.startswith('30')]\n", " df = df[~df['ts_code'].str.startswith('68')]\n", " df = df[~df['ts_code'].str.startswith('8')]\n", " df = df[df['trade_date'] >= '20180101']\n", " df = df.drop(columns=['in_date'])\n", " df = df.reset_index(drop=True)\n", " return df\n", "\n", "\n", "df = filter_data(df)\n", "# df = get_technical_factor(df)\n", "# df = get_act_factor(df)\n", "# df = get_money_flow_factor(df)\n", "# df = get_alpha_factor(df)\n", "# df = get_limit_factor(df)\n", "# df = get_cyp_perf_factor(df)\n", "# df = get_mv_factors(df)\n", "df, _ = get_rolling_factor(df)\n", "df, _ = get_simple_factor(df)\n", "# df = df.merge(industry_df, on=['l2_code', 'trade_date'], how='left')\n", "df = df.rename(columns={'l2_code': 'cat_l2_code'})\n", "# df = df.merge(index_data, on='trade_date', how='left')\n", "\n", "print(df.info())" ], "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "Index: 5118212 entries, 0 to 5118211\n", "Columns: 115 entries, ts_code to 
def create_deviation_within_dates(df, feature_columns):
    """For each numeric feature, add its deviation from the cross-sectional
    mean of the same (trade_date, sector) group.

    Parameters
    ----------
    df : DataFrame with 'trade_date' and 'cat_l2_code' columns plus the
        feature columns.
    feature_columns : list of feature names; the input list is not mutated.

    Returns
    -------
    (df_with_new_columns, extended_feature_list) — one new
    'deviation_mean_<feature>' column per retained numeric feature.
    """
    groupby_col = 'cat_l2_code'  # sector-level grouping within each day
    new_columns = {}
    ret_feature_columns = feature_columns[:]

    # Single pass over the exclusion markers; the original rebuilt the list
    # four times and filtered the 'cat' substring twice.
    excluded_markers = ('cat', 'index', 'industry', 'limit', 'cyq')
    num_features = [col for col in feature_columns
                    if not any(marker in col for marker in excluded_markers)]

    for feature in num_features:
        if feature == 'trade_date':  # never compute a deviation for the date key
            continue

        grouped_mean = df.groupby(['trade_date', groupby_col])[feature].transform('mean')
        deviation_col_name = f'deviation_mean_{feature}'
        new_columns[deviation_col_name] = df[feature] - grouped_mean
        ret_feature_columns.append(deviation_col_name)

    # Concatenate once to avoid fragmenting the frame column-by-column.
    df = pd.concat([df, pd.DataFrame(new_columns)], axis=1)

    return df, ret_feature_columns
def calculate_risk_adjusted_target(df, days=5):
    """Compute a Sharpe-style, risk-adjusted forward return per row.

    Entry at the next row's open, exit at the close `days` rows later
    (per ts_code); the forward return is divided by the rolling `days`-window
    standard deviation of that same forward-return series.

    Parameters
    ----------
    df : DataFrame with columns ['ts_code', 'trade_date', 'open', 'close'].
        Helper columns future_close/future_open/future_return/
        future_volatility are added to the (re-sorted) frame.
    days : holding horizon in trading rows.

    Returns
    -------
    Series aligned with the sorted frame; +/-inf (zero volatility) mapped
    to NaN.
    """
    df = df.sort_values(by=['ts_code', 'trade_date'])

    df['future_close'] = df.groupby('ts_code')['close'].shift(-days)
    df['future_open'] = df.groupby('ts_code')['open'].shift(-1)
    df['future_return'] = (df['future_close'] - df['future_open']) / df['future_open']

    df['future_volatility'] = df.groupby('ts_code')['future_return'].rolling(days, min_periods=1).std().reset_index(
        level=0, drop=True)
    # BUG FIX: a Sharpe-style ratio divides return by volatility; the original
    # multiplied, which rewards high-volatility names instead of penalising them.
    sharpe_ratio = df['future_return'] / df['future_volatility']
    # Zero volatility yields +/-inf; normalise to NaN (non-inplace form avoids
    # pandas chained-assignment pitfalls).
    sharpe_ratio = sharpe_ratio.replace([np.inf, -np.inf], np.nan)

    return sharpe_ratio
def remove_highly_correlated_features(df, feature_columns, threshold=0.9):
    """Drop each feature whose absolute correlation with an earlier feature
    exceeds `threshold`; 'act'/'af' factor columns are always retained.

    Raises
    ------
    ValueError when none of `feature_columns` is numeric.
    """
    numeric_cols = df[feature_columns].select_dtypes(include=[np.number]).columns.tolist()
    if not numeric_cols:
        raise ValueError("No numeric features found in the provided data.")

    abs_corr = df[numeric_cols].corr().abs()
    # Strict upper triangle: each pair is inspected exactly once, and a
    # column is flagged only against columns that precede it.
    upper_mask = np.triu(np.ones(abs_corr.shape), k=1).astype(bool)
    upper_tri = abs_corr.where(upper_mask)

    flagged = {col for col in upper_tri.columns if (upper_tri[col] > threshold).any()}
    return [col for col in feature_columns
            if col not in flagged or 'act' in col or 'af' in col]
def neutralize_manual(df, features, industry_col, mkt_cap_col):
    """Industry/size neutralisation: within each industry, replace each factor
    with the residual of an OLS regression of the factor on log market cap.

    Parameters
    ----------
    df : DataFrame; factor columns are overwritten in place.
    features : factor column names to neutralise.
    industry_col : industry grouping column.
    mkt_cap_col : market-cap column (assumed > 0 — log is taken; TODO confirm).

    Returns
    -------
    The same DataFrame with neutralised factor columns.
    """
    for col in features:
        neutralised = []
        for _, group in df.groupby(industry_col):
            if len(group) > 1:
                x = np.log(group[mkt_cap_col])  # log market cap
                y = group[col]                  # factor values
                # BUG FIX: the original divided an (n-1)-normalised covariance
                # (np.cov default) by an n-normalised variance (np.var default),
                # biasing the slope by n/(n-1); use a consistent ddof.
                beta = np.cov(y, x)[0, 1] / np.var(x, ddof=1)
                alpha = np.mean(y) - beta * np.mean(x)
                neutralised.append(y - (alpha + beta * x))
            else:
                # Too few observations to regress: keep the raw factor value.
                neutralised.append(group[col])

        # BUG FIX: the original assigned a plain list positionally, which
        # misaligns rows whenever df is not sorted by industry; concatenating
        # Series preserves the index so assignment realigns correctly.
        df[col] = pd.concat(neutralised)

    return df
def quantile_filter(df, features, lower_quantile=0.01, upper_quantile=0.99, window=60):
    """Winsorise each feature inside every trade_date group using rolling
    quantile bounds.

    Parameters
    ----------
    df : DataFrame with a 'trade_date' column; a copy is returned.
    features : columns to clip.
    lower_quantile, upper_quantile : clipping quantiles.
    window : rolling window length, capped at the group size.

    Returns
    -------
    Copy of df with the listed columns clipped into the rolling bounds.
    """
    df = df.copy()
    for col in features:
        # BUG FIX: without min_periods, the first window-1 rows of every
        # trade_date group get NaN bounds (and since window == group size,
        # that is every row but the last); np.clip with a NaN bound turns
        # those values into NaN, silently destroying the column.
        rolling_lower = df.groupby('trade_date')[col].transform(
            lambda x: x.rolling(window=min(len(x), window), min_periods=1).quantile(lower_quantile))
        rolling_upper = df.groupby('trade_date')[col].transform(
            lambda x: x.rolling(window=min(len(x), window), min_periods=1).quantile(upper_quantile))

        # Clip into the per-row bounds.
        df[col] = np.clip(df[col], rolling_lower, rolling_upper)

    return df
def train_light_model(train_data_df, params, feature_columns, callbacks, evals,
                      print_feature_importance=True, num_boost_round=100,
                      validation_days=180, use_pca=False, split_date=None):
    """Train a LightGBM ranking model on a time-ordered split of train_data_df.

    The last `validation_days` distinct trade_dates form the validation set
    (or `split_date` when given). Groups for learning-to-rank are the
    per-trade_date row counts. Returns (model, scaler, pca).

    NOTE(review): `scaler` is created but never fitted (the fit/transform
    lines are commented out), so callers receive an UNFIT StandardScaler.
    NOTE(review): `params['weight']` mutates the caller's dict, and LightGBM
    reads per-sample weights from lgb.Dataset(weight=...), not from params —
    confirm this weighting actually takes effect.
    NOTE(review): when validation_days == 0, the validation rows (last date)
    are also contained in the training set.
    """
    # Ensure chronological order before splitting.
    train_data_df = train_data_df.sort_values(by='trade_date')

    numeric_columns = train_data_df.select_dtypes(include=['float64', 'int64']).columns
    numeric_columns = [col for col in numeric_columns if col in feature_columns]
    # X_train.loc[:, numeric_columns] = scaler.fit_transform(X_train[numeric_columns])
    # X_val.loc[:, numeric_columns] = scaler.transform(X_val[numeric_columns])
    # train_data_df = cross_sectional_standardization(train_data_df, numeric_columns)

    # Drop rows with a missing label.
    train_data_df = train_data_df.dropna(subset=['label'])
    print('原始训练集大小: ', len(train_data_df))

    # Chronological train/validation split.
    if split_date is None:
        all_dates = train_data_df['trade_date'].unique()  # all distinct trade_dates
        if validation_days == 0:
            split_date = all_dates[-1]
        else:
            split_date = all_dates[-validation_days]  # split at the validation_days-th date from the end
    if validation_days == 0:
        train_data_split = train_data_df
    else:
        train_data_split = train_data_df[train_data_df['trade_date'] < split_date]  # training rows
    val_data_split = train_data_df[train_data_df['trade_date'] >= split_date]  # validation rows

    # Report split sizes.
    print(f"划分后的训练集大小: {len(train_data_split)}, 验证集大小: {len(val_data_split)}")

    # Extract features and labels.
    X_train = train_data_split[feature_columns]
    y_train = train_data_split['label']

    X_val = val_data_split[feature_columns]
    y_val = val_data_split['label']

    # Scaler for numeric features (currently unused — see NOTE above).
    scaler = StandardScaler()

    # Per-trade_date row counts: the query groups required by LTR objectives.
    train_groups = train_data_split.groupby('trade_date').size().tolist()
    val_groups = val_data_split.groupby('trade_date').size().tolist()

    # Any column containing 'cat' is treated as categorical.
    categorical_feature = [col for col in feature_columns if 'cat' in col]

    pca = None
    if use_pca:
        pca = PCA(n_components=0.95)  # retain 95% variance (or set a fixed component count)
        numeric_features = [col for col in feature_columns if col not in categorical_feature]
        numeric_pca = pca.fit_transform(X_train[numeric_features])
        X_train = pd.concat([pd.DataFrame(numeric_pca, index=X_train.index), X_train[categorical_feature]], axis=1)

        numeric_pca = pca.transform(X_val[numeric_features])
        X_val = pd.concat([pd.DataFrame(numeric_pca, index=X_val.index), X_val[categorical_feature]], axis=1)

    # Time-based sample weights: quadratically increasing toward recent dates.
    # NOTE(review): stored in params — see note in the docstring.
    ud = sorted(train_data_split["trade_date"].unique().tolist())
    date_weights = {date: weight * weight for date, weight in zip(ud, np.linspace(1, 10, len(ud)))}
    params['weight'] = train_data_split["trade_date"].map(date_weights).tolist()

    train_dataset = lgb.Dataset(
        X_train, label=y_train, group=train_groups,
        categorical_feature=categorical_feature
    )

    val_dataset = lgb.Dataset(
        X_val, label=y_val, group=val_groups,
        categorical_feature=categorical_feature
    )

    # Train with caller-supplied callbacks (logging / evaluation recording).
    model = lgb.train(
        params, train_dataset, num_boost_round=num_boost_round,
        valid_sets=[train_dataset, val_dataset], valid_names=['train', 'valid'],
        callbacks=callbacks
    )

    # Optional diagnostics: metric curves and split-based importances.
    if print_feature_importance:
        lgb.plot_metric(evals)
        lgb.plot_importance(model, importance_type='split', max_num_features=20)
        plt.show()

    return model, scaler, pca
def select_pre_zt_stocks_dynamic(
        stock_df,
):
    """Per trading day, keep at most the 1000 rows with the highest 20-day
    return; rows retain their original index (group_keys=False)."""
    def top_by_momentum(day_rows):
        # nlargest also orders the surviving rows by return_20 descending.
        return day_rows.nlargest(1000, 'return_20')

    return stock_df.groupby('trade_date', group_keys=False).apply(top_by_momentum)
= [col for col in feature_columns if col not in ['trade_date',\n", " 'ts_code',\n", " 'label']]\n", "feature_columns = [col for col in feature_columns if 'future' not in col]\n", "feature_columns = [col for col in feature_columns if 'label' not in col]\n", "feature_columns = [col for col in feature_columns if 'score' not in col]\n", "feature_columns = [col for col in feature_columns if 'gen' not in col]\n", "feature_columns = [col for col in feature_columns if 'pe_ttm' not in col]\n", "feature_columns = [col for col in feature_columns if 'volatility' not in col]\n", "feature_columns = [col for col in feature_columns if 'cat_l2_code' not in col]\n", "feature_columns = [col for col in feature_columns if col not in origin_columns]\n", "feature_columns = [col for col in feature_columns if not col.startswith('_')]\n", "\n", "numeric_columns = pdf.select_dtypes(include=['float64', 'int64']).columns\n", "numeric_columns = [col for col in numeric_columns if col in feature_columns]" ], "id": "ca96fb81e17c4a90", "outputs": [], "execution_count": 44 }, { "metadata": { "ExecuteTime": { "end_time": "2025-04-04T15:46:05.915307Z", "start_time": "2025-04-04T15:39:58.224302Z" } }, "cell_type": "code", "source": [ "pdf = quantile_filter(pdf, numeric_columns)\n", "\n", "pdf = cross_sectional_standardization(pdf, numeric_columns)\n", "\n", "# print('去极值')\n", "# train_data = quantile_filter(train_data, numeric_columns) # 去极值\n", "# # print('中性化')\n", "# # train_data = neutralize_manual(train_data, numeric_columns, industry_col='cat_l2_code', mkt_cap_col='log(circ_mv)') # 中性化\n", "# print('去极值')\n", "# test_data = quantile_filter(test_data, numeric_columns) # 去极值\n", "\n", "feature_columns = remove_highly_correlated_features(pdf,\n", " feature_columns)\n", "print(len(pdf))" ], "id": "81d4570663ae21d7", "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1755000\n" ] } ], "execution_count": 45 }, { "cell_type": "code", "id": "92428d543f4727ad", "metadata": { 
# print('train data size: ', len(train_data))

# Quadratic relevance gains for lambdarank, one per label bucket.
# NOTE(review): here gain = rank**2 (so buckets 0 and 1 both map to 0 and 1),
# while the rolling worker recomputes (rank+1)**2 — confirm which scheme is
# intended; they are inconsistent.
label_gain = list(range(len(df['label'].unique())))
label_gain = [gain * gain for gain in label_gain]
# LightGBM learning-to-rank configuration shared by all rolling windows.
light_params = {
    'label_gain': label_gain,
    'objective': 'lambdarank',
    'metric': 'ndcg',
    'learning_rate': 0.03,
    'num_leaves': 32,
    # 'min_data_in_leaf': 128,
    'max_depth': 8,
    'max_bin': 32,
    'feature_fraction': 0.7,
    # 'bagging_fraction': 0.7,
    'bagging_freq': 5,
    'lambda_l1': 0.1,
    'lambda_l2': 0.1,
    'boosting': 'gbdt',
    'verbosity': -1,
    'extra_trees': True,
    'max_position': 5,
    'ndcg_at': 1,  # only the top-1 pick per day is evaluated
    'quant_train_renew_leaf': True,
    'lambdarank_truncation_level': 3,
    # 'lambdarank_position_bias_regularization': 1,
    'seed': 7
}
# Shared dict populated by lgb.callback.record_evaluation during training.
evals = {}

gc.collect()
from joblib import Parallel, delayed
import threading
from queue import Queue


def worker(df, train_days, test_days, feature_columns_origin, unique_dates, start, filter_index, results_queue,
           validation_days):
    """Train one rolling-window model and push its per-day top-1 picks
    (trade_date, score, ts_code) onto results_queue.

    NOTE(review): the module-level `evals` dict is shared across worker
    threads via record_evaluation — concurrent writes race; confirm.
    """
    train_dates = unique_dates[start: start + train_days]
    test_dates = unique_dates[start + train_days: start + train_days + test_days]

    # Training rows additionally pass the caller-supplied filter mask.
    train_data = df[filter_index & df['trade_date'].isin(train_dates)].copy()
    test_data = df[df['trade_date'].isin(test_dates)].copy()

    train_data = train_data.sort_values('trade_date')
    test_data = test_data.sort_values('trade_date')

    # Drop features whose distribution drifts between the window's halves.
    feature_columns, _ = remove_shifted_features(train_data, feature_columns_origin, size=0.8, log=False)

    train_data = train_data.dropna(subset=feature_columns + ['label'])
    train_data = train_data.reset_index(drop=True)

    test_data = test_data.dropna(subset=feature_columns)
    test_data = test_data.reset_index(drop=True)

    cat_columns = [col for col in df.columns if col.startswith('cat')]
    for col in cat_columns:
        if col in train_data.columns:
            train_data[col] = train_data[col].astype('category')
        if col in test_data.columns:
            test_data[col] = test_data[col].astype('category')

    # Per-window relevance gains: (rank+1)^2 over the labels seen here.
    label_gain = list(range(len(train_data['label'].unique())))
    label_gain = [(gain + 1) * (gain + 1) for gain in label_gain]
    current_light_params = light_params.copy()
    current_light_params['label_gain'] = label_gain

    # BUG FIX: the per-window copy (with this window's label_gain) was built
    # but the shared `light_params` was passed to training, so the computed
    # gains were never used. Labels are already dropna'd above, so the
    # redundant second dropna(subset=['label']) is gone as well.
    model, _, _ = train_light_model(train_data,
                                    current_light_params, feature_columns,
                                    [lgb.log_evaluation(period=100),
                                     lgb.callback.record_evaluation(evals),
                                     # lgb.early_stopping(100, first_metric_only=True)
                                     ], evals,
                                    num_boost_round=100, validation_days=validation_days,
                                    print_feature_importance=False, use_pca=False)

    score_df = test_data.copy()
    score_df['score'] = model.predict(score_df[feature_columns])
    if not score_df.empty:
        # One pick per trade_date: the row with the highest predicted score.
        score_df = score_df.loc[score_df.groupby('trade_date')['score'].idxmax()]
        score_df = score_df[['trade_date', 'score', 'ts_code']]
        results_queue.put(score_df)


def rolling_train_predict(df, train_days, test_days, feature_columns_origin, days=5, use_pca=False,
                          validation_days=60, filter_index=None, num_threads=4):
    """Rolling walk-forward training: for every window of `train_days` dates,
    train a model in its own thread and predict the next `test_days` dates.

    Returns a DataFrame of [trade_date, score, ts_code] picks (empty when no
    window produced predictions).

    NOTE(review): `num_threads` is accepted but not honoured — one thread is
    spawned per window regardless; confirm whether a bounded pool is wanted.
    """
    unique_dates = df[df['trade_date'] >= '2020-01-01']['trade_date'].unique().tolist()
    unique_dates = sorted(unique_dates)
    n = len(unique_dates)
    # Skip leading dates so the last window ends exactly on the last date.
    extra_days = (n - train_days) % test_days
    start_index = extra_days

    predictions_queue = Queue()
    threads = []

    for start in range(start_index, n - train_days - test_days + 1, test_days):
        thread = threading.Thread(target=worker,
                                  args=(df, train_days, test_days, feature_columns_origin,
                                        unique_dates, start, filter_index, predictions_queue, validation_days))
        threads.append(thread)
        thread.start()

    for thread in threads:
        thread.join()

    predictions_list = []
    while not predictions_queue.empty():
        predictions_list.append(predictions_queue.get())

    # Robustness: pd.concat([]) raises ValueError; return an empty, correctly
    # shaped frame instead when every window came back empty.
    if not predictions_list:
        return pd.DataFrame(columns=['trade_date', 'score', 'ts_code'])
    final_predictions = pd.concat(predictions_list, ignore_index=True)
    return final_predictions
"原始训练集大小: 1708\n", "原始训练集大小: 1728\n", "划分后的训练集大小: 1728, 验证集大小: 367\n", "原始训练集大小: 1705\n", "划分后的训练集大小: 1708, 验证集大小: 325划分后的训练集大小: 1700, 验证集大小: 383\n", "划分后的训练集大小: 1705, 验证集大小: 348\n", "\n", "原始训练集大小: 1746\n", "划分后的训练集大小: 1746, 验证集大小: 363\n", "原始训练集大小: 1723\n", "划分后的训练集大小: 1723, 验证集大小: 362\n", "原始训练集大小: 1705\n", "划分后的训练集大小: 1705, 验证集大小: 330\n", "原始训练集大小: 1696\n", "划分后的训练集大小: 1696, 验证集大小: 354\n", "原始训练集大小: 1662\n", "划分后的训练集大小: 1662, 验证集大小: 310\n", "原始训练集大小: 1736\n", "划分后的训练集大小: 1736, 验证集大小: 359\n", "原始训练集大小: 1702\n", "原始训练集大小: 1705\n", "划分后的训练集大小: 1702, 验证集大小: 308\n", "划分后的训练集大小: 1705, 验证集大小: 342\n", "原始训练集大小: 1684\n", "划分后的训练集大小: 1684, 验证集大小: 338\n", "原始训练集大小: 1718\n", "划分后的训练集大小: 1718, 验证集大小: 330\n", "原始训练集大小: 1713\n", "划分后的训练集大小: 1713, 验证集大小: 328\n", "原始训练集大小: [100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", " 1687\n", "原始训练集大小: 1679\n", "划分后的训练集大小: 1687, 验证集大小: 341\n", "划分后的训练集大小: 1679, 验证集大小: 342\n", "原始训练集大小: 1709\n", "原始训练集大小: 1684\n", "划分后的训练集大小: 1709, 验证集大小: 374\n", "划分后的训练集大小: 1684, 验证集大小: 302\n", "原始训练集大小: 1716\n", "划分后的训练集大小: 1716, 验证集大小: 322\n", "原始训练集大小: 1723\n", "划分后的训练集大小: 1723, 验证集大小: 325\n", "原始训练集大小: 1679\n", "划分后的训练集大小: 1679, 验证集大小: 325\n", "原始训练集大小: 1746\n", "原始训练集大小: 1689\n", "原始训练集大小: 1683\n", "划分后的训练集大小: 1746, 验证集大小: 366\n", "划分后的训练集大小: 1689, 验证集大小: 358\n", "原始训练集大小: 1699\n", "划分后的训练集大小: 1683, 验证集大小: 355\n", "划分后的训练集大小: 1699, 验证集大小: 358\n", "原始训练集大小: 1703\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1703, 验证集大小: 347\n", "[100]\ttrain's ndcg@1: 0.923\tvalid's ndcg@1: 1\n", "原始训练集大小: 1704\n", "原始训练集大小: 1663\n", "划分后的训练集大小: 1663, 验证集大小: 349\n", "划分后的训练集大小: 1704, 验证集大小: 349[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 0.81\n", "原始训练集大小: 1700\n", "\n", "原始训练集大小: 1678\n", "原始训练集大小: 1716\n", "划分后的训练集大小: 1678, 验证集大小: 326\n", "划分后的训练集大小: 1716, 验证集大小: 352[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "\n", "划分后的训练集大小: 1700, 验证集大小: 346\n", "原始训练集大小: 1723\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", 
"[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1原始训练集大小: 1688\n", "原始训练集大小: 1699\n", "划分后的训练集大小: 1723, 验证集大小: 353\n", "\n", "划分后的训练集大小: 1688, 验证集大小: 345\n", "原始训练集大小: 1676\n", "原始训练集大小: 1677\n", "划分后的训练集大小: 1699, 验证集大小: 343\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1676, 验证集大小: 355\n", "划分后的训练集大小: 1677, 验证集大小: 319\n", "原始训练集大小: 1644\n", "划分后的训练集大小: 1644, 验证集大小: 328\n", "原始训练集大小: 1724\n", "原始训练集大小: 1692\n", "划分后的训练集大小: 1692, 验证集大小: 327\n", "原始训练集大小: 原始训练集大小: 1723\n", "原始训练集大小: 1700\n", "划分后的训练集大小: 1724, 验证集大小: 379\n", " 1674\n", "划分后的训练集大小: 1723, 验证集大小: 327\n", "原始训练集大小: 1696\n", "划分后的训练集大小: 1674, 验证集大小: 327\n", "划分后的训练集大小: 1700, 验证集大小: 366\n", "原始训练集大小: 1678\n", "划分后的训练集大小: 1696, 验证集大小: 353\n", "划分后的训练集大小: 1678, 验证集大小: 291原始训练集大小: 1721\n", "\n", "划分后的训练集大小: 1721, 验证集大小: 339\n", "原始训练集大小: 原始训练集大小: 1728\n", " 1679\n", "原始训练集大小: 1665\n", "划分后的训练集大小: 1679, 验证集大小: 308\n", "划分后的训练集大小: 1665, 验证集大小: 325\n", "[100]\ttrain's ndcg@1: 0.923\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1728, 验证集大小: 336\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1679\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1727\n", "原始训练集大小: 1713\n", "划分后的训练集大小: 1679, 验证集大小: 331\n", "划分后的训练集大小: 1727, 验证集大小: 337\n", "原始训练集大小: 1694\n", "划分后的训练集大小: 1713, 验证集大小: 325\n", "原始训练集大小: 1693\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1694, 验证集大小: 367\n", "原始训练集大小: 1745\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1693, 验证集大小: 364\n", "原始训练集大小: 1715\n", "划分后的训练集大小: 1745, 验证集大小: 371\n", "划分后的训练集大小: 1715, 验证集大小: 353\n", "[100]\ttrain's ndcg@1: 0.9065\tvalid's ndcg@1: 0.81\n", "原始训练集大小: 1701\n", "原始训练集大小: 1683\n", "划分后的训练集大小: 1701, 验证集大小: 339\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1683, 验证集大小: 370\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 
1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1729\n", "原始训练集大小: 划分后的训练集大小: 1729, 验证集大小: 315\n", " 1674\n", "划分后的训练集大小: 1674, 验证集大小: 323\n", "原始训练集大小: 1678\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1685\n", "划分后的训练集大小: 1678, 验证集大小: 344\n", "原始训练集大小: 1567\n", "划分后的训练集大小: 1567, 验证集大小: 345\n", "划分后的训练集大小: 1685, 验证集大小: 329\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1557\n", "原始训练集大小: 1656\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1669\n", "划分后的训练集大小: 1557, 验证集大小: 347\n", "原始训练集大小: 1728\n", "划分后的训练集大小: 1656, 验证集大小: 294\n", "原始训练集大小: 1701\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1640\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1669, 验证集大小: 309\n", "划分后的训练集大小: 1701, 验证集大小: 308\n", "划分后的训练集大小: 1728, 验证集大小: 345\n", "划分后的训练集大小: 1640, 验证集大小: 339\n", "原始训练集大小: 1697\n", "原始训练集大小: 1676\n", "原始训练集大小: 1509\n", "原始训练集大小: 1699\n", "划分后的训练集大小: 1676, 验证集大小: 314\n", "原始训练集大小: 1687\n", "划分后的训练集大小: 1697, 验证集大小: 301\n", "划分后的训练集大小: 1509, 验证集大小: 286\n", "原始训练集大小: 1713\n", "划分后的训练集大小: 1699, 验证集大小: 325\n", "原始训练集大小: 1671\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 0.81\n", "原始训练集大小: 1667\n", "原始训练集大小: 1705\n", "划分后的训练集大小: 1705, 验证集大小: 371\n", "划分后的训练集大小: 1687, 验证集大小: 354\n", "划分后的训练集大小: 1667, 验证集大小: 335\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1671, 验证集大小: 298\n", "原始训练集大小: 1664\n", "原始训练集大小: 1620\n", "划分后的训练集大小: 1713, 验证集大小: 333\n", "原始训练集大小: 1675\n", "划分后的训练集大小: 1664, 验证集大小: 316\n", "划分后的训练集大小: 1620, 验证集大小: 333\n", "划分后的训练集大小: 1675, 验证集大小: 354\n", "原始训练集大小: 1596\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1697\n", "划分后的训练集大小: 1596, 验证集大小: 294\n", "原始训练集大小: 1082\n", "划分后的训练集大小: 1082, 验证集大小: 309\n", "原始训练集大小: 1762\n", "划分后的训练集大小: 1697, 验证集大小: 342原始训练集大小: 1533\n", "划分后的训练集大小: 1533, 验证集大小: 324\n", "划分后的训练集大小: 1762, 验证集大小: 351\n", "\n", "原始训练集大小: 1636\n", "原始训练集大小: 1483\n", "原始训练集大小: 1693\n", "[100]\ttrain's ndcg@1: 1\tvalid's 
ndcg@1: 1\n", "划分后的训练集大小: 1636, 验证集大小: 261\n", "原始训练集大小: 1689\n", "划分后的训练集大小: 1483, 验证集大小: 199\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1729\n", "划分后的训练集大小: 1689, 验证集大小: 343\n", "原始训练集大小: 1720\n", "原始训练集大小: 1713\n", "原始训练集大小: 划分后的训练集大小: 1729, 验证集大小: 388\n", " 1655\n", "划分后的训练集大小: 1693, 验证集大小: 351\n", "划分后的训练集大小: 1655, 验证集大小: 343\n", "划分后的训练集大小: 1720, 验证集大小: 333\n", "原始训练集大小: 1519\n", "原始训练集大小: 1719\n", "原始训练集大小: 1690\n", "原始训练集大小: 1655\n", "划分后的训练集大小: 1690, 验证集大小: 315\n", "划分后的训练集大小: 1519, 验证集大小: 308划分后的训练集大小: 1655, 验证集大小: 282\n", "原始训练集大小: 1659\n", "划分后的训练集大小: 1713, 验证集大小: 310\n", "\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1719, 验证集大小: 362\n", "原始训练集大小: 1641\n", "原始训练集大小: 1672\n", "划分后的训练集大小: 1641, 验证集大小: 314\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1659, 验证集大小: 349\n", "原始训练集大小: 1690\n", "划分后的训练集大小: 1672, 验证集大小: 272\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1749\n", "划分后的训练集大小: 1749, 验证集大小: 397\n", "划分后的训练集大小: 1690, 验证集大小: 306\n", "原始训练集大小: 1623\n", "原始训练集大小: 1637\n", "划分后的训练集大小: 1623, 验证集大小: 297\n", "划分后的训练集大小: 1637, 验证集大小: 334原始训练集大小: 1599\n", "原始训练集大小: 1666\n", "\n", "原始训练集大小: 1635\n", "原始训练集大小: 1688\n", "原始训练集大小: 1672\n", "划分后的训练集大小: 1666, 验证集大小: 320\n", "划分后的训练集大小: 1599, 验证集大小: 316\n", "划分后的训练集大小: 1635, 验证集大小: 312\n", "原始训练集大小: 1610\n", "划分后的训练集大小: 1672, 验证集大小: 316\n", "原始训练集大小: 1649\n", "划分后的训练集大小: 1610, 验证集大小: 345\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1649, 验证集大小: 349\n", "划分后的训练集大小: 1688, 验证集大小: 356\n", "原始训练集大小: 1671\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1671, 验证集大小: 349\n", "原始训练集大小: 1666\n", "原始训练集大小: 1669\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1634\n", "原始训练集大小: 1652\n", "划分后的训练集大小: 1666, 验证集大小: 292\n", "划分后的训练集大小: 1652, 验证集大小: 300\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 
划分后的训练集大小: 1634, 验证集大小: 354\n", "划分后的训练集大小: 1669, 验证集大小: 324\n", " 1614\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1695\n", "划分后的训练集大小: 1695, 验证集大小: 323\n", "划分后的训练集大小: 1614, 验证集大小: 300\n", "原始训练集大小: 1692\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1692, 验证集大小: 331\n", "原始训练集大小: 1472\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1699\n", "[100]\ttrain's ndcg@1: 0.9055\tvalid's ndcg@1: 1\n", "原始训练集大小: 1632\n", "原始训练集大小: 1683\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1472, 验证集大小: 322\n", "划分后的训练集大小: 1699, 验证集大小: 368\n", "划分后的训练集大小: 1632, 验证集大小: 319\n", "划分后的训练集大小: 1683, 验证集大小: 330\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1660\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1626\n", "原始训练集大小: 1745\n", "划分后的训练集大小: 1660, 验证集大小: 352\n", "划分后的训练集大小: 1626, 验证集大小: 361\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1\n", "原始训练集大小: 1700\n", "划分后的训练集大小: 1745, 验证集大小: 343\n", "划分后的训练集大小: 1700, 验证集大小: 360\n", "原始训练集大小: 1665\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1划分后的训练集大小: 1665, 验证集大小: 301\n", "\n", "原始训练集大小: 1638\n", "原始训练集大小: 1589\n", "划分后的训练集大小: 1638, 验证集大小: 341\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1589, 验证集大小: 340\n", "原始训练集大小: 1637\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: [100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", " 1625\n", "原始训练集大小: 1659\n", "原始训练集大小: 1744\n", "划分后的训练集大小: 1637, 验证集大小: 272\n", "划分后的训练集大小: 1744, 验证集大小: 350\n", "划分后的训练集大小: 1625, 验证集大小: 332\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1659, 验证集大小: 323\n", "原始训练集大小: 1678\n", 
"[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1666\n", "划分后的训练集大小: 1678, 验证集大小: 340\n", "原始训练集大小: 1694\n", "原始训练集大小: 1705\n", "划分后的训练集大小: 1694, 验证集大小: 382\n", "划分后的训练集大小: 1666, 验证集大小: 369\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1705, 验证集大小: 376\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1原始训练集大小: 1649\n", "\n", "原始训练集大小: 1710\n", "原始训练集大小: 1708\n", "原始训练集大小: 1604\n", "原始训练集大小: 1577\n", "原始训练集大小: 1692\n", "划分后的训练集大小: 1649, 验证集大小: 282\n", "划分后的训练集大小: 1710, 验证集大小: 324\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1604, 验证集大小: 357\n", "原始训练集大小: 1704\n", "划分后的训练集大小: 1577, 验证集大小: 317\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1692, 验证集大小: 337\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 0.9025原始训练集大小: 1620\n", "划分后的训练集大小: 1708, 验证集大小: 373\n", "\n", "划分后的训练集大小: 1620, 验证集大小: 307\n", "划分后的训练集大小: 1704, 验证集大小: 408\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1217\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1\n", "原始训练集大小: 1665\n", "原始训练集大小: 1680\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1665, 验证集大小: 389原始训练集大小: 1711\n", "\n", "划分后的训练集大小: 1680, 验证集大小: 337\n", "原始训练集大小: 1645\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1645, 验证集大小: 335\n", "划分后的训练集大小: 1217, 验证集大小: 334\n", "原始训练集大小: 1601\n", "原始训练集大小: 1668\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1601, 验证集大小: 336\n", "原始训练集大小: 划分后的训练集大小: 1711, 验证集大小: 377\n", "划分后的训练集大小: 1668, 验证集大小: 317\n", " 1541\n", "原始训练集大小: 1666\n", "划分后的训练集大小: 1541, 验证集大小: 268\n", "原始训练集大小: 原始训练集大小: 1667\n", " 1679\n", "原始训练集大小: 1619\n", "划分后的训练集大小: 1666, 验证集大小: 279\n", "划分后的训练集大小: 1667, 验证集大小: 361\n", "[100]\ttrain's ndcg@1: 0.924\tvalid's ndcg@1: 1\n", "原始训练集大小: 1599\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1679, 验证集大小: 383\n", "原始训练集大小: 1393\n", "划分后的训练集大小: 1599, 验证集大小: 358\n", 
"[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1619, 验证集大小: 351\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1346\n", "划分后的训练集大小: 1393, 验证集大小: 300\n", "原始训练集大小: 1610\n", "划分后的训练集大小: 1346, 验证集大小: 124\n", "原始训练集大小: 1565\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1628\n", "[100]\ttrain's ndcg@1: 0.9085\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1610, 验证集大小: 339\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1565, 验证集大小: 307\n", "划分后的训练集大小: 1628, 验证集大小: 312\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1原始训练集大小: 1647\n", "\n", "划分后的训练集大小: 1647, 验证集大小: 250\n", "原始训练集大小: 1692\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1原始训练集大小: 1672\n", "\n", "原始训练集大小: 1630\n", "原始训练集大小: 1706\n", "划分后的训练集大小: 1692, 验证集大小: 328\n", "划分后的训练集大小: 1672, 验证集大小: 387\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1630, 验证集大小: 329\n", "原始训练集大小: 1634\n", "划分后的训练集大小: 1706, 验证集大小: 369\n", "原始训练集大小: 1682\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1682, 验证集大小: 343\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1划分后的训练集大小: 1634, 验证集大小: 303\n", "\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1678\n", "原始训练集大小: 1646\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1653\n", "原始训练集大小: 1045\n", "原始训练集大小: 1648\n", "划分后的训练集大小: 1678, 验证集大小: 336\n", "划分后的训练集大小: 1646, 验证集大小: 341\n", "划分后的训练集大小: 1648, 验证集大小: 279\n", "划分后的训练集大小: 1653, 验证集大小: 370划分后的训练集大小: 1045, 验证集大小: 330\n", "\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 0.81[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 
1558\n", "原始训练集大小: 1722\n", "\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1558, 验证集大小: 257\n", "原始训练集大小: 1628\n", "划分后的训练集大小: 1722, 验证集大小: 380\n", "划分后的训练集大小: 1628, 验证集大小: 352\n", "原始训练集大小: 1668\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1677\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "原始训练集大小: 1112\n", "原始训练集大小: 1637\n", "划分后的训练集大小: 1112, 验证集大小: 120\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1668, 验证集大小: 302\n", "划分后的训练集大小: 1677, 验证集大小: 283\n", "划分后的训练集大小: 1637, 验证集大小: 284\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1606\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1620\n", "[100]\ttrain's ndcg@1: 0.925\tvalid's ndcg@1: 0.7225\n", "原始训练集大小: 1687\n", "划分后的训练集大小: 1606, 验证集大小: 310\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: [100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", " 1702\n", "划分后的训练集大小: 1687, 验证集大小: 316\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1702, 验证集大小: 339\n", "原始训练集大小: [100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1620, 验证集大小: 349\n", " 1666\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1656\n", "原始训练集大小: 1635\n", "原始训练集大小: 1683\n", "划分后的训练集大小: 1666, 验证集大小: 325\n", "划分后的训练集大小: 1635, 验证集大小: 343\n", "划分后的训练集大小: 1656, 验证集大小: 318\n", "原始训练集大小: 1618\n", "[100]\ttrain's ndcg@1: 0.9445\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1683, 验证集大小: 373\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1634\n", "划分后的训练集大小: 1618, 验证集大小: 300\n", "原始训练集大小: 1576\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1576, 验证集大小: 296\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1634, 验证集大小: 338\n", "[100]\ttrain's ndcg@1: 
1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1642\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1原始训练集大小: 1680\n", "原始训练集大小: 1638\n", "\n", "划分后的训练集大小: 1642, 验证集大小: 314\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.923\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1657\n", "划分后的训练集大小: 1680, 验证集大小: 334\n", "划分后的训练集大小: 1638, 验证集大小: 346\n", "划分后的训练集大小: 1657, 验证集大小: 316\n", "原始训练集大小: 1602\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1628\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1602, 验证集大小: 329\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1661\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1原始训练集大小: 1637\n", "\n", "划分后的训练集大小: 1628, 验证集大小: 326\n", "原始训练集大小: 1674\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1716\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1650\n", "划分后的训练集大小: 1716, 验证集大小: 397\n", "划分后的训练集大小: 1661, 验证集大小: 329\n", "原始训练集大小: 1637\n", "划分后的训练集大小: 1637, 验证集大小: 270\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025原始训练集大小: 1696\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "\n", "原始训练集大小: 1614\n", "划分后的训练集大小: 1674, 验证集大小: 380\n", "划分后的训练集大小: 1637, 验证集大小: 298\n", "原始训练集大小: 1651\n", "原始训练集大小: 1688\n", "划分后的训练集大小: 1696, 验证集大小: 367\n", "划分后的训练集大小: 1651, 验证集大小: 365[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "\n", "划分后的训练集大小: 1650, 验证集大小: 294\n", "划分后的训练集大小: 1614, 验证集大小: 334\n", "[100]\ttrain's ndcg@1: 0.925\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1688, 验证集大小: 281\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", 
"原始训练集大小: 1667\n", "划分后的训练集大小: 1667, 验证集大小: 359\n", "原始训练集大小: 1696\n", "原始训练集大小: 1689\n", "原始训练集大小: 1686\n", "划分后的训练集大小: 1696, 验证集大小: 339\n", "划分后的训练集大小: 1686, 验证集大小: 364\n", "原始训练集大小: 1648\n", "原始训练集大小: 原始训练集大小: 1681\n", " 1633\n", "划分后的训练集大小: 1648, 验证集大小: 298\n", "划分后的训练集大小: 1689, 验证集大小: 384\n", "原始训练集大小: 1644\n", "划分后的训练集大小: 1633, 验证集大小: 313\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1644, 验证集大小: 330\n", "原始训练集大小: 1650\n", "划分后的训练集大小: 1650, 验证集大小: 341\n", "原始训练集大小: 1626\n", "划分后的训练集大小: 1681, 验证集大小: 309\n", "原始训练集大小: 1659\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1659, 验证集大小: 330\n", "划分后的训练集大小: 1626, 验证集大小: 307\n", "原始训练集大小: 1606\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1648\n", "划分后的训练集大小: 1606, 验证集大小: 299\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1681\n", "划分后的训练集大小: 1648, 验证集大小: 295\n", "划分后的训练集大小: 1681, 验证集大小: 330\n", "原始训练集大小: [100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", " 1648\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1630\n", "划分后的训练集大小: 1648, 验证集大小: 343\n", "原始训练集大小: 1621\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1630, 验证集大小: 350\n", "[100]\ttrain's ndcg@1: 0.9445\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1621, 验证集大小: 362\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: [100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", " 1652\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1652, 验证集大小: 347\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9035\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 
1\n", "原始训练集大小: 1659\n", "[100]\ttrain's ndcg@1: 0.925\tvalid's ndcg@1: 0.7225\n", "原始训练集大小: 1696\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1659, 验证集大小: 343\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 0.81\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9445\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1696, 验证集大小: 357\n", "原始训练集大小: [100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1689\n", " 1688\n", "原始训练集大小: 1672\n", "原始训练集大小: 1632\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1689, 验证集大小: 341\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1716\n", "划分后的训练集大小: 1688, 验证集大小: 348\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1671\n", "[100]\ttrain's ndcg@1: 0.924\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1672, 验证集大小: 313\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1671, 验证集大小: 329\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1632, 验证集大小: 309\n", "划分后的训练集大小: 1716, 验证集大小: 420\n", "原始训练集大小: 1646\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1\n", "原始训练集大小: 1663\n", "划分后的训练集大小: 1646, 验证集大小: 308\n", "原始训练集大小: 1636\n", "原始训练集大小: 1688\n", "原始训练集大小: 1666\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1663, 验证集大小: 339\n", "原始训练集大小: 1692\n", "原始训练集大小: 1619\n", "原始训练集大小: 1682\n", "划分后的训练集大小: 1688, 验证集大小: 303\n", "划分后的训练集大小: 1666, 验证集大小: 315\n", "划分后的训练集大小: 1636, 验证集大小: 303\n", "划分后的训练集大小: 1619, 验证集大小: 333\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "原始训练集大小: 1659\n", "划分后的训练集大小: 1692, 验证集大小: 331\n", "划分后的训练集大小: 1682, 验证集大小: 337\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1677\n", "原始训练集大小: 1725\n", 
"原始训练集大小: 1646\n", "划分后的训练集大小: 1659, 验证集大小: 349\n", "划分后的训练集大小: 1677, 验证集大小: 335\n", "原始训练集大小: 1687\n", "原始训练集大小: 1697\n", "原始训练集大小: 1654\n", "划分后的训练集大小: 1725, 验证集大小: 350\n", "原始训练集大小: 1683\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1654, 验证集大小: 333划分后的训练集大小: 1697, 验证集大小: 340\n", "\n", "原始训练集大小: 1667\n", "划分后的训练集大小: 1687, 验证集大小: 296\n", "原始训练集大小: 1661\n", "原始训练集大小: 1626\n", "划分后的训练集大小: 1646, 验证集大小: 286\n", "划分后的训练集大小: 1626, 验证集大小: 338\n", "划分后的训练集大小: 1667, 验证集大小: 344\n", "划分后的训练集大小: 1661, 验证集大小: 324\n", "原始训练集大小: 1630\n", "原始训练集大小: 1714\n", "划分后的训练集大小: 1683, 验证集大小: 378\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1630, 验证集大小: 326\n", "划分后的训练集大小: 1714, 验证集大小: 345原始训练集大小: 1645\n", "\n", "划分后的训练集大小: 1645, 验证集大小: 338\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1647\n", "划分后的训练集大小: 1647, 验证集大小: 325\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "\n", "原始训练集大小: 1642\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1688\n", "划分后的训练集大小: 1688, 验证集大小: 320\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1642, 验证集大小: 320\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1700\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1627\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1700, 验证集大小: 344\n", "[100]\ttrain's ndcg@1: 0.925\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1627, 验证集大小: 308\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1635\n", "[100]\ttrain's 
ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1635, 验证集大小: 303\n", "原始训练集大小: 1663\n", "原始训练集大小: 1668\n", "原始训练集大小: 1686\n", "划分后的训练集大小: 1668, 验证集大小: 333\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1663, 验证集大小: 347\n", "划分后的训练集大小: 1686, 验证集大小: 264\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1618\n", "原始训练集大小: 1670\n", "划分后的训练集大小: 1618, 验证集大小: 337\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1620\n", "划分后的训练集大小: 1670, 验证集大小: 327\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9445\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9045\tvalid's ndcg@1: 1\n", "原始训练集大小: 1696\n", "划分后的训练集大小: 1620, 验证集大小: 318\n", "[100]\ttrain's ndcg@1: 0.9055\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1746\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1696, 验证集大小: 300\n", "原始训练集大小: 1730\n", "划分后的训练集大小: 1746, 验证集大小: 344\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1617\n", "划分后的训练集大小: 1730, 验证集大小: 356\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "原始训练集大小: 1758\n", "划分后的训练集大小: 1617, 验证集大小: 338\n", "划分后的训练集大小: 1758, 验证集大小: 382\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "\n", "原始训练集大小: 1672\n", "原始训练集大小: 1662\n", "原始训练集大小: 1647\n", "原始训练集大小: 1668\n", "划分后的训练集大小: 1662, 验证集大小: 353\n", "划分后的训练集大小: 1672, 验证集大小: 356\n", "划分后的训练集大小: 1647, 验证集大小: 317\n", "原始训练集大小: 1705\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1668, 验证集大小: 346\n", "划分后的训练集大小: 1705, 验证集大小: 343\n", "原始训练集大小: 1644\n", "[100]\ttrain's ndcg@1: 
0.9425\tvalid's ndcg@1: 1\n", "原始训练集大小: 1670\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1644, 验证集大小: 309\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "\n", "原始训练集大小: 1664\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1670, 验证集大小: 356\n", "划分后的训练集大小: 1664, 验证集大小: 303\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: [100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", " 1677\n", "原始训练集大小: 1690\n", "原始训练集大小: 1702\n", "划分后的训练集大小: 1677, 验证集大小: 357\n", "原始训练集大小: [100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", " 1698\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1669\n", "原始训练集大小: 1737\n", "划分后的训练集大小: 1690, 验证集大小: 339\n", "划分后的训练集大小: 1698, 验证集大小: 338\n", "原始训练集大小: 1670\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1747\n", "划分后的训练集大小: 1737, 验证集大小: 384\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1702, 验证集大小: 353\n", "原始训练集大小: 1718\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1715\n", "划分后的训练集大小: 1669, 验证集大小: 356\n", "原始训练集大小: 1700\n", "划分后的训练集大小: 1747, 验证集大小: 354\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1718, 验证集大小: 352\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1715, 验证集大小: 340\n", "划分后的训练集大小: 1670, 验证集大小: 316\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1692\n", "划分后的训练集大小: 1700, 验证集大小: 329[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1695\n", "\n", "划分后的训练集大小: 1692, 验证集大小: 351\n", "原始训练集大小: 1645\n", "原始训练集大小: 1704\n", "划分后的训练集大小: 1695, 验证集大小: 354\n", "划分后的训练集大小: 1645, 验证集大小: 280\n", "原始训练集大小: 1690\n", "原始训练集大小: 
1736\n", "划分后的训练集大小: 1704, 验证集大小: 353\n", "原始训练集大小: 1714\n", "划分后的训练集大小: 1736, 验证集大小: 327\n", "划分后的训练集大小: 1714, 验证集大小: 315\n", "原始训练集大小: 1729\n", "划分后的训练集大小: 1690, 验证集大小: 370\n", "原始训练集大小: 1731\n", "原始训练集大小: 1721\n", "划分后的训练集大小: 1729, 验证集大小: 394\n", "原始训练集大小: 1756\n", "划分后的训练集大小: 1721, 验证集大小: 330\n", "划分后的训练集大小: 1756, 验证集大小: 386\n", "原始训练集大小: 划分后的训练集大小: 1731, 验证集大小: 348\n", " 1696\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1669\n", "划分后的训练集大小: 1696, 验证集大小: 305\n", "划分后的训练集大小: 1669, 验证集大小: 301\n", "原始训练集大小: 1714\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1710\n", "[100]\ttrain's ndcg@1: 0.922\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1720\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1714, 验证集大小: 355\n", "划分后的训练集大小: 1710, 验证集大小: 374原始训练集大小: 1672\n", "\n", "划分后的训练集大小: 1720, 验证集大小: 330\n", "原始训练集大小: 1740\n", "划分后的训练集大小: 1740, 验证集大小: 366\n", "原始训练集大小: 1733\n", "原始训练集大小: 1709\n", "划分后的训练集大小: 1733, 验证集大小: 321\n", "[100]\ttrain's ndcg@1: 0.923\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1672, 验证集大小: 338\n", "原始训练集大小: 1713\n", "原始训练集大小: 1698\n", "划分后的训练集大小: 1709, 验证集大小: 319\n", "原始训练集大小: 1738\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1713, 验证集大小: 349\n", "划分后的训练集大小: 1738, 验证集大小: 398\n", "原始训练集大小: 1747\n", "原始训练集大小: 1720\n", "划分后的训练集大小: 1698, 验证集大小: 344\n", "划分后的训练集大小: 1747, 验证集大小: 331\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "原始训练集大小: 1727\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1720, 验证集大小: 381\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1727, 验证集大小: 304\n", "原始训练集大小: 1723\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1665\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 
划分后的训练集大小: 1723, 验证集大小: 334\n", " 1691\n", "划分后的训练集大小: 1665, 验证集大小: 316\n", "原始训练集大小: 1629\n", "划分后的训练集大小: 1691, 验证集大小: 344\n", "原始训练集大小: [100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", " 1672\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1718\n", "原始训练集大小: 1760\n", "原始训练集大小: 1684\n", "划分后的训练集大小: 1629, 验证集大小: 332\n", "划分后的训练集大小: 1672, 验证集大小: 344\n", "[100]\ttrain's ndcg@1: 0.905154\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1718, 验证集大小: 329\n", "划分后的训练集大小: 1760, 验证集大小: 380\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1684, 验证集大小: 360\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 1\n", "原始训练集大小: 1708\n", "原始训练集大小: 1623\n", "原始训练集大小: 1659\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1\n", "原始训练集大小: 1714\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1702\n", "划分后的训练集大小: 1708, 验证集大小: 320\n", "划分后的训练集大小: 1714, 验证集大小: 361\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1702, 验证集大小: 340\n", "[100]\ttrain's ndcg@1: 0.864828\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1623, 验证集大小: 297\n", "[100]\ttrain's ndcg@1: 0.830154\tvalid's ndcg@1: 0.7225\n", "原始训练集大小: 划分后的训练集大小: 1659, 验证集大小: 360\n", " 1674\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1674\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1685\n", "划分后的训练集大小: 1685, 验证集大小: 348\n", "划分后的训练集大小: 1674, 验证集大小: 359\n", "划分后的训练集大小: 1674, 验证集大小: 368\n", "原始训练集大小: 1724\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1703\n", "原始训练集大小: 1648\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1724, 验证集大小: 334\n", "划分后的训练集大小: 1703, 验证集大小: 353原始训练集大小: 1713\n", "\n", "划分后的训练集大小: 1648, 验证集大小: 326\n", "原始训练集大小: 1714\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 
0.9025\n", "原始训练集大小: 1749\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1714, 验证集大小: 348\n", "原始训练集大小: 1451\n", "划分后的训练集大小: 1749, 验证集大小: 338\n", "原始训练集大小: 1707\n", "原始训练集大小: 1447\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1451, 验证集大小: 304\n", "原始训练集大小: 1664\n", "划分后的训练集大小: 1707, 验证集大小: 331\n", "划分后的训练集大小: 1713, 验证集大小: 344\n", "原始训练集大小: 1632\n", "划分后的训练集大小: 1447, 验证集大小: 320\n", "原始训练集大小: 1474\n", "原始训练集大小: 1704\n", "原始训练集大小: 1701\n", "划分后的训练集大小: 1632, 验证集大小: 302\n", "划分后的训练集大小: 1474, 验证集大小: 267\n", "划分后的训练集大小: 1664, 验证集大小: 322\n", "划分后的训练集大小: 1704, 验证集大小: 368\n", "划分后的训练集大小: 1701, 验证集大小: 311\n", "原始训练集大小: 1690\n", "划分后的训练集大小: 1690, 验证集大小: 300\n", "[100]\ttrain's ndcg@1: 0.855\tvalid's ndcg@1: 1\n", "原始训练集大小: 1469\n", "原始训练集大小: 1716\n", "原始训练集大小: 1516\n", "原始训练集大小: 划分后的训练集大小: 1469, 验证集大小: 330\n", " 1721\n", "原始训练集大小: 1636\n", "原始训练集大小: 1698\n", "划分后的训练集大小: 1716, 验证集大小: 336\n", "划分后的训练集大小: 1721, 验证集大小: 360\n", "划分后的训练集大小: 1636, 验证集大小: 330\n", "原始训练集大小: 1740\n", "划分后的训练集大小: 1698, 验证集大小: 357\n", "原始训练集大小: 1696\n", "原始训练集大小: 1731\n", "划分后的训练集大小: 1516, 验证集大小: 321\n", "划分后的训练集大小: 1740, 验证集大小: 393\n", "原始训练集大小: 1677\n", "划分后的训练集大小: 1731, 验证集大小: 296\n", "划分后的训练集大小: 1696, 验证集大小: 354\n", "原始训练集大小: 1689\n", "划分后的训练集大小: 1677, 验证集大小: 320\n", "划分后的训练集大小: 1689, 验证集大小: 359\n", "原始训练集大小: 1739\n", "划分后的训练集大小: 1739, 验证集大小: 360\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1713\n", "原始训练集大小: 1731\n", "划分后的训练集大小: 1713, 验证集大小: 368\n", "划分后的训练集大小: 1731, 验证集大小: 363\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1746\n", "原始训练集大小: 1466\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1573\n", "划分后的训练集大小: 1466, 验证集大小: 203\n", "原始训练集大小: 1509\n", "划分后的训练集大小: 1573, 验证集大小: 352\n", "划分后的训练集大小: 1509, 验证集大小: 309\n", "[100]\ttrain's ndcg@1: 0.923\tvalid's ndcg@1: 1\n", "原始训练集大小: 划分后的训练集大小: 1746, 验证集大小: 379\n", " 1650\n", "[100]\ttrain's ndcg@1: 
0.89\tvalid's ndcg@1: 1\n", "原始训练集大小: 1706\n", "划分后的训练集大小: 1650, 验证集大小: 285\n", "原始训练集大小: 1717\n", "划分后的训练集大小: 1717, 验证集大小: 325\n", "划分后的训练集大小: 1706, 验证集大小: 319\n", "原始训练集大小: 1713\n", "原始训练集大小: 1701\n", "原始训练集大小: 1751\n", "原始训练集大小: 1679\n", "原始训练集大小: 1673\n", "划分后的训练集大小: 1751, 验证集大小: 369\n", "划分后的训练集大小: 1673, 验证集大小: 325\n", "原始训练集大小: 1679\n", "原始训练集大小: 1677\n", "划分后的训练集大小: 1679, 验证集大小: 332\n", "原始训练集大小: 1700\n", "原始训练集大小: 1663\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1713, 验证集大小: 360\n", "原始训练集大小: 1536\n", "划分后的训练集大小: 1701, 验证集大小: 363\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1677, 验证集大小: 354\n", "划分后的训练集大小: 1679, 验证集大小: 379[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1663, 验证集大小: 336\n", "\n", "划分后的训练集大小: 1536, 验证集大小: 248\n", "原始训练集大小: 1733\n", "划分后的训练集大小: 1733, 验证集大小: 374\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.923\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1673\n", "划分后的训练集大小: 1700, 验证集大小: 382\n", "划分后的训练集大小: 1673, 验证集大小: 318原始训练集大小: 1661\n", "\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1696\n", "原始训练集大小: 1678\n", "原始训练集大小: 1704\n", "原始训练集大小: 1758\n", "划分后的训练集大小: 1661, 验证集大小: 345\n", "原始训练集大小: 1666\n", "原始训练集大小: 1685\n", "划分后的训练集大小: 1696, 验证集大小: 309\n", "划分后的训练集大小: 1666, 验证集大小: 323\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1698\n", "划分后的训练集大小: 1758, 验证集大小: 339\n", "划分后的训练集大小: 1678, 验证集大小: 316\n", "划分后的训练集大小: 1698, 验证集大小: 353\n", "划分后的训练集大小: 1685, 验证集大小: 329\n", "划分后的训练集大小: 1704, 验证集大小: 345\n", "原始训练集大小: 1673\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "原始训练集大小: 1676\n", "划分后的训练集大小: 1673, 验证集大小: 320\n", "划分后的训练集大小: 1676, 验证集大小: 353\n", "原始训练集大小: 1705\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", 
"原始训练集大小: 1684\n", "原始训练集大小: 1692\n", "划分后的训练集大小: 1684, 验证集大小: 313\n", "原始训练集大小: 1485\n", "划分后的训练集大小: 1705, 验证集大小: 352\n", "划分后的训练集大小: 1485, 验证集大小: 343\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "原始训练集大小: 1700\n", "划分后的训练集大小: 1692, 验证集大小: 308\n", "原始训练集大小: 1683\n", "原始训练集大小: 1674\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1653\n", "原始训练集大小: 1684\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1710\n", "划分后的训练集大小: 1700, 验证集大小: 340\n", "划分后的训练集大小: 1653, 验证集大小: 330\n", "划分后的训练集大小: 1674, 验证集大小: 321\n", "划分后的训练集大小: 1710, 验证集大小: 334\n", "划分后的训练集大小: 1683, 验证集大小: 318\n", "原始训练集大小: 1670\n", "划分后的训练集大小: 1684, 验证集大小: 345\n", "[100]\ttrain's ndcg@1: 0.8705\tvalid's ndcg@1: 1\n", "原始训练集大小: 1682\n", "原始训练集大小: 1701\n", "原始训练集大小: 1648\n", "划分后的训练集大小: 1670, 验证集大小: 345\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1761\n", "划分后的训练集大小: 1701, 验证集大小: 348\n", "划分后的训练集大小: 1682, 验证集大小: 335\n", "[100]\ttrain's ndcg@1: 0.928\tvalid's ndcg@1: 0.64\n", "划分后的训练集大小: 1648, 验证集大小: 341\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1693\n", "原始训练集大小: 1705\n", "原始训练集大小: 1633\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 0.81\n", "原始训练集大小: 1482\n", "划分后的训练集大小: 1693, 验证集大小: 346\n", "原始训练集大小: 1693\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1690\n", "原始训练集大小: 1676\n", "原始训练集大小: 1666\n", "划分后的训练集大小: 1633, 验证集大小: 282\n", "划分后的训练集大小: 1482, 验证集大小: 333\n", "划分后的训练集大小: 1761, 验证集大小: 340\n", "原始训练集大小: 1669\n", "划分后的训练集大小: 1705, 验证集大小: 326\n", "划分后的训练集大小: 1693, 验证集大小: 320\n", "划分后的训练集大小: 1676, 验证集大小: 350\n", "原始训练集大小: 1734\n", "原始训练集大小: 1675\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1666, 验证集大小: 345\n", "划分后的训练集大小: 1669, 验证集大小: 345\n", "划分后的训练集大小: 1675, 验证集大小: 344\n", "[100]\ttrain's ndcg@1: 
0.961\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1734, 验证集大小: 347\n", "原始训练集大小: 1688\n", "原始训练集大小: 1693\n", "划分后的训练集大小: 1690, 验证集大小: 334\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1717\n", "原始训练集大小: 1727\n", "划分后的训练集大小: 1688, 验证集大小: 330\n", "[100]\ttrain's ndcg@1: 0.923\tvalid's ndcg@1: 0.81\n", "划分后的训练集大小: 1727, 验证集大小: 326\n", "划分后的训练集大小: 1693, 验证集大小: 354\n", "划分后的训练集大小: 1717, 验证集大小: 350\n", "原始训练集大小: 1690\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "原始训练集大小: 1697\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "原始训练集大小: 1768\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1680\n", "原始训练集大小: 1705\n", "原始训练集大小: 1675\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1683\n", "划分后的训练集大小: 1697, 验证集大小: 362\n", "划分后的训练集大小: 1680, 验证集大小: 358\n", "划分后的训练集大小: 1690, 验证集大小: 334\n", "划分后的训练集大小: 1768, 验证集大小: 368\n", "划分后的训练集大小: 1675, 验证集大小: 349\n", "原始训练集大小: 1677\n", "原始训练集大小: 1678\n", "划分后的训练集大小: 1683, 验证集大小: 326\n", "原始训练集大小: 1664\n", "划分后的训练集大小: 1677, 验证集大小: 346\n", "原始训练集大小: 1672\n", "划分后的训练集大小: 1705, 验证集大小: 347\n", "[100]\ttrain's ndcg@1: 0.886\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1682\n", "原始训练集大小: 1692\n", "原始训练集大小: 1711\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1682, 验证集大小: 323\n", "划分后的训练集大小: 1692, 验证集大小: 325\n", "划分后的训练集大小: 1664, 验证集大小: 341\n", "划分后的训练集大小: 1678, 验证集大小: 322\n", "原始训练集大小: 1671\n", "原始训练集大小: 1707\n", "原始训练集大小: 1690\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1690, 验证集大小: 336\n", "划分后的训练集大小: 1711, 验证集大小: 360\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1672, 验证集大小: 321\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1671, 验证集大小: 333\n", "原始训练集大小: 1708\n", "原始训练集大小: 1671\n", "划分后的训练集大小: 1671, 验证集大小: 308\n", "划分后的训练集大小: 1707, 验证集大小: 322\n", "原始训练集大小: 1656\n", "原始训练集大小: 1695\n", "原始训练集大小: 1758\n", "[100]\ttrain's ndcg@1: 
0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1723\n", "原始训练集大小: 划分后的训练集大小: 1708, 验证集大小: 357\n", "原始训练集大小: 1709\n", " 1697\n", "原始训练集大小: 1668\n", "划分后的训练集大小: 1695, 验证集大小: 319\n", "划分后的训练集大小: 1697, 验证集大小: 348\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1678\n", "原始训练集大小: 1669\n", "划分后的训练集大小: 1709, 验证集大小: 317\n", "划分后的训练集大小: 1656, 验证集大小: 336\n", "原始训练集大小: 1692\n", "原始训练集大小: 1654\n", "划分后的训练集大小: 1669, 验证集大小: 327\n", "划分后的训练集大小: 1668, 验证集大小: 320\n", "划分后的训练集大小: 1758, 验证集大小: 379\n", "划分后的训练集大小: 1654, 验证集大小: 313\n", "划分后的训练集大小: 1723, 验证集大小: 342\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1700\n", "划分后的训练集大小: 1692, 验证集大小: 341\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1700, 验证集大小: 352\n", "划分后的训练集大小: 1678, 验证集大小: 320\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 0.81\n", "原始训练集大小: 1347\n", "原始训练集大小: 1687\n", "[100]\ttrain's ndcg@1: 0.9055\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1687, 验证集大小: 331\n", "划分后的训练集大小: 1347, 验证集大小: 327\n", "原始训练集大小: 1651\n", "划分后的训练集大小: 1651, 验证集大小: 336\n", "原始训练集大小: 1670\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1670, 验证集大小: 361\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "原始训练集大小: 1657\n", "原始训练集大小: 1708\n", "原始训练集大小: 1664\n", "划分后的训练集大小: 1708, 验证集大小: 368\n", "原始训练集大小: 1720\n", "原始训练集大小: [100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9445\tvalid's ndcg@1: 1\n", " 1685\n", "划分后的训练集大小: 1685, 验证集大小: 323\n", "划分后的训练集大小: 1664, 验证集大小: 332\n", "划分后的训练集大小: 1720, 验证集大小: 343\n", "划分后的训练集大小: 1657, 验证集大小: 316\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "\n", "原始训练集大小: 1738\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's 
ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.925\tvalid's ndcg@1: 1\n", "原始训练集大小: 划分后的训练集大小: 1738, 验证集大小: 339\n", " 1670\n", "原始训练集大小: 1679\n", "原始训练集大小: 1664\n", "划分后的训练集大小: 1670, 验证集大小: 313\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1679, 验证集大小: 330\n", "原始训练集大小: 1667\n", "划分后的训练集大小: 1664, 验证集大小: 338\n", "[100]\ttrain's ndcg@1: 0.9445\tvalid's ndcg@1: 1\n", "原始训练集大小: 划分后的训练集大小: 1667, 验证集大小: 335\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", " 1715\n", "划分后的训练集大小: 1715, 验证集大小: 327\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1662\n", "原始训练集大小: 1754\n", "划分后的训练集大小: 1662, 验证集大小: 317\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1754, 验证集大小: 324\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1664\n", "原始训练集大小: 1747\n", "划分后的训练集大小: 1664, 验证集大小: 306\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1747, 验证集大小: 361\n", "原始训练集大小: 1708\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1708, 验证集大小: 297\n", "原始训练集大小: 1683\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1683, 验证集大小: 353\n", "原始训练集大小: 1725\n", "原始训练集大小: 1709\n", "原始训练集大小: 1735\n", "划分后的训练集大小: 1709, 验证集大小: 354\n", "划分后的训练集大小: 1725, 验证集大小: 371\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "\n", "划分后的训练集大小: 1735, 验证集大小: 370\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.924\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 0.81\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's 
ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1\n", "原始训练集大小: 1730\n", "原始训练集大小: 1734\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1734, 验证集大小: 337\n", "划分后的训练集大小: 1730, 验证集大小: 330\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1664\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1657\n", "划分后的训练集大小: 1664, 验证集大小: 291\n", "原始训练集大小: 1751\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1751, 验证集大小: 376\n", "原始训练集大小: 1748\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1657, 验证集大小: 338\n", "原始训练集大小: 1733\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1674\n", "划分后的训练集大小: 1674, 验证集大小: 347划分后的训练集大小: 1748, 验证集大小: 344\n", "原始训练集大小: 1610\n", "\n", "原始训练集大小: 1628\n", "划分后的训练集大小: 1733, 验证集大小: 339\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1628, 验证集大小: 329\n", "划分后的训练集大小: 1610, 验证集大小: 312\n", "原始训练集大小: 1627\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1627, 验证集大小: 301\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1639\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "原始训练集大小: 1711\n", "原始训练集大小: 1700\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 
0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1711, 验证集大小: 356\n", "划分后的训练集大小: 1700, 验证集大小: 354\n", "划分后的训练集大小: 1639, 验证集大小: 339\n", "原始训练集大小: 1637\n", "原始训练集大小: 1662\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1637, 验证集大小: 300\n", "划分后的训练集大小: 1662, 验证集大小: 324\n", "原始训练集大小: 1623\n", "原始训练集大小: 1672\n", "原始训练集大小: 1659\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1623, 验证集大小: 322\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1659, 验证集大小: 374\n", "划分后的训练集大小: 1672, 验证集大小: 325\n", "原始训练集大小: 1729\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1729, 验证集大小: 375\n", "原始训练集大小: 1727\n", "划分后的训练集大小: 1727, 验证集大小: 370\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1641\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1641, 验证集大小: 337\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 0.9025[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1\n", "\n", "原始训练集大小: 1624\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "原始训练集大小: 1668\n", "划分后的训练集大小: 1624, 验证集大小: 271\n", "划分后的训练集大小: 1668, 验证集大小: 344\n", "[100]\ttrain's ndcg@1: 0.8725\tvalid's ndcg@1: 1\n", "原始训练集大小: 1704\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1704, 验证集大小: 336\n", "原始训练集大小: 1671\n", "原始训练集大小: 1659\n", "[100]\ttrain's ndcg@1: 0.885\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1694\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 0.81\n", "划分后的训练集大小: 1671, 验证集大小: 305\n", "原始训练集大小: 1659\n", "划分后的训练集大小: 1694, 验证集大小: 350划分后的训练集大小: 1659, 验证集大小: 344\n", "\n", 
"[100]\ttrain's ndcg@1: 0.923\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1659, 验证集大小: 364\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.923\tvalid's ndcg@1: 0.81\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1663\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1663, 验证集大小: 284\n", "原始训练集大小: 1667\n", "原始训练集大小: 1640\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1原始训练集大小: 1654\n", "\n", "划分后的训练集大小: 1667, 验证集大小: 325\n", "划分后的训练集大小: 1640, 验证集大小: 329\n", "划分后的训练集大小: 1654, 验证集大小: 320\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1671\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "原始训练集大小: 1659\n", "原始训练集大小: 1675\n", "原始训练集大小: [100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.8735\tvalid's ndcg@1: 1\n", "原始训练集大小: 1699\n", " 1634\n", "划分后的训练集大小: 1671, 验证集大小: 317\n", "划分后的训练集大小: 1675, 验证集大小: 330\n", "原始训练集大小: 1636\n", "原始训练集大小: 1629\n", "划分后的训练集大小: 1699, 验证集大小: 359\n", "划分后的训练集大小: 1659, 验证集大小: 328\n", "原始训练集大小: 1681\n", "划分后的训练集大小: 1634, 验证集大小: 302\n", "划分后的训练集大小: 1636, 验证集大小: 342\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9055\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1681, 验证集大小: 351\n", "原始训练集大小: 1661\n", "划分后的训练集大小: 1629, 验证集大小: 354\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1661, 验证集大小: 339[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", 
"[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1661\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "原始训练集大小: 1686\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1686, 验证集大小: 339\n", "划分后的训练集大小: 1661, 验证集大小: 325\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "原始训练集大小: 1678\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "划分后的训练集大小: 1678, 验证集大小: 335\n", "[100]\ttrain's ndcg@1: 0.8785\tvalid's ndcg@1: 1\n", "原始训练集大小: 1697\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "划分后的训练集大小: 1697, 验证集大小: 329\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 0.81\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.885\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9045\tvalid's ndcg@1: 
1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.905\tvalid's ndcg@1: 0.81\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.887\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.923\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.923\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9125\tvalid's ndcg@1: 0.5625[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 0.81\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9445\tvalid's ndcg@1: 
0.7225\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.923\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9415\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 0.9425\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.962\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.9805\tvalid's ndcg@1: 1\n", "[100]\ttrain's ndcg@1: 0.961\tvalid's ndcg@1: 0.9025\n", "[100]\ttrain's ndcg@1: 1\tvalid's ndcg@1: 1\n" ] } ], "execution_count": 57 }, { "cell_type": "code", "id": "10f15e935aa02a34", 
import pandas as pd
import numpy as np


def analyze_nan_factors(df, factor_columns):
    """Summarize NaN statistics for the given factor columns of a DataFrame.

    Prints the NaN percentage of every factor column, the five factors with
    the most NaNs, and — for each of those five — a per-trade-date NaN count,
    which shows whether missing values cluster on specific dates.

    Args:
        df (pd.DataFrame): factor data; must contain a 'trade_date' column.
        factor_columns (list): names of the factor columns to inspect.

    Returns:
        pd.Series: NaN percentage per factor column (new, backward-compatible
        return value; previous callers ignored the implicit None).
    """
    if len(df) == 0:
        # Guard against division by zero on an empty frame.
        print("DataFrame 为空,无法分析 NaN 值。")
        return pd.Series(dtype=float)

    print("### 各因子 NaN 值占比 ###")
    nan_percentage = df[factor_columns].isnull().sum() / len(df) * 100
    print(nan_percentage.sort_values(ascending=False))
    print("\n")

    print("### 包含 NaN 值最多的前 5 个因子 ###")
    top_nan_factors = nan_percentage[nan_percentage > 0].sort_values(ascending=False).head(5)
    print(top_nan_factors)
    print("\n")

    for factor in top_nan_factors.index:
        print(f"### 因子 '{factor}' 的 NaN 值分析 ###")

        # Per-day NaN count; vectorized groupby-sum instead of the original
        # apply(lambda x: x.isnull().sum()) — same values, no Python-level loop.
        nan_by_date = df[factor].isnull().groupby(df['trade_date']).sum()
        print("\n每日 NaN 值数量:")
        print(nan_by_date.sort_values(ascending=False).head())

        print("-" * 30)
        print("\n")

    return nan_percentage


# BUG FIX: the original guard checked for 'pdf' although the call passes
# train_data — test the variables that are actually used, otherwise the call
# can raise NameError even when the guard passes.
if 'train_data' in locals() and 'feature_columns' in locals():
    analyze_nan_factors(train_data.copy(), feature_columns)
else:
    print("请确保已定义 'train_data' 且因子列名列表为 'feature_columns' 后再运行代码。")