2025-06-22 23:03:50 +08:00
|
|
|
|
{
|
|
|
|
|
|
"cells": [
|
|
|
|
|
|
{
|
2025-07-10 15:07:31 +08:00
|
|
|
|
"cell_type": "code",
|
|
|
|
|
|
"id": "b93c7ca1",
|
2025-06-22 23:03:50 +08:00
|
|
|
|
"metadata": {
|
|
|
|
|
|
"ExecuteTime": {
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"end_time": "2025-10-03T06:58:45.372926Z",
|
|
|
|
|
|
"start_time": "2025-10-03T06:58:45.083976Z"
|
2025-06-22 23:03:50 +08:00
|
|
|
|
}
|
|
|
|
|
|
},
|
|
|
|
|
|
"source": [
|
|
|
|
|
|
"import pandas as pd\n",
|
|
|
|
|
|
"import numpy as np\n",
|
|
|
|
|
|
"import matplotlib.pyplot as plt\n",
|
|
|
|
|
|
"import seaborn as sns\n",
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"import talib as ta # Make sure TA-Lib is installed: pip install TA-Lib\n",
|
2025-06-22 23:03:50 +08:00
|
|
|
|
"import statsmodels.api as sm\n",
|
|
|
|
|
|
"\n",
|
|
|
|
|
|
import warnings

# Silence all warnings for cleaner notebook output.
# NOTE(review): a blanket "ignore" also hides pandas/numpy deprecation and
# chained-assignment warnings — consider narrowing the filter category.
warnings.filterwarnings("ignore")

# --- 0. Configure your file path ---
# Please replace 'your_futures_data.csv' with the actual path to your CSV file
# NOTE(review): absolute local path — only works on the author's machine;
# consider a configurable DATA_DIR instead.
file_path = '/mnt/d/PyProject/NewQuant/data/data/KQ_m@CZCE_SA/KQ_m@CZCE_SA_min15.csv'

sns.set(style='whitegrid')
plt.rcParams['font.sans-serif'] = ['SimHei'] # render CJK axis labels correctly
plt.rcParams['axes.unicode_minus'] = False # render the minus sign correctly with CJK fonts
|
2025-07-28 14:36:58 +08:00
|
|
|
|
],
|
|
|
|
|
|
"outputs": [],
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"execution_count": 1
|
2025-06-22 23:03:50 +08:00
|
|
|
|
},
|
|
|
|
|
|
{
|
|
|
|
|
|
"metadata": {
|
|
|
|
|
|
"ExecuteTime": {
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"end_time": "2025-10-03T06:58:45.441191Z",
|
|
|
|
|
|
"start_time": "2025-10-03T06:58:45.381967Z"
|
2025-06-22 23:03:50 +08:00
|
|
|
|
}
|
|
|
|
|
|
},
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"cell_type": "code",
|
2025-06-22 23:03:50 +08:00
|
|
|
|
"source": [
|
|
|
|
|
|
"\n",
|
|
|
|
|
|
"# --- 1. Data Loading and Preprocessing ---\n",
|
|
|
|
|
|
def load_and_preprocess_data(file_path):
    """Read a futures bar CSV into a time-indexed, chronologically sorted frame.

    The file must provide a 'datetime' column (becomes the index) plus the
    OHLCV columns 'open', 'high', 'low', 'close' and 'volume'.  Rows with
    missing values are discarded, with a warning stating how many were lost.

    Returns the prepared DataFrame, or None when the file is missing or any
    other loading/validation problem occurs (the error is printed).
    """
    try:
        bars = pd.read_csv(file_path, parse_dates=['datetime'], index_col='datetime')
        # Downstream rolling logic assumes chronological order.
        bars = bars.sort_index()

        # Drop incomplete rows and report how many were removed.
        rows_before = len(bars)
        bars = bars.dropna()
        dropped = rows_before - len(bars)
        if dropped:
            print(f"Warning: Missing values found in data, deleted {dropped} rows.")

        # Validate the schema before handing the frame back.
        required_columns = ['open', 'high', 'low', 'close', 'volume']
        missing = [col for col in required_columns if col not in bars.columns]
        if missing:
            raise ValueError(f"CSV file is missing required columns. Please ensure it contains: {required_columns}")

        print(f"Successfully loaded {len(bars)} rows of data.")
        print("First 5 rows of data:")
        print(bars.head())
        return bars
    except FileNotFoundError:
        print(f"Error: File '{file_path}' not found. Please check the path.")
        return None
    except Exception as e:
        # Any other failure (parse error, schema error) is reported, not raised.
        print(f"Error during data loading or preprocessing: {e}")
        return None
|
|
|
|
|
|
"\n",
|
2025-07-28 14:36:58 +08:00
|
|
|
|

# Build the working dataset; later cells read `df_raw`.
# NOTE(review): load_and_preprocess_data returns None on failure, which would
# make the following cells raise — confirm the configured path before a full run.
df_raw = load_and_preprocess_data(file_path)
# Optional sample-period cutoff (currently disabled):
# df_raw = df_raw[df_raw.index <= '2024-01-01']
|
2025-07-28 14:36:58 +08:00
|
|
|
|
],
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"id": "3dcf270c1da82220",
|
2025-06-22 23:03:50 +08:00
|
|
|
|
"outputs": [
|
|
|
|
|
|
{
|
|
|
|
|
|
"name": "stdout",
|
|
|
|
|
|
"output_type": "stream",
|
|
|
|
|
|
"text": [
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"Successfully loaded 25662 rows of data.\n",
|
2025-06-22 23:03:50 +08:00
|
|
|
|
"First 5 rows of data:\n",
|
2025-10-05 00:09:59 +08:00
|
|
|
|
" open high low close volume open_oi \\\n",
|
|
|
|
|
|
"datetime \n",
|
|
|
|
|
|
"2020-12-31 14:45:00 1607.0 1611.0 1603.0 1611.0 19480.0 148833.0 \n",
|
|
|
|
|
|
"2021-01-04 09:00:00 1610.0 1636.0 1601.0 1620.0 55486.0 146448.0 \n",
|
|
|
|
|
|
"2021-01-04 09:15:00 1620.0 1620.0 1601.0 1604.0 30314.0 153373.0 \n",
|
|
|
|
|
|
"2021-01-04 09:30:00 1604.0 1606.0 1590.0 1595.0 30803.0 157091.0 \n",
|
|
|
|
|
|
"2021-01-04 09:45:00 1595.0 1601.0 1594.0 1600.0 10031.0 158730.0 \n",
|
2025-06-22 23:03:50 +08:00
|
|
|
|
"\n",
|
2025-10-05 00:09:59 +08:00
|
|
|
|
" close_oi underlying_symbol \n",
|
|
|
|
|
|
"datetime \n",
|
|
|
|
|
|
"2020-12-31 14:45:00 146448.0 CZCE.SA105 \n",
|
|
|
|
|
|
"2021-01-04 09:00:00 153373.0 CZCE.SA105 \n",
|
|
|
|
|
|
"2021-01-04 09:15:00 157091.0 CZCE.SA105 \n",
|
|
|
|
|
|
"2021-01-04 09:30:00 158730.0 CZCE.SA105 \n",
|
|
|
|
|
|
"2021-01-04 09:45:00 160031.0 CZCE.SA105 \n"
|
2025-06-22 23:03:50 +08:00
|
|
|
|
]
|
|
|
|
|
|
}
|
|
|
|
|
|
],
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"execution_count": 2
|
2025-06-22 23:03:50 +08:00
|
|
|
|
},
|
|
|
|
|
|
{
|
|
|
|
|
|
"metadata": {
|
|
|
|
|
|
"ExecuteTime": {
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"end_time": "2025-10-03T07:02:35.779448Z",
|
|
|
|
|
|
"start_time": "2025-10-03T06:58:45.453734Z"
|
2025-06-22 23:03:50 +08:00
|
|
|
|
}
|
|
|
|
|
|
},
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"cell_type": "code",
|
2025-06-22 23:03:50 +08:00
|
|
|
|
"source": [
|
|
|
|
|
|
"import pandas as pd\n",
|
|
|
|
|
|
"import numpy as np\n",
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"from scipy.signal import find_peaks\n",
|
|
|
|
|
|
"from scipy.stats import gaussian_kde\n",
|
2025-06-22 23:03:50 +08:00
|
|
|
|
"import matplotlib.pyplot as plt\n",
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"from tqdm import tqdm\n",
|
|
|
|
|
|
"# NOTE: an earlier comment promised numba JIT compilation for near-C speed,\n",
"# but numba is never imported or applied anywhere in this notebook — the\n",
"# computation below is plain NumPy/Python.\n",
|
|
|
|
|
|
"\n",
|
|
|
|
|
|
def find_hvn_kde_numba(prices: np.ndarray, volumes: np.ndarray) -> np.ndarray:
    """Locate High-Volume Nodes (HVNs) of a volume-weighted price profile.

    A Gaussian KDE of `prices`, weighted by `volumes`, is evaluated on a
    100-point grid spanning [min(prices), max(prices)]; interior grid points
    that are local maxima with density above 20% of the global maximum are
    returned as HVN price levels.

    NOTE: despite the historical name, no numba JIT is applied — this is
    plain NumPy.  The original per-gridpoint Python loop was the hot path
    (called once per bar); it is replaced here by an equivalent vectorized
    kernel-matrix computation.

    Parameters
    ----------
    prices, volumes : 1-D arrays of equal length.

    Returns
    -------
    np.ndarray of HVN price levels; array([min_price]) or array([mean]) for
    degenerate price ranges; array([nan]) when the input is empty/zero-volume
    or no qualifying peak exists.
    """
    # Degenerate inputs: too few points, or essentially no volume at all.
    if len(prices) < 2 or np.sum(volumes) < 1e-6:
        return np.array([np.nan])

    min_price, max_price = np.min(prices), np.max(prices)
    if min_price == max_price:
        return np.array([min_price])

    # Scott-style bandwidth: (3n/4)^(-1/5) * std  ≈  1.06 * std * n^(-1/5).
    n = len(prices)
    bw = (n * 3 / 4) ** (-1 / 5) * np.std(prices)
    if bw < 1e-6:
        # Prices are numerically indistinguishable; return their mean.
        return np.array([np.mean(prices)])

    # Evaluate the weighted KDE on a coarse grid (100 points keeps it fast).
    eval_points = np.linspace(min_price, max_price, 100)
    # Vectorized KDE: z has shape (grid, samples); density is the
    # volume-weighted sum of Gaussian kernels at each grid point —
    # mathematically identical to the original explicit loop.
    z = (eval_points[:, None] - prices[None, :]) / bw
    kernels = np.exp(-0.5 * z ** 2) / (bw * np.sqrt(2.0 * np.pi))
    density = kernels @ volumes

    # Interior local maxima above a *height* threshold of 20% of the peak
    # density.  (The original comment called this a "prominence" threshold,
    # but it is a plain height cutoff; endpoints are never candidates.)
    threshold = np.max(density) * 0.2
    interior = np.arange(1, len(density) - 1)
    is_peak = (
        (density[interior] > density[interior - 1])
        & (density[interior] > density[interior + 1])
        & (density[interior] > threshold)
    )
    peak_idx = interior[is_peak]

    if peak_idx.size > 0:
        return eval_points[peak_idx]
    return np.array([np.nan])
|
|
|
|
|
|
"\n",
|
|
|
|
|
|
"# ==============================================================================\n",
|
|
|
|
|
|
"# 模块二:主分析流程(已彻底重构为向量化版本)\n",
|
|
|
|
|
|
"# ==============================================================================\n",
|
|
|
|
|
|
"\n",
|
|
|
|
|
|
def analyze_hvn_crossing_event_vectorized(
        df_raw: pd.DataFrame,
        profile_window: int = 250,
        forward_window: int = 50,
        tick_size: float = 0.01):
    """Event study of close-price crossings of High-Volume Nodes (HVNs).

    Pipeline:
      1. copy the input bars onto a RangeIndex;
      2. expand each bar into evenly spaced price points over [low, high],
         splitting the bar's volume uniformly among them;
      3. for every bar, estimate HVN levels from the previous
         `profile_window` bars' expanded volume profile
         (via `find_hvn_kde_numba`) — the current bar is excluded, so there
         is no look-ahead;
      4. record bullish/bearish crossings of any HVN by the close price;
      5. average normalized close paths for `forward_window` bars after each
         event type and plot them.

    Parameters
    ----------
    df_raw : DataFrame with 'open', 'high', 'low', 'close', 'volume'.
    profile_window : look-back bars per volume profile.
    forward_window : forward bars analysed after each event.
    tick_size : price step used when expanding bars.

    Returns None; output is printed text plus a matplotlib figure.
    """
    print("Step 1: 准备数据...")
    df = df_raw.copy().reset_index(drop=True)  # work on a RangeIndex copy
    # Forward log-return is kept for completeness; the plot below uses
    # normalized price paths instead.
    df['forward_returns'] = np.log(df['close'].shift(-forward_window) / df['close'])

    print("Step 2: 一次性向量化展开所有K线...")
    # Number of evenly spaced price points each bar expands into.
    num_ticks = (((df['high'] - df['low']) / tick_size).round().astype(int) + 1).values
    volumes_arr = df['volume'].values
    # BUGFIX: the original sized the expanded arrays with np.repeat over ALL
    # bars but only filled bars with positive volume, leaving np.empty
    # garbage values and misaligning every bar after the first skipped one.
    # Size the index/arrays only for bars that will actually be filled.
    eff_ticks = np.where((num_ticks > 0) & (volumes_arr > 0), num_ticks, 0)
    expanded_idx = np.repeat(df.index, eff_ticks)

    def expand_bars(lows, highs, volumes, num_ticks, expanded_size):
        # Spread each bar's volume uniformly across [low, high].
        all_prices = np.empty(expanded_size, dtype=np.float64)
        all_volumes = np.empty(expanded_size, dtype=np.float64)
        current_pos = 0
        for i in range(len(lows)):
            n = num_ticks[i]
            if n > 0 and volumes[i] > 0:
                all_prices[current_pos:current_pos + n] = np.linspace(lows[i], highs[i], n)
                all_volumes[current_pos:current_pos + n] = volumes[i] / n
                current_pos += n
        # With eff_ticks sizing, the arrays are exactly full; slice defensively.
        return all_prices[:current_pos], all_volumes[:current_pos]

    all_prices, all_volumes = expand_bars(df['low'].values, df['high'].values,
                                          volumes_arr, num_ticks,
                                          len(expanded_idx))
    print(f"K线展开完成,共 {len(all_prices)} 个价格点。")

    print(f"Step 3: 使用滚动窗口和Numba JIT批量计算HVN...")
    # expanded_idx is sorted, so each look-back window is a contiguous slice.
    # Locating it with searchsorted replaces the original per-bar isin() scan
    # over the full expanded table (the dominant cost of the whole run) with
    # two binary searches, yielding identical window contents.
    hvn_list = [np.array([np.nan])] * len(df)
    for i in tqdm(range(profile_window, len(df)), desc="Calculating HVNs"):
        lo = np.searchsorted(expanded_idx, i - profile_window, side='left')
        hi = np.searchsorted(expanded_idx, i, side='left')
        hvn_list[i] = find_hvn_kde_numba(all_prices[lo:hi], all_volumes[lo:hi])

    df['hvns'] = hvn_list
    # BUGFIX: dropna(subset=['hvns']) was a no-op — an ndarray cell is never
    # treated as missing.  Explicitly discard rows (the warm-up period and
    # failed estimates) whose HVN array is all-NaN, as originally intended.
    has_hvn = df['hvns'].apply(
        lambda a: isinstance(a, np.ndarray) and not np.isnan(a).all())
    df = df[has_hvn]
    print("HVN批量计算完成.")

    print("Step 4: 识别HVN穿越事件...")
    # A bullish (bearish) cross is the close moving up (down) through any HVN.
    events = []
    for i, row in df.iterrows():
        idx = df.index.get_loc(i)
        if idx == 0:
            continue  # no previous close to compare against
        prev_close = df['close'].iloc[idx - 1]
        current_close = row['close']
        for hvn in row['hvns']:
            if prev_close < hvn and current_close > hvn:
                events.append({'event_index': i, 'event_type': 'bullish_cross'})
            if prev_close > hvn and current_close < hvn:
                events.append({'event_index': i, 'event_type': 'bearish_cross'})
    if not events:
        print("未找到任何HVN穿越事件。")
        return
    df_events = pd.DataFrame(events)
    print(f"共识别出 {len(df_events)} 个事件.")

    print("Step 5: 分析事件后的平均走势...")

    def collect_paths(event_indices):
        # Close-price paths after each event, normalized to the event close.
        paths = []
        for index in event_indices:
            loc = df.index.get_loc(index)
            if loc + forward_window < len(df):
                start_price = df.loc[index, 'close']
                paths.append(df['close'].iloc[loc:loc + forward_window].values / start_price)
        return paths

    bullish_paths = collect_paths(
        df_events[df_events['event_type'] == 'bullish_cross']['event_index'])
    bearish_paths = collect_paths(
        df_events[df_events['event_type'] == 'bearish_cross']['event_index'])
    if not bullish_paths or not bearish_paths:
        print("事件数量不足,无法进行路径分析。")
        return
    avg_bullish_path = np.mean(bullish_paths, axis=0)
    avg_bearish_path = np.mean(bearish_paths, axis=0)

    print("Step 6: 结果可视化...")
    plt.style.use('seaborn-v0_8-darkgrid')
    fig, ax = plt.subplots(figsize=(12, 7))
    ax.plot(avg_bullish_path, label=f'Avg Path after Bullish Cross (N={len(bullish_paths)})', color='green', linewidth=2)
    ax.plot(avg_bearish_path, label=f'Avg Path after Bearish Cross (N={len(bearish_paths)})', color='red', linewidth=2)
    ax.axhline(1.0, color='black', linestyle='--', linewidth=1, label='Event Price Level')
    ax.set_title(f'Average Price Path after HVN Crossing Event (Forward Window = {forward_window} bars)', fontsize=16)
    ax.set_xlabel('Bars after Event', fontsize=12)
    ax.set_ylabel('Normalized Price (Event Price = 1.0)', fontsize=12)
    ax.legend(fontsize=10)
    ax.grid(True)
    plt.show()
|
|
|
|
|
|
"\n",
|
|
|
|
|
|
"\n",
|
|
|
|
|
|
# ==============================================================================
# Usage example (simulated data)
# ==============================================================================
# NOTE(review): this cell OVERWRITES the real `df_raw` loaded from CSV in the
# earlier cell with a simulated random walk — presumably intentional as a
# timing/demo run, but confirm before interpreting the plotted result as real
# SA futures behaviour.
print("正在生成模拟数据...")
np.random.seed(42)  # fixed seed so the demo run is reproducible
n_points = 26000
# Random-walk mid price; OHLC bars are jittered around it below.
prices = 100 + np.random.randn(n_points).cumsum() * 0.1
# NOTE(review): 'close' is jittered independently of 'high'/'low', so a
# simulated close can fall outside [low, high] — harmless for this demo,
# but worth confirming if bar consistency ever matters.
df_raw = pd.DataFrame({
    'open': prices, 'high': prices + np.random.uniform(0, 0.5, n_points),
    'low': prices - np.random.uniform(0, 0.5, n_points),
    'close': prices + np.random.uniform(-0.25, 0.25, n_points),
    'volume': np.random.randint(100, 1000, n_points) * 10
}, index=pd.to_datetime(pd.date_range('2023-01-01', periods=n_points, freq='min')))
df_raw['open'] = df_raw['close'].shift(1)  # open = previous close; first row becomes NaN
df_raw = df_raw.dropna()  # drops that first NaN row
print(f"模拟数据生成完毕,共 {len(df_raw)} 行。")

# --- Run the analysis on the simulated data ---
analyze_hvn_crossing_event_vectorized(
    df_raw,
    profile_window=250,
    forward_window=50
)
|
2025-07-28 14:36:58 +08:00
|
|
|
|
],
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"id": "30958840dbc186d7",
|
2025-06-22 23:03:50 +08:00
|
|
|
|
"outputs": [
|
|
|
|
|
|
{
|
|
|
|
|
|
"name": "stdout",
|
|
|
|
|
|
"output_type": "stream",
|
|
|
|
|
|
"text": [
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"正在生成模拟数据...\n",
|
|
|
|
|
|
"模拟数据生成完毕,共 25999 行。\n",
|
|
|
|
|
|
"Step 1: 准备数据...\n",
|
|
|
|
|
|
"Step 2: 一次性向量化展开所有K线...\n",
|
|
|
|
|
|
"K线展开完成,共 1327133 个价格点。\n",
|
|
|
|
|
|
"Step 3: 使用滚动窗口和Numba JIT批量计算HVN...\n"
|
2025-06-22 23:03:50 +08:00
|
|
|
|
]
|
|
|
|
|
|
},
|
|
|
|
|
|
{
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"name": "stderr",
|
2025-07-28 14:36:58 +08:00
|
|
|
|
"output_type": "stream",
|
|
|
|
|
|
"text": [
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"Calculating HVNs: 100%|██████████| 25749/25749 [03:49<00:00, 112.18it/s]\n"
|
2025-07-28 14:36:58 +08:00
|
|
|
|
]
|
|
|
|
|
|
},
|
|
|
|
|
|
{
|
|
|
|
|
|
"name": "stdout",
|
|
|
|
|
|
"output_type": "stream",
|
|
|
|
|
|
"text": [
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"HVN批量计算完成.\n",
|
|
|
|
|
|
"Step 4: 识别HVN穿越事件...\n",
|
|
|
|
|
|
"共识别出 4748 个事件.\n",
|
|
|
|
|
|
"Step 5: 分析事件后的平均走势...\n",
|
|
|
|
|
|
"Step 6: 结果可视化...\n"
|
2025-07-28 14:36:58 +08:00
|
|
|
|
]
|
|
|
|
|
|
},
|
|
|
|
|
|
{
|
|
|
|
|
|
"data": {
|
|
|
|
|
|
"text/plain": [
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"<Figure size 1200x700 with 1 Axes>"
|
2025-07-28 14:36:58 +08:00
|
|
|
|
],
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"image/png": "iVBORw0KGgoAAAANSUhEUgAABAcAAAJyCAYAAAC4+6kKAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzs3Xd4U9UbB/DvTdqme1I2iCBpWQXKKEugbFCmIMiWIaCAiPxYKkMRBNkgiEzZIFv2FFCg7CllFQTKbuluRpP7+yPm2tB0pelI+/08T5+md57bnNzc+95z3iOIoiiCiIiIiIiIiAosWW4XgIiIiIiIiIhyF4MDRERERERERAUcgwNEREREREREBRyDA0REREREREQFHIMDRERERERERAUcgwNEREREREREBRyDA0REREREREQFHIMDRERERERERAUcgwNEREREREREBRyDA5Rt2rVrBz8/P1SuXBmvX7/O7eIUGNu2bYOfn5/Jj7+/P2rUqIHOnTtj8eLFiI+Pz/R2Hz9+DD8/PzRp0iQbSp11TZo0SXHclStXRuPGjTFixAicP38+R8oREhICPz8/9OrVK0f2lx6NRoPZs2ejRYsWqFy5cp5+D5MzvochISFpLmd837dt2wYAePjwIfz9/eHn54d79+6lux+tVos6derAz88Pe/fuBfBfXffz80NAQACePXuW6voVK1aEn58fHj9+nImj+8/Tp08xZ84cfPjhh6hTpw4qVaqEmjVromPHjpgyZQquXr1q0XbzGuN5aezYsbldlDT16tUrxXnE3M+CBQtyu6i55vDhw/Dz88OKFStMppv77nnzp2bNmrlUattjrIvpnQONunfvDj8/PyxfvtzsfI1Gg6pVq8LPzw99+/ZNdTuDBg2Cn58f5s6dK00zvn85ZezYsSbndfrPggUL0v2cpfXd988//2Ds2LFo2LAhKleujIYNG2Ls2LF49OhRpsuS1653LPHVV1+hYsWKuHXrVm4XJc+wy+0CUP509epV6YOm1Wqxa9cu9OnTJ5dLVbA4OzujZcuWAACdTodHjx7hypUruHbtGnbs2IF169ahUKFCuVxK6wsMDMRbb70FAIiJicH169exb98+7N+/H2PGjMHHH3+cpe03adIE4eHhOHLkCEqWLGmNImeruXPnYvny5ShUqBCaNm0KJycneHl5ATBcfJ49exarV69GUFBQLpfUOkqXLo1atWrh7Nmz2Lp1K0aPHp3m8kePHsXr16/h6emJZs2apZivVqsxb948TJs2zeplXbp0KebNmwetVgtnZ2dUrVoVPj4+iI+Px+3bt7FmzRqsWbMG/fv3T/c4yLr8/f1RoUKFVOenNS+vssa5S6PR4IcffkCxYsXQo0cPs8sk/+55k6Ojo0X7pfQFBQXhwoULOHv2LPr3759i/tWrV6FSqQAAly9fhkajgYODg8kyOp1OCqTnl++E/Cqtc5Sbm5vZ6RcuXED//v2RmJiI8uXLo0aNGrhz5w62b9+OAwcOYOXKlahWrVo2ljrvGTZsGH7//XdMmTIFa9asye3i5AkMDlC22LJlCwCgSJEieP78ObZs2cLgQA7z8vLCDz/8YDLt6tWr6NOnDx48eIAZM2ZgxowZGd5ekSJFsHfvXtjb21u7qFbVpUsXdOrUSfpbrVZjwoQJ2LFjB3788Uc0btwYb7/9di6WMGft378fALBu3TqUKVMmdwuTQzp37oyzZ89i165dGDlyJOzsUv+q27p1KwBDS6c3L5QFQYCDgwN27tyJfv36oXz58lYr48yZM7F06VLY29tjzJgx6NmzZ4r9X758GXPmzMGDBw+stt/c0rx5c1StWjXVi9a8plmzZhg2bFhuFyPPWbNmDR49eoQJEyZAoVCYXcbcdw9lv6CgICxatAjnz5+HTqeDXC43mW9sgVCpUiXcuHEDV69eTdGS48aNG4iLi4ODgwMCAwOl6cZWVZR3ZPYclZiYiBEjRiAxMRGDBg
3CyJEjpXmzZ8/GkiVLMGLECOzfv79ABfGKFi2KLl26YO3atThy5AiaNm2a20XKdexWQFaXmJiIPXv2AABmzJgBZ2dn3L59O980j7VlAQEB6NevHwDg0KFDSEpKyvC69vb2KFeuHEqXLp1dxcsWCoUCEyZMgLOzM3Q6HQ4dOpTbRcpRT548AYACExgAgJYtW8Ld3R0vX77EiRMnUl3uxYsX+PPPPwEAH3zwQYr5MpkMPXv2hE6nw+zZs61WvtOnT2Pp0qUAgDlz5qBfv34pAgMAUK1aNaxatUr6zNoyNzc3lCtXDoULF87topCFdDod1q5dC4VCgXbt2uV2cegN1atXh4ODA+Li4nDjxo0U88+ePQu5XI7BgwcDgNnuCsZp1apVMwn+lCtXDuXKlcumklNO2LZtG168eIEyZcpgxIgRJvNGjBiBMmXK4OnTp9ixY0eulC83de7cGQDw66+/5nJJ8gYGB8jq9u/fj7i4OCiVStSpUwdt2rQB8F9rAqN79+7Bz88PtWrVglqtTnV7nTp1gp+fHw4fPmwyPSkpCb/99ht69eqF2rVro3LlymjSpAkmTpyIp0+fpthO8r5RiYmJmDdvHlq3bo2qVaua9MG+evUqZsyYgc6dO6N+/fqoXLky6tWrh8GDB+PUqVOpllMURWzZsgWdOnVC1apVERQUhAEDBuDixYvp9st6/vw5pk2bJpWnevXq+OCDD7B27dpM3cBnRKVKlQAACQkJUi4IYx+2BQsW4MmTJxg/fjwaNWqESpUqSX2E08s5kJiYiFWrVuGjjz5CrVq1ULlyZQQHB2Pw4MH4/fffza6zf/9+9O/fH3Xq1EHlypXx7rvvYtSoUbh7965Vj9nFxUVqLWDsGx4ZGYnVq1dj4MCBaNKkCQICAhAYGIhOnTrhl19+SVEnjf1pw8PDAQBNmzY16eNn7kJLq9Xil19+wXvvvYeAgAAEBQVh6NChGeoL/6bM1ktjX3xRFAHApKzGYzl79iwAoHfv3inmJxcdHY358+ejffv2qF69OqpWrYq2bdti0aJFSExMTLHvjNSn7OTo6Ij3338fwH8tA8zZsWMHdDodKleuDH9/f7PLDBo0CB4eHjh69CguXLhglfItWrQIgOE9at68eZrLCoJg8nQv+edQp9Nh5cqV6NChA6pXr56iT/DJkycxaNAg1K1bF5UrV0aDBg0wYsQIXLt2zey+YmNjMWfOHLRt2xbVqlWT1unWrZvU/SG569evY8SIEVLf1cDAQDRt2hTDhg1Lcb5OLedA8nOjJZ+X8+fPo3///qhZs6Z03jRe3OZEP+mYmBgEBASgQoUKeP78earLDR8+HH5+fmYvPjNzHkz+/ouiiE2bNqFTp06oVq0aatSogX79+uHSpUsm61hy7jLn6NGjePLkCZo1a2bVFiB79uxBnz59pO/x4OBgjBs3Dvfv3ze7vPHc9vjxYxw+fBi9e/dG7dq1pWOZNm0a/Pz8sHLlyhTrtmnTBn5+ftLNQHILFy6En58f5s2bJ03TarXYuXMnvvzyS7Rq1QqBgYEICAhAy5YtMWXKlFTf8+T5As6fP4/BgwejTp068Pf3Nzm/Pn36FOPGjUODBg1QpUoVtGjRAnPmzJGa/2eGQqGQmoS/+Z5qNBpcvnwZFSpUwLvvvgt7e3vp/J+ccdqbXQpS+ywlfy/OnDmDfv36oVatWggICEDHjh3TvNGMiorC999/j+DgYCk/0LfffouoqKg0jzMpKQkbNmxAt27dUKNGDen/Zu79EEURQUFB8Pf3T5H76urVq9JxrVu3LsV+jJ8TS/ri50XGc/J7770Hmcz09k8mk0nX6pY+QElMTMTs2bPRvHlzVKlSBQ0aNMD48eNT/YycOnUK3333Hdq3b4+goCAp/8GIESNSfZiY0WuLU6dOYfDgwahXrx4qVaqEWrVqoUWLFhg1ahTOnTuXYrsVKlSAv78/QkJCLLo+y2/YrYCszhgEMD6J++CDD7Blyxbs3bsX48
ePl5orlStXDtWrV8elS5dw+PBhvPfeeym2devWLdy4cQOFChVC48aNpelxcXEYMmQIzp49C2dnZ1SuXBleXl64ffs
|
2025-07-28 14:36:58 +08:00
|
|
|
|
},
|
|
|
|
|
|
"metadata": {},
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"output_type": "display_data",
|
|
|
|
|
|
"jetTransient": {
|
|
|
|
|
|
"display_id": null
|
|
|
|
|
|
}
|
2025-07-28 14:36:58 +08:00
|
|
|
|
}
|
|
|
|
|
|
],
|
2025-10-05 00:09:59 +08:00
|
|
|
|
"execution_count": 3
|
2025-06-22 23:03:50 +08:00
|
|
|
|
}
|
|
|
|
|
|
],
|
|
|
|
|
|
"metadata": {
|
|
|
|
|
|
"kernelspec": {
|
|
|
|
|
|
"display_name": "quant",
|
|
|
|
|
|
"language": "python",
|
|
|
|
|
|
"name": "python3"
|
|
|
|
|
|
},
|
|
|
|
|
|
"language_info": {
|
|
|
|
|
|
"codemirror_mode": {
|
|
|
|
|
|
"name": "ipython",
|
|
|
|
|
|
"version": 3
|
|
|
|
|
|
},
|
|
|
|
|
|
"file_extension": ".py",
|
|
|
|
|
|
"mimetype": "text/x-python",
|
|
|
|
|
|
"name": "python",
|
|
|
|
|
|
"nbconvert_exporter": "python",
|
|
|
|
|
|
"pygments_lexer": "ipython3",
|
|
|
|
|
|
"version": "3.12.11"
|
|
|
|
|
|
}
|
|
|
|
|
|
},
|
|
|
|
|
|
"nbformat": 4,
|
|
|
|
|
|
"nbformat_minor": 5
|
|
|
|
|
|
}
|