{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Transformar os dados RAW (Binance BTCUSDT, all trades) -> OHCL"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "raw_dataset_path = '../datasets/BTCUSDT-Trades/'\n",
    "output_path = '../output'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from loguru import logger\n",
    "import dask.dataframe as dd\n",
    "from dask.diagnostics import ProgressBar\n",
    "import pandas as pd\n",
    "\n",
    "logger.remove() \n",
    "logger.add(lambda msg: print(msg, end=\"\"), level=\"INFO\")\n",
    "\n",
    "from utils import create_dollar_bars\n",
    "\n",
    "def dataSampler(barType=\"standard\", samplerType=\"time\", samplerAmount=100, maxRecords=None):\n",
    "    barTypeDictionary = {\n",
    "        \"standard\": \"Padrão\",\n",
    "        \"imbalance\": \"de Desequilíbrio\",\n",
    "        \"run\": \"de Ordem Iceberg\"\n",
    "    }\n",
    "    samplerDictionary = {\n",
    "        \"time\": \"Temporal\",\n",
    "        \"ticks\": \"Ticks\",\n",
    "        \"volume\": \"Volume\",\n",
    "        \"dollar\": \"Dollar\"\n",
    "    }\n",
    "    \n",
    "    output_directory = os.path.join(output_path, f\"{samplerType}-bars-[{samplerAmount}]\")\n",
    "    if not os.path.exists(output_directory):\n",
    "        os.makedirs(output_directory)\n",
    "        logger.info(f\"Diretório criado: {output_directory}\")\n",
    "    \n",
    "    print(f\"Criando Barras {samplerDictionary[samplerType]} {barTypeDictionary[barType]} agrupadas a cada {samplerAmount}...\")       \n",
    "\n",
    "    # Verificar se já existem arquivos .parquet no diretório de saída\n",
    "    parquet_files_output = [f for f in os.listdir(output_directory) if f.endswith('.parquet')]\n",
    "    if parquet_files_output:\n",
    "        logger.info(f\"'{output_directory}' já existe e contém arquivos .parquet. Carregando de {output_directory}...\")\n",
    "        \n",
    "        # Carregar todos os arquivos .parquet usando Dask\n",
    "        try:\n",
    "            bars = dd.read_parquet(os.path.join(output_directory, '*.parquet')).compute()\n",
    "            logger.info(\"'dollar_bars' carregado com sucesso.\")\n",
    "            return bars\n",
    "        except Exception as e:\n",
    "            logger.error(f\"Erro ao carregar arquivos .parquet: {e}\")\n",
    "    \n",
    "    logger.info(\"Criando 'dollar_bars'...\")\n",
    "\n",
    "    dollar_bars_path = os.path.join(output_directory, 'dollar_bars.parquet')\n",
    "\n",
    "    # Obter a lista de todos os arquivos Parquet no raw_dataset_path\n",
    "    parquet_files = [os.path.join(raw_dataset_path, f) for f in os.listdir(raw_dataset_path) if f.endswith('.parquet')]\n",
    "    parquet_files.sort()\n",
    "    \n",
    "    if not parquet_files:\n",
    "        logger.warning(f\"Nenhum arquivo .parquet encontrado em '{raw_dataset_path}'.\")\n",
    "        return []\n",
    "    \n",
    "    logger.info(f\"Total de arquivos .parquet a serem processados: {len(parquet_files)}\")\n",
    "    \n",
    "    # Carregar todos os arquivos .parquet usando Dask diretamente\n",
    "    try:\n",
    "        df_dask = dd.read_parquet(os.path.join(raw_dataset_path, '*.parquet'))\n",
    "        logger.info(\"Todos os arquivos .parquet foram carregados com sucesso.\")\n",
    "    except Exception as e:\n",
    "        logger.error(f\"Erro ao carregar arquivos .parquet: {e}\")\n",
    "        return []\n",
    "    \n",
    "    # Se maxRecords estiver definido, limitar o DataFrame\n",
    "    if maxRecords is not None:\n",
    "        df_dask = df_dask.head(maxRecords, compute=False)\n",
    "        logger.info(f\"Limite de registros definido para {maxRecords}.\")\n",
    "    \n",
    "    # Criar e salvar 'dollar_bars'\n",
    "    try:\n",
    "        dollar_bars = create_dollar_bars(df_dask, samplerAmount, dollar_bars_path)\n",
    "        logger.info(\"'dollar_bars' criado e salvo com sucesso.\")\n",
    "        return dollar_bars\n",
    "    except Exception as e:\n",
    "        logger.error(f\"Erro ao criar 'dollar_bars': {e}\")\n",
    "        return []\n"
   ]
  },
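  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The `create_dollar_bars` helper imported from `utils` is not defined in this notebook. The next cell is a minimal, self-contained sketch of how a dollar-bar builder can work, assuming Binance-style trade columns `price`, `qty` and `time`. It is illustrative only, not the actual `utils` implementation, which operates on a Dask DataFrame and persists the result to Parquet."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reference sketch only -- NOT the actual utils.create_dollar_bars.\n",
    "# Assumed input: a pandas DataFrame of trades with 'price', 'qty'\n",
    "# and 'time' columns (the usual Binance trade-dump names).\n",
    "import pandas as pd\n",
    "\n",
    "def sketch_dollar_bars(trades: pd.DataFrame, dollar_threshold: float) -> pd.DataFrame:\n",
    "    # Dollar value traded on each tick\n",
    "    dollar_value = trades['price'] * trades['qty']\n",
    "    # A new bar starts whenever the running dollar value crosses the threshold\n",
    "    bar_id = (dollar_value.cumsum() // dollar_threshold).astype('int64')\n",
    "    grouped = trades.groupby(bar_id)\n",
    "    bars = pd.DataFrame({\n",
    "        'time_open': grouped['time'].first(),\n",
    "        'time_close': grouped['time'].last(),\n",
    "        'open': grouped['price'].first(),\n",
    "        'high': grouped['price'].max(),\n",
    "        'low': grouped['price'].min(),\n",
    "        'close': grouped['price'].last(),\n",
    "        'volume': grouped['qty'].sum(),\n",
    "        'dollar_volume': dollar_value.groupby(bar_id).sum()\n",
    "    })\n",
    "    return bars.reset_index(drop=True)"
   ]
  },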
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Realiza Amostragem"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "serieBars = dataSampler(\n",
    "    barType=\"standard\",\n",
    "    samplerType=\"dollar\",\n",
    "    samplerAmount=10_000_000\n",
    ")\n",
    "\n",
    "sample_bars = serieBars.head()\n",
    "display(sample_bars)"
   ]
  }
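,
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick sanity check on the sampled bars. `describe()` is used here so that nothing has to be assumed about the exact column names produced by `create_dollar_bars`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "\n",
    "# dataSampler returns an empty list on failure and a DataFrame on success\n",
    "if isinstance(serieBars, pd.DataFrame) and not serieBars.empty:\n",
    "    print(f'Total bars: {len(serieBars)}')\n",
    "    display(serieBars.describe())\n",
    "else:\n",
    "    print('No bars were produced; check the logs above.')"
   ]
  }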
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}