id
int64 0
458k
| file_name
stringlengths 4
119
| file_path
stringlengths 14
227
| content
stringlengths 24
9.96M
| size
int64 24
9.96M
| language
stringclasses 1
value | extension
stringclasses 14
values | total_lines
int64 1
219k
| avg_line_length
float64 2.52
4.63M
| max_line_length
int64 5
9.91M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 7
101
| repo_stars
int64 100
139k
| repo_forks
int64 0
26.4k
| repo_open_issues
int64 0
2.27k
| repo_license
stringclasses 12
values | repo_extraction_date
stringclasses 433
values |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2,288,000 | pota.py | cwhelchel_hunterlog/src/pota/pota.py | import requests
import logging as L
import urllib.parse
from utils.callsigns import get_basecall
from version import __version__
logging = L.getLogger("potaApi")
SPOT_URL = "https://api.pota.app/spot/activator"
SPOT_COMMENTS_URL = "https://api.pota.app/spot/comments/{act}/{park}"
ACTIVATOR_URL = "https://api.pota.app/stats/user/{call}"
PARK_URL = "https://api.pota.app/park/{park}"
LOCATIONS_URL = "https://api.pota.app/programs/locations/"
POST_SPOT_URL = "https://api.pota.app/spot/"
class Api():
    '''Class that calls the POTA endpoints and returns their results'''

    # requests has no default timeout; without one a stalled connection
    # hangs the caller forever.
    TIMEOUT = 10  # seconds

    def get_spots(self):
        '''Return all current spots from the POTA API, or None on HTTP error.'''
        response = requests.get(SPOT_URL, timeout=Api.TIMEOUT)
        if response.status_code == 200:
            return response.json()
        return None

    def get_spot_comments(self, activator, park):
        '''
        Return all spot + comments from a given activation.

        :param str activator: Full call of activator including stroke pre and
            suffixes. Will be URL encoded for the request.
        :param str park: the park reference.
        :returns: parsed JSON response, or None on HTTP error.
        '''
        quoted = urllib.parse.quote_plus(activator)
        url = SPOT_COMMENTS_URL.format(act=quoted, park=park)
        response = requests.get(url, timeout=Api.TIMEOUT)
        if response.status_code == 200:
            return response.json()
        return None

    def get_activator_stats(self, activator: str):
        '''Return the POTA user stats for the given activator callsign,
        or None on HTTP error. (Docstring fixed: previously a copy-paste
        of get_spot_comments.)'''
        s = get_basecall(activator)
        url = ACTIVATOR_URL.format(call=s)
        response = requests.get(url, timeout=Api.TIMEOUT)
        if response.status_code == 200:
            return response.json()
        return None

    def get_park(self, park_ref: str):
        '''Return park info for the given reference, or None on HTTP error.'''
        url = PARK_URL.format(park=park_ref)
        response = requests.get(url, timeout=Api.TIMEOUT)
        if response.status_code == 200:
            return response.json()
        return None

    @staticmethod
    def get_locations():
        '''
        Return the POTA programs/locations data, or None on HTTP error.
        This file is quite large.
        '''
        response = requests.get(LOCATIONS_URL, timeout=Api.TIMEOUT)
        if response.status_code == 200:
            return response.json()
        return None

    @staticmethod
    def post_spot(activator_call: str, park_ref: str,
                  freq: str, mode: str,
                  spotter_call: str, spotter_comments: str):
        '''
        Posts a spot to the POTA spot endpoint. Adding or re-spotting a
        activation.

        :param str activator_call: callsign being spotted.
        :param str park_ref: the park reference being activated.
        :param str freq: frequency string.
        :param str mode: operating mode.
        :param str spotter_call: callsign of the spotter.
        :param str spotter_comments: free-text comment for the spot.
        '''
        url = POST_SPOT_URL
        # Mimic the browser headers the pota.app frontend sends.
        headers = {
            "accept": "application/json, text/plain, */*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "Content-Type": "application/json",
            "origin": "https://pota.app",
            "referer": "https://pota.app/",
            'user-agent': f"hunterlog/{__version__}"
        }
        json_data = {
            'activator': activator_call,
            'spotter': spotter_call,
            'frequency': freq,
            'reference': park_ref,
            'mode': mode,
            'source': 'hunterlog',
            'comments': spotter_comments
        }
        r = requests.post(url=url, json=json_data, headers=headers,
                          timeout=Api.TIMEOUT)
        # Lazy %-style args avoid formatting when DEBUG is disabled.
        logging.debug("code: %s : %s", r.status_code, r.reason)
| 3,283 | Python | .py | 87 | 28.793103 | 77 | 0.59038 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,001 | stats.py | cwhelchel_hunterlog/src/pota/stats.py | import csv
import os
from dataclasses import dataclass
@dataclass
class LocationStat:
    '''Counters for a single location (keyed elsewhere by its HASC code).'''
    hunts: int  # number of hunter rows counted for this location
    activations: int  # number of activation rows counted for this location
class PotaStats:
    '''
    This class exposes some POTA statistics calculated from the user's hunter
    and activator csv files.
    '''

    def __init__(self, hunt_file: str, act_file: str = '') -> None:
        '''
        :param str hunt_file: path to the hunter parks CSV export.
        :param str act_file: optional path to the activator parks CSV export.
        '''
        self.activated_parks = []  # park references the user has activated
        self.hunted_parks = []  # park references the user has hunted
        # per-location (HASC code) hunt/activation counters
        self.loc_stats: dict[str, LocationStat] = {}
        # hunter QSO count per park reference
        self.hunted_park_stats: dict[str, int] = {}
        self._get_activations_csv(act_file)
        self._get_hunts_csv(hunt_file)

    def has_hunted(self, ref: str) -> bool:
        '''Returns true if the user has hunted the given POTA reference'''
        return ref in self.hunted_parks

    def has_activated(self, ref: str) -> bool:
        '''Returns true if the user has activated the given POTA reference'''
        return ref in self.activated_parks

    def get_hunt_count(self, location: str) -> int:
        '''Returns number of hunted references in a given location'''
        if location in self.loc_stats:
            return self.loc_stats[location].hunts
        return 0

    def get_park_hunt_count(self, park: str) -> int:
        '''Returns number of hunter QSOs for a given park'''
        return self.hunted_park_stats.get(park, 0)

    def get_actx_count(self, location: str) -> int:
        '''Returns number of activated references in a given location'''
        if location in self.loc_stats:
            return self.loc_stats[location].activations
        return 0

    def get_all_hunts(self) -> list[str]:
        '''Returns a list of all the hunted parks'''
        return self.hunted_parks

    def _get_activations_csv(self, act_file: str):
        '''
        Read activations downloaded from EXPORT CSV on Users POTA My Stats page
        see https://pota.app/#/user/stats
        '''
        if not os.path.exists(act_file):
            return
        with open(act_file, encoding='utf-8') as csv_file:
            csv_reader = csv.DictReader(csv_file, delimiter=',')
            skip_headers = True
            for row in csv_reader:
                # NOTE(review): DictReader already consumes the header row,
                # so this also skips the first *data* row — looks like a bug,
                # preserved as-is; confirm against a real export file.
                if skip_headers:
                    skip_headers = False
                    continue
                location = row["HASC"]
                self._inc_activations(location)
                self.activated_parks.append(row['Reference'])

    def _get_hunts_csv(self, hunt_file: str):
        '''
        Read hunted parks downloaded from EXPORT CSV button on the POTA User's
        My Stats page.
        - see https://pota.app/#/user/stats
        '''
        if not os.path.exists(hunt_file):
            return
        with open(hunt_file, encoding='utf-8') as csv_file:
            csv_reader = csv.DictReader(csv_file, delimiter=',')
            skip_headers = True
            for row in csv_reader:
                # NOTE(review): same header-skip concern as in
                # _get_activations_csv above.
                if skip_headers:
                    skip_headers = False
                    continue
                park = row['Reference']
                location = row["HASC"]
                self._inc_hunts(location)
                self.hunted_parks.append(park)
                # BUG FIX: csv values are strings; hunted_park_stats is
                # annotated dict[str, int] and get_park_hunt_count() returns
                # int, so convert here (empty cell counts as 0).
                self.hunted_park_stats[park] = int(row['QSOs'] or 0)

    def _inc_hunts(self, location: str):
        # Bump the hunt counter, creating the location entry on first sight.
        if location in self.loc_stats:
            self.loc_stats[location].hunts += 1
        else:
            self.loc_stats[location] = LocationStat(1, 0)

    def _inc_activations(self, location: str):
        # Bump the activation counter, creating the entry on first sight.
        if location in self.loc_stats:
            self.loc_stats[location].activations += 1
        else:
            self.loc_stats[location] = LocationStat(0, 1)
| 3,876 | Python | .py | 97 | 29.391753 | 79 | 0.575951 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,002 | __init__.py | cwhelchel_hunterlog/src/alembic_src/__init__.py | # import logging
# log = logging.basicConfig(filename='alembic.log',
# encoding='utf-8',
# format='%(asctime)s %(message)s',
# level=logging.DEBUG) | 208 | Python | .py | 5 | 40.6 | 55 | 0.507389 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,003 | script.py.mako | cwhelchel_hunterlog/src/alembic_src/script.py.mako | """${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision: str = ${repr(up_revision)}
down_revision: Union[str, None] = ${repr(down_revision)}
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
def upgrade() -> None:
${upgrades if upgrades else "pass"}
def downgrade() -> None:
${downgrades if downgrades else "pass"}
| 635 | Python | .py | 18 | 33.388889 | 71 | 0.720854 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,004 | env.py | cwhelchel_hunterlog/src/alembic_src/env.py | # from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
# if config.config_file_name is not None:
# fileConfig(config.config_file_name,
# disable_existing_loggers=False) # cmw added for module level use
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline() -> None:
    """Emit migration SQL without a live database connection.

    Offline mode configures the Alembic context with only the database
    URL, so no Engine or DBAPI is required; context.execute() calls are
    rendered to the script output instead of being executed.
    """
    context.configure(
        url=config.get_main_option("sqlalchemy.url"),
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online() -> None:
    """Run migrations against a live database connection.

    Builds an Engine from the [sqlalchemy.*] section of the config,
    opens a connection, and runs the migrations in a transaction.
    """
    engine = engine_from_config(
        config.get_section(config.config_ini_section, {}),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )
    with engine.connect() as conn:
        context.configure(connection=conn, target_metadata=target_metadata)
        with context.begin_transaction():
            context.run_migrations()
# Alembic invokes env.py in one of two modes: offline renders SQL without
# a database connection; online executes against a live engine.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| 2,193 | Python | .py | 59 | 32.491525 | 82 | 0.71334 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,005 | d087ce5d50a6_add_loc_hunts_col.py | cwhelchel_hunterlog/src/alembic_src/versions/d087ce5d50a6_add_loc_hunts_col.py | """add loc_hunts col
Revision ID: d087ce5d50a6
Revises: 6f1777640ea8
Create Date: 2024-03-05 21:46:31.654161
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = 'd087ce5d50a6'
down_revision: Union[str, None] = '6f1777640ea8'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    # Add a nullable loc_hunts counter column to the spots table.
    op.add_column("spots", sa.Column("loc_hunts", sa.Integer, nullable=True))
def downgrade() -> None:
    # Reverse of upgrade(): remove the loc_hunts column.
    op.drop_column("spots", "loc_hunts")
| 597 | Python | .py | 17 | 33.117647 | 77 | 0.747811 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,006 | 922a7b854b71_fix_spot_source.py | cwhelchel_hunterlog/src/alembic_src/versions/922a7b854b71_fix_spot_source.py | """fix spot source
Revision ID: 922a7b854b71
Revises: dfc792b4b40b
Create Date: 2024-03-25 21:44:36.970584
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = '922a7b854b71'
down_revision: Union[str, None] = 'dfc792b4b40b'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    # cant use alter column bc SQLITE doesn't support modifying columns
    # NOTE: drop + re-add discards any data already in comments.source.
    op.drop_column("comments", "source")
    op.add_column("comments", sa.Column("source", sa.String, nullable=True))
def downgrade() -> None:
    # Restore the original fixed-length String(10) column definition.
    op.drop_column("comments", "source")
    op.add_column("comments", sa.Column("source", sa.String(10), nullable=True))
| 788 | Python | .py | 20 | 36.95 | 80 | 0.741765 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,007 | 6f1777640ea8_add_location_stats.py | cwhelchel_hunterlog/src/alembic_src/versions/6f1777640ea8_add_location_stats.py | """add location stats
Revision ID: 6f1777640ea8
Revises: de225609f3b5
Create Date: 2024-03-04 15:36:28.763615
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = '6f1777640ea8'
down_revision: Union[str, None] = 'de225609f3b5'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    # Create the locations table mirroring the POTA locations.json layout.
    op.create_table(
        "locations",
        # locations.json lowest lvl obj
        sa.Column('locationId', sa.Integer, primary_key=True),
        sa.Column('descriptor', sa.String),
        sa.Column('name', sa.String),
        sa.Column('latitude', sa.Float),
        sa.Column('longitude', sa.Float),
        sa.Column('parks', sa.Integer),
        # ancestor data
        sa.Column('entityId', sa.Integer),
        sa.Column('programId', sa.Integer),
    )
def downgrade() -> None:
    # Reverse of upgrade(): drop the whole table.
    op.drop_table("locations")
| 977 | Python | .py | 29 | 28.931034 | 62 | 0.678381 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,008 | ccdd7352ed32_add_rig_interface_type.py | cwhelchel_hunterlog/src/alembic_src/versions/ccdd7352ed32_add_rig_interface_type.py | """add rig interface type
Revision ID: ccdd7352ed32
Revises: 44ec51a942f9
Create Date: 2024-04-16 08:57:36.695892
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = 'ccdd7352ed32'
down_revision: Union[str, None] = '44ec51a942f9'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    # currently accepted values are "flrig" and "rigctld"
    # server_default backfills existing config rows with "flrig".
    op.add_column("config", sa.Column("rig_if_type", sa.String, server_default="flrig"))
def downgrade() -> None:
    # Reverse of upgrade(): remove the rig_if_type column.
    op.drop_column("config", "rig_if_type")
| 674 | Python | .py | 18 | 35.277778 | 88 | 0.744977 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,009 | 53ee59dfaf8f_more_spot_data.py | cwhelchel_hunterlog/src/alembic_src/versions/53ee59dfaf8f_more_spot_data.py | """more spot data
Revision ID: 53ee59dfaf8f
Revises: 5daac9aa5d91
Create Date: 2024-03-13 19:15:22.059262
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = '53ee59dfaf8f'
down_revision: Union[str, None] = '5daac9aa5d91'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    # Add nullable activator-comments and CW words-per-minute columns.
    op.add_column("spots", sa.Column("act_cmts", sa.String, nullable=True))
    op.add_column("spots", sa.Column("cw_wpm", sa.Integer, nullable=True))
def downgrade() -> None:
    # Reverse of upgrade(): drop both columns.
    op.drop_column("spots", "act_cmts")
    op.drop_column("spots", "cw_wpm")
| 704 | Python | .py | 19 | 34.736842 | 75 | 0.730769 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,010 | 5daac9aa5d91_add_loc_total_col.py | cwhelchel_hunterlog/src/alembic_src/versions/5daac9aa5d91_add_loc_total_col.py | """add loc total col
Revision ID: 5daac9aa5d91
Revises: d087ce5d50a6
Create Date: 2024-03-06 15:16:32.199996
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = '5daac9aa5d91'
down_revision: Union[str, None] = 'd087ce5d50a6'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    # Add a nullable loc_total counter column to the spots table.
    op.add_column("spots", sa.Column("loc_total", sa.Integer, nullable=True))
def downgrade() -> None:
    # Reverse of upgrade(): remove the loc_total column.
    op.drop_column("spots", "loc_total")
| 601 | Python | .py | 17 | 33.117647 | 77 | 0.742609 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,011 | dfc792b4b40b_add_logger_config.py | cwhelchel_hunterlog/src/alembic_src/versions/dfc792b4b40b_add_logger_config.py | """add logger config
Revision ID: dfc792b4b40b
Revises: 53ee59dfaf8f
Create Date: 2024-03-16 18:35:12.835407
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = 'dfc792b4b40b'
down_revision: Union[str, None] = '53ee59dfaf8f'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    # Add a nullable logger_type selector to the config table.
    op.add_column("config", sa.Column("logger_type", sa.Integer, nullable=True))
def downgrade() -> None:
    # Reverse of upgrade(): remove the logger_type column.
    op.drop_column("config", "logger_type")
| 603 | Python | .py | 17 | 33.470588 | 80 | 0.752166 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,012 | __init__.py | cwhelchel_hunterlog/src/alembic_src/versions/__init__.py | '''copied from https://stackoverflow.com/a/74875605'''
"""DB migrations"""
from pathlib import Path
import os
import sys
from alembic.config import Config
from alembic import command
from alembic import config
def get_app_global_path():
    '''Return the base directory used to locate the bundled alembic_src.

    PyInstaller bundles extract to a temporary dir exposed as
    sys._MEIPASS; when running from source this file lives in
    src/alembic_src/versions, so walk up from here instead.
    '''
    if getattr(sys, 'frozen', False) and getattr(sys, '_MEIPASS', False):
        return sys._MEIPASS
    elif __file__:
        # BUG FIX: os.path.dirname() has no trailing separator, so the old
        # `dirname(__file__) + "../../../"` produced a non-existent path
        # like ".../versions../../../". Join the components properly.
        # TODO(review): confirm three levels up is the intended base for
        # Path(<base>, "alembic_src") used by the module-level config.
        return os.path.abspath(
            os.path.join(os.path.dirname(__file__), "..", "..", ".."))
# We don't want to depend on alembic.ini when running programmatically, so
# build the Config in code — with the correct base dir when frozen.
# script_location -> <base>/alembic_src; database -> local SQLite spots.db.
alembic_cfg = Config()
src_path = Path(get_app_global_path(), "alembic_src")
alembic_cfg.set_main_option("script_location", str(src_path))
alembic_cfg.set_main_option("sqlalchemy.url", "sqlite:///spots.db")
def current(verbose=False):
    # Report the database's current migration revision.
    command.current(alembic_cfg, verbose=verbose)
def upgrade(revision="head"):
    # Apply migrations up to *revision* (defaults to the latest, "head").
    command.upgrade(alembic_cfg, revision)
def downgrade(revision):
    # Revert migrations down to *revision*.
    command.downgrade(alembic_cfg, revision)
def ensure_versions():
    # Make sure Alembic's version table exists in the database.
    command.ensure_version(alembic_cfg)
2,288,013 | 3c58d5fd6e23_add_win_size_cfg.py | cwhelchel_hunterlog/src/alembic_src/versions/3c58d5fd6e23_add_win_size_cfg.py | """add win size cfg
Revision ID: 3c58d5fd6e23
Revises: ac1e18c1148f
Create Date: 2024-04-10 13:25:32.699482
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = '3c58d5fd6e23'
down_revision: Union[str, None] = 'ac1e18c1148f'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    # Persist main-window geometry; server defaults backfill existing rows
    # with an 800x600, non-maximized window.
    op.add_column("config", sa.Column("size_x", sa.Integer, server_default=str(800)))
    op.add_column("config", sa.Column("size_y", sa.Integer, server_default=str(600)))
    op.add_column("config", sa.Column("is_max", sa.Boolean, server_default=str(0)))
def downgrade() -> None:
    # Reverse of upgrade(): drop all three geometry columns.
    op.drop_column("config", "size_x")
    op.drop_column("config", "size_y")
    op.drop_column("config", "is_max")
| 851 | Python | .py | 21 | 37.904762 | 85 | 0.718293 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,014 | ac1e18c1148f_add_bearing.py | cwhelchel_hunterlog/src/alembic_src/versions/ac1e18c1148f_add_bearing.py | """add bearing
Revision ID: ac1e18c1148f
Revises: 922a7b854b71
Create Date: 2024-04-10 11:10:44.670763
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = 'ac1e18c1148f'
down_revision: Union[str, None] = '922a7b854b71'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    # Add a nullable float bearing column to the qsos table.
    op.add_column("qsos", sa.Column("bearing", sa.Float, nullable=True))
def downgrade() -> None:
    # Reverse of upgrade(): remove the bearing column.
    op.drop_column("qsos", "bearing")
| 583 | Python | .py | 17 | 32.294118 | 72 | 0.748654 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,015 | f01009b22b92_add_win_pos_cfg.py | cwhelchel_hunterlog/src/alembic_src/versions/f01009b22b92_add_win_pos_cfg.py | """add_win_pos_cfg
Revision ID: f01009b22b92
Revises: ccdd7352ed32
Create Date: 2024-04-29 19:53:08.433752
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = 'f01009b22b92'
down_revision: Union[str, None] = 'ccdd7352ed32'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    # Persist main-window position (no server default; existing rows NULL).
    op.add_column("config", sa.Column("pos_x", sa.Integer))
    op.add_column("config", sa.Column("pos_y", sa.Integer))
def downgrade() -> None:
    # Reverse of upgrade(): drop both position columns.
    op.drop_column("config", "pos_x")
    op.drop_column("config", "pos_y")
| 672 | Python | .py | 19 | 33.052632 | 59 | 0.725155 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,016 | de225609f3b5_.py | cwhelchel_hunterlog/src/alembic_src/versions/de225609f3b5_.py | """empty message
Revision ID: de225609f3b5
Revises:
Create Date: 2024-03-02 18:47:24.752899
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = 'de225609f3b5'
down_revision: Union[str, None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    # Add a nullable is_qrt boolean to the spots table.
    # (Redundant trailing `pass` removed.)
    op.add_column('spots', sa.Column('is_qrt', sa.Boolean))


def downgrade() -> None:
    # BUG FIX: Alembic's op.drop_column() takes the column *name* as a
    # string; the original passed a sa.Column object, which is not the
    # documented signature.
    op.drop_column('spots', 'is_qrt')
| 591 | Python | .py | 19 | 28.736842 | 60 | 0.731794 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,017 | db9920460979_add_rig_mode_opts.py | cwhelchel_hunterlog/src/alembic_src/versions/db9920460979_add_rig_mode_opts.py | """add rig mode opts
Revision ID: db9920460979
Revises: 3c58d5fd6e23
Create Date: 2024-04-11 12:45:55.107737
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = 'db9920460979'
down_revision: Union[str, None] = '3c58d5fd6e23'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    # Rig mode strings used for CW and FT-x operation; server defaults
    # backfill existing rows with "CW" and "USB".
    op.add_column("config", sa.Column("cw_mode", sa.String, server_default="CW"))
    op.add_column("config", sa.Column("ftx_mode", sa.String, server_default="USB"))
def downgrade() -> None:
    # Reverse of upgrade(): drop both mode columns.
    op.drop_column("config", "cw_mode")
    op.drop_column("config", "ftx_mode")
2,288,018 | 44ec51a942f9_add_config_qthmsg.py | cwhelchel_hunterlog/src/alembic_src/versions/44ec51a942f9_add_config_qthmsg.py | """add config qthmsg
Revision ID: 44ec51a942f9
Revises: db9920460979
Create Date: 2024-04-15 14:11:31.707574
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = '44ec51a942f9'
down_revision: Union[str, None] = 'db9920460979'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    # Add a nullable qth_string column to the config table.
    op.add_column("config", sa.Column("qth_string", sa.String, nullable=True))
def downgrade() -> None:
    # Reverse of upgrade(): remove the qth_string column.
    op.drop_column("config", "qth_string")
| 600 | Python | .py | 17 | 33.294118 | 78 | 0.750871 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,019 | you2text.py | jhnwnstd_linguist_toolkit/you2text.py | import yt_dlp
from pathlib import Path
from typing import Optional
import re
import unicodedata
# Pre-compiled regex patterns for filename sanitization
ILLEGAL_CHAR_PATTERN = re.compile(r'[^\w\s-]')
SPACE_PATTERN = re.compile(r'\s+')
YOUTUBE_URL_REGEX = re.compile(
r'^(https?://)?(www\.)?'
r'(youtube\.com/watch\?v=|youtube\.[a-z]{2,3}/watch\?v=|youtu\.be/)'
r'[^&\s]+$',
re.IGNORECASE
)
def is_valid_youtube_url(url: str) -> bool:
    """Check whether *url* matches the supported YouTube watch-URL forms."""
    return YOUTUBE_URL_REGEX.match(url) is not None
def sanitize_filename(title: str, max_length=255) -> str:
    """Build a filesystem-safe filename from *title*.

    Normalizes Unicode (NFKD), strips illegal characters, collapses
    whitespace to underscores, trims leading/trailing dots, truncates to
    *max_length* UTF-8 bytes without splitting a character, and returns
    the NFC-normalized result.
    """
    name = unicodedata.normalize('NFKD', title)
    name = ILLEGAL_CHAR_PATTERN.sub('', name)
    name = SPACE_PATTERN.sub('_', name).strip(".")
    truncated = name.encode('utf-8')[:max_length]
    name = truncated.decode('utf-8', 'ignore').rstrip('_')
    return unicodedata.normalize('NFC', name)
def download_subtitles(
    video_url: str,
    output_dir: Path,
    cookies_file: Optional[Path] = None,
    lang: str = 'en',
    fmt: str = 'vtt',
    verbose: bool = False
) -> tuple:
    '''
    Downloads subtitles for a YouTube video using yt-dlp.
    Tries to download manual subtitles; if not available, falls back to auto-generated subtitles.
    Args:
        video_url (str): The URL of the YouTube video.
        output_dir (Path): Directory where the subtitle file will be saved.
        cookies_file (Optional[Path]): Path to the cookies.txt file for authentication.
        lang (str): Subtitle language code (default: 'en').
        fmt (str): Subtitle format ('vtt' or 'srt').
        verbose (bool): Enable verbose output.
    Returns:
        tuple: (success (bool), message (str))
    '''
    if not is_valid_youtube_url(video_url):
        return False, f"Invalid URL: {video_url}"
    output_dir.mkdir(parents=True, exist_ok=True)
    ydl_opts = {
        'skip_download': True,  # subtitles only; do not fetch the media
        'writesubtitles': True,  # request manually-authored subtitles
        'writeautomaticsub': True,  # also allow auto-generated captions
        'subtitlesformat': fmt,
        'subtitleslangs': [lang],
        'outtmpl': str(output_dir / '%(title)s.%(ext)s'),
        'quiet': not verbose,
        'no_warnings': not verbose,
        'cookiefile': str(cookies_file) if cookies_file else None,
    }
    try:
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            # First pass: metadata only, to see which subtitle tracks exist.
            info_dict = ydl.extract_info(video_url, download=False)
            title = sanitize_filename(info_dict.get('title', 'video'))
            # NOTE(review): outtmpl names files from the *raw* title (and
            # subtitle output usually carries a language suffix, e.g.
            # ".en.vtt"), so this path may not match the file actually
            # written — TODO confirm before relying on it.
            subtitle_file = output_dir / f"{title}.{fmt}"
            subtitles = info_dict.get('subtitles', {})
            auto_captions = info_dict.get('automatic_captions', {})
            # NOTE(review): computed but never used — candidate for removal.
            available_langs = set(subtitles.keys()) | set(auto_captions.keys())
            if lang in subtitles:
                if verbose:
                    print(f"Manual subtitles found for language '{lang}', downloading...")
            elif lang in auto_captions:
                if verbose:
                    print(f"Auto-generated subtitles found for language '{lang}', downloading...")
            else:
                return False, f"No subtitles found for language '{lang}'."
            # Second pass: actually download the subtitle file(s).
            ydl.download([video_url])
            if verbose:
                print(f"Subtitles downloaded to {subtitle_file.resolve()}")
            return True, f"Downloaded subtitles for: {title}"
    except yt_dlp.utils.DownloadError as e:
        return False, f"Error downloading subtitles for {video_url}: {e}"
def download_subtitles_from_file(
    file_path: str,
    output_dir: Path,
    cookies_file: Optional[Path] = None,
    lang: str = 'en',
    fmt: str = 'vtt',
    verbose: bool = False
):
    """Download subtitles for every valid YouTube URL listed in a text file.

    Creates the file when it is missing, skips blank or invalid lines,
    and prints a numbered success/failure summary per URL.
    """
    url_file = Path(file_path)
    if not url_file.exists():
        print(f"'{url_file.name}' not found. Creating the file for you to add video URLs.")
        url_file.touch()
        return
    lines = url_file.read_text().splitlines()
    if not lines:
        print(f"'{url_file.name}' is empty. Please add some video URLs to it.")
        return
    candidates = [line.strip() for line in lines if is_valid_youtube_url(line.strip())]
    if not candidates:
        print(f"No valid video URLs found in '{url_file.name}'.")
        return
    print(f"Starting subtitle downloads for {len(candidates)} valid video URL(s)...")
    for position, video_url in enumerate(candidates, start=1):
        ok, message = download_subtitles(
            video_url,
            output_dir,
            cookies_file=cookies_file,
            lang=lang,
            fmt=fmt,
            verbose=verbose
        )
        outcome = "Success" if ok else "Failed"
        print(f"{position}. [{outcome}] {message}")
    print("Subtitle download session completed.")
def get_cookies_file():
    """Locate a cookies.txt file, prompting the user where appropriate.

    Offers the cookies.txt in the current directory when present,
    otherwise asks for a path. Returns a Path, or None when the user
    skips or the given file does not exist.
    """
    default_cookies = Path.cwd() / "cookies.txt"
    if default_cookies.exists():
        answer = input("Found 'cookies.txt' in the current directory. Use it? (y/n): ").strip().lower()
        if answer == 'y':
            return default_cookies
    else:
        print("No 'cookies.txt' found in the current directory.")
    entered = input("Enter the path to your 'cookies.txt' file (or press Enter to skip): ").strip()
    if not entered:
        print("Proceeding without cookies. Note: Some subtitles may not download without authentication.")
        return None
    candidate = Path(entered)
    if candidate.exists():
        return candidate
    print(f"Cookies file '{candidate}' not found.")
    return None
def run_ui():
    """User Interface function to handle user input and execute corresponding actions."""
    print("\nWelcome to the YouTube Subtitle Downloader")
    # Menu loop: repeats until the user picks option 3 or types "quit".
    while True:
        print("\nSelect an option:")
        print("(1) Download subtitles for a YouTube video")
        print("(2) Download subtitles for videos from urls.txt")
        print("(3) Quit")
        user_input = input("Enter 1, 2, or 3: ").strip().lower()
        if user_input == '3' or user_input == 'quit':
            print("Exiting the program. Goodbye!")
            break
        elif user_input == '2':
            # Batch mode: URLs are read from urls.txt in the working dir.
            file_path = Path.cwd() / "urls.txt"
            cookies_file = get_cookies_file()
            lang = input("Enter subtitle language code (e.g., 'en' for English): ").strip() or 'en'
            fmt = input("Enter subtitle format ('vtt' or 'srt'): ").strip().lower() or 'vtt'
            output_dir = Path(input("Enter output directory (default: 'Subtitles'): ").strip() or 'Subtitles')
            # NOTE(review): verbose is left at its default (False) here.
            download_subtitles_from_file(
                str(file_path),
                output_dir,
                cookies_file=cookies_file,
                lang=lang,
                fmt=fmt
            )
        elif user_input == '1':
            # Single-video mode: validate the URL before prompting further.
            video_url = input("Enter the YouTube video URL: ").strip()
            if is_valid_youtube_url(video_url):
                cookies_file = get_cookies_file()
                lang = input("Enter subtitle language code (e.g., 'en' for English): ").strip() or 'en'
                fmt = input("Enter subtitle format ('vtt' or 'srt'): ").strip().lower() or 'vtt'
                output_dir = Path(input("Enter output directory (default: 'Subtitles'): ").strip() or 'Subtitles')
                success, message = download_subtitles(
                    video_url,
                    output_dir,
                    cookies_file=cookies_file,
                    lang=lang,
                    fmt=fmt
                )
                print(message if success else f"Failed to download subtitles: {message}")
            else:
                print("Invalid URL. Please enter a valid YouTube video URL.")
        else:
            print("Invalid option. Please enter 1, 2, or 3.")
if __name__ == "__main__":
run_ui()
| 8,016 | Python | .py | 180 | 35.522222 | 114 | 0.603585 | jhnwnstd/linguist_toolkit | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,020 | audio_converter.py | jhnwnstd_linguist_toolkit/audio_converter.py | import yt_dlp
from pathlib import Path
import unicodedata
import re
# Configuration options
VERBOSE = False # Toggle True for verbose output
# Pre-compiled regex patterns for filename sanitization
ILLEGAL_CHAR_PATTERN = re.compile(r'[^\w\s-]')
SPACE_PATTERN = re.compile(r'\s+')
YOUTUBE_URL_REGEX = re.compile(
r'^(https?://)?(www\.)?(youtube\.com/watch\?v=|youtube\.[a-z]{2,3}/watch\?v=|youtu\.be/)[^&\s]+$',
re.IGNORECASE
)
def is_valid_youtube_url(url: str) -> bool:
    """Validate that *url* is a recognizable YouTube video watch URL."""
    matched = YOUTUBE_URL_REGEX.match(url)
    return matched is not None
def sanitize_filename(title: str, max_length=255) -> str:
    """Sanitize a string to create a safe and clean filename, considering maximum length."""
    # NFKD-decompose first so the character-class pattern sees base chars.
    sanitized_title = unicodedata.normalize('NFKD', title)
    sanitized_title = ILLEGAL_CHAR_PATTERN.sub('', sanitized_title)  # drop chars outside [\w\s-]
    sanitized_title = SPACE_PATTERN.sub('_', sanitized_title).strip(".")  # whitespace -> '_'
    # Truncate by UTF-8 *bytes*; decoding with 'ignore' drops a split char.
    sanitized_title = sanitized_title.encode('utf-8')[:max_length].decode('utf-8', 'ignore').rstrip('_')
    return unicodedata.normalize('NFC', sanitized_title)
def download_and_process_video(video_url: str, folder_name: str = "Downloaded_Videos_Audio", verbose=False, cookies_file=None):
    """
    Download a YouTube video using yt-dlp, merge streams if necessary,
    and extract audio to WAV format.

    Args:
        video_url (str): URL of the YouTube video.
        folder_name (str): Directory the video/audio files are written to.
        verbose (bool): Show yt-dlp output and print the resulting file paths.
        cookies_file: Optional path to a cookies.txt for authenticated downloads.

    Returns:
        tuple[bool, str]: (success flag, human-readable status message).
    """
    if not is_valid_youtube_url(video_url):
        return False, f"Invalid URL: {video_url}"
    download_path = Path(folder_name)
    download_path.mkdir(parents=True, exist_ok=True)
    # Prepare yt-dlp options
    ydl_opts = {
        'format': 'bestvideo+bestaudio/best',
        'outtmpl': str(download_path / '%(title)s.%(ext)s'),
        'quiet': not verbose,
        'no_warnings': not verbose,
        'cookiefile': str(cookies_file) if cookies_file else None,
        'merge_output_format': 'mp4',
        # Fix: keep the merged video file. FFmpegExtractAudio deletes the
        # source file after extraction unless 'keepvideo' is set, so the
        # "Video saved to" message below was previously wrong.
        'keepvideo': True,
        'postprocessors': [
            {
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'wav',
                'preferredquality': '192',
            }
        ],
    }
    try:
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            info_dict = ydl.extract_info(video_url, download=True)
            title = sanitize_filename(info_dict.get('title', 'video'))
            # NOTE(review): outtmpl writes the *raw* title, so these paths can
            # differ from the files on disk when the title needed sanitizing.
            video_file = download_path / f"{title}.mp4"
            audio_file = download_path / f"{title}.wav"
            if verbose:
                print(f"Video saved to: '{video_file}'")
                print(f"Audio extracted to WAV format: '{audio_file}'")
            return True, f"Downloaded and processed: {title}"
    except yt_dlp.utils.DownloadError as e:
        return False, f"Error downloading {video_url}: {e}"
def download_videos_from_file(file_path: str, folder_name: str = "Downloaded_Videos_Audio", verbose=False, cookies_file=None):
    """Read YouTube URLs (one per line) from a text file and download each in turn.

    Creates the file when missing so the user has somewhere to paste URLs;
    prints a numbered status line per URL.
    """
    url_file = Path(file_path)
    if not url_file.exists():
        print(f"'{url_file.name}' not found. Creating the file for you to add video URLs.")
        url_file.touch()
        return
    raw_lines = url_file.read_text().splitlines()
    if not raw_lines:
        print(f"'{url_file.name}' is empty. Please add some video URLs to it.")
        return
    stripped = (line.strip() for line in raw_lines)
    valid_urls = [url for url in stripped if is_valid_youtube_url(url)]
    if not valid_urls:
        print(f"No valid video URLs found in '{url_file.name}'.")
        return
    print(f"Starting downloads for {len(valid_urls)} valid video URL(s)...")
    for index, url in enumerate(valid_urls, start=1):
        success, message = download_and_process_video(
            url, folder_name, verbose=verbose, cookies_file=cookies_file
        )
        status = "Success" if success else "Failed"
        print(f"{index}. [{status}] {message}")
    print("Download session completed.")
def get_cookies_file():
    """Prompt the user to provide the path to the cookies.txt file or use the default in the working directory.

    Returns:
        Path | None: A cookies.txt path, or None to proceed unauthenticated.
    """
    default_cookies_path = Path.cwd() / "cookies.txt"
    if default_cookies_path.exists():
        # Fix: plain string — the prompt has no placeholders (ruff F541).
        use_default = input("Found 'cookies.txt' in the current directory. Use it? (y/n): ").strip().lower()
        if use_default == 'y':
            return default_cookies_path
    else:
        print("No 'cookies.txt' found in the current directory.")
    cookies_path = input("Enter the path to your 'cookies.txt' file (or press Enter to skip): ").strip()
    if cookies_path:
        cookies_file = Path(cookies_path)
        if cookies_file.exists():
            return cookies_file
        else:
            print(f"Cookies file '{cookies_file}' not found.")
    else:
        print("Proceeding without cookies. Note: Some videos may not download without authentication.")
    return None
def run_ui():
    """User Interface function to handle user input and execute corresponding actions."""
    print("\nWelcome to the You2Wav Downloader")
    # Menu loop: repeats until the user chooses to quit.
    while True:
        print("\nSelect an option:")
        print("(1) Download a YouTube video")
        print("(2) Download videos from urls.txt")
        print("(3) Quit")
        user_input = input("Enter 1, 2, or 3: ").strip().lower()
        if user_input == '3' or user_input == 'quit':
            print("Exiting the program. Goodbye!")
            break
        elif user_input == '2':
            # Batch mode: reads URLs from urls.txt in the current directory.
            file_path = Path.cwd() / "urls.txt"
            cookies_file = get_cookies_file()
            download_videos_from_file(str(file_path), "Downloaded_Videos_Audio", cookies_file=cookies_file)
        elif user_input == '1':
            # Single-video mode.
            video_url = input("Enter the YouTube video URL: ").strip()
            if is_valid_youtube_url(video_url):
                cookies_file = get_cookies_file()
                success, message = download_and_process_video(
                    video_url, "Downloaded_Videos_Audio", cookies_file=cookies_file
                )
                print(message if success else f"Failed to download: {message}")
            else:
                print("Invalid URL. Please enter a valid YouTube video URL.")
        else:
            print("Invalid option. Please enter 1, 2, or 3.")
# Entry point when executed as a script.
if __name__ == "__main__":
    run_ui()
2,288,021 | image_to_text.py | jhnwnstd_linguist_toolkit/image_to_text.py | import logging
import multiprocessing
import re
import subprocess
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
import cv2
import numpy as np
import pytesseract
from PIL import Image
from tqdm import tqdm
# Specify the Tesseract command path if it's not in the system's PATH
# pytesseract.pytesseract.tesseract_cmd = r'<path_to_your_tesseract_executable>'
def check_tesseract_installed(min_version=5):
    """Check if Tesseract is installed and meets the minimum version requirement.

    Only the *major* version number is compared. Prints a diagnostic and
    returns True/False rather than raising.
    """
    try:
        # Run the Tesseract command to get its version
        result = subprocess.run(["tesseract", "--version"], capture_output=True, text=True, check=True)
        # Extract the version number from the output
        # (first line looks like "tesseract 5.x.y"; only the major digit is captured).
        match = re.search(r'tesseract (\d+)', result.stdout)
        if match:
            version = int(match.group(1))
            if version >= min_version:
                print(f"Tesseract version {version} is installed.")
                return True
            else:
                print(f"Tesseract version {version} is installed, but version {min_version} or higher is required.")
                return False
        else:
            print("Failed to parse Tesseract version.")
            return False
    except subprocess.CalledProcessError:
        # Binary exists but exited non-zero.
        print("Tesseract is not installed or not found in PATH.")
        return False
    except FileNotFoundError:
        # Binary not on PATH at all.
        print("Tesseract command is not found. Ensure Tesseract is installed and added to your system's PATH.")
        return False
def is_image_file(filename: str) -> bool:
    """Check whether *filename* ends in a recognized image extension (case-insensitive)."""
    extension_pattern = re.compile(r'\.(jpe?g|png|gif|bmp|tiff?)$', re.IGNORECASE)
    return extension_pattern.search(filename) is not None
def preprocess_image(image_path: Path) -> Image.Image:
    """Preprocess the image for improved OCR accuracy.

    Steps: grayscale -> Gaussian blur -> Otsu binarization -> morphological
    dilate/erode -> 150% upscale. Returns a PIL image for pytesseract.
    """
    image_cv = cv2.imread(str(image_path))
    gray = cv2.cvtColor(image_cv, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    # Otsu picks the binarization threshold automatically (the 0 is ignored).
    _, binarized = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # NOTE(review): a 1x1 structuring element makes dilate/erode identity
    # operations — presumably a larger kernel (e.g. 2x2) was intended; confirm.
    kernel = np.ones((1, 1), np.uint8)
    img_dilated = cv2.dilate(binarized, kernel, iterations=1)
    img_eroded = cv2.erode(img_dilated, kernel, iterations=1)
    scale_percent = 150 # percent of original size
    width = int(img_eroded.shape[1] * scale_percent / 100)
    height = int(img_eroded.shape[0] * scale_percent / 100)
    resized = cv2.resize(img_eroded, (width, height), interpolation=cv2.INTER_AREA)
    return Image.fromarray(resized)
def extract_text(image_path: Path, output_file: Path, tesseract_config: str = ''):
    """Extract text from a single image and append it to the output file.

    Runs in worker threads (see extract_text_from_images). Any failure is
    caught and reported rather than propagated.
    """
    try:
        image = preprocess_image(image_path)
        text = pytesseract.image_to_string(image, config=tesseract_config)
        # NOTE(review): multiple threads append to the same file; each chunk is
        # written in one buffered call, but interleaving is not guaranteed by
        # the OS — consider collecting results and writing once. Confirm.
        with output_file.open("a", encoding="utf-8") as file_out:
            file_out.write(f"--- {image_path.name} ---\n{text}\n\n")
        print(f"Processed: {image_path.name}")
    except Exception as e:
        print(f"Failed to process {image_path.name}: {e}")
def extract_text_from_images(directory: str, tesseract_config: str = '', output_dir: str = None):
    """
    Extract text from images in the specified directory and save the extracted text to a file.

    Args:
        directory (str): Folder scanned (non-recursively) for image files.
        tesseract_config (str): Extra options passed through to Tesseract.
        output_dir (str): Destination folder; defaults to '<directory>/extracted_texts'.

    Output order follows thread completion, so sections in extracted_text.txt
    are not in directory order.
    """
    if output_dir is None:
        output_dir = Path(directory) / "extracted_texts"
    else:
        output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    output_file = output_dir / "extracted_text.txt"
    output_file.write_text("", encoding="utf-8") # Clear the output file at the start
    image_paths = [file for file in Path(directory).glob('*') if is_image_file(file.name)]
    # Standard I/O-bound sizing heuristic: cpu_count + 4, clamped to [4, 32].
    executor_workers = min(32, max(4, multiprocessing.cpu_count() + 4))
    with ThreadPoolExecutor(max_workers=executor_workers) as executor:
        future_to_image = {executor.submit(extract_text, image_path, output_file, tesseract_config): image_path for image_path in image_paths}
        # Wrap the as_completed iterator with tqdm for progress visualization
        for future in tqdm(as_completed(future_to_image), total=len(future_to_image), desc="Processing Images"):
            image_path = future_to_image[future]
            try:
                future.result()
                logging.info(f"Processed: {image_path}")
            except Exception as e:
                logging.error(f"Failed to process {image_path}: {e}")
# Script entry point: verify Tesseract, then prompt for a directory and config.
if __name__ == "__main__":
    if not check_tesseract_installed():
        print("Exiting due to Tesseract requirements not being met.")
    else:
        directory_path = input("Enter the directory path to scan for images: ")
        # Set the default Tesseract configuration to use English and LSTM engine
        default_tesseract_config = "-l eng --oem 1"
        user_input = input(f"Enter Tesseract configuration options (default is '{default_tesseract_config}'). Press Enter to use default or specify new options: ")
        tesseract_config = user_input.strip() if user_input.strip() else default_tesseract_config
        extract_text_from_images(directory_path, tesseract_config)
        print("Text extraction completed.")
| 5,342 | Python | .py | 98 | 46.040816 | 164 | 0.668656 | jhnwnstd/linguist_toolkit | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,022 | you2wav.py | jhnwnstd_linguist_toolkit/you2wav.py | import yt_dlp
from pathlib import Path
import unicodedata
import re
import logging
from typing import Optional, List, Tuple
from concurrent.futures import ThreadPoolExecutor, as_completed
# Configuration options
VERBOSE = False # Toggle True for verbose output
MAX_WORKERS = 8 # Number of threads for concurrent downloads
# Set up logging
# NOTE: basicConfig runs at import time — a module-level side effect.
logging.basicConfig(
    level=logging.DEBUG if VERBOSE else logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Pre-compiled regex patterns for filename sanitization
ILLEGAL_CHAR_PATTERN = re.compile(r'[^\w\s-]')
SPACE_PATTERN = re.compile(r'\s+')
# Accepts youtube.com/watch?v=..., country-TLD variants, and youtu.be/ links.
YOUTUBE_URL_REGEX = re.compile(
    r'^(https?://)?(www\.)?'
    r'(youtube\.com/watch\?v=|youtube\.[a-z]{2,3}/watch\?v=|youtu\.be/)'
    r'[^&\s]+$',
    re.IGNORECASE
)
class YouTubeDownloader:
    """Downloads YouTube videos as MP4 into a target folder, with optional
    cookies.txt authentication and threaded batch downloads."""
    def __init__(self,
                 download_folder: str = "Downloaded_Videos",
                 verbose: bool = False,
                 max_workers: int = MAX_WORKERS):
        """
        Initializes the YouTubeDownloader with specified settings.
        Args:
            download_folder (str): Directory where videos will be downloaded.
            verbose (bool): Enable verbose logging.
            max_workers (int): Number of threads for concurrent downloads.
        """
        self.download_path = Path(download_folder)
        self.download_path.mkdir(parents=True, exist_ok=True)
        self.verbose = verbose
        self.max_workers = max_workers
        # Update logger level based on verbosity
        # (module-global logger, so this affects all instances).
        if self.verbose:
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.INFO)
    @staticmethod
    def is_valid_youtube_url(url: str) -> bool:
        """Validate if the provided URL is a valid YouTube video URL."""
        return bool(YOUTUBE_URL_REGEX.match(url.strip()))
    @staticmethod
    def sanitize_filename(title: str, max_length: int = 255) -> str:
        """
        Sanitize a string to create a safe and clean filename.
        Args:
            title (str): Original title of the video.
            max_length (int): Maximum allowed length for the filename.
        Returns:
            str: Sanitized filename.
        """
        sanitized_title = unicodedata.normalize('NFKD', title)
        sanitized_title = ILLEGAL_CHAR_PATTERN.sub('', sanitized_title)
        sanitized_title = SPACE_PATTERN.sub('_', sanitized_title).strip(".")
        # Truncate at the UTF-8 byte level; 'ignore' drops any half-cut code point.
        sanitized_title = sanitized_title.encode('utf-8')[:max_length].decode('utf-8', 'ignore').rstrip('_')
        return unicodedata.normalize('NFC', sanitized_title)
    def get_cookies_file(self) -> Optional[Path]:
        """
        Prompt the user to provide the path to the cookies.txt file or use the default in the working directory.
        Returns:
            Optional[Path]: Path to the cookies.txt file or None.
        """
        default_cookies_path = Path.cwd() / "cookies.txt"
        if default_cookies_path.exists():
            # NOTE(review): the f-prefix below has no placeholders (ruff F541); harmless.
            use_default = input(f"Found 'cookies.txt' in the current directory. Use it? (y/n): ").strip().lower()
            if use_default == 'y':
                logger.debug(f"Using default cookies file: {default_cookies_path}")
                return default_cookies_path
        else:
            logger.debug("No 'cookies.txt' found in the current directory.")
        cookies_path_input = input("Enter the path to your 'cookies.txt' file (or press Enter to skip): ").strip()
        if cookies_path_input:
            cookies_file = Path(cookies_path_input)
            if cookies_file.exists():
                logger.debug(f"Using provided cookies file: {cookies_file}")
                return cookies_file
            else:
                logger.warning(f"Cookies file '{cookies_file}' not found.")
        else:
            logger.info("Proceeding without cookies. Some videos may require authentication.")
        return None
    def download_video(self, video_url: str, cookies_file: Optional[Path] = None) -> Tuple[bool, str]:
        """
        Download a single YouTube video using yt-dlp, ensuring the output is an MP4 file.
        Args:
            video_url (str): URL of the YouTube video.
            cookies_file (Optional[Path]): Path to the cookies.txt file for authentication.
        Returns:
            Tuple[bool, str]: Success status and message.
        """
        if not self.is_valid_youtube_url(video_url):
            logger.error(f"Invalid URL: {video_url}")
            return False, f"Invalid URL: {video_url}"
        # Prepare yt-dlp options to ensure MP4 output
        ydl_opts = {
            'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best',
            'outtmpl': str(self.download_path / '%(title)s.%(ext)s'),
            'quiet': not self.verbose,
            'no_warnings': not self.verbose,
            'merge_output_format': 'mp4', # Ensure the final output is MP4
            'cookiefile': str(cookies_file) if cookies_file else None,
            'postprocessors': [{
                'key': 'FFmpegVideoConvertor',
                # 'preferedformat' (sic) is yt-dlp's actual option spelling.
                'preferedformat': 'mp4',
            }],
        }
        try:
            with yt_dlp.YoutubeDL(ydl_opts) as ydl:
                logger.debug(f"Starting download for: {video_url}")
                info_dict = ydl.extract_info(video_url, download=True)
                title = self.sanitize_filename(info_dict.get('title', 'video'))
                ext = 'mp4' # Ensure extension is mp4
                # NOTE(review): outtmpl wrote the *raw* title, so this sanitized
                # path may not match the file on disk for titles needing cleanup.
                video_file = self.download_path / f"{title}.{ext}"
                if video_file.exists():
                    logger.info(f"Successfully downloaded: '{video_file}'")
                    return True, f"Downloaded: {title}"
                else:
                    logger.error(f"Download completed but file not found: '{video_file}'")
                    return False, f"Download completed but file not found: {title}"
        except yt_dlp.utils.DownloadError as e:
            logger.error(f"Error downloading {video_url}: {e}")
            return False, f"Error downloading {video_url}: {e}"
    def download_videos_from_file(self, file_path: str, cookies_file: Optional[Path] = None) -> None:
        """
        Download multiple YouTube videos listed in a file concurrently.
        Args:
            file_path (str): Path to the file containing YouTube URLs.
            cookies_file (Optional[Path]): Path to the cookies.txt file for authentication.
        """
        file_path = Path(file_path)
        if not file_path.exists():
            logger.error(f"'{file_path.name}' not found. Creating the file for you to add video URLs.")
            file_path.touch()
            return
        urls = [line.strip() for line in file_path.read_text().splitlines() if self.is_valid_youtube_url(line.strip())]
        if not urls:
            logger.warning(f"No valid video URLs found in '{file_path.name}'.")
            return
        logger.info(f"Starting downloads for {len(urls)} video(s)...")
        # Use ThreadPoolExecutor for concurrent downloads
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            future_to_url = {executor.submit(self.download_video, url, cookies_file): url for url in urls}
            for future in as_completed(future_to_url):
                url = future_to_url[future]
                try:
                    success, message = future.result()
                    status = "Success" if success else "Failed"
                    logger.info(f"[{status}] {message}")
                except Exception as e:
                    logger.error(f"[Failed] Exception occurred while downloading {url}: {e}")
        logger.info("Download session completed.")
    def run_ui(self) -> None:
        """
        User Interface function to handle user input and execute corresponding actions.
        """
        logger.info("\nWelcome to the YouTube Video Downloader")
        # Menu loop: repeats until the user quits.
        while True:
            print("\nSelect an option:")
            print("(1) Download a YouTube video")
            print("(2) Download videos from urls.txt")
            print("(3) Quit")
            user_input = input("Enter 1, 2, or 3: ").strip().lower()
            if user_input in ('3', 'quit'):
                logger.info("Exiting the program. Goodbye!")
                break
            elif user_input == '2':
                file_path = Path.cwd() / "urls.txt"
                cookies_file = self.get_cookies_file()
                self.download_videos_from_file(str(file_path), cookies_file=cookies_file)
            elif user_input == '1':
                video_url = input("Enter the YouTube video URL: ").strip()
                if self.is_valid_youtube_url(video_url):
                    cookies_file = self.get_cookies_file()
                    success, message = self.download_video(
                        video_url, cookies_file=cookies_file
                    )
                    if success:
                        logger.info(message)
                    else:
                        logger.error(f"Failed to download: {message}")
                else:
                    logger.error("Invalid URL. Please enter a valid YouTube video URL.")
            else:
                logger.warning("Invalid option. Please enter 1, 2, or 3.")
def main():
    """Build a downloader with module-level settings and start the menu loop."""
    downloader = YouTubeDownloader(verbose=VERBOSE)
    downloader.run_ui()
# Run only when executed as a script.
if __name__ == "__main__":
    main()
2,288,023 | subtitle_cleaner.py | jhnwnstd_linguist_toolkit/subtitle_cleaner.py | import re
from pathlib import Path
from typing import List
import nltk
def process_subtitle_file(input_file: Path) -> List[str]:
    """
    Processes the subtitle file by removing redundancy, cleaning up text,
    and tokenizing sentences using NLTK, while preserving punctuation.
    Args:
        input_file (Path): Path to the subtitle file (WebVTT format).
    Returns:
        List[str]: A list of tokenized sentences.
    """
    # Initialize variables
    current_text = ''
    previous_text = ''
    # Regular expressions for cleaning
    timestamp_regex = re.compile(r'<\d{2}:\d{2}:\d{2}\.\d{3}>')  # inline word timings
    tag_regex = re.compile(r'</?c>')  # styling tags
    # Cue timing lines: "HH:MM:SS.mmm --> HH:MM:SS.mmm ..."
    timecode_line_regex = re.compile(
        r'\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}.*'
    )
    # Read and process the file line by line
    with open(input_file, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            # Skip blanks and the WebVTT header fields.
            if not line or line.startswith(('WEBVTT', 'Kind:', 'Language:')):
                continue
            if timecode_line_regex.match(line):
                continue
            if 'align:' in line or 'position:' in line:
                continue
            # Remove inline timestamps and tags
            cleaned_line = timestamp_regex.sub('', line)
            cleaned_line = tag_regex.sub('', cleaned_line)
            cleaned_line = cleaned_line.strip()
            if not cleaned_line:
                continue
            # Auto-generated captions repeat the rolling line; only keep a
            # line when it differs from the previous one.
            if cleaned_line != previous_text:
                current_text += ' ' + cleaned_line
                previous_text = cleaned_line
    # Ensure punctuation is preserved
    current_text = current_text.strip()
    # Tokenize sentences using NLTK
    # NOTE(review): downloads the 'punkt' model on every call (quiet, cached
    # after the first run) — consider hoisting to module import.
    nltk.download('punkt', quiet=True)
    sentences = nltk.sent_tokenize(current_text)
    return sentences
def main():
    """Clean every .vtt file in ./Subtitles and write numbered sentences to ./Cleaned Subtitles."""
    # Path to the Subtitles folder
    subtitle_folder = Path('Subtitles')
    # Path to the Cleaned Subtitles folder
    cleaned_subtitles_folder = Path('Cleaned Subtitles')
    cleaned_subtitles_folder.mkdir(parents=True, exist_ok=True) # Create folder if it doesn't exist
    # Iterate over all .vtt files in the Subtitles folder
    for subtitle_file in subtitle_folder.glob('*.vtt'):
        print(f"Processing file: {subtitle_file}")
        # Process the subtitle file
        sentences = process_subtitle_file(subtitle_file)
        # Generate a cleaned subtitle filename (same name, new folder)
        cleaned_file = cleaned_subtitles_folder / subtitle_file.name
        # Write the cleaned sentences to the new file, one numbered line each
        with open(cleaned_file, 'w', encoding='utf-8') as f:
            for idx, sentence in enumerate(sentences, 1):
                f.write(f"{idx}. {sentence}\n")
        print(f"Saved cleaned subtitles to: {cleaned_file}")
if __name__ == '__main__':
    main()
2,288,024 | youtube_downloader.py | jhnwnstd_linguist_toolkit/youtube_downloader.py | import yt_dlp
from pathlib import Path
import unicodedata
import re
import logging
from typing import Optional, Tuple
from concurrent.futures import ThreadPoolExecutor, as_completed
# Configuration options
VERBOSE = False # Toggle to True for verbose output
MAX_WORKERS = 8 # Number of threads for concurrent downloads
# Set up logging
# NOTE: basicConfig runs at import time — a module-level side effect.
logging.basicConfig(
    level=logging.DEBUG if VERBOSE else logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Pre-compiled regex patterns for filename sanitization
ILLEGAL_CHAR_PATTERN = re.compile(r'[^\w\s-]')
SPACE_PATTERN = re.compile(r'\s+')
# Accepts youtube.com/watch?v=..., country-TLD variants, and youtu.be/ links.
YOUTUBE_URL_REGEX = re.compile(
    r'^(https?://)?(www\.)?'
    r'(youtube\.com/watch\?v=|youtube\.[a-z]{2,3}/watch\?v=|youtu\.be/)'
    r'[^&\s]+$',
    re.IGNORECASE
)
class YouTubeDownloader:
    """Download YouTube videos as MP4 and extract their audio to WAV, writing
    video and audio into separate folders, with optional cookies.txt
    authentication and threaded batch processing."""
    def __init__(self,
                 video_folder: str = "Downloaded_Videos",
                 audio_folder: str = "Downloaded_Audio",
                 verbose: bool = False,
                 max_workers: int = MAX_WORKERS):
        """
        Initializes the YouTubeDownloader with specified settings.
        Args:
            video_folder (str): Directory where videos will be saved.
            audio_folder (str): Directory where audio files will be saved.
            verbose (bool): Enable verbose logging.
            max_workers (int): Number of threads for concurrent downloads.
        """
        self.video_path = Path(video_folder)
        self.audio_path = Path(audio_folder)
        self.video_path.mkdir(parents=True, exist_ok=True)
        self.audio_path.mkdir(parents=True, exist_ok=True)
        self.verbose = verbose
        self.max_workers = max_workers
        # Update logger level based on verbosity
        # (module-global logger, so this affects all instances).
        if self.verbose:
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.INFO)
    @staticmethod
    def is_valid_youtube_url(url: str) -> bool:
        """Validate if the provided URL is a valid YouTube video URL."""
        return bool(YOUTUBE_URL_REGEX.match(url.strip()))
    @staticmethod
    def sanitize_filename(title: str, max_length: int = 255) -> str:
        """
        Sanitize a string to create a safe and clean filename.
        Args:
            title (str): Original title of the video.
            max_length (int): Maximum allowed length for the filename.
        Returns:
            str: Sanitized filename.
        """
        sanitized_title = unicodedata.normalize('NFKD', title)
        sanitized_title = ILLEGAL_CHAR_PATTERN.sub('', sanitized_title)
        sanitized_title = SPACE_PATTERN.sub('_', sanitized_title).strip(".")
        # Truncate at the UTF-8 byte level; 'ignore' drops any half-cut code point.
        sanitized_title = sanitized_title.encode('utf-8')[:max_length].decode('utf-8', 'ignore').rstrip('_')
        return unicodedata.normalize('NFC', sanitized_title)
    def get_cookies_file(self) -> Optional[Path]:
        """
        Prompt the user to provide the path to the cookies.txt file or use the default in the working directory.
        Returns:
            Optional[Path]: Path to the cookies.txt file or None.
        """
        default_cookies_path = Path.cwd() / "cookies.txt"
        if default_cookies_path.exists():
            # Fix: plain string — the prompt has no placeholders (ruff F541).
            use_default = input("Found 'cookies.txt' in the current directory. Use it? (y/n): ").strip().lower()
            if use_default == 'y':
                logger.debug(f"Using default cookies file: {default_cookies_path}")
                return default_cookies_path
        else:
            logger.debug("No 'cookies.txt' found in the current directory.")
        cookies_path_input = input("Enter the path to your 'cookies.txt' file (or press Enter to skip): ").strip()
        if cookies_path_input:
            cookies_file = Path(cookies_path_input)
            if cookies_file.exists():
                logger.debug(f"Using provided cookies file: {cookies_file}")
                return cookies_file
            else:
                logger.warning(f"Cookies file '{cookies_file}' not found.")
        else:
            logger.info("Proceeding without cookies. Some videos may require authentication.")
        return None
    def download_video_and_audio(self, video_url: str, cookies_file: Optional[Path] = None) -> Tuple[bool, str]:
        """
        Download a single YouTube video as MP4 and extract audio as WAV, saving them in different folders.
        Args:
            video_url (str): URL of the YouTube video.
            cookies_file (Optional[Path]): Path to the cookies.txt file for authentication.
        Returns:
            Tuple[bool, str]: Success status and message.
        """
        if not self.is_valid_youtube_url(video_url):
            logger.error(f"Invalid URL: {video_url}")
            return False, f"Invalid URL: {video_url}"
        # Prepare yt-dlp options
        ydl_opts = {
            'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best',
            'outtmpl': '%(title)s.%(ext)s',
            # NOTE(review): yt-dlp's 'paths' maps output-template *types* to
            # directories ('home', 'temp', ...); 'video'/'audio' are not
            # standard keys — verify files actually land in these folders.
            'paths': {
                'video': str(self.video_path),
                'audio': str(self.audio_path),
            },
            'quiet': not self.verbose,
            'no_warnings': not self.verbose,
            'merge_output_format': 'mp4',
            'cookiefile': str(cookies_file) if cookies_file else None,
            # Fix: keep the merged MP4. FFmpegExtractAudio otherwise deletes
            # the source video after extracting audio, which made the
            # video_file.exists() check below fail on every download.
            'keepvideo': True,
            'postprocessors': [
                {
                    'key': 'FFmpegExtractAudio',
                    'preferredcodec': 'wav',
                }
            ],
        }
        try:
            with yt_dlp.YoutubeDL(ydl_opts) as ydl:
                logger.debug(f"Starting download for: {video_url}")
                info_dict = ydl.extract_info(video_url, download=True)
                title = self.sanitize_filename(info_dict.get('title', 'video'))
                # NOTE(review): outtmpl writes the raw title, so these sanitized
                # paths may not match the files on disk for unusual titles.
                # Video file
                video_file = self.video_path / f"{title}.mp4"
                if not video_file.exists():
                    logger.error(f"Download completed but video file not found: '{video_file}'")
                    return False, f"Download completed but video file not found: {title}"
                # Audio file
                audio_file = self.audio_path / f"{title}.wav"
                if not audio_file.exists():
                    logger.error(f"Audio extraction completed but file not found: '{audio_file}'")
                    return False, f"Audio extraction completed but file not found: {title}"
                logger.info(f"Successfully downloaded video and extracted audio for: '{title}'")
                return True, f"Downloaded and processed: {title}"
        except yt_dlp.utils.DownloadError as e:
            logger.error(f"Error downloading {video_url}: {e}")
            return False, f"Error downloading {video_url}: {e}"
        except Exception as e:
            logger.error(f"Unexpected error: {e}")
            return False, f"Unexpected error: {e}"
    def download_videos_from_file(self, file_path: str, cookies_file: Optional[Path] = None) -> None:
        """
        Download multiple YouTube videos and extract audio concurrently.
        Args:
            file_path (str): Path to the file containing YouTube URLs.
            cookies_file (Optional[Path]): Path to the cookies.txt file for authentication.
        """
        file_path = Path(file_path)
        if not file_path.exists():
            logger.error(f"'{file_path.name}' not found. Creating the file for you to add video URLs.")
            file_path.touch()
            return
        urls = [line.strip() for line in file_path.read_text().splitlines() if self.is_valid_youtube_url(line.strip())]
        if not urls:
            logger.warning(f"No valid video URLs found in '{file_path.name}'.")
            return
        logger.info(f"Starting downloads for {len(urls)} video(s)...")
        # Use ThreadPoolExecutor for concurrent downloads
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            future_to_url = {executor.submit(self.download_video_and_audio, url, cookies_file): url for url in urls}
            for future in as_completed(future_to_url):
                url = future_to_url[future]
                try:
                    success, message = future.result()
                    status = "Success" if success else "Failed"
                    logger.info(f"[{status}] {message}")
                except Exception as e:
                    logger.error(f"[Failed] Exception occurred while processing {url}: {e}")
        logger.info("Download session completed.")
    def run_ui(self) -> None:
        """
        User Interface function to handle user input and execute corresponding actions.
        """
        logger.info("\nWelcome to the YouTube Video and Audio Downloader")
        # Menu loop: repeats until the user quits.
        while True:
            print("\nSelect an option:")
            print("(1) Download a YouTube video and extract audio")
            print("(2) Download videos from urls.txt and extract audio")
            print("(3) Quit")
            user_input = input("Enter 1, 2, or 3: ").strip().lower()
            if user_input in ('3', 'quit'):
                logger.info("Exiting the program. Goodbye!")
                break
            elif user_input == '2':
                file_path = Path.cwd() / "urls.txt"
                cookies_file = self.get_cookies_file()
                self.download_videos_from_file(str(file_path), cookies_file=cookies_file)
            elif user_input == '1':
                video_url = input("Enter the YouTube video URL: ").strip()
                if self.is_valid_youtube_url(video_url):
                    cookies_file = self.get_cookies_file()
                    success, message = self.download_video_and_audio(
                        video_url, cookies_file=cookies_file
                    )
                    if success:
                        logger.info(message)
                    else:
                        logger.error(f"Failed to download: {message}")
                else:
                    logger.error("Invalid URL. Please enter a valid YouTube video URL.")
            else:
                logger.warning("Invalid option. Please enter 1, 2, or 3.")
def main():
    """Build a downloader with module-level settings and start the menu loop."""
    downloader = YouTubeDownloader(verbose=VERBOSE)
    downloader.run_ui()
# Run only when executed as a script.
if __name__ == "__main__":
    main()
2,288,025 | plot.py | tum-pbs_StableBPTT/CartPole/Plots/01/plot.py | import numpy as np
import matplotlib.pyplot as plt
# One color per gradient-flow mode (gfm), indexed via gfm_dict below.
colors = ['#ef476f','#ffd166','#06d6a0','#073b4c']
# Legend name shown for each gradient-flow mode code.
labels={'F':"R",'P':'M','C':"C",'S':'S'}
# Maps a gradient-flow mode code to its index in `colors`.
gfm_dict = {
    'F':0,
    'P':1,
    'C':2,
    'S':3
}
def smooth(a):
    """Running mean over a 30-sample window; the front is padded with the
    first value so the output has the same length as the input."""
    window = 30
    padded = np.concatenate([a[0] * np.ones((window - 1,)), a])
    weights = np.ones(window) / window
    return np.convolve(padded, weights, mode='valid')
def plot_result(run, ax,cmod,nc):
    """Plot one run's smoothed loss curve onto *ax* if the run's saved
    parameters match the requested clipping mode *cmod* and pole count *nc*.

    Returns 0 (and plots nothing) when the run does not match.
    """
    results = np.loadtxt(run+'/results.txt')
    params = np.load(run+'/params.npy',allow_pickle=True).item()
    if params['cmod']!=cmod: return 0
    if params['NC']!=nc: return 0
    ci1 = gfm_dict[params['gfm']]
    label = params['gfm']
    # Column 2 of results.txt is plotted — presumably the loss shown on the
    # y-axis; confirm against the training script that writes results.txt.
    curve = results[:,2]
    curve = smooth(curve)
    # Label only NORM-clipped curves so each gradient-flow mode appears once in the legend.
    if params['cmod']=='NORM':
        ax.plot(curve,label=labels[label],color=colors[ci1])
    else:
        ax.plot(curve,color=colors[ci1])
# 2x2 grid: one panel per number of poles (1-4).
fig = plt.figure(figsize=(7,5))
gs = fig.add_gridspec(2,2)
ax0 = fig.add_subplot(gs[0, 0])
ax1 = fig.add_subplot(gs[0, 1])
ax2 = fig.add_subplot(gs[1, 0])
ax3 = fig.add_subplot(gs[1, 1])
ax = [ax0,ax1,ax2,ax3]
# Overlay every matching run (48 experiment folders) onto the panel for its pole count.
for cmod in ['VALUE','NORM','NONE']:
    for nc in [1,2,3,4]:
        for i in range(48):
            run = 'Data/01/01_'+str(i).zfill(4)
            # NOTE(review): the bare pass silently skips runs with missing or
            # unreadable data files.
            try: plot_result(run,ax[nc-1],cmod,nc)
            except Exception: pass
# Shared axis cosmetics: log-scale loss, labels only on outer edges,
# single legend in the lower-left panel.
for iax,axi in enumerate(ax):
    axi.set_yscale('log')
    axi.set_ylim([1*10**-6,3])
    axi.set_title('Number of Poles: '+str(1+iax),fontsize=12)
    if iax>1: axi.set_xlabel('Epochs',fontsize=12)
    if iax%2==0: axi.set_ylabel('Loss',fontsize=12)
    if iax==2:axi.legend(loc=8,ncol=2)
plt.suptitle(' Cartpole',fontsize=22)
plt.tight_layout(rect=[0, 0, 1, 0.98])
plt.savefig('Plots/01/cartpole.png')
| 1,742 | Python | .py | 52 | 28.942308 | 64 | 0.614371 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,026 | multi_cartpole_controller.py | tum-pbs_StableBPTT/CartPole/Simulations/01/multi_cartpole_controller.py | import tensorflow as tf
def build_fully_connected_network(width, depth, use_bias, use_zero_initialization,n_poles):
    """Build a tanh MLP controller mapping cart-pole states to one scalar output.

    Args:
        width (int): Units per hidden layer.
        depth (int): Number of hidden layers.
        use_bias (bool): Whether Dense layers carry bias terms.
        use_zero_initialization (bool): Zero the final layer's kernel so the
            untrained controller initially outputs 0.
        n_poles (int): Number of poles; input shape is (1 + n_poles, 2)
            position/velocity pairs, flattened before the dense stack.

    Returns:
        tf.keras.models.Sequential: network with a single linear output unit.
    """
    activation = tf.keras.activations.tanh
    layers = []
    layers.append(tf.keras.layers.InputLayer(input_shape=(1+n_poles,2)))
    layers.append(tf.keras.layers.Reshape((-1,)))
    for _ in range(depth):
        layers.append(tf.keras.layers.Dense(
            width, activation=activation, use_bias=use_bias))
    layers.append(tf.keras.layers.Dense(
        1, activation='linear', use_bias=use_bias))
    model = tf.keras.models.Sequential(layers)
    model.summary()
    # Fix: idiomatic truthiness instead of '== True' comparisons (PEP 8).
    if use_zero_initialization:
        # With biases, get_weights() ends with [..., kernel, bias], so the
        # final kernel sits at index -2; without biases it is the last entry.
        last_weights_index = -2 if use_bias else -1
        weights = model.get_weights()
        weights[last_weights_index] = 0.0 * weights[last_weights_index]
        model.set_weights(weights)
    return model
| 917 | Python | .py | 22 | 34.318182 | 91 | 0.664036 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,027 | training.py | tum-pbs_StableBPTT/CartPole/Simulations/01/training.py | import tensorflow as tf
import time
import numpy as np
from loss_formulation import LOSS_FORMULATION
def combine_grads(grad_i_base,grad_i_comp):
    """Element-wise: keep the base gradient's entries whose sign agrees with
    the comparison gradient, and zero out the entries that disagree."""
    agree = tf.math.sign(grad_i_base) == tf.math.sign(grad_i_comp)
    return tf.where(agree, grad_i_base, 0)
def combine_grad_lists(grad_base,grad_comp):
    """Apply combine_grads position-wise across two parallel lists of gradient tensors."""
    return [
        combine_grads(grad_base[i], grad_comp[i])
        for i in range(len(grad_base))
    ]
@tf.function
def get_grad_size(grad):
    """Sum of squared entries over all weight tensors in *grad*.

    Note: this is the *squared* L2 norm — no square root is taken.
    """
    grad_size = tf.constant(0.0, dtype=tf.float32)
    for weight in grad:
        grad_size += tf.reduce_sum(weight**2)
    return grad_size
def flatten(tensor_list):
    """Concatenate every tensor in *tensor_list*, each reshaped to 1-D, into one flat vector."""
    pieces = [tf.reshape(tensor, (-1,)) for tensor in tensor_list]
    return tf.concat(pieces, axis=0)
def norm(v):
    # Euclidean (L2) norm: square root of the sum of squares.
    return tf.reduce_sum(v*v)**0.5
def ip(v,w):
    # Inner (dot) product of two flat vectors.
    return tf.reduce_sum(v*w)
@tf.function
def cosine_similarity(grad1,grad2):
    """Cosine of the angle between two gradients, each given as a list of
    weight tensors that is flattened into a single vector first."""
    v = flatten(grad1)
    w = flatten(grad2)
    return ip(v,w)/(norm(v)*norm(w))
class TRAINING_SETUP():
    """Trains the cart-pole controller with a configurable gradient-flow mode.

    `gfm` selects how gradients propagate through the unrolled simulation
    (see LOSS_FORMULATION): 'F' full BPTT, 'P' physics-only, 'S' stopped,
    and 'C' combined — two backward passes ('P' and 'F') whose gradients
    are merged by keeping only sign-consistent entries.
    """
    def __init__(self, data, controller, simulator, Nt, cartpole_loss, loss_mode,gfm, opt_p):
        # gfm -> [number of backward passes, flow mode(s) to use]
        gf_dict = {
            'F':[1,'F'],
            'P':[1,'P'],
            'S':[1,'S'],
            'C':[2,'P','F'],
        }
        gf_p = gf_dict[gfm]
        self.dataloader,self.train_data,self.test_data = data
        self.controller = controller
        self.nb = self.dataloader.cardinality()  # number of mini-batches
        self.nep = opt_p['ep']  # number of epochs
        opt_dict = {
            'ADAM':tf.keras.optimizers.Adam,
            'SGD':tf.keras.optimizers.SGD,
            'SGDMOM080':lambda lr, **kwargs: tf.keras.optimizers.SGD(lr, momentum=0.8, **kwargs),
            'SGDMOM090':lambda lr, **kwargs: tf.keras.optimizers.SGD(lr, momentum=0.9, **kwargs),
            'SGDMOM095':lambda lr, **kwargs: tf.keras.optimizers.SGD(lr, momentum=0.95, **kwargs),
            'ADAGRAD':tf.keras.optimizers.Adagrad,
            'ADADELTA':tf.keras.optimizers.Adadelta,
            'RMSPROP':tf.keras.optimizers.RMSprop
        }
        opt = opt_dict[opt_p['opt']]
        # NOTE(review): all three clipping variants are instantiated
        # eagerly; only the one selected by 'cmod' is kept.
        clip_dict = {
            'NONE': opt(opt_p['lr']),
            'VALUE':opt(opt_p['lr'], clipvalue=opt_p['cnum']),
            'NORM':opt(opt_p['lr'],global_clipnorm=opt_p['cnum'])
        }
        self.optimizer = clip_dict[opt_p['cmod']]
        self.results = []
        if gf_p[0]==1:
            # Single backward pass through one loss formulation.
            LF1=LOSS_FORMULATION(simulator,controller,cartpole_loss,
                Nt,gf_p[1],loss_mode)
            self.LF1_compute = LF1.build_loss()
            self.update = self.update_1bp
        if gf_p[0]==2:
            # Combined mode: two formulations, gradients merged by sign.
            LF1=LOSS_FORMULATION(simulator,controller,cartpole_loss,
                Nt,gf_p[1],loss_mode)
            self.LF1_compute = LF1.build_loss()
            LF2=LOSS_FORMULATION(simulator,controller,cartpole_loss,
                Nt,gf_p[2],loss_mode)
            self.LF2_compute = LF2.build_loss()
            self.update = self.update_2bp
    @tf.function
    def update_1bp(self,batch_states):
        # One optimizer step from a single backward pass.
        with tf.GradientTape() as tape:
            loss = self.LF1_compute(batch_states)
        grad = tape.gradient(loss,self.controller.variables)
        self.optimizer.apply_gradients(zip(grad, self.controller.variables))
        return loss,[grad]
    @tf.function
    def update_2bp(self,batch_states):
        # Two backward passes; only sign-consistent gradient entries are
        # applied (see combine_grad_lists).
        with tf.GradientTape() as tape:
            loss = self.LF1_compute(batch_states)
        grad_base = tape.gradient(loss,self.controller.variables)
        with tf.GradientTape() as tape:
            loss = self.LF2_compute(batch_states)
        grad_comp = tape.gradient(loss,self.controller.variables)
        grad = combine_grad_lists(grad_base,grad_comp)
        self.optimizer.apply_gradients(zip(grad, self.controller.variables))
        return loss,[grad,grad_base,grad_comp]
    def mini_batch_update(self, batch_states):
        # Wraps `update` with wall-clock timing and gradient diagnostics.
        t0 = time.time()
        loss,grads = self.update(batch_states)
        t1 = time.time()-t0
        grad_size = get_grad_size(grads[0])
        if len(grads)>1:
            # Two-pass mode: similarity between the two raw gradients.
            cossim = cosine_similarity(grads[1],grads[2])
        else:
            cossim = 0.0
        return loss, grad_size,cossim, t1
    def epoch_update(self, i):
        # One pass over the dataloader; `i` (the epoch index) is unused here.
        lb,gb,cb,tb = [],[],[],[]
        for j, states in enumerate(self.dataloader):
            loss, grad_size,cossim, t1 = self.mini_batch_update(states)
            lb.append(loss)
            gb.append(grad_size)
            cb.append(cossim)
            tb.append(t1)
        l = tf.reduce_mean(lb)
        g = tf.reduce_mean(gb)
        c = tf.reduce_mean(cb)
        t = tf.reduce_sum(tb)
        # Full-dataset evaluations after the epoch.
        l_train = self.LF1_compute(self.train_data)
        l_test = self.LF1_compute(self.test_data)
        return l, l_train,l_test,g,c,t
    def run(self):
        """Train for self.nep epochs; returns an array with one row per
        epoch: [loss, train_loss, test_loss, grad_size, cos_sim, time]."""
        for i in range(self.nep):
            l, l_train,l_test,g,c,t = self.epoch_update(i)
            tf.print('Epoch: ', i, ' Loss: ', l,
                ' Grad :', g, ' Epoch Time :', t)
            self.results.append([l, l_train,l_test,g,c,t])
        return np.array(self.results)
| 5,234 | Python | .py | 129 | 30.891473 | 98 | 0.586728 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,028 | start.py | tum-pbs_StableBPTT/CartPole/Simulations/01/start.py | from multi_cartpole_data import generate_data
from multi_cartpole_controller import build_fully_connected_network
from cartpole_simulator import build_cartpole_step,build_cartpole_loss
from loss_formulation import LOSS_FORMULATION
from training import TRAINING_SETUP
import tensorflow as tf
import numpy as np
import argparse,os
# Command-line driver: parses hyperparameters, creates a unique run
# directory, seeds all RNGs, builds data/controller/simulator, trains,
# and saves the results.
parser = argparse.ArgumentParser(description='CmdLine Parser', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# MISC
parser.add_argument( '--gpu', default=0, type=str, help='Cuda visible devices')
parser.add_argument( '--save', default=False, type=bool, help='Save model?')
parser.add_argument( '--load', default=0, type=str, help='Load model? Path or 0')
# DATA
parser.add_argument( '--Nd', default=0, type=int, help='Number of data points')
# NETWORK
parser.add_argument( '--width', default=0, type=int, help='Network width')
parser.add_argument( '--depth', default=0, type=int, help='Network depth')
parser.add_argument( '--bias', default=True, type=bool, help='Use bias?')
parser.add_argument( '--zero', default=False, type=bool, help='Use zero initialization?')
# PHYSICS
parser.add_argument( '--T', default=0, type=float, help='Total time (physics system)')
parser.add_argument( '--Nt', default=0, type=int, help='Number of solver time steps')
parser.add_argument( '--NC', default=0, type=int, help='Number of poles, complexity')
# LOSS
parser.add_argument( '--lm', default=0, type=str, help='Loss mode (CONTINUOUS, FINAL)')
# GRADIENT FLOW
parser.add_argument( '--gfm', default=0, type=str, help='Gradient Flow Mode')
# OPTIMIZATION
parser.add_argument( '--opt', default=0, type=str, help='Optimizer: ADAM')
parser.add_argument( '--lr', default=0, type=float, help='Learning rate')
parser.add_argument( '--cmod', default=0, type=str, help='Clipping mode: NONE, VALUE')
parser.add_argument( '--cnum', default=0, type=float, help='Clipping number')
parser.add_argument( '--bs', default=0, type=int, help='Batch size')
parser.add_argument( '--ep', default=0, type=int, help='Number of epochs')
#PATHS
parser.add_argument( '--script_name', default=0, type=str, help='Script name')
parser.add_argument( '--folder_name', default=0, type=str, help='Folder name')
# Collect all CLI arguments into a plain dict and derive the solver step.
p = {}
p.update(vars(parser.parse_args()))
dt = p['T']/p['Nt']
p['dt']=dt
for i in p.keys():
    print(i,p[i])
# Find the first unused run directory Data/<folder>/<script>_NNNN.
path = 'Data/'+p['folder_name']+'/'+p['script_name']+'_'
i = 0
pi = str(i).zfill(4)
while(os.path.exists(path+pi)):
    i = i+1
    pi = str(i).zfill(4)
sim_path = path+pi+'/'
results_path = sim_path+'results.txt'
dict_path = sim_path+'params.txt'
dict_np = sim_path+'params.npy'
network_path = sim_path+'network.h5'
os.makedirs(os.path.dirname(sim_path))
# Dump the parameters both human-readable and as a .npy dict.
with open(dict_path, 'x') as file:
    for i in p.keys():
        print(i.ljust(20,' '),p[i],file=file)
np.save(dict_np,p)
# load with: np.load(p,allow_pickle='TRUE').item()
# Reproducibility: fix hashing and force deterministic TF/cuDNN kernels.
# NOTE(review): these env vars (and CUDA_VISIBLE_DEVICES below) are set
# after tensorflow was imported — verify they still take effect here.
os.environ['PYTHONHASHSEED'] = '42'
os.environ['TF_DETERMINISTIC_OPS'] = '1'
os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
np.random.seed(42)
tf.random.set_seed(42)
### SIMULATION ###
os.environ["CUDA_VISIBLE_DEVICES"]=p['gpu']
train_data,test_data = generate_data(p['Nd'],p['NC'])
dataloader = tf.data.Dataset.from_tensor_slices(train_data).batch(p['bs'])
data = [dataloader,train_data,test_data]
if p['load']=='0':
    #network = get_model_1(p['Nx'],100,True)
    controller = build_fully_connected_network(p['width'],p['depth'],p['bias'],p['zero'],p['NC'])
    print('NEW MODEL')
else:
    controller = tf.keras.models.load_model(p['load'])
    print('MODEL LOADED')
cartpole_step = build_cartpole_step(p['dt'])
cartpole_loss = build_cartpole_loss()
opt_p = {i:p[i] for i in ['opt','lr','cmod','cnum','bs','ep']}
TS = TRAINING_SETUP(data,controller,cartpole_step,p['Nt'],
    cartpole_loss,p['lm'],p['gfm'],opt_p)
res = TS.run()
np.savetxt(results_path,res)
if p['save']: controller.save(network_path)
| 3,956 | Python | .py | 86 | 43.872093 | 118 | 0.69435 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,029 | multi_cartpole_data.py | tum-pbs_StableBPTT/CartPole/Simulations/01/multi_cartpole_data.py | import numpy as np
def generate_data(N,n_poles):
    """Draw random initial cart-pole states.

    Each state has shape (n_poles+1, 2): row 0 is the cart (x, x_dot),
    the remaining rows are poles (theta, theta_dot). Cart entries and all
    velocities are uniform in [-1, 1); pole angles are shifted to lie
    within pi/6 of theta = pi. Returns a (train, test) pair of float32
    arrays, each of shape (N, n_poles+1, 2). Uses the global NumPy RNG.
    """
    def sample_batch():
        states = np.random.rand(N, n_poles + 1, 2).astype(np.float32)
        states = states * 2 - 1
        states[:, 1:, 0] = np.pi + states[:, 1:, 0] * np.pi / 6
        return states

    return sample_batch(), sample_batch()
| 470 | Python | .py | 10 | 41.5 | 68 | 0.646532 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,030 | cartpole_simulator.py | tum-pbs_StableBPTT/CartPole/Simulations/01/cartpole_simulator.py | ### CARTPOLE ###
import tensorflow as tf
def build_cartpole_step(dt):
    """Return a tf.function that advances the multi-pole cart-pole system
    by one semi-implicit Euler step of size `dt`."""
    g = 9.8     # gravity
    m = 0.1 # mass pole
    M = 1.1 #total mass = mass pole + mass cart
    l = 0.5 #length
    ml = m * l
    @tf.function
    def cartpole_step(state,force):
        """
        multiple poles
        state: batch, object i (cart, pole1,pole2,...), (x,x_dot)
        force: batch,
        NOT batch,1
        """
        #unstack into cart (x, x_dot) and poles (theta, theta_dot)
        cart,poles = tf.split(state,[1,state.shape[1]-1],1)
        x,x_dot = tf.unstack(cart,axis=2)
        thetas,thetas_dot = tf.unstack(poles,axis=2)
        force = tf.expand_dims(force, axis=-1)
        # accelerations from the cart-pole equations of motion
        cos = tf.cos(thetas)
        sin = tf.sin(thetas)
        sintd2 = sin * thetas_dot**2
        A = (force + ml * sintd2) / M
        B = l * (4.0/3.0 - m * cos**2 / M)
        thetas_dot2 = (g * sin - cos * A) / B
        # reaction of all poles on the cart, summed over the pole axis
        C = tf.reduce_sum(sintd2 - thetas_dot2 * cos,axis=1)
        C = tf.expand_dims(C,axis=-1)
        x_dot2 = (force + ml * C)/M
        # semi-implicit Euler: update velocities first, then positions
        # with the new velocities
        thetas_dot_new = thetas_dot + dt * thetas_dot2
        thetas_new = thetas + dt * thetas_dot_new
        x_dot_new = x_dot + dt * x_dot2
        x_new = x + dt * x_dot_new
        # stack back into the (batch, object, x/x_dot) layout
        cart_new = tf.stack([x_new,x_dot_new],axis=2)
        poles_new = tf.stack([thetas_new,thetas_dot_new],axis=2)
        state_new = tf.concat([cart_new, poles_new],axis=1)
        return state_new
    return cartpole_step
def build_cartpole_loss():
    """Return a loss on pole angles: 1 - mean(cos(theta)), which is
    minimal (zero) when every pole angle satisfies cos(theta) = 1.
    States are indexed (..., object, x/v); entry [..., 1:, 0] holds the
    pole angles."""
    @tf.function
    def cartpole_loss(states):
        pole_angles = states[..., 1:, 0]
        return 1 - tf.reduce_mean(tf.cos(pole_angles))
    return cartpole_loss
| 1,673 | Python | .py | 48 | 26.604167 | 65 | 0.548712 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,031 | loss_formulation.py | tum-pbs_StableBPTT/CartPole/Simulations/01/loss_formulation.py | import tensorflow as tf
stop = tf.stop_gradient  # shorthand: detaches its argument from the gradient tape
class LOSS_FORMULATION():
    """Unrolls the controlled simulation for Nt steps and builds a loss
    under a chosen gradient-flow mode:
        'F' full BPTT (gradients through physics and controller input),
        'P' physics chain only (controller input detached),
        'N' network chain only (simulator input detached),
        'S' both detached (one-step gradients).
    """
    def __init__(self, simulator_time_step, controller, loss_function, Nt, gradient_flow, loss_mode):
        self.simulator_time_step = simulator_time_step
        self.controller = controller
        self.loss_function = loss_function
        self.Nt = Nt  # number of unrolled solver steps
        self.loss_mode = loss_mode  # 'FINAL' or 'CONTINUOUS'
        self.gradient_flow = gradient_flow
    def physics_flow(self,state):
        # State fed to the simulator: detached unless gradients may flow
        # through the physics chain.
        if self.gradient_flow in ['F', 'P']:#['full', 'physics']
            stateb = state
        elif self.gradient_flow in ['N','S']:#['network','stop']
            stateb = stop(state)
        return stateb
    def control_flow(self, state):
        # State fed to the controller: detached unless gradients may flow
        # through the network chain. Returns the scalar control per sample.
        if self.gradient_flow in ['F', 'N']:
            control = self.controller(state)
        elif self.gradient_flow in ['P', 'S']:
            control = self.controller(stop(state))
        return control[:,0]
    @tf.function
    def time_evolution(self, initial_state):
        """Roll out Nt steps; returns stacked states (batch, Nt+1, ...)
        and controls (batch, Nt)."""
        states = [initial_state]
        forces = []
        for n in range(1, self.Nt+1):
            control_n = self.control_flow(states[-1])
            state_nb = self.physics_flow(states[-1])
            state_n = self.simulator_time_step(state_nb, control_n)
            forces.append(control_n)
            states.append(state_n)
        states = tf.stack(states, axis=1)
        forces = tf.stack(forces, axis=1)
        return states, forces
    def build_loss(self):
        """Return a tf.function mapping initial states to the scalar loss.

        'FINAL' evaluates the loss on the last state only; 'CONTINUOUS'
        on every state after the initial one.
        """
        if self.loss_mode == 'FINAL':
            def choose_states(states): return states[:, -1, :]
        elif self.loss_mode == 'CONTINUOUS':
            def choose_states(states): return states[:, 1:, :]
        def full_loss(states):
            chosen_states = choose_states(states)
            physics_loss = self.loss_function(chosen_states)
            loss = physics_loss
            return loss
        @tf.function
        def compute_loss(initial_states):
            states, forces = self.time_evolution(initial_states)
            loss = full_loss(states)
            return loss
        return compute_loss
| 2,084 | Python | .py | 51 | 31.137255 | 101 | 0.592483 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,032 | plot.py | tum-pbs_StableBPTT/GuidanceByRepulsion/Plots/01/plot.py | import numpy as np
import matplotlib.pyplot as plt
colors = ['#ef476f','#ffd166','#06d6a0','#073b4c']
gfm_dict = {'F':0,'P':1,'C':2,'S':3}
regu_dict={0.001:4, 0.01:3, 0.1:2, 1.0:1}
labels={'F':"R",'P':'M','C':"C",'S':'S'}
def smooth(a):
    """Moving average with window 30. The front is padded with a[0] so the
    output has the same length as the input."""
    window = 30
    kernel = np.ones(window) / window
    padded = np.concatenate([a[0] * np.ones(window - 1), a])
    return np.convolve(padded, kernel, mode='valid')
def plot_result(run, ax):
    """Load one run's results and draw its smoothed loss curve (column 2)
    on the subplot matching its regularization coefficient. Returns the
    run's parameter dict."""
    results = np.loadtxt(run + '/results.txt')
    params = np.load(run + '/params.npy', allow_pickle=True).item()
    color_idx = gfm_dict[params['gf']]
    label = params['gf']
    subplot_idx = regu_dict[params['rc']] - 1
    curve = smooth(results[:, 2])
    ax[subplot_idx].plot(curve, label=labels[label], color=colors[color_idx])
    ax[subplot_idx].set_title('Regularization: ' + str(params['rc']))
    return params
# 2x2 figure: one subplot per regularization coefficient.
fig = plt.figure(figsize=(8,5))
gs = fig.add_gridspec(2,2)
ax0 = fig.add_subplot(gs[0, 0])
ax1 = fig.add_subplot(gs[0, 1])
ax2 = fig.add_subplot(gs[1, 0])
ax3 = fig.add_subplot(gs[1, 1])
ax = [ax0,ax1,ax2,ax3]
runs =[]
for i in range(48):
    run = 'Data/01/01_'+str(i).zfill(4)
    runs.append(run)
for i,run in enumerate(runs):
    try:
        params = plot_result(run,ax)
    except Exception as Error:
        # Best-effort: runs whose files are missing/unreadable are skipped.
        pass
for iax,axi in enumerate(ax[:]):
    axi.set_yscale('log')
    axi.set_ylim([0.4,6])
    axi.set_yticks([0.5,0.7,1,2,3,5])
    axi.set_yticklabels(['0.5','0.7','1','2','3','5'])
    if iax>1: axi.set_xlabel('Epochs',fontsize=12)
    if iax%2==0: axi.set_ylabel('Loss',fontsize=12)
    if iax==0:
        # De-duplicate legend entries (one curve per mode may repeat).
        # NOTE(review): `labels` here shadows the module-level dict, but
        # it is no longer read afterwards.
        handles, labels = axi.get_legend_handles_labels()
        handle_list, label_list = [], []
        for handle, label in zip(handles, labels):
            if label not in label_list:
                handle_list.append(handle)
                label_list.append(label)
        axi.legend(handle_list, label_list,loc=8,ncol=2)
plt.suptitle('Guidance by Repulsion',fontsize=18)
plt.tight_layout(rect=[0, 0, 1, 0.98])
plt.savefig('Plots/01/Guidance_by_repulsion.png')
| 2,045 | Python | .py | 57 | 31.140351 | 64 | 0.62069 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,033 | training.py | tum-pbs_StableBPTT/GuidanceByRepulsion/Simulations/01/training.py | import tensorflow as tf
import time
import numpy as np
from loss_formulation import LOSS_FORMULATION
def combine_grads(grad_i_base,grad_i_comp):
    """Keep entries of the base gradient whose sign matches the comparison
    gradient; all other entries are set to zero."""
    agree = tf.math.sign(grad_i_base) == tf.math.sign(grad_i_comp)
    return tf.where(agree, grad_i_base, 0)
def combine_grad_lists(grad_base,grad_comp):
    """Apply combine_grads element-wise over two parallel gradient lists."""
    return [combine_grads(base_i, comp_i)
            for base_i, comp_i in zip(grad_base, grad_comp)]
@tf.function
def get_grad_size(grad):
    """Return the sum of squared entries over all tensors in a gradient list
    (the squared L2 norm of the full gradient)."""
    total = tf.constant(0.0, dtype=tf.float32)
    for weight in grad:
        total = total + tf.reduce_sum(weight**2)
    return total
def flatten(tensor_list):
    """Concatenate all tensors in the list into a single flat 1-D tensor."""
    return tf.concat([tf.reshape(t, (-1,)) for t in tensor_list], axis=0)
def norm(v):
    """L2 norm of a flat tensor."""
    return tf.reduce_sum(tf.square(v)) ** 0.5
def ip(v,w):
    """Inner product of two flat tensors."""
    return tf.reduce_sum(tf.multiply(v, w))
@tf.function
def cosine_similarity(grad1,grad2):
    """Cosine similarity between two gradient lists, compared as flat vectors."""
    a = flatten(grad1)
    b = flatten(grad2)
    return ip(a, b) / (norm(a) * norm(b))
class TRAINING_SETUP():
    """Trains the driver/evader controller with a configurable
    gradient-flow mode.

    `gfm` selects how gradients propagate through the unrolled simulation
    (see LOSS_FORMULATION): 'F' full BPTT, 'P' physics-only, 'S' stopped,
    and 'C' combined — two backward passes ('P' and 'F') whose gradients
    are merged by keeping only sign-consistent entries. The loss is the
    physics loss plus a force-regularization term; `lp` below refers to
    the physics part alone.
    """
    def __init__(self, data, simulator, controller , Nt, loss_function,
        loss_mode, regu_coef, gfm, opt_p):
        # gfm -> [number of backward passes, flow mode(s) to use]
        gf_dict = {
            'F':[1,'F'],
            'P':[1,'P'],
            'S':[1,'S'],
            'C':[2,'P','F'],
        }
        gf_p = gf_dict[gfm]
        self.dataloader,self.train_data,self.test_data = data
        self.controller = controller
        self.nb = self.dataloader.cardinality()  # number of mini-batches
        self.nep = opt_p['ep']  # number of epochs
        opt_dict = {
            'ADAM':tf.keras.optimizers.Adam,
            'SGD':tf.keras.optimizers.SGD,
            'SGDMOM080':lambda lr, **kwargs: tf.keras.optimizers.SGD(lr, momentum=0.8, **kwargs),
            'SGDMOM090':lambda lr, **kwargs: tf.keras.optimizers.SGD(lr, momentum=0.9, **kwargs),
            'SGDMOM095':lambda lr, **kwargs: tf.keras.optimizers.SGD(lr, momentum=0.95, **kwargs),
            'ADAGRAD':tf.keras.optimizers.Adagrad,
            'ADADELTA':tf.keras.optimizers.Adadelta,
            'RMSPROP':tf.keras.optimizers.RMSprop
        }
        opt = opt_dict[opt_p['opt']]
        # NOTE(review): all three clipping variants are instantiated
        # eagerly; only the one selected by 'cmod' is kept.
        clip_dict = {
            'NONE': opt(opt_p['lr']),
            'VALUE':opt(opt_p['lr'], clipvalue=opt_p['cnum']),
            'NORM':opt(opt_p['lr'],global_clipnorm=opt_p['cnum'])
        }
        self.optimizer = clip_dict[opt_p['cmod']]
        if gf_p[0]==1:
            # Single backward pass through one loss formulation.
            self.LF1=LOSS_FORMULATION(simulator,controller,loss_function,
                Nt,loss_mode,regu_coef,gf_p[1])
            self.LF1_compute = self.LF1.build_loss()
            self.update = self.update_1bp
        if gf_p[0]==2:
            # Combined mode: two formulations, gradients merged by sign.
            self.LF1=LOSS_FORMULATION(simulator,controller,loss_function,
                Nt,loss_mode,regu_coef,gf_p[1])
            self.LF1_compute = self.LF1.build_loss()
            self.LF2=LOSS_FORMULATION(simulator,controller,loss_function,
                Nt,loss_mode,regu_coef,gf_p[2])
            self.LF2_compute = self.LF2.build_loss()
            self.update = self.update_2bp
        self.results = []
    @tf.function
    def update_1bp(self,batch_states):
        # One optimizer step from a single backward pass.
        with tf.GradientTape() as tape:
            loss,lp = self.LF1_compute(batch_states)
        grad = tape.gradient(loss,self.controller.variables)
        self.optimizer.apply_gradients(zip(grad, self.controller.variables))
        return loss,lp,[grad]
    @tf.function
    def update_2bp(self,batch_states):
        # Two backward passes; only sign-consistent gradient entries are
        # applied (see combine_grad_lists).
        with tf.GradientTape() as tape:
            loss,lp = self.LF1_compute(batch_states)
        grad_base = tape.gradient(loss,self.controller.variables)
        with tf.GradientTape() as tape:
            loss,lp = self.LF2_compute(batch_states)
        grad_comp = tape.gradient(loss,self.controller.variables)
        grad = combine_grad_lists(grad_base,grad_comp)
        self.optimizer.apply_gradients(zip(grad, self.controller.variables))
        return loss,lp,[grad,grad_base,grad_comp]
    def mini_batch_update(self, batch_states):
        # Wraps `update` with wall-clock timing and gradient diagnostics.
        t0 = time.time()
        loss,lp,grads = self.update(batch_states)
        t1 = time.time()-t0
        grad_size = get_grad_size(grads[0])
        if len(grads)>1:
            # Two-pass mode: similarity between the two raw gradients.
            cossim = cosine_similarity(grads[1],grads[2])
        else:
            cossim = 0.0
        return loss,lp, grad_size,cossim, t1
    def epoch_update(self, i):
        # One pass over the dataloader; `i` (the epoch index) is unused here.
        lb,lpb,gb,cb,tb = [],[],[],[],[]
        for j, states in enumerate(self.dataloader):
            loss, lossphy,grad_size,cossim, t1 = self.mini_batch_update(states)
            lb.append(loss)
            lpb.append(lossphy)
            gb.append(grad_size)
            cb.append(cossim)
            tb.append(t1)
        l = tf.reduce_mean(lb)
        lp = tf.reduce_mean(lpb)
        g = tf.reduce_mean(gb)
        c = tf.reduce_mean(cb)
        t = tf.reduce_sum(tb)
        # Full-dataset evaluations after the epoch.
        l_train,lptrain = self.LF1_compute(self.train_data)
        l_test,lptest = self.LF1_compute(self.test_data)
        return l,l_train,l_test,lp,lptrain,lptest,g,c,t
    def run(self):
        """Train for self.nep epochs; returns an array with one row per
        epoch: [loss, train_loss, test_loss, phys_loss, phys_train,
        phys_test, grad_size, cos_sim, time]."""
        for i in range(self.nep):
            l, l_train,l_test,lp,lptrain,lptest,g,c,t = self.epoch_update(i)
            tf.print('Epoch: ', i, ' Loss: ', l,
                ' Grad :', g, ' Epoch Time :', t)
            self.results.append([l, l_train,l_test,lp,lptrain,lptest,g,c,t])
        return np.array(self.results)
| 5,483 | Python | .py | 132 | 31.848485 | 98 | 0.588878 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,034 | start.py | tum-pbs_StableBPTT/GuidanceByRepulsion/Simulations/01/start.py | from data import generate_data
from controller import build_fully_connected_network
from simulator import DRIVER_EVADER_SYSTEM
from training import TRAINING_SETUP
import tensorflow as tf
import numpy as np
import argparse,os
#tf.config.run_functions_eagerly(True)
# Command-line hyperparameters for the guidance-by-repulsion experiment.
parser = argparse.ArgumentParser(description='CmdLine Parser', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# MISC
parser.add_argument( '--gpu', default=0, type=str, help='Cuda visible devices')
parser.add_argument( '--save', default=False, type=bool, help='Save model?')
parser.add_argument( '--load', default=0, type=str, help='Load model? Path or 0')
# DATA
parser.add_argument( '--Nd', default=0, type=int, help='Number of data points')
# NETWORK
parser.add_argument( '--width', default=0, type=int, help='Network width')
parser.add_argument( '--depth', default=0, type=int, help='Network depth')
parser.add_argument( '--bias', default=True, type=bool, help='Use bias?')
parser.add_argument( '--zero', default=False, type=bool, help='Use zero initialization?')
# PHYSICS
parser.add_argument( '--T', default=0, type=float, help='Total time (physics system)')
parser.add_argument( '--Nt', default=0, type=int, help='Number of solver time steps')
parser.add_argument( '--NC', default=0, type=str, help='Number of drivers and evaders, Complexity')
parser.add_argument( '--tarx', default=0, type=float, help='Target x')
parser.add_argument( '--tary', default=0, type=float, help='Target y')
# LOSS
parser.add_argument( '--lm', default=0, type=str, help='Loss mode (CONTINUOUS, FINAL)')
parser.add_argument( '--rc', default=0, type=float, help='Regularization coefficient')
# OPTIMIZATION
parser.add_argument( '--gf', default=0, type=str, help='Training mode (gradient flow): NORMAL (BPTT), STOP (one step gradients), PHY')
parser.add_argument( '--opt', default=0, type=str, help='Optimizer: ADAM')
parser.add_argument( '--lr', default=0, type=float, help='Learning rate')
parser.add_argument( '--cmod', default=0, type=str, help='Clipping mode: NONE, VALUE')
parser.add_argument( '--cnum', default=0, type=float, help='Clipping number')
parser.add_argument( '--bs', default=0, type=int, help='Batch size')
parser.add_argument( '--ep', default=0, type=int, help='Number of epochs')
#PATHS
parser.add_argument( '--script_name', default=0, type=str, help='Script name')
parser.add_argument( '--folder_name', default=0, type=str, help='Folder name')
# Collect all CLI arguments into a plain dict.
p = {}
p.update(vars(parser.parse_args()))
# Derived parameters: solver step size, and agent counts parsed from the
# NC string (first character = number of drivers, last character =
# number of evaders).
dt = p['T']/p['Nt']
p['dt']=dt
n_dr = int(p['NC'][0])
p['n_dr']=n_dr
n_ev = int(p['NC'][-1])
# Fixed: this previously wrote to p['n_dr'], clobbering the driver count
# in the saved parameter files.
p['n_ev']=n_ev
for i in p.keys():
    print(i,p[i])
# Find the first unused run directory Data/<folder>/<script>_NNNN.
path = 'Data/'+p['folder_name']+'/'+p['script_name']+'_'
i = 0
pi = str(i).zfill(4)
while(os.path.exists(path+pi)):
    i = i+1
    pi = str(i).zfill(4)
sim_path = path+pi+'/'
results_path = sim_path+'results.txt'
dict_path = sim_path+'params.txt'
dict_np = sim_path+'params.npy'
network_path = sim_path+'network.h5'
os.makedirs(os.path.dirname(sim_path))
# Dump the parameters both human-readable and as a .npy dict.
with open(dict_path, 'x') as file:
    for i in p.keys():
        print(i.ljust(20,' '),p[i],file=file)
np.save(dict_np,p)
### SIMULATION ###
# NOTE(review): CUDA_VISIBLE_DEVICES is set after tensorflow was
# imported — verify it still takes effect here.
os.environ["CUDA_VISIBLE_DEVICES"]=p['gpu']
tf.random.set_seed(42)
train_data,test_data = generate_data(p['Nd'],n_dr,n_ev)
dataloader = tf.data.Dataset.from_tensor_slices(train_data).batch(p['bs'])
data = dataloader,train_data,test_data
if p['load']=='0':
    controller = build_fully_connected_network(p['width'],p['depth'],p['bias'],p['zero'],n_dr,n_ev)
    print('NEW MODEL')
else:
    controller = tf.keras.models.load_model(p['load'])
    print('MODEL LOADED')
# Build the driver/evader simulator and the loss on the evader positions.
dt = p['T']/p['Nt']
DE = DRIVER_EVADER_SYSTEM(n_dr,n_ev,0.1)
dist = 0.5
DE.set_evader_parameters(15.0,4.0,0.2,dist)
DE.set_driver_parameters(0.2,1.0,0.2)
de_step = DE.construct_simulator(dt)
tarx = p['tarx']
tary = p['tary']
de_loss = DE.construct_loss_function([tarx,tary],n_dr)
# NOTE(review): loss_form_params appears to be unused below.
loss_form_params = [de_step,controller,de_loss,p['Nt'],p['lm'],p['rc']]
opt_params = {i:p[i] for i in ['opt','lr','cmod','cnum','bs','ep']}
TS = TRAINING_SETUP(data,de_step,controller,p['Nt'],de_loss,
    p['lm'],p['rc'],p['gf'],opt_params)
res = TS.run()
np.savetxt(results_path,res)
if p['save']: controller.save(network_path)
| 4,307 | Python | .py | 94 | 43.776596 | 137 | 0.68484 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,035 | controller.py | tum-pbs_StableBPTT/GuidanceByRepulsion/Simulations/01/controller.py | import tensorflow as tf
def build_fully_connected_network(width, depth, use_bias, use_zero_initialization,n_dr,n_ev):
    """Build a tanh MLP mapping the full agent state to driver controls.

    The (n_dr+n_ev, 2, 2) state is flattened, passed through `depth`
    hidden layers of `width` units, and the 2*n_dr linear outputs are
    reshaped to (1, n_dr, 2): one 2-D control vector per driver. With
    `use_zero_initialization`, the kernel of the output layer is zeroed
    so the initial controls are all zero.
    """
    hidden_act = tf.keras.activations.tanh
    net_layers = [
        tf.keras.layers.InputLayer(input_shape=(n_dr + n_ev, 2, 2)),
        tf.keras.layers.Reshape((-1,)),
    ]
    net_layers.extend(
        tf.keras.layers.Dense(width, activation=hidden_act, use_bias=use_bias)
        for _ in range(depth)
    )
    net_layers.append(
        tf.keras.layers.Dense(2 * n_dr, activation='linear', use_bias=use_bias)
    )
    net_layers.append(tf.keras.layers.Reshape((1, n_dr, 2)))
    model = tf.keras.models.Sequential(net_layers)
    model.summary()
    if use_zero_initialization:
        # With a bias, the last weight tensor is the bias vector and the
        # kernel sits one position earlier.
        kernel_idx = -2 if use_bias else -1
        weights = model.get_weights()
        weights[kernel_idx] = 0.0 * weights[kernel_idx]
        model.set_weights(weights)
    return model
| 980 | Python | .py | 23 | 35.391304 | 93 | 0.662105 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,036 | loss_formulation.py | tum-pbs_StableBPTT/GuidanceByRepulsion/Simulations/01/loss_formulation.py | import tensorflow as tf
stop = tf.stop_gradient  # shorthand: detaches its argument from the gradient tape
class LOSS_FORMULATION():
    """Unrolls the controlled simulation for Nt steps and builds a loss
    (physics loss + force regularization) under a gradient-flow mode:
        'F' full BPTT (gradients through physics and controller input),
        'P' physics chain only (controller input detached),
        'N' network chain only (simulator input detached),
        'S' both detached (one-step gradients).
    """
    def __init__(self, simulator_time_step, controller, loss_function,
        Nt, loss_mode, regu_coef,gradient_flow):
        self.simulator_time_step = simulator_time_step
        self.controller = controller
        self.loss_function = loss_function
        self.Nt = Nt  # number of unrolled solver steps
        self.loss_mode = loss_mode  # 'FINAL' or 'CONTINUOUS'
        self.gradient_flow = gradient_flow
        self.regu_coef = regu_coef  # weight of the mean squared control force
    def physics_flow(self,state):
        # State fed to the simulator: detached unless gradients may flow
        # through the physics chain.
        if self.gradient_flow in ['F', 'P']:#['full', 'physics']
            stateb = state
        elif self.gradient_flow in ['N','S']:#['network','stop']
            stateb = stop(state)
        return stateb
    def control_flow(self, state):
        # State fed to the controller: detached unless gradients may flow
        # through the network chain.
        if self.gradient_flow in ['F', 'N']:
            control = self.controller(state)
        elif self.gradient_flow in ['P', 'S']:
            control = self.controller(stop(state))
        return control[:,0]
    @tf.function
    def time_evolution(self, initial_state):
        """Roll out Nt steps; returns stacked states (batch, Nt+1, ...)
        and controls (batch, Nt, ...)."""
        states = [initial_state]
        forces = []
        for n in range(1, self.Nt+1):
            control_n = self.control_flow(states[-1])
            state_nb = self.physics_flow(states[-1])
            state_n = self.simulator_time_step(state_nb, control_n)
            forces.append(control_n)
            states.append(state_n)
        states = tf.stack(states, axis=1)
        forces = tf.stack(forces, axis=1)
        return states, forces
    def build_loss(self,):
        """Return a tf.function mapping initial states to
        (total loss, physics loss).

        'FINAL' keeps only the last state (time axis retained);
        'CONTINUOUS' keeps every state after the initial one.
        """
        if self.loss_mode == 'FINAL':
            def choose_states(states): return states[:, -1:, :]
        elif self.loss_mode == 'CONTINUOUS':
            def choose_states(states): return states[:, 1:, :]
        def full_loss(states, forces):
            chosen_states = choose_states(states)
            physics_loss = self.loss_function(chosen_states)
            regularization_loss = tf.reduce_mean(forces**2)
            loss = physics_loss + self.regu_coef * regularization_loss
            return loss, physics_loss
        @tf.function
        def compute_loss(initial_states):
            states, forces = self.time_evolution(initial_states)
            loss, physics_loss = full_loss(states, forces)
            return loss, physics_loss
        return compute_loss
| 2,309 | Python | .py | 54 | 32.759259 | 71 | 0.597321 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,037 | simulator.py | tum-pbs_StableBPTT/GuidanceByRepulsion/Simulations/01/simulator.py | import tensorflow as tf
'''
State tensor layout:
    axis 0: batch
    axis 1: agent number (drivers first, then evaders)
    axis 2: position / velocity (x, v)
    axis 3: spatial dimension (x, y)
i.e. shape (None, n_drivers + n_evaders, 2, 2).
'''
class DRIVER_EVADER_SYSTEM():
    """Driver/evader interaction model.

    State layout: (batch, agent, position/velocity, x/y) with the
    n_drivers drivers first, then the n_evaders evaders. `epsilon`
    regularizes all inverse distances to avoid division by zero.
    """
    def __init__(self,n_drivers,n_evaders,epsilon=0.1) -> None:
        self.n_drivers = n_drivers
        self.n_evaders = n_evaders
        self.epsilon = epsilon
    def set_evader_parameters(self, c_evade, c_fric, c_flock,d_flock):
        # Repulsion-from-drivers strength, friction, flocking strength,
        # and preferred flock distance.
        self.c_evade = c_evade
        self.c_fric_ev = c_fric
        self.c_flock = c_flock
        self.d_flock = d_flock
    def set_driver_parameters(self,c_drive, c_fric, c_spread):
        # NOTE(review): c_drive is stored but F_dr is hard-coded to 0.0
        # in `ode` below, so driving currently comes only from the
        # learned control force.
        self.c_drive = c_drive
        self.c_spread = c_spread
        self.c_fric_dr = c_fric
    def construct_simulator(self,dt):
        """Return a tf.function performing one explicit Euler step of
        size `dt`, given a state and the per-driver control c of shape
        (batch, n_drivers, 2) interpreted as coefficients along the
        center-difference vector and its orthogonal."""
        def dist(x):
            # Regularized Euclidean length over the last axis.
            return self.epsilon + tf.reduce_sum(x**2,-1)**0.5
        def get_orthog(x):
            # Rotate 2-D vectors by 90 degrees: (x1, x2) -> (-x2, x1).
            x1,x2=tf.unstack(x,axis=-1)
            return tf.stack([-x2,x1],axis=-1)
        def center_diff(x_drivers,x_evaders):
            # Driver positions relative to the (summed) evader center.
            center = tf.expand_dims(tf.reduce_sum(x_evaders,axis=1),axis=1)
            return x_drivers-center
        ### CAUSES EVADERS TO RUN AWAY FROM DRIVERS
        def evading_function(d):
            p = -1.0 * d ** -2
            return p
        def F_evading(x_driver,x_evaders):
            relative_position = tf.expand_dims(x_driver,1) - x_evaders
            d = dist(relative_position)
            F = tf.einsum('ij,ijk->ijk',evading_function(d),relative_position)
            return F
        def F_evading_total(x_drivers,x_evaders):
            F = 0
            for i in range(self.n_drivers):
                F += F_evading(x_drivers[:,i,:],x_evaders)
            return F
        ### CAUSES DRIVERS TO STAY CLOSE EVADERS BUT NOT TOO CLOSE
        def driving_function(d):
            r = 2 + 3 * d ** -2 - 4 *d **-4
            return -r
        def F_driving(x_drivers,x_evader):
            relative_position = x_drivers - tf.expand_dims(x_evader,1)
            d = dist(relative_position)
            F = tf.einsum('ij,ijk->ijk',driving_function(d),relative_position)
            return F
        def F_driving_total(x_drivers,x_evaders):
            F = 0
            for i in range(self.n_evaders):
                F += F_driving(x_drivers,x_evaders[:,i,:])
            return F
        ### CAUSES DRIVERS TO SPREAD
        def spreading_function(d):
            r = d**-2
            return r
        def F_spreading(x_drivers,x_driver):
            relative_position = x_drivers - tf.expand_dims(x_driver,1)
            d = dist(relative_position)
            F = tf.einsum('ij,ijk->ijk',spreading_function(d),relative_position)
            return F
        def tf_delete(array,entry):
            # Remove agent `entry` along axis 1.
            a = array[:,:entry,:]
            b = array[:,entry+1:,:]
            c = tf.concat([a,b],axis=1)
            return c
        def tf_insert(array,entry):
            # Re-insert a zero row for agent `entry` along axis 1.
            a = array[:,:entry,:]
            b = array[:,entry:,:]
            c = tf.zeros(([b.shape[0],1,b.shape[2]]),dtype=tf.float32)
            d = tf.concat([a,c,b],axis=1)
            return d
        def F_spreading_total(x_drivers):
            F = 0
            for i in range(self.n_drivers):
                F_i = F_spreading(tf_delete(x_drivers,i),x_drivers[:,i,:])
                F += tf_insert(F_i,i)
            return F
        # CAUSES EVADERS TO STAY CLOSE TO EACH OTHER BUT NOT TOO CLOSE
        def flock_function(d):
            # Attractive beyond d_flock, repulsive below it.
            p = (self.d_flock / d - 1)/d
            return p
        def F_flock(x_evaders,x_evader_i):
            relative_position = tf.expand_dims(x_evader_i,1) - x_evaders
            d = dist(relative_position)
            F = tf.einsum('ij,ijk->ijk',flock_function(d),relative_position)
            return F
        def F_flock_total(x_evaders):
            F = 0
            for i in range(self.n_evaders):
                F_i = F_flock(tf_delete(x_evaders,i),x_evaders[:,i,:])
                F += tf_insert(F_i,i)
            return F
        def ode(u,c):
            # u axes: batch, agent, x/v, spatial
            # c axes: batch, driver, 2 (control coefficients)
            x,v = tf.unstack(u,2,2)
            x_drivers, x_evaders = tf.split(x,[self.n_drivers,self.n_evaders],1)
            v_drivers, v_evaders = tf.split(v,[self.n_drivers,self.n_evaders],1)
            # Control force spanned by the center-difference direction and
            # its orthogonal complement.
            center_vec = center_diff(x_drivers,x_evaders)
            orthog_vec = get_orthog(center_vec)
            F_con = c[...,:1]* center_vec + c[...,1:]*orthog_vec
            F_ev = self.c_evade * F_evading_total(x_drivers,x_evaders)
            F_fric_ev = - self.c_fric_ev * v_evaders
            # NOTE(review): this local shadows the nested function F_flock,
            # which is not called again afterwards.
            F_flock = self.c_flock * F_flock_total(x_evaders)
            F_dr = 0.0
            F_fric_dr = - self.c_fric_dr * v_drivers
            F_spread = self.c_spread * F_spreading_total(x_drivers)
            x_drivers_dot = v_drivers
            x_evaders_dot = v_evaders
            v_drivers_dot = F_con + F_dr + F_fric_dr + F_spread
            v_evaders_dot = F_ev + F_fric_ev + F_flock
            x_dot = tf.concat([x_drivers_dot,x_evaders_dot],axis=1)
            v_dot = tf.concat([v_drivers_dot,v_evaders_dot],axis=1)
            u_dot = tf.stack([x_dot,v_dot],2)
            return u_dot
        @tf.function
        def euler_step(u,c):
            u_dot = ode(u,c)
            u_new = u + u_dot * dt
            return u_new
        return euler_step
    def construct_loss_function(self,target,n_dr):
        """Build a loss: mean squared distance of all evader positions
        (over batch and time) to the 2-D `target` point.

        `n_dr` is unused but kept for interface compatibility.
        """
        # Fixed: `target` was previously overwritten with a hard-coded
        # (0, 0), silently ignoring the caller-supplied [tarx, tary].
        target = tf.reshape(tf.constant(target, dtype=tf.float32),(1,1,1,2))
        @tf.function
        def de_loss(states):
            # states axes: batch, time, agent, x/v, spatial
            x_evaders = states[:,:,self.n_drivers:,0,:]
            diff = x_evaders - target
            l = tf.reduce_mean(diff**2)
            return l
        return de_loss
2,288,038 | data.py | tum-pbs_StableBPTT/GuidanceByRepulsion/Simulations/01/data.py | import numpy as np
def generate_data(Nd, n_dr,n_ev):
N=2*Nd
np.random.seed(42)
states = np.zeros((N,n_dr+n_ev,2,2)).astype(np.float32)
def rectangle(N,xmin,xmax,ymin,ymax):
x = np.random.rand(N,).astype(np.float32)
y = np.random.rand(N,).astype(np.float32)
x = (xmax-xmin) * x + xmin
y = (ymax-ymin) * y + ymin
return np.stack([x,y],axis=-1)
def annulus(N,rmin,rmax):
s1 = np.random.rand(N,).astype(np.float32)
s2 = np.random.rand(N,).astype(np.float32)
phi = 2 * np.pi * s1
r =(rmax-rmin) * s2 + rmin
x = r * np.cos(phi)
y = r * np.sin(phi)
return np.stack([x,y],axis=-1)
# batch object x/v spatial
#drivers
for k in range(n_dr):
states[:,k,0,:] = annulus(N,3,4)
#evaders
com = annulus(N,1,2)
for k in range(n_ev):
states[:,n_dr+k,0,:] = com+rectangle(N,-0.5,0.5,-0.5,0.5)
return states[:Nd],states[Nd:]
| 981 | Python | .py | 28 | 27.75 | 65 | 0.553895 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,039 | plot.py | tum-pbs_StableBPTT/QuantumControl/Plots/01/plot.py | import numpy as np
import matplotlib.pyplot as plt
colors = ['#ef476f','#ffd166','#06d6a0','#073b4c']  # one color per gradient-flow mode
labels={'F':"R",'P':'M','C':"C",'S':'S'}  # mode letter -> legend label
gfm_dict = {'F':0,'P':1,'C':2,'S':3}  # gradient-flow mode letter -> color index
def smooth(a):
    """Moving average with window 30. The front is padded with a[0] so the
    output has the same length as the input."""
    window = 30
    kernel = np.ones(window) / window
    padded = np.concatenate([a[0] * np.ones(window - 1), a])
    return np.convolve(padded, kernel, mode='valid')
def plot_result(run, ax, nc):
    """Plot one run's smoothed curve onto ``ax`` if it matches panel ``nc``.

    run: directory containing results.txt and params.npy.
    nc:  panel selector; nc == 6 is the special "update size" panel and
         reads results column 3 instead of column 2 (per the column order
         written by the training loop: l, l_train, l_test, g, c, t).
    Returns the run's params dict on success, 0 if the run's target state
    does not match this panel.
    """
    if nc==6:
        nc = 5
        pl = 3   # column 3: gradient/update size
    else: pl = 2 # column 2: test loss
    results = np.loadtxt(run+'/results.txt')
    params = np.load(run+'/params.npy',allow_pickle=True).item()
    if params['TS']!=nc:
        return 0  # run belongs to a different target-state panel
    ci1 = gfm_dict[params['gfm']]  # color index for this gradient-flow mode
    curve = results[:,pl]
    curve = smooth(curve)
    ax.plot(curve,label=labels[params['gfm']],color=colors[ci1])
    return params
# Build a 2x2 grid: three target-state loss panels plus an update-size panel.
fig = plt.figure(figsize=(8,5))
gs = fig.add_gridspec(2,2)
ax0 = fig.add_subplot(gs[0, 0])
ax1 = fig.add_subplot(gs[0, 1])
ax2 = fig.add_subplot(gs[1, 0])
ax3 = fig.add_subplot(gs[1, 1])
ax = [ax0,ax1,ax2,ax3]
# nc 3..5 select target-state panels; nc == 6 is the update-size panel.
for nc in [3,4,5,6]:
    runs =[]
    for i in range(48):
        run = 'Data/01/01_'+str(i).zfill(4)
        runs.append(run)
    for i,run in enumerate(runs):
        try:
            params = plot_result(run,ax[nc-3],nc)
        except Exception as Error:
            # Missing or incomplete run directories are silently skipped.
            pass
for iax,axi in enumerate(ax):
    axi.set_title('Target state: '+str(2+iax),fontsize=12)
    axi.set_yscale('log')
    axi.set_xlabel('Epochs',fontsize=12)
    axi.set_ylabel('Loss',fontsize=12)
    if iax!=3:
        # Shared log-axis formatting for the three loss panels.
        axi.set_ylim([16.5,135])
        axi.set_yticks([20,30,40,60,90])
        axi.set_yticklabels(['20','30','40','60','90'])
    if iax==3:
        # Last panel shows gradient-update size instead of loss.
        axi.set_title('Update size',fontsize=12)
        axi.set_ylabel('L2 norm',fontsize=12)
    if iax==0:
        # De-duplicate legend entries (one entry per gradient-flow mode).
        # NOTE(review): 'labels' here shadows the module-level labels dict.
        handles, labels = axi.get_legend_handles_labels()
        handle_list, label_list = [], []
        for handle, label in zip(handles, labels):
            if label not in label_list:
                handle_list.append(handle)
                label_list.append(label)
        axi.legend(handle_list, label_list,loc='upper center',ncol=2,fontsize=9)
plt.tight_layout(rect=[0, 0, 1, 0.98])
plt.savefig('Plots/01/QMC.png')
plt.close()
2,288,040 | qmc_start.py | tum-pbs_StableBPTT/QuantumControl/Simulations/01/qmc_start.py | from qmc_training_setup import TRAINING_SETUP
from qmc_solver import QMC
from model import get_cnn
from data import generate_data
import tensorflow as tf
import numpy as np
import argparse,os
def _str2bool(value):
    """Parse a boolean CLI value.

    argparse's ``type=bool`` is a known pitfall: ``bool("False")`` is True
    because any non-empty string is truthy, so ``--save False`` used to
    enable saving. This helper accepts the usual boolean spellings.
    """
    return str(value).strip().lower() in ('1', 'true', 'yes', 'y')


parser = argparse.ArgumentParser(description='CmdLine Parser', formatter_class=argparse.ArgumentDefaultsHelpFormatter)

# MISC
# str-typed flags now default to the string '0' (previously the int 0,
# which broke downstream string comparisons/usages when a flag was omitted).
parser.add_argument('--gpu', default='0', type=str, help='Cuda visible devices')
parser.add_argument('--save', default=False, type=_str2bool, help='Save model?')
parser.add_argument('--load', default='0', type=str, help='Load model? Path or 0')
parser.add_argument('--weighting', default='0', type=str, help='Dataset: weighting of eigenstates')

# DATA
parser.add_argument('--Nd', default=0, type=int, help='Number of data points')

# NETWORK
parser.add_argument('--width', default=0, type=int, help='Network width')
# NOTE(review): '--depth' is parsed but not used by this script (get_cnn
# takes no depth argument) -- kept for CLI compatibility.
parser.add_argument('--depth', default=0, type=int, help='Network depth')
parser.add_argument('--bias', default=True, type=_str2bool, help='Use bias?')
parser.add_argument('--zero', default=False, type=_str2bool, help='Use zero initialization?')

# PHYSICS
parser.add_argument('--Nx', default=0, type=int, help='Resolution (physics system)')
parser.add_argument('--dt', default=0, type=float, help='Time step (physics system)')
parser.add_argument('--Nt', default=0, type=int, help='Number of solver time steps')
parser.add_argument('--TS', default=0, type=int, help='Target state (number of eigenstate: 1-groundstate, 2-first excited state, ...)')

# LOSS
parser.add_argument('--LT', default='0', type=str, help='Loss type (CONTINUOUS, FINAL)')

# GRADIENT FLOW
parser.add_argument('--gfm', default='0', type=str, help='Gradient flow mode')

# OPTIMIZATION
parser.add_argument('--opt', default='0', type=str, help='Optimizer: ADAM')
parser.add_argument('--lr', default=0, type=float, help='Learning rate')
parser.add_argument('--cmod', default='0', type=str, help='Clipping mode: NONE, VALUE')
parser.add_argument('--cnum', default=0, type=float, help='Clipping number')
parser.add_argument('--bs', default=0, type=int, help='Batch size')
parser.add_argument('--ep', default=0, type=int, help='Number of epochs')

# PATHS
parser.add_argument('--script_name', default='0', type=str, help='Script name')
parser.add_argument('--folder_name', default='0', type=str, help='Folder name')
# Collect the parsed CLI options into a plain dict and echo them.
p = {}
p.update(vars(parser.parse_args()))
for i in p.keys():
    print(i,p[i])

# Find the first unused run directory Data/<folder>/<script>_NNNN.
path = 'Data/'+p['folder_name']+'/'+p['script_name']+'_'
i = 0
pi = str(i).zfill(4)
while(os.path.exists(path+pi)):
    i = i+1
    pi = str(i).zfill(4)
sim_path = path+pi+'/'
results_path = sim_path+'results.txt'   # per-epoch metrics (np.savetxt)
dict_path = sim_path+'params.txt'       # human-readable parameter dump
dict_np = sim_path+'params.npy'         # machine-readable parameter dump
network_path = sim_path+'network.h5'    # optional saved model
os.makedirs(os.path.dirname(sim_path))
# 'x' mode: fail loudly if the params file somehow already exists.
with open(dict_path, 'x') as file:
    for i in p.keys():
        print(i.ljust(20,' '),p[i],file=file)
np.save(dict_np,p)
# Make runs reproducible/deterministic before touching TF state.
os.environ['PYTHONHASHSEED'] = '42'
os.environ['TF_DETERMINISTIC_OPS'] = '1'
os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
np.random.seed(42)
tf.random.set_seed(42)

### SIMULATION ###
os.environ["CUDA_VISIBLE_DEVICES"]=p['gpu']
# Draw 2*Nd superposition states; generate_data returns (train, test) halves.
train,test = generate_data(p['Nd']*2,p['Nx'],p['weighting'])
dataloader = tf.data.Dataset.from_tensor_slices(train).batch(p['bs'])
data = [dataloader,train,test]
# NOTE(review): if '--load' defaults to the integer 0 (see argparse setup),
# the string comparison below is False when the flag is omitted and
# load_model(0) would crash -- verify callers always pass --load explicitly.
if p['load']=='0':
    network = get_cnn(p['Nx'],p['width'],p['bias'],p['zero'])
    print('NEW MODEL')
else:
    network = tf.keras.models.load_model(p['load'])
    print('MODEL LOADED')
solver = QMC(p['dt'],p['Nx']).step   # batched one-step propagator
opt_p = {i:p[i] for i in ['opt','lr','cmod','cnum','bs','ep']}
ts = TRAINING_SETUP(data,network,solver,p['Nt'],p['Nx'],p['TS'],p['LT'],p['gfm'],opt_p)
res = ts.run()
res = np.array(res)
np.savetxt(results_path,res)
if p['save']: network.save(network_path)
| 3,824 | Python | .py | 82 | 44.780488 | 140 | 0.687651 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,041 | qmc_training_setup.py | tum-pbs_StableBPTT/QuantumControl/Simulations/01/qmc_training_setup.py | from qmc_solver import *
from qmc_loss_formulation import *
import numpy as np
import time
def combine_grads(grad_i_base, grad_i_comp):
    """Keep entries of the base gradient whose sign matches the comparison
    gradient; zero out the entries that disagree."""
    signs_agree = tf.math.sign(grad_i_base) == tf.math.sign(grad_i_comp)
    return tf.where(signs_agree, grad_i_base, 0)
def combine_grad_lists(grad_base, grad_comp):
    """Apply combine_grads position-wise across two parallel gradient lists."""
    return [
        combine_grads(grad_base[idx], grad_comp[idx])
        for idx in range(len(grad_base))
    ]
@tf.function
def get_grad_size(grad):
    """Sum of squared entries over all tensors in a gradient list
    (i.e. the squared global L2 norm of the update)."""
    grad_size = tf.constant(0.0, dtype=tf.float32)
    for weight in grad:
        grad_size += tf.reduce_sum(weight**2)
    return grad_size
def flatten(tensor_list):
    """Concatenate every tensor in the list into one flat 1-D tensor."""
    flat_pieces = [tf.reshape(t, (-1,)) for t in tensor_list]
    return tf.concat(flat_pieces, axis=0)
def norm(v):
    """Euclidean (L2) norm of a flat tensor."""
    return tf.reduce_sum(v*v)**0.5
def ip(v,w):
    """Inner (dot) product of two flat tensors."""
    return tf.reduce_sum(v*w)
@tf.function
def cosine_similarity(grad1,grad2):
    """Cosine similarity between two gradient lists, compared as flat vectors."""
    v = flatten(grad1)
    w = flatten(grad2)
    return ip(v,w)/(norm(v)*norm(w))
class TRAINING_SETUP():
    """Wire data, controller network, solver and optimizer into a BPTT loop.

    The gradient-flow mode ``gfm`` selects the backprop variant per update:
      'F'/'P'/'S': a single backward pass through the corresponding
                   LOSS_FORMULATION flow mode;
      'C':         two backward passes ('P' and 'F'); the 'P' gradient is
                   kept only where its sign agrees with the 'F' gradient.
    """
    def __init__(self,data,controller,simulator,Nt,Nx,target_state,loss_type,gfm,opt_p):
        # Per mode: [number of backward passes, flow mode(s) to build].
        gf_dict = {
            'F':[1,'F'],
            'P':[1,'P'],
            'S':[1,'S'],
            'C':[2,'P','F'],
        }
        gf_p = gf_dict[gfm]
        self.dataloader,self.train_data,self.test_data=data
        self.controller = controller
        self.nb = self.dataloader.cardinality()  # number of mini-batches
        self.nep = opt_p['ep']                   # number of training epochs
        opt_dict = {
            'ADAM':tf.keras.optimizers.Adam,
            'SGD':tf.keras.optimizers.SGD,
            'SGDMOM080':lambda lr, **kwargs: tf.keras.optimizers.SGD(lr, momentum=0.8, **kwargs),
            'SGDMOM090':lambda lr, **kwargs: tf.keras.optimizers.SGD(lr, momentum=0.9, **kwargs),
            'SGDMOM095':lambda lr, **kwargs: tf.keras.optimizers.SGD(lr, momentum=0.95, **kwargs),
            'ADAGRAD':tf.keras.optimizers.Adagrad,
            'ADADELTA':tf.keras.optimizers.Adadelta,
            'RMSPROP':tf.keras.optimizers.RMSprop
        }
        opt = opt_dict[opt_p['opt']]
        # NOTE(review): all three clipping variants are instantiated eagerly
        # here and only one is kept -- wasteful but harmless.
        clip_dict = {
            'NONE': opt(opt_p['lr']),
            'VALUE':opt(opt_p['lr'], clipvalue=opt_p['cnum']),
            'NORM':opt(opt_p['lr'],global_clipnorm=opt_p['cnum'])
        }
        self.optimizer = clip_dict[opt_p['cmod']]
        self.results = []
        # Build one or two loss formulations and pick the update rule.
        if gf_p[0]==1:
            self.LF1=LOSS_FORMULATION(controller,simulator,Nt,Nx,target_state,gf_p[1],loss_type)
            self.update = self.update_1bp
        if gf_p[0]==2:
            self.LF1=LOSS_FORMULATION(controller,simulator,Nt,Nx,target_state,gf_p[1],loss_type)
            self.LF2=LOSS_FORMULATION(controller,simulator,Nt,Nx,target_state,gf_p[2],loss_type)
            self.update = self.update_2bp

    @tf.function
    def update_1bp(self,batch_states):
        """Single-backprop update: gradient of LF1's loss, then one optimizer step."""
        with tf.GradientTape() as tape:
            loss = self.LF1.compute_loss(batch_states)
        grad = tape.gradient(loss,self.controller.variables)
        self.optimizer.apply_gradients(zip(grad, self.controller.variables))
        return loss,[grad]

    @tf.function
    def update_2bp(self,batch_states):
        """Two-backprop update: LF1 gradient filtered by sign agreement with LF2's."""
        with tf.GradientTape() as tape:
            loss = self.LF1.compute_loss(batch_states)
        grad_base = tape.gradient(loss,self.controller.variables)
        with tf.GradientTape() as tape:
            loss = self.LF2.compute_loss(batch_states)
        grad_comp = tape.gradient(loss,self.controller.variables)
        grad = combine_grad_lists(grad_base,grad_comp)
        self.optimizer.apply_gradients(zip(grad, self.controller.variables))
        # The returned loss is LF2's; gradients: combined, base, comparison.
        return loss,[grad,grad_base,grad_comp]

    def mini_batch_update(self,ys):
        """Run one update; return loss, gradient size, cosine similarity, wall time."""
        t0 = time.time()
        loss,grads = self.update(ys)
        t1 = time.time()-t0
        grad_size = get_grad_size(grads[0])
        if len(grads)>1:
            # Two-backprop mode: similarity of base vs. comparison gradients.
            cossim = cosine_similarity(grads[1],grads[2])
        else:
            cossim = 0.0
        return loss, grad_size,cossim, t1

    def epoch_update(self, i):
        """One pass over the dataloader; returns epoch-aggregated statistics."""
        lb,gb,cb,tb = [],[],[],[]
        for j, states in enumerate(self.dataloader):
            loss, grad_size,cossim, t1 = self.mini_batch_update(states)
            lb.append(loss)
            gb.append(grad_size)
            cb.append(cossim)
            tb.append(t1)
        l = tf.reduce_mean(lb)
        g = tf.reduce_mean(gb)
        c = tf.reduce_mean(cb)
        t = tf.reduce_sum(tb)
        # Full-dataset monitoring losses (always evaluated via LF1).
        l_train = self.LF1.compute_loss(self.train_data)
        l_test = self.LF1.compute_loss(self.test_data)
        return l, l_train,l_test,g,c,t

    def run(self):
        """Train for self.nep epochs; returns the per-epoch results array."""
        for i in range(self.nep):
            l, l_train,l_test,g,c,t = self.epoch_update(i)
            tf.print('Epoch: ', i, ' Loss: ', l,
                     ' Grad :', g, ' Epoch Time :', t)
            self.results.append([l, l_train,l_test,g,c,t])
        return np.array(self.results)
| 4,993 | Python | .py | 123 | 31.609756 | 98 | 0.59975 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,042 | qmc_solver.py | tum-pbs_StableBPTT/QuantumControl/Simulations/01/qmc_solver.py | import tensorflow as tf
from tensorflow.python.ops import gen_array_ops
import numpy as np
def normalize_probability(psi):
    """Rescale a wave function so its total probability sums to one."""
    total = np.sum(np.abs(psi) ** 2)
    return psi / total ** 0.5
@tf.function
def to_real(a_c):
    """Represent a complex tensor as floats with a trailing (real, imag) axis."""
    a_r = tf.stack([tf.math.real(a_c), tf.math.imag(a_c)], axis=-1)
    return a_r
@tf.function
def to_complex(a_r):
    """Inverse of to_real: fold a trailing (real, imag) axis back into complex64."""
    k = tf.cast(a_r, tf.complex64)
    a_c = k[..., 0] + 1.0j * k[..., 1]
    return a_c
def eigenstate(n, L):
    """Return the n-th infinite-well eigenstate on L interior grid points.

    The sine wave is sampled on L+2 points including the two boundary
    nodes, normalized to unit total probability, and returned with the
    boundary nodes stripped.
    """
    grid = np.arange(L + 2, dtype=np.complex64)
    wave_number = np.pi * n / (L + 1)
    wave = normalize_probability(np.sin(wave_number * grid))
    return wave[1:-1]
@tf.function
def ip_loss(ar,br):
    """Infidelity loss 1 - |<a|b>|^2 per sample, summed over the batch.

    ar and br are real-valued stacks (batch, spatial, 2) of complex states.
    """
    a = to_complex(ar)
    b = to_complex(br)
    ip_batch = tf.reduce_sum(tf.math.conj(a)*b,axis=1)
    loss_dp = 1-tf.abs(ip_batch)**2
    loss = tf.reduce_sum(loss_dp)
    return loss
class QMC:
    """Batched one-step propagator for a 1-D controlled quantum wave function.

    The domain is [0, 2] discretized on Nx interior points; ``step`` advances
    a batch of states (batch, Nx, 2) one time step dt, with the scalar
    control entering as a linear potential control * x.
    """
    def __init__(self, dt, Nx):
        self.Nx = Nx
        self.dt = dt
        self.xmin = 0
        self.xmax = 2
        self.L = self.xmax - self.xmin        # domain length
        self.dx = self.L / (self.Nx + 1)      # grid spacing incl. boundary nodes
        self.step = self.construct_solver()

    def construct_solver(self):
        """Build and return the tf.function performing one implicit time step."""
        # Bind scalars locally so the traced function closes over constants.
        xmin = self.xmin
        xmax = self.xmax
        Nx = self.Nx
        dx = self.dx
        dt = self.dt
        @tf.function
        def qm_step_batch(psi_batch, control_batch):
            # psi_batch: (batch, Nx, 2) real/imag; control_batch: one scalar per sample.
            psi_batch = to_complex(psi_batch)
            control=tf.reshape(control_batch,(-1,1))
            # Interior grid coordinates, shape (1, Nx).
            therange=tf.reshape(tf.range(xmin, xmax, delta=dx, dtype=tf.float32, name='range')[1:],(1,-1))
            # Linear control potential, pre-scaled by 0.5j*dt.
            pot_batch=0.5j * dt * tf.cast(tf.tensordot(control,therange,axes=(1,0)),tf.complex64)
            "(batch,spatial)"
            batch_size = psi_batch.shape[0]
            spatial_size = psi_batch.shape[1]
            # Tridiagonal coefficients. NOTE(review): the U_1/U_2 structure
            # looks like a Crank-Nicolson (Cayley-form) step solving
            # U_1 psi_new = U_2 psi -- confirm against the derivation.
            alpha_batch = 1.j*(0.5 * dt * tf.ones((batch_size,spatial_size), dtype=tf.complex64) / dx / dx)
            gamma_batch = tf.ones((batch_size,spatial_size), dtype=tf.complex64) - 1.j * dt / dx / dx
            eta_batch = tf.ones((batch_size,spatial_size), dtype=tf.complex64) + 1.j * dt / dx / dx
            U_2_diag = gamma_batch - pot_batch
            U_2_subdiag = alpha_batch
            U_2_stack = tf.stack([U_2_subdiag,U_2_diag,U_2_subdiag],axis=1)
            U_2_batch = gen_array_ops.matrix_diag_v2(U_2_stack, k=(-1, 1), num_rows=-1, num_cols=-1, padding_value=0)
            U_1_diag = eta_batch + pot_batch
            U_1_subdiag = - alpha_batch
            U_1_stack = tf.stack([U_1_subdiag,U_1_diag,U_1_subdiag],axis=1)
            U_1_batch = gen_array_ops.matrix_diag_v2(U_1_stack, k=(-1, 1), num_rows=-1, num_cols=-1, padding_value=0)
            # Right-hand side b = U_2 @ psi; the transpose/diag_part dance
            # extracts each sample's own matrix-vector product.
            psi_batch_1 = tf.expand_dims(psi_batch,-1)
            b_batch = tf.tensordot(U_2_batch, psi_batch_1,axes=(2,1))
            b_batch1 = tf.transpose(b_batch,perm=(1,3,0,2))
            b_batch2 = tf.linalg.diag_part(b_batch1)
            b_batch3 = tf.transpose(b_batch2,perm=(2,0,1))
            # Solve the implicit system, then return to the real representation.
            phi_t_batch = tf.linalg.solve(U_1_batch, b_batch3)[:,:,0]
            phi_t_batch = to_real(phi_t_batch)
            return phi_t_batch
        return qm_step_batch
| 3,161 | Python | .py | 75 | 33.706667 | 117 | 0.57428 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,043 | model.py | tum-pbs_StableBPTT/QuantumControl/Simulations/01/model.py | import tensorflow as tf
def get_cnn(Nx,non,bias,zinit=True):
    """Build the convolutional controller network.

    Args:
        Nx: Spatial resolution; the input has shape (Nx, 2) (real/imag).
        non: Number of filters in each convolution (network width).
        bias: Whether the final dense layer uses a bias term.
        zinit: If True, zero the network's last weight tensor so a fresh
            network outputs (near-)zero controls.
            NOTE(review): with use_bias=True the last tensor is the dense
            bias, not the kernel -- confirm this is the intended behavior.

    Returns:
        A tf.keras Sequential model mapping (Nx, 2) -> scalar control.
    """
    act = tf.keras.activations.tanh
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=(Nx,2)),
        tf.keras.layers.Reshape((Nx,2,1)),
        tf.keras.layers.Conv2D(non,(3,2),2, activation=act),
        tf.keras.layers.Reshape((Nx//2-1,-1)),
        tf.keras.layers.Conv1D(non,3,2, activation=act),
        tf.keras.layers.Reshape((-1,)),
        tf.keras.layers.Dense(1, activation='linear',use_bias=bias)
    ])
    if zinit:
        weights = model.get_weights()
        weights[-1] = weights[-1] * 0.0
        model.set_weights(weights)
    # (Removed: an unused 'init_weights' snapshot taken after initialization.)
    return model
| 737 | Python | .py | 20 | 29.9 | 67 | 0.62605 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,044 | qmc_loss_formulation.py | tum-pbs_StableBPTT/QuantumControl/Simulations/01/qmc_loss_formulation.py | import tensorflow as tf
from qmc_solver import to_complex,eigenstate
import numpy as np
stop = tf.stop_gradient
class LOSS_FORMULATION():
    """Build a differentiable loss for the controlled quantum time evolution.

    gradient_flow (one mode per step) controls which backprop paths survive:
      'F' full; 'P' physics path only (network input stopped);
      'N' network path only (physics input stopped); 'S' both stopped.
    loss_type selects whether the loss accumulates over every step
    ('CONTINUOUS') or only penalizes the final state ('FINAL').
    """
    def __init__(self,network,solver,Nt,Nx,target_state,gradient_flow,loss_type):
        self.solver = solver
        self.network = network
        self.Nt = Nt
        self.Nx = Nx
        self.target_state = target_state
        self.loss_type = loss_type
        # Accept either one mode per step or a single mode used for all steps.
        if type(gradient_flow) is list:
            self.gradient_flow = gradient_flow
        else:
            self.gradient_flow = [gradient_flow] * Nt
        self.compute_loss = self.construct_compute_loss()

    def physics_flow(self,x0,gradient_flow):
        """State fed to the solver; the gradient is cut for 'N'/'S' modes."""
        if gradient_flow in ['F', 'P']:#['full', 'physics']
            x0b = x0
        elif gradient_flow in ['N','S']:#['network','stop']
            x0b = stop(x0)
        return x0b

    def control_flow(self,x0,gradient_flow):
        """Control from the network; its input gradient is cut for 'P'/'S' modes."""
        if gradient_flow in ['F', 'N']:
            c1 = self.network(x0)
        elif gradient_flow in ['P','S']:
            c1 = self.network(stop(x0))
        return c1

    def time_evolution(self,x0):
        """Roll the closed loop (network + solver) forward for Nt steps.

        Returns the stacked trajectory with time as axis 1 (Nt+1 entries).
        """
        xs = [x0]
        for n in range(self.Nt):
            cn = self.control_flow(xs[-1],self.gradient_flow[n])
            xnb = self.physics_flow(xs[-1],self.gradient_flow[n])
            xn = self.solver(xnb,cn)
            xs.append(xn)
        xs = tf.stack(xs,axis=1)
        return xs

    def ip_loss(self,ar,br):
        """Infidelity 1 - |<a|b>|^2, summed over time, averaged over the batch."""
        a = to_complex(ar)
        b = to_complex(br)
        ip_batch_time = tf.reduce_sum(tf.math.conj(a)*b,axis=-1)
        loss_batch_time = 1-tf.abs(ip_batch_time)**2
        loss_batch = tf.reduce_sum(loss_batch_time,axis=-1)
        loss = tf.reduce_mean(loss_batch)
        return loss

    def construct_compute_loss(self):
        """Return a tf.function computing the loss against the target eigenstate."""
        # Target eigenstate as a real/imag stacked array (imaginary part zero).
        psi2 = eigenstate(self.target_state, self.Nx).reshape((1, -1))
        es2 = np.stack([psi2, psi2 * 0], axis=-1)
        if self.loss_type=="CONTINUOUS":
            @tf.function
            def qm_compute_loss(x0):
                xs = self.time_evolution(x0)
                xp = xs[:,1:,:,:]      # every state after the initial one
                l = self.ip_loss(xp,es2)
                return l
            return qm_compute_loss
        elif self.loss_type=="FINAL":
            @tf.function
            def qm_compute_loss_final(x0):
                xs = self.time_evolution(x0)
                xp = xs[:,-1:,:,:]     # final state only
                l = self.ip_loss(xp,es2)
                return l
            return qm_compute_loss_final
| 2,518 | Python | .py | 65 | 27.676923 | 81 | 0.550524 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,045 | data.py | tum-pbs_StableBPTT/QuantumControl/Simulations/01/data.py | import numpy as np
from qmc_solver import *
def generate_data(N, Nx, weighting='1-1'):
    """Generate random superpositions of the first two eigenstates.

    Each sample is c1*e^{i p1}*|1> + c2*e^{i p2}*|2> with normally
    distributed magnitudes (scaled by the weighting) and uniform phases,
    normalized and converted to the real (…, 2) representation.

    Args:
        N: Total number of samples; split into two halves (train, test).
        Nx: Spatial resolution passed to eigenstate().
        weighting: One of '1-1', '3-1', '7-1', '15-1' -- relative scale of
            the two eigenstate coefficients.

    Returns:
        (train, test) tensors of N//2 samples each.

    Raises:
        ValueError: For an unknown weighting (previously this fell through
        and crashed later with a NameError on w1/w2).
    """
    es1 = eigenstate(1, Nx).reshape((1, -1))
    es2 = eigenstate(2, Nx).reshape((1, -1))
    weight_table = {'1-1': (1, 1), '3-1': (3, 1), '7-1': (7, 1), '15-1': (15, 1)}
    try:
        w1, w2 = weight_table[weighting]
    except KeyError:
        raise ValueError(
            f"unknown weighting {weighting!r}; expected one of {sorted(weight_table)}"
        ) from None
    c1 = np.random.normal(size=(N, 1)).astype(np.float32) * w1
    c2 = np.random.normal(size=(N, 1)).astype(np.float32) * w2
    p1 = np.random.uniform(0, 2 * np.pi, size=(N, 1)).astype(np.float32)
    p2 = np.random.uniform(0, 2 * np.pi, size=(N, 1)).astype(np.float32)
    norm = np.sqrt(c1**2 + c2**2)
    f1 = c1 * np.exp(1.j * p1) / norm
    f2 = c2 * np.exp(1.j * p2) / norm
    states = tf.tensordot(f1, es1, axes=(1, 0)) + tf.tensordot(f2, es2, axes=(1, 0))
    states = to_real(states)
    n2 = N // 2
    train = states[:n2]
    test = states[n2:]
    return train, test
| 921 | Python | .py | 26 | 30.076923 | 76 | 0.589888 | tum-pbs/StableBPTT | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,046 | main.py | password123456_some-tweak-to-hide-jwt-payload-values/main.py | import jwt
import base64
from datetime import datetime
import hashlib
# Get the current timestamp in seconds
def get_current_unix_timestamp():
    """Return the current Unix time, truncated to whole seconds, as a string."""
    now_seconds = int(datetime.now().timestamp())
    return str(now_seconds)
# Convert the current Unix timestamp to a human-readable format
def get_human_readable_timestamp(timestamp):
    """Format a Unix timestamp (seconds, str or int) as 'YYYY-MM-DD HH:MM:SS'
    in local time."""
    moment = datetime.fromtimestamp(int(timestamp))
    return moment.strftime("%Y-%m-%d %H:%M:%S")
# Encode the user ID with the timestamp using XOR operation
def encode_userid_with_timestamp(userid, current_unix_timestamp, secret_key):
    """Obfuscate a user id for embedding in a token payload.

    Builds "<userid>|<sha256(timestamp + key)[:32]>", XORs it with the
    repeating secret key, and returns the result Base64-encoded.
    """
    # Hash the timestamp (salted with the key) and keep 32 hex characters.
    digest = hashlib.sha256(
        (current_unix_timestamp + secret_key.decode()).encode()
    ).hexdigest()[:32]
    payload = (userid + "|" + digest).encode()
    # Repeat the key to cover the payload, then XOR byte-by-byte.
    key_stream = (secret_key * (len(payload) // len(secret_key) + 1))[:len(payload)]
    masked = bytes(p ^ k for p, k in zip(payload, key_stream))
    return base64.b64encode(masked).decode()
# Decode the XOR-encoded data
def decode_userid_timestamp(encoded_data, key):
    """Reverse encode_userid_with_timestamp.

    Base64-decodes the token, XORs it with the repeating key, and splits
    the plaintext on '|' into (userid, hashed_timestamp).
    """
    masked = base64.b64decode(encoded_data.encode())
    key_stream = (key * (len(masked) // len(key) + 1))[:len(masked)]
    plain = bytes(m ^ k for m, k in zip(masked, key_stream)).decode()
    userid, hashed_timestamp = plain.split("|")
    return userid, hashed_timestamp
def main():
    """Demo: build a JWT whose 'userid' claim is XOR-obfuscated, then decode it."""
    userid = "23243232"
    xor_secret_key = b'generally_user_salt_or_hash_or_random_uuid_this_value_must_be_in_dbms'
    jwt_secret_key = 'yes_your_service_jwt_secret_key'
    # NOTE(review): 'timesstamp' is a typo, but it is only a local name.
    current_unix_timesstamp = get_current_unix_timestamp()
    human_readable_timestamp = get_human_readable_timestamp(current_unix_timesstamp)
    # Obfuscate the user id, then round-trip it to show the decode path.
    encoded_userid_timestamp = encode_userid_with_timestamp(userid, current_unix_timesstamp, xor_secret_key)
    decoded_userid, hashed_timestamp = decode_userid_timestamp(encoded_userid_timestamp, xor_secret_key)
    # Sign the token with the JWT secret; the payload hides the raw user id.
    jwt_token = jwt.encode({'timestamp': human_readable_timestamp, 'userid': encoded_userid_timestamp}, jwt_secret_key, algorithm='HS256')
    decoded_token = jwt.decode(jwt_token, jwt_secret_key, algorithms=['HS256'])
    print("")
    print("- Current Unix Timestamp:", current_unix_timesstamp)
    print("- Current Unix Timestamp to Human Readable:", human_readable_timestamp)
    print("")
    print("- userid:", userid)
    print("- XOR Symmetric key:", xor_secret_key)
    print("- JWT Secret key:", jwt_secret_key)
    print("")
    print("- Encoded UserID and Timestamp:", encoded_userid_timestamp)
    print("- Decoded UserID and Hashed Timestamp:", decoded_userid + "|" + hashed_timestamp)
    print("")
    print("- JWT Token:", jwt_token)
    print("- Decoded JWT:", decoded_token)
# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 3,249 | Python | .py | 63 | 47.126984 | 138 | 0.718483 | password123456/some-tweak-to-hide-jwt-payload-values | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,047 | backtest_visualization.ipynb | JurajZelman_airl-market-making/backtest_visualization.ipynb | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Visualization of backtests\n",
"\n",
"This notebook can be used for the visualization of backtest results for the comparison of pure market making strategies and the trained AIRL strategy."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import pickle\n",
"import time\n",
"\n",
"import pandas as pd\n",
"import polars as pl\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.dates as mdates\n",
"\n",
"from datetime import datetime\n",
"\n",
"from lob.backtest_metrics import drawdowns\n",
"from lob.commissions import BitCommissions\n",
"from lob.exchange import Exchange\n",
"from lob.traders import PureMarketMaker\n",
"from lob.plots import (\n",
" set_plot_style,\n",
" visualize_backtest,\n",
" make_drawdown_plot,\n",
" make_plot\n",
")\n",
"from lob.utils import get_lot_size, get_tick_size, ensure_dir_exists\n",
"from rl.utils import send_notification"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Configure Polars \n",
"cfg = pl.Config()\n",
"cfg.set_tbl_rows(20)\n",
"\n",
"# Configure plotting\n",
"set_plot_style()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Define custom colors\n",
"color_green = \"#13961a\"\n",
"color_red = \"#eb5c14\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set random seed\n",
"SEED = 1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"save_dir = \"images\"\n",
"ensure_dir_exists(save_dir)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Visualize backtests for PMM with priority 1 and 100 volume"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"save_path = \"automated_backtests/results_2024-02-25_10-58-39.pickle\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load the results from a pickle file\n",
"with open(save_path, \"rb\") as handle:\n",
" results = pickle.load(handle)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"### PMM priority 1 volume 100\n",
"\n",
"# Load the results\n",
"ts = results[\"PMM_prior_1_vol_100\"][\"timestamps\"]\n",
"trader_stats = results[\"PMM_prior_1_vol_100\"][\"trader_stats\"]\n",
"initial_cost = results[\"PMM_prior_1_vol_100\"][\"initial_cost\"]\n",
"\n",
"# Plot the results\n",
"# ----------------------------------------------------------------------------\n",
"# PLOT - Adjusted PnL\n",
"make_plot(\n",
" x=ts,\n",
" y=trader_stats[\"adj_pnl\"],\n",
" xlabel=\"Time\",\n",
" ylabel=\"P&L (USDT)\",\n",
" save_path=os.path.join(save_dir, \"PMM_pnl_100.pdf\")\n",
")\n",
"print(f\"Final P&L: {trader_stats['adj_pnl'][-1]}\")\n",
"\n",
"# PLOT - Returns\n",
"equity = pd.Series(np.array(trader_stats[\"adj_pnl\"]) + initial_cost)\n",
"returns = equity.pct_change() * 100\n",
"make_plot(\n",
" x=ts,\n",
" y=returns,\n",
" xlabel=\"Time\",\n",
" ylabel=\"Returns (%)\",\n",
" save_path=os.path.join(save_dir, \"PMM_returns_100.pdf\")\n",
")\n",
"print(\"Returns stats\")\n",
"print(returns.describe())\n",
"\n",
"# PLOT - Drawdowns\n",
"dd = drawdowns(equity)\n",
"make_drawdown_plot(\n",
" x=ts,\n",
" y=dd,\n",
" xlabel=\"Time\",\n",
" ylabel=\"Drawdown (%)\",\n",
" save_path=os.path.join(save_dir, \"PMM_drawdowns_100.pdf\")\n",
")\n",
"print(\"Drawdown stats\")\n",
"print(dd.describe())\n",
"\n",
"# PLOT - Inventory\n",
"make_plot(\n",
" x=ts,\n",
" y=trader_stats[\"inventory\"],\n",
" xlabel=\"Time\",\n",
" ylabel=\"Inventory (SOL)\",\n",
" color=\"darkorange\",\n",
" save_path=os.path.join(save_dir, \"PMM_inventory_100.pdf\")\n",
")\n",
"print(\"Inventory stats\")\n",
"print(pd.Series(trader_stats[\"inventory\"]).describe())\n",
"\n",
"# PLOT - Total traded volume\n",
"make_plot(\n",
" x=ts,\n",
" y=trader_stats[\"total_volume\"],\n",
" xlabel=\"Time\",\n",
" ylabel=\"Traded volume (USDT)\",\n",
" ylim=(-40000, 840000),\n",
" color=\"darkorange\",\n",
" save_path=os.path.join(save_dir, \"PMM_volume_100.pdf\")\n",
")\n",
"print(\"Total volume: \", trader_stats[\"total_volume\"][-1])\n",
"\n",
"# PLOT - Transaction costs\n",
"make_plot(\n",
" x=ts,\n",
" y=trader_stats[\"cum_costs\"],\n",
" xlabel=\"Time\",\n",
" ylabel=\"Transaction fees (USDT)\",\n",
" ylim=(-20, 420),\n",
" color=\"red\",\n",
" save_path=os.path.join(save_dir, \"PMM_fees_100.pdf\")\n",
")\n",
"print(\"Total fees: \", trader_stats[\"cum_costs\"][-1])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from scipy.stats import gaussian_kde"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"returns.dropna()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Histogram of returns with KDE\n",
"fig = plt.figure(figsize=(12, 4.5))\n",
"plt.hist(returns, bins=50, alpha=1, log=True)\n",
"# Add kernel density estimate\n",
"plt.xlabel(\"Returns (%)\")\n",
"plt.ylabel(\"Frequency\")\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"PMM_returns_hist_100.pdf\"))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Comparison of PMM strategies with volume 100"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"save_path = \"automated_backtests/results_2024-02-25_10-58-39.pickle\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load the results from a pickle file\n",
"with open(save_path, \"rb\") as handle:\n",
" results = pickle.load(handle)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"results.keys()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# PnL plot \n",
"\n",
"# Set parameters\n",
"figsize = (12, 4.5)\n",
"xlabel = \"Time\"\n",
"ylabel = \"P&L (USDT)\"\n",
"\n",
"plt.figure(figsize=figsize)\n",
"i = 0\n",
"for key, value in results.items():\n",
" x = value[\"timestamps\"]\n",
" y = value[\"trader_stats\"][\"adj_pnl\"]\n",
" label = f\"PMM (priority {i})\"\n",
" plt.plot(x, y, label=label)\n",
" print(f\"{key} - {y[-1]:.2f}\")\n",
" i += 1\n",
" \n",
"# plt.plot(x, y, color=color)\n",
"plt.xlabel(xlabel)\n",
"plt.ylabel(ylabel)\n",
"plt.legend()\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"PMM_comparison_pnl_100.pdf\"))\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set parameters\n",
"figsize = (12, 4.5)\n",
"xlabel = \"Time\"\n",
"ylabel = \"Traded volume (USDT)\"\n",
"\n",
"plt.figure(figsize=figsize)\n",
"i = 0\n",
"for key, value in results.items():\n",
" x = value[\"timestamps\"]\n",
" y = value[\"trader_stats\"][\"total_volume\"]\n",
" label = f\"PMM (priority {i})\"\n",
" plt.plot(x, y, label=label)\n",
" print(f\"{key} - {y[-1]:.2f}\")\n",
" i += 1\n",
" \n",
"plt.xlabel(xlabel)\n",
"plt.ylabel(ylabel)\n",
"plt.legend()\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"PMM_comparison_volume_100.pdf\"))\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Compute hitting probability\n",
"for key, value in results.items():\n",
" trades = np.array(value[\"trader_stats\"][\"trade_count\"])\n",
" hits = np.where(trades > 0, 1, 0)\n",
" \n",
" print(f\"{key} - {np.mean(hits)*100:.2f}\")\n",
" print()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"index = 0\n",
"\n",
"# Spread histogram\n",
"fig = plt.figure(figsize=(12, 4.5))\n",
"for key, value in results.items():\n",
" spread = np.array(value[\"trader_stats\"][\"quoted_ask_price\"]) - np.array(value[\"trader_stats\"][\"quoted_bid_price\"])\n",
" plt.hist(spread, bins=50, alpha=0.75, log=False, label=f\"PMM (priority {index})\")\n",
" mean = np.mean(spread)\n",
" plt.vlines(mean, 0, 50000, color=f\"C{index}\", linestyle=\"--\")\n",
" print(f\"{key} - mean: {mean:.4f}\")\n",
" index += 1\n",
"# Add kernel density estimate\n",
"plt.xlabel(\"Spread (USDT)\")\n",
"plt.ylabel(\"Count\")\n",
"# Cut off the outliers\n",
"plt.xlim(0, 0.24)\n",
"plt.xticks(np.arange(0, 0.24, 0.01), rotation=45)\n",
"plt.legend()\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"PMM_comparison_spread_100.pdf\"))\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Comparison of PMM strategies with volume 10"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"save_path = \"automated_backtests/results_2024-02-25_13-08-03.pickle\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load the results from a pickle file\n",
"with open(save_path, \"rb\") as handle:\n",
" results = pickle.load(handle)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"results.keys()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# PnL plot \n",
"\n",
"# Set parameters\n",
"figsize = (12, 4.5)\n",
"xlabel = \"Time\"\n",
"ylabel = \"P&L (USDT)\"\n",
"\n",
"\n",
"plt.figure(figsize=figsize)\n",
"i = 0\n",
"for key, value in results.items():\n",
" x = value[\"timestamps\"]\n",
" y = value[\"trader_stats\"][\"adj_pnl\"]\n",
" label = f\"PMM (priority {i})\"\n",
" plt.plot(x, y, label=label)\n",
" print(f\"{key} - {y[-1]:.2f}\")\n",
" i += 1\n",
"# plt.plot(x, y, color=color)\n",
"plt.xlabel(xlabel)\n",
"plt.ylabel(ylabel)\n",
"plt.ylim(-105, 55)\n",
"plt.legend()\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"PMM_comparison_pnl_10.pdf\"))\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set parameters\n",
"figsize = (12, 4.5)\n",
"xlabel = \"Time\"\n",
"ylabel = \"Traded volume (USDT)\"\n",
"\n",
"\n",
"plt.figure(figsize=figsize)\n",
"i = 0\n",
"for key, value in results.items():\n",
" x = value[\"timestamps\"]\n",
" y = value[\"trader_stats\"][\"total_volume\"]\n",
" label = f\"PMM (priority {i})\"\n",
" plt.plot(x, y, label=label)\n",
" print(f\"{key} - {y[-1]:.2f}\")\n",
" i += 1\n",
"plt.xlabel(xlabel)\n",
"plt.ylabel(ylabel)\n",
"plt.legend()\n",
"plt.ylim(-10000, 410000)\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"PMM_comparison_volume_10.pdf\"))\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set parameters\n",
"figsize = (12, 4.5)\n",
"xlabel = \"Time\"\n",
"ylabel = \"Transaction fees (USDT)\"\n",
"\n",
"\n",
"plt.figure(figsize=figsize)\n",
"i = 0\n",
"for key, value in results.items():\n",
" x = value[\"timestamps\"]\n",
" y = value[\"trader_stats\"][\"cum_costs\"]\n",
" label = f\"PMM (priority {i})\"\n",
" plt.plot(x, y, label=label)\n",
" print(f\"{key} - {y[-1]:.2f}\")\n",
" i += 1\n",
"plt.xlabel(xlabel)\n",
"plt.ylabel(ylabel)\n",
"plt.legend()\n",
"plt.tight_layout()\n",
"# plt.savefig(os.path.join(save_dir, \"PMM_comparison_volume_10.pdf\"))\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Compute hitting probability\n",
"for key, value in results.items():\n",
" trades = np.array(value[\"trader_stats\"][\"trade_count\"])\n",
" hits = np.where(trades > 0, 1, 0)\n",
" \n",
" print(f\"{key} - {np.mean(hits)*100:.2f}\")\n",
" print()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Comparison of PMM strategies with volume 100 (different SEEDs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"save_path = \"automated_backtests/results_2024-02-25_15-16-39.pickle\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load the results from a pickle file\n",
"with open(save_path, \"rb\") as handle:\n",
" results_pmm = pickle.load(handle)\n",
" \n",
"results_pmm.keys()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# PnL plot \n",
"\n",
"# Set parameters\n",
"figsize = (12, 4.5)\n",
"xlabel = \"Time\"\n",
"ylabel = \"P&L (USDT)\"\n",
"avg = []\n",
"\n",
"plt.figure(figsize=figsize)\n",
"i = 0\n",
"for key, value in results_pmm.items():\n",
" x = value[\"timestamps\"]\n",
" y = value[\"trader_stats\"][\"adj_pnl\"]\n",
" label = f\"PMM (priority {i})\"\n",
" plt.plot(x, y, label=label)\n",
" print(f\"{key} - {y[-1]:.2f}\")\n",
" i += 1\n",
" avg.append(value[\"trader_stats\"][\"adj_pnl\"][-1])\n",
" \n",
"# plt.plot(x, y, color=color)\n",
"plt.xlabel(xlabel)\n",
"plt.ylabel(ylabel)\n",
"plt.ylim(-60, 210)\n",
"# plt.legend()\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"PMM_seeds_comparison_pnl.pdf\"))\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(f\"Mean pnl: {np.mean(avg):.2f}\")\n",
"print(f\"Std pnl: {np.std(avg):.2f}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set parameters\n",
"figsize = (12, 4.5)\n",
"xlabel = \"Time\"\n",
"ylabel = \"Traded volume (USDT)\"\n",
"avg = []\n",
"\n",
"plt.figure(figsize=figsize)\n",
"i = 0\n",
"for key, value in results_pmm.items():\n",
" x = value[\"timestamps\"]\n",
" y = value[\"trader_stats\"][\"total_volume\"]\n",
" label = f\"PMM (priority {i})\"\n",
" plt.plot(x, y, label=label)\n",
" print(f\"{key} - {y[-1]:.2f}\")\n",
" avg.append(value[\"trader_stats\"][\"total_volume\"][-1])\n",
" i += 1\n",
" \n",
"plt.xlabel(xlabel)\n",
"plt.ylabel(ylabel)\n",
"# plt.legend()\n",
"plt.ylim(-10000, 230000)\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"PMM_seeds_comparison_volume.pdf\"))\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(f\"Mean volume: {np.mean(avg):.2f}\")\n",
"print(f\"Std volume: {np.std(avg):.2f}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set parameters\n",
"figsize = (12, 4.5)\n",
"xlabel = \"Time\"\n",
"ylabel = \"Transaction fees (USDT)\"\n",
"avg = []\n",
"\n",
"plt.figure(figsize=figsize)\n",
"i = 0\n",
"for key, value in results_pmm.items():\n",
" x = value[\"timestamps\"]\n",
" y = value[\"trader_stats\"][\"cum_costs\"]\n",
" label = f\"PMM (priority {i})\"\n",
" plt.plot(x, y, label=label)\n",
" print(f\"{key} - {y[-1]:.2f}\")\n",
" avg.append(value[\"trader_stats\"][\"cum_costs\"][-1])\n",
" i += 1\n",
" \n",
"plt.xlabel(xlabel)\n",
"plt.ylabel(ylabel)\n",
"# plt.legend()\n",
"plt.ylim(-5, 115)\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"PMM_seeds_comparison_fees.pdf\"))\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(f\"Mean fees: {np.mean(avg):.2f}\")\n",
"print(f\"Std fees: {np.std(avg):.2f}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"avg = []\n",
"\n",
"# Compute hitting probability\n",
"for key, value in results_pmm.items():\n",
" trades = np.array(value[\"trader_stats\"][\"trade_count\"])\n",
" hits = np.where(trades > 0, 1, 0)\n",
" avg.append(np.mean(hits) * 100)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(f\"Mean hitting probability: {np.mean(avg):.2f}%\")\n",
"print(f\"Std hitting probability: {np.std(avg):.2f}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"results_pmm.keys()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Comparison of AIRL strategies with volume 100 (different SEEDs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"save_path = \"automated_backtests/results_2024-02-25_21-22-46.pickle\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load the results from a pickle file\n",
"with open(save_path, \"rb\") as handle:\n",
" results_airl = pickle.load(handle)\n",
" \n",
"results_airl.keys()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# PnL plot \n",
"\n",
"# Set parameters\n",
"figsize = (12, 4.5)\n",
"xlabel = \"Time\"\n",
"ylabel = \"P&L (USDT)\"\n",
"avg = []\n",
"\n",
"plt.figure(figsize=figsize)\n",
"i = 0\n",
"for key, value in results_airl.items():\n",
" x = value[\"timestamps\"]\n",
" y = value[\"trader_stats\"][\"adj_pnl\"]\n",
    "    label = f\"AIRL (seed {i})\"\n",
" plt.plot(x, y, label=label)\n",
" print(f\"{key} - {y[-1]:.2f}\")\n",
" i += 1\n",
" avg.append(value[\"trader_stats\"][\"adj_pnl\"][-1])\n",
" \n",
"# plt.plot(x, y, color=color)\n",
"plt.xlabel(xlabel)\n",
"plt.ylabel(ylabel)\n",
"plt.ylim(-60, 210)\n",
"# plt.legend()\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"AIRL_seeds_comparison_pnl.pdf\"))\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(f\"Mean pnl: {np.mean(avg):.2f}\")\n",
"print(f\"Std pnl: {np.std(avg):.2f}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set parameters\n",
"figsize = (12, 4.5)\n",
"xlabel = \"Time\"\n",
"ylabel = \"Traded volume (USDT)\"\n",
"avg = []\n",
"\n",
"plt.figure(figsize=figsize)\n",
"i = 0\n",
"for key, value in results_airl.items():\n",
" x = value[\"timestamps\"]\n",
" y = value[\"trader_stats\"][\"total_volume\"]\n",
    "    label = f\"AIRL (seed {i})\"\n",
" plt.plot(x, y, label=label)\n",
" print(f\"{key} - {y[-1]:.2f}\")\n",
" avg.append(value[\"trader_stats\"][\"total_volume\"][-1])\n",
" i += 1\n",
" \n",
"plt.xlabel(xlabel)\n",
"plt.ylabel(ylabel)\n",
"# plt.legend()\n",
"plt.ylim(-10000, 230000)\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"AIRL_seeds_comparison_volume.pdf\"))\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(f\"Mean volume: {np.mean(avg):.2f}\")\n",
"print(f\"Std volume: {np.std(avg):.2f}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set parameters\n",
"figsize = (12, 4.5)\n",
"xlabel = \"Time\"\n",
"ylabel = \"Transaction fees (USDT)\"\n",
"avg = []\n",
"\n",
"plt.figure(figsize=figsize)\n",
"i = 0\n",
"for key, value in results_airl.items():\n",
" x = value[\"timestamps\"]\n",
" y = value[\"trader_stats\"][\"cum_costs\"]\n",
    "    label = f\"AIRL (seed {i})\"\n",
" plt.plot(x, y, label=label)\n",
" print(f\"{key} - {y[-1]:.2f}\")\n",
" avg.append(value[\"trader_stats\"][\"cum_costs\"][-1])\n",
" i += 1\n",
" \n",
"plt.xlabel(xlabel)\n",
"plt.ylabel(ylabel)\n",
"# plt.legend()\n",
"plt.ylim(-5, 115)\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"AIRL_seeds_comparison_fees.pdf\"))\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(f\"Mean fees: {np.mean(avg):.2f}\")\n",
"print(f\"Std fees: {np.std(avg):.2f}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"avg = []\n",
"\n",
"# Compute hitting probability\n",
"for key, value in results_airl.items():\n",
" trades = np.array(value[\"trader_stats\"][\"trade_count\"])\n",
" hits = np.where(trades > 0, 1, 0)\n",
" avg.append(np.mean(hits) * 100)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(f\"Mean hitting probability: {np.mean(avg):.2f}%\")\n",
"print(f\"Std hitting probability: {np.std(avg):.2f}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Seed comparison"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Fix the seed for visualization\n",
"seed_visual = 4"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"### PMM\n",
"\n",
"# Load the results\n",
"ts = results_pmm[f\"PMM_prior_1_vol_100_{seed_visual}\"][\"timestamps\"]\n",
"trader_stats = results_pmm[f\"PMM_prior_1_vol_100_{seed_visual}\"][\"trader_stats\"]\n",
"initial_cost = results_pmm[f\"PMM_prior_1_vol_100_{seed_visual}\"][\"initial_cost\"]\n",
"\n",
"# Plot the results\n",
"# ----------------------------------------------------------------------------\n",
"# PLOT - PnL\n",
"plt.figure(figsize=(12, 4.5))\n",
"plt.plot(ts, trader_stats[\"adj_pnl\"])\n",
"plt.xlabel(\"Time\")\n",
"plt.ylabel(\"P&L (USDT)\")\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"PMM_seeds_pnl.pdf\"))\n",
"plt.show()\n",
"print(f\"Final P&L: {trader_stats['adj_pnl'][-1]}\")\n",
"\n",
"# PLOT - Returns\n",
"equity = pd.Series(np.array(trader_stats[\"adj_pnl\"]) + initial_cost)\n",
"returns = equity.pct_change() * 100\n",
"plt.figure(figsize=(12, 4.5))\n",
"plt.plot(ts, returns)\n",
"plt.xlabel(\"Time\")\n",
"plt.ylabel(\"Returns (%)\")\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"PMM_seeds_returns.pdf\"))\n",
"plt.show()\n",
"print(\"Returns stats\")\n",
"print(returns.describe())\n",
"\n",
"# PLOT - Drawdowns\n",
"dd = drawdowns(equity)\n",
"plt.figure(figsize=(12, 4.5))\n",
"plt.fill_between(ts, dd, 0, color=\"red\", alpha=0.3)\n",
"plt.plot(ts, dd, color=\"red\", alpha=0.5)\n",
"plt.xlabel(\"Time\")\n",
"plt.ylabel(\"Drawdown (%)\")\n",
"plt.ylim(-0.85, 0.05)\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"PMM_seeds_drawdowns.pdf\"))\n",
"plt.show()\n",
"print(\"Drawdown stats\")\n",
"print(dd.describe())\n",
"\n",
"# PLOT - Inventory\n",
"plt.figure(figsize=(12, 4.5))\n",
"plt.plot(ts, trader_stats[\"inventory\"], color=\"darkorange\",)\n",
"plt.xlabel(\"Time\")\n",
"plt.ylabel(\"Inventory (SOL)\")\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"PMM_seeds_inventory.pdf\"))\n",
"plt.show()\n",
"print(\"Inventory stats\")\n",
"print(pd.Series(trader_stats[\"inventory\"]).describe())\n",
"\n",
"# PLOT - Total traded volume\n",
"plt.figure(figsize=(12, 4.5))\n",
"plt.plot(ts, trader_stats[\"total_volume\"], color=\"darkorange\",)\n",
"plt.xlabel(\"Time\")\n",
"plt.ylabel(\"Traded volume (USDT)\")\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"PMM_seeds_volume.pdf\"))\n",
"plt.show()\n",
"print(\"Total volume: \", trader_stats[\"total_volume\"][-1])\n",
"\n",
"# PLOT - Transaction costs\n",
"plt.figure(figsize=(12, 4.5))\n",
"plt.plot(ts, trader_stats[\"cum_costs\"], color=\"red\",)\n",
"plt.xlabel(\"Time\")\n",
"plt.ylabel(\"Transaction fees (USDT)\")\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"PMM_seeds_fees.pdf\"))\n",
"plt.show()\n",
"print(\"Total fees: \", trader_stats[\"cum_costs\"][-1])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"### AIRL \n",
"\n",
"# Load the results\n",
"airl_ts = results_airl[f\"RL_prior_1_vol_100_{seed_visual}\"][\"timestamps\"]\n",
"airl_trader_stats = results_airl[f\"RL_prior_1_vol_100_{seed_visual}\"][\"trader_stats\"]\n",
"airl_initial_cost = results_airl[f\"RL_prior_1_vol_100_{seed_visual}\"][\"initial_cost\"]\n",
"\n",
"# Plot the results\n",
"# ----------------------------------------------------------------------------\n",
"# PLOT - PnL\n",
"plt.figure(figsize=(12, 4.5))\n",
"plt.plot(airl_ts, airl_trader_stats[\"adj_pnl\"], label=\"AIRL\")\n",
"plt.plot(ts, trader_stats[\"adj_pnl\"], label=\"Expert policy\")\n",
"plt.xlabel(\"Time\")\n",
"plt.ylabel(\"P&L (USDT)\")\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.legend(loc=\"upper left\")\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"AIRL_seeds_pnl.pdf\"))\n",
"plt.show()\n",
"print(f\"Final P&L: {airl_trader_stats['adj_pnl'][-1]}\")\n",
"\n",
"# PLOT - Returns\n",
    "equity = pd.Series(np.array(airl_trader_stats[\"adj_pnl\"]) + airl_initial_cost)\n",
"airl_returns = equity.pct_change() * 100\n",
"plt.figure(figsize=(12, 4.5))\n",
"plt.plot(airl_ts, airl_returns)\n",
"plt.xlabel(\"Time\")\n",
"plt.ylabel(\"Returns (%)\")\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"AIRL_seeds_returns.pdf\"))\n",
"plt.show()\n",
"print(\"Returns stats\")\n",
    "print(airl_returns.describe())\n",
"\n",
"# PLOT - Drawdowns\n",
"dd = drawdowns(equity)\n",
"plt.figure(figsize=(12, 4.5))\n",
    "plt.fill_between(airl_ts, dd, 0, color=\"red\", alpha=0.3)\n",
"plt.plot(airl_ts, dd, color=\"red\", alpha=0.5)\n",
"plt.xlabel(\"Time\")\n",
"plt.ylabel(\"Drawdown (%)\")\n",
"plt.ylim(-0.85, 0.05)\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"AIRL_seeds_drawdowns.pdf\"))\n",
"plt.show()\n",
"print(\"Drawdown stats\")\n",
"print(dd.describe())\n",
"\n",
"# PLOT - Inventory\n",
"plt.figure(figsize=(12, 4.5))\n",
"plt.plot(airl_ts, airl_trader_stats[\"inventory\"], color=\"darkorange\",)\n",
"plt.xlabel(\"Time\")\n",
"plt.ylabel(\"Inventory (SOL)\")\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"AIRL_seeds_inventory.pdf\"))\n",
"plt.show()\n",
"print(\"Inventory stats\")\n",
"print(pd.Series(airl_trader_stats[\"inventory\"]).describe())\n",
"\n",
"# PLOT - Total traded volume\n",
"plt.figure(figsize=(12, 4.5))\n",
"plt.plot(airl_ts, airl_trader_stats[\"total_volume\"], label=\"AIRL\")\n",
"plt.plot(ts, trader_stats[\"total_volume\"], label=\"Expert policy\")\n",
"plt.xlabel(\"Time\")\n",
"plt.ylabel(\"Traded volume (USDT)\")\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.legend(loc=\"upper left\")\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"AIRL_seeds_volume.pdf\"))\n",
"plt.show()\n",
"print(\"Total volume: \", airl_trader_stats[\"total_volume\"][-1])\n",
"\n",
"# PLOT - Transaction costs\n",
"plt.figure(figsize=(12, 4.5))\n",
"plt.plot(airl_ts, airl_trader_stats[\"cum_costs\"], label=\"AIRL\")\n",
"plt.plot(ts, trader_stats[\"cum_costs\"], label=\"Expert policy\")\n",
"plt.xlabel(\"Time\")\n",
"plt.ylabel(\"Transaction fees (USDT)\")\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.legend(loc=\"upper left\")\n",
"plt.tight_layout()\n",
"plt.savefig(os.path.join(save_dir, \"AIRL_seeds_fees.pdf\"))\n",
"plt.show()\n",
"print(\"Total fees: \", airl_trader_stats[\"cum_costs\"][-1])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "reinforcement-learning-NTwPF8vr-py3.11",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| 32,669 | Python | .py | 1,134 | 24.337743 | 156 | 0.52456 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,048 | rl_generator.ipynb | JurajZelman_airl-market-making/rl_generator.ipynb | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Reinforcement learning (generator)\n",
"\n",
    "In this notebook, I implement a pure reinforcement learning agent. This is done to analyze the stability of training of the `generator` in the adversarial inverse reinforcement learning setting. Here, I tested various hyperparameters while using the perfect reward function (i.e. excluding the `discriminator` from inverse reinforcement learning) to gain a better understanding of the generator's learning process."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import pandas as pd\n",
"import torch as th\n",
"import gymnasium as gym\n",
"\n",
"from datetime import datetime\n",
"\n",
"from stable_baselines3.ppo import PPO, MlpPolicy\n",
"from stable_baselines3.common.evaluation import evaluate_policy\n",
"from stable_baselines3.common.env_checker import check_env\n",
"from stable_baselines3.common.monitor import Monitor\n",
"\n",
"from lob.traders import RLMarketMaker\n",
"from lob.commissions import BitComCommissions, BinanceCommissions\n",
"from lob.utils import set_plot_style, get_lot_size, get_tick_size\n",
"from rl.environments import LimitOrderBookGym"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set plot style\n",
"set_plot_style()\n",
"\n",
"# Set seed and random number generator\n",
"SEED = 1\n",
"RNG = np.random.default_rng(SEED)\n",
"\n",
"# Set device\n",
"DEVICE = th.device(\"cuda\" if th.cuda.is_available() else \"cpu\")\n",
"print(f\"Using device: {DEVICE}\")\n",
"\n",
"# Pandas display options (show all columns)\n",
"pd.set_option(\"display.max_columns\", None)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Initialize the market making agent"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters\n",
"EXCHANGE_NAME = \"BIT.COM\" \n",
"# EXCHANGE_NAME = \"BINANCE\"\n",
"# EXCHANGE_NAME = \"OKX\"\n",
"# EXCHANGE_NAME = \"GATEIO\"\n",
"SYMBOL = \"SOL-USDT\"\n",
"PATH = \"~/Projects/thesis-market-making/reinforcement-learning/data/\"\n",
"ORDER_FLOW_PENALTY = 2 # Penalty for division of incoming order flow\n",
"TICK_SIZE = get_tick_size(EXCHANGE_NAME) # Tick size of the limit order book\n",
"LOT_SIZE = get_lot_size(EXCHANGE_NAME) # Lot size of the limit order book\n",
"DEPTH = 20 # Depth of the data to load to the limit order book (max 20)\n",
"EXCHANGE_TRADER_ID = \"Exchange\"\n",
"MAX_STEPS = 300 # Maximum number of steps in an episode\n",
"TS_START = pd.Timestamp(\"2023-09-01 00:00:00\") # Start of the episode\n",
"TS_END = pd.Timestamp(\"2023-09-10 23:59:59\") # End of the episode\n",
"DETERMINISTIC = False # Indicates whether to use a deterministic environment\n",
"WIN = 0 # Window size for the features computation\n",
"LOGGING = False # Indicates whether to log events\n",
"TS_SAVE = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") # Ts for model saving\n",
"traders = {} # Dictionary of traders\n",
"\n",
"print(\"Timestamp for saving: \", TS_SAVE)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters for the RL agent\n",
"rl_trader_id = \"RLMarketMaker\"\n",
"com_model = BinanceCommissions(tier=10)\n",
"volume = 10\n",
"# TODO: Update commissions and volume\n",
"\n",
"trader = RLMarketMaker(\n",
" id=rl_trader_id,\n",
" com_model=com_model,\n",
" volume=volume,\n",
")\n",
"traders[rl_trader_id] = trader\n",
"\n",
"# Write a description of the experiment\n",
"description = \"RL market maker simulation.\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Register the limit order book environment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters for the environment\n",
"ID = \"LimitOrderBookGym-v0\"\n",
"ENTRY_POINT=LimitOrderBookGym\n",
"KWARGS = {\n",
" \"exchange_name\": EXCHANGE_NAME,\n",
" \"symbol_name\": SYMBOL,\n",
" \"tick_size\": TICK_SIZE,\n",
" \"lot_size\": LOT_SIZE,\n",
" \"depth\": DEPTH,\n",
" \"order_flow_penalty\": ORDER_FLOW_PENALTY,\n",
" \"traders\": traders,\n",
" \"max_steps\": MAX_STEPS,\n",
" \"ts_start\": TS_START,\n",
" \"ts_end\": TS_END,\n",
" \"deterministic\": DETERMINISTIC,\n",
" \"win\": WIN,\n",
" \"path\": PATH,\n",
" \"rl_trader_id\": rl_trader_id,\n",
" \"logging\": LOGGING,\n",
" \"ts_save\": TS_SAVE,\n",
" \"description\": description,\n",
"}\n",
"\n",
"# Register the environment\n",
"gym.envs.register(\n",
" id=ID,\n",
" entry_point=ENTRY_POINT,\n",
" kwargs=KWARGS,\n",
" max_episode_steps=MAX_STEPS,\n",
")\n",
"\n",
"# Create the environment\n",
"env = Monitor(gym.make(ID))\n",
"check_env(env)\n",
"env.reset()\n",
"\n",
"# Save the saving ts\n",
"ts_save = env.unwrapped.exchange.ts_save\n",
"print(f\"Saving ts: {ts_save}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualize the deterministic policy\n",
"env.reset()\n",
"terminated = False\n",
"while not terminated:\n",
" # action = env.action_space.sample() # this is where you would insert your policy\n",
" action = 12\n",
" observation, reward, terminated, truncated, info = env.step(action)\n",
" print(f\"Reward: {reward}\")\n",
" print()\n",
" \n",
" print(f\"Observation: {observation}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Define custom tensors and methods for better monitoring"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Define custom tensors for monitoring \n",
"monitor_states_orig = [\n",
" th.tensor([-1]),\n",
" th.tensor([-0.5]),\n",
" th.tensor([0]),\n",
" th.tensor([0.5]),\n",
" th.tensor([1]),\n",
"]\n",
"n_actions = 22\n",
"monitor_actions_orig = [\n",
" th.tensor(x) for x in range(n_actions)\n",
"]\n",
"eye = th.eye(n_actions)\n",
"monitor_actions_hot_orig = [eye[x] for x in range(n_actions)]\n",
"\n",
"monitor_states = th.stack(\n",
" [x for x in monitor_states_orig for _ in range(n_actions)]\n",
").to(DEVICE)\n",
"monitor_actions = th.stack(\n",
" monitor_actions_orig * len(monitor_states_orig)\n",
").to(DEVICE)\n",
"monitor_actions_hot = th.stack(\n",
" monitor_actions_hot_orig * len(monitor_states_orig)\n",
").to(DEVICE)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def evaluate_probabilities(\n",
" model: PPO,\n",
" monitor_states: th.Tensor,\n",
" monitor_states_orig: list,\n",
" monitor_actions: th.Tensor,\n",
" n_actions: int,\n",
") -> pd.DataFrame:\n",
" _, logprobs_policy, _ = model.policy.evaluate_actions(\n",
" monitor_states,\n",
" monitor_actions,\n",
" )\n",
" probs_policy = th.exp(logprobs_policy).reshape(\n",
" len(monitor_states_orig), n_actions\n",
" )\n",
" probs_policy = np.hstack(\n",
" [\n",
" th.stack(monitor_states_orig).detach().numpy(),\n",
" probs_policy.cpu().detach().numpy(),\n",
" ]\n",
" )\n",
" \n",
" # Convert to dataframe\n",
" df_probs_policy = pd.DataFrame(\n",
" probs_policy,\n",
" columns=[\"state\", *[f\"A{x}\" for x in range(n_actions)]],\n",
" )\n",
" df_probs_policy = df_probs_policy.round(2)\n",
"\n",
" return df_probs_policy"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Initialize the reinforcement learning agent"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters for the underlying policy\n",
"learning_rate = 0.001 # Learning rate, can be a function of progress\n",
"\n",
"n_steps = 4500 # Number of steps to run for each environment per update\n",
"batch_size = 15 # Mini batch size for each gradient update\n",
"n_epochs = 10 # Number of epoch when optimizing the surrogate loss\n",
"\n",
"gamma = 0 # Discount factor\n",
"gae_lambda = 0.95 # Generalized Advantage Estimator factor \n",
"clip_range = 0.1 # Clipping parameter, can be a function of progress\n",
"ent_coef = 0.01 # Entropy coefficient for the loss calculation\n",
"vf_coef = 0.5 # Value function coefficient for the loss calculation\n",
"max_grad_norm = 0.5 # The maximum value for the gradient clipping\n",
"\n",
"seed = SEED # Seed for the pseudo random generators\n",
"verbose = 0 # Verbosity level: 0 no output, 1 info, 2 debug\n",
"normalize_advantage = True # Whether to normalize or not the advantage\n",
"\n",
"clip_range_vf = None # Clip for the value function, can be a func of progress\n",
"use_sde = False # Whether to use State Dependent Exploration or not\n",
    "sde_sample_freq = -1 # Sample a new noise matrix every n steps (-1 = disable)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the learner policy\n",
"learner = PPO(\n",
" env=env,\n",
" policy=MlpPolicy,\n",
" learning_rate=learning_rate,\n",
" n_steps=n_steps,\n",
" batch_size=batch_size,\n",
" n_epochs=n_epochs,\n",
" gamma=gamma,\n",
" gae_lambda=gae_lambda,\n",
" clip_range=clip_range,\n",
" clip_range_vf=clip_range_vf,\n",
" normalize_advantage=normalize_advantage,\n",
" ent_coef=ent_coef,\n",
" vf_coef=vf_coef,\n",
" max_grad_norm=max_grad_norm,\n",
" use_sde=use_sde,\n",
" sde_sample_freq=sde_sample_freq,\n",
" verbose=verbose,\n",
" seed=seed,\n",
" device=DEVICE,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Evaluate the random policy\n",
"mean_reward, std_reward = evaluate_policy(learner, env, n_eval_episodes=5, deterministic=False)\n",
"print(f\"mean_reward:{mean_reward:.2f} +/- {std_reward:.2f}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualize the policy before training\n",
"observation = env.reset()[0]\n",
"\n",
"terminated = False\n",
"while not terminated:\n",
" # action = env.action_space.sample() # this is where you would insert your policy\n",
" action, _ = learner.predict(observation, deterministic=True)\n",
" print(f\"Observation: {observation}\")\n",
" print(f\"Action: {action}\")\n",
" observation, reward, terminated, truncated, info = env.step(action)\n",
" print(f\"Reward: {reward}\")\n",
" print()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Evaluate the probabilities of states and actions\n",
"probs = evaluate_probabilities(\n",
" model=learner,\n",
" monitor_states=monitor_states,\n",
" monitor_states_orig=monitor_states_orig,\n",
" monitor_actions=monitor_actions,\n",
" n_actions=n_actions,\n",
")\n",
"probs"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Train the agent"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_steps = 3000\n",
"\n",
"# Train the agent\n",
"for i in range(15):\n",
" # Train the agent for n steps\n",
" learner.learn(total_timesteps=train_steps, progress_bar=False)\n",
" \n",
" # Evaluate the probabilities of states and actions\n",
" probs = evaluate_probabilities(\n",
" model=learner,\n",
" monitor_states=monitor_states,\n",
" monitor_states_orig=monitor_states_orig,\n",
" monitor_actions=monitor_actions,\n",
" n_actions=n_actions,\n",
" )\n",
" print(\"Probabilities for iteration: \", i)\n",
" print(probs)\n",
" print()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Evaluate the trained agent\n",
"mean_reward, std_reward = evaluate_policy(learner, env, n_eval_episodes=5)\n",
"print(f\"Mean reward:{mean_reward:.2f} +/- {std_reward:.2f}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualize the probabilities of states and actions\n",
"probs = evaluate_probabilities(\n",
" model=learner,\n",
" monitor_states=monitor_states,\n",
" monitor_states_orig=monitor_states_orig,\n",
" monitor_actions=monitor_actions,\n",
" n_actions=n_actions,\n",
" )\n",
"probs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualize the learned policy\n",
"observation = env.reset()[0]\n",
"\n",
"terminated = False\n",
"while not terminated:\n",
" # action = env.action_space.sample() # this is where you would insert your policy\n",
" action, _ = learner.predict(observation, deterministic=True)\n",
" print(f\"Observation: {observation}\")\n",
" print(f\"Action: {action}\")\n",
" observation, reward, terminated, truncated, info = env.step(action)\n",
" print(f\"Reward: {reward}\")\n",
" print()\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "simulator-rdDX6k4t-py3.11",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}
| 15,701 | Python | .py | 506 | 26.606719 | 417 | 0.558802 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,049 | imitation_airl.ipynb | JurajZelman_airl-market-making/imitation_airl.ipynb | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Adversarial Inverse Reinforcement Learning\n",
"\n",
"This notebook contains the code for training the _Adversarial Inverse Reinforcement Learning_ (AIRL) algorithm from [Fu et al. (2018)](https://arxiv.org/abs/1710.11248) utilizing the [imitation](https://github.com/HumanCompatibleAI/imitation) and [stable-baselines3](https://github.com/DLR-RM/stable-baselines3) libraries and the custom `gym` limit order book environment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import time\n",
"\n",
"import gymnasium as gym\n",
"import pandas as pd\n",
"import numpy as np\n",
"import torch as th\n",
"\n",
"from datetime import datetime\n",
"from imitation.algorithms.adversarial.airl import AIRL\n",
"from imitation.data import serialize\n",
"from imitation.data import rollout\n",
"from imitation.util.util import make_vec_env\n",
"from imitation.data.wrappers import RolloutInfoWrapper\n",
"from stable_baselines3 import PPO\n",
"from stable_baselines3.common.evaluation import evaluate_policy\n",
"from stable_baselines3.ppo import MlpPolicy\n",
"from stable_baselines3.common.monitor import Monitor\n",
"from stable_baselines3.common.vec_env import VecCheckNan\n",
"\n",
"from lob.commissions import BitCommissions\n",
"from lob.exchange import Exchange\n",
"from lob.traders import RLMarketMaker\n",
"from lob.plots import visualize_backtest, set_plot_style\n",
"from lob.utils import get_lot_size, get_tick_size\n",
"from rl.environments import LimitOrderBookGym\n",
"from rl.utils import save_model, load_model\n",
"from rl.plotting import visualize_airl_train_stats\n",
"from rl.experts import RandomPolicy_v1, ExpertPolicy_v1\n",
"from rl.rewards import NegativeRewardNet"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set plot style\n",
"set_plot_style()\n",
"\n",
"# Set device\n",
"DEVICE = th.device(\"cuda\" if th.cuda.is_available() else \"cpu\")\n",
"print(f\"Using device: {DEVICE}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set strict error checking\n",
"th.autograd.set_detect_anomaly(True)\n",
"np.seterr(all=\"raise\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Seed for the pseudo random generator\n",
"# SEED = 1\n",
"# SEED = 2\n",
"# SEED = 3\n",
"# SEED = 4\n",
"SEED = 5"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Register custom vectorized environment\n",
"\n",
"In this section I load the limit order book gym environment and register it as a custom vectorized environment. This is necessary for the `stable-baselines3` library to work with the environment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters\n",
"EXCHANGE_NAME = \"BIT.COM\"\n",
"SYMBOL = \"SOL-USDT\"\n",
"PATH = \"~/Projects/thesis-market-making/reinforcement-learning/data/\"\n",
"TICK_SIZE = get_tick_size(EXCHANGE_NAME) # Tick size of the limit order book\n",
"LOT_SIZE = get_lot_size(EXCHANGE_NAME) # Lot size of the limit order book\n",
"DEPTH = 20 # Depth of the data to load to the limit order book (max 20)\n",
"EXCHANGE_TRADER_ID = \"Exchange\"\n",
"MAX_STEPS = 300 # Maximum number of steps in an episode\n",
"TS_START = pd.Timestamp(\"2023-09-01 00:00:00\") # Start of the episode\n",
"TS_END = pd.Timestamp(\"2023-09-10 23:59:59\") # End of the episode\n",
"DETERMINISTIC = False # Indicates whether to use a deterministic environment\n",
"WIN = 0 # Window size for the features computation\n",
"LOGGING = False # Indicates whether to log events\n",
"TS_SAVE = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") # Ts for model saving\n",
"LATENCY_COMP_PARAMS = {} # Parameters for the stochastic backtest\n",
"RNG = np.random.default_rng(seed=SEED) # Random number generator\n",
"traders = {} # Dictionary of traders\n",
"\n",
"print(\"Timestamp for saving: \", TS_SAVE)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters for the RL agent\n",
"rl_trader_id = \"RLMarketMaker\"\n",
"com_model = BitCommissions(tier=5)\n",
"volume = 100\n",
"\n",
"# Initialize the trader\n",
"trader = RLMarketMaker(\n",
" id=rl_trader_id,\n",
" com_model=com_model,\n",
" volume=volume,\n",
")\n",
"traders[rl_trader_id] = trader\n",
"\n",
"# Write a description of the experiment\n",
"description = \"RL market maker simulation.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters for the environment\n",
"ID = \"LimitOrderBookGym-v1\"\n",
"ENTRY_POINT=LimitOrderBookGym\n",
"KWARGS = {\n",
" \"exchange_name\": EXCHANGE_NAME,\n",
" \"symbol_name\": SYMBOL,\n",
" \"tick_size\": TICK_SIZE,\n",
" \"lot_size\": LOT_SIZE,\n",
" \"depth\": DEPTH,\n",
" \"traders\": traders,\n",
" \"max_steps\": MAX_STEPS,\n",
" \"ts_start\": TS_START,\n",
" \"ts_end\": TS_END,\n",
" \"deterministic\": DETERMINISTIC,\n",
" \"win\": WIN,\n",
" \"path\": PATH,\n",
" \"rl_trader_id\": rl_trader_id,\n",
" \"latency_comp_params\": LATENCY_COMP_PARAMS,\n",
" \"logging\": LOGGING,\n",
" \"ts_save\": TS_SAVE,\n",
" \"description\": description,\n",
" \"rng\": RNG,\n",
"}\n",
"\n",
"# Register the environment\n",
"gym.envs.register(\n",
" id=ID,\n",
" entry_point=ENTRY_POINT,\n",
" kwargs=KWARGS,\n",
" max_episode_steps=MAX_STEPS,\n",
")\n",
"\n",
"# Create the environment\n",
"env = Monitor(gym.make(ID))\n",
"\n",
"# Save the saving ts\n",
"ts_save = env.unwrapped.exchange.ts_save\n",
"print(f\"Saving ts: {ts_save}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Create the vectorized environment\n",
"venv = make_vec_env(\n",
" ID,\n",
" rng=RNG,\n",
" n_envs=1,\n",
" post_wrappers=[\n",
" lambda env, _: RolloutInfoWrapper(env)\n",
" ], # needed for computing rollouts later\n",
" parallel=False,\n",
")\n",
"venv = VecCheckNan(venv, raise_exception=True) # Check for NaN observations\n",
"venv.reset()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Generate rollouts with random and expert policies\n",
"\n",
    "In this section I define an expert policy that will be used as the target of imitation learning."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters for the rollout\n",
"min_timesteps = None\n",
"min_episodes = 1\n",
"\n",
"# Rollout the environment with a random policy\n",
"rollouts = rollout.rollout(\n",
" None, # Random policy\n",
" venv,\n",
" sample_until=rollout.make_sample_until(\n",
" min_timesteps=min_timesteps,\n",
" min_episodes=min_episodes\n",
" ),\n",
" rng=RNG,\n",
")\n",
"\n",
"# Print the first rollout\n",
"for i in range(len(rollouts[0].obs) - 1):\n",
" print(\"Observation: \", rollouts[0].obs[i])\n",
" print(\"Action: \", rollouts[0].acts[i])\n",
" print()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the random policy\n",
"random_policy = RandomPolicy_v1(venv.action_space)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Evaluate the random policy\n",
"reward_random_policy, _ = evaluate_policy(\n",
" random_policy, env, 1, return_episode_rewards=True\n",
")\n",
"print(\"Reward: \", np.mean(reward_random_policy))\n",
"print(\"Std : \", np.std(reward_random_policy))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the expert policy\n",
"expert = ExpertPolicy_v1()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters for the rollout\n",
"min_timesteps = None\n",
"min_episodes = 1\n",
"\n",
"# Rollout the environment with the expert policy\n",
"rollouts = rollout.rollout(\n",
" expert.predict,\n",
" venv,\n",
" sample_until=rollout.make_sample_until(\n",
" min_timesteps=min_timesteps,\n",
" min_episodes=min_episodes\n",
" ),\n",
" rng=RNG,\n",
")\n",
"\n",
"# Print the first rollout\n",
"for i in range(len(rollouts[0].obs) - 1):\n",
" print(f\"Obs: {rollouts[0].obs[i][0]: .3f} --> Action: {rollouts[0].acts[i]}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Flatten the trajectories into transitions\n",
"transitions = rollout.flatten_trajectories(rollouts)\n",
"transitions"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Evaluate the expert\n",
"reward_expert_policy, _ = evaluate_policy(\n",
" expert, venv, 1, return_episode_rewards=True\n",
")\n",
"print(\"Reward: \", np.mean(reward_expert_policy))\n",
"print(\"Std : \", np.std(reward_expert_policy))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load the expert trajectories\n",
"path = f\"rollouts/rollouts_2024-01-20_18-33-28.pkl\"\n",
"\n",
"# If the file exists load the rollouts\n",
"if os.path.exists(path):\n",
" rollouts = serialize.load(path)\n",
" \n",
"# Else, generate the rollouts\n",
"else:\n",
" # Set the parameters for the rollout\n",
" min_timesteps = 45000 * 3 + 4500\n",
" min_episodes = None\n",
"\n",
" # Rollout the environment with the expert policy\n",
" rollouts = rollout.rollout(\n",
" expert.predict,\n",
" venv,\n",
" sample_until=rollout.make_sample_until(\n",
" min_timesteps=min_timesteps,\n",
" min_episodes=min_episodes\n",
" ),\n",
" rng=RNG,\n",
" )\n",
" \n",
" # Ensure the directory exists\n",
" if not os.path.exists(\"rollouts\"):\n",
" os.mkdir(\"rollouts\")\n",
" path = f\"rollouts/rollouts_{ts_save}.pkl\"\n",
"\n",
" # Save the rollouts\n",
" serialize.save(path, rollouts)\n",
"\n",
"# Print the first rollout\n",
"for i in range(len(rollouts[0].obs) - 1):\n",
" print(\"Observation: \", rollouts[0].obs[i])\n",
" print(\"Action: \", rollouts[0].acts[i])\n",
" print()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Adversarial Inverse Reinforcement Learning Agent\n",
"\n",
    "In this section I develop a pipeline for training the adversarial inverse reinforcement learning agent. The goal is to learn the reward function of the expert policy by training the discriminator network and the agent policy network."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set parameters for PPO (generator)\n",
"learning_rate = 0.001 # Learning rate, can be a function of progress \n",
"batch_size = 60 # Mini batch size for each gradient update\n",
"n_epochs = 10 # Number of epoch when optimizing the surrogate loss\n",
"\n",
"gamma = 0.5 # Discount factor, focus on the current reward\n",
"gae_lambda = 0 # Generalized advantage estimation\n",
"clip_range = 0.1 # Clipping parameter, can be a function of progress\n",
"ent_coef = 0.01 # Entropy coefficient for the loss calculation\n",
"vf_coef = 0.5 # Value function coefficient for the loss calculation\n",
"max_grad_norm = 0.5 # The maximum value for the gradient clipping\n",
"\n",
"verbose = 0 # Verbosity level: 0 no output, 1 info, 2 debug\n",
"normalize_advantage = True # Whether to normalize or not the advantage\n",
"\n",
"clip_range_vf = None # Clip for the value function, can be a func of progress\n",
"use_sde = False # Whether to use State Dependent Exploration or not\n",
"sde_sample_freq = -1 # Sample a new noise matrix every n steps (-1 = disable)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters for the (negative) reward net\n",
"use_state = True # Current state is used for the reward \n",
"use_action = True # Current action is used for the reward\n",
"use_next_state = False # Next state is used for the reward\n",
"use_done = False # Done flag is used for the reward "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters for the AIRL trainer\n",
"gen_replay_buffer_capacity = None \n",
"allow_variable_horizon = True # TODO: Getting issues without this setting\n",
"\n",
"disc_opt_kwargs = {\n",
" \"lr\": 0.001,\n",
"}\n",
"policy_kwargs = {\"use_expln\": True} # Fixing the issue with the NaNs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the number of timesteps, batch size and number of disc updates\n",
"\n",
"# Total number of timesteps in the whole training\n",
"total_timesteps = 3000 * 600\n",
"\n",
"# Generator\n",
"gen_train_timesteps = 3000 # N steps in the environment per one round\n",
"n_steps = gen_train_timesteps\n",
"\n",
"# Discriminator batches\n",
"demo_minibatch_size = 60 # N samples in minibatch for one discriminator update\n",
"demo_batch_size = 300 * 10 # N samples in the batch of expert data (batch)\n",
"n_disc_updates_per_round = 4 # N discriminator updates per one round"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# TODO: Be careful here and use the multiples of episode length (otherwise you\n",
"# might run into unexpected issues with variable horizons during training). "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the learner policy\n",
"learner = PPO(\n",
" env=venv,\n",
" policy=MlpPolicy,\n",
" policy_kwargs=policy_kwargs,\n",
" learning_rate=learning_rate,\n",
" n_steps=n_steps,\n",
" batch_size=batch_size,\n",
" n_epochs=n_epochs,\n",
" gamma=gamma,\n",
" gae_lambda=gae_lambda,\n",
" clip_range=clip_range,\n",
" clip_range_vf=clip_range_vf,\n",
" normalize_advantage=normalize_advantage,\n",
" ent_coef=ent_coef,\n",
" vf_coef=vf_coef,\n",
" max_grad_norm=max_grad_norm,\n",
" use_sde=use_sde,\n",
" sde_sample_freq=sde_sample_freq,\n",
" verbose=verbose,\n",
" seed=SEED,\n",
" device=DEVICE,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the custom reward network\n",
"reward_net = NegativeRewardNet(\n",
" observation_space=venv.observation_space,\n",
" action_space=venv.action_space,\n",
" use_state=use_state,\n",
" use_action=use_action,\n",
" use_next_state=use_next_state,\n",
" use_done=use_done,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the AIRL trainer\n",
"airl_trainer = AIRL(\n",
" demonstrations=rollouts,\n",
" demo_batch_size=demo_batch_size,\n",
" demo_minibatch_size=demo_minibatch_size,\n",
" n_disc_updates_per_round=n_disc_updates_per_round,\n",
" gen_train_timesteps=gen_train_timesteps,\n",
" gen_replay_buffer_capacity=gen_replay_buffer_capacity,\n",
" venv=venv,\n",
" gen_algo=learner,\n",
" reward_net=reward_net,\n",
" allow_variable_horizon=allow_variable_horizon,\n",
" disc_opt_kwargs=disc_opt_kwargs,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Evaluate the policy before training\n",
"venv.seed(SEED)\n",
"learner_rewards_before_training, _ = evaluate_policy(\n",
" learner, venv, 1, return_episode_rewards=True\n",
")\n",
"print(\"Mean: \", np.mean(learner_rewards_before_training))\n",
"print(\"Std: \", np.std(learner_rewards_before_training))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualize actions of the policy before training\n",
"for _ in range(1):\n",
" obs = venv.reset()\n",
" done = False\n",
" while not done:\n",
" print(obs)\n",
" action, _ = learner.predict(obs, deterministic=True)\n",
" print(action)\n",
" print()\n",
" obs, _, done, _ = venv.step(action)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Train the model\n",
"airl_trainer.train(total_timesteps=total_timesteps)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Evaluate the policy after training\n",
"venv.seed(SEED)\n",
"learner_rewards_after_training, _ = evaluate_policy(\n",
" learner, venv, 5, return_episode_rewards=True\n",
")\n",
"print(\"Mean: \", np.mean(learner_rewards_after_training))\n",
"print(\"Std: \", np.std(learner_rewards_after_training))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualize actions of the policy after training\n",
"for _ in range(1):\n",
" obs = venv.reset()\n",
" done = False\n",
" while not done:\n",
" action, _ = learner.predict(obs, deterministic=True)\n",
" print(f\"Obs: {obs[0][0]} --> Action: {action}\")\n",
" obs, _, done, _ = venv.step(action)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Access the training log statistics\n",
"stats = airl_trainer.logger._logger.stats"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"visualize_airl_train_stats(stats)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Save the trained model and stats"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Save the model\n",
"save_path = os.path.join(os.getcwd(), \"models\")\n",
"ts = airl_trainer.ts_now\n",
"print(f\"Saving the model with timestamp: {ts}\")\n",
"save_model(learner, reward_net, stats, save_path, ts)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load the trained model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Pick the timestamp of the model to load\n",
"# ts = \"2024-01-21_17-12-35\" # seed 1\n",
"# ts = \"2024-01-22_18-03-01\" # seed 2\n",
"# ts = \"2024-01-23_19-14-27\" # seed 3\n",
"# ts = \"2024-01-24_09-40-47\" # seed 4\n",
"ts = \"2024-01-24_22-39-37\" # seed 5"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load the model\n",
"load_path = os.path.join(os.getcwd(), \"models\")\n",
"learner, reward_net, stats = load_model(load_path, ts)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(ts)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Evaluate the trained model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualize the stats\n",
"save_fig = True\n",
"visualize_airl_train_stats(stats, save_fig=save_fig)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Evaluate the policy after training\n",
"venv.seed(SEED)\n",
"learner_rewards_after_training, _ = evaluate_policy(\n",
" learner, venv, 5, return_episode_rewards=True\n",
")\n",
"print(\"Mean: \", np.mean(learner_rewards_after_training))\n",
"print(\"Std: \", np.std(learner_rewards_after_training))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualize actions of the policy after training\n",
"for _ in range(1):\n",
" obs = venv.reset()\n",
" done = False\n",
" while not done:\n",
" action, _ = learner.predict(obs, deterministic=True)\n",
" print(f\"Obs: {obs[0][0]: .5f} --> Action: {action}\")\n",
" obs, _, done, _ = venv.step(action)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters\n",
"EXCHANGE_NAME = \"BIT.COM\"\n",
"SYMBOL = \"SOL-USDT\"\n",
"PATH = \"~/Projects/thesis-market-making/reinforcement-learning/data/\"\n",
"TICK_SIZE = get_tick_size(EXCHANGE_NAME) # Tick size of the limit order book\n",
"LOT_SIZE = get_lot_size(EXCHANGE_NAME) # Lot size of the limit order book\n",
"DEPTH = 20 # Depth of the data to load to the limit order book (max 20)\n",
"EXCHANGE_TRADER_ID = \"Exchange\"\n",
"MAX_STEPS = None # Maximum number of steps in an episode\n",
"TS_START = pd.Timestamp(\"2023-09-11 00:00:00\") # Start of the episode\n",
"TS_END = pd.Timestamp(\"2023-09-13 23:59:59\") # End of the episode\n",
"WIN = 0 # Window size for the features computation\n",
"LOGGING = False # Indicates whether to log events\n",
"LATENCY_COMP_PARAMS = {\n",
" 0: {\"prob\": 0.9, \"divisor\": 1},\n",
" 1: {\"prob\": 0.9, \"divisor\": 1},\n",
" 2: {\"prob\": 0.9, \"divisor\": 1},\n",
" 3: {\"prob\": 0.9, \"divisor\": 1},\n",
"} # Latency compensation parameters for stochastic backtest\n",
"RNG = np.random.default_rng(seed=SEED) # Random number generator"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the limit order book and traders\n",
"start = time.time()\n",
"traders = {}\n",
"\n",
"# Behavior cloning agent\n",
"rl_trader_id = \"RLMarketMaker\"\n",
"com_model = BitCommissions(tier=5)\n",
"volume = 100\n",
"trader = RLMarketMaker(\n",
" id=rl_trader_id,\n",
" com_model=com_model,\n",
" volume=volume,\n",
" policy=learner.policy,\n",
")\n",
"traders[rl_trader_id] = trader\n",
"\n",
"description = f\"AIRL agent.\"\n",
"\n",
"# Initialize the exchange\n",
"exchange = Exchange(\n",
" exchange_name=EXCHANGE_NAME,\n",
" symbol_name=SYMBOL,\n",
" tick_size=TICK_SIZE,\n",
" lot_size=LOT_SIZE,\n",
" depth=DEPTH,\n",
" traders=traders,\n",
" max_steps=MAX_STEPS,\n",
" ts_start=TS_START,\n",
" ts_end=TS_END,\n",
" win=WIN,\n",
" path=PATH,\n",
" rl_trader_id=rl_trader_id,\n",
" latency_comp_params=LATENCY_COMP_PARAMS,\n",
" logging=LOGGING,\n",
" ts_save=TS_SAVE,\n",
" description=description,\n",
" rng=RNG,\n",
" )\n",
"end = round(time.time() - start, 2)\n",
"print(f\"Time taken for initialization of the exchange: {end} sec.\")\n",
"\n",
"# Run the exchange simulation\n",
"start = time.time()\n",
"exchange.run()\n",
"end = round(time.time() - start, 2)\n",
"print(f\"Time taken for running the exchange: {end} sec.\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"timestamps = exchange.stats[\"ts\"]\n",
"trader_stats = traders[rl_trader_id].stats\n",
"initial_cost = 20.5 * volume * 2 # TODO: Adjust"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"visualize_backtest(timestamps, trader_stats, initial_cost)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "imitation-csF8rUtb-py3.11",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| 26,681 | Python | .py | 881 | 25.886493 | 378 | 0.564806 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,050 | backtest_automation.ipynb | JurajZelman_airl-market-making/backtest_automation.ipynb | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Market making backtests\n",
"\n",
"In this notebook I automate the generation of market making backtests for the thesis."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Initial setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import pickle\n",
"import time\n",
"\n",
"import pandas as pd\n",
"import polars as pl\n",
"import numpy as np\n",
"\n",
"from datetime import datetime\n",
"\n",
"from lob.exchange import Exchange\n",
"from lob.traders import PureMarketMaker\n",
"from lob.commissions import BitCommissions\n",
"from lob.plots import set_plot_style\n",
"from lob.utils import get_lot_size, get_tick_size, ensure_dir_exists\n",
"from rl.utils import send_notification"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Configure Polars \n",
"cfg = pl.Config()\n",
"cfg.set_tbl_rows(20)\n",
"\n",
"# Configure plotting\n",
"set_plot_style()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Define custom colors\n",
"color_green = \"#13961a\"\n",
"color_red = \"#eb5c14\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set random seed\n",
"SEED = 1"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Pure market makers (volume 100)\n",
"\n",
"In this section I generate the statistics of the pure market making strategy with multiple priorities."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"TS_START = pd.Timestamp(\"2023-09-01 00:00:00\") # Start of the episode\n",
"TS_END = pd.Timestamp(\"2023-09-13 23:59:59\") # End of the episode"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters\n",
"EXCHANGE_NAME = \"BIT.COM\" \n",
"SYMBOL = \"SOL-USDT\"\n",
"PATH = \"~/Projects/thesis-market-making/reinforcement-learning/data/\"\n",
"TICK_SIZE = get_tick_size(EXCHANGE_NAME) # Tick size of the limit order book\n",
"LOT_SIZE = get_lot_size(EXCHANGE_NAME) # Lot size of the limit order book\n",
"DEPTH = 20 # Depth of the data to load to the limit order book (max 20)\n",
"EXCHANGE_TRADER_ID = \"Exchange\"\n",
"MAX_STEPS = None # Maximum number of steps in an episode\n",
"WIN = 0 # Window size for the features computation\n",
"# LOGGING = False # Indicates whether to log events\n",
"LOGGING = True\n",
"TS_SAVE = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") # Ts for model saving\n",
"RNG = np.random.default_rng(seed=SEED) # Random number generator\n",
"\n",
"# Set the parameters for the stochastic backtest\n",
"LATENCY_COMP_PARAMS = {\n",
" 0: {\"prob\": 0.9, \"divisor\": 1},\n",
" 1: {\"prob\": 0.9, \"divisor\": 1},\n",
" 2: {\"prob\": 0.9, \"divisor\": 1},\n",
" 3: {\"prob\": 0.9, \"divisor\": 1},\n",
"} # Latency compensation parameters for the stochastic backtest"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters for the automated backtest\n",
"priorities = [0, 1, 2, 3]\n",
"volumes = [100]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the results dictionary\n",
"results = {}\n",
"\n",
"# Run the backtests\n",
"for priority in priorities:\n",
" for volume in volumes: \n",
"\n",
" # Initialize the limit order book and traders\n",
" start = time.time()\n",
" traders = {}\n",
"\n",
" # Pure market making strategy\n",
" trader_id = f\"PMM_prior_{priority}_vol_{volume}\"\n",
" inventory_manage = True\n",
" description = f\"Pure market maker with priority {priority} and volume {volume}.\"\n",
" \n",
" # Set the commission model\n",
" if volume == 100:\n",
" if priority == 0:\n",
" tier = 5\n",
" elif priority == 1:\n",
" tier = 5\n",
" elif priority == 2:\n",
" tier = 2\n",
" elif priority == 3:\n",
" tier = 1\n",
" elif volume == 10:\n",
" if priority == 0:\n",
" tier = 4\n",
" elif priority == 1:\n",
" tier = 3\n",
" elif priority == 2:\n",
" tier = 1\n",
" elif priority == 3:\n",
" tier = 1\n",
" \n",
" com_model = BitCommissions(tier=tier)\n",
" trader = PureMarketMaker(\n",
" trader_id,\n",
" com_model=com_model,\n",
" volume=volume,\n",
" priority=priority,\n",
" inventory_manage=inventory_manage,\n",
" )\n",
" traders[trader.id] = trader\n",
"\n",
" # Initialize the exchange\n",
" exchange = Exchange(\n",
" exchange_name=EXCHANGE_NAME,\n",
" symbol_name=SYMBOL,\n",
" tick_size=TICK_SIZE,\n",
" lot_size=LOT_SIZE,\n",
" depth=DEPTH,\n",
" traders=traders,\n",
" max_steps=MAX_STEPS,\n",
" ts_start=TS_START,\n",
" ts_end=TS_END,\n",
" win=WIN,\n",
" path=PATH,\n",
" rl_trader_id=\"\",\n",
" latency_comp_params=LATENCY_COMP_PARAMS,\n",
" logging=LOGGING,\n",
" ts_save=TS_SAVE,\n",
" description=description,\n",
" rng=RNG,\n",
" )\n",
" end = round(time.time() - start, 2)\n",
"\n",
" # Run the exchange simulation\n",
" start = time.time()\n",
" exchange.run()\n",
" end = round(time.time() - start, 2)\n",
"\n",
" # Save the results\n",
" timestamps = exchange.stats[\"ts\"]\n",
" trader_stats = traders[trader_id].stats\n",
" initial_cost = 20.5 * volume * 2\n",
" results[trader_id] = {\n",
" \"timestamps\": timestamps,\n",
" \"trader_stats\": trader_stats,\n",
" \"initial_cost\": initial_cost,\n",
" }\n",
" \n",
"send_notification(message=\"Backtest finished!\", time=20000)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Save the results to a pickle file\n",
"save_dir = \"automated_backtests\"\n",
"\n",
"ensure_dir_exists(save_dir)\n",
"save_path = os.path.join(save_dir, f\"results_{TS_SAVE}.pickle\")\n",
"with open(save_path, \"wb\") as handle:\n",
" pickle.dump(results, handle, protocol=pickle.HIGHEST_PROTOCOL)\n",
" \n",
"print(f\"Results saved to {save_path}.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Pure market makers (volume 10)\n",
"\n",
"In this section I generate the statistics of the pure market making strategy with multiple priorities."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"TS_START = pd.Timestamp(\"2023-09-01 00:00:00\") # Start of the episode\n",
"TS_END = pd.Timestamp(\"2023-09-13 23:59:59\") # End of the episode"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters\n",
"EXCHANGE_NAME = \"BIT.COM\" \n",
"SYMBOL = \"SOL-USDT\"\n",
"PATH = \"~/Projects/thesis-market-making/reinforcement-learning/data/\"\n",
"TICK_SIZE = get_tick_size(EXCHANGE_NAME) # Tick size of the limit order book\n",
"LOT_SIZE = get_lot_size(EXCHANGE_NAME) # Lot size of the limit order book\n",
"DEPTH = 20 # Depth of the data to load to the limit order book (max 20)\n",
"EXCHANGE_TRADER_ID = \"Exchange\"\n",
"MAX_STEPS = None # Maximum number of steps in an episode\n",
"WIN = 0 # Window size for the features computation\n",
"# LOGGING = False # Indicates whether to log events\n",
"LOGGING = True\n",
"TS_SAVE = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") # Ts for model saving\n",
"RNG = np.random.default_rng(seed=SEED) # Random number generator\n",
"\n",
"# Set the parameters for the stochastic backtest\n",
"LATENCY_COMP_PARAMS = {\n",
" 0: {\"prob\": 0.9, \"divisor\": 1},\n",
" 1: {\"prob\": 0.9, \"divisor\": 1},\n",
" 2: {\"prob\": 0.9, \"divisor\": 1},\n",
" 3: {\"prob\": 0.9, \"divisor\": 1},\n",
"} # Latency compensation parameters for the stochastic backtest"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters for the automated backtest\n",
"priorities = [0, 1, 2, 3]\n",
"volumes = [10]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the results dictionary\n",
"results = {}\n",
"\n",
"# Run the backtests\n",
"for priority in priorities:\n",
" for volume in volumes: \n",
"\n",
" # Initialize the limit order book and traders\n",
" start = time.time()\n",
" traders = {}\n",
"\n",
" # Pure market making strategy\n",
" trader_id = f\"PMM_prior_{priority}_vol_{volume}\"\n",
" inventory_manage = True\n",
" description = f\"Pure market maker with priority {priority} and volume {volume}.\"\n",
" \n",
" # Set the commission model\n",
" if volume == 100:\n",
" if priority == 0:\n",
" tier = 5\n",
" elif priority == 1:\n",
" tier = 5\n",
" elif priority == 2:\n",
" tier = 2\n",
" elif priority == 3:\n",
" tier = 1\n",
" elif volume == 10:\n",
" if priority == 0:\n",
" tier = 4\n",
" elif priority == 1:\n",
" tier = 3\n",
" elif priority == 2:\n",
" tier = 1\n",
" elif priority == 3:\n",
" tier = 1\n",
" \n",
" com_model = BitCommissions(tier=tier)\n",
" trader = PureMarketMaker(\n",
" trader_id,\n",
" com_model=com_model,\n",
" volume=volume,\n",
" priority=priority,\n",
" inventory_manage=inventory_manage,\n",
" )\n",
" traders[trader.id] = trader\n",
"\n",
" # Initialize the exchange\n",
" exchange = Exchange(\n",
" exchange_name=EXCHANGE_NAME,\n",
" symbol_name=SYMBOL,\n",
" tick_size=TICK_SIZE,\n",
" lot_size=LOT_SIZE,\n",
" depth=DEPTH,\n",
" traders=traders,\n",
" max_steps=MAX_STEPS,\n",
" ts_start=TS_START,\n",
" ts_end=TS_END,\n",
" win=WIN,\n",
" path=PATH,\n",
" rl_trader_id=\"\",\n",
" latency_comp_params=LATENCY_COMP_PARAMS,\n",
" logging=LOGGING,\n",
" ts_save=TS_SAVE,\n",
" description=description,\n",
" rng=RNG,\n",
" )\n",
" end = round(time.time() - start, 2)\n",
"\n",
" # Run the exchange simulation\n",
" start = time.time()\n",
" exchange.run()\n",
" end = round(time.time() - start, 2)\n",
"\n",
" # Save the results\n",
" timestamps = exchange.stats[\"ts\"]\n",
" trader_stats = traders[trader_id].stats\n",
" initial_cost = 20.5 * volume * 2\n",
" results[trader_id] = {\n",
" \"timestamps\": timestamps,\n",
" \"trader_stats\": trader_stats,\n",
" \"initial_cost\": initial_cost,\n",
" }\n",
" \n",
"send_notification(message=\"Backtest finished!\", time=20000)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Save the results to a pickle file\n",
"save_dir = \"automated_backtests\"\n",
"\n",
"ensure_dir_exists(save_dir)\n",
"save_path = os.path.join(save_dir, f\"results_{TS_SAVE}.pickle\")\n",
"with open(save_path, \"wb\") as handle:\n",
" pickle.dump(results, handle, protocol=pickle.HIGHEST_PROTOCOL)\n",
" \n",
"print(f\"Results saved to {save_path}.\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Pure market maker (50 seeds)\n",
"\n",
"In this section I generate the statistics of the pure market making strategy with multiple priorities."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"TS_START = pd.Timestamp(\"2023-09-11 00:00:00\") # Start of the episode\n",
"TS_END = pd.Timestamp(\"2023-09-13 23:59:59\") # End of the episode"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters\n",
"EXCHANGE_NAME = \"BIT.COM\" \n",
"SYMBOL = \"SOL-USDT\"\n",
"PATH = \"~/Projects/thesis-market-making/reinforcement-learning/data/\"\n",
"TICK_SIZE = get_tick_size(EXCHANGE_NAME) # Tick size of the limit order book\n",
"LOT_SIZE = get_lot_size(EXCHANGE_NAME) # Lot size of the limit order book\n",
"DEPTH = 20 # Depth of the data to load to the limit order book (max 20)\n",
"EXCHANGE_TRADER_ID = \"Exchange\"\n",
"MAX_STEPS = None # Maximum number of steps in an episode\n",
"WIN = 0 # Window size for the features computation\n",
"# LOGGING = False # Indicates whether to log events\n",
"LOGGING = False\n",
"TS_SAVE = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") # Ts for model saving\n",
"\n",
"\n",
"# Set the parameters for the stochastic backtest\n",
"LATENCY_COMP_PARAMS = {\n",
" 0: {\"prob\": 0.9, \"divisor\": 1},\n",
" 1: {\"prob\": 0.9, \"divisor\": 1},\n",
" 2: {\"prob\": 0.9, \"divisor\": 1},\n",
" 3: {\"prob\": 0.9, \"divisor\": 1},\n",
"} # Latency compensation parameters for the stochastic backtest"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters for the automated backtest\n",
"priorities = [1]\n",
"volumes = [100]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the results dictionary\n",
"results = {}\n",
"\n",
"# Run the backtests\n",
"for seed in range(1, 51):\n",
" for priority in priorities:\n",
" for volume in volumes: \n",
" RNG = np.random.default_rng(seed=seed)\n",
"\n",
" # Initialize the limit order book and traders\n",
" start = time.time()\n",
" traders = {}\n",
"\n",
" # Pure market making strategy\n",
" trader_id = f\"PMM_prior_{priority}_vol_{volume}_{seed}\"\n",
" inventory_manage = True\n",
" description = f\"Pure market maker with priority {priority} and volume {volume}.\"\n",
" \n",
" # Set the commission model\n",
" if volume == 100:\n",
" if priority == 0:\n",
" tier = 5\n",
" elif priority == 1:\n",
" tier = 5\n",
" elif priority == 2:\n",
" tier = 2\n",
" elif priority == 3:\n",
" tier = 1\n",
" elif volume == 10:\n",
" if priority == 0:\n",
" tier = 4\n",
" elif priority == 1:\n",
" tier = 3\n",
" elif priority == 2:\n",
" tier = 1\n",
" elif priority == 3:\n",
" tier = 1\n",
" \n",
" com_model = BitCommissions(tier=tier)\n",
" trader = PureMarketMaker(\n",
" trader_id,\n",
" com_model=com_model,\n",
" volume=volume,\n",
" priority=priority,\n",
" inventory_manage=inventory_manage,\n",
" )\n",
" traders[trader.id] = trader\n",
"\n",
" # Initialize the exchange\n",
" exchange = Exchange(\n",
" exchange_name=EXCHANGE_NAME,\n",
" symbol_name=SYMBOL,\n",
" tick_size=TICK_SIZE,\n",
" lot_size=LOT_SIZE,\n",
" depth=DEPTH,\n",
" traders=traders,\n",
" max_steps=MAX_STEPS,\n",
" ts_start=TS_START,\n",
" ts_end=TS_END,\n",
" win=WIN,\n",
" path=PATH,\n",
" rl_trader_id=\"\",\n",
" latency_comp_params=LATENCY_COMP_PARAMS,\n",
" logging=LOGGING,\n",
" ts_save=TS_SAVE,\n",
" description=description,\n",
" rng=RNG,\n",
" )\n",
" end = round(time.time() - start, 2)\n",
"\n",
" # Run the exchange simulation\n",
" start = time.time()\n",
" exchange.run()\n",
" end = round(time.time() - start, 2)\n",
"\n",
" # Save the results\n",
" timestamps = exchange.stats[\"ts\"]\n",
" trader_stats = traders[trader_id].stats\n",
" initial_cost = 20.5 * volume * 2\n",
" results[trader_id] = {\n",
" \"timestamps\": timestamps,\n",
" \"trader_stats\": trader_stats,\n",
" \"initial_cost\": initial_cost,\n",
" }\n",
" \n",
"send_notification(message=\"Backtest finished!\", time=20000)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Save the results to a pickle file\n",
"save_dir = \"automated_backtests\"\n",
"\n",
"ensure_dir_exists(save_dir)\n",
"save_path = os.path.join(save_dir, f\"results_{TS_SAVE}.pickle\")\n",
"with open(save_path, \"wb\") as handle:\n",
" pickle.dump(results, handle, protocol=pickle.HIGHEST_PROTOCOL)\n",
" \n",
"print(f\"Results saved to {save_path}.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### AIRL market maker (50 seeds)\n",
"\n",
"In this section I generate the statistics of the AIRL market making strategy with multiple priorities."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"TS_START = pd.Timestamp(\"2023-09-11 00:00:00\") # Start of the episode\n",
"TS_END = pd.Timestamp(\"2023-09-13 23:59:59\") # End of the episode"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters\n",
"EXCHANGE_NAME = \"BIT.COM\" \n",
"SYMBOL = \"SOL-USDT\"\n",
"PATH = \"~/Projects/thesis-market-making/reinforcement-learning/data/\"\n",
"TICK_SIZE = get_tick_size(EXCHANGE_NAME) # Tick size of the limit order book\n",
"LOT_SIZE = get_lot_size(EXCHANGE_NAME) # Lot size of the limit order book\n",
"DEPTH = 20 # Depth of the data to load to the limit order book (max 20)\n",
"EXCHANGE_TRADER_ID = \"Exchange\"\n",
"MAX_STEPS = None # Maximum number of steps in an episode\n",
"WIN = 0 # Window size for the features computation\n",
"# LOGGING = False # Indicates whether to log events\n",
"LOGGING = False\n",
"TS_SAVE = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") # Ts for model saving\n",
"\n",
"\n",
"# Set the parameters for the stochastic backtest\n",
"LATENCY_COMP_PARAMS = {\n",
" 0: {\"prob\": 0.9, \"divisor\": 1},\n",
" 1: {\"prob\": 0.9, \"divisor\": 1},\n",
" 2: {\"prob\": 0.9, \"divisor\": 1},\n",
" 3: {\"prob\": 0.9, \"divisor\": 1},\n",
"} # Latency compensation parameters for stochastic backtest"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters for the automated backtest\n",
"priorities = [1]\n",
"volumes = [100]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Initialize the results dictionary\n",
"results = {}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Pick the timestamp of the model to load\n",
"# ts = \"2024-01-21_17-12-35\" # seed 1\n",
"# ts = \"2024-01-22_18-03-01\" # seed 2\n",
"# ts = \"2024-01-23_19-14-27\" # seed 3\n",
"# ts = \"2024-01-24_09-40-47\" # seed 4\n",
"# ts = \"2024-01-24_22-39-37\" # seed 5\n",
"ts = \"2024-01-24_22-39-37_best_9_297.5\" # seed 5 (best model)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from rl.utils import save_model, load_model\n",
"\n",
"# Load the model\n",
"load_path = os.path.join(os.getcwd(), \"models\")\n",
"# load_path = os.path.join(os.getcwd(), \"saved_models\")\n",
"learner, reward_net, stats = load_model(load_path, ts)\n",
"print(f\"Loaded model for timestamp: {ts}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from lob.traders import RLMarketMaker"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for seed in range(1, 51):\n",
" for priority in priorities:\n",
" for volume in volumes: \n",
" RNG = np.random.default_rng(seed=seed)\n",
"\n",
" # Initialize the limit order book and traders\n",
" start = time.time()\n",
" traders = {}\n",
"\n",
" # # Pure market making strategy\n",
" trader_id = f\"RL_prior_{priority}_vol_{volume}_{seed}\"\n",
" # inventory_manage = True\n",
" description = f\"RL market maker with priority {priority} and volume {volume}.\"\n",
" \n",
" # Set the commission model\n",
" if volume == 100:\n",
" if priority == 0:\n",
" tier = 5\n",
" elif priority == 1:\n",
" tier = 5\n",
" elif priority == 2:\n",
" tier = 2\n",
" elif priority == 3:\n",
" tier = 1\n",
" elif volume == 10:\n",
" if priority == 0:\n",
" tier = 4\n",
" elif priority == 1:\n",
" tier = 3\n",
" elif priority == 2:\n",
" tier = 1\n",
" elif priority == 3:\n",
" tier = 1\n",
" \n",
" com_model = BitCommissions(tier=tier)\n",
" trader = RLMarketMaker(\n",
" id=trader_id,\n",
" com_model=com_model,\n",
" volume=volume,\n",
" policy=learner.policy,\n",
" )\n",
" traders[trader.id] = trader\n",
"\n",
" # Initialize the exchange\n",
" exchange = Exchange(\n",
" exchange_name=EXCHANGE_NAME,\n",
" symbol_name=SYMBOL,\n",
" tick_size=TICK_SIZE,\n",
" lot_size=LOT_SIZE,\n",
" depth=DEPTH,\n",
" traders=traders,\n",
" max_steps=MAX_STEPS,\n",
" ts_start=TS_START,\n",
" ts_end=TS_END,\n",
" win=WIN,\n",
" path=PATH,\n",
" rl_trader_id=trader_id,\n",
" latency_comp_params=LATENCY_COMP_PARAMS,\n",
" logging=LOGGING,\n",
" ts_save=TS_SAVE,\n",
" description=description,\n",
" rng=RNG,\n",
" )\n",
" end = round(time.time() - start, 2)\n",
"\n",
" # Run the exchange simulation\n",
" start = time.time()\n",
" exchange.run()\n",
" end = round(time.time() - start, 2)\n",
"\n",
" # Save the results\n",
" timestamps = exchange.stats[\"ts\"]\n",
" trader_stats = traders[trader_id].stats\n",
" initial_cost = 20.5 * volume * 2\n",
" results[trader_id] = {\n",
" \"timestamps\": timestamps,\n",
" \"trader_stats\": trader_stats,\n",
" \"initial_cost\": initial_cost,\n",
" }\n",
" \n",
"send_notification(message=\"Backtest finished!\", time=20000)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Save the results to a pickle file\n",
"save_dir = \"automated_backtests\"\n",
"\n",
"ensure_dir_exists(save_dir)\n",
"save_path = os.path.join(save_dir, f\"results_{TS_SAVE}.pickle\")\n",
"with open(save_path, \"wb\") as handle:\n",
" pickle.dump(results, handle, protocol=pickle.HIGHEST_PROTOCOL)\n",
" \n",
"print(f\"Results saved to {save_path}.\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "reinforcement-learning-NTwPF8vr-py3.11",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| 28,011 | Python | .py | 845 | 28.622485 | 108 | 0.480012 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,051 | experts.py | JurajZelman_airl-market-making/rl/experts.py | """Expert policies for imitation learning."""
import math
import gymnasium as gym
import numpy as np
class RandomPolicy_v1:
"""Random policy for the v1 problem."""
def __init__(self, action_space: gym.spaces.Space) -> None:
"""
Initialize the random policy.
Args:
action_space: Action space of the environment.
"""
self.action_space = action_space
def predict(self, obs, state, *args, **kwargs) -> tuple:
"""
Get the random actions for the given observations, states and dones.
Args:
obs: Observations of the environment.
state: States.
args: Additional arguments.
kwargs: Additional keyword arguments.
Returns:
Actions and states.
"""
return np.array([self.action_space.sample()]), state
class ExpertPolicy_v1:
"""Expert policy for the v1 problem."""
def __init__(self) -> None:
"""Initialize the expert policy."""
def predict(self, obs, state, *args, **kwargs) -> tuple:
"""
Get the expert actions for the given observations, states and dones.
Args:
obs: Observations of the environment.
state: States.
args: Additional arguments.
kwargs: Additional keyword arguments.
Returns:
Actions and states.
"""
if math.isclose(obs[0][0], 0):
return np.array([5]), None
else:
return np.array([17]), None
| 1,543 | Python | .py | 44 | 26.181818 | 76 | 0.59097 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,052 | features.py | JurajZelman_airl-market-making/rl/features.py | """Feature processing methods."""
import datetime
from typing import Union
import numpy as np
import pandas as pd
import polars as pl
def get_features(
ts: pd.Timestamp, win: int, order_book: pl.DataFrame, time_step: float
) -> list:
"""
Get the features from the data.
Args:
ts: The timestamp.
win: The window size.
order_book: The order book data.
time_step: The normalized time step.
Returns:
The features.
"""
data = order_book.filter(
pl.col("received_time").is_between(ts - pd.Timedelta(seconds=win), ts)
).collect()
data = compute_features(data)
data = rolling_normalization(data, win)
row = data.row(-1)
features = [
time_step, # Normalize time step
row[1], # Bid 0 price
row[2], # Bid 0 size
row[3], # Bid 1 price
row[4], # Bid 1 size
row[5], # Bid 2 price
row[6], # Bid 2 size
row[41], # Ask 0 price
row[42], # Ask 0 size
row[43], # Ask 1 price
row[44], # Ask 1 size
row[45], # Ask 2 price
row[46], # Ask 2 size
row[84], # Mid price
row[85], # Mid price change
row[86], # Spread
]
features = np.clip(np.array(features, dtype=np.float32), a_min=-2, a_max=2)
return features
def verify_nans(data: pd.DataFrame):
"""
Verifies that there are no NaN values in the dataset.
Args:
data: The dataset to verify.
Raises:
ValueError: If there are NaN values in the dataset.
Returns:
True if there are no NaN values in the dataset, False otherwise.
"""
test = data.isnull().values.any()
if test:
raise ValueError("There are NaN values in the dataset.")
return test
def filter_data(
data: Union[pd.DataFrame, pl.DataFrame],
ts_start: datetime.datetime,
ts_end: datetime.datetime,
) -> Union[pd.DataFrame, pl.DataFrame]:
"""
Filters the data to the specified time range.
Args:
data: The data to filter.
ts_start: The start timestamp.
ts_end: The end timestamp.
Returns:
The filtered data.
"""
return data[(data.index >= ts_start) & (data.index <= ts_end)]
def compute_features(order_book: pl.DataFrame) -> pl.DataFrame:
"""
Computes the features for the orderbook data. If both ts_start and ts_end
are specified, the data are filtered out to the specified time range after
the computation of features.
Args:
order_book: The orderbook data.
ts_start: The start timestamp.
ts_end: The end timestamp.
Returns:
The orderbook data with the computed features.
"""
order_book = order_book.with_columns(
((order_book["bid_0_price"] + order_book["ask_0_price"]) / 2).alias(
"mid_price"
)
)
order_book = order_book.with_columns(
(
(order_book["mid_price"] - order_book["mid_price"].shift(1))
/ order_book["mid_price"].shift(1)
).alias("mid_price_change")
)
order_book = order_book.with_columns(
(order_book["ask_0_price"] - order_book["bid_0_price"]).alias("spread")
)
# Transform bid prices
for i in range(20):
order_book = order_book.with_columns(
(
(order_book[f"bid_{i}_price"] - order_book["mid_price"])
/ order_book["mid_price"]
).alias(f"bid_{i}_price")
)
# Transform ask prices
for i in range(20):
order_book = order_book.with_columns(
(
(order_book[f"ask_{i}_price"] - order_book["mid_price"])
/ order_book["mid_price"]
).alias(f"ask_{i}_price")
)
return order_book
def rolling_normalization(
order_book: pl.DataFrame, win_size: int
) -> pl.DataFrame:
"""
Normalize the dataset with rolling window mean and std.
Args:
order_book: Order book data to normalize.
win_size: Size of the rolling window.
Returns:
Normalized order book data.
"""
# Columns to normalize
columns = (
[f"bid_{i}_price" for i in range(20)]
+ [f"bid_{i}_size" for i in range(20)]
+ [f"ask_{i}_price" for i in range(20)]
+ [f"ask_{i}_size" for i in range(20)]
+ ["mid_price", "mid_price_change", "spread"]
)
# Compute rolling mean and standard deviation
for i in columns:
mean = order_book[i].rolling_mean(
window_size=win_size, min_periods=win_size
)
std = order_book[i].rolling_std(
window_size=win_size, min_periods=win_size
)
order_book = order_book.with_columns(
((order_book[i] - mean) / std).alias(i)
)
return order_book
| 4,829 | Python | .py | 148 | 25.412162 | 79 | 0.590157 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,053 | plotting.py | JurajZelman_airl-market-making/rl/plotting.py | """Methods for plotting and monitoring."""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.gridspec import GridSpec
def visualize_bc_train_stats(train_stats: dict):
"""
Visualize the training statistics of the behavior cloning agent.
Args:
train_stats: Training statistics.
"""
FIGSIZE = (12, 18)
fig = plt.figure(figsize=FIGSIZE)
gs = GridSpec(4, 2, figure=fig)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[0, 1])
ax3 = plt.subplot(gs[1, 0])
ax4 = plt.subplot(gs[1, 1])
ax5 = plt.subplot(gs[2, 0])
ax6 = plt.subplot(gs[2, 1])
ax7 = plt.subplot(gs[3, 0])
# ax8 = plt.subplot(gs[3, 1])
x = train_stats["num_samples_so_far"]
# Loss plot
ax1.plot(x, train_stats["loss"])
ax1.set_title("Loss")
# Entropy plot
ax2.plot(x, train_stats["entropy"])
ax2.set_title("Entropy")
# Entropy loss plot
ax3.plot(x, train_stats["ent_loss"])
ax3.set_title("Entropy loss")
# Probability of true action plot
ax4.plot(x, train_stats["prob_true_act"])
ax4.set_title("Probability of true action")
# L2 loss plot
ax5.plot(x, train_stats["l2_loss"])
ax5.set_title(r"$L_2$ loss")
# L2 norm plot
ax6.plot(x, train_stats["l2_norm"])
ax6.set_title(r"$L_2$ norm")
# Neglogp plot
ax7.plot(x, train_stats["neglogp"])
ax7.set_title("Neglogp")
plt.tight_layout()
plt.show()
def visualize_airl_train_stats(
train_stats: dict, save_fig: bool = False
) -> None:
"""
Visualize the training statistics of the AIRL trainer.
Args:
train_stats: Training statistics.
save_fig: Whether to save figures.
"""
FIGSIZE_DISC = (12, 18)
FIGSIZE_GEN = (12, 18)
# --------------------------------------------------
# Discriminator plot
# --------------------------------------------------
fig = plt.figure(figsize=FIGSIZE_DISC)
gs = GridSpec(4, 2, figure=fig)
ax1 = fig.add_subplot(gs[0, :])
ax2 = fig.add_subplot(gs[1, 0])
ax3 = fig.add_subplot(gs[1, 1])
ax4 = fig.add_subplot(gs[2, 0])
ax5 = fig.add_subplot(gs[2, 1])
ax6 = fig.add_subplot(gs[3, 0])
ax7 = fig.add_subplot(gs[3, 1])
# Discriminator loss
ax1.plot(train_stats["mean/disc/disc_loss"])
ax1.set_title("Discriminator loss")
# Discriminator accuracy
ax2.plot(train_stats["mean/disc/disc_acc"])
ax2.set_title("Discriminator accuracy")
# Discriminator entropy
ax3.plot(train_stats["mean/disc/disc_entropy"])
ax3.set_title("Discriminator entropy")
# Discriminator accuracy (expert)
ax4.plot(train_stats["mean/disc/disc_acc_expert"])
ax4.set_title("Discriminator accuracy (expert)")
# Discriminator accuracy (generator)
ax5.plot(train_stats["mean/disc/disc_acc_gen"])
ax5.set_title("Discriminator accuracy (generator)")
# Discriminator expert proportion (true)
ax6.plot(train_stats["mean/disc/disc_proportion_expert_true"])
ax6.set_title("Proportion of expert actions (true)")
# Discriminator expert proportion (predicted)
ax7.plot(train_stats["mean/disc/disc_proportion_expert_pred"])
ax7.set_title("Proportion of expert actions (predicted)")
fig.tight_layout()
if save_fig:
fig.savefig("images/disc_train_stats.pdf")
# --------------------------------------------------
# Generator plot
# --------------------------------------------------
fig = plt.figure(figsize=FIGSIZE_GEN)
gs = GridSpec(4, 2, figure=fig)
ax1 = fig.add_subplot(gs[0, :])
# ax2 = fig.add_subplot(gs[0, 1])
ax2 = fig.add_subplot(gs[1, 0])
ax3 = fig.add_subplot(gs[1, 1])
ax4 = fig.add_subplot(gs[2, 0])
ax5 = fig.add_subplot(gs[2, 1])
ax6 = fig.add_subplot(gs[3, 0])
ax7 = fig.add_subplot(gs[3, 1])
# Generator loss
ax1.plot(train_stats["mean/gen/train/loss"])
ax1.set_title("Generator loss")
# Generator entropy loss
ax2.plot(train_stats["mean/gen/train/entropy_loss"])
ax2.set_title("Generator entropy loss")
# Generator explained variance
ax3.plot(np.clip(train_stats["mean/gen/train/explained_variance"], -1, 1))
ax3.set_title("Generator explained variance")
# Generator value loss
ax4.plot(train_stats["mean/gen/train/value_loss"])
ax4.set_title("Generator value loss")
# Generator policy gradient loss
ax5.plot(train_stats["mean/gen/train/policy_gradient_loss"])
ax5.set_title("Generator policy gradient loss")
# Generator clip fraction
ax6.plot(train_stats["mean/gen/train/clip_fraction"])
ax6.set_title("Generator clip fraction")
# Generator approx kl
ax7.plot(train_stats["mean/gen/train/approx_kl"])
ax7.set_title("Generator approximate Kullback-Leibler div")
fig.tight_layout()
# fig.show()
# Save figure
if save_fig:
fig.savefig("images/gen_train_stats.pdf")
fig = plt.figure(figsize=(12, 4))
plt.plot(train_stats["mean/gen/rollout/ep_rew_mean"])
# Set y-axis limits
plt.ylim(0, 310)
plt.title("Mean episode reward")
if save_fig:
fig.savefig("images/mean_ep_rew.pdf")
| 5,166 | Python | .py | 138 | 32.173913 | 78 | 0.635325 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,054 | rewards.py | JurajZelman_airl-market-making/rl/rewards.py | """Reward networks and reward functions."""
import gymnasium as gym
import torch as th
from imitation.rewards.reward_nets import RewardNet
from stable_baselines3.common import preprocessing
class NegativeRewardNet(RewardNet):
"""
Simple reward neural network (multi-layer perceptron) that ensures that the
reward is always negative. This is needed for the inverse reinforcement
learning algorithms to work correctly.
"""
def __init__(
self,
observation_space: gym.Space,
action_space: gym.Space,
use_state: bool = True,
use_action: bool = True,
use_next_state: bool = False,
use_done: bool = False,
**kwargs,
) -> None:
"""Builds reward MLP.
Args:
observation_space: The observation space.
action_space: The action space.
use_state: Indicates whether the current state should be included as
an input to the network.
use_action: Indicates whether the current action should be included
as an input to the network.
use_next_state: Indicates whether the next state should be included
as an input to the network.
use_done: Indicates whether the done flag should be included as an
input to the network.
kwargs: passed straight through to `build_mlp`.
"""
super().__init__(observation_space, action_space)
# Compute the size of the input layer
combined_size = 0
self.use_state = use_state
if self.use_state:
combined_size += preprocessing.get_flattened_obs_dim(
observation_space
)
self.use_action = use_action
if self.use_action:
combined_size += preprocessing.get_flattened_obs_dim(action_space)
self.use_next_state = use_next_state
if self.use_next_state:
combined_size += preprocessing.get_flattened_obs_dim(
observation_space
)
self.use_done = use_done
if self.use_done:
combined_size += 1
# Define the layers
# self.relu = th.nn.ReLU()
# self.leaky_relu = th.nn.LeakyReLU()
# self.sigmoid = th.nn.Sigmoid()
self.relu = th.nn.ReLU()
self.log_sigmoid = th.nn.LogSigmoid()
self.linear1 = th.nn.Linear(combined_size, 256)
self.linear2 = th.nn.Linear(256, 256)
self.linear3 = th.nn.Linear(256, 1)
self.squeeze = SqueezeLayer()
self.scale = False
def forward(self, state, action, next_state, done) -> th.Tensor:
"""
Forward pass of the reward network.
Args:
state: State of the environment.
action: Action taken in the environment.
next_state: Next state of the environment.
done: Whether the episode has terminated.
Returns:
The reward for the given state-action-next_state-done pair.
"""
# Concatenate the inputs
inputs = []
if self.use_state:
inputs.append(th.flatten(state, 1))
if self.use_action:
inputs.append(th.flatten(action, 1))
if self.use_next_state:
inputs.append(th.flatten(next_state, 1))
if self.use_done:
inputs.append(th.reshape(done, [-1, 1]))
inputs_concat = th.cat(inputs, dim=1)
# Compute the outputs
outputs = self.linear1(inputs_concat)
# outputs = self.sigmoid(outputs)
outputs = self.relu(outputs)
outputs = self.linear2(outputs)
# outputs = self.sigmoid(outputs)
outputs = self.relu(outputs)
outputs = self.linear3(outputs)
outputs = self.log_sigmoid(outputs)
# Cap the reward to -700, enough for machine precision when in exp
outputs = th.clamp(outputs, min=-50)
if self.scale: # Scale [-np.inf, 0] to [-1, 0] for RL training
outputs = th.tanh(outputs)
outputs = self.squeeze(outputs)
assert outputs.shape == state.shape[:1]
return outputs
class SqueezeLayer(th.nn.Module):
"""Torch module that squeezes a B*1 tensor down into a size-B vector."""
def forward(self, x) -> th.Tensor:
"""
Forward pass of the squeeze layer.
Args:
x: A tensor to squeeze.
Returns:
The squeezed tensor.
"""
assert x.ndim == 2 and x.shape[1] == 1
new_value = x.squeeze(1)
assert new_value.ndim == 1
return new_value
| 4,627 | Python | .py | 117 | 29.897436 | 80 | 0.604547 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,055 | utils.py | JurajZelman_airl-market-making/rl/utils.py | """Various helper functions for RL algorithms."""
import os
import pickle
from datetime import datetime, timedelta
from random import uniform
import torch as th
from imitation.rewards import reward_nets
from stable_baselines3.common import base_class
from stable_baselines3.ppo import PPO
def send_notification(message: str, time: int = 10000) -> None:
"""
Send a notification to the user.
Args:
message: The message to send.
time: The time for which the notification should be displayed.
"""
os.system(
f'notify-send -t {time} "VSCode notification manager" "{message}"'
) # nosec: B605
def save_model(
learner: base_class.BaseAlgorithm,
reward_net: reward_nets,
stats: dict,
path: str,
ts: datetime,
) -> None:
"""
Saves the model to the specified path.
Args:
learner: Learner policy.
reward_net: Reward network.
stats: Training statistics.
path: Path to save the model to.
ts: Timestamp to include in the file names.
"""
if not os.path.exists(path):
os.makedirs(path)
# Save the learner
learner.save(f"{path}/{ts}_learner")
# Save the reward net
th.save(reward_net, f"{path}/{ts}_reward_nn")
# Save the training statistics
with open(f"{path}/{ts}_stats.pkl", "wb") as f:
pickle.dump(stats, f)
def load_model(path: str, ts: datetime) -> tuple:
"""
Loads the model from the specified path.
Args:
path: Path to load the model from.
ts: Timestamp to include in the file names.
Returns:
The learner, reward net, and training statistics.
"""
# Load the learner
learner = PPO.load(f"{path}/{ts}_learner", verbose=1)
# Load the reward net
reward_net = th.load(f"{path}/{ts}_reward_nn")
# Load the training statistics
with open(f"{path}/{ts}_stats.pkl", "rb") as f:
stats = pickle.load(f)
return learner, reward_net, stats
def random_timestamp(
start_timestamp: datetime, end_timestamp: datetime
) -> datetime:
"""
Return a random timestamp between the start and end timestamps.
Args:
start_timestamp: Start timestamp.
end_timestamp: End timestamp.
Returns:
Random timestamp between the start and end timestamps.
"""
# Calculate the time difference
diff = end_timestamp - start_timestamp
# Generate a random timedelta within the time difference
random_timedelta = timedelta(seconds=uniform(0, diff.total_seconds()))
# Add the random timedelta to the start datetime
random_timestamp = start_timestamp + random_timedelta
return random_timestamp
| 2,686 | Python | .py | 79 | 28.582278 | 74 | 0.678544 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,056 | environments.py | JurajZelman_airl-market-making/rl/environments.py | """Reinforcement learning environments."""
from datetime import datetime, timedelta
from typing import TypeVar
import gymnasium as gym
import numpy as np
from gymnasium.spaces import Box, Discrete
from lob.exchange import Exchange
from rl.utils import random_timestamp
ObsType = TypeVar("ObsType")
ActType = TypeVar("ActType")
class LimitOrderBookGym(gym.Env):
"""Gym environment with limit order book simulator."""
def __init__(
self,
exchange_name: str,
symbol_name: str,
tick_size: float,
lot_size: float,
depth: int,
traders: list,
max_steps: int,
ts_start: datetime,
ts_end: datetime,
deterministic: bool,
win: int,
path: str,
rl_trader_id: str,
latency_comp_params: dict,
logging: bool,
ts_save: datetime,
description: str,
rng: np.random.Generator,
) -> None:
"""
Initialize the LimitOrderBookGym environment.
Args:
exchange_name: Name of the exchange.
symbol_name: Name of the symbol.
tick_size: Tick size of the symbol.
lot_size: Lot size of the symbol.
depth: Depth of the limit order book.
traders: List of traders participating in the exchange.
max_steps: Maximum number of steps in the environment.
ts_start: Start timestamp.
ts_end: End timestamp.
deterministic: Whether to use a deterministic environment. If False,
the environment will randomly sample trajectories between the
start and end timestamps of the desired length.
win: Window size for features.
path: Path to the directory containing the datasets.
rl_trader_id: ID of the RL trader.
latency_comp_params: Parameters for the latency compensation model.
Each number represents the level of the order book to from which
a volume is sampled for a front running order that is placed
before the actual order with a specified probability.
logging: Whether to log the environment.
ts_save: Timestamp to include in the file names.
description: Description of the environment.
rng: Random number generator.
"""
# Load the parameters (for reset method)
self.exchange_name = exchange_name
self.symbol_name = symbol_name
self.tick_size = tick_size
self.lot_size = lot_size
self.depth = depth
self.traders = traders
self.max_steps = max_steps
self.ts_start = ts_start
self.ts_end = ts_end
self.deterministic = deterministic
self.win = win
self.path = path
self.rl_trader_id = rl_trader_id
self.latency_comp_params = latency_comp_params
self.logging = logging
self.ts_save = ts_save
self.description = description
self.rng = rng
self.action_space = Discrete(21)
self.observation_space = Box(low=-1, high=1, shape=(12,))
# Initialize the environment
self.reset()
def reset(self, seed: int = None, options: dict = None) -> ObsType:
"""
Reinitialize the LimitOrderBookGym environment.
Args:
seed: Seed for the environment.
options: Options for the environment.
Returns:
obs: Observation of the environment.
"""
# Reset the agents
for trader in self.traders:
if trader != "Exchange":
self.traders[trader].reset()
# Set the start timestamp (randomly if not deterministic)
if self.deterministic is False:
ts_end_lag = self.ts_end - timedelta(hours=1)
ts = random_timestamp(self.ts_start, ts_end_lag)
else:
ts = self.ts_start
# Initialize the exchange
self.exchange = Exchange(
exchange_name=self.exchange_name,
symbol_name=self.symbol_name,
tick_size=self.tick_size,
lot_size=self.lot_size,
depth=self.depth,
traders=self.traders,
max_steps=self.max_steps,
ts_start=ts,
ts_end=self.ts_end,
win=self.win,
path=self.path,
rl_trader_id=self.rl_trader_id,
latency_comp_params=self.latency_comp_params,
logging=self.logging,
ts_save=self.ts_save,
description=self.description,
initialize=False,
rng=self.rng,
)
obs = self.exchange.initialize_first_observation()
info = {}
return obs, info
def step(self, action: ActType) -> tuple[ObsType, float, bool, bool, dict]:
"""
Take a step in the environment.
Args:
action: Action to take in the environment.
Returns:
obs: Observation of the environment.
reward: Reward from the environment.
term: Whether the episode terminated.
trunc: Whether the episode was truncated.
info: Additional information about the environment.
"""
(
obs,
reward,
terminated,
truncated,
info,
) = self.exchange.process_timestep(action=action)
# Close environment when the episode terminates
if terminated:
self.close()
return obs, reward, terminated, truncated, info
def render(self) -> None:
"""Render the environment."""
pass
def close(self) -> None:
"""Close the environment."""
self.exchange.lob.close_parquet_writer()
| 5,809 | Python | .py | 155 | 27.341935 | 80 | 0.599645 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,057 | actions.py | JurajZelman_airl-market-making/rl/actions.py | """Methods related to actions of reinforcement learning agents."""
from typing import TypeVar
ActType = TypeVar("ActType")
# TODO: Update to the latest version of the environment
def decode_action_v1(action: ActType):
"""
Decode an action.
Args:
action: Action to decode.
"""
# If action 0, do not place any orders
if action == 0:
return [], []
# If action 1, place orders on both sides
elif action == 1:
return 1
| 476 | Python | .py | 16 | 24.875 | 66 | 0.660793 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,058 | exchange.py | JurajZelman_airl-market-making/lob/exchange.py | """Exchange simulator."""
import copy
import os
import pickle
from datetime import datetime
from typing import TypeVar
import numpy as np
import pandas as pd
import polars as pl
from tqdm import tqdm
from lob.data import scan_parquet
from lob.distributions import EmpiricalOrderVolumeDistribution
from lob.limit_order_book import LimitOrderBook
from lob.orders import Order
from lob.time import TimeManager
from lob.traders import ExchangeTrader, Trader
from lob.utils import get_rnd_str
ObsType = TypeVar("ObsType")
ActType = TypeVar("ActType")
pl.enable_string_cache() # Fix polars issues
class Exchange:
"""
Class representing an exchange simulator which handles the interactions
between traders and the limit order book.
"""
def __init__(
self,
exchange_name: str,
symbol_name: str,
tick_size: float,
lot_size: float,
depth: int,
traders: dict[str:Trader],
max_steps: int,
ts_start: pd.Timestamp,
ts_end: pd.Timestamp,
win: int,
path: str,
rl_trader_id: str,
initialize: bool = True,
logging: bool = False,
latency_comp_params: dict = {},
ts_save: datetime = None,
description: str = "",
rng: np.random.Generator = np.random.default_rng(seed=42),
) -> None:
"""
Initialize an exchange simulator.
Args:
exchange_name: Name of the exchange.
symbol_name: Name of the symbol.
tick_size: Tick size of the symbol.
lot_size: Lot size of the symbol.
depth: Max depth of the limit order book to load.
traders: List of traders participating in the exchange.
max_steps: Maximum number of steps to run in the simulation.
ts_start: Start timestamp.
ts_end: End timestamp.
win: Window size for features.
path: Path to the directory containing the datasets.
rl_trader_id: ID of the RL trader.
initialize: Indicates whether to process the first step in the
simulation. Since RL algorithms need the first observation we
allow to reset manually to get this observation.
logging: Indicates whether to log the limit order book.
latency_comp_params: Parameters for the latency compensation model.
Each number represents the level of the order book to from which
a volume is sampled for a front running order that is placed
before the actual order with a specified probability.
ts_save: Timestamp to include in the file names.
description: Description of the simulation.
rng: Random number generator.
"""
self.exchange_name = exchange_name
self.symbol_name = symbol_name
self.tick_size = tick_size
self.lot_size = lot_size
self.depth = depth
self.logging = logging
self.ts_save = ts_save
self.lob = LimitOrderBook(
tick_size=self.tick_size,
lot_size=self.lot_size,
logging=self.logging,
ts_save=self.ts_save,
)
self.exchange_trader = ExchangeTrader(id="Exchange", depth=self.depth)
self.traders = traders
self.traders["Exchange"] = self.exchange_trader
self.stats = {
"ts": [],
"bids": [],
"asks": [],
"bid_volumes": [],
"ask_volumes": [],
"trader_ids": [],
}
self.ts_start = ts_start
self.ts_end = ts_end
self.win = win
self.path = path
self.latency_comp_params = latency_comp_params
self.rl_trader_id = rl_trader_id
self.description = description
self.rng = rng
self.sampler = EmpiricalOrderVolumeDistribution(rng=self.rng)
if ts_save is None:
ts_save = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
self.ts_save = ts_save
# Set the limit order book reference for each trader
self.exchange_trader.set_lob(self.lob)
for i in self.traders.keys():
traders[i].set_lob(self.lob)
if i != "Exchange":
self.stats["trader_ids"].append(i)
# Initialize the timestamp iterator
self.time_manager = TimeManager(
exchange=self.exchange_name,
symbol=self.symbol_name,
ts_start=self.ts_start,
ts_end=self.ts_end,
path=self.path,
win_size=self.win,
max_steps=max_steps,
)
self.max_steps = self.time_manager.get_last_time_step() + 1
self.stats["ts"] = self.time_manager.get_timeline()
book_name = f"{self.exchange_name}_{self.symbol_name}_order_book"
self.book_data = scan_parquet(
name=book_name,
path=self.path,
ts_start=self.ts_start,
ts_end=self.ts_end,
win=self.win,
time_manager=self.time_manager,
)
trades_name = f"{self.exchange_name}_{self.symbol_name}_trades"
self.trades_data = scan_parquet(
name=trades_name,
path=self.path,
ts_start=self.ts_start,
ts_end=self.ts_end,
win=self.win,
time_manager=self.time_manager,
)
if initialize:
self.initialize_first_observation()
def initialize_first_observation(self) -> ObsType:
"""
Process the initial state of the limit order book. This is done by
processing the first limit orders in the dataframe. The initial state
is returned as an observation for RL algorithms.
Returns:
Initial observation of the limit order book.
"""
# ----------------------------------------------------------------------
# Load market data
# ----------------------------------------------------------------------
current_ts = self.time_manager.get_current_ts()
time_step = self.time_manager.get_current_time_step()
self.lob.ts = current_ts
book_data_now = self.book_data.filter(
pl.col("received_time") == current_ts
).collect()
# ----------------------------------------------------------------------
# Process exchange trader orders
# ----------------------------------------------------------------------
cancel_orders, new_orders = self.exchange_trader.place_orders(
time_step, book_data_now
)
cancel_orders.sort(key=lambda x: x.entry_time)
new_orders.sort(key=lambda x: x.entry_time)
while len(cancel_orders) > 0:
order = cancel_orders.pop(0)
self.remove_order_from_lob(order)
while len(new_orders) > 0:
order = new_orders.pop(0)
self.add_order_to_lob(order)
self.lob.update_mid_price_history()
# ----------------------------------------------------------------------
# Compute features
# ----------------------------------------------------------------------
# Compute scaled price distances to mid price
mid_price = self.lob.get_mid_price()
snapshot = self.lob.get_book_info(max_depth=3)
bid_dist = [(item[0] / mid_price - 1) for item in snapshot["bid_side"]]
ask_dist = [(item[0] / mid_price - 1) for item in snapshot["ask_side"]]
bid_dist[0] = bid_dist[0] / 0.006255
bid_dist[1] = bid_dist[1] / 0.011612
bid_dist[2] = bid_dist[2] / 0.014852
bid_dist = bid_dist[:3]
ask_dist[0] = ask_dist[0] / 0.006255
ask_dist[1] = ask_dist[1] / 0.008974
ask_dist[2] = ask_dist[2] / 0.011694
ask_dist = ask_dist[:3]
# Compute the mid-price change
mid_price_change = 0
# Compute the spread change
spread = self.lob.best_ask_price - self.lob.best_bid_price
spread_change = 0
self.prev_spread = spread
# Compute the order book imbalances
bid_vols = [item[1] for item in snapshot["bid_side"]]
ask_vols = [item[1] for item in snapshot["ask_side"]]
lob_imbalances = [
(ask_vols[0] - bid_vols[0]) / (ask_vols[0] + bid_vols[0]),
(ask_vols[1] - bid_vols[1]) / (ask_vols[1] + bid_vols[1]),
(ask_vols[2] - bid_vols[2]) / (ask_vols[2] + bid_vols[2]),
]
# Inventory ratio
if self.rl_trader_id:
agent = self.traders[self.rl_trader_id]
inventory = agent.inventory / agent.volume
else:
inventory = 0
feat = (
[inventory]
+ bid_dist
+ ask_dist
+ [mid_price_change, spread_change]
+ lob_imbalances
)
features = np.array(feat, dtype=np.float32)
features = np.clip(features, -1, 1)
self.last_obs = features
return features
def process_timestep(
self, action: ActType = None
) -> tuple[ObsType, float, bool, dict]:
"""
Process orders from all traders for one timestep. This includes the
following steps:
1. Load market data.
2. Process the actions of traders.
3. Update the LOB stats (best bid/ask, volumes, etc.)
4. Process the incoming market orders.
5. Compute rewards and check termination.
6. Update trader statistics.
7. Process limit orders from the exchange at time t+1.
8. Compute features for t+1 and return them as observation.
Args:
action: Action from the RL agent.
Returns:
obs: Observation of the environment.
reward: Reward for the current timestep.
terminated: Whether the simulation is terminated.
info: Additional information about the environment.
"""
# ----------------------------------------------------------------------
# 1. Load market data
# ----------------------------------------------------------------------
current_ts = self.time_manager.get_current_ts()
time_step = self.time_manager.get_current_time_step()
self.lob.ts = current_ts
next_ts = self.time_manager.get_next_ts()
trades_data_now = self.trades_data.filter(
pl.col("received_time").is_between(current_ts, next_ts)
).collect()
# ----------------------------------------------------------------------
# 2. Process orders from all agents (traders)
# ----------------------------------------------------------------------
cancel_orders, new_orders = [], []
for key in self.traders.keys():
# Ignore exchange trader
if key == self.exchange_trader.id:
continue
# Process action from agents (traders)
if key == self.rl_trader_id:
c, n = self.traders[key].place_orders(
time_step,
current_ts,
action,
self.last_obs,
)
elif key == "Avellaneda-Stoikov":
c, n = self.traders[key].place_orders(
time_step,
current_ts,
self.time_manager.get_last_time_step(),
)
else:
c, n = self.traders[key].place_orders(
time_step,
current_ts,
)
cancel_orders.extend(c)
new_orders.extend(n)
# Sort both lists based on arrival time
cancel_orders.sort(key=lambda x: x.entry_time)
new_orders.sort(key=lambda x: x.entry_time)
# Process the cancel orders
while len(cancel_orders) > 0:
order = cancel_orders.pop(0)
self.remove_order_from_lob(order)
# Process the new orders (including front running)
while len(new_orders) > 0:
order = new_orders.pop(0)
if self.latency_comp_params != {}:
# # Detect the level for bid price
# price = order.price
# if order.side:
# bids = self.lob.get_bids()
# lev = 0
# while lev < len(bids) and price <= bids[lev] and lev < 3:
# lev += 1
# # Detect the level for bid price
# else:
# asks = self.lob.get_asks()
# lev = 0
# while lev < len(asks) and price >= asks[lev] and lev < 3:
# lev += 1
# Always fix the level to 2 for enough volume
lev = 2
# Front run the order with a specified probability
rnd_unif = self.rng.uniform()
if rnd_unif < self.latency_comp_params[lev]["prob"]:
order_copy = copy.deepcopy(order)
order_copy.volume = (
self.sampler.sample(level=max(lev - 1, 0))
/ self.latency_comp_params[lev]["divisor"]
)
order_copy.id = "FrontRun" + get_rnd_str(4)
order_copy.trader_id = self.exchange_trader.id
self.add_order_to_lob(order_copy)
self.add_order_to_lob(order)
# ----------------------------------------------------------------------
# 3. Update the LOB statistics
# ----------------------------------------------------------------------
self.stats["bids"].append(self.lob.best_bid_price)
self.stats["asks"].append(self.lob.best_ask_price)
self.stats["bid_volumes"].append(self.lob.get_best_bid_volume())
self.stats["ask_volumes"].append(self.lob.get_best_ask_volume())
# ----------------------------------------------------------------------
# 4. Process the incoming market orders at [t, t+1)
# ----------------------------------------------------------------------
buy_orders = self.exchange_trader.process_historical_trades(
trades_data_now, ts=current_ts, side=True
)
sell_orders = self.exchange_trader.process_historical_trades(
trades_data_now, ts=current_ts, side=False
)
# Limit volume of the market orders to lob volume and ignore the rest
# to avoid order book depletion
while len(buy_orders) > 0:
order = buy_orders.pop(0)
max_volume = self.lob.get_ask_volume()
order.volume = min(max_volume, order.volume)
if order.volume > 0:
self.add_order_to_lob(order)
while len(sell_orders) > 0:
order = sell_orders.pop(0)
max_volume = self.lob.get_bid_volume()
order.volume = min(max_volume, order.volume)
if order.volume > 0:
self.add_order_to_lob(order)
# ----------------------------------------------------------------------
# 5. Compute rewards and check termination
# ----------------------------------------------------------------------
reward = 0
if self.rl_trader_id in self.traders.keys():
reward = self.traders[self.rl_trader_id].reward
# Update the time step and check termination
next_ts = self.time_manager.step_forward()
terminated = False if next_ts else True
# ----------------------------------------------------------------------
# 6.Update the trader statistics
# ----------------------------------------------------------------------
for key in self.traders.keys():
if key == self.exchange_trader.id:
continue
self.traders[key].update_stats(time_step)
# ----------------------------------------------------------------------
# 7. Process limit orders from the exchange at time t+1
# ----------------------------------------------------------------------
# Cancel agent orders
cancel_orders = []
for key in self.traders.keys():
# Ignore exchange trader
if key == self.exchange_trader.id:
continue
c = self.traders[key].cancel_orders()
cancel_orders.extend(c)
# Sort both lists based on arrival time
cancel_orders.sort(key=lambda x: x.entry_time)
# Process the orders
while len(cancel_orders) > 0:
order = cancel_orders.pop(0)
self.remove_order_from_lob(order)
prev_mid_price = self.lob.mid_price_history[-1]
if not terminated:
book_data_next = self.book_data.filter(
pl.col("received_time") == next_ts
).collect()
cancel_orders, new_orders = self.exchange_trader.place_orders(
time_step, book_data_next
)
# cancel_orders.sort(key=lambda x: x.entry_time)
new_orders.sort(key=lambda x: x.entry_time)
while len(cancel_orders) > 0:
order = cancel_orders.pop(0)
self.remove_order_from_lob(order)
while len(new_orders) > 0:
order = new_orders.pop(0)
self.add_order_to_lob(order)
self.lob.update_mid_price_history()
# ----------------------------------------------------------------------
# 8. Compute features for t+1 and return them as observation
# ----------------------------------------------------------------------
# Compute scaled price distances to mid price
mid_price = self.lob.get_mid_price()
snapshot = self.lob.get_book_info(max_depth=3)
bid_dist = [(item[0] / mid_price - 1) for item in snapshot["bid_side"]]
ask_dist = [(item[0] / mid_price - 1) for item in snapshot["ask_side"]]
bid_dist[0] = bid_dist[0] / 0.006255
bid_dist[1] = bid_dist[1] / 0.011612
bid_dist[2] = bid_dist[2] / 0.014852
bid_dist = bid_dist[:3]
ask_dist[0] = ask_dist[0] / 0.006255
ask_dist[1] = ask_dist[1] / 0.008974
ask_dist[2] = ask_dist[2] / 0.011694
ask_dist = ask_dist[:3]
# Compute the scaled mid-price change
mid_price_change = mid_price / prev_mid_price - 1
mid_price_change = mid_price_change / 0.17
# Compute the spread change
spread = self.lob.best_ask_price - self.lob.best_bid_price
spread_change = spread / self.prev_spread - 1
spread_change = spread_change / 0.14
self.prev_spread = spread
# Compute the order book imbalances
bid_vols = [item[1] for item in snapshot["bid_side"]]
ask_vols = [item[1] for item in snapshot["ask_side"]]
lob_imbalances = [
(ask_vols[0] - bid_vols[0]) / (ask_vols[0] + bid_vols[0]),
(ask_vols[1] - bid_vols[1]) / (ask_vols[1] + bid_vols[1]),
(ask_vols[2] - bid_vols[2]) / (ask_vols[2] + bid_vols[2]),
]
# Inventory ratio
if self.rl_trader_id:
agent = self.traders[self.rl_trader_id]
inventory = agent.inventory / agent.volume
inventory = np.clip(inventory, -1, 1)
else:
inventory = 0
feat = (
[inventory]
+ bid_dist
+ ask_dist
+ [mid_price_change, spread_change]
+ lob_imbalances
)
features = np.array(feat, dtype=np.float32)
features = np.clip(features, -1, 1)
truncated = False
# Save the observation
self.last_obs = features
return (
features,
float(reward),
terminated,
truncated,
{},
)
def run(self, visualize_step: int = None) -> None:
"""
Run the exchange simulation.
Args:
max_steps: Maximum number of steps to run in the simulation.
visualize_step: Visualize the limit order book every n steps.
"""
# Run the simulation until the end of the data or until the max steps
iterable = range(self.max_steps)
for i in tqdm(iterable, desc="Running the exchange simulation"):
self.process_timestep()
if visualize_step and i % visualize_step == 0:
self.lob.visualize()
# Finish the simulation
self.lob.close_parquet_writer()
self.save_exchange_stats()
def add_order_to_lob(self, order: Order) -> None:
"""Add an order to the limit order book."""
trades, new_order = self.lob.add_order(order)
time_step = self.time_manager.get_current_time_step()
# Process the trades
for trade in trades:
trade_price, trade_volume = trade["price"], trade["volume"]
order_make, order_take = trade["order_make"], trade["order_take"]
self.traders[order_make.trader_id].process_trade(
time_step, trade_price, trade_volume, order_make, True
)
self.traders[order_take.trader_id].process_trade(
time_step, trade_price, trade_volume, order_take, False
)
# Add the new order to the trader's active orders
if new_order:
self.traders[new_order.trader_id].add_order(new_order)
def remove_order_from_lob(self, order: Order) -> None:
"""Remove an order from the limit order book."""
order = self.lob.remove_order_by_id(order.id)
if order:
self.traders[order.trader_id].remove_order(order)
def save_exchange_stats(self) -> None:
"""Save the exchange statistics to a pickle file."""
if self.logging:
file_name = f"exchange_stats_{self.ts_save}.pkl"
with open(os.path.join("results_backtest", file_name), "wb") as f:
pickle.dump(self.stats, f)
# Save the trader stats
for key in self.traders.keys():
if key == self.exchange_trader.id:
continue
path = "results_backtest"
self.traders[key].save_stats(path, self.ts_save)
# Save the description
file_name = f"description_{self.ts_save}.txt"
with open(os.path.join("results_backtest", file_name), "w") as f:
f.write(self.description)
| 22,659 | Python | .py | 517 | 32.678917 | 80 | 0.522953 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,059 | orders.py | JurajZelman_airl-market-making/lob/orders.py | """Classes representing orders available at the market."""
import sys
from abc import ABC, abstractmethod
from datetime import datetime
class Order(ABC):
"""Abstract base class for orders."""
@abstractmethod
def __init__(
self,
ticker: str,
id: str,
trader_id: int,
side: bool,
volume: float,
entry_time: datetime,
) -> None:
"""
Initialize an order.
Args:
ticker: Ticker of the traded security.
id: Unique identifier of the order.
trader_id: Unique identifier of the trader who posted the order.
side: Side of the order (True for buy, False for sell).
volume: Volume of the security to buy or sell.
entry_time: Datetime when the order was posted.
"""
self.ticker = ticker
self.id = id
self.trader_id = trader_id
self.side = side
self.volume = volume
self.entry_time = entry_time
class LimitOrder(Order):
"""Limit order class."""
def __init__(
self,
ticker: str,
id: str,
trader_id: int,
side: bool,
volume: float,
entry_time: datetime,
price: float,
) -> None:
"""
Initialize a limit order.
Args:
ticker: Ticker of the traded security.
id: Unique identifier of the order.
trader_id: Unique identifier of the trader who posted the order.
side: Side of the order (True for buy, False for sell).
volume: Volume of the security to buy or sell.
entry_time: Datetime when the order was posted.
price: Limit price of the order.
"""
super().__init__(ticker, id, trader_id, side, volume, entry_time)
self.price = price
def __repr__(self) -> str:
"""
Return a string representation of the limit order.
Returns:
A string representation of the limit order.
"""
return (
f"LimitOrder(ticker={self.ticker}, id={self.id}, "
f"trader_id={self.trader_id}, side={self.side}, "
f"volume={self.volume}, entry_time={self.entry_time}, "
f"price={self.price})"
)
class MarketOrder(LimitOrder):
"""Market order class."""
def __init__(
self,
ticker: str,
id: str,
trader_id: int,
side: bool,
volume: float,
entry_time: datetime,
) -> None:
"""
Initialize a market order.
Args:
ticker: Ticker of the traded security.
id: Unique identifier of the order.
trader_id: Unique identifier of the trader who posted the order.
side: Side of the order (True for buy, False for sell).
volume: Volume of the security to buy or sell.
entry_time: Datetime when the order was posted.
"""
price = sys.maxsize if side else -sys.maxsize
super().__init__(ticker, id, trader_id, side, volume, entry_time, price)
def __repr__(self) -> str:
"""
Return a string representation of the market order.
Returns:
A string representation of the market order.
"""
return (
f"MarketOrder(ticker={self.ticker}, id={self.id}, "
f"trader_id={self.trader_id}, side={self.side}, "
f"volume={self.volume}, entry_time={self.entry_time})"
)
| 3,525 | Python | .py | 103 | 24.980583 | 80 | 0.566226 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,060 | order_queue.py | JurajZelman_airl-market-making/lob/order_queue.py | """Price order queue for orders of the same price."""
from pyllist import dllist, dllistnode
from lob.orders import Order
from lob.utils import round_to_lot
class OrderQueue:
"""
Price order queue for orders of the same price. The queue is implemented
as a double-linked list and is sorted by the entry time of the orders
(price-time priority rule).
"""
def __init__(self, lot_size: float) -> None:
"""
Initialize a price order queue.
Args:
lot_size: Lot size of the orders in the queue.
"""
self.lot_size = lot_size # Lot size of the orders in the queue
self.queue = dllist() # Double-linked list of orders
self.num_orders = 0 # Number of orders in the queue
self.volume = 0 # Cumulative volume of the orders in the queue
@property
def first_order(self) -> Order:
"""Return the first order in the queue."""
return self._first_order
@first_order.getter
def first_order(self) -> Order:
"""Return the first order in the queue."""
return self.queue.first.value
@property
def first_order_node(self) -> dllistnode:
"""Return the node of the first order in the queue."""
return self._first_order_node
@first_order_node.getter
def first_order_node(self) -> dllistnode:
"""Return the node of the first order in the queue."""
return self.queue.first
@property
def last_order(self) -> Order:
"""Return the last order in the queue."""
return self._last_order
@last_order.getter
def last_order(self) -> Order:
"""Return the last order in the queue."""
return self.queue.last.value
@property
def last_order_node(self) -> dllistnode:
"""Return the node of the last order in the queue."""
return self._last_order
@last_order_node.getter
def last_order_node(self) -> dllistnode:
"""Return the node of the last order in the queue."""
return self.queue.last
def add_order(self, order: Order) -> dllistnode:
"""
Add an order to the price order queue.
Args:
order: Order to add.
Returns:
Node of the added order in the double-linked list.
"""
# Add order to the last position in the queue
if (
self.num_orders == 0
or order.entry_time >= self.last_order.entry_time
):
self.num_orders += 1
self.volume += order.volume
self.volume = round_to_lot(self.volume, self.lot_size)
return self.queue.append(order)
# Find the position where to place the order in the queue
else:
temp = self.last_order_node
while temp is not None and order.entry_time < temp.value.entry_time:
temp = temp.prev
if temp is None:
self.num_orders += 1
self.volume += order.volume
self.volume = round_to_lot(self.volume, self.lot_size)
return self.queue.appendleft(order)
else:
self.num_orders += 1
self.volume += order.volume
self.volume = round_to_lot(self.volume, self.lot_size)
self.queue.insert(order, after=temp)
def remove_order(self, order: Order, order_node: dllistnode) -> None:
"""
Remove an order from the price order queue.
Args:
order: Order to remove.
order_node: Node of the order in the double-linked list.
"""
self.volume -= order.volume
self.volume = round_to_lot(self.volume, self.lot_size)
self.queue.remove(order_node)
self.num_orders -= 1
def update_order_volume(self, order: Order, volume: float) -> None:
"""
Update the volume of an order in the price order queue. This is not
meant to be used by the agent, it just serves as an helper function for
the exchange for updating the volume of a partially matched order.
Args:
order: Order to update.
volume: New volume of the order.
"""
if volume <= 0:
raise ValueError("Volume must be positive.")
if volume == order.volume:
return
self.volume = self.volume - order.volume + volume
self.volume = round_to_lot(self.volume, self.lot_size)
order.volume = volume
def __repr__(self) -> str:
"""
Return a string representation of the order queue. The string is a
concatenation of the string representations of the orders in the queue,
with the last line being the number of orders and the total volume
of the orders in the queue.
Returns:
repr: String representation of the order queue.
"""
repr, temp = "", self.first_order_node
while temp:
repr += f"{temp.__repr__()} \n"
temp = temp.next
repr += f"Num orders: {self.num_orders}, Volume: {self.volume}"
return repr
| 5,131 | Python | .py | 126 | 31.404762 | 80 | 0.602891 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,061 | distributions.py | JurajZelman_airl-market-making/lob/distributions.py | """Module for sampling from the empirical distributions."""
import os
import numpy as np
import pandas as pd
class EmpiricalOrderVolumeDistribution:
"""
Class for sampling order volumes from the empirical distribution estimated
on the insample order book data.
"""
def __init__(self, rng: np.random.Generator) -> None:
"""
Initialize the class by loading the volume distributions from the pickle
files.
Args:
rng: Numpy random generator.
"""
self.vols_level_0 = pd.read_pickle(
os.path.join(os.getcwd(), "distributions", "volumes_level_0.pkl")
).to_numpy()
self.vols_level_1 = pd.read_pickle(
os.path.join(os.getcwd(), "distributions", "volumes_level_1.pkl")
).to_numpy()
self.vols_level_2 = pd.read_pickle(
os.path.join(os.getcwd(), "distributions", "volumes_level_2.pkl")
).to_numpy()
self.rng = rng
def sample(self, level: int) -> float:
"""
Sample a volume from the empirical distribution.
Args:
level: The level of the order book to sample from.
Returns:
The sampled volume.
"""
if level == 0:
return self.rng.choice(self.vols_level_0)
elif level == 1:
return self.rng.choice(self.vols_level_1)
elif level == 2:
return self.rng.choice(self.vols_level_2)
else:
raise ValueError("Level must be between 0 and 2.")
| 1,537 | Python | .py | 42 | 27.952381 | 80 | 0.601615 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,062 | commissions.py | JurajZelman_airl-market-making/lob/commissions.py | """Commission models for computation of transaction costs."""
from abc import ABC, abstractmethod
class CommissionModel(ABC):
"""Abstract class for commission models."""
@abstractmethod
def maker_fee(self, quantity: float, price: float) -> float:
"""
Compute the maker fee.
Args:
quantity: Quantity of the asset that is being purchased (positive)
or sold (negative).
price: Price at which the asset is being purchased or sold.
"""
raise NotImplementedError("'maker_fee' method is not implemented.")
@abstractmethod
def taker_fee(self, quantity: float, price: float) -> float:
"""
Compute the taker fee.
Args:
quantity: Quantity of the asset that is being purchased (positive)
or sold (negative).
price: Price at which the asset is being purchased or sold.
"""
raise NotImplementedError("'taker_fee' method is not implemented.")
class NoCommissionModel(CommissionModel):
"""Commission model with no transaction costs."""
def maker_fee(self, quantity: float, price: float) -> float:
"""
Compute the maker fee.
Args:
quantity: Quantity of the asset that is being purchased (positive)
or sold (negative).
price: Price at which the asset is being purchased or sold.
Returns:
Maker fee to be paid to the trading platform.
"""
return 0
def taker_fee(self, quantity: float, price: float) -> float:
"""
Compute the taker fee.
Args:
quantity: Quantity of the asset that is being purchased (positive)
or sold (negative).
price: Price at which the asset is being purchased or sold.
Returns:
Taker fee to be paid to the trading platform.
"""
return 0
class BinanceCommissions(CommissionModel):
"""Binance commission model for spot and margin trading."""
def __init__(self, tier: int, bnb: bool = False) -> None:
"""
Initialize the commission model.
Args:
tier: Binance tier of the user. Goes from `0` to `14` indicating the
regular user, VIP 1, VIP 2, ..., VIP 9 tiers and values `10-14`
indicating the Binance Liquidity Program tiers with rebates.
bnb: Indicates whether to use Binance `BNB` 25% discount when paying
with the `BNB` token. Defaults to False.
"""
self.tier = tier
self.bnb = bnb
def maker_fee(self, quantity: float, price: float) -> float:
"""
Compute the maker fee.
Args:
quantity: Quantity of the asset that is being purchased (positive)
or sold (negative).
price: Price at which the asset is being purchased or sold.
Returns:
Maker fee to be paid to the trading platform. Negative fee indicates
a rebate.
"""
quantity = abs(quantity)
size = quantity * price
if self.bnb and self.tier < 10:
size = size * 0.75
match self.tier:
case 0:
return 0.001 * size
case 1:
return 0.0009 * size
case 2:
return 0.0008 * size
case 3:
return 0.00042 * size
case 4:
return 0.00042 * size
case 5:
return 0.00036 * size
case 6:
return 0.0003 * size
case 7:
return 0.00024 * size
case 8:
return 0.00018 * size
case 9:
return 0.00012 * size
case 10:
return 0.0000 * size
case 11:
return -0.00004 * size
case 12:
return -0.00006 * size
case 13:
return -0.00008 * size
case 14:
return -0.0001 * size
case _:
raise ValueError(f"Invalid tier: {self.tier}")
def taker_fee(self, quantity: float, price: float) -> float:
"""
Compute the taker fee.
Args:
quantity: Quantity of the asset that is being purchased (positive)
or sold (negative).
price: Price at which the asset is being purchased or sold.
Returns:
Taker fee to be paid to the trading platform.
"""
quantity = abs(quantity)
size = quantity * price
if self.bnb:
size = size * 0.75
match self.tier:
case 0:
return 0.001 * size
case 1:
return 0.001 * size
case 2:
return 0.001 * size
case 3:
return 0.0006 * size
case 4:
return 0.00054 * size
case 5:
return 0.00048 * size
case 6:
return 0.00042 * size
case 7:
return 0.00036 * size
case 8:
return 0.0003 * size
case 9:
return 0.00024 * size
case 10:
return 0.001 * size
case 11:
return 0.001 * size
case 12:
return 0.001 * size
case 13:
return 0.001 * size
case 14:
return 0.001 * size
case _:
raise ValueError(f"Invalid tier: {self.tier}")
class BitCommissions(CommissionModel):
    """BIT exchange commission model for spot and margin trading."""

    def __init__(self, tier: int) -> None:
        """
        Initialize the commission model.

        Args:
            tier: BIT.com tier of the user. Goes from `1` to `9` indicating the
                regular VIP 1, VIP 2, ..., VIP 9 tiers.
        """
        self.tier = tier

    def maker_fee(self, quantity: float, price: float) -> float:
        """
        Compute the maker fee.

        Args:
            quantity: Quantity of the asset that is being purchased (positive)
                or sold (negative).
            price: Price at which the asset is being purchased or sold.

        Returns:
            Maker fee to be paid to the trading platform. Negative fee
            indicates a rebate.
        """
        # Fee base is the traded notional, independent of the trade side.
        size = abs(quantity) * price
        # Maker rate per VIP tier (VIP 9 pays no maker fee).
        maker_rates = {
            1: 0.0008,
            2: 0.0007,
            3: 0.0006,
            4: 0.0005,
            5: 0.0004,
            6: 0.0003,
            7: 0.0002,
            8: 0.0001,
            9: 0.0,
        }
        if self.tier not in maker_rates:
            raise ValueError(f"Invalid tier: {self.tier}")
        return maker_rates[self.tier] * size

    def taker_fee(self, quantity: float, price: float) -> float:
        """
        Compute the taker fee.

        Args:
            quantity: Quantity of the asset that is being purchased (positive)
                or sold (negative).
            price: Price at which the asset is being purchased or sold.

        Returns:
            Taker fee to be paid to the trading platform.
        """
        # Fee base is the traded notional, independent of the trade side.
        size = abs(quantity) * price
        # Taker rate per VIP tier.
        taker_rates = {
            1: 0.001,
            2: 0.0009,
            3: 0.0008,
            4: 0.0007,
            5: 0.0006,
            6: 0.0005,
            7: 0.0004,
            8: 0.00035,
            9: 0.0003,
        }
        if self.tier not in taker_rates:
            raise ValueError(f"Invalid tier: {self.tier}")
        return taker_rates[self.tier] * size
| 8,270 | Python | .py | 234 | 23.08547 | 80 | 0.513128 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,063 | plots.py | JurajZelman_airl-market-making/lob/plots.py | """Plotting functionalities."""
import datetime
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from lob.backtest_metrics import drawdowns
# Accent colors shared by the plots below: green for bid-side panels,
# red for ask-side panels (see visualize_backtest).
COLOR_GREEN = "#13961a"
COLOR_RED = "#eb5c14"
def set_plot_style() -> None:
    """Configure matplotlib for a consistent look across all figures.

    Applies the seaborn-v0_8 style with the tab10 color cycle, switches text
    to the Computer Modern font (with mathtext tick formatting) and enlarges
    the default font sizes for titles, labels, ticks and legends.
    """
    plt.style.use("seaborn-v0_8")
    plt.rcParams.update(
        {"axes.prop_cycle": plt.cycler("color", plt.cm.tab10.colors)}
    )
    # Computer Modern font; mathtext keeps axis formatting consistent.
    plt.rcParams.update({"font.family": "cmr10", "font.size": 12})
    plt.rcParams.update({"axes.formatter.use_mathtext": True})
    small, medium, bigger = 16, 18, 20
    plt.rc("font", size=small)  # default text size
    plt.rc("axes", titlesize=small, labelsize=medium)  # axes title and labels
    plt.rc("xtick", labelsize=small)  # x tick labels
    plt.rc("ytick", labelsize=small)  # y tick labels
    plt.rc("legend", fontsize=small)  # legend entries
    plt.rc("figure", titlesize=bigger)  # figure suptitle
def visualize_backtest(
    ts: list[datetime.datetime], trader_stats: dict, initial_cost: float
) -> None:
    """Render the standard set of backtest diagnostic plots.

    Panels (in order): P&L, returns, drawdowns, inventory, total traded
    volume, cumulative fees, trade counts, quoted spread, quoted bid volume
    and quoted ask volume.

    Args:
        ts: Timestamps used as the x-axis of every panel.
        trader_stats: Per-step trader statistics keyed by metric name.
        initial_cost: Initial cost of the trader used for the computation of
            the equity curve.
    """
    # Equity curve: adjusted PnL shifted by the starting capital.
    equity = pd.Series(np.array(trader_stats["adj_pnl"]) + initial_cost)

    # Quoted spread is only defined where both sides were actually quoted.
    ask_quotes = np.array(trader_stats["quoted_ask_price"])
    bid_quotes = np.array(trader_stats["quoted_bid_price"])
    quoted_spread = np.where(
        np.isnan(ask_quotes) | np.isnan(bid_quotes),
        np.nan,
        np.subtract(ask_quotes, bid_quotes),
    )

    make_plot(
        x=ts,
        y=trader_stats["adj_pnl"],
        title="P&L",
        xlabel="Timestamp",
        ylabel="P&L (USDT)",
    )
    make_plot(
        x=ts,
        y=equity.pct_change() * 100,
        title="Returns",
        xlabel="Timestamp",
        ylabel="Returns (%)",
    )
    make_drawdown_plot(
        x=ts,
        y=drawdowns(equity),
        title="Drawdowns",
        xlabel="Timestamp",
        ylabel="Drawdown (%)",
    )

    # The remaining panels share a common layout, so they are driven from a
    # table of (series, title, y-label, line color).
    panels = [
        (trader_stats["inventory"], "Inventory", "Inventory (SOL)", "darkorange"),
        (
            trader_stats["total_volume"],
            "Total traded volume",
            "Total traded volume (USDT)",
            "darkorange",
        ),
        (
            trader_stats["cum_costs"],
            "Cumulative transaction fees",
            "Transaction fees (USDT)",
            None,
        ),
        (trader_stats["trade_count"], "Number of trades", "Number of trades", None),
        (quoted_spread, "Quoted spread", "Quoted spread", "black"),
        (
            trader_stats["quoted_bid_volume"],
            "Quoted bid volume",
            "Quoted bid volume (SOL)",
            COLOR_GREEN,
        ),
        (
            trader_stats["quoted_ask_volume"],
            "Quoted ask volume",
            "Quoted ask volume (SOL)",
            COLOR_RED,
        ),
    ]
    for series, title, ylabel, color in panels:
        # make_plot substitutes its default cycle color when color is None.
        make_plot(
            x=ts,
            y=series,
            title=title,
            xlabel="Timestamp",
            ylabel=ylabel,
            color=color,
        )
def make_plot(
    x: Union[list, np.ndarray, pd.Series],
    y: Union[list, np.ndarray, pd.Series],
    title: str = None,
    xlabel: str = None,
    ylabel: str = None,
    ylim: tuple = None,
    legend: bool = False,
    figsize: tuple = (12, 4.5),
    color: str = None,
    save_path: str = None,
) -> None:
    """Draw a single line plot and show it.

    Args:
        x: X-axis data.
        y: Y-axis data.
        title: Title of the plot.
        xlabel: Label of the x-axis.
        ylabel: Label of the y-axis.
        ylim: Optional (lower, upper) limits for the y-axis.
        legend: Whether to show the legend.
        figsize: Size of the figure.
        color: Line color; defaults to the first color of the active
            matplotlib color cycle.
        save_path: If given, the figure is also written to this path.
    """
    if not color:
        # First entry of the active prop cycle is matplotlib's default color.
        color = plt.rcParams["axes.prop_cycle"].by_key()["color"][0]
    plt.figure(figsize=figsize)
    plt.plot(x, y, color=color)
    for setter, value in (
        (plt.xlabel, xlabel),
        (plt.ylabel, ylabel),
        (plt.title, title),
    ):
        if value:
            setter(value)
    if legend:
        plt.legend()
    plt.tight_layout()
    if ylim:
        plt.ylim(ylim)
    if save_path:
        plt.savefig(save_path)
    plt.show()
def make_drawdown_plot(
    x: Union[list, np.ndarray, pd.Series],
    y: Union[list, np.ndarray, pd.Series],
    title: str = None,
    xlabel: str = None,
    ylabel: str = None,
    legend: bool = False,
    figsize: tuple = (12, 5),
    save_path: str = None,
) -> None:
    """Draw a shaded drawdown curve and show it.

    Args:
        x: X-axis data.
        y: Drawdown values.
        title: Title of the plot.
        xlabel: Label of the x-axis.
        ylabel: Label of the y-axis.
        legend: Whether to show the legend.
        figsize: Size of the figure.
        save_path: If given, the figure is also written to this path.
    """
    plt.figure(figsize=figsize)
    # Shade the area between the curve and zero, then trace the curve itself
    # with slightly higher opacity.
    plt.fill_between(x, y, 0, color="red", alpha=0.3)
    plt.plot(x, y, color="red", alpha=0.5)
    for setter, value in (
        (plt.xlabel, xlabel),
        (plt.ylabel, ylabel),
        (plt.title, title),
    ):
        if value:
            setter(value)
    if legend:
        plt.legend()
    plt.tight_layout()
    if save_path:
        plt.savefig(save_path)
    plt.show()
| 5,967 | Python | .py | 209 | 22.047847 | 80 | 0.597174 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,064 | time.py | JurajZelman_airl-market-making/lob/time.py | """Methods for handling of time and timestamps."""
import datetime
import polars as pl
class TimeManager:
    """Timeline class for timestamps management.

    Loads per-day parquet order book files for one exchange/symbol pair,
    builds an ordered list of timestamps covering ``[ts_start, ts_end]``
    (optionally preceded by a look-back window), and exposes cursor-style
    navigation over it.
    """

    def __init__(
        self,
        exchange: str,
        symbol: str,
        ts_start: datetime.datetime,
        ts_end: datetime.datetime,
        path: str,
        win_size: int = None,
        max_steps: int = None,
    ) -> None:
        """
        Initialize the timeline.

        Args:
            exchange: Exchange to load the timeline for.
            symbol: Symbol to load the timeline for.
            ts_start: Start timestamp.
            ts_end: End timestamp.
            path: Path to the directory containing datasets.
            win_size: Window size for the number of timestamps to preload
                before the start timestamp. If None, preload no timestamps.
            max_steps: Maximum number of steps to load. If None, load all
                timestamps in the given range.
        """
        self.exchange = exchange
        self.symbol = symbol
        self.ts_start = ts_start
        self.ts_end = ts_end
        self.path = path
        self.win_size = win_size
        self.max_steps = max_steps
        self.timeline = self.load_timeline()

    def _win(self) -> int:
        """Return the look-back window size, treating None as zero."""
        return self.win_size if self.win_size is not None else 0

    def get_current_time_step(self) -> int:
        """Get the current time step (0-based, window excluded)."""
        return self.iter - self._win()

    def get_current_time_ratio(self) -> float:
        """Get the current time step as a fraction of the last time step."""
        return self.get_current_time_step() / self.last_time_step

    def get_last_time_step(self) -> int:
        """Get the last time step."""
        return self.last_time_step

    def get_current_index(self) -> int:
        """Get the current index into the full timeline (window included)."""
        return self.iter

    def get_current_ts(self) -> datetime.datetime:
        """Get the current timestamp, or None past the end of the timeline."""
        if self.iter >= len(self.timeline):
            return None
        return self.timeline[self.iter]

    def step_forward(self) -> datetime.datetime:
        """Advance the cursor and return the new timestamp (None at the end)."""
        self.iter += 1
        if self.iter < len(self.timeline):
            return self.timeline[self.iter]
        return None

    def get_next_ts(self) -> datetime.datetime:
        """
        Get the next timestamp without moving the cursor. If the current
        timestamp is the last one, return None.
        """
        next_idx = self.iter + 1
        if next_idx < len(self.timeline):
            return self.timeline[next_idx]
        return None

    def get_previous_ts(self) -> datetime.datetime:
        """
        Get the previous timestamp without moving the cursor. If the current
        timestamp is the first one, return None.
        """
        prev_idx = self.iter - 1
        if prev_idx >= 0:
            return self.timeline[prev_idx]
        return None

    def load_timeline(self) -> list[datetime.datetime]:
        """Load the timeline for the given exchange and symbol.

        Reads the per-day parquet files covering the requested range,
        optionally preloads one extra day so the look-back window is filled,
        and trims the result to the window plus ``[ts_start, ts_end]``.

        Returns:
            Ordered list of timestamps.
        """
        # BUG FIX: the original computed `idx_start - self.win_size` even when
        # win_size was None (a TypeError), despite documenting None as "no
        # window". Normalize the window once and use it throughout.
        win = self._win()
        timeline = []

        # 1. Load the timeline for all days in the given range.
        dates = []
        date = self.ts_start.date()
        while date <= self.ts_end.date():
            dates.append(date)
            date += datetime.timedelta(days=1)
        for date in dates:
            dt = date.strftime("%Y_%m_%d")
            name = f"{self.path}/{self.exchange}_{self.symbol}_order_book_{dt}"
            df = pl.scan_parquet(f"{name}.parquet")
            timestamps = df.select(pl.col("received_time")).collect()
            timestamps = timestamps.get_columns()[0].to_list()
            timeline += timestamps

        # 2. Check whether the timeline contains a sufficient window and if
        # not, preload the previous day.
        if win > 0:
            idx_start = next(
                i for i, x in enumerate(timeline) if x > self.ts_start
            )
            if idx_start - win < 0:
                dt = (dates[0] - datetime.timedelta(days=1)).strftime(
                    "%Y_%m_%d"
                )
                name = (
                    f"{self.path}/{self.exchange}_{self.symbol}_order_book_{dt}"
                )
                df = pl.scan_parquet(f"{name}.parquet")
                timestamps = df.select(pl.col("received_time")).collect()
                timestamps = timestamps.get_columns()[0].to_list()
                timeline = timestamps + timeline

        # 3. Filter the timeline to the given range. Raises StopIteration if
        # no timestamp lies after ts_start — the data must cover the range.
        idx_start = next(i for i, x in enumerate(timeline) if x > self.ts_start)
        # Clamp at 0 so a still-too-short window cannot produce a negative
        # slice start (which would silently wrap around to the end).
        idx_win_start = max(idx_start - win, 0)
        try:
            idx_end = next(i for i, x in enumerate(timeline) if x > self.ts_end)
        except StopIteration:
            idx_end = None  # Range extends to the end of the data.
        timeline = timeline[idx_win_start:idx_end]
        if self.max_steps is not None:
            timeline = timeline[: min(self.max_steps + win, len(timeline))]

        # 4. Set the current index to the start index (just past the window).
        self.iter = win
        self.last_time_step = len(timeline) - self.iter - 1
        return timeline

    def get_timeline(self, with_win: bool = False) -> list[datetime.datetime]:
        """
        Get the timeline.

        Args:
            with_win: Whether to return the timeline with the window or not.

        Returns:
            The timeline.

        Raises:
            ValueError: If ``with_win`` is requested but no window was set.
        """
        if with_win:
            if self.win_size is None:
                raise ValueError("Window size is None.")
            return self.timeline
        return self.timeline[self._win() :]

    def get_ts_n_steps_from(
        self, ts: datetime.datetime, n: int
    ) -> datetime.datetime:
        """
        Get the timestamp n steps from the given timestamp. Positive n
        represents n steps forward, negative n represents n steps backward.

        Args:
            ts: Timestamp to get the n steps from.
            n: Number of steps to get.
        """
        index = self.get_index_for_ts(ts)
        return self.get_ts_for_index(index + n)

    def get_index_for_ts(self, ts: datetime.datetime) -> int:
        """
        Get the index for the given timestamp.

        Args:
            ts: Timestamp to get the index for.
        """
        return self.timeline.index(ts)

    def get_ts_for_index(self, index: int) -> datetime.datetime:
        """
        Get the timestamp for the given index.

        Args:
            index: Index of the timestamp to get.

        Raises:
            IndexError: If the index lies outside the timeline.
        """
        if index < 0 or index >= len(self.timeline):
            raise IndexError("Index out of bounds.")
        return self.timeline[index]

    def get_ts_larger_equal_than(self, ts) -> datetime.datetime:
        """
        Get the first timestamp larger or equal to the given timestamp.

        Args:
            ts: Timestamp to get the next timestamp for.
        """
        return next(x for x in self.timeline if x >= ts)

    def get_ts_smaller_equal_than(self, ts) -> datetime.datetime:
        """
        Get the last timestamp smaller or equal to the given timestamp.

        Args:
            ts: Timestamp to get the previous timestamp for.

        Returns:
            The first smaller timestamp.
        """
        return next(x for x in reversed(self.timeline) if x <= ts)
| 7,558 | Python | .py | 190 | 29.626316 | 80 | 0.576299 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,065 | utils.py | JurajZelman_airl-market-making/lob/utils.py | """Various helper functions for the lob package."""
import os
import random
import string
import numpy as np
import pandas as pd
def generate_second_timestamps(ts_start: pd.Timestamp, ts_end: pd.Timestamp):
    """Return one timestamp per second from `ts_start` to `ts_end` inclusive.

    Args:
        ts_start: Start timestamp.
        ts_end: End timestamp.

    Returns:
        List of timestamps spaced one second apart.
    """
    index = pd.date_range(start=ts_start, end=ts_end, freq="S")
    return index.tolist()
def round_to_tick(price: float, tick_size: float):
    """Snap a price to the nearest multiple of the tick size.

    Args:
        price: The original price to be rounded.
        tick_size: The minimum tick size for rounding. Smallest allowed value
            is 0.00001 (due to a rounding to 5 decimal places to avoid
            floating point errors).

    Returns:
        The rounded price.
    """
    n_ticks = round(price / tick_size)
    # Outer round to 5 decimals cancels float error from the multiplication.
    return round(n_ticks * tick_size, 5)
def round_to_lot(volume: float, lot_size: float):
    """Snap a volume to the nearest multiple of the lot size.

    Args:
        volume: The original volume to be rounded.
        lot_size: The minimum lot size for rounding.

    Returns:
        The rounded volume.
    """
    n_lots = round(volume / lot_size)
    # Outer round to 7 decimals cancels float error from the multiplication.
    return round(n_lots * lot_size, 7)
def get_lot_size(exchange: "str") -> float:
    """
    Returns the lot size for the given exchange.

    Args:
        exchange: The exchange to get the lot size for.

    Returns:
        The lot size for the given exchange.

    Raises:
        ValueError: If no lot size is configured for the exchange.
    """
    lot_sizes = {
        "BINANCE": 0.01,
        "OKX": 0.000001,
        "GATEIO": 0.000001,
        "BIT.COM": 0.01,
    }
    if exchange not in lot_sizes:
        raise ValueError(f"Lot size for exchange {exchange} not set.")
    return lot_sizes[exchange]
def get_tick_size(exchange: "str") -> float:
    """
    Returns the tick size for the given exchange.

    Args:
        exchange: The exchange to get the tick size for.

    Returns:
        The tick size for the given exchange.

    Raises:
        ValueError: If no tick size is configured for the exchange.
    """
    tick_sizes = {
        "BINANCE": 0.01,
        "OKX": 0.001,
        "GATEIO": 0.001,
        "BIT.COM": 0.01,
    }
    if exchange not in tick_sizes:
        raise ValueError(f"Tick size for exchange {exchange} not set.")
    return tick_sizes[exchange]
def get_rnd_str(length: int = 3) -> str:
    """Return a random string of uppercase letters and digits.

    Args:
        length: Number of characters to generate.
    """
    alphabet = string.ascii_uppercase + string.digits
    return "".join(random.choices(alphabet, k=length))
def get_rnd_id(length: int = 6) -> int:
    """Return a random integer with exactly `length` decimal digits."""
    low, high = 10 ** (length - 1), 10**length - 1
    return random.randint(low, high)
def get_rnd_side() -> bool:
    """Return a random boolean (order side)."""
    return random.choice((True, False))
def get_rnd_price_around_mean(mean: float, spread: float, tick: float) -> float:
    """Return a random price from a tick grid around the mean value.

    Args:
        mean: Center of the price grid.
        spread: Half-width of the grid around the mean.
        tick: Spacing between neighboring grid prices.

    Returns:
        Randomly chosen grid price, rounded to 2 decimal places.
    """
    grid = np.arange(mean - spread, mean + spread, tick)
    return round(random.choice(list(grid)), 2)
def get_rnd_volume() -> int:
    """Return a uniformly random integer volume between 1 and 200."""
    return random.choice(range(1, 201))
def ensure_dir_exists(path: str) -> None:
    """Create the directory `path` (and missing parents) if needed.

    Uses ``os.makedirs(..., exist_ok=True)`` so the check and the creation
    are a single call, avoiding the race between ``os.path.exists`` and
    ``os.makedirs`` when several processes create the same directory.

    Args:
        path: Directory path to create.
    """
    os.makedirs(path, exist_ok=True)
| 3,346 | Python | .py | 97 | 27.865979 | 80 | 0.628927 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,066 | traders.py | JurajZelman_airl-market-making/lob/traders.py | """Implementations of market participants."""
import datetime
import math
import pickle
from abc import ABC, abstractmethod
from typing import Any, TypeVar
import numpy as np
import polars as pl
from lob.commissions import CommissionModel
from lob.limit_order_book import LimitOrderBook
from lob.orders import LimitOrder, MarketOrder, Order
from lob.utils import get_rnd_str, round_to_lot, round_to_tick
# Generic type variables for RL action / observation values (used by the
# reinforcement-learning trader below).
ActType = TypeVar("ActType")
ObsType = TypeVar("ObsType")
# Trading-pair symbol attached to every order created by the traders below.
COIN_SYMBOL = "SOL-USDT"
class Trader(ABC):
    """Abstract base class defining the interface of a market participant."""

    @abstractmethod
    def __init__(self, id: str):
        """
        Create a trader.

        Args:
            id: Unique identifier of the trader.
        """
        self.id = id  # Unique trader identifier
        self.lob = None  # Limit order book, attached later via set_lob()
        self.active_orders = []  # Orders currently resting in the book

    def set_lob(self, lob: LimitOrderBook) -> None:
        """
        Attach the limit order book the trader operates on.

        Args:
            lob: Limit order book.
        """
        self.lob = lob

    @abstractmethod
    def place_orders(
        self, time_step: int, *args, **kwargs
    ) -> tuple[list, list]:
        """
        Build the order flow for the current step.

        Args:
            time_step: Current time step.

        Returns:
            Tuple with the orders to cancel and the orders to add.
        """

    @abstractmethod
    def cancel_orders(self) -> list:
        """Return the active orders that should all be canceled."""

    @abstractmethod
    def add_order(self, order: Order) -> None:
        """
        Register an order among the trader's active orders.

        Args:
            order: Order to add.
        """
        self.active_orders.append(order)

    @abstractmethod
    def remove_order(self, order: Order) -> None:
        """
        Drop an order from the trader's active orders.

        Args:
            order: Order to remove.
        """

    @abstractmethod
    def process_trade(
        self,
        time_step: int,
        price: float,
        volume: float,
        order: Order,
        is_make: bool,
    ) -> None:
        """
        React to one of the trader's orders being (partially) matched.

        Args:
            time_step: Current time step.
            price: Price of the trade.
            volume: Volume of the trade.
            order: Order that was matched.
            is_make: True if the order was a maker, False if taker.
        """

    @abstractmethod
    def save_stats(self, path: int, date: str) -> None:
        """
        Persist the trader's statistics.

        Args:
            path: Path to the directory where the statistics should be saved.
            date: Date of the simulation start.
        """
class ExchangeTrader(Trader):
    """
    Trader that is used to replicate the state of the real limit order book from
    input dataset. Currently, the trader supports datasets from Crypto lake.
    """
    def __init__(self, id: str, depth: int = 10) -> None:
        """
        Initialize an exchange trader.
        Args:
            id: Unique identifier of the trader.
            depth: Depth of the limit order book to replicate.
        """
        super().__init__(id)
        self.depth = depth  # Depth of the limit order book to replicate
        # NOTE(review): never written in this class after init — presumably a
        # placeholder for externally assigned timestamps; confirm before use.
        self.timestamps = None
    def place_orders(
        self, time_step: int, book_data: pl.DataFrame
    ) -> tuple[list, list]:
        """
        Create lists of orders to be canceled and added to the lob.
        Cancels every order placed in the previous step and re-creates one
        limit order per populated bid/ask level of the snapshot, so the
        simulated book mirrors the recorded one.
        Args:
            time_step: Current time step.
            book_data: Dataframe with limit order book data for the current
                time step.
        Returns:
            Tuple of lists of orders to cancel and add.
        """
        entry_time = book_data["received_time"][0]
        order_id = str(int(book_data["sequence_number"][0]))
        # Delete previous bid / ask orders
        cancel_orders = self.active_orders
        # Process all bid / ask orders for a chosen depth
        new_orders = []
        # Deterministic per-level ids derived from the sequence number:
        # bids get suffixes "BA", "BB", ... and asks "AA", "AB", ...
        # (chr(65) == "A").
        order_ids = [
            (
                order_id + f"B{chr(65+j)}"
                if j < self.depth
                else order_id + f"A{chr(65+j-self.depth)}"
            )
            for j in range(self.depth * 2)
        ]
        for j in range(self.depth * 2):
            # Indices [0, depth) map to bid columns, [depth, 2*depth) to asks.
            name = f"bid_{j}" if j < self.depth else f"ask_{j - self.depth}"
            # Skip levels with missing or zero price/size (empty book levels).
            if book_data[name + "_size"][0] and book_data[name + "_price"][0]:
                order = LimitOrder(
                    book_data["symbol"][0],
                    order_ids[j],
                    self.id,
                    True if j < self.depth else False,
                    book_data[name + "_size"][0],
                    entry_time,
                    book_data[name + "_price"][0],
                )
                new_orders.append(order)
        return cancel_orders, new_orders
    def cancel_orders(self) -> list:
        """Cancel all active orders."""
        return self.active_orders
    def add_order(self, order: Order) -> None:
        """
        Add an order to the trader's active orders.
        Args:
            order: Order to add.
        """
        self.active_orders.append(order)
    def remove_order(self, order: Order) -> None:
        """
        Remove an order from the trader's active orders.
        Args:
            order: Order to remove.
        """
        if order in self.active_orders:
            self.active_orders.remove(order)
    def process_trade(
        self,
        time_step: int,
        price: float,
        volume: float,
        order: Order,
        is_make: bool,
    ) -> None:
        """
        Process a trade. Only drops the matched order from the active list
        once it is fully filled; this trader keeps no PnL statistics.
        Args:
            time_step: Current time step.
            price: Price of the trade.
            volume: Volume of the trade.
            order: Order that was matched.
            is_make: True if the order was a maker, False if taker.
        """
        if order in self.active_orders and math.isclose(0, order.volume):
            self.active_orders.remove(order)
    def process_historical_trades(
        self, data: pl.DataFrame, ts, side: bool
    ) -> list[Order]:
        """Load historical trades data.
        Converts the recorded trades of one side into market orders carrying
        random ids.
        Args:
            data: Dataframe of historical trades with "side", "quantity" and
                "received_time" columns.
            ts: Timestamp argument (currently unused in the body).
            side: True selects "buy" trades, False selects "sell" trades.
        Returns:
            List of market orders replaying the recorded trades.
        """
        side_str = "buy" if side else "sell"
        data = data.filter(pl.col("side") == side_str)
        orders = []
        for row in data.rows(named=True):
            order = MarketOrder(
                COIN_SYMBOL,
                id=get_rnd_str(4),  # TODO: Preprocess the ids
                trader_id=self.id,
                side=side,
                volume=row["quantity"],
                entry_time=row["received_time"],
            )
            orders.append(order)
        return orders
    def save_stats(self, path: int, date: str) -> None:
        """
        Save the trader's statistics. No-op: this trader tracks no stats.
        Args:
            path: Path to the directory where the statistics should be saved.
            date: Date of the simulation start.
        """
        pass
class PureMarketMaker(Trader):
    """
    Pure market making strategy that places orders at the specified level of the
    limit order book.
    """
    def __init__(
        self,
        id: str,
        com_model: CommissionModel,
        volume: float = 1,
        priority: int = 0,
        inventory_manage: bool = False,
    ) -> None:
        """
        Initialize a pure market maker.
        Args:
            id: Unique identifier of the trader.
            volume: Volume of the orders to be placed.
            com_model: Commission model.
            priority: Priority of the orders to be placed. 0 means that the
                orders will be placed at the best bid and ask prices or one tick
                better if the spread is at least 3 ticks wide. 1 means that the
                orders will be placed at the first price level, 2 at the second
                price level, etc.
            inventory_manage: True if the trader should manage its inventory
                to minimize inventory risk.
        """
        super().__init__(id)
        self.volume = volume  # Volume of the orders to be placed
        self.priority = priority  # Priority of the orders to be placed
        self.com_model = com_model  # Commission model
        self.inventory_manage = inventory_manage  # Inventory management
        self.reset()
    def reset(self) -> None:
        """Reset the trader's statistics."""
        self.lob = None
        self.active_orders = []
        self.realized_pnl = 0  # Realized pnl
        self.adj_pnl = 0  # Adjusted pnl
        self.inventory = 0  # Long (positive) or short (negative) inventory
        self.cum_costs = 0  # Cumulative transaction costs
        self.total_volume = 0  # Total volume traded
        self.trade_count = 0  # Number of trades in one time step
        # Per-step history of the metrics above; appended in update_stats()
        # and place_orders(), persisted by save_stats().
        self.stats = {
            "realized_pnl": [],
            "adj_pnl": [],
            "inventory": [],
            "cum_costs": [],
            "total_volume": [],
            "trade_count": [],
            "quoted_bid_price": [],
            "quoted_ask_price": [],
            "quoted_bid_volume": [],
            "quoted_ask_volume": [],
        }
    def place_orders(self, time_step: int, timestamp) -> tuple[list, list]:
        """
        Create lists of orders to be canceled and added to the lob.
        Args:
            time_step: Current time step.
            timestamp: Timestamp attached to the newly created orders.
        Returns:
            Tuple of lists of orders to cancel and add.
        """
        # Cancel old orders
        cancel_orders = self.active_orders
        # Set the order prices
        # NOTE(review): assumes get_bids()/get_asks() return price levels
        # sorted best-first — confirm in LimitOrderBook.
        bids, asks = self.lob.get_bids(), self.lob.get_asks()
        if not bids or not asks:
            # One side of the book is empty: quote nothing this step.
            self.stats["quoted_bid_price"].append(np.nan)
            self.stats["quoted_ask_price"].append(np.nan)
            self.stats["quoted_bid_volume"].append(0)
            self.stats["quoted_ask_volume"].append(0)
            return cancel_orders, []
        if self.priority == 0:
            diff = round_to_lot(asks[0] - bids[0], self.lob.lot_size)
            if diff >= 3 * self.lob.tick_size:
                # Spread is wide enough to quote one tick inside the touch.
                bid_price, ask_price = (
                    bids[0] + self.lob.tick_size,
                    asks[0] - self.lob.tick_size,
                )
            else:
                # Tight spread: quote one tick above/below the second level
                # (falling back to the deepest level if the book is shallow).
                bid_price = (
                    bids[1] + self.lob.tick_size if 2 <= len(bids) else bids[-1]
                )
                ask_price = (
                    asks[1] - self.lob.tick_size if 2 <= len(asks) else asks[-1]
                )
        else:
            # Quote one tick inside the configured depth level, falling back
            # to the deepest available level on a shallow book.
            bid_price = (
                bids[self.priority] + self.lob.tick_size
                if self.priority + 1 <= len(bids)
                else bids[-1]
            )
            ask_price = (
                asks[self.priority] - self.lob.tick_size
                if self.priority + 1 <= len(asks)
                else asks[-1]
            )
        new_orders = []
        # With inventory management on, only buy back up to the short
        # position; never add to a long position.
        if self.inventory_manage and self.inventory < 0:
            bid_volume = min(self.volume, abs(self.inventory))
        elif self.inventory_manage and self.inventory > 0:
            bid_volume = 0
        else:
            bid_volume = self.volume
        if bid_volume != 0:
            new_orders.append(
                LimitOrder(
                    COIN_SYMBOL,
                    get_rnd_str(6),
                    self.id,
                    True,
                    bid_volume,
                    timestamp,
                    bid_price,
                )
            )
        # Mirror-image logic for the ask side: only sell down a long position.
        if self.inventory_manage and self.inventory > 0:
            ask_volume = min(self.volume, self.inventory)
        elif self.inventory_manage and self.inventory < 0:
            ask_volume = 0
        else:
            ask_volume = self.volume
        if ask_volume != 0:
            new_orders.append(
                LimitOrder(
                    COIN_SYMBOL,
                    get_rnd_str(6),
                    self.id,
                    False,
                    ask_volume,
                    timestamp,
                    ask_price,
                )
            )
        self.stats["quoted_bid_price"].append(bid_price)
        self.stats["quoted_ask_price"].append(ask_price)
        self.stats["quoted_bid_volume"].append(bid_volume)
        self.stats["quoted_ask_volume"].append(ask_volume)
        return cancel_orders, new_orders
    def cancel_orders(self) -> list:
        """Cancel all active orders."""
        return self.active_orders
    def add_order(self, order: Order) -> None:
        """
        Add an order to the trader's active orders.
        Args:
            order: Order to add.
        """
        self.active_orders.append(order)
    def remove_order(self, order: Order) -> None:
        """
        Remove an order from the trader's active orders.
        Args:
            order: Order to remove.
        """
        if order in self.active_orders:
            self.active_orders.remove(order)
    def process_trade(
        self,
        time_step: int,
        price: float,
        volume: float,
        order: Order,
        is_make: bool,
    ) -> None:
        """
        Process a trade: update inventory, realized/adjusted PnL, fees and
        volume counters for one (partial) fill.
        Args:
            time_step: Current time step.
            price: Price of the trade.
            volume: Volume of the trade.
            order: Order that was matched.
            is_make: True if the order was a maker, False if taker.
        """
        # Drop the order from the active list once it is fully filled.
        if order in self.active_orders and math.isclose(0, order.volume):
            self.active_orders.remove(order)
        # Buy (side=True) raises inventory and spends cash; sell does the
        # opposite.
        if order.side:
            self.inventory += volume
            self.realized_pnl -= price * volume
        else:
            self.inventory -= volume
            self.realized_pnl += price * volume
        mid_price_median = self.lob.get_mid_price_median()
        if is_make:
            costs = self.com_model.maker_fee(volume, price)
        else:
            costs = self.com_model.taker_fee(volume, price)
        self.realized_pnl -= costs
        # Adjusted PnL marks the open inventory to the book's median
        # mid-price (see LimitOrderBook.get_mid_price_median).
        self.adj_pnl = self.realized_pnl + self.inventory * mid_price_median
        self.cum_costs += costs
        self.total_volume += volume * price
        self.trade_count += 1
    def update_stats(self, time_step: int) -> None:
        """
        Update the trader's statistics: snapshot the running counters into
        the per-step history and reset the per-step trade count.
        Args:
            time_step: Current time step.
        """
        # Re-quantize inventory to the lot grid — presumably to cancel
        # floating-point drift from repeated fills; confirm.
        self.inventory = round_to_lot(self.inventory, self.lob.lot_size)
        self.stats["realized_pnl"].append(self.realized_pnl)
        self.stats["adj_pnl"].append(self.adj_pnl)
        self.stats["inventory"].append(self.inventory)
        self.stats["cum_costs"].append(self.cum_costs)
        self.stats["total_volume"].append(self.total_volume)
        self.stats["trade_count"].append(self.trade_count)
        self.trade_count = 0
    def save_stats(self, path: int, date: str) -> None:
        """
        Save the trader's statistics as a pickle file.
        Args:
            path: Path to the directory where the statistics should be saved.
            date: Date of the simulation start.
        """
        file_name = f"{path}/trader_{self.id}_{date}.pkl"
        with open(file_name, "wb") as f:
            pickle.dump(self.stats, f)
class AvellanedaStoikov(Trader):
    """Avellaneda-Stoikov market making strategy."""
    def __init__(
        self,
        id: str,
        com_model: CommissionModel,
        gamma: float = 0.001,  # Risk aversion parameter
        sigma: float = 0.0426,  # Volatility of the asset
        kappa: float = 200,  # Liquidity parameter
        volume: float = 1,
        inventory_manage: bool = False,
    ) -> None:
        """
        Initialize an Avellaneda-Stoikov market maker.
        Args:
            id: Unique identifier of the trader.
            com_model: Commission model.
            gamma: Risk aversion parameter.
            sigma: Volatility of the asset.
            kappa: Liquidity parameter.
            volume: Volume of the orders to be placed.
            inventory_manage: True if the trader should manage its inventory
                to minimize inventory risk.
        """
        super().__init__(id)
        self.volume = volume  # Volume of the orders to be placed
        self.com_model = com_model  # Commission model
        self.inventory_manage = inventory_manage  # Inventory management
        self.gamma = gamma
        self.sigma = sigma
        self.kappa = kappa
        self.reset()
    def reset(self) -> None:
        """Reset the trader's statistics."""
        self.lob = None
        self.active_orders = []
        self.realized_pnl = 0  # Realized pnl
        self.adj_pnl = 0  # Adjusted pnl
        self.inventory = 0  # Long (positive) or short (negative) inventory
        self.cum_costs = 0  # Cumulative transaction costs
        self.total_volume = 0  # Total volume traded
        self.trade_count = 0  # Number of trades in one time step
        # Per-step history of the metrics above; appended in update_stats()
        # and place_orders(), persisted by save_stats().
        self.stats = {
            "realized_pnl": [],
            "adj_pnl": [],
            "inventory": [],
            "cum_costs": [],
            "total_volume": [],
            "trade_count": [],
            "quoted_bid_price": [],
            "quoted_ask_price": [],
            "quoted_bid_volume": [],
            "quoted_ask_volume": [],
        }
    def place_orders(
        self, time_step: int, timestamp: datetime.datetime, last_time_step: int
    ) -> tuple[list, list]:
        """
        Create lists of orders to be canceled and added to the lob.
        Quotes symmetrically around the Avellaneda-Stoikov reservation price,
        which shifts the mid-price against the current inventory by
        gamma * sigma^2 * (T - t); the half-spread combines the same risk
        term with the liquidity term 2/gamma * ln(1 + gamma/kappa).
        Args:
            time_step: Current time step.
            timestamp: Current timestamp.
            last_time_step: Last time step.
        Returns:
            Tuple of lists of orders to cancel and add.
        """
        # Cancel old orders
        cancel_orders = self.active_orders
        mid_price = self.lob.get_mid_price()
        # Reservation price: mid-price skewed against the held inventory.
        r = mid_price - self.inventory * self.gamma * self.sigma**2 * (
            last_time_step - time_step
        )
        spread = self.gamma * self.sigma**2 * (
            last_time_step - time_step
        ) + 2 / self.gamma * math.log(1 + self.gamma / self.kappa)
        bid_price = round_to_tick(r - spread / 2, self.lob.tick_size)
        ask_price = round_to_tick(r + spread / 2, self.lob.tick_size)
        if math.isclose(bid_price, ask_price):
            # Degenerate (zero-width) quote after tick rounding: skip quoting.
            self.stats["quoted_bid_price"].append(np.nan)
            self.stats["quoted_ask_price"].append(np.nan)
            self.stats["quoted_bid_volume"].append(0)
            self.stats["quoted_ask_volume"].append(0)
            return cancel_orders, []
        new_orders = []
        # With inventory management on, only buy back up to the short
        # position; never add to a long position.
        if self.inventory_manage and self.inventory < 0:
            bid_volume = min(self.volume, abs(self.inventory))
        elif self.inventory_manage and self.inventory > 0:
            bid_volume = 0
        else:
            bid_volume = self.volume
        if bid_volume != 0:
            new_orders.append(
                LimitOrder(
                    COIN_SYMBOL,
                    get_rnd_str(6),
                    self.id,
                    True,
                    bid_volume,
                    timestamp,
                    bid_price,
                )
            )
        # Mirror-image logic for the ask side: only sell down a long position.
        if self.inventory_manage and self.inventory > 0:
            ask_volume = min(self.volume, self.inventory)
        elif self.inventory_manage and self.inventory < 0:
            ask_volume = 0
        else:
            ask_volume = self.volume
        if ask_volume != 0:
            new_orders.append(
                LimitOrder(
                    COIN_SYMBOL,
                    get_rnd_str(6),
                    self.id,
                    False,
                    ask_volume,
                    timestamp,
                    ask_price,
                )
            )
        self.stats["quoted_bid_price"].append(bid_price)
        self.stats["quoted_ask_price"].append(ask_price)
        self.stats["quoted_bid_volume"].append(bid_volume)
        self.stats["quoted_ask_volume"].append(ask_volume)
        return cancel_orders, new_orders
    def cancel_orders(self) -> list:
        """Cancel all active orders."""
        return self.active_orders
    def add_order(self, order: Order) -> None:
        """
        Add an order to the trader's active orders.
        Args:
            order: Order to add.
        """
        self.active_orders.append(order)
    def remove_order(self, order: Order) -> None:
        """
        Remove an order from the trader's active orders.
        Args:
            order: Order to remove.
        """
        if order in self.active_orders:
            self.active_orders.remove(order)
    def process_trade(
        self,
        time_step: int,
        price: float,
        volume: float,
        order: Order,
        is_make: bool,
    ) -> None:
        """
        Process a trade: update inventory, realized/adjusted PnL, fees and
        volume counters for one (partial) fill.
        Args:
            time_step: Current time step.
            price: Price of the trade.
            volume: Volume of the trade.
            order: Order that was matched.
            is_make: True if the order was a maker, False if taker.
        """
        # Drop the order from the active list once it is fully filled.
        if order in self.active_orders and math.isclose(0, order.volume):
            self.active_orders.remove(order)
        # Buy (side=True) raises inventory and spends cash; sell does the
        # opposite.
        if order.side:
            self.inventory += volume
            self.realized_pnl -= price * volume
        else:
            self.inventory -= volume
            self.realized_pnl += price * volume
        mid_price_median = self.lob.get_mid_price_median()
        if is_make:
            costs = self.com_model.maker_fee(volume, price)
        else:
            costs = self.com_model.taker_fee(volume, price)
        self.realized_pnl -= costs
        # Adjusted PnL marks the open inventory to the book's median
        # mid-price (see LimitOrderBook.get_mid_price_median).
        self.adj_pnl = self.realized_pnl + self.inventory * mid_price_median
        self.cum_costs += costs
        self.total_volume += volume * price
        self.trade_count += 1
    def update_stats(self, time_step: int) -> None:
        """
        Update the trader's statistics: snapshot the running counters into
        the per-step history and reset the per-step trade count.
        Args:
            time_step: Current time step.
        """
        # Re-quantize inventory to the lot grid — presumably to cancel
        # floating-point drift from repeated fills; confirm.
        self.inventory = round_to_lot(self.inventory, self.lob.lot_size)
        self.stats["realized_pnl"].append(self.realized_pnl)
        self.stats["adj_pnl"].append(self.adj_pnl)
        self.stats["inventory"].append(self.inventory)
        self.stats["cum_costs"].append(self.cum_costs)
        self.stats["total_volume"].append(self.total_volume)
        self.stats["trade_count"].append(self.trade_count)
        self.trade_count = 0
    def save_stats(self, path: int, date: str) -> None:
        """
        Save the trader's statistics as a pickle file.
        Args:
            path: Path to the directory where the statistics should be saved.
            date: Date of the simulation start.
        """
        file_name = f"{path}/trader_{self.id}_{date}.pkl"
        with open(file_name, "wb") as f:
            pickle.dump(self.stats, f)
class RLMarketMaker(Trader):
    """Reinforcement learning market making strategy.

    Action encoding used by ``place_orders`` (derived from the branch
    structure below):
      * 0-15: quote both sides; bid priority = action % 4 and ask
        priority = action // 4, each with volume ``self.volume``.
      * 16-19: quote both prices at priority ``action - 16`` but size the
        volumes to flatten the current inventory (the side that would
        increase the position gets zero volume).
      * 20: wait — cancel nothing and place no new quotes.
    "Priority k" means one tick inside the k-th best level of the book,
    falling back to the deepest level when the book is too shallow;
    priority 0 only improves the touch when the spread is >= 3 ticks.
    """
    def __init__(
        self,
        id: str,
        com_model: CommissionModel,
        volume: float = 1,
        policy: Any = None,
    ) -> None:
        """
        Initialize a reinforcement learning market maker.
        Args:
            id: Unique identifier of the trader.
            com_model: Commission model used for maker/taker fees.
            volume: Volume of the orders to be placed.
            policy: Policy to use for the RL agent. When None, an explicit
                action must be passed to ``place_orders``.
        """
        super().__init__(id)
        self.volume = volume  # Volume of the orders to be placed
        self.com_model = com_model  # Commission model
        self.policy = policy  # Policy
        self.reset()
    def reset(self):
        """Reset the agent's statistics."""
        self.lob = None
        self.active_orders = []
        self.realized_pnl = 0  # Realized pnl
        self.adj_pnl = 0  # Adjusted pnl
        self.inventory = 0  # Long (positive) or short (negative) inventory
        self.cum_costs = 0  # Cumulative transaction costs
        self.total_volume = 0  # Total volume traded
        self.trade_count = 0  # Number of trades in one time step
        self.reward = 0  # Reward
        # Per-time-step history; one entry per key is appended by
        # update_stats / place_orders.
        self.stats = {
            "realized_pnl": [],
            "adj_pnl": [],
            "inventory": [],
            "cum_costs": [],
            "total_volume": [],
            "trade_count": [],
            "quoted_bid_price": [],
            "quoted_ask_price": [],
            "quoted_bid_volume": [],
            "quoted_ask_volume": [],
            "reward": [],
        }
    def place_orders(
        self, time_step: int, timestamp, action: ActType, obs: ObsType
    ) -> tuple[list, list]:
        """
        Create lists of orders to be canceled and added to the lob.
        Args:
            time_step: Current time step.
            timestamp: Current timestamp.
            action: Action to take (0-20, see class docstring). When None,
                the action is predicted from ``obs`` by ``self.policy``.
            obs: Observation of the environment.
        Returns:
            Tuple of lists of orders to cancel and add.
        Raises:
            ValueError: If ``action`` is None and no policy is set, or if
                the action is not a valid action id.
        """
        if action is None and self.policy is not None:
            action = int(self.policy.predict(obs, deterministic=True)[0])
        elif action is None and self.policy is None:
            raise ValueError("Policy is not defined.")
        # ----------------------------------------------------------------------
        # First handle trivial action (20)
        # ----------------------------------------------------------------------
        # Action 20: Wait and no new quote
        if action == 20:
            cancel_orders = []
            new_orders = []
            self.stats["quoted_bid_price"].append(np.nan)
            self.stats["quoted_ask_price"].append(np.nan)
            self.stats["quoted_bid_volume"].append(0)
            self.stats["quoted_ask_volume"].append(0)
            return cancel_orders, new_orders
        # ----------------------------------------------------------------------
        # Next handle other actions (0-19) consisting of bid-ask combinations
        # ----------------------------------------------------------------------
        # Cancel old orders and get the current bid and ask prices
        # NOTE(review): this aliases (does not copy) self.active_orders.
        cancel_orders = self.active_orders
        bids, asks = self.lob.get_bids(), self.lob.get_asks()
        # With an empty book side there is no reference price: quote nothing.
        if not bids or not asks:
            self.stats["quoted_bid_price"].append(np.nan)
            self.stats["quoted_ask_price"].append(np.nan)
            self.stats["quoted_bid_volume"].append(0)
            self.stats["quoted_ask_volume"].append(0)
            return cancel_orders, []
        # Set the correct bid price
        # ----------------------------------------------------------------------
        # Bid - priority 0
        if action in [0, 4, 8, 12, 16]:
            diff = round_to_lot(asks[0] - bids[0], self.lob.lot_size)
            if diff >= 3 * self.lob.tick_size:
                bid_price = bids[0] + self.lob.tick_size
            else:
                bid_price = (
                    bids[1] + self.lob.tick_size if len(bids) >= 2 else bids[-1]
                )
        # Bid - priority 1
        elif action in [1, 5, 9, 13, 17]:
            bid_price = (
                bids[1] + self.lob.tick_size if len(bids) >= 2 else bids[-1]
            )
        # Bid - priority 2
        elif action in [2, 6, 10, 14, 18]:
            bid_price = (
                bids[2] + self.lob.tick_size if len(bids) >= 3 else bids[-1]
            )
        # Bid - priority 3
        elif action in [3, 7, 11, 15, 19]:
            bid_price = (
                bids[3] + self.lob.tick_size if len(bids) >= 4 else bids[-1]
            )
        # Set the correct ask price
        # ----------------------------------------------------------------------
        # Ask - priority 0
        if action in [0, 1, 2, 3, 16]:
            diff = round_to_lot(asks[0] - bids[0], self.lob.lot_size)
            if diff >= 3 * self.lob.tick_size:
                ask_price = asks[0] - self.lob.tick_size
            else:
                ask_price = (
                    asks[1] - self.lob.tick_size if len(asks) >= 2 else asks[-1]
                )
        # Ask - priority 1
        elif action in [4, 5, 6, 7, 17]:
            ask_price = (
                asks[1] - self.lob.tick_size if len(asks) >= 2 else asks[-1]
            )
        # Ask - priority 2
        elif action in [8, 9, 10, 11, 18]:
            ask_price = (
                asks[2] - self.lob.tick_size if len(asks) >= 3 else asks[-1]
            )
        # Ask - priority 3
        elif action in [12, 13, 14, 15, 19]:
            ask_price = (
                asks[3] - self.lob.tick_size if len(asks) >= 4 else asks[-1]
            )
        # Set quoted volumes
        # ----------------------------------------------------------------------
        if action >= 0 and action <= 15:
            bid_volume, ask_volume = self.volume, self.volume
        elif action >= 16 and action <= 19:
            # Inventory-flattening actions: quote only the reducing side.
            if self.inventory > 0:
                bid_volume, ask_volume = 0, abs(self.inventory)
            elif self.inventory < 0:
                bid_volume, ask_volume = abs(self.inventory), 0
            else:
                bid_volume, ask_volume = 0, 0
        else:
            raise ValueError(f"Invalid action. {action}")
        # ----------------------------------------------------------------------
        # Create orders
        # ----------------------------------------------------------------------
        new_orders = []
        if bid_volume != 0:
            new_orders.append(
                LimitOrder(
                    COIN_SYMBOL,
                    get_rnd_str(6),
                    self.id,
                    True,
                    bid_volume,
                    timestamp,
                    bid_price,
                )
            )
        if ask_volume != 0:
            new_orders.append(
                LimitOrder(
                    COIN_SYMBOL,
                    get_rnd_str(6),
                    self.id,
                    False,
                    ask_volume,
                    timestamp,
                    ask_price,
                )
            )
        # ----------------------------------------------------------------------
        # Save stats and set reward
        # ----------------------------------------------------------------------
        self.stats["quoted_bid_price"].append(bid_price)
        self.stats["quoted_ask_price"].append(ask_price)
        self.stats["quoted_bid_volume"].append(bid_volume)
        self.stats["quoted_ask_volume"].append(ask_volume)
        # Shaped reward: 1 for flattening a nonzero inventory at priority 1
        # (action 17), or quoting symmetrically at priority 1 when flat
        # (action 5).
        if abs(self.inventory) > 0 and action == 17:
            self.reward = 1
        elif abs(self.inventory) == 0 and action == 5:
            self.reward = 1
        return cancel_orders, new_orders
    def cancel_orders(self) -> list:
        """Cancel all active orders."""
        return self.active_orders
    def add_order(self, order: Order) -> None:
        """
        Add an order to the trader's active orders.
        Args:
            order: Order to add.
        """
        self.active_orders.append(order)
    def remove_order(self, order: Order) -> None:
        """
        Remove an order from the trader's active orders.
        Args:
            order: Order to remove.
        """
        if order in self.active_orders:
            self.active_orders.remove(order)
    def process_trade(
        self,
        time_step: int,
        price: float,
        volume: float,
        order: Order,
        is_make: bool,
    ) -> None:
        """
        Process a trade: update inventory, realized/adjusted pnl, fees and
        volume statistics.
        Args:
            time_step: Current time step.
            price: Price of the trade.
            volume: Volume of the trade.
            order: Order that was matched.
            is_make: True if the order was a maker, False if taker.
        """
        # A fully filled order (volume is ~0) is no longer active.
        if order in self.active_orders and math.isclose(0, order.volume):
            self.active_orders.remove(order)
        # Buys add inventory and spend cash; sells do the opposite.
        if order.side:
            self.inventory += volume
            self.realized_pnl -= price * volume
        else:
            self.inventory -= volume
            self.realized_pnl += price * volume
        # Mark inventory to market with the median of recent mid prices.
        mid_price_median = self.lob.get_mid_price_median()
        if is_make:
            costs = self.com_model.maker_fee(volume, price)
        else:
            costs = self.com_model.taker_fee(volume, price)
        self.realized_pnl -= costs
        self.adj_pnl = self.realized_pnl + self.inventory * mid_price_median
        self.cum_costs += costs
        self.total_volume += volume * price
        self.trade_count += 1
    def update_stats(self, time_step: int) -> None:
        """
        Update the trader's statistics.
        Args:
            time_step: Current time step.
        """
        self.inventory = round_to_lot(self.inventory, self.lob.lot_size)
        self.stats["realized_pnl"].append(self.realized_pnl)
        # Previous adjusted pnl is needed for the relative pnl change below.
        if len(self.stats["adj_pnl"]) == 0:
            prev_pnl = 0
        else:
            prev_pnl = self.stats["adj_pnl"][-1]
        self.stats["adj_pnl"].append(self.adj_pnl)
        self.stats["inventory"].append(self.inventory)
        self.stats["cum_costs"].append(self.cum_costs)
        self.stats["total_volume"].append(self.total_volume)
        self.stats["trade_count"].append(self.trade_count)
        self.trade_count = 0
        # Avoid division by zero and clip the relative change to [-2, 2].
        prev_pnl = 1e-8 if prev_pnl == 0 else prev_pnl
        self.pnl_change = (self.adj_pnl - prev_pnl) / abs(prev_pnl)
        self.pnl_change = np.clip(self.pnl_change, a_min=-2, a_max=2)
        # NOTE(review): self.pnl_change is computed here but the logged
        # reward is the action-based self.reward set in place_orders —
        # confirm this is intended.
        self.stats["reward"].append(self.reward)
        self.reward = 0
    def save_stats(self, path: str, date: str) -> None:
        """
        Save the trader's statistics to a pickle file named
        ``trader_{id}_{date}.pkl`` inside the given directory.
        Args:
            path: Path to the directory where the statistics should be saved.
            date: Date of the simulation start.
        """
        file_name = f"{path}/trader_{self.id}_{date}.pkl"
        with open(file_name, "wb") as f:
            pickle.dump(self.stats, f)
| 34,171 | Python | .py | 906 | 26.769316 | 80 | 0.524649 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,067 | limit_order_book.py | JurajZelman_airl-market-making/lob/limit_order_book.py | """Limit order book."""
import math
import os
from datetime import datetime
from typing import Optional, Union
import matplotlib.pyplot as plt
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
from pyllist import dllistnode
from sortedcontainers import SortedDict
from lob.order_queue import OrderQueue
from lob.orders import Order
from lob.plots import set_plot_style
from lob.utils import ensure_dir_exists, round_to_lot, round_to_tick
class LimitOrderBook:
    """Limit order book class.

    Maintains price-sorted bid/ask queues of limit orders, matches incoming
    orders against the opposite side, and (optionally) logs every event to
    a parquet file.
    """
    def __init__(
        self,
        tick_size: float,
        lot_size: float,
        logging: bool = False,
        ts_save: Optional[datetime] = None,
    ) -> None:
        """
        Initialize a limit order book.
        Args:
            tick_size: Minimum tick size for rounding prices.
            lot_size: Minimum lot size for rounding volumes.
            logging: Indicates whether to log the events.
            ts_save: Start time of the simulation. Used for logging.
        """
        self.tick_size = tick_size
        self.lot_size = lot_size
        self.logging = logging
        self.ts_save = ts_save
        self.orders = {}  # Maps order id -> linked-list node of the order
        self.bid_side = SortedDict()  # price -> OrderQueue (bids)
        self.ask_side = SortedDict()  # price -> OrderQueue (asks)
        self.num_bid_orders = 0
        self.num_ask_orders = 0
        self.bid_volume = 0
        self.ask_volume = 0
        self.best_bid_price = None
        self.last_best_bid_price = None  # Last non None bid price
        self.best_ask_price = None
        self.last_best_ask_price = None  # Last non None ask price
        self.parquet_writer = None
        self.log_file_name = None
        self.log = None
        self.ts = None  # Current simulation timestamp (set externally)
        self.mid_price_history = []
        ensure_dir_exists(os.path.join(os.getcwd(), "results_backtest"))
        self.initialize_log()
        set_plot_style()
    def add_order(self, order: Order) -> tuple[list, Order]:
        """
        Add an order to the limit order book. The order is first matched
        against the opposite side; only the unmatched remainder is queued.
        Args:
            order: Order to add.
        Returns:
            A tuple of the list of trades and the remaining order.
        """
        # Round the order price to the tick size
        order.price = round_to_tick(order.price, self.tick_size)
        order.volume = round_to_lot(order.volume, self.lot_size)
        # Try to match the order with existing orders
        trades, order = self.match_order(order)
        if order is None:
            return trades, None
        # Add the order to the order book and update the order queue
        order_tree = self.bid_side if order.side else self.ask_side
        if not order_tree.__contains__(order.price):
            order_tree[order.price] = OrderQueue(self.lot_size)
        node = order_tree[order.price].add_order(order)
        self.orders[order.id] = node
        if order.side:
            self.num_bid_orders += 1
            self.bid_volume += order.volume
            self.update_bid_price(order.price)
        else:
            self.num_ask_orders += 1
            self.ask_volume += order.volume
            self.update_ask_price(order.price)
        if self.logging:
            self.update_log(
                max(order.entry_time, self.ts),
                "Insert",
                order.ticker,
                order.id,
                order.trader_id,
                order.side,
                order.price,
                order.volume,
            )
        return trades, order
    def remove_order_by_id(self, order_id: str, log: bool = True) -> Order:
        """
        Remove an order from the limit order book.
        Args:
            order_id: ID of the order to remove.
            log: Indicated whether to log the order removal.
        Returns:
            The removed order, or None when the id is unknown.
        """
        node = self.get_order_by_id(order_id, node=True)
        if node is not None:
            order = node.value
        else:
            return None
        order_tree = self.bid_side if order.side else self.ask_side
        if order.side:
            self.num_bid_orders -= 1
            self.bid_volume -= order.volume
        else:
            self.num_ask_orders -= 1
            self.ask_volume -= order.volume
        del self.orders[order_id]
        order_tree[order.price].remove_order(order, node)
        # Drop the price level entirely once its queue is empty.
        if order_tree[order.price].num_orders == 0:
            del order_tree[order.price]
        self.update_bid_price() if order.side else self.update_ask_price()
        # Log the order removal
        if log and self.logging:
            self.update_log(
                max(order.entry_time, self.ts),
                "Cancel",
                order.ticker,
                order.id,
                order.trader_id,
                order.side,
                order.price,
                order.volume,
            )
        return order
    def update_order_volume(
        self, order_id: str, volume: int, log: bool = True
    ) -> None:
        """
        Update the volume of an order. This method is only used for order
        matching by the exchange. It should not be used by agents, since they
        should not be able to change the volume of an order without posting
        a new order with a new timestamp. This is the case for the Binance
        exchange, where each change of the order volume updates the timestamp
        of the order as well.
        Args:
            order_id: ID of the order to update.
            volume: New volume.
            log: Whether to log the order update.
        """
        order = self.get_order_by_id(order_id)
        if order is None:
            return
        # Keep the side's aggregate volume in sync with the change.
        if order.side:
            self.bid_volume = self.bid_volume - order.volume + volume
        else:
            self.ask_volume = self.ask_volume - order.volume + volume
        order_tree = self.bid_side if order.side else self.ask_side
        order_tree[order.price].update_order_volume(order, volume)
        if log and self.logging:
            self.update_log(
                max(order.entry_time, self.ts),
                "Update",
                order.ticker,
                order.id,
                order.trader_id,
                order.side,
                order.price,
                volume,
            )
    def match_order(self, order: Order) -> tuple[list, Order]:
        """
        If possible, partially or fully match an order. If the order is fully
        matched, it won't be added to the order book and None will be returned.
        If the order is partially matched, the remaining volume will be
        returned as a new order, so that it can be added to the order book.
        Args:
            order: Order to be matched.
        Returns:
            A tuple of the list of trades and the remaining order.
        """
        bid, ask = self.best_bid_price, self.best_ask_price
        # Return the order if it cannot be matched
        if order.side and (ask is None or order.price < ask):
            return [], order
        if not order.side and (bid is None or order.price > bid):
            return [], order
        # Match the order against the first order at the best opposite level
        # (price-time priority); the trade executes at the resting price.
        orders = self.ask_side if order.side else self.bid_side
        match_price = ask if order.side else bid
        match_order = orders[match_price].first_order
        trade_price = ask if order.side else bid
        trade_volume = min(order.volume, match_order.volume)
        # Log the trade
        if self.logging:
            self.update_log(
                max(order.entry_time, match_order.entry_time, self.ts),
                "Trade",
                order.ticker,
                order.id,
                order.trader_id,
                order.side,
                trade_price,
                trade_volume,
                match_order.id,
                match_order.trader_id,
                match_order.side,
            )
        # Update the order quantities
        # Case 1: exact fill of both orders.
        if math.isclose(order.volume, match_order.volume):
            order.volume = 0
            match_order.volume = 0
            self.remove_order_by_id(match_order.id, log=False)
            return [
                {
                    "price": trade_price,
                    "volume": trade_volume,
                    "order_take": order,
                    "order_make": match_order,
                }
            ], None
        # Case 2: incoming order fully filled, resting order shrinks.
        elif order.volume < match_order.volume:
            diff = round_to_lot(
                match_order.volume - order.volume, self.lot_size
            )
            self.update_order_volume(match_order.id, diff, log=False)
            order.volume = 0
            return [
                {
                    "price": trade_price,
                    "volume": trade_volume,
                    "order_take": order,
                    "order_make": match_order,
                }
            ], None
        # Case 3: resting order fully filled; recurse with the remainder
        # of the incoming order against the next level/order.
        else:
            order.volume -= match_order.volume
            order.volume = round_to_lot(order.volume, self.lot_size)
            match_order.volume = 0
            self.remove_order_by_id(match_order.id, log=False)
            trades, remaining_order = self.match_order(order)
            trades.insert(
                0,
                {
                    "price": trade_price,
                    "volume": trade_volume,
                    "order_take": order,
                    "order_make": match_order,
                },
            )
            return trades, remaining_order
    def update_bid_price(self, price: float = None) -> None:
        """
        Update the best bid price.
        Args:
            price: New best bid price, if known.
        """
        if self.num_bid_orders == 0:
            self.best_bid_price = None
        elif price is not None and (
            self.best_bid_price is None or price > self.best_bid_price
        ):
            self.best_bid_price = price
            self.last_best_bid_price = price
        else:
            # Fall back to the highest remaining bid level.
            self.best_bid_price = self.bid_side.peekitem(index=-1)[0]
            self.last_best_bid_price = self.best_bid_price
    def update_ask_price(self, price: float = None) -> None:
        """
        Update the best ask price.
        Args:
            price: New best ask price, if known.
        """
        if self.num_ask_orders == 0:
            self.best_ask_price = None
        elif price is not None and (
            self.best_ask_price is None or price < self.best_ask_price
        ):
            self.best_ask_price = price
            self.last_best_ask_price = price
        else:
            # Fall back to the lowest remaining ask level.
            self.best_ask_price = self.ask_side.peekitem(index=0)[0]
            self.last_best_ask_price = self.best_ask_price
    def get_order_by_id(
        self, order_id: str, node: bool = False
    ) -> Union[Order, dllistnode]:
        """
        Get an order object by its ID. If node is True, returns the node
        containing the order in the double-linked list. Returns None for
        unknown ids.
        """
        try:
            if node:
                return self.orders[order_id]
            else:
                return self.orders[order_id].value
        except KeyError:
            return None
        except Exception as e:
            raise e
    def get_best_bid(self) -> float:
        """Returns the best bid price."""
        return self.best_bid_price
    def get_best_bid_volume(self) -> float:
        """Returns the volume at the best bid."""
        return self.get_volume_at_price(self.best_bid_price)
    def get_best_ask(self) -> float:
        """Returns the best ask price."""
        return self.best_ask_price
    def get_best_ask_volume(self) -> float:
        """Returns the volume at the best ask."""
        return self.get_volume_at_price(self.best_ask_price)
    def get_bid_ask_spread(self) -> float:
        """Returns the bid-ask spread, or None when either side is empty."""
        if self.best_bid_price is None or self.best_ask_price is None:
            return None
        return self.best_ask_price - self.best_bid_price
    def get_mid_price(self) -> float:
        """Returns the mid price, falling back to the last known best
        prices when one side of the book is currently empty."""
        bid = (
            self.best_bid_price
            if self.best_bid_price is not None
            else self.last_best_bid_price
        )
        ask = (
            self.best_ask_price
            if self.best_ask_price is not None
            else self.last_best_ask_price
        )
        return (bid + ask) / 2
    def get_bids(self) -> list[float]:
        """Returns a list of all bid prices, decreasing from the best bid."""
        return list(self.bid_side.keys())[::-1]
    def get_asks(self) -> list[float]:
        """Returns a list of all ask prices, increasing from the best ask."""
        return list(self.ask_side.keys())
    def get_book_info(
        self, max_depth: int = 5
    ) -> dict[str, list[tuple[float, float]]]:
        """
        Returns a dictionary with information about the order book. The
        dictionary contains price-depth pairs for the best bid and ask prices,
        up to a given maximum depth.
        Args:
            max_depth: Maximum depth to return.
        """
        bids, asks = self.get_bids(), self.get_asks()
        bids = bids[: min(max_depth, len(bids))]
        asks = asks[: min(max_depth, len(asks))]
        return {
            "bid_side": [(p, self.get_volume_at_price(p)) for p in bids],
            "ask_side": [(p, self.get_volume_at_price(p)) for p in asks],
        }
    def get_volume_at_price(self, price: float) -> float:
        """
        Get the volume of shares for a given price. Assumes that the price
        cannot be matched and is one of the prices waiting in the lob.
        Args:
            price: Price to get the volume for.
        Returns:
            The volume of shares at the given price.
        """
        best_bid = self.get_best_bid()
        # Prices above the best bid (or any price when no bids exist) are
        # looked up on the ask side.
        if best_bid is None or price > best_bid:
            order_tree = self.ask_side
        else:
            order_tree = self.bid_side
        # order_tree = self.bid_side if price <= best_bid else self.ask_side
        if order_tree.__contains__(price):
            return order_tree[price].volume
        else:
            return 0
    def get_bid_volume(self) -> float:
        """Returns the total volume of orders on the bid side."""
        return self.bid_volume
    def get_ask_volume(self) -> float:
        """Returns the total volume of orders on the ask side."""
        return self.ask_volume
    def get_total_volume(self) -> float:
        """Returns the total volume of orders on both sides."""
        return self.bid_volume + self.ask_volume
    def get_order_position(self, order_id: str) -> Union[int, None]:
        """
        Returns the position of an order in the order book. If the output is
        1, the order is the first order on its side (bid/ask) of the order book.
        Args:
            order_id: ID of the order to get the position for.
        Returns:
            The position of the order in the order book.
        """
        order = self.get_order_by_id(order_id)
        if order is None:
            return None
        order_side = self.bid_side if order.side else self.ask_side
        prices = self.get_bids() if order.side else self.get_asks()
        # Count all orders queued at better price levels...
        prices = prices[: prices.index(order.price)]
        num_orders = sum(order_side[p].num_orders for p in prices)
        # ...plus the orders ahead of this one within its own level
        # (walks the queue's linked list via the order's prev pointer).
        temp = order.prev
        while temp is not None:
            num_orders += 1
            temp = temp.prev
        return num_orders + 1
    def get_num_orders_per_price(self, price: float) -> int:
        """
        Get the number of orders for a given price. Assumes that the price
        cannot be matched and is one of the prices waiting in the lob.
        Args:
            price: Price to get the number of orders for.
        Returns:
            The number of orders at the given price.
        """
        best_bid = self.get_best_bid()
        # NOTE(review): unlike get_volume_at_price, this comparison does not
        # guard against best_bid being None (empty bid side) — confirm
        # callers only use it with a non-empty book.
        order_tree = self.bid_side if price <= best_bid else self.ask_side
        if order_tree.__contains__(price):
            return order_tree[price].num_orders
        else:
            return 0
    def update_mid_price_history(self) -> None:
        """
        Update the mid price history list. This list is used for finding
        the median of recent mid prices since in illiquid markets the mid
        price can sometime jump by a large amount for a short period.
        """
        # Keep a rolling window of at most 11 recent mid prices.
        if len(self.mid_price_history) < 11:
            self.mid_price_history.append(self.get_mid_price())
        else:
            self.mid_price_history.pop(0)
            self.mid_price_history.append(self.get_mid_price())
    def get_mid_price_median(self) -> float:
        """Returns the median of the mid price history."""
        return sorted(self.mid_price_history)[len(self.mid_price_history) // 2]
    def get_VAMP(self, q: float, max_depth: int = 10) -> float:
        """
        Get volume adjusted mid price, suggested by Stoikov and Covario. They
        suggest the difference between the VAMP and the mid price as a good
        predictor for the direction of the stock price in various timescales
        between 1-60 seconds.
        Args:
            q: Parameter for the VAMP, should be the desired trading volume.
            max_depth: Maximum depth of the order book to consider.
        """
        if self.best_bid_price is None or self.best_ask_price is None:
            return None
        return (self.get_VABP(q, max_depth) + self.get_VAAP(q, max_depth)) / 2
    def get_VABP(self, q: float, max_depth: int = 10) -> float:
        """
        Get volume adjusted bid price (Stoikov, Covario). The VABP is defined as
        the weighted average of the bid prices, where the weights are the
        volumes at each price level (up to a maximum depth) divided by a
        parameter trading volume parameter q specified by a trader.
        Args:
            q: Parameter for the VABP, should be the desired trading volume.
            max_depth: Maximum depth of the order book to consider.
        """
        if self.best_bid_price is None:
            return None
        bids = self.get_bids()
        bids = bids[: min(max_depth, len(bids))]
        volumes = [self.get_volume_at_price(bid) for bid in bids]
        return sum(bids[i] * volumes[i] for i in range(len(bids))) / q
    def get_VAAP(self, q: float, max_depth: int = 10) -> float:
        """
        Get volume adjusted ask price (Stoikov, Covario). The VAAP is defined as
        the weighted average of the ask prices, where the weights are the
        volumes at each price level (up to a maximum depth) divided by a
        parameter trading volume parameter q specified by a trader.
        Args:
            q: Parameter for the VAAP, should be the desired trading volume.
            max_depth: Maximum depth of the order book to consider.
        """
        if self.best_ask_price is None:
            return None
        asks = self.get_asks()
        asks = asks[: min(max_depth, len(asks))]
        volumes = [self.get_volume_at_price(ask) for ask in asks]
        return sum(asks[i] * volumes[i] for i in range(len(asks))) / q
    def visualize(self, depth: int = 6) -> None:
        """Creates a bar plot of the limit order book.

        Args:
            depth: Maximum number of price levels to show per side.
        """
        bids, asks = self.get_bids(), self.get_asks()
        bid_volumes = [self.get_volume_at_price(p) for p in bids]
        ask_volumes = [self.get_volume_at_price(p) for p in asks]
        if len(bids) > depth:
            bids = bids[:depth]
            bid_volumes = bid_volumes[:depth]
        if len(asks) > depth:
            asks = asks[:depth]
            ask_volumes = ask_volumes[:depth]
        spread_space = 1  # Number of ticks to leave in the middle
        x_axis = np.arange(0, len(bids) + len(asks) + spread_space, 1)
        plt.figure(figsize=(12, 5))
        plt.bar(
            x_axis[: len(bids)],
            bid_volumes[::-1],
            label="Bid",
            color="#9ED166",
            width=1,
            edgecolor="black",
            linewidth=1.3,
        )
        plt.bar(
            x_axis[len(bids) + spread_space :],
            ask_volumes,
            label="Ask",
            color="#EB735F",
            width=1,
            edgecolor="black",
            linewidth=1.3,
        )
        x_ticks = np.append(bids[::-1], asks)
        x_ticks = [str(x) for x in x_ticks]
        x_ticks = np.insert(x_ticks, len(bids), "")
        plt.xticks(x_axis, x_ticks, rotation=45, size=12)
        plt.xlabel("Price")
        plt.ylabel("Volume")
        plt.show()
        # Save figure as pdf image
        # plt.savefig("lob.pdf", format="pdf")
    def initialize_log(self):
        """Initialize the log with the column names."""
        self.log = {
            "ts": [],
            "type": [],
            "ticker": [],
            "id": [],
            "trader_id": [],
            "side": [],
            "price": [],
            "volume": [],
            "id2": [],
            "trader_id2": [],
            "side2": [],
        }
    def update_log(
        self,
        ts,
        order_type,
        ticker,
        order_id,
        trader_id,
        side,
        price,
        volume,
        id2=None,
        trader_id2=None,
        side2=None,
    ) -> None:
        """
        Update the log with the order entry.
        Args:
            ts: Timestamp of the event.
            order_type: Event type ("Insert", "Cancel", "Update" or "Trade").
            ticker: Ticker of the traded instrument.
            order_id: ID of the (incoming) order.
            trader_id: ID of the trader owning the order.
            side: True for a buy (bid) order, False for a sell (ask) order.
            price: Price of the order or trade.
            volume: Volume of the order or trade.
            id2: ID of the matched (maker) order for trades.
                Defaults to None.
            trader_id2: Trader ID of the matched order for trades.
                Defaults to None.
            side2: Side of the matched order for trades. Defaults to None.
        """
        self.log["ts"].append(ts)
        self.log["type"].append(order_type)
        self.log["ticker"].append(ticker)
        self.log["id"].append(order_id)
        self.log["trader_id"].append(trader_id)
        self.log["side"].append(side)
        self.log["price"].append(price)
        self.log["volume"].append(volume)
        self.log["id2"].append(id2)
        self.log["trader_id2"].append(trader_id2)
        self.log["side2"].append(side2)
        # Flush to parquet periodically to bound memory usage.
        if len(self.log["ts"]) > 30000:
            self.write_log_to_parquet()
    def write_log_to_parquet(self, date: Optional[datetime] = None) -> None:
        """
        Save the log to a parquet file. The file will be saved in the logs
        folder with the name log_{self.ts_save}.parquet.
        Args:
            date: NOTE(review) — documented but currently unused; the file
                name is derived from self.ts_save instead.
        """
        if self.logging:
            if self.log_file_name is None:
                self.log_file_name = os.path.join(
                    os.getcwd(),
                    "results_backtest",
                    f"log_{self.ts_save}.parquet",
                )
            df = pa.Table.from_arrays(
                [
                    pa.array(self.log["ts"]),
                    pa.array(self.log["type"]),
                    pa.array(self.log["ticker"]),
                    pa.array(self.log["id"]),
                    pa.array(self.log["trader_id"]),
                    pa.array(self.log["side"]),
                    pa.array(self.log["price"]),
                    pa.array(self.log["volume"]),
                    pa.array(self.log["id2"]),
                    pa.array(self.log["trader_id2"]),
                    pa.array(self.log["side2"]),
                ],
                names=[
                    "ts",
                    "type",
                    "ticker",
                    "id",
                    "trader_id",
                    "side",
                    "price",
                    "volume",
                    "id2",
                    "trader_id2",
                    "side2",
                ],
            )
            if self.parquet_writer is None:
                self.parquet_writer = pq.ParquetWriter(
                    self.log_file_name, df.schema
                )
            self.parquet_writer.write_table(df)
            # Start a fresh in-memory log after every flush.
            self.initialize_log()
    def close_parquet_writer(self):
        """Close and reset the parquet writer (flushes pending rows)."""
        if self.logging:
            self.write_log_to_parquet()
            self.parquet_writer.close()
            self.parquet_writer = None
| 24,385 | Python | .py | 628 | 27.678344 | 80 | 0.549911 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,068 | data.py | JurajZelman_airl-market-making/lob/data.py | """Methods for data handling and processing."""
import os
import pandas as pd
import polars as pl
from lob.time import TimeManager
def scan_parquet(
    name: str,
    ts_start: pd.Timestamp,
    ts_end: pd.Timestamp,
    win: int,
    path: str,
    time_manager: TimeManager,
) -> pl.DataFrame:
    """
    Scan the parquet datasets into one dataframe.
    Args:
        name: Name of the dataset. It is assumed that the datafiles are
            using this name convention with the date appended to the end
            (``{name}_{YYYY_MM_DD}.parquet``).
        ts_start: Start timestamp.
        ts_end: End timestamp.
        win: Window size (number of time steps before ``ts_start``) to
            pre-load.
        path: Path to the direcory containing datasets.
        time_manager: Time manager for timestamp operations.
    Returns:
        A (lazy) polars frame filtered to ``received_time`` between the
        window-adjusted start and ``ts_end``.
    """
    # Detect the days between the start and end
    first_day, last_day = ts_start.date(), ts_end.date()
    n_days = (last_day - first_day).days + 1
    days = [first_day + pd.Timedelta(days=i) for i in range(n_days)]
    # Merge the dataframes
    df = []
    for day in days:
        file_name = f"{name}_{day.strftime('%Y_%m_%d')}.parquet"
        df.append(pl.scan_parquet(os.path.join(path, file_name)))
    df = pl.concat(df, how="vertical")
    # Filter the data
    # Snap the requested bounds to actually-available timestamps.
    ts_start = time_manager.get_ts_larger_equal_than(ts_start)
    ts_end = time_manager.get_ts_smaller_equal_than(ts_end)
    # Shift the start back by `win` steps so the warm-up window is loaded.
    ts_start_win = time_manager.get_ts_n_steps_from(ts_start, -win)
    df = df.filter(pl.col("received_time").is_between(ts_start_win, ts_end))
    # Read the data for the previous day if needed to account for the window
    date_start_win = ts_start_win.date()
    if date_start_win < first_day:
        file_name = f"{name}_{date_start_win.strftime('%Y_%m_%d')}.parquet"
        df_prev = pl.scan_parquet(os.path.join(path, file_name))
        df_prev = df_prev.filter(pl.col("received_time").ge(ts_start_win))
        df = pl.concat([df_prev, df], how="vertical")
    return df
| 1,925 | Python | .py | 47 | 35.170213 | 76 | 0.657571 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,069 | backtest_metrics.py | JurajZelman_airl-market-making/lob/backtest_metrics.py | """Metrics for evaluating the performance of a trading strategy."""
import numpy as np
import pandas as pd
def total_return(equity: pd.Series) -> float:
    """Total return of the strategy over the whole period, in percent.

    Args:
        equity: Equity curve of the strategy.

    Returns:
        The total return in percent.
    """
    values = equity.to_numpy()
    growth_factor = values[-1] / values[0]
    return (growth_factor - 1) * 100
def win_rate(equity: pd.Series) -> float:
    """Share of time steps with a positive equity change, in percent.

    Args:
        equity: Equity curve of the strategy.

    Returns:
        The win rate in percent.
    """
    # Per-step P&L of the equity curve (same as get_returns_from_equity).
    step_pnl = pd.Series(np.diff(equity), index=equity.index[1:])
    return (step_pnl > 0).mean() * 100
def get_returns_from_equity(equity: pd.Series) -> pd.Series:
    """Per-step changes (absolute P&L differences) of the equity curve.

    Args:
        equity: Equity curve of the strategy.

    Returns:
        Series of first differences, indexed from the second timestamp on.
    """
    step_changes = np.diff(equity.to_numpy())
    return pd.Series(step_changes, index=equity.index[1:])
def best_return(equity: pd.Series) -> pd.Series:
    """Largest single-step equity change, in percent.

    Args:
        equity: Equity curve of the strategy.

    Returns:
        The best return in percent.
    """
    step_pnl = pd.Series(np.diff(equity), index=equity.index[1:])
    return step_pnl.max() * 100
def worst_return(equity: pd.Series) -> pd.Series:
    """Smallest single-step equity change, in percent.

    Args:
        equity: Equity curve of the strategy.

    Returns:
        The worst return in percent.
    """
    step_pnl = pd.Series(np.diff(equity), index=equity.index[1:])
    return step_pnl.min() * 100
def average_return(equity: pd.Series) -> pd.Series:
    """Mean single-step equity change, in percent.

    Args:
        equity: Equity curve of the strategy.

    Returns:
        The average return in percent.
    """
    step_pnl = pd.Series(np.diff(equity), index=equity.index[1:])
    return step_pnl.mean() * 100
def skewness(returns: pd.Series) -> float:
    """Sample skewness of the return distribution (NaNs ignored).

    Args:
        returns: Returns of the strategy.

    Returns:
        The skewness.
    """
    sample_skew = returns.skew(axis=0, skipna=True)
    return sample_skew
def kurtosis(returns: pd.Series) -> float:
    """Sample excess kurtosis of the return distribution (NaNs ignored).

    Args:
        returns: Returns of the strategy.

    Returns:
        The kurtosis.
    """
    sample_kurt = returns.kurt(axis=0, skipna=True)
    return sample_kurt
def volatility(returns: pd.Series) -> float:
    """Sample standard deviation of the returns (NaNs ignored).

    Args:
        returns: Returns of the strategy.

    Returns:
        The volatility.
    """
    sample_std = returns.std(axis=0, skipna=True)
    return sample_std
def downside_volatility(returns: pd.Series, threshold=0) -> float:
    """Volatility of returns falling below the acceptance threshold.

    Args:
        returns: Returns of the strategy.
        threshold: Minimum acceptable return (default is 0).

    Returns:
        The downside volatility (sample std, ddof=1, of the shortfalls).
    """
    shortfalls = np.minimum(returns - threshold, 0)
    return np.std(shortfalls, ddof=1)
def drawdowns(equity: pd.Series) -> pd.Series:
    """Drawdown series of the strategy, in percent.

    Args:
        equity: Equity curve of the strategy.

    Returns:
        Series of drawdowns (<= 0) relative to the running peak, in percent.
    """
    running_peaks = np.maximum.accumulate(equity)
    relative_losses = (equity - running_peaks) / running_peaks
    return relative_losses * 100
def max_drawdown(equity: pd.Series) -> float:
    """Maximum (most negative) drawdown of the strategy, in percent.

    Args:
        equity: Equity curve of the strategy.

    Returns:
        The maximum drawdown in percent (a non-positive number).
    """
    # Drawdown series relative to the running peak (same as drawdowns()).
    running_peaks = np.maximum.accumulate(equity)
    dd_pct = (equity - running_peaks) / running_peaks * 100
    return np.min(dd_pct)
def max_drawdown_duration(equity: pd.Series) -> int:
    """
    Compute the maximum drawdown duration of a strategy. Value is expressed
    in number of time steps.

    Args:
        equity: Equity curve of the strategy.

    Returns:
        The longest consecutive run of time steps spent below a peak.
    """
    # Fixes: docstring previously documented a nonexistent ``pnl`` argument,
    # and the return annotation said ``float`` although an int count is
    # returned.
    dd = drawdowns(equity)
    counter, max_length = 0, 0
    for value in dd:
        if value == 0:
            # At a new peak: the current underwater streak ends.
            counter = 0
        else:
            counter += 1
            max_length = max(max_length, counter)
    return max_length
| 4,383 | Python | .py | 142 | 24.950704 | 78 | 0.655601 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,070 | imitation_reward_wrapper.py | JurajZelman_airl-market-making/package_modifications/imitation_reward_wrapper.py | """Common wrapper for adding custom reward values to an environment."""
import collections
from typing import Deque
import numpy as np
from imitation.data import types
from imitation.rewards import reward_function
from stable_baselines3.common import callbacks
from stable_baselines3.common import logger as sb_logger
from stable_baselines3.common import vec_env
class WrappedRewardCallback(callbacks.BaseCallback):
    """Logs mean wrapped reward as part of RL (or other) training."""

    def __init__(self, episode_rewards: Deque[float], *args, **kwargs):
        """Builds WrappedRewardCallback.

        Args:
            episode_rewards: A queue that episode rewards will be placed into.
            *args: Passed through to `callbacks.BaseCallback`.
            **kwargs: Passed through to `callbacks.BaseCallback`.
        """
        self.episode_rewards = episode_rewards
        super().__init__(*args, **kwargs)

    def _on_step(self) -> bool:
        # No per-step work; returning True keeps training going.
        return True

    def _on_rollout_start(self) -> None:
        # Nothing to log until at least one episode reward was recorded.
        if not self.episode_rewards:
            return
        total = sum(self.episode_rewards)
        mean = total / len(self.episode_rewards)
        assert isinstance(self.logger, sb_logger.Logger)
        self.logger.record("rollout/ep_rew_wrapped_mean", mean)
class RewardVecEnvWrapper(vec_env.VecEnvWrapper):
    """Uses a provided reward_fn to replace the reward function returned by `step()`.
    Automatically resets the inner VecEnv upon initialization. A tricky part
    about this class is keeping track of the most recent observation from each
    environment.
    Will also include the previous reward given by the inner VecEnv in the
    returned info dict under the `original_env_rew` key.
    """
    def __init__(
        self,
        venv: vec_env.VecEnv,
        reward_fn: reward_function.RewardFn,
        ep_history: int = 100,
    ):
        """Builds RewardVecEnvWrapper.
        Args:
            venv: The VecEnv to wrap.
            reward_fn: A function that wraps takes in vectorized transitions
                (obs, act, next_obs) a vector of episode timesteps, and returns a
                vector of rewards.
            ep_history: The number of episode rewards to retain for computing
                mean reward.
        """
        # Nesting two reward wrappers would silently double-replace rewards.
        assert not isinstance(venv, RewardVecEnvWrapper)
        super().__init__(venv)
        # Rolling window of completed-episode wrapped rewards (for logging).
        self.episode_rewards: Deque = collections.deque(maxlen=ep_history)
        # Per-env running sum of wrapped rewards for the current episode.
        self._cumulative_rew = np.zeros((venv.num_envs,))
        self.reward_fn = reward_fn
        # Most recent observation per env; needed because reward_fn consumes
        # (obs, act, next_obs) transitions.
        self._old_obs = None
        self._actions = None
        self.reset()
    def make_log_callback(self) -> WrappedRewardCallback:
        """Creates `WrappedRewardCallback` connected to this `RewardVecEnvWrapper`."""
        return WrappedRewardCallback(self.episode_rewards)
    @property
    def envs(self):
        # Expose the underlying (non-vectorized) envs of the wrapped VecEnv.
        return self.venv.envs
    def reset(self):
        # Cache the initial observation so the first step has a valid `obs`.
        self._old_obs = self.venv.reset()
        return self._old_obs
    def step_async(self, actions):
        # Remember the actions; reward_fn needs them when step_wait() fires.
        self._actions = actions
        return self.venv.step_async(actions)
    def step_wait(self):
        obs, old_rews, dones, infos = self.venv.step_wait()
        # The vecenvs automatically reset the underlying environments once they
        # encounter a `done`, in which case the last observation corresponding to
        # the `done` is dropped. We're going to pull it back out of the info dict!
        obs_fixed = []
        obs = types.maybe_wrap_in_dictobs(obs)
        for single_obs, single_done, single_infos in zip(obs, dones, infos):
            if single_done:
                single_obs = single_infos["terminal_observation"]
            obs_fixed.append(types.maybe_wrap_in_dictobs(single_obs))
        # Stack per-env observations back into a batched observation.
        obs_fixed = (
            types.DictObs.stack(obs_fixed)
            if isinstance(obs, types.DictObs)
            else np.stack(obs_fixed)
        )
        # Replace the env's rewards with the learned/wrapped reward on the
        # (old_obs, action, next_obs, done) transition batch.
        rews = self.reward_fn(
            self._old_obs,
            self._actions,
            types.maybe_unwrap_dictobs(obs_fixed),
            np.array(dones),
        )
        assert len(rews) == len(obs), "must return one rew for each env"
        done_mask = np.asarray(dones, dtype="bool").reshape((len(dones),))
        # Update statistics
        self._cumulative_rew += rews
        for single_done, single_ep_rew in zip(dones, self._cumulative_rew):
            if single_done:
                self.episode_rewards.append(single_ep_rew)
        # Reset the running sums of the envs whose episode just ended.
        self._cumulative_rew[done_mask] = 0
        # we can just use obs instead of obs_fixed because on the next iteration
        # after a reset we DO want to access the first observation of the new
        # trajectory, not the last observation of the old trajectory
        obs = types.maybe_unwrap_dictobs(obs)
        self._old_obs = obs
        # Preserve the original env reward for debugging/analysis.
        for info_dict, old_rew in zip(infos, old_rews):
            info_dict["original_env_rew"] = old_rew
        # TODO: Debugging
        # print("obs: ", obs, "rews: ", rews)
        return obs, rews, dones, infos
| 5,012 | Python | .py | 111 | 36.396396 | 86 | 0.647867 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,071 | sb3_common_utils.py | JurajZelman_airl-market-making/package_modifications/sb3_common_utils.py | import glob
import math # TODO:
import os
import platform
import random
import re
import sys # TODO:
from collections import deque
from itertools import zip_longest
from typing import Dict, Iterable, List, Optional, Tuple, Union
import cloudpickle
import gymnasium as gym
import numpy as np
import stable_baselines3 as sb3
import torch as th
from gymnasium import spaces
# Check if tensorboard is available for pytorch
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
SummaryWriter = None # type: ignore[misc, assignment]
from stable_baselines3.common.logger import Logger, configure
from stable_baselines3.common.type_aliases import (
GymEnv,
Schedule,
TensorDict,
TrainFreq,
TrainFrequencyUnit,
)
def set_random_seed(seed: int, using_cuda: bool = False) -> None:
    """
    Seed the random number generators of `random`, NumPy and PyTorch.

    :param seed: seed value applied to every generator
    :param using_cuda: when True, also force deterministic CuDNN kernels
        (may reduce performance)
    """
    random.seed(seed)
    np.random.seed(seed)
    # Seeds the RNG for all torch devices (both CPU and CUDA).
    th.manual_seed(seed)
    if using_cuda:
        # Trade speed for reproducibility on CuDNN.
        th.backends.cudnn.deterministic = True
        th.backends.cudnn.benchmark = False
# From stable baselines
def explained_variance(y_pred: np.ndarray, y_true: np.ndarray) -> np.ndarray:
    """
    Computes fraction of variance that ypred explains about y.
    Returns 1 - Var[y-ypred] / Var[y]

    interpretation:
        ev=0  =>  might as well have predicted zero
        ev=1  =>  perfect prediction
        ev<0  =>  worse than just predicting zero

    :param y_pred: the prediction
    :param y_true: the expected value
    :return: explained variance of ypred and y
    """
    assert y_true.ndim == 1 and y_pred.ndim == 1
    # Clip both variances into the representable float range to avoid
    # overflow warnings on extreme values.
    limit = sys.float_info.max
    total_var = np.clip(np.var(y_true), -limit, limit)
    residual_var = np.clip(np.var(y_true - y_pred), -limit, limit)
    if math.isclose(total_var, 0):
        # Degenerate target: explained variance is undefined.
        return np.nan
    return 1 - residual_var / total_var
def update_learning_rate(
    optimizer: th.optim.Optimizer, learning_rate: float
) -> None:
    """
    Set ``learning_rate`` on every parameter group of ``optimizer``.

    Useful when following a learning-rate schedule.

    :param optimizer: Pytorch optimizer
    :param learning_rate: New learning rate value
    """
    for group in optimizer.param_groups:
        group["lr"] = learning_rate
def get_schedule_fn(value_schedule: Union[Schedule, float]) -> Schedule:
    """
    Coerce a learning rate / clip range (for PPO) value into a schedule
    callable.

    :param value_schedule: Constant value of schedule function
    :return: Schedule function (can return constant value)
    """
    if isinstance(value_schedule, (float, int)):
        # Constants are wrapped in a trivial schedule; the float() cast
        # guards against integer inputs.
        return constant_fn(float(value_schedule))
    assert callable(value_schedule)
    return value_schedule
def get_linear_fn(start: float, end: float, end_fraction: float) -> Schedule:
    """
    Create a function that interpolates linearly between start and end
    between ``progress_remaining`` = 1 and ``progress_remaining`` = ``end_fraction``.
    This is used in DQN for linearly annealing the exploration fraction
    (epsilon for the epsilon-greedy strategy).

    :params start: value to start with if ``progress_remaining`` = 1
    :params end: value to end with if ``progress_remaining`` = 0
    :params end_fraction: fraction of ``progress_remaining``
        where end is reached e.g 0.1 then end is reached after 10%
        of the complete training process.
    :return: Linear schedule function.
    """
    def func(progress_remaining: float) -> float:
        progress = 1 - progress_remaining
        # Past the annealing window: hold the final value.
        if progress > end_fraction:
            return end
        return start + progress * (end - start) / end_fraction

    return func
def constant_fn(val: float) -> Schedule:
    """
    Create a schedule that always returns ``val``.

    Useful for learning rate schedules (avoids code duplication).

    :param val: constant value
    :return: Constant schedule function.
    """
    # The schedule ignores its progress argument entirely.
    return lambda _: val
def get_device(device: Union[th.device, str] = "auto") -> th.device:
    """
    Retrieve PyTorch device, checking that it is actually available.

    Supports only cpu and cuda; ``"auto"`` prefers the GPU.

    :param device: One for 'auto', 'cuda', 'cpu'
    :return: Supported Pytorch device
    """
    # "auto" means: try CUDA first.
    resolved = th.device("cuda" if device == "auto" else device)
    # Fall back to CPU when CUDA was requested but is unavailable.
    if resolved.type == th.device("cuda").type and not th.cuda.is_available():
        return th.device("cpu")
    return resolved
def get_latest_run_id(log_path: str = "", log_name: str = "") -> int:
    """
    Returns the latest run number for the given log name and log path,
    by finding the greatest number in the directories.

    :param log_path: Path to the log folder containing several runs.
    :param log_name: Name of the experiment. Each run is stored
        in a folder named ``log_name_1``, ``log_name_2``, ...
    :return: latest run number (0 when no matching run exists)
    """
    pattern = os.path.join(log_path, f"{glob.escape(log_name)}_[0-9]*")
    latest = 0
    for path in glob.glob(pattern):
        dir_name = path.split(os.sep)[-1]
        # Split on the LAST underscore: everything before must equal the
        # experiment name, everything after must be the run number.
        prefix, _, suffix = dir_name.rpartition("_")
        if prefix == log_name and suffix.isdigit():
            latest = max(latest, int(suffix))
    return latest
def configure_logger(
    verbose: int = 0,
    tensorboard_log: Optional[str] = None,
    tb_log_name: str = "",
    reset_num_timesteps: bool = True,
) -> Logger:
    """
    Configure the logger's outputs.

    :param verbose: Verbosity level: 0 for no output, 1 for the standard output to be part of the logger outputs
    :param tensorboard_log: the log location for tensorboard (if None, no logging)
    :param tb_log_name: tensorboard log
    :param reset_num_timesteps: Whether the ``num_timesteps`` attribute is reset or not.
        It allows to continue a previous learning curve (``reset_num_timesteps=False``)
        or start from t=0 (``reset_num_timesteps=True``, the default).
    :return: The logger object
    """
    if tensorboard_log is not None and SummaryWriter is None:
        raise ImportError(
            "Trying to log data to tensorboard but tensorboard is not installed."
        )
    save_path = None
    format_strings = ["stdout"]
    if tensorboard_log is not None:
        # SummaryWriter is guaranteed non-None here (checked above).
        run_id = get_latest_run_id(tensorboard_log, tb_log_name)
        if not reset_num_timesteps:
            # Continue training in the same directory
            run_id -= 1
        save_path = os.path.join(tensorboard_log, f"{tb_log_name}_{run_id + 1}")
        format_strings = (
            ["stdout", "tensorboard"] if verbose >= 1 else ["tensorboard"]
        )
    elif verbose == 0:
        # Silent mode: an empty format string disables stdout output.
        format_strings = [""]
    return configure(save_path, format_strings=format_strings)
def check_for_correct_spaces(
    env: GymEnv, observation_space: spaces.Space, action_space: spaces.Space
) -> None:
    """
    Checks that the environment has same spaces as provided ones. Used by
    BaseAlgorithm to check if spaces match after loading the model with a
    given env.

    Checked parameters:
    - observation_space
    - action_space

    :param env: Environment to check for valid spaces
    :param observation_space: Observation space to check against
    :param action_space: Action space to check against
    """
    # Check observation space first, then action space (original order).
    checks = (
        ("Observation", observation_space, env.observation_space),
        ("Action", action_space, env.action_space),
    )
    for kind, expected, actual in checks:
        if expected != actual:
            raise ValueError(
                f"{kind} spaces do not match: {expected} != {actual}"
            )
def check_shape_equal(space1: spaces.Space, space2: spaces.Space) -> None:
    """
    If the spaces are Box, check that they have the same shape.
    If the spaces are Dict, it recursively checks the subspaces.

    :param space1: Space
    :param space2: Other space
    """
    if isinstance(space1, spaces.Dict):
        assert isinstance(
            space2, spaces.Dict
        ), "spaces must be of the same type"
        assert (
            space1.spaces.keys() == space2.spaces.keys()
        ), "spaces must have the same keys"
        # Recurse into each subspace pair.
        for key in space1.spaces:
            check_shape_equal(space1.spaces[key], space2.spaces[key])
    elif isinstance(space1, spaces.Box):
        assert space1.shape == space2.shape, "spaces must have the same shape"
def is_vectorized_box_observation(
    observation: np.ndarray, observation_space: spaces.Box
) -> bool:
    """
    For box observation type, detects and validates the shape,
    then returns whether or not the observation is vectorized.

    :param observation: the input observation to validate
    :param observation_space: the observation space
    :return: whether the given observation is vectorized or not
    """
    space_shape = observation_space.shape
    if observation.shape == space_shape:
        return False
    # A leading batch dimension marks a vectorized observation.
    if observation.shape[1:] == space_shape:
        return True
    raise ValueError(
        f"Error: Unexpected observation shape {observation.shape} for "
        + f"Box environment, please use {space_shape} "
        + "or (n_env, {}) for the observation shape.".format(
            ", ".join(map(str, space_shape))
        )
    )
def is_vectorized_discrete_observation(
    observation: Union[int, np.ndarray], observation_space: spaces.Discrete
) -> bool:
    """
    For discrete observation type, detects and validates the shape,
    then returns whether or not the observation is vectorized.

    :param observation: the input observation to validate
    :param observation_space: the observation space
    :return: whether the given observation is vectorized or not
    """
    # A plain int, or a 0-d numpy scalar (shape == ()), is unvectorized.
    if isinstance(observation, int) or observation.shape == ():
        return False
    if len(observation.shape) == 1:
        return True
    raise ValueError(
        f"Error: Unexpected observation shape {observation.shape} for "
        + "Discrete environment, please use () or (n_env,) for the observation shape."
    )
def is_vectorized_multidiscrete_observation(
    observation: np.ndarray, observation_space: spaces.MultiDiscrete
) -> bool:
    """
    For multidiscrete observation type, detects and validates the shape,
    then returns whether or not the observation is vectorized.

    :param observation: the input observation to validate
    :param observation_space: the observation space
    :return: whether the given observation is vectorized or not
    """
    n_dims = len(observation_space.nvec)
    if observation.shape == (n_dims,):
        return False
    # Vectorized form: (n_env, n_dims).
    if len(observation.shape) == 2 and observation.shape[1] == n_dims:
        return True
    raise ValueError(
        f"Error: Unexpected observation shape {observation.shape} for MultiDiscrete "
        + f"environment, please use ({n_dims},) or "
        + f"(n_env, {n_dims}) for the observation shape."
    )
def is_vectorized_multibinary_observation(
    observation: np.ndarray, observation_space: spaces.MultiBinary
) -> bool:
    """
    For multibinary observation type, detects and validates the shape,
    then returns whether or not the observation is vectorized.

    :param observation: the input observation to validate
    :param observation_space: the observation space
    :return: whether the given observation is vectorized or not
    """
    space_shape = observation_space.shape
    if observation.shape == space_shape:
        return False
    # Vectorized form: one extra leading batch dimension.
    if (
        len(observation.shape) == len(space_shape) + 1
        and observation.shape[1:] == space_shape
    ):
        return True
    raise ValueError(
        f"Error: Unexpected observation shape {observation.shape} for MultiBinary "
        + f"environment, please use {space_shape} or "
        + f"(n_env, {observation_space.n}) for the observation shape."
    )
def is_vectorized_dict_observation(
    observation: np.ndarray, observation_space: spaces.Dict
) -> bool:
    """
    For dict observation type, detects and validates the shape,
    then returns whether or not the observation is vectorized.
    :param observation: the input observation to validate
    :param observation_space: the observation space
    :return: whether the given observation is vectorized or not
    """
    # We first assume that all observations are not vectorized
    all_non_vectorized = True
    for key, subspace in observation_space.spaces.items():
        # This fails when the observation is not vectorized
        # or when it has the wrong shape
        if observation[key].shape != subspace.shape:
            all_non_vectorized = False
            break
    if all_non_vectorized:
        return False
    all_vectorized = True
    # Now we check that all observation are vectorized and have the correct shape
    for key, subspace in observation_space.spaces.items():
        if observation[key].shape[1:] != subspace.shape:
            all_vectorized = False
            break
    if all_vectorized:
        return True
    else:
        # NOTE(review): the error path below deliberately relies on `key`
        # leaking from the loop above — after `break` it still names the
        # first offending subspace key.
        # Retrieve error message
        error_msg = ""
        try:
            is_vectorized_observation(
                observation[key], observation_space.spaces[key]
            )
        except ValueError as e:
            error_msg = f"{e}"
        raise ValueError(
            f"There seems to be a mix of vectorized and non-vectorized observations. "
            f"Unexpected observation shape {observation[key].shape} for key {key} "
            f"of type {observation_space.spaces[key]}. {error_msg}"
        )
def is_vectorized_observation(
    observation: Union[int, np.ndarray], observation_space: spaces.Space
) -> bool:
    """
    For every observation type, detects and validates the shape,
    then returns whether or not the observation is vectorized.

    :param observation: the input observation to validate
    :param observation_space: the observation space
    :return: whether the given observation is vectorized or not
    """
    # Dispatch to the type-specific checker for the given space.
    dispatch = {
        spaces.Box: is_vectorized_box_observation,
        spaces.Discrete: is_vectorized_discrete_observation,
        spaces.MultiDiscrete: is_vectorized_multidiscrete_observation,
        spaces.MultiBinary: is_vectorized_multibinary_observation,
        spaces.Dict: is_vectorized_dict_observation,
    }
    for space_type, checker in dispatch.items():
        if isinstance(observation_space, space_type):
            return checker(observation, observation_space)  # type: ignore[operator]
    raise ValueError(
        f"Error: Cannot determine if the observation is vectorized with the space type {observation_space}."
    )
def safe_mean(arr: Union[np.ndarray, list, deque]) -> float:
    """
    Compute the mean of an array if there is at least one element.
    For empty array, return NaN. It is used for logging only.

    :param arr: Numpy array or list of values
    :return: the mean, or NaN when ``arr`` is empty
    """
    if len(arr) == 0:
        return np.nan
    return float(np.mean(arr))  # type: ignore[arg-type]
def get_parameters_by_name(
    model: th.nn.Module, included_names: Iterable[str]
) -> List[th.Tensor]:
    """
    Extract parameters from the state dict of ``model``
    if the name contains one of the strings in ``included_names``.

    :param model: the model where the parameters come from.
    :param included_names: substrings of names to include.
    :return: List of parameters values (Pytorch tensors)
        that matches the queried names.
    """
    selected = []
    for name, tensor in model.state_dict().items():
        # Substring match against any of the requested name fragments.
        if any(fragment in name for fragment in included_names):
            selected.append(tensor)
    return selected
def zip_strict(*iterables: Iterable) -> Iterable:
r"""
``zip()`` function but enforces that iterables are of equal length.
Raises ``ValueError`` if iterables not of equal length.
Code inspired by Stackoverflow answer for question #32954486.
:param \*iterables: iterables to ``zip()``
"""
# As in Stackoverflow #32954486, use
# new object for "empty" in case we have
# Nones in iterable.
sentinel = object()
for combo in zip_longest(*iterables, fillvalue=sentinel):
if sentinel in combo:
raise ValueError("Iterables have different lengths")
yield combo
def polyak_update(
    params: Iterable[th.Tensor],
    target_params: Iterable[th.Tensor],
    tau: float,
) -> None:
    """
    Perform a Polyak average update on ``target_params`` using ``params``:
    target parameters are slowly updated towards the main parameters.
    ``tau``, the soft update coefficient controls the interpolation:
    ``tau=1`` corresponds to copying the parameters to the target ones whereas nothing happens when ``tau=0``.
    The update is done in place with ``no_grad``: each target is scaled by
    ``1-tau`` and the source, scaled by ``tau``, is added into it — no
    intermediate tensors or computation graph are created.
    See https://github.com/DLR-RM/stable-baselines3/issues/93

    :param params: parameters to use to update the target params
    :param target_params: parameters to update
    :param tau: the soft update coefficient ("Polyak update", between 0 and 1)
    """
    with th.no_grad():
        # zip_strict raises if the two parameter lists differ in length
        # (plain zip would silently truncate).
        for source, target in zip_strict(params, target_params):
            target.data.mul_(1 - tau)
            th.add(target.data, source.data, alpha=tau, out=target.data)
def obs_as_tensor(
    obs: Union[np.ndarray, Dict[str, np.ndarray]], device: th.device
) -> Union[th.Tensor, TensorDict]:
    """
    Moves the observation to the given device.

    :param obs: observation as an array or a dict of arrays
    :param device: PyTorch device
    :return: PyTorch tensor of the observation on a desired device.
    """
    if isinstance(obs, np.ndarray):
        return th.as_tensor(obs, device=device)
    if isinstance(obs, dict):
        # Convert every entry of the dict observation individually.
        return {
            name: th.as_tensor(array, device=device)
            for (name, array) in obs.items()
        }
    raise Exception(f"Unrecognized type of observation {type(obs)}")
def should_collect_more_steps(
    train_freq: TrainFreq,
    num_collected_steps: int,
    num_collected_episodes: int,
) -> bool:
    """
    Helper used in ``collect_rollouts()`` of off-policy algorithms
    to determine the termination condition.

    :param train_freq: How much experience should be collected before updating the policy.
    :param num_collected_steps: The number of already collected steps.
    :param num_collected_episodes: The number of already collected episodes.
    :return: Whether to continue or not collecting experience
        by doing rollouts of the current policy.
    """
    # Compare the relevant counter against the configured frequency.
    if train_freq.unit == TrainFrequencyUnit.STEP:
        return num_collected_steps < train_freq.frequency
    if train_freq.unit == TrainFrequencyUnit.EPISODE:
        return num_collected_episodes < train_freq.frequency
    raise ValueError(
        "The unit of the `train_freq` must be either TrainFrequencyUnit.STEP "
        f"or TrainFrequencyUnit.EPISODE not '{train_freq.unit}'!"
    )
def get_system_info(print_info: bool = True) -> Tuple[Dict[str, str], str]:
    """
    Retrieve system and python env info for the current system.

    :param print_info: Whether to print or not those infos
    :return: Dictionary summing up the version for each relevant package
        and a formatted string.
    """
    # Insert a space after "#" in the OS string so GitHub does not turn
    # e.g. "#42" into an issue link; "#42" becomes "# 42".
    os_string = re.sub(
        r"#(\d)", r"# \1", f"{platform.platform()} {platform.version()}"
    )
    env_info = {
        "OS": os_string,
        "Python": platform.python_version(),
        "Stable-Baselines3": sb3.__version__,
        "PyTorch": th.__version__,
        "GPU Enabled": str(th.cuda.is_available()),
        "Numpy": np.__version__,
        "Cloudpickle": cloudpickle.__version__,
        "Gymnasium": gym.__version__,
    }
    try:
        # Legacy OpenAI Gym is optional; record its version when installed.
        import gym as openai_gym
        env_info.update({"OpenAI Gym": openai_gym.__version__})
    except ImportError:
        pass
    env_info_str = "".join(
        f"- {key}: {value}\n" for key, value in env_info.items()
    )
    if print_info:
        print(env_info_str)
    return env_info, env_info_str
| 21,859 | Python | .py | 536 | 34.294776 | 113 | 0.670875 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,072 | imitation_adversarial_common.py | JurajZelman_airl-market-making/package_modifications/imitation_adversarial_common.py | """Core code for adversarial imitation learning, shared between GAIL and AIRL."""
import abc
import dataclasses
import logging
import os # TODO:
from datetime import datetime # TODO:
from typing import (
Callable,
Iterable,
Iterator,
Mapping,
Optional,
Type,
overload,
)
import numpy as np
import torch as th
import torch.utils.tensorboard as thboard
import tqdm
from imitation.algorithms import base
from imitation.data import buffer, rollout, types, wrappers
from imitation.rewards import reward_nets, reward_wrapper
from imitation.util import logger, networks, util
from stable_baselines3.common import (
base_class,
distributions,
on_policy_algorithm,
policies,
vec_env,
)
from stable_baselines3.common.evaluation import evaluate_policy # TODO:
from stable_baselines3.sac import policies as sac_policies
from torch.nn import functional as F
from rl.utils import save_model # TODO:
# TODO: Define a random number generator
RNG = np.random.default_rng(1)
def compute_train_stats(
    disc_logits_expert_is_high: th.Tensor,
    labels_expert_is_one: th.Tensor,
    disc_loss: th.Tensor,
) -> Mapping[str, float]:
    """Train statistics for GAIL/AIRL discriminator.
    Args:
        disc_logits_expert_is_high: discriminator logits produced by
            `AdversarialTrainer.logits_expert_is_high`.
        labels_expert_is_one: integer labels describing whether logit was for an
            expert (0) or generator (1) sample.
        disc_loss: final discriminator loss.
    Returns:
        A mapping from statistic names to float values.
    """
    with th.no_grad():
        # Logits of the discriminator output; >0 for expert samples, <0 for generator.
        bin_is_generated_pred = disc_logits_expert_is_high < 0
        # Binary label, so 1 is for expert, 0 is for generator.
        bin_is_generated_true = labels_expert_is_one == 0
        bin_is_expert_true = th.logical_not(bin_is_generated_true)
        int_is_generated_pred = bin_is_generated_pred.long()
        int_is_generated_true = bin_is_generated_true.long()
        n_generated = float(th.sum(int_is_generated_true))
        n_labels = float(len(labels_expert_is_one))
        n_expert = n_labels - n_generated
        # Guard against an empty batch: report NaN instead of dividing by 0.
        pct_expert = (
            n_expert / float(n_labels) if n_labels > 0 else float("NaN")
        )
        n_expert_pred = int(n_labels - th.sum(int_is_generated_pred))
        if n_labels > 0:
            pct_expert_pred = n_expert_pred / float(n_labels)
        else:
            pct_expert_pred = float("NaN")
        # Element-wise agreement between predicted and true generator flags.
        correct_vec = th.eq(bin_is_generated_pred, bin_is_generated_true)
        acc = th.mean(correct_vec.float())
        _n_pred_expert = th.sum(th.logical_and(bin_is_expert_true, correct_vec))
        if n_expert < 1:
            expert_acc = float("NaN")
        else:
            # float() is defensive, since we cannot divide Torch tensors by
            # Python ints
            expert_acc = _n_pred_expert.item() / float(n_expert)
        _n_pred_gen = th.sum(th.logical_and(bin_is_generated_true, correct_vec))
        # max(1, ...) avoids a 0/0 when the batch holds no generator samples.
        _n_gen_or_1 = max(1, n_generated)
        generated_acc = _n_pred_gen / float(_n_gen_or_1)
        label_dist = th.distributions.Bernoulli(
            logits=disc_logits_expert_is_high
        )
        entropy = th.mean(label_dist.entropy())
    return {
        "disc_loss": float(th.mean(disc_loss)),
        "disc_acc": float(acc),
        "disc_acc_expert": float(
            expert_acc
        ), # accuracy on just expert examples
        "disc_acc_gen": float(
            generated_acc
        ), # accuracy on just generated examples
        # entropy of the predicted label distribution, averaged equally across
        # both classes (if this drops then disc is very good or has given up)
        "disc_entropy": float(entropy),
        # true number of expert demos and predicted number of expert demos
        "disc_proportion_expert_true": float(pct_expert),
        "disc_proportion_expert_pred": float(pct_expert_pred),
        "n_expert": float(n_expert),
        "n_generated": float(n_generated),
    }
class AdversarialTrainer(base.DemonstrationAlgorithm[types.Transitions]):
"""Base class for adversarial imitation learning algorithms like GAIL and AIRL."""
venv: vec_env.VecEnv
"""The original vectorized environment."""
venv_train: vec_env.VecEnv
"""Like `self.venv`, but wrapped with train reward unless in debug mode.
If `debug_use_ground_truth=True` was passed into the initializer then
`self.venv_train` is the same as `self.venv`."""
_demo_data_loader: Optional[Iterable[types.TransitionMapping]]
_endless_expert_iterator: Optional[Iterator[types.TransitionMapping]]
venv_wrapped: vec_env.VecEnvWrapper
    def __init__(
        self,
        *,
        demonstrations: base.AnyTransitions,
        demo_batch_size: int,
        venv: vec_env.VecEnv,
        gen_algo: base_class.BaseAlgorithm,
        reward_net: reward_nets.RewardNet,
        demo_minibatch_size: Optional[int] = None,
        n_disc_updates_per_round: int = 2,
        log_dir: types.AnyPath = "output/",
        disc_opt_cls: Type[th.optim.Optimizer] = th.optim.Adam,
        disc_opt_kwargs: Optional[Mapping] = None,
        gen_train_timesteps: Optional[int] = None,
        gen_replay_buffer_capacity: Optional[int] = None,
        custom_logger: Optional[logger.HierarchicalLogger] = None,
        init_tensorboard: bool = False,
        init_tensorboard_graph: bool = False,
        debug_use_ground_truth: bool = False,
        allow_variable_horizon: bool = False,
    ):
        """Builds AdversarialTrainer.
        Args:
            demonstrations: Demonstrations from an expert (optional). Transitions
                expressed directly as a `types.TransitionsMinimal` object, a sequence
                of trajectories, or an iterable of transition batches (mappings from
                keywords to arrays containing observations, etc).
            demo_batch_size: The number of samples in each batch of expert data. The
                discriminator batch size is twice this number because each discriminator
                batch contains a generator sample for every expert sample.
            venv: The vectorized environment to train in.
            gen_algo: The generator RL algorithm that is trained to maximize
                discriminator confusion. Environment and logger will be set to
                `venv` and `custom_logger`.
            reward_net: a Torch module that takes an observation, action and
                next observation tensors as input and computes a reward signal.
            demo_minibatch_size: size of minibatch to calculate gradients over.
                The gradients are accumulated until the entire batch is
                processed before making an optimization step. This is
                useful in GPU training to reduce memory usage, since
                fewer examples are loaded into memory at once,
                facilitating training with larger batch sizes, but is
                generally slower. Must be a factor of `demo_batch_size`.
                Optional, defaults to `demo_batch_size`.
            n_disc_updates_per_round: The number of discriminator updates after each
                round of generator updates in AdversarialTrainer.learn().
            log_dir: Directory to store TensorBoard logs, plots, etc. in.
            disc_opt_cls: The optimizer for discriminator training.
            disc_opt_kwargs: Parameters for discriminator training.
            gen_train_timesteps: The number of steps to train the generator policy for
                each iteration. If None, then defaults to the batch size (for on-policy)
                or number of environments (for off-policy).
            gen_replay_buffer_capacity: The capacity of the
                generator replay buffer (the number of obs-action-obs samples from
                the generator that can be stored). By default this is equal to
                `gen_train_timesteps`, meaning that we sample only from the most
                recent batch of generator samples.
            custom_logger: Where to log to; if None (default), creates a new logger.
            init_tensorboard: If True, makes various discriminator
                TensorBoard summaries.
            init_tensorboard_graph: If both this and `init_tensorboard` are True,
                then write a Tensorboard graph summary to disk.
            debug_use_ground_truth: If True, use the ground truth reward for
                `self.train_env`.
                This disables the reward wrapping that would normally replace
                the environment reward with the learned reward. This is useful for
                sanity checking that the policy training is functional.
            allow_variable_horizon: If False (default), algorithm will raise an
                exception if it detects trajectories of different length during
                training. If True, overrides this safety check. WARNING: variable
                horizon episodes leak information about the reward via termination
                condition, and can seriously confound evaluation. Read
                https://imitation.readthedocs.io/en/latest/guide/variable_horizon.html
                before overriding this.
        Raises:
            ValueError: if the batch size is not a multiple of the minibatch size.
        """
        self.demo_batch_size = demo_batch_size
        self.demo_minibatch_size = demo_minibatch_size or demo_batch_size
        if self.demo_batch_size % self.demo_minibatch_size != 0:
            raise ValueError("Batch size must be a multiple of minibatch size.")
        self._demo_data_loader = None
        self._endless_expert_iterator = None
        # `super().__init__` calls `set_demonstrations`, which fills in the
        # two loader attributes initialised to None just above.
        super().__init__(
            demonstrations=demonstrations,
            custom_logger=custom_logger,
            allow_variable_horizon=allow_variable_horizon,
        )
        self._global_step = 0
        self._disc_step = 0
        self.n_disc_updates_per_round = n_disc_updates_per_round
        self.debug_use_ground_truth = debug_use_ground_truth
        self.venv = venv
        self.gen_algo = gen_algo
        # Keep the reward net on the same device as the generator algorithm.
        self._reward_net = reward_net.to(gen_algo.device)
        self._log_dir = util.parse_path(log_dir)
        # Create graph for optimising/recording stats on discriminator
        self._disc_opt_cls = disc_opt_cls
        self._disc_opt_kwargs = disc_opt_kwargs or {}
        self._init_tensorboard = init_tensorboard
        self._init_tensorboard_graph = init_tensorboard_graph
        self._disc_opt = self._disc_opt_cls(
            self._reward_net.parameters(),
            **self._disc_opt_kwargs,
        )
        if self._init_tensorboard:
            logging.info(f"building summary directory at {self._log_dir}")
            summary_dir = self._log_dir / "summary"
            summary_dir.mkdir(parents=True, exist_ok=True)
            self._summary_writer = thboard.SummaryWriter(str(summary_dir))
        # Buffer rollouts so `train_gen` can harvest them for the replay
        # buffer, regardless of whether the reward wrapper is applied below.
        self.venv_buffering = wrappers.BufferingWrapper(self.venv)
        if debug_use_ground_truth:
            # Would use an identity reward fn here, but RewardFns can't see rewards.
            self.venv_wrapped = self.venv_buffering
            self.gen_callback = None
        else:
            # Replace the environment reward with the learned reward signal.
            self.venv_wrapped = reward_wrapper.RewardVecEnvWrapper(
                self.venv_buffering,
                reward_fn=self.reward_train.predict_processed,
            )
            self.gen_callback = self.venv_wrapped.make_log_callback()
        self.venv_train = self.venv_wrapped
        self.gen_algo.set_env(self.venv_train)
        self.gen_algo.set_logger(self.logger)
        if gen_train_timesteps is None:
            gen_algo_env = self.gen_algo.get_env()
            assert gen_algo_env is not None
            # Default: one step per env; for on-policy algorithms, one full
            # rollout buffer (n_steps per env).
            self.gen_train_timesteps = gen_algo_env.num_envs
            if isinstance(self.gen_algo, on_policy_algorithm.OnPolicyAlgorithm):
                self.gen_train_timesteps *= self.gen_algo.n_steps
        else:
            self.gen_train_timesteps = gen_train_timesteps
        if gen_replay_buffer_capacity is None:
            gen_replay_buffer_capacity = self.gen_train_timesteps
        self._gen_replay_buffer = buffer.ReplayBuffer(
            gen_replay_buffer_capacity,
            self.venv,
        )
        # Model-saving state (used by `train`): checkpoints of one run share
        # a common timestamp prefix under ./models.
        self.ts_now = datetime.now().strftime(
            "%Y-%m-%d_%H-%M-%S"
        )  # Ts for model saving
        self.save_path = os.path.join(os.getcwd(), "models")
        # -inf so the first evaluation always triggers a "best" checkpoint.
        self.highest_reward = -np.inf
@property
def policy(self) -> policies.BasePolicy:
policy = self.gen_algo.policy
assert policy is not None
return policy
    # Abstract hook: concrete subclasses define how discriminator logits are
    # computed from a batch of transitions.
    @abc.abstractmethod
    def logits_expert_is_high(
        self,
        state: th.Tensor,
        action: th.Tensor,
        next_state: th.Tensor,
        done: th.Tensor,
        log_policy_act_prob: Optional[th.Tensor] = None,
    ) -> th.Tensor:
        """Compute the discriminator's logits for each state-action sample.
        A high value corresponds to predicting expert, and a low value corresponds to
        predicting generator.
        Args:
            state: state at time t, of shape `(batch_size,) + state_shape`.
            action: action taken at time t, of shape `(batch_size,) + action_shape`.
            next_state: state at time t+1, of shape `(batch_size,) + state_shape`.
            done: binary episode completion flag after action at time t,
                of shape `(batch_size,)`.
            log_policy_act_prob: log probability of generator policy taking
                `action` at time t.
        Returns:
            Discriminator logits of shape `(batch_size,)`. A high output indicates an
            expert-like transition.
        """ # noqa: DAR202
    # Abstract: the reward signal used during adversarial training (wired
    # into the reward wrapper around `venv` and `networks.training` in
    # `__init__`/`train`).
    @property
    @abc.abstractmethod
    def reward_train(self) -> reward_nets.RewardNet:
        """Reward used to train generator policy."""
    # Abstract: the reward exposed for evaluation after adversarial training;
    # not referenced elsewhere in this class.
    @property
    @abc.abstractmethod
    def reward_test(self) -> reward_nets.RewardNet:
        """Reward used to train policy at "test" time after adversarial training."""
def set_demonstrations(self, demonstrations: base.AnyTransitions) -> None:
self._demo_data_loader = base.make_data_loader(
demonstrations,
self.demo_batch_size,
)
self._endless_expert_iterator = util.endless_iter(
self._demo_data_loader
)
def _next_expert_batch(self) -> Mapping:
assert self._endless_expert_iterator is not None
return next(self._endless_expert_iterator)
def train_disc(
self,
*,
expert_samples: Optional[Mapping] = None,
gen_samples: Optional[Mapping] = None,
) -> Mapping[str, float]:
"""Perform a single discriminator update, optionally using provided samples.
Args:
expert_samples: Transition samples from the expert in dictionary form.
If provided, must contain keys corresponding to every field of the
`Transitions` dataclass except "infos". All corresponding values can be
either NumPy arrays or Tensors. Extra keys are ignored. Must contain
`self.demo_batch_size` samples. If this argument is not provided, then
`self.demo_batch_size` expert samples from `self.demo_data_loader` are
used by default.
gen_samples: Transition samples from the generator policy in same dictionary
form as `expert_samples`. If provided, must contain exactly
`self.demo_batch_size` samples. If not provided, then take
`len(expert_samples)` samples from the generator replay buffer.
Returns:
Statistics for discriminator (e.g. loss, accuracy).
"""
with self.logger.accumulate_means("disc"):
# optionally write TB summaries for collected ops
write_summaries = (
self._init_tensorboard and self._global_step % 20 == 0
)
# compute loss
self._disc_opt.zero_grad()
batch_iter = self._make_disc_train_batches(
gen_samples=gen_samples,
expert_samples=expert_samples,
)
for batch in batch_iter:
disc_logits = self.logits_expert_is_high(
batch["state"],
batch["action"],
batch["next_state"],
batch["done"],
batch["log_policy_act_prob"],
)
loss = F.binary_cross_entropy_with_logits(
disc_logits,
batch["labels_expert_is_one"].float(),
)
# Renormalise the loss to be averaged over the whole
# batch size instead of the minibatch size.
assert len(batch["state"]) == 2 * self.demo_minibatch_size
loss *= self.demo_minibatch_size / self.demo_batch_size
loss.backward()
# TODO: Update the discriminator
self._disc_opt.step()
self._disc_opt.zero_grad()
# do gradient step
# self._disc_opt.step()
self._disc_step += 1
# compute/write stats and TensorBoard data
with th.no_grad():
train_stats = compute_train_stats(
disc_logits,
batch["labels_expert_is_one"],
loss,
)
self.logger.record("global_step", self._global_step)
for k, v in train_stats.items():
self.logger.record(k, v)
self.logger.dump(self._disc_step)
if write_summaries:
self._summary_writer.add_histogram(
"disc_logits", disc_logits.detach()
)
return train_stats
    def train_gen(
        self,
        total_timesteps: Optional[int] = None,
        learn_kwargs: Optional[Mapping] = None,
    ) -> None:
        """Trains the generator to maximize the discriminator loss.
        After the end of training populates the generator replay buffer (used in
        discriminator training) with `self.disc_batch_size` transitions.
        Args:
            total_timesteps: The number of transitions to sample from
                `self.venv_train` during training. By default,
                `self.gen_train_timesteps`.
            learn_kwargs: kwargs for the Stable Baselines `RLModel.learn()`
                method.
        """
        if total_timesteps is None:
            total_timesteps = self.gen_train_timesteps
        if learn_kwargs is None:
            learn_kwargs = {}
        with self.logger.accumulate_means("gen"):
            self.gen_algo.learn(
                total_timesteps=total_timesteps,
                # Keep the generator's timestep counter running across
                # rounds instead of restarting it on each `learn` call.
                reset_num_timesteps=False,
                callback=self.gen_callback,
                **learn_kwargs,
            )
            self._global_step += 1
        # Harvest the rollouts recorded by the buffering wrapper and store
        # them as flat transitions for the next discriminator update.
        gen_trajs, ep_lens = self.venv_buffering.pop_trajectories()
        self._check_fixed_horizon(ep_lens)
        gen_samples = rollout.flatten_trajectories_with_rew(gen_trajs)
        self._gen_replay_buffer.store(gen_samples)
def train(
self,
total_timesteps: int,
callback: Optional[Callable[[int], None]] = None,
) -> None:
"""Alternates between training the generator and discriminator.
Every "round" consists of a call to `train_gen(self.gen_train_timesteps)`,
a call to `train_disc`, and finally a call to `callback(round)`.
Training ends once an additional "round" would cause the number of transitions
sampled from the environment to exceed `total_timesteps`.
Args:
total_timesteps: An upper bound on the number of transitions to sample
from the environment during training.
callback: A function called at the end of every round which takes in a
single argument, the round number. Round numbers are in
`range(total_timesteps // self.gen_train_timesteps)`.
"""
n_rounds = total_timesteps // self.gen_train_timesteps
assert n_rounds >= 1, (
"No updates (need at least "
f"{self.gen_train_timesteps} timesteps, have only "
f"total_timesteps={total_timesteps})!"
)
for r in tqdm.tqdm(range(0, n_rounds), desc="round"):
# TODO: Turn on reward scaling
self._reward_net.scale = True
self.train_gen(self.gen_train_timesteps)
# TODO: Turn off reward scaling
self._reward_net.scale = False
for _ in range(self.n_disc_updates_per_round):
with networks.training(self.reward_train):
# switch to training mode (affects dropout, normalization)
self.train_disc()
if callback:
callback(r)
self.logger.dump(self._global_step)
# Evaluate the policy after training
n_episodes = 3
learner_rewards_after_training, _ = evaluate_policy(
self.gen_algo,
self.venv,
n_episodes,
return_episode_rewards=True,
)
reward_mean = np.mean(learner_rewards_after_training)
reward_std = np.std(learner_rewards_after_training)
# TODO: Save the model
if r % 10 == 0:
stats = self.logger._logger.stats
ts_partial = f"{self.ts_now}_{r}"
# print(f"Saving the model with timestamp: {ts_partial}")
save_model(
self.gen_algo,
self._reward_net,
stats,
self.save_path,
ts_partial,
)
print(f"Reward mean: {reward_mean}")
# Save the model if the reward is the highest so far
if (
reward_mean - (0.95 * reward_std / n_episodes)
>= self.highest_reward
):
ts_highest = f"{self.ts_now}_best"
self.highest_reward = reward_mean - (
0.95 * reward_std / n_episodes
)
print(f" New highest reward: {reward_mean}. Saving the model.")
save_model(
self.gen_algo,
self._reward_net,
stats,
self.save_path,
ts_highest,
)
@overload
def _torchify_array(self, ndarray: np.ndarray) -> th.Tensor:
...
@overload
def _torchify_array(self, ndarray: None) -> None:
...
def _torchify_array(
self, ndarray: Optional[np.ndarray]
) -> Optional[th.Tensor]:
if ndarray is not None:
return th.as_tensor(ndarray, device=self.reward_train.device)
return None
    def _get_log_policy_act_prob(
        self,
        obs_th: th.Tensor,
        acts_th: th.Tensor,
    ) -> Optional[th.Tensor]:
        """Evaluates the given actions on the given observations.
        Args:
            obs_th: A batch of observations.
            acts_th: A batch of actions.
        Returns:
            A batch of log policy action probabilities, or None if the
            policy class offers no way to evaluate them.
        """
        if isinstance(self.policy, policies.ActorCriticPolicy):
            # policies.ActorCriticPolicy has a concrete implementation of
            # evaluate_actions to generate log_policy_act_prob given obs and actions.
            _, log_policy_act_prob_th, _ = self.policy.evaluate_actions(
                obs_th,
                acts_th,
            )
        elif isinstance(self.policy, sac_policies.SACPolicy):
            gen_algo_actor = self.policy.actor
            assert gen_algo_actor is not None
            # generate log_policy_act_prob from SAC actor.
            mean_actions, log_std, _ = gen_algo_actor.get_action_dist_params(
                obs_th
            )
            assert isinstance(
                gen_algo_actor.action_dist,
                distributions.SquashedDiagGaussianDistribution,
            ) # Note: this is just a hint to mypy
            distribution = gen_algo_actor.action_dist.proba_distribution(
                mean_actions,
                log_std,
            )
            # SAC applies a squashing function to bound the actions to a finite range
            # `acts_th` need to be scaled accordingly before computing log prob.
            # Scale actions only if the policy squashes outputs.
            assert self.policy.squash_output
            scaled_acts = self.policy.scale_action(acts_th.numpy(force=True))
            scaled_acts_th = th.as_tensor(
                scaled_acts, device=mean_actions.device
            )
            log_policy_act_prob_th = distribution.log_prob(scaled_acts_th)
        else:
            # Other policy types: callers treat None as "unavailable".
            return None
        return log_policy_act_prob_th
    def _make_disc_train_batches(
        self,
        *,
        gen_samples: Optional[Mapping] = None,
        expert_samples: Optional[Mapping] = None,
    ) -> Iterator[Mapping[str, th.Tensor]]:
        """Build and return training minibatches for the next discriminator update.
        Args:
            gen_samples: Same as in `train_disc`.
            expert_samples: Same as in `train_disc`.
        Yields:
            The training minibatch: state, action, next state, dones, labels
            and policy log-probabilities.
        Raises:
            RuntimeError: Empty generator replay buffer.
            ValueError: `gen_samples` or `expert_samples` batch size is
                different from `self.demo_batch_size`.
        """
        batch_size = self.demo_batch_size
        if expert_samples is None:
            expert_samples = self._next_expert_batch()
        if gen_samples is None:
            if self._gen_replay_buffer.size() == 0:
                raise RuntimeError(
                    "No generator samples for training. "
                    "Call `train_gen()` first.",
                )
            gen_samples_dataclass = self._gen_replay_buffer.sample(batch_size)
            gen_samples = types.dataclass_quick_asdict(gen_samples_dataclass)
        if not (
            len(gen_samples["obs"]) == len(expert_samples["obs"]) == batch_size
        ):
            raise ValueError(
                "Need to have exactly `demo_batch_size` number of expert and "
                "generator samples, each. "
                f"(n_gen={len(gen_samples['obs'])} "
                f"n_expert={len(expert_samples['obs'])} "
                f"demo_batch_size={batch_size})",
            )
        # Guarantee that Mapping arguments are in mutable form.
        expert_samples = dict(expert_samples)
        gen_samples = dict(gen_samples)
        # Rebalance the expert batch between two action classes by
        # oversampling each to half the batch size.
        # NOTE(review): action ids 12 and 20 are hard-coded (presumably
        # domain-specific actions -- confirm against the environment's action
        # space). If either action is absent while the batch still contains
        # more than one distinct action, `len(id_12)`/`len(id_20)` is 0 and
        # the divisions below raise ZeroDivisionError.
        acts = expert_samples["acts"]
        obs = expert_samples["obs"]
        next_obs = expert_samples["next_obs"]
        dones = expert_samples["dones"]
        infos = expert_samples["infos"]
        if len(np.unique(acts)) > 1:
            # Get the indices of the actions
            id_12 = np.where(acts == 12)[0].tolist()
            id_20 = np.where(acts == 20)[0].tolist()
            # Compute the number of times the list needs to be repeated
            a = int(np.ceil(len(acts) / (2 * len(id_12))))
            b = int(np.ceil(len(acts) / (2 * len(id_20))))
            # Sample the indices from the original list
            id_12 = np.tile(id_12, a).tolist()[: int(len(acts) / 2)]
            id_20 = np.tile(id_20, b).tolist()[: int(len(acts) / 2)]
            # Join the indices and shuffle them randomly
            joint_id = id_12 + id_20
            joint_id = RNG.permutation(joint_id)
            # Replace the samples (note: `infos` is left unpermuted)
            expert_samples["acts"] = acts[joint_id]
            expert_samples["obs"] = obs[joint_id]
            expert_samples["next_obs"] = next_obs[joint_id]
            expert_samples["dones"] = dones[joint_id]
            expert_samples["infos"] = infos
        # Convert applicable Tensor values to NumPy.
        for field in dataclasses.fields(types.Transitions):
            k = field.name
            if k == "infos":
                continue
            for d in [gen_samples, expert_samples]:
                if isinstance(d[k], th.Tensor):
                    d[k] = d[k].detach().numpy()
        assert isinstance(gen_samples["obs"], np.ndarray)
        assert isinstance(expert_samples["obs"], np.ndarray)
        # Check dimensions.
        assert batch_size == len(expert_samples["acts"])
        assert batch_size == len(expert_samples["next_obs"])
        assert batch_size == len(gen_samples["acts"])
        assert batch_size == len(gen_samples["next_obs"])
        for start in range(0, batch_size, self.demo_minibatch_size):
            end = start + self.demo_minibatch_size
            # take minibatch slice (this creates views so no memory issues)
            expert_batch = {k: v[start:end] for k, v in expert_samples.items()}
            gen_batch = {k: v[start:end] for k, v in gen_samples.items()}
            # Concatenate rollouts, and label each row as expert or generator.
            obs = np.concatenate([expert_batch["obs"], gen_batch["obs"]])
            acts = np.concatenate([expert_batch["acts"], gen_batch["acts"]])
            next_obs = np.concatenate(
                [expert_batch["next_obs"], gen_batch["next_obs"]]
            )
            dones = np.concatenate([expert_batch["dones"], gen_batch["dones"]])
            # notice that the labels use the convention that expert samples are
            # labelled with 1 and generator samples with 0.
            labels_expert_is_one = np.concatenate(
                [
                    np.ones(self.demo_minibatch_size, dtype=int),
                    np.zeros(self.demo_minibatch_size, dtype=int),
                ],
            )
            # Calculate generator-policy log probabilities.
            with th.no_grad():
                obs_th = th.as_tensor(obs, device=self.gen_algo.device)
                acts_th = th.as_tensor(acts, device=self.gen_algo.device)
                log_policy_act_prob = self._get_log_policy_act_prob(
                    obs_th, acts_th
                )
                if log_policy_act_prob is not None:
                    assert (
                        len(log_policy_act_prob) == 2 * self.demo_minibatch_size
                    )
                    log_policy_act_prob = log_policy_act_prob.reshape(
                        (2 * self.demo_minibatch_size,),
                    )
                del obs_th, acts_th # unneeded
            # Preprocess for the reward net (handles device and dtype).
            (
                obs_th,
                acts_th,
                next_obs_th,
                dones_th,
            ) = self.reward_train.preprocess(
                obs,
                acts,
                next_obs,
                dones,
            )
            batch_dict = {
                "state": obs_th,
                "action": acts_th,
                "next_state": next_obs_th,
                "done": dones_th,
                "labels_expert_is_one": self._torchify_array(
                    labels_expert_is_one
                ),
                "log_policy_act_prob": log_policy_act_prob,
            }
            yield batch_dict
| 31,672 | Python | .py | 683 | 34.458272 | 88 | 0.592038 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,073 | sb3_logger.py | JurajZelman_airl-market-making/package_modifications/sb3_logger.py | import datetime
import json
import os
import sys
import tempfile
import warnings
from collections import defaultdict
from io import TextIOBase
from typing import (
Any,
Dict,
List,
Mapping,
Optional,
Sequence,
TextIO,
Tuple,
Union,
)
import matplotlib.figure
import numpy as np
import pandas
import torch as th
try:
from torch.utils.tensorboard import SummaryWriter
from torch.utils.tensorboard.summary import hparams
except ImportError:
SummaryWriter = None # type: ignore[misc, assignment]
try:
from tqdm import tqdm
except ImportError:
tqdm = None
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class Video:
    """Container pairing video frames with their playback rate.

    :param frames: tensor of frames the video is assembled from
    :param fps: playback speed in frames per second
    """

    def __init__(self, frames: th.Tensor, fps: float):
        self.fps = fps
        self.frames = frames
class Figure:
    """
    Figure data class storing a matplotlib figure and whether to close the figure after logging it
    :param figure: figure to log
    :param close: if true, close the figure after logging it
    """
    def __init__(self, figure: matplotlib.figure.Figure, close: bool):
        self.figure = figure
        # When True, the output format that logs this figure is expected to
        # close it afterwards, freeing the underlying matplotlib resources.
        self.close = close
class Image:
    """Container pairing image data with its layout specification.

    :param image: image to log
    :param dataformats: Image data format specification of the form NCHW, NHWC, CHW, HWC, HW, WH, etc.
        More info in add_image method doc at https://pytorch.org/docs/stable/tensorboard.html
        Gym envs normally use 'HWC' (channel last)
    """

    def __init__(
        self, image: Union[th.Tensor, np.ndarray, str], dataformats: str
    ):
        self.dataformats = dataformats
        self.image = image
class HParam:
    """Container for hyperparameters and metrics shown in TensorBoard's HPARAMS tab.

    :param hparam_dict: key-value pairs of hyperparameters to log
    :param metric_dict: key-value pairs of metrics to log
    A non-empty metrics dict is required to display hyperparameters in the corresponding Tensorboard section.
    """

    def __init__(
        self,
        hparam_dict: Mapping[str, Union[bool, str, float, None]],
        metric_dict: Mapping[str, float],
    ):
        self.hparam_dict = hparam_dict
        # TensorBoard refuses to show the HPARAMS tab without metrics.
        if not metric_dict:
            raise Exception(
                "`metric_dict` must not be empty to display hyperparameters to the HPARAMS tensorboard tab."
            )
        self.metric_dict = metric_dict
class FormatUnsupportedError(NotImplementedError):
    """Raised when a logged value cannot be rendered by one of the output formats.

    :param unsupported_formats: A sequence of unsupported formats,
        for instance ``["stdout"]``.
    :param value_description: Description of the value that cannot be logged by this format.
    """

    def __init__(
        self, unsupported_formats: Sequence[str], value_description: str
    ):
        # Pick singular or plural phrasing depending on how many formats
        # rejected the value.
        format_str = (
            f"formats {', '.join(unsupported_formats)} are"
            if len(unsupported_formats) > 1
            else f"format {unsupported_formats[0]} is"
        )
        super().__init__(
            f"The {format_str} not supported for the {value_description} value logged.\n"
            f"You can exclude formats via the `exclude` parameter of the logger's `record` function."
        )
class KVWriter:
    """Abstract base for writers that persist key/value logging data."""

    def write(
        self,
        key_values: Dict[str, Any],
        key_excluded: Dict[str, Tuple[str, ...]],
        step: int = 0,
    ) -> None:
        """Write a dictionary of values for one step to the output.

        :param key_values: values to record
        :param key_excluded: per-key tuple of format names to skip
        :param step: logging step the values belong to
        """
        raise NotImplementedError

    def close(self) -> None:
        """Release any resources owned by this writer."""
        raise NotImplementedError
class SeqWriter:
    """Abstract base for writers that output a plain sequence of strings."""

    def write_sequence(self, sequence: List[str]) -> None:
        """Write each element of *sequence* to the output.

        :param sequence: strings to emit
        """
        raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
    """A human-readable output format producing ASCII tables of key-value pairs.
    Set attribute ``max_length`` to change the maximum length of keys and values
    to write to output (or specify it when calling ``__init__``).
    :param filename_or_file: the file to write the log to
    :param max_length: the maximum length of keys and values to write to output.
        Outputs longer than this will be truncated. An error will be raised
        if multiple keys are truncated to the same value. The maximum output
        width will be ``2*max_length + 7``. The default of 36 produces output
        no longer than 79 characters wide.
    """
    def __init__(
        self, filename_or_file: Union[str, TextIO], max_length: int = 36
    ):
        self.max_length = max_length
        if isinstance(filename_or_file, str):
            # We opened the file ourselves, so `close()` must close it.
            self.file = open(filename_or_file, "w")
            self.own_file = True
        elif isinstance(filename_or_file, TextIOBase) or hasattr(
            filename_or_file, "write"
        ):
            # Note: in theory `TextIOBase` check should be sufficient,
            # in practice, libraries don't always inherit from it, see GH#1598
            self.file = filename_or_file  # type: ignore[assignment]
            # Caller owns the stream; `close()` leaves it open.
            self.own_file = False
        else:
            raise ValueError(f"Expected file or str, got {filename_or_file}")
    def write(
        self,
        key_values: Dict[str, Any],
        key_excluded: Dict[str, Tuple[str, ...]],
        step: int = 0,
    ) -> None:
        # Create strings for printing.
        # key2str maps (tag, truncated key) -> truncated value string; the
        # tag is the "section/" prefix of slash-separated keys.
        # NOTE: zipping two independently sorted dicts assumes `key_values`
        # and `key_excluded` share exactly the same key set.
        key2str = {}
        tag = ""
        for (key, value), (_, excluded) in zip(
            sorted(key_values.items()), sorted(key_excluded.items())
        ):
            if excluded is not None and (
                "stdout" in excluded or "log" in excluded
            ):
                continue
            elif isinstance(value, Video):
                raise FormatUnsupportedError(["stdout", "log"], "video")
            elif isinstance(value, Figure):
                raise FormatUnsupportedError(["stdout", "log"], "figure")
            elif isinstance(value, Image):
                raise FormatUnsupportedError(["stdout", "log"], "image")
            elif isinstance(value, HParam):
                raise FormatUnsupportedError(["stdout", "log"], "hparam")
            elif isinstance(value, float):
                # Align left
                value_str = f"{value:<8.3g}"
            else:
                value_str = str(value)
            if key.find("/") > 0:  # Find tag and add it to the dict
                tag = key[: key.find("/") + 1]
                key2str[(tag, self._truncate(tag))] = ""
            # Remove tag from key and indent the key
            if len(tag) > 0 and tag in key:
                key = f"{'':3}{key[len(tag) :]}"
            truncated_key = self._truncate(key)
            if (tag, truncated_key) in key2str:
                raise ValueError(
                    f"Key '{key}' truncated to '{truncated_key}' that already exists. Consider increasing `max_length`."
                )
            key2str[(tag, truncated_key)] = self._truncate(value_str)
        # Find max widths
        if len(key2str) == 0:
            warnings.warn("Tried to write empty key-value dict")
            return
        else:
            tagless_keys = map(lambda x: x[1], key2str.keys())
            key_width = max(map(len, tagless_keys))
            val_width = max(map(len, key2str.values()))
        # Write out the data
        dashes = "-" * (key_width + val_width + 7)
        lines = [dashes]
        for (_, key), value in key2str.items():
            key_space = " " * (key_width - len(key))
            val_space = " " * (val_width - len(value))
            lines.append(f"| {key}{key_space} | {value}{val_space} |")
        lines.append(dashes)
        # NOTE: emission is deliberately disabled in this modified copy --
        # the table is formatted above but never written, so stdout/log
        # output from this format is suppressed.
        # TODO: Surpress the print statements
        # if tqdm is not None and hasattr(self.file, "name") and self.file.name == "<stdout>":
        #     # Do not mess up with progress bar
        #     tqdm.write("\n".join(lines) + "\n", file=sys.stdout, end="")
        # else:
        #     self.file.write("\n".join(lines) + "\n")
        # # Flush the output to the file
        # self.file.flush()
    def _truncate(self, string: str) -> str:
        # Clip to `max_length`, marking the cut with a trailing ellipsis.
        if len(string) > self.max_length:
            string = string[: self.max_length - 3] + "..."
        return string
    def write_sequence(self, sequence: List[str]) -> None:
        # Space-separate the items on a single flushed line.
        for i, elem in enumerate(sequence):
            self.file.write(elem)
            if i < len(sequence) - 1:  # add space unless this is the last one
                self.file.write(" ")
        self.file.write("\n")
        self.file.flush()
    def close(self) -> None:
        """
        closes the file
        """
        # Only close streams this writer opened itself (see __init__).
        if self.own_file:
            self.file.close()
def filter_excluded_keys(
    key_values: Dict[str, Any],
    key_excluded: Dict[str, Tuple[str, ...]],
    _format: str,
) -> Dict[str, Any]:
    """
    Filters the keys specified by ``key_exclude`` for the specified format

    :param key_values: log dictionary to be filtered
    :param key_excluded: keys to be excluded per format
    :param _format: format for which this filter is run
    :return: dict without the excluded keys
    """
    result = {}
    for key, value in key_values.items():
        # A key is dropped only when it has a non-None exclusion entry that
        # names this format.
        excluded = key_excluded.get(key)
        if excluded is not None and _format in excluded:
            continue
        result[key] = value
    return result
class JSONOutputFormat(KVWriter):
    """
    Log to a file, in the JSON format
    :param filename: the file to write the log to
    """
    def __init__(self, filename: str):
        # NOTE(review): opened with the platform default encoding; JSON is
        # conventionally UTF-8 -- confirm before logging non-ASCII text.
        self.file = open(filename, "w")
    def write(
        self,
        key_values: Dict[str, Any],
        key_excluded: Dict[str, Tuple[str, ...]],
        step: int = 0,
    ) -> None:
        def cast_to_json_serializable(value: Any):
            # Rich media values have no JSON representation; reject them.
            if isinstance(value, Video):
                raise FormatUnsupportedError(["json"], "video")
            if isinstance(value, Figure):
                raise FormatUnsupportedError(["json"], "figure")
            if isinstance(value, Image):
                raise FormatUnsupportedError(["json"], "image")
            if isinstance(value, HParam):
                raise FormatUnsupportedError(["json"], "hparam")
            # Anything with a dtype is treated as array-like (numpy/torch).
            if hasattr(value, "dtype"):
                if value.shape == () or len(value) == 1:
                    # if value is a dimensionless numpy array or of length 1, serialize as a float
                    return float(value.item())
                else:
                    # otherwise, a value is a numpy array, serialize as a list or nested lists
                    return value.tolist()
            return value
        key_values = {
            key: cast_to_json_serializable(value)
            for key, value in filter_excluded_keys(
                key_values, key_excluded, "json"
            ).items()
        }
        # One JSON object per call, newline-terminated (JSON Lines layout).
        self.file.write(json.dumps(key_values) + "\n")
        self.file.flush()
    def close(self) -> None:
        """
        closes the file
        """
        self.file.close()
class CSVOutputFormat(KVWriter):
    """
    Log to a file, in a CSV format
    :param filename: the file to write the log to
    """
    def __init__(self, filename: str):
        # "w+t": the file must also be readable because `write` re-reads it
        # to rewrite the header when new columns appear.
        self.file = open(filename, "w+t")
        self.keys: List[str] = []
        self.separator = ","
        self.quotechar = '"'
    def write(
        self,
        key_values: Dict[str, Any],
        key_excluded: Dict[str, Tuple[str, ...]],
        step: int = 0,
    ) -> None:
        # Add our current row to the history
        key_values = filter_excluded_keys(key_values, key_excluded, "csv")
        extra_keys = key_values.keys() - self.keys
        if extra_keys:
            # New columns appeared: rewrite the file with an extended header
            # and pad every previously written row with empty fields.
            self.keys.extend(extra_keys)
            self.file.seek(0)
            lines = self.file.readlines()
            self.file.seek(0)
            for i, key in enumerate(self.keys):
                if i > 0:
                    self.file.write(",")
                self.file.write(key)
            self.file.write("\n")
            for line in lines[1:]:
                self.file.write(line[:-1])
                self.file.write(self.separator * len(extra_keys))
                self.file.write("\n")
        for i, key in enumerate(self.keys):
            if i > 0:
                self.file.write(",")
            value = key_values.get(key)
            if isinstance(value, Video):
                raise FormatUnsupportedError(["csv"], "video")
            elif isinstance(value, Figure):
                raise FormatUnsupportedError(["csv"], "figure")
            elif isinstance(value, Image):
                raise FormatUnsupportedError(["csv"], "image")
            elif isinstance(value, HParam):
                raise FormatUnsupportedError(["csv"], "hparam")
            elif isinstance(value, str):
                # escape quotechars by prepending them with another quotechar
                value = value.replace(
                    self.quotechar, self.quotechar + self.quotechar
                )
                # additionally wrap text with quotechars so that any delimiters in the text are ignored by csv readers
                self.file.write(self.quotechar + value + self.quotechar)
            elif value is not None:
                self.file.write(str(value))
        self.file.write("\n")
        self.file.flush()
    def close(self) -> None:
        """
        closes the file
        """
        self.file.close()
class TensorBoardOutputFormat(KVWriter):
    """
    Dumps key/value pairs into TensorBoard's numeric format.
    :param folder: the folder to write the log to
    """
    def __init__(self, folder: str):
        assert (
            SummaryWriter is not None
        ), "tensorboard is not installed, you can use `pip install tensorboard` to do so"
        self.writer = SummaryWriter(log_dir=folder)
        self._is_closed = False
    def write(
        self,
        key_values: Dict[str, Any],
        key_excluded: Dict[str, Tuple[str, ...]],
        step: int = 0,
    ) -> None:
        assert (
            not self._is_closed
        ), "The SummaryWriter was closed, please re-create one."
        # NOTE: zipping two independently sorted dicts assumes `key_values`
        # and `key_excluded` share exactly the same key set.
        for (key, value), (_, excluded) in zip(
            sorted(key_values.items()), sorted(key_excluded.items())
        ):
            if excluded is not None and "tensorboard" in excluded:
                continue
            # Dispatch each value type to the matching SummaryWriter method.
            if isinstance(value, np.ScalarType):
                if isinstance(value, str):
                    # str is considered a np.ScalarType
                    self.writer.add_text(key, value, step)
                else:
                    self.writer.add_scalar(key, value, step)
            if isinstance(value, th.Tensor):
                self.writer.add_histogram(key, value, step)
            if isinstance(value, Video):
                self.writer.add_video(key, value.frames, step, value.fps)
            if isinstance(value, Figure):
                self.writer.add_figure(
                    key, value.figure, step, close=value.close
                )
            if isinstance(value, Image):
                self.writer.add_image(
                    key, value.image, step, dataformats=value.dataformats
                )
            if isinstance(value, HParam):
                # we don't use `self.writer.add_hparams` to have control over the log_dir
                experiment, session_start_info, session_end_info = hparams(
                    value.hparam_dict, metric_dict=value.metric_dict
                )
                self.writer.file_writer.add_summary(experiment)
                self.writer.file_writer.add_summary(session_start_info)
                self.writer.file_writer.add_summary(session_end_info)
        # Flush the output to the file
        self.writer.flush()
    def close(self) -> None:
        """
        closes the file
        """
        if self.writer:
            self.writer.close()
            self._is_closed = True
def make_output_format(
    _format: str, log_dir: str, log_suffix: str = ""
) -> KVWriter:
    """
    Build the writer for the requested logging format.

    :param _format: one of 'stdout', 'log', 'json', 'csv' or 'tensorboard'
    :param log_dir: the logging directory (created if missing)
    :param log_suffix: the suffix for the log file
    :return: the writer instance
    :raises ValueError: if ``_format`` is not a known format name
    """
    os.makedirs(log_dir, exist_ok=True)
    # Non-file-based writers are handled up front.
    if _format == "stdout":
        return HumanOutputFormat(sys.stdout)
    if _format == "tensorboard":
        return TensorBoardOutputFormat(log_dir)
    # File-based writers: map format name to (writer class, file name).
    file_based = {
        "log": (HumanOutputFormat, f"log{log_suffix}.txt"),
        "json": (JSONOutputFormat, f"progress{log_suffix}.json"),
        "csv": (CSVOutputFormat, f"progress{log_suffix}.csv"),
    }
    if _format not in file_based:
        raise ValueError(f"Unknown format specified: {_format}")
    writer_cls, file_name = file_based[_format]
    return writer_cls(os.path.join(log_dir, file_name))
# ================================================================
# Backend
# ================================================================
class Logger:
    """
    The logger class.

    :param folder: the logging location
    :param output_formats: the list of output formats
    """

    def __init__(self, folder: Optional[str], output_formats: List[KVWriter]):
        self.name_to_value: Dict[str, float] = defaultdict(
            float
        )  # values this iteration
        self.name_to_count: Dict[str, int] = defaultdict(int)
        self.name_to_excluded: Dict[str, Tuple[str, ...]] = {}
        self.level = INFO
        self.dir = folder
        self.output_formats = output_formats
        # History of "mean*" diagnostics accumulated across dump() calls
        # (name -> list of dumped values); see dump() below.
        self.stats = {}

    @staticmethod
    def to_tuple(
        string_or_tuple: Optional[Union[str, Tuple[str, ...]]]
    ) -> Tuple[str, ...]:
        """
        Helper function to convert str to tuple of str.
        """
        if string_or_tuple is None:
            return ("",)
        if isinstance(string_or_tuple, tuple):
            return string_or_tuple
        return (string_or_tuple,)

    def record(
        self,
        key: str,
        value: Any,
        exclude: Optional[Union[str, Tuple[str, ...]]] = None,
    ) -> None:
        """
        Log a value of some diagnostic
        Call this once for each diagnostic quantity, each iteration
        If called many times, last value will be used.

        :param key: save to log this key
        :param value: save to log this value
        :param exclude: outputs to be excluded
        """
        self.name_to_value[key] = value
        self.name_to_excluded[key] = self.to_tuple(exclude)

    def record_mean(
        self,
        key: str,
        value: Optional[float],
        exclude: Optional[Union[str, Tuple[str, ...]]] = None,
    ) -> None:
        """
        The same as record(), but if called many times, values averaged.

        :param key: save to log this key
        :param value: save to log this value
        :param exclude: outputs to be excluded
        """
        if value is None:
            return
        # Incremental (running) mean: new_mean = old_mean*n/(n+1) + value/(n+1).
        old_val, count = self.name_to_value[key], self.name_to_count[key]
        self.name_to_value[key] = old_val * count / (count + 1) + value / (
            count + 1
        )
        self.name_to_count[key] = count + 1
        self.name_to_excluded[key] = self.to_tuple(exclude)

    def dump(self, step: int = 0) -> None:
        """
        Write all of the diagnostics from the current iteration
        """
        if self.level == DISABLED:
            return
        for _format in self.output_formats:
            if isinstance(_format, KVWriter):
                _format.write(self.name_to_value, self.name_to_excluded, step)
        # Accumulate "mean*" diagnostics into self.stats for later retrieval.
        # Fix: guard on `keys` being non-empty — previously `keys[0]` raised
        # IndexError when dump() was called with nothing recorded.
        keys = list(self.name_to_value.keys())
        if keys and keys[0].startswith("mean"):
            values = list(self.name_to_value.values())
            for i in range(len(self.name_to_value)):
                if keys[i] not in self.stats:
                    self.stats[keys[i]] = []
                self.stats[keys[i]].append(values[i])
        self.name_to_value.clear()
        self.name_to_count.clear()
        self.name_to_excluded.clear()

    def log(self, *args, level: int = INFO) -> None:
        """
        Write the sequence of args, with no separators,
        to the console and output files (if you've configured an output file).

        level: int. (see logger.py docs) If the global logger level is higher than
        the level argument here, don't print to stdout.

        :param args: log the arguments
        :param level: the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50)
        """
        if self.level <= level:
            self._do_log(args)

    def debug(self, *args) -> None:
        """
        Write the sequence of args, with no separators,
        to the console and output files (if you've configured an output file).
        Using the DEBUG level.

        :param args: log the arguments
        """
        self.log(*args, level=DEBUG)

    def info(self, *args) -> None:
        """
        Write the sequence of args, with no separators,
        to the console and output files (if you've configured an output file).
        Using the INFO level.

        :param args: log the arguments
        """
        self.log(*args, level=INFO)

    def warn(self, *args) -> None:
        """
        Write the sequence of args, with no separators,
        to the console and output files (if you've configured an output file).
        Using the WARN level.

        :param args: log the arguments
        """
        self.log(*args, level=WARN)

    def error(self, *args) -> None:
        """
        Write the sequence of args, with no separators,
        to the console and output files (if you've configured an output file).
        Using the ERROR level.

        :param args: log the arguments
        """
        self.log(*args, level=ERROR)

    # Configuration
    # ----------------------------------------
    def set_level(self, level: int) -> None:
        """
        Set logging threshold on current logger.

        :param level: the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50)
        """
        self.level = level

    def get_dir(self) -> Optional[str]:
        """
        Get directory that log files are being written to.
        will be None if there is no output directory (i.e., if you didn't call start)

        :return: the logging directory
        """
        return self.dir

    def close(self) -> None:
        """
        closes the file
        """
        for _format in self.output_formats:
            _format.close()

    # Misc
    # ----------------------------------------
    def _do_log(self, args: Tuple[Any, ...]) -> None:
        """
        log to the requested format outputs

        :param args: the arguments to log
        """
        for _format in self.output_formats:
            if isinstance(_format, SeqWriter):
                _format.write_sequence(list(map(str, args)))
def configure(
    folder: Optional[str] = None, format_strings: Optional[List[str]] = None
) -> Logger:
    """
    Configure the current logger.

    :param folder: the save location
        (if None, $SB3_LOGDIR, if still None, tempdir/SB3-[date & time])
    :param format_strings: the output logging format
        (if None, $SB3_LOG_FORMAT, if still None, ['stdout', 'log', 'csv'])
    :return: The logger object.
    """
    if folder is None:
        folder = os.getenv("SB3_LOGDIR")
    if folder is None:
        # Fall back to a timestamped directory under the system temp dir.
        stamp = datetime.datetime.now().strftime("SB3-%Y-%m-%d-%H-%M-%S-%f")
        folder = os.path.join(tempfile.gettempdir(), stamp)
    assert isinstance(folder, str)
    os.makedirs(folder, exist_ok=True)

    log_suffix = ""
    if format_strings is None:
        env_formats = os.getenv("SB3_LOG_FORMAT", "stdout,log,csv")
        format_strings = env_formats.split(",")
    # Drop empty entries (e.g. from trailing commas in the env variable).
    format_strings = [fmt for fmt in format_strings if fmt]
    output_formats = [
        make_output_format(fmt, folder, log_suffix) for fmt in format_strings
    ]

    logger = Logger(folder=folder, output_formats=output_formats)
    # Only print when some files will be saved
    if format_strings and format_strings != ["stdout"]:
        logger.log(f"Logging to {folder}")
    return logger
# ================================================================
# Readers
# ================================================================
def read_json(filename: str) -> pandas.DataFrame:
    """
    read a json file using pandas

    The file is expected to contain one JSON object per line
    (JSON-lines format), as written by the JSON output format.

    :param filename: the file path to read
    :return: the data in the json
    """
    with open(filename) as file_handler:
        records = [json.loads(line) for line in file_handler]
    return pandas.DataFrame(records)
def read_csv(filename: str) -> pandas.DataFrame:
    """
    read a csv file using pandas

    Lines starting with ``#`` are treated as comments and skipped;
    no column is used as the index.

    :param filename: the file path to read
    :return: the data in the csv
    """
    frame = pandas.read_csv(filename, index_col=None, comment="#")
    return frame
| 25,710 | Python | .py | 675 | 29.014815 | 120 | 0.576748 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,074 | imitation_bc.py | JurajZelman_airl-market-making/package_modifications/imitation_bc.py | """Behavioural Cloning (BC).
Trains policy by applying supervised learning to a fixed dataset of (observation,
action) pairs generated by some expert demonstrator.
"""
import dataclasses
import itertools
from typing import (
Any,
Callable,
Iterable,
Iterator,
Mapping,
Optional,
Tuple,
Type,
Union,
)
import gym
import numpy as np
import torch as th
import tqdm
from imitation.algorithms import base as algo_base
from imitation.data import rollout, types
from imitation.policies import base as policy_base
from imitation.util import logger as imit_logger
from imitation.util import util
from stable_baselines3.common import policies, utils, vec_env
@dataclasses.dataclass(frozen=True)
class BatchIteratorWithEpochEndCallback:
    """Loops through batches from a batch loader and calls a callback after every epoch.

    Will throw an exception when an epoch contains no batches.
    """
    # The underlying loader; iterated once per epoch.
    batch_loader: Iterable[algo_base.TransitionMapping]
    # Exactly one of n_epochs / n_batches must be set (enforced below).
    n_epochs: Optional[int]
    n_batches: Optional[int]
    # Called with the (0-based) epoch number at the end of each full epoch.
    on_epoch_end: Optional[Callable[[int], None]]
    def __post_init__(self) -> None:
        # Validate that the caller specified exactly one stopping criterion.
        epochs_and_batches_specified = (
            self.n_epochs is not None and self.n_batches is not None
        )
        neither_epochs_nor_batches_specified = (
            self.n_epochs is None and self.n_batches is None
        )
        if epochs_and_batches_specified or neither_epochs_nor_batches_specified:
            raise ValueError(
                "Must provide exactly one of `n_epochs` and `n_batches` arguments.",
            )
    def __iter__(self) -> Iterator[algo_base.TransitionMapping]:
        def batch_iterator() -> Iterator[algo_base.TransitionMapping]:
            # Note: the islice here ensures we do not exceed self.n_epochs
            # (islice over count() iterates forever when n_epochs is None).
            for epoch_num in itertools.islice(itertools.count(), self.n_epochs):
                some_batch_was_yielded = False
                for batch in self.batch_loader:
                    yield batch
                    some_batch_was_yielded = True
                if not some_batch_was_yielded:
                    raise AssertionError(
                        f"Data loader returned no data during epoch "
                        f"{epoch_num} -- did it reset correctly?",
                    )
                if self.on_epoch_end is not None:
                    self.on_epoch_end(epoch_num)
        # Note: the islice here ensures we do not exceed self.n_batches
        return itertools.islice(batch_iterator(), self.n_batches)
@dataclasses.dataclass(frozen=True)
class BCTrainingMetrics:
    """Container for the different components of behavior cloning loss."""
    # Negative mean log-likelihood of the expert actions under the policy.
    neglogp: th.Tensor
    # Mean policy entropy; None when the distribution has no analytic entropy.
    entropy: Optional[th.Tensor]
    ent_loss: th.Tensor  # set to 0 if entropy is None
    # Mean probability the policy assigns to the expert action.
    prob_true_act: th.Tensor
    # Half the summed squared L2 norm of all policy parameters.
    l2_norm: th.Tensor
    l2_loss: th.Tensor  # l2_weight * l2_norm
    loss: th.Tensor  # total: neglogp + ent_loss + l2_loss
@dataclasses.dataclass(frozen=True)
class BehaviorCloningLossCalculator:
    """Functor to compute the loss used in Behavior Cloning."""
    # Scaling applied to the policy-entropy bonus.
    ent_weight: float
    # Scaling applied to the parameter L2 penalty.
    l2_weight: float
    def __call__(
        self,
        policy: policies.ActorCriticPolicy,
        obs: Union[th.Tensor, np.ndarray],
        acts: Union[th.Tensor, np.ndarray],
    ) -> BCTrainingMetrics:
        """Calculate the supervised learning loss used to train the behavioral clone.

        Args:
            policy: The actor-critic policy whose loss is being computed.
            obs: The observations seen by the expert.
            acts: The actions taken by the expert.

        Returns:
            A BCTrainingMetrics object with the loss and all the components it
            consists of.
        """
        obs = util.safe_to_tensor(obs)
        acts = util.safe_to_tensor(acts)
        _, log_prob, entropy = policy.evaluate_actions(obs, acts)
        prob_true_act = th.exp(log_prob).mean()
        log_prob = log_prob.mean()
        entropy = entropy.mean() if entropy is not None else None
        l2_norms = [th.sum(th.square(w)) for w in policy.parameters()]
        l2_norm = (
            sum(l2_norms) / 2
        )  # divide by 2 to cancel with gradient of square
        # sum of list defaults to float(0) if len == 0.
        assert isinstance(l2_norm, th.Tensor)
        # Entropy enters with a negative sign (we *maximise* entropy);
        # falls back to a zero tensor when entropy is unavailable.
        ent_loss = -self.ent_weight * (
            entropy if entropy is not None else th.zeros(1)
        )
        neglogp = -log_prob
        l2_loss = self.l2_weight * l2_norm
        loss = neglogp + ent_loss + l2_loss
        return BCTrainingMetrics(
            neglogp=neglogp,
            entropy=entropy,
            ent_loss=ent_loss,
            prob_true_act=prob_true_act,
            l2_norm=l2_norm,
            l2_loss=l2_loss,
            loss=loss,
        )
def enumerate_batches(
    batch_it: Iterable[algo_base.TransitionMapping],
) -> Iterable[Tuple[Tuple[int, int, int], algo_base.TransitionMapping]]:
    """Prepend (batch index, batch size, cumulative sample count) to each batch."""
    running_total = 0
    for batch_index, batch in enumerate(batch_it):
        current_size = len(batch["obs"])
        running_total += current_size
        yield (batch_index, current_size, running_total), batch
@dataclasses.dataclass(frozen=True)
class RolloutStatsComputer:
    """Computes statistics about rollouts.

    Args:
        venv: The vectorized environment in which to compute the rollouts.
        n_episodes: The number of episodes to base the statistics on.
    """
    venv: Optional[vec_env.VecEnv]
    n_episodes: int
    # TODO(shwang): Maybe instead use a callback that can be shared between
    #   all algorithms' `.train()` for generating rollout stats.
    #   EvalCallback could be a good fit:
    #   https://stable-baselines3.readthedocs.io/en/master/guide/callbacks.html#evalcallback
    def __call__(
        self,
        policy: policies.ActorCriticPolicy,
        rng: np.random.Generator,
    ) -> Mapping[str, float]:
        # Rollouts are disabled when no env is given or n_episodes <= 0;
        # an empty mapping is returned in that case.
        if self.venv is not None and self.n_episodes > 0:
            trajs = rollout.generate_trajectories(
                policy,
                self.venv,
                rollout.make_min_episodes(self.n_episodes),
                rng=rng,
            )
            return rollout.rollout_stats(trajs)
        else:
            return dict()
class BCLogger:
    """Utility class to help logging information relevant to Behavior Cloning."""
    def __init__(self, logger: imit_logger.HierarchicalLogger):
        """Create new BC logger.

        Args:
            logger: The logger to feed all the information to.
        """
        self._logger = logger
        # Monotonic counter used as the x-axis step for dump().
        self._tensorboard_step = 0
        self._current_epoch = 0
    def reset_tensorboard_steps(self):
        """Reset the step counter so plotting starts again from x=0."""
        self._tensorboard_step = 0
    def log_epoch(self, epoch_number):
        """Record the current epoch number for subsequent log_batch() calls."""
        self._current_epoch = epoch_number
    def log_batch(
        self,
        batch_num: int,
        batch_size: int,
        num_samples_so_far: int,
        training_metrics: BCTrainingMetrics,
        rollout_stats: Mapping[str, float],
    ):
        """Record one batch's training metrics and rollout stats, then dump."""
        self._logger.record("batch_size", batch_size)
        self._logger.record("bc/epoch", self._current_epoch)
        self._logger.record("bc/batch", batch_num)
        self._logger.record("bc/samples_so_far", num_samples_so_far)
        for k, v in training_metrics.__dict__.items():
            # Some metrics (e.g. entropy) may legitimately be None.
            self._logger.record(f"bc/{k}", float(v) if v is not None else None)
        for k, v in rollout_stats.items():
            if "return" in k and "monitor" not in k:
                self._logger.record("rollout/" + k, v)
        self._logger.dump(self._tensorboard_step)
        self._tensorboard_step += 1
    def __getstate__(self):
        # Exclude the hierarchical logger from the pickled state.
        state = self.__dict__.copy()
        del state["_logger"]
        return state
def reconstruct_policy(
    policy_path: str,
    device: Union[th.device, str] = "auto",
) -> policies.ActorCriticPolicy:
    """Reconstruct a saved policy.

    Args:
        policy_path: path where `.save_policy()` has been run.
        device: device on which to load the policy.

    Returns:
        policy: policy with reloaded weights.
    """
    # map_location moves the deserialized tensors onto the requested device.
    policy = th.load(policy_path, map_location=utils.get_device(device))
    assert isinstance(policy, policies.ActorCriticPolicy)
    return policy
class BC(algo_base.DemonstrationAlgorithm):
    """Behavioral cloning (BC).

    Recovers a policy via supervised learning from observation-action pairs.
    """

    def __init__(
        self,
        *,
        observation_space: gym.Space,
        action_space: gym.Space,
        rng: np.random.Generator,
        policy: Optional[policies.ActorCriticPolicy] = None,
        demonstrations: Optional[algo_base.AnyTransitions] = None,
        batch_size: int = 32,
        minibatch_size: Optional[int] = None,
        optimizer_cls: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Mapping[str, Any]] = None,
        ent_weight: float = 1e-3,
        l2_weight: float = 0.0,
        device: Union[str, th.device] = "auto",
        custom_logger: Optional[imit_logger.HierarchicalLogger] = None,
    ):
        """Builds BC.

        Args:
            observation_space: the observation space of the environment.
            action_space: the action space of the environment.
            rng: the random state to use for the random number generator.
            policy: a Stable Baselines3 policy; if unspecified,
                defaults to `FeedForward32Policy`.
            demonstrations: Demonstrations from an expert (optional). Transitions
                expressed directly as a `types.TransitionsMinimal` object, a sequence
                of trajectories, or an iterable of transition batches (mappings from
                keywords to arrays containing observations, etc).
            batch_size: The number of samples in each batch of expert data.
            minibatch_size: size of minibatch to calculate gradients over.
                The gradients are accumulated until `batch_size` examples
                are processed before making an optimization step. This
                is useful in GPU training to reduce memory usage, since
                fewer examples are loaded into memory at once,
                facilitating training with larger batch sizes, but is
                generally slower. Must be a factor of `batch_size`.
                Optional, defaults to `batch_size`.
            optimizer_cls: optimiser to use for supervised training.
            optimizer_kwargs: keyword arguments, excluding learning rate and
                weight decay, for optimiser construction.
            ent_weight: scaling applied to the policy's entropy regularization.
            l2_weight: scaling applied to the policy's L2 regularization.
            device: name/identity of device to place policy on.
            custom_logger: Where to log to; if None (default), creates a new logger.

        Raises:
            ValueError: If `weight_decay` is specified in `optimizer_kwargs` (use the
                parameter `l2_weight` instead), or if the batch size is not a multiple
                of the minibatch size.
        """
        self._demo_data_loader: Optional[
            Iterable[algo_base.TransitionMapping]
        ] = None
        self.batch_size = batch_size
        self.minibatch_size = minibatch_size or batch_size
        if self.batch_size % self.minibatch_size != 0:
            raise ValueError("Batch size must be a multiple of minibatch size.")
        super().__init__(
            demonstrations=demonstrations,
            custom_logger=custom_logger,
        )
        self._bc_logger = BCLogger(self.logger)
        self.action_space = action_space
        self.observation_space = observation_space
        self.rng = rng
        if policy is None:
            policy = policy_base.FeedForward32Policy(
                observation_space=observation_space,
                action_space=action_space,
                # Set lr_schedule to max value to force error if policy.optimizer
                # is used by mistake (should use self.optimizer instead).
                lr_schedule=lambda _: th.finfo(th.float32).max,
            )
        self._policy = policy.to(utils.get_device(device))
        # TODO(adam): make policy mandatory and delete observation/action space params?
        assert self.policy.observation_space == self.observation_space
        assert self.policy.action_space == self.action_space
        if optimizer_kwargs:
            if "weight_decay" in optimizer_kwargs:
                raise ValueError(
                    "Use the parameter l2_weight instead of weight_decay."
                )
        optimizer_kwargs = optimizer_kwargs or {}
        self.optimizer = optimizer_cls(
            self.policy.parameters(),
            **optimizer_kwargs,
        )
        self.loss_calculator = BehaviorCloningLossCalculator(
            ent_weight, l2_weight
        )
        # Custom modification: in-memory record of per-batch training metrics,
        # appended every `log_interval` batches during train().
        self.train_logger = {
            "batch_num": [],
            "minibatch_size": [],
            "num_samples_so_far": [],
            "neglogp": [],
            "entropy": [],
            "ent_loss": [],
            "prob_true_act": [],
            "l2_norm": [],
            "l2_loss": [],
            "loss": [],
            # "rollout_stats": [],
        }

    @property
    def policy(self) -> policies.ActorCriticPolicy:
        return self._policy

    def set_demonstrations(
        self, demonstrations: algo_base.AnyTransitions
    ) -> None:
        self._demo_data_loader = algo_base.make_data_loader(
            demonstrations,
            self.minibatch_size,
        )

    def train(
        self,
        *,
        n_epochs: Optional[int] = None,
        n_batches: Optional[int] = None,
        on_epoch_end: Optional[Callable[[], None]] = None,
        on_batch_end: Optional[Callable[[], None]] = None,
        log_interval: int = 500,
        log_rollouts_venv: Optional[vec_env.VecEnv] = None,
        log_rollouts_n_episodes: int = 5,
        progress_bar: bool = True,
        reset_tensorboard: bool = False,
    ):
        """Train with supervised learning for some number of epochs.

        Here an 'epoch' is just a complete pass through the expert data loader,
        as set by `self.set_expert_data_loader()`. Note, that when you specify
        `n_batches` smaller than the number of batches in an epoch, the `on_epoch_end`
        callback will never be called.

        Args:
            n_epochs: Number of complete passes made through expert data before ending
                training. Provide exactly one of `n_epochs` and `n_batches`.
            n_batches: Number of batches loaded from dataset before ending training.
                Provide exactly one of `n_epochs` and `n_batches`.
            on_epoch_end: Optional callback with no parameters to run at the end of each
                epoch.
            on_batch_end: Optional callback with no parameters to run at the end of each
                batch.
            log_interval: Log stats after every log_interval batches.
            log_rollouts_venv: If not None, then this VecEnv (whose observation and
                actions spaces must match `self.observation_space` and
                `self.action_space`) is used to generate rollout stats, including
                average return and average episode length. If None, then no rollouts
                are generated.
            log_rollouts_n_episodes: Number of rollouts to generate when calculating
                rollout stats. Non-positive number disables rollouts.
            progress_bar: If True, then show a progress bar during training.
            reset_tensorboard: If True, then start plotting to Tensorboard from x=0
                even if `.train()` logged to Tensorboard previously. Has no practical
                effect if `.train()` is being called for the first time.
        """
        if reset_tensorboard:
            self._bc_logger.reset_tensorboard_steps()
        self._bc_logger.log_epoch(0)
        compute_rollout_stats = RolloutStatsComputer(
            log_rollouts_venv,
            log_rollouts_n_episodes,
        )

        def _on_epoch_end(epoch_number: int):
            if tqdm_progress_bar is not None:
                total_num_epochs_str = (
                    f"of {n_epochs}" if n_epochs is not None else ""
                )
                tqdm_progress_bar.display(
                    f"Epoch {epoch_number} {total_num_epochs_str}",
                    pos=1,
                )
            self._bc_logger.log_epoch(epoch_number + 1)
            if on_epoch_end is not None:
                on_epoch_end()

        # The data loader yields minibatches; convert the batch budget into a
        # minibatch budget for the iterator below.
        mini_per_batch = self.batch_size // self.minibatch_size
        n_minibatches = (
            n_batches * mini_per_batch if n_batches is not None else None
        )
        assert self._demo_data_loader is not None
        demonstration_batches = BatchIteratorWithEpochEndCallback(
            self._demo_data_loader,
            n_epochs,
            n_minibatches,
            _on_epoch_end,
        )
        batches_with_stats = enumerate_batches(demonstration_batches)
        tqdm_progress_bar: Optional[tqdm.tqdm] = None
        if progress_bar:
            batches_with_stats = tqdm.tqdm(
                batches_with_stats,
                unit="batch",
                total=n_minibatches,
            )
            tqdm_progress_bar = batches_with_stats

        def process_batch():
            # Apply the gradients accumulated over the full batch.
            self.optimizer.step()
            self.optimizer.zero_grad()
            if batch_num % log_interval == 0:
                rollout_stats = compute_rollout_stats(self.policy, self.rng)
                # TODO: Disable logging
                # self._bc_logger.log_batch(
                #     batch_num,
                #     minibatch_size,
                #     num_samples_so_far,
                #     training_metrics,
                #     rollout_stats,
                # )
                # Custom modification: record metrics in self.train_logger.
                self.train_logger["batch_num"].append(batch_num)
                self.train_logger["minibatch_size"].append(minibatch_size)
                self.train_logger["num_samples_so_far"].append(
                    num_samples_so_far
                )
                self.train_logger["neglogp"].append(
                    training_metrics.neglogp.item()
                )
                # Fix: entropy may be None when the policy's action
                # distribution has no analytic entropy (see
                # BehaviorCloningLossCalculator); calling .item() on it
                # unconditionally crashed with AttributeError.
                entropy = training_metrics.entropy
                self.train_logger["entropy"].append(
                    entropy.item() if entropy is not None else None
                )
                self.train_logger["ent_loss"].append(
                    training_metrics.ent_loss.item()
                )
                self.train_logger["prob_true_act"].append(
                    training_metrics.prob_true_act.item()
                )
                self.train_logger["l2_norm"].append(
                    training_metrics.l2_norm.item()
                )
                self.train_logger["l2_loss"].append(
                    training_metrics.l2_loss.item()
                )
                self.train_logger["loss"].append(training_metrics.loss.item())
                # self.train_logger["rollout_stats"].append(rollout_stats)
            if on_batch_end is not None:
                on_batch_end()

        self.optimizer.zero_grad()
        for (
            batch_num,
            minibatch_size,
            num_samples_so_far,
        ), batch in batches_with_stats:
            obs = th.as_tensor(batch["obs"], device=self.policy.device).detach()
            acts = th.as_tensor(
                batch["acts"], device=self.policy.device
            ).detach()
            training_metrics = self.loss_calculator(self.policy, obs, acts)
            # Renormalise the loss to be averaged over the whole
            # batch size instead of the minibatch size.
            # If there is an incomplete batch, its gradients will be
            # smaller, which may be helpful for stability.
            loss = training_metrics.loss * minibatch_size / self.batch_size
            loss.backward()
            batch_num = batch_num * self.minibatch_size // self.batch_size
            if num_samples_so_far % self.batch_size == 0:
                process_batch()
        if num_samples_so_far % self.batch_size != 0:
            # if there remains an incomplete batch
            batch_num += 1
            process_batch()

    def save_policy(self, policy_path: types.AnyPath) -> None:
        """Save policy to a path. Can be reloaded by `.reconstruct_policy()`.

        Args:
            policy_path: path to save policy to.
        """
        th.save(self.policy, util.parse_path(policy_path))
| 20,732 | Python | .py | 476 | 32.798319 | 92 | 0.603667 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,075 | order_book_data_analysis.ipynb | JurajZelman_airl-market-making/data_processing/order_book_data_analysis.ipynb | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exploratory analysis of lob data\n",
"\n",
"In this notebook I explore the datasets and plot some of the data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# TODO:\n",
"# Plot the order book heatmap\n",
"# Auto-correlation analysis?"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import datetime\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.dates as mdates\n",
"import numpy as np\n",
"import pandas as pd\n",
"import polars as pl\n",
"\n",
"from data.utils import get_list_of_dates_between, set_plot_style, ensure_dir_exists"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pl.enable_string_cache(True)\n",
"set_plot_style()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Indicate whether to save figures\n",
"save_fig = False\n",
"\n",
"# Set path for figures saving\n",
"FIGURES_PATH = \"/home/juraj/Projects/thesis-market-making/thesis/images\"\n",
"ensure_dir_exists(FIGURES_PATH)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Define custom colors\n",
"color_green = \"#13961a\"\n",
"color_red = \"#eb5c14\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# BTC\n",
"# exchange = \"BINANCE\"\n",
"# symbol = \"BTC-USDT\"\n",
"\n",
"# SOL\n",
"# exchange = \"BINANCE\"\n",
"# exchange = \"OKX\"\n",
"# exchange = \"GATEIO\"\n",
"exchange = \"BIT.COM\"\n",
"symbol = \"SOL-USDT\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load all the data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set parameters\n",
"start_date = datetime.datetime(2023, 9, 1)\n",
"end_date = datetime.datetime(2023, 9, 13)\n",
"path = os.path.join(os.getcwd(), \"datasets\")\n",
"second = False"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Get the list of dates\n",
"dates = get_list_of_dates_between(start_date, end_date)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load the data\n",
"prefix = \"order_book\"\n",
"for date in dates:\n",
" file_name = f\"{exchange}_{symbol}_{prefix}_{date.strftime('%Y_%m_%d')}.parquet\"\n",
" df_single = pd.read_parquet(os.path.join(path, file_name))\n",
" if date == start_date:\n",
" df = df_single\n",
" else:\n",
" df = pd.concat([df, df_single])\n",
" \n",
"df.sort_index(inplace=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Analysis"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df[\"mid_price\"] = (df[\"bid_0_price\"] + df[\"ask_0_price\"]) / 2\n",
"\n",
"for i in range(3):\n",
" df[f\"spread_{i}\"] = df[f\"ask_{i}_price\"] - df[f\"bid_{i}_price\"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Mid-price and returns analysis"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Plot the mid-price evolution\n",
"plt.figure(figsize=(12, 4.5))\n",
"plt.plot(df[\"mid_price\"])\n",
"plt.xlabel(\"Time\")\n",
"plt.ylabel(\"Price (USDT)\")\n",
"plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))\n",
"plt.tight_layout()\n",
"# plt.show()\n",
"if save_fig:\n",
" plt.savefig(os.path.join(FIGURES_PATH, f\"{symbol}_mid_price.pdf\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df[\"mid_price\"].describe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# plt.figure(figsize=(12, 4))\n",
"# plt.plot(df[\"mid_price\"].pct_change())\n",
"# plt.xlabel(\"Time\")\n",
"# plt.ylabel(\"Returns\")\n",
"# plt.tight_layout()\n",
"# plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Merge the above two plots into one figure with two subplots\n",
"# fig, axs = plt.subplots(2, 1, figsize=(12, 8), sharex=False)\n",
"# axs[0].plot(df[\"mid_price\"])\n",
"# axs[0].set_ylabel(\"Price\")\n",
"# axs[1].plot(df[\"mid_price\"].diff())\n",
"# axs[1].set_ylabel(\"Returns\")\n",
"# plt.xlabel(\"Time\")\n",
"# plt.tight_layout()\n",
"# plt.show()\n",
"\n",
"# # Save the figure\n",
"# if save_fig:\n",
"# fig.savefig(f\"{FIGURES_PATH}/mid_price.pdf\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Plot the histogram of the mid-price returns\n",
"# fig = plt.figure(figsize=(12, 4))\n",
"# plt.hist(df[\"mid_price\"].diff(), bins=100, edgecolor=\"black\", log=False)\n",
"# plt.xlabel(\"Returns\")\n",
"# plt.ylabel(\"Frequency (log scale)\")\n",
"# plt.tight_layout()\n",
"# plt.show()\n",
"\n",
"# # Save the figure\n",
"# if save_fig:\n",
"# fig.savefig(f\"{FIGURES_PATH}/mid_price_returns_hist.pdf\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Print the summary statistics of the mid-price returns\n",
"# print(df[\"mid_price\"].diff().describe())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Spread analysis"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Create a grid of subplots\n",
"fig, axs = plt.subplots(3, 1, figsize=(12, 12), sharey=False)\n",
"\n",
"# Plot the bid-ask spread evolution for each level\n",
"for i in range(3):\n",
" axs[i].plot(df[f\"spread_{i}\"])\n",
" axs[i].set_xlabel(\"Time\")\n",
" axs[i].set_ylabel(f\"Spread on level {i+1}\")\n",
"\n",
"plt.tight_layout()\n",
"plt.show()\n",
"\n",
"# Save the figure\n",
"if save_fig:\n",
" fig.savefig(f\"{FIGURES_PATH}/{exchange}_{symbol}_lob_spreads.pdf\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Describe the spread\n",
"for i in range(3):\n",
" print(f\"Spread on level {i+1}\")\n",
" print(df[f\"spread_{i}\"].describe())\n",
" print()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prices on different levels"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Visualize bid price for each level\n",
"# for level in range(20):\n",
"# fig = plt.figure(figsize=(12, 4))\n",
"# plt.plot(df[f\"bid_{level}_price\"])\n",
"# plt.xlabel(\"Time\")\n",
"# plt.ylabel(f\"Bid price for level {level}\")\n",
"# plt.tight_layout()\n",
"# plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Visualize ask price for each level\n",
"# for level in range(20):\n",
"# fig = plt.figure(figsize=(12, 4))\n",
"# plt.plot(df[f\"ask_{level}_price\"])\n",
"# plt.xlabel(\"Time\")\n",
"# plt.ylabel(f\"Ask price for level {level}\")\n",
"# plt.tight_layout()\n",
"# plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Best bid and ask volume analysis"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Plot the best bid volumes\n",
"# plt.figure(figsize=(12, 4))\n",
"# plt.plot(df[\"bid_0_size\"], color=color_green)\n",
"# plt.xlabel(\"Time\")\n",
"# plt.ylabel(\"Volume\")\n",
"# plt.tight_layout()\n",
"# plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# df[\"bid_0_size\"].describe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Plot the best ask volumes\n",
"# plt.figure(figsize=(12, 4))\n",
"# plt.plot(df[\"ask_0_size\"], color=color_red)\n",
"# plt.xlabel(\"Time\")\n",
"# plt.ylabel(\"Volume\")\n",
"# plt.tight_layout()\n",
"# plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# df[\"ask_0_size\"].describe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
    "# Create plot with three subfigures with the best bid volumes\n",
"fig, axs = plt.subplots(3, 1, figsize=(12, 12), sharey=True)\n",
"for i in range(3):\n",
" axs[i].plot(df[f\"bid_{i}_size\"], color=color_green)\n",
" axs[i].set_ylabel(f\"Level {i+1} volume\")\n",
" axs[i].set_xlabel(\"Time\")\n",
"\n",
"plt.tight_layout()\n",
"plt.show()\n",
"\n",
"# Save the figure\n",
"if save_fig:\n",
" fig.savefig(f\"{FIGURES_PATH}/{exchange}_{symbol}_lob_bid_volumes.pdf\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
    "for i in range(3):\n",
    "    print(f\"Level {i+1} volume statistics\")\n",
" # Show descriptive statistics in non-scientific notation\n",
" pd.options.display.float_format = '{:.3f}'.format\n",
" print(df[f\"bid_{i}_size\"].describe())\n",
" print()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
    "# Create plot with three subfigures containing histograms of the best bid volumes\n",
"fig, axs = plt.subplots(3, 1, figsize=(12, 12))\n",
"for i in range(3):\n",
" axs[i].hist(df[f\"bid_{i}_size\"], bins=100, edgecolor=\"black\", log=True, color=color_green, linewidth=0.3)\n",
" axs[i].set_ylabel(f\"Level {i+1} volume\")\n",
" axs[i].set_xlabel(\"Volume\")\n",
"\n",
"# Compute max volume for each level\n",
"max_volumes = [df[f\"bid_{i}_size\"].max() for i in range(3)]\n",
"max_volume = max(max_volumes)\n",
"\n",
"# Set the same x-axis and bins for all subplots\n",
"for i in range(3):\n",
" axs[i].set_xlim(0, max_volume)\n",
" \n",
"plt.tight_layout()\n",
"plt.show()\n",
"\n",
"# Save the figure\n",
"if save_fig:\n",
" fig.savefig(f\"{FIGURES_PATH}/{exchange}_{symbol}_lob_bid_volumes_hist.pdf\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
    "# Create plot with three subfigures with the best ask volumes\n",
"fig, axs = plt.subplots(3, 1, figsize=(12, 12))\n",
"for i in range(3):\n",
" axs[i].plot(df[f\"ask_{i}_size\"], color=color_red)\n",
" axs[i].set_ylabel(f\"Level {i+1} volume\")\n",
" axs[i].set_xlabel(\"Time\")\n",
"\n",
"plt.tight_layout()\n",
"plt.show()\n",
"\n",
"# Save the figure\n",
"if save_fig:\n",
" fig.savefig(f\"{FIGURES_PATH}/{exchange}_{symbol}_lob_ask_volumes.pdf\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
    "for i in range(3):\n",
    "    print(f\"Level {i+1} volume statistics\")\n",
" print(df[f\"ask_{i}_size\"].describe())\n",
" print()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
    "# Create plot with three subfigures containing histograms of the best ask volumes\n",
"fig, axs = plt.subplots(3, 1, figsize=(12, 12), sharey=True)\n",
"for i in range(3):\n",
" axs[i].hist(df[f\"ask_{i}_size\"], bins=100, edgecolor=\"black\", log=True, color=color_red)\n",
" axs[i].set_ylabel(f\"Level {i+1} volume\")\n",
" axs[i].set_xlabel(\"Volume\")\n",
"\n",
"# Compute max volume for each level\n",
"max_volumes = [df[f\"ask_{i}_size\"].max() for i in range(3)]\n",
"max_volume = max(max_volumes)\n",
"\n",
"# Set the same x-axis and bins for all subplots\n",
"for i in range(3):\n",
" axs[i].set_xlim(0, max_volume)\n",
" \n",
"plt.tight_layout()\n",
"plt.show()\n",
"\n",
"# Save the figure\n",
"if save_fig:\n",
" fig.savefig(f\"{FIGURES_PATH}/{exchange}_{symbol}_lob_ask_volumes_hist.pdf\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Order book imbalance analysis"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Compute the total volume at each level\n",
"df[\"bid_total_volume\"] = 0\n",
"df[\"ask_total_volume\"] = 0\n",
"for i in range(20):\n",
" temp_bid_size = df[f\"bid_{i}_size\"]\n",
" temp_ask_size = df[f\"ask_{i}_size\"]\n",
" temp_bid_size = temp_bid_size.fillna(0)\n",
" temp_ask_size = temp_ask_size.fillna(0)\n",
" df[\"bid_total_volume\"] += temp_bid_size\n",
" df[\"ask_total_volume\"] += temp_ask_size\n",
"\n",
"df[\"imbalance\"] = (df[\"bid_total_volume\"] - df[\"ask_total_volume\"]) / (df[\"bid_total_volume\"] + df[\"ask_total_volume\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df[\"imbalance\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Plot the imbalance evolution\n",
"ts_start = datetime.datetime(2023, 9, 1, 9, 0, 0)\n",
"ts_end = datetime.datetime(2023, 9, 1, 12, 0, 0)\n",
"\n",
"fig = plt.figure(figsize=(12, 4))\n",
"# plt.plot(df[\"imbalance\"][start_index:max_index], color=\"black\")\n",
"plt.plot(df[\"imbalance\"][ts_start:ts_end], color=\"black\")\n",
"# Show only hours and minutes in the x-axis\n",
"plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(\"%H:%M\"))\n",
"plt.xlabel(\"Time\")\n",
"plt.ylabel(\"Volume imbalance\")\n",
"plt.tight_layout()\n",
"plt.show()\n",
"\n",
"# Save the figure\n",
"if save_fig:\n",
" fig.savefig(f\"{FIGURES_PATH}/{exchange}_{symbol}_lob_volume_imbalance.pdf\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Compute the imbalance signal for each level\n",
"for i in range(20):\n",
" df[f\"imbalance_{i}\"] = (df[f\"bid_{i}_size\"] - df[f\"ask_{i}_size\"]) / (df[f\"bid_{i}_size\"] + df[f\"ask_{i}_size\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Plot the imbalance signal for top 5 levels\n",
"ts_start = datetime.datetime(2023, 9, 1, 9, 0, 0)\n",
"ts_end = datetime.datetime(2023, 9, 1, 12, 0, 0)\n",
"\n",
"fig, axs = plt.subplots(3, 1, figsize=(12, 12), sharey=True)\n",
"\n",
"for i in range(3):\n",
" axs[i].plot(df[f\"imbalance_{i}\"][ts_start:ts_end], color=\"black\")\n",
" axs[i].set_ylabel(f\"Level {i+1} imbalance\")\n",
" axs[i].xaxis.set_major_formatter(mdates.DateFormatter(\"%H:%M\"))\n",
" axs[i].set_xlabel(\"Time\")\n",
"\n",
"plt.tight_layout()\n",
"plt.show()\n",
"\n",
"# Save the figure\n",
"if save_fig:\n",
" fig.savefig(f\"{FIGURES_PATH}/{exchange}_{symbol}_lob_level_imbalance.pdf\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Orderbook snapshots"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Get the timestamps\n",
"all_timestamps = df.index"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Find the first index that is larger than the given timestamp\n",
"def find_first_index_larger_than(timestamp):\n",
" for i, ts in enumerate(all_timestamps):\n",
" if ts > timestamp:\n",
" return i"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"index = find_first_index_larger_than(datetime.datetime(2023, 9, 9, 12, 4, 46))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"depth = 15 if exchange == \"BINANCE\" else 6\n",
"# index_start = 12450\n",
"# index_end = index_start + 1\n",
"index_start = index\n",
"index_end = index_start + 1\n",
"\n",
"for i in range(index_start, index_end):\n",
" ts = all_timestamps[i]\n",
" bid_prices_labels = [f\"bid_{i}_price\" for i in range(depth)]\n",
" ask_prices_labels = [f\"ask_{i}_price\" for i in range(depth)]\n",
" bid_sizes_labels = [f\"bid_{i}_size\" for i in range(depth)]\n",
" ask_sizes_labels = [f\"ask_{i}_size\" for i in range(depth)]\n",
"\n",
" # Process for one timestamp\n",
" row = df.loc[ts]\n",
" bid_prices = row[bid_prices_labels].to_numpy().flatten()\n",
" ask_prices = row[ask_prices_labels].to_numpy().flatten()\n",
" bid_volumes = row[bid_sizes_labels].to_numpy().cumsum()\n",
" ask_volumes = row[ask_sizes_labels].to_numpy().cumsum()\n",
" \n",
" # Visualization for trading rules\n",
" # bid_prices = np.insert(bid_prices, 1, 19.54)\n",
" # bid_volumes = np.insert(bid_volumes, 1, 0)\n",
" # bid_volumes[0] = 50\n",
" # ask_volumes[0] = 50\n",
" # print(bid_prices)\n",
" # print(bid_volumes)\n",
" \n",
" # X-axis\n",
" spread_space = 1\n",
" x_axis = np.arange(0, 2 * depth + spread_space, 1)\n",
" # Visualization for trading rules\n",
" # spread_space = 2 # Number of ticks to leave in the middle\n",
" # x_axis = np.arange(0, 2 * depth + spread_space + 1, 1)\n",
" \n",
" fig = plt.figure(figsize=(12, 5))\n",
" \n",
" plt.bar(\n",
" # x_axis[:depth + 1], # Visualization for trading rules\n",
" x_axis[:depth],\n",
" bid_volumes[::-1],\n",
" label=\"Bid\",\n",
" color=\"#9ED166\",\n",
" width=1,\n",
" edgecolor=\"black\",\n",
" linewidth=1.3,\n",
" )\n",
" plt.bar(\n",
" # x_axis[depth + 1 + spread_space:], # Visualization for trading rules\n",
" x_axis[depth + spread_space:], \n",
" ask_volumes,\n",
" label=\"Ask\",\n",
" color=\"#EB735F\",\n",
" width=1,\n",
" edgecolor=\"black\",\n",
" linewidth=1.3,\n",
" )\n",
" x_ticks = np.append(bid_prices[::-1], ask_prices)\n",
" x_ticks = np.insert(x_ticks, depth, \"\")\n",
" \n",
" # Visualization for trading rules\n",
" # x_ticks = np.insert(x_ticks, depth + 1, \"19.56\")\n",
" # x_ticks = np.insert(x_ticks, depth + 2, \"19.57\")\n",
" # print(x_ticks)\n",
" \n",
" plt.xticks(x_axis, x_ticks, rotation=45, size=12)\n",
" \n",
" # plt.title(f\"Order book at {ts.strftime('%Y-%m-%d %H:%M:%S')}\")\n",
" plt.xlabel(\"Price\")\n",
" plt.ylabel(\"Volume\")\n",
" plt.tight_layout()\n",
" plt.show()\n",
" \n",
" # Save the figure\n",
" if save_fig:\n",
" ts_str = ts.strftime(\"%Y_%m_%d_%H_%M_%S\")\n",
" fig.savefig(f\"{FIGURES_PATH}/{exchange}_{symbol}_lob_{ts_str}.pdf\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Orderbook heatmap"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"col_prices = [f\"bid_{i}_price\" for i in range(20)] + [f\"ask_{i}_price\" for i in range(20)]\n",
"col_volumes = [f\"bid_{i}_size\" for i in range(20)] + [f\"ask_{i}_size\" for i in range(20)]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"row = df.iloc[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Plot the limit order book heatmap\n",
"plt.figure(figsize=(10, 4))\n",
"row = df.iloc[0]\n",
"ts = row[\"received_time\"]\n",
"prices = row[col_prices].values\n",
"volumes = row[col_volumes].values\n",
"\n",
"plt.scatter(ts, prices, c=\"black\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import pandas as pd\n",
"\n",
"# Sample data\n",
"data = {\n",
" 'Timestamp': pd.to_datetime(['2023-01-01 10:00:00', '2023-01-01 10:01:00', '2023-01-01 10:02:00']),\n",
" 'Price': [100, 101, 99],\n",
" 'Volume': [50, 30, 20],\n",
"}\n",
"\n",
"df = pd.DataFrame(data)\n",
"\n",
"# Create a scatter plot\n",
"plt.figure(figsize=(10, 6))\n",
"\n",
"# Plot each data point with a color representing volume\n",
"for i in range(len(df)):\n",
" plt.scatter(df['Timestamp'][i], df['Price'][i], s=df['Volume'][i], c=np.random.rand(3,))\n",
"\n",
"# Set axis labels and title\n",
"plt.xlabel('Timestamp')\n",
"plt.ylabel('Price')\n",
"plt.title('Limit Order Book')\n",
"\n",
"# Show the plot\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"\n",
"# Sample data\n",
"timestamps = ['10:00', '10:01', '10:02', '10:03']\n",
"bid_prices = [[100, 99, 98, 97, 96, 95, 94, 93, 92, 91],\n",
" [101, 100, 99, 98, 97, 96, 95, 94, 93, 92],\n",
" [102, 101, 100, 99, 98, 97, 96, 95, 94, 93],\n",
" [103, 102, 101, 100, 99, 98, 97, 96, 95, 94]]\n",
"ask_prices = [[105, 106, 107, 108, 109, 110, 111, 112, 113, 114],\n",
" [104, 105, 106, 107, 108, 109, 110, 111, 112, 113],\n",
" [103, 104, 105, 106, 107, 108, 109, 110, 111, 112],\n",
" [102, 103, 104, 105, 106, 107, 108, 109, 110, 111]]\n",
"bid_volumes = [[10, 15, 8, 5, 12, 7, 10, 6, 8, 14],\n",
" [8, 10, 12, 15, 7, 9, 11, 13, 6, 10],\n",
" [14, 7, 10, 12, 8, 15, 9, 11, 13, 6],\n",
" [9, 12, 8, 14, 10, 11, 7, 13, 6, 15]]\n",
"ask_volumes = [[5, 10, 7, 12, 9, 14, 8, 11, 6, 13],\n",
" [12, 8, 15, 7, 11, 10, 9, 13, 6, 14],\n",
" [10, 13, 6, 11, 14, 8, 9, 7, 12, 15],\n",
" [11, 7, 13, 10, 9, 12, 8, 14, 6, 15]]\n",
"\n",
"# Plotting\n",
"fig, ax = plt.subplots(figsize=(10, 6))\n",
"\n",
"for i in range(len(timestamps)):\n",
" # Plotting bid prices and volumes\n",
" ax.scatter([i]*len(bid_prices[i]), bid_prices[i], s=bid_volumes[i], c='b', label='Bid', alpha=0.5)\n",
"\n",
" # Plotting ask prices and volumes\n",
" ax.scatter([i]*len(ask_prices[i]), ask_prices[i], s=ask_volumes[i], c='r', label='Ask', alpha=0.5)\n",
"\n",
"ax.set_xticks(range(len(timestamps)))\n",
"ax.set_xticklabels(timestamps)\n",
"ax.set_xlabel('Timestamp')\n",
"ax.set_ylabel('Price')\n",
"ax.legend()\n",
"plt.title('Limit Order Book Visualization')\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"\n",
"# Sample data\n",
"timestamps = ['10:00', '10:01', '10:02', '10:03']\n",
"bid_prices = [[100, 99, 98, 97, 96, 95, 94, 93, 92, 91],\n",
" [101, 100, 99, 98, 97, 96, 95, 94, 93, 92],\n",
" [102, 101, 100, 99, 98, 97, 96, 95, 94, 93],\n",
" [103, 102, 101, 100, 99, 98, 97, 96, 95, 94]]\n",
"ask_prices = [[105, 106, 107, 108, 109, 110, 111, 112, 113, 114],\n",
" [104, 105, 106, 107, 108, 109, 110, 111, 112, 113],\n",
" [103, 104, 105, 106, 107, 108, 109, 110, 111, 112],\n",
" [102, 103, 104, 105, 106, 107, 108, 109, 110, 111]]\n",
"bid_volumes = [[10, 15, 8, 5, 12, 7, 10, 6, 8, 14],\n",
" [8, 10, 12, 15, 7, 9, 11, 13, 6, 10],\n",
" [14, 7, 10, 12, 8, 15, 9, 11, 13, 6],\n",
" [9, 12, 8, 14, 10, 11, 7, 13, 6, 15]]\n",
"ask_volumes = [[5, 10, 7, 12, 9, 14, 8, 11, 6, 13],\n",
" [12, 8, 15, 7, 11, 10, 9, 13, 6, 14],\n",
" [10, 13, 6, 11, 14, 8, 9, 7, 12, 15],\n",
" [11, 7, 13, 10, 9, 12, 8, 14, 6, 15]]\n",
"\n",
"# Plotting\n",
"fig, ax = plt.subplots(figsize=(10, 6))\n",
"\n",
"for i in range(len(timestamps)):\n",
" # Set color based on volume using the viridis colormap\n",
" bid_colors = plt.cm.viridis(np.array(bid_volumes[i]) / max(bid_volumes[i]))\n",
" ask_colors = plt.cm.viridis(np.array(ask_volumes[i]) / max(ask_volumes[i]))\n",
"\n",
" # Plotting bid prices and volumes with color\n",
" ax.scatter([i]*len(bid_prices[i]), bid_prices[i], c=bid_colors, label='Bid', alpha=0.8)\n",
"\n",
" # Plotting ask prices and volumes with color\n",
" ax.scatter([i]*len(ask_prices[i]), ask_prices[i], c=ask_colors, label='Ask', alpha=0.8)\n",
"\n",
"ax.set_xticks(range(len(timestamps)))\n",
"ax.set_xticklabels(timestamps)\n",
"ax.set_xlabel('Timestamp')\n",
"ax.set_ylabel('Price')\n",
"ax.legend()\n",
"plt.title('Limit Order Book Visualization with Volume-based Color')\n",
"plt.show()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"\n",
"# Sample data\n",
"timestamps = ['10:00', '10:01', '10:02', '10:03']\n",
"bid_prices = [[100, 99, 98, 97, 96, 95, 94, 93, 92, 91],\n",
" [101, 100, 99, 98, 97, 96, 95, 94, 93, 92],\n",
" [102, 101, 100, 99, 98, 97, 96, 95, 94, 93],\n",
" [103, 102, 101, 100, 99, 98, 97, 96, 95, 94]]\n",
"ask_prices = [[105, 106, 107, 108, 109, 110, 111, 112, 113, 114],\n",
" [104, 105, 106, 107, 108, 109, 110, 111, 112, 113],\n",
" [103, 104, 105, 106, 107, 108, 109, 110, 111, 112],\n",
" [102, 103, 104, 105, 106, 107, 108, 109, 110, 111]]\n",
"bid_volumes = [[10, 15, 8, 5, 12, 7, 10, 6, 8, 14],\n",
" [8, 10, 12, 15, 7, 9, 11, 13, 6, 10],\n",
" [14, 7, 10, 12, 8, 15, 9, 11, 13, 6],\n",
" [9, 12, 8, 14, 10, 11, 7, 13, 6, 15]]\n",
"ask_volumes = [[5, 10, 7, 12, 9, 14, 8, 11, 6, 13],\n",
" [12, 8, 15, 7, 11, 10, 9, 13, 6, 14],\n",
" [10, 13, 6, 11, 14, 8, 9, 7, 12, 15],\n",
" [11, 7, 13, 10, 9, 12, 8, 14, 6, 15]]\n",
"\n",
"# Plotting\n",
"fig, ax = plt.subplots(figsize=(10, 6))\n",
"\n",
"timestamps = df[\"received_time\"][:10]\n",
"max_volume = 0\n",
"for i in range(1):\n",
" max_volume = max(\n",
" df[f\"bid_{i}_size\"].max(), df[f\"ask_{i}_size\"].max(), max_volume\n",
" )\n",
" \n",
"for i in range(len(timestamps)):\n",
" row = df.iloc[i]\n",
" bid_prices = list(row[col_prices].values)\n",
" bid_volumes = list(row[col_volumes].values)\n",
" ask_prices = list(row[col_prices].values)\n",
" ask_volumes = list(row[col_volumes].values)\n",
" \n",
" # Set color based on volume using the viridis colormap\n",
" bid_colors = plt.cm.viridis(np.array(bid_volumes) / max_volume)\n",
" ask_colors = plt.cm.viridis(np.array(ask_volumes) / max_volume)\n",
"\n",
" # Plotting bid prices and volumes with color\n",
" ax.scatter([i]*len(bid_prices), bid_prices, c=bid_colors, label='Bid', alpha=0.8)\n",
"\n",
" # Plotting ask prices and volumes with color\n",
" ax.scatter([i]*len(ask_prices), ask_prices, c=ask_colors, label='Ask', alpha=0.8)\n",
"\n",
"ax.set_xticks(range(len(timestamps)))\n",
"# ax.set_xticklabels(timestamps)\n",
"ax.set_xlabel('Timestamp')\n",
"ax.set_ylabel('Price')\n",
"# ax.legend()\n",
"plt.title('Limit Order Book Visualization with Volume-based Color')\n",
"plt.show()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"bid_colors"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Generate random sequence number for each snapshot"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# BTC\n",
"# exchange = \"BINANCE\"\n",
"# symbol = \"BTC-USDT\"\n",
"\n",
"# SOL\n",
"# exchange = \"BINANCE\"\n",
"exchange = \"OKX\"\n",
"# exchange = \"GATEIO\"\n",
"# exchange = \"BIT.COM\"\n",
"symbol = \"SOL-USDT\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set parameters\n",
"start_date = datetime.datetime(2023, 9, 1)\n",
"end_date = datetime.datetime(2023, 9, 13)\n",
"path = os.path.join(os.getcwd(), \"datasets\")\n",
"second = False"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Get the list of dates\n",
"dates = get_list_of_dates_between(start_date, end_date)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Load the data\n",
"# prefix = \"order_book\"\n",
"# for date in dates:\n",
"# file_name = f\"{exchange}_{symbol}_{prefix}_{date.strftime('%Y_%m_%d')}.parquet\"\n",
"# df = pd.read_parquet(os.path.join(path, file_name))\n",
" \n",
"# # Generate random sequence numbers\n",
"# df[\"sequence_number\"] = np.random.randint(10000000, 100000000, df.shape[0])\n",
"# df.to_parquet(os.path.join(path, file_name))\n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load a single day of data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set parameters\n",
"date = datetime.datetime(2023, 9, 1)\n",
"path = os.path.join(os.getcwd(), \"datasets\")\n",
"second = False"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load the data\n",
"prefix = \"order_book_second\" if second else \"order_book\"\n",
"file_name = f\"{exchange}_{symbol}_{prefix}_{date.strftime('%Y_%m_%d')}.parquet\"\n",
"df = pl.read_parquet(os.path.join(path, file_name))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Check df length\n",
"print(f\"Number of rows: {len(df)}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# df.head(10)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "data-gI9vukfY-py3.10",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| 33,814 | Python | .py | 1,171 | 24.507259 | 135 | 0.506908 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,076 | downloaders.py | JurajZelman_airl-market-making/data_processing/downloaders.py | """Methods for dowloading and processing the data."""
import datetime
import os
import lakeapi
import pandas as pd
from data.utils import (
get_list_of_second_timestamps,
get_parquet_args,
get_rnd_id,
)
def download_lob_data(
date: datetime.datetime,
symbol: str,
exchange: str,
path: str,
second: bool = True,
raw: bool = False,
) -> None:
"""
Download limit order book snapshots for a given date.
Args:
date: The date to download the data for. Data are downloaded for the
whole day.
symbol: The symbol to download the data for.
exchange: The exchange to download the data for.
path: The path to save the data to.
second (optional): Whether to download second data or tick data.
Defaults to `True`.
raw (optional): Whether to download the raw unprocessed data. Defaults
to `False`.
"""
# Get the parquet arguments
parquet_args = get_parquet_args()
# Download the data
book_data = lakeapi.load_data(
table="book",
start=date,
end=date + datetime.timedelta(days=1),
symbols=[symbol],
exchanges=[exchange],
).sort_values(by="received_time")
book_data.set_index("received_time", inplace=True)
if raw:
# Save the data
if not os.path.exists(path):
os.makedirs(path)
prefix = "order_book"
file_name = (
f"{exchange}_{symbol}_{prefix}_{date.strftime('%Y_%m_%d')}.parquet"
)
book_data.to_parquet(os.path.join(path, file_name), **parquet_args)
return
# Process the dataset to second data
if second:
book_data = book_data.resample("1S").first().ffill()
book_data.drop(columns=["origin_time"], inplace=True)
# Filter data
book_data = book_data[book_data.index >= date]
book_data = book_data[book_data.index < date + datetime.timedelta(days=1)]
book_data.sort_index(inplace=True)
# # Sanity checks
# for i in range(20):
# # Check not none
# assert (book_data[f"bid_{i}_price"].notnull()).all()
# assert (book_data[f"bid_{i}_size"].notnull()).all()
# assert (book_data[f"ask_{i}_price"].notnull()).all()
# assert (book_data[f"ask_{i}_size"].notnull()).all()
# # Check positive / non-negative
# assert (book_data[f"bid_{i}_price"] >= 0).all()
# assert (book_data[f"bid_{i}_size"] > 0).all()
# assert (book_data[f"ask_{i}_price"] >= 0).all()
# assert (book_data[f"ask_{i}_size"] > 0).all()
# # Check indices are unique, sorted and in the correct range
# assert len(book_data.index.unique()) == len(book_data.index)
# assert (book_data.index == book_data.index.sort_values()).all()
# if second:
# seconds = get_list_of_second_timestamps(date)
# assert set(book_data.index) == set(seconds)
# Save the data
if not os.path.exists(path):
os.makedirs(path)
prefix = "order_book_second" if second else "order_book"
file_name = (
f"{exchange}_{symbol}_{prefix}_{date.strftime('%Y_%m_%d')}.parquet"
)
book_data.to_parquet(os.path.join(path, file_name), **parquet_args)
def download_trade_data(
date: datetime.datetime,
symbol: str,
exchange: str,
path: str,
tick_size: float = 0.01,
raw: bool = False,
) -> None:
"""
Download trade data for a given date. The data are downloaded for the whole
day, the 'fake' trades are detected and removed, and the data are aggregated
to second data.
Args:
date: The date to download the data for.
symbol: The symbol to download the data for.
exchange: The exchange to download the data for.
path: The path to save the data to.
tick_size (optional): The tick size of the symbol. Defaults to 0.01.
raw (optional): Whether to download the raw unprocessed data. Defaults
to `False`.
"""
# Get the parquet arguments
parquet_args = get_parquet_args()
# Load the trades data
trades = lakeapi.load_data(
table="trades",
start=date,
end=date + datetime.timedelta(days=1),
symbols=[symbol],
exchanges=[exchange],
).sort_values(by="received_time")
if raw:
# Save the data
if not os.path.exists(path):
os.makedirs(path)
prefix = "trades"
file_name = (
f"{exchange}_{symbol}_{prefix}_{date.strftime('%Y_%m_%d')}.parquet"
)
trades.to_parquet(os.path.join(path, file_name), **parquet_args)
return
# Load the top level order book data
book_data = lakeapi.load_data(
table="book",
start=date,
end=date + datetime.timedelta(days=1),
symbols=[symbol],
exchanges=[exchange],
).sort_values(by="received_time")
cols = ["received_time", "symbol", "exchange", "bid_0_price", "ask_0_price"]
book_data = book_data[cols]
# Merge trades and book data
book_data["future_bid"] = book_data.bid_0_price.shift(-1)
book_data["future_ask"] = book_data.ask_0_price.shift(-1)
df = pd.merge_asof(
left=trades.rename(columns={"received_time": "trade_received_time"}),
right=book_data.rename(
columns={"received_time": "depth_received_time"}
),
left_on="trade_received_time",
right_on="depth_received_time",
tolerance=pd.Timedelta(minutes=60),
)
df = df.dropna().reset_index(drop=True)
# Detection of fake trades
epsilon = 3 * tick_size
df["fake"] = (
# We consider trade to be fake when it is inside the spread (+- epsilon)
(df["price"] > df["bid_0_price"] + epsilon)
& (df["price"] < df["ask_0_price"] - epsilon)
&
# To prevent false positives, we also test for the future spread
(df["price"] > df["future_bid"] + epsilon)
& (df["price"] < df["future_ask"] - epsilon)
)
# Drop the fake trades
df = df[df.fake == False].reset_index(drop=True) # noqa: E712
trades = df.drop(
columns=[
"fake",
"future_bid",
"future_ask",
"bid_0_price",
"ask_0_price",
"depth_received_time",
]
)
trades = trades.rename(columns={"trade_received_time": "received_time"})
trades = trades.set_index("received_time")
# Aggregate the data to second data
buy_trades = trades[trades["side"] == "buy"]
sell_trades = trades[trades["side"] == "sell"]
buy_trade_data_second = buy_trades.resample("S").agg(
{"price": "ohlc", "quantity": "sum"}
)
sell_trade_data_second = sell_trades.resample("S").agg(
{"price": "ohlc", "quantity": "sum"}
)
buy_trade_data_second["side"] = "buy"
sell_trade_data_second["side"] = "sell"
trades_second = pd.concat(
[buy_trade_data_second, sell_trade_data_second], axis=0
)
trades_second.sort_index(inplace=True)
trades_second.columns = [
"_".join(col).strip() for col in trades_second.columns.values
]
trades_second.rename(
columns={"side_": "side", "quantity_quantity": "quantity"}, inplace=True
)
# Generate random ids for the trades
trades_second["id"] = trades_second.apply(
lambda x: str(get_rnd_id()), axis=1
)
# Impute missing seconds
buy_trades = trades_second[trades_second["side"] == "buy"]
sell_trades = trades_second[trades_second["side"] == "sell"]
seconds = get_list_of_second_timestamps(date)
# Missing second timestamps
missing_df_buy = get_missing_trade_dataframe(buy_trades, "buy", seconds)
missing_df_sell = get_missing_trade_dataframe(sell_trades, "sell", seconds)
trades_second = pd.concat(
[trades_second, missing_df_buy, missing_df_sell], axis=0
)
trades_second.sort_index(inplace=True)
# Fill the null prices (just technicality, since volume is zero)
trades_second.fillna(method="ffill", inplace=True)
trades_second.fillna(method="bfill", inplace=True)
# Sanity checks
buy_trades = trades_second[trades_second["side"] == "buy"]
sell_trades = trades_second[trades_second["side"] == "sell"]
assert (trades_second["price_open"].notnull()).all()
assert (trades_second["price_high"].notnull()).all()
assert (trades_second["price_low"].notnull()).all()
assert (trades_second["price_close"].notnull()).all()
assert (trades_second["quantity"].notnull()).all()
assert (trades_second["price_open"] >= 0).all()
assert (trades_second["price_high"] >= 0).all()
assert (trades_second["price_low"] >= 0).all()
assert (trades_second["price_close"] >= 0).all()
assert (trades_second["quantity"] >= 0).all()
assert set(buy_trades.index) == set(seconds)
assert set(sell_trades.index) == set(seconds)
assert len(buy_trades.index.unique()) == len(buy_trades.index)
assert len(sell_trades.index.unique()) == len(sell_trades.index)
# Save the data
if not os.path.exists(path):
os.makedirs(path)
file_name = (
f"{exchange}_{symbol}_trades_second_{date.strftime('%Y_%m_%d')}.parquet"
)
trades_second.to_parquet(os.path.join(path, file_name), **parquet_args)
def get_missing_trade_dataframe(
    data: pd.DataFrame, side: str, seconds: list
) -> pd.DataFrame:
    """
    Build placeholder trade rows for seconds absent from ``data``.

    Args:
        data: Dataframe of per-second trades, indexed by timestamp.
        side: Trade side stamped on every filler row.
        seconds: Full list of second timestamps that should be covered.

    Returns:
        A dataframe indexed by ``received_time`` with one zero-volume row
        (null prices, fresh random id) per missing second.
    """
    absent = set(seconds).difference(data.index.to_list())
    n_missing = len(absent)
    null_prices = [None] * n_missing
    filler = pd.DataFrame(
        {
            "price_open": null_prices,
            "price_high": null_prices,
            "price_low": null_prices,
            "price_close": null_prices,
            "quantity": [0] * n_missing,
            "side": [side] * n_missing,
            "id": [str(get_rnd_id()) for _ in range(n_missing)],
            "received_time": list(absent),
        }
    )
    return filler.set_index("received_time")
| 10,317 | Python | .py | 264 | 32.333333 | 80 | 0.615968 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,077 | trade_data_pipeline.ipynb | JurajZelman_airl-market-making/data_processing/trade_data_pipeline.ipynb | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Trade data pipeline\n",
"\n",
"Full pipeline for downloading and processing trade data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import datetime\n",
"import lakeapi\n",
"import polars as pl\n",
"\n",
"from tqdm import tqdm\n",
"\n",
"from data.downloaders import download_trade_data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Data download\n",
"\n",
"This section contains the code for downloading trade data between two dates."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters for the download\n",
"# symbol = \"BTC-USDT\"\n",
"# exchange = \"BINANCE\"\n",
"symbol = \"SOL-USDT\"\n",
"# exchange = \"BIT.COM\"\n",
"# exchange = \"BINANCE\"\n",
"# exchange = \"GATEIO\"\n",
"exchange = \"OKX\"\n",
"path = os.path.join(os.getcwd(), \"datasets\")\n",
"second = False # Process to second data \n",
"raw = True\n",
"\n",
"# Set start and end dates\n",
"start_date = datetime.datetime(2023, 9, 1)\n",
"end_date = datetime.datetime(2023, 9, 7)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Generate a list of dates between start and end dates\n",
"dates = [start_date + datetime.timedelta(days=x) for x in range((end_date - start_date).days + 1)]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Download the data\n",
"for date in tqdm(dates):\n",
" download_trade_data(date, symbol, exchange, path, second, raw)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "data-gI9vukfY-py3.10",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| 2,583 | Python | .py | 117 | 18.094017 | 104 | 0.548256 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,078 | trade_data_analysis.ipynb | JurajZelman_airl-market-making/data_processing/trade_data_analysis.ipynb | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exploratory analysis of trade data\n",
"\n",
"In this notebook I explore the datasets and plot some of the data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import datetime\n",
"import polars as pl\n",
"import matplotlib.pyplot as plt\n",
"import pandas as pd\n",
"\n",
"from data.utils import (\n",
" get_list_of_second_timestamps,\n",
" get_rnd_id,\n",
" set_plot_style,\n",
" ensure_dir_exists\n",
")\n",
"\n",
"set_plot_style()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Indicate whether to save figures\n",
"save_fig = False\n",
"\n",
"# Set path for figures saving\n",
"FIGURES_PATH = \"/home/juraj/Projects/thesis-market-making/thesis/images\"\n",
"ensure_dir_exists(FIGURES_PATH)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the exchange and symbol\n",
"# exchange = \"BINANCE\"\n",
"# exchange = \"OKX\"\n",
"# exchange = \"GATEIO\"\n",
"exchange = \"BIT.COM\"\n",
"\n",
"symbol = \"SOL-USDT\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load multiple dataframes"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set parameters\n",
"start_date = datetime.datetime(2023, 9, 1)\n",
"end_date = datetime.datetime(2023, 9, 13)\n",
"path = os.path.join(os.getcwd(), \"datasets\")\n",
"second = False"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Generate a list of dates\n",
"dates = [start_date + datetime.timedelta(days=x) for x in range((end_date - start_date).days + 1)]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load the data\n",
"prefix = \"trades\"\n",
"for date in dates:\n",
" file_name = f\"{exchange}_{symbol}_{prefix}_{date.strftime('%Y_%m_%d')}.parquet\"\n",
" # df_single = pl.read_parquet(os.path.join(path, file_name))\n",
" df_single = pd.read_parquet(os.path.join(path, file_name))\n",
" if date == start_date:\n",
" df = df_single\n",
" else:\n",
" df = pd.concat([df, df_single])\n",
" \n",
"df.set_index(\"received_time\", inplace=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Process the data for each day\n",
"avg_buy_volume = 0\n",
"avg_sell_volume = 0\n",
"avg_buy_orders = 0\n",
"avg_sell_orders = 0\n",
"\n",
"prefix = \"trades\"\n",
"for date in dates:\n",
" file_name = f\"{exchange}_{symbol}_{prefix}_{date.strftime('%Y_%m_%d')}.parquet\"\n",
" # df_single = pl.read_parquet(os.path.join(path, file_name))\n",
" df_single = pd.read_parquet(os.path.join(path, file_name))\n",
" print(f\"Statistics for date: {date.strftime('%Y-%m-%d')}\")\n",
" \n",
" # Compute the number of buy and sell orders\n",
" buy_orders = df_single[df_single[\"side\"] == \"buy\"]\n",
" sell_orders = df_single[df_single[\"side\"] == \"sell\"]\n",
" avg_buy_orders += buy_orders.shape[0]\n",
" avg_sell_orders += sell_orders.shape[0]\n",
" print(f\"Number of buy orders: {buy_orders.shape[0]}\")\n",
" print(f\"Number of sell orders: {sell_orders.shape[0]}\")\n",
" \n",
" # Compute the total volume of buy and sell orders\n",
" buy_volume = buy_orders[\"quantity\"].sum()\n",
" sell_volume = sell_orders[\"quantity\"].sum()\n",
" avg_buy_volume += buy_volume\n",
" avg_sell_volume += sell_volume\n",
" print(f\"Total buy volume: {round(buy_volume, 2)}\")\n",
" print(f\"Total sell volume: {round(sell_volume, 2)}\")\n",
" \n",
" # Compute the total volume\n",
" total_volume = df_single[\"quantity\"].sum()\n",
" print()\n",
"\n",
"# Compute the average number of buy and sell orders\n",
"avg_buy_orders /= len(dates)\n",
"avg_sell_orders /= len(dates)\n",
"print(f\"Average number of buy orders: {round(avg_buy_orders, 2)}\")\n",
"print(f\"Average number of sell orders: {round(avg_sell_orders, 2)}\")\n",
"\n",
"# Compute the average buy and sell volume\n",
"avg_buy_volume /= len(dates)\n",
"avg_sell_volume /= len(dates)\n",
"print(f\"Average buy volume: {round(avg_buy_volume, 2)}\")\n",
"print(f\"Average sell volume: {round(avg_sell_volume, 2)}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df.head(10)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Separate \n",
"buy_orders = df[df[\"side\"] == \"buy\"]\n",
"sell_orders = df[df[\"side\"] == \"sell\"]\n",
"# buy_orders.set_index(\"received_time\")\n",
"# sell_orders.set_index(\"received_time\")\n",
"\n",
"# Check the number of buy and sell orders\n",
"print(f\"Number of buy orders: {buy_orders.shape[0]}\")\n",
"print(f\"Number of sell orders: {sell_orders.shape[0]}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Visualize buy and sell volumes"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Plot hours only instead of full timestamps\n",
"from matplotlib.dates import DateFormatter\n",
"date_format = DateFormatter(\"%H:%M\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Define custom colors\n",
"color_green = \"#13961a\"\n",
"color_red = \"#eb5c14\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualize the buy volumes\n",
"fig = plt.figure(figsize=(12, 4))\n",
"plt.plot(buy_orders['quantity'], color=color_green)\n",
"# plt.gca().xaxis.set_major_formatter(date_format)\n",
"plt.xlabel('Time')\n",
"plt.ylabel('Volume (SOL)')\n",
"\n",
"plt.tight_layout()\n",
"# plt.show()\n",
"\n",
"# Save the figure\n",
"if save_fig:\n",
" fig.savefig(f\"{FIGURES_PATH}/{exchange}_{symbol}_buy_volume.pdf\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Compute the changes in incoming volume\n",
"# buy_volume_diff = buy_orders[\"quantity\"].diff()\n",
"\n",
"# # Plot the changes in incoming buy volume\n",
"# plt.figure(figsize=(10, 4))\n",
"# plt.plot(buy_volume_diff, color=color_green)\n",
"# plt.xlabel('Time')\n",
"# plt.ylabel('Volume change')\n",
"# plt.tight_layout()\n",
"# plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Merge the above two plots into one figure with two subplots\n",
"# fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 8))\n",
"# ax1.plot(buy_orders['quantity'], color=color_green)\n",
"# ax1.set_ylabel('Volume')\n",
"# ax2.plot(buy_volume_diff, color=color_green)\n",
"# ax2.set_ylabel('Volume change')\n",
"# plt.xlabel('Time')\n",
"# plt.tight_layout()\n",
"\n",
"# # Change tick label size\n",
"# # ax1.tick_params(axis='x', labelsize=18)\n",
"# # ax1.tick_params(axis='y', labelsize=18)\n",
"# plt.show()\n",
"\n",
"# # Save the figure\n",
"# # if save_fig:\n",
"# # fig.savefig(f\"{FIGURES_PATH}/buy_volume.pdf\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# buy_volume_diff.describe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualize the sell volumes\n",
"fig = plt.figure(figsize=(12, 4))\n",
"plt.plot(sell_orders['quantity'], color=color_red)\n",
"# plt.gca().xaxis.set_major_formatter(date_format)\n",
"plt.xlabel('Time')\n",
"plt.ylabel('Volume (SOL)')\n",
"plt.tight_layout()\n",
"# plt.show()\n",
"\n",
"# Save the figure\n",
"if save_fig:\n",
" fig.savefig(f\"{FIGURES_PATH}/{exchange}_{symbol}_sell_volume.pdf\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Compute the changes in incoming volume\n",
"# sell_volume_diff = sell_orders[\"quantity\"].diff()\n",
"\n",
"# # Plot the changes in incoming buy volume\n",
"# plt.figure(figsize=(10, 4))\n",
"# plt.plot(sell_volume_diff, color=color_red)\n",
"# plt.xlabel('Time')\n",
"# plt.ylabel('Volume change')\n",
"# plt.tight_layout()\n",
"# plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Merge the above two plots into one figure with two subplots\n",
"# fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 8))\n",
"# ax1.plot(sell_orders['quantity'], color=color_red)\n",
"# ax1.set_ylabel('Volume')\n",
"# ax2.plot(sell_volume_diff, color=color_red)\n",
"# ax2.set_ylabel('Volume change')\n",
"# plt.xlabel('Time')\n",
"# plt.tight_layout()\n",
"# plt.show()\n",
"\n",
"# # Save the figure\n",
"# if save_fig:\n",
"# fig.savefig(f\"{FIGURES_PATH}/sell_volume.pdf\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sell_volume_diff.describe()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Volume histograms"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# buy_orders_describe = buy_orders.filter()\n",
"# buy_orders.describe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualize the buy volumes (excluding outliers at 0.99 quantile)\n",
"# buy_filtered = buy_orders.filter(pl.col('quantity') < buy_orders['quantity'].quantile(0.99))\n",
"\n",
"fig = plt.figure(figsize=(12, 4))\n",
"# plt.figure(figsize=(12, 4))\n",
"plt.hist(buy_orders['quantity'], bins=100, color=color_green, edgecolor='black', linewidth=1.1, log=True)\n",
"# plt.hist(buy_orders[buy_orders[\"quantity\"] > 0][\"quantity\"], bins=100, color=color_green, edgecolor='black', linewidth=1.1, log=True)\n",
"plt.xlabel('Volume (SOL)')\n",
"plt.ylabel('Count')\n",
"plt.tight_layout()\n",
"plt.show()\n",
"\n",
"# Save the figure\n",
"if save_fig:\n",
" fig.savefig(f\"{FIGURES_PATH}/{exchange}_{symbol}_buy_volume_hist.pdf\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualize the sell volumes (excluding outliers at 0.99 quantile)\n",
"# sell_filtered = sell_orders.filter(pl.col('quantity') < sell_orders['quantity'].quantile(0.99))\n",
"\n",
"fig = plt.figure(figsize=(12, 4))\n",
"plt.hist(sell_orders['quantity'], bins=100, color=color_red, edgecolor='black', linewidth=1.1, log=True)\n",
"plt.xlabel('Volume (SOL)')\n",
"plt.ylabel('Count')\n",
"plt.tight_layout()\n",
"plt.show()\n",
"\n",
"# Save the figure\n",
"if save_fig:\n",
" fig.savefig(f\"{FIGURES_PATH}/{exchange}_{symbol}_sell_volume_hist.pdf\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Describe the buy orders statistics\n",
"buy_orders.describe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Describe the sell orders statistics\n",
"sell_orders.describe()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Trade flow imbalance"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Resample the data to 1 minute intervals\n",
"buy_orders_1min = buy_orders[\"quantity\"].resample(\"1min\").sum()\n",
"sell_order_1min = sell_orders[\"quantity\"].resample(\"1min\").sum()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Compute the order flow imbalance\n",
"eps = 1e-8\n",
"denominator = buy_orders_1min + sell_order_1min\n",
"denominator = denominator.replace(0, eps)\n",
"imbalance = (buy_orders_1min - sell_order_1min) / denominator"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Describe the order flow imbalance statistics\n",
"imbalance.describe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Visualize the order flow imbalance\n",
"start_index = 720\n",
"end_index = 1080\n",
"\n",
"fig = plt.figure(figsize=(12, 4))\n",
"plt.plot(imbalance[start_index:end_index], color='black')\n",
"plt.gca().xaxis.set_major_formatter(date_format)\n",
"plt.xlabel('Time (hours)')\n",
"plt.ylabel('Order flow imbalance')\n",
"plt.tight_layout()\n",
"plt.show()\n",
"\n",
"# Save the figure\n",
"if save_fig:\n",
" fig.savefig(f\"{FIGURES_PATH}/{exchange}_{symbol}_trade_flow_imbalance.pdf\")\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Volume differences analysis"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load single dataframe"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set parameters\n",
"date = datetime.datetime(2023, 9, 1)\n",
"path = os.path.join(os.getcwd(), \"datasets\")\n",
"second = True"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"seconds = get_list_of_second_timestamps(date)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load the data\n",
"prefix = \"trades\"\n",
"file_name = f\"{exchange}_{symbol}_{prefix}_{date.strftime('%Y_%m_%d')}.parquet\"\n",
"df = pl.read_parquet(os.path.join(path, file_name))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df.head(10)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# select all rows where the 'column_name' column has the value 'value'\n",
"buy_orders = df.filter(pl.col('side') == 'buy')\n",
"sell_orders = df.filter(pl.col('side') == 'sell')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Check the number of buy and sell orders\n",
"print(f\"Number of buy orders: {buy_orders.shape[0]}\")\n",
"print(f\"Number of sell orders: {sell_orders.shape[0]}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Check that there is no timestamp duplication\n",
"assert len(buy_orders['received_time'].unique()) == len(buy_orders[\"received_time\"])\n",
"assert len(sell_orders['received_time'].unique()) == len(sell_orders[\"received_time\"])"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "data-gI9vukfY-py3.10",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| 16,801 | Python | .py | 624 | 22.661859 | 148 | 0.549855 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,079 | utils.py | JurajZelman_airl-market-making/data_processing/utils.py | """Helper functions for the data analysis."""
import datetime
import os
import random
import matplotlib.pyplot as plt
def set_plot_style() -> None:
    """Apply the project-wide matplotlib look (seaborn base, cmr10 font)."""
    plt.style.use("seaborn-v0_8")
    plt.rcParams.update(
        {"axes.prop_cycle": plt.cycler("color", plt.cm.tab10.colors)}
    )
    # Computer-modern font with mathtext-capable axis formatters.
    plt.rcParams.update({"font.family": "cmr10", "font.size": 12})
    plt.rcParams.update({"axes.formatter.use_mathtext": True})
    # Font sizes: small for default text/ticks/legend, medium for axis
    # labels, bigger for figure titles.
    small, medium, bigger = 16, 18, 20
    for group, settings in (
        ("font", {"size": small}),
        ("axes", {"titlesize": small, "labelsize": medium}),
        ("xtick", {"labelsize": small}),
        ("ytick", {"labelsize": small}),
        ("legend", {"fontsize": small}),
        ("figure", {"titlesize": bigger}),
    ):
        plt.rc(group, **settings)
def get_parquet_args():
    """
    Return keyword arguments for parquet writes that sidestep the
    timestamp-precision conversion issues (coerce to microseconds and
    tolerate truncation).
    """
    return dict(
        coerce_timestamps="us",
        allow_truncated_timestamps=True,
    )
def get_rnd_id(length: int = 6) -> int:
    """Return a uniformly random integer with exactly ``length`` digits."""
    lower = 10 ** (length - 1)
    # randrange's exclusive upper bound (lower * 10) matches randint's
    # inclusive 10**length - 1.
    return random.randrange(lower, lower * 10)
def get_list_of_second_timestamps(date: datetime.datetime) -> list:
    """Return every per-second timestamp within the 24 hours of ``date``."""
    one_second = datetime.timedelta(seconds=1)
    return [date + offset * one_second for offset in range(86400)]
def get_list_of_dates_between(
    start_date: datetime.datetime, end_date: datetime.datetime
) -> list:
    """Return one datetime per day from ``start_date`` through ``end_date``,
    both endpoints included."""
    n_days = (end_date - start_date).days + 1
    return [
        start_date + datetime.timedelta(days=offset) for offset in range(n_days)
    ]
def ensure_dir_exists(path: str) -> None:
    """Create ``path`` (including parents) if it does not already exist.

    Uses ``exist_ok=True`` so the check-then-create race between an
    ``os.path.exists`` test and ``os.makedirs`` cannot raise
    ``FileExistsError`` when two processes create the directory
    concurrently.
    """
    os.makedirs(path, exist_ok=True)
| 2,209 | Python | .py | 53 | 36.811321 | 80 | 0.681159 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,080 | volume_analysis.ipynb | JurajZelman_airl-market-making/data_processing/volume_analysis.ipynb | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# LOB volumes analysis\n",
"\n",
"The goal of this notebook is to preprocess sample distributions for each level of the order book from which one can sample random volumes that can be used in the simulation, e.g. for simulating reactions to agent's limit orders."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import datetime\n",
"\n",
"import matplotlib.pyplot as plt\n",
"import pandas as pd\n",
"import polars as pl\n",
"\n",
"from data.utils import get_list_of_dates_between, set_plot_style, ensure_dir_exists"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pl.enable_string_cache(True)\n",
"set_plot_style()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load the insample data\n",
"\n",
"Load the insample dataset from the daily parquet files."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# SOL-USDT\n",
"exchange = \"BIT.COM\"\n",
"symbol = \"SOL-USDT\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set parameters\n",
"start_date = datetime.datetime(2023, 9, 1)\n",
"end_date = datetime.datetime(2023, 9, 10) # Use the insample data\n",
"path = os.path.join(os.getcwd(), \"datasets\")\n",
"second = False"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Get the list of dates\n",
"dates = get_list_of_dates_between(start_date, end_date)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load the data\n",
"prefix = \"order_book\"\n",
"for date in dates:\n",
" file_name = f\"{exchange}_{symbol}_{prefix}_{date.strftime('%Y_%m_%d')}.parquet\"\n",
" df_single = pd.read_parquet(os.path.join(path, file_name))\n",
" if date == start_date:\n",
" df = df_single\n",
" else:\n",
" df = pd.concat([df, df_single])\n",
" \n",
"df.sort_index(inplace=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for i in range(3):\n",
" vols_level = list(df[f\"bid_{i}_size\"].values) + list(df[f\"ask_{i}_size\"].values)\n",
" \n",
" fig = plt.figure(figsize=(10, 5))\n",
" plt.hist(vols_level, bins=100, log=True)\n",
" plt.xlabel(\"Volume\")\n",
" plt.ylabel(\"Frequency\")\n",
" plt.title(f\"Volume distribution for level {i+1}\")\n",
" plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vols_level_0 = list(df[f\"bid_0_size\"].values) + list(df[f\"ask_0_size\"].values)\n",
"vols_level_1 = list(df[f\"bid_1_size\"].values) + list(df[f\"ask_1_size\"].values)\n",
"vols_level_2 = list(df[f\"bid_2_size\"].values) + list(df[f\"ask_2_size\"].values)\n",
"\n",
"# # Make the assumption that there are on average 2 orders per level\n",
"# vols_level_0 = [vol/2 for vol in vols_level_0]\n",
"# vols_level_1 = [vol/2 for vol in vols_level_1]\n",
"# vols_level_2 = [vol/2 for vol in vols_level_2]\n",
"\n",
"# Save the data\n",
"ensure_dir_exists(os.path.join(os.getcwd(), \"distributions\"))\n",
"\n",
"# Save all three lists as pickle files\n",
"vols_level_0 = pd.Series(vols_level_0)\n",
"vols_level_1 = pd.Series(vols_level_1)\n",
"vols_level_2 = pd.Series(vols_level_2)\n",
"vols_level_0.to_pickle(\n",
" os.path.join(os.getcwd(), \"distributions\", \"volumes_level_0.pkl\")\n",
")\n",
"vols_level_1.to_pickle(\n",
" os.path.join(os.getcwd(), \"distributions\", \"volumes_level_1.pkl\")\n",
")\n",
"vols_level_2.to_pickle(\n",
" os.path.join(os.getcwd(), \"distributions\", \"volumes_level_2.pkl\")\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class EmpiricalOrderVolumeDistribution():\n",
" \"\"\"\n",
" Class for sampling order volumes from the empirical distribution estimated\n",
" on the insample order book data.\n",
" \"\"\"\n",
" \n",
" def __init__(self) -> None:\n",
" \"\"\"\n",
" Initialize the class by loading the volume distributions from the pickle\n",
" files.\n",
" \"\"\"\n",
" self.vols_level_0 = pd.read_pickle(\n",
" os.path.join(os.getcwd(), \"distributions\", \"volumes_level_0.pkl\")\n",
" )\n",
" self.vols_level_1 = pd.read_pickle(\n",
" os.path.join(os.getcwd(), \"distributions\", \"volumes_level_1.pkl\")\n",
" )\n",
" self.vols_level_2 = pd.read_pickle(\n",
" os.path.join(os.getcwd(), \"distributions\", \"volumes_level_2.pkl\")\n",
" )\n",
" \n",
" def sample(self, level: int) -> float:\n",
" \"\"\"\n",
" Sample a volume from the empirical distribution.\n",
"\n",
" Args:\n",
" level: The level of the order book to sample from.\n",
"\n",
" Returns:\n",
" The sampled volume.\n",
" \"\"\"\n",
" if level == 0:\n",
" return self.vols_level_0.sample().values[0]\n",
" elif level == 1:\n",
" return self.vols_level_1.sample().values[0]\n",
" elif level == 2:\n",
" return self.vols_level_2.sample().values[0]\n",
" else:\n",
" raise ValueError(\"Level must be between 0 and 2.\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dist = EmpiricalOrderVolumeDistribution()\n",
"for i in range(100):\n",
" print(dist.sample(2))\n",
" "
]
}
],
"metadata": {
"kernelspec": {
"display_name": "data-gI9vukfY-py3.10",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| 6,896 | Python | .py | 240 | 24.445833 | 234 | 0.527344 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,081 | data_cleaning.ipynb | JurajZelman_airl-market-making/data_processing/data_cleaning.ipynb | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Data cleaning\n",
"\n",
"This notebook is used for the cleaning of few outliers in the SOL-USDT dataset from the BIT.COM exchange where the top ask price skyrocketed to something like 120 from ~20 USDT. This would be worth mentioning in the report but I rather exclude it from the analysis since I'm not sure whether you would be able to capture such spread anyway."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import datetime\n",
"import polars as pl\n",
"import matplotlib.pyplot as plt\n",
"import pandas as pd\n",
"\n",
"from data.utils import (\n",
" get_list_of_second_timestamps,\n",
" get_rnd_id,\n",
" set_plot_style,\n",
" ensure_dir_exists\n",
")\n",
"\n",
"set_plot_style()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Indicate whether to save figures\n",
"save_fig = False\n",
"\n",
"# Set path for figures saving\n",
"FIGURES_PATH = \"/home/juraj/Projects/thesis-market-making/thesis/images\"\n",
"ensure_dir_exists(FIGURES_PATH)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set the exchange and symbol\n",
"# exchange = \"BINANCE\"\n",
"# exchange = \"OKX\"\n",
"# exchange = \"GATEIO\"\n",
"exchange = \"BIT.COM\"\n",
"\n",
"symbol = \"SOL-USDT\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Set parameters\n",
"start_date = datetime.datetime(2023, 9, 1)\n",
"end_date = datetime.datetime(2023, 9, 13)\n",
"path = os.path.join(os.getcwd(), \"data_test\")\n",
"second = False"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Generate a list of dates\n",
"dates = [start_date + datetime.timedelta(days=x) for x in range((end_date - start_date).days + 1)]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Iterate over datafiles and remove outliers\n",
"prefix = \"order_book\"\n",
"for date in dates:\n",
" file_name = f\"{exchange}_{symbol}_{prefix}_{date.strftime('%Y_%m_%d')}.parquet\"\n",
" # df_single = pl.read_parquet(os.path.join(path, file_name))\n",
" df = pd.read_parquet(os.path.join(path, file_name))\n",
" \n",
" # Count number of ts with these outliers\n",
" print(f\"Date: \", date)\n",
" a = df[\"ask_0_price\"] > 30\n",
" b = df[\"ask_1_price\"] > 30\n",
" c = df[\"ask_2_price\"] > 30\n",
" print(f\"Number of rows to be filtered: {df[a | b | c].shape[0]} out of {df.shape[0]}\")\n",
" print(f\"Number of rows to be filtered: {df[a | b | c].shape[0] / df.shape[0] * 100:.2f}%\")\n",
" print()\n",
" # a_neg = df[\"ask_0_price\"] < 30\n",
" # b_neg = df[\"ask_1_price\"] < 30\n",
" # c_neg = df[\"ask_2_price\"] < 30\n",
" # df_filtered = df[a_neg & b_neg & c_neg]\n",
" # file_name_filtered = f\"{exchange}_{symbol}_{prefix}_{date.strftime('%Y_%m_%d')}_filtered.parquet\"\n",
" # df_filtered.to_parquet(os.path.join(path, file_name))\n",
" \n",
" # # Remove outliers\n",
" # new_df = df[df[\"ask_0_price\"] < 30]\n",
" # new_df = new_df[new_df[\"ask_1_price\"] < 30]\n",
" # new_df = new_df[new_df[\"ask_2_price\"] < 30]\n",
" # new_df = new_df[new_df[\"ask_3_price\"] < 30]\n",
" \n",
" # # Save the new dataframe\n",
" # new_file_name = f\"{exchange}_{symbol}_{prefix}_{date.strftime('%Y_%m_%d')}.parquet\"\n",
" # new_df.to_parquet(os.path.join(path, new_file_name))\n",
" # file_name_original = f\"{exchange}_{symbol}_{prefix}_{date.strftime('%Y_%m_%d')}_original.parquet\"\n",
" # df.to_parquet(os.path.join(path, file_name_original))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "data-gI9vukfY-py3.10",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| 4,769 | Python | .py | 156 | 26.352564 | 346 | 0.542814 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,082 | order_book_data_pipeline.ipynb | JurajZelman_airl-market-making/data_processing/order_book_data_pipeline.ipynb | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Limit Order Book data pipeline\n",
"\n",
"Full pipeline for downloading and processing the limit order book data."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import datetime\n",
"import lakeapi\n",
"import polars as pl\n",
"\n",
"from tqdm import tqdm\n",
"\n",
"from data.downloaders import download_lob_data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Data download\n",
"\n",
"This section contains the code for downloading lob data between two dates."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# Set the parameters for the download\n",
"# symbol = \"BTC-USDT\"\n",
"# exchange = \"BINANCE\"\n",
"symbol = \"SOL-USDT\"\n",
"exchange = \"BIT.COM\"\n",
"# exchange = \"BINANCE\"\n",
"# exchange = \"OKX\"\n",
"# exchange = \"GATEIO\"\n",
"\n",
"path = os.path.join(os.getcwd(), \"data_test\")\n",
"second = False # Process to second data \n",
"raw = True\n",
"\n",
"# Set start and end dates\n",
"start_date = datetime.datetime(2023, 9, 1)\n",
"end_date = datetime.datetime(2023, 9, 13)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# Generate a list of dates between start and end dates\n",
"dates = [start_date + datetime.timedelta(days=x) for x in range((end_date - start_date).days + 1)]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Download the data\n",
"for date in tqdm(dates):\n",
" download_lob_data(date, symbol, exchange, path, second, raw)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "data-gI9vukfY-py3.10",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| 2,493 | Python | .py | 111 | 18.45045 | 104 | 0.549958 | JurajZelman/airl-market-making | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,083 | errors.py | WillJRoper_h5forest/src/h5forest/errors.py | """A module containing functions for graceful error handling."""
def error_handler(func):
"""
Wrap a function in a try/except block to catch errors.
Errors are printed to the mini buffer.
Args:
func (function):
The function to wrap.
"""
def wrapper(*args, **kwargs):
"""Wrap the function."""
try:
return func(*args, **kwargs)
except KeyboardInterrupt:
# Re-raise the KeyboardInterrupt to ensure it's not caught here
raise
except Exception as e:
# Nested import to avoid circular dependencies
from h5forest.h5_forest import H5Forest
H5Forest().print(f"ERROR@{func.__name__}: {e}")
return wrapper
| 754 | Python | .py | 21 | 27.333333 | 75 | 0.612948 | WillJRoper/h5forest | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,084 | tree.py | WillJRoper_h5forest/src/h5forest/tree.py | """Tree class for the HDF5 file viewer.
This module contains the Tree class which is used to represent the HDF5 file
as a tree structure. The Tree contains Nodes which are used to represent the
groups and datasets in the HDF5 file. Each Node is lazy loaded, meaning that
its children are only loaded when it is opened.
Example usage:
tree = Tree("example.h5")
print(tree.get_tree_text())
"""
import h5py
from prompt_toolkit.layout.processors import Processor, Transformation
from h5forest.node import Node
class TreeProcessor(Processor):
def __init__(self, tree):
self.tree = tree
def apply_transformation(self, ti):
lineno = ti.lineno
fragments = ti.fragments
# Access the node corresponding to the current line
if lineno < len(self.tree.nodes_by_row):
node = self.tree.nodes_by_row[lineno]
style = ""
if node.is_group:
style += " class:group"
if node.is_highlighted:
style += " class:highlighted"
if node.is_under_cursor:
style += " class:under_cursor"
# Apply the style to the entire line
new_fragments = [(style, text) for _style, text in fragments]
return Transformation(new_fragments)
else:
# Line number exceeds the number of nodes, return original
# fragments
return Transformation(fragments)
class Tree:
"""A class to represent the HDF5 file as a tree structure.
This class is used to represent the HDF5 file as a tree structure. The
tree is constructed from Node objects representing Groups and Datasets
which are lazy loaded, meaning that the children of a node are only
loaded when needed.
Attributes:
filepath (str):
The path to the HDF5 file.
roots (list):
A list of the root level nodes in the tree.
nodes_by_row (list):
A list of all nodes in the tree by row in the tree output.
"""
def __init__(self, filepath):
"""
Initialise the tree.
This will set up the attributes we'll need and parses the root level of
the HDF5 file ready to be displayed.
Args:
filepath (str):
The path to the HDF5 file.
"""
# Store the file path we're working with
self.filepath = filepath
self.filename = (
filepath.split("/")[-1].replace(".h5", "").replace(".hdf5", "")
)
# Initialise a container to store nodes by row in the tree output
self.nodes_by_row = []
# Intialise containers to hold the tree text and split version
# to avoid wasted computation
self.tree_text = ""
self.tree_text_split = []
# Get the root of the level
with h5py.File(self.filepath, "r") as hdf:
self.root = Node(self.filename, hdf, self.filepath)
self.root.is_under_cursor = True
# Store the previous node under the cursor (we need some memory for
# correct highlighting)
self.prev_node = self.root
@property
def length(self):
"""Return the length of the tree text."""
return len(self.tree_text)
@property
def height(self):
"""Return the height of the tree text."""
return len(self.nodes_by_row)
@property
def width(self):
"""
Return the width of the tree text.
Note that this works because every line is padded with spaces to
the same length.
"""
return len(self.tree_text_split[0])
def parse_level(self, parent):
"""
Open the parent group.
This will populate the chidlren dict on the node which will be parsed
later when a text representation is requested updating the tree.
Args:
parent (Node):
The parent node to open.
"""
# Open this group
parent.open_node()
def _get_tree_text_recursive(self, current_node, text, nodes_by_row):
"""
Parse the open nodes to produce the text tree representation.
This will recurse through the open nodes constructing the output.
Args:
current_node (Node):
The current node to parse.
text (str):
The current text representation of the tree.
nodes_by_row (list):
A list containing the nodes where the index is the row
they are on in the text representation.
Returns:
str:
The text representation of the tree.
list:
A list containing the nodes where the index is the row
they are on in the text representation.
"""
# Add this nodes representation
text += f"{current_node.to_tree_text()}\n"
# Append this node to the by row list
nodes_by_row.append(current_node)
# And include any children
for child in current_node.children:
text, nodes_by_row = self._get_tree_text_recursive(
child,
text,
nodes_by_row,
)
return text, nodes_by_row
def get_tree_text(self):
"""
Return the text representation of the tree.
Note that this is only called at initialisation and thus only parses
the roots of the tree. Any future updates will be done via
update_tree_text. This is to avoid recalculating the full tree for
every change.
Returns:
str:
The text representation of the tree.
"""
text = ""
nodes_by_row = []
text, nodes_by_row = self._get_tree_text_recursive(
self.root,
text,
nodes_by_row,
)
# Store the nodes by row
self.nodes_by_row = nodes_by_row
# Store the tree text
self.tree_text = text
self.tree_text_split = text.split("\n")
return text
def update_tree_text(self, parent, current_row):
"""
Update the tree text for the parent node.
Args:
parent (Node):
The parent node to update.
current_row (int):
The row in the tree text where the parent is.
"""
# Open the parent
self.parse_level(parent)
# Update the parent node to reflect that it is now open
self.tree_text_split[current_row] = parent.to_tree_text()
# Create the text and node list for the children ready to insert
child_test = [child.to_tree_text() for child in parent.children]
child_nodes_by_row = [child for child in parent.children]
# Insert the children into the tree text and nodes by row list
self.tree_text_split[current_row + 1 : current_row + 1] = child_test
self.nodes_by_row[current_row + 1 : current_row + 1] = (
child_nodes_by_row
)
# Update the tree text area
self.tree_text = "\n".join(self.tree_text_split)
return self.tree_text
def close_node(self, node, current_row):
"""
Close the node.
Args:
node (Node):
The node to close.
current_row (int):
The row in the tree text where the node is.
"""
# Close the node itself
node.close_node()
# Now we need to remove all the children from the tree, these have
# already been closed recursively by the call to `close_node` above
# so we just need to remove them from the tree text and the nodes by
# row list
# We can do this by removing everything between the node and the next
# node at the same depth
for i, n in enumerate(self.nodes_by_row[current_row + 1 :]):
if n.depth <= node.depth:
break
del self.nodes_by_row[current_row + 1]
del self.tree_text_split[current_row + 1]
# Update the parent node to reflect that it is now closed
self.tree_text_split[current_row] = node.to_tree_text()
# Update the tree text area
self.tree_text = "\n".join(self.tree_text_split)
return self.tree_text
def get_current_node(self, row):
"""
Return the current node.
This will also unhighlight the previous node, and highlight the new
node.
Args:
row (int):
The row in the tree.
Returns:
Node:
The node at row.
"""
# Unhighlight the previous node
self.prev_node.is_under_cursor = False
# Get the new node and highlight it
new_node = self.nodes_by_row[row]
new_node.is_under_cursor = True
# New node will now be the previous node
self.prev_node = new_node
return new_node
| 9,023 | Python | .py | 229 | 29.467249 | 79 | 0.595969 | WillJRoper/h5forest | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,085 | plotting.py | WillJRoper_h5forest/src/h5forest/plotting.py | """A module for plotting with matplotlib directly from the HDF5 file.
This is only ever called from the h5forest module and is not intended to be
used directly by the user.
"""
import os
import threading
import warnings
import h5py
import matplotlib.pyplot as plt
import numpy as np
from prompt_toolkit.application import get_app
from h5forest.errors import error_handler
from h5forest.progress import ProgressBar
# Supress warnings related to numpy
warnings.filterwarnings("ignore")
class Plotter:
"""
A class to handle the plotting of data from the HDF5 file.
This is the parent class to all other plotting classes and contains only
the generic plotting methods.
Attributes:
plot_params (dict):
A dictionary to store the plot parameters.
default_plot_text (str):
The default text to display in the plotting TextArea.
plot_text (str):
The text to display in the plotting TextArea.
"""
def __init__(self):
"""Initialise the plotter."""
# Container for the plot parameters
self.plot_params = {}
# Placeholder for the fig and ax
self.fig = None
self.ax = None
@error_handler
def get_row(self, row):
"""
Return the current row in the plot text.
Args:
row (int):
The row to return.
"""
return self.plot_text.split("\n")[row]
def __len__(self):
"""Return the number of plot parameters."""
return len(self.plot_params)
@error_handler
def show(self):
"""Show the plot and reset everything."""
plt.show()
@error_handler
def save(self):
"""Save the plot and reset everything."""
from h5forest.h5_forest import H5Forest
def save_callback():
"""Get the filepath and save the plot."""
# Strip the user input
out_path = H5Forest().user_input.strip()
self.fig.savefig(out_path, dpi=100, bbox_inches="tight")
H5Forest().print("Plot saved!")
H5Forest().default_focus()
H5Forest().return_to_normal_mode()
H5Forest().input(
"Enter the filepath to save the plot: ",
save_callback,
mini_buffer_text=os.getcwd() + "/",
)
@error_handler
def plot_and_show(self, text):
"""
Plot the data and show the plot.
Args:
text (str):
The text to extract the plot parameters from.
"""
# Compute the plot
self._plot(text)
# Show the plot
self.show()
@error_handler
def plot_and_save(self, text):
"""
Plot the data and save the plot.
Args:
text (str):
The text to extract the plot parameters from.
"""
# Compute the plot
self._plot(text)
# Save the plot
self.save()
class ScatterPlotter(Plotter):
"""
The scatter plotting class.
Attributes:
plot_params (dict):
A dictionary to store the plot parameters.
default_plot_text (str):
The default text to display in the plotting TextArea.
plot_text (str):
The text to display in the plotting TextArea.
x_min (float):
The minimum value for the x-axis.
x_max (float):
The maximum value for the x-axis.
y_min (float):
The minimum value for the y-axis.
y_max (float):
The maximum value for the y-axis.
x_data (np.ndarray):
The x-axis data.
y_data (np.ndarray):
The y-axis data.
"""
def __init__(self):
"""Initialise the scatter plotter."""
# Call the parent class
super().__init__()
# Define the default text for the plotting TextArea
self.default_plot_text = (
"x-axis: <key>\n"
"y-axis: <key>\n"
"x-label: <label>\n"
"y-label: <label>\n"
"x-scale: linear\n"
"y-scale: linear\n"
"marker: .\n"
)
# Define the text for the plotting TextArea
self.plot_text = self.default_plot_text
# Initialise containters for minima and maxima
self.x_min = None
self.x_max = None
self.y_min = None
self.y_max = None
# Initialise the container for the scatter data
self.x_data = None
self.y_data = None
# Attributes for working with threads
self.assignx_thread = None
self.assigny_thread = None
self.plot_thread = None
def set_x_key(self, node):
"""
Set the x-axis key for the plot.
This will set the plot parameter for the x-axis key and update the
plotting text.
Args:
node (h5forest.h5_forest.Node):
The node to use for the x-axis.
"""
from h5forest.h5_forest import H5Forest
# Check the node is 1D
if node.ndim > 1:
H5Forest().print("Dataset must be 1D!")
return self.plot_text
# If we have any datasets already check we have a compatible shape
for key in self.plot_params:
if node.shape != self.plot_params[key].shape:
H5Forest().print("Datasets must have the same shape!")
return self.plot_text
# Set the plot parameter for the x-axis key
self.plot_params["x"] = node
# Set the text in the plotting area
split_text = self.plot_text.split("\n")
split_text[0] = f"x-axis: {node.path}"
split_text[2] = f"x-label: {node.path}"
self.plot_text = "\n".join(split_text)
def run_in_thread():
# Get the minimum and maximum values for the x and y axes
self.x_min, self.x_max = node.get_min_max()
self.assignx_thread = threading.Thread(target=run_in_thread)
self.assignx_thread.start()
return self.plot_text
def set_y_key(self, node):
"""
Set the y-axis key for the plot.
This will set the plot parameter for the y-axis key and update the
plotting text.
Args:
node (h5forest.h5_forest.Node):
The node to use for the y-axis.
"""
from h5forest.h5_forest import H5Forest
# Check the node is 1D
if node.ndim > 1:
H5Forest().print("Dataset must be 1D!")
return self.plot_text
# If we have any datasets already check we have a compatible shape
for key in self.plot_params:
if node.shape != self.plot_params[key].shape:
H5Forest().print("Datasets must have the same shape!")
return self.plot_text
# Set the plot parameter for the y-axis key
self.plot_params["y"] = node
# Set the text in the plotting area
split_text = self.plot_text.split("\n")
split_text[1] = f"y-axis: {node.path}"
split_text[3] = f"y-label: {node.path}"
self.plot_text = "\n".join(split_text)
def run_in_thread():
# Get the minimum and maximum values for the x and y axes
self.y_min, self.y_max = node.get_min_max()
self.assigny_thread = threading.Thread(target=run_in_thread)
self.assigny_thread.start()
return self.plot_text
def reset(self):
"""Reset the plotting text."""
self.plot_text = self.default_plot_text
self.count_density = None
self.sum_density = None
self.mean_density = None
self.xs = None
self.ys = None
self.plot_params = {}
return self.plot_text
def _plot(self, text):
"""
Compute a scatter plot of the datasets.
Args:
text (str):
The text to extract the plot parameters from.
"""
# Don't move on until the data is assigned
if self.assignx_thread is not None:
self.assignx_thread.join()
self.assignx_thread = None
if self.assigny_thread is not None:
self.assigny_thread.join()
self.assigny_thread = None
# Unpack the nodes
x_node = self.plot_params["x"]
y_node = self.plot_params["y"]
# Unpack the labels scales
split_text = text.split("\n")
x_label = split_text[2].split(": ")[1].strip()
y_label = split_text[3].split(": ")[1].strip()
x_scale = split_text[4].split(": ")[1].strip()
y_scale = split_text[5].split(": ")[1].strip()
marker = split_text[6].split(": ")[1].strip()
# Create the figure
self.fig = plt.figure(figsize=(3.5, 3.5))
self.ax = self.fig.add_subplot(111)
# Draw a grid and make sure its behind everything
self.ax.grid(True)
self.ax.set_axisbelow(True)
def run_in_thread():
# Now lets plot the data, if we have chunked data we will plot each
# chunk separately
if (
x_node.chunks == (1,)
and y_node.chunks == (1,)
or x_node.chunks != y_node.chunks
):
# Get the data
with h5py.File(x_node.filepath, "r") as hdf:
self.x_data = hdf[x_node.path][...]
self.y_data = hdf[y_node.path][...]
# Plot the data
self.ax.scatter(
self.x_data,
self.y_data,
marker=marker,
color="r",
)
else:
# Loop over chunks and plot each one
with h5py.File(x_node.filepath, "r") as hdf:
with ProgressBar(
total=x_node.size, description="Scatter"
) as pb:
for chunk_index in np.ndindex(*x_node.chunks):
# Get the current slice for each dimension
slices = tuple(
slice(
c_idx * c_size,
min((c_idx + 1) * c_size, s),
)
for c_idx, c_size, s in zip(
chunk_index, x_node.chunks, x_node.shape
)
)
# Get the data
x_data = hdf[x_node.path][slices]
y_data = hdf[y_node.path][slices]
# Plot the data
self.ax.scatter(
x_data,
y_data,
marker=marker,
color="r",
)
pb.advance(step=x_data.size)
# Set the labels
self.ax.set_xlabel(x_label)
self.ax.set_ylabel(y_label)
# Set the scale
self.ax.set_xscale(x_scale)
self.ax.set_yscale(y_scale)
self.plot_thread = threading.Thread(target=run_in_thread)
self.plot_thread.start()
self.plot_thread.join()
self.plot_thread = None
class HistogramPlotter(Plotter):
"""
The histogram plotting class.
Attributes:
plot_params (dict):
A dictionary to store the plot parameters.
default_plot_text (str):
The default text to display in the plotting TextArea.
plot_text (str):
The text to display in the plotting TextArea.
x_min (float):
The minimum value for the x-axis.
x_max (float):
The maximum value for the x-axis.
hist (np.ndarray):
The histogram.
xs (np.ndarray):
The x-axis grid.
widths (np.ndarray):
The bin widths.
"""
def __init__(self):
"""Initialise the histogram plotter."""
# Call the parent class
super().__init__()
# Define the default text for the plotting TextArea
self.default_plot_text = (
"data: <key>\n"
"nbins: 50\n"
"x-label: <label>\n"
"x-scale: linear\n"
"y-scale: linear\n"
)
# Define the text for the plotting TextArea
self.plot_text = self.default_plot_text
# Initialise containters for minima and maxima
self.x_min = None
self.x_max = None
# Initialise the scaling of each axis (we'll assume linear for now)
self.x_scale = "linear"
self.y_scale = "linear"
# Plotting data containers
self.hist = None
self.xs = None
self.widths = None
# Attributes for working with threads
self.assign_data_thread = None
self.compute_hist_thread = None
@error_handler
def set_data_key(self, node):
"""
Set the data key for the plot.
This will set the plot parameter for the data key and update the
plotting text.
Args:
node (h5forest.h5_forest.Node):
The node to use for the data.
"""
# Set the plot parameter for the data key
self.plot_params["data"] = node
# Set the text in the plotting area
split_text = self.plot_text.split("\n")
split_text[0] = f"data: {node.path}"
split_text[2] = f"x-label: {node.path}"
self.plot_text = "\n".join(split_text)
def run_in_thread():
# Get the minimum and maximum values for the x and y axes
self.x_min, self.x_max = node.get_min_max()
# Run the thread but don't move on until it's finished
self.assign_data_thread = threading.Thread(target=run_in_thread)
# Start the thread (we'll join later to ensure its finished when we
# need it)
self.assign_data_thread.start()
return self.plot_text
@error_handler
def compute_hist(self, text):
"""
Compute the histogram.
Args:
text (str):
The text to extract the plot parameters from.
"""
@error_handler
def run_in_thread():
"""Compute the histogram."""
# Unpack the node
node = self.plot_params["data"]
# Split the text
split_text = text.split("\n")
# Unpack the number of bins
nbins = int(split_text[1].split(": ")[1].strip())
# Unpack scales
x_scale = split_text[3].split(": ")[1].strip()
# We need to wait for the data assignment thread to finish
if self.assign_data_thread is not None:
self.assign_data_thread.join()
self.assign_data_thread = None
# If we got this far we're ready to go so force a redraw
get_app().invalidate()
# Define the bins
if x_scale == "log":
bins = np.logspace(
np.log10(self.x_min), np.log10(self.x_max), nbins + 1
)
else:
bins = np.linspace(self.x_min, self.x_max, nbins + 1)
self.widths = bins[1:] - bins[:-1]
self.xs = (bins[1:] + bins[:-1]) / 2
# Get the number of chunks
chunks = node.chunks if node.is_chunked else 1
# If neither node is not chunked we can just read and grid the data
if chunks == 1:
# Get the data
with h5py.File(node.filepath, "r") as hdf:
data = hdf[node.path][...]
# Compute the grid
self.hist, _ = np.histogram(data, bins=bins)
# Otherwise we need to read in the data chunk by chunk and add each
# chunks grid to the total grid
else:
# Initialise the grid
self.hist = np.zeros(nbins)
# Get the data
with h5py.File(node.filepath, "r") as hdf:
data = hdf[node.path]
# Loop over the chunks
with ProgressBar(
total=node.size, description="Hist"
) as pb:
for chunk_index in np.ndindex(*node.n_chunks):
# Get the current slice for each dimension
slices = tuple(
slice(
c_idx * c_size,
min((c_idx + 1) * c_size, s),
)
for c_idx, c_size, s in zip(
chunk_index, node.chunks, node.shape
)
)
# Get the chunk
chunk_data = data[slices]
# Compute the grid for the chunk
chunk_density, _ = np.histogram(
chunk_data, bins=bins
)
# Add it to the total
self.hist += chunk_density
pb.advance(step=chunk_data.size)
self.compute_hist_thread = threading.Thread(target=run_in_thread)
self.compute_hist_thread.start()
return self.plot_text
@error_handler
def _plot(self, text):
"""
Plot the histogram.
Args:
text (str):
The text to extract the plot parameters from.
"""
# Don't move on until the histogram is computed
self.compute_hist_thread.join()
self.compute_hist_thread = None
# Unpack the labels scales
split_text = text.split("\n")
x_label = split_text[2].split(": ")[1].strip()
x_scale = split_text[3].split(": ")[1].strip()
y_scale = split_text[4].split(": ")[1].strip()
# Create the figure
self.fig = plt.figure(figsize=(3.5, 3.5))
self.ax = self.fig.add_subplot(111)
# Draw a grid and make sure its behind everything
self.ax.grid(True)
self.ax.set_axisbelow(True)
# Draw the bars
self.ax.bar(self.xs, self.hist, width=self.widths)
# Set the labels
self.ax.set_xlabel(x_label)
self.ax.set_ylabel("$N$")
# Set the scale
self.ax.set_xscale(x_scale)
self.ax.set_yscale(y_scale)
def reset(self):
"""Reset the histogram."""
self.hist = None
self.xs = None
self.widths = None
self.plot_text = self.default_plot_text
self.fig = None
self.ax = None
self.plot_params = {}
return self.plot_text
| 19,169 | Python | .py | 496 | 26.366935 | 79 | 0.522313 | WillJRoper/h5forest | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,086 | utils.py | WillJRoper_h5forest/src/h5forest/utils.py | """A module containing utility functions and classes for the HDF5 viewer."""
import os
class DynamicTitle:
"""
A class to represent a dynamic title for the application.
This can be used to update any title in the application dynamically.
Attributes:
title (str): The title to display.
"""
def __init__(self, initial_title="Initial Title"):
"""
Initialise the title.
Args:
initial_title (str): The initial title to display.
"""
self.title = initial_title
def __call__(self):
"""Return the current title."""
return self.title
def update_title(self, new_title):
"""
Update the title.
Args:
new_title (str): The new title to display.
"""
self.title = new_title
def get_window_size():
"""
Get the terminal window size in lines and characters.
Returns:
tuple: The number of lines and characters in the terminal window.
"""
rows, columns = os.popen("stty size", "r").read().split()
return int(rows), int(columns)
| 1,112 | Python | .py | 34 | 25.647059 | 76 | 0.616901 | WillJRoper/h5forest | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,087 | h5_forest.py | WillJRoper_h5forest/src/h5forest/h5_forest.py | """The main application for the HDF5 Forest.
This application provides a CLI application for exploring HDF5 files. This is
enabled by the h5forest entry point set up when the package is installed.
Example Usage:
h5forest /path/to/file.hdf5
"""
import sys
from prompt_toolkit import Application
from prompt_toolkit.application import get_app
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.document import Document
from prompt_toolkit.filters import Condition
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout import ConditionalContainer, HSplit, VSplit
from prompt_toolkit.layout.containers import Window
from prompt_toolkit.layout.controls import BufferControl
from prompt_toolkit.layout.dimension import Dimension
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.mouse_events import MouseEventType
from prompt_toolkit.widgets import Frame, TextArea
from h5forest._version import __version__
from h5forest.bindings import (
_init_app_bindings,
_init_dataset_bindings,
_init_hist_bindings,
_init_jump_bindings,
_init_plot_bindings,
_init_tree_bindings,
_init_window_bindings,
)
from h5forest.plotting import HistogramPlotter, ScatterPlotter
from h5forest.styles import style
from h5forest.tree import Tree, TreeProcessor
from h5forest.utils import DynamicTitle, get_window_size
class H5Forest:
"""
The main application for the HDF5 Forest.
This class is a singleton. Any attempt to create a new instance will
return the existing instance. This makes the instance available globally.
Attributes:
tree (Tree):
The tree object representing the HDF5 file. Each Group or
Dataset in the HDF5 file is represented by a Node object.
flag_values_visible (bool):
A flag to control the visibility of the values text area.
_flag_normal_mode (bool):
A flag to control the normal mode of the application.
_flag_jump_mode (bool):
A flag to control the jump mode of the application.
_flag_dataset_mode (bool):
A flag to control the dataset mode of the application.
_flag_window_mode (bool):
A flag to control the window mode of the application.
jump_keys (VSplit):
The hotkeys for the jump mode.
dataset_keys (VSplit):
The hotkeys for the dataset mode.
window_keys (VSplit):
The hotkeys for the window mode.
kb (KeyBindings):
The keybindings for the application.
value_title (DynamicTitle):
A dynamic title for the values text area.
tree_content (TextArea):
The text area for the tree.
metadata_content (TextArea):
The text area for the metadata.
attributes_content (TextArea):
The text area for the attributes.
values_content (TextArea):
The text area for the values.
mini_buffer_content (TextArea):
The text area for the mini buffer.
hot_keys (VSplit):
The hotkeys for the application.
hotkeys_panel (HSplit):
The panel to display hotkeys.
prev_row (int):
The previous row the cursor was on. This means we can avoid
updating the metadata and attributes when the cursor hasn't moved.
tree_frame (Frame):
The frame for the tree text area.
metadata_frame (Frame):
The frame for the metadata text area.
attrs_frame (Frame):
The frame for the attributes text area.
values_frame (Frame):
The frame for the values text area.
mini_buffer (Frame):
The frame for the mini buffer text area.
layout (Layout):
The layout of the application.
user_input (str):
A container for the user input from the most recent mini buffer.
app (Application):
The main application object.
"""
# Singleton instance
_instance = None
def __new__(cls, *args, **kwargs):
"""
Create a new instance of the class.
This method ensures that only one instance of the class is created.
This method takes precendence over the usual __init__ method.
"""
if cls._instance is None:
cls._instance = super(H5Forest, cls).__new__(cls)
# Call the init method explicitly to initialize the instance
cls._instance._init(*args, **kwargs)
return cls._instance
def _init(self, hdf5_filepath):
"""
Initialise the application.
Constructs all the frames necessary for the app, builds the HDF5 tree
(populating only the root), and populates the Layout.
Args:
hdf5_filepath (str):
The path to the HDF5 file to be explored.
"""
# We do, set up the Tree with the file
# This will parse the root of the HDF5 file ready to populate the
# tree text area
self.tree = Tree(hdf5_filepath)
# Set up the tree processor
self.tree_processor = TreeProcessor(self.tree)
# Define flags we need to control behaviour
self.flag_values_visible = False
self.flag_progress_bar = False
self.flag_expanded_attrs = False
# Define the leader key mode flags
# NOTE: These must be unset when the leader mode is exited, and in
# _flag_normal_mode when the escape key is pressed
self._flag_normal_mode = True
self._flag_jump_mode = False
self._flag_dataset_mode = False
self._flag_window_mode = False
self._flag_plotting_mode = False
self._flag_hist_mode = False
# Set up the main app and tree bindings. The hot keys for these are
# combined into a single hot keys panel which will be shown whenever
# in normal mode
self.kb = KeyBindings()
app_keys = _init_app_bindings(self)
tree_keys = _init_tree_bindings(self)
self.hot_keys = VSplit([*tree_keys, *app_keys])
# Set up the rest of the keybindings and attach hot keys
self.dataset_keys = _init_dataset_bindings(self)
self.jump_keys = _init_jump_bindings(self)
self.window_keys = _init_window_bindings(self)
self.plot_keys = _init_plot_bindings(self)
self.hist_keys = _init_hist_bindings(self)
# Attributes for dynamic titles
self.value_title = DynamicTitle("Values")
# Attach the hexbin plotter
self.scatter_plotter = ScatterPlotter()
self.histogram_plotter = HistogramPlotter()
# Set up the text areas that will populate the layout
self.tree_buffer = None
self.tree_content = None
self.metadata_content = None
self.attributes_content = None
self.values_content = None
self.mini_buffer_content = None
self.progress_bar_content = None
self.plot_content = None
self.hist_content = None
self._init_text_areas()
# We need to hang on to some information to avoid over the
# top computations running in the background for threaded functions
self.prev_row = None
# Set up the layout
self.tree_frame = None
self.metadata_frame = None
self.attrs_frame = None
self.values_frame = None
self.plot_frame = None
self.hist_frame = None
self.hotkeys_panel = None
self.layout = None
self._init_layout()
# Intialise a container for user input
self.user_input = None
# With all that done we can set up the application
self.app = Application(
layout=self.layout,
key_bindings=self.kb,
full_screen=True,
mouse_support=True,
style=style,
)
def run(self):
"""Run the application."""
self.app.run()
@property
def current_row(self):
"""
Return the row under the cursor.
Returns:
int:
The row under the cursor.
"""
# Get the tree content
doc = self.tree_buffer.document
# Get the current cursor row
current_row = doc.cursor_position_row
return current_row
@property
def current_column(self):
"""
Return the column under the cursor.
Returns:
int:
The column under the cursor.
"""
# Get the tree content
doc = self.tree_buffer.document
# Get the current cursor row
current_col = doc.cursor_position_col
return current_col
@property
def current_position(self):
"""
Return the current position in the tree.
Returns:
int:
The current position in the tree.
"""
return self.tree_buffer.document.cursor_position
@property
def flag_normal_mode(self):
"""
Return the normal mode flag.
This accounts for whether we are awaiting user input in the mini
buffer.
Returns:
bool:
The flag for normal mode.
"""
return self._flag_normal_mode and not self.app.layout.has_focus(
self.mini_buffer_content
)
@property
def flag_jump_mode(self):
"""
Return the jump mode flag.
This accounts for whether we are awaiting user input in the mini
buffer.
Returns:
bool:
The flag for jump mode.
"""
return self._flag_jump_mode and not self.app.layout.has_focus(
self.mini_buffer_content
)
@property
def flag_dataset_mode(self):
"""
Return the dataset mode flag.
This accounts for whether we are awaiting user input in the mini
buffer.
Returns:
bool:
The flag for dataset mode.
"""
return self._flag_dataset_mode and not self.app.layout.has_focus(
self.mini_buffer_content
)
@property
def flag_window_mode(self):
"""
Return the window mode flag.
This accounts for whether we are awaiting user input in the mini
buffer.
Returns:
bool:
The flag for window mode.
"""
return self._flag_window_mode and not self.app.layout.has_focus(
self.mini_buffer_content
)
@property
def flag_plotting_mode(self):
"""
Return the plotting mode flag.
This accounts for whether we are awaiting user input in the mini
buffer.
Returns:
bool:
The flag for plotting mode.
"""
return self._flag_plotting_mode and not self.app.layout.has_focus(
self.mini_buffer_content
)
@property
def flag_hist_mode(self):
"""
Return the histogram mode flag.
This accounts for whether we are awaiting user input in the mini
buffer.
Returns:
bool:
The flag for histogram mode.
"""
return self._flag_hist_mode and not self.app.layout.has_focus(
self.mini_buffer_content
)
def return_to_normal_mode(self):
"""Return to normal mode."""
self._flag_normal_mode = True
self._flag_jump_mode = False
self._flag_dataset_mode = False
self._flag_window_mode = False
self._flag_plotting_mode = False
self._flag_hist_mode = False
    def _init_text_areas(self):
        """
        Initialise the content for each frame.

        Creates the tree buffer/window plus the TextAreas for metadata,
        attributes, values, the mini/input buffers, the progress bar, and
        the two plotting panes. Only widget construction happens here; the
        frames wrapping these widgets are built in _init_layout.
        """
        # Buffer for the tree content itself; read-only so the user can
        # navigate but not edit, with a callback tracking cursor movement
        self.tree_buffer = Buffer(
            on_cursor_position_changed=self.cursor_moved_action,
            read_only=True,
        )
        # Set the text of the buffer (bypass_readonly is required because
        # the buffer itself is read-only)
        self.tree_buffer.set_document(
            Document(
                text=self.tree.get_tree_text(),
                cursor_position=0,
            ),
            bypass_readonly=True,
        )
        self.tree_content = Window(
            content=BufferControl(
                buffer=self.tree_buffer,
                input_processors=[self.tree_processor],
                focusable=True,
            ),
        )
        # Clicking the tree shifts focus to it
        self.tree_content.content.mouse_handler = self._create_mouse_handler(
            self.tree_content
        )
        # Get the root node, we'll need it to populate the initial metadata
        # and attributes
        root_node = self.tree.root
        # Read-only panel showing the metadata of the node under the cursor
        self.metadata_content = TextArea(
            text="Metadata details here...",
            scrollbar=False,
            focusable=False,
            read_only=True,
        )
        self.metadata_content.text = root_node.get_meta_text()
        # Read-only panel showing the attributes of the node under the cursor
        self.attributes_content = TextArea(
            text="Attributes here...",
            read_only=True,
            scrollbar=True,
            focusable=True,
        )
        self.attributes_content.text = root_node.get_attr_text()
        self.attributes_content.control.mouse_handler = (
            self._create_mouse_handler(self.attributes_content)
        )
        # Panel for displaying a dataset's values
        self.values_content = TextArea(
            text="Values here...",
            read_only=True,
            scrollbar=True,
            focusable=False,
        )
        # Mini buffer: the message/input line at the bottom of the UI.
        # It is the only writable area (read_only=False) so the user can
        # type responses to prompts.
        self.mini_buffer_content = TextArea(
            text=f"Welcome to h5forest! (v{__version__})",
            scrollbar=False,
            focusable=True,
            read_only=False,
        )
        # Read-only line that shows the current prompt while awaiting input
        self.input_buffer_content = TextArea(
            text="",
            scrollbar=False,
            focusable=False,
            read_only=True,
        )
        # Line used by ProgressBar to render progress text
        self.progress_bar_content = TextArea(
            text="",
            scrollbar=False,
            focusable=False,
            read_only=True,
        )
        # Scatter-plot configuration/preview pane
        self.plot_content = TextArea(
            text=self.scatter_plotter.default_plot_text,
            scrollbar=True,
            focusable=True,
            read_only=True,
        )
        # Histogram configuration/preview pane
        self.hist_content = TextArea(
            text=self.histogram_plotter.default_plot_text,
            scrollbar=True,
            focusable=True,
            read_only=True,
        )
def set_cursor_position(self, text, new_cursor_pos):
"""
Set the cursor position in the tree.
This is a horrid workaround but seems to be the only way to do it
in prompt_toolkit. We reset the entire Document with the
tree content text and a new cursor position.
"""
# Create a new tree_content document with the updated cursor
# position
self.tree_buffer.set_document(
Document(text=text, cursor_position=new_cursor_pos),
bypass_readonly=True,
)
    def cursor_moved_action(self, event):
        """
        Apply changes when the cursor has been moved.

        This will update the metadata and attribute outputs to display
        what is currently under the cursor.

        Args:
            event:
                The prompt_toolkit buffer event that triggered the move
                (unused; the row is read from self.current_row instead).
        """
        # Get the current node
        try:
            node = self.tree.get_current_node(self.current_row)
            self.metadata_content.text = node.get_meta_text()
            self.attributes_content.text = node.get_attr_text()
        except IndexError:
            # The cursor ran past the last tree row: clamp it to the start
            # of the final line and clear the side panels
            self.set_cursor_position(
                self.tree.tree_text,
                new_cursor_pos=self.tree.length
                - len(self.tree.tree_text_split[self.tree.height - 1]),
            )
            self.metadata_content.text = ""
            self.attributes_content.text = ""
        # Force a redraw so the panels update immediately
        get_app().invalidate()
    def _init_layout(self):
        """
        Initialise the layout.

        Wraps the widgets built in _init_text_areas in Frames and
        ConditionalContainers (so panels appear/disappear with their mode
        flags) and assembles them into the application's Layout.
        """
        # Get the window size (rows is currently unused here)
        rows, columns = get_window_size()
        def tree_width():
            """Return the width of the tree."""
            # If values, hist, or plot are visible, the tree should fill half
            # the full width
            if self.flag_values_visible:
                return columns // 2
            elif self.flag_plotting_mode or len(self.scatter_plotter) > 0:
                return columns // 2
            elif self.flag_hist_mode or len(self.histogram_plotter) > 0:
                return columns // 2
            elif self.flag_expanded_attrs:
                return columns // 2
            else:
                return columns
        # Create each individual element of the UI before packaging it
        # all into the layout
        self.tree_frame = Frame(
            self.tree_content,
            title="HDF5 File Tree",
            width=tree_width,
        )
        # Set up the metadata and attributes frames with their shared height
        # function controlling their height (these are placed next to each
        # other in a VSplit below)
        self.metadata_frame = Frame(
            self.metadata_content,
            title="Metadata",
            height=10,
        )
        # Compact attributes panel, hidden when the expanded view is active
        self.attrs_frame = ConditionalContainer(
            Frame(
                self.attributes_content,
                title="Attributes",
                height=10,
                width=columns // 2,
            ),
            filter=Condition(lambda: not self.flag_expanded_attrs),
        )
        # Full-height attributes panel, shown only when expanded
        self.expanded_attrs_frame = ConditionalContainer(
            Frame(
                self.attributes_content,
                title="Attributes",
                width=columns // 2,
            ),
            filter=Condition(lambda: self.flag_expanded_attrs),
        )
        # Set up the values frame (this is where we'll display the values of
        # a dataset)
        self.values_frame = Frame(
            self.values_content,
            title=self.value_title,
        )
        # Set up the mini buffer and input buffer (these are where we'll
        # display messages to the user and accept input)
        self.mini_buffer = Frame(
            self.mini_buffer_content,
            height=3,
        )
        self.input_buffer = Frame(
            self.input_buffer_content,
            height=3,
        )
        # The prompt line is only visible while a prompt is pending
        self.input_buffer = ConditionalContainer(
            self.input_buffer,
            filter=Condition(lambda: len(self.input_buffer_content.text) > 0),
        )
        # Wrap those frames that need it in conditional containers
        self.values_frame = ConditionalContainer(
            content=self.values_frame,
            filter=Condition(lambda: self.flag_values_visible),
        )
        # Set up the hotkeys panel: exactly one mode's key hints is shown,
        # selected by the mode flags
        self.hotkeys_panel = HSplit(
            [
                ConditionalContainer(
                    content=self.hot_keys,
                    filter=Condition(lambda: self.flag_normal_mode),
                ),
                ConditionalContainer(
                    content=self.jump_keys,
                    filter=Condition(lambda: self.flag_jump_mode),
                ),
                ConditionalContainer(
                    content=self.dataset_keys,
                    filter=Condition(lambda: self.flag_dataset_mode),
                ),
                ConditionalContainer(
                    content=self.window_keys,
                    filter=Condition(lambda: self.flag_window_mode),
                ),
                ConditionalContainer(
                    content=self.plot_keys,
                    filter=Condition(lambda: self.flag_plotting_mode),
                ),
                ConditionalContainer(
                    content=self.hist_keys,
                    filter=Condition(lambda: self.flag_hist_mode),
                ),
            ]
        )
        # The whole hotkeys bar disappears when no mode is active
        self.hotkeys_frame = ConditionalContainer(
            Frame(self.hotkeys_panel, height=3),
            filter=Condition(
                lambda: self.flag_normal_mode
                or self.flag_jump_mode
                or self.flag_dataset_mode
                or self.flag_window_mode
                or self.flag_plotting_mode
                or self.flag_hist_mode
            ),
        )
        # Set up the plot frame
        self.plot_frame = ConditionalContainer(
            Frame(
                self.plot_content,
                title="Plotting",
            ),
            filter=Condition(
                lambda: self.flag_plotting_mode
                or len(self.scatter_plotter) > 0
            ),
        )
        # Set up the histogram frame
        self.hist_frame = ConditionalContainer(
            Frame(
                self.hist_content,
                title="Histogram",
            ),
            filter=Condition(
                lambda: self.flag_hist_mode or len(self.histogram_plotter) > 0
            ),
        )
        # Set up the progress bar and buffer conditional containers
        self.progress_frame = ConditionalContainer(
            Frame(self.progress_bar_content, height=3),
            filter=Condition(lambda: self.flag_progress_bar),
        )
        buffers = HSplit([self.input_buffer, self.mini_buffer])
        # Layout using split views: tree on the left, side panels stacked on
        # the right, then metadata/attributes, hotkeys, progress and buffers
        self.layout = Layout(
            HSplit(
                [
                    VSplit(
                        [
                            self.tree_frame,
                            HSplit(
                                [
                                    self.expanded_attrs_frame,
                                    self.values_frame,
                                    self.plot_frame,
                                    self.hist_frame,
                                ],
                                width=Dimension(min=0, max=columns // 2),
                            ),
                        ]
                    ),
                    VSplit(
                        [
                            self.metadata_frame,
                            self.attrs_frame,
                        ],
                    ),
                    self.hotkeys_frame,
                    self.progress_frame,
                    buffers,
                ]
            )
        )
def print(self, *args):
"""Print a single line to the mini buffer."""
args = [str(a) for a in args]
self.mini_buffer_content.text = " ".join(args)
self.app.invalidate()
    def input(self, prompt, callback, mini_buffer_text=""):
        """
        Accept input from the user.

        Note, this is pretty hacky! It will store the input into
        self.user_input which will then be processed by the passed
        callback function. The callback is invoked with no arguments and
        must safely process self.user_input, handling possible errors
        gracefully.

        Args:
            prompt (str):
                The string/s to print to the input buffer.
            callback (function):
                Zero-argument function that consumes self.user_input.
            mini_buffer_text (str):
                Text pre-filled into the mini buffer for the user to edit.

        NOTE(review): each call registers fresh Enter/Escape bindings on
        self.kb without removing earlier ones — they appear to accumulate
        over repeated prompts; confirm whether this is intended.
        """
        # Store the current focus
        current_focus = self.app.layout.current_window
        # Prepare to receive an input
        self.user_input = None
        # Set the input read-only text
        self.input_buffer_content.text = prompt
        # Pre-fill the mini buffer with the cursor at the end of the text
        self.mini_buffer_content.document = Document(
            mini_buffer_text, cursor_position=len(mini_buffer_text)
        )
        self.app.invalidate()
        # Shift focus to the mini buffer to await input
        self.shift_focus(self.mini_buffer_content)
        def on_enter(event):
            """Take the users input and process it."""
            # Read the text from the mini_buffer_content TextArea
            self.user_input = self.mini_buffer_content.text
            # Clear buffers_content TextArea after processing
            self.input_buffer_content.text = ""
            # Run the callback function
            callback()
        def on_esc(event):
            """Return to normal mode."""
            # Clear buffers_content TextArea after processing
            self.input_buffer_content.text = ""
            self.return_to_normal_mode()
            # Restore whatever had focus before the prompt
            self.shift_focus(current_focus)
        # Add a temporary keybinding for Enter specific to this input action
        # (only active while the mini buffer has focus)
        self.kb.add(
            "enter",
            filter=Condition(
                lambda: self.app.layout.has_focus(self.mini_buffer_content)
            ),
        )(on_enter)
        self.kb.add(
            "escape",
            filter=Condition(
                lambda: self.app.layout.has_focus(self.mini_buffer_content)
            ),
        )(on_esc)
        # Update the app
        get_app().invalidate()
def default_focus(self):
"""Shift the focus to the tree."""
self.app.layout.focus(self.tree_content)
def shift_focus(self, focused_area):
"""
Shift the focus to a different area.
Args:
focused_area (TextArea):
The text area to focus on.
"""
self.app.layout.focus(focused_area)
def _create_mouse_handler(self, content_area):
def mouse_handler(mouse_event):
if mouse_event.event_type == MouseEventType.MOUSE_UP:
get_app().layout.focus(content_area)
return mouse_handler
def main():
    """Initialise and run the application."""
    # Require exactly one argument: the HDF5 file to inspect
    if len(sys.argv) != 2:
        print("Usage: h5forest /path/to/file.hdf5")
        sys.exit(1)
    # Build the app around the supplied filepath and start the event loop
    filepath = sys.argv[1]
    H5Forest(filepath).run()
if __name__ == "__main__":
    main()
| 25,689 | Python | .py | 690 | 26.163768 | 78 | 0.575246 | WillJRoper/h5forest | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,088 | progress.py | WillJRoper_h5forest/src/h5forest/progress.py | """A module containing a custom progress bar.
This module contains a custom progress bar that can be used to display
progress in the application. This is needed because the prompt_toolkit
ProgressBar doesn't work within widget based applications. Additionally the
tqdm would require redirecting stdout and stderr which would break the entire
application.
Example usage:
with ProgressBar(100) as bar:
for i in range(100):
bar.advance()
"""
from h5forest.utils import get_window_size
class ProgressBar:
    """
    A class to represent a progress bar.

    This class is a custom progress bar with a simple approach to displaying
    progress in a prompt_toolkit TextArea. This is needed because the
    prompt_toolkit ProgressBar doesn't work within widget based applications.
    Additionally the tqdm would require redirecting stdout and stderr which
    would break the entire application.

    Attributes:
        total_steps (int):
            The total number of steps to complete.
        max_length (int):
            The maximum length of the progress bar.
        current_step (int):
            The current step in the progress bar.
        description (str):
            A short label shown at the end of the progress readout.
        forest (h5forest.h5_forest.H5Forest):
            The H5Forest instance to use for the progress bar.
        text_area (h5forest.h5_forest.TextArea):
            The text area to display the progress bar in.
    """

    def __init__(self, total, description=""):
        """
        Initialize the progress bar.

        Args:
            total (int):
                The total number of steps to complete.
            description (str):
                A short label shown alongside the progress readout.
        """
        # Imported here to avoid a circular import with h5forest.h5_forest
        from h5forest.h5_forest import H5Forest

        self.total_steps = total
        # Leave a margin so the bar fits inside its frame
        self.max_length = get_window_size()[1] - 4
        self.current_step = 0
        self.description = description
        # NOTE(review): assumes H5Forest() returns the already-running app
        # instance (singleton) — confirm against h5forest.h5_forest
        self.forest = H5Forest()
        self.text_area = self.forest.progress_bar_content
        # Make the progress bar visible in the UI
        self.forest.flag_progress_bar = True

    def update_progress(self, step):
        """
        Update the progress bar.

        Args:
            step (int):
                The number of steps to increment the progress bar by.
        """
        # Increment the step
        self.current_step += step
        # Define the text that'll appear at the end
        back = (
            f"{self.current_step/self.total_steps * 100:.2f}% "
            f"({self.current_step}/{self.total_steps})"
            f" [{self.description}]"
        )
        # How long can the bar be including the end text?
        bar_length = self.max_length - len(back) - 3
        # Work out how many characters should be filled
        filled_length = int(bar_length * self.current_step // self.total_steps)
        # Define the bar (FIX: the filled glyph was mojibake; restore the
        # full-block character)
        bar = "█" * filled_length + " " * (bar_length - filled_length)
        # Update the text area
        self.text_area.text = f"{bar} | {back}"
        self.forest.app.invalidate()

    def __enter__(self):
        """Begin the progress bar."""
        # Initialize the progress display
        self.update_progress(step=0)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        End the progress bar.

        Args:
            exc_type (type):
                The type of the exception.
            exc_val (Exception):
                The exception instance.
            exc_tb (Traceback):
                The traceback.
        """
        # Ensure the progress bar shows as complete
        self.update_progress(self.total_steps)
        # Reset the progress bar for potential reuse
        self.current_step = 0
        # Cleanup and final update if necessary
        self.forest.flag_progress_bar = False
        self.forest.app.invalidate()

    def advance(self, step=1):
        """Advance the progress bar."""
        self.update_progress(step=step)
| 3,827 | Python | .py | 97 | 30.659794 | 79 | 0.625202 | WillJRoper/h5forest | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,089 | node.py | WillJRoper_h5forest/src/h5forest/node.py | """This module contains the Node class for the HDF5 file viewer.
The Node class is used to represent a Group/Dataset in the HDF5 file. Nodes
can be linked via parent/child relationships to form a tree structure
representing the HDF5 file. A Node is lazy loaded, i.e. it only opens the
HDF5 file when it is expanded. This allows for fast loading of the tree
structure and only opening the HDF5 file when necessary.
Example usage:
node = Node("group", group, "file.h5")
print(node)
print(node.open_node())
print(node.get_attr_text())
print(node.get_value_text())
print(node.is_expanded)
"""
import h5py
import numpy as np
from h5forest.progress import ProgressBar
class Node:
    """
    A class to represent a node in the HDF5 file.

    This class is used to represent a Group/Dataset in the HDF5 file. Nodes
    can be linked via parent/child relationships to form a tree structure
    representing the HDF5 file. A Node is lazy loaded, i.e. it only opens the
    HDF5 file when it is expanded. This allows for fast loading of the tree
    structure and only opening the HDF5 file when necessary.

    Attributes:
        name (str):
            The name of the node.
        filepath (str):
            The filepath of the HDF5 file.
        path (str):
            The full path of the node.
        children (list):
            A list of the child nodes.
        parent (Node):
            The parent node.
        depth (int):
            The depth of the node in the tree.
        obj_type (type):
            The type of the object.
        is_group (bool):
            Whether the node is a group.
        is_dataset (bool):
            Whether the node is a dataset.
        nr_child (int):
            The number of children the node has.
        has_children (bool):
            Whether the node has children.
        nr_attrs (int):
            The number of attributes the node has.
        has_attrs (bool):
            Whether the node has attributes.
        attrs (dict):
            A dictionary of the attributes.
        shape (tuple):
            The shape of a dataset, None for a Group.
        datatype (str):
            The datatype of a dataset, None for a Group.
        compression (str):
            The compression type of a dataset, None for a Group.
        compression_opts (int):
            The compression options of a dataset, None for a Group.
        chunks (tuple):
            The chunk shape of a dataset, None for a Group.
        is_chunked (bool):
            Whether a dataset is chunked, None for a Group.
        n_chunks (tuple):
            The number of chunks along each dimension, None for a Group.
        fillvalue (int):
            The fillvalue of a dataset, None for a Group.
        nbytes (int):
            The number of bytes the dataset uses, None for a Group.
        _attr_text (str):
            The attribute text for the node.
        _meta_text (str):
            The metadata text for the node.
    """

    def __init__(self, name, obj, filepath, parent=None):
        """
        Initialise the Node.

        Args:
            name (str):
                The name (key) of the node.
            obj (h5py.Group/h5py.Dataset):
                The object the node represents.
            filepath (str):
                The filepath of the HDF5 file.
            parent (Node, optional):
                The parent node. Defaults to None.
        """
        # Store the name of the node
        self.name = name
        # Store the filepath
        self.filepath = filepath
        # Store the full path of the node
        if parent is not None:
            self.path = f"{parent.path}/{self.name}"
        else:
            self.path = "/"
        # For the print path we don't want to treat the root as a /
        if parent is not None:
            self.print_path = f"{parent.print_path}/{self.name}"
        else:
            self.print_path = f"/{self.name}"
        # Set up a container to hold the child nodes
        self.children = []
        # Store the parent node
        self.parent = parent
        # Compute the depth of the tree here
        self.depth = len(self.print_path.split("/")) - 2
        # Store the type of the obj
        self.obj_type = type(obj)
        # Store whether the node is a group or dataset
        if isinstance(obj, h5py.Group):
            self.is_group = True
            self.is_dataset = False
        else:
            self.is_group = False
            self.is_dataset = True
        # Does this node have children?
        if self.is_group:
            self.nr_child = len(obj.keys())
            self.has_children = bool(self.nr_child > 0)
        else:
            self.nr_child = 0
            self.has_children = False
        # Does this node have attributes?
        self.nr_attrs = len(obj.attrs.keys())
        self.has_attrs = bool(self.nr_attrs > 0)
        self.attrs = {key: obj.attrs[key] for key in obj.attrs.keys()}
        # For a dataset we can get a bunch of metadata to display
        if self.is_dataset:
            self.shape = obj.shape
            self.size = obj.size
            self.datatype = str(obj.dtype)
            self.compression = obj.compression
            self.compression_opts = obj.compression_opts
            # Contiguous datasets report chunks=None; treat the whole
            # dataset as one chunk in that case
            self.chunks = obj.chunks if obj.chunks is not None else obj.shape
            # FIX: a contiguous dataset (chunks=None) is NOT chunked. The
            # old comparison (obj.chunks != obj.shape) flagged it as
            # chunked because None != shape is always True.
            self.is_chunked = (
                obj.chunks is not None and obj.chunks != obj.shape
            )
            # FIX: this must be a tuple, not a generator expression. A
            # generator would be exhausted after the first np.ndindex
            # unpacking, silently breaking any later call to
            # get_min_max/get_mean/get_std. For an unchunked dataset this
            # is a tuple of ones (chunks == shape).
            self.n_chunks = tuple(
                int(np.ceil(s / c))
                for s, c in zip(self.shape, self.chunks)
            )
            self.fillvalue = obj.fillvalue
            self.nbytes = obj.nbytes
            self.ndim = obj.ndim
        else:
            self.shape = None
            self.size = None
            self.datatype = None
            self.compression = None
            self.compression_opts = None
            self.chunks = None
            self.is_chunked = None
            # FIX: previously groups never had this attribute at all
            self.n_chunks = None
            self.fillvalue = None
            self.nbytes = None
            self.ndim = None
        # Construct tree_text, attribute and metadata text to avoid computation
        self._attr_text = None
        self._meta_text = None
        # Define a flags for syntax highlighting
        self.is_under_cursor = False
        self.is_highlighted = False
        # Start with the root node open
        if self.depth == 0:
            self.open_node()

    @property
    def is_expanded(self):
        """
        Return whether the node expanded.

        This is a property that returns whether the node is expanded. A node
        is expanded if it has children and all of its children are expanded.

        Returns:
            bool:
                True if the children have been loaded (i.e. node is
                expanded).
        """
        return len(self.children) > 0

    def __repr__(self):
        """
        Return a string representation of the node.

        Returns:
            str:
                A string representation of the node.
        """
        return f"Node({self.path})"

    def to_tree_text(self):
        """
        Return a string representing the node for inclusion in the tree.

        This will return a one line string with the correct indentation and
        arrow representing the node in the tree.

        Returns:
            str:
                A string representing the node for inclusion in the tree text.
        """
        # Create the tree text (FIX: the arrow glyphs were mojibake;
        # restore the down/right-pointing triangles)
        if self.has_children:
            out = (
                f"{'    ' * self.depth}"
                f"{'▼' if self.is_expanded else '▶'} {self.name}"
            )
        else:
            out = f"{'    ' * self.depth} {self.name}"
        return out

    def open_node(self):
        """Open the node of the HDF5 file."""
        if self.is_dataset:
            raise ValueError("Cannot open a dataset as a group.")
        with h5py.File(self.filepath, "r") as hdf:
            if self.nr_child > 0:
                for key in hdf[self.path].keys():
                    child = hdf[f"{self.path}/{key}"]
                    self.children.append(
                        Node(key, child, self.filepath, parent=self)
                    )

    def close_node(self):
        """Close the node of the HDF5 file."""
        # Close all children
        for child in self.children:
            child.close_node()
        self.children = []

    def _get_meta_text(self):
        """
        Return the metadata text for the node.

        Returns:
            str:
                The metadata text for the node.
        """
        if self.is_group:
            text = f"Group: {self.print_path}\n"
        else:
            text = f"Dataset: {self.print_path}\n"
        # For a group there isn't much to display
        if self.is_group:
            text += f"N_children: {self.nr_child}\n"
            text += f"N_attrs: {self.nr_attrs}\n"
            text += f"Depth: {self.depth}\n"
        else:
            # For a dataset we can get a bunch of metadata to display
            text += f"Shape: {self.shape}\n"
            text += f"Datatype: {self.datatype}\n"
            if self.nbytes < 1000:
                text += f"Compressed Memory: {self.nbytes} B\n"
            elif self.nbytes < 10**6:
                text += f"Compressed Memory: {self.nbytes / 1000} KB\n"
            elif self.nbytes < 10**9:
                text += f"Compressed Memory: {self.nbytes / 10**6} MB\n"
            else:
                text += f"Compressed Memory: {self.nbytes / 10**9} GB\n"
            text += f"Compression: {self.compression}\n"
            text += f"Compression_opts: {self.compression_opts}\n"
            if self.chunks != self.shape:
                text += f"Chunks: {self.chunks}\n"
            if self.fillvalue is not None:
                text += f"Fillvalue: {self.fillvalue}\n"
        return text

    def get_meta_text(self):
        """
        Return the text containing the metadata.

        The first time this is called the private variable will be populated.

        Returns:
            str:
                The metadata text for the node (stored in a private
                attribute).
        """
        # Construct the metadata text if it hasn't been done already
        if self._meta_text is None:
            self._meta_text = self._get_meta_text()
        return self._meta_text

    def _get_attr_text(self):
        """
        Return the attribute text for the node.

        Returns:
            str:
                The attribute text for the node.
        """
        text = ""
        for key, value in self.attrs.items():
            text += f"{key}: {value}\n"
        return text

    def get_attr_text(self):
        """
        Return the text containing the attributes.

        The first time this is called the private variable will be populated.

        Returns:
            str:
                The attribute text for the node (stored in a private
                attribute).
        """
        # Construct the attribute text if it hasn't already been done
        if self._attr_text is None:
            self._attr_text = self._get_attr_text()
        return self._attr_text

    def get_value_text(self, start_index=None, end_index=None):
        """
        Return the value text for the node (optionally in a range).

        If this node is a Group then an empty string is returned and showing
        the value frame won't be triggered. Note that this should be handled
        outside but is included for safety.

        When no range is specified this method will try to limit to a sensible
        output size if necessary. If the Dataset is small enough we can
        just read everything and display it. If the dataset is too large
        we will only show a truncated view (first 100 elements or what will
        fit in the TextArea).

        When a range is stated that range of values will be read in and
        displayed.

        Returns:
            str:
                The value text for the node.
        """
        if self.is_group:
            return ""
        else:
            with h5py.File(self.filepath, "r") as hdf:
                dataset = hdf[self.path]
                # How many values roughly can we show maximally?
                max_count = 1000
                # If a range has been given follow that
                if start_index is not None:
                    data_subset = dataset[start_index:end_index]
                    truncated = (
                        f"\n\nShowing {len(data_subset)}/"
                        f"{dataset.size} elements ({start_index}-{end_index})."
                    )
                # If the dataset is small enough we can just read everything
                elif dataset.size < max_count:
                    data_subset = dataset[...]
                    truncated = ""
                else:
                    # Divide the max count by the number of dimensions
                    dim_count = max_count // dataset.ndim
                    # Work out how many elements we can read and display
                    slices = []
                    for dim in dataset.shape:
                        slices.append(slice(0, dim_count))
                    data_subset = dataset[tuple(slices)]
                    # Flag in the header we are only showing a truncated view
                    truncated = (
                        f"\n\nShowing {max_count}/{dataset.size} elements."
                    )
            # Combine path and data for output
            return str(data_subset) + truncated

    def get_min_max(self):
        """
        Return the minimum and maximum values of the dataset.

        This will return the global minimum and maximum values of the dataset.

        If the dataset is chunked we will use them to limit the memory load
        and read in the data in manageable chunks and compute the
        minimum and maximum values on the fly.

        Returns:
            tuple:
                The minimum and maximum values of the dataset.
        """
        if self.is_group:
            return None, None
        else:
            with h5py.File(self.filepath, "r") as hdf:
                dataset = hdf[self.path]
                # If the dataset isn't chunked just read it whole
                if not self.is_chunked:
                    arr = dataset[:]
                    return arr.min(), arr.max()
                # OK, we have chunks, lets use them to avoid loading too
                # much into memory at once.
                # Define the initial min and max
                min_val = np.inf
                max_val = -np.inf
                # Loop over all possible chunks
                with ProgressBar(total=self.size, description="Min/Max") as pb:
                    for chunk_index in np.ndindex(*self.n_chunks):
                        # Get the current slice for each dimension
                        slices = tuple(
                            slice(
                                c_idx * c_size,
                                min((c_idx + 1) * c_size, s),
                            )
                            for c_idx, c_size, s in zip(
                                chunk_index, self.chunks, self.shape
                            )
                        )
                        # Read the chunk data
                        chunk_data = dataset[slices]
                        # Get the minimum and maximum
                        min_val = np.min((min_val, np.min(chunk_data)))
                        max_val = np.max((max_val, np.max(chunk_data)))
                        pb.advance(step=chunk_data.size)
                return min_val, max_val

    def get_mean(self):
        """
        Return the mean of the dataset values.

        This will calculate the global mean of the dataset, ignoring any axes.

        If the dataset is chunked we will use them to limit the memory load
        and read in the data in manageable chunks and compute the mean value
        on the fly.

        Returns:
            float:
                The mean of the dataset values.
        """
        # NOTE: for a Group this returns (None, None) to match the
        # historical interface; callers only use it on Datasets.
        if self.is_group:
            return None, None
        else:
            with h5py.File(self.filepath, "r") as hdf:
                dataset = hdf[self.path]
                # If the dataset isn't chunked just read it whole
                if not self.is_chunked:
                    arr = dataset[:]
                    return arr.mean()
                # OK, we have chunks, lets use them to make sure we don't load
                # too much into memory.
                # Define initial sum
                val_sum = 0
                # Loop over all possible chunks
                with ProgressBar(total=self.size, description="Mean") as pb:
                    for chunk_index in np.ndindex(*self.n_chunks):
                        # Get the current slice for each dimension
                        slices = tuple(
                            slice(
                                c_idx * c_size,
                                min((c_idx + 1) * c_size, s),
                            )
                            for c_idx, c_size, s in zip(
                                chunk_index, self.chunks, self.shape
                            )
                        )
                        # Read the chunk data
                        chunk_data = dataset[slices]
                        # Get the sum
                        val_sum += np.sum(
                            chunk_data,
                        )
                        pb.advance(step=chunk_data.size)
                # Return the mean
                return val_sum / (self.size)

    def get_std(self):
        """
        Return the standard deviation of the dataset values.

        This will calculate the global standard deviation of the dataset,
        ignoring any axes.

        If the dataset is chunked we will use them to limit the memory load
        and read in the data in manageable chunks and compute the standard
        deviation on the fly.

        Returns:
            float:
                The standard deviation of the dataset values.
        """
        # NOTE: for a Group this returns (None, None) to match the
        # historical interface; callers only use it on Datasets.
        if self.is_group:
            return None, None
        else:
            with h5py.File(self.filepath, "r") as hdf:
                dataset = hdf[self.path]
                # If the dataset isn't chunked just read it whole
                if not self.is_chunked:
                    arr = dataset[:]
                    return arr.std()
                # OK, we have chunks, lets use them to make sure we don't load
                # too much into memory.
                # Define initial sums (values and squared values)
                val_sum = 0
                spu_val_sum = 0
                # Loop over all possible chunks
                with ProgressBar(total=self.size, description="StDev") as pb:
                    for chunk_index in np.ndindex(*self.n_chunks):
                        # Get the current slice for each dimension
                        slices = tuple(
                            slice(
                                c_idx * c_size,
                                min((c_idx + 1) * c_size, s),
                            )
                            for c_idx, c_size, s in zip(
                                chunk_index, self.chunks, self.shape
                            )
                        )
                        # Read the chunk data
                        chunk_data = dataset[slices]
                        # Get the sum and sum of squares
                        val_sum += np.sum(chunk_data)
                        spu_val_sum += np.sum(chunk_data**2)
                        pb.advance(step=chunk_data.size)
                # Return the standard deviation (E[x^2] - E[x]^2)
                return np.sqrt(
                    (spu_val_sum / self.size) - (val_sum / self.size) ** 2
                )
| 20,301 | Python | .py | 486 | 28.092593 | 79 | 0.519911 | WillJRoper/h5forest | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,090 | styles.py | WillJRoper_h5forest/src/h5forest/styles.py | from prompt_toolkit.styles import Style
# Application-wide prompt_toolkit style sheet: maps style class names (used
# in formatted-text tuples by the tree renderer) to rendering rules.
style = Style.from_dict(
    {
        "group": "bold",  # group nodes render bold
        "highlighted": "reverse",  # highlighted rows invert colours
        "group highlighted": "bold reverse",  # highlighted group nodes
        "under_cursor": "blink",  # the row under the cursor blinks
    }
)
| 217 | Python | .py | 9 | 18.555556 | 44 | 0.589372 | WillJRoper/h5forest | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,091 | tree_bindings.py | WillJRoper_h5forest/src/h5forest/bindings/tree_bindings.py | """A module containing the keybindings for the file tree.
This module contains the keybinding functions for the file tree. The functions
in this module should not be called directly, but are intended to be used by
the application.
"""
from prompt_toolkit.document import Document
from prompt_toolkit.filters import Condition
from prompt_toolkit.layout import ConditionalContainer
from prompt_toolkit.widgets import Label
from h5forest.errors import error_handler
def _init_tree_bindings(app):
    """
    Set up the keybindings for the basic UI.

    These are always active and are not dependent on any leader key.

    Args:
        app:
            The application object the bindings and hot keys are
            attached to.

    Returns:
        list:
            ConditionalContainer-wrapped labels for the hot keys bar.
    """

    @error_handler
    def move_up_ten(event):
        """Move up ten lines."""
        app.tree_buffer.cursor_up(10)

    @error_handler
    def move_down_ten(event):
        """Move down ten lines."""
        app.tree_buffer.cursor_down(10)

    @error_handler
    def expand_collapse_node(event):
        """
        Expand or collapse the node under the cursor.

        This uses lazy loading so only the group at the expansion point
        will be loaded.
        """
        # Get the current cursor row and position
        current_row = app.current_row
        current_pos = app.current_position
        # Get the node under the cursor
        node = app.tree.get_current_node(current_row)
        # If we have a dataset just do nothing
        if node.is_dataset:
            app.print(f"{node.path} is not a Group")
            return
        # If the node has no children, do nothing
        if not node.has_children:
            app.print(f"{node.path} has no children")
            return
        # If the node is already open, close it
        if node.is_expanded:
            app.tree_buffer.set_document(
                Document(
                    app.tree.close_node(node, current_row),
                    cursor_position=current_pos,
                ),
                bypass_readonly=True,
            )
        else:  # Otherwise, open it
            app.tree_buffer.set_document(
                Document(
                    app.tree.update_tree_text(node, current_row),
                    cursor_position=current_pos,
                ),
                bypass_readonly=True,
            )

    # Bind the functions (only active while the tree has focus)
    app.kb.add(
        "{",
        filter=Condition(lambda: app.app.layout.has_focus(app.tree_content)),
    )(move_up_ten)
    app.kb.add(
        "}",
        filter=Condition(lambda: app.app.layout.has_focus(app.tree_content)),
    )(move_down_ten)
    app.kb.add(
        "enter",
        filter=Condition(lambda: app.app.layout.has_focus(app.tree_content)),
    )(expand_collapse_node)

    # Add hot keys (FIX: the arrow glyph in the labels was mojibake;
    # restore the rightwards arrow)
    hot_keys = [
        ConditionalContainer(
            Label("Enter → Open Group"),
            filter=Condition(
                lambda: app.app.layout.has_focus(app.tree_content)
            ),
        ),
        ConditionalContainer(
            Label("{/} → Move Up/Down 10 Lines"),
            filter=Condition(
                lambda: app.app.layout.has_focus(app.tree_content)
            ),
        ),
    ]
    return hot_keys
| 3,125 | Python | .py | 89 | 25.921348 | 78 | 0.600663 | WillJRoper/h5forest | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,092 | hist_bindings.py | WillJRoper_h5forest/src/h5forest/bindings/hist_bindings.py | """A module containing the bindings for the histogram class.
This module contains the function that defines the bindings for the histogram
and attaches them to the application. It should not be used directly.
"""
from prompt_toolkit.document import Document
from prompt_toolkit.filters import Condition
from prompt_toolkit.layout import ConditionalContainer, VSplit
from prompt_toolkit.widgets import Label
from h5forest.errors import error_handler
def _init_hist_bindings(app):
    """Set up the bindings for histogram mode.

    Attaches the histogram-mode key handlers to ``app.kb`` and builds the
    hot-key bar describing them.

    Args:
        app: The running application instance whose key bindings, tree and
            histogram plotter are wired up here.

    Returns:
        VSplit: The hot-key labels to display while histogram mode is active.
    """

    def _ensure_hist_data():
        """Populate the histogram config from the node under the cursor.

        Shared by the "show" and "save" handlers (previously duplicated).
        Only does work the first time, i.e. when no plot parameters have
        been collected yet; afterwards the existing configuration is kept.

        Returns:
            bool: True if histogram data is ready to plot, False if the
                cursor was not on a Dataset.
        """
        # Don't update if we already have everything
        if len(app.histogram_plotter.plot_params) == 0:
            # Get the node under the cursor
            node = app.tree.get_current_node(app.current_row)

            # Exit if the node is not a Dataset
            if node.is_group:
                app.print(f"{node.path} is not a Dataset")
                return False

            # Set the text in the plotting area
            app.hist_content.text = app.histogram_plotter.set_data_key(node)

            # Compute the histogram
            app.hist_content.text = app.histogram_plotter.compute_hist(
                app.hist_content.text
            )
        return True

    @error_handler
    def edit_hist_entry(event):
        """Edit the histogram parameter under the cursor.

        "scale" rows are toggled between linear/log in place; any other row
        prompts the user for a replacement value.
        """
        # Get the current position and row in the plot content
        current_row = app.hist_content.document.cursor_position_row
        current_pos = app.hist_content.document.cursor_position

        # Get the current row text in the plot content split into
        # key and value
        split_line = app.histogram_plotter.get_row(current_row).split(": ")

        # Split the current plot content into lines
        split_text = app.hist_content.text.split("\n")

        # If we're on a toggle option (i.e. scaling is linear or log) lets
        # toggle it rather than edit it
        if "scale" in split_line[0]:
            if split_line[1].strip() == "linear":
                split_text[current_row] = (
                    f"{split_line[0]}: ".ljust(13) + "log"
                )
            else:
                split_text[current_row] = (
                    f"{split_line[0]}: ".ljust(13) + "linear"
                )
            app.hist_content.text = "\n".join(split_text)

            # And put the cursor back where it was
            app.hist_content.document = Document(
                text=app.hist_content.text, cursor_position=current_pos
            )
            app.histogram_plotter.plot_text = app.hist_content.text
            app.app.invalidate()
            return

        def edit_hist_entry_callback():
            """Store the user's new value for the selected parameter.

            NOTE: closes over split_text/current_row/current_pos captured
            when the key was pressed.
            """
            # Strip the user input
            user_input = app.user_input.strip()

            # And set the text here
            split_text[current_row] = (
                f"{split_line[0]}: ".ljust(13) + f"{user_input}"
            )

            # And display the new text
            app.hist_content.text = "\n".join(split_text)
            app.histogram_plotter.plot_text = app.hist_content.text

            # And shift focus back to the plot content
            app.shift_focus(app.hist_content)

            # And put the cursor back where it was
            app.hist_content.document = Document(
                text=app.hist_content.text, cursor_position=current_pos
            )

        # Get the modified entry from the user
        app.input(split_line[0], edit_hist_entry_callback)

    @error_handler
    def plot_hist(event):
        """Compute the histogram (if needed) and display it."""
        if _ensure_hist_data():
            # Get the plot
            app.histogram_plotter.plot_and_show(app.hist_content.text)

    @error_handler
    def save_hist(event):
        """Compute the histogram (if needed) and save it to disk."""
        if _ensure_hist_data():
            # Get the plot
            app.histogram_plotter.plot_and_save(app.hist_content.text)

    @error_handler
    def reset_hist(event):
        """Reset the histogram content and leave histogram mode."""
        app.hist_content.text = app.histogram_plotter.reset()
        app.return_to_normal_mode()
        app.default_focus()

    @error_handler
    def edit_hist(event):
        """Shift focus into the histogram config so it can be edited."""
        app.shift_focus(app.hist_content)

    def exit_edit_hist(event):
        """Exit the edit mode, returning focus to the tree."""
        app.shift_focus(app.tree_content)

    # Bind the functions
    app.kb.add(
        "enter",
        filter=Condition(lambda: app.app.layout.has_focus(app.hist_content)),
    )(edit_hist_entry)
    app.kb.add("h", filter=Condition(lambda: app.flag_hist_mode))(plot_hist)
    app.kb.add("H", filter=Condition(lambda: app.flag_hist_mode))(save_hist)
    app.kb.add("r", filter=Condition(lambda: app.flag_hist_mode))(reset_hist)
    # Editing is only available once some plot parameters exist
    app.kb.add(
        "e",
        filter=Condition(
            lambda: app.flag_hist_mode
            and len(app.histogram_plotter.plot_params) > 0
        ),
    )(edit_hist)
    app.kb.add(
        "q",
        filter=Condition(lambda: app.app.layout.has_focus(app.hist_content)),
    )(exit_edit_hist)

    # Add the hot keys
    hot_keys = VSplit(
        [
            ConditionalContainer(
                Label("e β Edit Config"),
                Condition(lambda: len(app.histogram_plotter.plot_params) > 0),
            ),
            ConditionalContainer(
                Label("Enter β Edit entry"),
                Condition(lambda: app.app.layout.has_focus(app.hist_content)),
            ),
            Label("h β Show Histogram"),
            Label("H β Save Histogram"),
            Label("r β Reset"),
            ConditionalContainer(
                Label("q β Exit Hist Mode"),
                Condition(
                    lambda: not app.app.layout.has_focus(app.hist_content)
                ),
            ),
            ConditionalContainer(
                Label("q β Exit Config"),
                Condition(lambda: app.app.layout.has_focus(app.hist_content)),
            ),
        ]
    )
    return hot_keys
| 6,573 | Python | .py | 157 | 31.203822 | 78 | 0.590388 | WillJRoper/h5forest | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,093 | dataset_bindings.py | WillJRoper_h5forest/src/h5forest/bindings/dataset_bindings.py | """A module containing the keybindings for the dataset mode.
This module contains the keybindings for the dataset mode. This mode is
activated when the user selects a dataset in the tree view. The dataset
mode allows the user to interact with the dataset, such as viewing the
values, getting the minimum and maximum, mean, and standard deviation.
The functions in this module should not be called directly, but are
intended to be used by the main application.
"""
import threading
from prompt_toolkit.filters import Condition
from prompt_toolkit.layout.containers import VSplit
from prompt_toolkit.widgets import Label
from h5forest.errors import error_handler
def _init_dataset_bindings(app):
    """Set up the keybindings for the dataset mode.

    Attaches handlers for viewing a dataset's values and computing summary
    statistics (min/max, mean, standard deviation) for the node under the
    cursor, and builds the hot-key bar for the mode.

    Args:
        app: The running application instance whose key bindings and tree
            are wired up here.

    Returns:
        VSplit: The hot-key labels to display while dataset mode is active.
    """

    def _current_dataset():
        """Return the Dataset node under the cursor, or None for a Group.

        Shared guard used by every handler below (previously duplicated);
        prints the complaint itself so callers can simply bail out.
        """
        node = app.tree.get_current_node(app.current_row)
        if node.is_group:
            app.print(f"{node.path} is not a Dataset")
            return None
        return node

    @error_handler
    def show_values(event):
        """
        Show the values of a dataset.

        This will truncate the value list if the array is large so as not
        to flood memory.
        """
        node = _current_dataset()
        if node is None:
            return

        # Get the value string
        text = node.get_value_text()

        # Ensure there's something to draw
        if len(text) == 0:
            return

        app.value_title.update_title(f"Values: {node.path}")

        # Update the text
        app.values_content.text = text

        # Flag that there are values to show
        app.flag_values_visible = True

        # Exit values mode
        app.return_to_normal_mode()

    @error_handler
    def show_values_in_range(event):
        """Show the values of a dataset in an index range."""
        node = _current_dataset()
        if node is None:
            return

        def values_in_range_callback():
            """Get the start and end indices from the user input."""
            # Parse the range
            string_values = tuple(
                [s.strip() for s in app.user_input.split("-")]
            )

            # Attempt to convert to an int
            try:
                start_index = int(string_values[0])
                end_index = int(string_values[1])
            except ValueError:
                app.print(
                    "Invalid input! Input must be a integers "
                    f"separated by -, not ({app.user_input})"
                )

                # Exit this attempt gracefully
                app.default_focus()
                app.return_to_normal_mode()
                return

            # Return focus to the tree
            app.default_focus()

            # Get the value string
            text = node.get_value_text(
                start_index=start_index, end_index=end_index
            )

            # Ensure there's something to draw
            if len(text) == 0:
                return

            app.value_title.update_title(f"Values: {node.path}")

            # Update the text
            app.values_content.text = text

            # Flag that there are values to show
            app.flag_values_visible = True

            # Exit values mode
            app.return_to_normal_mode()

        # Get the indices from the user
        app.input(
            "Enter the index range (seperated by -):",
            values_in_range_callback,
        )

    @error_handler
    def close_values(event):
        """Close the value pane."""
        app.flag_values_visible = False
        app.values_content.text = ""

        # Exit values mode
        app.return_to_normal_mode()

    def _make_stat_handler(describe):
        """Build a handler that computes a statistic off the UI thread.

        Factors out the thread-spawning boilerplate previously triplicated
        across the min/max, mean and std handlers.

        Args:
            describe: Callable mapping a Dataset node to the message to
                print once the (potentially slow) computation finishes.

        Returns:
            The decorated key handler.
        """

        @error_handler
        def handler(event):
            node = _current_dataset()
            if node is None:
                return

            def run_in_thread():
                # Compute the statistic (may read a lot of data)
                message = describe(node)

                # Print the result on the main thread
                app.app.loop.call_soon_threadsafe(app.print, message)

                # Exit values mode
                app.return_to_normal_mode()

            # Start the operation in a new thread so the UI stays responsive
            threading.Thread(target=run_in_thread, daemon=True).start()

        return handler

    def _describe_min_max(node):
        """Format the minimum/maximum message for a node."""
        vmin, vmax = node.get_min_max()
        return f"{node.path}: Minimum = {vmin}, Maximum = {vmax}"

    def _describe_mean(node):
        """Format the mean message for a node."""
        return f"{node.path}: Mean = {node.get_mean()}"

    def _describe_std(node):
        """Format the standard deviation message for a node."""
        return f"{node.path}: Standard Deviation = {node.get_std()}"

    minimum_maximum = _make_stat_handler(_describe_min_max)
    mean = _make_stat_handler(_describe_mean)
    std = _make_stat_handler(_describe_std)

    # Bind the functions
    app.kb.add("v", filter=Condition(lambda: app.flag_dataset_mode))(
        show_values
    )
    app.kb.add("V", filter=Condition(lambda: app.flag_dataset_mode))(
        show_values_in_range
    )
    app.kb.add("c", filter=Condition(lambda: app.flag_dataset_mode))(
        close_values
    )
    app.kb.add("m", filter=Condition(lambda: app.flag_dataset_mode))(
        minimum_maximum
    )
    app.kb.add("M", filter=Condition(lambda: app.flag_dataset_mode))(mean)
    app.kb.add("s", filter=Condition(lambda: app.flag_dataset_mode))(std)

    # Add the hot keys
    hot_keys = VSplit(
        [
            Label("v β Show Values"),
            Label("V β Show Values In Range"),
            Label("m β Get Minimum and Maximum"),
            Label("M β Get Mean"),
            Label("s β Get Standard Deviation"),
            Label("c β Close Value View"),
            Label("q β Exit Dataset Mode"),
        ]
    )
    return hot_keys
| 7,268 | Python | .py | 187 | 28.534759 | 74 | 0.579818 | WillJRoper/h5forest | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,094 | __init__.py | WillJRoper_h5forest/src/h5forest/bindings/__init__.py | from h5forest.bindings.hist_bindings import _init_hist_bindings
from h5forest.bindings.plot_bindings import _init_plot_bindings
from h5forest.bindings.window_bindings import _init_window_bindings
from h5forest.bindings.jump_bindings import _init_jump_bindings
from h5forest.bindings.dataset_bindings import _init_dataset_bindings
from h5forest.bindings.tree_bindings import _init_tree_bindings
from h5forest.bindings.bindings import _init_app_bindings
| 452 | Python | .py | 7 | 63.571429 | 69 | 0.860674 | WillJRoper/h5forest | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,095 | plot_bindings.py | WillJRoper_h5forest/src/h5forest/bindings/plot_bindings.py | """This module contains the keybindings for the plotting mode.
The functions in this module are used to define the keybindings for the
plotting mode and attach them to the application. It should not be used
directly.
"""
from prompt_toolkit.document import Document
from prompt_toolkit.filters import Condition
from prompt_toolkit.layout import ConditionalContainer, VSplit
from prompt_toolkit.widgets import Label
from h5forest.errors import error_handler
def _init_plot_bindings(app):
    """Set up the keybindings for the plotting (scatter) mode.

    Attaches the plotting-mode key handlers to ``app.kb`` and returns the
    hot-key bar describing them.  Handlers mutate ``app.plot_content`` and
    ``app.scatter_plotter`` in lockstep; their relative order matters.
    """

    @error_handler
    def select_x(event):
        """Select the dataset under the cursor as the x-axis."""
        # Get the node under the cursor
        node = app.tree.get_current_node(app.current_row)

        # Exit if the node is not a Dataset
        if node.is_group:
            app.print(f"{node.path} is not a Dataset")
            return

        # Set the text in the plotting area
        app.plot_content.text = app.scatter_plotter.set_x_key(node)

    @error_handler
    def select_y(event):
        """Select the dataset under the cursor as the y-axis."""
        # Get the node under the cursor
        node = app.tree.get_current_node(app.current_row)

        # Exit if the node is not a Dataset
        if node.is_group:
            app.print(f"{node.path} is not a Dataset")
            return

        # Set the text in the plotting area
        app.plot_content.text = app.scatter_plotter.set_y_key(node)

    @error_handler
    def edit_plot_entry(event):
        """Edit the plot parameter under the cursor.

        "scale" rows are toggled between linear/log immediately; any other
        row prompts the user for a replacement value via ``app.input``.
        """
        # Get the current position and row in the plot content
        current_row = app.plot_content.document.cursor_position_row
        current_pos = app.plot_content.document.cursor_position

        # Get the current row text in the plot content split into
        # key and value
        split_line = app.scatter_plotter.get_row(current_row).split(": ")

        # Split the current plot content into lines
        split_text = app.plot_content.text.split("\n")

        # If we're on a toggle option (i.e. scaling is linear or log) lets
        # toggle it rather than edit it
        if "scale" in split_line[0]:
            if split_line[1].strip() == "linear":
                split_text[current_row] = (
                    f"{split_line[0]}: ".ljust(13) + "log"
                )
            else:
                split_text[current_row] = (
                    f"{split_line[0]}: ".ljust(13) + "linear"
                )
            app.plot_content.text = "\n".join(split_text)

            # And put the cursor back where it was
            app.plot_content.document = Document(
                text=app.plot_content.text, cursor_position=current_pos
            )
            app.scatter_plotter.plot_text = app.plot_content.text
            app.app.invalidate()
            return

        def edit_plot_entry_callback():
            """Apply the user's new value for the selected parameter.

            NOTE: closes over split_text/current_row/current_pos captured
            at the moment the key was pressed, and runs later when the
            user submits input.
            """
            # Strip the user input
            user_input = app.user_input.strip()

            # And set the text here
            split_text[current_row] = (
                f"{split_line[0]}: ".ljust(13) + f"{user_input}"
            )

            # And display the new text
            app.plot_content.text = "\n".join(split_text)
            app.scatter_plotter.plot_text = app.plot_content.text

            # And shift focus back to the plot content
            app.shift_focus(app.plot_content)

            # And put the cursor back where it was
            app.plot_content.document = Document(
                text=app.plot_content.text, cursor_position=current_pos
            )

        # Get the modified entry from the user
        app.input(split_line[0], edit_plot_entry_callback)

    @error_handler
    def plot_scatter(event):
        """Render and show the configured scatter plot."""
        # Make the plot
        app.scatter_plotter.plot_and_show(app.plot_content.text)
        app.return_to_normal_mode()
        app.default_focus()

    @error_handler
    def save_scatter(event):
        """Save the configured scatter plot to disk."""
        app.scatter_plotter.plot_and_save(app.plot_content.text)

    @error_handler
    def reset(event):
        """Reset the plot content and leave plotting mode."""
        app.plot_content.text = app.scatter_plotter.reset()
        app.app.invalidate()
        app.return_to_normal_mode()
        app.default_focus()

    @error_handler
    def edit_plot(event):
        """Shift focus into the plot config so it can be edited."""
        app.shift_focus(app.plot_content)

    def exit_edit_plot(event):
        """Exit edit plot mode, returning focus to the tree."""
        app.shift_focus(app.tree_content)

    # Bind the functions
    app.kb.add("x", filter=Condition(lambda: app.flag_plotting_mode))(select_x)
    app.kb.add("y", filter=Condition(lambda: app.flag_plotting_mode))(select_y)
    app.kb.add(
        "enter",
        filter=Condition(lambda: app.app.layout.has_focus(app.plot_content)),
    )(edit_plot_entry)
    app.kb.add("p", filter=Condition(lambda: app.flag_plotting_mode))(
        plot_scatter
    )
    app.kb.add("P", filter=Condition(lambda: app.flag_plotting_mode))(
        save_scatter
    )
    app.kb.add("r", filter=Condition(lambda: app.flag_plotting_mode))(reset)
    # Editing is only available once some plot parameters exist
    app.kb.add(
        "e",
        filter=Condition(
            lambda: app.flag_plotting_mode
            and len(app.scatter_plotter.plot_params) > 0
        ),
    )(edit_plot)
    app.kb.add(
        "q",
        filter=Condition(lambda: app.app.layout.has_focus(app.plot_content)),
    )(exit_edit_plot)

    # Add the hot keys (labels appear/disappear with the relevant state)
    hot_keys = VSplit(
        [
            ConditionalContainer(
                Label("e β Edit Config"),
                Condition(lambda: len(app.scatter_plotter.plot_params) > 0),
            ),
            ConditionalContainer(
                Label("Enter β Edit entry"),
                Condition(lambda: app.app.layout.has_focus(app.plot_content)),
            ),
            ConditionalContainer(
                Label("x β Select x-axis"),
                filter=Condition(
                    lambda: "x" not in app.scatter_plotter.plot_params
                ),
            ),
            ConditionalContainer(
                Label("y β Select y-axis"),
                filter=Condition(
                    lambda: "y" not in app.scatter_plotter.plot_params
                ),
            ),
            ConditionalContainer(
                Label("p β Plot"),
                Condition(lambda: len(app.scatter_plotter) > 0),
            ),
            ConditionalContainer(
                Label("P β Save Plot"),
                Condition(lambda: len(app.scatter_plotter) > 0),
            ),
            Label("r β Reset"),
            ConditionalContainer(
                Label("q β Exit Plotting Mode"),
                Condition(
                    lambda: not app.app.layout.has_focus(app.plot_content)
                ),
            ),
            ConditionalContainer(
                Label("q β Exit Config"),
                Condition(lambda: app.app.layout.has_focus(app.plot_content)),
            ),
        ]
    )
    return hot_keys
| 7,090 | Python | .py | 178 | 29.252809 | 79 | 0.579385 | WillJRoper/h5forest | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,096 | window_bindings.py | WillJRoper_h5forest/src/h5forest/bindings/window_bindings.py | """A module for binding window events to functions.
This module contains the functions for binding window events to functions. This
should not be used directly, but instead provides the functions for the
application.
"""
from prompt_toolkit.filters import Condition
from prompt_toolkit.layout.containers import ConditionalContainer, VSplit
from prompt_toolkit.widgets import Label
from h5forest.errors import error_handler
def _init_window_bindings(app):
    """Set up the keybindings for the window mode.

    Window mode moves keyboard focus between the application's panes.

    Args:
        app: The running application instance whose key bindings and panes
            are wired up here.

    Returns:
        VSplit: The hot-key labels to display while window mode is active.
    """

    @error_handler
    def move_tree(event):
        """Move focus to the tree."""
        app.shift_focus(app.tree_content)
        app.return_to_normal_mode()

    @error_handler
    def move_attr(event):
        """Move focus to the attributes."""
        app.shift_focus(app.attributes_content)
        app.return_to_normal_mode()

    @error_handler
    def move_values(event):
        """Move focus to values."""
        app.shift_focus(app.values_content)
        app.return_to_normal_mode()

    @error_handler
    def move_plot(event):
        """Move focus to the plot."""
        app.shift_focus(app.plot_content)

        # Plotting is special case where we also want to enter plotting
        # mode
        app._flag_normal_mode = False
        app._flag_window_mode = False
        app._flag_plotting_mode = True

    @error_handler
    def move_hist(event):
        """Move focus to the histogram."""
        app.shift_focus(app.hist_content)

        # Like plotting, moving to the histogram also enters hist mode
        app._flag_normal_mode = False
        app._flag_window_mode = False
        app._flag_hist_mode = True

    @error_handler
    def move_to_default(event):
        """
        Move focus to the default area.

        This is the tree content.
        """
        app.default_focus()
        app.return_to_normal_mode()

    # Bind the functions
    app.kb.add("t", filter=Condition(lambda: app.flag_window_mode))(move_tree)
    app.kb.add("a", filter=Condition(lambda: app.flag_window_mode))(move_attr)
    # Values pane is only reachable while it is visible
    app.kb.add(
        "v",
        filter=Condition(
            lambda: app.flag_window_mode and app.flag_values_visible
        ),
    )(move_values)
    app.kb.add("p", filter=Condition(lambda: app.flag_window_mode))(move_plot)
    app.kb.add("h", filter=Condition(lambda: app.flag_window_mode))(move_hist)
    app.kb.add("escape")(move_to_default)

    # Add the hot keys (each label hides while its pane already has focus)
    hot_keys = VSplit(
        [
            ConditionalContainer(
                Label("t β Move to Tree"),
                Condition(
                    lambda: not app.app.layout.has_focus(app.tree_content)
                ),
            ),
            ConditionalContainer(
                Label("a β Move to Attributes"),
                Condition(
                    lambda: not app.app.layout.has_focus(
                        app.attributes_content
                    )
                ),
            ),
            ConditionalContainer(
                Label("v β Move to Values"),
                Condition(
                    lambda: app.flag_values_visible
                    and not app.app.layout.has_focus(app.values_content)
                ),
            ),
            ConditionalContainer(
                Label("p β Move to Plot"),
                Condition(
                    lambda: not app.app.layout.has_focus(app.plot_content)
                ),
            ),
            # FIX: the "h" binding existed but was never advertised in the
            # hot-key bar; mirror the other pane labels.
            ConditionalContainer(
                Label("h β Move to Histogram"),
                Condition(
                    lambda: not app.app.layout.has_focus(app.hist_content)
                ),
            ),
            Label("q β Exit Window Mode"),
        ]
    )
    return hot_keys
| 3,530 | Python | .py | 98 | 26.295918 | 79 | 0.587665 | WillJRoper/h5forest | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,097 | jump_bindings.py | WillJRoper_h5forest/src/h5forest/bindings/jump_bindings.py | """This module contains the keybindings for the jump mode.
The jump mode is a mode that allows the user to quickly navigate the tree using
a set of keybindings. This is useful for large trees where the user knows the
name of the node they want to jump to.
This module defines the functions for binding jump mode events to functions.
This should not be used directly, but instead provides the functions for the
application.
"""
from prompt_toolkit.filters import Condition
from prompt_toolkit.layout import VSplit
from prompt_toolkit.widgets import Label
from h5forest.errors import error_handler
def _init_jump_bindings(app):
    """Set up the keybindings for the jump mode.

    Jump mode provides fast cursor navigation through the tree.  The
    handlers convert between rows and absolute cursor positions by summing
    line lengths (the ``+ 1`` accounts for each newline character).

    Args:
        app: The running application instance whose key bindings and tree
            are wired up here.

    Returns:
        VSplit: The hot-key labels to display while jump mode is active.
    """

    @error_handler
    def jump_to_top(event):
        """Jump to the top of the tree."""
        app.set_cursor_position(app.tree.tree_text, new_cursor_pos=0)

        # Exit jump mode
        app.return_to_normal_mode()

    @error_handler
    def jump_to_bottom(event):
        """Jump to the bottom of the tree."""
        app.set_cursor_position(
            app.tree.tree_text, new_cursor_pos=app.tree.length
        )

        # Exit jump mode
        app.return_to_normal_mode()

    @error_handler
    def jump_to_parent(event):
        """Jump to the parent of the current node."""
        # Get the current node
        node = app.tree.get_current_node(app.current_row)

        # Get the node's parent
        parent = node.parent

        # if we're at the top, do nothing
        if parent is None:
            app.print(f"{node.path} is a root Group!")
            app.return_to_normal_mode()
            return

        # Get position of the first character in this row
        pos = app.current_position - app.current_column

        # Loop backwards until we hit the parent
        for row in range(app.current_row - 1, -1, -1):
            # Compute the position at this row (+1 for the newline)
            pos -= len(app.tree.tree_text_split[row]) + 1

            # If we are at the parent stop
            if app.tree.get_current_node(row) is parent:
                break

        # Safety check, avoid doing something stupid
        if pos < 0:
            pos = 0

        # Move the cursor
        app.set_cursor_position(app.tree.tree_text, pos)
        app.return_to_normal_mode()

    @error_handler
    def jump_to_next(event):
        """Jump to the next node at the level above this node's depth."""
        # Get the current node
        node = app.tree.get_current_node(app.current_row)

        # Get the depth of this node and the target depth
        depth = node.depth
        target_depth = depth - 1 if depth > 0 else 0

        # Get the position of the first character in this row
        pos = app.current_position - app.current_column

        # Do nothing if we are at the end
        if app.current_row == app.tree.height - 1:
            app.return_to_normal_mode()
            return

        # Loop forwards until we hit the next node at the level above
        # this node's depth. If at the root just move to the next
        # root group.
        for row in range(app.current_row, app.tree.height):
            # Compute the position at this row (+1 for the newline)
            pos += len(app.tree.tree_text_split[row]) + 1

            # Ensure we don't over shoot.
            # FIX: the last valid row index is height - 1, so bail before
            # row + 1 would index past it (the old `> app.tree.height`
            # guard could never fire and allowed an out-of-range lookup).
            if row + 1 > app.tree.height - 1:
                app.return_to_normal_mode()
                return

            # If we are at the next node stop
            if app.tree.get_current_node(row + 1).depth == target_depth:
                break

        # Move the cursor
        app.set_cursor_position(app.tree.tree_text, pos)
        app.return_to_normal_mode()

    @error_handler
    def jump_to_key(event):
        """Jump to next key containing user input."""

        def jump_to_key_callback():
            """Jump to next key containing user input."""
            # Unpack user input
            key = app.user_input.strip()

            # Get the position of the first character in this row
            pos = app.current_position - app.current_column

            # Loop over keys until we find a key containing the
            # user input
            for row in range(app.current_row, app.tree.height):
                # Compute the position at this row (+1 for the newline)
                pos += len(app.tree.tree_text_split[row]) + 1

                # Ensure we don't over shoot
                if row + 1 > app.tree.height - 1:
                    app.print("Couldn't find matching key!")
                    app.default_focus()
                    app.return_to_normal_mode()
                    return

                # If we are at the next node stop
                if key in app.tree.get_current_node(row + 1).name:
                    break

            # Return to normal
            app.default_focus()
            app.return_to_normal_mode()

            # Move the cursor
            app.set_cursor_position(app.tree.tree_text, pos)

        # Get the indices from the user
        app.input(
            "Jump to next key containing:",
            jump_to_key_callback,
        )

    # Bind the functions
    app.kb.add("t", filter=Condition(lambda: app.flag_jump_mode))(jump_to_top)
    app.kb.add("b", filter=Condition(lambda: app.flag_jump_mode))(
        jump_to_bottom
    )
    app.kb.add("p", filter=Condition(lambda: app.flag_jump_mode))(
        jump_to_parent
    )
    app.kb.add("n", filter=Condition(lambda: app.flag_jump_mode))(jump_to_next)
    app.kb.add("k", filter=Condition(lambda: app.flag_jump_mode))(jump_to_key)

    # Add the hot keys
    hot_keys = VSplit(
        [
            Label("t β Jump to Top"),
            Label("b β Jump to Bottom"),
            Label("p β Jump to Parent"),
            Label("n β Jump to Next"),
            Label("k β Jump to Next Key"),
            Label("q β Exit Jump Mode"),
        ]
    )
    return hot_keys
| 5,827 | Python | .py | 140 | 31.571429 | 79 | 0.593329 | WillJRoper/h5forest | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,098 | bindings.py | WillJRoper_h5forest/src/h5forest/bindings/bindings.py | """A module containing the keybindings for the basic UI.
This module contains the keybindings for the basic UI. These keybindings are
always active and are not dependent on any leader key. The functions in this
module should not be called directly, but are intended to be used by the main
application.
"""
from prompt_toolkit.filters import Condition
from prompt_toolkit.layout import ConditionalContainer
from prompt_toolkit.widgets import Label
from h5forest.errors import error_handler
def _init_app_bindings(app):
    """
    Set up the keybindings for the basic UI.

    This includes basic closing functionality and leader keys for different
    modes. These are always active and are not dependent on any leader key.
    """

    def exit_app(event):
        """Exit the app."""
        event.app.exit()

    def _leader(mode_flag):
        """Build a handler that leaves normal mode and raises *mode_flag*."""

        def enter_mode(event):
            app._flag_normal_mode = False
            setattr(app, mode_flag, True)

        return enter_mode

    @error_handler
    def exit_leader_mode(event):
        """Exit leader mode."""
        app.return_to_normal_mode()
        app.default_focus()
        event.app.invalidate()

    def toggle_attributes(event):
        """Flip the attributes pane between expanded and collapsed."""
        app.flag_expanded_attrs = not app.flag_expanded_attrs
        event.app.invalidate()

    # Reusable filter: only fire these bindings while in normal mode
    in_normal_mode = Condition(lambda: app.flag_normal_mode)

    # Quitting
    app.kb.add("q", filter=in_normal_mode)(exit_app)
    app.kb.add("c-q")(exit_app)

    # Leader keys: each enters its own mode from normal mode
    for key, mode_flag in (
        ("j", "_flag_jump_mode"),
        ("d", "_flag_dataset_mode"),
        ("w", "_flag_window_mode"),
        ("p", "_flag_plotting_mode"),
        ("h", "_flag_hist_mode"),
    ):
        app.kb.add(key, filter=in_normal_mode)(_leader(mode_flag))

    # "q" outside normal mode backs out of the current leader mode
    app.kb.add("q", filter=Condition(lambda: not app.flag_normal_mode))(
        exit_leader_mode
    )

    # "A" toggles the attributes pane (normal mode only)
    app.kb.add("A", filter=in_normal_mode)(toggle_attributes)

    # Add the hot keys
    hot_keys = [
        ConditionalContainer(
            Label("A β Expand Attributes"),
            filter=Condition(lambda: not app.flag_expanded_attrs),
        ),
        ConditionalContainer(
            Label("A β Shrink Attributes"),
            filter=Condition(lambda: app.flag_expanded_attrs),
        ),
        Label("d β Dataset Mode"),
        Label("h β Hist Mode"),
        Label("j β Jump Mode"),
        Label("p β Plotting Mode"),
        Label("w β Window Mode"),
        Label("q β Exit"),
    ]

    return hot_keys
| 3,657 | Python | .py | 104 | 27.913462 | 77 | 0.633134 | WillJRoper/h5forest | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,288,099 | r.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/r.py | ...
for path, im, im0s, vid_cap, s in dataset:
# s = ''
t1 = time_sync()
with dt[0]:
im = torch.from_numpy(im).to(model.device)
im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
im /= 255 # 0 - 255 to 0.0 - 1.0
if len(im.shape) == 3:
im = im[None] # expand for batch dim
t2 = time_sync()
sdt[0] += t2 - t1
# Inference
with dt[1]:
visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
pred = model(im, augment=augment, visualize=visualize)
pred = pred[0][1]
t3 = time_sync()
sdt[1] += t3 - t2
# Apply NMS
with dt[2]:
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
sdt[2] += time_sync() - t3
# Second-stage classifier (optional)
# pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
seen += 1
if webcam: # bs >= 1
p, im0, _ = path[i], im0s[i].copy(), dataset.count
p = Path(p) # to Path
s += f'{i}: '
txt_file_name = p.stem + f'_{i}' # Unique text file name
save_path = str(save_dir / p.stem) + f'_{i}' # Unique video file name
else:
p, im0, _ = path, im0s.copy(), getattr(dataset, 'frame', 0)
p = Path(p) # to Path
# video file
if source.endswith(VID_FORMATS):
txt_file_name = p.stem
save_path = str(save_dir / p.stem) # im.jpg, vid.mp4, ...
# folder with imgs
else:
txt_file_name = p.parent.name # get folder name containing current img
save_path = str(save_dir / p.parent.name) # im.jpg, vid.mp4, ...
curr_frames[i] = im0
txt_path = str(save_dir / 'tracks' / txt_file_name) # Unique text file path
s += '%gx%g ' % im.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
imc = im0.copy() if save_crop else im0 # for save_crop
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
...
| 2,281 | Python | .py | 52 | 34.692308 | 103 | 0.546847 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.