id (int64, 0-458k) | file_name (string, 4-119 chars) | file_path (string, 14-227 chars) | content (string, 24-9.96M chars) | size (int64, 24-9.96M) | language (string, 1 class) | extension (string, 14 classes) | total_lines (int64, 1-219k) | avg_line_length (float64, 2.52-4.63M) | max_line_length (int64, 5-9.91M) | alphanum_fraction (float64, 0-1) | repo_name (string, 7-101 chars) | repo_stars (int64, 100-139k) | repo_forks (int64, 0-26.4k) | repo_open_issues (int64, 0-2.27k) | repo_license (string, 12 classes) | repo_extraction_date (string, 433 classes) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2,287,500 | constants.py | Skirlax_MuAlphaZeroLibrary/mu_alpha_zero/AlphaZero/constants.py |
board_size = 8
SAMPLE_AZ_ARGS = {
"num_net_channels": 512,
"num_net_in_channels": 1,
"net_dropout": 0.3,
"net_action_size": board_size ** 2,
"num_simulations": 1317,
"self_play_games": 300,
"num_iters": 50,
"epochs": 500,
"lr": 0.0032485504583772953,
"max_buffer_size": 100_000,
"num_pit_games": 40,
"random_pit_freq": 3,
"board_size": board_size,
"batch_size": 256,
"tau": 1,
"arena_tau": 0.04139160592420218,
"c": 1,
"checkpoint_dir": None,
"update_threshold": 0.6,
"minimax_depth": 4,
"show_tqdm": True,
"num_workers": 5,
"num_to_win": 5,
"log_epsilon": 1e-9,
"zero_tau_after": 5,
"az_net_linear_input_size": 18432,
"log_dir": "Logs",
"pushbullet_token": None
}
TRAINED_AZ_NET_ARGS = {
"num_net_channels": 512,
"num_net_in_channels": 1,
"net_dropout": 0.3,
"net_action_size": board_size ** 2,
"num_simulations": 1317,
"self_play_games": 3,
"num_iters": 50,
"epochs": 320,
"lr": 0.0032485504583772953,
"max_buffer_size": 100_000,
"num_pit_games": 40,
"random_pit_freq": 3,
"board_size": board_size,
"batch_size": 128,
"tau": 1.0,
"arena_tau": 0, # 0.04139160592420218
"c": 1.15,
"checkpoint_dir": None,
"update_threshold": 0.6,
"minimax_depth": 4,
"show_tqdm": True,
"num_workers": 5,
"num_to_win": 4,
"log_epsilon": 1.4165210108199043e-08,
"log_dir": "Logs",
"pushbullet_token": None
}
SAMPLE_MZ_ARGS = {
"num_net_channels": 512,
"num_net_out_channels": 256,
"num_net_in_channels": 1,
"net_dropout": 0.3,
"net_action_size": 14,
"net_latent_size": 36,
"num_simulations": 240,
"self_play_games": 5,
"K": 5,
"gamma": 0.997,
"frame_buffer_size": 32,
"frame_skip": 4,
"num_steps": 400,
"num_iters": 50,
"epochs": 100,
"lr": 0.001,
"max_buffer_size": 70_000,
"num_pit_games": 40,
"random_pit_freq": 2,
"board_size": board_size,
"batch_size": 255,
"tau": 1,
"arena_tau": 1e-2,
"c": 1,
"c2": 19652,
"alpha": 0.8,
"checkpoint_dir": None,
"update_threshold": 0.6,
"minimax_depth": None, # don't use with muzero
"show_tqdm": False,
"num_workers": 5,
"num_to_win": 5,
"log_epsilon": 1e-9,
"zero_tau_after": 5,
"beta": 1,
"env_id": "ALE/Asteroids-v5",
"pickle_dir": "Pickles/Data",
"target_resolution": (96, 96),
"az_net_linear_input_size": 8192,
"log_dir": "Logs",
"pushbullet_token": None
}
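# Illustrative note (not part of the original file): elsewhere in this library
# these dicts are wrapped for attribute-style access; AlphaZero/utils.py imports
# SAMPLE_AZ_ARGS as `test_args` and wraps it as `DotDict(test_args)`, after which
# `args.board_size` and `args["board_size"]` both return 8.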
| 2,570 | Python | .py | 102 | 20.421569 | 51 | 0.569631 | Skirlax/MuAlphaZeroLibrary | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,501 | checkpointer.py | Skirlax_MuAlphaZeroLibrary/mu_alpha_zero/AlphaZero/checkpointer.py | import atexit
import os
import pickle
import sys
import torch as th
from mu_alpha_zero.AlphaZero.utils import DotDict
from mu_alpha_zero.General.utils import find_project_root
from mu_alpha_zero.config import Config
class CheckPointer:
def __init__(self, checkpoint_dir: str | None, verbose: bool = True) -> None:
self.__checkpoint_dir = checkpoint_dir
self.make_dir()
self.__checkpoint_num = self.initialize_checkpoint_num()
self.__name_prefix = "improved_net_"
self.verbose = verbose
atexit.register(self.cleanup)
def make_dir(self) -> None:
if self.__checkpoint_dir is not None:
os.makedirs(self.__checkpoint_dir, exist_ok=True)
return
root_dir = find_project_root()
checkpoint_dir = f"{root_dir}/Checkpoints/NetVersions"
self.__checkpoint_dir = checkpoint_dir
os.makedirs(checkpoint_dir, exist_ok=True)
def save_checkpoint(self, net: th.nn.Module, opponent: th.nn.Module, optimizer: th.optim,
lr: float,
iteration: int, mu_alpha_zero_config: Config, name: str = None) -> None:
if name is None:
name = self.__name_prefix + str(self.__checkpoint_num)
checkpoint_path = f"{self.__checkpoint_dir}/{name}.pth"
th.save({
"net": net.state_dict(),
"optimizer": optimizer.state_dict() if isinstance(optimizer, th.optim.Optimizer) else optimizer,
"lr": lr,
"iteration": iteration,
"args": mu_alpha_zero_config.to_dict(),
'opponent_state_dict': opponent.state_dict()
}, checkpoint_path)
self.print_verbose(f"Saved checkpoint to {checkpoint_path} at iteration {iteration}.")
self.__checkpoint_num += 1
def save_state_dict_checkpoint(self, net: th.nn.Module, name: str) -> None:
checkpoint_path = f"{self.__checkpoint_dir}/{name}.pth"
th.save(net.state_dict(), checkpoint_path)
self.print_verbose(f"Saved state dict checkpoint.")
def load_state_dict_checkpoint(self, net: th.nn.Module, name: str) -> None:
checkpoint_path = f"{self.__checkpoint_dir}/{name}.pth"
net.load_state_dict(th.load(checkpoint_path))
self.print_verbose(f"Loaded state dict checkpoint.")
def load_checkpoint_from_path(self, checkpoint_path: str) -> tuple:
sys.modules['mem_buffer'] = sys.modules['mu_alpha_zero.mem_buffer']
checkpoint = th.load(checkpoint_path)
self.print_verbose(f"Restoring checkpoint {checkpoint_path} made at iteration {checkpoint['iteration']}.")
if "memory" in checkpoint:
memory = checkpoint["memory"]
else:
memory = None
return checkpoint["net"], checkpoint["optimizer"], memory, checkpoint["lr"], \
DotDict(checkpoint["args"]), checkpoint["opponent_state_dict"]
def load_checkpoint_from_num(self, checkpoint_num: int) -> tuple:
checkpoint_path = f"{self.__checkpoint_dir}/{self.__name_prefix}{checkpoint_num}.pth"
return self.load_checkpoint_from_path(checkpoint_path)
def clear_checkpoints(self) -> None:
# This method doesn't obey the verbose flag as it's a destructive operation.
print("Clearing all checkpoints.")
answer = input("Are you sure?? (y/n): ")
if answer != "y":
print("Aborted.")
return
        file_names = os.listdir(self.__checkpoint_dir)
        for file_name in file_names:
            os.remove(f"{self.__checkpoint_dir}/{file_name}")
        print(f"Cleared {len(file_names)} saved checkpoints (all).")
def save_temp_net_checkpoint(self, net) -> None:
process_pid = os.getpid()
os.makedirs(f"{self.__checkpoint_dir}/Temp", exist_ok=True)
checkpoint_path = f"{self.__checkpoint_dir}/Temp/temp_net_{process_pid}.pth"
th.save(net.state_dict(), checkpoint_path)
def load_temp_net_checkpoint(self, net) -> None:
process_pid = os.getpid()
checkpoint_path = f"{self.__checkpoint_dir}/Temp/temp_net_{process_pid}.pth"
net.load_state_dict(th.load(checkpoint_path))
def initialize_checkpoint_num(self) -> int:
return len([x for x in os.listdir(self.__checkpoint_dir) if x.endswith(".pth")])
def get_highest_checkpoint_num(self) -> int:
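        # Checkpoint files follow "<prefix><num>.pth" (e.g. "improved_net_7.pth"), so the
        # third "_"-separated token carries the checkpoint number.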
return max([int(file_name.split("_")[2].split(".")[0]) for file_name in os.listdir(self.__checkpoint_dir)])
def get_temp_path(self) -> str:
return f"{self.__checkpoint_dir}/Temp/temp_net.pth"
def get_checkpoint_dir(self) -> str:
return self.__checkpoint_dir
def get_latest_name_match(self, name: str):
name_matches = [os.path.join(self.__checkpoint_dir, x) for x in os.listdir(self.__checkpoint_dir) if name in x]
name_matches.sort(key=lambda x: os.path.getctime(x))
return name_matches[-1]
def get_name_prefix(self):
return self.__name_prefix
def print_verbose(self, msg: str) -> None:
if self.verbose:
print(msg)
def cleanup(self):
import shutil
shutil.rmtree(f"{self.__checkpoint_dir}/Temp/", ignore_errors=True)
def save_losses(self, losses: list[float]):
with open(f"{self.__checkpoint_dir}/training_losses.pkl", "wb") as f:
pickle.dump(losses, f)
| 5,354 | Python | .py | 103 | 43.213592 | 119 | 0.639372 | Skirlax/MuAlphaZeroLibrary | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,502 | utils.py | Skirlax_MuAlphaZeroLibrary/mu_alpha_zero/AlphaZero/utils.py | import json
import os
import shutil
import time
from typing import Type, Literal, Callable
import numpy as np
import optuna
# import pygraphviz
import torch as th
from mu_alpha_zero.AlphaZero.constants import SAMPLE_AZ_ARGS as test_args
from mu_alpha_zero.mem_buffer import MemBuffer
from mu_alpha_zero.config import Config, AlphaZeroConfig
from mu_alpha_zero.General.network import GeneralNetwork
class DotDict(dict):
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self[name] = value
def augment_experience_with_symmetries(game_experience: list, board_size) -> list:
game_experience_ = []
for state, pi, v, _ in game_experience:
pi = np.array([x for x in pi.values()])
game_experience_.append((state, pi, v))
for axis, k in zip([0, 1], [1, 3]):
state_ = np.rot90(state.copy(), k=k)
pi_ = np.rot90(pi.copy().reshape(board_size, board_size), k=k).flatten()
game_experience_.append((state_, pi_, v))
del state_, pi_
state_ = np.flip(state.copy(), axis=axis)
pi_ = np.flip(pi.copy().reshape(board_size, board_size), axis=axis).flatten()
game_experience_.append((state_, pi_, v))
return game_experience_
def rotate_stack(state: np.ndarray, k: int):
for dim in range(state.shape[0]):
state[dim] = np.rot90(state[dim], k=k)
return state
def flip_stack(state: np.ndarray, axis: int):
for dim in range(state.shape[0]):
state[dim] = np.flip(state[dim], axis=axis)
return state
def make_channels(game_experience: list):
experience = []
for state, pi, v, current_player, move in game_experience:
state = make_channels_from_single(state)
experience.append((state, pi, v, current_player, move))
return experience
def make_channels_from_single(state: np.ndarray):
player_one_state = np.where(state == 1, 1, 0) # fill with 1 where player 1 has a piece else 0
player_minus_one_state = np.where(state == -1, 1, 0) # fill with 1 where player -1 has a piece else 0
empty_state = np.where(state == 0, 1, 0) # fill with 1 where empty spaces else 0
return np.stack([state, player_one_state, player_minus_one_state, empty_state], axis=0)
def mask_invalid_actions(probabilities: np.ndarray, mask: np.ndarray) -> np.ndarray:
mask = mask.reshape(probabilities.shape)
valids = probabilities * mask
valids_sum = valids.sum()
return valids / valids_sum
# to_print = "" # for debugging
# # mask = np.where(observations != 0, -5, observations)
# # mask = np.where(mask == 0, 1, mask)
# # mask = np.where(mask == -5, 0, mask)
# valids = probabilities.reshape(-1, board_size ** 2) * mask.reshape(-1, board_size ** 2)
# valids_sum = valids.sum()
# if valids_sum == 0:
# # When no valid moves are available (shouldn't happen) sum of valids is 0, making the returned valids an array
# # of nan's (result of division by zero). In this case, we create a uniform probability distribution.
# to_print += f"Sum of valid probabilities is 0. Creating a uniform probability...\nMask:\n{mask}"
# valids = np.full(valids.shape, 1.0 / np.prod(valids.shape))
# else:
# valids = valids / valids_sum # normalize
#
# if len(to_print) > 0:
# print(to_print, file=open("masking_message.txt", "w"))
# return valids
def mask_invalid_actions_batch(states: th.tensor) -> th.tensor:
masks = []
for state in states:
np_state = state.detach().cpu().numpy()
mask = np.where(np_state != 0, -5, np_state)
mask = np.where(mask == 0, 1, mask)
mask = np.where(mask == -5, 0, mask)
masks.append(mask)
return th.tensor(np.array(masks), dtype=th.float32).squeeze(1)
def check_args(args: dict):
required_keys = ["num_net_channels", "num_net_in_channels", "net_dropout", "net_action_size", "num_simulations",
"self_play_games", "num_iters", "epochs", "lr", "max_buffer_size", "num_pit_games",
"random_pit_freq", "board_size", "batch_size", "tau", "c", "checkpoint_dir", "update_threshold"]
for key in required_keys:
if key not in args:
raise KeyError(f"Missing key {key} in args dict. Please supply all required keys.\n"
f"Required keys: {required_keys}.")
def calculate_board_win_positions(n: int, k: int):
return get_num_horizontal_conv_slides(n, k) * (n * 2 + 2) + 4 * sum(
[get_num_horizontal_conv_slides(x, k) for x in range(k, n)])
def get_num_horizontal_conv_slides(board_size: int, kernel_size: int) -> int:
return (board_size - kernel_size) + 1
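# Illustrative check (not part of the original file): with board size n = 8 and
# num_to_win k = 5, get_num_horizontal_conv_slides(8, 5) == 4 and
# calculate_board_win_positions(8, 5) == 4 * (8 * 2 + 2) + 4 * (1 + 2 + 3) == 96,
# which matches the 96 distinct 5-in-a-row lines on an 8x8 board
# (32 horizontal, 32 vertical, 32 diagonal).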
def az_optuna_parameter_search(n_trials: int, target_values: list, target_game, config: AlphaZeroConfig,
net_class: Type[GeneralNetwork], results_dir: str, az,refresh_az: Callable):
"""
Performs a hyperparameter search using optuna. This method is meant to be called using the start_jobs.py script.
For this method to work, a mysql database must be running on the storage address and an optuna study with the
given name and the 'maximize' direction must exist.
:param n_trials: num of trials to run the search for.
:param config: The config to use for the search.
:return:
"""
def get_function_from_value(value, trial: optuna.Trial):
if type(value[1]) == int:
return trial.suggest_int(value[0], value[1], value[2])
if type(value[1]) == float:
return trial.suggest_float(value[0], value[1], value[2])
if type(value[1]) == list:
return trial.suggest_categorical(value[0], value[1])
def objective(trial: optuna.Trial):
az = refresh_az()
for value in target_values:
setattr(config, value[0], get_function_from_value(value, trial))
az.trainer.opponent_network.load_state_dict(az.trainer.network.state_dict())
shared_storage_manager = SharedStorageManager()
shared_storage_manager.start()
mem = shared_storage_manager.MemBuffer(az.trainer.memory.max_size, az.trainer.memory.disk,
az.trainer.memory.full_disk,
az.trainer.memory.dir_path, hook_manager=az.trainer.memory.hook_manager)
shared_storage: SharedStorage = shared_storage_manager.SharedStorage(mem)
shared_storage.set_stable_network_params(az.trainer.network.state_dict())
pool = az.trainer.mcts.start_continuous_self_play(
az.trainer.make_n_networks(az.trainer.muzero_alphazero_config.num_workers),
az.trainer.make_n_trees(az.trainer.muzero_alphazero_config.num_workers),
shared_storage, az.trainer.device,
az.trainer.muzero_alphazero_config,
az.trainer.muzero_alphazero_config.num_workers,
az.trainer.muzero_alphazero_config.num_worker_iters)
az.trainer.logger.log(
f"Successfully started a pool of {az.trainer.muzero_alphazero_config.num_workers} workers for "
f"self-play (1/2).")
p2 = Process(target=az.trainer.network.continuous_weight_update,
args=(
shared_storage, az.trainer.muzero_alphazero_config, az.trainer.checkpointer,
az.trainer.logger))
p2.start()
p4 = Process(target=az.trainer.arena.continuous_pit, args=(
az.trainer.net_player.make_fresh_instance(),
az.trainer.net_player.make_fresh_instance(),
RandomPlayer(az.trainer.game_manager.make_fresh_instance(), **{}),
az.trainer.muzero_alphazero_config.num_pit_games,
az.trainer.muzero_alphazero_config.num_simulations,
shared_storage,
az.trainer.checkpointer,
False,
1
))
p4.start()
last_len = 0
max_len = 500
while len(shared_storage.get_combined_losses()) < max_len:
if len(shared_storage.get_combined_losses()) <= last_len:
time.sleep(2)
continue
last_len = len(shared_storage.get_combined_losses())
trial.report(shared_storage.get_combined_losses()[-1], len(shared_storage.get_combined_losses()))
pool.terminate()
p2.terminate()
p4.terminate()
return shared_storage.get_combined_losses()[-1]
from mu_alpha_zero.shared_storage_manager import SharedStorageManager, SharedStorage
from mu_alpha_zero.AlphaZero.Arena.players import RandomPlayer
from mu_alpha_zero.mem_buffer import MemBuffer
from multiprocess.context import Process
config.show_tqdm = False
study = optuna.create_study(study_name="AlphaZeroHyperparameterSearch", direction="minimize")
study.optimize(objective, n_trials=n_trials)
with open(f"{results_dir}/best_params.json", "w") as f:
json.dump(study.best_params, f)
def build_net_from_config(muzero_config: Config, device):
from mu_alpha_zero.AlphaZero.Network.nnet import AlphaZeroNet
network = AlphaZeroNet(muzero_config.num_net_in_channels, muzero_config.num_net_channels,
muzero_config.net_dropout, muzero_config.net_action_size,
muzero_config.az_net_linear_input_size)
return network.to(device)
def build_all_from_config(muzero_alphazero_config: Config, device, lr=None, buffer_size=None):
if lr is None:
lr = muzero_alphazero_config.lr
if buffer_size is None:
buffer_size = muzero_alphazero_config.max_buffer_size
network = build_net_from_config(muzero_alphazero_config, device)
optimizer = th.optim.Adam(network.parameters(), lr=lr)
memory = MemBuffer(max_size=buffer_size)
return network, optimizer, memory
def make_net_from_checkpoint(checkpoint_path: str, args: DotDict | None):
if args is None:
args = DotDict(test_args)
device = th.device("cuda" if th.cuda.is_available() else "cpu")
net = build_net_from_config(args, device)
data = th.load(checkpoint_path)
net.load_state_dict(data["net"])
return net
def visualize_tree(root_node, output_file_name: str, depth_limit: int | None = None):
    # Imported lazily because the module-level "import pygraphviz" above is commented out.
    import pygraphviz
    graph = pygraphviz.AGraph()
graph.graph_attr["label"] = "MCTS visualization"
graph.node_attr["shape"] = "circle"
graph.edge_attr["color"] = "blue"
graph.node_attr["color"] = "gold"
if depth_limit is None:
depth_limit = float("inf")
def make_graph(node, parent, g: pygraphviz.AGraph, d_limit: int):
state_ = None
if node.state is None:
state_ = str(np.random.randint(low=0, high=5, size=parent.state.shape))
else:
state_ = str(node.state)
g.add_node(state_)
if parent != node:
g.add_edge(str(parent.state), state_)
if not node.was_visited() or d_limit <= 0:
return
# queue_ = deque(root_node.children.values())
# depth = 1
# num_children = 25
# children_iterated = 0
# parent = root_node
for child in node.children.values():
make_graph(child, node, g, d_limit=d_limit - 1 if depth_limit != float("inf") else depth_limit)
make_graph(root_node, root_node, graph, d_limit=depth_limit)
graph.layout(prog="dot")
graph.draw(f"{output_file_name}.png")
def cpp_data_to_memory(data: list, memory: MemBuffer, board_size: int):
# import pickle
# test_data = pickle.load(open(f"{find_project_root()}/history.pkl","rb"))
for game_data in data:
for state, pi, v in game_data:
state = th.tensor(state, dtype=th.float32).reshape(board_size, board_size)
pi = th.tensor(pi, dtype=th.float32)
memory.add((state, pi, v))
#
# if __name__ == "__main__":
# cpp_data_to_memory(None,MemBuffer(max_size=10_000),test_args)
| 11,998 | Python | .py | 235 | 42.748936 | 120 | 0.644632 | Skirlax/MuAlphaZeroLibrary | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,503 | __init__.py | Skirlax_MuAlphaZeroLibrary/mu_alpha_zero/AlphaZero/__init__.py | #from mu_alpha_zero.mem_buffer import MemBuffer
# from AlphaZero.Network.trainer import Trainer
# from AlphaZero.Network.nnet import TicTacToeNet
# from import cbind | 166 | Python | .py | 4 | 40.75 | 49 | 0.834356 | Skirlax/MuAlphaZeroLibrary | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,504 | logger.py | Skirlax_MuAlphaZeroLibrary/mu_alpha_zero/AlphaZero/logger.py | import atexit
import datetime
import logging
import os
from pushbullet import API
from mu_alpha_zero.General.utils import find_project_root
class Logger:
def __init__(self, logdir: str or None, token: str or None = None) -> None:
self.logdir = self.init_logdir(logdir)
os.makedirs(self.logdir, exist_ok=True)
self.logger = logging.getLogger("AlphaZeroLogger")
self.logger.setLevel(logging.DEBUG)
self.file_handler = logging.FileHandler(
f"{self.logdir}/{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.log")
self.file_handler.setLevel(logging.DEBUG)
self.logger.addHandler(self.file_handler)
self.is_token_set = False
self.api = API()
self.init_api_token(token)
formatter = logging.Formatter("[%(asctime)s - %(levelname)s] %(message)s")
self.file_handler.setFormatter(formatter)
atexit.register(self.cleanup)
def log(self, msg: str, level: str = "debug") -> None:
getattr(self.logger, level)(msg)
def init_logdir(self,logdir: str or None):
if logdir is None:
return f"{find_project_root()}/Logs/ProgramLogs"
else:
return logdir
def init_api_token(self, token: str or None) -> None:
if token is None:
return
self.api.set_token(token)
self.is_token_set = True
def pushbullet_log(self, msg: str, algorithm: str = "MuZero") -> None:
if not self.is_token_set:
return
try:
self.api.send_note(f"{algorithm} training notification.", msg)
except Exception as e:
print(e)
def clear_logdir(self):
for file_name in os.listdir(self.logdir):
os.remove(f"{self.logdir}/{file_name}")
def cleanup(self) -> None:
self.file_handler.close()
self.logger.removeHandler(self.file_handler)
class LoggingMessageTemplates:
@staticmethod
def PITTING_START(name1: str, name2: str, num_games: int):
return f"Starting pitting between {name1} and {name2} for {num_games} games."
@staticmethod
def PITTING_END(name1: str, name2: str, wins1: int, wins2: int, total: int, draws: int):
return (f"Pitting ended between {name1} and {name2}. "
f"Player 1 win frequency: {wins1 / total}. "
f"Player 2 win frequency: {wins2 / total}. Draws: {draws}.")
@staticmethod
def SELF_PLAY_START(num_games: int):
return f"Starting self play for {num_games} games."
@staticmethod
def SELF_PLAY_END(wins1: int, wins2: int, draws: int, not_zero_fn: callable):
if wins1 is None or wins2 is None or draws is None:
return "Self play ended. Results not available (This is expected if you are running MuZero)."
return (f"Self play ended. Player 1 win frequency: {wins1 / (not_zero_fn(wins1 + wins2 + draws))}. "
f"Player 2 win frequency: {wins2 / (not_zero_fn(wins1 + wins2 + draws))}. Draws: {draws}.")
@staticmethod
    def NETWORK_TRAINING_START(num_epochs: int):
        return f"Starting network training for {num_epochs} epochs."
@staticmethod
def NETWORK_TRAINING_END(mean_loss: float):
return f"Network training ended. Mean loss: {mean_loss}"
@staticmethod
def MODEL_REJECT(num_wins: float, update_threshold: float):
return (f"!!! Model rejected, restoring previous version. Win rate: {num_wins}. "
f"Update threshold: {update_threshold} !!!")
@staticmethod
def MODEL_ACCEPT(num_wins: float, update_threshold: float):
return (
f"!!! Model accepted, keeping current version. Win rate: {num_wins}. Update threshold: {update_threshold}"
f" !!!")
@staticmethod
def TRAINING_START(num_iters: int):
return f"Starting training for {num_iters} iterations."
@staticmethod
def TRAINING_END(args_used: dict):
args_used_str = ""
for key, value in args_used.items():
args_used_str += f"{key}: {value}, "
return f"Training ended. Args used: {args_used_str[:-2]}"
@staticmethod
def SAVED(type_: str, path: str):
return f"Saved {type_} to {path}"
@staticmethod
def LOADED(type_: str, path: str):
return f"Restored {type_} from {path}"
@staticmethod
def ITER_FINISHED_PSB(iter: int):
return f"Iteration {iter} of the algorithm training finished!"
@staticmethod
def TRAINING_END_PSB():
return "Algorithm Training finished, you can collect the results :)"
| 4,590 | Python | .py | 101 | 37.415842 | 118 | 0.640376 | Skirlax/MuAlphaZeroLibrary | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,505 | alpha_zero.py | Skirlax_MuAlphaZeroLibrary/mu_alpha_zero/AlphaZero/alpha_zero.py | import sys
from typing import Type
import numpy as np
import torch as th
from mu_alpha_zero.AlphaZero.Arena.arena import Arena
from mu_alpha_zero.AlphaZero.Arena.players import NetPlayer
from mu_alpha_zero.AlphaZero.MCTS.az_search_tree import McSearchTree
from mu_alpha_zero.trainer import Trainer
from mu_alpha_zero.General.az_game import AlphaZeroGame
from mu_alpha_zero.General.memory import GeneralMemoryBuffer
from mu_alpha_zero.General.network import GeneralNetwork
from mu_alpha_zero.General.utils import net_not_none, find_project_root
from mu_alpha_zero.Hooks.hook_manager import HookManager
from mu_alpha_zero.config import AlphaZeroConfig
class AlphaZero:
def __init__(self, game_instance: AlphaZeroGame):
self.trainer = None
self.net = None
self.game = game_instance
self.device = th.device("cuda" if th.cuda.is_available() else "cpu")
self.alpha_zero_config: AlphaZeroConfig = None
self.tree: McSearchTree = None
def create_new(self, alpha_zero_config: AlphaZeroConfig, network_class: Type[GeneralNetwork],
memory: GeneralMemoryBuffer, headless: bool = True, hook_manager: HookManager or None = None,
checkpointer_verbose: bool = False):
network = network_class.make_from_config(alpha_zero_config, hook_manager=hook_manager).to(
self.device)
tree = McSearchTree(self.game.make_fresh_instance(), alpha_zero_config)
self.tree = tree
net_player = NetPlayer(self.game.make_fresh_instance(), **{"network": network, "monte_carlo_tree_search": tree})
self.alpha_zero_config = alpha_zero_config
self.trainer = Trainer.create(alpha_zero_config, self.game, network, tree, net_player, headless=headless,
checkpointer_verbose=checkpointer_verbose, memory_override=memory,
hook_manager=hook_manager)
self.net = self.trainer.get_network()
def load_checkpoint(self, network_class: Type[GeneralNetwork], path: str, checkpoint_dir: str,
headless: bool = True, hook_manager: HookManager or None = None,
checkpointer_verbose: bool = False, memory: GeneralMemoryBuffer = None):
self.trainer = Trainer.from_checkpoint(network_class, McSearchTree, NetPlayer, path, checkpoint_dir, self.game,
headless=headless, hook_manager=hook_manager,
checkpointer_verbose=checkpointer_verbose, mem=memory)
self.net = self.trainer.get_network()
self.tree = self.trainer.get_tree()
self.args = self.trainer.get_args()
def train(self):
net_not_none(self.net)
self.trainer.train()
def train_parallel(self, use_pitting: bool):
net_not_none(self.net)
self.trainer.train_parallel(False, use_pitting)
def predict(self, x: np.ndarray, tau: float = 0) -> int:
net_not_none(self.net)
assert x.shape == (self.args["board_size"], self.args["board_size"], self.args[
"num_net_in_channels"]), "Input shape is not correct. Expected (board_size, board_size, num_net_in_channels)." \
"Got: " + str(x.shape)
pi, _ = self.tree.search(self.net, x, 1, self.device, tau=tau)
return self.game.select_move(pi, tau=self.alpha_zero_config.tau)
def play(self, p1_name: str, p2_name: str, num_games: int, alpha_zero_config: AlphaZeroConfig, starts: int = 1,
switch_players: bool = True):
net_not_none(self.net)
self.net.to(self.device)
self.net.eval()
manager = self.game.make_fresh_instance()
tree = McSearchTree(manager, alpha_zero_config)
kwargs = {"network": self.net, "monte_carlo_tree_search": tree, "evaluate_fn": manager.eval_board,
"depth": alpha_zero_config.minimax_depth, "player": -1}
path_prefix = find_project_root().replace("\\", "/").split("/")[-1]
p1 = sys.modules[f"{path_prefix}.AlphaZero.Arena.players"].__dict__[p1_name](manager, **kwargs)
p2 = sys.modules[f"{path_prefix}.AlphaZero.Arena.players"].__dict__[p2_name](manager, **kwargs)
arena_manager = self.game.make_fresh_instance()
arena_manager.set_headless(False)
arena = Arena(arena_manager, alpha_zero_config, self.device)
p1_w, p2_w, ds = arena.pit(p1, p2, num_games, alpha_zero_config.num_simulations, one_player=not switch_players,
start_player=starts, add_to_kwargs=kwargs)
print(f"Results: Player 1 wins: {p1_w}, Player 2 wins: {p2_w}, Draws: {ds}")
| 4,714 | Python | .py | 75 | 52.08 | 124 | 0.652193 | Skirlax/MuAlphaZeroLibrary | 8 | 0 | 0 | GPL-2.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,506 | checkpointer.cpython-311.pyc | Skirlax_MuAlphaZeroLibrary/mu_alpha_zero/AlphaZero/__pycache__/checkpointer.cpython-311.pyc | [binary content omitted: compiled CPython 3.11 bytecode of checkpointer.py; not human-readable]
ğ ñ ô ğ ğ
×ÒĞ]°/Ğ]Ğ]ĞQZĞ]Ğ]Ğ]Ñ^Ô^Ğ^ØĞÔ Ñ"ĞÔĞĞr c ó˜ — | j › d|› d�}t j | ¦ « |¦ « | d¦ « d S )Nr' r( zSaved state dict checkpoint.)r
r. r/ r0 r5 ©r r r% r6 s r Úsave_state_dict_checkpointz'CheckPointer.save_state_dict_checkpoint1 sR € Ø!Ô2Ğ?Ğ?°TĞ?Ğ?Ğ?ˆİ
Œ�—’Ñ Ô /Ñ2Ô2Ğ2Ø×ÒĞ:Ñ;Ô;Ğ;Ğ;Ğ;r c ó˜ — | j › d|› d�}| t j |¦ « ¦ « | d¦ « d S )Nr' r( zLoaded state dict checkpoint.)r
Úload_state_dictr. Úloadr5 r9 s r Úload_state_dict_checkpointz'CheckPointer.load_state_dict_checkpoint6 sT € Ø!Ô2Ğ?Ğ?°TĞ?Ğ?Ğ?ˆØ×Ò�BœG OÑ4Ô4Ñ5Ô5Ğ5Ø×ÒĞ;Ñ<Ô<Ğ<Ğ<Ğ<r r6 c ó0 — t j d t j d< |