Dataset schema (one record per function):

  field             type       length / range
  id                int32      0 – 252k
  repo              string     7 – 55 chars
  path              string     4 – 127 chars
  func_name         string     1 – 88 chars
  original_string   string     75 – 19.8k chars
  language          string     1 class
  code              string     51 – 19.8k chars
  code_tokens       sequence
  docstring         string     3 – 17.3k chars
  docstring_tokens  sequence
  sha               string     40 chars (fixed)
  url               string     87 – 242 chars
id 251,300 · MillionIntegrals/vel · vel/rl/api/evaluator.py · Evaluator.is_provided · python

```python
def is_provided(self, name):
    """ Capability check if evaluator provides given value """
    if name in self._storage:
        return True
    elif name in self._providers:
        return True
    elif name.startswith('rollout:'):
        # Note: this branch assigns rollout_name but falls through without a
        # return statement, so 'rollout:' names implicitly yield None (falsy)
        rollout_name = name[8:]
    else:
        return False
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/api/evaluator.py#L103-L112
id 251,301 · MillionIntegrals/vel · vel/rl/api/evaluator.py · Evaluator.get · python

```python
def get(self, name):
    """
    Return a value from this evaluator. Because the calculated tensor is cached, it may
    lead to subtle bugs if the same value is used multiple times with and without a
    no_grad() context. It is advised in such cases to not use no_grad and stick to .detach()
    """
    if name in self._storage:
        return self._storage[name]
    elif name in self._providers:
        value = self._storage[name] = self._providers[name](self)
        return value
    elif name.startswith('rollout:'):
        rollout_name = name[8:]
        value = self._storage[name] = self.rollout.batch_tensor(rollout_name)
        return value
    else:
        raise RuntimeError(f"Key {name} is not provided by this evaluator")
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/api/evaluator.py#L114-L133
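The two evaluator methods above implement a provide-and-cache lookup: a value is either already materialized in `_storage`, computed once by a registered provider, or pulled from the rollout under a `rollout:` prefix. A minimal self-contained sketch of that caching pattern (the class and names below are illustrative, not the vel API):

```python
class CachingEvaluator:
    """ Toy provide-and-cache store mirroring Evaluator.get above """

    def __init__(self):
        self._storage = {}
        self._providers = {}

    def provide(self, name, fn):
        # Register a lazy provider for a named value
        self._providers[name] = fn

    def get(self, name):
        if name in self._storage:
            return self._storage[name]  # cached from an earlier call
        elif name in self._providers:
            value = self._storage[name] = self._providers[name](self)
            return value  # computed once, then cached
        else:
            raise RuntimeError(f"Key {name} is not provided by this evaluator")


evaluator = CachingEvaluator()
evaluator.provide('model:value', lambda e: 42.0)
assert evaluator.get('model:value') == evaluator.get('model:value')  # second call hits the cache
```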
id 251,302 · MillionIntegrals/vel · vel/sources/vision/mnist.py · create · python

```python
def create(model_config, batch_size, normalize=True, num_workers=0, augmentations=None):
    """ Create a MNIST dataset, normalized """
    path = model_config.data_dir('mnist')

    train_dataset = datasets.MNIST(path, train=True, download=True)
    test_dataset = datasets.MNIST(path, train=False, download=True)

    augmentations = [ToArray()] + (augmentations if augmentations is not None else [])

    if normalize:
        train_data = train_dataset.train_data
        mean_value = (train_data.double() / 255).mean().item()
        std_value = (train_data.double() / 255).std().item()

        augmentations.append(Normalize(mean=mean_value, std=std_value, tags=['train', 'val']))

    augmentations.append(ToTensor())

    return TrainingData(
        train_dataset,
        test_dataset,
        num_workers=num_workers,
        batch_size=batch_size,
        augmentations=augmentations
    )
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/sources/vision/mnist.py#L11-L35
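For reference, the normalization statistics computed in the `normalize` branch can be reproduced straight from torchvision; for MNIST they come out near the well-known 0.1307 / 0.3081. A small standalone check (note: `train_data` as used above was the attribute name in older torchvision; newer releases call it `data`):

```python
from torchvision import datasets

# Download MNIST and compute dataset-wide mean/std, mirroring the normalize branch above
train_dataset = datasets.MNIST('/tmp/mnist', train=True, download=True)
train_data = train_dataset.data  # '.train_data' in the torchvision version used above

mean_value = (train_data.double() / 255).mean().item()
std_value = (train_data.double() / 255).std().item()
print(f"mean={mean_value:.4f} std={std_value:.4f}")  # approx. mean=0.1307 std=0.3081
```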
id 251,303 · MillionIntegrals/vel · vel/storage/classic.py · ClassicStorage.reset · python

```python
def reset(self, configuration: dict) -> None:
    """
    Whether or not anything was stored in the database, purge the previous state
    and start the new training process from scratch.
    """
    self.clean(0)
    self.backend.store_config(configuration)
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/storage/classic.py#L26-L32
id 251,304 · MillionIntegrals/vel · vel/storage/classic.py · ClassicStorage.load · python

```python
def load(self, train_info: TrainingInfo) -> (dict, dict):
    """ Resume learning process and return loaded hidden state dictionary """
    last_epoch = train_info.start_epoch_idx

    model_state = torch.load(self.checkpoint_filename(last_epoch))
    hidden_state = torch.load(self.checkpoint_hidden_filename(last_epoch))

    self.checkpoint_strategy.restore(hidden_state)
    train_info.restore(hidden_state)

    return model_state, hidden_state
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/storage/classic.py#L34-L46
id 251,305 · MillionIntegrals/vel · vel/storage/classic.py · ClassicStorage.clean · python

```python
def clean(self, global_epoch_idx):
    """ Clean old checkpoints """
    if self.cleaned:
        return

    self.cleaned = True
    self.backend.clean(global_epoch_idx)

    self._make_sure_dir_exists()

    for x in os.listdir(self.model_config.checkpoint_dir()):
        match = re.match('checkpoint_(\\d+)\\.data', x)

        if match:
            idx = int(match[1])
            if idx > global_epoch_idx:
                os.remove(os.path.join(self.model_config.checkpoint_dir(), x))

        match = re.match('checkpoint_hidden_(\\d+)\\.data', x)

        if match:
            idx = int(match[1])
            if idx > global_epoch_idx:
                os.remove(os.path.join(self.model_config.checkpoint_dir(), x))

        match = re.match('checkpoint_best_(\\d+)\\.data', x)

        if match:
            idx = int(match[1])
            if idx > global_epoch_idx:
                os.remove(os.path.join(self.model_config.checkpoint_dir(), x))
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/storage/classic.py#L52-L85
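The three regular expressions above encode the on-disk naming scheme (`checkpoint_<idx>.data`, `checkpoint_hidden_<idx>.data`, `checkpoint_best_<idx>.data`). The `checkpoint_*_filename` helpers used by this class are not included in these records; a plausible reconstruction consistent with those patterns (an assumption for illustration, not the verbatim vel source):

```python
import os

# Hypothetical reconstructions of the filename helpers, inferred from the
# regex patterns matched in ClassicStorage.clean above
def checkpoint_filename(checkpoint_dir, epoch_idx):
    return os.path.join(checkpoint_dir, 'checkpoint_{}.data'.format(epoch_idx))

def checkpoint_hidden_filename(checkpoint_dir, epoch_idx):
    return os.path.join(checkpoint_dir, 'checkpoint_hidden_{}.data'.format(epoch_idx))

def checkpoint_best_filename(checkpoint_dir, epoch_idx):
    return os.path.join(checkpoint_dir, 'checkpoint_best_{}.data'.format(epoch_idx))
```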
id 251,306 · MillionIntegrals/vel · vel/storage/classic.py · ClassicStorage.checkpoint · python

```python
def checkpoint(self, epoch_info: EpochInfo, model: Model):
    """ When epoch is done, we persist the training state """
    self.clean(epoch_info.global_epoch_idx - 1)

    self._make_sure_dir_exists()

    # Checkpoint latest
    torch.save(model.state_dict(), self.checkpoint_filename(epoch_info.global_epoch_idx))

    hidden_state = epoch_info.state_dict()
    self.checkpoint_strategy.write_state_dict(hidden_state)

    torch.save(hidden_state, self.checkpoint_hidden_filename(epoch_info.global_epoch_idx))

    if epoch_info.global_epoch_idx > 1 and self.checkpoint_strategy.should_delete_previous_checkpoint(
            epoch_info.global_epoch_idx):
        prev_epoch_idx = epoch_info.global_epoch_idx - 1

        os.remove(self.checkpoint_filename(prev_epoch_idx))
        os.remove(self.checkpoint_hidden_filename(prev_epoch_idx))

    if self.checkpoint_strategy.should_store_best_checkpoint(epoch_info.global_epoch_idx, epoch_info.result):
        best_checkpoint_idx = self.checkpoint_strategy.current_best_checkpoint_idx

        if best_checkpoint_idx is not None:
            os.remove(self.checkpoint_best_filename(best_checkpoint_idx))

        torch.save(model.state_dict(), self.checkpoint_best_filename(epoch_info.global_epoch_idx))

        self.checkpoint_strategy.store_best_checkpoint_idx(epoch_info.global_epoch_idx)

    self.backend.store(epoch_info.result)
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/storage/classic.py#L87-L118
id 251,307 · MillionIntegrals/vel · vel/storage/classic.py · ClassicStorage._persisted_last_epoch · python

```python
def _persisted_last_epoch(self) -> int:
    """ Return number of last epoch already calculated """
    epoch_number = 0
    self._make_sure_dir_exists()

    for x in os.listdir(self.model_config.checkpoint_dir()):
        match = re.match('checkpoint_(\\d+)\\.data', x)

        if match:
            idx = int(match[1])

            if idx > epoch_number:
                epoch_number = idx

    return epoch_number
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/storage/classic.py#L140-L153
id 251,308 · MillionIntegrals/vel · vel/storage/classic.py · ClassicStorage._make_sure_dir_exists · python

```python
def _make_sure_dir_exists(self):
    """ Make sure directory exists """
    filename = self.model_config.checkpoint_dir()
    pathlib.Path(filename).mkdir(parents=True, exist_ok=True)
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/storage/classic.py#L155-L158
id 251,309 · MillionIntegrals/vel · vel/rl/api/algo_base.py · clip_gradients · python

```python
def clip_gradients(batch_result, model, max_grad_norm):
    """ Clip gradients to a given maximum length """
    if max_grad_norm is not None:
        grad_norm = torch.nn.utils.clip_grad_norm_(
            filter(lambda p: p.requires_grad, model.parameters()),
            max_norm=max_grad_norm
        )
    else:
        grad_norm = 0.0

    batch_result['grad_norm'] = grad_norm
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/api/algo_base.py#L4-L14
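A minimal usage sketch for the helper above, exercised on a throwaway linear model (illustrative only; `clip_gradients` is the function defined in this record):

```python
import torch

model = torch.nn.Linear(4, 2)
loss = model(torch.randn(8, 4)).sum()
loss.backward()

batch_result = {}
clip_gradients(batch_result, model, max_grad_norm=0.5)

# clip_grad_norm_ returns the total norm measured *before* clipping
print(batch_result['grad_norm'])
```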
id 251,310 · MillionIntegrals/vel · vel/rl/buffers/circular_replay_buffer.py · CircularReplayBuffer.sample_trajectories · python

```python
def sample_trajectories(self, rollout_length, batch_info) -> Trajectories:
    """ Sample batch of trajectories and return them """
    indexes = self.backend.sample_batch_trajectories(rollout_length)
    transition_tensors = self.backend.get_trajectories(indexes, rollout_length)

    return Trajectories(
        num_steps=rollout_length,
        num_envs=self.backend.num_envs,
        environment_information=None,
        transition_tensors={k: torch.from_numpy(v) for k, v in transition_tensors.items()},
        rollout_tensors={}
    )
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/buffers/circular_replay_buffer.py#L52-L63
id 251,311 · MillionIntegrals/vel · vel/rl/algo/policy_gradient/trpo.py · conjugate_gradient_method · python

```python
def conjugate_gradient_method(matrix_vector_operator, loss_gradient, nsteps, rdotr_tol=1e-10):
    """ Conjugate gradient algorithm """
    x = torch.zeros_like(loss_gradient)
    r = loss_gradient.clone()
    p = loss_gradient.clone()

    rdotr = torch.dot(r, r)

    for i in range(nsteps):
        Avp = matrix_vector_operator(p)
        alpha = rdotr / torch.dot(p, Avp)

        x += alpha * p
        r -= alpha * Avp

        new_rdotr = torch.dot(r, r)
        betta = new_rdotr / rdotr
        p = r + betta * p

        rdotr = new_rdotr

        if rdotr < rdotr_tol:
            break

    return x
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/algo/policy_gradient/trpo.py#L23-L47
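Because the operator argument only needs to implement a matrix-vector product, the routine can be sanity-checked against a small dense symmetric positive-definite system, assuming only the function above:

```python
import torch

# Small SPD system A x = b
A = torch.tensor([[4.0, 1.0], [1.0, 3.0]])
b = torch.tensor([1.0, 2.0])

x = conjugate_gradient_method(lambda v: A @ v, b, nsteps=10)
print(x)                          # approaches the direct solution
print(torch.linalg.solve(A, b))   # reference answer
```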
id 251,312 · MillionIntegrals/vel · vel/rl/algo/policy_gradient/trpo.py · TrpoPolicyGradient.line_search · python

```python
def line_search(self, model, rollout, original_policy_loss, original_policy_params,
                original_parameter_vec, full_step, expected_improvement_full):
    """ Find the right stepsize to make sure policy improves """
    current_parameter_vec = original_parameter_vec.clone()

    for idx in range(self.line_search_iters):
        stepsize = 0.5 ** idx

        new_parameter_vec = current_parameter_vec + stepsize * full_step

        # Update model parameters
        v2p(new_parameter_vec, model.policy_parameters())

        # Calculate new loss
        with torch.no_grad():
            policy_params = model.policy(rollout.batch_tensor('observations'))
            policy_entropy = torch.mean(model.entropy(policy_params))
            kl_divergence = torch.mean(model.kl_divergence(original_policy_params, policy_params))

            new_loss = self.calc_policy_loss(model, policy_params, policy_entropy, rollout)

        actual_improvement = original_policy_loss - new_loss
        expected_improvement = expected_improvement_full * stepsize

        ratio = actual_improvement / expected_improvement

        if kl_divergence.item() > self.mak_kl * 1.5:
            # KL divergence bound exceeded
            continue
        elif ratio < expected_improvement:
            # Not enough loss improvement
            continue
        else:
            # Optimization successful
            return True, ratio, actual_improvement, new_loss, kl_divergence

    # Optimization failed, revert to initial parameters
    v2p(original_parameter_vec, model.policy_parameters())
    return False, torch.tensor(0.0), torch.tensor(0.0), torch.tensor(0.0), torch.tensor(0.0)
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/algo/policy_gradient/trpo.py#L167-L205
id 251,313 · MillionIntegrals/vel · vel/rl/algo/policy_gradient/trpo.py · TrpoPolicyGradient.fisher_vector_product · python

```python
def fisher_vector_product(self, vector, kl_divergence_gradient, model):
    """ Calculate product Hessian @ vector """
    assert not vector.requires_grad, "Vector must not propagate gradient"

    dot_product = vector @ kl_divergence_gradient

    # at least one dimension spans across two contiguous subspaces
    double_gradient = torch.autograd.grad(dot_product, model.policy_parameters(), retain_graph=True)
    fvp = p2v(x.contiguous() for x in double_gradient)

    return fvp + vector * self.cg_damping
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/algo/policy_gradient/trpo.py#L207-L216
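The double-backward trick used here (differentiating ⟨v, ∇KL⟩ yields H·v without ever forming H) can be demonstrated on a toy function. `p2v`/`v2p` are vel's parameter-vector flattening utilities, so the sketch below sidesteps them by using a single flat parameter:

```python
import torch

# f(w) = 0.5 * w^T diag(1, 2, 3) w, so the Hessian is diag(1, 2, 3)
w = torch.tensor([1.0, 1.0, 1.0], requires_grad=True)
f = 0.5 * (torch.tensor([1.0, 2.0, 3.0]) * w * w).sum()

# The first gradient must keep the graph so we can differentiate through it again
grad = torch.autograd.grad(f, w, create_graph=True)[0]

v = torch.tensor([1.0, 1.0, 1.0])
hvp = torch.autograd.grad(grad @ v, w)[0]
print(hvp)  # tensor([1., 2., 3.]) == H @ v
```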
id 251,314 · MillionIntegrals/vel · vel/rl/algo/policy_gradient/trpo.py · TrpoPolicyGradient.value_loss · python

```python
def value_loss(self, model, observations, discounted_rewards):
    """ Loss of value estimator """
    value_outputs = model.value(observations)
    value_loss = 0.5 * F.mse_loss(value_outputs, discounted_rewards)
    return value_loss
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/algo/policy_gradient/trpo.py#L218-L222
id 251,315 · MillionIntegrals/vel · vel/rl/algo/policy_gradient/trpo.py · TrpoPolicyGradient.calc_policy_loss · python

```python
def calc_policy_loss(self, model, policy_params, policy_entropy, rollout):
    """
    Policy gradient loss - calculate from probability distribution

    Calculate surrogate loss - advantage * policy_probability / fixed_initial_policy_probability

    Because we operate with logarithm of -probability (neglogp) we do
    - advantage * exp(fixed_neglogps - model_neglogps)
    """
    actions = rollout.batch_tensor('actions')
    advantages = rollout.batch_tensor('advantages')
    fixed_logprobs = rollout.batch_tensor('action:logprobs')

    model_logprobs = model.logprob(actions, policy_params)

    # Normalize advantages
    advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

    # We put - in front because we want to maximize the surrogate objective
    policy_loss = -advantages * torch.exp(model_logprobs - fixed_logprobs)

    return policy_loss.mean() - policy_entropy * self.entropy_coef
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/algo/policy_gradient/trpo.py#L224-L245
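The surrogate objective is an importance-weighted advantage, with the probability ratio expressed through log-probabilities. A worked toy computation with dummy values (illustrative only):

```python
import torch

advantages = torch.tensor([1.0, -0.5, 0.2])
fixed_logprobs = torch.tensor([-1.0, -2.0, -0.5])   # log-probs under the frozen policy
model_logprobs = torch.tensor([-0.8, -2.1, -0.4])   # log-probs under the current policy

# Normalize advantages, as in calc_policy_loss above
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

# ratio = pi_new / pi_old = exp(model_logprobs - fixed_logprobs); minus sign to maximize
policy_loss = (-advantages * torch.exp(model_logprobs - fixed_logprobs)).mean()
print(policy_loss)
```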
id 251,316 · MillionIntegrals/vel · vel/rl/api/rollout.py · Transitions.shuffled_batches · python

```python
def shuffled_batches(self, batch_size):
    """ Generate randomized batches of data """
    if batch_size >= self.size:
        yield self
    else:
        batch_splits = math_util.divide_ceiling(self.size, batch_size)
        indices = list(range(self.size))
        np.random.shuffle(indices)

        for sub_indices in np.array_split(indices, batch_splits):
            yield Transitions(
                size=len(sub_indices),
                environment_information=None,
                # Dont use it in batches for a moment, can be uncommented later if needed
                # environment_information=[info[sub_indices.tolist()] for info in self.environment_information]
                transition_tensors={k: v[sub_indices] for k, v in self.transition_tensors.items()}
                # extra_data does not go into batches
            )
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/api/rollout.py#L59-L76
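`math_util.divide_ceiling` is vel's ceiling-division helper for computing the number of batch splits; as used here it is presumably equivalent to the one-liner below (a sketch under that assumption, not the verbatim vel source):

```python
def divide_ceiling(a, b):
    """ Integer division rounded up, e.g. 10 items in batches of 4 -> 3 splits """
    return (a + b - 1) // b

assert divide_ceiling(10, 4) == 3
assert divide_ceiling(8, 4) == 2
```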
id 251,317 · MillionIntegrals/vel · vel/rl/api/rollout.py · Trajectories.to_transitions · python

```python
def to_transitions(self) -> 'Transitions':
    """ Convert given rollout to Transitions """
    # No need to propagate 'rollout_tensors' as they won't mean anything
    return Transitions(
        size=self.num_steps * self.num_envs,
        environment_information=[ei for l in self.environment_information for ei in l]
        if self.environment_information is not None else None,
        transition_tensors={
            name: tensor_util.merge_first_two_dims(t) for name, t in self.transition_tensors.items()
        },
        extra_data=self.extra_data
    )
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/api/rollout.py#L111-L123
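`tensor_util.merge_first_two_dims` collapses the leading (num_steps, num_envs) axes into a single batch axis, which is exactly what turns per-step trajectories into flat transitions. A minimal equivalent (an assumption about the helper, for illustration):

```python
import torch

def merge_first_two_dims(t: torch.Tensor) -> torch.Tensor:
    # (steps, envs, *rest) -> (steps * envs, *rest)
    return t.reshape(t.size(0) * t.size(1), *t.shape[2:])

t = torch.zeros(5, 4, 3)
assert merge_first_two_dims(t).shape == (5 * 4, 3)
```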
id 251,318 · MillionIntegrals/vel · vel/rl/api/rollout.py · Trajectories.shuffled_batches · python

```python
def shuffled_batches(self, batch_size):
    """ Generate randomized batches of data - only sample whole trajectories """
    if batch_size >= self.num_envs * self.num_steps:
        yield self
    else:
        rollouts_in_batch = batch_size // self.num_steps
        batch_splits = math_util.divide_ceiling(self.num_envs, rollouts_in_batch)

        indices = list(range(self.num_envs))
        np.random.shuffle(indices)

        for sub_indices in np.array_split(indices, batch_splits):
            yield Trajectories(
                num_steps=self.num_steps,
                num_envs=len(sub_indices),
                # Dont use it in batches for a moment, can be uncommented later if needed
                # environment_information=[x[sub_indices.tolist()] for x in self.environment_information],
                environment_information=None,
                transition_tensors={k: x[:, sub_indices] for k, x in self.transition_tensors.items()},
                rollout_tensors={k: x[sub_indices] for k, x in self.rollout_tensors.items()},
                # extra_data does not go into batches
            )
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/api/rollout.py#L125-L147
id 251,319 · MillionIntegrals/vel · vel/rl/api/rollout.py · Trajectories.episode_information · python

```python
def episode_information(self):
    """ List of information about finished episodes """
    return [
        info.get('episode')
        for infolist in self.environment_information
        for info in infolist
        if 'episode' in info
    ]
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/api/rollout.py#L164-L168
id 251,320 · MillionIntegrals/vel · vel/models/rnn/multilayer_rnn_sequence_model.py · MultilayerRnnSequenceModel.forward_state · python

```python
def forward_state(self, sequence, state=None):
    """ Forward propagate a sequence through the network accounting for the state """
    if state is None:
        state = self.zero_state(sequence.size(0))

    data = self.input_block(sequence)

    state_outputs = []

    # for layer_length, layer in zip(self.hidden_layers, self.recurrent_layers):
    for idx in range(len(self.recurrent_layers)):
        layer_length = self.recurrent_layers[idx].state_dim

        # Partition hidden state, for each layer we have layer_length of h state and layer_length of c state
        current_state = state[:, :, :layer_length]
        state = state[:, :, layer_length:]

        # Propagate through the GRU state
        data, new_h = self.recurrent_layers[idx](data, current_state)

        if self.dropout_layers:
            data = self.dropout_layers[idx](data)

        state_outputs.append(new_h)

    output_data = self.output_activation(self.output_layer(data))

    concatenated_hidden_output = torch.cat(state_outputs, dim=2)

    return output_data, concatenated_hidden_output
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/models/rnn/multilayer_rnn_sequence_model.py#L66-L95
id 251,321 · MillionIntegrals/vel · vel/models/rnn/multilayer_rnn_sequence_model.py · MultilayerRnnSequenceModel.loss_value · python

```python
def loss_value(self, x_data, y_true, y_pred):
    """ Calculate a value of loss function """
    y_pred = y_pred.view(-1, y_pred.size(2))
    y_true = y_true.view(-1).to(torch.long)
    return F.nll_loss(y_pred, y_true)
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/models/rnn/multilayer_rnn_sequence_model.py#L106-L110
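`F.nll_loss` expects log-probabilities rather than raw logits, so this loss presumes the model's output activation is a log-softmax (consistent with the `output_activation` call in `forward_state` above). A quick self-contained illustration of that contract:

```python
import torch
import torch.nn.functional as F

logits = torch.randn(6, 10)            # (flattened sequence positions, vocabulary)
y_true = torch.randint(0, 10, (6,))

log_probs = F.log_softmax(logits, dim=1)
# nll_loss over log-probabilities equals cross_entropy over raw logits
assert torch.allclose(F.nll_loss(log_probs, y_true), F.cross_entropy(logits, y_true))
```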
id 251,322 · MillionIntegrals/vel · vel/api/learner.py · Learner.initialize_training · python

```python
def initialize_training(self, training_info: TrainingInfo, model_state=None, hidden_state=None):
    """ Prepare for training """
    if model_state is None:
        self.model.reset_weights()
    else:
        self.model.load_state_dict(model_state)
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/learner.py#L36-L41
id 251,323 · MillionIntegrals/vel · vel/api/learner.py · Learner.run_epoch · python

```python
def run_epoch(self, epoch_info: EpochInfo, source: 'vel.api.Source'):
    """ Run full epoch of learning """
    epoch_info.on_epoch_begin()

    lr = epoch_info.optimizer.param_groups[-1]['lr']
    print("|-------- Epoch {:06} Lr={:.6f} ----------|".format(epoch_info.global_epoch_idx, lr))

    self.train_epoch(epoch_info, source)
    epoch_info.result_accumulator.freeze_results('train')

    self.validation_epoch(epoch_info, source)
    epoch_info.result_accumulator.freeze_results('val')

    epoch_info.on_epoch_end()
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/learner.py#L43-L56
id 251,324 · MillionIntegrals/vel · vel/api/learner.py · Learner.train_epoch · python

```python
def train_epoch(self, epoch_info, source: 'vel.api.Source', interactive=True):
    """ Run a single training epoch """
    self.train()

    if interactive:
        iterator = tqdm.tqdm(source.train_loader(), desc="Training", unit="iter", file=sys.stdout)
    else:
        iterator = source.train_loader()

    for batch_idx, (data, target) in enumerate(iterator):
        batch_info = BatchInfo(epoch_info, batch_idx)

        batch_info.on_batch_begin()
        self.train_batch(batch_info, data, target)
        batch_info.on_batch_end()

        # set_postfix is a tqdm method, so this line assumes the interactive (tqdm) iterator
        iterator.set_postfix(loss=epoch_info.result_accumulator.intermediate_value('loss'))
```
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/learner.py#L58-L74
id 251,325 · MillionIntegrals/vel · vel/api/learner.py · Learner.validation_epoch · python

```python
def validation_epoch(self, epoch_info, source: 'vel.api.Source'):
    """ Run a single evaluation epoch """
    self.eval()

    iterator = tqdm.tqdm(source.val_loader(), desc="Validation", unit="iter", file=sys.stdout)

    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(iterator):
            batch_info = BatchInfo(epoch_info, batch_idx)

            batch_info.on_validation_batch_begin()
            self.feed_batch(batch_info, data, target)
            batch_info.on_validation_batch_end()
```
def validation_epoch(self, epoch_info, source: 'vel.api.Source'): self.eval() iterator = tqdm.tqdm(source.val_loader(), desc="Validation", unit="iter", file=sys.stdout) with torch.no_grad(): for batch_idx, (data, target) in enumerate(iterator): batch_info = BatchInfo(epoch_info, batch_idx) batch_info.on_validation_batch_begin() self.feed_batch(batch_info, data, target) batch_info.on_validation_batch_end()
[ "def", "validation_epoch", "(", "self", ",", "epoch_info", ",", "source", ":", "'vel.api.Source'", ")", ":", "self", ".", "eval", "(", ")", "iterator", "=", "tqdm", ".", "tqdm", "(", "source", ".", "val_loader", "(", ")", ",", "desc", "=", "\"Validation\"", ",", "unit", "=", "\"iter\"", ",", "file", "=", "sys", ".", "stdout", ")", "with", "torch", ".", "no_grad", "(", ")", ":", "for", "batch_idx", ",", "(", "data", ",", "target", ")", "in", "enumerate", "(", "iterator", ")", ":", "batch_info", "=", "BatchInfo", "(", "epoch_info", ",", "batch_idx", ")", "batch_info", ".", "on_validation_batch_begin", "(", ")", "self", ".", "feed_batch", "(", "batch_info", ",", "data", ",", "target", ")", "batch_info", ".", "on_validation_batch_end", "(", ")" ]
Run a single evaluation epoch
[ "Run", "a", "single", "evaluation", "epoch" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/learner.py#L76-L88
251,326
MillionIntegrals/vel
vel/api/learner.py
Learner.feed_batch
def feed_batch(self, batch_info, data, target): """ Run single batch of data """ data, target = data.to(self.device), target.to(self.device) output, loss = self.model.loss(data, target) # Store extra batch information for calculation of the statistics batch_info['data'] = data batch_info['target'] = target batch_info['output'] = output batch_info['loss'] = loss return loss
python
def feed_batch(self, batch_info, data, target): data, target = data.to(self.device), target.to(self.device) output, loss = self.model.loss(data, target) # Store extra batch information for calculation of the statistics batch_info['data'] = data batch_info['target'] = target batch_info['output'] = output batch_info['loss'] = loss return loss
[ "def", "feed_batch", "(", "self", ",", "batch_info", ",", "data", ",", "target", ")", ":", "data", ",", "target", "=", "data", ".", "to", "(", "self", ".", "device", ")", ",", "target", ".", "to", "(", "self", ".", "device", ")", "output", ",", "loss", "=", "self", ".", "model", ".", "loss", "(", "data", ",", "target", ")", "# Store extra batch information for calculation of the statistics", "batch_info", "[", "'data'", "]", "=", "data", "batch_info", "[", "'target'", "]", "=", "target", "batch_info", "[", "'output'", "]", "=", "output", "batch_info", "[", "'loss'", "]", "=", "loss", "return", "loss" ]
Run single batch of data
[ "Run", "single", "batch", "of", "data" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/learner.py#L90-L101
251,327
MillionIntegrals/vel
vel/api/learner.py
Learner.train_batch
def train_batch(self, batch_info, data, target): """ Train single batch of data """ batch_info.optimizer.zero_grad() loss = self.feed_batch(batch_info, data, target) loss.backward() if self.max_grad_norm is not None: batch_info['grad_norm'] = torch.nn.utils.clip_grad_norm_( filter(lambda p: p.requires_grad, self.model.parameters()), max_norm=self.max_grad_norm ) batch_info.optimizer.step()
python
def train_batch(self, batch_info, data, target): batch_info.optimizer.zero_grad() loss = self.feed_batch(batch_info, data, target) loss.backward() if self.max_grad_norm is not None: batch_info['grad_norm'] = torch.nn.utils.clip_grad_norm_( filter(lambda p: p.requires_grad, self.model.parameters()), max_norm=self.max_grad_norm ) batch_info.optimizer.step()
[ "def", "train_batch", "(", "self", ",", "batch_info", ",", "data", ",", "target", ")", ":", "batch_info", ".", "optimizer", ".", "zero_grad", "(", ")", "loss", "=", "self", ".", "feed_batch", "(", "batch_info", ",", "data", ",", "target", ")", "loss", ".", "backward", "(", ")", "if", "self", ".", "max_grad_norm", "is", "not", "None", ":", "batch_info", "[", "'grad_norm'", "]", "=", "torch", ".", "nn", ".", "utils", ".", "clip_grad_norm_", "(", "filter", "(", "lambda", "p", ":", "p", ".", "requires_grad", ",", "self", ".", "model", ".", "parameters", "(", ")", ")", ",", "max_norm", "=", "self", ".", "max_grad_norm", ")", "batch_info", ".", "optimizer", ".", "step", "(", ")" ]
Train single batch of data
[ "Train", "single", "batch", "of", "data" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/learner.py#L103-L115
251,328
MillionIntegrals/vel
vel/util/situational.py
process_environment_settings
def process_environment_settings(default_dictionary: dict, settings: typing.Optional[dict]=None, presets: typing.Optional[dict]=None): """ Process a dictionary of env settings """ settings = settings if settings is not None else {} presets = presets if presets is not None else {} env_keys = sorted(set(default_dictionary.keys()) | set(presets.keys())) result_dict = {} for key in env_keys: if key in default_dictionary: new_dict = default_dictionary[key].copy() else: new_dict = {} new_dict.update(settings) if key in presets: new_dict.update(presets[key]) result_dict[key] = new_dict return result_dict
python
def process_environment_settings(default_dictionary: dict, settings: typing.Optional[dict]=None, presets: typing.Optional[dict]=None): settings = settings if settings is not None else {} presets = presets if presets is not None else {} env_keys = sorted(set(default_dictionary.keys()) | set(presets.keys())) result_dict = {} for key in env_keys: if key in default_dictionary: new_dict = default_dictionary[key].copy() else: new_dict = {} new_dict.update(settings) if key in presets: new_dict.update(presets[key]) result_dict[key] = new_dict return result_dict
[ "def", "process_environment_settings", "(", "default_dictionary", ":", "dict", ",", "settings", ":", "typing", ".", "Optional", "[", "dict", "]", "=", "None", ",", "presets", ":", "typing", ".", "Optional", "[", "dict", "]", "=", "None", ")", ":", "settings", "=", "settings", "if", "settings", "is", "not", "None", "else", "{", "}", "presets", "=", "presets", "if", "presets", "is", "not", "None", "else", "{", "}", "env_keys", "=", "sorted", "(", "set", "(", "default_dictionary", ".", "keys", "(", ")", ")", "|", "set", "(", "presets", ".", "keys", "(", ")", ")", ")", "result_dict", "=", "{", "}", "for", "key", "in", "env_keys", ":", "if", "key", "in", "default_dictionary", ":", "new_dict", "=", "default_dictionary", "[", "key", "]", ".", "copy", "(", ")", "else", ":", "new_dict", "=", "{", "}", "new_dict", ".", "update", "(", "settings", ")", "if", "key", "in", "presets", ":", "new_dict", ".", "update", "(", "presets", "[", "key", "]", ")", "result_dict", "[", "key", "]", "=", "new_dict", "return", "result_dict" ]
Process a dictionary of env settings
[ "Process", "a", "dictionary", "of", "env", "settings" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/util/situational.py#L4-L27
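A short usage sketch may help clarify the merge order implemented above: per-environment defaults are copied first, shared settings are layered on top, and presets override both. The dictionaries below are hypothetical illustrations, not values taken from the repository.

defaults = {'Breakout': {'frame_stack': 4, 'monitor': False}}
settings = {'allow_early_resets': True}   # applied to every environment
presets = {'Breakout': {'monitor': True}, 'Pong': {'frame_stack': 2}}

result = process_environment_settings(defaults, settings=settings, presets=presets)

# Keys are the union of defaults and presets; presets win over settings,
# which in turn win over the per-environment defaults:
assert result['Breakout'] == {'frame_stack': 4, 'monitor': True, 'allow_early_resets': True}
assert result['Pong'] == {'allow_early_resets': True, 'frame_stack': 2}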
251,329
MillionIntegrals/vel
vel/rl/reinforcers/buffered_off_policy_iteration_reinforcer.py
BufferedOffPolicyIterationReinforcer.roll_out_and_store
def roll_out_and_store(self, batch_info): """ Roll out environment and store result in the replay buffer """ self.model.train() if self.env_roller.is_ready_for_sampling(): rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device) # Store some information about the rollout, no training phase batch_info['frames'] = rollout.frames() batch_info['episode_infos'] = rollout.episode_information() else: frames = 0 episode_infos = [] with tqdm.tqdm(desc="Populating memory", total=self.env_roller.initial_memory_size_hint()) as pbar: while not self.env_roller.is_ready_for_sampling(): rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device) new_frames = rollout.frames() frames += new_frames episode_infos.extend(rollout.episode_information()) pbar.update(new_frames) # Store some information about the rollout, no training phase batch_info['frames'] = frames batch_info['episode_infos'] = episode_infos
python
def roll_out_and_store(self, batch_info): self.model.train() if self.env_roller.is_ready_for_sampling(): rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device) # Store some information about the rollout, no training phase batch_info['frames'] = rollout.frames() batch_info['episode_infos'] = rollout.episode_information() else: frames = 0 episode_infos = [] with tqdm.tqdm(desc="Populating memory", total=self.env_roller.initial_memory_size_hint()) as pbar: while not self.env_roller.is_ready_for_sampling(): rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device) new_frames = rollout.frames() frames += new_frames episode_infos.extend(rollout.episode_information()) pbar.update(new_frames) # Store some information about the rollout, no training phase batch_info['frames'] = frames batch_info['episode_infos'] = episode_infos
[ "def", "roll_out_and_store", "(", "self", ",", "batch_info", ")", ":", "self", ".", "model", ".", "train", "(", ")", "if", "self", ".", "env_roller", ".", "is_ready_for_sampling", "(", ")", ":", "rollout", "=", "self", ".", "env_roller", ".", "rollout", "(", "batch_info", ",", "self", ".", "model", ",", "self", ".", "settings", ".", "rollout_steps", ")", ".", "to_device", "(", "self", ".", "device", ")", "# Store some information about the rollout, no training phase", "batch_info", "[", "'frames'", "]", "=", "rollout", ".", "frames", "(", ")", "batch_info", "[", "'episode_infos'", "]", "=", "rollout", ".", "episode_information", "(", ")", "else", ":", "frames", "=", "0", "episode_infos", "=", "[", "]", "with", "tqdm", ".", "tqdm", "(", "desc", "=", "\"Populating memory\"", ",", "total", "=", "self", ".", "env_roller", ".", "initial_memory_size_hint", "(", ")", ")", "as", "pbar", ":", "while", "not", "self", ".", "env_roller", ".", "is_ready_for_sampling", "(", ")", ":", "rollout", "=", "self", ".", "env_roller", ".", "rollout", "(", "batch_info", ",", "self", ".", "model", ",", "self", ".", "settings", ".", "rollout_steps", ")", ".", "to_device", "(", "self", ".", "device", ")", "new_frames", "=", "rollout", ".", "frames", "(", ")", "frames", "+=", "new_frames", "episode_infos", ".", "extend", "(", "rollout", ".", "episode_information", "(", ")", ")", "pbar", ".", "update", "(", "new_frames", ")", "# Store some information about the rollout, no training phase", "batch_info", "[", "'frames'", "]", "=", "frames", "batch_info", "[", "'episode_infos'", "]", "=", "episode_infos" ]
Roll out environment and store result in the replay buffer
[ "Roll", "out", "environment", "and", "store", "result", "in", "the", "replay", "buffer" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/reinforcers/buffered_off_policy_iteration_reinforcer.py#L109-L135
251,330
MillionIntegrals/vel
vel/rl/reinforcers/buffered_off_policy_iteration_reinforcer.py
BufferedOffPolicyIterationReinforcer.train_on_replay_memory
def train_on_replay_memory(self, batch_info): """ Train agent on memory sampled from the replay buffer """ self.model.train() # Algo will aggregate data into this list: batch_info['sub_batch_data'] = [] for i in range(self.settings.training_rounds): sampled_rollout = self.env_roller.sample(batch_info, self.model, self.settings.training_steps) batch_result = self.algo.optimizer_step( batch_info=batch_info, device=self.device, model=self.model, rollout=sampled_rollout.to_device(self.device) ) self.env_roller.update(rollout=sampled_rollout, batch_info=batch_result) batch_info['sub_batch_data'].append(batch_result) batch_info.aggregate_key('sub_batch_data')
python
def train_on_replay_memory(self, batch_info): self.model.train() # Algo will aggregate data into this list: batch_info['sub_batch_data'] = [] for i in range(self.settings.training_rounds): sampled_rollout = self.env_roller.sample(batch_info, self.model, self.settings.training_steps) batch_result = self.algo.optimizer_step( batch_info=batch_info, device=self.device, model=self.model, rollout=sampled_rollout.to_device(self.device) ) self.env_roller.update(rollout=sampled_rollout, batch_info=batch_result) batch_info['sub_batch_data'].append(batch_result) batch_info.aggregate_key('sub_batch_data')
[ "def", "train_on_replay_memory", "(", "self", ",", "batch_info", ")", ":", "self", ".", "model", ".", "train", "(", ")", "# Algo will aggregate data into this list:", "batch_info", "[", "'sub_batch_data'", "]", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "settings", ".", "training_rounds", ")", ":", "sampled_rollout", "=", "self", ".", "env_roller", ".", "sample", "(", "batch_info", ",", "self", ".", "model", ",", "self", ".", "settings", ".", "training_steps", ")", "batch_result", "=", "self", ".", "algo", ".", "optimizer_step", "(", "batch_info", "=", "batch_info", ",", "device", "=", "self", ".", "device", ",", "model", "=", "self", ".", "model", ",", "rollout", "=", "sampled_rollout", ".", "to_device", "(", "self", ".", "device", ")", ")", "self", ".", "env_roller", ".", "update", "(", "rollout", "=", "sampled_rollout", ",", "batch_info", "=", "batch_result", ")", "batch_info", "[", "'sub_batch_data'", "]", ".", "append", "(", "batch_result", ")", "batch_info", ".", "aggregate_key", "(", "'sub_batch_data'", ")" ]
Train agent on memory sampled from the replay buffer
[ "Train", "agent", "on", "a", "memory", "gotten", "from", "replay", "buffer" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/reinforcers/buffered_off_policy_iteration_reinforcer.py#L137-L158
251,331
MillionIntegrals/vel
vel/modules/resnet_v1.py
conv3x3
def conv3x3(in_channels, out_channels, stride=1): """ 3x3 convolution with padding. The original code turns the bias off, because Batch Norm would remove the bias either way """ return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
python
def conv3x3(in_channels, out_channels, stride=1): return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
[ "def", "conv3x3", "(", "in_channels", ",", "out_channels", ",", "stride", "=", "1", ")", ":", "return", "nn", ".", "Conv2d", "(", "in_channels", ",", "out_channels", ",", "kernel_size", "=", "3", ",", "stride", "=", "stride", ",", "padding", "=", "1", ",", "bias", "=", "False", ")" ]
3x3 convolution with padding. The original code turns the bias off, because Batch Norm would remove the bias either way
[ "3x3", "convolution", "with", "padding", ".", "Original", "code", "has", "had", "bias", "turned", "off", "because", "Batch", "Norm", "would", "remove", "the", "bias", "either", "way" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/modules/resnet_v1.py#L10-L15
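The bias-free convolution above is meant to be paired with batch normalization, whose learned shift absorbs any constant offset a bias would add. A minimal sketch of the usual pairing (an illustration, not the repository's ResNet block):

import torch.nn as nn

# conv3x3(...) as defined above; BatchNorm2d's affine shift replaces the conv bias
block = nn.Sequential(
    conv3x3(in_channels=64, out_channels=64),
    nn.BatchNorm2d(64),  # normalizes away any constant offset, then adds its own learned shift
    nn.ReLU(inplace=True),
)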
251,332
MillionIntegrals/vel
vel/notebook/loader.py
load
def load(config_path, run_number=0, device='cuda:0'): """ Load a ModelConfig from filename """ model_config = ModelConfig.from_file(config_path, run_number, device=device) return model_config
python
def load(config_path, run_number=0, device='cuda:0'): model_config = ModelConfig.from_file(config_path, run_number, device=device) return model_config
[ "def", "load", "(", "config_path", ",", "run_number", "=", "0", ",", "device", "=", "'cuda:0'", ")", ":", "model_config", "=", "ModelConfig", ".", "from_file", "(", "config_path", ",", "run_number", ",", "device", "=", "device", ")", "return", "model_config" ]
Load a ModelConfig from filename
[ "Load", "a", "ModelConfig", "from", "filename" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/notebook/loader.py#L4-L8
251,333
MillionIntegrals/vel
vel/api/info.py
TrainingInfo.restore
def restore(self, hidden_state): """ Restore state from a checkpoint - loads callback state and the optimizer initial state """ for callback in self.callbacks: callback.load_state_dict(self, hidden_state) if 'optimizer' in hidden_state: self.optimizer_initial_state = hidden_state['optimizer']
python
def restore(self, hidden_state): for callback in self.callbacks: callback.load_state_dict(self, hidden_state) if 'optimizer' in hidden_state: self.optimizer_initial_state = hidden_state['optimizer']
[ "def", "restore", "(", "self", ",", "hidden_state", ")", ":", "for", "callback", "in", "self", ".", "callbacks", ":", "callback", ".", "load_state_dict", "(", "self", ",", "hidden_state", ")", "if", "'optimizer'", "in", "hidden_state", ":", "self", ".", "optimizer_initial_state", "=", "hidden_state", "[", "'optimizer'", "]" ]
Restore state from a checkpoint - loads callback state and the optimizer initial state
[ "Restore", "any", "state", "from", "checkpoint", "-", "currently", "not", "implemented", "but", "possible", "to", "do", "so", "in", "the", "future" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/info.py#L47-L53
251,334
MillionIntegrals/vel
vel/api/info.py
EpochResultAccumulator.result
def result(self): """ Return the epoch result """ final_result = {'epoch_idx': self.global_epoch_idx} for key, value in self.frozen_results.items(): final_result[key] = value return final_result
python
def result(self): final_result = {'epoch_idx': self.global_epoch_idx} for key, value in self.frozen_results.items(): final_result[key] = value return final_result
[ "def", "result", "(", "self", ")", ":", "final_result", "=", "{", "'epoch_idx'", ":", "self", ".", "global_epoch_idx", "}", "for", "key", ",", "value", "in", "self", ".", "frozen_results", ".", "items", "(", ")", ":", "final_result", "[", "key", "]", "=", "value", "return", "final_result" ]
Return the epoch result
[ "Return", "the", "epoch", "result" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/info.py#L144-L151
251,335
MillionIntegrals/vel
vel/api/info.py
EpochInfo.state_dict
def state_dict(self) -> dict: """ Calculate hidden state dictionary """ hidden_state = {} if self.optimizer is not None: hidden_state['optimizer'] = self.optimizer.state_dict() for callback in self.callbacks: callback.write_state_dict(self.training_info, hidden_state) return hidden_state
python
def state_dict(self) -> dict: hidden_state = {} if self.optimizer is not None: hidden_state['optimizer'] = self.optimizer.state_dict() for callback in self.callbacks: callback.write_state_dict(self.training_info, hidden_state) return hidden_state
[ "def", "state_dict", "(", "self", ")", "->", "dict", ":", "hidden_state", "=", "{", "}", "if", "self", ".", "optimizer", "is", "not", "None", ":", "hidden_state", "[", "'optimizer'", "]", "=", "self", ".", "optimizer", ".", "state_dict", "(", ")", "for", "callback", "in", "self", ".", "callbacks", ":", "callback", ".", "write_state_dict", "(", "self", ".", "training_info", ",", "hidden_state", ")", "return", "hidden_state" ]
Calculate hidden state dictionary
[ "Calculate", "hidden", "state", "dictionary" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/info.py#L186-L196
251,336
MillionIntegrals/vel
vel/api/info.py
EpochInfo.on_epoch_end
def on_epoch_end(self): """ Finish epoch processing """ self.freeze_epoch_result() for callback in self.callbacks: callback.on_epoch_end(self) self.training_info.history.add(self.result)
python
def on_epoch_end(self): self.freeze_epoch_result() for callback in self.callbacks: callback.on_epoch_end(self) self.training_info.history.add(self.result)
[ "def", "on_epoch_end", "(", "self", ")", ":", "self", ".", "freeze_epoch_result", "(", ")", "for", "callback", "in", "self", ".", "callbacks", ":", "callback", ".", "on_epoch_end", "(", "self", ")", "self", ".", "training_info", ".", "history", ".", "add", "(", "self", ".", "result", ")" ]
Finish epoch processing
[ "Finish", "epoch", "processing" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/info.py#L203-L210
251,337
MillionIntegrals/vel
vel/api/info.py
BatchInfo.aggregate_key
def aggregate_key(self, aggregate_key): """ Aggregate values from key and put them into the top-level dictionary """ aggregation = self.data_dict[aggregate_key] # List of dictionaries of numpy arrays/scalars # Aggregate sub batch data data_dict_keys = {y for x in aggregation for y in x.keys()} for key in data_dict_keys: # Just average all the statistics from the loss function stacked = np.stack([d[key] for d in aggregation], axis=0) self.data_dict[key] = np.mean(stacked, axis=0)
python
def aggregate_key(self, aggregate_key): aggregation = self.data_dict[aggregate_key] # List of dictionaries of numpy arrays/scalars # Aggregate sub batch data data_dict_keys = {y for x in aggregation for y in x.keys()} for key in data_dict_keys: # Just average all the statistics from the loss function stacked = np.stack([d[key] for d in aggregation], axis=0) self.data_dict[key] = np.mean(stacked, axis=0)
[ "def", "aggregate_key", "(", "self", ",", "aggregate_key", ")", ":", "aggregation", "=", "self", ".", "data_dict", "[", "aggregate_key", "]", "# List of dictionaries of numpy arrays/scalars", "# Aggregate sub batch data", "data_dict_keys", "=", "{", "y", "for", "x", "in", "aggregation", "for", "y", "in", "x", ".", "keys", "(", ")", "}", "for", "key", "in", "data_dict_keys", ":", "# Just average all the statistics from the loss function", "stacked", "=", "np", ".", "stack", "(", "[", "d", "[", "key", "]", "for", "d", "in", "aggregation", "]", ",", "axis", "=", "0", ")", "self", ".", "data_dict", "[", "key", "]", "=", "np", ".", "mean", "(", "stacked", ",", "axis", "=", "0", ")" ]
Aggregate values from key and put them into the top-level dictionary
[ "Aggregate", "values", "from", "key", "and", "put", "them", "into", "the", "top", "-", "level", "dictionary" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/info.py#L316-L326
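A small numeric illustration of the aggregation above (hypothetical values): each sub-batch dictionary contributes its statistics, and the top-level entry becomes their element-wise mean.

import numpy as np

sub_batch_data = [
    {'loss': 0.5, 'grad_norm': 1.0},
    {'loss': 0.3, 'grad_norm': 3.0},
]

# Equivalent of batch_info.aggregate_key('sub_batch_data'):
keys = {k for d in sub_batch_data for k in d}
aggregated = {k: np.mean(np.stack([d[k] for d in sub_batch_data], axis=0), axis=0) for k in keys}

assert np.isclose(aggregated['loss'], 0.4)
assert np.isclose(aggregated['grad_norm'], 2.0)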
251,338
MillionIntegrals/vel
vel/rl/commands/rl_train_command.py
RlTrainCommand.run
def run(self): """ Run reinforcement learning algorithm """ device = self.model_config.torch_device() # Reinforcer is the learner for the reinforcement learning model reinforcer = self.reinforcer.instantiate(device) optimizer = self.optimizer_factory.instantiate(reinforcer.model) # All callbacks used for learning callbacks = self.gather_callbacks(optimizer) # Metrics to track through this training metrics = reinforcer.metrics() training_info = self.resume_training(reinforcer, callbacks, metrics) reinforcer.initialize_training(training_info) training_info.on_train_begin() if training_info.optimizer_initial_state: optimizer.load_state_dict(training_info.optimizer_initial_state) global_epoch_idx = training_info.start_epoch_idx + 1 while training_info['frames'] < self.total_frames: epoch_info = EpochInfo( training_info, global_epoch_idx=global_epoch_idx, batches_per_epoch=self.batches_per_epoch, optimizer=optimizer, ) reinforcer.train_epoch(epoch_info) if self.openai_logging: self._openai_logging(epoch_info.result) self.storage.checkpoint(epoch_info, reinforcer.model) global_epoch_idx += 1 training_info.on_train_end() return training_info
python
def run(self): device = self.model_config.torch_device() # Reinforcer is the learner for the reinforcement learning model reinforcer = self.reinforcer.instantiate(device) optimizer = self.optimizer_factory.instantiate(reinforcer.model) # All callbacks used for learning callbacks = self.gather_callbacks(optimizer) # Metrics to track through this training metrics = reinforcer.metrics() training_info = self.resume_training(reinforcer, callbacks, metrics) reinforcer.initialize_training(training_info) training_info.on_train_begin() if training_info.optimizer_initial_state: optimizer.load_state_dict(training_info.optimizer_initial_state) global_epoch_idx = training_info.start_epoch_idx + 1 while training_info['frames'] < self.total_frames: epoch_info = EpochInfo( training_info, global_epoch_idx=global_epoch_idx, batches_per_epoch=self.batches_per_epoch, optimizer=optimizer, ) reinforcer.train_epoch(epoch_info) if self.openai_logging: self._openai_logging(epoch_info.result) self.storage.checkpoint(epoch_info, reinforcer.model) global_epoch_idx += 1 training_info.on_train_end() return training_info
[ "def", "run", "(", "self", ")", ":", "device", "=", "self", ".", "model_config", ".", "torch_device", "(", ")", "# Reinforcer is the learner for the reinforcement learning model", "reinforcer", "=", "self", ".", "reinforcer", ".", "instantiate", "(", "device", ")", "optimizer", "=", "self", ".", "optimizer_factory", ".", "instantiate", "(", "reinforcer", ".", "model", ")", "# All callbacks used for learning", "callbacks", "=", "self", ".", "gather_callbacks", "(", "optimizer", ")", "# Metrics to track through this training", "metrics", "=", "reinforcer", ".", "metrics", "(", ")", "training_info", "=", "self", ".", "resume_training", "(", "reinforcer", ",", "callbacks", ",", "metrics", ")", "reinforcer", ".", "initialize_training", "(", "training_info", ")", "training_info", ".", "on_train_begin", "(", ")", "if", "training_info", ".", "optimizer_initial_state", ":", "optimizer", ".", "load_state_dict", "(", "training_info", ".", "optimizer_initial_state", ")", "global_epoch_idx", "=", "training_info", ".", "start_epoch_idx", "+", "1", "while", "training_info", "[", "'frames'", "]", "<", "self", ".", "total_frames", ":", "epoch_info", "=", "EpochInfo", "(", "training_info", ",", "global_epoch_idx", "=", "global_epoch_idx", ",", "batches_per_epoch", "=", "self", ".", "batches_per_epoch", ",", "optimizer", "=", "optimizer", ",", ")", "reinforcer", ".", "train_epoch", "(", "epoch_info", ")", "if", "self", ".", "openai_logging", ":", "self", ".", "_openai_logging", "(", "epoch_info", ".", "result", ")", "self", ".", "storage", ".", "checkpoint", "(", "epoch_info", ",", "reinforcer", ".", "model", ")", "global_epoch_idx", "+=", "1", "training_info", ".", "on_train_end", "(", ")", "return", "training_info" ]
Run reinforcement learning algorithm
[ "Run", "reinforcement", "learning", "algorithm" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/commands/rl_train_command.py#L62-L104
251,339
MillionIntegrals/vel
vel/rl/commands/rl_train_command.py
RlTrainCommand.resume_training
def resume_training(self, reinforcer, callbacks, metrics) -> TrainingInfo: """ Possibly resume training from a saved state in the storage """ if self.model_config.continue_training: start_epoch = self.storage.last_epoch_idx() else: start_epoch = 0 training_info = TrainingInfo( start_epoch_idx=start_epoch, run_name=self.model_config.run_name, metrics=metrics, callbacks=callbacks ) if start_epoch == 0: self.storage.reset(self.model_config.render_configuration()) training_info.initialize() reinforcer.initialize_training(training_info) else: model_state, hidden_state = self.storage.load(training_info) reinforcer.initialize_training(training_info, model_state, hidden_state) return training_info
python
def resume_training(self, reinforcer, callbacks, metrics) -> TrainingInfo: if self.model_config.continue_training: start_epoch = self.storage.last_epoch_idx() else: start_epoch = 0 training_info = TrainingInfo( start_epoch_idx=start_epoch, run_name=self.model_config.run_name, metrics=metrics, callbacks=callbacks ) if start_epoch == 0: self.storage.reset(self.model_config.render_configuration()) training_info.initialize() reinforcer.initialize_training(training_info) else: model_state, hidden_state = self.storage.load(training_info) reinforcer.initialize_training(training_info, model_state, hidden_state) return training_info
[ "def", "resume_training", "(", "self", ",", "reinforcer", ",", "callbacks", ",", "metrics", ")", "->", "TrainingInfo", ":", "if", "self", ".", "model_config", ".", "continue_training", ":", "start_epoch", "=", "self", ".", "storage", ".", "last_epoch_idx", "(", ")", "else", ":", "start_epoch", "=", "0", "training_info", "=", "TrainingInfo", "(", "start_epoch_idx", "=", "start_epoch", ",", "run_name", "=", "self", ".", "model_config", ".", "run_name", ",", "metrics", "=", "metrics", ",", "callbacks", "=", "callbacks", ")", "if", "start_epoch", "==", "0", ":", "self", ".", "storage", ".", "reset", "(", "self", ".", "model_config", ".", "render_configuration", "(", ")", ")", "training_info", ".", "initialize", "(", ")", "reinforcer", ".", "initialize_training", "(", "training_info", ")", "else", ":", "model_state", ",", "hidden_state", "=", "self", ".", "storage", ".", "load", "(", "training_info", ")", "reinforcer", ".", "initialize_training", "(", "training_info", ",", "model_state", ",", "hidden_state", ")", "return", "training_info" ]
Possibly resume training from a saved state in the storage
[ "Possibly", "resume", "training", "from", "a", "saved", "state", "from", "the", "storage" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/commands/rl_train_command.py#L118-L139
251,340
MillionIntegrals/vel
vel/rl/commands/rl_train_command.py
RlTrainCommand._openai_logging
def _openai_logging(self, epoch_result): """ Use OpenAI logging facilities for the same type of logging """ for key in sorted(epoch_result.keys()): if key == 'fps': # Not super elegant, but I like nicer display of FPS openai_logger.record_tabular(key, int(epoch_result[key])) else: openai_logger.record_tabular(key, epoch_result[key]) openai_logger.dump_tabular()
python
def _openai_logging(self, epoch_result): for key in sorted(epoch_result.keys()): if key == 'fps': # Not super elegant, but I like nicer display of FPS openai_logger.record_tabular(key, int(epoch_result[key])) else: openai_logger.record_tabular(key, epoch_result[key]) openai_logger.dump_tabular()
[ "def", "_openai_logging", "(", "self", ",", "epoch_result", ")", ":", "for", "key", "in", "sorted", "(", "epoch_result", ".", "keys", "(", ")", ")", ":", "if", "key", "==", "'fps'", ":", "# Not super elegant, but I like nicer display of FPS", "openai_logger", ".", "record_tabular", "(", "key", ",", "int", "(", "epoch_result", "[", "key", "]", ")", ")", "else", ":", "openai_logger", ".", "record_tabular", "(", "key", ",", "epoch_result", "[", "key", "]", ")", "openai_logger", ".", "dump_tabular", "(", ")" ]
Use OpenAI logging facilities for the same type of logging
[ "Use", "OpenAI", "logging", "facilities", "for", "the", "same", "type", "of", "logging" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/commands/rl_train_command.py#L141-L150
251,341
MillionIntegrals/vel
vel/util/module_util.py
module_broadcast
def module_broadcast(m, broadcast_fn, *args, **kwargs): """ Call given function in all submodules with given parameters """ apply_leaf(m, lambda x: module_apply_broadcast(x, broadcast_fn, args, kwargs))
python
def module_broadcast(m, broadcast_fn, *args, **kwargs): apply_leaf(m, lambda x: module_apply_broadcast(x, broadcast_fn, args, kwargs))
[ "def", "module_broadcast", "(", "m", ",", "broadcast_fn", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "apply_leaf", "(", "m", ",", "lambda", "x", ":", "module_apply_broadcast", "(", "x", ",", "broadcast_fn", ",", "args", ",", "kwargs", ")", ")" ]
Call given function in all submodules with given parameters
[ "Call", "given", "function", "in", "all", "submodules", "with", "given", "parameters" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/util/module_util.py#L34-L36
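apply_leaf and module_apply_broadcast are defined elsewhere in the same module and are not shown here; as a rough mental model only (a sketch under the assumption that broadcast_fn names a method on the leaf modules), they might look like:

def apply_leaf(module, fn):
    # Recurse into children and apply fn to every leaf submodule
    children = list(module.children())
    if not children:
        fn(module)
    for child in children:
        apply_leaf(child, fn)

def module_apply_broadcast(module, broadcast_fn, args, kwargs):
    # Invoke the named method on the leaf module, if it defines one
    if hasattr(module, broadcast_fn):
        getattr(module, broadcast_fn)(*args, **kwargs)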
251,342
MillionIntegrals/vel
vel/commands/phase_train_command.py
PhaseTrainCommand._select_phase_left_bound
def _select_phase_left_bound(self, epoch_number): """ Return the index of the current phase - the first phase that is not yet done after all phases up to epoch_number are done. """ idx = bisect.bisect_left(self.ladder, epoch_number) if idx >= len(self.ladder): return len(self.ladder) - 1 elif self.ladder[idx] > epoch_number: return idx - 1 else: return idx
python
def _select_phase_left_bound(self, epoch_number): idx = bisect.bisect_left(self.ladder, epoch_number) if idx >= len(self.ladder): return len(self.ladder) - 1 elif self.ladder[idx] > epoch_number: return idx - 1 else: return idx
[ "def", "_select_phase_left_bound", "(", "self", ",", "epoch_number", ")", ":", "idx", "=", "bisect", ".", "bisect_left", "(", "self", ".", "ladder", ",", "epoch_number", ")", "if", "idx", ">=", "len", "(", "self", ".", "ladder", ")", ":", "return", "len", "(", "self", ".", "ladder", ")", "-", "1", "elif", "self", ".", "ladder", "[", "idx", "]", ">", "epoch_number", ":", "return", "idx", "-", "1", "else", ":", "return", "idx" ]
Return the index of the current phase - the first phase that is not yet done after all phases up to epoch_number are done.
[ "Return", "number", "of", "current", "phase", ".", "Return", "index", "of", "first", "phase", "not", "done", "after", "all", "up", "to", "epoch_number", "were", "done", "." ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/commands/phase_train_command.py#L29-L41
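A worked example of the lookup above, with a hypothetical ladder of cumulative epoch boundaries:

import bisect

ladder = [0, 5, 10]  # hypothetical: phase i begins at epoch ladder[i]

def select(epoch_number):
    idx = bisect.bisect_left(ladder, epoch_number)
    if idx >= len(ladder):
        return len(ladder) - 1
    elif ladder[idx] > epoch_number:
        return idx - 1
    else:
        return idx

assert select(0) == 0    # exact boundary hit returns that phase
assert select(3) == 0    # inside the first phase
assert select(5) == 1    # boundary of the second phase
assert select(99) == 2   # beyond the last boundary clamps to the final phase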
251,343
MillionIntegrals/vel
vel/rl/env/classic_atari.py
wrapped_env_maker
def wrapped_env_maker(environment_id, seed, serial_id, disable_reward_clipping=False, disable_episodic_life=False, monitor=False, allow_early_resets=False, scale_float_frames=False, max_episode_frames=10000, frame_stack=None): """ Wrap atari environment so that it's nicer to learn RL algorithms """ env = env_maker(environment_id) env.seed(seed + serial_id) if max_episode_frames is not None: env = ClipEpisodeLengthWrapper(env, max_episode_length=max_episode_frames) # Monitoring the env if monitor: logdir = logger.get_dir() and os.path.join(logger.get_dir(), str(serial_id)) else: logdir = None env = Monitor(env, logdir, allow_early_resets=allow_early_resets) if not disable_episodic_life: # Make end-of-life == end-of-episode, but only reset on true game over. # Done by DeepMind for the DQN and co. since it helps value estimation. env = EpisodicLifeEnv(env) if 'FIRE' in env.unwrapped.get_action_meanings(): # Take action on reset for environments that are fixed until firing. if disable_episodic_life: env = FireEpisodicLifeEnv(env) else: env = FireResetEnv(env) # Warp frames to 84x84 as done in the Nature paper and later work. env = WarpFrame(env) if scale_float_frames: env = ScaledFloatFrame(env) if not disable_reward_clipping: # Bin reward to {+1, 0, -1} by its sign. env = ClipRewardEnv(env) if frame_stack is not None: env = FrameStack(env, frame_stack) return env
python
def wrapped_env_maker(environment_id, seed, serial_id, disable_reward_clipping=False, disable_episodic_life=False, monitor=False, allow_early_resets=False, scale_float_frames=False, max_episode_frames=10000, frame_stack=None): env = env_maker(environment_id) env.seed(seed + serial_id) if max_episode_frames is not None: env = ClipEpisodeLengthWrapper(env, max_episode_length=max_episode_frames) # Monitoring the env if monitor: logdir = logger.get_dir() and os.path.join(logger.get_dir(), str(serial_id)) else: logdir = None env = Monitor(env, logdir, allow_early_resets=allow_early_resets) if not disable_episodic_life: # Make end-of-life == end-of-episode, but only reset on true game over. # Done by DeepMind for the DQN and co. since it helps value estimation. env = EpisodicLifeEnv(env) if 'FIRE' in env.unwrapped.get_action_meanings(): # Take action on reset for environments that are fixed until firing. if disable_episodic_life: env = FireEpisodicLifeEnv(env) else: env = FireResetEnv(env) # Warp frames to 84x84 as done in the Nature paper and later work. env = WarpFrame(env) if scale_float_frames: env = ScaledFloatFrame(env) if not disable_reward_clipping: # Bin reward to {+1, 0, -1} by its sign. env = ClipRewardEnv(env) if frame_stack is not None: env = FrameStack(env, frame_stack) return env
[ "def", "wrapped_env_maker", "(", "environment_id", ",", "seed", ",", "serial_id", ",", "disable_reward_clipping", "=", "False", ",", "disable_episodic_life", "=", "False", ",", "monitor", "=", "False", ",", "allow_early_resets", "=", "False", ",", "scale_float_frames", "=", "False", ",", "max_episode_frames", "=", "10000", ",", "frame_stack", "=", "None", ")", ":", "env", "=", "env_maker", "(", "environment_id", ")", "env", ".", "seed", "(", "seed", "+", "serial_id", ")", "if", "max_episode_frames", "is", "not", "None", ":", "env", "=", "ClipEpisodeLengthWrapper", "(", "env", ",", "max_episode_length", "=", "max_episode_frames", ")", "# Monitoring the env", "if", "monitor", ":", "logdir", "=", "logger", ".", "get_dir", "(", ")", "and", "os", ".", "path", ".", "join", "(", "logger", ".", "get_dir", "(", ")", ",", "str", "(", "serial_id", ")", ")", "else", ":", "logdir", "=", "None", "env", "=", "Monitor", "(", "env", ",", "logdir", ",", "allow_early_resets", "=", "allow_early_resets", ")", "if", "not", "disable_episodic_life", ":", "# Make end-of-life == end-of-episode, but only reset on true game over.", "# Done by DeepMind for the DQN and co. since it helps value estimation.", "env", "=", "EpisodicLifeEnv", "(", "env", ")", "if", "'FIRE'", "in", "env", ".", "unwrapped", ".", "get_action_meanings", "(", ")", ":", "# Take action on reset for environments that are fixed until firing.", "if", "disable_episodic_life", ":", "env", "=", "FireEpisodicLifeEnv", "(", "env", ")", "else", ":", "env", "=", "FireResetEnv", "(", "env", ")", "# Warp frames to 84x84 as done in the Nature paper and later work.", "env", "=", "WarpFrame", "(", "env", ")", "if", "scale_float_frames", ":", "env", "=", "ScaledFloatFrame", "(", "env", ")", "if", "not", "disable_reward_clipping", ":", "# Bin reward to {+1, 0, -1} by its sign.", "env", "=", "ClipRewardEnv", "(", "env", ")", "if", "frame_stack", "is", "not", "None", ":", "env", "=", "FrameStack", "(", "env", ",", "frame_stack", ")", "return", "env" ]
Wrap atari environment so that it's nicer to learn RL algorithms
[ "Wrap", "atari", "environment", "so", "that", "it", "s", "nicer", "to", "learn", "RL", "algorithms" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/env/classic_atari.py#L55-L98
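A hedged usage example of the wrapper stack above; the environment id and keyword values are illustrative, and gym with the Atari extras must be installed for this to run:

# Build a DeepMind-style wrapped Atari env: episodic life, 84x84 grayscale frames,
# sign-clipped rewards and a 4-frame stack
env = wrapped_env_maker('BreakoutNoFrameskip-v4', seed=1, serial_id=0, frame_stack=4)

obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())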
251,344
MillionIntegrals/vel
vel/rl/env/classic_atari.py
ClassicAtariEnv.instantiate
def instantiate(self, seed=0, serial_id=0, preset='default', extra_args=None) -> gym.Env: """ Make a single environment compatible with the experiments """ settings = self.get_preset(preset) return wrapped_env_maker(self.envname, seed, serial_id, **settings)
python
def instantiate(self, seed=0, serial_id=0, preset='default', extra_args=None) -> gym.Env: settings = self.get_preset(preset) return wrapped_env_maker(self.envname, seed, serial_id, **settings)
[ "def", "instantiate", "(", "self", ",", "seed", "=", "0", ",", "serial_id", "=", "0", ",", "preset", "=", "'default'", ",", "extra_args", "=", "None", ")", "->", "gym", ".", "Env", ":", "settings", "=", "self", ".", "get_preset", "(", "preset", ")", "return", "wrapped_env_maker", "(", "self", ".", "envname", ",", "seed", ",", "serial_id", ",", "*", "*", "settings", ")" ]
Make a single environment compatible with the experiments
[ "Make", "a", "single", "environment", "compatible", "with", "the", "experiments" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/env/classic_atari.py#L115-L118
251,345
MillionIntegrals/vel
vel/util/visdom.py
visdom_send_metrics
def visdom_send_metrics(vis, metrics, update='replace'): """ Send set of metrics to visdom """ visited = {} sorted_metrics = sorted(metrics.columns, key=_column_original_name) for metric_basename, metric_list in it.groupby(sorted_metrics, key=_column_original_name): metric_list = list(metric_list) for metric in metric_list: if vis.win_exists(metric_basename) and (not visited.get(metric, False)): update = update elif not vis.win_exists(metric_basename): update = None else: update = 'append' vis.line( metrics[metric].values, metrics.index.values, win=metric_basename, name=metric, opts={ 'title': metric_basename, 'showlegend': True }, update=update ) if metric_basename != metric and len(metric_list) > 1: if vis.win_exists(metric): update = update else: update = None vis.line( metrics[metric].values, metrics.index.values, win=metric, name=metric, opts={ 'title': metric, 'showlegend': True }, update=update )
python
def visdom_send_metrics(vis, metrics, update='replace'): visited = {} sorted_metrics = sorted(metrics.columns, key=_column_original_name) for metric_basename, metric_list in it.groupby(sorted_metrics, key=_column_original_name): metric_list = list(metric_list) for metric in metric_list: if vis.win_exists(metric_basename) and (not visited.get(metric, False)): update = update elif not vis.win_exists(metric_basename): update = None else: update = 'append' vis.line( metrics[metric].values, metrics.index.values, win=metric_basename, name=metric, opts={ 'title': metric_basename, 'showlegend': True }, update=update ) if metric_basename != metric and len(metric_list) > 1: if vis.win_exists(metric): update = update else: update = None vis.line( metrics[metric].values, metrics.index.values, win=metric, name=metric, opts={ 'title': metric, 'showlegend': True }, update=update )
[ "def", "visdom_send_metrics", "(", "vis", ",", "metrics", ",", "update", "=", "'replace'", ")", ":", "visited", "=", "{", "}", "sorted_metrics", "=", "sorted", "(", "metrics", ".", "columns", ",", "key", "=", "_column_original_name", ")", "for", "metric_basename", ",", "metric_list", "in", "it", ".", "groupby", "(", "sorted_metrics", ",", "key", "=", "_column_original_name", ")", ":", "metric_list", "=", "list", "(", "metric_list", ")", "for", "metric", "in", "metric_list", ":", "if", "vis", ".", "win_exists", "(", "metric_basename", ")", "and", "(", "not", "visited", ".", "get", "(", "metric", ",", "False", ")", ")", ":", "update", "=", "update", "elif", "not", "vis", ".", "win_exists", "(", "metric_basename", ")", ":", "update", "=", "None", "else", ":", "update", "=", "'append'", "vis", ".", "line", "(", "metrics", "[", "metric", "]", ".", "values", ",", "metrics", ".", "index", ".", "values", ",", "win", "=", "metric_basename", ",", "name", "=", "metric", ",", "opts", "=", "{", "'title'", ":", "metric_basename", ",", "'showlegend'", ":", "True", "}", ",", "update", "=", "update", ")", "if", "metric_basename", "!=", "metric", "and", "len", "(", "metric_list", ")", ">", "1", ":", "if", "vis", ".", "win_exists", "(", "metric", ")", ":", "update", "=", "update", "else", ":", "update", "=", "None", "vis", ".", "line", "(", "metrics", "[", "metric", "]", ".", "values", ",", "metrics", ".", "index", ".", "values", ",", "win", "=", "metric", ",", "name", "=", "metric", ",", "opts", "=", "{", "'title'", ":", "metric", ",", "'showlegend'", ":", "True", "}", ",", "update", "=", "update", ")" ]
Send set of metrics to visdom
[ "Send", "set", "of", "metrics", "to", "visdom" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/util/visdom.py#L27-L71
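Note that the loop above reassigns its own update argument on every metric and never writes anything into visited, so the caller's requested mode is lost after the first iteration. A minimal corrected sketch of the apparent intent (an interpretation, not the repository's code):

def _resolve_update_mode(vis, window, metric, visited, requested):
    # Decide the visdom update mode without clobbering the caller's argument
    if not vis.win_exists(window):
        mode = None                  # fresh window: let visdom create it
    elif not visited.get(metric, False):
        mode = requested             # first touch of this trace: honour replace/append
    else:
        mode = 'append'              # later touches extend the existing trace
    visited[metric] = True
    return mode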
251,346
MillionIntegrals/vel
vel/api/train_phase.py
TrainPhase.restore
def restore(self, training_info: TrainingInfo, local_batch_idx: int, model: Model, hidden_state: dict): """ Restore learning from intermediate state. """ pass
python
def restore(self, training_info: TrainingInfo, local_batch_idx: int, model: Model, hidden_state: dict): pass
[ "def", "restore", "(", "self", ",", "training_info", ":", "TrainingInfo", ",", "local_batch_idx", ":", "int", ",", "model", ":", "Model", ",", "hidden_state", ":", "dict", ")", ":", "pass" ]
Restore learning from intermediate state.
[ "Restore", "learning", "from", "intermediate", "state", "." ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/train_phase.py#L18-L22
251,347
MillionIntegrals/vel
vel/rl/buffers/backend/prioritized_vec_buffer_backend.py
PrioritizedCircularVecEnvBufferBackend.update_priority
def update_priority(self, tree_idx_list, priority_list): """ Update priorities of the elements in the tree """ for tree_idx, priority, segment_tree in zip(tree_idx_list, priority_list, self.segment_trees): segment_tree.update(tree_idx, priority)
python
def update_priority(self, tree_idx_list, priority_list): for tree_idx, priority, segment_tree in zip(tree_idx_list, priority_list, self.segment_trees): segment_tree.update(tree_idx, priority)
[ "def", "update_priority", "(", "self", ",", "tree_idx_list", ",", "priority_list", ")", ":", "for", "tree_idx", ",", "priority", ",", "segment_tree", "in", "zip", "(", "tree_idx_list", ",", "priority_list", ",", "self", ".", "segment_trees", ")", ":", "segment_tree", ".", "update", "(", "tree_idx", ",", "priority", ")" ]
Update priorities of the elements in the tree
[ "Update", "priorities", "of", "the", "elements", "in", "the", "tree" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/buffers/backend/prioritized_vec_buffer_backend.py#L72-L75
251,348
MillionIntegrals/vel
vel/rl/buffers/backend/prioritized_vec_buffer_backend.py
PrioritizedCircularVecEnvBufferBackend._sample_batch_prioritized
def _sample_batch_prioritized(self, segment_tree, batch_size, history, forward_steps=1): """ Return indexes of the next sample from the prioritized distribution """ p_total = segment_tree.total() segment = p_total / batch_size # Get batch of valid samples batch = [ self._get_sample_from_segment(segment_tree, segment, i, history, forward_steps) for i in range(batch_size) ] probs, idxs, tree_idxs = zip(*batch) return np.array(probs), np.array(idxs), np.array(tree_idxs)
python
def _sample_batch_prioritized(self, segment_tree, batch_size, history, forward_steps=1): p_total = segment_tree.total() segment = p_total / batch_size # Get batch of valid samples batch = [ self._get_sample_from_segment(segment_tree, segment, i, history, forward_steps) for i in range(batch_size) ] probs, idxs, tree_idxs = zip(*batch) return np.array(probs), np.array(idxs), np.array(tree_idxs)
[ "def", "_sample_batch_prioritized", "(", "self", ",", "segment_tree", ",", "batch_size", ",", "history", ",", "forward_steps", "=", "1", ")", ":", "p_total", "=", "segment_tree", ".", "total", "(", ")", "segment", "=", "p_total", "/", "batch_size", "# Get batch of valid samples", "batch", "=", "[", "self", ".", "_get_sample_from_segment", "(", "segment_tree", ",", "segment", ",", "i", ",", "history", ",", "forward_steps", ")", "for", "i", "in", "range", "(", "batch_size", ")", "]", "probs", ",", "idxs", ",", "tree_idxs", "=", "zip", "(", "*", "batch", ")", "return", "np", ".", "array", "(", "probs", ")", ",", "np", ".", "array", "(", "idxs", ")", ",", "np", ".", "array", "(", "tree_idxs", ")" ]
Return indexes of the next sample from the prioritized distribution
[ "Return", "indexes", "of", "the", "next", "sample", "in", "from", "prioritized", "distribution" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/buffers/backend/prioritized_vec_buffer_backend.py#L87-L99
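The sampling above is stratified: the total priority mass in the tree is split into batch_size equal segments and one transition is drawn from each, which keeps the batch spread across the whole priority distribution. A toy illustration of the segment boundaries (hypothetical numbers):

p_total = 12.0                  # total priority mass in the segment tree (hypothetical)
batch_size = 4
segment = p_total / batch_size  # each stratum covers 3.0 units of mass

# _get_sample_from_segment(tree, segment, i, ...) is expected to draw one sample
# whose cumulative priority falls inside [i * segment, (i + 1) * segment)
bounds = [(i * segment, (i + 1) * segment) for i in range(batch_size)]
assert bounds == [(0.0, 3.0), (3.0, 6.0), (6.0, 9.0), (9.0, 12.0)]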
251,349
MillionIntegrals/vel
vel/rl/buffers/backend/circular_vec_buffer_backend.py
take_along_axis
def take_along_axis(large_array, indexes): """ Take along axis """ # Reshape indexes into the right shape if len(large_array.shape) > len(indexes.shape): indexes = indexes.reshape(indexes.shape + tuple([1] * (len(large_array.shape) - len(indexes.shape)))) return np.take_along_axis(large_array, indexes, axis=0)
python
def take_along_axis(large_array, indexes): # Reshape indexes into the right shape if len(large_array.shape) > len(indexes.shape): indexes = indexes.reshape(indexes.shape + tuple([1] * (len(large_array.shape) - len(indexes.shape)))) return np.take_along_axis(large_array, indexes, axis=0)
[ "def", "take_along_axis", "(", "large_array", ",", "indexes", ")", ":", "# Reshape indexes into the right shape", "if", "len", "(", "large_array", ".", "shape", ")", ">", "len", "(", "indexes", ".", "shape", ")", ":", "indexes", "=", "indexes", ".", "reshape", "(", "indexes", ".", "shape", "+", "tuple", "(", "[", "1", "]", "*", "(", "len", "(", "large_array", ".", "shape", ")", "-", "len", "(", "indexes", ".", "shape", ")", ")", ")", ")", "return", "np", ".", "take_along_axis", "(", "large_array", ",", "indexes", ",", "axis", "=", "0", ")" ]
Take along axis
[ "Take", "along", "axis" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/buffers/backend/circular_vec_buffer_backend.py#L7-L13
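A quick shape example for the helper above: a 2-D index array gets trailing singleton dimensions so that np.take_along_axis can broadcast it against a higher-rank buffer.

import numpy as np

buffer = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)  # e.g. (time, env, height, width)
indexes = np.array([[0, 1, 1], [1, 0, 0]])             # shape (2, 3): one frame index per slot

out = take_along_axis(buffer, indexes)
# indexes is reshaped to (2, 3, 1, 1) before np.take_along_axis(..., axis=0),
# so the trailing dimensions broadcast against the buffer
assert out.shape == (2, 3, 4, 5)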
251,350
MillionIntegrals/vel
vel/rl/buffers/backend/circular_vec_buffer_backend.py
CircularVecEnvBufferBackend.get_transition
def get_transition(self, frame_idx, env_idx): """ Single transition with given index """ past_frame, future_frame = self.get_frame_with_future(frame_idx, env_idx) data_dict = { 'observations': past_frame, 'observations_next': future_frame, 'actions': self.action_buffer[frame_idx, env_idx], 'rewards': self.reward_buffer[frame_idx, env_idx], 'dones': self.dones_buffer[frame_idx, env_idx], } for name in self.extra_data: data_dict[name] = self.extra_data[name][frame_idx, env_idx] return data_dict
python
def get_transition(self, frame_idx, env_idx): past_frame, future_frame = self.get_frame_with_future(frame_idx, env_idx) data_dict = { 'observations': past_frame, 'observations_next': future_frame, 'actions': self.action_buffer[frame_idx, env_idx], 'rewards': self.reward_buffer[frame_idx, env_idx], 'dones': self.dones_buffer[frame_idx, env_idx], } for name in self.extra_data: data_dict[name] = self.extra_data[name][frame_idx, env_idx] return data_dict
[ "def", "get_transition", "(", "self", ",", "frame_idx", ",", "env_idx", ")", ":", "past_frame", ",", "future_frame", "=", "self", ".", "get_frame_with_future", "(", "frame_idx", ",", "env_idx", ")", "data_dict", "=", "{", "'observations'", ":", "past_frame", ",", "'observations_next'", ":", "future_frame", ",", "'actions'", ":", "self", ".", "action_buffer", "[", "frame_idx", ",", "env_idx", "]", ",", "'rewards'", ":", "self", ".", "reward_buffer", "[", "frame_idx", ",", "env_idx", "]", ",", "'dones'", ":", "self", ".", "dones_buffer", "[", "frame_idx", ",", "env_idx", "]", ",", "}", "for", "name", "in", "self", ".", "extra_data", ":", "data_dict", "[", "name", "]", "=", "self", ".", "extra_data", "[", "name", "]", "[", "frame_idx", ",", "env_idx", "]", "return", "data_dict" ]
Single transition with given index
[ "Single", "transition", "with", "given", "index" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/buffers/backend/circular_vec_buffer_backend.py#L190-L205
251,351
MillionIntegrals/vel
vel/rl/buffers/backend/circular_vec_buffer_backend.py
CircularVecEnvBufferBackend.get_transitions_forward_steps
def get_transitions_forward_steps(self, indexes, forward_steps, discount_factor): """ Get a dictionary of transition data - where the target of a transition is n steps forward along the trajectory. Rewards are properly aggregated according to the discount factor, and the process stops when the trajectory is done. """ frame_batch_shape = ( [indexes.shape[0], indexes.shape[1]] + list(self.state_buffer.shape[2:-1]) + [self.state_buffer.shape[-1] * self.frame_history] ) simple_batch_shape = [indexes.shape[0], indexes.shape[1]] past_frame_buffer = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype) future_frame_buffer = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype) reward_buffer = np.zeros(simple_batch_shape, dtype=np.float32) dones_buffer = np.zeros(simple_batch_shape, dtype=bool) for buffer_idx, frame_row in enumerate(indexes): for env_idx, frame_idx in enumerate(frame_row): past_frame, future_frame, reward, done = self.get_frame_with_future_forward_steps( frame_idx, env_idx, forward_steps=forward_steps, discount_factor=discount_factor ) past_frame_buffer[buffer_idx, env_idx] = past_frame future_frame_buffer[buffer_idx, env_idx] = future_frame reward_buffer[buffer_idx, env_idx] = reward dones_buffer[buffer_idx, env_idx] = done actions = take_along_axis(self.action_buffer, indexes) transition_tensors = { 'observations': past_frame_buffer, 'actions': actions, 'rewards': reward_buffer, 'observations_next': future_frame_buffer, 'dones': dones_buffer.astype(np.float32), } for name in self.extra_data: transition_tensors[name] = take_along_axis(self.extra_data[name], indexes) return transition_tensors
python
def get_transitions_forward_steps(self, indexes, forward_steps, discount_factor): frame_batch_shape = ( [indexes.shape[0], indexes.shape[1]] + list(self.state_buffer.shape[2:-1]) + [self.state_buffer.shape[-1] * self.frame_history] ) simple_batch_shape = [indexes.shape[0], indexes.shape[1]] past_frame_buffer = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype) future_frame_buffer = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype) reward_buffer = np.zeros(simple_batch_shape, dtype=np.float32) dones_buffer = np.zeros(simple_batch_shape, dtype=bool) for buffer_idx, frame_row in enumerate(indexes): for env_idx, frame_idx in enumerate(frame_row): past_frame, future_frame, reward, done = self.get_frame_with_future_forward_steps( frame_idx, env_idx, forward_steps=forward_steps, discount_factor=discount_factor ) past_frame_buffer[buffer_idx, env_idx] = past_frame future_frame_buffer[buffer_idx, env_idx] = future_frame reward_buffer[buffer_idx, env_idx] = reward dones_buffer[buffer_idx, env_idx] = done actions = take_along_axis(self.action_buffer, indexes) transition_tensors = { 'observations': past_frame_buffer, 'actions': actions, 'rewards': reward_buffer, 'observations_next': future_frame_buffer, 'dones': dones_buffer.astype(np.float32), } for name in self.extra_data: transition_tensors[name] = take_along_axis(self.extra_data[name], indexes) return transition_tensors
[ "def", "get_transitions_forward_steps", "(", "self", ",", "indexes", ",", "forward_steps", ",", "discount_factor", ")", ":", "frame_batch_shape", "=", "(", "[", "indexes", ".", "shape", "[", "0", "]", ",", "indexes", ".", "shape", "[", "1", "]", "]", "+", "list", "(", "self", ".", "state_buffer", ".", "shape", "[", "2", ":", "-", "1", "]", ")", "+", "[", "self", ".", "state_buffer", ".", "shape", "[", "-", "1", "]", "*", "self", ".", "frame_history", "]", ")", "simple_batch_shape", "=", "[", "indexes", ".", "shape", "[", "0", "]", ",", "indexes", ".", "shape", "[", "1", "]", "]", "past_frame_buffer", "=", "np", ".", "zeros", "(", "frame_batch_shape", ",", "dtype", "=", "self", ".", "state_buffer", ".", "dtype", ")", "future_frame_buffer", "=", "np", ".", "zeros", "(", "frame_batch_shape", ",", "dtype", "=", "self", ".", "state_buffer", ".", "dtype", ")", "reward_buffer", "=", "np", ".", "zeros", "(", "simple_batch_shape", ",", "dtype", "=", "np", ".", "float32", ")", "dones_buffer", "=", "np", ".", "zeros", "(", "simple_batch_shape", ",", "dtype", "=", "bool", ")", "for", "buffer_idx", ",", "frame_row", "in", "enumerate", "(", "indexes", ")", ":", "for", "env_idx", ",", "frame_idx", "in", "enumerate", "(", "frame_row", ")", ":", "past_frame", ",", "future_frame", ",", "reward", ",", "done", "=", "self", ".", "get_frame_with_future_forward_steps", "(", "frame_idx", ",", "env_idx", ",", "forward_steps", "=", "forward_steps", ",", "discount_factor", "=", "discount_factor", ")", "past_frame_buffer", "[", "buffer_idx", ",", "env_idx", "]", "=", "past_frame", "future_frame_buffer", "[", "buffer_idx", ",", "env_idx", "]", "=", "future_frame", "reward_buffer", "[", "buffer_idx", ",", "env_idx", "]", "=", "reward", "dones_buffer", "[", "buffer_idx", ",", "env_idx", "]", "=", "done", "actions", "=", "take_along_axis", "(", "self", ".", "action_buffer", ",", "indexes", ")", "transition_tensors", "=", "{", "'observations'", ":", "past_frame_buffer", ",", "'actions'", ":", "actions", ",", "'rewards'", ":", "reward_buffer", ",", "'observations_next'", ":", "future_frame_buffer", ",", "'dones'", ":", "dones_buffer", ".", "astype", "(", "np", ".", "float32", ")", ",", "}", "for", "name", "in", "self", ".", "extra_data", ":", "transition_tensors", "[", "name", "]", "=", "take_along_axis", "(", "self", ".", "extra_data", "[", "name", "]", ",", "indexes", ")", "return", "transition_tensors" ]
Get a dictionary of transition data - where the target of a transition is n steps forward along the trajectory. Rewards are properly aggregated according to the discount factor, and the process stops when the trajectory is done.
[ "Get", "a", "dictionary", "of", "transition", "data", "-", "where", "the", "target", "of", "a", "transition", "is", "n", "steps", "forward", "along", "the", "trajectory", ".", "Rewards", "are", "properly", "aggregated", "according", "to", "the", "discount", "factor", "and", "the", "process", "stops", "when", "the", "trajectory", "is", "done", "." ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/buffers/backend/circular_vec_buffer_backend.py#L244-L288
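The helper get_frame_with_future_forward_steps is not included in this record; a minimal sketch of the n-step reward aggregation it presumably performs (all names here are hypothetical) could look like:

def n_step_return(rewards, dones, discount_factor):
    """Accumulate discounted rewards over up to n steps, stopping early on episode end."""
    total = 0.0
    for step, (reward, done) in enumerate(zip(rewards, dones)):
        total += (discount_factor ** step) * reward
        if done:
            return total, True  # trajectory finished inside the window
    return total, False

# Example: 3 forward steps, gamma=0.9, episode terminates at the second step
print(n_step_return([1.0, 1.0, 1.0], [False, True, False], 0.9))  # (1.9, True)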
251,352
MillionIntegrals/vel
vel/rl/buffers/backend/circular_vec_buffer_backend.py
CircularVecEnvBufferBackend.sample_batch_trajectories
def sample_batch_trajectories(self, rollout_length): """ Return indexes of next random rollout """ results = [] for i in range(self.num_envs): results.append(self.sample_rollout_single_env(rollout_length)) return np.stack(results, axis=-1)
python
def sample_batch_trajectories(self, rollout_length): results = [] for i in range(self.num_envs): results.append(self.sample_rollout_single_env(rollout_length)) return np.stack(results, axis=-1)
[ "def", "sample_batch_trajectories", "(", "self", ",", "rollout_length", ")", ":", "results", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "num_envs", ")", ":", "results", ".", "append", "(", "self", ".", "sample_rollout_single_env", "(", "rollout_length", ")", ")", "return", "np", ".", "stack", "(", "results", ",", "axis", "=", "-", "1", ")" ]
Return indexes of next random rollout
[ "Return", "indexes", "of", "next", "random", "rollout" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/buffers/backend/circular_vec_buffer_backend.py#L310-L317
251,353
MillionIntegrals/vel
vel/rl/buffers/backend/circular_vec_buffer_backend.py
CircularVecEnvBufferBackend.sample_frame_single_env
def sample_frame_single_env(self, batch_size, forward_steps=1): """ Return indexes of a random set of frames from a buffer that have enough history and future """ # Whole idea of this function is to make sure that sample we take is far away from the point which we are # currently writing to the buffer, which is 'discontinuous' if self.current_size < self.buffer_capacity: # Sample from up to total size of the buffer # -1 because we cannot take the last one return np.random.choice(self.current_size - forward_steps, batch_size, replace=False) else: candidate = np.random.choice(self.buffer_capacity, batch_size, replace=False) forbidden_ones = ( np.arange(self.current_idx - forward_steps + 1, self.current_idx + self.frame_history) % self.buffer_capacity ) # Exclude these frames for learning as they may have some part of history overwritten while any(x in candidate for x in forbidden_ones): candidate = np.random.choice(self.buffer_capacity, batch_size, replace=False) return candidate
python
def sample_frame_single_env(self, batch_size, forward_steps=1): # Whole idea of this function is to make sure that sample we take is far away from the point which we are # currently writing to the buffer, which is 'discontinuous' if self.current_size < self.buffer_capacity: # Sample from up to total size of the buffer # -1 because we cannot take the last one return np.random.choice(self.current_size - forward_steps, batch_size, replace=False) else: candidate = np.random.choice(self.buffer_capacity, batch_size, replace=False) forbidden_ones = ( np.arange(self.current_idx - forward_steps + 1, self.current_idx + self.frame_history) % self.buffer_capacity ) # Exclude these frames for learning as they may have some part of history overwritten while any(x in candidate for x in forbidden_ones): candidate = np.random.choice(self.buffer_capacity, batch_size, replace=False) return candidate
[ "def", "sample_frame_single_env", "(", "self", ",", "batch_size", ",", "forward_steps", "=", "1", ")", ":", "# Whole idea of this function is to make sure that sample we take is far away from the point which we are", "# currently writing to the buffer, which is 'discontinuous'", "if", "self", ".", "current_size", "<", "self", ".", "buffer_capacity", ":", "# Sample from up to total size of the buffer", "# -1 because we cannot take the last one", "return", "np", ".", "random", ".", "choice", "(", "self", ".", "current_size", "-", "forward_steps", ",", "batch_size", ",", "replace", "=", "False", ")", "else", ":", "candidate", "=", "np", ".", "random", ".", "choice", "(", "self", ".", "buffer_capacity", ",", "batch_size", ",", "replace", "=", "False", ")", "forbidden_ones", "=", "(", "np", ".", "arange", "(", "self", ".", "current_idx", "-", "forward_steps", "+", "1", ",", "self", ".", "current_idx", "+", "self", ".", "frame_history", ")", "%", "self", ".", "buffer_capacity", ")", "# Exclude these frames for learning as they may have some part of history overwritten", "while", "any", "(", "x", "in", "candidate", "for", "x", "in", "forbidden_ones", ")", ":", "candidate", "=", "np", ".", "random", ".", "choice", "(", "self", ".", "buffer_capacity", ",", "batch_size", ",", "replace", "=", "False", ")", "return", "candidate" ]
Return indexes of a random set of frames from a buffer that have enough history and future
[ "Return", "indexes", "of", "a", "random", "set", "of", "frames", "from", "a", "buffer", "that", "have", "enough", "history", "and", "future" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/buffers/backend/circular_vec_buffer_backend.py#L346-L367
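The forbidden window above wraps around the circular buffer, excluding frames whose history or future may be partially overwritten by the writer. A small standalone illustration, with arbitrary values:

import numpy as np

buffer_capacity = 10
current_idx = 8      # next write position in the circular buffer
forward_steps = 1
frame_history = 4

forbidden = (
    np.arange(current_idx - forward_steps + 1, current_idx + frame_history)
    % buffer_capacity
)
print(forbidden)  # [8 9 0 1] -- the window straddling the write pointer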
251,354
MillionIntegrals/vel
vel/rl/commands/record_movie_command.py
RecordMovieCommand.record_take
def record_take(self, model, env_instance, device, take_number): """ Record a single movie and store it on hard drive """ frames = [] observation = env_instance.reset() if model.is_recurrent: hidden_state = model.zero_state(1).to(device) frames.append(env_instance.render('rgb_array')) print("Evaluating environment...") while True: observation_array = np.expand_dims(np.array(observation), axis=0) observation_tensor = torch.from_numpy(observation_array).to(device) if model.is_recurrent: output = model.step(observation_tensor, hidden_state, **self.sample_args) hidden_state = output['state'] actions = output['actions'] else: actions = model.step(observation_tensor, **self.sample_args)['actions'] actions = actions.detach().cpu().numpy() observation, reward, done, epinfo = env_instance.step(actions[0]) frames.append(env_instance.render('rgb_array')) if 'episode' in epinfo: # End of an episode break takename = self.model_config.output_dir('videos', self.model_config.run_name, self.videoname.format(take_number)) pathlib.Path(os.path.dirname(takename)).mkdir(parents=True, exist_ok=True) fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G') video = cv2.VideoWriter(takename, fourcc, self.fps, (frames[0].shape[1], frames[0].shape[0])) for i in tqdm.trange(len(frames), file=sys.stdout): video.write(cv2.cvtColor(frames[i], cv2.COLOR_RGB2BGR)) video.release() print("Written {}".format(takename))
python
def record_take(self, model, env_instance, device, take_number): frames = [] observation = env_instance.reset() if model.is_recurrent: hidden_state = model.zero_state(1).to(device) frames.append(env_instance.render('rgb_array')) print("Evaluating environment...") while True: observation_array = np.expand_dims(np.array(observation), axis=0) observation_tensor = torch.from_numpy(observation_array).to(device) if model.is_recurrent: output = model.step(observation_tensor, hidden_state, **self.sample_args) hidden_state = output['state'] actions = output['actions'] else: actions = model.step(observation_tensor, **self.sample_args)['actions'] actions = actions.detach().cpu().numpy() observation, reward, done, epinfo = env_instance.step(actions[0]) frames.append(env_instance.render('rgb_array')) if 'episode' in epinfo: # End of an episode break takename = self.model_config.output_dir('videos', self.model_config.run_name, self.videoname.format(take_number)) pathlib.Path(os.path.dirname(takename)).mkdir(parents=True, exist_ok=True) fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G') video = cv2.VideoWriter(takename, fourcc, self.fps, (frames[0].shape[1], frames[0].shape[0])) for i in tqdm.trange(len(frames), file=sys.stdout): video.write(cv2.cvtColor(frames[i], cv2.COLOR_RGB2BGR)) video.release() print("Written {}".format(takename))
[ "def", "record_take", "(", "self", ",", "model", ",", "env_instance", ",", "device", ",", "take_number", ")", ":", "frames", "=", "[", "]", "observation", "=", "env_instance", ".", "reset", "(", ")", "if", "model", ".", "is_recurrent", ":", "hidden_state", "=", "model", ".", "zero_state", "(", "1", ")", ".", "to", "(", "device", ")", "frames", ".", "append", "(", "env_instance", ".", "render", "(", "'rgb_array'", ")", ")", "print", "(", "\"Evaluating environment...\"", ")", "while", "True", ":", "observation_array", "=", "np", ".", "expand_dims", "(", "np", ".", "array", "(", "observation", ")", ",", "axis", "=", "0", ")", "observation_tensor", "=", "torch", ".", "from_numpy", "(", "observation_array", ")", ".", "to", "(", "device", ")", "if", "model", ".", "is_recurrent", ":", "output", "=", "model", ".", "step", "(", "observation_tensor", ",", "hidden_state", ",", "*", "*", "self", ".", "sample_args", ")", "hidden_state", "=", "output", "[", "'state'", "]", "actions", "=", "output", "[", "'actions'", "]", "else", ":", "actions", "=", "model", ".", "step", "(", "observation_tensor", ",", "*", "*", "self", ".", "sample_args", ")", "[", "'actions'", "]", "actions", "=", "actions", ".", "detach", "(", ")", ".", "cpu", "(", ")", ".", "numpy", "(", ")", "observation", ",", "reward", ",", "done", ",", "epinfo", "=", "env_instance", ".", "step", "(", "actions", "[", "0", "]", ")", "frames", ".", "append", "(", "env_instance", ".", "render", "(", "'rgb_array'", ")", ")", "if", "'episode'", "in", "epinfo", ":", "# End of an episode", "break", "takename", "=", "self", ".", "model_config", ".", "output_dir", "(", "'videos'", ",", "self", ".", "model_config", ".", "run_name", ",", "self", ".", "videoname", ".", "format", "(", "take_number", ")", ")", "pathlib", ".", "Path", "(", "os", ".", "path", ".", "dirname", "(", "takename", ")", ")", ".", "mkdir", "(", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "fourcc", "=", "cv2", ".", "VideoWriter_fourcc", "(", "'M'", ",", "'J'", ",", "'P'", ",", "'G'", ")", "video", "=", "cv2", ".", "VideoWriter", "(", "takename", ",", "fourcc", ",", "self", ".", "fps", ",", "(", "frames", "[", "0", "]", ".", "shape", "[", "1", "]", ",", "frames", "[", "0", "]", ".", "shape", "[", "0", "]", ")", ")", "for", "i", "in", "tqdm", ".", "trange", "(", "len", "(", "frames", ")", ",", "file", "=", "sys", ".", "stdout", ")", ":", "video", ".", "write", "(", "cv2", ".", "cvtColor", "(", "frames", "[", "i", "]", ",", "cv2", ".", "COLOR_RGB2BGR", ")", ")", "video", ".", "release", "(", ")", "print", "(", "\"Written {}\"", ".", "format", "(", "takename", ")", ")" ]
Record a single movie and store it on hard drive
[ "Record", "a", "single", "movie", "and", "store", "it", "on", "hard", "drive" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/commands/record_movie_command.py#L47-L91
251,355
MillionIntegrals/vel
vel/rl/modules/noise/ou_noise.py
OuNoise.reset_training_state
def reset_training_state(self, dones, batch_info): """ A hook for a model to react when an episode is finished during training """ for idx, done in enumerate(dones): if done > 0.5: self.processes[idx].reset()
python
def reset_training_state(self, dones, batch_info): for idx, done in enumerate(dones): if done > 0.5: self.processes[idx].reset()
[ "def", "reset_training_state", "(", "self", ",", "dones", ",", "batch_info", ")", ":", "for", "idx", ",", "done", "in", "enumerate", "(", "dones", ")", ":", "if", "done", ">", "0.5", ":", "self", ".", "processes", "[", "idx", "]", ".", "reset", "(", ")" ]
A hook for a model to react when an episode is finished during training
[ "A", "hook", "for", "a", "model", "to", "react", "when", "an", "episode", "is", "finished", "during", "training" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/modules/noise/ou_noise.py#L22-L26
251,356
MillionIntegrals/vel
vel/rl/modules/noise/ou_noise.py
OuNoise.forward
def forward(self, actions, batch_info): """ Return model step after applying noise """ while len(self.processes) < actions.shape[0]: len_action_space = self.action_space.shape[-1] self.processes.append( OrnsteinUhlenbeckNoiseProcess( np.zeros(len_action_space), float(self.std_dev) * np.ones(len_action_space) ) ) noise = torch.from_numpy(np.stack([x() for x in self.processes])).float().to(actions.device) return torch.min(torch.max(actions + noise, self.low_tensor), self.high_tensor)
python
def forward(self, actions, batch_info): while len(self.processes) < actions.shape[0]: len_action_space = self.action_space.shape[-1] self.processes.append( OrnsteinUhlenbeckNoiseProcess( np.zeros(len_action_space), float(self.std_dev) * np.ones(len_action_space) ) ) noise = torch.from_numpy(np.stack([x() for x in self.processes])).float().to(actions.device) return torch.min(torch.max(actions + noise, self.low_tensor), self.high_tensor)
[ "def", "forward", "(", "self", ",", "actions", ",", "batch_info", ")", ":", "while", "len", "(", "self", ".", "processes", ")", "<", "actions", ".", "shape", "[", "0", "]", ":", "len_action_space", "=", "self", ".", "action_space", ".", "shape", "[", "-", "1", "]", "self", ".", "processes", ".", "append", "(", "OrnsteinUhlenbeckNoiseProcess", "(", "np", ".", "zeros", "(", "len_action_space", ")", ",", "float", "(", "self", ".", "std_dev", ")", "*", "np", ".", "ones", "(", "len_action_space", ")", ")", ")", "noise", "=", "torch", ".", "from_numpy", "(", "np", ".", "stack", "(", "[", "x", "(", ")", "for", "x", "in", "self", ".", "processes", "]", ")", ")", ".", "float", "(", ")", ".", "to", "(", "actions", ".", "device", ")", "return", "torch", ".", "min", "(", "torch", ".", "max", "(", "actions", "+", "noise", ",", "self", ".", "low_tensor", ")", ",", "self", ".", "high_tensor", ")" ]
Return model step after applying noise
[ "Return", "model", "step", "after", "applying", "noise" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/modules/noise/ou_noise.py#L28-L41
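OrnsteinUhlenbeckNoiseProcess itself is not part of this record; a minimal sketch of a compatible implementation (the theta and dt parameters are assumptions, not taken from the source) might be:

import numpy as np

class OrnsteinUhlenbeckNoiseProcess:
    """Temporally correlated noise: dx = theta * (mu - x) * dt + sigma * sqrt(dt) * N(0, I)."""

    def __init__(self, mu, sigma, theta=0.15, dt=1e-2):
        self.mu = mu
        self.sigma = sigma
        self.theta = theta
        self.dt = dt
        self.reset()

    def __call__(self):
        # Mean-reverting step plus scaled Gaussian noise
        self.x_prev = (
            self.x_prev
            + self.theta * (self.mu - self.x_prev) * self.dt
            + self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)
        )
        return self.x_prev

    def reset(self):
        # Restart the process at its mean, as reset_training_state expects
        self.x_prev = np.copy(self.mu)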
251,357
MillionIntegrals/vel
vel/util/intepolate.py
interpolate_logscale
def interpolate_logscale(start, end, steps): """ Interpolate series between start and end in given number of steps - logscale interpolation """ if start <= 0.0: warnings.warn("Start of logscale interpolation must be positive!") start = 1e-5 return np.logspace(np.log10(float(start)), np.log10(float(end)), steps)
python
def interpolate_logscale(start, end, steps): if start <= 0.0: warnings.warn("Start of logscale interpolation must be positive!") start = 1e-5 return np.logspace(np.log10(float(start)), np.log10(float(end)), steps)
[ "def", "interpolate_logscale", "(", "start", ",", "end", ",", "steps", ")", ":", "if", "start", "<=", "0.0", ":", "warnings", ".", "warn", "(", "\"Start of logscale interpolation must be positive!\"", ")", "start", "=", "1e-5", "return", "np", ".", "logspace", "(", "np", ".", "log10", "(", "float", "(", "start", ")", ")", ",", "np", ".", "log10", "(", "float", "(", "end", ")", ")", ",", "steps", ")" ]
Interpolate series between start and end in given number of steps - logscale interpolation
[ "Interpolate", "series", "between", "start", "and", "end", "in", "given", "number", "of", "steps", "-", "logscale", "interpolation" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/util/intepolate.py#L10-L16
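For example, interpolating from 1e-4 to 1e-2 in three steps produces points evenly spaced in log space:

import numpy as np

print(np.logspace(np.log10(1e-4), np.log10(1e-2), 3))  # [1.e-04 1.e-03 1.e-02]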
251,358
MillionIntegrals/vel
vel/util/intepolate.py
interpolate_series
def interpolate_series(start, end, steps, how='linear'): """ Interpolate series between start and end in given number of steps """ return INTERP_DICT[how](start, end, steps)
python
def interpolate_series(start, end, steps, how='linear'): return INTERP_DICT[how](start, end, steps)
[ "def", "interpolate_series", "(", "start", ",", "end", ",", "steps", ",", "how", "=", "'linear'", ")", ":", "return", "INTERP_DICT", "[", "how", "]", "(", "start", ",", "end", ",", "steps", ")" ]
Interpolate series between start and end in given number of steps
[ "Interpolate", "series", "between", "start", "and", "end", "in", "given", "number", "of", "steps" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/util/intepolate.py#L48-L50
251,359
MillionIntegrals/vel
vel/util/intepolate.py
interpolate_single
def interpolate_single(start, end, coefficient, how='linear'): """ Interpolate a single value between start and end at a given coefficient """ return INTERP_SINGLE_DICT[how](start, end, coefficient)
python
def interpolate_single(start, end, coefficient, how='linear'): return INTERP_SINGLE_DICT[how](start, end, coefficient)
[ "def", "interpolate_single", "(", "start", ",", "end", ",", "coefficient", ",", "how", "=", "'linear'", ")", ":", "return", "INTERP_SINGLE_DICT", "[", "how", "]", "(", "start", ",", "end", ",", "coefficient", ")" ]
Interpolate a single value between start and end at a given coefficient
[ "Interpolate", "a", "single", "value", "between", "start", "and", "end", "at", "a", "given", "coefficient" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/util/intepolate.py#L53-L55
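INTERP_DICT and INTERP_SINGLE_DICT are referenced but not shown in these records; presumably they map the how argument to interpolation functions. A hypothetical reconstruction of the pattern:

import numpy as np

def interpolate_linear(start, end, steps):
    """Evenly spaced series from start to end."""
    return np.linspace(float(start), float(end), steps)

def interpolate_linear_single(start, end, coefficient):
    """Single point a fraction of the way from start to end."""
    return start + (end - start) * coefficient

# Hypothetical dispatch tables; the real module may register more schemes
INTERP_DICT = {'linear': interpolate_linear}
INTERP_SINGLE_DICT = {'linear': interpolate_linear_single}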
251,360
MillionIntegrals/vel
vel/commands/summary_command.py
ModelSummary.run
def run(self, *args): """ Print model summary """ if self.source is None: self.model.summary() else: x_data, y_data = next(iter(self.source.train_loader())) self.model.summary(input_size=x_data.shape[1:])
python
def run(self, *args): if self.source is None: self.model.summary() else: x_data, y_data = next(iter(self.source.train_loader())) self.model.summary(input_size=x_data.shape[1:])
[ "def", "run", "(", "self", ",", "*", "args", ")", ":", "if", "self", ".", "source", "is", "None", ":", "self", ".", "model", ".", "summary", "(", ")", "else", ":", "x_data", ",", "y_data", "=", "next", "(", "iter", "(", "self", ".", "source", ".", "train_loader", "(", ")", ")", ")", "self", ".", "model", ".", "summary", "(", "input_size", "=", "x_data", ".", "shape", "[", "1", ":", "]", ")" ]
Print model summary
[ "Print", "model", "summary" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/commands/summary_command.py#L10-L16
251,361
MillionIntegrals/vel
vel/rl/reinforcers/on_policy_iteration_reinforcer.py
OnPolicyIterationReinforcer.initialize_training
def initialize_training(self, training_info: TrainingInfo, model_state=None, hidden_state=None): """ Prepare models for training """ if model_state is not None: self.model.load_state_dict(model_state) else: self.model.reset_weights() self.algo.initialize( training_info=training_info, model=self.model, environment=self.env_roller.environment, device=self.device )
python
def initialize_training(self, training_info: TrainingInfo, model_state=None, hidden_state=None): if model_state is not None: self.model.load_state_dict(model_state) else: self.model.reset_weights() self.algo.initialize( training_info=training_info, model=self.model, environment=self.env_roller.environment, device=self.device )
[ "def", "initialize_training", "(", "self", ",", "training_info", ":", "TrainingInfo", ",", "model_state", "=", "None", ",", "hidden_state", "=", "None", ")", ":", "if", "model_state", "is", "not", "None", ":", "self", ".", "model", ".", "load_state_dict", "(", "model_state", ")", "else", ":", "self", ".", "model", ".", "reset_weights", "(", ")", "self", ".", "algo", ".", "initialize", "(", "training_info", "=", "training_info", ",", "model", "=", "self", ".", "model", ",", "environment", "=", "self", ".", "env_roller", ".", "environment", ",", "device", "=", "self", ".", "device", ")" ]
Prepare models for training
[ "Prepare", "models", "for", "training" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/reinforcers/on_policy_iteration_reinforcer.py#L63-L72
251,362
MillionIntegrals/vel
vel/util/network.py
convolutional_layer_series
def convolutional_layer_series(initial_size, layer_sequence): """ Execute a series of convolutional layer transformations to the size number """ size = initial_size for filter_size, padding, stride in layer_sequence: size = convolution_size_equation(size, filter_size, padding, stride) return size
python
def convolutional_layer_series(initial_size, layer_sequence): size = initial_size for filter_size, padding, stride in layer_sequence: size = convolution_size_equation(size, filter_size, padding, stride) return size
[ "def", "convolutional_layer_series", "(", "initial_size", ",", "layer_sequence", ")", ":", "size", "=", "initial_size", "for", "filter_size", ",", "padding", ",", "stride", "in", "layer_sequence", ":", "size", "=", "convolution_size_equation", "(", "size", ",", "filter_size", ",", "padding", ",", "stride", ")", "return", "size" ]
Execute a series of convolutional layer transformations to the size number
[ "Execute", "a", "series", "of", "convolutional", "layer", "transformations", "to", "the", "size", "number" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/util/network.py#L34-L41
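convolution_size_equation is not included here; a sketch consistent with the standard output-size formula floor((size - filter + 2*padding) / stride) + 1, followed by a worked pass through the classic Atari conv stack:

def convolution_size_equation(size, filter_size, padding, stride):
    """Standard output-size formula for a conv layer along one dimension."""
    return (size - filter_size + 2 * padding) // stride + 1

# Example: an 84x84 frame through (filter, padding, stride) = (8,0,4), (4,0,2), (3,0,1)
size = 84
for filter_size, padding, stride in [(8, 0, 4), (4, 0, 2), (3, 0, 1)]:
    size = convolution_size_equation(size, filter_size, padding, stride)
print(size)  # 7 -- i.e. 84 -> 20 -> 9 -> 7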
251,363
MillionIntegrals/vel
vel/api/model.py
Model.train
def train(self, mode=True): r""" Sets the module in training mode. This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, etc. Returns: Module: self """ super().train(mode) if mode: mu.apply_leaf(self, mu.set_train_mode) return self
python
def train(self, mode=True): r""" Sets the module in training mode. This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, etc. Returns: Module: self """ super().train(mode) if mode: mu.apply_leaf(self, mu.set_train_mode) return self
[ "def", "train", "(", "self", ",", "mode", "=", "True", ")", ":", "super", "(", ")", ".", "train", "(", "mode", ")", "if", "mode", ":", "mu", ".", "apply_leaf", "(", "self", ",", "mu", ".", "set_train_mode", ")", "return", "self" ]
r""" Sets the module in training mode. This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, etc. Returns: Module: self
[ "r", "Sets", "the", "module", "in", "training", "mode", "." ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/model.py#L18-L35
251,364
MillionIntegrals/vel
vel/api/model.py
Model.summary
def summary(self, input_size=None, hashsummary=False): """ Print a model summary """ if input_size is None: print(self) print("-" * 120) number = sum(p.numel() for p in self.model.parameters()) print("Number of model parameters: {:,}".format(number)) print("-" * 120) else: summary(self, input_size) if hashsummary: for idx, hashvalue in enumerate(self.hashsummary()): print(f"{idx}: {hashvalue}")
python
def summary(self, input_size=None, hashsummary=False): if input_size is None: print(self) print("-" * 120) number = sum(p.numel() for p in self.model.parameters()) print("Number of model parameters: {:,}".format(number)) print("-" * 120) else: summary(self, input_size) if hashsummary: for idx, hashvalue in enumerate(self.hashsummary()): print(f"{idx}: {hashvalue}")
[ "def", "summary", "(", "self", ",", "input_size", "=", "None", ",", "hashsummary", "=", "False", ")", ":", "if", "input_size", "is", "None", ":", "print", "(", "self", ")", "print", "(", "\"-\"", "*", "120", ")", "number", "=", "sum", "(", "p", ".", "numel", "(", ")", "for", "p", "in", "self", ".", "model", ".", "parameters", "(", ")", ")", "print", "(", "\"Number of model parameters: {:,}\"", ".", "format", "(", "number", ")", ")", "print", "(", "\"-\"", "*", "120", ")", "else", ":", "summary", "(", "self", ",", "input_size", ")", "if", "hashsummary", ":", "for", "idx", ",", "hashvalue", "in", "enumerate", "(", "self", ".", "hashsummary", "(", ")", ")", ":", "print", "(", "f\"{idx}: {hashvalue}\"", ")" ]
Print a model summary
[ "Print", "a", "model", "summary" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/model.py#L37-L51
251,365
MillionIntegrals/vel
vel/api/model.py
Model.hashsummary
def hashsummary(self): """ Print a model summary - checksums of each layer parameters """ children = list(self.children()) result = [] for child in children: result.extend(hashlib.sha256(x.detach().cpu().numpy().tobytes()).hexdigest() for x in child.parameters()) return result
python
def hashsummary(self): children = list(self.children()) result = [] for child in children: result.extend(hashlib.sha256(x.detach().cpu().numpy().tobytes()).hexdigest() for x in child.parameters()) return result
[ "def", "hashsummary", "(", "self", ")", ":", "children", "=", "list", "(", "self", ".", "children", "(", ")", ")", "result", "=", "[", "]", "for", "child", "in", "children", ":", "result", ".", "extend", "(", "hashlib", ".", "sha256", "(", "x", ".", "detach", "(", ")", ".", "cpu", "(", ")", ".", "numpy", "(", ")", ".", "tobytes", "(", ")", ")", ".", "hexdigest", "(", ")", "for", "x", "in", "child", ".", "parameters", "(", ")", ")", "return", "result" ]
Print a model summary - checksums of each layer parameters
[ "Print", "a", "model", "summary", "-", "checksums", "of", "each", "layer", "parameters" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/model.py#L53-L62
251,366
MillionIntegrals/vel
vel/api/model.py
RnnLinearBackboneModel.zero_state
def zero_state(self, batch_size): """ Initial state of the network """ return torch.zeros(batch_size, self.state_dim, dtype=torch.float32)
python
def zero_state(self, batch_size): return torch.zeros(batch_size, self.state_dim, dtype=torch.float32)
[ "def", "zero_state", "(", "self", ",", "batch_size", ")", ":", "return", "torch", ".", "zeros", "(", "batch_size", ",", "self", ".", "state_dim", ",", "dtype", "=", "torch", ".", "float32", ")" ]
Initial state of the network
[ "Initial", "state", "of", "the", "network" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/model.py#L121-L123
251,367
MillionIntegrals/vel
vel/api/model.py
SupervisedModel.loss
def loss(self, x_data, y_true): """ Forward propagate network and return a value of loss function """ y_pred = self(x_data) return y_pred, self.loss_value(x_data, y_true, y_pred)
python
def loss(self, x_data, y_true): y_pred = self(x_data) return y_pred, self.loss_value(x_data, y_true, y_pred)
[ "def", "loss", "(", "self", ",", "x_data", ",", "y_true", ")", ":", "y_pred", "=", "self", "(", "x_data", ")", "return", "y_pred", ",", "self", ".", "loss_value", "(", "x_data", ",", "y_true", ",", "y_pred", ")" ]
Forward propagate network and return a value of loss function
[ "Forward", "propagate", "network", "and", "return", "a", "value", "of", "loss", "function" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/model.py#L139-L142
251,368
MillionIntegrals/vel
vel/models/vision/cifar_resnet_v2.py
ResNetV2.metrics
def metrics(self): """ Set of metrics for this model """ from vel.metrics.loss_metric import Loss from vel.metrics.accuracy import Accuracy return [Loss(), Accuracy()]
python
def metrics(self): from vel.metrics.loss_metric import Loss from vel.metrics.accuracy import Accuracy return [Loss(), Accuracy()]
[ "def", "metrics", "(", "self", ")", ":", "from", "vel", ".", "metrics", ".", "loss_metric", "import", "Loss", "from", "vel", ".", "metrics", ".", "accuracy", "import", "Accuracy", "return", "[", "Loss", "(", ")", ",", "Accuracy", "(", ")", "]" ]
Set of metrics for this model
[ "Set", "of", "metrics", "for", "this", "model" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/models/vision/cifar_resnet_v2.py#L77-L81
251,369
MillionIntegrals/vel
vel/util/tensor_util.py
one_hot_encoding
def one_hot_encoding(input_tensor, num_labels): """ One-hot encode labels from input """ xview = input_tensor.view(-1, 1).to(torch.long) onehot = torch.zeros(xview.size(0), num_labels, device=input_tensor.device, dtype=torch.float) onehot.scatter_(1, xview, 1) return onehot.view(list(input_tensor.shape) + [-1])
python
def one_hot_encoding(input_tensor, num_labels): xview = input_tensor.view(-1, 1).to(torch.long) onehot = torch.zeros(xview.size(0), num_labels, device=input_tensor.device, dtype=torch.float) onehot.scatter_(1, xview, 1) return onehot.view(list(input_tensor.shape) + [-1])
[ "def", "one_hot_encoding", "(", "input_tensor", ",", "num_labels", ")", ":", "xview", "=", "input_tensor", ".", "view", "(", "-", "1", ",", "1", ")", ".", "to", "(", "torch", ".", "long", ")", "onehot", "=", "torch", ".", "zeros", "(", "xview", ".", "size", "(", "0", ")", ",", "num_labels", ",", "device", "=", "input_tensor", ".", "device", ",", "dtype", "=", "torch", ".", "float", ")", "onehot", ".", "scatter_", "(", "1", ",", "xview", ",", "1", ")", "return", "onehot", ".", "view", "(", "list", "(", "input_tensor", ".", "shape", ")", "+", "[", "-", "1", "]", ")" ]
One-hot encode labels from input
[ "One", "-", "hot", "encode", "labels", "from", "input" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/util/tensor_util.py#L4-L10
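A quick shape check of the function above: a trailing dimension of size num_labels is appended to whatever shape the input has.

import torch

labels = torch.tensor([[0, 2], [1, 1]])           # shape (2, 2)
encoded = one_hot_encoding(labels, num_labels=3)  # defined in the record above
print(encoded.shape)   # torch.Size([2, 2, 3])
print(encoded[0, 1])   # tensor([0., 0., 1.])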
251,370
MillionIntegrals/vel
vel/util/tensor_util.py
merge_first_two_dims
def merge_first_two_dims(tensor): """ Reshape tensor to merge first two dimensions """ shape = tensor.shape batch_size = shape[0] * shape[1] new_shape = tuple([batch_size] + list(shape[2:])) return tensor.view(new_shape)
python
def merge_first_two_dims(tensor): shape = tensor.shape batch_size = shape[0] * shape[1] new_shape = tuple([batch_size] + list(shape[2:])) return tensor.view(new_shape)
[ "def", "merge_first_two_dims", "(", "tensor", ")", ":", "shape", "=", "tensor", ".", "shape", "batch_size", "=", "shape", "[", "0", "]", "*", "shape", "[", "1", "]", "new_shape", "=", "tuple", "(", "[", "batch_size", "]", "+", "list", "(", "shape", "[", "2", ":", "]", ")", ")", "return", "tensor", ".", "view", "(", "new_shape", ")" ]
Reshape tensor to merge first two dimensions
[ "Reshape", "tensor", "to", "merge", "first", "two", "dimensions" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/util/tensor_util.py#L13-L18
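For example, a rollout of shape (steps, envs, features) flattens into a single batch dimension:

import torch

rollout = torch.randn(5, 4, 16)             # (steps, envs, features)
print(merge_first_two_dims(rollout).shape)  # torch.Size([20, 16])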
251,371
MillionIntegrals/vel
vel/rl/vecenv/dummy.py
DummyVecEnvWrapper.instantiate
def instantiate(self, parallel_envs, seed=0, preset='default') -> VecEnv: """ Create vectorized environments """ envs = DummyVecEnv([self._creation_function(i, seed, preset) for i in range(parallel_envs)]) if self.frame_history is not None: envs = VecFrameStack(envs, self.frame_history) return envs
python
def instantiate(self, parallel_envs, seed=0, preset='default') -> VecEnv: envs = DummyVecEnv([self._creation_function(i, seed, preset) for i in range(parallel_envs)]) if self.frame_history is not None: envs = VecFrameStack(envs, self.frame_history) return envs
[ "def", "instantiate", "(", "self", ",", "parallel_envs", ",", "seed", "=", "0", ",", "preset", "=", "'default'", ")", "->", "VecEnv", ":", "envs", "=", "DummyVecEnv", "(", "[", "self", ".", "_creation_function", "(", "i", ",", "seed", ",", "preset", ")", "for", "i", "in", "range", "(", "parallel_envs", ")", "]", ")", "if", "self", ".", "frame_history", "is", "not", "None", ":", "envs", "=", "VecFrameStack", "(", "envs", ",", "self", ".", "frame_history", ")", "return", "envs" ]
Create vectorized environments
[ "Create", "vectorized", "environments" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/vecenv/dummy.py#L16-L23
251,372
MillionIntegrals/vel
vel/rl/vecenv/dummy.py
DummyVecEnvWrapper.instantiate_single
def instantiate_single(self, seed=0, preset='default'): """ Create a new Env instance - single """ env = self.env.instantiate(seed=seed, serial_id=0, preset=preset) if self.frame_history is not None: env = FrameStack(env, self.frame_history) return env
python
def instantiate_single(self, seed=0, preset='default'): env = self.env.instantiate(seed=seed, serial_id=0, preset=preset) if self.frame_history is not None: env = FrameStack(env, self.frame_history) return env
[ "def", "instantiate_single", "(", "self", ",", "seed", "=", "0", ",", "preset", "=", "'default'", ")", ":", "env", "=", "self", ".", "env", ".", "instantiate", "(", "seed", "=", "seed", ",", "serial_id", "=", "0", ",", "preset", "=", "preset", ")", "if", "self", ".", "frame_history", "is", "not", "None", ":", "env", "=", "FrameStack", "(", "env", ",", "self", ".", "frame_history", ")", "return", "env" ]
Create a new Env instance - single
[ "Create", "a", "new", "Env", "instance", "-", "single" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/vecenv/dummy.py#L25-L32
251,373
MillionIntegrals/vel
vel/rl/vecenv/dummy.py
DummyVecEnvWrapper._creation_function
def _creation_function(self, idx, seed, preset): """ Helper function to create a proper closure around supplied values """ return lambda: self.env.instantiate(seed=seed, serial_id=idx, preset=preset)
python
def _creation_function(self, idx, seed, preset): return lambda: self.env.instantiate(seed=seed, serial_id=idx, preset=preset)
[ "def", "_creation_function", "(", "self", ",", "idx", ",", "seed", ",", "preset", ")", ":", "return", "lambda", ":", "self", ".", "env", ".", "instantiate", "(", "seed", "=", "seed", ",", "serial_id", "=", "idx", ",", "preset", "=", "preset", ")" ]
Helper function to create a proper closure around supplied values
[ "Helper", "function", "to", "create", "a", "proper", "closure", "around", "supplied", "values" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/vecenv/dummy.py#L34-L36
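This helper exists because of Python's late binding of loop variables: a lambda created directly inside the loop in instantiate would see the final value of i for every environment. A standalone illustration:

# Late binding: all three lambdas share the loop variable and see its final value
broken = [lambda: i for i in range(3)]
print([f() for f in broken])   # [2, 2, 2]

# Binding through a helper function (as _creation_function does) fixes it
def make(idx):
    return lambda: idx

fixed = [make(i) for i in range(3)]
print([f() for f in fixed])    # [0, 1, 2]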
251,374
MillionIntegrals/vel
vel/rl/models/stochastic_policy_model_separate.py
StochasticPolicyModelSeparate.policy
def policy(self, observations): """ Calculate only action head for given state """ input_data = self.input_block(observations) policy_base_output = self.policy_backbone(input_data) policy_params = self.action_head(policy_base_output) return policy_params
python
def policy(self, observations): input_data = self.input_block(observations) policy_base_output = self.policy_backbone(input_data) policy_params = self.action_head(policy_base_output) return policy_params
[ "def", "policy", "(", "self", ",", "observations", ")", ":", "input_data", "=", "self", ".", "input_block", "(", "observations", ")", "policy_base_output", "=", "self", ".", "policy_backbone", "(", "input_data", ")", "policy_params", "=", "self", ".", "action_head", "(", "policy_base_output", ")", "return", "policy_params" ]
Calculate only action head for given state
[ "Calculate", "only", "action", "head", "for", "given", "state" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/models/stochastic_policy_model_separate.py#L85-L90
251,375
MillionIntegrals/vel
vel/phase/cycle.py
CycleCallback._init_cycle_dict
def _init_cycle_dict(self): """ Populate a cycle dict """ dict_arr = np.zeros(self.epochs, dtype=int) length_arr = np.zeros(self.epochs, dtype=int) start_arr = np.zeros(self.epochs, dtype=int) c_len = self.cycle_len idx = 0 for i in range(self.cycles): current_start = idx for j in range(c_len): dict_arr[idx] = i length_arr[idx] = c_len start_arr[idx] = current_start idx += 1 c_len *= self.cycle_mult return dict_arr, length_arr, start_arr
python
def _init_cycle_dict(self): dict_arr = np.zeros(self.epochs, dtype=int) length_arr = np.zeros(self.epochs, dtype=int) start_arr = np.zeros(self.epochs, dtype=int) c_len = self.cycle_len idx = 0 for i in range(self.cycles): current_start = idx for j in range(c_len): dict_arr[idx] = i length_arr[idx] = c_len start_arr[idx] = current_start idx += 1 c_len *= self.cycle_mult return dict_arr, length_arr, start_arr
[ "def", "_init_cycle_dict", "(", "self", ")", ":", "dict_arr", "=", "np", ".", "zeros", "(", "self", ".", "epochs", ",", "dtype", "=", "int", ")", "length_arr", "=", "np", ".", "zeros", "(", "self", ".", "epochs", ",", "dtype", "=", "int", ")", "start_arr", "=", "np", ".", "zeros", "(", "self", ".", "epochs", ",", "dtype", "=", "int", ")", "c_len", "=", "self", ".", "cycle_len", "idx", "=", "0", "for", "i", "in", "range", "(", "self", ".", "cycles", ")", ":", "current_start", "=", "idx", "for", "j", "in", "range", "(", "c_len", ")", ":", "dict_arr", "[", "idx", "]", "=", "i", "length_arr", "[", "idx", "]", "=", "c_len", "start_arr", "[", "idx", "]", "=", "current_start", "idx", "+=", "1", "c_len", "*=", "self", ".", "cycle_mult", "return", "dict_arr", ",", "length_arr", ",", "start_arr" ]
Populate a cycle dict
[ "Populate", "a", "cycle", "dict" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/phase/cycle.py#L34-L53
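A worked example: with cycles=2, cycle_len=1 and cycle_mult=2 the schedule covers 1 + 2 = 3 epochs, and the loop fills dict_arr=[0, 1, 1] (cycle index per epoch), length_arr=[1, 2, 2] (cycle length per epoch) and start_arr=[0, 1, 1] (cycle start per epoch).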
251,376
MillionIntegrals/vel
vel/phase/cycle.py
CycleCallback.on_batch_begin
def on_batch_begin(self, batch_info: BatchInfo): """ Set proper learning rate """ cycle_length = self.cycle_lengths[batch_info.local_epoch_number - 1] cycle_start = self.cycle_starts[batch_info.local_epoch_number - 1] numerator = (batch_info.local_epoch_number - cycle_start - 1) * batch_info.batches_per_epoch + batch_info.batch_number denominator = cycle_length * batch_info.batches_per_epoch interpolation_number = numerator / denominator if cycle_start == 0 and numerator < self.init_iter: lr = self.init_lr else: if isinstance(self.max_lr, list): lr = [interp.interpolate_single(max_lr, min_lr, interpolation_number, how=self.interpolate) for max_lr, min_lr in zip(self.max_lr, self.min_lr)] else: lr = interp.interpolate_single(self.max_lr, self.min_lr, interpolation_number, how=self.interpolate) self.set_lr(lr)
python
def on_batch_begin(self, batch_info: BatchInfo): cycle_length = self.cycle_lengths[batch_info.local_epoch_number - 1] cycle_start = self.cycle_starts[batch_info.local_epoch_number - 1] numerator = (batch_info.local_epoch_number - cycle_start - 1) * batch_info.batches_per_epoch + batch_info.batch_number denominator = cycle_length * batch_info.batches_per_epoch interpolation_number = numerator / denominator if cycle_start == 0 and numerator < self.init_iter: lr = self.init_lr else: if isinstance(self.max_lr, list): lr = [interp.interpolate_single(max_lr, min_lr, interpolation_number, how=self.interpolate) for max_lr, min_lr in zip(self.max_lr, self.min_lr)] else: lr = interp.interpolate_single(self.max_lr, self.min_lr, interpolation_number, how=self.interpolate) self.set_lr(lr)
[ "def", "on_batch_begin", "(", "self", ",", "batch_info", ":", "BatchInfo", ")", ":", "cycle_length", "=", "self", ".", "cycle_lengths", "[", "batch_info", ".", "local_epoch_number", "-", "1", "]", "cycle_start", "=", "self", ".", "cycle_starts", "[", "batch_info", ".", "local_epoch_number", "-", "1", "]", "numerator", "=", "(", "batch_info", ".", "local_epoch_number", "-", "cycle_start", "-", "1", ")", "*", "batch_info", ".", "batches_per_epoch", "+", "batch_info", ".", "batch_number", "denominator", "=", "cycle_length", "*", "batch_info", ".", "batches_per_epoch", "interpolation_number", "=", "numerator", "/", "denominator", "if", "cycle_start", "==", "0", "and", "numerator", "<", "self", ".", "init_iter", ":", "lr", "=", "self", ".", "init_lr", "else", ":", "if", "isinstance", "(", "self", ".", "max_lr", ",", "list", ")", ":", "lr", "=", "[", "interp", ".", "interpolate_single", "(", "max_lr", ",", "min_lr", ",", "interpolation_number", ",", "how", "=", "self", ".", "interpolate", ")", "for", "max_lr", ",", "min_lr", "in", "zip", "(", "self", ".", "max_lr", ",", "self", ".", "min_lr", ")", "]", "else", ":", "lr", "=", "interp", ".", "interpolate_single", "(", "self", ".", "max_lr", ",", "self", ".", "min_lr", ",", "interpolation_number", ",", "how", "=", "self", ".", "interpolate", ")", "self", ".", "set_lr", "(", "lr", ")" ]
Set proper learning rate
[ "Set", "proper", "learning", "rate" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/phase/cycle.py#L55-L73
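A worked example of the progress computation: with batches_per_epoch=100, an epoch inside a cycle of length 2 that started at epoch 1 (cycle_start=1), local_epoch_number=2 and batch_number=50 gives numerator = (2 - 1 - 1) * 100 + 50 = 50 and denominator = 2 * 100 = 200, so the interpolation progress through the cycle is 0.25.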
251,377
MillionIntegrals/vel
vel/phase/cycle.py
CycleCallback.set_lr
def set_lr(self, lr): """ Set a learning rate for the optimizer """ if isinstance(lr, list): for group_lr, param_group in zip(lr, self.optimizer.param_groups): param_group['lr'] = group_lr else: for param_group in self.optimizer.param_groups: param_group['lr'] = lr
python
def set_lr(self, lr): if isinstance(lr, list): for group_lr, param_group in zip(lr, self.optimizer.param_groups): param_group['lr'] = group_lr else: for param_group in self.optimizer.param_groups: param_group['lr'] = lr
[ "def", "set_lr", "(", "self", ",", "lr", ")", ":", "if", "isinstance", "(", "lr", ",", "list", ")", ":", "for", "group_lr", ",", "param_group", "in", "zip", "(", "lr", ",", "self", ".", "optimizer", ".", "param_groups", ")", ":", "param_group", "[", "'lr'", "]", "=", "group_lr", "else", ":", "for", "param_group", "in", "self", ".", "optimizer", ".", "param_groups", ":", "param_group", "[", "'lr'", "]", "=", "lr" ]
Set a learning rate for the optimizer
[ "Set", "a", "learning", "rate", "for", "the", "optimizer" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/phase/cycle.py#L75-L82
251,378
MillionIntegrals/vel
vel/internals/parser.py
Variable.parameter_constructor
def parameter_constructor(cls, loader, node): """ Construct variable instance from yaml node """ value = loader.construct_scalar(node) if isinstance(value, str): if '=' in value: (varname, varvalue) = Parser.parse_equality(value) return cls(varname, varvalue) else: return cls(value) else: return cls(value)
python
def parameter_constructor(cls, loader, node): value = loader.construct_scalar(node) if isinstance(value, str): if '=' in value: (varname, varvalue) = Parser.parse_equality(value) return cls(varname, varvalue) else: return cls(value) else: return cls(value)
[ "def", "parameter_constructor", "(", "cls", ",", "loader", ",", "node", ")", ":", "value", "=", "loader", ".", "construct_scalar", "(", "node", ")", "if", "isinstance", "(", "value", ",", "str", ")", ":", "if", "'='", "in", "value", ":", "(", "varname", ",", "varvalue", ")", "=", "Parser", ".", "parse_equality", "(", "value", ")", "return", "cls", "(", "varname", ",", "varvalue", ")", "else", ":", "return", "cls", "(", "value", ")", "else", ":", "return", "cls", "(", "value", ")" ]
Construct variable instance from yaml node
[ "Construct", "variable", "instance", "from", "yaml", "node" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/internals/parser.py#L18-L29
251,379
MillionIntegrals/vel
vel/internals/parser.py
Parser.register
def register(cls): """ Register variable handling in YAML """ if not cls.IS_LOADED: cls.IS_LOADED = True yaml.add_constructor('!param', Parameter.parameter_constructor, Loader=yaml.SafeLoader) yaml.add_constructor('!env', EnvironmentVariable.parameter_constructor, Loader=yaml.SafeLoader)
python
def register(cls): if not cls.IS_LOADED: cls.IS_LOADED = True yaml.add_constructor('!param', Parameter.parameter_constructor, Loader=yaml.SafeLoader) yaml.add_constructor('!env', EnvironmentVariable.parameter_constructor, Loader=yaml.SafeLoader)
[ "def", "register", "(", "cls", ")", ":", "if", "not", "cls", ".", "IS_LOADED", ":", "cls", ".", "IS_LOADED", "=", "True", "yaml", ".", "add_constructor", "(", "'!param'", ",", "Parameter", ".", "parameter_constructor", ",", "Loader", "=", "yaml", ".", "SafeLoader", ")", "yaml", ".", "add_constructor", "(", "'!env'", ",", "EnvironmentVariable", ".", "parameter_constructor", ",", "Loader", "=", "yaml", ".", "SafeLoader", ")" ]
Register variable handling in YAML
[ "Register", "variable", "handling", "in", "YAML" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/internals/parser.py#L74-L80
251,380
MillionIntegrals/vel
vel/internals/parser.py
Parser.parse_equality
def parse_equality(cls, equality_string): """ Parse some simple equality statements """ cls.register() assert '=' in equality_string, "There must be an '=' sign in the equality" [left_side, right_side] = equality_string.split('=', 1) left_side_value = yaml.safe_load(left_side.strip()) right_side_value = yaml.safe_load(right_side.strip()) assert isinstance(left_side_value, str), "Left side of equality must be a string" return left_side_value, right_side_value
python
def parse_equality(cls, equality_string): cls.register() assert '=' in equality_string, "There must be an '=' sign in the equality" [left_side, right_side] = equality_string.split('=', 1) left_side_value = yaml.safe_load(left_side.strip()) right_side_value = yaml.safe_load(right_side.strip()) assert isinstance(left_side_value, str), "Left side of equality must be a string" return left_side_value, right_side_value
[ "def", "parse_equality", "(", "cls", ",", "equality_string", ")", ":", "cls", ".", "register", "(", ")", "assert", "'='", "in", "equality_string", ",", "\"There must be an '=' sign in the equality\"", "[", "left_side", ",", "right_side", "]", "=", "equality_string", ".", "split", "(", "'='", ",", "1", ")", "left_side_value", "=", "yaml", ".", "safe_load", "(", "left_side", ".", "strip", "(", ")", ")", "right_side_value", "=", "yaml", ".", "safe_load", "(", "right_side", ".", "strip", "(", ")", ")", "assert", "isinstance", "(", "left_side_value", ",", "str", ")", ",", "\"Left side of equality must be a string\"", "return", "left_side_value", ",", "right_side_value" ]
Parse some simple equality statements
[ "Parse", "some", "simple", "equality", "statements" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/internals/parser.py#L89-L100
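Both sides pass through yaml.safe_load, so the right-hand side picks up YAML typing:

print(Parser.parse_equality("lr=0.001"))       # ('lr', 0.001) -- parsed as a float
print(Parser.parse_equality("name=test"))      # ('name', 'test')
print(Parser.parse_equality("flags=[1, 2]"))   # ('flags', [1, 2])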
251,381
MillionIntegrals/vel
vel/storage/backend/mongodb.py
MongoDbBackend.clean
def clean(self, initial_epoch): """ Remove entries from database that would get overwritten """ self.db.metrics.delete_many({'run_name': self.model_config.run_name, 'epoch_idx': {'$gt': initial_epoch}})
python
def clean(self, initial_epoch): self.db.metrics.delete_many({'run_name': self.model_config.run_name, 'epoch_idx': {'$gt': initial_epoch}})
[ "def", "clean", "(", "self", ",", "initial_epoch", ")", ":", "self", ".", "db", ".", "metrics", ".", "delete_many", "(", "{", "'run_name'", ":", "self", ".", "model_config", ".", "run_name", ",", "'epoch_idx'", ":", "{", "'$gt'", ":", "initial_epoch", "}", "}", ")" ]
Remove entries from database that would get overwritten
[ "Remove", "entries", "from", "database", "that", "would", "get", "overwritten" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/storage/backend/mongodb.py#L13-L15
251,382
MillionIntegrals/vel
vel/storage/backend/mongodb.py
MongoDbBackend.store_config
def store_config(self, configuration): """ Store model parameters in the database """ run_name = self.model_config.run_name self.db.configs.delete_many({'run_name': self.model_config.run_name}) configuration = configuration.copy() configuration['run_name'] = run_name self.db.configs.insert_one(configuration)
python
def store_config(self, configuration): run_name = self.model_config.run_name self.db.configs.delete_many({'run_name': self.model_config.run_name}) configuration = configuration.copy() configuration['run_name'] = run_name self.db.configs.insert_one(configuration)
[ "def", "store_config", "(", "self", ",", "configuration", ")", ":", "run_name", "=", "self", ".", "model_config", ".", "run_name", "self", ".", "db", ".", "configs", ".", "delete_many", "(", "{", "'run_name'", ":", "self", ".", "model_config", ".", "run_name", "}", ")", "configuration", "=", "configuration", ".", "copy", "(", ")", "configuration", "[", "'run_name'", "]", "=", "run_name", "self", ".", "db", ".", "configs", ".", "insert_one", "(", "configuration", ")" ]
Store model parameters in the database
[ "Store", "model", "parameters", "in", "the", "database" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/storage/backend/mongodb.py#L17-L26
251,383
MillionIntegrals/vel
vel/storage/backend/mongodb.py
MongoDbBackend.get_frame
def get_frame(self): """ Get a dataframe of metrics from this storage """ metric_items = list(self.db.metrics.find({'run_name': self.model_config.run_name}).sort('epoch_idx')) if len(metric_items) == 0: return pd.DataFrame(columns=['run_name']) else: return pd.DataFrame(metric_items).drop(['_id', 'model_name'], axis=1).set_index('epoch_idx')
python
def get_frame(self): metric_items = list(self.db.metrics.find({'run_name': self.model_config.run_name}).sort('epoch_idx')) if len(metric_items) == 0: return pd.DataFrame(columns=['run_name']) else: return pd.DataFrame(metric_items).drop(['_id', 'model_name'], axis=1).set_index('epoch_idx')
[ "def", "get_frame", "(", "self", ")", ":", "metric_items", "=", "list", "(", "self", ".", "db", ".", "metrics", ".", "find", "(", "{", "'run_name'", ":", "self", ".", "model_config", ".", "run_name", "}", ")", ".", "sort", "(", "'epoch_idx'", ")", ")", "if", "len", "(", "metric_items", ")", "==", "0", ":", "return", "pd", ".", "DataFrame", "(", "columns", "=", "[", "'run_name'", "]", ")", "else", ":", "return", "pd", ".", "DataFrame", "(", "metric_items", ")", ".", "drop", "(", "[", "'_id'", ",", "'model_name'", "]", ",", "axis", "=", "1", ")", ".", "set_index", "(", "'epoch_idx'", ")" ]
Get a dataframe of metrics from this storage
[ "Get", "a", "dataframe", "of", "metrics", "from", "this", "storage" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/storage/backend/mongodb.py#L28-L34
251,384
MillionIntegrals/vel
vel/rl/buffers/prioritized_circular_replay_buffer.py
PrioritizedCircularReplayBuffer._get_transitions
def _get_transitions(self, probs, indexes, tree_idxs, batch_info, forward_steps=1, discount_factor=1.0): """ Return batch of frames for given indexes """ if forward_steps > 1: transition_arrays = self.backend.get_transitions_forward_steps(indexes, forward_steps, discount_factor) else: transition_arrays = self.backend.get_transitions(indexes) priority_weight = self.priority_weight.value(batch_info['progress']) # Normalize by sum of all probs probs = probs / np.array([s.total() for s in self.backend.segment_trees], dtype=float).reshape(1, -1) capacity = self.backend.current_size weights = (capacity * probs) ** (-priority_weight) weights = weights / weights.max(axis=0, keepdims=True) transition_arrays['weights'] = weights transition_tensors = {k: torch.from_numpy(v) for k, v in transition_arrays.items()} transitions = Trajectories( num_steps=indexes.shape[0], num_envs=indexes.shape[1], environment_information=None, transition_tensors=transition_tensors, rollout_tensors={}, extra_data={ 'tree_idxs': tree_idxs } ) return transitions.to_transitions()
python
def _get_transitions(self, probs, indexes, tree_idxs, batch_info, forward_steps=1, discount_factor=1.0): if forward_steps > 1: transition_arrays = self.backend.get_transitions_forward_steps(indexes, forward_steps, discount_factor) else: transition_arrays = self.backend.get_transitions(indexes) priority_weight = self.priority_weight.value(batch_info['progress']) # Normalize by sum of all probs probs = probs / np.array([s.total() for s in self.backend.segment_trees], dtype=float).reshape(1, -1) capacity = self.backend.current_size weights = (capacity * probs) ** (-priority_weight) weights = weights / weights.max(axis=0, keepdims=True) transition_arrays['weights'] = weights transition_tensors = {k: torch.from_numpy(v) for k, v in transition_arrays.items()} transitions = Trajectories( num_steps=indexes.shape[0], num_envs=indexes.shape[1], environment_information=None, transition_tensors=transition_tensors, rollout_tensors={}, extra_data={ 'tree_idxs': tree_idxs } ) return transitions.to_transitions()
[ "def", "_get_transitions", "(", "self", ",", "probs", ",", "indexes", ",", "tree_idxs", ",", "batch_info", ",", "forward_steps", "=", "1", ",", "discount_factor", "=", "1.0", ")", ":", "if", "forward_steps", ">", "1", ":", "transition_arrays", "=", "self", ".", "backend", ".", "get_transitions_forward_steps", "(", "indexes", ",", "forward_steps", ",", "discount_factor", ")", "else", ":", "transition_arrays", "=", "self", ".", "backend", ".", "get_transitions", "(", "indexes", ")", "priority_weight", "=", "self", ".", "priority_weight", ".", "value", "(", "batch_info", "[", "'progress'", "]", ")", "# Normalize by sum of all probs", "probs", "=", "probs", "/", "np", ".", "array", "(", "[", "s", ".", "total", "(", ")", "for", "s", "in", "self", ".", "backend", ".", "segment_trees", "]", ",", "dtype", "=", "float", ")", ".", "reshape", "(", "1", ",", "-", "1", ")", "capacity", "=", "self", ".", "backend", ".", "current_size", "weights", "=", "(", "capacity", "*", "probs", ")", "**", "(", "-", "priority_weight", ")", "weights", "=", "weights", "/", "weights", ".", "max", "(", "axis", "=", "0", ",", "keepdims", "=", "True", ")", "transition_arrays", "[", "'weights'", "]", "=", "weights", "transition_tensors", "=", "{", "k", ":", "torch", ".", "from_numpy", "(", "v", ")", "for", "k", ",", "v", "in", "transition_arrays", ".", "items", "(", ")", "}", "transitions", "=", "Trajectories", "(", "num_steps", "=", "indexes", ".", "shape", "[", "0", "]", ",", "num_envs", "=", "indexes", ".", "shape", "[", "1", "]", ",", "environment_information", "=", "None", ",", "transition_tensors", "=", "transition_tensors", ",", "rollout_tensors", "=", "{", "}", ",", "extra_data", "=", "{", "'tree_idxs'", ":", "tree_idxs", "}", ")", "return", "transitions", ".", "to_transitions", "(", ")" ]
Return batch of frames for given indexes
[ "Return", "batch", "of", "frames", "for", "given", "indexes" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/buffers/prioritized_circular_replay_buffer.py#L46-L75
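The weight computation above is the standard prioritized-replay importance-sampling correction, w_i = (N * P(i))^(-beta) normalised by the per-column maximum. A small numeric check with arbitrary values:

import numpy as np

capacity = 4
probs = np.array([[0.1], [0.2], [0.3], [0.4]])   # already normalised per column
beta = 0.5                                       # the annealed priority weight

weights = (capacity * probs) ** (-beta)
weights = weights / weights.max(axis=0, keepdims=True)
print(weights.round(3))  # [[1.], [0.707], [0.577], [0.5]] -- rarest transition gets weight 1.0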
251,385
MillionIntegrals/vel
vel/rl/api/env_roller.py
EnvRollerBase.rollout
def rollout(self, batch_info: BatchInfo, model: Model, number_of_steps: int) -> Rollout: """ Roll-out the environment and return it """ raise NotImplementedError
python
def rollout(self, batch_info: BatchInfo, model: Model, number_of_steps: int) -> Rollout: raise NotImplementedError
[ "def", "rollout", "(", "self", ",", "batch_info", ":", "BatchInfo", ",", "model", ":", "Model", ",", "number_of_steps", ":", "int", ")", "->", "Rollout", ":", "raise", "NotImplementedError" ]
Roll-out the environment and return it
[ "Roll", "-", "out", "the", "environment", "and", "return", "it" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/api/env_roller.py#L17-L19
251,386
MillionIntegrals/vel
vel/rl/reinforcers/buffered_mixed_policy_iteration_reinforcer.py
BufferedMixedPolicyIterationReinforcer.train_epoch
def train_epoch(self, epoch_info: EpochInfo, interactive=True): """ Train model on an epoch of a fixed number of batch updates """ epoch_info.on_epoch_begin() if interactive: iterator = tqdm.trange(epoch_info.batches_per_epoch, file=sys.stdout, desc="Training", unit="batch") else: iterator = range(epoch_info.batches_per_epoch) for batch_idx in iterator: batch_info = BatchInfo(epoch_info, batch_idx) batch_info.on_batch_begin() self.train_batch(batch_info) batch_info.on_batch_end() epoch_info.result_accumulator.freeze_results() epoch_info.on_epoch_end()
python
def train_epoch(self, epoch_info: EpochInfo, interactive=True): epoch_info.on_epoch_begin() if interactive: iterator = tqdm.trange(epoch_info.batches_per_epoch, file=sys.stdout, desc="Training", unit="batch") else: iterator = range(epoch_info.batches_per_epoch) for batch_idx in iterator: batch_info = BatchInfo(epoch_info, batch_idx) batch_info.on_batch_begin() self.train_batch(batch_info) batch_info.on_batch_end() epoch_info.result_accumulator.freeze_results() epoch_info.on_epoch_end()
[ "def", "train_epoch", "(", "self", ",", "epoch_info", ":", "EpochInfo", ",", "interactive", "=", "True", ")", ":", "epoch_info", ".", "on_epoch_begin", "(", ")", "if", "interactive", ":", "iterator", "=", "tqdm", ".", "trange", "(", "epoch_info", ".", "batches_per_epoch", ",", "file", "=", "sys", ".", "stdout", ",", "desc", "=", "\"Training\"", ",", "unit", "=", "\"batch\"", ")", "else", ":", "iterator", "=", "range", "(", "epoch_info", ".", "batches_per_epoch", ")", "for", "batch_idx", "in", "iterator", ":", "batch_info", "=", "BatchInfo", "(", "epoch_info", ",", "batch_idx", ")", "batch_info", ".", "on_batch_begin", "(", ")", "self", ".", "train_batch", "(", "batch_info", ")", "batch_info", ".", "on_batch_end", "(", ")", "epoch_info", ".", "result_accumulator", ".", "freeze_results", "(", ")", "epoch_info", ".", "on_epoch_end", "(", ")" ]
Train model on an epoch of a fixed number of batch updates
[ "Train", "model", "on", "an", "epoch", "of", "a", "fixed", "number", "of", "batch", "updates" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/reinforcers/buffered_mixed_policy_iteration_reinforcer.py#L75-L92
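The only interactive/non-interactive difference in train_epoch is the iterator: tqdm.trange is a drop-in replacement for range, so the loop body stays identical either way. A standalone sketch of the pattern:

import sys
import tqdm

def batch_iterator(n, interactive):
    # tqdm.trange(n) iterates like range(n) but draws a progress bar
    if interactive:
        return tqdm.trange(n, file=sys.stdout, desc="Training", unit="batch")
    return range(n)

for _ in batch_iterator(3, interactive=True):
    pass  # train one batch here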
251,387
MillionIntegrals/vel
vel/rl/reinforcers/buffered_mixed_policy_iteration_reinforcer.py
BufferedMixedPolicyIterationReinforcer.train_batch
def train_batch(self, batch_info: BatchInfo): """ Single, most atomic 'step' of learning this reinforcer can perform """ batch_info['sub_batch_data'] = [] self.on_policy_train_batch(batch_info) if self.settings.experience_replay > 0 and self.env_roller.is_ready_for_sampling(): if self.settings.stochastic_experience_replay: experience_replay_count = np.random.poisson(self.settings.experience_replay) else: experience_replay_count = self.settings.experience_replay for i in range(experience_replay_count): self.off_policy_train_batch(batch_info) # Even with all the experience replay, we count the single rollout as a single batch batch_info.aggregate_key('sub_batch_data')
python
def train_batch(self, batch_info: BatchInfo): batch_info['sub_batch_data'] = [] self.on_policy_train_batch(batch_info) if self.settings.experience_replay > 0 and self.env_roller.is_ready_for_sampling(): if self.settings.stochastic_experience_replay: experience_replay_count = np.random.poisson(self.settings.experience_replay) else: experience_replay_count = self.settings.experience_replay for i in range(experience_replay_count): self.off_policy_train_batch(batch_info) # Even with all the experience replay, we count the single rollout as a single batch batch_info.aggregate_key('sub_batch_data')
[ "def", "train_batch", "(", "self", ",", "batch_info", ":", "BatchInfo", ")", ":", "batch_info", "[", "'sub_batch_data'", "]", "=", "[", "]", "self", ".", "on_policy_train_batch", "(", "batch_info", ")", "if", "self", ".", "settings", ".", "experience_replay", ">", "0", "and", "self", ".", "env_roller", ".", "is_ready_for_sampling", "(", ")", ":", "if", "self", ".", "settings", ".", "stochastic_experience_replay", ":", "experience_replay_count", "=", "np", ".", "random", ".", "poisson", "(", "self", ".", "settings", ".", "experience_replay", ")", "else", ":", "experience_replay_count", "=", "self", ".", "settings", ".", "experience_replay", "for", "i", "in", "range", "(", "experience_replay_count", ")", ":", "self", ".", "off_policy_train_batch", "(", "batch_info", ")", "# Even with all the experience replay, we count the single rollout as a single batch", "batch_info", ".", "aggregate_key", "(", "'sub_batch_data'", ")" ]
Single, most atomic 'step' of learning this reinforcer can perform
[ "Single", "most", "atomic", "step", "of", "learning", "this", "reinforcer", "can", "perform" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/reinforcers/buffered_mixed_policy_iteration_reinforcer.py#L94-L110
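With stochastic experience replay the number of off-policy sub-batches per rollout is Poisson-distributed, so it averages to `experience_replay` while varying step to step (occasionally zero). A quick numeric check:

import numpy as np

experience_replay = 4
counts = np.random.poisson(experience_replay, size=10_000)
print(counts.mean())               # close to 4.0
print(counts.min(), counts.max())  # some rollouts get 0 replay updates, some far more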
251,388
MillionIntegrals/vel
vel/rl/reinforcers/buffered_mixed_policy_iteration_reinforcer.py
BufferedMixedPolicyIterationReinforcer.on_policy_train_batch
def on_policy_train_batch(self, batch_info: BatchInfo): """ Perform an 'on-policy' training step of evaluating an env and a single backpropagation step """ self.model.train() rollout = self.env_roller.rollout(batch_info, self.model, self.settings.number_of_steps).to_device(self.device) batch_result = self.algo.optimizer_step( batch_info=batch_info, device=self.device, model=self.model, rollout=rollout ) batch_info['sub_batch_data'].append(batch_result) batch_info['frames'] = rollout.frames() batch_info['episode_infos'] = rollout.episode_information()
python
def on_policy_train_batch(self, batch_info: BatchInfo): self.model.train() rollout = self.env_roller.rollout(batch_info, self.model, self.settings.number_of_steps).to_device(self.device) batch_result = self.algo.optimizer_step( batch_info=batch_info, device=self.device, model=self.model, rollout=rollout ) batch_info['sub_batch_data'].append(batch_result) batch_info['frames'] = rollout.frames() batch_info['episode_infos'] = rollout.episode_information()
[ "def", "on_policy_train_batch", "(", "self", ",", "batch_info", ":", "BatchInfo", ")", ":", "self", ".", "model", ".", "train", "(", ")", "rollout", "=", "self", ".", "env_roller", ".", "rollout", "(", "batch_info", ",", "self", ".", "model", ",", "self", ".", "settings", ".", "number_of_steps", ")", ".", "to_device", "(", "self", ".", "device", ")", "batch_result", "=", "self", ".", "algo", ".", "optimizer_step", "(", "batch_info", "=", "batch_info", ",", "device", "=", "self", ".", "device", ",", "model", "=", "self", ".", "model", ",", "rollout", "=", "rollout", ")", "batch_info", "[", "'sub_batch_data'", "]", ".", "append", "(", "batch_result", ")", "batch_info", "[", "'frames'", "]", "=", "rollout", ".", "frames", "(", ")", "batch_info", "[", "'episode_infos'", "]", "=", "rollout", ".", "episode_information", "(", ")" ]
Perform an 'on-policy' training step of evaluating an env and a single backpropagation step
[ "Perform", "an", "on", "-", "policy", "training", "step", "of", "evaluating", "an", "env", "and", "a", "single", "backpropagation", "step" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/reinforcers/buffered_mixed_policy_iteration_reinforcer.py#L112-L127
251,389
MillionIntegrals/vel
vel/rl/reinforcers/buffered_mixed_policy_iteration_reinforcer.py
BufferedMixedPolicyIterationReinforcer.off_policy_train_batch
def off_policy_train_batch(self, batch_info: BatchInfo): """ Perform an 'off-policy' training step of sampling the replay buffer and gradient descent """ self.model.train() rollout = self.env_roller.sample(batch_info, self.model, self.settings.number_of_steps).to_device(self.device) batch_result = self.algo.optimizer_step( batch_info=batch_info, device=self.device, model=self.model, rollout=rollout ) batch_info['sub_batch_data'].append(batch_result)
python
def off_policy_train_batch(self, batch_info: BatchInfo): self.model.train() rollout = self.env_roller.sample(batch_info, self.model, self.settings.number_of_steps).to_device(self.device) batch_result = self.algo.optimizer_step( batch_info=batch_info, device=self.device, model=self.model, rollout=rollout ) batch_info['sub_batch_data'].append(batch_result)
[ "def", "off_policy_train_batch", "(", "self", ",", "batch_info", ":", "BatchInfo", ")", ":", "self", ".", "model", ".", "train", "(", ")", "rollout", "=", "self", ".", "env_roller", ".", "sample", "(", "batch_info", ",", "self", ".", "model", ",", "self", ".", "settings", ".", "number_of_steps", ")", ".", "to_device", "(", "self", ".", "device", ")", "batch_result", "=", "self", ".", "algo", ".", "optimizer_step", "(", "batch_info", "=", "batch_info", ",", "device", "=", "self", ".", "device", ",", "model", "=", "self", ".", "model", ",", "rollout", "=", "rollout", ")", "batch_info", "[", "'sub_batch_data'", "]", ".", "append", "(", "batch_result", ")" ]
Perform an 'off-policy' training step of sampling the replay buffer and gradient descent
[ "Perform", "an", "off", "-", "policy", "training", "step", "of", "sampling", "the", "replay", "buffer", "and", "gradient", "descent" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/reinforcers/buffered_mixed_policy_iteration_reinforcer.py#L129-L142
251,390
MillionIntegrals/vel
vel/storage/strategy/classic_checkpoint_strategy.py
ClassicCheckpointStrategy.should_store_best_checkpoint
def should_store_best_checkpoint(self, epoch_idx, metrics) -> bool: """ Should we store current checkpoint as the best """ if not self.store_best: return False metric = metrics[self.metric] if better(self._current_best_metric_value, metric, self.metric_mode): self._current_best_metric_value = metric return True return False
python
def should_store_best_checkpoint(self, epoch_idx, metrics) -> bool: if not self.store_best: return False metric = metrics[self.metric] if better(self._current_best_metric_value, metric, self.metric_mode): self._current_best_metric_value = metric return True return False
[ "def", "should_store_best_checkpoint", "(", "self", ",", "epoch_idx", ",", "metrics", ")", "->", "bool", ":", "if", "not", "self", ".", "store_best", ":", "return", "False", "metric", "=", "metrics", "[", "self", ".", "metric", "]", "if", "better", "(", "self", ".", "_current_best_metric_value", ",", "metric", ",", "self", ".", "metric_mode", ")", ":", "self", ".", "_current_best_metric_value", "=", "metric", "return", "True", "return", "False" ]
Should we store current checkpoint as the best
[ "Should", "we", "store", "current", "checkpoint", "as", "the", "best" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/storage/strategy/classic_checkpoint_strategy.py#L27-L38
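vel's `better` helper is not shown in this record; a hypothetical equivalent (semantics assumed from usage) makes the checkpoint logic concrete: 'max' mode for metrics like accuracy, 'min' mode for losses, and the first observed value always wins:

def better(best_so_far, candidate, mode):
    # Hypothetical stand-in: does `candidate` improve on the best value so far?
    if best_so_far is None:
        return True
    return candidate > best_so_far if mode == 'max' else candidate < best_so_far

assert better(None, 0.5, 'max')       # first value always wins
assert better(0.5, 0.7, 'max')        # higher accuracy is better
assert not better(0.3, 0.7, 'min')    # higher loss is worse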
251,391
MillionIntegrals/vel
vel/sources/nlp/imdb.py
create
def create(model_config, batch_size, vectors=None): """ Create an IMDB dataset """ path = model_config.data_dir('imdb') text_field = data.Field(lower=True, tokenize='spacy', batch_first=True) label_field = data.LabelField(is_target=True) train_source, test_source = IMDBCached.splits( root=path, text_field=text_field, label_field=label_field ) text_field.build_vocab(train_source, max_size=25_000, vectors=vectors) label_field.build_vocab(train_source) train_iterator, test_iterator = data.BucketIterator.splits( (train_source, test_source), batch_size=batch_size, device=model_config.torch_device(), shuffle=True ) return TextData( train_source, test_source, train_iterator, test_iterator, text_field, label_field )
python
def create(model_config, batch_size, vectors=None): path = model_config.data_dir('imdb') text_field = data.Field(lower=True, tokenize='spacy', batch_first=True) label_field = data.LabelField(is_target=True) train_source, test_source = IMDBCached.splits( root=path, text_field=text_field, label_field=label_field ) text_field.build_vocab(train_source, max_size=25_000, vectors=vectors) label_field.build_vocab(train_source) train_iterator, test_iterator = data.BucketIterator.splits( (train_source, test_source), batch_size=batch_size, device=model_config.torch_device(), shuffle=True ) return TextData( train_source, test_source, train_iterator, test_iterator, text_field, label_field )
[ "def", "create", "(", "model_config", ",", "batch_size", ",", "vectors", "=", "None", ")", ":", "path", "=", "model_config", ".", "data_dir", "(", "'imdb'", ")", "text_field", "=", "data", ".", "Field", "(", "lower", "=", "True", ",", "tokenize", "=", "'spacy'", ",", "batch_first", "=", "True", ")", "label_field", "=", "data", ".", "LabelField", "(", "is_target", "=", "True", ")", "train_source", ",", "test_source", "=", "IMDBCached", ".", "splits", "(", "root", "=", "path", ",", "text_field", "=", "text_field", ",", "label_field", "=", "label_field", ")", "text_field", ".", "build_vocab", "(", "train_source", ",", "max_size", "=", "25_000", ",", "vectors", "=", "vectors", ")", "label_field", ".", "build_vocab", "(", "train_source", ")", "train_iterator", ",", "test_iterator", "=", "data", ".", "BucketIterator", ".", "splits", "(", "(", "train_source", ",", "test_source", ")", ",", "batch_size", "=", "batch_size", ",", "device", "=", "model_config", ".", "torch_device", "(", ")", ",", "shuffle", "=", "True", ")", "return", "TextData", "(", "train_source", ",", "test_source", ",", "train_iterator", ",", "test_iterator", ",", "text_field", ",", "label_field", ")" ]
Create an IMDB dataset
[ "Create", "an", "IMDB", "dataset" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/sources/nlp/imdb.py#L48-L73
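The same legacy-torchtext pattern (Field -> build_vocab -> BucketIterator) can be exercised on a toy in-memory dataset, so it runs without downloading IMDB. This targets the pre-0.9 torchtext API that vel uses; the default whitespace tokenizer stands in for spacy:

from torchtext import data

text_field = data.Field(lower=True, batch_first=True)   # default str.split tokenizer
label_field = data.LabelField(is_target=True)
fields = [('text', text_field), ('label', label_field)]

examples = [
    data.Example.fromlist(['A great movie', 'pos'], fields),
    data.Example.fromlist(['A terrible movie', 'neg'], fields),
]
dataset = data.Dataset(examples, fields)

text_field.build_vocab(dataset, max_size=25_000)
label_field.build_vocab(dataset)

iterator = data.BucketIterator(dataset, batch_size=2, sort_key=lambda ex: len(ex.text))
batch = next(iter(iterator))
print(batch.text.shape, batch.label)   # (2, max_len) token ids, plus label ids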
251,392
MillionIntegrals/vel
vel/commands/augvis_command.py
AugmentationVisualizationCommand.run
def run(self): """ Run the visualization """ dataset = self.source.train_dataset() num_samples = len(dataset) fig, ax = plt.subplots(self.cases, self.samples+1) selected_sample = np.sort(np.random.choice(num_samples, self.cases, replace=False)) for i in range(self.cases): raw_image, _ = dataset.get_raw(selected_sample[i]) ax[i, 0].imshow(raw_image) ax[i, 0].set_title("Original image") for j in range(self.samples): augmented_image, _ = dataset[selected_sample[i]] augmented_image = dataset.denormalize(augmented_image) ax[i, j+1].imshow(augmented_image) plt.show()
python
def run(self): dataset = self.source.train_dataset() num_samples = len(dataset) fig, ax = plt.subplots(self.cases, self.samples+1) selected_sample = np.sort(np.random.choice(num_samples, self.cases, replace=False)) for i in range(self.cases): raw_image, _ = dataset.get_raw(selected_sample[i]) ax[i, 0].imshow(raw_image) ax[i, 0].set_title("Original image") for j in range(self.samples): augmented_image, _ = dataset[selected_sample[i]] augmented_image = dataset.denormalize(augmented_image) ax[i, j+1].imshow(augmented_image) plt.show()
[ "def", "run", "(", "self", ")", ":", "dataset", "=", "self", ".", "source", ".", "train_dataset", "(", ")", "num_samples", "=", "len", "(", "dataset", ")", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "self", ".", "cases", ",", "self", ".", "samples", "+", "1", ")", "selected_sample", "=", "np", ".", "sort", "(", "np", ".", "random", ".", "choice", "(", "num_samples", ",", "self", ".", "cases", ",", "replace", "=", "False", ")", ")", "for", "i", "in", "range", "(", "self", ".", "cases", ")", ":", "raw_image", ",", "_", "=", "dataset", ".", "get_raw", "(", "selected_sample", "[", "i", "]", ")", "ax", "[", "i", ",", "0", "]", ".", "imshow", "(", "raw_image", ")", "ax", "[", "i", ",", "0", "]", ".", "set_title", "(", "\"Original image\"", ")", "for", "j", "in", "range", "(", "self", ".", "samples", ")", ":", "augmented_image", ",", "_", "=", "dataset", "[", "selected_sample", "[", "i", "]", "]", "augmented_image", "=", "dataset", ".", "denormalize", "(", "augmented_image", ")", "ax", "[", "i", ",", "j", "+", "1", "]", ".", "imshow", "(", "augmented_image", ")", "plt", ".", "show", "(", ")" ]
Run the visualization
[ "Run", "the", "visualization" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/commands/augvis_command.py#L14-L34
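One caveat worth noting: matplotlib squeezes the axes array, so with cases == 1 the 2-D indexing ax[i, 0] above would raise an IndexError; passing squeeze=False keeps the array 2-D for any grid shape:

import matplotlib.pyplot as plt

fig, ax = plt.subplots(1, 3)                  # ax has shape (3,) - ax[0, 0] fails
fig, ax = plt.subplots(1, 3, squeeze=False)   # ax has shape (1, 3) - ax[0, 0] works
ax[0, 0].set_title("Original image")
plt.close('all')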
251,393
MillionIntegrals/vel
vel/rl/env/classic_control.py
env_maker
def env_maker(environment_id, seed, serial_id, monitor=False, allow_early_resets=False): """ Create a classic control environment with basic set of wrappers """ env = gym.make(environment_id) env.seed(seed + serial_id) # Monitoring the env if monitor: logdir = logger.get_dir() and os.path.join(logger.get_dir(), str(serial_id)) else: logdir = None env = Monitor(env, logdir, allow_early_resets=allow_early_resets) return env
python
def env_maker(environment_id, seed, serial_id, monitor=False, allow_early_resets=False): env = gym.make(environment_id) env.seed(seed + serial_id) # Monitoring the env if monitor: logdir = logger.get_dir() and os.path.join(logger.get_dir(), str(serial_id)) else: logdir = None env = Monitor(env, logdir, allow_early_resets=allow_early_resets) return env
[ "def", "env_maker", "(", "environment_id", ",", "seed", ",", "serial_id", ",", "monitor", "=", "False", ",", "allow_early_resets", "=", "False", ")", ":", "env", "=", "gym", ".", "make", "(", "environment_id", ")", "env", ".", "seed", "(", "seed", "+", "serial_id", ")", "# Monitoring the env", "if", "monitor", ":", "logdir", "=", "logger", ".", "get_dir", "(", ")", "and", "os", ".", "path", ".", "join", "(", "logger", ".", "get_dir", "(", ")", ",", "str", "(", "serial_id", ")", ")", "else", ":", "logdir", "=", "None", "env", "=", "Monitor", "(", "env", ",", "logdir", ",", "allow_early_resets", "=", "allow_early_resets", ")", "return", "env" ]
Create a classic control environment with basic set of wrappers
[ "Create", "a", "classic", "control", "environment", "with", "basic", "set", "of", "wrappers" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/env/classic_control.py#L25-L38
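Seeding with seed + serial_id gives each parallel worker its own reproducible stream of episodes. A quick check against the classic gym API (pre-0.26, where env.seed() still exists):

import gym

seed = 42
envs = [gym.make('CartPole-v1') for _ in range(2)]
for serial_id, env in enumerate(envs):
    env.seed(seed + serial_id)            # distinct but deterministic per worker

first_obs = [env.reset() for env in envs]
print(first_obs[0], first_obs[1])         # different states, identical across reruns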
251,394
MillionIntegrals/vel
vel/models/imagenet/resnet34.py
Resnet34.freeze
def freeze(self, number=None): """ Freeze given number of layers in the model """ if number is None: number = self.head_layers for idx, child in enumerate(self.model.children()): if idx < number: mu.freeze_layer(child)
python
def freeze(self, number=None): if number is None: number = self.head_layers for idx, child in enumerate(self.model.children()): if idx < number: mu.freeze_layer(child)
[ "def", "freeze", "(", "self", ",", "number", "=", "None", ")", ":", "if", "number", "is", "None", ":", "number", "=", "self", ".", "head_layers", "for", "idx", ",", "child", "in", "enumerate", "(", "self", ".", "model", ".", "children", "(", ")", ")", ":", "if", "idx", "<", "number", ":", "mu", ".", "freeze_layer", "(", "child", ")" ]
Freeze given number of layers in the model
[ "Freeze", "given", "number", "of", "layers", "in", "the", "model" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/models/imagenet/resnet34.py#L66-L73
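mu.freeze_layer is a vel utility; presumably it disables gradient flow, which in plain PyTorch amounts to clearing requires_grad on the layer's parameters. A self-contained sketch of the same freeze-by-index idea on a toy model:

import torch.nn as nn

def freeze_layer(layer):
    # Hypothetical equivalent of vel's mu.freeze_layer
    for param in layer.parameters():
        param.requires_grad = False

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
for idx, child in enumerate(model.children()):
    if idx < 2:                           # freeze the first two children
        freeze_layer(child)

print([p.requires_grad for p in model.parameters()])  # [False, False, True, True]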
251,395
MillionIntegrals/vel
vel/models/imagenet/resnet34.py
Resnet34.unfreeze
def unfreeze(self): """ Unfreeze model layers """ for idx, child in enumerate(self.model.children()): mu.unfreeze_layer(child)
python
def unfreeze(self): for idx, child in enumerate(self.model.children()): mu.unfreeze_layer(child)
[ "def", "unfreeze", "(", "self", ")", ":", "for", "idx", ",", "child", "in", "enumerate", "(", "self", ".", "model", ".", "children", "(", ")", ")", ":", "mu", ".", "unfreeze_layer", "(", "child", ")" ]
Unfreeze model layers
[ "Unfreeze", "model", "layers" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/models/imagenet/resnet34.py#L75-L78
251,396
MillionIntegrals/vel
vel/rl/algo/policy_gradient/acer.py
AcerPolicyGradient.update_average_model
def update_average_model(self, model): """ Update weights of the average model with new model observation """ for model_param, average_param in zip(model.parameters(), self.average_model.parameters()): # EWMA average model update average_param.data.mul_(self.average_model_alpha).add_(model_param.data * (1 - self.average_model_alpha))
python
def update_average_model(self, model): for model_param, average_param in zip(model.parameters(), self.average_model.parameters()): # EWMA average model update average_param.data.mul_(self.average_model_alpha).add_(model_param.data * (1 - self.average_model_alpha))
[ "def", "update_average_model", "(", "self", ",", "model", ")", ":", "for", "model_param", ",", "average_param", "in", "zip", "(", "model", ".", "parameters", "(", ")", ",", "self", ".", "average_model", ".", "parameters", "(", ")", ")", ":", "# EWMA average model update", "average_param", ".", "data", ".", "mul_", "(", "self", ".", "average_model_alpha", ")", ".", "add_", "(", "model_param", ".", "data", "*", "(", "1", "-", "self", ".", "average_model_alpha", ")", ")" ]
Update weights of the average model with new model observation
[ "Update", "weights", "of", "the", "average", "model", "with", "new", "model", "observation" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/algo/policy_gradient/acer.py#L43-L47
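The in-place update above is Polyak/EWMA averaging, avg <- alpha * avg + (1 - alpha) * new, so the average model trails the trained one. A scalar check with an arbitrarily chosen alpha:

import torch

alpha = 0.99
avg = torch.tensor(0.0)
new = torch.tensor(1.0)
for _ in range(100):
    avg.mul_(alpha).add_(new * (1 - alpha))   # same update as above, per parameter
print(avg)   # ~0.634 == 1 - 0.99**100, slowly tracking the new value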
251,397
MillionIntegrals/vel
vel/rl/algo/policy_gradient/acer.py
AcerPolicyGradient.retrace
def retrace(self, rewards, dones, q_values, state_values, rho, final_values): """ Calculate Q retraced targets """ rho_bar = torch.min(torch.ones_like(rho) * self.retrace_rho_cap, rho) q_retraced_buffer = torch.zeros_like(rewards) next_value = final_values for i in reversed(range(rewards.size(0))): q_retraced = rewards[i] + self.discount_factor * next_value * (1.0 - dones[i]) # Next iteration next_value = rho_bar[i] * (q_retraced - q_values[i]) + state_values[i] q_retraced_buffer[i] = q_retraced return q_retraced_buffer
python
def retrace(self, rewards, dones, q_values, state_values, rho, final_values): rho_bar = torch.min(torch.ones_like(rho) * self.retrace_rho_cap, rho) q_retraced_buffer = torch.zeros_like(rewards) next_value = final_values for i in reversed(range(rewards.size(0))): q_retraced = rewards[i] + self.discount_factor * next_value * (1.0 - dones[i]) # Next iteration next_value = rho_bar[i] * (q_retraced - q_values[i]) + state_values[i] q_retraced_buffer[i] = q_retraced return q_retraced_buffer
[ "def", "retrace", "(", "self", ",", "rewards", ",", "dones", ",", "q_values", ",", "state_values", ",", "rho", ",", "final_values", ")", ":", "rho_bar", "=", "torch", ".", "min", "(", "torch", ".", "ones_like", "(", "rho", ")", "*", "self", ".", "retrace_rho_cap", ",", "rho", ")", "q_retraced_buffer", "=", "torch", ".", "zeros_like", "(", "rewards", ")", "next_value", "=", "final_values", "for", "i", "in", "reversed", "(", "range", "(", "rewards", ".", "size", "(", "0", ")", ")", ")", ":", "q_retraced", "=", "rewards", "[", "i", "]", "+", "self", ".", "discount_factor", "*", "next_value", "*", "(", "1.0", "-", "dones", "[", "i", "]", ")", "# Next iteration", "next_value", "=", "rho_bar", "[", "i", "]", "*", "(", "q_retraced", "-", "q_values", "[", "i", "]", ")", "+", "state_values", "[", "i", "]", "q_retraced_buffer", "[", "i", "]", "=", "q_retraced", "return", "q_retraced_buffer" ]
Calculate Q retraced targets
[ "Calculate", "Q", "retraced", "targets" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/algo/policy_gradient/acer.py#L170-L186
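A useful sanity check of the recursion: with importance ratios capped at 1 and q_values equal to state_values, next_value collapses to q_retraced and the targets reduce to plain discounted returns. Numerically, with toy values:

import torch

discount = 0.9
rewards = torch.tensor([[1.0], [1.0], [1.0]])   # (steps, envs)
dones = torch.zeros_like(rewards)
values = torch.zeros_like(rewards)              # q_values == state_values == 0
rho_bar = torch.ones_like(rewards)              # capped importance ratios
next_value = torch.tensor([0.0])                # final bootstrap value

q_retraced = torch.zeros_like(rewards)
for i in reversed(range(rewards.size(0))):
    q_ret = rewards[i] + discount * next_value * (1.0 - dones[i])
    next_value = rho_bar[i] * (q_ret - values[i]) + values[i]
    q_retraced[i] = q_ret
print(q_retraced.squeeze())   # tensor([2.7100, 1.9000, 1.0000])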
251,398
MillionIntegrals/vel
vel/rl/modules/action_head.py
DiagGaussianActionHead.logprob
def logprob(self, action_sample, pd_params): """ Log-likelihood """ means = pd_params[:, :, 0] log_std = pd_params[:, :, 1] std = torch.exp(log_std) z_score = (action_sample - means) / std return - (0.5 * ((z_score**2 + self.LOG2PI).sum(dim=-1)) + log_std.sum(dim=-1))
python
def logprob(self, action_sample, pd_params): means = pd_params[:, :, 0] log_std = pd_params[:, :, 1] std = torch.exp(log_std) z_score = (action_sample - means) / std return - (0.5 * ((z_score**2 + self.LOG2PI).sum(dim=-1)) + log_std.sum(dim=-1))
[ "def", "logprob", "(", "self", ",", "action_sample", ",", "pd_params", ")", ":", "means", "=", "pd_params", "[", ":", ",", ":", ",", "0", "]", "log_std", "=", "pd_params", "[", ":", ",", ":", ",", "1", "]", "std", "=", "torch", ".", "exp", "(", "log_std", ")", "z_score", "=", "(", "action_sample", "-", "means", ")", "/", "std", "return", "-", "(", "0.5", "*", "(", "(", "z_score", "**", "2", "+", "self", ".", "LOG2PI", ")", ".", "sum", "(", "dim", "=", "-", "1", ")", ")", "+", "log_std", ".", "sum", "(", "dim", "=", "-", "1", ")", ")" ]
Log-likelihood
[ "Log", "-", "likelihood" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/modules/action_head.py#L45-L54
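Assuming LOG2PI = log(2 * pi), the closed form above matches torch.distributions. A quick cross-check on made-up parameters, with pd_params unpacked into means and log_std as in the head:

import math
import torch
from torch.distributions import Normal

LOG2PI = math.log(2.0 * math.pi)
means = torch.tensor([[0.0, 1.0]])
log_std = torch.tensor([[0.0, -0.5]])
action = torch.tensor([[0.3, 0.7]])

z_score = (action - means) / log_std.exp()
manual = -(0.5 * (z_score ** 2 + LOG2PI).sum(dim=-1) + log_std.sum(dim=-1))
reference = Normal(means, log_std.exp()).log_prob(action).sum(dim=-1)
print(torch.allclose(manual, reference))   # True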
251,399
MillionIntegrals/vel
vel/rl/modules/action_head.py
CategoricalActionHead.logprob
def logprob(self, actions, action_logits): """ Logarithm of probability of given sample """ neg_log_prob = F.nll_loss(action_logits, actions, reduction='none') return -neg_log_prob
python
def logprob(self, actions, action_logits): neg_log_prob = F.nll_loss(action_logits, actions, reduction='none') return -neg_log_prob
[ "def", "logprob", "(", "self", ",", "actions", ",", "action_logits", ")", ":", "neg_log_prob", "=", "F", ".", "nll_loss", "(", "action_logits", ",", "actions", ",", "reduction", "=", "'none'", ")", "return", "-", "neg_log_prob" ]
Logarithm of probability of given sample
[ "Logarithm", "of", "probability", "of", "given", "sample" ]
e0726e1f63742b728966ccae0c8b825ea0ba491a
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/modules/action_head.py#L103-L106
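F.nll_loss expects log-probabilities rather than raw logits, so `action_logits` above must already be log_softmax output (the naming is a bit loose). An equivalence check against a direct gather:

import torch
import torch.nn.functional as F

log_probs = F.log_softmax(torch.randn(4, 3), dim=-1)
actions = torch.tensor([0, 2, 1, 2])

via_nll = -F.nll_loss(log_probs, actions, reduction='none')
via_gather = log_probs.gather(1, actions.unsqueeze(1)).squeeze(1)
print(torch.allclose(via_nll, via_gather))   # True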