Dataset columns:

  repo              string   (lengths 2 to 152)
  file              string   (lengths 15 to 239)
  code              string   (lengths 0 to 58.4M)
  file_length       int64    (0 to 58.4M)
  avg_line_length   float64  (0 to 1.81M)
  max_line_length   int64    (0 to 12.7M)
  extension_type    string   (364 distinct values)
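The numeric columns appear to be simple statistics computed from each row's code text. Below is a minimal, self-contained sketch of how such statistics could be derived; the exact definitions used when the dataset was built are an assumption (here file_length is taken as the character count of the code, and the line-length columns are computed over newline-split lines).

import os


def file_stats(code: str, path: str) -> dict:
    """Compute per-file statistics analogous to the dataset columns.

    The definitions here (character count, mean and max line length) are
    assumptions made for illustration, not the dataset's documented spec.
    """
    lines = code.splitlines() or ['']
    lengths = [len(line) for line in lines]
    return {
        'file': path,
        'file_length': len(code),                        # total characters in the file
        'avg_line_length': sum(lengths) / len(lengths),  # mean line length
        'max_line_length': max(lengths),                 # longest single line
        'extension_type': os.path.splitext(path)[1].lstrip('.'),
    }


# Usage example on a tiny two-line snippet.
stats = file_stats('import torch\nprint(torch.__version__)\n', 'demo.py')
print(stats)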
repo: CSD-locomotion
file: CSD-locomotion-master/garaged/tests/garage/torch/algos/test_vpg.py
code:
"""This script creates a test that fails when VPG performance is too low.""" import gym import pytest import torch from garage.envs import GarageEnv from garage.experiment import deterministic from garage.experiment import LocalRunner from garage.torch.algos import VPG from garage.torch.policies import GaussianMLPPolicy from garage.torch.value_functions import GaussianMLPValueFunction from tests.fixtures import snapshot_config # yapf: disable INVALID_ENTROPY_CONFIG = [ ({'entropy_method': 'INVALID_ENTROPY_METHOD'}, ValueError, 'entropy_method'), ({'entropy_method': 'max', 'center_adv': True}, ValueError, 'center_adv'), ({'entropy_method': 'max', 'center_adv': False, 'stop_entropy_gradient': False}, ValueError, 'entropy_method'), ({'entropy_method': 'no_entropy', 'policy_ent_coeff': 1.0}, ValueError, 'policy_ent_coeff') ] # yapf: enable class TestVPG: """Test class for VPG.""" @classmethod def setup_class(cls): """Setup method which is called once before all tests in this class.""" deterministic.set_seed(0) def setup_method(self): """Setup method which is called before every test.""" self._env = GarageEnv(gym.make('InvertedDoublePendulum-v2')) self._runner = LocalRunner(snapshot_config) self._policy = GaussianMLPPolicy(env_spec=self._env.spec, hidden_sizes=[64, 64], hidden_nonlinearity=torch.tanh, output_nonlinearity=None) self._params = { 'env_spec': self._env.spec, 'policy': self._policy, 'value_function': GaussianMLPValueFunction(env_spec=self._env.spec), 'max_path_length': 100, 'discount': 0.99, } def teardown_method(self): """Teardown method which is called after every test.""" self._env.close() @pytest.mark.mujoco def test_vpg_no_entropy(self): """Test VPG with no_entropy.""" self._params['positive_adv'] = True self._params['use_softplus_entropy'] = True algo = VPG(**self._params) self._runner.setup(algo, self._env) last_avg_ret = self._runner.train(n_epochs=10, batch_size=100) assert last_avg_ret > 0 @pytest.mark.mujoco def test_vpg_max(self): """Test VPG with maximum entropy.""" self._params['center_adv'] = False self._params['stop_entropy_gradient'] = True self._params['entropy_method'] = 'max' algo = VPG(**self._params) self._runner.setup(algo, self._env) last_avg_ret = self._runner.train(n_epochs=10, batch_size=100) assert last_avg_ret > 0 @pytest.mark.mujoco def test_vpg_regularized(self): """Test VPG with entropy_regularized.""" self._params['entropy_method'] = 'regularized' algo = VPG(**self._params) self._runner.setup(algo, self._env) last_avg_ret = self._runner.train(n_epochs=10, batch_size=100) assert last_avg_ret > 0 @pytest.mark.mujoco @pytest.mark.parametrize('algo_param, error, msg', INVALID_ENTROPY_CONFIG) def test_invalid_entropy_config(self, algo_param, error, msg): """Test VPG with invalid entropy config.""" self._params.update(algo_param) with pytest.raises(error, match=msg): VPG(**self._params)
file_length: 3,494 | avg_line_length: 33.264706 | max_line_length: 79 | extension_type: py
repo: CSD-locomotion
file: CSD-locomotion-master/garaged/tests/garage/torch/distributions/test_tanh_normal_dist.py
code:
"""Tests for the tanh transformed normal distribution.""" import torch from garage.torch.distributions import TanhNormal class TestBenchmarkTanhNormalDistribution: """Tests for the tanh normal distribution.""" def test_new_tanh_normal(self): """Tests the tanh_normal constructor.""" mean = torch.ones(1) std = torch.ones(1) dist = TanhNormal(mean, std) del dist def test_tanh_normal_bounds(self): """Test to make sure the tanh_normal dist obeys the bounds (-1,1).""" mean = torch.ones(1) * 100 std = torch.ones(1) * 100 dist = TanhNormal(mean, std) assert dist.mean <= 1. del dist mean = torch.ones(1) * -100 std = torch.ones(1) * 100 dist = TanhNormal(mean, std) assert dist.mean >= -1. def test_tanh_normal_rsample(self): """Test the bounds of the tanh_normal rsample function.""" mean = torch.zeros(1) std = torch.ones(1) dist = TanhNormal(mean, std) sample = dist.rsample() pre_tanh_action, action = dist.rsample_with_pre_tanh_value() assert (pre_tanh_action.tanh() == action).all() assert -1 <= action <= 1. assert -1 <= sample <= 1. del dist def test_tanh_normal_log_prob(self): """Verify the correctnes of the tanh_normal log likelihood function.""" mean = torch.zeros(1) std = torch.ones(1) dist = TanhNormal(mean, std) pre_tanh_action = torch.Tensor([[2.0960]]) action = pre_tanh_action.tanh() log_prob = dist.log_prob(action, pre_tanh_action) log_prob_approx = dist.log_prob(action) assert torch.allclose(log_prob, torch.Tensor([-0.2798519])) assert torch.allclose(log_prob_approx, torch.Tensor([-0.2798519])) del dist def test_tanh_normal_expand(self): """Test for expand function. Checks whether expand returns a distribution that has potentially a different batch size from the already existing distribution. """ mean = torch.zeros(1) std = torch.ones(1) dist = TanhNormal(mean, std) new_dist = dist.expand((2, )) sample = new_dist.sample() assert sample.shape == torch.Size((2, 1)) def test_tanh_normal_repr(self): """Test that the repr function outputs the class name.""" mean = torch.zeros(1) std = torch.ones(1) dist = TanhNormal(mean, std) assert repr(dist) == 'TanhNormal'
file_length: 2,539 | avg_line_length: 33.324324 | max_line_length: 79 | extension_type: py
repo: CSD-locomotion
file: CSD-locomotion-master/garaged/tests/garage/torch/modules/test_gaussian_mlp_module.py
code:
import pytest import torch from torch import nn from garage.torch.modules.gaussian_mlp_module \ import GaussianMLPIndependentStdModule, GaussianMLPModule, \ GaussianMLPTwoHeadedModule plain_settings = [ (1, 1, (1, )), (1, 2, (2, )), (1, 3, (3, )), (1, 1, (1, 2)), (1, 2, (2, 1)), (1, 3, (4, 5)), (2, 1, (1, )), (2, 2, (2, )), (2, 3, (3, )), (2, 1, (1, 2)), (2, 2, (2, 1)), (2, 3, (4, 5)), (5, 1, (1, )), (5, 2, (2, )), (5, 3, (3, )), (5, 1, (1, 2)), (5, 2, (2, 1)), (5, 3, (4, 5)), ] different_std_settings = [(1, 1, (1, ), (1, )), (1, 2, (2, ), (2, )), (1, 3, (3, ), (3, )), (1, 1, (1, 2), (1, 2)), (1, 2, (2, 1), (2, 1)), (1, 3, (4, 5), (4, 5)), (2, 1, (1, ), (1, )), (2, 2, (2, ), (2, )), (2, 3, (3, ), (3, )), (2, 1, (1, 2), (1, 2)), (2, 2, (2, 1), (2, 1)), (2, 3, (4, 5), (4, 5)), (5, 1, (1, ), (1, )), (5, 2, (2, ), (2, )), (5, 3, (3, ), (3, )), (5, 1, (1, 2), (1, 2)), (5, 2, (2, 1), (2, 1)), (5, 3, (4, 5), (4, 5))] @pytest.mark.parametrize('input_dim, output_dim, hidden_sizes', plain_settings) def test_std_share_network_output_values(input_dim, output_dim, hidden_sizes): module = GaussianMLPTwoHeadedModule(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) dist = module(torch.ones(input_dim)) exp_mean = torch.full( (output_dim, ), input_dim * (torch.Tensor(hidden_sizes).prod().item()), dtype=torch.float) exp_variance = (input_dim * torch.Tensor(hidden_sizes).prod()).exp().pow(2).item() assert dist.mean.equal(exp_mean) assert dist.variance.equal( torch.full((output_dim, ), exp_variance, dtype=torch.float)) assert dist.rsample().shape == (output_dim, ) @pytest.mark.parametrize('input_dim, output_dim, hidden_sizes', plain_settings) def test_std_share_network_output_values_with_batch(input_dim, output_dim, hidden_sizes): module = GaussianMLPTwoHeadedModule(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) batch_size = 5 dist = module(torch.ones([batch_size, input_dim])) exp_mean = torch.full( (batch_size, output_dim), input_dim * (torch.Tensor(hidden_sizes).prod().item()), dtype=torch.float) exp_variance = (input_dim * torch.Tensor(hidden_sizes).prod()).exp().pow(2).item() assert dist.mean.equal(exp_mean) assert dist.variance.equal( torch.full((batch_size, output_dim), exp_variance, dtype=torch.float)) assert dist.rsample().shape == (batch_size, output_dim) @pytest.mark.parametrize('input_dim, output_dim, hidden_sizes', plain_settings) def test_std_network_output_values(input_dim, output_dim, hidden_sizes): init_std = 2. module = GaussianMLPModule(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) dist = module(torch.ones(input_dim)) exp_mean = torch.full( (output_dim, ), input_dim * (torch.Tensor(hidden_sizes).prod().item()), dtype=torch.float) exp_variance = init_std**2 assert dist.mean.equal(exp_mean) assert dist.variance.equal( torch.full((output_dim, ), exp_variance, dtype=torch.float)) assert dist.rsample().shape == (output_dim, ) @pytest.mark.parametrize('input_dim, output_dim, hidden_sizes', plain_settings) def test_std_network_output_values_with_batch(input_dim, output_dim, hidden_sizes): init_std = 2. 
module = GaussianMLPModule(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) batch_size = 5 dist = module(torch.ones([batch_size, input_dim])) exp_mean = torch.full( (batch_size, output_dim), input_dim * (torch.Tensor(hidden_sizes).prod().item()), dtype=torch.float) exp_variance = init_std**2 assert dist.mean.equal(exp_mean) assert dist.variance.equal( torch.full((batch_size, output_dim), exp_variance, dtype=torch.float)) assert dist.rsample().shape == (batch_size, output_dim) @pytest.mark.parametrize( 'input_dim, output_dim, hidden_sizes, std_hidden_sizes', different_std_settings) def test_std_adaptive_network_output_values(input_dim, output_dim, hidden_sizes, std_hidden_sizes): module = GaussianMLPIndependentStdModule(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, std_hidden_sizes=std_hidden_sizes, hidden_nonlinearity=None, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_, std_hidden_nonlinearity=None, std_hidden_w_init=nn.init.ones_, std_output_w_init=nn.init.ones_) dist = module(torch.ones(input_dim)) exp_mean = torch.full( (output_dim, ), input_dim * (torch.Tensor(hidden_sizes).prod().item()), dtype=torch.float) exp_variance = (input_dim * torch.Tensor(hidden_sizes).prod()).exp().pow(2).item() assert dist.mean.equal(exp_mean) assert dist.variance.equal( torch.full((output_dim, ), exp_variance, dtype=torch.float)) assert dist.rsample().shape == (output_dim, ) @pytest.mark.parametrize('input_dim, output_dim, hidden_sizes', plain_settings) def test_softplus_std_network_output_values(input_dim, output_dim, hidden_sizes): init_std = 2. module = GaussianMLPModule(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='softplus', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) dist = module(torch.ones(input_dim)) exp_mean = input_dim * torch.Tensor(hidden_sizes).prod().item() exp_variance = torch.Tensor([init_std]).exp().add(1.).log()**2 assert dist.mean.equal( torch.full((output_dim, ), exp_mean, dtype=torch.float)) assert dist.variance.equal( torch.full((output_dim, ), exp_variance[0], dtype=torch.float)) assert dist.rsample().shape == (output_dim, ) @pytest.mark.parametrize('input_dim, output_dim, hidden_sizes', plain_settings) def test_exp_min_std(input_dim, output_dim, hidden_sizes): min_value = 10. module = GaussianMLPModule(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, init_std=1., min_std=min_value, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.zeros_, output_w_init=nn.init.zeros_) dist = module(torch.ones(input_dim)) exp_variance = min_value**2 assert dist.variance.equal( torch.full((output_dim, ), exp_variance, dtype=torch.float)) @pytest.mark.parametrize('input_dim, output_dim, hidden_sizes', plain_settings) def test_exp_max_std(input_dim, output_dim, hidden_sizes): max_value = 1. 
module = GaussianMLPModule(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, init_std=10., max_std=max_value, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.zeros_, output_w_init=nn.init.zeros_) dist = module(torch.ones(input_dim)) exp_variance = max_value**2 assert dist.variance.equal( torch.full((output_dim, ), exp_variance, dtype=torch.float)) @pytest.mark.parametrize('input_dim, output_dim, hidden_sizes', plain_settings) def test_softplus_min_std(input_dim, output_dim, hidden_sizes): min_value = 2. module = GaussianMLPModule(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, init_std=1., min_std=min_value, hidden_nonlinearity=None, std_parameterization='softplus', hidden_w_init=nn.init.zeros_, output_w_init=nn.init.zeros_) dist = module(torch.ones(input_dim)) exp_variance = torch.Tensor([min_value]).exp().add(1.).log()**2 assert dist.variance.equal( torch.full((output_dim, ), exp_variance[0], dtype=torch.float)) @pytest.mark.parametrize('input_dim, output_dim, hidden_sizes', plain_settings) def test_softplus_max_std(input_dim, output_dim, hidden_sizes): max_value = 1. module = GaussianMLPModule(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, init_std=10, max_std=max_value, hidden_nonlinearity=None, std_parameterization='softplus', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) dist = module(torch.ones(input_dim)) exp_variance = torch.Tensor([max_value]).exp().add(1.).log()**2 assert torch.equal( dist.variance, torch.full((output_dim, ), exp_variance[0], dtype=torch.float)) def test_unknown_std_parameterization(): with pytest.raises(NotImplementedError): GaussianMLPModule(input_dim=1, output_dim=1, std_parameterization='unknown')
file_length: 11,989 | avg_line_length: 38.966667 | max_line_length: 79 | extension_type: py
repo: CSD-locomotion
file: CSD-locomotion-master/garaged/tests/garage/torch/modules/test_mlp_module.py
code:
"""Test MLPModule.""" import pickle import numpy as np import pytest import torch import torch.nn as nn from garage.torch.modules import MLPModule class TestMLPModel: """Test MLPModule.""" # yapf: disable @pytest.mark.parametrize('input_dim, output_dim, hidden_sizes', [ (5, 1, (1, )), (5, 1, (2, )), (5, 2, (3, )), (5, 2, (1, 1)), (5, 3, (2, 2)), ]) # yapf: enable def test_output_values(self, input_dim, output_dim, hidden_sizes): """Test output values from MLPModule. Args: input_dim (int): Input dimension. output_dim (int): Ouput dimension. hidden_sizes (list[int]): Size of hidden layers. """ input_val = torch.ones([1, input_dim], dtype=torch.float32) module_with_nonlinear_function_and_module = MLPModule( input_dim=input_dim, output_dim=output_dim, hidden_nonlinearity=torch.relu, hidden_sizes=hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_, output_nonlinearity=torch.nn.ReLU) module_with_nonlinear_module_instance_and_function = MLPModule( input_dim=input_dim, output_dim=output_dim, hidden_nonlinearity=torch.nn.ReLU(), hidden_sizes=hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_, output_nonlinearity=torch.relu) output1 = module_with_nonlinear_function_and_module(input_val) output2 = module_with_nonlinear_module_instance_and_function(input_val) expected_output = torch.full([1, output_dim], fill_value=5 * np.prod(hidden_sizes), dtype=torch.float32) assert torch.all(torch.eq(expected_output, output1)) assert torch.all(torch.eq(expected_output, output2)) # yapf: disable @pytest.mark.parametrize('input_dim, output_dim, hidden_sizes', [ (5, 1, (1, )), (5, 1, (2, )), (5, 2, (3, )), (5, 2, (1, 1)), (5, 3, (2, 2)), ]) # yapf: enable def test_is_pickleable(self, input_dim, output_dim, hidden_sizes): """Check MLPModule is pickeable. Args: input_dim (int): Input dimension. output_dim (int): Ouput dimension. hidden_sizes (list[int]): Size of hidden layers. """ input_val = torch.ones([1, input_dim], dtype=torch.float32) module = MLPModule(input_dim=input_dim, output_dim=output_dim, hidden_nonlinearity=torch.relu, hidden_sizes=hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_, output_nonlinearity=torch.nn.ReLU) output1 = module(input_val) h = pickle.dumps(module) model_pickled = pickle.loads(h) output2 = model_pickled(input_val) assert np.array_equal(torch.all(torch.eq(output1, output2)), True) # yapf: disable @pytest.mark.parametrize('hidden_nonlinear, output_nonlinear', [ (torch.nn.ReLU, 'test'), ('test', torch.relu), (object(), torch.tanh), (torch.tanh, object()) ]) # yapf: enable def test_no_head_invalid_settings(self, hidden_nonlinear, output_nonlinear): """Check MLPModule throws exception with invalid non-linear functions. Args: hidden_nonlinear (callable or torch.nn.Module): Non-linear functions for hidden layers. output_nonlinear (callable or torch.nn.Module): Non-linear functions for output layer. 
""" expected_msg = 'Non linear function .* is not supported' with pytest.raises(ValueError, match=expected_msg): MLPModule(input_dim=3, output_dim=5, hidden_sizes=(2, 3), hidden_nonlinearity=hidden_nonlinear, output_nonlinearity=output_nonlinear) def test_mlp_with_learnable_non_linear_function(self): """Test MLPModule with learnable non-linear functions.""" input_dim, output_dim, hidden_sizes = 1, 1, (3, 2) input_val = -torch.ones([1, input_dim], dtype=torch.float32) module = MLPModule(input_dim=input_dim, output_dim=output_dim, hidden_nonlinearity=torch.nn.PReLU(init=10.), hidden_sizes=hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_, output_nonlinearity=torch.nn.PReLU(init=1.)) output = module(input_val) output.sum().backward() for tt in module.parameters(): assert torch.all(torch.ne(tt.grad, 0))
file_length: 5,051 | avg_line_length: 34.577465 | max_line_length: 79 | extension_type: py
repo: CSD-locomotion
file: CSD-locomotion-master/garaged/tests/garage/torch/modules/test_multi_headed_mlp_module.py
code:
"""Test Multi-headed MLPModule.""" import pytest import torch from torch import nn from garage.torch.modules import MultiHeadedMLPModule plain_settings = [ (1, (2, ), (2, ), (0, 1, 2), 3), (5, (3, ), (4, 5), (0, 1, 2, 3, 5, 5), 6), (1, (2, 3), (2, ), (0, 3), 2), (2, (3, 6, 7), (3, ), (6, ), 3), (5, (3, 4, 1, 2), (4, 5), (5, 1, 2, 3), 4), ] invalid_settings = [ (1, (1, 4, 5), (1, ), 2, (None, ), (1, 2), (nn.init.ones_, )), # n_head != output_dims (1, (1, 4), (1, ), 2, (None, ), (1, 2, 3), (nn.init.ones_, )), # n_head != w_init (1, (1, 4), (1, ), 2, (None, None, None), (1, 2), (nn.init.ones_, )), # n_head != nonlinearity (1, (1, 4), (1, ), 2, (None, ), (1, 2), (nn.init.ones_, nn.init.ones_, nn.init.ones_)), # n_head != b_init (1, (1, 4, 5), (1, ), 3, (None, ), (1, 2), (nn.init.ones_, )), # output_dims > w_init (1, (1, ), (1, ), 1, (None, ), (1, 2, 3), (nn.init.ones_, )), # output_dims < w_init (1, (1, 4, 5), (1, ), 3, (None, None), (1, 2, 3), (nn.init.ones_, )), # output_dims > nonlinearity (1, (1, ), (1, ), 1, (None, None, None), (1, 2, 3), (nn.init.ones_, )), # output_dims < nonlinearity (1, (1, 4, 5), (1, ), 3, (None, ), (1, 2, 3), (nn.init.ones_, nn.init.ones_)), # output_dims > b_init (1, (1, ), (1, ), 1, (None, ), (1, 2, 3), (nn.init.ones_, nn.init.ones_, nn.init.ones_)), # output_dims > b_init ] def _helper_make_inits(val): """Return the function that initialize variable with val. Args: val (int): Value to initialize variable. Returns: lambda: Lambda function that initialize variable with val. """ return lambda x: nn.init.constant_(x, val) @pytest.mark.parametrize( 'input_dim, output_dim, hidden_sizes, output_w_init_vals, n_heads', plain_settings) def test_multi_headed_mlp_module(input_dim, output_dim, hidden_sizes, output_w_init_vals, n_heads): """Test Multi-headed MLPModule. Args: input_dim (int): Input dimension. output_dim (int): Ouput dimension. hidden_sizes (list[int]): Size of hidden layers. output_w_init_vals (list[int]): Init values for output weights. n_heads (int): Number of output layers. """ module = MultiHeadedMLPModule(n_heads=n_heads, input_dim=input_dim, output_dims=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=None, hidden_w_init=nn.init.ones_, output_nonlinearities=None, output_w_inits=list( map(_helper_make_inits, output_w_init_vals))) input_value = torch.ones(input_dim) outputs = module(input_value) if len(output_w_init_vals) == 1: output_w_init_vals = list(output_w_init_vals) * n_heads if len(output_dim) == 1: output_dim = list(output_dim) * n_heads for i, output in enumerate(outputs): expected = input_dim * torch.Tensor(hidden_sizes).prod() expected *= output_w_init_vals[i] assert torch.equal( output, torch.full((output_dim[i], ), expected, dtype=torch.float)) @pytest.mark.parametrize( 'input_dim, output_dim, hidden_sizes, output_w_init_vals, n_heads', plain_settings) def test_multi_headed_mlp_module_with_layernorm(input_dim, output_dim, hidden_sizes, output_w_init_vals, n_heads): """Test Multi-headed MLPModule with layer normalization. Args: input_dim (int): Input dimension. output_dim (int): Ouput dimension. hidden_sizes (list[int]): Size of hidden layers. output_w_init_vals (list[int]): Init values for output weights. n_heads (int): Number of output layers. 
""" module = MultiHeadedMLPModule(n_heads=n_heads, input_dim=input_dim, output_dims=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=None, layer_normalization=True, hidden_w_init=nn.init.ones_, output_nonlinearities=None, output_w_inits=list( map(_helper_make_inits, output_w_init_vals))) input_value = torch.ones(input_dim) outputs = module(input_value) if len(output_w_init_vals) == 1: output_w_init_vals = list(output_w_init_vals) * n_heads if len(output_dim) == 1: output_dim = list(output_dim) * n_heads for i, output in enumerate(outputs): expected = input_dim * torch.Tensor(hidden_sizes).prod() expected *= output_w_init_vals[i] assert torch.equal(output, torch.zeros(output_dim[i])) @pytest.mark.parametrize('input_dim, output_dim, hidden_sizes, ' 'n_heads, nonlinearity, w_init, b_init', invalid_settings) def test_invalid_settings(input_dim, output_dim, hidden_sizes, n_heads, nonlinearity, w_init, b_init): """Test Multi-headed MLPModule with invalid parameters. Args: input_dim (int): Input dimension. output_dim (int): Ouput dimension. hidden_sizes (list[int]): Size of hidden layers. n_heads (int): Number of output layers. nonlinearity (callable or torch.nn.Module): Non-linear functions for output layers w_init (list[callable]): Initializer function for the weight in output layer. b_init (list[callable]): Initializer function for the bias in output layer. """ expected_msg_template = ('should be either an integer or a collection of ' 'length n_heads') with pytest.raises(ValueError, match=expected_msg_template): MultiHeadedMLPModule(n_heads=n_heads, input_dim=input_dim, output_dims=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=None, hidden_w_init=nn.init.ones_, output_nonlinearities=nonlinearity, output_w_inits=list( map(_helper_make_inits, w_init)), output_b_inits=b_init)
file_length: 6,812 | avg_line_length: 39.796407 | max_line_length: 79 | extension_type: py
repo: CSD-locomotion
file: CSD-locomotion-master/garaged/tests/garage/torch/optimizers/test_differentiable_sgd.py
code:
"""Tests for DifferentialSGD optimizer.""" import torch from garage.torch import update_module_params from garage.torch.optimizers import DifferentiableSGD def test_differentiable_sgd(): """Test second order derivative after taking optimization step.""" policy = torch.nn.Linear(10, 10, bias=False) lr = 0.01 diff_sgd = DifferentiableSGD(policy, lr=lr) named_theta = dict(policy.named_parameters()) theta = list(named_theta.values())[0] meta_loss = torch.sum(theta**2) meta_loss.backward(create_graph=True) diff_sgd.step() theta_prime = list(policy.parameters())[0] loss = torch.sum(theta_prime**2) update_module_params(policy, named_theta) diff_sgd.zero_grad() loss.backward() result = theta.grad dtheta_prime = 1 - 2 * lr # dtheta_prime/dtheta dloss = 2 * theta_prime # dloss/dtheta_prime expected_result = dloss * dtheta_prime # dloss/dtheta assert torch.allclose(result, expected_result)
file_length: 980 | avg_line_length: 27.852941 | max_line_length: 70 | extension_type: py
repo: CSD-locomotion
file: CSD-locomotion-master/garaged/tests/garage/torch/optimizers/test_torch_conjugate_gradient_optimizer.py
code:
"""Tests for garage.torch.optimizers.conjugateGradientOptimizer.""" import pickle import numpy as np import pytest import torch from garage.torch.optimizers.conjugate_gradient_optimizer import ( _build_hessian_vector_product, _conjugate_gradient, ConjugateGradientOptimizer) # pylint: disable=not-callable #https://github.com/pytorch/pytorch/issues/24807 # noqa: E501 class TestTorchConjugateGradientOptimizer: """Test class for conjugate gradient optimizer.""" def test_line_search_should_stop(self): """Test if line search stops when loss is decreasing, and constraint is satisfied.""" # noqa: E501 p1 = torch.tensor([0.1]) p2 = torch.tensor([0.1]) params = [p1, p2] optimizer = ConjugateGradientOptimizer(params, 0.01) expected_num_steps = 1 loss_calls = 0 first_time = True def f_loss(): nonlocal loss_calls, first_time if first_time: first_time = False else: loss_calls += 1 return -torch.tensor(loss_calls) kl_calls = 0 def f_constrint(): nonlocal kl_calls kl_calls += 1 return -torch.tensor(kl_calls) descent_step = torch.tensor([0.05, 0.05]) optimizer._backtracking_line_search(params, descent_step, f_loss, f_constrint) assert loss_calls == expected_num_steps assert kl_calls == expected_num_steps def test_line_search_step_size_should_decrease(self): """Line search step size should always decrease.""" p1 = torch.tensor([0.1]) p2 = torch.tensor([0.1]) params = [p1, p2] optimizer = ConjugateGradientOptimizer(params, 0.01) p1_history = [] p2_history = [] loss = 0 first_time = True def f_loss(): nonlocal loss, first_time if first_time: first_time = False else: p1_history.append(p1.clone()) p2_history.append(p2.clone()) loss += 1 return torch.tensor(loss) def f_constrint(): return torch.tensor(0) descent_step = torch.tensor([0.05, 0.05]) optimizer._backtracking_line_search(params, descent_step, f_loss, f_constrint) p1_steps = [] p2_steps = [] for i in range(len(p1_history) - 1): p1_steps.append(p1_history[i + 1] - p1_history[i]) p2_steps.append(p2_history[i + 1] - p2_history[i]) for i in range(len(p1_steps) - 1): assert p1_steps[i] > p1_steps[i + 1] assert p2_steps[i] > p2_steps[i + 1] def test_cg(): """Solve Ax = b using Conjugate gradient method.""" a = np.linspace(-np.pi, np.pi, 25).reshape((5, 5)) a = a.T.dot(a) # make sure a is positive semi-definite def hvp(v): return torch.tensor(a.dot(v)) b = torch.tensor(np.linspace(-np.pi, np.pi, 5)) x = _conjugate_gradient(hvp, b, 5) assert np.allclose(a.dot(x), b) def test_hessian_vector_product(): """Test Hessian-vector product for a function with one variable.""" a = torch.tensor([5.0]) x = torch.tensor([10.0], requires_grad=True) def f(): return a * (x**2) expected_hessian = 2 * a vector = torch.tensor([10.0]) expected_hvp = (expected_hessian * vector).detach() f_Ax = _build_hessian_vector_product(f, [x]) computed_hvp = f_Ax(vector).detach() assert np.allclose(computed_hvp, expected_hvp) @pytest.mark.parametrize('a_val, b_val, x_val, y_val, vector', [ (1.0, 1.0, 1.0, 1.0, [10.0, 20.0]), (5.0, 10.0, -2.0, 5.0, [0.0, -1.0]), (0.0, 0.0, 1.1, 0.02, [0.0, 0.0]), (-2.2, -1.5, -12.3, 34.8, [2.2, 5.3]), (-1.5, 0.0, -0.002, 4.93, [0.1, -0.02]), ]) def test_hessian_vector_product_2x2(a_val, b_val, x_val, y_val, vector): """Test for a function with two variables.""" obs = [torch.tensor([a_val]), torch.tensor([b_val])] vector = torch.tensor([vector]) x = torch.tensor(x_val, requires_grad=True) y = torch.tensor(y_val, requires_grad=True) def f(): a, b = obs[0], obs[1] return a * (x**2) + b * (y**2) expected_hessian = compute_hessian(f(), [x, y]) expected_hvp = 
torch.mm(vector, expected_hessian).detach() f_Ax = _build_hessian_vector_product(f, [x, y]) hvp = f_Ax(vector[0]).detach() assert np.allclose(hvp, expected_hvp, atol=1e-6) @pytest.mark.parametrize('a_val, b_val, x_val, y_val, vector', [ (1.0, 1.0, 1.0, 1.0, [10.0, 20.0]), (5.0, 10.0, -2.0, 5.0, [0.0, -1.0]), (0.0, 0.0, 1.1, 0.02, [0.0, 0.0]), (-2.2, -1.5, -12.3, 34.8, [2.2, 5.3]), (-1.5, 0.0, -0.002, 4.93, [0.1, -0.02]), ]) def test_hessian_vector_product_2x2_non_diagonal(a_val, b_val, x_val, y_val, vector): """Test for a function with two variables and non-diagonal Hessian.""" obs = [torch.tensor([a_val]), torch.tensor([b_val])] vector = torch.tensor([vector]) x = torch.tensor([x_val], requires_grad=True) y = torch.tensor([y_val], requires_grad=True) def f(): a, b = obs[0], obs[1] kl = a * (x**3) + b * (y**3) + (x**2) * y + (y**2) * x return kl expected_hessian = compute_hessian(f(), [x, y]) expected_hvp = torch.mm(vector, expected_hessian).detach() f_Ax = _build_hessian_vector_product(f, [x, y]) hvp = f_Ax(vector[0]).detach() assert np.allclose(hvp, expected_hvp) def compute_hessian(f, params): """Compute hessian matrix of given function.""" h = [] for i in params: h_i = [] for j in params: grad = torch.autograd.grad(f, j, create_graph=True) h_ij = torch.autograd.grad(grad, i, allow_unused=True, retain_graph=True) h_ij = (torch.tensor(0.), ) if h_ij[0] is None else h_ij h_i.append(h_ij[0]) h_i = torch.stack(h_i) h.append(h_i) h = torch.stack(h) h = h.reshape((len(params), len(params))) return h def test_pickle_round_trip(): """Test that pickling works as one would normally expect.""" # pylint: disable=protected-access p1 = torch.tensor([0.1]) p2 = torch.tensor([0.1]) params = [p1, p2] optimizer = ConjugateGradientOptimizer(params, 0.01) optimizer_pickled = pickle.dumps(optimizer) optimizer2 = pickle.loads(optimizer_pickled) assert optimizer._max_constraint_value == optimizer2._max_constraint_value assert optimizer._cg_iters == optimizer2._cg_iters assert optimizer._max_backtracks == optimizer2._max_backtracks assert optimizer._backtrack_ratio == optimizer2._backtrack_ratio assert optimizer._hvp_reg_coeff == optimizer2._hvp_reg_coeff assert optimizer._accept_violation == optimizer2._accept_violation class BrokenPicklingConjugateGradientOptimizer(ConjugateGradientOptimizer): """Used to check unpickling compat with old implementation.""" @property def state(self): """dict: Get the (empty) state.""" return dict() @state.setter def state(self, state): # Pylint is confused and thinks fset doesn't exist. # pylint: disable=no-member ConjugateGradientOptimizer.state.fset(self, state) def test_unpickle_empty_state(): """Test that pickling works as one would normally expect.""" # pylint: disable=protected-access p1 = torch.tensor([0.1]) p2 = torch.tensor([0.1]) params = [p1, p2] optimizer = BrokenPicklingConjugateGradientOptimizer(params, 0.02) optimizer_pickled = pickle.dumps(optimizer) optimizer2 = pickle.loads(optimizer_pickled) assert optimizer2._max_constraint_value == 0.01 # These asserts only pass because they contain the default values. assert optimizer._cg_iters == optimizer2._cg_iters assert optimizer._max_backtracks == optimizer2._max_backtracks assert optimizer._backtrack_ratio == optimizer2._backtrack_ratio assert optimizer._hvp_reg_coeff == optimizer2._hvp_reg_coeff assert optimizer._accept_violation == optimizer2._accept_violation
file_length: 8,346 | avg_line_length: 33.491736 | max_line_length: 107 | extension_type: py
repo: CSD-locomotion
file: CSD-locomotion-master/garaged/tests/garage/torch/policies/test_context_conditioned_policy.py
code:
"""This is a script to test the ContextConditionedPolicy module.""" import akro import numpy as np import torch import torch.nn as nn from torch.nn import functional as F # NOQA from garage import TimeStep from garage.envs import EnvSpec from garage.envs import GarageEnv from garage.torch.embeddings import MLPEncoder from garage.torch.policies import ContextConditionedPolicy from garage.torch.policies import TanhGaussianMLPPolicy from tests.fixtures.envs.dummy import DummyBoxEnv class TestContextConditionedPolicy: """Test for ContextConditionedPolicy.""" def setup_method(self): """Setup for all test methods.""" self.latent_dim = 5 self.env_spec = GarageEnv(DummyBoxEnv()) latent_space = akro.Box(low=-1, high=1, shape=(self.latent_dim, ), dtype=np.float32) # add latent space to observation space to create a new space augmented_obs_space = akro.Tuple( (self.env_spec.observation_space, latent_space)) augmented_env_spec = EnvSpec(augmented_obs_space, self.env_spec.action_space) self.obs_dim = self.env_spec.observation_space.flat_dim self.action_dim = self.env_spec.action_space.flat_dim reward_dim = 1 self.encoder_input_dim = self.obs_dim + self.action_dim + reward_dim encoder_output_dim = self.latent_dim * 2 encoder_hidden_sizes = (3, 2, encoder_output_dim) context_encoder = MLPEncoder(input_dim=self.encoder_input_dim, output_dim=encoder_output_dim, hidden_nonlinearity=None, hidden_sizes=encoder_hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) context_policy = TanhGaussianMLPPolicy(env_spec=augmented_env_spec, hidden_sizes=(3, 5, 7), hidden_nonlinearity=F.relu, output_nonlinearity=None) self.module = ContextConditionedPolicy(latent_dim=self.latent_dim, context_encoder=context_encoder, policy=context_policy, use_information_bottleneck=True, use_next_obs=False) def test_reset_belief(self): """Test reset_belief.""" expected_shape = [1, self.latent_dim] self.module.reset_belief() assert torch.all( torch.eq(self.module.z_means, torch.zeros(expected_shape))) assert torch.all( torch.eq(self.module.z_vars, torch.ones(expected_shape))) def test_sample_from_belief(self): """Test sample_from_belief.""" self.module.sample_from_belief() expected_shape = [1, self.latent_dim] assert all( [a == b for a, b in zip(self.module.z.shape, expected_shape)]) def test_update_context(self): """Test update_context.""" s = TimeStep(env_spec=self.env_spec, observation=np.ones(self.obs_dim), next_observation=np.ones(self.obs_dim), action=np.ones(self.action_dim), reward=1.0, terminal=False, env_info={}, agent_info={}) updates = 10 for _ in range(updates): self.module.update_context(s) assert torch.all( torch.eq(self.module.context, torch.ones(updates, self.encoder_input_dim))) def test_infer_posterior(self): """Test infer_posterior.""" context = torch.randn(1, 1, self.encoder_input_dim) self.module.infer_posterior(context) expected_shape = [1, self.latent_dim] assert all( [a == b for a, b in zip(self.module.z.shape, expected_shape)]) def test_forward(self): """Test forward.""" t, b = 1, 2 obs = torch.randn((t, b, self.obs_dim), dtype=torch.float32) context = torch.randn(1, 1, self.encoder_input_dim) policy_output, task_z_out = self.module.forward(obs, context) expected_shape = [b, self.action_dim] assert all( [a == b for a, b in zip(policy_output[0].shape, expected_shape)]) expected_shape = [b, self.latent_dim] assert all([a == b for a, b in zip(task_z_out.shape, expected_shape)]) def test_get_action(self): """Test get_action.""" obs = np.random.rand(self.obs_dim) action, _ = 
self.module.get_action(obs) assert len(action) == self.action_dim def test_compute_kl_div(self): """Test compute_kl_div.""" self.module.sample_from_belief() context = torch.randn(1, 1, self.encoder_input_dim) self.module.infer_posterior(context) kl_div = self.module.compute_kl_div() assert kl_div != 0 def test_networks(self): """Test networks.""" nets = self.module.networks assert nets[0] and nets[1]
file_length: 5,359 | avg_line_length: 39.606061 | max_line_length: 79 | extension_type: py
repo: CSD-locomotion
file: CSD-locomotion-master/garaged/tests/garage/torch/policies/test_deterministic_mlp_policy.py
code:
import pickle import numpy as np import pytest import torch from torch import nn from garage.envs import GarageEnv from garage.torch.policies import DeterministicMLPPolicy # yapf: Disable from tests.fixtures.envs.dummy import DummyBoxEnv, DummyDictEnv # noqa: I202 # yapf: Enable class TestDeterministicMLPPolicies: # yapf: disable @pytest.mark.parametrize('hidden_sizes', [ (1, ), (2, ), (3, ), (1, 1), (2, 2)]) # yapf: enable def test_get_action(self, hidden_sizes): env_spec = GarageEnv(DummyBoxEnv()) obs_dim = env_spec.observation_space.flat_dim act_dim = env_spec.action_space.flat_dim obs = torch.ones([1, obs_dim], dtype=torch.float32) obs_np = np.ones([1, obs_dim], dtype=np.float32) policy = DeterministicMLPPolicy(env_spec=env_spec, hidden_nonlinearity=None, hidden_sizes=hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) expected_output = np.full([1, act_dim], fill_value=obs_dim * np.prod(hidden_sizes), dtype=np.float32) assert np.array_equal(policy.get_action(obs)[0], expected_output) assert np.array_equal(policy.get_action(obs_np)[0], expected_output) # yapf: disable @pytest.mark.parametrize('batch_size, hidden_sizes', [ (1, (1, )), (4, (2, )), (6, (3, )), (20, (1, 1)), (32, (2, 6, 8)), ]) # yapf: enable def test_get_actions(self, batch_size, hidden_sizes): env_spec = GarageEnv(DummyBoxEnv()) obs_dim = env_spec.observation_space.flat_dim act_dim = env_spec.action_space.flat_dim obs = torch.ones([batch_size, obs_dim], dtype=torch.float32) obs_np = np.ones([obs_dim], dtype=np.float32) obs_torch = torch.Tensor(obs_np) policy = DeterministicMLPPolicy(env_spec=env_spec, hidden_nonlinearity=None, hidden_sizes=hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) expected_output = np.full([batch_size, act_dim], fill_value=obs_dim * np.prod(hidden_sizes), dtype=np.float32) assert np.array_equal(policy.get_actions(obs)[0], expected_output) assert np.array_equal( policy.get_actions([obs_torch] * batch_size)[0], expected_output) assert np.array_equal( policy.get_actions([obs_np] * batch_size)[0], expected_output) # yapf: disable @pytest.mark.parametrize('batch_size, hidden_sizes', [ (1, (1, )), (4, (2, )), (10, (3, )), (25, (2, 4)), (34, (2, 6, 11)), ]) # yapf: enable def test_is_pickleable(self, batch_size, hidden_sizes): env_spec = GarageEnv(DummyBoxEnv()) obs_dim = env_spec.observation_space.flat_dim obs = torch.ones([batch_size, obs_dim], dtype=torch.float32) policy = DeterministicMLPPolicy(env_spec=env_spec, hidden_nonlinearity=None, hidden_sizes=hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) output1 = policy.get_actions(obs)[0] p = pickle.dumps(policy) policy_pickled = pickle.loads(p) output2 = policy_pickled.get_actions(obs)[0] assert np.array_equal(output1, output2) def test_get_action_dict_space(self): """Test if observations from dict obs spaces are properly flattened.""" env = GarageEnv( DummyDictEnv(obs_space_type='box', act_space_type='box')) policy = DeterministicMLPPolicy(env_spec=env.spec, hidden_nonlinearity=None, hidden_sizes=(1, ), hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) obs = env.reset() action, _ = policy.get_action(obs) assert env.action_space.shape == action.shape actions, _ = policy.get_actions(np.array([obs, obs])) for action in actions: assert env.action_space.shape == action.shape
file_length: 4,626 | avg_line_length: 39.234783 | max_line_length: 79 | extension_type: py
repo: CSD-locomotion
file: CSD-locomotion-master/garaged/tests/garage/torch/policies/test_gaussian_mlp_policy.py
code:
"""Test Gaussian MLP Policy.""" import pickle import numpy as np import pytest import torch from torch import nn from garage.envs import GarageEnv from garage.torch.policies import GaussianMLPPolicy # yapf: Disable from tests.fixtures.envs.dummy import DummyBoxEnv, DummyDictEnv # noqa: I202 # yapf: Enable class TestGaussianMLPPolicies: """Class for Testing Gaussian MlP Policy.""" # yapf: disable @pytest.mark.parametrize('hidden_sizes', [ (1, ), (2, ), (3, ), (1, 4), (3, 5)]) # yapf: enable def test_get_action(self, hidden_sizes): """Test get_action function.""" env_spec = GarageEnv(DummyBoxEnv()) obs_dim = env_spec.observation_space.flat_dim act_dim = env_spec.action_space.flat_dim obs = torch.ones(obs_dim, dtype=torch.float32) init_std = 2. policy = GaussianMLPPolicy(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) dist = policy(obs)[0] expected_mean = torch.full( (act_dim, ), obs_dim * (torch.Tensor(hidden_sizes).prod().item()), dtype=torch.float) expected_variance = init_std**2 action, prob = policy.get_action(obs) assert np.array_equal(prob['mean'], expected_mean.numpy()) assert dist.variance.equal( torch.full((act_dim, ), expected_variance, dtype=torch.float)) assert action.shape == (act_dim, ) # yapf: disable @pytest.mark.parametrize('hidden_sizes', [ (1, ), (2, ), (3, ), (1, 4), (3, 5)]) # yapf: enable def test_get_action_np(self, hidden_sizes): """Test get_action function with numpy inputs.""" env_spec = GarageEnv(DummyBoxEnv()) obs_dim = env_spec.observation_space.flat_dim act_dim = env_spec.action_space.flat_dim obs = np.ones(obs_dim, dtype=np.float32) init_std = 2. policy = GaussianMLPPolicy(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) dist = policy(torch.from_numpy(obs))[0] expected_mean = torch.full( (act_dim, ), obs_dim * (torch.Tensor(hidden_sizes).prod().item()), dtype=torch.float) expected_variance = init_std**2 action, prob = policy.get_action(obs) assert np.array_equal(prob['mean'], expected_mean.numpy()) assert dist.variance.equal( torch.full((act_dim, ), expected_variance, dtype=torch.float)) assert action.shape == (act_dim, ) # yapf: disable @pytest.mark.parametrize('batch_size, hidden_sizes', [ (1, (1, )), (5, (3, )), (8, (4, )), (15, (1, 2)), (30, (3, 4, 10)), ]) # yapf: enable def test_get_actions(self, batch_size, hidden_sizes): """Test get_actions function.""" env_spec = GarageEnv(DummyBoxEnv()) obs_dim = env_spec.observation_space.flat_dim act_dim = env_spec.action_space.flat_dim obs = torch.ones([batch_size, obs_dim], dtype=torch.float32) init_std = 2. 
policy = GaussianMLPPolicy(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) dist = policy(obs)[0] expected_mean = torch.full([batch_size, act_dim], obs_dim * (torch.Tensor(hidden_sizes).prod().item()), dtype=torch.float) expected_variance = init_std**2 action, prob = policy.get_actions(obs) assert np.array_equal(prob['mean'], expected_mean.numpy()) assert dist.variance.equal( torch.full((batch_size, act_dim), expected_variance, dtype=torch.float)) assert action.shape == (batch_size, act_dim) # yapf: disable @pytest.mark.parametrize('batch_size, hidden_sizes', [ (1, (1, )), (5, (3, )), (8, (4, )), (15, (1, 2)), (30, (3, 4, 10)), ]) # yapf: enable def test_get_actions_np(self, batch_size, hidden_sizes): """Test get_actions function with numpy inputs.""" env_spec = GarageEnv(DummyBoxEnv()) obs_dim = env_spec.observation_space.flat_dim act_dim = env_spec.action_space.flat_dim obs = np.ones((batch_size, obs_dim), dtype=np.float32) init_std = 2. policy = GaussianMLPPolicy(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) dist = policy(torch.from_numpy(obs))[0] expected_mean = torch.full([batch_size, act_dim], obs_dim * (torch.Tensor(hidden_sizes).prod().item()), dtype=torch.float) expected_variance = init_std**2 action, prob = policy.get_actions(obs) assert np.array_equal(prob['mean'], expected_mean.numpy()) assert dist.variance.equal( torch.full((batch_size, act_dim), expected_variance, dtype=torch.float)) assert action.shape == (batch_size, act_dim) # yapf: disable @pytest.mark.parametrize('batch_size, hidden_sizes', [ (1, (1, )), (6, (3, )), (11, (6, )), (25, (3, 5)), (34, (2, 10, 11)), ]) # yapf: enable def test_is_pickleable(self, batch_size, hidden_sizes): """Test if policy is pickleable.""" env_spec = GarageEnv(DummyBoxEnv()) obs_dim = env_spec.observation_space.flat_dim obs = torch.ones([batch_size, obs_dim], dtype=torch.float32) init_std = 2. policy = GaussianMLPPolicy(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) output1_action, output1_prob = policy.get_actions(obs) p = pickle.dumps(policy) policy_pickled = pickle.loads(p) output2_action, output2_prob = policy_pickled.get_actions(obs) assert np.array_equal(output1_prob['mean'], output2_prob['mean']) assert output1_action.shape == output2_action.shape def test_get_action_dict_space(self): """Test if observations from dict obs spaces are properly flattened.""" env = GarageEnv( DummyDictEnv(obs_space_type='box', act_space_type='box')) policy = GaussianMLPPolicy(env_spec=env.spec, hidden_nonlinearity=None, hidden_sizes=(1, ), hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) obs = env.reset() action, _ = policy.get_action(obs) assert env.action_space.shape == action.shape actions, _ = policy.get_actions(np.array([obs, obs])) for action in actions: assert env.action_space.shape == action.shape actions, _ = policy.get_actions(np.array([obs, obs])) for action in actions: assert env.action_space.shape == action.shape
file_length: 8,639 | avg_line_length: 37.744395 | max_line_length: 79 | extension_type: py
repo: CSD-locomotion
file: CSD-locomotion-master/garaged/tests/garage/torch/policies/test_tanh_gaussian_mlp_policy.py
code:
"""Tests for tanh gaussian mlp policy.""" import pickle import numpy as np import pytest import torch from torch import nn from garage.envs import GarageEnv from garage.torch.policies import TanhGaussianMLPPolicy # yapf: Disable from tests.fixtures.envs.dummy import DummyBoxEnv, DummyDictEnv # noqa: I202 # yapf: Enable class TestTanhGaussianMLPPolicy: """Tests for TanhGaussianMLPPolicy.""" # yapf: disable @pytest.mark.parametrize('hidden_sizes', [ (1, ), (2, ), (3, ), (1, 4), (3, 5)]) # yapf: enable def test_get_action(self, hidden_sizes): """Test Tanh Gaussian Policy get action function.""" env_spec = GarageEnv(DummyBoxEnv()) obs_dim = env_spec.observation_space.flat_dim act_dim = env_spec.action_space.flat_dim obs = torch.ones(obs_dim, dtype=torch.float32).unsqueeze(0) init_std = 2. policy = TanhGaussianMLPPolicy(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) expected_mean = torch.full((act_dim, ), 1.0, dtype=torch.float) action, prob = policy.get_action(obs) assert np.allclose(prob['mean'], expected_mean.numpy(), rtol=1e-3) assert action.squeeze(0).shape == (act_dim, ) # yapf: disable @pytest.mark.parametrize('hidden_sizes', [ (1, ), (2, ), (3, ), (1, 4), (3, 5)]) # yapf: enable def test_get_action_np(self, hidden_sizes): """Test Policy get action function with numpy inputs.""" env_spec = GarageEnv(DummyBoxEnv()) obs_dim = env_spec.observation_space.flat_dim act_dim = env_spec.action_space.flat_dim obs = np.ones((obs_dim, ), dtype=np.float32) init_std = 2. policy = TanhGaussianMLPPolicy(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) expected_mean = torch.full((act_dim, ), 1.0, dtype=torch.float) action, prob = policy.get_action(obs) assert np.allclose(prob['mean'], expected_mean.numpy(), rtol=1e-3) assert action.shape == (act_dim, ) # yapf: disable @pytest.mark.parametrize('batch_size, hidden_sizes', [ (1, (1, )), (5, (3, )), (8, (4, )), (15, (1, 2)), (30, (3, 4, 10)), ]) # yapf: enable def test_get_actions(self, batch_size, hidden_sizes): """Test Tanh Gaussian Policy get actions function.""" env_spec = GarageEnv(DummyBoxEnv()) obs_dim = env_spec.observation_space.flat_dim act_dim = env_spec.action_space.flat_dim obs = torch.ones([batch_size, obs_dim], dtype=torch.float32) init_std = 2. policy = TanhGaussianMLPPolicy(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) expected_mean = torch.full([batch_size, act_dim], 1.0, dtype=torch.float) action, prob = policy.get_actions(obs) assert np.allclose(prob['mean'], expected_mean.numpy(), rtol=1e-3) assert action.shape == (batch_size, act_dim) # yapf: disable @pytest.mark.parametrize('batch_size, hidden_sizes', [ (1, (1, )), (5, (3, )), (8, (4, )), (15, (1, 2)), (30, (3, 4, 10)), ]) # yapf: enable def test_get_actions_np(self, batch_size, hidden_sizes): """Test get actions with np.ndarray inputs.""" env_spec = GarageEnv(DummyBoxEnv()) obs_dim = env_spec.observation_space.flat_dim act_dim = env_spec.action_space.flat_dim obs = np.ones((batch_size, obs_dim), dtype=np.float32) init_std = 2. 
policy = TanhGaussianMLPPolicy(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) expected_mean = torch.full([batch_size, act_dim], 1.0, dtype=torch.float) action, prob = policy.get_actions(obs) assert np.allclose(prob['mean'], expected_mean.numpy(), rtol=1e-3) assert action.shape == (batch_size, act_dim) # yapf: disable @pytest.mark.parametrize('batch_size, hidden_sizes', [ (1, (1, )), (6, (3, )), (11, (6, )), (25, (3, 5)), (34, (2, 10, 11)), ]) # yapf: enable def test_is_pickleable(self, batch_size, hidden_sizes): """Test if policy is unchanged after pickling.""" env_spec = GarageEnv(DummyBoxEnv()) obs_dim = env_spec.observation_space.flat_dim obs = torch.ones([batch_size, obs_dim], dtype=torch.float32) init_std = 2. policy = TanhGaussianMLPPolicy(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) output1_action, output1_prob = policy.get_actions(obs) p = pickle.dumps(policy) policy_pickled = pickle.loads(p) output2_action, output2_prob = policy_pickled.get_actions(obs) assert np.allclose(output2_prob['mean'], output1_prob['mean'], rtol=1e-3) assert output1_action.shape == output2_action.shape def test_to(self): """Test Tanh Gaussian Policy can be moved to cpu.""" env_spec = GarageEnv(DummyBoxEnv()) init_std = 2. policy = TanhGaussianMLPPolicy(env_spec=env_spec, hidden_sizes=(1, ), init_std=init_std, hidden_nonlinearity=None, std_parameterization='exp', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) if torch.cuda.is_available(): policy.to(torch.device('cuda:0')) assert str(next(policy.parameters()).device) == 'cuda:0' else: policy.to(None) assert str(next(policy.parameters()).device) == 'cpu' def test_get_action_dict_space(self): """Test if observations from dict obs spaces are properly flattened.""" env = GarageEnv( DummyDictEnv(obs_space_type='box', act_space_type='box')) policy = TanhGaussianMLPPolicy(env_spec=env.spec, hidden_nonlinearity=None, hidden_sizes=(1, ), hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) obs = env.reset() action, _ = policy.get_action(obs) assert env.action_space.shape == action.shape actions, _ = policy.get_actions(np.array([obs, obs])) for action in actions: assert env.action_space.shape == action.shape
file_length: 8,472 | avg_line_length: 40.945545 | max_line_length: 79 | extension_type: py
repo: CSD-locomotion
file: CSD-locomotion-master/garaged/tests/garage/torch/q_functions/test_continuous_mlp_q_function.py
code:
import pickle import numpy as np import pytest import torch from torch import nn from garage.envs import GarageEnv from garage.torch.q_functions import ContinuousMLPQFunction from tests.fixtures.envs.dummy import DummyBoxEnv class TestContinuousNNQFunction: # yapf: disable @pytest.mark.parametrize('hidden_sizes', [ (1, ), (2, ), (3, ), (1, 1), (2, 2)]) # yapf: enable def test_forward(self, hidden_sizes): env_spec = GarageEnv(DummyBoxEnv()) obs_dim = env_spec.observation_space.flat_dim act_dim = env_spec.action_space.flat_dim obs = torch.ones(obs_dim, dtype=torch.float32).unsqueeze(0) act = torch.ones(act_dim, dtype=torch.float32).unsqueeze(0) qf = ContinuousMLPQFunction(env_spec=env_spec, hidden_nonlinearity=None, hidden_sizes=hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) output = qf(obs, act) expected_output = torch.full([1, 1], fill_value=(obs_dim + act_dim) * np.prod(hidden_sizes), dtype=torch.float32) assert torch.eq(output, expected_output) # yapf: disable @pytest.mark.parametrize('batch_size, hidden_sizes', [ (1, (1, )), (3, (2, )), (9, (3, )), (15, (1, 1)), (22, (2, 2)), ]) # yapf: enable def test_output_shape(self, batch_size, hidden_sizes): env_spec = GarageEnv(DummyBoxEnv()) obs_dim = env_spec.observation_space.flat_dim act_dim = env_spec.action_space.flat_dim obs = torch.ones(batch_size, obs_dim, dtype=torch.float32) act = torch.ones(batch_size, act_dim, dtype=torch.float32) qf = ContinuousMLPQFunction(env_spec=env_spec, hidden_nonlinearity=None, hidden_sizes=hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) output = qf(obs, act) assert output.shape == (batch_size, 1) # yapf: disable @pytest.mark.parametrize('hidden_sizes', [ (1, ), (2, ), (3, ), (1, 5), (2, 7, 10)]) # yapf: enable def test_is_pickleable(self, hidden_sizes): env_spec = GarageEnv(DummyBoxEnv()) obs_dim = env_spec.observation_space.flat_dim act_dim = env_spec.action_space.flat_dim obs = torch.ones(obs_dim, dtype=torch.float32).unsqueeze(0) act = torch.ones(act_dim, dtype=torch.float32).unsqueeze(0) qf = ContinuousMLPQFunction(env_spec=env_spec, hidden_nonlinearity=None, hidden_sizes=hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_) output1 = qf(obs, act) p = pickle.dumps(qf) qf_pickled = pickle.loads(p) output2 = qf_pickled(obs, act) assert torch.eq(output1, output2)
file_length: 3,242 | avg_line_length: 36.275862 | max_line_length: 69 | extension_type: py
repo: CSD-locomotion
file: CSD-locomotion-master/garaged/tests/integration_tests/__init__.py
code: (empty)
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: py
repo: CSD-locomotion
file: CSD-locomotion-master/garaged/tests/integration_tests/test_examples.py
code:
"""Integration tests to make sure scripts in examples work.""" import os import pathlib import subprocess import pytest EXAMPLES_ROOT_DIR = pathlib.Path('examples/') NON_ALGO_EXAMPLES = [ EXAMPLES_ROOT_DIR / 'torch/resume_training.py', EXAMPLES_ROOT_DIR / 'tf/resume_training.py', EXAMPLES_ROOT_DIR / 'sim_policy.py', EXAMPLES_ROOT_DIR / 'step_env.py', EXAMPLES_ROOT_DIR / 'step_dm_control_env.py', ] # yapf: disable LONG_RUNNING_EXAMPLES = [ EXAMPLES_ROOT_DIR / 'tf/ppo_memorize_digits.py', EXAMPLES_ROOT_DIR / 'tf/dqn_pong.py', EXAMPLES_ROOT_DIR / 'tf/her_ddpg_fetchreach.py', EXAMPLES_ROOT_DIR / 'tf/trpo_cubecrash.py', EXAMPLES_ROOT_DIR / 'torch/maml_ppo_half_cheetah_dir.py', EXAMPLES_ROOT_DIR / 'torch/maml_trpo_half_cheetah_dir.py', EXAMPLES_ROOT_DIR / 'torch/maml_vpg_half_cheetah_dir.py', EXAMPLES_ROOT_DIR / 'torch/maml_trpo_metaworld_ml1_push.py', EXAMPLES_ROOT_DIR / 'torch/maml_trpo_metaworld_ml10.py', EXAMPLES_ROOT_DIR / 'torch/maml_trpo_metaworld_ml45.py', EXAMPLES_ROOT_DIR / 'torch/pearl_half_cheetah_vel.py', EXAMPLES_ROOT_DIR / 'torch/pearl_metaworld_ml1_push.py', EXAMPLES_ROOT_DIR / 'torch/pearl_metaworld_ml10.py', EXAMPLES_ROOT_DIR / 'torch/pearl_metaworld_ml45.py', EXAMPLES_ROOT_DIR / 'tf/rl2_ppo_metaworld_ml1_push.py', EXAMPLES_ROOT_DIR / 'tf/rl2_ppo_metaworld_ml10.py', EXAMPLES_ROOT_DIR / 'tf/rl2_ppo_metaworld_ml10_meta_test.py', EXAMPLES_ROOT_DIR / 'tf/rl2_ppo_metaworld_ml45.py', EXAMPLES_ROOT_DIR / 'torch/mtsac_metaworld_mt10.py', EXAMPLES_ROOT_DIR / 'torch/mtsac_metaworld_mt50.py', EXAMPLES_ROOT_DIR / 'torch/mtsac_metaworld_ml1_pick_place.py', EXAMPLES_ROOT_DIR / 'torch/mtppo_metaworld_ml1_push.py', EXAMPLES_ROOT_DIR / 'torch/mtppo_metaworld_mt10.py', EXAMPLES_ROOT_DIR / 'torch/mtppo_metaworld_mt50.py', EXAMPLES_ROOT_DIR / 'torch/mttrpo_metaworld_ml1_push.py', EXAMPLES_ROOT_DIR / 'torch/mttrpo_metaworld_mt10.py', EXAMPLES_ROOT_DIR / 'torch/mttrpo_metaworld_mt50.py', EXAMPLES_ROOT_DIR / 'tf/te_ppo_point.py', EXAMPLES_ROOT_DIR / 'tf/te_ppo_metaworld_ml1_push.py', EXAMPLES_ROOT_DIR / 'tf/te_ppo_metaworld_mt10.py', EXAMPLES_ROOT_DIR / 'tf/te_ppo_metaworld_mt50.py', ] # yapf: enable def enumerate_algo_examples(): """Return a list of paths for all algo examples. Returns: List[str]: list of path strings """ exclude = NON_ALGO_EXAMPLES + LONG_RUNNING_EXAMPLES all_examples = EXAMPLES_ROOT_DIR.glob('**/*.py') return [str(e) for e in all_examples if e not in exclude] @pytest.mark.mujoco @pytest.mark.no_cover @pytest.mark.timeout(70) @pytest.mark.parametrize('filepath', enumerate_algo_examples()) def test_algo_examples(filepath): """Test algo examples. Args: filepath (str): path string of example """ env = os.environ.copy() env['GARAGE_EXAMPLE_TEST_N_EPOCHS'] = '1' # Don't use check=True, since that causes subprocess to throw an error # in case of failure before the assertion is evaluated assert subprocess.run([filepath], check=False, env=env).returncode == 0 @pytest.mark.no_cover @pytest.mark.timeout(180) def test_dqn_pong(): """Test tf/dqn_pong.py with reduced replay buffer size. This is to reduced memory consumption. """ env = os.environ.copy() env['GARAGE_EXAMPLE_TEST_N_EPOCHS'] = '1' assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'tf/dqn_pong.py'), '--buffer_size', '5', '--max_path_length', '5' ], check=False, env=env).returncode == 0 @pytest.mark.no_cover @pytest.mark.timeout(30) def test_ppo_memorize_digits(): """Test tf/ppo_memorize_digits.py with reduced batch size. This is to reduced memory consumption. 
""" env = os.environ.copy() env['GARAGE_EXAMPLE_TEST_N_EPOCHS'] = '1' command = [ str(EXAMPLES_ROOT_DIR / 'tf/ppo_memorize_digits.py'), '--batch_size', '4' ] assert subprocess.run(command, check=False, env=env).returncode == 0 @pytest.mark.no_cover @pytest.mark.timeout(40) def test_trpo_cubecrash(): """Test tf/trpo_cubecrash.py with reduced batch size. This is to reduced memory consumption. """ env = os.environ.copy() env['GARAGE_EXAMPLE_TEST_N_EPOCHS'] = '1' assert subprocess.run( [str(EXAMPLES_ROOT_DIR / 'tf/trpo_cubecrash.py'), '--batch_size', '4'], check=False, env=env).returncode == 0 @pytest.mark.no_cover @pytest.mark.timeout(10) def test_step_env(): """Test step_env.py.""" assert subprocess.run( [str(EXAMPLES_ROOT_DIR / 'step_env.py'), '--n_steps', '1'], check=False).returncode == 0 @pytest.mark.mujoco @pytest.mark.no_cover @pytest.mark.timeout(20) def test_step_dm_control_env(): """Test step_dm_control_env.py.""" assert subprocess.run( [str(EXAMPLES_ROOT_DIR / 'step_dm_control_env.py'), '--n_steps', '1'], check=False).returncode == 0 @pytest.mark.mujoco @pytest.mark.no_cover @pytest.mark.timeout(20) def test_maml_halfcheetah(): """Test maml_trpo_half_cheetah_dir.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'torch/maml_trpo_half_cheetah_dir.py'), '--epochs', '1', '--rollouts_per_task', '1', '--meta_batch_size', '1' ], check=False).returncode == 0 @pytest.mark.mujoco @pytest.mark.no_cover @pytest.mark.timeout(60) def test_pearl_half_cheetah_vel(): """Test pearl_half_cheetah_vel.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'torch/pearl_half_cheetah_vel.py'), '--num_epochs', '1', '--num_train_tasks', '5', '--num_test_tasks', '1', '--encoder_hidden_size', '2', '--net_size', '2', '--num_steps_per_epoch', '5', '--num_initial_steps', '5', '--num_steps_prior', '1', '--num_extra_rl_steps_posterior', '1', '--batch_size', '4', '--embedding_batch_size', '2', '--embedding_mini_batch_size', '2', '--max_path_length', '1' ], check=False).returncode == 0 @pytest.mark.mujoco @pytest.mark.no_cover @pytest.mark.timeout(60) def test_pearl_metaworld_ml1_push(): """Test pearl_ml1_push.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'torch/pearl_metaworld_ml1_push.py'), '--num_epochs', '1', '--num_train_tasks', '5', '--num_test_tasks', '1', '--encoder_hidden_size', '2', '--net_size', '2', '--num_steps_per_epoch', '5', '--num_initial_steps', '5', '--num_steps_prior', '1', '--num_extra_rl_steps_posterior', '1', '--batch_size', '4', '--embedding_batch_size', '2', '--embedding_mini_batch_size', '2', '--max_path_length', '1' ], check=False).returncode == 0 @pytest.mark.mujoco @pytest.mark.no_cover def test_pearl_metaworld_ml10(): """Test pearl_ml10.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'torch/pearl_metaworld_ml10.py'), '--num_epochs', '1', '--num_train_tasks', '1', '--num_test_tasks', '1', '--encoder_hidden_size', '1', '--net_size', '2', '--num_steps_per_epoch', '2', '--num_initial_steps', '2', '--num_steps_prior', '1', '--num_extra_rl_steps_posterior', '1', '--batch_size', '2', '--embedding_batch_size', '1', '--embedding_mini_batch_size', '1', '--max_path_length', '1' ], check=False).returncode == 0 @pytest.mark.skip('Temporarily skipped because of out-of-memory error') @pytest.mark.mujoco @pytest.mark.no_cover def test_pearl_metaworld_ml45(): """Test pearl_ml45.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'torch/pearl_metaworld_ml45.py'), '--num_epochs', '1', '--num_train_tasks', '1', '--num_test_tasks', '1', '--encoder_hidden_size', '1', '--net_size', 
'2', '--num_steps_per_epoch', '2', '--num_initial_steps', '2', '--num_steps_prior', '1', '--num_extra_rl_steps_posterior', '1', '--batch_size', '2', '--embedding_batch_size', '1', '--embedding_mini_batch_size', '1', '--max_path_length', '1' ], check=False).returncode == 0 @pytest.mark.nightly @pytest.mark.no_cover @pytest.mark.timeout(200) def test_maml_trpo_metaworld_ml10(): """Test maml_trpo_ml10.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'torch/maml_trpo_metaworld_ml10.py'), '--epochs', '1', '--rollouts_per_task', '1', '--meta_batch_size', '1' ], check=False).returncode == 0 @pytest.mark.mujoco @pytest.mark.no_cover @pytest.mark.timeout(30) def test_maml_trpo(): """Test maml_trpo_half_cheetah_dir.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'torch/maml_trpo_half_cheetah_dir.py'), '--epochs', '1', '--rollouts_per_task', '1', '--meta_batch_size', '1' ], check=False).returncode == 0 @pytest.mark.mujoco @pytest.mark.no_cover @pytest.mark.timeout(30) def test_maml_ppo(): """Test maml_ppo_half_cheetah_dir.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'torch/maml_ppo_half_cheetah_dir.py'), '--epochs', '1', '--rollouts_per_task', '1', '--meta_batch_size', '1' ], check=False).returncode == 0 @pytest.mark.mujoco @pytest.mark.no_cover @pytest.mark.timeout(30) def test_maml_vpg(): """Test maml_vpg_half_cheetah_dir.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'torch/maml_vpg_half_cheetah_dir.py'), '--epochs', '1', '--rollouts_per_task', '1', '--meta_batch_size', '1' ], check=False).returncode == 0 @pytest.mark.nightly @pytest.mark.no_cover @pytest.mark.timeout(120) def test_rl2_metaworld_ml1_push(): """Test rl2_ppo_ml1.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'tf/rl2_ppo_metaworld_ml1_push.py'), '--n_epochs', '1', '--episode_per_task', '1', '--meta_batch_size', '10' ], check=False).returncode == 0 @pytest.mark.nightly @pytest.mark.no_cover @pytest.mark.timeout(200) def test_rl2_ppo_metaworld_ml10(): """Test rl2_ppo_ml10.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'tf/rl2_ppo_metaworld_ml10.py'), '--n_epochs', '1', '--episode_per_task', '1', '--meta_batch_size', '10' ], check=False).returncode == 0 @pytest.mark.nightly @pytest.mark.no_cover @pytest.mark.timeout(200) def test_rl2_ppo_metaworld_ml10_meta_test(): """Test rl2_ppo_ml10_meta_test.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'tf/rl2_ppo_metaworld_ml10_meta_test.py'), '--n_epochs', '1', '--episode_per_task', '1', '--meta_batch_size', '10' ], check=False).returncode == 0 @pytest.mark.mujoco @pytest.mark.no_cover @pytest.mark.timeout(60) def test_mtppo_metaworld_ml1_push(): """Test ppo_metaworld_ml1_push.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'torch/mtppo_metaworld_ml1_push.py'), '--epochs', '1', '--batch_size', '1' ], check=False).returncode == 0 @pytest.mark.mujoco @pytest.mark.no_cover @pytest.mark.timeout(60) def test_mtppo_metaworld_mt10(): """Test ppo_metaworld_mt10.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'torch/mtppo_metaworld_mt10.py'), '--epochs', '1', '--batch_size', '1', '--n_worker', '1' ], check=False).returncode == 0 @pytest.mark.skip('Temporarily skipped because it takes too long time') @pytest.mark.mujoco @pytest.mark.no_cover @pytest.mark.timeout(300) def test_mtppo_metaworld_mt50(): """Test ppo_metaworld_mt50.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'torch/mtppo_metaworld_mt50.py'), '--epochs', '1', '--batch_size', '1', '--n_worker', '1' ], check=False).returncode == 0 @pytest.mark.mujoco 
@pytest.mark.no_cover @pytest.mark.timeout(60) def test_trpo_metaworld_ml1_push(): """Test trpo_metaworld_ml1_push.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'torch/mttrpo_metaworld_ml1_push.py'), '--epochs', '1', '--batch_size', '1' ], check=False).returncode == 0 @pytest.mark.mujoco @pytest.mark.no_cover @pytest.mark.timeout(60) def test_trpo_metaworld_mt10(): """Test trpo_metaworld_mt10.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'torch/mttrpo_metaworld_mt10.py'), '--epochs', '1', '--batch_size', '1', '--n_worker', '1' ], check=False).returncode == 0 @pytest.mark.skip('Temporarily skipped because it takes too long time') @pytest.mark.mujoco @pytest.mark.no_cover @pytest.mark.timeout(300) def test_trpo_metaworld_mt50(): """Test trpo_metaworld_mt50.py.""" assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'torch/mttrpo_metaworld_mt50.py'), '--epochs', '1', '--batch_size', '1', '--n_worker', '1' ], check=False).returncode == 0 @pytest.mark.no_cover @pytest.mark.timeout(60) def test_te_ppo_point(): """Test te_ppo_point.py.""" # yapf: disable assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'tf/te_ppo_point.py'), '--n_epochs', '1', '--batch_size_per_task', '100' ], check=False).returncode == 0 # yapf: enable @pytest.mark.no_cover @pytest.mark.mujoco @pytest.mark.timeout(100) def test_te_ppo_metaworld_ml1_push(): """Test te_ppo_point.py.""" # yapf: disable assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'tf/te_ppo_metaworld_ml1_push.py'), '--n_epochs', '1', '--batch_size_per_task', '100' ], check=False).returncode == 0 # yapf: enable @pytest.mark.no_cover @pytest.mark.mujoco @pytest.mark.timeout(300) def test_te_ppo_metaworld_mt10(): """Test te_ppo_point.py.""" # yapf: disable assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'tf/te_ppo_metaworld_mt10.py'), '--n_epochs', '1', '--batch_size_per_task', '100' ], check=False).returncode == 0 # yapf: enable @pytest.mark.skip('Temporarily skipped because it takes too long time') @pytest.mark.no_cover @pytest.mark.mujoco @pytest.mark.timeout(300) def test_te_ppo_metaworld_mt50(): """Test te_ppo_point.py.""" # yapf: disable assert subprocess.run([ str(EXAMPLES_ROOT_DIR / 'tf/te_ppo_metaworld_mt50.py'), '--n_epochs', '1', '--batch_size_per_task', '100' ], check=False).returncode == 0 # yapf: enable
14,662
32.249433
79
py
CSD-locomotion
CSD-locomotion-master/garaged/tests/integration_tests/test_sigint.py
from enum import IntEnum import itertools from multiprocessing.connection import Listener import os import signal import subprocess import psutil import pytest scripts = [ 'tests/fixtures/algos/nop_pendulum_instrumented.py', 'tests/fixtures/tf/trpo_pendulum_instrumented.py', ] class ExpLifecycle(IntEnum): """Messages sent from InstrumentedBatchPolopt to this test.""" START = 1 OBTAIN_SAMPLES = 2 PROCESS_SAMPLES = 3 OPTIMIZE_POLICY = 4 UPDATE_PLOT = 5 SHUTDOWN = 6 def interrupt_experiment(experiment_script, lifecycle_stage): """Interrupt the experiment and verify no children processes remain.""" args = ['python', experiment_script] # The pre-executed function setpgrp allows to create a process group # so signals are propagated to all the process in the group. proc = subprocess.Popen(args, preexec_fn=os.setpgrp) launcher_proc = psutil.Process(proc.pid) # This socket connects with the client in the algorithm, so we're # notified of the different stages in the experiment lifecycle. address = ('localhost', 6000) listener = Listener(address) conn = listener.accept() while True: msg = conn.recv() if msg == lifecycle_stage: # Notice that we're asking for the children of the launcher, not # the children of this test script, since there could be other # children processes attached to the process running this test # that are not part of the launcher. children = launcher_proc.children(recursive=True) # Remove the semaphore tracker from the list of children, since # we cannot stop its execution. for child in children: if any([ 'multiprocessing.semaphore_tracker' in cmd for cmd in child.cmdline() ]): children.remove(child) # We append the launcher to the list of children so later we can # check it has died. children.append(launcher_proc) pgrp = os.getpgid(proc.pid) os.killpg(pgrp, signal.SIGINT) conn.close() break listener.close() # Once the signal has been sent, all children should die _, alive = psutil.wait_procs(children, timeout=6) # If any, notify the zombie and sleeping processes and fail the test clean_exit = True error_msg = '' for child in alive: error_msg += ( str(child.as_dict(attrs=['pid', 'name', 'status', 'cmdline'])) + '\n') clean_exit = False error_msg = ("These processes didn't die during %s:\n" % (lifecycle_stage) + error_msg) for child in alive: os.kill(child.pid, signal.SIGINT) assert clean_exit, error_msg class TestSigInt: test_sigint_params = list(itertools.product(scripts, ExpLifecycle)) @pytest.mark.flaky @pytest.mark.parametrize('experiment_script, exp_stage', test_sigint_params) def test_sigint(self, experiment_script, exp_stage): """Interrupt the experiment in different stages of its lifecyle.""" interrupt_experiment(experiment_script, exp_stage)
3,273
32.070707
76
py
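The Listener above only covers the receiving half of the lifecycle protocol; the instrumented fixture scripts are expected to open the matching client connection and report each stage. The following is a minimal sketch of that sending side, assuming the fixture defines (or imports) the same ExpLifecycle enum and connects to the same localhost:6000 address — the exact call sites are illustrative, not the fixtures' actual code.

from multiprocessing.connection import Client

# Hypothetical client side inside an instrumented training script.
conn = Client(('localhost', 6000))       # pairs with the Listener in test_sigint.py
conn.send(ExpLifecycle.START)            # one message per lifecycle stage
# ... per training iteration ...
conn.send(ExpLifecycle.OBTAIN_SAMPLES)
conn.send(ExpLifecycle.PROCESS_SAMPLES)
conn.send(ExpLifecycle.OPTIMIZE_POLICY)
# ... before exiting ...
conn.send(ExpLifecycle.SHUTDOWN)
conn.close()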
CSD-locomotion
CSD-locomotion-master/garagei/__init__.py
from garagei._functions import log_performance_ex __all__ = [ 'log_performance_ex', ]
91
14.333333
49
py
CSD-locomotion
CSD-locomotion-master/garagei/_functions.py
import numpy as np import global_context import dowel_wrapper from garage.misc.tensor_utils import discount_cumsum from dowel import Histogram def log_performance_ex(itr, batch, discount, additional_records=None, additional_prefix=''): """Evaluate the performance of an algorithm on a batch of trajectories. Args: itr (int): Iteration number. batch (TrajectoryBatch): The trajectories to evaluate with. discount (float): Discount value, from algorithm's property. Returns: numpy.ndarray: Undiscounted returns. """ if additional_records is None: additional_records = {} returns = [] undiscounted_returns = [] completion = [] success = [] for trajectory in batch.split(): returns.append(discount_cumsum(trajectory.rewards, discount)) undiscounted_returns.append(sum(trajectory.rewards)) completion.append(float(trajectory.terminals.any())) if 'success' in trajectory.env_infos: success.append(float(trajectory.env_infos['success'].any())) average_discounted_return = np.mean([rtn[0] for rtn in returns]) prefix_tabular = global_context.get_metric_prefix() with dowel_wrapper.get_tabular().prefix(prefix_tabular): def _record(key, val, pre=''): dowel_wrapper.get_tabular().record( (pre + '/' if len(pre) > 0 else '') + key, val) def _record_histogram(key, val): dowel_wrapper.get_tabular('plot').record(key, Histogram(val)) _record('Iteration', itr) dowel_wrapper.get_tabular().record('Iteration', itr) _record('NumTrajs', len(returns)) max_undiscounted_returns = np.max(undiscounted_returns) min_undiscounted_returns = np.min(undiscounted_returns) _record('AverageDiscountedReturn', average_discounted_return) _record('AverageReturn', np.mean(undiscounted_returns)) _record('StdReturn', np.std(undiscounted_returns)) _record('MaxReturn', max_undiscounted_returns) _record('MinReturn', min_undiscounted_returns) _record('DiffMaxMinReturn', max_undiscounted_returns - min_undiscounted_returns) _record('CompletionRate', np.mean(completion)) if success: _record('SuccessRate', np.mean(success)) for key, val in additional_records.items(): is_scalar = True try: if len(val) > 1: is_scalar = False except TypeError: pass if is_scalar: _record(key, val, pre=additional_prefix) else: _record_histogram(key, val) return dict( undiscounted_returns=undiscounted_returns, discounted_returns=[rtn[0] for rtn in returns], )
2,830
35.294872
92
py
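log_performance_ex collapses each trajectory into a discounted return via discount_cumsum before averaging into AverageDiscountedReturn. The snippet below is a small self-contained illustration of that reduction, using a plain-NumPy re-implementation for clarity; the actual helper is the one imported from garage.misc.tensor_utils.

import numpy as np

def discount_cumsum_ref(rewards, discount):
    # out[t] = sum over k >= t of discount**(k - t) * rewards[k]
    out = np.zeros_like(rewards, dtype=float)
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + discount * running
        out[t] = running
    return out

returns = discount_cumsum_ref(np.array([1.0, 1.0, 1.0]), discount=0.9)
print(returns)        # [2.71 1.9  1.  ]
print(returns[0])     # the per-trajectory value averaged into AverageDiscountedReturn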
CSD-locomotion
CSD-locomotion-master/garagei/envs/__init__.py
0
0
0
py
CSD-locomotion
CSD-locomotion-master/garagei/envs/akro_wrapper.py
import akro from garage.envs import EnvSpec class AkroWrapperTrait: @property def spec(self): return EnvSpec(action_space=akro.from_gym(self.action_space), observation_space=akro.from_gym(self.observation_space))
255
22.272727
79
py
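AkroWrapperTrait only contributes the .spec property, converting the wrapped env's gym spaces into akro spaces inside a garage EnvSpec. A minimal sketch of mixing it into a gym.Wrapper follows; the CartPole environment id is purely illustrative. Listing the trait before gym.Wrapper matters, since that puts its spec property ahead of gym's own in the MRO — the same ordering the repo's wrappers use.

import gym
from garagei.envs.akro_wrapper import AkroWrapperTrait


class PassthroughEnv(AkroWrapperTrait, gym.Wrapper):
    """Adds nothing except the garage-style .spec supplied by the trait."""


env = PassthroughEnv(gym.make('CartPole-v1'))
print(env.spec.observation_space, env.spec.action_space)   # akro spaces built from the gym spaces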
CSD-locomotion
CSD-locomotion-master/garagei/envs/consistent_normalized_env.py
"""An environment wrapper that normalizes action, observation and reward.""" import akro import gym import gym.spaces import gym.spaces.utils import numpy as np from garage.envs import EnvSpec from garagei.envs.akro_wrapper import AkroWrapperTrait class ConsistentNormalizedEnv(AkroWrapperTrait, gym.Wrapper): def __init__( self, env, expected_action_scale=1., flatten_obs=True, normalize_obs=True, mean=None, std=None, ): super().__init__(env) self._normalize_obs = normalize_obs self._expected_action_scale = expected_action_scale self._flatten_obs = flatten_obs self._obs_mean = np.full(env.observation_space.shape, 0 if mean is None else mean) self._obs_var = np.full(env.observation_space.shape, 1 if std is None else std ** 2) self._cur_obs = None if isinstance(self.env.action_space, gym.spaces.Box): self.action_space = akro.Box(low=-self._expected_action_scale, high=self._expected_action_scale, shape=self.env.action_space.shape) else: self.action_space = self.env.action_space self.observation_space = self.env.observation_space def _apply_normalize_obs(self, obs): normalized_obs = (obs - self._obs_mean) / (np.sqrt(self._obs_var) + 1e-8) return normalized_obs def reset(self, **kwargs): obs = self.env.reset(**kwargs) self._cur_obs = obs if self._normalize_obs: obs = self._apply_normalize_obs(obs) if self._flatten_obs: obs = gym.spaces.utils.flatten(self.env.observation_space, obs) return obs def step(self, action, **kwargs): if isinstance(self.env.action_space, gym.spaces.Box): # rescale the action when the bounds are not inf lb, ub = self.env.action_space.low, self.env.action_space.high if np.all(lb != -np.inf) and np.all(ub != -np.inf): scaled_action = lb + (action + self._expected_action_scale) * ( 0.5 * (ub - lb) / self._expected_action_scale) scaled_action = np.clip(scaled_action, lb, ub) else: scaled_action = action else: scaled_action = action next_obs, reward, done, info = self.env.step(scaled_action, **kwargs) info['original_observations'] = self._cur_obs info['original_next_observations'] = next_obs self._cur_obs = next_obs if self._normalize_obs: next_obs = self._apply_normalize_obs(next_obs) if self._flatten_obs: next_obs = gym.spaces.utils.flatten(self.env.observation_space, next_obs) return next_obs, reward, done, info consistent_normalize = ConsistentNormalizedEnv
2,932
33.104651
92
py
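A usage sketch for consistent_normalize, assuming a MuJoCo environment is available (the environment id and the statistics are illustrative). With mean/std passed in, observations are shifted and scaled by those fixed values rather than a running estimate, and actions sampled from the wrapper's [-1, 1] box are rescaled back to the env's own bounds.

import gym
from garagei.envs.consistent_normalized_env import consistent_normalize

env = consistent_normalize(gym.make('Ant-v2'),
                           normalize_obs=True,
                           mean=0.0,                       # fixed, not running, statistics
                           std=2.0,
                           expected_action_scale=1.0)
obs = env.reset()                                          # already normalized
action = env.action_space.sample()                         # sampled from the [-1, 1] box
next_obs, reward, done, info = env.step(action)            # action rescaled to the env's bounds
raw_next_obs = info['original_next_observations']          # unnormalized observation kept in info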
CSD-locomotion
CSD-locomotion-master/garagei/envs/normalized_env_ex.py
"""An environment wrapper that normalizes action, observation and reward.""" import akro import gym import gym.spaces import gym.spaces.utils import numpy as np from garage.envs import EnvSpec from garagei.envs.akro_wrapper import AkroWrapperTrait class NormalizedEnvEx(AkroWrapperTrait, gym.Wrapper): """An environment wrapper for normalization. This wrapper normalizes action, and optionally observation and reward. Args: env (garage.envs.GarageEnv): An environment instance. scale_reward (float): Scale of environment reward. normalize_obs (bool): If True, normalize observation. normalize_reward (bool): If True, normalize reward. scale_reward is applied after normalization. expected_action_scale (float): Assuming action falls in the range of [-expected_action_scale, expected_action_scale] when normalize it. flatten_obs (bool): Flatten observation if True. obs_alpha (float): Update rate of moving average when estimating the mean and variance of observations. reward_alpha (float): Update rate of moving average when estimating the mean and variance of rewards. """ def __init__( self, env, scale_reward=1., normalize_obs=False, normalize_reward=False, expected_action_scale=1., flatten_obs=True, obs_alpha=0.001, reward_alpha=0.001, initial_obs_mean=None, initial_obs_var=None, action_drop_type='state', action_drop_prob=None, ): super().__init__(env) self._scale_reward = scale_reward self._normalize_obs = normalize_obs self._normalize_reward = normalize_reward self._expected_action_scale = expected_action_scale self._flatten_obs = flatten_obs self._obs_alpha = obs_alpha flat_obs_dim = gym.spaces.utils.flatdim(env.observation_space) if initial_obs_mean is not None: self._obs_mean = initial_obs_mean else: self._obs_mean = np.zeros(flat_obs_dim) if initial_obs_var is not None: self._obs_var = initial_obs_var else: self._obs_var = np.ones(flat_obs_dim) self._reward_alpha = reward_alpha self._reward_mean = 0. self._reward_var = 1. 
self._action_drop_type = action_drop_type assert self._action_drop_type in ['state', 'traj'] self._action_drop_prob = action_drop_prob if self._action_drop_prob is not None and self._action_drop_type == 'traj': self._curr_traj_action_drop_mask = self._sample_action_drop_mask() if isinstance(self.env.action_space, gym.spaces.Box): self.action_space = akro.Box(low=-self._expected_action_scale, high=self._expected_action_scale, shape=self.env.action_space.shape) else: self.action_space = self.env.action_space self.observation_space = self.env.observation_space self.do_update = True #@property #def spec(self): # return EnvSpec(action_space=self.action_space, # observation_space=self.observation_space) def _update_obs_estimate(self, obs): flat_obs = gym.spaces.utils.flatten(self.env.observation_space, obs) # self._obs_mean = ( # 1 - self._obs_alpha) * self._obs_mean + self._obs_alpha * flat_obs # self._obs_var = ( # 1 - self._obs_alpha) * self._obs_var + self._obs_alpha * np.square( # flat_obs - self._obs_mean) # https://en.wikipedia.org/wiki/Moving_average#Exponentially_weighted_moving_variance_and_standard_deviation #delta = obs - self._obs_mean delta = flat_obs - self._obs_mean self._obs_mean = self._obs_mean + self._obs_alpha * delta self._obs_var = (1 - self._obs_alpha) * (self._obs_var + self._obs_alpha * delta ** 2) def _update_reward_estimate(self, reward): delta = reward - self._reward_mean self._reward_mean = self._reward_mean + self._reward_alpha * delta self._reward_var = (1 - self._reward_alpha) * (self._reward_var + self._reward_alpha * delta ** 2) def _apply_normalize_obs(self, obs): """Compute normalized observation. Args: obs (np.ndarray): Observation. Returns: np.ndarray: Normalized observation. """ if self.do_update: self._update_obs_estimate(obs) flat_obs = gym.spaces.utils.flatten(self.env.observation_space, obs) normalized_obs = (flat_obs - self._obs_mean) / (np.sqrt(self._obs_var) + 1e-8) if not self._flatten_obs: normalized_obs = gym.spaces.utils.unflatten( self.env.observation_space, normalized_obs) return normalized_obs def _apply_normalize_reward(self, reward): """Compute normalized reward. Args: reward (float): Reward. Returns: float: Normalized reward. """ self._update_reward_estimate(reward) return reward / (np.sqrt(self._reward_var) + 1e-8) def reset(self, **kwargs): """Reset environment. Args: **kwargs: Additional parameters for reset. Returns: tuple: * observation (np.ndarray): The observation of the environment. * reward (float): The reward acquired at this time step. * done (boolean): Whether the environment was completed at this time step. * infos (dict): Environment-dependent additional information. """ ret = self.env.reset(**kwargs) self._cur_obs = ret if self._normalize_obs: ret = self._apply_normalize_obs(ret) return ret def step(self, action, **kwargs): """Feed environment with one step of action and get result. Args: action (np.ndarray): An action fed to the environment. Returns: tuple: * observation (np.ndarray): The observation of the environment. * reward (float): The reward acquired at this time step. * done (boolean): Whether the environment was completed at this time step. * infos (dict): Environment-dependent additional information. 
""" if isinstance(self.env.action_space, gym.spaces.Box): # rescale the action when the bounds are not inf lb, ub = self.env.action_space.low, self.env.action_space.high if np.all(lb != -np.inf) and np.all(ub != -np.inf): scaled_action = lb + (action + self._expected_action_scale) * ( 0.5 * (ub - lb) / self._expected_action_scale) scaled_action = np.clip(scaled_action, lb, ub) else: scaled_action = action else: scaled_action = action next_obs, reward, done, info = self.env.step(scaled_action, **kwargs) info['original_observations'] = self._cur_obs info['original_next_observations'] = next_obs self._cur_obs = next_obs if self._normalize_obs: next_obs = self._apply_normalize_obs(next_obs) if self._normalize_reward: reward = self._apply_normalize_reward(reward) return next_obs, reward * self._scale_reward, done, info normalize_ex = NormalizedEnvEx
7,692
36.34466
116
py
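_update_obs_estimate and _update_reward_estimate implement the exponentially weighted moving mean/variance update from the linked Wikipedia article. A standalone numeric sketch of that rule, showing it tracking the statistics of a stationary stream:

import numpy as np

def ewm_update(mean, var, x, alpha):
    # Same update as NormalizedEnvEx._update_obs_estimate / _update_reward_estimate.
    delta = x - mean
    mean = mean + alpha * delta
    var = (1 - alpha) * (var + alpha * delta ** 2)
    return mean, var

mean, var = np.zeros(3), np.ones(3)
rng = np.random.default_rng(0)
for _ in range(10000):
    mean, var = ewm_update(mean, var, rng.normal(5.0, 2.0, size=3), alpha=0.001)
print(mean, np.sqrt(var))   # drifts toward roughly [5, 5, 5] and [2, 2, 2]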
CSD-locomotion
CSD-locomotion-master/garagei/experiment/__init__.py
0
0
0
py
CSD-locomotion
CSD-locomotion-master/garagei/experiment/option_local_runner.py
import atexit import copy import os import signal import time import torch from dowel import logger, tabular, TextOutput, CsvOutput, StdOutput import numpy as np import psutil from garage.experiment import LocalRunner from garage.experiment.deterministic import get_seed, set_seed from garage.experiment.local_runner import SetupArgs, NotSetupError from garage.sampler import WorkerFactory from garage.sampler.sampler_deprecated import BaseSampler import global_context import dowel_wrapper from garagei.sampler.option_local_sampler import OptionLocalSampler from garagei.sampler.option_worker import OptionWorker class OptionLocalRunner(LocalRunner): def setup(self, algo, env, sampler_cls=None, sampler_args=None, n_workers=psutil.cpu_count(logical=False), worker_class=None, worker_args=None, ): self._algo = algo self._env = env self._n_workers = {} self._worker_class = worker_class if sampler_args is None: sampler_args = {} if sampler_cls is None: sampler_cls = getattr(algo, 'sampler_cls', None) if worker_class is None: worker_class = getattr(algo, 'worker_cls', OptionWorker) if worker_args is None: worker_args = {} self._worker_args = worker_args if sampler_cls is None: self._sampler = None else: self._sampler = {} for key, policy in self._algo.policy.items(): sampler_key = key cur_worker_args = dict(worker_args, sampler_key=sampler_key) self._sampler[sampler_key] = self.make_sampler( sampler_cls, sampler_args=sampler_args, n_workers=n_workers, worker_class=worker_class, worker_args=cur_worker_args, policy=policy ) sampler_key = f'local_{key}' cur_worker_args = dict(worker_args, sampler_key=sampler_key) self._n_workers[key] = n_workers self._sampler[sampler_key] = self.make_local_sampler( policy=policy, worker_args=cur_worker_args, ) self._n_workers[sampler_key] = 1 self.sampler_keys = list(self._sampler.keys()) self._has_setup = True self._setup_args = SetupArgs(sampler_cls=sampler_cls, sampler_args=sampler_args, seed=get_seed()) self._hanging_env_update = {} self._hanging_worker_update = {} for key in self.sampler_keys: self._hanging_env_update[key] = None self._hanging_worker_update[key] = None def save(self, epoch, new_save=False, pt_save=False, pkl_update=False): """Save snapshot of current batch. Args: epoch (int): Epoch. Raises: NotSetupError: if save() is called before the runner is set up. 
""" if not self._has_setup: raise NotSetupError('Use setup() to setup runner before saving.') logger.log('Saving snapshot...') params = dict() # Save arguments params['setup_args'] = self._setup_args params['train_args'] = self._train_args params['stats'] = self._stats # Save states params['env'] = self._env params['algo'] = self._algo params['n_workers'] = self._n_workers params['worker_class'] = self._worker_class params['worker_args'] = self._worker_args if new_save and epoch != 0: prev_snapshot_mode = self._snapshotter._snapshot_mode self._snapshotter._snapshot_mode = 'all' self._snapshotter.save_itr_params(epoch, params) self._snapshotter._snapshot_mode = prev_snapshot_mode file_name = os.path.join(self._snapshotter._snapshot_dir, f'option_policy{epoch}.pt') torch.save({ 'discrete': self._algo.discrete, 'dim_option': self._algo.dim_option, 'normalizer_obs_mean': self._algo._cur_obs_mean, 'normalizer_obs_std': self._algo._cur_obs_std, 'policy': self._algo.option_policy, }, file_name) file_name = os.path.join(self._snapshotter._snapshot_dir, f'traj_encoder{epoch}.pt') torch.save({ 'discrete': self._algo.discrete, 'dim_option': self._algo.dim_option, 'normalizer_obs_mean': self._algo._cur_obs_mean, 'normalizer_obs_std': self._algo._cur_obs_std, 'traj_encoder': self._algo.traj_encoder, }, file_name) if pt_save and epoch != 0: file_name = os.path.join(self._snapshotter._snapshot_dir, f'option_policy{epoch}.pt') torch.save({ 'discrete': self._algo.discrete, 'dim_option': self._algo.dim_option, 'normalizer_obs_mean': self._algo._cur_obs_mean, 'normalizer_obs_std': self._algo._cur_obs_std, 'policy': self._algo.option_policy, }, file_name) if pkl_update: self._snapshotter.save_itr_params(epoch, params) logger.log('Saved') def restore(self, from_dir, from_epoch='last', post_restore_handler=None): """Restore experiment from snapshot. Args: from_dir (str): Directory of the pickle file to resume experiment from. from_epoch (str or int): The epoch to restore from. Can be 'first', 'last' or a number. Not applicable when snapshot_mode='last'. Returns: TrainArgs: Arguments for train(). 
""" saved = self._snapshotter.load(from_dir, from_epoch) self._setup_args = saved['setup_args'] self._train_args = saved['train_args'] self._stats = saved['stats'] set_seed(self._setup_args.seed) if post_restore_handler is not None: post_restore_handler(saved) self.setup(env=saved['env'], algo=saved['algo'], sampler_cls=self._setup_args.sampler_cls, sampler_args=self._setup_args.sampler_args, n_workers=saved['n_workers']['option_policy'], ) n_epochs = self._train_args.n_epochs last_epoch = self._stats.total_epoch last_itr = self._stats.total_itr total_env_steps = self._stats.total_env_steps batch_size = self._train_args.batch_size store_paths = self._train_args.store_paths pause_for_plot = self._train_args.pause_for_plot fmt = '{:<20} {:<15}' logger.log('Restore from snapshot saved in %s' % self._snapshotter.snapshot_dir) logger.log(fmt.format('-- Train Args --', '-- Value --')) logger.log(fmt.format('n_epochs', n_epochs)) logger.log(fmt.format('last_epoch', last_epoch)) logger.log(fmt.format('batch_size', batch_size)) logger.log(fmt.format('store_paths', store_paths)) logger.log(fmt.format('pause_for_plot', pause_for_plot)) logger.log(fmt.format('-- Stats --', '-- Value --')) logger.log(fmt.format('last_itr', last_itr)) logger.log(fmt.format('total_env_steps', total_env_steps)) self._train_args.start_epoch = last_epoch return copy.copy(self._train_args) def _start_worker(self): """Start Plotter and Sampler workers.""" for sampler in self._sampler.values(): if isinstance(sampler, BaseSampler): sampler.start_worker() if self._plot: raise NotImplementedError() self._shutdown_worker_called = False atexit.register(self._shutdown_worker) for sig in [signal.SIGINT, signal.SIGTERM]: signal.signal(sig, self._shutdown_worker_on_signal) def _shutdown_worker(self): """Shutdown Plotter and Sampler workers.""" if self._shutdown_worker_called: return for sampler in self._sampler.values(): if sampler is not None: sampler.shutdown_worker() if self._plot: raise NotImplementedError() self._shutdown_worker_called = True def _shutdown_worker_on_signal(self, signum, frame): self._shutdown_worker() def make_sampler(self, sampler_cls, *, seed=None, n_workers=psutil.cpu_count(logical=False), max_path_length=None, worker_class=OptionWorker, sampler_args=None, worker_args=None, policy=None): if max_path_length is None: if hasattr(self._algo, 'max_path_length'): max_path_length = self._algo.max_path_length else: raise ValueError('If `sampler_cls` is specified in ' 'runner.setup, the algorithm must have ' 'a `max_path_length` field.') if seed is None: seed = get_seed() if sampler_args is None: sampler_args = {} if worker_args is None: worker_args = {} agents = policy if issubclass(sampler_cls, BaseSampler): raise NotImplementedError('BaseSampler does not support obtain_exact_trajectories()') else: return sampler_cls.from_worker_factory(WorkerFactory( seed=seed, max_path_length=max_path_length, n_workers=n_workers, worker_class=worker_class, worker_args=worker_args), agents=agents, envs=self._env, **sampler_args) def make_local_sampler(self, policy, worker_args): max_path_length = self._algo.max_path_length seed = get_seed() agents = copy.deepcopy(policy) return OptionLocalSampler.from_worker_factory(WorkerFactory( seed=seed, max_path_length=max_path_length, n_workers=1, worker_class=OptionWorker, worker_args=worker_args), agents=agents, envs=self._env) def set_hanging_env_update(self, env_update, sampler_keys): for k, v in env_update.items(): setattr(self._env, k, v) for key in sampler_keys: 
self._hanging_env_update[key] = dict(env_update) def set_hanging_worker_update(self, worker_update, sampler_keys): for key in sampler_keys: self._hanging_worker_update[key] = dict(worker_update) def obtain_exact_trajectories(self, itr, sampler_key, batch_size=None, agent_update=None, env_update=None, worker_update=None, extras=None, max_path_length_override=None, get_attrs=None, update_normalized_env_ex=None, update_stats=True): if batch_size is None and self._train_args.batch_size is None: raise ValueError('Runner was not initialized with `batch_size`. ' 'Either provide `batch_size` to runner.train, ' ' or pass `batch_size` to runner.obtain_samples.') sampler = self._sampler[sampler_key] if isinstance(sampler, BaseSampler): raise NotImplementedError('BaseSampler does not support obtain_exact_trajectories()') else: if agent_update is None: agent_update = self._algo.policy[sampler_key].get_param_values() if self._hanging_env_update[sampler_key] is not None and env_update is not None: if isinstance(self._hanging_env_update[sampler_key], dict) and isinstance(env_update, dict): self._hanging_env_update[sampler_key].update(env_update) env_update = None else: raise NotImplementedError() if self._hanging_worker_update[sampler_key] is not None and worker_update is not None: if isinstance(self._hanging_worker_update[sampler_key], dict) and isinstance(worker_update, dict): self._hanging_worker_update[sampler_key].update(worker_update) worker_update = None else: raise NotImplementedError() if self._hanging_env_update[sampler_key] is not None: env_update = self._hanging_env_update[sampler_key] self._hanging_env_update[sampler_key] = None if self._hanging_worker_update[sampler_key] is not None: worker_update = self._hanging_worker_update[sampler_key] self._hanging_worker_update[sampler_key] = None batch_size = (batch_size or self._train_args.batch_size) n_traj_per_workers = [ batch_size // self._n_workers[sampler_key] + int(i < (batch_size % self._n_workers[sampler_key])) for i in range(self._n_workers[sampler_key]) ] assert batch_size == sum(n_traj_per_workers) if env_update is None: env_update = {} if worker_update is None: worker_update = {} worker_update.update(dict( _max_path_length_override=max_path_length_override, _cur_extras=None, _cur_extra_idx=None, )) if extras is not None: assert batch_size == len(extras) worker_extras_list = np.array_split(extras, self._n_workers[sampler_key]) worker_update = [ dict( worker_update, _cur_extras=worker_extras, _cur_extra_idx=-1, ) for worker_extras in worker_extras_list ] if update_normalized_env_ex is not None: assert isinstance(env_update, dict) env_update.update(dict( do_update=update_normalized_env_ex, )) paths, infos = sampler.obtain_exact_trajectories( n_traj_per_workers, agent_update=agent_update, env_update=env_update, worker_update=worker_update, get_attrs=get_attrs, ) paths = paths.to_trajectory_list() if update_stats: # XXX: Assume that env_infos always contains 2D coordinates. self._stats.total_env_steps += sum([ (len(p['env_infos']['coordinates'].reshape(-1, 2)) if p['env_infos']['coordinates'].dtype != np.object else sum(len(l) for l in p['env_infos']['coordinates'])) for p in paths ]) return paths, infos def step_epochs(self, log_period=1, full_tb_epochs=None, tb_period=None, pt_save_period=None, pkl_update_period=None, new_save_period=None): """Step through each epoch. This function returns a magic generator. When iterated through, this generator automatically performs services such as snapshotting and log management. 
It is used inside train() in each algorithm. The generator initializes two variables: `self.step_itr` and `self.step_path`. To use the generator, these two have to be updated manually in each epoch, as the example shows below. Yields: int: The next training epoch. Examples: for epoch in runner.step_epochs(): runner.step_path = runner.obtain_samples(...) self.train_once(...) runner.step_itr += 1 """ self._start_worker() self._start_time = time.time() self.step_itr = self._stats.total_itr self.step_path = None # Used by integration tests to ensure examples can run one epoch. n_epochs = int( os.environ.get('GARAGE_EXAMPLE_TEST_N_EPOCHS', self._train_args.n_epochs)) logger.log('Obtaining samples...') for epoch in range(self._train_args.start_epoch, n_epochs): self._itr_start_time = time.time() with logger.prefix('epoch #%d | ' % epoch): save_path = (self.step_path if self._train_args.store_paths else None) self._stats.last_path = save_path self._stats.total_epoch = epoch self._stats.total_itr = self.step_itr new_save = (new_save_period != 0 and self.step_itr % new_save_period == 0) pt_save = (pt_save_period != 0 and self.step_itr % pt_save_period == 0) pkl_update = (pkl_update_period != 0 and self.step_itr % pkl_update_period == 0) if new_save or pt_save or pkl_update: self.save(epoch, new_save=new_save, pt_save=pt_save, pkl_update=pkl_update) yield epoch if self.enable_logging: if self.step_itr % log_period == 0: self.log_diagnostics(self._train_args.pause_for_plot) if full_tb_epochs is None or tb_period is None: logger.dump_all(self.step_itr) else: if self.step_itr <= full_tb_epochs or (tb_period != 0 and self.step_itr % tb_period == 0): logger.dump_all(self.step_itr) else: logger.dump_output_type((TextOutput, CsvOutput, StdOutput), self.step_itr) tabular.clear() def log_diagnostics(self, pause_for_plot=False): logger.log('Time %.2f s' % (time.time() - self._start_time)) epoch_time = (time.time() - self._itr_start_time) logger.log('EpochTime %.2f s' % epoch_time) tabular.record('TotalEnvSteps', self._stats.total_env_steps) tabular.record('TimeEpoch', epoch_time) logger.log(tabular) def eval_log_diagnostics(self): if self.enable_logging: dowel_wrapper.get_tabular('eval').record('TotalEnvSteps', self._stats.total_env_steps) dowel_wrapper.get_logger('eval').log(dowel_wrapper.get_tabular('eval')) dowel_wrapper.get_logger('eval').dump_all(self.step_itr) dowel_wrapper.get_tabular('eval').clear() def plot_log_diagnostics(self): if self.enable_logging: dowel_wrapper.get_tabular('plot').record('TotalEnvSteps', self._stats.total_env_steps) dowel_wrapper.get_logger('plot').log(dowel_wrapper.get_tabular('plot')) dowel_wrapper.get_logger('plot').dump_all(self.step_itr) dowel_wrapper.get_tabular('plot').clear()
19,606
39.678423
144
py
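A minimal wiring sketch for OptionLocalRunner, with heavy assumptions: `algo` is one of the repo's IOD-style algorithms whose .policy attribute is a dict of named policies and which defines max_path_length and train(runner); `env` is an (optionally normalized) gym environment; snapshot_config is the usual garage snapshot configuration object. None of these objects are constructed here.

from garagei.experiment.option_local_runner import OptionLocalRunner
from garagei.sampler.option_multiprocessing_sampler import OptionMultiprocessingSampler

runner = OptionLocalRunner(snapshot_config)            # garage-style snapshot config
runner.setup(algo,                                     # algo.policy: dict of named policies
             env,
             sampler_cls=OptionMultiprocessingSampler,
             sampler_args=dict(n_thread=1),
             n_workers=4)
runner.train(n_epochs=1000, batch_size=8)              # batch_size = trajectories per sampler call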
CSD-locomotion
CSD-locomotion-master/garagei/np/__init__.py
0
0
0
py
CSD-locomotion
CSD-locomotion-master/garagei/np/optimizers/__init__.py
0
0
0
py
CSD-locomotion
CSD-locomotion-master/garagei/np/optimizers/dict_minibatch_dataset.py
import numpy as np


class DictBatchDataset:
    """Minibatch dataset for optimizer inputs given as a dict of equally sized arrays."""
    def __init__(self, inputs, batch_size):
        self._inputs = inputs
        self._batch_size = batch_size
        self._size = list(self._inputs.values())[0].shape[0]
        if batch_size is not None:
            self._ids = np.arange(self._size)
            self.update()

    @property
    def number_batches(self):
        if self._batch_size is None:
            return 1
        return int(np.ceil(self._size * 1.0 / self._batch_size))

    def iterate(self, update=True):
        if self._batch_size is None:
            yield self._inputs
        else:
            if update:
                self.update()
            for itr in range(self.number_batches):
                batch_start = itr * self._batch_size
                batch_end = (itr + 1) * self._batch_size
                batch_ids = self._ids[batch_start:batch_end]
                batch = {
                    k: v[batch_ids]
                    for k, v in self._inputs.items()
                }
                yield batch

    def update(self):
        np.random.shuffle(self._ids)
1,155
29.421053
64
py
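A quick usage example for DictBatchDataset: every value shares the leading batch dimension, and each yielded minibatch indexes all keys with the same shuffled ids, so rows stay aligned across keys.

import numpy as np
from garagei.np.optimizers.dict_minibatch_dataset import DictBatchDataset

data = {
    'obs': np.arange(20, dtype=np.float32).reshape(10, 2),
    'actions': np.arange(10),
}
dataset = DictBatchDataset(data, batch_size=4)
print(dataset.number_batches)                            # ceil(10 / 4) == 3
for batch in dataset.iterate():                          # rows are shuffled, keys stay aligned
    print(batch['obs'].shape, batch['actions'].shape)    # (4, 2) (4,) ... last batch (2, 2) (2,)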
CSD-locomotion
CSD-locomotion-master/garagei/sampler/__init__.py
0
0
0
py
CSD-locomotion
CSD-locomotion-master/garagei/sampler/option_local_sampler.py
"""Sampler that runs workers in the main process.""" import copy from collections import defaultdict from garage import TrajectoryBatch from garage.sampler import LocalSampler class OptionLocalSampler(LocalSampler): def _update_workers(self, agent_update, env_update, worker_update): """Apply updates to the workers. Args: agent_update(object): Value which will be passed into the `agent_update_fn` before doing rollouts. If a list is passed in, it must have length exactly `factory.n_workers`, and will be spread across the workers. env_update(object): Value which will be passed into the `env_update_fn` before doing rollouts. If a list is passed in, it must have length exactly `factory.n_workers`, and will be spread across the workers. """ agent_updates = self._factory.prepare_worker_messages(agent_update) env_updates = self._factory.prepare_worker_messages( env_update, preprocess=copy.deepcopy) worker_updates = self._factory.prepare_worker_messages(worker_update) for worker, agent_up, env_up, worker_up in zip(self._workers, agent_updates, env_updates, worker_updates): worker.update_agent(agent_up) worker.update_env(env_up) worker.update_worker(worker_up) def obtain_exact_trajectories(self, n_traj_per_worker, agent_update, env_update=None, worker_update=None, get_attrs=None): self._update_workers(agent_update, env_update, worker_update) batches = [] for worker, n_traj in zip(self._workers, n_traj_per_worker): for _ in range(n_traj): batch = worker.rollout() batches.append(batch) infos = defaultdict(list) if get_attrs is not None: for i in range(self._factory.n_workers): contents = self._workers[i].get_attrs(get_attrs) for k, v in contents.items(): infos[k].append(v) return TrajectoryBatch.concatenate(*batches), infos
2,352
41.781818
84
py
CSD-locomotion
CSD-locomotion-master/garagei/sampler/option_multiprocessing_sampler.py
"""A multiprocessing sampler which avoids waiting as much as possible.""" import itertools import torch.multiprocessing as mp import multiprocessing.dummy as mpd from collections import defaultdict import click import cloudpickle import matplotlib import setproctitle from garage import TrajectoryBatch from garage.sampler import MultiprocessingSampler DEBUG = False # DEBUG = True if DEBUG: matplotlib.use('Agg') class OptionMultiprocessingSampler(MultiprocessingSampler): def __init__(self, worker_factory, agents, envs, n_thread): # pylint: disable=super-init-not-called self._factory = worker_factory self._agents = self._factory.prepare_worker_messages( agents, cloudpickle.dumps) self._envs = self._factory.prepare_worker_messages(envs) self._n_thread = n_thread if not DEBUG: self._to_sampler = mp.Queue() self._to_worker = [mp.Queue() for _ in range(self._factory.n_workers)] else: self._to_sampler = mpd.Queue() self._to_worker = [mpd.Queue() for _ in range(self._factory.n_workers)] if not DEBUG: # If we crash from an exception, with full queues, we would rather not # hang forever, so we would like the process to close without flushing # the queues. # That's what cancel_join_thread does. for q in self._to_worker: q.cancel_join_thread() if not DEBUG: self._workers = [ mp.Process(target=run_worker, kwargs=dict( factory=self._factory, to_sampler=self._to_sampler, to_worker=self._to_worker[worker_number], worker_number=worker_number, agent=self._agents[worker_number], env=self._envs[worker_number], n_thread=self._n_thread, ), daemon=False ) for worker_number in range(self._factory.n_workers) ] else: self._workers = [ mpd.Process(target=run_worker, kwargs=dict( factory=self._factory, to_sampler=self._to_sampler, to_worker=self._to_worker[worker_number], worker_number=worker_number, agent=self._agents[worker_number], env=self._envs[worker_number], n_thread=self._n_thread, ) ) for worker_number in range(self._factory.n_workers) ] self._agent_version = 0 for w in self._workers: w.start() @classmethod def from_worker_factory(cls, worker_factory, agents, envs, **kwargs): return cls(worker_factory, agents, envs, **kwargs) def obtain_exact_trajectories(self, n_traj_per_workers, agent_update, env_update=None, worker_update=None, get_attrs=None): """Same as the parent method except that n_traj_per_workers can be either an integer or a list.""" if isinstance(n_traj_per_workers, int): n_traj_per_workers = [n_traj_per_workers] * self._factory.n_workers self._agent_version += 1 updated_workers = set() agent_ups = self._factory.prepare_worker_messages( agent_update, cloudpickle.dumps) env_ups = self._factory.prepare_worker_messages(env_update) worker_ups = self._factory.prepare_worker_messages(worker_update) trajectories = defaultdict(list) for worker_number, q in enumerate(self._to_worker): q.put_nowait(('start', (agent_ups[worker_number], env_ups[worker_number], worker_ups[worker_number], self._agent_version))) updated_workers.add(worker_number) if len(trajectories[worker_number]) < n_traj_per_workers[worker_number]: q.put_nowait(('rollout', ())) with click.progressbar(length=sum(n_traj_per_workers), label='Sampling') as pbar: while any( len(trajectories[i]) < n_traj_per_workers[i] for i in range(self._factory.n_workers)): tag, contents = self._to_sampler.get() if tag == 'trajectory': pbar.update(1) batch, version, worker_n = contents if version == self._agent_version: trajectories[worker_n].append(batch) if len(trajectories[worker_n]) < n_traj_per_workers[worker_n]: self._to_worker[worker_n].put_nowait( 
('rollout', ())) elif len(trajectories[worker_n]) == n_traj_per_workers[worker_n]: self._to_worker[worker_n].put_nowait( ('stop', ())) else: raise Exception('len(trajectories[worker_n]) > n_traj_per_workers[worker_n]') else: raise Exception('version != self._agent_version') else: raise AssertionError( 'Unknown tag {} with contents {}'.format( tag, contents)) ordered_trajectories = list( itertools.chain( *[trajectories[i] for i in range(self._factory.n_workers)])) infos = defaultdict(list) if get_attrs is not None: for i in range(self._factory.n_workers): self._to_worker[i].put_nowait( ('get_attrs', get_attrs)) tag, contents = self._to_sampler.get() assert tag == 'attr_dict' for k, v in contents.items(): infos[k].append(v) return TrajectoryBatch.concatenate(*ordered_trajectories), infos def run_worker(factory, to_worker, to_sampler, worker_number, agent, env, n_thread): if n_thread is not None: import torch torch.set_num_threads(n_thread) if not DEBUG: to_sampler.cancel_join_thread() setproctitle.setproctitle('worker:' + setproctitle.getproctitle()) inner_worker = factory(worker_number) inner_worker.update_agent(cloudpickle.loads(agent)) inner_worker.update_env(env) version = 0 while True: tag, contents = to_worker.get() if tag == 'start': # Update env and policy. agent_update, env_update, worker_update, version = contents inner_worker.update_agent(cloudpickle.loads(agent_update)) inner_worker.update_env(env_update) inner_worker.update_worker(worker_update) elif tag == 'stop': pass elif tag == 'rollout': batch = inner_worker.rollout() to_sampler.put_nowait( ('trajectory', (batch, version, worker_number))) elif tag == 'get_attrs': keys = contents attr_dict = inner_worker.get_attrs(keys) to_sampler.put_nowait( ('attr_dict', attr_dict) ) elif tag == 'exit': to_worker.close() to_sampler.close() inner_worker.shutdown() return else: raise AssertionError('Unknown tag {} with contents {}'.format( tag, contents))
8,054
38.876238
106
py
CSD-locomotion
CSD-locomotion-master/garagei/sampler/option_worker.py
import functools import numpy as np from garage.experiment import deterministic from garage.sampler import DefaultWorker from iod.utils import get_np_concat_obs class OptionWorker(DefaultWorker): def __init__( self, *, # Require passing by keyword, since everything's an int. seed, max_path_length, worker_number, sampler_key): super().__init__(seed=seed, max_path_length=max_path_length, worker_number=worker_number) self._sampler_key = sampler_key self._max_path_length_override = None self._cur_extras = None self._cur_extra_idx = None self._cur_extra_keys = set() self._render = False self._deterministic_initial_state = None self._deterministic_policy = None def update_env(self, env_update): if env_update is not None: if isinstance(env_update, dict): for k, v in env_update.items(): setattr(self.env, k, v) else: super().update_env(env_update) def worker_init(self): """Initialize a worker.""" if self._seed is not None: deterministic.set_seed(self._seed + self._worker_number * 10000) def update_worker(self, worker_update): if worker_update is not None: if isinstance(worker_update, dict): for k, v in worker_update.items(): setattr(self, k, v) if k == '_cur_extras': if v is None: self._cur_extra_keys = set() else: if len(self._cur_extras) > 0: self._cur_extra_keys = set(self._cur_extras[0].keys()) else: self._cur_extra_keys = None else: raise TypeError('Unknown worker update type.') def get_attrs(self, keys): attr_dict = {} for key in keys: attr_dict[key] = functools.reduce(getattr, [self] + key.split('.')) return attr_dict def start_rollout(self): """Begin a new rollout.""" if 'goal' in self._cur_extra_keys: goal = self._cur_extras[self._cur_extra_idx]['goal'] reset_kwargs = {'goal': goal} else: reset_kwargs = {} env = self.env while hasattr(env, 'env'): env = getattr(env, 'env') if hasattr(env, 'fixed_initial_state') and self._deterministic_initial_state is not None: env.fixed_initial_state = self._deterministic_initial_state self._path_length = 0 self._prev_obs = self.env.reset(**reset_kwargs) self._prev_extra = None self.agent.reset() def step_rollout(self): """Take a single time-step in the current rollout. Returns: bool: True iff the path is done, either due to the environment indicating termination of due to reaching `max_path_length`. 
""" cur_max_path_length = self._max_path_length if self._max_path_length_override is None else self._max_path_length_override if self._path_length < cur_max_path_length: if 'option' in self._cur_extra_keys: cur_extra_key = 'option' else: cur_extra_key = None if cur_extra_key is None: agent_input = self._prev_obs else: if isinstance(self._cur_extras[self._cur_extra_idx][cur_extra_key], list): cur_extra = self._cur_extras[self._cur_extra_idx][cur_extra_key][self._path_length] if cur_extra is None: cur_extra = self._prev_extra self._cur_extras[self._cur_extra_idx][cur_extra_key][self._path_length] = cur_extra else: cur_extra = self._cur_extras[self._cur_extra_idx][cur_extra_key] agent_input = get_np_concat_obs( self._prev_obs, cur_extra, ) self._prev_extra = cur_extra if self._deterministic_policy is not None: self.agent._force_use_mode_actions = self._deterministic_policy a, agent_info = self.agent.get_action(agent_input) if self._render: next_o, r, d, env_info = self.env.step(a, render=self._render) else: next_o, r, d, env_info = self.env.step(a) self._observations.append(self._prev_obs) self._rewards.append(r) self._actions.append(a) for k, v in agent_info.items(): self._agent_infos[k].append(v) for k in self._cur_extra_keys: if isinstance(self._cur_extras[self._cur_extra_idx][k], list): self._agent_infos[k].append(self._cur_extras[self._cur_extra_idx][k][self._path_length]) else: self._agent_infos[k].append(self._cur_extras[self._cur_extra_idx][k]) for k, v in env_info.items(): self._env_infos[k].append(v) self._path_length += 1 self._terminals.append(d) if not d: self._prev_obs = next_o return False self._terminals[-1] = True self._lengths.append(self._path_length) self._last_observations.append(self._prev_obs) return True def rollout(self): """Sample a single rollout of the agent in the environment. Returns: garage.TrajectoryBatch: The collected trajectory. """ if self._cur_extras is not None: self._cur_extra_idx += 1 self.start_rollout() while not self.step_rollout(): pass return self.collect_rollout()
5,991
35.536585
129
py
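The per-rollout skills and goals reach OptionWorker through the worker_update dict assembled in OptionLocalRunner.obtain_exact_trajectories. Below is a sketch of what such an update looks like for two rollouts with 2-D skill vectors; the values are illustrative, not taken from the repo's launch scripts.

import numpy as np

# One extras dict per requested trajectory; the worker advances _cur_extra_idx at the start of
# each rollout and concatenates extras['option'] onto the observation fed to the policy.
worker_update = dict(
    _max_path_length_override=None,
    _cur_extras=[{'option': np.array([1.0, 0.0])},     # skill for the first rollout
                 {'option': np.array([0.0, 1.0])}],    # skill for the second rollout
    _cur_extra_idx=-1,
    _deterministic_policy=False,                       # True forces the policy's mode actions
)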
CSD-locomotion
CSD-locomotion-master/garagei/torch/__init__.py
0
0
0
py
CSD-locomotion
CSD-locomotion-master/garagei/torch/utils.py
from math import inf import math import numpy as np import torch from torch.distributions.transforms import AffineTransform from torch.nn.init import _calculate_fan_in_and_fan_out, _no_grad_normal_ from garagei.torch.distributions.transformed_distribution_ex import TransformedDistributionEx from garagei.torch.distributions.transforms_ex import AffineTransformEx def unsqueeze_expand_flat_dim0(x, num): return x.unsqueeze(dim=0).expand(num, *((-1,) * x.ndim)).reshape( num * x.size(0), *x.size()[1:]) def _get_transform_summary(transform): if isinstance(transform, AffineTransform): return f'{type(transform).__name__}({transform.loc}, {transform.scale})' raise NotImplementedError def wrap_dist_with_transforms(base_dist_cls, transforms): def _create(*args, **kwargs): return TransformedDistributionEx(base_dist_cls(*args, **kwargs), transforms) _create.__name__ = (f'{base_dist_cls.__name__}[' + ', '.join(_get_transform_summary(t) for t in transforms) + ']') return _create def unwrap_dist(dist): while hasattr(dist, 'base_dist'): dist = dist.base_dist return dist def get_outermost_dist_attr(dist, attr): while (not hasattr(dist, attr)) and hasattr(dist, 'base_dist'): dist = dist.base_dist return getattr(dist, attr, None) def get_affine_transform_for_beta_dist(target_min, target_max): # https://stackoverflow.com/a/12569453/2182622 if isinstance(target_min, (np.ndarray, np.generic)): assert np.all(target_min <= target_max) else: assert target_min <= target_max #return AffineTransform(loc=torch.Tensor(target_min), # scale=torch.Tensor(target_max - target_min)) return AffineTransformEx(loc=torch.tensor(target_min), scale=torch.tensor(target_max - target_min)) def compute_total_norm(parameters, norm_type=2): # Code adopted from clip_grad_norm_(). if isinstance(parameters, torch.Tensor): parameters = [parameters] parameters = list(filter(lambda p: p.grad is not None, parameters)) norm_type = float(norm_type) if len(parameters) == 0: return torch.tensor(0.) device = parameters[0].grad.device if norm_type == inf: total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters) else: total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type) return total_norm class TrainContext: def __init__(self, modules): self.modules = modules def __enter__(self): for m in self.modules: m.train() return self def __exit__(self, exc_type, exc_val, exc_tb): for m in self.modules: m.eval() def xavier_normal_ex(tensor, gain=1., multiplier=0.1): fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) return _no_grad_normal_(tensor, 0., std * multiplier) def kaiming_uniform_ex_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu', gain=None): fan = torch.nn.init._calculate_correct_fan(tensor, mode) gain = gain or torch.nn.init.calculate_gain(nonlinearity, a) std = gain / math.sqrt(fan) bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation with torch.no_grad(): return tensor.uniform_(-bound, bound)
3,495
37.844444
128
py
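Two quick illustrations for the helpers above: building a Beta distribution supported on an arbitrary interval by composing get_affine_transform_for_beta_dist with wrap_dist_with_transforms, and measuring a gradient norm with compute_total_norm, which mirrors clip_grad_norm_'s norm computation without performing any clipping. The Linear module is just a stand-in for a real network.

import numpy as np
import torch
from garagei.torch.utils import (compute_total_norm,
                                 get_affine_transform_for_beta_dist,
                                 wrap_dist_with_transforms)

# Beta(1, 1) is uniform on [0, 1]; the affine transform stretches it onto [-1, 1] per dimension.
transform = get_affine_transform_for_beta_dist(np.array([-1.0]), np.array([1.0]))
BetaOnRange = wrap_dist_with_transforms(torch.distributions.Beta, [transform])
dist = BetaOnRange(torch.ones(3), torch.ones(3))
print(dist.sample())                       # three values in [-1, 1]

# Gradient-norm monitoring on a toy module.
net = torch.nn.Linear(4, 2)
net(torch.randn(8, 4)).sum().backward()
print(compute_total_norm(net.parameters()))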
CSD-locomotion
CSD-locomotion-master/garagei/torch/distributions/__init__.py
0
0
0
py
CSD-locomotion
CSD-locomotion-master/garagei/torch/distributions/transformed_distribution_ex.py
import torch from torch.distributions import Beta, Normal, TransformedDistribution from torch.distributions.transforms import AffineTransform class TransformedDistributionEx(TransformedDistribution): def entropy(self): """ Returns entropy of distribution, batched over batch_shape. Returns: Tensor of shape batch_shape. """ ent = self.base_dist.entropy() for t in self.transforms: if isinstance(t, AffineTransform): affine_ent = torch.log(torch.abs(t.scale)) if t.event_dim > 0: sum_dims = list(range(-t.event_dim, 0)) affine_ent = affine_ent.sum(dim=sum_dims) ent = ent + affine_ent else: raise NotImplementedError return ent
830
32.24
69
py
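The entropy correction above is the change-of-variables identity H(aX + b) = H(X) + log|a|. A small check against a unit Normal scaled by 2; tensors are used for loc and scale because the correction calls torch.abs on the transform's scale.

import torch
from torch.distributions import Normal
from torch.distributions.transforms import AffineTransform
from garagei.torch.distributions.transformed_distribution_ex import TransformedDistributionEx

base = Normal(torch.zeros(3), torch.ones(3))
dist = TransformedDistributionEx(base, [AffineTransform(loc=torch.tensor(1.0),
                                                        scale=torch.tensor(2.0))])
print(dist.entropy())                                    # base entropy plus log(2) per entry
print(base.entropy() + torch.log(torch.tensor(2.0)))     # same values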
CSD-locomotion
CSD-locomotion-master/garagei/torch/distributions/transforms_ex.py
from torch.distributions.transforms import AffineTransform, _InverseTransform class NoWeakrefTrait(object): def _inv_no_weakref(self): """ Returns the inverse :class:`Transform` of this transform. This should satisfy ``t.inv.inv is t``. """ inv = None if self._inv is not None: #inv = self._inv() inv = self._inv if inv is None: inv = _InverseTransform(self) #inv = _InverseTransformNoWeakref(self) #self._inv = weakref.ref(inv) self._inv = inv return inv class AffineTransformEx(AffineTransform, NoWeakrefTrait): @property def inv(self): return NoWeakrefTrait._inv_no_weakref(self) def maybe_clone_to_device(self, device): if device == self.loc.device: return self return AffineTransformEx(loc=self.loc.to(device, copy=True), scale=self.scale.to(device, copy=True))
992
31.032258
77
py
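The point of the no-weakref inverse is that the cached inverse survives even when nothing else holds a reference to it, while still satisfying the documented t.inv.inv is t round trip; maybe_clone_to_device avoids copying when the parameters already live on the target device. A small sketch:

import torch
from garagei.torch.distributions.transforms_ex import AffineTransformEx

t = AffineTransformEx(loc=torch.tensor(0.0), scale=torch.tensor(2.0))
inv = t.inv                                   # built once, cached with a strong reference
assert t.inv is inv and inv.inv is t          # the documented t.inv.inv is t round trip

same = t.maybe_clone_to_device(torch.device('cpu'))
assert same is t                              # no copy when loc/scale already live on that device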
CSD-locomotion
CSD-locomotion-master/garagei/torch/modules/__init__.py
0
0
0
py
CSD-locomotion
CSD-locomotion-master/garagei/torch/modules/categorical_mlp_module_ex.py
import abc import torch from torch import nn from torch.distributions import Categorical, OneHotCategorical from torch.distributions.independent import Independent from garage.torch.distributions import TanhNormal from garage.torch.modules.mlp_module import MLPModule from garage.torch.modules.multi_headed_mlp_module import MultiHeadedMLPModule from garagei.torch.distributions.transformed_distribution_ex import TransformedDistributionEx class CategoricalMLPModuleEx(nn.Module): def __init__(self, input_dim, output_dim, hidden_sizes=(32, 32), hidden_nonlinearity=torch.tanh, hidden_w_init=nn.init.xavier_uniform_, hidden_b_init=nn.init.zeros_, output_nonlinearity=None, output_w_init=nn.init.xavier_uniform_, output_b_init=nn.init.zeros_, layer_normalization=False, categorical_distribution_cls=Categorical, distribution_transformations=None): super().__init__() self._input_dim = input_dim self._output_dim = output_dim self._hidden_sizes = hidden_sizes self._hidden_nonlinearity = hidden_nonlinearity self._hidden_w_init = hidden_w_init self._hidden_b_init = hidden_b_init self._output_nonlinearity = output_nonlinearity self._output_w_init = output_w_init self._output_b_init = output_b_init self._layer_normalization = layer_normalization self._categorical_dist_class = categorical_distribution_cls self._distribution_transformations = distribution_transformations self._logits_module = MLPModule( input_dim=self._input_dim, output_dim=self._output_dim, hidden_sizes=self._hidden_sizes, hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self._hidden_w_init, hidden_b_init=self._hidden_b_init, output_nonlinearity=self._output_nonlinearity, output_w_init=self._output_w_init, output_b_init=self._output_b_init, layer_normalization=self._layer_normalization) def _maybe_move_distribution_transformations(self): device = next(self.parameters()).device if self._distribution_transformations is not None: self._distribution_transformations = [ t.maybe_clone_to_device(device) for t in self._distribution_transformations ] # Parent module's .to(), .cpu(), and .cuda() call children's ._apply(). def _apply(self, *args, **kwargs): ret = super()._apply(*args, **kwargs) self._maybe_move_distribution_transformations() return ret @abc.abstractmethod def _get_logits(self, *inputs): return self._logits_module(*inputs) def forward(self, *inputs): logits = self._get_logits(*inputs) dist = self._categorical_dist_class(logits=logits) if self._distribution_transformations is not None: dist = TransformedDistributionEx( dist, self._distribution_transformations) # This control flow is needed because if a TanhNormal distribution is # wrapped by torch.distributions.Independent, then custom functions # such as rsample_with_pretanh_value of the TanhNormal distribution # are not accessable. if not isinstance(dist, (TanhNormal, OneHotCategorical)): # Makes it so that a sample from the distribution is treated as a # single sample and not dist.batch_shape samples. dist = Independent(dist, 1) return dist def forward_mode(self, *inputs): logits = self._get_logits(*inputs) dist = self._categorical_dist_class(logits=logits) if self._distribution_transformations is not None: dist = TransformedDistributionEx( dist, self._distribution_transformations) # This control flow is needed because if a TanhNormal distribution is # wrapped by torch.distributions.Independent, then custom functions # such as rsample_with_pretanh_value of the TanhNormal distribution # are not accessable. 
if not isinstance(dist, (TanhNormal, OneHotCategorical)): # Makes it so that a sample from the distribution is treated as a # single sample and not dist.batch_shape samples. dist = Independent(dist, 1) return dist.mode def forward_with_transform(self, *inputs, transform): logits = self._get_logits(*inputs) dist = self._categorical_dist_class(logits=logits) if self._distribution_transformations is not None: dist = TransformedDistributionEx( dist, self._distribution_transformations) # This control flow is needed because if a TanhNormal distribution is # wrapped by torch.distributions.Independent, then custom functions # such as rsample_with_pretanh_value of the TanhNormal distribution # are not accessable. if not isinstance(dist, (TanhNormal, OneHotCategorical)): # Makes it so that a sample from the distribution is treated as a # single sample and not dist.batch_shape samples. dist = Independent(dist, 1) logits = transform(logits) dist_transformed = self._categorical_dist_class(logits=logits) if self._distribution_transformations is not None: dist_transformed = TransformedDistributionEx( dist_transformed, self._distribution_transformations) # This control flow is needed because if a TanhNormal distribution is # wrapped by torch.distributions.Independent, then custom functions # such as rsample_with_pretanh_value of the TanhNormal distribution # are not accessable. if not isinstance(dist_transformed, (TanhNormal, OneHotCategorical)): # Makes it so that a sample from the distribution is treated as a # single sample and not dist_transformed.batch_shape samples. dist_transformed = Independent(dist_transformed, 1) return dist, dist_transformed def forward_with_chunks(self, *inputs, merge): logits = [] for chunk_inputs in zip(*inputs): chunk_logits = self._get_logits(*chunk_inputs) logits.append(chunk_logits) logits = merge(logits, batch_dim=0) dist = self._categorical_dist_class(logits=logits) if self._distribution_transformations is not None: dist = TransformedDistributionEx( dist, self._distribution_transformations) # This control flow is needed because if a TanhNormal distribution is # wrapped by torch.distributions.Independent, then custom functions # such as rsample_with_pretanh_value of the TanhNormal distribution # are not accessable. if not isinstance(dist, (TanhNormal, OneHotCategorical)): # Makes it so that a sample from the distribution is treated as a # single sample and not dist.batch_shape samples. dist = Independent(dist, 1) return dist
7,450
40.859551
93
py
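CategoricalMLPModuleEx above maps inputs to logits and wraps them in a categorical (typically one-hot) distribution, adding an Independent wrapper only for distribution types that need it. A minimal standalone sketch of that logits-to-distribution pattern, using a plain torch Sequential as a stand-in for the repo's MLPModule (the layer sizes here are arbitrary, not taken from the repo):

import torch
from torch import nn
from torch.distributions import OneHotCategorical

# Stand-in logits network; CategoricalMLPModuleEx builds this from MLPModule.
logits_mlp = nn.Sequential(nn.Linear(4, 32), nn.Tanh(), nn.Linear(32, 3))

obs = torch.randn(5, 4)                      # batch of 5 observations of dim 4
dist = OneHotCategorical(logits=logits_mlp(obs))
sample = dist.sample()                       # shape (5, 3), one-hot rows
log_prob = dist.log_prob(sample)             # shape (5,)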
CSD-locomotion
CSD-locomotion-master/garagei/torch/modules/gaussian_mlp_module_ex.py
import torch from garage.torch.distributions import TanhNormal from garage.torch.modules import MultiHeadedMLPModule from garage.torch.modules.gaussian_mlp_module import GaussianMLPModule, GaussianMLPIndependentStdModule, \ GaussianMLPTwoHeadedModule, GaussianMLPBaseModule from garage.torch.modules.mlp_module import MLPModule from torch import nn from torch.distributions import Normal, Categorical, MixtureSameFamily from torch.distributions.independent import Independent class ForwardWithTransformTrait(object): def forward_with_transform(self, *inputs, transform): mean, log_std_uncentered = self._get_mean_and_log_std(*inputs) if self._min_std_param or self._max_std_param: log_std_uncentered = log_std_uncentered.clamp( min=(None if self._min_std_param is None else self._min_std_param.item()), max=(None if self._max_std_param is None else self._max_std_param.item())) if self._std_parameterization == 'exp': std = log_std_uncentered.exp() else: std = log_std_uncentered.exp().exp().add(1.).log() dist = self._norm_dist_class(mean, std) # This control flow is needed because if a TanhNormal distribution is # wrapped by torch.distributions.Independent, then custom functions # such as rsample_with_pre_tanh_value of the TanhNormal distribution # are not accessable. if not isinstance(dist, TanhNormal): # Makes it so that a sample from the distribution is treated as a # single sample and not dist.batch_shape samples. dist = Independent(dist, 1) mean = transform(mean) std = transform(std) dist_transformed = self._norm_dist_class(mean, std) # This control flow is needed because if a TanhNormal distribution is # wrapped by torch.distributions.Independent, then custom functions # such as rsample_with_pre_tanh_value of the TanhNormal distribution # are not accessable. if not isinstance(dist_transformed, TanhNormal): # Makes it so that a sample from the distribution is treated as a # single sample and not dist_transformed.batch_shape samples. dist_transformed = Independent(dist_transformed, 1) return dist, dist_transformed class ForwardWithChunksTrait(object): def forward_with_chunks(self, *inputs, merge): mean = [] log_std_uncentered = [] for chunk_inputs in zip(*inputs): chunk_mean, chunk_log_std_uncentered = self._get_mean_and_log_std(*chunk_inputs) mean.append(chunk_mean) log_std_uncentered.append(chunk_log_std_uncentered) mean = merge(mean, batch_dim=0) log_std_uncentered = merge(log_std_uncentered, batch_dim=0) if self._min_std_param or self._max_std_param: log_std_uncentered = log_std_uncentered.clamp( min=(None if self._min_std_param is None else self._min_std_param.item()), max=(None if self._max_std_param is None else self._max_std_param.item())) if self._std_parameterization == 'exp': std = log_std_uncentered.exp() else: std = log_std_uncentered.exp().exp().add(1.).log() dist = self._norm_dist_class(mean, std) # This control flow is needed because if a TanhNormal distribution is # wrapped by torch.distributions.Independent, then custom functions # such as rsample_with_pretanh_value of the TanhNormal distribution # are not accessable. if not isinstance(dist, TanhNormal): # Makes it so that a sample from the distribution is treated as a # single sample and not dist.batch_shape samples. 
dist = Independent(dist, 1) return dist class ForwardModeTrait(object): def forward_mode(self, *inputs): mean, log_std_uncentered = self._get_mean_and_log_std(*inputs) if self._min_std_param or self._max_std_param: log_std_uncentered = log_std_uncentered.clamp( min=(None if self._min_std_param is None else self._min_std_param.item()), max=(None if self._max_std_param is None else self._max_std_param.item())) if self._std_parameterization == 'exp': std = log_std_uncentered.exp() else: std = log_std_uncentered.exp().exp().add(1.).log() dist = self._norm_dist_class(mean, std) # This control flow is needed because if a TanhNormal distribution is # wrapped by torch.distributions.Independent, then custom functions # such as rsample_with_pre_tanh_value of the TanhNormal distribution # are not accessable. if not isinstance(dist, TanhNormal): # Makes it so that a sample from the distribution is treated as a # single sample and not dist.batch_shape samples. dist = Independent(dist, 1) return dist.mean class GaussianMLPModuleEx(GaussianMLPModule, ForwardWithTransformTrait, ForwardWithChunksTrait, ForwardModeTrait): pass class GaussianMLPIndependentStdModuleEx(GaussianMLPIndependentStdModule, ForwardWithTransformTrait, ForwardWithChunksTrait, ForwardModeTrait): pass class GaussianMLPTwoHeadedModuleEx(GaussianMLPTwoHeadedModule, ForwardWithTransformTrait, ForwardWithChunksTrait, ForwardModeTrait): pass class GaussianMixtureMLPModule(GaussianMLPBaseModule): def __init__(self, input_dim, output_dim, num_components, hidden_sizes=(32, 32), hidden_nonlinearity=torch.tanh, hidden_w_init=nn.init.xavier_uniform_, hidden_b_init=nn.init.zeros_, output_nonlinearity=None, output_w_init=nn.init.xavier_uniform_, output_b_init=nn.init.zeros_, learn_std=True, init_std=1.0, min_std=1e-6, max_std=None, std_parameterization='exp', layer_normalization=False, normal_distribution_cls=Normal, **kwargs): super().__init__(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, hidden_w_init=hidden_w_init, hidden_b_init=hidden_b_init, output_nonlinearity=output_nonlinearity, output_w_init=output_w_init, output_b_init=output_b_init, learn_std=learn_std, init_std=init_std, min_std=min_std, max_std=max_std, std_parameterization=std_parameterization, layer_normalization=layer_normalization, normal_distribution_cls=normal_distribution_cls) self._mean_module = MultiHeadedMLPModule( n_heads=num_components + 1, input_dim=self._input_dim, output_dims=[self._action_dim] * num_components + [num_components], hidden_sizes=self._hidden_sizes, hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self._hidden_w_init, hidden_b_init=self._hidden_b_init, output_nonlinearities=self._output_nonlinearity, output_w_inits=self._output_w_init, output_b_inits=self._output_b_init, layer_normalization=self._layer_normalization, **kwargs, ) def forward(self, *inputs): assert len(inputs) == 1 *means, logits = self._mean_module(*inputs) broadcast_shape = list(inputs[0].shape[:-1]) + [self._action_dim] log_std_uncentered = torch.zeros(*broadcast_shape, device=self._init_std.device) + self._init_std if self._min_std_param or self._max_std_param: log_std_uncentered = log_std_uncentered.clamp( min=(None if self._min_std_param is None else self._min_std_param.item()), max=(None if self._max_std_param is None else self._max_std_param.item())) if self._std_parameterization == 'exp': std = log_std_uncentered.exp() else: std = log_std_uncentered.exp().exp().add(1.).log() categorical_dist = 
Categorical(logits=logits) mean = torch.stack(means, dim=1) std = torch.unsqueeze(std, dim=1) std = std.expand(std.size(0), mean.size(1), std.size(2)) assert self._norm_dist_class == Normal norm_dist = Independent(self._norm_dist_class(mean, std), 1) dist = MixtureSameFamily(categorical_dist, norm_dist) return dist
9,107
43.213592
142
py
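GaussianMixtureMLPModule.forward above assembles a MixtureSameFamily distribution from per-component means, a shared std, and mixing logits. A minimal sketch of that construction with hard-coded tensors; the shapes follow the code above, the values are arbitrary:

import torch
from torch.distributions import Categorical, Independent, MixtureSameFamily, Normal

B, K, D = 5, 3, 2                            # batch size, mixture components, action dim
logits = torch.randn(B, K)                   # mixing weights per sample
means = torch.randn(B, K, D)                 # one mean vector per component
stds = torch.ones(B, K, D)

mixture = Categorical(logits=logits)
components = Independent(Normal(means, stds), 1)   # event dim = action dim
dist = MixtureSameFamily(mixture, components)

actions = dist.sample()                      # shape (B, D)
log_prob = dist.log_prob(actions)            # shape (B,)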
CSD-locomotion
CSD-locomotion-master/garagei/torch/modules/multiplier.py
import numpy as np
import torch
from torch import nn


class Multiplier(nn.Module):
    """Multiplies its input elementwise by a fixed or learnable multiplicand."""

    def __init__(self,
                 multiplicand,
                 requires_grad=False,
                 ):
        super().__init__()

        self._multiplicand = nn.Parameter(multiplicand, requires_grad=requires_grad)

    def forward(self, x):
        return x * self._multiplicand
375
19.888889
84
py
CSD-locomotion
CSD-locomotion-master/garagei/torch/modules/normalizer.py
import numpy as np import torch from torch import nn class Normalizer(nn.Module): def __init__( self, shape, alpha=0.001, do_normalize=True, ): super().__init__() self.shape = shape self.alpha = alpha self.do_normalize = do_normalize self.register_buffer('running_mean', torch.zeros(shape)) self.register_buffer('running_var', torch.ones(shape)) def update(self, data, override=False): if not self.do_normalize: return # Compute in numpy for performance. data = data.detach().cpu().numpy() if not override: running_mean = self.running_mean.detach().cpu().numpy() running_var = self.running_var.detach().cpu().numpy() for single_data in np.random.permutation(data): # https://en.wikipedia.org/wiki/Moving_average#Exponentially_weighted_moving_variance_and_standard_deviation delta = single_data - running_mean running_mean = running_mean + self.alpha * delta running_var = (1 - self.alpha) * (running_var + self.alpha * delta ** 2) else: running_mean = np.mean(data, axis=0) running_var = np.var(data, axis=0) self.running_mean = torch.from_numpy(running_mean) self.running_var = torch.from_numpy(running_var) @property def mean(self): return self.running_mean.detach().cpu().numpy() @property def var(self): return self.running_var.detach().cpu().numpy() @property def std(self): return self.var ** 0.5 def normalize(self, x): return (x - self.mean) / self.std def denormalize(self, x): return x * self.std + self.mean def do_scale(self, x): return x / self.std
1,859
28.52381
124
py
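Normalizer.update above maintains exponentially weighted running statistics (the linked Wikipedia recursion) with alpha as the decay. A small self-contained sketch of that recursion and the resulting normalization, mirroring the loop in update(); the data here is synthetic:

import numpy as np

alpha = 0.001
running_mean, running_var = 0.0, 1.0
data = np.random.randn(10000) * 2.0 + 3.0        # true mean 3, std 2

for x in np.random.permutation(data):
    delta = x - running_mean
    running_mean = running_mean + alpha * delta
    running_var = (1 - alpha) * (running_var + alpha * delta ** 2)

std = running_var ** 0.5
normalized = (data - running_mean) / std         # roughly zero-mean, unit-variance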
CSD-locomotion
CSD-locomotion-master/garagei/torch/modules/parallel_module.py
import numpy as np import torch from torch import nn class ParallelModule(nn.Module): def __init__(self, input_dims, parallel_modules, post_parallel_module, **kwargs): super().__init__() self._input_dims = input_dims self._parallel_modules = nn.ModuleList(parallel_modules) print(parallel_modules) self._post_parallel_module = post_parallel_module self._split_dim = -1 assert len(self._input_dims) == len(self._parallel_modules) def _get_input_dim_cumsum(self): return np.cumsum([0] + self._input_dims[:-1]) def _forward_parallel(self, *inputs): split_inputs = list(zip(*[ torch.split(i, self._input_dims, dim=self._split_dim) for i in inputs ])) split_outputs = [ m(*si) for si, m in zip(split_inputs, self._parallel_modules) ] return torch.cat(split_outputs, dim=-1) def forward(self, *inputs): out = self._forward_parallel(*inputs) if self._post_parallel_module is not None: out = self._post_parallel_module(out) return out def forward_mode(self, *inputs): out = self._forward_parallel(*inputs) if self._post_parallel_module is not None: out = self._post_parallel_module.forward_mode(out) return out def forward_with_transform(self, *inputs, transform): out = self._forward_parallel(*inputs) if self._post_parallel_module is not None: out = self._post_parallel_module.forward_with_transform(out, transform=transform) return out def forward_with_chunks(self, *inputs, merge): out = [] for chunk_inputs in zip(*inputs): out.append(self._forward_parallel(*chunk_inputs)) if self._post_parallel_module is not None: out = self._post_parallel_module.forward_with_chunks(out, merge=merge) return out
2,017
32.633333
93
py
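ParallelModule splits the last input dimension into per-module slices, runs each slice through its own submodule, and concatenates the outputs. A minimal sketch of that split/concat pattern with two stand-in Linear heads (the dimensions are arbitrary, not the repo's):

import torch
from torch import nn

input_dims = [2, 3]                              # how the last dim is split
heads = nn.ModuleList([nn.Linear(2, 1), nn.Linear(3, 1)])

x = torch.randn(4, 5)                            # batch of 4, feature dim 2 + 3
chunks = torch.split(x, input_dims, dim=-1)      # shapes (4, 2) and (4, 3)
outs = [head(chunk) for head, chunk in zip(heads, chunks)]
y = torch.cat(outs, dim=-1)                      # shape (4, 2)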
CSD-locomotion
CSD-locomotion-master/garagei/torch/modules/parameter_module.py
import torch from torch import nn class ParameterModule(nn.Module): def __init__( self, init_value ): super().__init__() self.param = torch.nn.Parameter(init_value)
216
15.692308
51
py
CSD-locomotion
CSD-locomotion-master/garagei/torch/modules/reshape.py
import numpy as np
import torch
from torch import nn


class ReshapeModule(nn.Module):
    """Reshapes the non-batch dimensions to self.shape; reshape() copies when the input is non-contiguous."""

    def __init__(self, shape):
        super().__init__()
        self.shape = shape

    def forward(self, x):
        assert np.prod(x.shape[1:]) == np.prod(self.shape)
        return x.reshape(-1, *self.shape)


class ViewModule(nn.Module):
    """Same as ReshapeModule but uses view(), which never copies and requires a compatible memory layout."""

    def __init__(self, shape):
        super().__init__()
        self.shape = shape

    def forward(self, x):
        assert np.prod(x.shape[1:]) == np.prod(self.shape)
        return x.view(-1, *self.shape)
542
20.72
58
py
CSD-locomotion
CSD-locomotion-master/garagei/torch/modules/seq_last_selector_module.py
import copy import numpy as np import torch from torch import nn import dowel_wrapper from iod.utils import zip_dict class SeqLastSelectorModule(nn.Module): def __init__(self, *, inner_module, input_dim=None, use_delta=False, omit_obs_idxs=None, restrict_obs_idxs=None, batch_norm=None, noise=None, ): super().__init__() if restrict_obs_idxs is not None: input_dim = len(restrict_obs_idxs) self.inner_module = inner_module self.input_dim = input_dim self.use_delta = use_delta self.omit_obs_idxs = omit_obs_idxs self.restrict_obs_idxs = restrict_obs_idxs self.batch_norm = batch_norm self.noise = noise if self.batch_norm: self.input_batch_norm = torch.nn.BatchNorm1d(self.input_dim, momentum=0.01, affine=False) self.input_batch_norm.eval() def _process_input(self, x): # x: (Batch (list), Time, Dim) assert isinstance(x, list) batch_size = len(x) x = [e[-1:] for e in x] if self.omit_obs_idxs is not None: for i in range(batch_size): x[i] = x[i].clone() x[i][:, self.omit_obs_idxs] = 0 if self.restrict_obs_idxs is not None: for i in range(batch_size): x[i] = x[i][:, self.restrict_obs_idxs] if self.use_delta: for i in range(batch_size): x[i] = x[i][1:] - x[i][:-1] if self.output_type in ['BatchTime_Hidden', 'BatchTimecummean_Hidden']: x[i] = torch.cat([x[i][0:1], x[i]]) if self.batch_norm: x_cat = torch.cat(x, dim=0) x_cat = self.input_batch_norm(x_cat) x = list(x_cat.split([len(x_i) for x_i in x], dim=0)) if self.noise is not None and self.training: for i in range(batch_size): x[i] = x[i] + torch.randn_like(x[i], device=x[0].device) * self.noise x = torch.cat(x, dim=0) return x def forward(self, x, **kwargs): return self.inner_module(self._process_input(x), **kwargs), (None, None) def forward_with_transform(self, x, *, transform, **kwargs): return self.inner_module.forward_with_transform(self._process_input(x), transform=transform, **kwargs), (None, None) def forward_with_chunks(self, x, *, merge, **kwargs): outs = [] for chunk_x, chunk_kwargs in zip(x, zip_dict(kwargs)): chunk_out = self.inner_module(chunk_x, **chunk_kwargs) outs.append(chunk_out) return self.inner_module.forward_with_chunks(outs, merge=merge), (None, None) def forward_force_only_last(self, x, **kwargs): return self.forward(x, **kwargs) def get_last_linear_layers(self): return self.inner_module.get_last_linear_layers()
3,113
32.12766
101
py
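SeqLastSelectorModule consumes a list of variable-length (Time, Dim) trajectories and feeds only each trajectory's last observation to its inner module. A minimal sketch of just that selection step, outside the class; the Linear layer is a stand-in for inner_module and the sizes are made up:

import torch
from torch import nn

trajectories = [torch.randn(t, 6) for t in (3, 5, 2)]                  # variable-length (Time, Dim)
last_steps = torch.cat([traj[-1:] for traj in trajectories], dim=0)    # shape (3, 6)

inner = nn.Linear(6, 4)                                                # stand-in for inner_module
out = inner(last_steps)                                                # shape (3, 4)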
CSD-locomotion
CSD-locomotion-master/garagei/torch/modules/spectral_norm.py
""" Spectral Normalization from https://arxiv.org/abs/1802.05957 """ import torch from torch.nn.functional import normalize from typing import Any, Optional, TypeVar from torch.nn import Module class SpectralNorm: # Invariant before and after each forward call: # u = normalize(W @ v) # NB: At initialization, this invariant is not enforced _version: int = 1 # At version 1: # made `W` not a buffer, # added `v` as a buffer, and # made eval mode use `W = u @ W_orig @ v` rather than the stored `W`. name: str dim: int n_power_iterations: int eps: float spectral_coef: float = 1. def __init__(self, name: str = 'weight', n_power_iterations: int = 1, dim: int = 0, eps: float = 1e-12, spectral_coef: float = 1.) -> None: self.name = name self.dim = dim if n_power_iterations <= 0: raise ValueError('Expected n_power_iterations to be positive, but ' 'got n_power_iterations={}'.format(n_power_iterations)) self.n_power_iterations = n_power_iterations self.eps = eps self.spectral_coef = spectral_coef def reshape_weight_to_matrix(self, weight: torch.Tensor) -> torch.Tensor: weight_mat = weight if self.dim != 0: # permute dim to front weight_mat = weight_mat.permute(self.dim, *[d for d in range(weight_mat.dim()) if d != self.dim]) height = weight_mat.size(0) return weight_mat.reshape(height, -1) def compute_weight(self, module: Module, do_power_iteration: bool) -> torch.Tensor: # NB: If `do_power_iteration` is set, the `u` and `v` vectors are # updated in power iteration **in-place**. This is very important # because in `DataParallel` forward, the vectors (being buffers) are # broadcast from the parallelized module to each module replica, # which is a new module object created on the fly. And each replica # runs its own spectral norm power iteration. So simply assigning # the updated vectors to the module this function runs on will cause # the update to be lost forever. And the next time the parallelized # module is replicated, the same randomly initialized vectors are # broadcast and used! # # Therefore, to make the change propagate back, we rely on two # important behaviors (also enforced via tests): # 1. `DataParallel` doesn't clone storage if the broadcast tensor # is already on correct device; and it makes sure that the # parallelized module is already on `device[0]`. # 2. If the out tensor in `out=` kwarg has correct shape, it will # just fill in the values. # Therefore, since the same power iteration is performed on all # devices, simply updating the tensors in-place will make sure that # the module replica on `device[0]` will update the _u vector on the # parallized module (by shared storage). # # However, after we update `u` and `v` in-place, we need to **clone** # them before using them to normalize the weight. This is to support # backproping through two forward passes, e.g., the common pattern in # GAN training: loss = D(real) - D(fake). Otherwise, engine will # complain that variables needed to do backward for the first forward # (i.e., the `u` and `v` vectors) are changed in the second forward. weight = getattr(module, self.name + '_orig') u = getattr(module, self.name + '_u') v = getattr(module, self.name + '_v') weight_mat = self.reshape_weight_to_matrix(weight) if do_power_iteration: with torch.no_grad(): for _ in range(self.n_power_iterations): # Spectral norm of weight equals to `u^T W v`, where `u` and `v` # are the first left and right singular vectors. # This power iteration produces approximations of `u` and `v`. 
v = normalize(torch.mv(weight_mat.t(), u), dim=0, eps=self.eps, out=v) u = normalize(torch.mv(weight_mat, v), dim=0, eps=self.eps, out=u) if self.n_power_iterations > 0: # See above on why we need to clone u = u.clone(memory_format=torch.contiguous_format) v = v.clone(memory_format=torch.contiguous_format) sigma = torch.dot(u, torch.mv(weight_mat, v)) weight = weight / sigma * self.spectral_coef return weight def remove(self, module: Module) -> None: with torch.no_grad(): weight = self.compute_weight(module, do_power_iteration=False) delattr(module, self.name) delattr(module, self.name + '_u') delattr(module, self.name + '_v') delattr(module, self.name + '_orig') module.register_parameter(self.name, torch.nn.Parameter(weight.detach())) def __call__(self, module: Module, inputs: Any) -> None: setattr(module, self.name, self.compute_weight(module, do_power_iteration=module.training)) def _solve_v_and_rescale(self, weight_mat, u, target_sigma): # Tries to returns a vector `v` s.t. `u = normalize(W @ v)` # (the invariant at top of this class) and `u @ W @ v = sigma`. # This uses pinverse in case W^T W is not invertible. v = torch.chain_matmul(weight_mat.t().mm(weight_mat).pinverse(), weight_mat.t(), u.unsqueeze(1)).squeeze(1) return v.mul_(target_sigma / torch.dot(u, torch.mv(weight_mat, v))) @staticmethod def apply(module: Module, name: str, n_power_iterations: int, dim: int, eps: float, spectral_coef: float) -> 'SpectralNorm': for k, hook in module._forward_pre_hooks.items(): if isinstance(hook, SpectralNorm) and hook.name == name: raise RuntimeError("Cannot register two spectral_norm hooks on " "the same parameter {}".format(name)) fn = SpectralNorm(name, n_power_iterations, dim, eps, spectral_coef) weight = module._parameters[name] with torch.no_grad(): weight_mat = fn.reshape_weight_to_matrix(weight) h, w = weight_mat.size() # randomly initialize `u` and `v` u = normalize(weight.new_empty(h).normal_(0, 1), dim=0, eps=fn.eps) v = normalize(weight.new_empty(w).normal_(0, 1), dim=0, eps=fn.eps) delattr(module, fn.name) module.register_parameter(fn.name + "_orig", weight) # We still need to assign weight back as fn.name because all sorts of # things may assume that it exists, e.g., when initializing weights. # However, we can't directly assign as it could be an nn.Parameter and # gets added as a parameter. Instead, we register weight.data as a plain # attribute. setattr(module, fn.name, weight.data) module.register_buffer(fn.name + "_u", u) module.register_buffer(fn.name + "_v", v) module.register_forward_pre_hook(fn) module._register_state_dict_hook(SpectralNormStateDictHook(fn)) module._register_load_state_dict_pre_hook(SpectralNormLoadStateDictPreHook(fn)) return fn # This is a top level class because Py2 pickle doesn't like inner class nor an # instancemethod. class SpectralNormLoadStateDictPreHook: # See docstring of SpectralNorm._version on the changes to spectral_norm. def __init__(self, fn) -> None: self.fn = fn # For state_dict with version None, (assuming that it has gone through at # least one training forward), we have # # u = normalize(W_orig @ v) # W = W_orig / sigma, where sigma = u @ W_orig @ v # # To compute `v`, we solve `W_orig @ x = u`, and let # v = x / (u @ W_orig @ x) * (W / W_orig). 
def __call__(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) -> None: fn = self.fn version = local_metadata.get('spectral_norm', {}).get(fn.name + '.version', None) if version is None or version < 1: weight_key = prefix + fn.name if version is None and all(weight_key + s in state_dict for s in ('_orig', '_u', '_v')) and \ weight_key not in state_dict: # Detect if it is the updated state dict and just missing metadata. # This could happen if the users are crafting a state dict themselves, # so we just pretend that this is the newest. return has_missing_keys = False for suffix in ('_orig', '', '_u'): key = weight_key + suffix if key not in state_dict: has_missing_keys = True if strict: missing_keys.append(key) if has_missing_keys: return with torch.no_grad(): weight_orig = state_dict[weight_key + '_orig'] weight = state_dict.pop(weight_key) sigma = (weight_orig / weight).mean() weight_mat = fn.reshape_weight_to_matrix(weight_orig) u = state_dict[weight_key + '_u'] v = fn._solve_v_and_rescale(weight_mat, u, sigma) state_dict[weight_key + '_v'] = v # This is a top level class because Py2 pickle doesn't like inner class nor an # instancemethod. class SpectralNormStateDictHook: # See docstring of SpectralNorm._version on the changes to spectral_norm. def __init__(self, fn) -> None: self.fn = fn def __call__(self, module, state_dict, prefix, local_metadata) -> None: if 'spectral_norm' not in local_metadata: local_metadata['spectral_norm'] = {} key = self.fn.name + '.version' if key in local_metadata['spectral_norm']: raise RuntimeError("Unexpected key in metadata['spectral_norm']: {}".format(key)) local_metadata['spectral_norm'][key] = self.fn._version T_module = TypeVar('T_module', bound=Module) def spectral_norm(module: T_module, name: str = 'weight', n_power_iterations: int = 1, eps: float = 1e-12, dim: Optional[int] = None, spectral_coef=1.) -> T_module: r"""Applies spectral normalization to a parameter in the given module. .. math:: \mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})}, \sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2} Spectral normalization stabilizes the training of discriminators (critics) in Generative Adversarial Networks (GANs) by rescaling the weight tensor with spectral norm :math:`\sigma` of the weight matrix calculated using power iteration method. If the dimension of the weight tensor is greater than 2, it is reshaped to 2D in power iteration method to get spectral norm. This is implemented via a hook that calculates spectral norm and rescales weight before every :meth:`~Module.forward` call. See `Spectral Normalization for Generative Adversarial Networks`_ . .. 
_`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957 Args: module (nn.Module): containing module name (str, optional): name of weight parameter n_power_iterations (int, optional): number of power iterations to calculate spectral norm eps (float, optional): epsilon for numerical stability in calculating norms dim (int, optional): dimension corresponding to number of outputs, the default is ``0``, except for modules that are instances of ConvTranspose{1,2,3}d, when it is ``1`` Returns: The original module with the spectral norm hook Example:: >>> m = spectral_norm(nn.Linear(20, 40)) >>> m Linear(in_features=20, out_features=40, bias=True) >>> m.weight_u.size() torch.Size([40]) """ if dim is None: if isinstance(module, (torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d)): dim = 1 else: dim = 0 SpectralNorm.apply(module, name, n_power_iterations, dim, eps, spectral_coef) return module def remove_spectral_norm(module: T_module, name: str = 'weight') -> T_module: r"""Removes the spectral normalization reparameterization from a module. Args: module (Module): containing module name (str, optional): name of weight parameter Example: >>> m = spectral_norm(nn.Linear(40, 10)) >>> remove_spectral_norm(m) """ for k, hook in module._forward_pre_hooks.items(): if isinstance(hook, SpectralNorm) and hook.name == name: hook.remove(module) del module._forward_pre_hooks[k] break else: raise ValueError("spectral_norm of '{}' not found in {}".format( name, module)) for k, hook in module._state_dict_hooks.items(): if isinstance(hook, SpectralNormStateDictHook) and hook.fn.name == name: del module._state_dict_hooks[k] break for k, hook in module._load_state_dict_pre_hooks.items(): if isinstance(hook, SpectralNormLoadStateDictPreHook) and hook.fn.name == name: del module._load_state_dict_pre_hooks[k] break return module
13,869
44.625
143
py
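The SpectralNorm hook above estimates the largest singular value of the weight by power iteration (alternating u, v updates, then sigma = u^T W v). A standalone sketch of that estimate on a random matrix, checked against an exact computation; this only illustrates the math, not the hook and buffer machinery, and it assumes a torch version that provides torch.linalg.matrix_norm:

import torch
from torch.nn.functional import normalize

torch.manual_seed(0)
W = torch.randn(20, 40)
u = normalize(torch.randn(20), dim=0)
v = normalize(torch.randn(40), dim=0)

for _ in range(50):                                  # power iteration
    v = normalize(torch.mv(W.t(), u), dim=0)
    u = normalize(torch.mv(W, v), dim=0)

sigma = torch.dot(u, torch.mv(W, v))                 # estimated spectral norm
sigma_exact = torch.linalg.matrix_norm(W, ord=2)     # exact largest singular value
print(sigma.item(), sigma_exact.item())              # should be close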
CSD-locomotion
CSD-locomotion-master/garagei/torch/optimizers/__init__.py
0
0
0
py
CSD-locomotion
CSD-locomotion-master/garagei/torch/optimizers/optimizer_group_wrapper.py
"""A PyTorch optimizer wrapper that compute loss and optimize module.""" from garagei.np.optimizers.dict_minibatch_dataset import DictBatchDataset class OptimizerGroupWrapper: """A wrapper class to handle torch.optim.optimizer. """ def __init__(self, optimizers, max_optimization_epochs=1, minibatch_size=None): self._optimizers = optimizers self._max_optimization_epochs = max_optimization_epochs self._minibatch_size = minibatch_size def get_minibatch(self, data, max_optimization_epochs=None): batch_dataset = DictBatchDataset(data, self._minibatch_size) if max_optimization_epochs is None: max_optimization_epochs = self._max_optimization_epochs for _ in range(max_optimization_epochs): for dataset in batch_dataset.iterate(): yield dataset def zero_grad(self, keys=None): r"""Clears the gradients of all optimized :class:`torch.Tensor` s.""" # TODO: optimize to param = None style. if keys is None: keys = self._optimizers.keys() for key in keys: self._optimizers[key].zero_grad() def step(self, keys=None, **closure): """Performs a single optimization step. Arguments: **closure (callable, optional): A closure that reevaluates the model and returns the loss. """ if keys is None: keys = self._optimizers.keys() for key in keys: self._optimizers[key].step(**closure) def target_parameters(self, keys=None): if keys is None: keys = self._optimizers.keys() for key in keys: for pg in self._optimizers[key].param_groups: for p in pg['params']: yield p
1,859
31.631579
77
py
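OptimizerGroupWrapper keys a dict of torch optimizers so that callers can zero and step only a chosen subset of parameters, which is the zero_grad/backward/step pattern _gradient_descent in iod.py uses further below. A hedged usage sketch; it assumes the repo is importable as garagei, and the modules, loss, and key names here are made up for illustration:

import torch
from torch import nn
from garagei.torch.optimizers.optimizer_group_wrapper import OptimizerGroupWrapper

modules = {'qf1': nn.Linear(4, 1), 'qf2': nn.Linear(4, 1)}
optimizers = {k: torch.optim.Adam(m.parameters(), lr=1e-3) for k, m in modules.items()}
wrapper = OptimizerGroupWrapper(optimizers, minibatch_size=256)

x = torch.randn(8, 4)
loss = modules['qf1'](x).pow(2).mean()

wrapper.zero_grad(keys=['qf1'])      # only clears grads owned by the 'qf1' optimizer
loss.backward()
wrapper.step(keys=['qf1'])           # only steps the 'qf1' optimizer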
CSD-locomotion
CSD-locomotion-master/garagei/torch/policies/__init__.py
0
0
0
py
CSD-locomotion
CSD-locomotion-master/garagei/torch/policies/policy_ex.py
import akro import numpy as np import torch from garage.torch import global_device from garage.torch.distributions import TanhNormal from garage.torch.policies.stochastic_policy import StochasticPolicy from garagei.torch.modules.multiplier import Multiplier class PolicyEx(StochasticPolicy): def __init__(self, env_spec, name, *, module_cls, module_kwargs, clip_action=False, omit_obs_idxs=None, option_info=None, force_use_mode_actions=False, ): super().__init__(env_spec, name) self.env_spec = env_spec self._obs_dim = env_spec.observation_space.flat_dim self._action_dim = env_spec.action_space.flat_dim self._clip_action = clip_action self._omit_obs_idxs = omit_obs_idxs self._option_info = option_info self._force_use_mode_actions = force_use_mode_actions self._module = module_cls( input_dim=self._obs_dim, output_dim=self._action_dim, **module_kwargs ) def process_observations(self, observations): if self._omit_obs_idxs is not None: observations = observations.clone() observations[:, self._omit_obs_idxs] = 0 return observations def forward(self, observations): observations = self.process_observations(observations) dist = self._module(observations) try: ret_mean = dist.mean ret_log_std = (dist.variance.sqrt()).log() info = dict(mean=ret_mean, log_std=ret_log_std) except NotImplementedError: info = dict() if hasattr(dist, '_normal'): info.update(dict( normal_mean=dist._normal.mean, normal_std=dist._normal.variance.sqrt(), )) return dist, info def forward_mode(self, observations): observations = self.process_observations(observations) samples = self._module.forward_mode(observations) return samples, dict() def forward_with_transform(self, observations, *, transform): observations = self.process_observations(observations) dist, dist_transformed = self._module.forward_with_transform(observations, transform=transform) try: ret_mean = dist.mean ret_log_std = (dist.variance.sqrt()).log() ret_mean_transformed = dist_transformed.mean.cpu() ret_log_std_transformed = (dist_transformed.variance.sqrt()).log().cpu() info = (dict(mean=ret_mean, log_std=ret_log_std), dict(mean=ret_mean_transformed, log_std=ret_log_std_transformed)) except NotImplementedError: info = (dict(), dict()) return (dist, dist_transformed), info def forward_with_chunks(self, observations, *, merge): observations = [self.process_observations(o) for o in observations] dist = self._module.forward_with_chunks(observations, merge=merge) try: ret_mean = dist.mean ret_log_std = (dist.variance.sqrt()).log() info = dict(mean=ret_mean, log_std=ret_log_std) except NotImplementedError: info = dict() return dist, info def get_mode_actions(self, observations): if not isinstance(observations[0], np.ndarray) and not isinstance( observations[0], torch.Tensor): observations = self._env_spec.observation_space.flatten_n( observations) # frequently users like to pass lists of torch tensors or lists of # numpy arrays. This handles those conversions. 
if isinstance(observations, list): if isinstance(observations[0], np.ndarray): observations = np.stack(observations) elif isinstance(observations[0], torch.Tensor): observations = torch.stack(observations) if isinstance(self._env_spec.observation_space, akro.Image) and \ len(observations.shape) < \ len(self._env_spec.observation_space.shape): observations = self._env_spec.observation_space.unflatten_n( observations) with torch.no_grad(): if not isinstance(observations, torch.Tensor): observations = torch.as_tensor(observations).float().to( global_device()) samples, info = self.forward_mode(observations) return samples.cpu().numpy(), { k: v.detach().cpu().numpy() for (k, v) in info.items() } def get_sample_actions(self, observations): """Based on super().get_actions()""" if not isinstance(observations[0], np.ndarray) and not isinstance( observations[0], torch.Tensor): observations = self._env_spec.observation_space.flatten_n( observations) # frequently users like to pass lists of torch tensors or lists of # numpy arrays. This handles those conversions. if isinstance(observations, list): if isinstance(observations[0], np.ndarray): observations = np.stack(observations) elif isinstance(observations[0], torch.Tensor): observations = torch.stack(observations) if isinstance(self._env_spec.observation_space, akro.Image) and \ len(observations.shape) < \ len(self._env_spec.observation_space.shape): observations = self._env_spec.observation_space.unflatten_n( observations) with torch.no_grad(): if not isinstance(observations, torch.Tensor): observations = torch.as_tensor(observations).float().to( global_device()) dist, info = self.forward(observations) if isinstance(dist, TanhNormal): pre_tanh_values, actions = dist.rsample_with_pre_tanh_value() log_probs = dist.log_prob(actions, pre_tanh_values) actions = actions.detach().cpu().numpy() infos = { k: v.detach().cpu().numpy() for (k, v) in info.items() } infos['pre_tanh_value'] = pre_tanh_values.detach().cpu().numpy() infos['log_prob'] = log_probs.detach().cpu().numpy() else: actions = dist.sample() log_probs = dist.log_prob(actions) actions = actions.detach().cpu().numpy() infos = { k: v.detach().cpu().numpy() for (k, v) in info.items() } infos['log_prob'] = log_probs.detach().cpu().numpy() return actions, infos def get_actions(self, observations): if self._force_use_mode_actions: actions, info = self.get_mode_actions(observations) else: actions, info = self.get_sample_actions(observations) if self._clip_action: epsilon = 1e-6 actions = np.clip( actions, self.env_spec.action_space.low + epsilon, self.env_spec.action_space.high - epsilon, ) return actions, info
7,491
39.717391
103
py
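When clip_action is set, PolicyEx.get_actions above nudges sampled actions strictly inside the action-space bounds by an epsilon margin. A tiny numpy sketch of that clipping; the bounds are hypothetical stand-ins for env_spec.action_space.low and high:

import numpy as np

low = np.array([-1.0, -1.0])             # stand-in for env_spec.action_space.low
high = np.array([1.0, 1.0])              # stand-in for env_spec.action_space.high
epsilon = 1e-6

actions = np.array([[1.2, -0.3],
                    [-1.0, 0.999999]])
clipped = np.clip(actions, low + epsilon, high - epsilon)
# rows now lie strictly inside (-1, 1), avoiding exact boundary values downstream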
CSD-locomotion
CSD-locomotion-master/iod/__init__.py
0
0
0
py
CSD-locomotion
CSD-locomotion-master/iod/iod.py
import copy import gc import numpy as np import torch from matplotlib import cm import global_context import dowel_wrapper from dowel import Histogram from garage import TrajectoryBatch from garage.misc import tensor_utils from garage.np.algos.rl_algorithm import RLAlgorithm from garagei import log_performance_ex from garagei.torch.optimizers.optimizer_group_wrapper import OptimizerGroupWrapper from garagei.torch.utils import compute_total_norm, TrainContext from iod.utils import draw_2d_gaussians, get_option_colors, FigManager, MeasureAndAccTime, record_video, \ to_np_object_arr class IOD(RLAlgorithm): def __init__( self, *, env_spec, normalizer, normalizer_type, normalizer_mean, normalizer_std, normalized_env_eval_update, option_policy, traj_encoder, dist_predictor, dual_lam, optimizer, alpha, max_path_length, max_optimization_epochs, n_epochs_per_eval, n_epochs_per_first_n_eval, custom_eval_steps, n_epochs_per_log, n_epochs_per_tb, n_epochs_per_save, n_epochs_per_pt_save, n_epochs_per_pkl_update, dim_option, num_eval_options, num_eval_trajectories_per_option, num_random_trajectories, eval_record_video, video_skip_frames, eval_deterministic_traj, eval_deterministic_video, eval_plot_axis, name='IOD', device=torch.device('cpu'), num_train_per_epoch=1, discount=0.99, record_metric_difference=True, te_max_optimization_epochs=None, te_trans_optimization_epochs=None, trans_minibatch_size=None, trans_optimization_epochs=None, discrete=False, ): self.discount = discount self.max_path_length = max_path_length self.max_optimization_epochs = max_optimization_epochs self.device = device self.normalizer = normalizer self.normalizer_type = normalizer_type self.normalized_env_eval_update = normalized_env_eval_update self.option_policy = option_policy.to(self.device) self.traj_encoder = traj_encoder.to(self.device) self.dist_predictor = dist_predictor.to(self.device) self.dual_lam = dual_lam.to(self.device) self.param_modules = { 'traj_encoder': self.traj_encoder, 'option_policy': self.option_policy, 'dist_predictor': self.dist_predictor, 'dual_lam': self.dual_lam, } self.alpha = alpha self.name = name self.dim_option = dim_option self._num_train_per_epoch = num_train_per_epoch self._env_spec = env_spec self.n_epochs_per_eval = n_epochs_per_eval self.n_epochs_per_first_n_eval = n_epochs_per_first_n_eval self.custom_eval_steps = custom_eval_steps self.n_epochs_per_log = n_epochs_per_log self.n_epochs_per_tb = n_epochs_per_tb self.n_epochs_per_save = n_epochs_per_save self.n_epochs_per_pt_save = n_epochs_per_pt_save self.n_epochs_per_pkl_update = n_epochs_per_pkl_update self.num_eval_options = num_eval_options self.num_eval_trajectories_per_option = num_eval_trajectories_per_option self.num_random_trajectories = num_random_trajectories self.eval_record_video = eval_record_video self.video_skip_frames = video_skip_frames self.eval_deterministic_traj = eval_deterministic_traj self.eval_deterministic_video = eval_deterministic_video self.eval_plot_axis = eval_plot_axis assert isinstance(optimizer, OptimizerGroupWrapper) self._optimizer = optimizer self._record_metric_difference = record_metric_difference self._cur_max_path_length = max_path_length self.te_max_optimization_epochs = te_max_optimization_epochs self.te_trans_optimization_epochs = te_trans_optimization_epochs self._trans_minibatch_size = trans_minibatch_size self._trans_optimization_epochs = trans_optimization_epochs self._cur_obs_mean = None self._cur_obs_std = None if self.normalizer_type == 'manual': self._cur_obs_mean = 
np.full(self._env_spec.observation_space.flat_dim, normalizer_mean) self._cur_obs_std = np.full(self._env_spec.observation_space.flat_dim, normalizer_std) else: # Set to the default value self._cur_obs_mean = np.full(self._env_spec.observation_space.flat_dim, 0.) self._cur_obs_std = np.full(self._env_spec.observation_space.flat_dim, 1.) self.discrete = discrete self.traj_encoder.eval() @property def policy(self): raise NotImplementedError() def all_parameters(self): for m in self.param_modules.values(): for p in m.parameters(): yield p def train_once(self, itr, paths, runner, extra_scalar_metrics={}): """Train the algorithm once. Args: itr (int): Iteration number. paths (list[dict]): A list of collected paths. Returns: numpy.float64: Calculated mean value of undiscounted returns. """ # Actually itr + 1 is correct (to match with step_epochs' logging period) logging_enabled = ((runner.step_itr + 1) % self.n_epochs_per_log == 0) data = self.process_samples(paths, training=True, logging_enabled=logging_enabled) time_computing_metrics = [0.0] time_training = [0.0] if logging_enabled: metrics_from_processing = data.pop('metrics') with torch.no_grad(), MeasureAndAccTime(time_computing_metrics): tensors_before, _ = self._compute_common_tensors(data, compute_extra_metrics=True, op_compute_chunk_size=self._optimizer._minibatch_size) gc.collect() with MeasureAndAccTime(time_training): self._train_once_inner(data) performence = log_performance_ex( itr, TrajectoryBatch.from_trajectory_list(self._env_spec, paths), discount=self.discount, ) discounted_returns = performence['discounted_returns'] undiscounted_returns = performence['undiscounted_returns'] if logging_enabled: with torch.no_grad(), MeasureAndAccTime(time_computing_metrics): tensors_after, _ = self._compute_common_tensors(data, compute_extra_metrics=True, op_compute_chunk_size=self._optimizer._minibatch_size) gc.collect() prefix_tabular = global_context.get_metric_prefix() with dowel_wrapper.get_tabular().prefix(prefix_tabular + self.name + '/'), dowel_wrapper.get_tabular( 'plot').prefix(prefix_tabular + self.name + '/'): def _record_scalar(key, val): dowel_wrapper.get_tabular().record(key, val) def _record_histogram(key, val): dowel_wrapper.get_tabular('plot').record(key, Histogram(val)) for k in tensors_before.keys(): if tensors_before[k].numel() == 1: _record_scalar(f'{k}Before', tensors_before[k].item()) if self._record_metric_difference: _record_scalar(f'{k}After', tensors_after[k].item()) _record_scalar(f'{k}Decrease', (tensors_before[k] - tensors_after[k]).item()) else: _record_scalar(f'{k}Before', np.array2string(tensors_before[k].detach().cpu().numpy(), suppress_small=True)) # _record_histogram(f'{k}Before', tensors_before[k].detach().cpu().numpy()) with torch.no_grad(): total_norm = compute_total_norm(self.all_parameters()) _record_scalar('TotalGradNormAll', total_norm.item()) for key, module in self.param_modules.items(): total_norm = compute_total_norm(module.parameters()) _record_scalar(f'TotalGradNorm{key.replace("_", " ").title().replace(" ", "")}', total_norm.item()) for k, v in extra_scalar_metrics.items(): _record_scalar(k, v) _record_scalar('TimeComputingMetrics', time_computing_metrics[0]) _record_scalar('TimeTraining', time_training[0]) path_lengths = [ len(path['actions']) for path in paths ] _record_scalar('PathLengthMean', np.mean(path_lengths)) _record_scalar('PathLengthMax', np.max(path_lengths)) _record_scalar('PathLengthMin', np.min(path_lengths)) _record_histogram('ExternalDiscountedReturns', 
np.asarray(discounted_returns)) _record_histogram('ExternalUndiscountedReturns', np.asarray(undiscounted_returns)) for k, v in metrics_from_processing.items(): _record_scalar(k, v) return np.mean(undiscounted_returns) def train(self, runner): """Obtain samplers and start actual training for each epoch. Args: runner (LocalRunnerTraj): LocalRunnerTraj is passed to give algorithm the access to runner.step_epochs(), which provides services such as snapshotting and sampler control. Returns: float: The average return in last epoch cycle. """ last_return = None with global_context.GlobalContext({'phase': 'train', 'policy': 'sampling'}): for _ in runner.step_epochs( full_tb_epochs=0, log_period=self.n_epochs_per_log, tb_period=self.n_epochs_per_tb, pt_save_period=self.n_epochs_per_pt_save, pkl_update_period=self.n_epochs_per_pkl_update, new_save_period=self.n_epochs_per_save, ): for p in self.policy.values(): p.eval() self.traj_encoder.eval() eval_policy = ( (self.n_epochs_per_eval != 0 and runner.step_itr % self.n_epochs_per_eval == 0) or (self.n_epochs_per_eval != 0 and self.n_epochs_per_first_n_eval is not None and runner.step_itr < self.n_epochs_per_eval and runner.step_itr % self.n_epochs_per_first_n_eval == 0) or (self.custom_eval_steps is not None and runner.step_itr in self.custom_eval_steps) ) if eval_policy: eval_preparation = self._prepare_for_evaluate_policy(runner) self._evaluate_policy(runner, **eval_preparation) self._log_eval_metrics(runner) for p in self.policy.values(): p.train() self.traj_encoder.train() for _ in range(self._num_train_per_epoch): time_sampling = [0.0] with MeasureAndAccTime(time_sampling): runner.step_path = self._get_train_trajectories(runner) last_return = self.train_once( runner.step_itr, runner.step_path, runner, extra_scalar_metrics={ 'TimeSampling': time_sampling[0], }, ) gc.collect() runner.step_itr += 1 return last_return def _get_trajectories(self, runner, sampler_key, batch_size=None, extras=None, update_stats=False, update_normalizer=False, update_normalizer_override=False, worker_update=None, max_path_length_override=None, env_update=None): if batch_size is None: batch_size = len(extras) policy_sampler_key = sampler_key[6:] if sampler_key.startswith('local_') else sampler_key time_get_trajectories = [0.0] with MeasureAndAccTime(time_get_trajectories): trajectories, infos = runner.obtain_exact_trajectories( runner.step_itr, sampler_key=sampler_key, batch_size=batch_size, agent_update=self._get_policy_param_values_cpu(policy_sampler_key), env_update=env_update, worker_update=worker_update, update_normalized_env_ex=update_normalizer if self.normalizer_type == 'garage_ex' else None, get_attrs=['env._obs_mean', 'env._obs_var'], extras=extras, max_path_length_override=max_path_length_override, update_stats=update_stats, ) print(f'_get_trajectories({sampler_key}) {time_get_trajectories[0]}s') for traj in trajectories: for key in ['ori_obs', 'next_ori_obs', 'coordinates', 'next_coordinates']: if key not in traj['env_infos']: continue if self.normalizer_type == 'garage_ex' and update_normalizer: self._set_updated_normalized_env_ex(runner, infos) if self.normalizer_type == 'consistent' and update_normalizer: self._set_updated_normalizer(runner, trajectories, update_normalizer_override) return trajectories def _get_train_trajectories(self, runner, burn_in=False): default_kwargs = dict( runner=runner, update_stats=not burn_in, update_normalizer=True, update_normalizer_override=burn_in, max_path_length_override=self._cur_max_path_length, worker_update=dict( 
_deterministic_initial_state=False, _deterministic_policy=False, ), env_update=dict(_action_noise_std=None), ) kwargs = dict(default_kwargs, **self._get_train_trajectories_kwargs(runner)) paths = self._get_trajectories(**kwargs) return paths def process_samples(self, paths, training=False, logging_enabled=True): r"""Process sample data based on the collected paths.""" def _to_torch_float32(x): if x.dtype == np.object: return np.array([torch.tensor(i, dtype=torch.float32, device=self.device) for i in x], dtype=np.object) return torch.tensor(x, dtype=torch.float32, device=self.device) valids = np.asarray([len(path['actions'][:self._cur_max_path_length]) for path in paths]) obs = to_np_object_arr( [_to_torch_float32(path['observations'][:self._cur_max_path_length]) for path in paths]) ori_obs = to_np_object_arr( [_to_torch_float32(path['env_infos']['ori_obs'][:self._cur_max_path_length]) for path in paths]) next_obs = to_np_object_arr( [_to_torch_float32(path['next_observations'][:self._cur_max_path_length]) for path in paths]) next_ori_obs = to_np_object_arr( [_to_torch_float32(path['env_infos']['next_ori_obs'][:self._cur_max_path_length]) for path in paths]) actions = to_np_object_arr( [_to_torch_float32(path['actions'][:self._cur_max_path_length]) for path in paths]) rewards = to_np_object_arr( [_to_torch_float32(path['rewards'][:self._cur_max_path_length]) for path in paths]) returns = to_np_object_arr( [_to_torch_float32(tensor_utils.discount_cumsum(path['rewards'][:self._cur_max_path_length], self.discount).copy()) for path in paths]) dones = to_np_object_arr( [_to_torch_float32(path['dones'][:self._cur_max_path_length]) for path in paths]) data = dict( obs=obs, ori_obs=ori_obs, next_obs=next_obs, next_ori_obs=next_ori_obs, actions=actions, rewards=rewards, returns=returns, dones=dones, valids=valids, ) for key in paths[0]['agent_infos'].keys(): data[key] = to_np_object_arr([torch.tensor(path['agent_infos'][key][:self._cur_max_path_length], dtype=torch.float32, device=self.device) for path in paths]) for key in ['option']: if key not in data: continue next_key = f'next_{key}' data[next_key] = copy.deepcopy(data[key]) for i in range(len(data[next_key])): cur_data = data[key][i] data[next_key][i] = torch.cat([cur_data[1:], cur_data[-1:]], dim=0) if logging_enabled: data['metrics'] = dict() return data def _get_policy_param_values_cpu(self, key): param_dict = self.policy[key].get_param_values() for k in param_dict.keys(): param_dict[k] = param_dict[k].detach().cpu() return param_dict def _generate_option_extras(self, options): return [{'option': option} for option in options] def _gradient_descent(self, loss, optimizer_keys): self._optimizer.zero_grad(keys=optimizer_keys) loss.backward() self._optimizer.step(keys=optimizer_keys) def _get_mini_tensors(self, tensors, internal_vars, num_transitions, trans_minibatch_size): idxs = np.random.choice(num_transitions, trans_minibatch_size) mini_tensors = {} mini_internal_vars = {} for k, v in tensors.items(): try: if len(v) == num_transitions: mini_tensors[k] = v[idxs] except TypeError: pass for k, v in internal_vars.items(): try: if len(v) == num_transitions: mini_internal_vars[k] = v[idxs] except TypeError: pass return mini_tensors, mini_internal_vars def _compute_common_tensors(self, data, *, compute_extra_metrics=False, op_compute_chunk_size=None): tensors = {} # contains tensors to be logged, including losses. internal_vars = { # contains internal variables. 
'maybe_no_grad': {}, } self._update_inputs(data, tensors, internal_vars) return tensors, internal_vars def _update_inputs(self, data, tensors, v): obs = list(data['obs']) next_obs = list(data['next_obs']) actions = list(data['actions']) valids = list(data['valids']) dones = list(data['dones']) rewards = list(data['rewards']) if 'log_prob' in data: log_probs = list(data['log_prob']) else: log_probs = None num_trajs = len(obs) valids_t = torch.tensor(data['valids'], device=self.device) valids_t_f32 = valids_t.to(torch.float32) max_traj_length = valids_t.max().item() obs_flat = torch.cat(obs, dim=0) next_obs_flat = torch.cat(next_obs, dim=0) actions_flat = torch.cat(actions, dim=0) dones_flat = torch.cat(dones, dim=0).to(torch.int) rewards_flat = torch.cat(rewards, dim=0) if log_probs is not None: log_probs_flat = torch.cat(log_probs, dim=0) else: log_probs_flat = None if 'pre_tanh_value' in data: pre_tanh_values = list(data['pre_tanh_value']) pre_tanh_values_flat = torch.cat(pre_tanh_values, dim=0) dims_action = actions_flat.size()[1:] assert obs_flat.ndim == 2 dim_obs = obs_flat.size(1) num_transitions = actions_flat.size(0) traj_encoder_extra_kwargs = dict() cat_obs_flat = obs_flat next_cat_obs_flat = next_obs_flat v.update({ 'obs': obs, 'obs_flat': obs_flat, 'next_obs_flat': next_obs_flat, 'cat_obs_flat': cat_obs_flat, 'next_cat_obs_flat': next_cat_obs_flat, 'actions_flat': actions_flat, 'valids': valids, 'valids_t': valids_t, 'valids_t_f32': valids_t_f32, 'dones_flat': dones_flat, 'rewards_flat': rewards_flat, 'log_probs_flat': log_probs_flat, 'dim_obs': dim_obs, 'dims_action': dims_action, 'num_trajs': num_trajs, 'num_transitions': num_transitions, 'max_traj_length': max_traj_length, 'traj_encoder_extra_kwargs': traj_encoder_extra_kwargs, }) if 'pre_tanh_value' in data: v.update({ 'pre_tanh_values_flat': pre_tanh_values_flat, }) def _set_updated_normalizer(self, runner, paths, override=False): original_obs = [torch.tensor( path['env_infos']['original_observations'], dtype=torch.float32 ) for path in paths] original_obs_flat = torch.cat(original_obs, dim=0) self.normalizer.update(original_obs_flat, override) runner.set_hanging_env_update( dict( _obs_mean=self.normalizer.mean, _obs_var=self.normalizer.var, ), sampler_keys=['option_policy', 'local_option_policy'], ) def _set_updated_normalized_env_ex(self, runner, infos): mean = np.mean(infos['env._obs_mean'], axis=0) var = np.mean(infos['env._obs_var'], axis=0) self._cur_obs_mean = mean self._cur_obs_std = var ** 0.5 runner.set_hanging_env_update( dict( _obs_mean=mean, _obs_var=var, ), sampler_keys=['option_policy', 'local_option_policy'], ) def _get_coordinates_trajectories(self, trajectories, include_last): coordinates_trajectories = [] for trajectory in trajectories: if trajectory['env_infos']['coordinates'].dtype == np.object: coords = np.concatenate(trajectory['env_infos']['coordinates'], axis=0) if include_last: coords = np.concatenate([ coords, [trajectory['env_infos']['next_coordinates'][-1][-1]], ]) elif trajectory['env_infos']['coordinates'].ndim == 2: coords = trajectory['env_infos']['coordinates'] if include_last: coords = np.concatenate([ coords, [trajectory['env_infos']['next_coordinates'][-1]] ]) elif trajectory['env_infos']['coordinates'].ndim > 2: coords = trajectory['env_infos']['coordinates'].reshape(-1, 2) if include_last: coords = np.concatenate([ coords, trajectory['env_infos']['next_coordinates'].reshape(-1, 2)[-1:] ]) coordinates_trajectories.append(np.asarray(coords)) return coordinates_trajectories def 
_get_sp_options_at_timesteps(self, data, use_zero_options=False): sp_obs = data['obs'] if use_zero_options: zero_options = np.zeros((len(sp_obs), self.dim_option)) return zero_options, np.ones((len(sp_obs), self.dim_option)), zero_options last_obs = torch.stack([sp_ob[-1] for sp_ob in sp_obs]) sp_option_dists = self.traj_encoder(last_obs) sp_option_means = sp_option_dists.mean.detach().cpu().numpy() if self.inner: sp_option_stddevs = torch.ones_like(sp_option_dists.stddev.detach().cpu()).numpy() else: sp_option_stddevs = sp_option_dists.stddev.detach().cpu().numpy() sp_option_samples = sp_option_dists.mean.detach().cpu().numpy() # Plot from means return sp_option_means, sp_option_stddevs, sp_option_samples def _log_eval_metrics(self, runner): runner.eval_log_diagnostics() runner.plot_log_diagnostics()
24,795
39.51634
169
py
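process_samples above computes per-path returns with tensor_utils.discount_cumsum and self.discount. A minimal reference loop for that discounted cumulative sum with a worked example; this is an equivalent sketch, not garage's implementation:

import numpy as np

def discount_cumsum(rewards, discount):
    # out[t] = rewards[t] + discount * rewards[t + 1] + discount**2 * rewards[t + 2] + ...
    out = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + discount * running
        out[t] = running
    return out

print(discount_cumsum(np.array([1.0, 1.0, 1.0]), 0.99))   # [2.9701, 1.99, 1.0]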
CSD-locomotion
CSD-locomotion-master/iod/lsd.py
import numpy as np import torch import global_context from garage import TrajectoryBatch from garagei import log_performance_ex from iod import sac_utils from iod.iod import IOD import copy from iod.utils import get_torch_concat_obs, to_np_object_arr, FigManager, get_option_colors, record_video, \ draw_2d_gaussians class LSD(IOD): def __init__( self, *, qf1, qf2, log_alpha, tau, scale_reward, target_coef, update_target_per_gradient, replay_buffer, min_buffer_size, inner, dual_reg, dual_slack, dual_dist, **kwargs, ): super().__init__(**kwargs) self.qf1 = qf1.to(self.device) self.qf2 = qf2.to(self.device) self.target_qf1 = copy.deepcopy(self.qf1) self.target_qf2 = copy.deepcopy(self.qf2) self.log_alpha = log_alpha.to(self.device) self.tau = tau self.update_target_per_gradient = update_target_per_gradient self.replay_buffer = replay_buffer self.min_buffer_size = min_buffer_size self.inner = inner self.dual_reg = dual_reg self.dual_slack = dual_slack self.dual_dist = dual_dist self.param_modules.update( qf1=self.qf1, qf2=self.qf2, log_alpha=self.log_alpha ) self._reward_scale_factor = scale_reward self._target_entropy = -np.prod(self._env_spec.action_space.shape).item() / 2. * target_coef @property def policy(self): return { 'option_policy': self.option_policy, } def _get_concat_obs(self, obs, option): return get_torch_concat_obs(obs, option) def _get_train_trajectories_kwargs(self, runner): if self.discrete: extras = self._generate_option_extras(np.eye(self.dim_option)[np.random.randint(0, self.dim_option, runner._train_args.batch_size)]) else: random_options = np.random.randn(runner._train_args.batch_size, self.dim_option) extras = self._generate_option_extras(random_options) return dict( extras=extras, sampler_key='option_policy', ) def _update_inputs(self, data, tensors, v): super()._update_inputs(data, tensors, v) options = list(data['option']) traj_options = torch.stack([x[0] for x in options], dim=0) assert traj_options.size() == (v['num_trajs'], self.dim_option) options_flat = torch.cat(options, dim=0) cat_obs_flat = self._get_concat_obs(v['obs_flat'], options_flat) next_options = list(data['next_option']) next_options_flat = torch.cat(next_options, dim=0) next_cat_obs_flat = self._get_concat_obs(v['next_obs_flat'], next_options_flat) v.update({ 'traj_options': traj_options, 'options_flat': options_flat, 'cat_obs_flat': cat_obs_flat, 'next_cat_obs_flat': next_cat_obs_flat, }) def _update_replay_buffer(self, data): if self.replay_buffer is not None: # Add paths to the replay buffer for i in range(len(data['actions'])): path = {} for key in data.keys(): cur_list = data[key][i] if isinstance(cur_list, torch.Tensor): cur_list = cur_list.detach().cpu().numpy() if cur_list.ndim == 1: cur_list = cur_list[..., np.newaxis] elif cur_list.ndim == 0: # valids continue path[key] = cur_list self.replay_buffer.add_path(path) def _sample_replay_buffer(self): samples = self.replay_buffer.sample_transitions(self._trans_minibatch_size) data = {} for key, value in samples.items(): if value.shape[1] == 1: value = np.squeeze(value, axis=1) data[key] = to_np_object_arr([torch.from_numpy(value).float().to(self.device)]) data['valids'] = [self._trans_minibatch_size] self._compute_reward(data) assert len(data['obs']) == 1 assert self.normalizer_type not in ['consistent', 'garage_ex'] tensors = {} internal_vars = { 'maybe_no_grad': {}, } self._update_inputs(data, tensors, internal_vars) return data, tensors, internal_vars def _train_once_inner(self, data): self._update_replay_buffer(data) 
self._compute_reward(data) for minibatch in self._optimizer.get_minibatch(data, max_optimization_epochs=self.max_optimization_epochs[0]): self._train_op_with_minibatch(minibatch) for minibatch in self._optimizer.get_minibatch(data, max_optimization_epochs=self.te_max_optimization_epochs): self._train_te_with_minibatch(minibatch) if not self.update_target_per_gradient: sac_utils.update_targets(self) def _train_te_with_minibatch(self, data): tensors, internal_vars = self._compute_common_tensors(data) if self.te_trans_optimization_epochs is None: assert self.replay_buffer is None self._optimize_te(tensors, internal_vars) else: if self.replay_buffer is None: num_transitions = internal_vars['num_transitions'] for _ in range(self.te_trans_optimization_epochs): mini_tensors, mini_internal_vars = self._get_mini_tensors( tensors, internal_vars, num_transitions, self._trans_minibatch_size ) self._optimize_te(mini_tensors, mini_internal_vars) else: if self.replay_buffer.n_transitions_stored >= self.min_buffer_size: for i in range(self.te_trans_optimization_epochs): data, tensors, internal_vars = self._sample_replay_buffer() self._optimize_te(tensors, internal_vars) def _train_op_with_minibatch(self, data): tensors, internal_vars = self._compute_common_tensors(data) if self._trans_optimization_epochs is None: assert self.replay_buffer is None self._optimize_op(tensors, internal_vars) else: if self.replay_buffer is None: num_transitions = internal_vars['num_transitions'] for _ in range(self._trans_optimization_epochs): mini_tensors, mini_internal_vars = self._get_mini_tensors( tensors, internal_vars, num_transitions, self._trans_minibatch_size ) self._optimize_op(mini_tensors, mini_internal_vars) else: if self.replay_buffer.n_transitions_stored >= self.min_buffer_size: for _ in range(self._trans_optimization_epochs): data, tensors, internal_vars = self._sample_replay_buffer() self._optimize_op(tensors, internal_vars) def _optimize_te(self, tensors, internal_vars): self._update_loss_te(tensors, internal_vars) self._gradient_descent( tensors['LossTe'], optimizer_keys=['traj_encoder'], ) if self.dual_reg: self._update_loss_dual_lam(tensors, internal_vars) self._gradient_descent( tensors['LossDualLam'], optimizer_keys=['dual_lam'], ) if self.dual_dist != 'l2': self._gradient_descent( tensors['LossDp'], optimizer_keys=['dist_predictor'], ) def _optimize_op(self, tensors, internal_vars): self._update_loss_qf(tensors, internal_vars) self._gradient_descent( tensors['LossQf1'], optimizer_keys=['qf1'], ) self._gradient_descent( tensors['LossQf2'], optimizer_keys=['qf2'], ) self._update_loss_op(tensors, internal_vars) self._gradient_descent( tensors['LossSacp'], optimizer_keys=['option_policy'], ) self._update_loss_alpha(tensors, internal_vars) self._gradient_descent( tensors['LossAlpha'], optimizer_keys=['log_alpha'], ) if self.update_target_per_gradient: sac_utils.update_targets(self) def _compute_common_tensors(self, data, *, compute_extra_metrics=False, op_compute_chunk_size=None): tensors = {} internal_vars = {} self._update_inputs(data, tensors, internal_vars) if compute_extra_metrics: self._update_loss_te(tensors, internal_vars) if self.dual_reg: self._update_loss_dual_lam(tensors, internal_vars) self._compute_reward(data, metric_tensors=tensors) self._update_loss_qf(tensors, internal_vars) self._update_loss_op(tensors, internal_vars) self._update_loss_alpha(tensors, internal_vars) return tensors, internal_vars def _get_rewards(self, tensors, v): obs_flat = v['obs_flat'] next_obs_flat = v['next_obs_flat'] 
if self.inner: # Only use the mean of the distribution cur_z = self.traj_encoder(obs_flat).mean next_z = self.traj_encoder(next_obs_flat).mean target_z = next_z - cur_z if self.discrete: masks = (v['options_flat'] - v['options_flat'].mean(dim=1, keepdim=True)) * (self.dim_option) / (self.dim_option - 1 if self.dim_option != 1 else 1) rewards = (target_z * masks).sum(dim=1) else: inner = (target_z * v['options_flat']).sum(dim=1) rewards = inner # For dual LSD v.update({ 'cur_z': cur_z, 'next_z': next_z, }) else: target_dists = self.traj_encoder(next_obs_flat) if self.discrete: logits = target_dists.mean rewards = -torch.nn.functional.cross_entropy(logits, v['options_flat'].argmax(dim=1), reduction='none') else: rewards = target_dists.log_prob(v['options_flat']) tensors.update({ 'RewardMean': rewards.mean(), 'RewardStd': rewards.std(), }) return rewards def _update_loss_te(self, tensors, v): rewards = self._get_rewards(tensors, v) obs_flat = v['obs_flat'] next_obs_flat = v['next_obs_flat'] if self.dual_dist != 'l2': s2_dist = self.dist_predictor(obs_flat) loss_dp = -s2_dist.log_prob(next_obs_flat - obs_flat).mean() tensors.update({ 'LossDp': loss_dp, }) if self.dual_reg: dual_lam = self.dual_lam.param.exp() x = obs_flat y = next_obs_flat phi_x = v['cur_z'] phi_y = v['next_z'] if self.dual_dist == 'l2': cst_dist = torch.square(y - x).mean(dim=1) else: s2_dist = self.dist_predictor(obs_flat) s2_dist_mean = s2_dist.mean s2_dist_std = s2_dist.stddev scaling_factor = 1. / s2_dist_std geo_mean = torch.exp(torch.log(scaling_factor).mean(dim=1, keepdim=True)) normalized_scaling_factor = (scaling_factor / geo_mean) ** 2 cst_dist = torch.mean(torch.square((y - x) - s2_dist_mean) * normalized_scaling_factor, dim=1) tensors.update({ 'ScalingFactor': scaling_factor.mean(dim=0), 'NormalizedScalingFactor': normalized_scaling_factor.mean(dim=0), }) cst_penalty = cst_dist - torch.square(phi_y - phi_x).mean(dim=1) cst_penalty = torch.clamp(cst_penalty, max=self.dual_slack) rewards = rewards + dual_lam.detach() * cst_penalty v.update({ 'cst_penalty': cst_penalty }) tensors.update({ 'DualCstPenalty': cst_penalty.mean(), }) reward_mean = rewards.mean() loss_te = -reward_mean v.update({ 'rewards': rewards, 'reward_mean': reward_mean, }) tensors.update({ 'LossTe': loss_te, }) def _update_loss_dual_lam(self, tensors, v): log_dual_lam = self.dual_lam.param dual_lam = log_dual_lam.exp() loss_dual_lam = log_dual_lam * (v['cst_penalty'].detach()).mean() tensors.update({ 'DualLam': dual_lam, 'LossDualLam': loss_dual_lam, }) def _update_loss_qf(self, tensors, v): processed_cat_obs_flat = self.option_policy.process_observations(v['cat_obs_flat']) next_processed_cat_obs_flat = self.option_policy.process_observations(v['next_cat_obs_flat']) sac_utils.update_loss_qf( self, tensors, v, obs_flat=processed_cat_obs_flat, actions_flat=v['actions_flat'], next_obs_flat=next_processed_cat_obs_flat, dones_flat=v['dones_flat'], rewards_flat=v['rewards_flat'] * self._reward_scale_factor, policy=self.option_policy, ) v.update({ 'processed_cat_obs_flat': processed_cat_obs_flat, 'next_processed_cat_obs_flat': next_processed_cat_obs_flat, }) def _update_loss_op(self, tensors, v): sac_utils.update_loss_sacp( self, tensors, v, obs_flat=v['processed_cat_obs_flat'], policy=self.option_policy, ) def _update_loss_alpha(self, tensors, v): sac_utils.update_loss_alpha( self, tensors, v, ) def _compute_reward(self, data, metric_tensors=None): tensors = {} v = {} self._update_inputs(data, tensors, v) with torch.no_grad(): rewards = 
self._get_rewards(tensors, v) if metric_tensors is not None: metric_tensors.update({ 'LsdTotalRewards': rewards.mean(), }) rewards = rewards.split(v['valids'], dim=0) data['rewards'] = to_np_object_arr(rewards) def _prepare_for_evaluate_policy(self, runner): return {} def _evaluate_policy(self, runner, **kwargs): if self.discrete: random_options = np.eye(self.dim_option) random_options = random_options.repeat(self.num_eval_trajectories_per_option, axis=0) colors = np.arange(0, self.dim_option) colors = colors.repeat(self.num_eval_trajectories_per_option, axis=0) num_evals = len(random_options) from matplotlib import cm cmap = 'tab10' if self.dim_option <= 10 else 'tab20' random_option_colors = [] for i in range(num_evals): random_option_colors.extend([cm.get_cmap(cmap)(colors[i])[:3]]) random_option_colors = np.array(random_option_colors) else: random_options = np.random.randn(self.num_random_trajectories, self.dim_option) random_option_colors = get_option_colors(random_options * 4) random_op_trajectories = self._get_trajectories( runner, sampler_key='option_policy', extras=self._generate_option_extras(random_options), max_path_length_override=self._cur_max_path_length, worker_update=dict( _deterministic_initial_state=False, _deterministic_policy=self.eval_deterministic_traj, ), env_update=dict(_action_noise_std=None), ) with FigManager(runner, 'TrajPlot_RandomZ') as fm: runner._env.render_trajectories( random_op_trajectories, random_option_colors, self.eval_plot_axis, fm.ax ) sp_trajectories = random_op_trajectories data = self.process_samples(sp_trajectories) use_zero_options = False sp_option_means, sp_option_stddevs, sp_option_samples = self._get_sp_options_at_timesteps( data, use_zero_options=use_zero_options, ) sp_option_colors = random_option_colors sp_option_sample_colors = random_option_colors if self.dim_option == 2: with FigManager(runner, f'PhiPlot') as fm: draw_2d_gaussians(sp_option_means, sp_option_stddevs, sp_option_colors, fm.ax) draw_2d_gaussians( sp_option_samples, [[0.03, 0.03]] * len(sp_option_samples), sp_option_sample_colors, fm.ax, fill=True, use_adaptive_axis=True, ) else: with FigManager(runner, f'PhiPlot') as fm: draw_2d_gaussians(sp_option_means[:, :2], sp_option_stddevs[:, :2], sp_option_colors, fm.ax) draw_2d_gaussians( sp_option_samples[:, :2], [[0.03, 0.03]] * len(sp_option_samples), sp_option_sample_colors, fm.ax, fill=True, ) if self.eval_record_video: if self.discrete: random_options = np.eye(self.dim_option) random_options = random_options.repeat(2, axis=0) else: random_options = np.random.randn(9, self.dim_option) random_options = random_options.repeat(2, axis=0) video_op_trajectories = self._get_trajectories( runner, sampler_key='local_option_policy', extras=self._generate_option_extras(random_options), worker_update=dict( _render=True, _deterministic_initial_state=False, _deterministic_policy=self.eval_deterministic_video, ), ) record_video(runner, 'Video_RandomZ', video_op_trajectories, skip_frames=self.video_skip_frames) with global_context.GlobalContext({'phase': 'eval', 'policy': 'option'}): log_performance_ex( runner.step_itr, TrajectoryBatch.from_trajectory_list(self._env_spec, random_op_trajectories), discount=self.discount, additional_records=dict(), additional_prefix=type(runner._env.unwrapped).__name__, ) self._log_eval_metrics(runner)
18,933
35.552124
164
py
CSD-locomotion
CSD-locomotion-master/iod/sac_utils.py
import torch
from torch.nn import functional as F


def _clip_actions(algo, actions):
    epsilon = 1e-6
    lower = torch.from_numpy(algo._env_spec.action_space.low).to(algo.device) + epsilon
    upper = torch.from_numpy(algo._env_spec.action_space.high).to(algo.device) - epsilon

    clip_up = (actions > upper).float()
    clip_down = (actions < lower).float()
    with torch.no_grad():
        clip = ((upper - actions) * clip_up + (lower - actions) * clip_down)

    return actions + clip


def update_loss_qf(
        algo, tensors, v,
        obs_flat,
        actions_flat,
        next_obs_flat,
        dones_flat,
        rewards_flat,
        policy,
):
    with torch.no_grad():
        alpha = algo.log_alpha.param.exp()

    q1_pred = algo.qf1(obs_flat, actions_flat).flatten()
    q2_pred = algo.qf2(obs_flat, actions_flat).flatten()

    next_action_dists_flat, *_ = policy(next_obs_flat)
    if hasattr(next_action_dists_flat, 'rsample_with_pre_tanh_value'):
        new_next_actions_flat_pre_tanh, new_next_actions_flat = next_action_dists_flat.rsample_with_pre_tanh_value()
        new_next_action_log_probs = next_action_dists_flat.log_prob(
            new_next_actions_flat, pre_tanh_value=new_next_actions_flat_pre_tanh)
    else:
        new_next_actions_flat = next_action_dists_flat.rsample()
        new_next_actions_flat = _clip_actions(algo, new_next_actions_flat)
        new_next_action_log_probs = next_action_dists_flat.log_prob(new_next_actions_flat)

    target_q_values = torch.min(
        algo.target_qf1(next_obs_flat, new_next_actions_flat).flatten(),
        algo.target_qf2(next_obs_flat, new_next_actions_flat).flatten(),
    )
    target_q_values = target_q_values - alpha * new_next_action_log_probs
    target_q_values = target_q_values * algo.discount

    with torch.no_grad():
        q_target = rewards_flat + target_q_values * (1. - dones_flat)

    # critic loss weight: 0.5
    loss_qf1 = F.mse_loss(q1_pred, q_target) * 0.5
    loss_qf2 = F.mse_loss(q2_pred, q_target) * 0.5

    tensors.update({
        'QTargetsMean': q_target.mean(),
        'QTdErrsMean': ((q_target - q1_pred).mean() + (q_target - q2_pred).mean()) / 2,
        'LossQf1': loss_qf1,
        'LossQf2': loss_qf2,
    })


def update_loss_sacp(
        algo, tensors, v,
        obs_flat,
        policy,
):
    with torch.no_grad():
        alpha = algo.log_alpha.param.exp()

    action_dists_flat, *_ = policy(obs_flat)
    if hasattr(action_dists_flat, 'rsample_with_pre_tanh_value'):
        new_actions_flat_pre_tanh, new_actions_flat = action_dists_flat.rsample_with_pre_tanh_value()
        new_action_log_probs = action_dists_flat.log_prob(
            new_actions_flat, pre_tanh_value=new_actions_flat_pre_tanh)
    else:
        new_actions_flat = action_dists_flat.rsample()
        new_actions_flat = _clip_actions(algo, new_actions_flat)
        new_action_log_probs = action_dists_flat.log_prob(new_actions_flat)

    min_q_values = torch.min(
        algo.qf1(obs_flat, new_actions_flat).flatten(),
        algo.qf2(obs_flat, new_actions_flat).flatten(),
    )

    loss_sacp = (alpha * new_action_log_probs - min_q_values).mean()

    tensors.update({
        'SacpNewActionLogProbMean': new_action_log_probs.mean(),
        'LossSacp': loss_sacp,
    })

    v.update({
        'new_action_log_probs': new_action_log_probs,
    })


def update_loss_alpha(
        algo, tensors, v,
):
    loss_alpha = (-algo.log_alpha.param * (
            v['new_action_log_probs'].detach() + algo._target_entropy
    )).mean()

    tensors.update({
        'Alpha': algo.log_alpha.param.exp(),
        'LossAlpha': loss_alpha,
    })


def update_targets(algo):
    """Update parameters in the target q-functions."""
    target_qfs = [algo.target_qf1, algo.target_qf2]
    qfs = [algo.qf1, algo.qf2]
    for target_qf, qf in zip(target_qfs, qfs):
        for t_param, param in zip(target_qf.parameters(), qf.parameters()):
            t_param.data.copy_(t_param.data * (1.0 - algo.tau) + param.data * algo.tau)
4,046
33.008403
137
py
CSD-locomotion
CSD-locomotion-master/iod/utils.py
import copy import pathlib import time import dowel_wrapper import akro import numpy as np import torch import platform from PIL import Image if 'macOS' in platform.platform(): import os os.environ["IMAGEIO_FFMPEG_EXE"] = '/opt/homebrew/bin/ffmpeg' from moviepy import editor as mpy from garage.envs import EnvSpec from garage.misc.tensor_utils import discount_cumsum from matplotlib import figure from matplotlib.patches import Ellipse from sklearn import decomposition class EnvSpecEx(EnvSpec): def __init__(self, observation_space, action_space, pure_observation_space, option_space, ): super().__init__(observation_space, action_space) self.pure_observation_space = pure_observation_space self.option_space = option_space def to_np_object_arr(x): arr = np.empty(len(x), dtype=np.object) for i, t in enumerate(x): arr[i] = t return arr def make_env_spec_for_option_policy(env_spec, num_option_params, use_option=True): option_space = None if use_option: option_space = akro.Box(low=-np.inf, high=np.inf, shape=(num_option_params,)) space = akro.concat(env_spec.observation_space, option_space) else: space = env_spec.observation_space new_spec = EnvSpecEx( action_space=env_spec.action_space, observation_space=space, pure_observation_space=env_spec.observation_space, option_space=option_space, ) return new_spec def get_torch_concat_obs(obs, option, dim=1): concat_obs = torch.cat([obs] + [option], dim=dim) return concat_obs def get_np_concat_obs(obs, option): concat_obs = np.concatenate([obs] + [option]) return concat_obs def get_normalizer_preset(normalizer_type): # Precomputed mean and std of the state dimensions from 10000 length-50 random rollouts (without early termination) if normalizer_type == 'off': normalizer_mean = np.array([0.]) normalizer_std = np.array([1.]) elif normalizer_type == 'half_cheetah_preset': normalizer_mean = np.array( [-0.07861924, -0.08627162, 0.08968642, 0.00960849, 0.02950368, -0.00948337, 0.01661406, -0.05476654, -0.04932635, -0.08061652, -0.05205841, 0.04500197, 0.02638421, -0.04570961, 0.03183838, 0.01736591, 0.0091929, -0.0115027]) normalizer_std = np.array( [0.4039283, 0.07610687, 0.23817, 0.2515473, 0.2698137, 0.26374814, 0.32229397, 0.2896734, 0.2774097, 0.73060024, 0.77360505, 1.5871304, 5.5405455, 6.7097645, 6.8253727, 6.3142195, 6.417641, 5.9759197]) elif normalizer_type == 'ant_preset': normalizer_mean = np.array( [0.00486117, 0.011312, 0.7022248, 0.8454677, -0.00102548, -0.00300276, 0.00311523, -0.00139029, 0.8607109, -0.00185301, -0.8556998, 0.00343217, -0.8585605, -0.00109082, 0.8558013, 0.00278213, 0.00618173, -0.02584622, -0.00599026, -0.00379596, 0.00526138, -0.0059213, 0.27686235, 0.00512205, -0.27617684, -0.0033233, -0.2766923, 0.00268359, 0.27756855]) normalizer_std = np.array( [0.62473416, 0.61958003, 0.1717569, 0.28629342, 0.20020866, 0.20572574, 0.34922406, 0.40098143, 0.3114514, 0.4024826, 0.31057045, 0.40343934, 0.3110796, 0.40245822, 0.31100526, 0.81786263, 0.8166509, 0.9870919, 1.7525449, 1.7468817, 1.8596431, 4.502961, 4.4070187, 4.522444, 4.3518476, 4.5105968, 4.3704205, 4.5175962, 4.3704395]) elif normalizer_type == 'humanoid_preset': normalizer_mean = np.array( [-8.1131503e-02, -7.3915249e-04, 9.5715916e-01, 9.5207644e-01, 2.0175683e-03, -6.3051097e-02, -1.2828799e-02, -5.4687279e-04, -2.4450898e-01, 7.7590477e-03, -3.2982033e-02, -1.7136147e-02, -1.7263800e-01, -1.6152242e+00, -3.4986842e-02, -3.4458160e-02, -1.6019167e-01, -1.5958424e+00, 3.0278003e-01, -2.7908441e-01, -3.4809363e-01, -2.9139769e-01, 2.8643531e-01, -3.4040874e-01, 
-3.8491020e-01, 2.6394178e-05, -1.2304888e+00, 3.6492027e-02, -6.8305099e-01, -8.6309865e-02, 9.3602976e-03, -5.4201365e-01, 1.1908096e-02, -9.6945368e-02, -4.0906958e-02, -3.0476081e-01, -3.3397417e+00, -8.6432390e-02, -6.1523411e-02, -2.6818362e-01, -3.3175933e+00, 7.4578458e-01, -9.6735454e-01, -1.1773691e+00, -7.7269357e-01, 9.5517111e-01, -1.1721193e+00]) normalizer_std = np.array( [0.12630117, 0.09309318, 0.31789413, 0.07312579, 0.12920779, 0.21994449, 0.1426761, 0.18718153, 0.43414274, 0.32560128, 0.1282181, 0.23556797, 0.4009979, 0.97610635, 0.12872458, 0.23611404, 0.4062315, 0.9686742, 0.3580939, 0.42217487, 0.49625927, 0.3586807, 0.4218451, 0.50105387, 0.5517619, 0.43790612, 0.8357725, 1.3804333, 2.4758842, 2.2540345, 3.15485, 4.4246655, 2.8681147, 2.6601605, 3.5328803, 5.8904147, 6.434801, 2.6590736, 3.5234997, 5.899381, 6.412176, 2.5906591, 3.0781884, 3.3108664, 2.5866294, 3.0885093, 3.2871766]) return normalizer_mean, normalizer_std def get_2d_colors(points, min_point, max_point): points = np.array(points) min_point = np.array(min_point) max_point = np.array(max_point) colors = (points - min_point) / (max_point - min_point) colors = np.hstack(( colors, (2 - np.sum(colors, axis=1, keepdims=True)) / 2, )) colors = np.clip(colors, 0, 1) colors = np.c_[colors, np.full(len(colors), 0.8)] return colors def get_option_colors(options, color_range=4): num_options = options.shape[0] dim_option = options.shape[1] if dim_option <= 2: # Use a predefined option color scheme if dim_option == 1: options_2d = [] d = 2. for i in range(len(options)): option = options[i][0] if option < 0: abs_value = -option options_2d.append((d - abs_value * d, d)) else: abs_value = option options_2d.append((d, d - abs_value * d)) options = np.array(options_2d) # options = np.c_[options, options] option_colors = get_2d_colors(options, (-color_range, -color_range), (color_range, color_range)) else: if dim_option > 3 and num_options >= 3: pca = decomposition.PCA(n_components=3) # Add random noises to break symmetry. 
pca_options = np.vstack((options, np.random.randn(dim_option, dim_option))) pca.fit(pca_options) option_colors = np.array(pca.transform(options)) elif dim_option > 3 and num_options < 3: option_colors = options[:, :3] elif dim_option == 3: option_colors = options # max_colors = np.max(option_colors, axis=0) # min_colors = np.min(option_colors, axis=0) max_colors = np.array([color_range] * 3) min_colors = np.array([-color_range] * 3) if all((max_colors - min_colors) > 0): option_colors = (option_colors - min_colors) / (max_colors - min_colors) option_colors = np.clip(option_colors, 0, 1) option_colors = np.c_[option_colors, np.full(len(option_colors), 0.8)] return option_colors def draw_2d_gaussians(means, stddevs, colors, ax, fill=False, alpha=0.8, use_adaptive_axis=False, draw_unit_gaussian=True, plot_axis=None): means = np.clip(means, -1000, 1000) stddevs = np.clip(stddevs, -1000, 1000) square_axis_limit = 2.0 if draw_unit_gaussian: ellipse = Ellipse(xy=(0, 0), width=2, height=2, edgecolor='r', lw=1, facecolor='none', alpha=0.5) ax.add_patch(ellipse) for mean, stddev, color in zip(means, stddevs, colors): if len(mean) == 1: mean = np.concatenate([mean, [0.]]) stddev = np.concatenate([stddev, [0.1]]) ellipse = Ellipse(xy=mean, width=stddev[0] * 2, height=stddev[1] * 2, edgecolor=color, lw=1, facecolor='none' if not fill else color, alpha=alpha) ax.add_patch(ellipse) square_axis_limit = max( square_axis_limit, np.abs(mean[0] + stddev[0]), np.abs(mean[0] - stddev[0]), np.abs(mean[1] + stddev[1]), np.abs(mean[1] - stddev[1]), ) square_axis_limit = square_axis_limit * 1.2 ax.axis('scaled') if plot_axis is None: if use_adaptive_axis: ax.set_xlim(-square_axis_limit, square_axis_limit) ax.set_ylim(-square_axis_limit, square_axis_limit) else: ax.set_xlim(-5, 5) ax.set_ylim(-5, 5) else: ax.axis(plot_axis) def prepare_video(v, n_cols=None): orig_ndim = v.ndim if orig_ndim == 4: v = v[None, ] #b, t, c, h, w = v.shape _, t, c, h, w = v.shape if v.dtype == np.uint8: v = np.float32(v) / 255. 
def is_power2(num): return num != 0 and ((num & (num - 1)) == 0) # pad to nearest power of 2, all at once # if not is_power2(v.shape[0]): # len_addition = int(2**v.shape[0].bit_length() - v.shape[0]) # v = np.concatenate( # (v, np.zeros(shape=(len_addition, t, c, h, w))), axis=0) # n_rows = 2**((b.bit_length() - 1) // 2) if n_cols is None: if v.shape[0] <= 3: n_cols = v.shape[0] elif v.shape[0] <= 9: n_cols = 3 else: n_cols = 6 if v.shape[0] % n_cols != 0: len_addition = n_cols - v.shape[0] % n_cols v = np.concatenate( (v, np.zeros(shape=(len_addition, t, c, h, w))), axis=0) n_rows = v.shape[0] // n_cols v = np.reshape(v, newshape=(n_rows, n_cols, t, c, h, w)) v = np.transpose(v, axes=(2, 0, 4, 1, 5, 3)) v = np.reshape(v, newshape=(t, n_rows * h, n_cols * w, c)) return v def save_video(runner, label, tensor, fps=15, n_cols=None): def _to_uint8(t): # If user passes in uint8, then we don't need to rescale by 255 if t.dtype != np.uint8: t = (t * 255.0).astype(np.uint8) return t if tensor.dtype in [np.object]: tensor = [_to_uint8(prepare_video(t, n_cols)) for t in tensor] else: tensor = prepare_video(tensor, n_cols) tensor = _to_uint8(tensor) # Encode sequence of images into gif string clip = mpy.ImageSequenceClip(list(tensor), fps=fps) plot_path = (pathlib.Path(runner._snapshotter.snapshot_dir) / 'plots' # / f'{label}_{runner.step_itr}.gif') / f'{label}_{runner.step_itr}.mp4') plot_path.parent.mkdir(parents=True, exist_ok=True) # clip.write_gif(plot_path, verbose=False, logger=None) clip.write_videofile(str(plot_path), audio=False, verbose=False, logger=None) def save_trajectories(runner, label, tensor, skip_frame=3, n_cols=None): epsilon = 1e-6 tensor = prepare_video(tensor, n_cols) image = np.ones(tensor.shape[1:]) for i, frame in enumerate(tensor): image_mask = (image.mean(axis=2) < 1. - epsilon).astype(int)[..., np.newaxis] frame_mask = (frame.mean(axis=2) < 1. - epsilon).astype(int)[..., np.newaxis] if i % skip_frame == 0: image = frame_mask * frame + (1 - frame_mask) * image image = image * 255. 
image = image.astype(np.uint8) image = np.clip(image, 0, 255) im = Image.fromarray(image, 'RGB') im.save("mujoco.png") def record_video(runner, label, trajectories, n_cols=None, skip_frames=1): renders = [] for trajectory in trajectories: render = trajectory['env_infos']['render'] if render.ndim >= 5: render = render.reshape(-1, *render.shape[-3:]) elif render.ndim == 1: render = np.concatenate(render, axis=0) renders.append(render) max_length = max([len(render) for render in renders]) for i, render in enumerate(renders): renders[i] = np.concatenate([render, np.zeros((max_length - render.shape[0], *render.shape[1:]), dtype=render.dtype)], axis=0) renders[i] = renders[i][::skip_frames] renders = np.array(renders) save_video(runner, label, renders, n_cols=n_cols) def get_ori_coords(ori, offset=0): if ori.ndim == 3: ori = np.concatenate(ori, axis=0) t = np.arange(0, len(ori)) + offset theta = np.arctan2(ori[:, 1], ori[:, 0]) for i in range(1, len(ori)): if theta[i] - theta[i - 1] > 5: theta[i:] -= 2 * np.pi elif theta[i] - theta[i - 1] < -5: theta[i:] += 2 * np.pi return np.c_[t, theta] class FigManager: def __init__(self, runner, label, extensions=None, subplot_spec=None): self.runner = runner self.label = label self.fig = figure.Figure() if subplot_spec is not None: self.ax = self.fig.subplots(*subplot_spec).flatten() else: self.ax = self.fig.add_subplot() if extensions is None: self.extensions = ['png'] else: self.extensions = extensions def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): plot_paths = [(pathlib.Path(self.runner._snapshotter.snapshot_dir) / 'plots' / f'{self.label}_{self.runner.step_itr}.{extension}') for extension in self.extensions] plot_paths[0].parent.mkdir(parents=True, exist_ok=True) for plot_path in plot_paths: self.fig.savefig(plot_path, dpi=300) dowel_wrapper.get_tabular('plot').record(self.label, self.fig) class MeasureAndAccTime: def __init__(self, target): assert isinstance(target, list) assert len(target) == 1 self._target = target def __enter__(self): self._time_enter = time.time() return self def __exit__(self, exc_type, exc_val, exc_tb): self._target[0] += (time.time() - self._time_enter) class Timer: def __init__(self): self.t = time.time() def __call__(self, msg='', *args, **kwargs): print(f'{msg}: {time.time() - self.t:.20f}') self.t = time.time() def valuewise_sequencify_dicts(dicts): result = dict((k, []) for k in dicts[0].keys()) for d in dicts: for k, v in d.items(): result[k].append(v) return result def zip_dict(d): keys = list(d.keys()) values = [d[k] for k in keys] for z in zip(*values): yield dict((k, v) for k, v in zip(keys, z)) def split_paths(paths, chunking_points): assert 0 in chunking_points assert len(chunking_points) >= 2 if len(chunking_points) == 2: return orig_paths = copy.copy(paths) paths.clear() for path in orig_paths: ei = path for s, e in zip(chunking_points[:-1], chunking_points[1:]): assert len(set( len(v) for k, v in path.items() if k not in ['env_infos', 'agent_infos'] )) == 1 new_path = { k: v[s:e] for k, v in path.items() if k not in ['env_infos', 'agent_infos'] } new_path['dones'][-1] = True assert len(set( len(v) for k, v in path['env_infos'].items() )) == 1 new_path['env_infos'] = { k: v[s:e] for k, v in path['env_infos'].items() } assert len(set( len(v) for k, v in path['agent_infos'].items() )) == 1 new_path['agent_infos'] = { k: v[s:e] for k, v in path['agent_infos'].items() } paths.append(new_path) class RunningMeanStd(object): def __init__(self, shape, keep_rate, init): # keep_rate < 0 
means cumulative average # keep_rate >= 0 means exponential moving average if keep_rate < 0 or init == 'zero_one': self._mean = np.zeros(shape, np.float64) self._var = np.ones(shape, np.float64) else: self._mean = None self._var = None self.count = 0 self.keep_rate = keep_rate self.init = init def update(self, arr: np.ndarray) -> None: batch_mean = np.mean(arr, axis=0, dtype=np.float64) batch_var = np.var(arr, axis=0, dtype=np.float64) batch_count = arr.shape[0] self.update_from_moments(batch_mean, batch_var, batch_count) def update_from_moments(self, batch_mean: np.ndarray, batch_var: np.ndarray, batch_count: int) -> None: if self.keep_rate < 0: delta = batch_mean - self._mean tot_count = self.count + batch_count new_mean = self._mean + delta * batch_count / tot_count m_a = self._var * self.count m_b = batch_var * batch_count m_2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count) new_var = m_2 / (self.count + batch_count) new_count = batch_count + self.count self._mean = new_mean self._var = new_var self.count = new_count else: if self._mean is None: self._mean = batch_mean self._var = batch_var else: self._mean = self._mean * self.keep_rate + batch_mean * (1 - self.keep_rate) self._var = self._var * self.keep_rate + batch_var * (1 - self.keep_rate) @property def mean(self): return self._mean.astype(np.float32) @property def var(self): return self._var.astype(np.float32) @property def std(self): return np.sqrt(self._var).astype(np.float32) def compute_traj_batch_performance(batch, discount): returns = [] undiscounted_returns = [] for trajectory in batch.split(): returns.append(discount_cumsum(trajectory.rewards, discount)) undiscounted_returns.append(sum(trajectory.rewards)) return dict( undiscounted_returns=undiscounted_returns, discounted_returns=[rtn[0] for rtn in returns], ) class RMS(object): """running mean and std """ def __init__(self, device, epsilon=1e-4, shape=(1,)): self.M = torch.zeros(shape).to(device) self.S = torch.ones(shape).to(device) self.n = epsilon def __call__(self, x): bs = x.size(0) delta = torch.mean(x, dim=0) - self.M new_M = self.M + delta * bs / (self.n + bs) new_S = (self.S * self.n + torch.var(x, dim=0) * bs + torch.square(delta) * self.n * bs / (self.n + bs)) / (self.n + bs) self.M = new_M self.S = new_S self.n += bs return self.M, self.S
19,058
35.302857
139
py
CSD-locomotion
CSD-locomotion-master/tests/__init__.py
0
0
0
py
CSD-locomotion
CSD-locomotion-master/tests/main.py
#!/usr/bin/env python3 import dowel_wrapper assert dowel_wrapper is not None import dowel import argparse import datetime import functools import os import torch.multiprocessing as mp import better_exceptions import numpy as np better_exceptions.hook() import torch from garage import wrap_experiment from garage.experiment.deterministic import set_seed from garage.torch.distributions import TanhNormal from garage.torch.q_functions import ContinuousMLPQFunction from garage.replay_buffer import PathBuffer from garagei.experiment.option_local_runner import OptionLocalRunner from garagei.envs.consistent_normalized_env import consistent_normalize from garagei.envs.normalized_env_ex import normalize_ex from garagei.sampler.option_multiprocessing_sampler import OptionMultiprocessingSampler from garagei.torch.modules.gaussian_mlp_module_ex import GaussianMLPTwoHeadedModuleEx, GaussianMLPIndependentStdModuleEx, GaussianMLPModuleEx, GaussianMixtureMLPModule from garagei.torch.modules.normalizer import Normalizer from garagei.torch.modules.parameter_module import ParameterModule from garagei.torch.policies.policy_ex import PolicyEx from garagei.torch.optimizers.optimizer_group_wrapper import OptimizerGroupWrapper from garagei.torch.utils import xavier_normal_ex from iod.lsd import LSD from iod.utils import make_env_spec_for_option_policy, get_normalizer_preset EXP_DIR = 'exp' if os.environ.get('START_METHOD') is not None: START_METHOD = os.environ['START_METHOD'] else: START_METHOD = 'spawn' print('START_METHOD', START_METHOD) def get_argparser(): parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--run_group', type=str, required=True) parser.add_argument('--policy_type', type=str, default='tanhgaussian', choices=['tanhgaussian']) parser.add_argument('--normalizer_type', type=str, default='off', choices=['off', 'garage_ex', 'consistent', 'manual', 'half_cheetah_preset', 'ant_preset', 'humanoid_preset']) parser.add_argument('--normalizer_obs_alpha', type=float, default=0.001) parser.add_argument('--normalized_env_eval_update', type=int, default=0) parser.add_argument('--normalizer_mean', type=float, default=0) parser.add_argument('--normalizer_std', type=float, default=1) parser.add_argument('--env', type=str, default='ant', choices=['half_cheetah', 'ant', 'humanoid',]) parser.add_argument('--mujoco_render_hw', type=int, default=100) parser.add_argument('--max_path_length', type=int, default=200) parser.add_argument('--use_gpu', type=int, default=0, choices=[0, 1]) parser.add_argument('--seed', type=int, default=0) parser.add_argument('--n_parallel', type=int, default=10) parser.add_argument('--n_thread', type=int, default=1) parser.add_argument('--n_epochs', type=int, default=1000000) parser.add_argument('--max_optimization_epochs', type=int, default=[1], nargs='+') parser.add_argument('--traj_batch_size', type=int, default=20) parser.add_argument('--minibatch_size', type=int, default=None) parser.add_argument('--trans_minibatch_size', type=int, default=None) parser.add_argument('--trans_optimization_epochs', type=int, default=None) parser.add_argument('--record_metric_difference', type=int, default=0, choices=[0, 1]) parser.add_argument('--n_epochs_per_eval', type=int, default=int(500)) parser.add_argument('--n_epochs_per_first_n_eval', type=int, default=None) parser.add_argument('--custom_eval_steps', type=int, default=None, nargs='*') parser.add_argument('--n_epochs_per_log', type=int, default=None) parser.add_argument('--n_epochs_per_tb', 
type=int, default=None) parser.add_argument('--n_epochs_per_save', type=int, default=int(1000)) parser.add_argument('--n_epochs_per_pt_save', type=int, default=None) parser.add_argument('--n_epochs_per_pkl_update', type=int, default=None) parser.add_argument('--num_eval_options', type=int, default=int(49)) parser.add_argument('--num_eval_trajectories_per_option', type=int, default=int(4)) parser.add_argument('--num_random_trajectories', type=int, default=int(200)) parser.add_argument('--eval_record_video', type=int, default=int(0)) parser.add_argument('--eval_deterministic_traj', type=int, default=1) parser.add_argument('--eval_deterministic_video', type=int, default=1) parser.add_argument('--eval_plot_axis', type=float, default=None, nargs='*') parser.add_argument('--video_skip_frames', type=int, default=1) parser.add_argument('--dim_option', type=int, default=2) parser.add_argument('--common_lr', type=float, default=1e-4) parser.add_argument('--lr_sp', type=float, default=None) parser.add_argument('--lr_op', type=float, default=None) parser.add_argument('--lr_te', type=float, default=None) parser.add_argument('--alpha', type=float, default=0.01) parser.add_argument('--sac_tau', type=float, default=5e-3) parser.add_argument('--sac_lr_q', type=float, default=None) parser.add_argument('--sac_lr_a', type=float, default=None) parser.add_argument('--sac_discount', type=float, default=0.99) parser.add_argument('--sac_scale_reward', type=float, default=1.) parser.add_argument('--sac_target_coef', type=float, default=1.) parser.add_argument('--sac_update_target_per_gradient', type=int, default=0, choices=[0, 1]) parser.add_argument('--sac_update_with_loss_alpha_prior_opt', type=int, default=0, choices=[0, 1]) parser.add_argument('--sac_replay_buffer', type=int, default=0, choices=[0, 1]) parser.add_argument('--sac_max_buffer_size', type=int, default=1000000) parser.add_argument('--sac_min_buffer_size', type=int, default=10000) parser.add_argument('--spectral_normalization', type=int, default=0, choices=[0, 1]) parser.add_argument('--spectral_coef', type=float, default=1.) parser.add_argument('--model_master_dim', type=int, default=None) parser.add_argument('--model_common_dim', type=int, default=None) parser.add_argument('--model_master_num_layers', type=int, default=2) parser.add_argument('--model_master_nonlinearity', type=str, default=None, choices=['relu', 'tanh']) parser.add_argument('--op_hidden_dims', type=int, default=None, nargs='*') parser.add_argument('--te_hidden_dims', type=int, default=None, nargs='*') parser.add_argument('--te_max_optimization_epochs', type=int, default=2) parser.add_argument('--te_trans_optimization_epochs', type=int, default=None) parser.add_argument('--discrete', type=int, default=0, choices=[0, 1]) parser.add_argument('--inner', type=int, default=1, choices=[0, 1]) parser.add_argument('--dual_reg', type=int, default=0, choices=[0, 1]) parser.add_argument('--dual_lam', type=float, default=1.) parser.add_argument('--dual_slack', type=float, default=0.) parser.add_argument('--dual_dist', type=str, default='l2', choices=['l2', 's2_from_s']) return parser args = get_argparser().parse_args() g_start_time = int(datetime.datetime.now().timestamp()) def get_exp_name(hack_slurm_job_id_override=None): parser = get_argparser() exp_name = '' exp_name += f'sd{args.seed:03d}_' if 'SLURM_JOB_ID' in os.environ or hack_slurm_job_id_override is not None: exp_name += f's_{hack_slurm_job_id_override or os.environ["SLURM_JOB_ID"]}.' 
if 'SLURM_PROCID' in os.environ: exp_name += f'{os.environ["SLURM_PROCID"]}.' exp_name_prefix = exp_name if 'SLURM_RESTART_COUNT' in os.environ: exp_name += f'rs_{os.environ["SLURM_RESTART_COUNT"]}.' exp_name += f'{g_start_time}' exp_name_abbrs = set() exp_name_arguments = set() def list_to_str(arg_list): return str(arg_list).replace(",", "|").replace(" ", "").replace("'", "") def add_name(abbr, argument, value_dict=None, max_length=None, log_only_if_changed=True): nonlocal exp_name if abbr is not None: assert abbr not in exp_name_abbrs exp_name_abbrs.add(abbr) else: abbr = '' exp_name_arguments.add(argument) value = getattr(args, argument) if log_only_if_changed and parser.get_default(argument) == value: return if isinstance(value, list): if value_dict is not None: value = [value_dict.get(v) for v in value] value = list_to_str(value) elif value_dict is not None: value = value_dict.get(value) if value is None: value = 'X' if max_length is not None: value = str(value)[:max_length] if isinstance(value, str): value = value.replace('/', '-') exp_name += f'_{abbr}{value}' add_name(None, 'env', { 'half_cheetah': 'CH', 'ant': 'ANT', 'humanoid': 'HUM', }, log_only_if_changed=False) add_name('clr', 'common_lr', log_only_if_changed=False) add_name('slra', 'sac_lr_a', log_only_if_changed=False) add_name('a', 'alpha', log_only_if_changed=False) add_name('sg', 'sac_update_target_per_gradient', log_only_if_changed=False) add_name('do', 'dim_option', log_only_if_changed=False) add_name('sr', 'sac_replay_buffer') add_name('md', 'model_master_dim') add_name('sdc', 'sac_discount', log_only_if_changed=False) add_name('ss', 'sac_scale_reward') add_name('ds', 'discrete') add_name('in', 'inner') add_name('dr', 'dual_reg') if args.dual_reg: add_name('dl', 'dual_lam') add_name('dk', 'dual_slack') add_name('dd', 'dual_dist', max_length=1) # Check lr arguments for key in vars(args): if key.startswith('lr_') or key.endswith('_lr') or '_lr_' in key: val = getattr(args, key) assert val is None or bool(val), 'To specify a lr of 0, use a negative value' return exp_name, exp_name_prefix def get_log_dir(): exp_name, exp_name_prefix = get_exp_name() assert len(exp_name) <= os.pathconf('/', 'PC_NAME_MAX') # Resolve symlinks to prevent runs from crashing in case of home nfs crashing. 
log_dir = os.path.realpath(os.path.join(EXP_DIR, args.run_group, exp_name)) assert not os.path.exists(log_dir), f'The following path already exists: {log_dir}' return log_dir def get_gaussian_module_construction(args, *, hidden_sizes, hidden_nonlinearity=torch.relu, w_init=torch.nn.init.xavier_uniform_, init_std=1.0, min_std=1e-6, max_std=None, **kwargs): module_kwargs = dict() module_cls = GaussianMLPIndependentStdModuleEx module_kwargs.update(dict( std_hidden_sizes=hidden_sizes, std_hidden_nonlinearity=hidden_nonlinearity, std_hidden_w_init=w_init, std_output_w_init=w_init, init_std=init_std, min_std=min_std, max_std=max_std, )) module_kwargs.update(dict( hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, hidden_w_init=w_init, output_w_init=w_init, std_parameterization='exp', bias=True, spectral_normalization=args.spectral_normalization, spectral_coef=args.spectral_coef, **kwargs, )) return module_cls, module_kwargs def create_policy(*, name, env_spec, hidden_sizes, hidden_nonlinearity=None, dim_option=None, policy_type=None): option_info = { 'dim_option': dim_option, } policy_kwargs = dict( env_spec=env_spec, name=name, option_info=option_info, ) module_kwargs = dict( hidden_sizes=hidden_sizes, layer_normalization=False, ) if hidden_nonlinearity is not None: module_kwargs.update(hidden_nonlinearity=hidden_nonlinearity) if policy_type == 'tanhgaussian': module_cls = GaussianMLPTwoHeadedModuleEx module_kwargs.update(dict( max_std=np.exp(2.), normal_distribution_cls=TanhNormal, output_w_init=functools.partial(xavier_normal_ex, gain=1.), init_std=1., )) else: raise NotImplementedError policy_cls = PolicyEx policy_kwargs.update(dict( module_cls=module_cls, module_kwargs=module_kwargs, )) policy = policy_cls(**policy_kwargs) return policy @wrap_experiment(log_dir=get_log_dir(), name=get_exp_name()[0]) def main(ctxt=None): dowel.logger.log('ARGS: ' + str(args)) if args.n_thread is not None: torch.set_num_threads(args.n_thread) set_seed(args.seed) runner = OptionLocalRunner(ctxt) max_path_length = args.max_path_length if args.env == 'half_cheetah': from envs.mujoco.half_cheetah_env import HalfCheetahEnv env = HalfCheetahEnv( render_hw=args.mujoco_render_hw, ) elif args.env == 'ant': from envs.mujoco.ant_env import AntEnv env = AntEnv( done_allowing_step_unit=None, render_hw=args.mujoco_render_hw, ) elif args.env == 'humanoid': from envs.mujoco.humanoid_env import HumanoidEnv env = HumanoidEnv( done_allowing_step_unit=None, render_hw=args.mujoco_render_hw, ) else: raise NotImplementedError normalizer_type = args.normalizer_type normalizer_mean = args.normalizer_mean normalizer_std = args.normalizer_std normalizer_kwargs = {} if normalizer_type == 'off': env = consistent_normalize(env, normalize_obs=False, **normalizer_kwargs) elif normalizer_type == 'garage_ex': env = normalize_ex(env, normalize_obs=True, obs_alpha=args.normalizer_obs_alpha, **normalizer_kwargs) elif normalizer_type == 'consistent': env = consistent_normalize(env, normalize_obs=True, **normalizer_kwargs) elif normalizer_type == 'manual': env = consistent_normalize(env, normalize_obs=True, mean=normalizer_mean, std=normalizer_std, **normalizer_kwargs) elif normalizer_type.endswith('preset'): normalizer_mean, normalizer_std = get_normalizer_preset(normalizer_type) normalizer_type = 'manual' env = consistent_normalize(env, normalize_obs=True, mean=normalizer_mean, std=normalizer_std, **normalizer_kwargs) device = torch.device('cuda' if args.use_gpu else 'cpu') if normalizer_type == 'consistent': normalizer = 
Normalizer( shape=env.observation_space.shape, alpha=args.normalizer_obs_alpha, do_normalize=True, ) else: normalizer = None if args.model_master_dim is not None: master_dims = [args.model_master_dim] * args.model_master_num_layers else: master_dims = None if args.model_common_dim is not None: common_dims = [args.model_common_dim] * args.model_master_num_layers else: common_dims = None if args.model_master_nonlinearity == 'relu': nonlinearity = torch.relu elif args.model_master_nonlinearity == 'tanh': nonlinearity = torch.tanh else: nonlinearity = None op_env_spec = make_env_spec_for_option_policy(env.spec, args.dim_option, use_option=True) option_policy = create_policy( name='option_policy', env_spec=op_env_spec, hidden_sizes=master_dims or args.op_hidden_dims or common_dims or [32, 32], hidden_nonlinearity=nonlinearity, dim_option=args.dim_option, policy_type=args.policy_type, ) module_cls, module_kwargs = get_gaussian_module_construction( args, hidden_sizes=master_dims or args.te_hidden_dims or common_dims or [32, 32], hidden_nonlinearity=nonlinearity or torch.relu, w_init=torch.nn.init.xavier_uniform_, input_dim=env.spec.observation_space.flat_dim, output_dim=args.dim_option, ) traj_encoder = module_cls(**module_kwargs) module_cls, module_kwargs = get_gaussian_module_construction( args, hidden_sizes=master_dims or args.te_hidden_dims or common_dims or [32, 32], hidden_nonlinearity=nonlinearity or torch.relu, w_init=torch.nn.init.xavier_uniform_, input_dim=env.spec.observation_space.flat_dim, output_dim=env.spec.observation_space.flat_dim, min_std=1e-6, max_std=1e6, ) dist_predictor = module_cls(**module_kwargs) dual_lam = ParameterModule(torch.Tensor([np.log(args.dual_lam)])) def _finalize_lr(lr): if lr is None: lr = args.common_lr else: assert bool(lr), 'To specify a lr of 0, use a negative value' if lr < 0.0: dowel.logger.log(f'Setting lr to ZERO given {lr}') lr = 0.0 return lr optimizers = { 'option_policy': torch.optim.Adam([ {'params': option_policy.parameters(), 'lr': _finalize_lr(args.lr_op)}, ]), 'traj_encoder': torch.optim.Adam([ {'params': traj_encoder.parameters(), 'lr': _finalize_lr(args.lr_te)}, ]), 'dist_predictor': torch.optim.Adam([ {'params': dist_predictor.parameters(), 'lr': _finalize_lr(args.lr_op)}, ]), 'dual_lam': torch.optim.Adam([ {'params': dual_lam.parameters(), 'lr': _finalize_lr(args.lr_op)}, ]), } if args.sac_replay_buffer: replay_buffer = PathBuffer(capacity_in_transitions=int(args.sac_max_buffer_size)) else: replay_buffer = None qf1 = ContinuousMLPQFunction( env_spec=op_env_spec, hidden_sizes=master_dims or common_dims or [32, 32], hidden_nonlinearity=nonlinearity or torch.relu, layer_normalization=False, ) qf2 = ContinuousMLPQFunction( env_spec=op_env_spec, hidden_sizes=master_dims or common_dims or [32, 32], hidden_nonlinearity=nonlinearity or torch.relu, layer_normalization=False, ) log_alpha = ParameterModule(torch.Tensor([np.log(args.alpha)])) optimizers.update({ 'qf1': torch.optim.Adam([ {'params': qf1.parameters(), 'lr': _finalize_lr(args.sac_lr_q)}, ]), 'qf2': torch.optim.Adam([ {'params': qf2.parameters(), 'lr': _finalize_lr(args.sac_lr_q)}, ]), 'log_alpha': torch.optim.Adam([ {'params': log_alpha.parameters(), 'lr': _finalize_lr(args.sac_lr_a)}, ]) }) optimizer = OptimizerGroupWrapper( optimizers=optimizers, max_optimization_epochs=None, minibatch_size=args.minibatch_size, ) algo_kwargs = dict( env_spec=env.spec, normalizer=normalizer, normalizer_type=normalizer_type, normalizer_mean=normalizer_mean, normalizer_std=normalizer_std, 
normalized_env_eval_update=args.normalized_env_eval_update, option_policy=option_policy, traj_encoder=traj_encoder, dist_predictor=dist_predictor, dual_lam=dual_lam, optimizer=optimizer, alpha=args.alpha, max_path_length=args.max_path_length, max_optimization_epochs=args.max_optimization_epochs, n_epochs_per_eval=args.n_epochs_per_eval, n_epochs_per_first_n_eval=args.n_epochs_per_first_n_eval, custom_eval_steps=args.custom_eval_steps, n_epochs_per_log=args.n_epochs_per_log or 1, n_epochs_per_tb=args.n_epochs_per_tb or args.n_epochs_per_eval, n_epochs_per_save=args.n_epochs_per_save, n_epochs_per_pt_save=args.n_epochs_per_eval if args.n_epochs_per_pt_save is None else args.n_epochs_per_pt_save, n_epochs_per_pkl_update=args.n_epochs_per_eval if args.n_epochs_per_pkl_update is None else args.n_epochs_per_pkl_update, dim_option=args.dim_option, num_eval_options=args.num_eval_options, num_eval_trajectories_per_option=args.num_eval_trajectories_per_option, num_random_trajectories=args.num_random_trajectories, eval_record_video=args.eval_record_video, video_skip_frames=args.video_skip_frames, eval_deterministic_traj=args.eval_deterministic_traj, eval_deterministic_video=args.eval_deterministic_video, eval_plot_axis=args.eval_plot_axis, name='LSD', device=device, num_train_per_epoch=1, record_metric_difference=args.record_metric_difference, te_max_optimization_epochs=args.te_max_optimization_epochs, te_trans_optimization_epochs=args.te_trans_optimization_epochs, trans_minibatch_size=args.trans_minibatch_size, trans_optimization_epochs=args.trans_optimization_epochs, discount=args.sac_discount, discrete=args.discrete, ) algo = LSD( **algo_kwargs, qf1=qf1, qf2=qf2, log_alpha=log_alpha, tau=args.sac_tau, scale_reward=args.sac_scale_reward, target_coef=args.sac_target_coef, update_target_per_gradient=args.sac_update_target_per_gradient, replay_buffer=replay_buffer, min_buffer_size=args.sac_min_buffer_size, inner=args.inner, dual_reg=args.dual_reg, dual_slack=args.dual_slack, dual_dist=args.dual_dist, ) algo.option_policy.cpu() runner.setup( algo=algo, env=env, sampler_cls=OptionMultiprocessingSampler, sampler_args=dict(n_thread=args.n_thread), n_workers=args.n_parallel, ) algo.option_policy.to(device) runner.train(n_epochs=args.n_epochs, batch_size=args.traj_batch_size) if __name__ == '__main__': mp.set_start_method(START_METHOD) main()
21,948
38.124777
167
py
CSD-locomotion
CSD-locomotion-master/tests/utils.py
import datetime
import os
import socket

from garage.experiment.experiment import get_metadata

import global_context


def get_run_env_dict():
    d = {}
    d['timestamp'] = datetime.datetime.now().timestamp()
    d['hostname'] = socket.gethostname()

    if 'SLURM_JOB_ID' in os.environ:
        d['slurm_job_id'] = int(os.environ['SLURM_JOB_ID'])
    if 'SLURM_PROCID' in os.environ:
        d['slurm_procid'] = int(os.environ['SLURM_PROCID'])
    if 'SLURM_RESTART_COUNT' in os.environ:
        d['slurm_restart_count'] = int(os.environ['SLURM_RESTART_COUNT'])

    git_root_path, metadata = get_metadata()
    # get_metadata() does not decode git_root_path.
    d['git_root_path'] = git_root_path.decode('utf-8') if git_root_path is not None else None
    d['git_commit'] = metadata.get('githash')
    d['launcher'] = metadata.get('launcher')

    return d
862
29.821429
93
py
null
IT-Defense-main/README.md
# IT-Defense

Our code for the paper '[The art of defense: letting networks fool the attacker](https://arxiv.org/abs/2104.02963)'.

## Introduction

Robust environment perception is critical for autonomous cars, and adversarial defenses are the most effective and widely studied way to improve the robustness of environment perception. However, all previous defense methods decrease the natural accuracy, and the nature of the DNNs itself has been overlooked. To this end, in this paper we propose a novel adversarial defense for 3D point cloud classifiers that makes full use of the nature of the DNNs. Because a point cloud is an unordered set, every point cloud classifier is permutation invariant with respect to its input. Based on this property, we design the invariant transformations defense (IT-Defense). We show that, even after accounting for obfuscated gradients, our IT-Defense is a resilient defense against state-of-the-art (SOTA) 3D attacks. Moreover, IT-Defense does not hurt clean accuracy compared to previous SOTA 3D defenses.

![invariant](figs/Permutation.png)
![it_defense](figs/It_defense.png)

### Citation

If you find our work useful in your research, please consider citing:

```
@article{zhang2023art,
  title={The art of defense: letting networks fool the attacker},
  author={Zhang, Jinlai and Dong, Yinpeng and Liu, Binbin and Ouyang, Bo and Zhu, Jihong and Kuang, Minchi and Wang, Houqing and Meng, Yanmei},
  journal={IEEE Transactions on Information Forensics and Security},
  year={2023},
  publisher={IEEE}
}
```

## Usage

For example, you can insert our code into the [IF-Defense baseline](https://github.com/Wuziyi616/IF-Defense/tree/main/baselines) to implement IT-Defense:

```python
# attack_scripts/targeted_perturb_attack.py#L128
# for input x.size() = Bx3xN
class Infer(nn.Module):
    def __init__(self, model):
        super(Infer, self).__init__()
        self.model = model
        for p in self.parameters():
            p.requires_grad = False

    def forward(self, x):
        x.data = x[:, :, torch.randperm(x.size()[2])].data
        x = self.model(x)
        return x

model = Infer(model)
```

Note that for a BxNx3 input, the permutation line should be `x.data = x[:, torch.randperm(x.size()[1]), :].data`.
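As a quick sanity check of the permutation invariance that IT-Defense relies on, the sketch below wraps a small stand-in classifier (a per-point MLP followed by max-pooling) and verifies that random point reordering leaves the prediction unchanged. The `ToyPointNet` model, the class count, and the tensor shapes are illustrative assumptions and not the victim networks used in the paper; only the `Infer` wrapper mirrors the repository snippet.

```python
import torch
import torch.nn as nn


class ToyPointNet(nn.Module):
    """Stand-in permutation-invariant point cloud classifier (assumed, not from the paper)."""

    def __init__(self, num_classes=40):
        super().__init__()
        self.mlp = nn.Sequential(nn.Conv1d(3, 64, 1), nn.ReLU(),
                                 nn.Conv1d(64, 128, 1), nn.ReLU())
        self.head = nn.Linear(128, num_classes)

    def forward(self, x):                       # x: B x 3 x N
        feat = self.mlp(x).max(dim=2).values    # max over points -> order does not matter
        return self.head(feat)


class Infer(nn.Module):
    """IT-Defense wrapper: shuffle the point order before every forward pass."""

    def __init__(self, model):
        super().__init__()
        self.model = model
        for p in self.parameters():
            p.requires_grad = False

    def forward(self, x):
        x.data = x[:, :, torch.randperm(x.size()[2])].data  # B x 3 x N layout
        return self.model(x)


if __name__ == '__main__':
    torch.manual_seed(0)
    net = Infer(ToyPointNet()).eval()
    pts = torch.randn(2, 3, 1024)               # two clouds with 1024 points each
    with torch.no_grad():
        first = net(pts).argmax(dim=1)
        second = net(pts).argmax(dim=1)         # a different random permutation
    print(torch.equal(first, second))           # True: predictions agree despite reshuffling
```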
2,247
40.62963
483
md
null
IT-Defense-main/it_defense.py
import torch
import torch.nn as nn


class Infer(nn.Module):
    def __init__(self, model):
        super(Infer, self).__init__()
        self.model = model
        # Freeze all parameters of the wrapped classifier.
        for p in self.parameters():
            p.requires_grad = False

    def forward(self, x):
        # Randomly permute the point order (B x 3 x N layout) before inference;
        # the classifier is permutation invariant, so clean accuracy is preserved.
        x.data = x[:, :, torch.randperm(x.size()[2])].data
        x = self.model(x)
        return x


# Wrap an existing classifier `model` (defined elsewhere) with the defense.
model = Infer(model)
365
23.4
58
py
smartbugs
smartbugs-master/README.md
# SmartBugs: A Framework for Analysing Ethereum Smart Contracts

![SmartBugs tests](https://github.com/smartbugs/smartbugs/actions/workflows/ubuntu.yml/badge.svg)
<a href="https://github.com/smartbugs/smartbugs/releases"><img alt="Smartbugs release" src="https://img.shields.io/github/release/smartbugs/smartbugs.svg"></a>
<a href="https://github.com/smartbugs/smartbugs/blob/master/LICENSE"><img alt="Smartbugs license" src="https://img.shields.io/github/license/smartbugs/smartbugs.svg?color=blue"></a>
<span class="badge-crypto"><a href="#support-and-donate" title="Donate to this project using Cryptocurrency"><img src="https://img.shields.io/badge/crypto-donate-red.svg" alt="crypto donate button" /></a></span>
<a href="#Supported-Tools"><img alt="analysis tools" src="https://img.shields.io/badge/analysis tools-19-blue"></a>

SmartBugs is an extensible platform with a uniform interface to tools that analyse blockchain programs for weaknesses and other properties.

## Features

- *19 supported tools, 3 modes* for analysing Solidity source code, deployment bytecode, and runtime code.
- *A modular approach to integrating analysers.* All it takes to add a new tool is a Docker image encapsulating the tool and a few lines in a config file. To make the output accessible in a standardised format, add a small Python script.
- *Parallel, randomised execution* of the tasks for the optimal use of resources when performing a bulk analysis.
- *Standardised output format.* Scripts parse and normalise the output of the tools to allow for an automated analysis of the results across tools.
- *Automatic download of an appropriate Solidity compiler* matching the contract under analysis, and injection into the Docker image.
- *Output of results in SARIF format,* for integration into Github workflows.

## Supported Tools

|  | version | Solidity | bytecode | runtime code |
| :--- | :--- | :---: | :---: | :--: |
| [ConFuzzius](https://github.com/christoftorres/ConFuzzius) | #4315fb7 v0.0.1 | :heavy_check_mark: | | |
| [Conkas](https://github.com/smartbugs/conkas) | #4e0f256 | :heavy_check_mark: | | :heavy_check_mark: |
| [Ethainter](https://zenodo.org/record/3760403) | | | | :heavy_check_mark: |
| [eThor](https://secpriv.wien/ethor) | 2021 (CCS 2020) | | | :heavy_check_mark: |
| [HoneyBadger](https://github.com/christoftorres/HoneyBadger) | #ff30c9a | :heavy_check_mark: | | :heavy_check_mark: |
| [MadMax](https://github.com/nevillegrech/MadMax) | #6e9a6e9 | | | :heavy_check_mark: |
| [Maian](https://github.com/smartbugs/MAIAN) | #4bab09a | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| [Manticore](https://github.com/trailofbits/manticore) | 0.3.7 | :heavy_check_mark: | | |
| [Mythril](https://github.com/ConsenSys/mythril) | 0.23.15 | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| [Osiris](https://github.com/christoftorres/Osiris) | #d1ecc37 | :heavy_check_mark: | | :heavy_check_mark: |
| [Oyente](https://github.com/smartbugs/oyente) | #480e725 | :heavy_check_mark: | | :heavy_check_mark: |
| [Pakala](https://github.com/palkeo/pakala) | #c84ef38 v1.1.10 | | | :heavy_check_mark: |
| [Securify](https://github.com/eth-sri/securify) | | :heavy_check_mark: | | :heavy_check_mark: |
| [sFuzz](https://github.com/duytai/sFuzz) | #48934c0 (2019-03-01) | :heavy_check_mark: | | |
| [Slither](https://github.com/crytic/slither) | | :heavy_check_mark: | | |
| [Smartcheck](https://github.com/smartdec/smartcheck) | | :heavy_check_mark: | | |
| [Solhint](https://github.com/protofire/solhint) | 3.3.8 | :heavy_check_mark: | | |
| [teEther](https://github.com/nescio007/teether) | #04adf56 | | | :heavy_check_mark: |
| [Vandal](https://github.com/usyd-blockchain/vandal) | #d2b0043 | | | :heavy_check_mark: |

## Requirements

- Unix-based system (Windows users might want to read [our wiki page on running SmartBugs in Windows](https://github.com/smartbugs/smartbugs/wiki/Running-SmartBugs-in-Windows))
- [Docker](https://docs.docker.com/install)
- [Python3](https://www.python.org) (version 3.6 and above, 3.10+ recommended)

## Installation

### Unix/Linux

1. Install [Docker](https://docs.docker.com/install) and [Python3](https://www.python.org). Make sure that the user running SmartBugs is allowed to interact with the Docker daemon. Currently, this is achieved by adding the user to the `docker` group:

   ```bash
   sudo usermod -a -G docker $USER
   ```

   For adding another user, replace `$USER` by the respective user-id. The group membership becomes active with the next log-in.

2. Clone [SmartBugs's repository](https://github.com/smartbugs/smartbugs):

   ```bash
   git clone https://github.com/smartbugs/smartbugs
   ```

3. Install Python dependencies in a virtual environment:

   ```bash
   cd smartbugs
   install/setup-venv.sh
   ```

4. Optionally, add the executables to the command search path, e.g. by adding links to `$HOME/bin`.

   ```bash
   ln -s "`pwd`/smartbugs" "$HOME/bin/smartbugs"
   ln -s "`pwd`/reparse" "$HOME/bin/reparse"
   ln -s "`pwd`/results2csv" "$HOME/bin/results2csv"
   ```

   The command `which smartbugs` should now display the path to the command.

### Windows

See [our wiki page on running SmartBugs in Windows](https://github.com/smartbugs/smartbugs/wiki/Running-SmartBugs-in-Windows).

## Usage

SmartBugs provides a command-line interface. Run it without arguments for a short description.

```console
./smartbugs
usage: smartbugs [-c FILE] [-t TOOL [TOOL ...]] [-f PATTERN [PATTERN ...]] [--main] [--runtime]
                 [--processes N] [--timeout N] [--cpu-quota N] [--mem-limit MEM]
                 [--runid ID] [--results DIR] [--log FILE] [--overwrite] [--json] [--sarif]
                 [--quiet] [--version] [-h]
...
```

For details, see [SmartBugs' wiki](https://github.com/smartbugs/smartbugs/wiki/The-command-line-interface).

**Example:** To analyse the Solidity files in the `samples` directory with Mythril, use the command

```console
./smartbugs -t mythril -f samples/*.sol --processes 2 --mem-limit 4g --timeout 600
```

The options tell SmartBugs to run two processes in parallel, with a memory limit of 4GB and max. 10 minutes computation time per task. By default, the results are placed in the local directory `results`.

### Utility programs

**`reparse`** can be used to parse analysis results and extract relevant information, without rerunning the analysis. This may be useful either when you forgot to specify the option `--json` or `--sarif` during analysis, or when you want to parse old analysis results with an updated parser.

```console
./reparse
usage: reparse [-h] [--sarif] [--processes N] [-v] DIR [DIR ...]
...
```

**`results2csv`** generates a csv file from the results, suitable e.g. for a database.

```console
./results2csv
usage: results2csv [-h] [-p] [-v] [-f FIELD [FIELD ...]] [-x FIELD [FIELD ...]] DIR [DIR ...]
...
```

The following commands analyse `SimpleDAO.sol` with all available tools and write the parsed output to `results.csv`. `reparse` is necessary in this example, since `smartbugs` is called without the options `--json` and `--sarif`, so SmartBugs doesn't parse during the analysis. `results2csv` collects the outputs in the folder `results` and writes for each analysed contract one line of comma-separated values to standard output (redirected to `results.csv`). The option `-p` tells `results2csv` to format the lists of findings, errors etc. as Postgres arrays; without the option, the csv file is suitable for spreadsheet programs.

```console
./smartbugs -t all -f samples/SimpleDAO.sol
./reparse results
./results2csv -p results > results.csv
```

## Further Information

- For documentation, see the [wiki](https://github.com/smartbugs/smartbugs/wiki).
- Sample contracts: The folder [`samples`](samples) contains a few selected Solidity source files with the corresponding deployment and runtime bytecodes, for first experiments.
- [SB Curated](https://github.com/smartbugs/smartbugs-curated) is a curated dataset of vulnerable Solidity smart contracts.
- [SmartBugs Wild Dataset](https://github.com/smartbugs/smartbugs-wild) is a repository with 47,398 smart contracts extracted from the Ethereum network.

## Academic Usage

If you use SmartBugs or any of its datasets, please cite:

- Durieux, T., Ferreira, J.F., Abreu, R. and Cruz, P., 2020. Empirical review of automated analysis tools on 47,587 Ethereum smart contracts. In Proceedings of the ACM/IEEE 42nd International Conference on Software Engineering (pp. 530-541).

```
@inproceedings{durieux2020empirical,
  title={Empirical review of automated analysis tools on 47,587 Ethereum smart contracts},
  author={Durieux, Thomas and Ferreira, Jo{\~a}o F. and Abreu, Rui and Cruz, Pedro},
  booktitle={Proceedings of the ACM/IEEE 42nd International conference on software engineering},
  pages={530--541},
  year={2020}
}
```

- Ferreira, J.F., Cruz, P., Durieux, T. and Abreu, R., 2020. SmartBugs: A framework to analyze solidity smart contracts. In Proceedings of the 35th IEEE/ACM International Conference on Automated Software Engineering (pp. 1349-1352).

```
@inproceedings{ferreira2020smartbugs,
  title={SmartBugs: A framework to analyze solidity smart contracts},
  author={Ferreira, Jo{\~a}o F and Cruz, Pedro and Durieux, Thomas and Abreu, Rui},
  booktitle={Proceedings of the 35th IEEE/ACM International Conference on Automated Software Engineering},
  pages={1349--1352},
  year={2020}
}
```

## Work that uses SmartBugs

- [SmartBugs was used to analyze 47,587 smart contracts](https://joaoff.com/publication/2020/icse) (work published at ICSE 2020). These contracts are available in a [separate repository](https://github.com/smartbugs/smartbugs-wild). The results are also in [their own repository](https://github.com/smartbugs/smartbugs-results). The version of SmartBugs used in this study was [SmartBugs v1.0](https://github.com/smartbugs/smartbugs/releases/tag/v1.0.0).
- [SmartBugs was used to evaluate a simple extension of Smartcheck](https://joaoff.com/publication/2020/ase) (work published at ASE 2020, _Tool Demo Track_)
- **... you are more than welcome to add your own work here!**

## Support and Donate

You can show your appreciation for the project and support future development by donating.

**🙌 ETH Donations:** `0xA4FBA2908162646197aca90b84B095BE4D16Ae53` 🙌

## License

The [license](LICENSE) applies to all files in the repository, with the exception of the smart contracts in the `samples` folder. The files there were obtained from [Etherscan](http://etherscan.io) and retain their original licenses.
11,277
50.733945
454
md
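The README record above documents the command-line interface; the `sb` package that follows in this dump can also be driven directly from Python. The sketch below is an illustration based on `sb/cli.py` and `sb/settings.py` further down: the chosen tool, file pattern and resource limits are example values, and actually executing it requires a running Docker daemon and downloads the tool images.

```python
# Minimal sketch of invoking SmartBugs programmatically (mirrors what sb/cli.py
# does after parsing the command line). Assumes the sb package is importable
# and Docker is running; the option values below are illustrative only.
import sb.settings, sb.smartbugs

settings = sb.settings.Settings()
settings.update({
    "tools": ["mythril"],          # same values as the -t option
    "files": ["samples/*.sol"],    # same values as the -f option
    "processes": 2,
    "mem_limit": "4g",
    "timeout": 600,
    "json": True,                  # parse tool output into result.json
})
sb.smartbugs.main(settings)        # freezes the settings and runs the analysis
```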
smartbugs
smartbugs-master/site_cfg.yaml
#files: []
## $HOME or ${HOME} is replaced by the home dir of the current user
#
#runtime: false
#
#main: false
#
#tools: []
#
#runid: ${YEAR}${MONTH}${DAY}_${HOUR}${MIN}
## vars: YEAR, MONTH, DAY, HOUR, MIN, SEC, ZONE,
## HOME, PID, SBVERSION, SBHOME
#
#overwrite: false
#
#processes: 1
#
#timeout: 0 # [s] 0/null = no timeout enforced, tool default applies
#
#cpu-quota: 0 # 0/null = no quota
#
#mem-limit: 0 # "512m" or "4g" 0/null = no quota
#
#results: results/${TOOL}/${RUNID}/${FILENAME}
## vars: all vars from "runid" above, as well as RUNID,
## TOOL, MODE (solidity, bytecode, runtime), ABSDIR, RELDIR,
## FILENAME, FILEBASE, FILEEXT (FILENAME = FILEBASE + "." + FILEEXT)
#
#log: results/logs/${RUNID}.log
## vars: all vars from "runid" above, as well as RUNID
#
#json: false
#
#sarif: false
#
#quiet: false
832
21.513514
70
yaml
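The `site_cfg.yaml` above contains only commented-out defaults. As a hedged illustration of how it is consumed (based on `sb/cli.py` and `sb/settings.py` below), settings are layered: built-in defaults first, then `site_cfg.yaml`, then an optional `-c` configuration file, and finally the command-line arguments. The override values in this sketch are made up.

```python
# Sketch of the settings layering; assumes the sb package is importable.
import os
import sb.cfg, sb.settings

settings = sb.settings.Settings()                  # built-in defaults
if os.path.exists(sb.cfg.SITE_CFG):                # <SmartBugs home>/site_cfg.yaml
    settings.update(sb.cfg.SITE_CFG)               # site-wide overrides (YAML)
settings.update({"processes": 4, "quiet": True})   # e.g. overrides from the CLI
print(settings)                                    # shows the merged configuration
```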
smartbugs
smartbugs-master/.github/github_results.sh
#!/bin/bash

# Run .github/github_results.sh from SmartBugs home directory
# Generates .github/results-ubuntu-{sol,rt,hx}.csv, needed by the workflow ubuntu.yml
# as a reference against which the workflow results are compared

#rm -rf results/*/github-sol
./smartbugs -t all -f 'samples/SimpleDAO.sol' --runid github-sol --sarif --main --timeout 180
./results2csv -x start duration -- results/*/github-sol | sed '/confuzzius/s/".*"//' > .github/results-ubuntu-sol.csv

#rm -rf results/*/github-rt
./smartbugs -t all -f 'samples/SimpleDAO.rt.hex' --runid github-rt --sarif --timeout 180
./results2csv -x start duration -- results/*/github-rt > .github/results-ubuntu-rt.csv

#rm -rf results/*/github-hx
./smartbugs -t all -f 'samples/SimpleDAO.hex' --runid github-hx --sarif --timeout 180
./results2csv -x start duration -- results/*/github-hx > .github/results-ubuntu-hx.csv
863
47
117
sh
smartbugs
smartbugs-master/.github/ISSUE_TEMPLATE/bug_report.md
---
name: Bug report
about: Template for bug reports
title: ''
labels: ''
assignees: ''

---

**Describe the bug**
A description of what the bug is: what did you do, what did you expect, and what actually happened?
If applicable, add screenshots to help explain your problem.

**Platform**
Please provide information on the version of SmartBugs and your platform. Run SmartBugs with the option `--version` to obtain this information.
- SmartBugs version: [e.g. 2.0.2]
- Python version: [e.g. 3.10.6]
- OS: [e.g. Linux, Ubuntu, MacOS, Windows, ...]
- CPU: [e.g. Intel, AMD, M1, ...]

**Additional context**
Add any other context about the problem here.
655
26.333333
143
md
smartbugs
smartbugs-master/.github/ISSUE_TEMPLATE/feature_request.md
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
595
27.380952
92
md
smartbugs
smartbugs-master/.github/old/github_results.sh
#!/bin/bash

# Run .github/github_results.sh from SmartBugs home directory
# Generates .github/results-ubuntu.csv, needed for the workflow ubuntu.yml
# as a reference for comparing the results of the workflow with

rm -rf results/github
./smartbugs -t all -f 'samples/SimpleDAO.*' --runid github --json --main --timeout 360
./results2csv -x start duration -- results/*/github | sed '/confuzzius/s/".*"//' > .github/results-ubuntu.csv
434
42.5
109
sh
smartbugs
smartbugs-master/.github/old/ubuntu.yml
name: SmartBugs tests # name displayed in badge on: push: branches: - master pull_request: branches: - master jobs: run-sb-ubuntu: runs-on: ubuntu-20.04 #runs-on: ubuntu-latest strategy: matrix: tool: ["all"] contract: ["'samples/SimpleDAO.*'"] python-version: ["3.6.9"] #python-version: ["3.10.8"] steps: - name: Clone SmartBugs uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install Smartbugs' dependencies run: install/setup-venv.sh - name: Execute ${{ matrix.tool }} on ${{ matrix.contract }} run: ./smartbugs -t ${{ matrix.tool }} -f ${{ matrix.contract }} --sarif --runid github --main --timeout 360 - name: Reparse results of ${{matrix.tool}} on ${{ matrix.contract }} run: ./reparse --sarif results - name: Verify results of ${{matrix.tool}} on ${{ matrix.contract }} run: ./results2csv -x start duration -- results | sed '/confuzzius/s/".*"//' | diff .github/results-ubuntu.csv - #- name: Upload SARIF file for slither, mode sol # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/slither/github/SimpleDAO.sol/result.sarif # category: slither/sol #- name: Upload SARIF file for smartcheck, mode sol # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/smartcheck/github/SimpleDAO.sol/result.sarif # category: smartcheck/sol #- name: Upload SARIF file for honeybadger, mode sol # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/honeybadger/github/SimpleDAO.sol/result.sarif # category: honeybadger/sol #- name: Upload SARIF file for honeybadger, mode rt.hex # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/honeybadger/github/SimpleDAO.rt.hex/result.sarif # category: honeybadger/rt.hex #- name: Upload SARIF file for conkas, mode sol # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/conkas/github/SimpleDAO.sol/result.sarif # category: conkas/sol #- name: Upload SARIF file for conkas, mode rt.hex # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/conkas/github/SimpleDAO.rt.hex/result.sarif # category: conkas/rt.hex # like osiris #- name: Upload SARIF file for oyente, mode sol # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/oyente/github/SimpleDAO.sol/result.sarif # category: oyente/sol #- name: Upload SARIF file for oyente, mode rt.hex # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/oyente/github/SimpleDAO.rt.hex/result.sarif # category: oyente/rt.hex #- name: Upload SARIF file for maian, mode sol # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/maian/github/SimpleDAO.sol/result.sarif # category: maian/sol #- name: Upload SARIF file for maian, mode rt.hex # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/maian/github/SimpleDAO.rt.hex/result.sarif # category: maian/rt.hex #- name: Upload SARIF file for maian, mode hex # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/maian/github/SimpleDAO.hex/result.sarif # category: maian/hex # like Ethainter #- name: Upload SARIF file for madmax, mode rt.hex # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/madmax/github/SimpleDAO.rt.hex/result.sarif # category: madmax/rt.hex #- name: Upload SARIF file for pakala, mode rt.hex # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/pakala/github/SimpleDAO.rt.hex/result.sarif # category: 
pakala/rt.hex #- name: Upload SARIF file for ethainter, mode rt.hex # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/ethainter/github/SimpleDAO.rt.hex/result.sarif # category: ethainter/rt.hex # like Maian #- name: Upload SARIF file for vandal, mode rt.hex # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/vandal/github/SimpleDAO.rt.hex/result.sarif # category: vandal/rt.hex # like Ethainter #- name: Upload SARIF file for teether, mode rt.hex # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/teether/github/SimpleDAO.rt.hex/result.sarif # category: teether/rt.hex #- name: Upload SARIF file for osiris, mode sol # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/osiris/github/SimpleDAO.sol/result.sarif # category: osiris/sol #- name: Upload SARIF file for osiris, mode rt.hex # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/osiris/github/SimpleDAO.rt.hex/result.sarif # category: osiris/rt.hex #- name: Upload SARIF file for solhint, mode sol # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/solhint/github/SimpleDAO.sol/result.sarif # category: solhint/sol # like Maian #- name: Upload SARIF file for securify, mode sol # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/securify/github/SimpleDAO.sol/result.sarif # category: securify/sol #- name: Upload SARIF file for securify, mode rt.hex # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/securify/github/SimpleDAO.rt.hex/result.sarif # category: securify/rt.hex #- name: Upload SARIF file for ethor, mode rt.hex # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/ethor/github/SimpleDAO.rt.hex/result.sarif # category: ethor/rt.hex #- name: Upload SARIF file for mythril-0.23.5, mode sol # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/mythril-0.23.5/github/SimpleDAO.sol/result.sarif # category: mythril-0.23.5/sol #- name: Upload SARIF file for mythril-0.23.5, mode rt.hex # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/mythril-0.23.5/github/SimpleDAO.rt.hex/result.sarif # category: mythril-0.23.5/rt.hex #- name: Upload SARIF file for mythril-0.23.5, mode hex # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/mythril-0.23.5/github/SimpleDAO.hex/result.sarif # category: mythril-0.23.5/hex # like Ethainter #- name: Upload SARIF file for manticore-0.3.7, mode sol # uses: github/codeql-action/upload-sarif@v2 # with: # sarif_file: results/manticore-0.3.7/github/SimpleDAO.sol/result.sarif # category: manticore-0.3.7/sol
7,440
42.770588
120
yml
smartbugs
smartbugs-master/.github/workflows/ubuntu.yml
name: SmartBugs tests # name displayed in badge on: push: branches: - master pull_request: branches: - master jobs: run-sb-ubuntu-sol: runs-on: ubuntu-20.04 strategy: matrix: tool: ["all"] contract: ["'samples/SimpleDAO.sol'"] python-version: ["3.6.9"] steps: - name: Clone SmartBugs uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install Smartbugs' dependencies run: install/setup-venv.sh - name: Execute ${{ matrix.tool }} on ${{ matrix.contract }} run: ./smartbugs -t ${{ matrix.tool }} -f ${{ matrix.contract }} --sarif --runid github-sol --main --timeout 180 - name: Reparse results of ${{matrix.tool}} on ${{ matrix.contract }} run: ./reparse --sarif results - name: Verify results of ${{matrix.tool}} on ${{ matrix.contract }} run: ./results2csv -x start duration -- results | sed '/confuzzius/s/".*"//' | diff .github/results-ubuntu-sol.csv - run-sb-ubuntu-rt: runs-on: ubuntu-20.04 strategy: matrix: tool: ["all"] contract: ["'samples/SimpleDAO.rt.hex'"] python-version: ["3.6.9"] steps: - name: Clone SmartBugs uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install Smartbugs' dependencies run: install/setup-venv.sh - name: Execute ${{ matrix.tool }} on ${{ matrix.contract }} run: ./smartbugs -t ${{ matrix.tool }} -f ${{ matrix.contract }} --sarif --runid github-rt --main --timeout 180 - name: Reparse results of ${{matrix.tool}} on ${{ matrix.contract }} run: ./reparse --sarif results - name: Verify results of ${{matrix.tool}} on ${{ matrix.contract }} run: ./results2csv -x start duration -- results | diff .github/results-ubuntu-rt.csv - run-sb-ubuntu-hx: runs-on: ubuntu-20.04 strategy: matrix: tool: ["all"] contract: ["'samples/SimpleDAO.hex'"] python-version: ["3.6.9"] steps: - name: Clone SmartBugs uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install Smartbugs' dependencies run: install/setup-venv.sh - name: Execute ${{ matrix.tool }} on ${{ matrix.contract }} run: ./smartbugs -t ${{ matrix.tool }} -f ${{ matrix.contract }} --sarif --runid github-hx --main --timeout 180 - name: Reparse results of ${{matrix.tool}} on ${{ matrix.contract }} run: ./reparse --sarif results - name: Verify results of ${{matrix.tool}} on ${{ matrix.contract }} run: ./results2csv -x start duration -- results | diff .github/results-ubuntu-hx.csv -
3,081
38.012658
124
yml
smartbugs
smartbugs-master/install/setup-venv.sh
#!/bin/bash

# tested for python >= 3.6.9
# python < 3.10 will give an error when using the ':'-feature in input patterns

python3 -m venv venv
source venv/bin/activate

# avoid spurious errors/warnings; the next two lines could be omitted
pip install --upgrade pip
pip install wheel

# install the packages needed by smartbugs
pip install pyyaml colorama requests semantic_version docker py-cpuinfo
399
27.571429
79
sh
smartbugs
smartbugs-master/sb/__init__.py
0
0
0
py
smartbugs
smartbugs-master/sb/__main__.py
#!/usr/bin/env python3

"""SmartBugs: A framework to analyze smart contracts

http://github.com/smartbugs/smartbugs
"""

import sb.cli

if __name__ == "__main__":
    sb.cli.main()
183
15.727273
52
py
smartbugs
smartbugs-master/sb/analysis.py
import multiprocessing, random, time, datetime, os, random import sb.logging, sb.colors, sb.docker, sb.cfg, sb.io, sb.parsing, sb.sarif, sb.errors def task_log_dict(task, start_time, duration, exit_code, log, output, docker_args): return { "filename": task.relfn, "runid": task.settings.runid, "result": { "start": start_time, "duration": duration, "exit_code": exit_code, "logs": sb.cfg.TOOL_LOG if log else None, "output": sb.cfg.TOOL_OUTPUT if output else None}, "solc": str(task.solc_version) if task.solc_version else None, "tool": task.tool.dict(), "docker": docker_args, "platform": sb.cfg.PLATFORM, } def execute(task): # create result dir if it doesn't exist os.makedirs(task.rdir, exist_ok=True) if not os.path.isdir(task.rdir): raise sb.errors.SmartBugsError(f"Cannot create result directory {task.rdir}") # check whether result dir is empty, # and if not, whether we are going to overwrite it fn_task_log = os.path.join(task.rdir, sb.cfg.TASK_LOG) if os.path.exists(fn_task_log): old = sb.io.read_json(fn_task_log) old_fn = old["filename"] old_toolid = old["tool"]["id"] old_mode = old["tool"]["mode"] if task.relfn != old_fn or task.tool.id != old_toolid or task.tool.mode != old_mode: raise sb.errors.SmartBugsError( f"Result directory {task.rdir} occupied by another task" f" ({old_toolid}/{old_mode}, {old_fn})") if not task.settings.overwrite: return 0.0 # remove any leftovers from a previous analysis fn_tool_log = os.path.join(task.rdir, sb.cfg.TOOL_LOG) fn_tool_output = os.path.join(task.rdir, sb.cfg.TOOL_OUTPUT) fn_parser_output = os.path.join(task.rdir, sb.cfg.PARSER_OUTPUT) fn_sarif_output = os.path.join(task.rdir, sb.cfg.SARIF_OUTPUT) for fn in (fn_task_log, fn_tool_log, fn_tool_output, fn_parser_output, fn_sarif_output): try: os.remove(fn) except Exception: pass if os.path.exists(fn): raise sb.errors.SmartBugsError(f"Cannot clear old output {fn}") # perform analysis # Docker causes spurious connection errors # try three times before giving up for i in range(3): try: start_time = time.time() exit_code,tool_log,tool_output,docker_args = sb.docker.execute(task) duration = time.time() - start_time break except sb.errors.SmartBugsError as e: if i == 2: raise # wait 3 to 8 minutes time.sleep(random.randint(3,8)*60) # write result to files task_log = task_log_dict(task, start_time, duration, exit_code, tool_log, tool_output, docker_args) if tool_log: sb.io.write_txt(fn_tool_log, tool_log) if tool_output: sb.io.write_bin(fn_tool_output, tool_output) # Parse output of tool if task.settings.json or task.settings.sarif: parsed_result = sb.parsing.parse(task_log, tool_log, tool_output) sb.io.write_json(fn_parser_output,parsed_result) # Format parsed result as sarif if task.settings.sarif: sarif_result = sb.sarif.sarify(task_log["tool"], parsed_result["findings"]) sb.io.write_json(fn_sarif_output, sarif_result) # Write to fn_task_log last, to indicate that this task is done sb.io.write_json(fn_task_log, task_log) return duration def analyser(logqueue, taskqueue, tasks_total, tasks_started, tasks_completed, time_completed): def pre_analysis(): with tasks_started.get_lock(): tasks_started_value = tasks_started.value + 1 tasks_started.value = tasks_started_value sb.logging.message( f"Starting task {tasks_started_value}/{tasks_total}: {sb.colors.tool(task.tool.id)} and {sb.colors.file(task.relfn)}", "", logqueue) def post_analysis(duration, no_processes, timeout): with tasks_completed.get_lock(), time_completed.get_lock(): tasks_completed_value = tasks_completed.value + 1 tasks_completed.value = 
tasks_completed_value time_completed_value = time_completed.value + duration time_completed.value = time_completed_value # estimated time to completion = time_so_far / completed_tasks * remaining_tasks / no_processes completed_tasks = tasks_completed_value time_so_far = time_completed_value remaining_tasks = tasks_total - tasks_completed_value if timeout: # Assume that the first round of processes all ran into a timeout completed_tasks += no_processes time_so_far += timeout*no_processes etc = time_so_far / completed_tasks * remaining_tasks / no_processes etc_fmt = datetime.timedelta(seconds=round(etc)) duration_fmt = datetime.timedelta(seconds=round(duration)) sb.logging.message(f"{tasks_completed_value}/{tasks_total} completed, ETC {etc_fmt}") while True: task = taskqueue.get() if task is None: return sb.logging.quiet = task.settings.quiet pre_analysis() try: duration = execute(task) except sb.errors.SmartBugsError as e: duration = 0.0 sb.logging.message(sb.colors.error(f"Analysis of {task.absfn} with {task.tool.id} failed.\n{e}"), "", logqueue) post_analysis(duration, task.settings.processes, task.settings.timeout) def run(tasks, settings): # spawn processes (instead of forking), for identical behavior on Linux and MacOS mp = multiprocessing.get_context("spawn") # start shared logging logqueue = mp.Queue() sb.logging.start(settings.log, settings.overwrite, logqueue) try: start_time = time.time() # fill task queue taskqueue = mp.Queue() random.shuffle(tasks) for task in tasks: taskqueue.put(task) for _ in range(settings.processes): taskqueue.put(None) # accounting tasks_total = len(tasks) tasks_started = mp.Value('L', 0) tasks_completed = mp.Value('L', 0) time_completed = mp.Value('f', 0.0) # start analysers shared = (logqueue, taskqueue, tasks_total, tasks_started, tasks_completed, time_completed) analysers = [ mp.Process(target=analyser, args=shared) for _ in range(settings.processes) ] for a in analysers: a.start() # wait for analysers to finish for a in analysers: a.join() # good bye duration = datetime.timedelta(seconds=round(time.time()-start_time)) sb.logging.message(f"Analysis completed in {duration}.", "", logqueue) finally: sb.logging.stop(logqueue)
6,961
37.043716
130
py
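The `post_analysis` helper in `sb/analysis.py` above estimates the time to completion (ETC) as `time_so_far / completed_tasks * remaining_tasks / no_processes`, padding the statistics with one round of timed-out tasks when a timeout is set. The stand-alone snippet below reproduces that arithmetic with made-up numbers, just to make the formula concrete; it is not part of the SmartBugs API.

```python
# Worked example of the ETC estimate from post_analysis (illustrative values).
import datetime

tasks_total, tasks_completed, time_completed = 100, 10, 1200.0   # 10 tasks took 20 min
no_processes, timeout = 4, 600

completed, time_so_far = tasks_completed, time_completed
remaining = tasks_total - tasks_completed
if timeout:
    completed += no_processes              # assume one full round hit the timeout
    time_so_far += timeout * no_processes

etc = time_so_far / completed * remaining / no_processes
print(datetime.timedelta(seconds=round(etc)))      # 1:36:26 for these numbers
```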
smartbugs
smartbugs-master/sb/cfg.py
import os, time, cpuinfo, platform

VERSION = "2.0.7"

HOME = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
SITE_CFG = os.path.join(HOME, "site_cfg.yaml")
TASK_LOG = "smartbugs.json"

TOOLS_HOME = os.path.join(HOME, "tools")
TOOL_CONFIG = "config.yaml"
TOOL_FINDINGS = "findings.yaml"
TOOL_PARSER = "parser.py"
TOOL_LOG = "result.log"
TOOL_OUTPUT = "result.tar"
PARSER_OUTPUT = "result.json"
SARIF_OUTPUT = "result.sarif"

CPU = cpuinfo.get_cpu_info()
UNAME = platform.uname()
PLATFORM = {
    "smartbugs": VERSION,
    "python": CPU.get("python_version"),
    "system": UNAME.system,
    "release": UNAME.release,
    "version": UNAME.version,
    "cpu": CPU.get("brand_raw"),
}
698
25.884615
74
py
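The constants in `sb/cfg.py` above name the files that every analysis task produces. The sketch below shows how they are typically combined with a task's result directory; the concrete directory is an assumption for illustration, modelled on what `sb/analysis.py` below does with `task.rdir`.

```python
# Sketch: composing per-task output paths from the sb.cfg constants.
# Assumes the sb package and its dependencies (see install/setup-venv.sh) are installed.
import os
import sb.cfg

rdir = os.path.join("results", "mythril", "20230101_0000", "SimpleDAO.sol")  # example dir
task_log      = os.path.join(rdir, sb.cfg.TASK_LOG)       # smartbugs.json
tool_log      = os.path.join(rdir, sb.cfg.TOOL_LOG)       # result.log
tool_output   = os.path.join(rdir, sb.cfg.TOOL_OUTPUT)    # result.tar
parsed_output = os.path.join(rdir, sb.cfg.PARSER_OUTPUT)  # result.json
sarif_output  = os.path.join(rdir, sb.cfg.SARIF_OUTPUT)   # result.sarif
print(task_log)
```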
smartbugs
smartbugs-master/sb/cli.py
import argparse, sys, os import sb.cfg, sb.colors, sb.smartbugs, sb.logging, sb.settings, sb.errors def cli_args(defaults): def fmt_default(defval): formatted = ( "yes" if isinstance(defval, bool) and defval else "no" if isinstance(defval, bool) and not defval else str(defval) if isinstance(defval, int) else "none" if not defval else " ".join([str(dv) for dv in defval]) if isinstance(defval, list) or isinstance(defval, tuple) or isinstance(defval, set) else str(defval)) return f" [default: {formatted}]" parser = argparse.ArgumentParser( description="Automated analysis of Ethereum smart contracts", add_help=False, prog="smartbugs") input = parser.add_argument_group("input options") input.add_argument("-c", "--configuration", metavar="FILE", type=str, help=f"settings to be processed before command line args{fmt_default(None)}") input.add_argument("-t", "--tools", metavar="TOOL", nargs="+", type=str, help=f"tools to run on the contracts{fmt_default(defaults.tools)}") input.add_argument("-f", "--files", metavar="PATTERN", nargs="+", type=str, help=f"glob pattern specifying the files to analyse{fmt_default(defaults.files)}" "; may be prefixed by 'DIR:' for search relative to DIR") input.add_argument("--main", action="store_true", default=None, help=f"if the Solidity file contains a contract named like the file, analyse this contract only{fmt_default('all contracts')}") input.add_argument("--runtime", action="store_true", default=None, help=f"analyse the deployed, not the deployment code{fmt_default(defaults.runtime)}") exec = parser.add_argument_group("execution options") exec.add_argument("--processes", type=int, metavar="N", help=f"number of parallel processes{fmt_default(defaults.processes)}") exec.add_argument("--timeout", type=int, metavar="N", help=f"timeout for each task in seconds{fmt_default(defaults.timeout)}") exec.add_argument("--cpu-quota", type=int, metavar="N", help=f"cpu quota for docker containers{fmt_default(defaults.cpu_quota)}") exec.add_argument("--mem-limit", type=str, metavar="MEM", help=f"memory quota for docker containers, like 512m or 1g{fmt_default(defaults.mem_limit)}") output = parser.add_argument_group("output options") output.add_argument("--runid", type=str, metavar="ID", help=f"string identifying the run{fmt_default(defaults.runid)}") output.add_argument("--results", type=str, metavar="DIR", help=f"folder for the results{fmt_default(defaults.results)}") output.add_argument("--log", type=str, metavar="FILE", help=f"file for log messages{fmt_default(defaults.log)}") output.add_argument("--overwrite", action="store_true", default=None, help=f"delete old result and rerun the analysis{fmt_default(defaults.overwrite)}") output.add_argument("--json", action="store_true", default=None, help=f"parse output and write it to {sb.cfg.PARSER_OUTPUT}{fmt_default(defaults.json)}") output.add_argument("--sarif", action="store_true", default=None, help=f"parse output and write it to {sb.cfg.PARSER_OUTPUT} as well as {sb.cfg.SARIF_OUTPUT}{fmt_default(defaults.sarif)}") output.add_argument("--quiet", action="store_true", default=None, help=f"suppress output to console (stdout){fmt_default(defaults.quiet)}") info = parser.add_argument_group("information options") info.add_argument("-v", "--version", action="store_true", help="show version and exit") info.add_argument("-h", "--help", action="help", default=argparse.SUPPRESS, help="show this help message and exit") if len(sys.argv)==1: parser.print_help(sys.stderr) sys.exit(1) args = vars(parser.parse_args()) if args["version"]: 
print(f"""\ SmartBugs {sb.cfg.VERSION} Python {sb.cfg.CPU.get('python_version')} {sb.cfg.UNAME.system} {sb.cfg.UNAME.release} {sb.cfg.UNAME.version} CPU {sb.cfg.CPU.get('brand_raw')}\ """) sys.exit(0) cfg_file = args["configuration"] del args["version"], args["configuration"] for k in [ k for k,v in args.items() if v is None ]: del args[k] return cfg_file, args def cli(site_cfg=sb.cfg.SITE_CFG): settings = sb.settings.Settings() if site_cfg and os.path.exists(site_cfg): settings.update(site_cfg) cfg_file, cli_settings = cli_args(settings) settings.update(cfg_file) settings.update(cli_settings) return settings def main(): try: settings = cli() sb.logging.message(None, f"Arguments passed: {sys.argv}") sb.smartbugs.main(settings) except sb.errors.SmartBugsError as e: sb.logging.message(sb.colors.error(e)) sys.exit(1)
5,222
33.589404
135
py
smartbugs
smartbugs-master/sb/colors.py
import colorama, re, sys
from colorama import Fore, Style

ANSIcolor = re.compile('\x1b\[[^m]*m')

def strip(s):
    return ANSIcolor.sub('', str(s))

if sys.platform == "win32":
    def color(col, s):
        return s
else:
    def color(col, s):
        return f"{col}{s}{Style.RESET_ALL}"

def file(s):
    return color(Fore.BLUE, s)

def tool(s):
    return color(Fore.CYAN, s)

def error(s):
    return color(Fore.RED, s)

def warning(s):
    return color(Fore.YELLOW, s)

def success(s):
    return color(Fore.GREEN, s)
525
16.533333
43
py
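A small usage sketch for `sb/colors.py` above: on Windows the colour codes are suppressed, and `strip` removes them again, e.g. before a message is written to a log file. The message text is made up.

```python
# Usage sketch for sb.colors (assumes the sb package is importable).
import sb.colors

msg = sb.colors.success("Analysis finished") + " " + sb.colors.file("samples/SimpleDAO.sol")
print(msg)                    # coloured on Unix terminals, plain text on Windows
print(sb.colors.strip(msg))   # ANSI escape sequences removed
```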
smartbugs
smartbugs-master/sb/docker.py
import docker, os, shutil, tempfile, requests import sb.io, sb.errors _client = None def client(): global _client if not _client: try: _client = docker.from_env() _client.info() except Exception: raise sb.errors.SmartBugsError("Docker: Cannot connect to service. Is it installed and running?") return _client images_loaded = set() def is_loaded(image): if image in images_loaded: return True try: image_list = client().images.list(image) except Exception as e: raise sb.errors.SmartBugsError(f"Docker: checking for image {image} failed.\n{e}") if image_list: images_loaded.add(image) return True return False def load(image): try: client().images.pull(image) except Exception as e: raise sb.errors.SmartBugsError(f"Docker: Loading image {image} failed.\n{e}") images_loaded.add(image) def __docker_volume(task): sbdir = tempfile.mkdtemp() sbdir_bin = os.path.join(sbdir, "bin") if task.tool.mode in ("bytecode","runtime"): # sanitize hex code code = sb.io.read_lines(task.absfn) code = code[0].strip() if code else "" if code.startswith("0x"): code = code[2:] _,filename = os.path.split(task.absfn) sb.io.write_txt(os.path.join(sbdir,filename), code) else: shutil.copy(task.absfn, sbdir) if task.tool.bin: shutil.copytree(task.tool.absbin, sbdir_bin) else: os.mkdir(sbdir_bin) if task.solc_path: sbdir_bin_solc = os.path.join(sbdir_bin, "solc") shutil.copyfile(task.solc_path, sbdir_bin_solc) return sbdir def __docker_args(task, sbdir): args = { "volumes": {sbdir: {"bind": "/sb", "mode": "rw"}}, "detach": True, "user": 0 } for k in ("image","cpu_quota","mem_limit"): v = getattr(task.tool, k, None) if v is not None: args[k] = v for k in ("cpu_quota","mem_limit"): v = getattr(task.settings, k, None) if v is not None: args[k] = v filename = f"/sb/{os.path.split(task.absfn)[1]}" # path in Linux Docker image timeout = task.settings.timeout or "0" main = 1 if task.settings.main else 0 args['command'] = task.tool.command(filename, timeout, "/sb/bin", main) args['entrypoint'] = task.tool.entrypoint(filename, timeout, "/sb/bin", main) return args def execute(task): sbdir = __docker_volume(task) args = __docker_args(task, sbdir) exit_code,logs,output,container = None,[],None,None try: container = client().containers.run(**args) try: result = container.wait(timeout=task.settings.timeout) exit_code = result["StatusCode"] except (requests.exceptions.ReadTimeout,requests.exceptions.ConnectionError): try: container.stop(timeout=10) except docker.errors.APIError: pass logs = container.logs().decode("utf8").splitlines() if task.tool.output: try: output,_ = container.get_archive(task.tool.output) output = b''.join(output) except docker.errors.NotFound: pass except Exception as e: raise sb.errors.SmartBugsError(f"Problem running Docker container: {e})") finally: try: container.kill() except Exception: pass try: container.remove() except Exception: pass shutil.rmtree(sbdir) return exit_code, logs, output, args
3,668
27.44186
109
py
smartbugs
smartbugs-master/sb/errors.py
class InternalError(Exception):
    pass

class SmartBugsError(Exception):
    pass
84
13.166667
32
py
smartbugs
smartbugs-master/sb/io.py
import yaml, json import sb.errors def read_yaml(fn): try: with open(fn, 'r', encoding='utf-8') as f: # for an empty file, return empty dict, not NoneType return yaml.safe_load(f) or {} except Exception as e: raise sb.errors.SmartBugsError(e) def read_json(fn): try: with open(fn, 'r', encoding='utf-8') as f: return json.load(f) except Exception as e: raise sb.errors.SmartBugsError(e) def write_json(fn, output): try: j = json.dumps(output, sort_keys=True, indent=4) with open(fn, 'w', encoding='utf-8') as f: print(j, file=f) except Exception as e: raise sb.errors.SmartBugsError(e) def read_lines(fn): try: with open(fn, 'r', encoding='utf-8') as f: return f.read().splitlines() except Exception as e: raise sb.errors.SmartBugsError(e) def write_txt(fn, output): try: with open(fn, 'w', encoding='utf-8') as f: if isinstance(output, str): f.write(output) else: for line in output: f.write(f"{line}\n") except Exception as e: raise sb.errors.SmartBugsError(e) def read_bin(fn): try: with open(fn, 'rb') as f: return f.read() except Exception as e: raise sb.errors.SmartBugsError(e) def write_bin(fn, output): try: with open(fn, 'wb') as f: f.write(output) except Exception as e: raise sb.errors.SmartBugsError(e)
1,560
25.457627
64
py
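A round-trip sketch for the I/O helpers in `sb/io.py` above. The file names and data are illustrative; any failure is reported as a `SmartBugsError`.

```python
# Round-trip example for sb.io (assumes sb is importable and the cwd is writable).
import sb.io

data = {"tool": "mythril", "findings": ["Reentrancy"]}
sb.io.write_json("demo.json", data)                  # pretty-printed, sorted keys
print(sb.io.read_json("demo.json"))                  # {'findings': ['Reentrancy'], 'tool': 'mythril'}

sb.io.write_txt("demo.txt", ["line 1", "line 2"])    # lists are written line by line
print(sb.io.read_lines("demo.txt"))                  # ['line 1', 'line 2']
```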
smartbugs
smartbugs-master/sb/logging.py
import multiprocessing, threading, os, sys, time, re import sb.colors def logger_process(logfn, overwrite, queue, prolog): log_parent_folder = os.path.dirname(logfn) if log_parent_folder: os.makedirs(log_parent_folder, exist_ok=True) mode = "w" if overwrite else "a" with open(logfn, mode) as logfile: for log in prolog: print(log, file=logfile) while True: log = queue.get() if log is None: break print(log, file=logfile) __prolog = [] def start(logfn, append, queue): global logger logger = threading.Thread(target=logger_process, args=(logfn,append,queue,__prolog)) logger.start() quiet = False def message(con=None, log=None, queue=None): if con and log=="": log = sb.colors.strip(con) if con and not quiet: print(con, flush=True) if log: if queue: queue.put(log) else: __prolog.append(log) def stop(queue): queue.put(None) logger.join()
1,041
23.809524
88
py
smartbugs
smartbugs-master/sb/parse_utils.py
'''Utilities for the output parsers''' import re DOCKER_CODES = { 125: "DOCKER_INVOCATION_PROBLEM", 126: "DOCKER_CMD_NOT_EXECUTABLE", 127: "DOCKER_CMD_NOT_FOUND", 137: "DOCKER_KILL_OOM", # container received KILL signal, manually or because out of memory 139: "DOCKER_SEGV", # segmentation violation 143: "DOCKER_TERM" # container was externally stopped } ANSI = re.compile('\x1b\[[^m]*m') def discard_ANSI(lines): return ( ANSI.sub('',line) for line in lines ) def truncate_message(m, length=205): half_length = (length-5)//2 return m if len(m) <= length else m[:half_length]+' ... '+m[-half_length:] TRACEBACK = "Traceback (most recent call last)" # Python EXCEPTIONS = ( re.compile(".*line [0-9: ]*(Segmentation fault|Killed)"), # Shell re.compile('Exception in thread "[^"]*" (.*)'), # Java re.compile("thread '[^']*' panicked at '([^']*)'"), # Rust ) def exceptions(lines): exceptions = set() traceback = False for line in lines: if traceback: if line and line[0] != " ": exceptions.add(f"exception ({line})") traceback = False elif line.startswith(TRACEBACK): traceback = True else: for re_exception in EXCEPTIONS: m = re_exception.match(line) if m: exceptions.add(f"exception ({m[1]})") return exceptions def add_match(matches, line, patterns): for pattern in patterns: m = pattern.match(line) if m: matches.add(m[1]) return True return False def errors_fails(exit_code, log, log_expected=True): errors = set() # errors detected and handled by the tool fails = set() # exceptions not caught by the tool, or outside events leading to abortion if exit_code is None: fails.add('DOCKER_TIMEOUT') elif exit_code == 0: pass elif exit_code == 127: fails.add("SmartBugs was invoked with option 'main', but the filename did not match any contract") elif exit_code in DOCKER_CODES: fails.add(DOCKER_CODES[exit_code]) elif 128 <= exit_code <= 128+64: fails.add(f"DOCKER_RECEIVED_SIGNAL_{exit_code-128}") else: # remove it for individual signals and tools, where it is not an error errors.add(f"EXIT_CODE_{exit_code}") if log: fails.update(exceptions(log)) elif log_expected and not fails: fails.add('execution failed') return errors, fails
2,528
29.841463
106
py
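The helpers in `sb/parse_utils.py` above are shared by the tool-specific parsers. The sketch below feeds them a hand-written log to show how a non-zero exit code and a Python traceback end up in the `errors` and `fails` sets.

```python
# Example use of sb.parse_utils.errors_fails (assumes sb is importable; the log is made up).
import sb.parse_utils

log = [
    "Traceback (most recent call last):",
    '  File "tool.py", line 1, in <module>',
    "ZeroDivisionError: division by zero",
]
errors, fails = sb.parse_utils.errors_fails(exit_code=1, log=log)
print(errors)   # {'EXIT_CODE_1'}
print(fails)    # {'exception (ZeroDivisionError: division by zero)'}
```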
smartbugs
smartbugs-master/sb/parsing.py
import os, importlib.util import sb.cfg, sb.errors tool_parsers = {} def get_parser(tool): tid,tmode = tool["id"],tool["mode"] key = (tid,tmode) if key not in tool_parsers: try: modulename = f"tools.{tid}.{tmode}" fn = os.path.join(sb.cfg.TOOLS_HOME, tid, tool["parser"]) spec = importlib.util.spec_from_file_location(modulename, fn) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) tool_parsers[key] = module except Exception as e: raise sb.errors.SmartBugsError(f"Cannot load parser for {tid}/{tmode}\n{e}") return tool_parsers[key] def parse(task_log, tool_log, tool_output): tool = task_log["tool"] filename = task_log["filename"] exit_code = task_log["result"]["exit_code"] tool_parser = get_parser(tool) try: findings,infos,errors,fails = tool_parser.parse(exit_code, tool_log, tool_output) for finding in findings: # if FINDINGS is defined, ensure that the current finding is in FINDINGS # irrelevant for SmartBugs, but may be relevant for programs further down the line if tool_parser.FINDINGS and finding["name"] not in tool_parser.FINDINGS: raise sb.errors.SmartBugsError(f"'{finding['name']}' not among the findings of {tool['id']}") # check that filename within docker corresponds to filename outside, before replacing it # splitting at "/" is ok, since it is a Linux path from within the docker container assert not finding.get("filename") or filename.endswith(finding["filename"].split("/")[-1]) finding["filename"] = filename except Exception as e: raise sb.errors.SmartBugsError(f"Parsing of results failed\n{e}") return { "findings": findings, "infos": sorted(infos), "errors": sorted(errors), "fails": sorted(fails), "parser": { "id": tool["id"], "mode": tool["mode"], "version": tool_parser.VERSION } }
2,126
36.982143
109
py
smartbugs
smartbugs-master/sb/reparse.py
import os, argparse, multiprocessing, sys import sb.cfg, sb.io, sb.parsing, sb.sarif, sb.errors def reparser(taskqueue, sarif, verbose): while True: d = taskqueue.get() if d is None: break fn_sbj = os.path.join(d, sb.cfg.TASK_LOG) fn_log = os.path.join(d, sb.cfg.TOOL_LOG) fn_tar = os.path.join(d, sb.cfg.TOOL_OUTPUT) fn_json = os.path.join(d, sb.cfg.PARSER_OUTPUT) fn_sarif = os.path.join(d, sb.cfg.SARIF_OUTPUT) if not os.path.exists(fn_sbj): if verbose: print(f"{d}: {sb.cfg.TASK_LOG} not found, skipping") continue for fn in (fn_json, fn_sarif): try: os.remove(fn) except Exception: pass if os.path.exists(fn_json) or os.path.exists(fn_sarif): print(f"{d}: Cannot clear old parse output, skipping") continue if verbose: print(d) sbj = sb.io.read_json(fn_sbj) log = sb.io.read_lines(fn_log) if os.path.exists(fn_log) else [] tar = sb.io.read_bin(fn_tar) if os.path.exists(fn_tar) else None try: parsed_result = sb.parsing.parse(sbj, log, tar) except sb.errors.SmartBugsError as e: print(e) continue sb.io.write_json(fn_json, parsed_result) if sarif: sarif_result = sb.sarif.sarify(sbj["tool"], parsed_result["findings"]) sb.io.write_json(fn_sarif, sarif_result) def main(): argparser = argparse.ArgumentParser( prog="reparse", description=f"Parse the tool output ({sb.cfg.TOOL_LOG}, {sb.cfg.TOOL_OUTPUT}) into {sb.cfg.PARSER_OUTPUT}.") argparser.add_argument("--sarif", action="store_true", help=f"generate sarif output, {sb.cfg.SARIF_OUTPUT}, as well") argparser.add_argument("--processes", type=int, metavar="N", default=1, help="number of parallel processes (default 1)") argparser.add_argument("-v", action='store_true', help="show progress") argparser.add_argument("results", nargs="+", metavar="DIR", help="directories containing the run results") if len(sys.argv)==1: argparser.print_help(sys.stderr) sys.exit(1) args = argparser.parse_args() results = set() for r in args.results: for path,_,files in os.walk(r): if sb.cfg.TASK_LOG in files: results.add(path) # spawn processes, instead of forking, to have same behavior under Linux and MacOS mp = multiprocessing.get_context("spawn") taskqueue = mp.Queue() for r in sorted(results): taskqueue.put(r) for _ in range(args.processes): taskqueue.put(None) reparsers = [ mp.Process(target=reparser, args=(taskqueue,args.sarif,args.v)) for _ in range(args.processes) ] for r in reparsers: r.start() for r in reparsers: r.join() if __name__ == '__main__': main()
3,050
29.207921
116
py
smartbugs
smartbugs-master/sb/results2csv.py
import argparse, csv, os, sys import sb.cfg, sb.io, sb.utils FIELDS = ( "filename", "basename", "toolid", "toolmode", "parser_version", "runid", "start", "duration", "exit_code", "findings", "infos", "errors", "fails") def main(): argparser = argparse.ArgumentParser( prog="results2csv", description="Write key information from runs to stdout, in csv format.") argparser.add_argument("-p", action='store_true', help="encode lists (findings, infos, errors, fails) as Postgres arrays") argparser.add_argument("-v", action='store_true', help="verbose: show progress") argparser.add_argument("-f", nargs="+", metavar="FIELD", type=str, choices=FIELDS, default=FIELDS, help=f"fields to include in the csv output; one or more of {', '.join(FIELDS)} (default: all)") argparser.add_argument("-x", nargs="+", metavar="FIELD", type=str, choices=FIELDS, default=[], help=f"fields to exclude from csv output; one or more of {', '.join(FIELDS)} (default: none excluded)") argparser.add_argument("results", nargs="+", metavar="DIR", help="directories containing the run results") if len(sys.argv)==1: argparser.print_help(sys.stderr) sys.exit(1) args = argparser.parse_args() fields = [ f for f in args.f if f not in args.x ] results = set() for r in args.results: for path,_,files in os.walk(r): if sb.cfg.TASK_LOG in files: results.add(path) csv_out = csv.writer(sys.stdout) csv_out.writerow(fields) for r in sorted(results): if args.v: print(r, file=sys.stderr) try: task_log = sb.io.read_json(os.path.join(r,sb.cfg.TASK_LOG)) except Exception as e: print(f"Cannot read task log: {e}", file=sys.stderr) continue try: parser_output = sb.io.read_json(os.path.join(r,sb.cfg.PARSER_OUTPUT)) except Exception as e: print(f"Cannot read parsed output; use 'reparse' to generate it.\n{e}", file=sys.stderr) continue csv_out.writerow(data2csv(task_log, parser_output, args.p, fields)) def list2postgres(l): es = [] for e in l: if any (ch in e for ch in ('"', ",", "\n", "{", "}")): es.append('"'+e.replace('"','\\"')+'"') else: es.append(e) return "{" + ",".join(es) + "}" def list2excel(l): es = [] for e in l: if any (ch in e for ch in ('"', ",", "\n")): es.append('"'+e.replace('"','""')+'"') else: es.append(e) return ",".join(es) def data2csv(task_log, parser_output, postgres, fields): csv = { "filename": task_log["filename"], "basename": os.path.basename(task_log["filename"]), "toolid": task_log["tool"]["id"], "toolmode": task_log["tool"]["mode"], "parser_version": parser_output["parser"]["version"], "runid": task_log["runid"], "start": task_log["result"]["start"], "duration": task_log["result"]["duration"], "exit_code": task_log["result"]["exit_code"], "findings": sorted({ sb.utils.str2label(f["name"]) for f in parser_output["findings"]}), "infos": parser_output["infos"], "errors": parser_output["errors"], "fails": parser_output["fails"], } for f in ("findings", "infos", "errors", "fails"): if postgres: csv[f] = list2postgres(csv[f]) else: csv[f] = list2excel(csv[f]) return [ csv[f] for f in fields ] if __name__ == '__main__': sys.exit(main())
3,783
31.904348
111
py
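The `-p` flag of `results2csv` switches between two encodings of the list-valued fields. The sketch below calls the two helpers directly on a made-up findings list to show the difference; it assumes the full `sb` package is importable.

```python
# Comparing the spreadsheet encoding with the Postgres-array encoding.
from sb.results2csv import list2excel, list2postgres

findings = ["Reentrancy", 'Unchecked "call"']
print(list2excel(findings))      # Reentrancy,"Unchecked ""call"""
print(list2postgres(findings))   # {Reentrancy,"Unchecked \"call\""}
```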
smartbugs
smartbugs-master/sb/sarif.py
import sb.tools, sb.utils def sarify(tool, findings): return { "$schema": "https://json.schemastore.org/sarif-2.1.0.json", "version": "2.1.0", "runs": [ run_info(tool, findings) ] } def run_info(tool, findings): fnames = { finding["name"] for finding in findings } return { "tool": tool_info(tool, fnames), "results": [ result_info(tool["id"],finding) for finding in findings ] } def tool_info(tool, fnames): driver = { "name": tool.get("name", tool["id"]), # tool["id"] always exists "rules": [ rule_info(tool["id"], fname) for fname in fnames ] } v = tool.get("version") if v: driver["version"] = v v = tool.get("origin") if v: driver["informationUri"] = v return { "driver": driver } def rule_info(tool_id, fname): info_finding = sb.tools.info_finding(tool_id, fname) rule_dict = { "name": fname, "id": rule_id(tool_id, fname) } v = rule_shortDescription(info_finding) if v: rule_dict["shortDescription"] = { "text": v } v = rule_fullDescription(info_finding) if v: rule_dict["fullDescription"] = { "text": v } v = rule_help(info_finding) if v: rule_dict["help"] = { "text": v } v = rule_security_severity(info_finding) if v: rule_dict["properties"] = { "security-severity": v } v = rule_problem_severity(info_finding) if v: rule_dict["properties"] = { "problem": { "severity": v }} return rule_dict def result_info(tool_id, finding): fname = finding["name"] info_finding = sb.tools.info_finding(tool_id, fname) result_dict = { "ruleId": rule_id(tool_id, fname), "locations": [ { "physicalLocation": { "artifactLocation": { "uri": finding["filename"] } } } ] } v = result_message(finding, info_finding) if v: result_dict["message"] = { "text": v } v = result_level(finding) if v: result_dict["level"] = v v = result_region(finding) if v: result_dict["locations"][0]["physicalLocation"]["region"] = v v = result_location_message(finding) if v: result_dict["locations"][0]["message"] = { "text": v } return result_dict def rule_id(tool_id, fname): return f"{sb.utils.str2label(tool_id)}_{sb.utils.str2label(fname)}" def rule_shortDescription(info_finding): return info_finding.get("descr_short") def rule_fullDescription(info_finding): descr_short = info_finding.get("descr_short") descr_long = info_finding.get("descr_long") classification = info_finding.get("classification") method = info_finding.get("method") description = [] if descr_short: description.append(descr_short) if descr_long: description.append(descr_long) if classification: description.append(f"Classification: {classification}.") if method: description.append(f"Detection method: {method}") return " ".join(description) def rule_help(info_finding): descr_short = info_finding.get("descr_short") descr_long = info_finding.get("descr_long") return (descr_long if descr_long else descr_short if descr_short else "") def rule_problem_severity(info_finding): return info_finding.get("level", "").strip().lower() def rule_security_severity(info_finding): severity = info_finding.get("severity", "").strip().lower() try: return float(severity) except Exception: return ("2.0" if severity == "low" else "5.5" if severity == "medium" else "8.0" if severity == "high" else "") def result_message(finding, info_finding): message = ( finding.get("message") or info_finding.get("descr_short") or finding["name"]) severity = finding.get("severity") return (f"{message}\nSeverity: {severity}" if message and severity else message if message else f"Severity: {severity}" if severity else "") def result_level(finding): level = finding.get("level","").strip().lower() return level if level in 
("none", "note", "warning", "error") else None def result_location_message(finding): contract = finding.get("contract") function = finding.get("function") return (f"contract {contract}, function {function}" if contract and function else f"contract {contract}" if contract else f"function {function}" if function else "") def result_region(finding): region_dict = {} # source code for f,r in (("line","startLine"), ("column","startColumn"), ("line_end","endLine"), ("column_end","endColumn")): if f in finding: region_dict[r] = int(finding[f]) if region_dict: return region_dict # hex code for a,l,c in (("address","startLine","startColumn"), ("address_end","endLine","endColumn")): if a in finding: region_dict[l] = 1 region_dict[c] = 1 + 2*int(finding[a]) return region_dict
5,054
27.885714
116
py
smartbugs
smartbugs-master/sb/settings.py
import os, string, time import sb.io, sb.logging, sb.cfg, sb.errors HOME = os.path.expanduser("~") # cross-plattform safe NOW = time.gmtime() # only use in main process, value may be different in sub-processes PID = os.getpid() # only use in main process, value may be different in sub-processes class Settings: def __init__(self): self.frozen = False self.files = [] self.main = False self.runtime = False self.tools = [] self.runid = "${YEAR}${MONTH}${DAY}_${HOUR}${MIN}" self.overwrite = False self.processes = 1 self.timeout = None self.cpu_quota = None self.mem_limit = None self.results = os.path.join("results","${TOOL}","${RUNID}","${FILENAME}") self.log = os.path.join("results","logs","${RUNID}.log") self.json = False self.sarif = False self.quiet = False def freeze(self): if self.frozen: return self.frozen = True env = { 'SBVERSION': sb.cfg.VERSION, 'SBHOME': sb.cfg.HOME, 'HOME': HOME, 'PID': PID, 'YEAR': str(NOW.tm_year).zfill(4), # year with century, four digits 'MONTH': str(NOW.tm_mon).zfill(2), # month 01..12 'DAY': str(NOW.tm_mday).zfill(2), # day of month 01..31 'HOUR': str(NOW.tm_hour).zfill(2), # hour 00..23 'MIN': str(NOW.tm_min).zfill(2), # minutes 00..59 'SEC': str(NOW.tm_sec).zfill(2), # seconds 00..61 'ZONE': NOW.tm_zone, # abbreviation of timezone name } try: self.runid = string.Template(self.runid).substitute(env) except KeyError as e: raise sb.errors.SmartBugsError(f"Unknown variable '{e}' in run id") try: self.log = string.Template(self.log).substitute(env, RUNID=self.runid) except KeyError as e: raise sb.errors.SmartBugsError(f"Unknown variable '{e}' in name of log file") self.results = string.Template(self.results).safe_substitute(env, RUNID=self.runid) self.results = string.Template(self.results) def resultdir(self, toolid, toolmode, absfn, relfn): if not self.frozen: raise sb.errors.InternalError("Template of result directory is accessed before settings have been frozen") absdir,filename = os.path.split(absfn) reldir = os.path.dirname(relfn) filebase,fileext = os.path.splitext(filename) fileext = fileext.replace('.','') try: return self.results.substitute( TOOL=toolid, MODE=toolmode, ABSDIR=absdir, RELDIR=reldir, FILENAME=filename, FILEBASE=filebase, FILEEXT=fileext) except KeyError as e: raise sb.errors.SmartBugsError(f"Unknown variable '{e}' in template of result dir") def update(self, settings): if self.frozen: raise sb.errors.InternalError("Frozen settings cannot be updated") if not settings: return if isinstance(settings, str): s = sb.io.read_yaml(settings) else: s = settings if not isinstance(s, dict): raise sb.errors.SmartBugsError(f"Settings cannot be updated by objects of type '{type(settings).__name__}'") for k,v in s.items(): k = k.replace("-", "_") # attributes accepting None as a value if k in ("timeout", "cpu_quota", "mem_limit") and v in (None, 0, "0"): setattr(self, k, None) elif k in ("timeout", "cpu_quota", "processes"): try: v = int(v) assert v > 0 setattr(self, k, v) except Exception: raise sb.errors.SmartBugsError(f"'{k}' needs to be a positive integer (in {settings}).") elif k in ("tools"): if not isinstance(v,list): v = [v] try: setattr(self, k, [str(vi) for vi in v]) except Exception: raise sb.errors.SmartBugsError(f"'{k}' needs to be a string or a list of strings (in {settings}).") elif k in ("files"): if not isinstance(v,list): v = [v] try: patterns = [str(vi) for vi in v] except Exception: raise sb.errors.SmartBugsError(f"'{k}' needs to be a string or a list of strings (in {settings}).") root_specs = [] for pattern in patterns: try: pattern 
= string.Template(pattern).substitute(HOME=HOME) except KeyError as e: raise sb.errors.SmartBugsError(f"Unknown variable '{e}' in file specification") root_spec = pattern.split(":") if len(root_spec) == 1: root,spec = None,root_spec[0] elif len(root_spec) == 2: root,spec = root_spec[0],root_spec[1] else: raise sb.errors.SmartBugsError(f"File pattern {pattern} contains more than one colon (in {settings}).") root_specs.append((root,spec)) setattr(self, k, root_specs) elif k in ("main", "runtime", "overwrite", "quiet", "json", "sarif"): try: assert isinstance(v, bool) setattr(self, k, v) except Exception: raise sb.errors.SmartBugsError(f"'{k}' needs to be a Boolean (in {settings}).") elif k in ("results", "log"): try: setattr(self, k, str(v).replace("/",os.path.sep)) except Exception: raise sb.errors.SmartBugsError(f"'{k}' needs to be a path (in {settings}).") elif k in ("runid"): try: setattr(self, k, str(v)) except Exception: raise sb.errors.SmartBugsError(f"'{k}' needs to be a string (in {settings}).") elif k == "mem_limit": try: v = str(v).replace(" ","") if v[-1] in "kKmMgG": assert int(v[:-1]) > 0 else: assert int(v) > 0 setattr(self, k, v) except Exception: raise sb.errors.SmartBugsError(f"'{k}' needs to be a memory specifcation (in {settings}).") else: raise sb.errors.SmartBugsError(f"Invalid key '{k}' (in {settings}).") def dict(self): d = {} for k,v in self.__dict__.items(): if k == "frozen": continue elif k == "results" and v and not isinstance(v,str): d[k] = self.results.template else: d[k] = v return d def __str__(self): l = [ f"{k}: {str(v)}" for k,v in self.dict().items() ] return f"{{{', '.join(l)}}}"
7,323
39.021858
127
py
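A sketch of the `Settings` life cycle defined above: values are layered via `update`, `freeze` expands the `${...}` templates, and `resultdir` instantiates the result-path template per task. The file paths and tool name are examples, and the printed run id depends on the current time.

```python
# Settings life cycle (assumes the sb package is importable).
import sb.settings

s = sb.settings.Settings()
s.update({"tools": ["slither"], "files": ["samples/*.sol"], "timeout": 300})
s.freeze()                       # resolves the runid, log and results templates
print(s.runid)                   # e.g. 20240101_1200
print(s.resultdir("slither", "solidity",
                  "/home/user/smartbugs/samples/SimpleDAO.sol",
                  "samples/SimpleDAO.sol"))
# e.g. results/slither/20240101_1200/SimpleDAO.sol
```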
smartbugs
smartbugs-master/sb/smartbugs.py
import glob, os, operator import sb.tools, sb.solidity, sb.tasks, sb.docker, sb.analysis, sb.colors, sb.logging, sb.cfg, sb.io, sb.settings, sb.errors def collect_files(patterns): files = [] for root,spec in patterns: if spec.endswith(".sbd"): contracts = [] for sbdfile in glob.glob(spec, recursive=True): contracts.extend(sb.io.read_lines(sbdfile)) elif root: try: contracts = glob.glob(spec, root_dir=root, recursive=True) except TypeError: raise sb.errors.SmartBugsError(f"{root}:{spec}: colons in file patterns only supported for Python>=3.10") else: # avoid root_dir, compatibility with python<3.10 contracts = glob.glob(spec, recursive=True) for relfn in contracts: root_relfn = os.path.join(root,relfn) if root else relfn absfn = os.path.normpath(os.path.abspath(root_relfn)) if os.path.isfile(absfn) and absfn[-4:] in (".hex", ".sol"): files.append( (absfn,relfn) ) return files def collect_tasks(files, tools, settings): used_rdirs = set() rdir_collisions = 0 def disambiguate(base): nonlocal rdir_collisions cnt = 1 rdir = base collision = 0 while rdir in used_rdirs: collision = 1 cnt += 1 rdir = f"{base}_{cnt}" used_rdirs.add(rdir) rdir_collisions += collision return rdir def report_collisions(): if rdir_collisions > 0: sb.logging.message( sb.colors.warning(f"{rdir_collisions} collision(s) of result directories resolved."), "") if rdir_collisions > len(files)*0.1: sb.logging.message(sb.colors.warning( " Consider using more of $TOOL, $MODE, $ABSDIR, $RELDIR, $FILENAME,\n" " $FILEBASE, $FILEEXT when specifying the 'results' directory.")) def get_solc(pragma, fn, toolid): if not pragma: raise sb.errors.SmartBugsError(f"{fn}: no pragma, cannot determine solc version") if not sb.solidity.ensure_solc_versions_loaded(): sb.logging.message(sb.colors.warning( "Failed to load list of solc versions; are we connected to the internet? 
Proceeding with local compilers"), "") solc_version = sb.solidity.get_solc_version(pragma) if not solc_version: raise sb.errors.SmartBugsError(f"{fn}: no compiler found that matches {pragma}") solc_path = sb.solidity.get_solc_path(solc_version) if not solc_path: raise sb.errors.SmartBugsError(f"{fn}: cannot load solc {solc_version} needed by {toolid}") return solc_version,solc_path def ensure_loaded(image): if not sb.docker.is_loaded(image): sb.logging.message(f"Loading docker image {image}, may take a while ...") sb.docker.load(image) tasks = [] exceptions = [] last_absfn = None for absfn,relfn in sorted(files): if absfn == last_absfn: # ignore duplicate contracts continue last_absfn = absfn is_sol = absfn[-4:]==".sol" is_byc = absfn[-4:]==".hex" and not (absfn[-7:-4]==".rt" or settings.runtime) is_rtc = absfn[-4:]==".hex" and (absfn[-7:-4]==".rt" or settings.runtime) contract = os.path.basename(absfn)[:-4] pragma,contractnames = None,[] if is_sol: prg = sb.io.read_lines(absfn) pragma,contractnames = sb.solidity.get_pragma_contractnames(prg) if settings.main and contract not in contractnames: exceptions.append(f"Contract '{contract}' not found in {absfn}") for tool in sorted(tools, key=operator.attrgetter("id", "mode")): if ((is_sol and tool.mode=="solidity") or (is_byc and tool.mode=="bytecode") or (is_rtc and tool.mode=="runtime")): # find unique name for result dir # ought to be the same when rerunning SB with the same args, # due to sorting files and tools base = settings.resultdir(tool.id,tool.mode,absfn,relfn) rdir = disambiguate(base) # load resources solc_version, solc_path = None,None if tool.solc: try: solc_version, solc_path = get_solc(pragma, relfn, tool.id) except Exception as e: exceptions.append(e) ensure_loaded(tool.image) task = sb.tasks.Task(absfn,relfn,rdir,solc_version,solc_path,tool,settings) tasks.append(task) report_collisions() if exceptions: errors = "\n".join(sorted({str(e) for e in exceptions})) raise sb.errors.SmartBugsError(f"Error(s) while collecting tasks:\n{errors}") return tasks def main(settings: sb.settings.Settings): settings.freeze() sb.logging.quiet = settings.quiet sb.logging.message( sb.colors.success(f"Welcome to SmartBugs {sb.cfg.VERSION}!"), f"Settings: {settings}") tools = sb.tools.load(settings.tools) if not tools: sb.logging.message(sb.colors.warning("Warning: no tools selected!")) sb.logging.message("Collecting files ...") files = collect_files(settings.files) sb.logging.message(f"{len(files)} files to analyse") sb.logging.message("Assembling tasks ...") tasks = collect_tasks(files, tools, settings) sb.logging.message(f"{len(tasks)} tasks to execute") sb.analysis.run(tasks, settings)
5,736
37.246667
124
py
smartbugs
smartbugs-master/sb/solidity.py
import os,re from pathlib import Path import solcx # load binaries for Linux in Docker images, not for host platform solcx.set_target_os("linux") VOID_START = re.compile("//|/\*|\"|'") QUOTE_END = re.compile("(?<!\\\\)'") DQUOTE_END = re.compile('(?<!\\\\)"') def remove_comments_strings(prg): todo = "\n".join(prg) # normalize line ends done = "" while True: m = VOID_START.search(todo) if not m: done += todo break else: done += todo[:m.start()] if m[0] == "//": end = todo.find('\n', m.end()) todo = "" if end == -1 else todo[end:] elif m[0] == "/*": end = todo.find("*/", m.end()) done += " " todo = "" if end == -1 else todo[end+2:] else: if m[0] == "'": m2 = QUOTE_END.search(todo[m.end():]) else: m2 = DQUOTE_END.search(todo[m.end():]) if not m2: # unclosed string break todo = todo[m.end()+m2.end():] return done PRAGMA = re.compile("pragma solidity.*?;") RE_CONTRACT_NAMES = re.compile(r'(?:contract|library)\s+([A-Za-z0-9_]*)(?:\s*{|\s+is\s)') def get_pragma_contractnames(prg): prg_wo_comments_strings = remove_comments_strings(prg) m = PRAGMA.search(prg_wo_comments_strings) pragma = m[0] if m else None contractnames = RE_CONTRACT_NAMES.findall(prg_wo_comments_strings) return pragma,contractnames cached_solc_versions = None def ensure_solc_versions_loaded(): global cached_solc_versions if cached_solc_versions: return True try: cached_solc_versions = solcx.get_installable_solc_versions() return True except Exception: cached_solc_versions = solcx.get_installed_solc_versions() return False def get_solc_version(pragma): if not pragma: return None # correct >=0.y.z to ^0.y.z pragma = re.sub(r">=0\.", r"^0.", pragma) # replace x.y by x.y.0 pragma = re.sub(r"([^0-9])([0-9]+\.[0-9]+)([^0-9.]|$)", r"\1\2.0\3", pragma) try: version = solcx.install._select_pragma_version(pragma, cached_solc_versions) except Exception: version = None return version cached_solc_paths = {} def get_solc_path(version): if not version: return None if version in cached_solc_paths: return cached_solc_paths[version] try: solcx.install_solc(version) solc_path = solcx.get_executable(version) except Exception: solc_path = None cached_solc_paths[version] = solc_path return solc_path
2,728
26.29
89
py
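A possible usage sketch for sb/solidity.py above. It assumes that SmartBugs' sb package is importable; resolving the pragma to a concrete compiler may download the solc release list and binary via the vendored py-solc-x on first use.

import sb.solidity

source_lines = [
    "// SPDX-License-Identifier: MIT",
    "pragma solidity ^0.6.0;",
    "contract SimpleDAO { mapping(address => uint) public credit; }",
]

# comments and string literals are stripped before matching
pragma, names = sb.solidity.get_pragma_contractnames(source_lines)
print(pragma)   # pragma solidity ^0.6.0;
print(names)    # ['SimpleDAO']

sb.solidity.ensure_solc_versions_loaded()    # fetches the release list, or falls back to installed versions
version = sb.solidity.get_solc_version(pragma)
print(version)                               # highest matching release, or None
print(sb.solidity.get_solc_path(version))    # installs the binary on first use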
smartbugs
smartbugs-master/sb/tasks.py
class Task:

    def __init__(self, absfn, relfn, rdir, solc_version, solc_path, tool, settings):
        self.absfn = absfn # absolute normalized path
        self.relfn = relfn # path within project
        self.rdir = rdir # directory for results
        self.solc_version = solc_version
        self.solc_path = solc_path
        self.tool = tool
        self.settings = settings

    def __str__(self):
        s = [ f"{k}: {str(v)}" for k,v in self.__dict__.items() ]
        return f"{{{', '.join(s)}}}"
512
35.642857
84
py
smartbugs
smartbugs-master/sb/tools.py
import os, string import sb.io, sb.cfg, sb.errors FIELDS = ("id","mode","image","name","origin","version","info","parser", "output","bin","solc","cpu_quota","mem_limit","command","entrypoint") class Tool(): def __init__(self, cfg): for k in FIELDS: v = cfg.get(k) if v is not None: if k in ("solc"): try: v = bool(v) except Exception: raise sb.errors.SmartBugsError(f"Tool: value of attribute '{k}' is not a Boolean.\n{cfg}") elif k in ("cpu_quota"): try: v = int(v) assert v >= 0 except Exception: raise sb.errors.SmartBugsError(f"Tool: value of attribute '{k}' is not an integer>=0.\n{cfg}") elif k in ("mem_limit"): try: v = str(v).replace(" ","") if v[-1] in "kKmMgG": assert int(v[:-1]) > 0 else: assert int(v) > 0 except Exception: raise sb.errors.SmartBugsError(f"Tool: value of attribute '{k}' is not a valid memory specifcation.\n{cfg}") else: try: v = str(v) except Exception: raise sb.errors.SmartBugsError(f"Tool: value of attribute '{k}' is not a string.\n{cfg}") if k in ("command","entrypoint"): k = f"_{k}" v = string.Template(v) if v else None setattr(self, k, v) for k in ("id", "mode"): if not getattr(self, k): raise sb.errors.InternalError(f"Tool: Field '{k}' missing.\n{cfg}") if not self.image: raise sb.errors.SmartBugsError(f"Tool {self.id}/{self.mode}: no image specified") extras = set(cfg.keys()).difference(FIELDS) if extras: raise sb.errors.SmartBugsError(f"Tool {self.id}/{self.mode}: extra field(s) {', '.join(extras)}") if not self._command and not self._entrypoint: raise sb.errors.SmartBugsError(f"Tool {self.id}/{self.mode}: neither command nor entrypoint specified.") if not self.parser: self.parser = sb.cfg.TOOL_PARSER if self.bin: self.absbin = os.path.join(sb.cfg.TOOLS_HOME,self.id,self.bin) def command(self, filename, timeout, bin, main): try: return self._command.substitute(FILENAME=filename, TIMEOUT=timeout, BIN=bin, MAIN=main) if self._command else None except KeyError as e: raise sb.errors.SmartBugsError(f"Unknown variable '{e}' in command of tool {self.id}/{self.mode}") def entrypoint(self, filename, timeout, bin, main): try: return self._entrypoint.substitute(FILENAME=filename, TIMEOUT=timeout, BIN=bin, MAIN=main) if self._entrypoint else None except KeyError as e: raise sb.errors.SmartBugsError(f"Unknown variable '{e}' in entrypoint of tool {self.id}/{self.mode}") def dict(self): d = {} for k,v in self.__dict__.items(): if k == "_command": d["command"] = self._command.template if self._command else None elif k == "_entrypoint": d["entrypoint"] = self._entrypoint.template if self._entrypoint else None elif k == "absbin": # We do not want to leak private information, like the full path, into log files pass else: d[k] = v return d def __str__(self): l = [ f"{k}: {str(v)}" for k,v in self.dict().items() ] return f"{{{', '.join(l)}}}" def load(ids, tools = [], seen = set()): """Load tool specifications Parameters ---------- ids: list[str] list of strings identifying the tools tools: list[Tool] list of tool specifications already loaded seen: set[str] list of tool ids and tool aliases already processed Returns ------- list[Tool] list of tool specifications corresponding to parameter ids """ for id in ids: if id in seen: continue seen.add(id) toolpath = os.path.join(sb.cfg.TOOLS_HOME, id) fn = os.path.join(toolpath, sb.cfg.TOOL_CONFIG) cfg = sb.io.read_yaml(fn) alias = cfg.get("alias") if alias: load(alias, tools, seen) continue cfg["id"] = id found = False for mode in ("solidity", "bytecode", "runtime"): if mode not in cfg: continue found = True cfg_copy = cfg.copy() for m 
in ("solidity", "bytecode", "runtime"): cfg_copy.pop(m,None) cfg_copy["mode"] = mode if not isinstance(cfg[mode], dict): raise sb.errors.SmartBugsError(f"Tool {id}/{mode}: key/value mapping expected.") cfg_copy.update(cfg[mode]) tools.append(Tool(cfg_copy)) if not found: raise sb.errors.SmartBugsError(f"{fn}: needs one of the attributes 'alias', 'solidity', 'bytecode', 'runtime'") return tools # the contents of tools/.../findings.yaml is cached, once per process info_findings = {} def info_finding(tool_id, fname): if tool_id not in info_findings: try: fn = os.path.join(sb.cfg.TOOLS_HOME, tool_id, sb.cfg.TOOL_FINDINGS) info_findings[tool_id] = sb.io.read_yaml(fn) except Exception: info_findings[tool_id] = {} info = info_findings[tool_id].get(fname) return {} if info is None else info
5,861
35.867925
132
py
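To make the $-placeholder mechanism used by Tool.command and Tool.entrypoint above concrete, here is a self-contained sketch with string.Template; the container paths and the timeout are made-up example values, not anything SmartBugs prescribes.

import string

entrypoint = string.Template("'$BIN/do_solidity.sh' '$FILENAME' '$TIMEOUT' '$BIN'")

cmd = entrypoint.substitute(
    FILENAME="/sb/token.sol",   # path of the contract inside the container (assumed)
    TIMEOUT=600,                # per-task timeout in seconds (assumed)
    BIN="/sb/bin",              # folder holding the tool's helper scripts (assumed)
    MAIN=1,                     # unused by this particular template; extra keys are ignored
)
print(cmd)  # '/sb/bin/do_solidity.sh' '/sb/token.sol' '600' '/sb/bin'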
smartbugs
smartbugs-master/sb/utils.py
def str2label(s):
    """Convert string to label.

    - leading non-letters are removed
    - trailing characters that are neither letters nor digits ("other chars") are removed
    - sequences of other chars within the string are replaced by a single underscore
    """
    l = ""
    separator = False
    has_started = False
    for c in s:
        if c.isalpha() or (has_started and c.isdigit()):
            has_started = True
            if separator:
                separator = False
                l += "_"
            l += c
        else:
            separator = has_started
    return l
600
27.619048
89
py
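A few example calls for str2label above; the expected outputs follow from the rules in its docstring and assume the sb package is importable.

from sb.utils import str2label

print(str2label("2022-08-15 conkas/solidity"))  # conkas_solidity
print(str2label("MythX (PRO)"))                 # MythX_PRO
print(str2label("--v1.2.3--"))                  # v1_2_3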
smartbugs
smartbugs-master/solcx/__init__.py
from solcx.install import (
    compile_solc,
    get_compilable_solc_versions,
    get_executable,
    get_installable_solc_versions,
    get_installed_solc_versions,
    get_solcx_install_folder,
    import_installed_solc,
    install_solc,
    install_solc_pragma,
    set_target_os,
    set_solc_version,
    set_solc_version_pragma,
)
from solcx.main import (
    compile_files,
    compile_source,
    compile_standard,
    get_solc_version,
    link_code
)
464
20.136364
34
py
smartbugs
smartbugs-master/solcx/exceptions.py
from typing import Dict, List


class SolcError(Exception):
    message = "An error occurred during execution"

    def __init__(
        self,
        message: str = None,
        command: List = None,
        return_code: int = None,
        stdin_data: str = None,
        stdout_data: str = None,
        stderr_data: str = None,
        error_dict: Dict = None,
    ) -> None:
        if message is not None:
            self.message = message
        self.command = command or []
        self.return_code = return_code
        self.stdin_data = stdin_data
        self.stderr_data = stderr_data
        self.stdout_data = stdout_data
        self.error_dict = error_dict

    def __str__(self) -> str:
        return (
            f"{self.message}"
            f"\n> command: `{' '.join(str(i) for i in self.command)}`"
            f"\n> return code: `{self.return_code}`"
            "\n> stdout:"
            f"\n{self.stdout_data}"
            "\n> stderr:"
            f"\n{self.stderr_data}"
        ).strip()


class ContractsNotFound(SolcError):
    message = "No contracts found during compilation"


class SolcInstallationError(Exception):
    pass


class UnknownOption(AttributeError):
    pass


class UnknownValue(ValueError):
    pass


class UnexpectedVersionError(Exception):
    pass


class UnsupportedVersionError(ValueError):
    pass


class SolcNotInstalled(Exception):
    pass


class DownloadError(Exception):
    pass


class UnexpectedVersionWarning(Warning):
    pass
1,502
19.875
70
py
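A minimal sketch of how a caller can surface the diagnostic fields carried by SolcError above; the failure shown is fabricated and only illustrates the shape of the exception, assuming the vendored solcx package is importable.

from solcx.exceptions import SolcError

try:
    raise SolcError(
        message="Compilation failed",
        command=["solc", "--bin", "broken.sol"],
        return_code=1,
        stdout_data="",
        stderr_data="Error: Expected ';' but got '}'",
    )
except SolcError as e:
    print(e)              # __str__ combines message, command, exit code and both output streams
    print(e.return_code)  # individual fields stay accessible as attributes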
smartbugs
smartbugs-master/solcx/install.py
""" Install solc """ import argparse import logging import os import re import shutil import stat import subprocess import sys import tarfile import tempfile import warnings import zipfile from base64 import b64encode from io import BytesIO from pathlib import Path from typing import Dict, List, Optional, Union import requests from semantic_version import SimpleSpec, Version from solcx import wrapper from solcx.exceptions import ( DownloadError, SolcInstallationError, SolcNotInstalled, UnexpectedVersionError, UnexpectedVersionWarning, UnsupportedVersionError, ) from solcx.utils.lock import get_process_lock try: from tqdm import tqdm except ImportError: tqdm = None BINARY_DOWNLOAD_BASE = "https://solc-bin.ethereum.org/{}-amd64/{}" SOURCE_DOWNLOAD_BASE = "https://github.com/ethereum/solidity/releases/download/v{}/{}" GITHUB_RELEASES = "https://api.github.com/repos/ethereum/solidity/releases?per_page=100" MINIMAL_SOLC_VERSION = Version("0.4.11") LOGGER = logging.getLogger("solcx") SOLCX_BINARY_PATH_VARIABLE = "SOLCX_BINARY_PATH" _default_solc_binary = None _target_os = None def set_target_os(platform: Optional[str] = None): """ Set the target platform for the solc binaries. If unset, it defaults to the current os. """ global _target_os if platform is None or platform in ("linux", "macosx", "windows"): _target_os = platform else: raise OSError(f"Unsupported target OS: '{platform}' - py-solc-x supports 'linux', 'macosx', or 'windows'.") def _get_target_os() -> str: return _target_os if _target_os else _get_os_name() def _get_os_name() -> str: if sys.platform.startswith("linux"): return "linux" if sys.platform == "darwin": return "macosx" if sys.platform == "win32": return "windows" raise OSError(f"Unsupported OS: '{sys.platform}' - py-solc-x supports Linux, OSX and Windows") def _convert_and_validate_version(version: Union[str, Version]) -> Version: # take a user-supplied version as a string or Version # validate the value, and return a Version object if not isinstance(version, Version): version = Version(version.lstrip("v")) if version not in SimpleSpec(">=0.4.11"): raise UnsupportedVersionError("py-solc-x does not support solc versions <0.4.11") return version def _unlink_solc(solc_path: Path) -> None: solc_path.unlink() if _get_target_os() == "windows": shutil.rmtree(solc_path.parent) def get_solcx_install_folder(solcx_binary_path: Union[Path, str] = None) -> Path: """ Return the directory where `py-solc-x` stores installed `solc` binaries. By default, this is `~/.solcx` Arguments --------- solcx_binary_path : Path | str, optional User-defined path, used to override the default installation directory. Returns ------- Path Subdirectory where `solc` binaries are are saved. 
""" if _get_target_os() == _get_os_name(): if os.getenv(SOLCX_BINARY_PATH_VARIABLE): return Path(os.environ[SOLCX_BINARY_PATH_VARIABLE]) elif solcx_binary_path is not None: return Path(solcx_binary_path) else: path = Path.home().joinpath(".solcx") path.mkdir(exist_ok=True) return path else: path = Path.home().joinpath(f".solcx-{_get_target_os()}") path.mkdir(exist_ok=True) return path def _get_which_solc() -> Path: # get the path for the currently installed `solc` version, if any if _get_os_name() == "windows": response = subprocess.check_output(["where.exe", "solc", "/Q"], encoding="utf8").strip() else: response = subprocess.check_output(["which", "solc"], encoding="utf8").strip() return Path(response) def import_installed_solc(solcx_binary_path: Union[Path, str] = None) -> List[Version]: """ Search for and copy installed `solc` versions into the local installation folder. Arguments --------- solcx_binary_path : Path | str, optional User-defined path, used to override the default installation directory. Returns ------- List Imported solc versions """ try: path_list = [_get_which_solc()] except (FileNotFoundError, subprocess.CalledProcessError): path_list = [] # on OSX, also copy all versions of solc from cellar if _get_os_name() == "macosx": path_list.extend(Path("/usr/local/Cellar").glob("solidity*/**/solc")) imported_versions = [] for path in path_list: try: version = wrapper._get_solc_version(path) assert version not in get_installed_solc_versions() except Exception: continue copy_path = get_solcx_install_folder(solcx_binary_path).joinpath(f"solc-v{version}") if _get_os_name() == "windows": copy_path.mkdir() copy_path = copy_path.joinpath("solc.exe") shutil.copy(path, copy_path) try: # confirm that solc still works after being copied assert version == wrapper._get_solc_version(copy_path) imported_versions.append(version) except Exception: _unlink_solc(copy_path) return imported_versions def get_executable( version: Union[str, Version] = None, solcx_binary_path: Union[Path, str] = None ) -> Path: """ Return the Path to an installed `solc` binary. Arguments --------- version : str | Version, optional Installed `solc` version to get the path of. If not given, returns the path of the active version. solcx_binary_path : Path | str, optional User-defined path, used to override the default installation directory. Returns ------- Path `solc` executable. """ if not version: if not _default_solc_binary: raise SolcNotInstalled( "Solc is not installed. Call solcx.get_installable_solc_versions()" " to view for available versions and solcx.install_solc() to install." ) return _default_solc_binary version = _convert_and_validate_version(version) solc_bin = get_solcx_install_folder(solcx_binary_path).joinpath(f"solc-v{version}") if _get_target_os() == "windows": solc_bin = solc_bin.joinpath("solc.exe") if not solc_bin.exists(): raise SolcNotInstalled( f"solc {version} has not been installed." f" Use solcx.install_solc('{version}') to install." ) return solc_bin def set_solc_version( version: Union[str, Version], silent: bool = False, solcx_binary_path: Union[Path, str] = None ) -> None: """ Set the currently active `solc` binary. Arguments --------- version : str | Version, optional Installed `solc` version to get the path of. If not given, returns the path of the active version. silent : bool, optional If True, do not generate any logger output. solcx_binary_path : Path | str, optional User-defined path, used to override the default installation directory. 
""" version = _convert_and_validate_version(version) global _default_solc_binary _default_solc_binary = get_executable(version, solcx_binary_path) if not silent: LOGGER.info(f"Using solc version {version}") def _select_pragma_version(pragma_string: str, version_list: List[Version]) -> Optional[Version]: pragma_string = re.sub(r"(\D)0+(\d)", r"\1\2", pragma_string) comparator_set_range = pragma_string.replace(" ", "").split("||") comparator_regex = re.compile(r"(([<>]?=?|\^)\d+\.\d+\.\d+)") version = None for comparator_set in comparator_set_range: spec = SimpleSpec(",".join((i[0] for i in comparator_regex.findall(comparator_set)))) selected = spec.select(version_list) if selected and (not version or version < selected): version = selected return version def set_solc_version_pragma( pragma_string: str, silent: bool = False, check_new: bool = False ) -> Version: """ Set the currently active `solc` binary based on a pragma statement. The newest installed version that matches the pragma is chosen. Raises `SolcNotInstalled` if no installed versions match. Arguments --------- pragma_string : str Pragma statement, e.g. "pragma solidity ^0.4.22;" silent : bool, optional If True, do not generate any logger output. check_new : bool, optional If True, also check if there is a newer compatible version that has not been installed. Returns ------- Version The new active `solc` version. """ version = _select_pragma_version(pragma_string, get_installed_solc_versions()) if version is None: raise SolcNotInstalled( f"No compatible solc version installed." f" Use solcx.install_solc_version_pragma('{version}') to install." ) set_solc_version(version, silent) if check_new: latest = install_solc_pragma(pragma_string, False) if latest > version: LOGGER.info(f"Newer compatible solc version exists: {latest}") return version def install_solc_pragma( pragma_string: str, install: bool = True, show_progress: bool = False, solcx_binary_path: Union[Path, str] = None, ) -> Version: """ Find, and optionally install, the latest compatible `solc` version based on a pragma statement. Arguments --------- pragma_string : str Pragma statement, e.g. "pragma solidity ^0.4.22;" install : bool, optional If True, installs the version of `solc`. show_progress : bool, optional If True, display a progress bar while downloading. Requires installing the `tqdm` package. solcx_binary_path : Path | str, optional User-defined path, used to override the default installation directory. Returns ------- Version Installed `solc` version. """ version = _select_pragma_version(pragma_string, get_installable_solc_versions()) if not version: raise UnsupportedVersionError("Compatible solc version does not exist") if install: install_solc(version, show_progress=show_progress, solcx_binary_path=solcx_binary_path) return version def get_installable_solc_versions() -> List[Version]: """ Return a list of all `solc` versions that can be installed by py-solc-x. Returns ------- List List of Versions objects of installable `solc` versions. 
""" data = requests.get(BINARY_DOWNLOAD_BASE.format(_get_os_name(), "list.json")) if data.status_code != 200: raise ConnectionError( f"Status {data.status_code} when getting solc versions from solc-bin.ethereum.org" ) version_list = sorted((Version(i) for i in data.json()["releases"]), reverse=True) version_list = [i for i in version_list if i >= MINIMAL_SOLC_VERSION] return version_list def get_compilable_solc_versions(headers: Optional[Dict] = None) -> List[Version]: """ Return a list of all `solc` versions that can be compiled from source by py-solc-x. Arguments --------- headers : Dict, optional Headers to include in the request to Github. Returns ------- List List of Versions objects of installable `solc` versions. """ if _get_target_os() == "windows": raise OSError("Compiling from source is not supported on Windows systems") version_list = [] pattern = "solidity_[0-9].[0-9].[0-9]{1,}.tar.gz" if headers is None and os.getenv("GITHUB_TOKEN") is not None: auth = b64encode(os.environ["GITHUB_TOKEN"].encode()).decode() headers = {"Authorization": f"Basic {auth}"} data = requests.get(GITHUB_RELEASES, headers=headers) if data.status_code != 200: msg = ( f"Status {data.status_code} when getting solc versions from Github:" f" '{data.json()['message']}'" ) if data.status_code == 403: msg += ( "\n\nIf this issue persists, generate a Github API token and store" " it as the environment variable `GITHUB_TOKEN`:\n" "https://github.blog/2013-05-16-personal-api-tokens/" ) raise ConnectionError(msg) for release in data.json(): try: version = Version.coerce(release["tag_name"].lstrip("v")) except ValueError: # ignore non-standard releases (e.g. the 0.8.x preview) continue asset = next((i for i in release["assets"] if re.match(pattern, i["name"])), False) if asset: version_list.append(version) if version == MINIMAL_SOLC_VERSION: break return sorted(version_list, reverse=True) def get_installed_solc_versions(solcx_binary_path: Union[Path, str] = None) -> List[Version]: """ Return a list of currently installed `solc` versions. Arguments --------- solcx_binary_path : Path | str, optional User-defined path, used to override the default installation directory. Returns ------- List List of Version objects of installed `solc` versions. """ install_path = get_solcx_install_folder(solcx_binary_path) return sorted([Version(i.name[6:]) for i in install_path.glob("solc-v*")], reverse=True) def install_solc( version: Union[str, Version] = "latest", show_progress: bool = False, solcx_binary_path: Union[Path, str] = None, ) -> Version: """ Download and install a precompiled version of `solc`. Arguments --------- version : str | Version, optional Version of `solc` to install. Default is the newest available version. show_progress : bool, optional If True, display a progress bar while downloading. Requires installing the `tqdm` package. solcx_binary_path : Path | str, optional User-defined path, used to override the default installation directory. 
Returns ------- Version installed solc version """ if version == "latest": version = get_installable_solc_versions()[0] else: version = _convert_and_validate_version(version) target_os = _get_target_os() this_os = _get_os_name() process_lock = get_process_lock(str(version)) with process_lock: if _check_for_installed_version(version, solcx_binary_path): path = get_solcx_install_folder(solcx_binary_path).joinpath(f"solc-v{version}") LOGGER.info(f"solc {version} already installed at: {path}") return version data = requests.get(BINARY_DOWNLOAD_BASE.format(target_os, "list.json")) if data.status_code != 200: raise ConnectionError( f"Status {data.status_code} when getting solc versions from solc-bin.ethereum.org" ) try: filename = data.json()["releases"][str(version)] except KeyError: raise SolcInstallationError(f"Solc binary for v{version} is not available for this OS") if target_os == "linux": _install_solc_unix(version, filename, show_progress, solcx_binary_path) elif target_os == "macosx": _install_solc_unix(version, filename, show_progress, solcx_binary_path) elif target_os == "windows": _install_solc_windows(version, filename, show_progress, solcx_binary_path) try: _validate_installation(version, solcx_binary_path) except SolcInstallationError as exc: if target_os != "windows" and target_os == this_os: exc.args = ( f"{exc.args[0]} If this issue persists, you can try to compile from " f"source code using `solcx.compile_solc('{version}')`.", ) raise exc return version def compile_solc( version: Version, show_progress: bool = False, solcx_binary_path: Union[Path, str] = None ) -> Version: """ Install a version of `solc` by downloading and compiling source code. Arguments --------- version : str | Version, optional Version of `solc` to install. Default is the newest available version. show_progress : bool, optional If True, display a progress bar while downloading. Requires installing the `tqdm` package. solcx_binary_path : Path | str, optional User-defined path, used to override the default installation directory. 
Returns ------- Version installed solc version """ if _get_os_name() != _get_target_os(): raise OSError("Cross-compiling is not supported") if _get_os_name() == "windows": raise OSError("Compiling from source is not supported on Windows systems") if version == "latest": version = get_compilable_solc_versions()[0] else: version = _convert_and_validate_version(version) process_lock = get_process_lock(str(version)) with process_lock: if _check_for_installed_version(version, solcx_binary_path): path = get_solcx_install_folder(solcx_binary_path).joinpath(f"solc-v{version}") LOGGER.info(f"solc {version} already installed at: {path}") return version temp_path = _get_temp_folder() download = SOURCE_DOWNLOAD_BASE.format(version, f"solidity_{version}.tar.gz") install_path = get_solcx_install_folder(solcx_binary_path).joinpath(f"solc-v{version}") content = _download_solc(download, show_progress) with tarfile.open(fileobj=BytesIO(content)) as tar: tar.extractall(temp_path) temp_path = temp_path.joinpath(f"solidity_{version}") try: LOGGER.info("Running dependency installation script `install_deps.sh`...") subprocess.check_call( ["sh", temp_path.joinpath("scripts/install_deps.sh")], stderr=subprocess.DEVNULL ) except subprocess.CalledProcessError as exc: LOGGER.warning(exc, exc_info=True) original_path = os.getcwd() temp_path.joinpath("build").mkdir(exist_ok=True) os.chdir(str(temp_path.joinpath("build").resolve())) try: for cmd in (["cmake", ".."], ["make"]): LOGGER.info(f"Running `{cmd[0]}`...") subprocess.check_call(cmd, stderr=subprocess.DEVNULL) temp_path.joinpath("build/solc/solc").rename(install_path) except subprocess.CalledProcessError as exc: err_msg = ( f"{cmd[0]} returned non-zero exit status {exc.returncode}" " while attempting to build solc from the source.\n" "This is likely due to a missing or incorrect version of a build dependency." 
) if _get_target_os() == "macosx": err_msg = ( f"{err_msg}\n\nFor suggested installation options: " "https://github.com/iamdefinitelyahuman/py-solc-x/wiki/Installing-Solidity-on-OSX" # noqa: E501 ) raise SolcInstallationError(err_msg) finally: os.chdir(original_path) install_path.chmod(install_path.stat().st_mode | stat.S_IEXEC) _validate_installation(version, solcx_binary_path) return version def _check_for_installed_version( version: Version, solcx_binary_path: Union[Path, str] = None ) -> bool: path = get_solcx_install_folder(solcx_binary_path).joinpath(f"solc-v{version}") return path.exists() def _get_temp_folder() -> Path: path = Path(tempfile.gettempdir()).joinpath(f"solcx-tmp-{os.getpid()}") if path.exists(): shutil.rmtree(str(path)) path.mkdir() return path def _download_solc(url: str, show_progress: bool) -> bytes: LOGGER.info(f"Downloading from {url}") response = requests.get(url, stream=show_progress) if response.status_code == 404: raise DownloadError( "404 error when attempting to download from {} - are you sure this" " version of solidity is available?".format(url) ) if response.status_code != 200: raise DownloadError( f"Received status code {response.status_code} when attempting to download from {url}" ) if not show_progress: return response.content total_size = int(response.headers.get("content-length", 0)) progress_bar = tqdm(total=total_size, unit="iB", unit_scale=True) content = bytes() for data in response.iter_content(1024, decode_unicode=True): progress_bar.update(len(data)) content += data progress_bar.close() return content def _install_solc_unix( version: Version, filename: str, show_progress: bool, solcx_binary_path: Union[Path, str, None] ) -> None: download = BINARY_DOWNLOAD_BASE.format(_get_target_os(), filename) install_path = get_solcx_install_folder(solcx_binary_path).joinpath(f"solc-v{version}") content = _download_solc(download, show_progress) with open(install_path, "wb") as fp: fp.write(content) install_path.chmod(install_path.stat().st_mode | stat.S_IEXEC) def _install_solc_windows( version: Version, filename: str, show_progress: bool, solcx_binary_path: Union[Path, str, None] ) -> None: download = BINARY_DOWNLOAD_BASE.format(_get_target_os(), filename) install_path = get_solcx_install_folder(solcx_binary_path).joinpath(f"solc-v{version}") temp_path = _get_temp_folder() content = _download_solc(download, show_progress) if Path(filename).suffix == ".exe": install_path.mkdir() with open(install_path.joinpath("solc.exe"), "wb") as fp: fp.write(content) else: with zipfile.ZipFile(BytesIO(content)) as zf: zf.extractall(str(temp_path)) temp_path.rename(install_path) def _validate_installation(version: Version, solcx_binary_path: Union[Path, str, None]) -> None: if _get_target_os() != _get_os_name(): return binary_path = get_executable(version, solcx_binary_path) try: installed_version = wrapper._get_solc_version(binary_path) except Exception: _unlink_solc(binary_path) raise SolcInstallationError( "Downloaded binary would not execute, or returned unexpected output." 
) if installed_version.truncate() != version.truncate(): _unlink_solc(binary_path) raise UnexpectedVersionError( f"Attempted to install solc v{version}, but got solc v{installed_version}" ) if installed_version != version: warnings.warn(f"Installed solc version is v{installed_version}", UnexpectedVersionWarning) if not _default_solc_binary: set_solc_version(version) LOGGER.info(f"solc {version} successfully installed at: {binary_path}") try: # try to set the result of `which`/`where` as the default _default_solc_binary = _get_which_solc() except Exception: # if not available, use the most recent solcx installed version if get_installed_solc_versions(): set_solc_version(get_installed_solc_versions()[0], silent=True) if __name__ == "__main__": argument_parser = argparse.ArgumentParser() argument_parser.add_argument("version") argument_parser.add_argument("--solcx-binary-path", default=None) args = argument_parser.parse_args() install_solc(args.version, solcx_binary_path=args.solcx_binary_path)
23,707
33.210678
116
py
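The heart of _select_pragma_version above is letting semantic_version pick the highest release that satisfies the comparators extracted from a pragma. Below is a simplified, self-contained sketch of that idea; the helper name pick_version, its regex and the version list are invented for this example.

import re
from semantic_version import SimpleSpec, Version

COMPARATOR = re.compile(r"([<>]?=?|\^)(\d+\.\d+\.\d+)")

def pick_version(pragma: str, available: list):
    pragma = pragma.replace(" ", "")
    spec = SimpleSpec(",".join(op + ver for op, ver in COMPARATOR.findall(pragma)))
    return spec.select(available)   # highest version in `available` satisfying the spec

available = [Version("0.4.26"), Version("0.5.17"), Version("0.6.12"), Version("0.8.21")]
print(pick_version("^0.5.0", available))           # 0.5.17
print(pick_version(">=0.4.22 <0.6.0", available))  # 0.5.17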
smartbugs
smartbugs-master/solcx/main.py
import json from pathlib import Path from typing import Any, Dict, List, Optional, Union from semantic_version import Version from solcx import wrapper from solcx.exceptions import ContractsNotFound, SolcError from solcx.install import get_executable def get_solc_version(with_commit_hash: bool = False) -> Version: """ Get the version of the active `solc` binary. Arguments --------- with_commit_hash : bool, optional If True, the commit hash is included within the version Returns ------- Version solc version """ solc_binary = get_executable() return wrapper._get_solc_version(solc_binary, with_commit_hash) def compile_source( source: str, output_values: List = None, import_remappings: Union[Dict, List, str] = None, base_path: Union[Path, str] = None, allow_paths: Union[List, Path, str] = None, output_dir: Union[Path, str] = None, overwrite: bool = False, evm_version: str = None, revert_strings: Union[List, str] = None, metadata_hash: str = None, metadata_literal: bool = False, optimize: bool = False, optimize_runs: int = None, optimize_yul: bool = False, no_optimize_yul: bool = False, yul_optimizations: int = None, solc_binary: Union[str, Path] = None, solc_version: Version = None, allow_empty: bool = False, ) -> Dict: """ Compile a Solidity contract. Compilation is handled via the `--combined-json` flag. Depending on the solc version used, some keyword arguments may not be available. Arguments --------- source: str Solidity contract to be compiled. output_values : List, optional Compiler outputs to return. Valid options depend on the version of `solc`. If not given, all possible outputs for the active version are returned. import_remappings : Dict | List | str , optional Path remappings. May be given as a string or list of strings formatted as `"prefix=path"`, or a dict of `{"prefix": "path"}`. base_path : Path | str, optional Use the given path as the root of the source tree instead of the root of the filesystem. allow_paths : List | Path | str, optional A path, or list of paths, to allow for imports. output_dir : str, optional Creates one file per component and contract/file at the specified directory. overwrite : bool, optional Overwrite existing files (used in combination with `output_dir`) evm_version: str, optional Select the desired EVM version. Valid options depend on the `solc` version. revert_strings : List | str, optional Strip revert (and require) reason strings or add additional debugging information. metadata_hash : str, optional Choose hash method for the bytecode metadata or disable it. metadata_literal : bool, optional Store referenced sources as literal data in the metadata output. optimize : bool, optional Enable bytecode optimizer. optimize_runs : int, optional Set for how many contract runs to optimize. Lower values will optimize more for initial deployment cost, higher values will optimize more for high-frequency usage. optimize_yul: bool, optional Enable the yul optimizer. no_optimize_yul : bool, optional Disable the yul optimizer. yul_optimizations : int, optional Force yul optimizer to use the specified sequence of optimization steps instead of the built-in one. solc_binary : str | Path, optional Path of the `solc` binary to use. If not given, the currently active version is used (as set by `solcx.set_solc_version`) solc_version: Version, optional `solc` version to use. If not given, the currently active version is used. Ignored if `solc_binary` is also given. allow_empty : bool, optional If `True`, do not raise when no compiled contracts are returned. 
Returns ------- Dict Compiler output. The source file name is given as `<stdin>`. """ return _compile_combined_json( solc_binary=solc_binary, solc_version=solc_version, stdin=source, output_values=output_values, import_remappings=import_remappings, base_path=base_path, allow_paths=allow_paths, output_dir=output_dir, overwrite=overwrite, evm_version=evm_version, revert_strings=revert_strings, metadata_hash=metadata_hash, metadata_literal=metadata_literal, optimize=optimize, optimize_runs=optimize_runs, no_optimize_yul=no_optimize_yul, yul_optimizations=yul_optimizations, allow_empty=allow_empty, ) def compile_files( source_files: Union[List, Path, str], output_values: List = None, import_remappings: Union[Dict, List, str] = None, base_path: Union[Path, str] = None, allow_paths: Union[List, Path, str] = None, output_dir: Union[Path, str] = None, overwrite: bool = False, evm_version: str = None, revert_strings: Union[List, str] = None, metadata_hash: str = None, metadata_literal: bool = False, optimize: bool = False, optimize_runs: int = None, optimize_yul: bool = False, no_optimize_yul: bool = False, yul_optimizations: int = None, solc_binary: Union[str, Path] = None, solc_version: Version = None, allow_empty: bool = False, ) -> Dict: """ Compile one or more Solidity source files. Compilation is handled via the `--combined-json` flag. Depending on the solc version used, some keyword arguments may not be available. Arguments --------- source_files: List | Path | str Path, or list of paths, of Solidity source files to be compiled. output_values : List, optional Compiler outputs to return. Valid options depend on the version of `solc`. If not given, all possible outputs for the active version are returned. import_remappings : Dict | List | str , optional Path remappings. May be given as a string or list of strings formatted as `"prefix=path"`, or a dict of `{"prefix": "path"}`. base_path : Path | str, optional Use the given path as the root of the source tree instead of the root of the filesystem. allow_paths : List | Path | str, optional A path, or list of paths, to allow for imports. output_dir : str, optional Creates one file per component and contract/file at the specified directory. overwrite : bool, optional Overwrite existing files (used in combination with `output_dir`) evm_version: str, optional Select the desired EVM version. Valid options depend on the `solc` version. revert_strings : List | str, optional Strip revert (and require) reason strings or add additional debugging information. metadata_hash : str, optional Choose hash method for the bytecode metadata or disable it. metadata_literal : bool, optional Store referenced sources as literal data in the metadata output. optimize : bool, optional Enable bytecode optimizer. optimize_runs : int, optional Set for how many contract runs to optimize. Lower values will optimize more for initial deployment cost, higher values will optimize more for high-frequency usage. optimize_yul: bool, optional Enable the yul optimizer. no_optimize_yul : bool, optional Disable the yul optimizer. yul_optimizations : int, optional Force yul optimizer to use the specified sequence of optimization steps instead of the built-in one. solc_binary : str | Path, optional Path of the `solc` binary to use. If not given, the currently active version is used (as set by `solcx.set_solc_version`) solc_version: Version, optional `solc` version to use. If not given, the currently active version is used. Ignored if `solc_binary` is also given. 
allow_empty : bool, optional If `True`, do not raise when no compiled contracts are returned. Returns ------- Dict Compiler output """ return _compile_combined_json( solc_binary=solc_binary, solc_version=solc_version, source_files=source_files, output_values=output_values, import_remappings=import_remappings, base_path=base_path, allow_paths=allow_paths, output_dir=output_dir, overwrite=overwrite, evm_version=evm_version, revert_strings=revert_strings, metadata_hash=metadata_hash, metadata_literal=metadata_literal, optimize=optimize, optimize_runs=optimize_runs, no_optimize_yul=no_optimize_yul, yul_optimizations=yul_optimizations, allow_empty=allow_empty, ) def _get_combined_json_outputs(solc_binary: Union[Path, str] = None) -> str: if solc_binary is None: solc_binary = get_executable() help_str = wrapper.solc_wrapper(solc_binary=solc_binary, help=True)[0].split("\n") combined_json_args = next(i for i in help_str if i.startswith(" --combined-json")) return combined_json_args.split(" ")[-1] def _parse_compiler_output(stdoutdata: str) -> Dict: output = json.loads(stdoutdata) contracts = output.get("contracts", {}) sources = output.get("sources", {}) for path_str, data in contracts.items(): if "abi" in data and isinstance(data["abi"], str): data["abi"] = json.loads(data["abi"]) key = path_str.rsplit(":", maxsplit=1)[0] if "AST" in sources.get(key, {}): data["ast"] = sources[key]["AST"] return contracts def _compile_combined_json( output_values: Optional[List] = None, solc_binary: Union[str, Path, None] = None, solc_version: Optional[Version] = None, output_dir: Union[str, Path, None] = None, overwrite: Optional[bool] = False, allow_empty: Optional[bool] = False, **kwargs: Any, ) -> Dict: if solc_binary is None: solc_binary = get_executable(solc_version) if output_values is None: combined_json = _get_combined_json_outputs(solc_binary) else: combined_json = ",".join(output_values) if output_dir: output_dir = Path(output_dir) if output_dir.is_file(): raise FileExistsError("`output_dir` must be as a directory, not a file") if output_dir.joinpath("combined.json").exists() and not overwrite: target_path = output_dir.joinpath("combined.json") raise FileExistsError( f"Target output file {target_path} already exists - use overwrite=True to overwrite" ) stdoutdata, stderrdata, command, proc = wrapper.solc_wrapper( solc_binary=solc_binary, combined_json=combined_json, output_dir=output_dir, overwrite=overwrite, **kwargs, ) if output_dir: output_path = Path(output_dir).joinpath("combined.json") if stdoutdata: output_path.parent.mkdir(parents=True, exist_ok=True) with output_path.open("w") as fp: fp.write(stdoutdata) else: with output_path.open() as fp: stdoutdata = fp.read() contracts = _parse_compiler_output(stdoutdata) if not contracts and not allow_empty: raise ContractsNotFound( command=command, return_code=proc.returncode, stdout_data=stdoutdata, stderr_data=stderrdata, ) return contracts def compile_standard( input_data: Dict, base_path: str = None, allow_paths: List = None, output_dir: str = None, overwrite: bool = False, solc_binary: Union[str, Path] = None, solc_version: Version = None, allow_empty: bool = False, ) -> Dict: """ Compile Solidity contracts using the JSON-input-output interface. See the Solidity documentation for details on the expected JSON input and output formats. Arguments --------- input_data : Dict Compiler JSON input. base_path : Path | str, optional Use the given path as the root of the source tree instead of the root of the filesystem. 
allow_paths : List | Path | str, optional A path, or list of paths, to allow for imports. output_dir : str, optional Creates one file per component and contract/file at the specified directory. overwrite : bool, optional Overwrite existing files (used in combination with `output_dir`) solc_binary : str | Path, optional Path of the `solc` binary to use. If not given, the currently active version is used (as set by `solcx.set_solc_version`) solc_version: Version, optional `solc` version to use. If not given, the currently active version is used. Ignored if `solc_binary` is also given. allow_empty : bool, optional If `True`, do not raise when no compiled contracts are returned. Returns ------- Dict Compiler JSON output. """ if not input_data.get("sources") and not allow_empty: raise ContractsNotFound( "Input JSON does not contain any sources", stdin_data=json.dumps(input_data, sort_keys=True, indent=2), ) if solc_binary is None: solc_binary = get_executable(solc_version) stdoutdata, stderrdata, command, proc = wrapper.solc_wrapper( solc_binary=solc_binary, stdin=json.dumps(input_data), standard_json=True, base_path=base_path, allow_paths=allow_paths, output_dir=output_dir, overwrite=overwrite, ) compiler_output = json.loads(stdoutdata) if "errors" in compiler_output: has_errors = any(error["severity"] == "error" for error in compiler_output["errors"]) if has_errors: error_message = "\n".join( tuple( error["formattedMessage"] for error in compiler_output["errors"] if error["severity"] == "error" ) ) raise SolcError( error_message, command=command, return_code=proc.returncode, stdin_data=json.dumps(input_data), stdout_data=stdoutdata, stderr_data=stderrdata, error_dict=compiler_output["errors"], ) return compiler_output def link_code( unlinked_bytecode: str, libraries: Dict, solc_binary: Union[str, Path] = None, solc_version: Version = None, ) -> str: """ Add library addresses into unlinked bytecode. Arguments --------- unlinked_bytecode : str Compiled bytecode containing one or more library placeholders. libraries : Dict Library addresses given as {"library name": "address"} solc_binary : str | Path, optional Path of the `solc` binary to use. If not given, the currently active version is used (as set by `solcx.set_solc_version`) solc_version: Version, optional `solc` version to use. If not given, the currently active version is used. Ignored if `solc_binary` is also given. Returns ------- str Linked bytecode """ if solc_binary is None: solc_binary = get_executable(solc_version) library_list = [f"{name}:{address}" for name, address in libraries.items()] stdoutdata = wrapper.solc_wrapper( solc_binary=solc_binary, stdin=unlinked_bytecode, link=True, libraries=library_list )[0] return stdoutdata.replace("Linking completed.", "").strip()
15,849
34.778781
100
py
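A hedged usage sketch for compile_source above. It assumes the vendored solcx package is importable, that solc 0.6.12 can be downloaded (or is already installed), and the contract is a throwaway example.

import solcx

SOURCE = """
pragma solidity ^0.6.0;
contract Greeter {
    function greet() public pure returns (string memory) {
        return "Hello";
    }
}
"""

solcx.install_solc("0.6.12")    # no-op if that version is already installed
result = solcx.compile_source(
    SOURCE,
    output_values=["abi", "bin"],
    solc_version="0.6.12",
)
for name, artifacts in result.items():
    print(name, sorted(artifacts))   # keys look like '<stdin>:Greeter'; artifacts contain 'abi' and 'bin'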
smartbugs
smartbugs-master/solcx/wrapper.py
import re import subprocess from pathlib import Path from typing import Any, Dict, List, Tuple, Union from semantic_version import Version from solcx import install from solcx.exceptions import SolcError, UnknownOption, UnknownValue # (major.minor.patch)(nightly)(commit) VERSION_REGEX = r"(\d+\.\d+\.\d+)(?:-nightly.\d+.\d+.\d+|)(\+commit.\w+)" def _get_solc_version(solc_binary: Union[Path, str], with_commit_hash: bool = False) -> Version: # private wrapper function to get `solc` version stdout_data = subprocess.check_output([str(solc_binary), "--version"], encoding="utf8") try: match = next(re.finditer(VERSION_REGEX, stdout_data)) version_str = "".join(match.groups()) except StopIteration: raise SolcError("Could not determine the solc binary version") version = Version.coerce(version_str) if with_commit_hash: return version else: return version.truncate() def _to_string(key: str, value: Any) -> str: # convert data into a string prior to calling `solc` if isinstance(value, (int, str)): return str(value) elif isinstance(value, Path): return value.as_posix() elif isinstance(value, (list, tuple)): return ",".join(_to_string(key, i) for i in value) else: raise TypeError(f"Invalid type for {key}: {type(value)}") def solc_wrapper( solc_binary: Union[Path, str] = None, stdin: str = None, source_files: Union[List, Path, str] = None, import_remappings: Union[Dict, List, str] = None, success_return_code: int = None, **kwargs: Any, ) -> Tuple[str, str, List, subprocess.Popen]: """ Wrapper function for calling to `solc`. Arguments --------- solc_binary : Path | str, optional Location of the `solc` binary. If not given, the current default binary is used. stdin : str, optional Input to pass to `solc` via stdin source_files : list | Path | str, optional Path, or list of paths, of sources to compile import_remappings : Dict | List | str, optional Path remappings. May be given as a string or list of strings formatted as `"prefix=path"` or a dict of `{"prefix": "path"}` success_return_code : int, optional Expected exit code. Raises `SolcError` if the process returns a different value. Keyword Arguments ----------------- **kwargs : Any Flags to be passed to `solc`. Keywords are converted to flags by prepending `--` and replacing `_` with `-`, for example the keyword `evm_version` becomes `--evm-version`. 
Values may be given in the following formats: * `False`, `None`: ignored * `True`: flag is used without any arguments * str: given as an argument without modification * int: given as an argument, converted to a string * Path: converted to a string via `Path.as_posix()` * List, Tuple: elements are converted to strings and joined with `,` Returns ------- str Process `stdout` output str Process `stderr` output List Full command executed by the function Popen Subprocess object used to call `solc` """ if solc_binary: solc_binary = Path(solc_binary) else: solc_binary = install.get_executable() solc_version = _get_solc_version(solc_binary) command: List = [str(solc_binary)] if success_return_code is None: success_return_code = 1 if "help" in kwargs else 0 if source_files is not None: if isinstance(source_files, (str, Path)): command.append(_to_string("source_files", source_files)) else: command.extend([_to_string("source_files", i) for i in source_files]) if import_remappings is not None: if isinstance(import_remappings, str): command.append(import_remappings) else: if isinstance(import_remappings, dict): import_remappings = [f"{k}={v}" for k, v in import_remappings.items()] command.extend(import_remappings) for key, value in kwargs.items(): if value is None or value is False: continue key = f"--{key.replace('_', '-')}" if value is True: command.append(key) else: command.extend([key, _to_string(key, value)]) if "standard_json" not in kwargs and not source_files: # indicates that solc should read from stdin command.append("-") if stdin is not None: stdin = str(stdin) proc = subprocess.Popen( command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf8", ) stdoutdata, stderrdata = proc.communicate(stdin) if proc.returncode != success_return_code: if stderrdata.startswith("unrecognised option"): # unrecognised option '<FLAG>' flag = stderrdata.split("'")[1] raise UnknownOption(f"solc {solc_version} does not support the '{flag}' option'") if stderrdata.startswith("Invalid option"): # Invalid option to <FLAG>: <OPTION> flag, option = stderrdata.split(": ") flag = flag.split(" ")[-1] raise UnknownValue( f"solc {solc_version} does not accept '{option}' as an option for the '{flag}' flag" ) raise SolcError( command=command, return_code=proc.returncode, stdin_data=stdin, stdout_data=stdoutdata, stderr_data=stderrdata, ) return stdoutdata, stderrdata, command, proc
5,704
33.161677
100
py
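To show just the keyword-to-flag conversion that solc_wrapper above performs (without invoking solc), here is a self-contained sketch; the function name kwargs_to_flags is invented and mirrors the loop over **kwargs in the wrapper.

from pathlib import Path

def kwargs_to_flags(**kwargs):
    command = []
    for key, value in kwargs.items():
        if value is None or value is False:
            continue                                   # ignored, as in solc_wrapper
        flag = f"--{key.replace('_', '-')}"
        if value is True:
            command.append(flag)                       # bare flag without argument
        elif isinstance(value, (list, tuple)):
            command.extend([flag, ",".join(str(v) for v in value)])
        elif isinstance(value, Path):
            command.extend([flag, value.as_posix()])
        else:
            command.extend([flag, str(value)])
    return command

print(kwargs_to_flags(bin=True, optimize=True, optimize_runs=200,
                      evm_version="istanbul", allow_paths=["/a", "/b"]))
# ['--bin', '--optimize', '--optimize-runs', '200', '--evm-version', 'istanbul', '--allow-paths', '/a,/b']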
smartbugs
smartbugs-master/solcx/utils/__init__.py
0
0
0
py
smartbugs
smartbugs-master/solcx/utils/lock.py
import os import sys import tempfile import threading import getpass from pathlib import Path from typing import Any, Dict, Union if sys.platform == "win32": import msvcrt OPEN_MODE = os.O_RDWR | os.O_CREAT | os.O_TRUNC else: import fcntl NON_BLOCKING = fcntl.LOCK_EX | fcntl.LOCK_NB BLOCKING = fcntl.LOCK_EX _locks: Dict[str, Union["UnixLock", "WindowsLock"]] = {} _base_lock = threading.Lock() def get_process_lock(lock_id: str) -> Union["UnixLock", "WindowsLock"]: with _base_lock: if lock_id not in _locks: if sys.platform == "win32": _locks[lock_id] = WindowsLock(lock_id) else: _locks[lock_id] = UnixLock(lock_id) return _locks[lock_id] class _ProcessLock: """ Ensure an action is both thread-safe and process-safe. """ def __init__(self, lock_id: str) -> None: self._lock = threading.Lock() self._lock_path = Path(tempfile.gettempdir()).joinpath(f".solcx-lock-{getpass.getuser()}-{lock_id}") self._lock_file = self._lock_path.open("w") class UnixLock(_ProcessLock): def __enter__(self) -> None: self.acquire(True) def __exit__(self, *args: Any) -> None: self.release() def acquire(self, blocking: bool) -> bool: if not self._lock.acquire(blocking): return False try: fcntl.flock(self._lock_file, BLOCKING if blocking else NON_BLOCKING) except BlockingIOError: self._lock.release() return False return True def release(self) -> None: fcntl.flock(self._lock_file, fcntl.LOCK_UN) self._lock.release() class WindowsLock(_ProcessLock): def __enter__(self) -> None: self.acquire(True) def __exit__(self, *args: Any) -> None: self.release() def acquire(self, blocking: bool) -> bool: if not self._lock.acquire(blocking): return False while True: try: fd = os.open(self._lock_path, OPEN_MODE) # type: ignore msvcrt.locking( # type: ignore fd, msvcrt.LK_LOCK if blocking else msvcrt.LK_NBLCK, 1 # type: ignore ) self._fd = fd return True except OSError: if not blocking: self._lock.release() return False def release(self) -> None: msvcrt.locking(self._fd, msvcrt.LK_UNLCK, 1) # type: ignore self._lock.release()
2,554
26.771739
108
py
smartbugs
smartbugs-master/templates/scripts/example.py
# This is a sample file showing how to call SmartBugs # from a Python script. import sb.smartbugs, sb.settings, sb.exceptions if __name__ == "__main__": settings = sb.settings.Settings() settings.update({ "tools": ["conkas"], "files": ["samples/simple_dao.*"], #"quiet": True # suppress output on stdout }) try: sb.smartbugs.main(settings) except sb.exceptions.SmartBugsError as e: print(f"Something didn't work: {e}")
480
29.0625
53
py
smartbugs
smartbugs-master/templates/tools/template/config.yaml
name: ToolName # optional version: 0.3.14 # version number, commit id, ... (optional) origin: where to find more on the tool, e.g. an URL # optional info: Succinct description of your tool. # optional image: smartbugs/toolname:0.3.14 # id of Docker image (mandatory) bin: scripts # folder with programs that will be accessible in the Docker container # add the section below if the tool is able to analyse Solidity source code solidity: entrypoint: "'$BIN/do_solidity.sh' '$FILENAME' '$TIMEOUT' '$BIN'" solc: yes # add the section below if the tool is able to analyse bytecode (deployment code) bytecode: entrypoint: "'$BIN/do_bytecode.sh' '$FILENAME' '$TIMEOUT'" # add the section below if the tool is able to analyse runtime code (deployed code) runtime: entrypoint: "'$BIN/do_runtime.sh' '$FILENAME' '$TIMEOUT'"
831
47.941176
83
yaml
smartbugs
smartbugs-master/templates/tools/template/parser.py
import sb.parse_utils # for sb.parse_utils.init(...) import io, tarfile # if the output parameter is used import ... # any further imports VERSION: str = ... """identify the version of the parser, e.g. '2022/08/15'""" FINDINGS: set[str] = ... """set of strings: all possible findings, of which 'findings' below will be a subset""" def parse(exit_code, log, output): """ Analyse the result of the tool tun. :param exit_code: int|None, exit code of Docker run (None=timeout) :param log: list[str], stdout/stderr of Docker run :param output: bytes, tar archive of files generated by the tool (if specified in config.yaml) :return: tuple[findings: list[dict], infos: set[str], errors: set[str], fails: set[str]] findings identifies the major observations of the tool, infos contains any messages generated by the tool that might be of interest, errors lists the error messages deliberately generated by the tool, fails lists exceptions and other events not expected by the tool, analysis contains any analysis results worth reporting """ findings, infos = [], set() errors, fails = sb.parse_utils.errors_fails(exit_code, log) # Parses the output for common Python/Java/shell exceptions (returned in 'fails') for line in log: # analyse stdout/stderr of the Docker run ... try: with io.BytesIO(output) as o, tarfile.open(fileobj=o) as tar: # access specific file contents_of_some_file = tar.extractfile("name_of_some_file").read() # iterate over all files: for f in tar.getmembers(): contents_of_f = tar.extractfile(f).read() except Exception as e: fails.add(f"error parsing results: {e}") return findings, infos, errors, fails """ findings is a list of issues. Each issue is a dict with the following fields. name: str mandatory. Identifies the type of issue filename: str optional. Path of file processed. As this is the path within the docker image, it will be replaced by the external filename, after parsing. contract: str optional. Name of contract within the file (for source code) function: str optional. Name/header/signature of function containing the issue line: int optional. Line number of issue in source code, starting with 1 column: int optional. Column of issue in source code, starting with 1 line_end: int optional. Last line of the source code, where issue occurs. column_end: int optional. Last column of the source code, where issue occurs. address: int optional. Address of instruction in the bytecode, where issue occurs, starting with 0 address_end: int optional. Address of last instruction in the bytecode, where issue occurs, starting with 0 exploit: Any optional. Information on a potential exploit, e.g. a list of transactions level: str optional. type of issue, e.g. recommendation, warning, error severity: str optional. Severity of issue, e.g. low, medium, high message: str optional. Description of the issue If missing, the fields severity, classification, method, descr_short, descr_long will be taken from the file findings.yaml in the tools directory (if it exists), with "name" serving as the key. """
3,454
38.712644
98
py
smartbugs
smartbugs-master/templates/tools/template/scripts/printContractNames.py
import sys, json from subprocess import PIPE, Popen filename = sys.argv[1] cmd = ["solc", "--standard-json", "--allow-paths", ".,/"] settings = { "optimizer": {"enabled": False}, "outputSelection": { "*": { "*": [ "evm.deployedBytecode" ], } }, } input_json = json.dumps( { "language": "Solidity", "sources": {filename: {"urls": [filename]}}, "settings": settings, } ) p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate(bytes(input_json, "utf8")) out = stdout.decode("UTF-8") result = json.loads(out) for error in result.get("errors", []): if error["severity"] == "error": print(error["formattedMessage"]) sys.exit(1) contracts = result["contracts"][filename] for contract in contracts.keys(): if len(contracts[contract]["evm"]["deployedBytecode"]["object"]): print(contract)
915
25.941176
69
py