Dataset schema (per-column type and observed range):

| column                 | dtype         | range    |
|------------------------|---------------|----------|
| body_hash              | stringlengths | 64-64    |
| body                   | stringlengths | 23-109k  |
| docstring              | stringlengths | 1-57k    |
| path                   | stringlengths | 4-198    |
| name                   | stringlengths | 1-115    |
| repository_name        | stringlengths | 7-111    |
| repository_stars       | float64       | 0-191k   |
| lang                   | stringclasses | 1 value  |
| body_without_docstring | stringlengths | 14-108k  |
| unified                | stringlengths | 45-133k  |
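Each record below follows this schema. Here is a minimal sketch of loading and inspecting rows of this shape; the JSONL filename is a placeholder, since the dataset's actual storage format and location are not stated here:

```python
import json

# Hypothetical local export of the dataset; one JSON object per line,
# carrying the columns listed in the schema above.
with open("functions.jsonl", "r", encoding="utf-8") as f:
    rows = [json.loads(line) for line in f]

for row in rows[:3]:
    print(row["repository_name"], row["path"], row["name"])
    print(f'  body: {len(row["body"])} chars, docstring: {len(row["docstring"])} chars')
```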
bf14931d763cc2da8061def2584cf1a3948be1dc6eb529452662c4b1d0d4b234
def load_config(additional_paths: typing.List[str]=[]) -> typing.Dict: "\n Searches the current working directory and `additional_paths` for a loadable\n module named `config.py`. If a file is found and has a global variable named\n 'PRESALYTICS', the dictionary contained in 'PRESALYTICS' is returned.\n\n *Note*: An environment variable called `autodiscover_paths` is automatically\n loaded into the `additional_paths` keyword argument when the `presalytics` module\n is imported.\n\n Parameters\n ----------\n additional_paths : list of str, optional\n Additional filepaths to search for files named `config.py`\n\n Returns\n -------\n a `dict` containing the PRESALYTICS module environment configuration\n\n " current_path = os.getcwd() additional_paths.append(current_path) config_dict = None try: for path in additional_paths: for name in os.listdir(path): if (name == 'config.py'): config_path = os.path.join(path, name) config_spec = importlib.util.spec_from_file_location('config', config_path) config_mod = importlib.util.module_from_spec(config_spec) config_spec.loader.exec_module(config_mod) config_dict = getattr(config_mod, 'PRESALYTICS', None) if (not config_dict): config_dict = config_mod.__dict__ if config_dict: break if config_dict: break if config_dict: return config_dict else: return {} except Exception as ex: logger.exception(ex) return {}
Searches the current working directory and `additional_paths` for a loadable module named `config.py`. If a file is found and has a global variable named 'PRESALYTICS', the dictionary contained in 'PRESALYTICS' is returned. *Note*: An environment variable called `autodiscover_paths` is automatically loaded into the `additional_paths` keyword argument when the `presalytics` module is imported. Parameters ---------- additional_paths : list of str, optional Additional filepaths to search for files named `config.py` Returns ------- a `dict` containing the PRESALYTICS module environment configuration
presalytics/lib/config_loader.py
load_config
presalytics/python-client
4
python
def load_config(additional_paths: typing.List[str]=[]) -> typing.Dict: "\n Searches the current working directory and `additional_paths` for a loadable\n module named `config.py`. If a file is found and has a global variable named\n 'PRESALYTICS', the dictionary contained in 'PRESALYTICS' is returned.\n\n *Note*: An environment variable called `autodiscover_paths` is automatically\n loaded into the `additional_paths` keyword argument when the `presalytics` module\n is imported.\n\n Parameters\n ----------\n additional_paths : list of str, optional\n Additional filepaths to search for files named `config.py`\n\n Returns\n -------\n a `dict` containing the PRESALYTICS module environment configuration\n\n " current_path = os.getcwd() additional_paths.append(current_path) config_dict = None try: for path in additional_paths: for name in os.listdir(path): if (name == 'config.py'): config_path = os.path.join(path, name) config_spec = importlib.util.spec_from_file_location('config', config_path) config_mod = importlib.util.module_from_spec(config_spec) config_spec.loader.exec_module(config_mod) config_dict = getattr(config_mod, 'PRESALYTICS', None) if (not config_dict): config_dict = config_mod.__dict__ if config_dict: break if config_dict: break if config_dict: return config_dict else: return {} except Exception as ex: logger.exception(ex) return {}
def load_config(additional_paths: typing.List[str]=[]) -> typing.Dict: "\n Searches the current working directory and `additional_paths` for a loadable\n module named `config.py`. If a file is found and has a global variable named\n 'PRESALYTICS', the dictionary contained in 'PRESALYTICS' is returned.\n\n *Note*: An environment variable called `autodiscover_paths` is automatically\n loaded into the `additional_paths` keyword argument when the `presalytics` module\n is imported.\n\n Parameters\n ----------\n additional_paths : list of str, optional\n Additional filepaths to search for files named `config.py`\n\n Returns\n -------\n a `dict` containing the PRESALYTICS module environment configuration\n\n " current_path = os.getcwd() additional_paths.append(current_path) config_dict = None try: for path in additional_paths: for name in os.listdir(path): if (name == 'config.py'): config_path = os.path.join(path, name) config_spec = importlib.util.spec_from_file_location('config', config_path) config_mod = importlib.util.module_from_spec(config_spec) config_spec.loader.exec_module(config_mod) config_dict = getattr(config_mod, 'PRESALYTICS', None) if (not config_dict): config_dict = config_mod.__dict__ if config_dict: break if config_dict: break if config_dict: return config_dict else: return {} except Exception as ex: logger.exception(ex) return {}<|docstring|>Searches the current working directory and `additional_paths` for a loadable module named `config.py`. If a file is found and has a global variable named 'PRESALYTICS', the dictionary contained in 'PRESALYTICS' is returned. *Note*: An environment variable called `autodiscover_paths` is automatically loaded into the `additional_paths` keyword argument when the `presalytics` module is imported. Parameters ---------- additional_paths : list of str, optional Additional filepaths to search for files named `config.py` Returns ------- a `dict` containing the PRESALYTICS module environment configuration<|endoftext|>
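The row above documents a config-autodiscovery helper. Below is a minimal usage sketch, assuming the module path given in the row's `path` field; the `PRESALYTICS` key and the extra search path are illustrative, not part of the row:

```python
# config.py, placed in the working directory or on an extra search path:
#     PRESALYTICS = {"CLIENT_ID": "example-client"}   # illustrative keys
import presalytics.lib.config_loader as config_loader

# Searches os.getcwd() plus any additional paths for a config.py module.
settings = config_loader.load_config(additional_paths=["/opt/myapp"])
print(settings.get("CLIENT_ID"))
```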
7b9b2aa17706a687602fbd3c6f53f87d67f29c9cffd715ed425ede93d4750718
def test_global_avg_pool_module(self): '\n Tests the global average pool module with fixed 4-d test tensors\n ' base_tensor = torch.Tensor([[2, 1], [3, 0]]) all_init = [] for i in range((- 2), 3): all_init.append(torch.add(base_tensor, i)) init_tensor = torch.stack(all_init, dim=2) init_tensor = init_tensor.unsqueeze((- 1)) reference = base_tensor.unsqueeze((- 1)).unsqueeze((- 1)) encr_module = crypten.nn.GlobalAveragePool().encrypt() self.assertTrue(encr_module.encrypted, 'module not encrypted') for i in range(1, 10): input = init_tensor.repeat(1, 1, i, i) encr_input = crypten.cryptensor(input) encr_output = encr_module(encr_input) self._check(encr_output, reference, 'GlobalAveragePool failed')
Tests the global average pool module with fixed 4-d test tensors
test/test_nn.py
test_global_avg_pool_module
youben11/CrypTen
2
python
def test_global_avg_pool_module(self): '\n \n ' base_tensor = torch.Tensor([[2, 1], [3, 0]]) all_init = [] for i in range((- 2), 3): all_init.append(torch.add(base_tensor, i)) init_tensor = torch.stack(all_init, dim=2) init_tensor = init_tensor.unsqueeze((- 1)) reference = base_tensor.unsqueeze((- 1)).unsqueeze((- 1)) encr_module = crypten.nn.GlobalAveragePool().encrypt() self.assertTrue(encr_module.encrypted, 'module not encrypted') for i in range(1, 10): input = init_tensor.repeat(1, 1, i, i) encr_input = crypten.cryptensor(input) encr_output = encr_module(encr_input) self._check(encr_output, reference, 'GlobalAveragePool failed')
def test_global_avg_pool_module(self): '\n \n ' base_tensor = torch.Tensor([[2, 1], [3, 0]]) all_init = [] for i in range((- 2), 3): all_init.append(torch.add(base_tensor, i)) init_tensor = torch.stack(all_init, dim=2) init_tensor = init_tensor.unsqueeze((- 1)) reference = base_tensor.unsqueeze((- 1)).unsqueeze((- 1)) encr_module = crypten.nn.GlobalAveragePool().encrypt() self.assertTrue(encr_module.encrypted, 'module not encrypted') for i in range(1, 10): input = init_tensor.repeat(1, 1, i, i) encr_input = crypten.cryptensor(input) encr_output = encr_module(encr_input) self._check(encr_output, reference, 'GlobalAveragePool failed')<|docstring|>Tests the global average pool module with fixed 4-d test tensors<|endoftext|>
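For reference, the plaintext operation the test above validates can be written directly in PyTorch; this is a sketch of global average pooling's semantics, not of CrypTen's encrypted implementation:

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 32, 32)         # N x C x H x W, as in the test
pooled = F.adaptive_avg_pool2d(x, 1)  # global average over H and W

# Equivalent formulation as a plain mean over the spatial dimensions.
assert torch.allclose(pooled, x.mean(dim=(2, 3), keepdim=True))
```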
38046427c74a05fc1e87859756e8ee42e5f737f95218795fdcb09b611337cf4d
def test_dropout_module(self): 'Tests the dropout module' input_size = [3, 3, 3] prob_list = [(0.2 * x) for x in range(1, 5)] for module_name in ['Dropout', 'Dropout2d', 'Dropout3d']: for prob in prob_list: for wrap in [True, False]: input = get_random_test_tensor(size=input_size, is_float=True, ex_zero=True) input.requires_grad = True encr_input = crypten.cryptensor(input) if wrap: encr_input = AutogradCrypTensor(encr_input) module = getattr(torch.nn, module_name)(prob) module.train() encr_module = crypten.nn.from_pytorch(module, input) for encrypted in [False, True, True, False, True]: encr_module.encrypt(mode=encrypted) if encrypted: self.assertTrue(encr_module.encrypted, 'module not encrypted') else: self.assertFalse(encr_module.encrypted, 'module encrypted') self.assertTrue(encr_module.training, 'training value incorrect') encr_output = encr_module(encr_input) plaintext_output = encr_output.get_plain_text() scaled_tensor = (input / (1 - prob)) reference = plaintext_output.where((plaintext_output == 0), scaled_tensor) self._check(encr_output, reference, 'Dropout forward failed') all_ones = torch.ones(reference.size()) ref_grad = plaintext_output.where((plaintext_output == 0), all_ones) ref_grad_input = (ref_grad / (1 - prob)) encr_output.backward() if wrap: self._check(encr_input.grad, ref_grad_input, 'dropout backward on input failed') encr_module.train(mode=False) encr_output = encr_module(encr_input) result = encr_input.eq(encr_output) result_plaintext = result.get_plain_text().bool() self.assertTrue(result_plaintext.all(), 'dropout failed in test mode')
Tests the dropout module
test/test_nn.py
test_dropout_module
youben11/CrypTen
2
python
def test_dropout_module(self): input_size = [3, 3, 3] prob_list = [(0.2 * x) for x in range(1, 5)] for module_name in ['Dropout', 'Dropout2d', 'Dropout3d']: for prob in prob_list: for wrap in [True, False]: input = get_random_test_tensor(size=input_size, is_float=True, ex_zero=True) input.requires_grad = True encr_input = crypten.cryptensor(input) if wrap: encr_input = AutogradCrypTensor(encr_input) module = getattr(torch.nn, module_name)(prob) module.train() encr_module = crypten.nn.from_pytorch(module, input) for encrypted in [False, True, True, False, True]: encr_module.encrypt(mode=encrypted) if encrypted: self.assertTrue(encr_module.encrypted, 'module not encrypted') else: self.assertFalse(encr_module.encrypted, 'module encrypted') self.assertTrue(encr_module.training, 'training value incorrect') encr_output = encr_module(encr_input) plaintext_output = encr_output.get_plain_text() scaled_tensor = (input / (1 - prob)) reference = plaintext_output.where((plaintext_output == 0), scaled_tensor) self._check(encr_output, reference, 'Dropout forward failed') all_ones = torch.ones(reference.size()) ref_grad = plaintext_output.where((plaintext_output == 0), all_ones) ref_grad_input = (ref_grad / (1 - prob)) encr_output.backward() if wrap: self._check(encr_input.grad, ref_grad_input, 'dropout backward on input failed') encr_module.train(mode=False) encr_output = encr_module(encr_input) result = encr_input.eq(encr_output) result_plaintext = result.get_plain_text().bool() self.assertTrue(result_plaintext.all(), 'dropout failed in test mode')
def test_dropout_module(self): input_size = [3, 3, 3] prob_list = [(0.2 * x) for x in range(1, 5)] for module_name in ['Dropout', 'Dropout2d', 'Dropout3d']: for prob in prob_list: for wrap in [True, False]: input = get_random_test_tensor(size=input_size, is_float=True, ex_zero=True) input.requires_grad = True encr_input = crypten.cryptensor(input) if wrap: encr_input = AutogradCrypTensor(encr_input) module = getattr(torch.nn, module_name)(prob) module.train() encr_module = crypten.nn.from_pytorch(module, input) for encrypted in [False, True, True, False, True]: encr_module.encrypt(mode=encrypted) if encrypted: self.assertTrue(encr_module.encrypted, 'module not encrypted') else: self.assertFalse(encr_module.encrypted, 'module encrypted') self.assertTrue(encr_module.training, 'training value incorrect') encr_output = encr_module(encr_input) plaintext_output = encr_output.get_plain_text() scaled_tensor = (input / (1 - prob)) reference = plaintext_output.where((plaintext_output == 0), scaled_tensor) self._check(encr_output, reference, 'Dropout forward failed') all_ones = torch.ones(reference.size()) ref_grad = plaintext_output.where((plaintext_output == 0), all_ones) ref_grad_input = (ref_grad / (1 - prob)) encr_output.backward() if wrap: self._check(encr_input.grad, ref_grad_input, 'dropout backward on input failed') encr_module.train(mode=False) encr_output = encr_module(encr_input) result = encr_input.eq(encr_output) result_plaintext = result.get_plain_text().bool() self.assertTrue(result_plaintext.all(), 'dropout failed in test mode')<|docstring|>Tests the dropout module<|endoftext|>
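The reference computation in the row above uses the inverted-dropout identity: during training, surviving activations are scaled by 1/(1 - p) and dropped ones are zeroed. A plaintext sketch of that identity:

```python
import torch

p = 0.4
x = torch.randn(3, 3, 3)  # nonzero with probability 1; the test enforces this via ex_zero=True
drop = torch.nn.Dropout(p)
drop.train()
y = drop(x)

# Kept elements equal the input scaled by 1/(1 - p); dropped ones are zero.
kept = y != 0
assert torch.allclose(y[kept], x[kept] / (1 - p))
```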
518467dff0d3737c61ceadb153bf7a1dc86be071543585ab0c1f216c1fe316e9
def test_non_pytorch_modules(self): '\n Tests all non-container Modules in crypten.nn that do not have\n equivalent modules in PyTorch.\n ' no_input_modules = ['Constant'] binary_modules = ['Add', 'Sub', 'Concat'] ex_zero_modules = [] module_args = {'Add': (), 'Concat': (0,), 'Constant': (1.2,), 'Exp': (), 'Gather': (0,), 'Reshape': (), 'ReduceSum': ([0], True), 'Shape': (), 'Sub': (), 'Squeeze': (0,), 'Unsqueeze': (0,)} module_lambdas = {'Add': (lambda x: (x[0] + x[1])), 'Concat': (lambda x: torch.cat((x[0], x[1]))), 'Constant': (lambda _: torch.tensor(module_args['Constant'][0])), 'Exp': (lambda x: torch.exp(x)), 'Gather': (lambda x: torch.from_numpy(x[0].numpy().take(x[1], module_args['Gather'][0]))), 'ReduceSum': (lambda x: torch.sum(x, dim=module_args['ReduceSum'][0], keepdim=(module_args['ReduceSum'][1] == 1))), 'Reshape': (lambda x: x[0].reshape(x[1].tolist())), 'Shape': (lambda x: torch.tensor(x.size()).float()), 'Sub': (lambda x: (x[0] - x[1])), 'Squeeze': (lambda x: x.squeeze(module_args['Squeeze'][0])), 'Unsqueeze': (lambda x: x.unsqueeze(module_args['Unsqueeze'][0]))} input_sizes = {'Add': (10, 12), 'Concat': (2, 2), 'Constant': (1,), 'Exp': (10, 10, 10), 'Gather': (4, 4, 4, 4), 'Reshape': (1, 4), 'ReduceSum': (3, 3, 3), 'Shape': (8, 3, 2), 'Sub': (10, 12), 'Squeeze': (1, 12, 6), 'Unsqueeze': (8, 3)} additional_inputs = {'Gather': torch.tensor([[1, 2], [0, 3]]), 'Reshape': torch.tensor([2, 2])} module_attributes = {'Add': [], 'Exp': [], 'Concat': [('axis', False)], 'Constant': [('value', False)], 'Gather': [('axis', False)], 'ReduceSum': [('axes', False), ('keepdims', False)], 'Reshape': [], 'Shape': [], 'Sub': [], 'Squeeze': [('axes', True)], 'Unsqueeze': [('axes', True)]} for module_name in module_args.keys(): encr_module = getattr(crypten.nn, module_name)(*module_args[module_name]) encr_module.encrypt() self.assertTrue(encr_module.encrypted, 'module not encrypted') (inputs, encr_inputs) = (None, None) ex_zero_values = (module_name in ex_zero_modules) if (module_name in binary_modules): inputs = [get_random_test_tensor(size=input_sizes[module_name], is_float=True, ex_zero=ex_zero_values) for _ in range(2)] encr_inputs = [crypten.cryptensor(input) for input in inputs] elif (module_name not in no_input_modules): inputs = get_random_test_tensor(size=input_sizes[module_name], is_float=True, ex_zero=ex_zero_values) encr_inputs = crypten.cryptensor(inputs) if (module_name in additional_inputs): if (not isinstance(inputs, (list, tuple))): (inputs, encr_inputs) = ([inputs], [encr_inputs]) inputs.append(additional_inputs[module_name]) encr_inputs.append(crypten.cryptensor(inputs[(- 1)])) reference = module_lambdas[module_name](inputs) encr_output = encr_module(encr_inputs) self._check(encr_output, reference, ('%s failed' % module_name)) local_attr = {} for (i, attr_tuple) in enumerate(module_attributes[module_name]): (attr_name, wrap_attr_in_list) = attr_tuple if wrap_attr_in_list: local_attr[attr_name] = [module_args[module_name][i]] else: local_attr[attr_name] = module_args[module_name][i] if (module_name == 'ReduceSum'): local_attr['keepdims'] = (1 if (module_args['ReduceSum'][1] is True) else 0) module = getattr(crypten.nn, module_name).from_onnx(attributes=local_attr) encr_module_onnx = module.encrypt() encr_output = encr_module_onnx(encr_inputs) self._check(encr_output, reference, ('%s failed' % module_name))
Tests all non-container Modules in crypten.nn that do not have equivalent modules in PyTorch.
test/test_nn.py
test_non_pytorch_modules
youben11/CrypTen
2
python
def test_non_pytorch_modules(self): '\n Tests all non-container Modules in crypten.nn that do not have\n equivalent modules in PyTorch.\n ' no_input_modules = ['Constant'] binary_modules = ['Add', 'Sub', 'Concat'] ex_zero_modules = [] module_args = {'Add': (), 'Concat': (0,), 'Constant': (1.2,), 'Exp': (), 'Gather': (0,), 'Reshape': (), 'ReduceSum': ([0], True), 'Shape': (), 'Sub': (), 'Squeeze': (0,), 'Unsqueeze': (0,)} module_lambdas = {'Add': (lambda x: (x[0] + x[1])), 'Concat': (lambda x: torch.cat((x[0], x[1]))), 'Constant': (lambda _: torch.tensor(module_args['Constant'][0])), 'Exp': (lambda x: torch.exp(x)), 'Gather': (lambda x: torch.from_numpy(x[0].numpy().take(x[1], module_args['Gather'][0]))), 'ReduceSum': (lambda x: torch.sum(x, dim=module_args['ReduceSum'][0], keepdim=(module_args['ReduceSum'][1] == 1))), 'Reshape': (lambda x: x[0].reshape(x[1].tolist())), 'Shape': (lambda x: torch.tensor(x.size()).float()), 'Sub': (lambda x: (x[0] - x[1])), 'Squeeze': (lambda x: x.squeeze(module_args['Squeeze'][0])), 'Unsqueeze': (lambda x: x.unsqueeze(module_args['Unsqueeze'][0]))} input_sizes = {'Add': (10, 12), 'Concat': (2, 2), 'Constant': (1,), 'Exp': (10, 10, 10), 'Gather': (4, 4, 4, 4), 'Reshape': (1, 4), 'ReduceSum': (3, 3, 3), 'Shape': (8, 3, 2), 'Sub': (10, 12), 'Squeeze': (1, 12, 6), 'Unsqueeze': (8, 3)} additional_inputs = {'Gather': torch.tensor([[1, 2], [0, 3]]), 'Reshape': torch.tensor([2, 2])} module_attributes = {'Add': [], 'Exp': [], 'Concat': [('axis', False)], 'Constant': [('value', False)], 'Gather': [('axis', False)], 'ReduceSum': [('axes', False), ('keepdims', False)], 'Reshape': [], 'Shape': [], 'Sub': [], 'Squeeze': [('axes', True)], 'Unsqueeze': [('axes', True)]} for module_name in module_args.keys(): encr_module = getattr(crypten.nn, module_name)(*module_args[module_name]) encr_module.encrypt() self.assertTrue(encr_module.encrypted, 'module not encrypted') (inputs, encr_inputs) = (None, None) ex_zero_values = (module_name in ex_zero_modules) if (module_name in binary_modules): inputs = [get_random_test_tensor(size=input_sizes[module_name], is_float=True, ex_zero=ex_zero_values) for _ in range(2)] encr_inputs = [crypten.cryptensor(input) for input in inputs] elif (module_name not in no_input_modules): inputs = get_random_test_tensor(size=input_sizes[module_name], is_float=True, ex_zero=ex_zero_values) encr_inputs = crypten.cryptensor(inputs) if (module_name in additional_inputs): if (not isinstance(inputs, (list, tuple))): (inputs, encr_inputs) = ([inputs], [encr_inputs]) inputs.append(additional_inputs[module_name]) encr_inputs.append(crypten.cryptensor(inputs[(- 1)])) reference = module_lambdas[module_name](inputs) encr_output = encr_module(encr_inputs) self._check(encr_output, reference, ('%s failed' % module_name)) local_attr = {} for (i, attr_tuple) in enumerate(module_attributes[module_name]): (attr_name, wrap_attr_in_list) = attr_tuple if wrap_attr_in_list: local_attr[attr_name] = [module_args[module_name][i]] else: local_attr[attr_name] = module_args[module_name][i] if (module_name == 'ReduceSum'): local_attr['keepdims'] = (1 if (module_args['ReduceSum'][1] is True) else 0) module = getattr(crypten.nn, module_name).from_onnx(attributes=local_attr) encr_module_onnx = module.encrypt() encr_output = encr_module_onnx(encr_inputs) self._check(encr_output, reference, ('%s failed' % module_name))
def test_non_pytorch_modules(self): '\n Tests all non-container Modules in crypten.nn that do not have\n equivalent modules in PyTorch.\n ' no_input_modules = ['Constant'] binary_modules = ['Add', 'Sub', 'Concat'] ex_zero_modules = [] module_args = {'Add': (), 'Concat': (0,), 'Constant': (1.2,), 'Exp': (), 'Gather': (0,), 'Reshape': (), 'ReduceSum': ([0], True), 'Shape': (), 'Sub': (), 'Squeeze': (0,), 'Unsqueeze': (0,)} module_lambdas = {'Add': (lambda x: (x[0] + x[1])), 'Concat': (lambda x: torch.cat((x[0], x[1]))), 'Constant': (lambda _: torch.tensor(module_args['Constant'][0])), 'Exp': (lambda x: torch.exp(x)), 'Gather': (lambda x: torch.from_numpy(x[0].numpy().take(x[1], module_args['Gather'][0]))), 'ReduceSum': (lambda x: torch.sum(x, dim=module_args['ReduceSum'][0], keepdim=(module_args['ReduceSum'][1] == 1))), 'Reshape': (lambda x: x[0].reshape(x[1].tolist())), 'Shape': (lambda x: torch.tensor(x.size()).float()), 'Sub': (lambda x: (x[0] - x[1])), 'Squeeze': (lambda x: x.squeeze(module_args['Squeeze'][0])), 'Unsqueeze': (lambda x: x.unsqueeze(module_args['Unsqueeze'][0]))} input_sizes = {'Add': (10, 12), 'Concat': (2, 2), 'Constant': (1,), 'Exp': (10, 10, 10), 'Gather': (4, 4, 4, 4), 'Reshape': (1, 4), 'ReduceSum': (3, 3, 3), 'Shape': (8, 3, 2), 'Sub': (10, 12), 'Squeeze': (1, 12, 6), 'Unsqueeze': (8, 3)} additional_inputs = {'Gather': torch.tensor([[1, 2], [0, 3]]), 'Reshape': torch.tensor([2, 2])} module_attributes = {'Add': [], 'Exp': [], 'Concat': [('axis', False)], 'Constant': [('value', False)], 'Gather': [('axis', False)], 'ReduceSum': [('axes', False), ('keepdims', False)], 'Reshape': [], 'Shape': [], 'Sub': [], 'Squeeze': [('axes', True)], 'Unsqueeze': [('axes', True)]} for module_name in module_args.keys(): encr_module = getattr(crypten.nn, module_name)(*module_args[module_name]) encr_module.encrypt() self.assertTrue(encr_module.encrypted, 'module not encrypted') (inputs, encr_inputs) = (None, None) ex_zero_values = (module_name in ex_zero_modules) if (module_name in binary_modules): inputs = [get_random_test_tensor(size=input_sizes[module_name], is_float=True, ex_zero=ex_zero_values) for _ in range(2)] encr_inputs = [crypten.cryptensor(input) for input in inputs] elif (module_name not in no_input_modules): inputs = get_random_test_tensor(size=input_sizes[module_name], is_float=True, ex_zero=ex_zero_values) encr_inputs = crypten.cryptensor(inputs) if (module_name in additional_inputs): if (not isinstance(inputs, (list, tuple))): (inputs, encr_inputs) = ([inputs], [encr_inputs]) inputs.append(additional_inputs[module_name]) encr_inputs.append(crypten.cryptensor(inputs[(- 1)])) reference = module_lambdas[module_name](inputs) encr_output = encr_module(encr_inputs) self._check(encr_output, reference, ('%s failed' % module_name)) local_attr = {} for (i, attr_tuple) in enumerate(module_attributes[module_name]): (attr_name, wrap_attr_in_list) = attr_tuple if wrap_attr_in_list: local_attr[attr_name] = [module_args[module_name][i]] else: local_attr[attr_name] = module_args[module_name][i] if (module_name == 'ReduceSum'): local_attr['keepdims'] = (1 if (module_args['ReduceSum'][1] is True) else 0) module = getattr(crypten.nn, module_name).from_onnx(attributes=local_attr) encr_module_onnx = module.encrypt() encr_output = encr_module_onnx(encr_inputs) self._check(encr_output, reference, ('%s failed' % module_name))<|docstring|>Tests all non-container Modules in crypten.nn that do not have equivalent modules in PyTorch.<|endoftext|>
1db3c09bb6a761e1fabfff4d6937db95b49a30d5eb5ef0d6d64caa2e8bf6de5b
def test_pytorch_modules(self): '\n Tests all non-container Modules in crypten.nn that have equivalent\n modules in PyTorch.\n ' module_args = {'AdaptiveAvgPool2d': (2,), 'AvgPool2d': (2,), 'ConstantPad1d': (3, 1.0), 'ConstantPad2d': (2, 2.0), 'ConstantPad3d': (1, 0.0), 'Conv2d': (3, 6, 5), 'Linear': (400, 120), 'MaxPool2d': (2,), 'ReLU': (), 'Softmax': (0,), 'LogSoftmax': (0,)} input_sizes = {'AdaptiveAvgPool2d': (1, 3, 32, 32), 'AvgPool2d': (1, 3, 32, 32), 'BatchNorm1d': (8, 400), 'BatchNorm2d': (8, 3, 32, 32), 'BatchNorm3d': (8, 6, 32, 32, 4), 'ConstantPad1d': (9,), 'ConstantPad2d': (3, 6), 'ConstantPad3d': (4, 2, 7), 'Conv2d': (1, 3, 32, 32), 'Linear': (1, 400), 'MaxPool2d': (1, 2, 32, 32), 'ReLU': (1, 3, 32, 32), 'Softmax': (5, 5, 5), 'LogSoftmax': (5, 5, 5)} for module_name in module_args.keys(): for wrap in [True, False]: input = get_random_test_tensor(size=input_sizes[module_name], is_float=True) input.requires_grad = True encr_input = crypten.cryptensor(input) if wrap: encr_input = AutogradCrypTensor(encr_input) module = getattr(torch.nn, module_name)(*module_args[module_name]) module.train() encr_module = crypten.nn.from_pytorch(module, input) for encrypted in [False, True, True, False, True]: encr_module.encrypt(mode=encrypted) if encrypted: self.assertTrue(encr_module.encrypted, 'module not encrypted') else: self.assertFalse(encr_module.encrypted, 'module encrypted') for key in ['weight', 'bias']: if hasattr(module, key): encr_param = None if isinstance(encr_module, crypten.nn.Graph): for encr_node in encr_module.modules(): if hasattr(encr_node, key): encr_param = getattr(encr_node, key) break else: encr_param = getattr(encr_module, key) reference = getattr(module, key) src_reference = comm.get().broadcast(reference, src=0) msg = ('parameter %s in %s incorrect' % (key, module_name)) if (not encrypted): encr_param = crypten.cryptensor(encr_param) self._check(encr_param, src_reference, msg) self.assertTrue(encr_module.training, 'training value incorrect') reference = module(input) encr_output = encr_module(encr_input) self._check(encr_output, reference, ('%s forward failed' % module_name)) reference.backward(torch.ones(reference.size())) encr_output.backward() if wrap: self._check(encr_input.grad, input.grad, ('%s backward on input failed' % module_name)) else: self.assertFalse(hasattr(encr_input, 'grad')) for (name, param) in module.named_parameters(): encr_param = getattr(encr_module, name) self._check(encr_param.grad, param.grad, ('%s backward on %s failed' % (module_name, name)))
Tests all non-container Modules in crypten.nn that have equivalent modules in PyTorch.
test/test_nn.py
test_pytorch_modules
youben11/CrypTen
2
python
def test_pytorch_modules(self): '\n Tests all non-container Modules in crypten.nn that have equivalent\n modules in PyTorch.\n ' module_args = {'AdaptiveAvgPool2d': (2,), 'AvgPool2d': (2,), 'ConstantPad1d': (3, 1.0), 'ConstantPad2d': (2, 2.0), 'ConstantPad3d': (1, 0.0), 'Conv2d': (3, 6, 5), 'Linear': (400, 120), 'MaxPool2d': (2,), 'ReLU': (), 'Softmax': (0,), 'LogSoftmax': (0,)} input_sizes = {'AdaptiveAvgPool2d': (1, 3, 32, 32), 'AvgPool2d': (1, 3, 32, 32), 'BatchNorm1d': (8, 400), 'BatchNorm2d': (8, 3, 32, 32), 'BatchNorm3d': (8, 6, 32, 32, 4), 'ConstantPad1d': (9,), 'ConstantPad2d': (3, 6), 'ConstantPad3d': (4, 2, 7), 'Conv2d': (1, 3, 32, 32), 'Linear': (1, 400), 'MaxPool2d': (1, 2, 32, 32), 'ReLU': (1, 3, 32, 32), 'Softmax': (5, 5, 5), 'LogSoftmax': (5, 5, 5)} for module_name in module_args.keys(): for wrap in [True, False]: input = get_random_test_tensor(size=input_sizes[module_name], is_float=True) input.requires_grad = True encr_input = crypten.cryptensor(input) if wrap: encr_input = AutogradCrypTensor(encr_input) module = getattr(torch.nn, module_name)(*module_args[module_name]) module.train() encr_module = crypten.nn.from_pytorch(module, input) for encrypted in [False, True, True, False, True]: encr_module.encrypt(mode=encrypted) if encrypted: self.assertTrue(encr_module.encrypted, 'module not encrypted') else: self.assertFalse(encr_module.encrypted, 'module encrypted') for key in ['weight', 'bias']: if hasattr(module, key): encr_param = None if isinstance(encr_module, crypten.nn.Graph): for encr_node in encr_module.modules(): if hasattr(encr_node, key): encr_param = getattr(encr_node, key) break else: encr_param = getattr(encr_module, key) reference = getattr(module, key) src_reference = comm.get().broadcast(reference, src=0) msg = ('parameter %s in %s incorrect' % (key, module_name)) if (not encrypted): encr_param = crypten.cryptensor(encr_param) self._check(encr_param, src_reference, msg) self.assertTrue(encr_module.training, 'training value incorrect') reference = module(input) encr_output = encr_module(encr_input) self._check(encr_output, reference, ('%s forward failed' % module_name)) reference.backward(torch.ones(reference.size())) encr_output.backward() if wrap: self._check(encr_input.grad, input.grad, ('%s backward on input failed' % module_name)) else: self.assertFalse(hasattr(encr_input, 'grad')) for (name, param) in module.named_parameters(): encr_param = getattr(encr_module, name) self._check(encr_param.grad, param.grad, ('%s backward on %s failed' % (module_name, name)))
def test_pytorch_modules(self): '\n Tests all non-container Modules in crypten.nn that have equivalent\n modules in PyTorch.\n ' module_args = {'AdaptiveAvgPool2d': (2,), 'AvgPool2d': (2,), 'ConstantPad1d': (3, 1.0), 'ConstantPad2d': (2, 2.0), 'ConstantPad3d': (1, 0.0), 'Conv2d': (3, 6, 5), 'Linear': (400, 120), 'MaxPool2d': (2,), 'ReLU': (), 'Softmax': (0,), 'LogSoftmax': (0,)} input_sizes = {'AdaptiveAvgPool2d': (1, 3, 32, 32), 'AvgPool2d': (1, 3, 32, 32), 'BatchNorm1d': (8, 400), 'BatchNorm2d': (8, 3, 32, 32), 'BatchNorm3d': (8, 6, 32, 32, 4), 'ConstantPad1d': (9,), 'ConstantPad2d': (3, 6), 'ConstantPad3d': (4, 2, 7), 'Conv2d': (1, 3, 32, 32), 'Linear': (1, 400), 'MaxPool2d': (1, 2, 32, 32), 'ReLU': (1, 3, 32, 32), 'Softmax': (5, 5, 5), 'LogSoftmax': (5, 5, 5)} for module_name in module_args.keys(): for wrap in [True, False]: input = get_random_test_tensor(size=input_sizes[module_name], is_float=True) input.requires_grad = True encr_input = crypten.cryptensor(input) if wrap: encr_input = AutogradCrypTensor(encr_input) module = getattr(torch.nn, module_name)(*module_args[module_name]) module.train() encr_module = crypten.nn.from_pytorch(module, input) for encrypted in [False, True, True, False, True]: encr_module.encrypt(mode=encrypted) if encrypted: self.assertTrue(encr_module.encrypted, 'module not encrypted') else: self.assertFalse(encr_module.encrypted, 'module encrypted') for key in ['weight', 'bias']: if hasattr(module, key): encr_param = None if isinstance(encr_module, crypten.nn.Graph): for encr_node in encr_module.modules(): if hasattr(encr_node, key): encr_param = getattr(encr_node, key) break else: encr_param = getattr(encr_module, key) reference = getattr(module, key) src_reference = comm.get().broadcast(reference, src=0) msg = ('parameter %s in %s incorrect' % (key, module_name)) if (not encrypted): encr_param = crypten.cryptensor(encr_param) self._check(encr_param, src_reference, msg) self.assertTrue(encr_module.training, 'training value incorrect') reference = module(input) encr_output = encr_module(encr_input) self._check(encr_output, reference, ('%s forward failed' % module_name)) reference.backward(torch.ones(reference.size())) encr_output.backward() if wrap: self._check(encr_input.grad, input.grad, ('%s backward on input failed' % module_name)) else: self.assertFalse(hasattr(encr_input, 'grad')) for (name, param) in module.named_parameters(): encr_param = getattr(encr_module, name) self._check(encr_param.grad, param.grad, ('%s backward on %s failed' % (module_name, name)))<|docstring|>Tests all non-container Modules in crypten.nn that have equivalent modules in PyTorch.<|endoftext|>
8c5c1804559f8471c41c82698da7939ab686e818ceb32fabfd4b050770665d32
def test_sequential(self): '\n Tests crypten.nn.Sequential module.\n ' for num_layers in range(1, 6): for wrap in [True, False]: input_size = (3, 10) output_size = (input_size[0], (input_size[1] - num_layers)) layer_idx = range(input_size[1], output_size[1], (- 1)) module_list = [crypten.nn.Linear(num_feat, (num_feat - 1)) for num_feat in layer_idx] sequential = crypten.nn.Sequential(module_list) sequential.encrypt() self.assertTrue(sequential.encrypted, 'nn.Sequential not encrypted') for module in sequential.modules(): self.assertTrue(module.encrypted, 'module not encrypted') assert (sum((1 for _ in sequential.modules())) == len(module_list)), 'nn.Sequential contains incorrect number of modules' input = get_random_test_tensor(size=input_size, is_float=True) encr_input = crypten.cryptensor(input) if wrap: encr_input = AutogradCrypTensor(encr_input) encr_output = sequential(encr_input) encr_reference = encr_input for module in sequential.modules(): encr_reference = module(encr_reference) reference = encr_reference.get_plain_text() self._check(encr_output, reference, 'nn.Sequential forward failed')
Tests crypten.nn.Sequential module.
test/test_nn.py
test_sequential
youben11/CrypTen
2
python
def test_sequential(self): '\n \n ' for num_layers in range(1, 6): for wrap in [True, False]: input_size = (3, 10) output_size = (input_size[0], (input_size[1] - num_layers)) layer_idx = range(input_size[1], output_size[1], (- 1)) module_list = [crypten.nn.Linear(num_feat, (num_feat - 1)) for num_feat in layer_idx] sequential = crypten.nn.Sequential(module_list) sequential.encrypt() self.assertTrue(sequential.encrypted, 'nn.Sequential not encrypted') for module in sequential.modules(): self.assertTrue(module.encrypted, 'module not encrypted') assert (sum((1 for _ in sequential.modules())) == len(module_list)), 'nn.Sequential contains incorrect number of modules' input = get_random_test_tensor(size=input_size, is_float=True) encr_input = crypten.cryptensor(input) if wrap: encr_input = AutogradCrypTensor(encr_input) encr_output = sequential(encr_input) encr_reference = encr_input for module in sequential.modules(): encr_reference = module(encr_reference) reference = encr_reference.get_plain_text() self._check(encr_output, reference, 'nn.Sequential forward failed')
def test_sequential(self): '\n \n ' for num_layers in range(1, 6): for wrap in [True, False]: input_size = (3, 10) output_size = (input_size[0], (input_size[1] - num_layers)) layer_idx = range(input_size[1], output_size[1], (- 1)) module_list = [crypten.nn.Linear(num_feat, (num_feat - 1)) for num_feat in layer_idx] sequential = crypten.nn.Sequential(module_list) sequential.encrypt() self.assertTrue(sequential.encrypted, 'nn.Sequential not encrypted') for module in sequential.modules(): self.assertTrue(module.encrypted, 'module not encrypted') assert (sum((1 for _ in sequential.modules())) == len(module_list)), 'nn.Sequential contains incorrect number of modules' input = get_random_test_tensor(size=input_size, is_float=True) encr_input = crypten.cryptensor(input) if wrap: encr_input = AutogradCrypTensor(encr_input) encr_output = sequential(encr_input) encr_reference = encr_input for module in sequential.modules(): encr_reference = module(encr_reference) reference = encr_reference.get_plain_text() self._check(encr_output, reference, 'nn.Sequential forward failed')<|docstring|>Tests crypten.nn.Sequential module.<|endoftext|>
1dc6010c88183f60029314c8bd878759172d20347ea013d15924302c92d1b4a9
def test_graph(self): '\n Tests crypten.nn.Graph module.\n ' for wrap in [True, False]: input_size = (3, 10) input = get_random_test_tensor(size=input_size, is_float=True) encr_input = crypten.cryptensor(input) if wrap: encr_input = AutogradCrypTensor(encr_input) graph = crypten.nn.Graph('input', 'output') linear1 = get_random_linear(input_size[1], input_size[1]) linear2 = get_random_linear(input_size[1], input_size[1]) graph.add_module('linear', crypten.nn.from_pytorch(linear1, input), ['input']) graph.add_module('residual', crypten.nn.Add(), ['input', 'linear']) graph.add_module('output', crypten.nn.from_pytorch(linear2, input), ['residual']) graph.encrypt() self.assertTrue(graph.encrypted, 'nn.Graph not encrypted') for module in graph.modules(): self.assertTrue(module.encrypted, 'module not encrypted') assert (sum((1 for _ in graph.modules())) == 3), 'nn.Graph contains incorrect number of modules' encr_output = graph(encr_input) reference = linear2((linear1(input) + input)) self._check(encr_output, reference, 'nn.Graph forward failed')
Tests crypten.nn.Graph module.
test/test_nn.py
test_graph
youben11/CrypTen
2
python
def test_graph(self): '\n \n ' for wrap in [True, False]: input_size = (3, 10) input = get_random_test_tensor(size=input_size, is_float=True) encr_input = crypten.cryptensor(input) if wrap: encr_input = AutogradCrypTensor(encr_input) graph = crypten.nn.Graph('input', 'output') linear1 = get_random_linear(input_size[1], input_size[1]) linear2 = get_random_linear(input_size[1], input_size[1]) graph.add_module('linear', crypten.nn.from_pytorch(linear1, input), ['input']) graph.add_module('residual', crypten.nn.Add(), ['input', 'linear']) graph.add_module('output', crypten.nn.from_pytorch(linear2, input), ['residual']) graph.encrypt() self.assertTrue(graph.encrypted, 'nn.Graph not encrypted') for module in graph.modules(): self.assertTrue(module.encrypted, 'module not encrypted') assert (sum((1 for _ in graph.modules())) == 3), 'nn.Graph contains incorrect number of modules' encr_output = graph(encr_input) reference = linear2((linear1(input) + input)) self._check(encr_output, reference, 'nn.Graph forward failed')
def test_graph(self): '\n \n ' for wrap in [True, False]: input_size = (3, 10) input = get_random_test_tensor(size=input_size, is_float=True) encr_input = crypten.cryptensor(input) if wrap: encr_input = AutogradCrypTensor(encr_input) graph = crypten.nn.Graph('input', 'output') linear1 = get_random_linear(input_size[1], input_size[1]) linear2 = get_random_linear(input_size[1], input_size[1]) graph.add_module('linear', crypten.nn.from_pytorch(linear1, input), ['input']) graph.add_module('residual', crypten.nn.Add(), ['input', 'linear']) graph.add_module('output', crypten.nn.from_pytorch(linear2, input), ['residual']) graph.encrypt() self.assertTrue(graph.encrypted, 'nn.Graph not encrypted') for module in graph.modules(): self.assertTrue(module.encrypted, 'module not encrypted') assert (sum((1 for _ in graph.modules())) == 3), 'nn.Graph contains incorrect number of modules' encr_output = graph(encr_input) reference = linear2((linear1(input) + input)) self._check(encr_output, reference, 'nn.Graph forward failed')<|docstring|>Tests crypten.nn.Graph module.<|endoftext|>
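The graph assembled in the row above wires a single residual block: input, then linear1, then an add with the input, then linear2. The same reference computation in plain PyTorch, matching the test's final check:

```python
import torch

linear1 = torch.nn.Linear(10, 10)
linear2 = torch.nn.Linear(10, 10)
x = torch.randn(3, 10)  # same (3, 10) input size as the test

# The 'residual' node adds the original input back onto linear1's output.
reference = linear2(linear1(x) + x)
print(reference.shape)  # torch.Size([3, 10])
```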
634ae4313d5ee6c554be40352037b3c350db2d9d90fc6126301fe8ad1ed884a1
def test_losses(self): '\n Tests all Losses implemented in crypten.nn.\n ' input = (get_random_test_tensor(max_value=0.999, is_float=True).abs() + 0.001) target = (get_random_test_tensor(max_value=0.999, is_float=True).abs() + 0.001) encrypted_input = crypten.cryptensor(input) encrypted_target = crypten.cryptensor(target) for loss_name in ['BCELoss', 'BCEWithLogitsLoss', 'L1Loss', 'MSELoss']: for skip_forward in [False, True]: enc_loss_object = getattr(torch.nn, loss_name)() self.assertEqual(enc_loss_object.reduction, 'mean', "Reduction used is not 'mean'") input.requires_grad = True input.grad = None loss = getattr(torch.nn, loss_name)()(input, target) if (not skip_forward): encrypted_loss = getattr(crypten.nn, loss_name)()(encrypted_input, encrypted_target) self._check(encrypted_loss, loss, ('%s failed' % loss_name)) encrypted_input = AutogradCrypTensor(encrypted_input, requires_grad=True) encrypted_loss = getattr(crypten.nn, loss_name)(skip_forward=skip_forward)(encrypted_input, AutogradCrypTensor(encrypted_target)) if (not skip_forward): self._check(encrypted_loss, loss, ('%s failed' % loss_name)) loss.backward() encrypted_loss.backward() self._check(encrypted_input.grad, input.grad, ('%s grad failed' % loss_name)) (batch_size, num_targets) = (16, 5) input = get_random_test_tensor(size=(batch_size, num_targets), is_float=True) target = get_random_test_tensor(size=(batch_size,), max_value=(num_targets - 1)).abs() encrypted_input = crypten.cryptensor(input) encrypted_target = crypten.cryptensor(onehot(target, num_targets=num_targets)) enc_loss_object = crypten.nn.CrossEntropyLoss() self.assertEqual(enc_loss_object.reduction, 'mean', "Reduction used is not 'mean'") loss = torch.nn.CrossEntropyLoss()(input, target) encrypted_loss = crypten.nn.CrossEntropyLoss()(encrypted_input, encrypted_target) self._check(encrypted_loss, loss, 'cross-entropy loss failed') encrypted_loss = crypten.nn.CrossEntropyLoss()(AutogradCrypTensor(encrypted_input), AutogradCrypTensor(encrypted_target)) self._check(encrypted_loss, loss, 'cross-entropy loss failed')
Tests all Losses implemented in crypten.nn.
test/test_nn.py
test_losses
youben11/CrypTen
2
python
def test_losses(self): '\n \n ' input = (get_random_test_tensor(max_value=0.999, is_float=True).abs() + 0.001) target = (get_random_test_tensor(max_value=0.999, is_float=True).abs() + 0.001) encrypted_input = crypten.cryptensor(input) encrypted_target = crypten.cryptensor(target) for loss_name in ['BCELoss', 'BCEWithLogitsLoss', 'L1Loss', 'MSELoss']: for skip_forward in [False, True]: enc_loss_object = getattr(torch.nn, loss_name)() self.assertEqual(enc_loss_object.reduction, 'mean', "Reduction used is not 'mean'") input.requires_grad = True input.grad = None loss = getattr(torch.nn, loss_name)()(input, target) if (not skip_forward): encrypted_loss = getattr(crypten.nn, loss_name)()(encrypted_input, encrypted_target) self._check(encrypted_loss, loss, ('%s failed' % loss_name)) encrypted_input = AutogradCrypTensor(encrypted_input, requires_grad=True) encrypted_loss = getattr(crypten.nn, loss_name)(skip_forward=skip_forward)(encrypted_input, AutogradCrypTensor(encrypted_target)) if (not skip_forward): self._check(encrypted_loss, loss, ('%s failed' % loss_name)) loss.backward() encrypted_loss.backward() self._check(encrypted_input.grad, input.grad, ('%s grad failed' % loss_name)) (batch_size, num_targets) = (16, 5) input = get_random_test_tensor(size=(batch_size, num_targets), is_float=True) target = get_random_test_tensor(size=(batch_size,), max_value=(num_targets - 1)).abs() encrypted_input = crypten.cryptensor(input) encrypted_target = crypten.cryptensor(onehot(target, num_targets=num_targets)) enc_loss_object = crypten.nn.CrossEntropyLoss() self.assertEqual(enc_loss_object.reduction, 'mean', "Reduction used is not 'mean'") loss = torch.nn.CrossEntropyLoss()(input, target) encrypted_loss = crypten.nn.CrossEntropyLoss()(encrypted_input, encrypted_target) self._check(encrypted_loss, loss, 'cross-entropy loss failed') encrypted_loss = crypten.nn.CrossEntropyLoss()(AutogradCrypTensor(encrypted_input), AutogradCrypTensor(encrypted_target)) self._check(encrypted_loss, loss, 'cross-entropy loss failed')
def test_losses(self): '\n \n ' input = (get_random_test_tensor(max_value=0.999, is_float=True).abs() + 0.001) target = (get_random_test_tensor(max_value=0.999, is_float=True).abs() + 0.001) encrypted_input = crypten.cryptensor(input) encrypted_target = crypten.cryptensor(target) for loss_name in ['BCELoss', 'BCEWithLogitsLoss', 'L1Loss', 'MSELoss']: for skip_forward in [False, True]: enc_loss_object = getattr(torch.nn, loss_name)() self.assertEqual(enc_loss_object.reduction, 'mean', "Reduction used is not 'mean'") input.requires_grad = True input.grad = None loss = getattr(torch.nn, loss_name)()(input, target) if (not skip_forward): encrypted_loss = getattr(crypten.nn, loss_name)()(encrypted_input, encrypted_target) self._check(encrypted_loss, loss, ('%s failed' % loss_name)) encrypted_input = AutogradCrypTensor(encrypted_input, requires_grad=True) encrypted_loss = getattr(crypten.nn, loss_name)(skip_forward=skip_forward)(encrypted_input, AutogradCrypTensor(encrypted_target)) if (not skip_forward): self._check(encrypted_loss, loss, ('%s failed' % loss_name)) loss.backward() encrypted_loss.backward() self._check(encrypted_input.grad, input.grad, ('%s grad failed' % loss_name)) (batch_size, num_targets) = (16, 5) input = get_random_test_tensor(size=(batch_size, num_targets), is_float=True) target = get_random_test_tensor(size=(batch_size,), max_value=(num_targets - 1)).abs() encrypted_input = crypten.cryptensor(input) encrypted_target = crypten.cryptensor(onehot(target, num_targets=num_targets)) enc_loss_object = crypten.nn.CrossEntropyLoss() self.assertEqual(enc_loss_object.reduction, 'mean', "Reduction used is not 'mean'") loss = torch.nn.CrossEntropyLoss()(input, target) encrypted_loss = crypten.nn.CrossEntropyLoss()(encrypted_input, encrypted_target) self._check(encrypted_loss, loss, 'cross-entropy loss failed') encrypted_loss = crypten.nn.CrossEntropyLoss()(AutogradCrypTensor(encrypted_input), AutogradCrypTensor(encrypted_target)) self._check(encrypted_loss, loss, 'cross-entropy loss failed')<|docstring|>Tests all Losses implemented in crypten.nn.<|endoftext|>
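Note that `crypten.nn.CrossEntropyLoss` in the row above is fed one-hot targets, while `torch.nn.CrossEntropyLoss` takes class indices. A plaintext sketch of the equivalence the test relies on:

```python
import torch
import torch.nn.functional as F

batch_size, num_targets = 16, 5
logits = torch.randn(batch_size, num_targets)
target = torch.randint(0, num_targets, (batch_size,))
one_hot = F.one_hot(target, num_classes=num_targets).float()

# Index-based cross-entropy agrees with its one-hot formulation.
loss_idx = F.cross_entropy(logits, target)
loss_onehot = -(one_hot * F.log_softmax(logits, dim=1)).sum(dim=1).mean()
assert torch.allclose(loss_idx, loss_onehot, atol=1e-6)
```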
7cab97b929a049564bbb96b3252321bf11f1a1f33b08549c7ce62dbe482817ef
def test_getattr_setattr(self): 'Tests the __getattr__ and __setattr__ functions' tensor1 = get_random_test_tensor(size=(3, 3), is_float=True) tensor2 = get_random_test_tensor(size=(3, 3), is_float=True) class ExampleNet(crypten.nn.Module): def __init__(self): super(ExampleNet, self).__init__() self.fc1 = crypten.nn.Linear(20, 1) sample_buffer = tensor1 self.register_buffer('sample_buffer', sample_buffer) sample_param = tensor2 self.register_parameter('sample_param', sample_param) def forward(self, x): out = self.fc1(x) return out model = ExampleNet() model.encrypt() self.assertTrue(('fc1' in model._modules.keys()), 'modules __setattr__ failed') self._check(model.sample_buffer, tensor1, 'buffer __getattr__ failed') self._check(model.sample_param, tensor2, 'parameter __getattr__ failed') self.assertTrue(isinstance(model.fc1, crypten.nn.Linear), 'modules __getattr__ failed')
Tests the __getattr__ and __setattr__ functions
test/test_nn.py
test_getattr_setattr
youben11/CrypTen
2
python
def test_getattr_setattr(self): tensor1 = get_random_test_tensor(size=(3, 3), is_float=True) tensor2 = get_random_test_tensor(size=(3, 3), is_float=True) class ExampleNet(crypten.nn.Module): def __init__(self): super(ExampleNet, self).__init__() self.fc1 = crypten.nn.Linear(20, 1) sample_buffer = tensor1 self.register_buffer('sample_buffer', sample_buffer) sample_param = tensor2 self.register_parameter('sample_param', sample_param) def forward(self, x): out = self.fc1(x) return out model = ExampleNet() model.encrypt() self.assertTrue(('fc1' in model._modules.keys()), 'modules __setattr__ failed') self._check(model.sample_buffer, tensor1, 'buffer __getattr__ failed') self._check(model.sample_param, tensor2, 'parameter __getattr__ failed') self.assertTrue(isinstance(model.fc1, crypten.nn.Linear), 'modules __getattr__ failed')
def test_getattr_setattr(self): tensor1 = get_random_test_tensor(size=(3, 3), is_float=True) tensor2 = get_random_test_tensor(size=(3, 3), is_float=True) class ExampleNet(crypten.nn.Module): def __init__(self): super(ExampleNet, self).__init__() self.fc1 = crypten.nn.Linear(20, 1) sample_buffer = tensor1 self.register_buffer('sample_buffer', sample_buffer) sample_param = tensor2 self.register_parameter('sample_param', sample_param) def forward(self, x): out = self.fc1(x) return out model = ExampleNet() model.encrypt() self.assertTrue(('fc1' in model._modules.keys()), 'modules __setattr__ failed') self._check(model.sample_buffer, tensor1, 'buffer __getattr__ failed') self._check(model.sample_param, tensor2, 'parameter __getattr__ failed') self.assertTrue(isinstance(model.fc1, crypten.nn.Linear), 'modules __getattr__ failed')<|docstring|>Tests the __getattr__ and __setattr__ functions<|endoftext|>
877176cfd54364a58e77f63d7064d0aaa61851442fcc6980b695e5cfde32c119
def test_training(self): '\n Tests training of simple model in crypten.nn.\n ' learning_rate = 0.1 (batch_size, num_inputs, num_intermediate, num_outputs) = (8, 10, 5, 1) model = crypten.nn.Sequential([crypten.nn.Linear(num_inputs, num_intermediate), crypten.nn.ReLU(), crypten.nn.Linear(num_intermediate, num_outputs)]) model.train() model.encrypt() loss = crypten.nn.MSELoss() for _ in range(10): for wrap in [True, False]: input = get_random_test_tensor(size=(batch_size, num_inputs), is_float=True) target = input.mean(dim=1, keepdim=True) input = crypten.cryptensor(input) target = crypten.cryptensor(target) if wrap: input = AutogradCrypTensor(input) target = AutogradCrypTensor(target) output = model(input) loss_value = loss(output, target) model.zero_grad() for param in model.parameters(): self.assertIsNone(param.grad, 'zero_grad did not reset gradients') loss_value.backward() reference = {} reference = self._compute_reference_parameters('', reference, model, learning_rate) model.update_parameters(learning_rate) self._check_reference_parameters('', reference, model)
Tests training of simple model in crypten.nn.
test/test_nn.py
test_training
youben11/CrypTen
2
python
def test_training(self): '\n \n ' learning_rate = 0.1 (batch_size, num_inputs, num_intermediate, num_outputs) = (8, 10, 5, 1) model = crypten.nn.Sequential([crypten.nn.Linear(num_inputs, num_intermediate), crypten.nn.ReLU(), crypten.nn.Linear(num_intermediate, num_outputs)]) model.train() model.encrypt() loss = crypten.nn.MSELoss() for _ in range(10): for wrap in [True, False]: input = get_random_test_tensor(size=(batch_size, num_inputs), is_float=True) target = input.mean(dim=1, keepdim=True) input = crypten.cryptensor(input) target = crypten.cryptensor(target) if wrap: input = AutogradCrypTensor(input) target = AutogradCrypTensor(target) output = model(input) loss_value = loss(output, target) model.zero_grad() for param in model.parameters(): self.assertIsNone(param.grad, 'zero_grad did not reset gradients') loss_value.backward() reference = {} reference = self._compute_reference_parameters('', reference, model, learning_rate) model.update_parameters(learning_rate) self._check_reference_parameters('', reference, model)
def test_training(self): '\n \n ' learning_rate = 0.1 (batch_size, num_inputs, num_intermediate, num_outputs) = (8, 10, 5, 1) model = crypten.nn.Sequential([crypten.nn.Linear(num_inputs, num_intermediate), crypten.nn.ReLU(), crypten.nn.Linear(num_intermediate, num_outputs)]) model.train() model.encrypt() loss = crypten.nn.MSELoss() for _ in range(10): for wrap in [True, False]: input = get_random_test_tensor(size=(batch_size, num_inputs), is_float=True) target = input.mean(dim=1, keepdim=True) input = crypten.cryptensor(input) target = crypten.cryptensor(target) if wrap: input = AutogradCrypTensor(input) target = AutogradCrypTensor(target) output = model(input) loss_value = loss(output, target) model.zero_grad() for param in model.parameters(): self.assertIsNone(param.grad, 'zero_grad did not reset gradients') loss_value.backward() reference = {} reference = self._compute_reference_parameters('', reference, model, learning_rate) model.update_parameters(learning_rate) self._check_reference_parameters('', reference, model)<|docstring|>Tests training of simple model in crypten.nn.<|endoftext|>
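The `update_parameters(learning_rate)` call exercised above corresponds to one step of plain gradient descent. A sketch of that reference update, assuming vanilla SGD with no momentum or weight decay:

```python
import torch

def sgd_step(model: torch.nn.Module, learning_rate: float) -> None:
    # Apply one vanilla SGD update to every parameter with a gradient,
    # mirroring what update_parameters(learning_rate) is checked against.
    with torch.no_grad():
        for param in model.parameters():
            if param.grad is not None:
                param -= learning_rate * param.grad
```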
c498b1a7f55d5fa717d81f1fcda944acae461d818d5110cfeef6aa9522d73aaf
def test_custom_module_training(self): 'Tests training CrypTen models created directly using the crypten.nn.Module' BATCH_SIZE = 32 NUM_FEATURES = 3 class ExampleNet(crypten.nn.Module): def __init__(self): super(ExampleNet, self).__init__() self.fc1 = crypten.nn.Linear(NUM_FEATURES, BATCH_SIZE) self.fc2 = crypten.nn.Linear(BATCH_SIZE, 2) def forward(self, x): out = self.fc1(x) out = self.fc2(out) return out model = ExampleNet() x_orig = get_random_test_tensor(size=(BATCH_SIZE, NUM_FEATURES), is_float=True) y_orig = (2 * x_orig.mean(dim=1)).gt(0).long() y_one_hot = onehot(y_orig, num_targets=2) x_train = AutogradCrypTensor(crypten.cryptensor(x_orig)) y_train = crypten.cryptensor(y_one_hot) for loss_name in ['BCELoss', 'CrossEntropyLoss', 'MSELoss']: loss = getattr(crypten.nn, loss_name)() model.train() model.encrypt() num_epochs = 3 learning_rate = 0.001 for i in range(num_epochs): output = model(x_train) if (loss_name == 'MSELoss'): output_norm = output else: output_norm = output.softmax(1) loss_value = loss(output_norm, y_train) model.zero_grad() for param in model.parameters(): self.assertIsNone(param.grad, 'zero_grad did not reset gradients') loss_value.backward() for param in model.parameters(): if param.requires_grad: self.assertIsNotNone(param.grad, 'required parameter gradient not created') (orig_parameters, upd_parameters) = ({}, {}) orig_parameters = self._compute_reference_parameters('', orig_parameters, model, 0) model.update_parameters(learning_rate) upd_parameters = self._compute_reference_parameters('', upd_parameters, model, learning_rate) parameter_changed = False for (name, value) in orig_parameters.items(): if (param.requires_grad and (param.grad is not None)): unchanged = torch.allclose(upd_parameters[name], value) if (unchanged is False): parameter_changed = True self.assertTrue(parameter_changed, 'no parameter changed in training step') if (i == 0): orig_loss = loss_value.get_plain_text() curr_loss = loss_value.get_plain_text() self.assertTrue((curr_loss.item() < orig_loss.item()), 'loss has not decreased after training')
Tests training CrypTen models created directly using the crypten.nn.Module
test/test_nn.py
test_custom_module_training
youben11/CrypTen
2
python
def test_custom_module_training(self): BATCH_SIZE = 32 NUM_FEATURES = 3 class ExampleNet(crypten.nn.Module): def __init__(self): super(ExampleNet, self).__init__() self.fc1 = crypten.nn.Linear(NUM_FEATURES, BATCH_SIZE) self.fc2 = crypten.nn.Linear(BATCH_SIZE, 2) def forward(self, x): out = self.fc1(x) out = self.fc2(out) return out model = ExampleNet() x_orig = get_random_test_tensor(size=(BATCH_SIZE, NUM_FEATURES), is_float=True) y_orig = (2 * x_orig.mean(dim=1)).gt(0).long() y_one_hot = onehot(y_orig, num_targets=2) x_train = AutogradCrypTensor(crypten.cryptensor(x_orig)) y_train = crypten.cryptensor(y_one_hot) for loss_name in ['BCELoss', 'CrossEntropyLoss', 'MSELoss']: loss = getattr(crypten.nn, loss_name)() model.train() model.encrypt() num_epochs = 3 learning_rate = 0.001 for i in range(num_epochs): output = model(x_train) if (loss_name == 'MSELoss'): output_norm = output else: output_norm = output.softmax(1) loss_value = loss(output_norm, y_train) model.zero_grad() for param in model.parameters(): self.assertIsNone(param.grad, 'zero_grad did not reset gradients') loss_value.backward() for param in model.parameters(): if param.requires_grad: self.assertIsNotNone(param.grad, 'required parameter gradient not created') (orig_parameters, upd_parameters) = ({}, {}) orig_parameters = self._compute_reference_parameters('', orig_parameters, model, 0) model.update_parameters(learning_rate) upd_parameters = self._compute_reference_parameters('', upd_parameters, model, learning_rate) parameter_changed = False for (name, value) in orig_parameters.items(): if (param.requires_grad and (param.grad is not None)): unchanged = torch.allclose(upd_parameters[name], value) if (unchanged is False): parameter_changed = True self.assertTrue(parameter_changed, 'no parameter changed in training step') if (i == 0): orig_loss = loss_value.get_plain_text() curr_loss = loss_value.get_plain_text() self.assertTrue((curr_loss.item() < orig_loss.item()), 'loss has not decreased after training')
def test_custom_module_training(self): BATCH_SIZE = 32 NUM_FEATURES = 3 class ExampleNet(crypten.nn.Module): def __init__(self): super(ExampleNet, self).__init__() self.fc1 = crypten.nn.Linear(NUM_FEATURES, BATCH_SIZE) self.fc2 = crypten.nn.Linear(BATCH_SIZE, 2) def forward(self, x): out = self.fc1(x) out = self.fc2(out) return out model = ExampleNet() x_orig = get_random_test_tensor(size=(BATCH_SIZE, NUM_FEATURES), is_float=True) y_orig = (2 * x_orig.mean(dim=1)).gt(0).long() y_one_hot = onehot(y_orig, num_targets=2) x_train = AutogradCrypTensor(crypten.cryptensor(x_orig)) y_train = crypten.cryptensor(y_one_hot) for loss_name in ['BCELoss', 'CrossEntropyLoss', 'MSELoss']: loss = getattr(crypten.nn, loss_name)() model.train() model.encrypt() num_epochs = 3 learning_rate = 0.001 for i in range(num_epochs): output = model(x_train) if (loss_name == 'MSELoss'): output_norm = output else: output_norm = output.softmax(1) loss_value = loss(output_norm, y_train) model.zero_grad() for param in model.parameters(): self.assertIsNone(param.grad, 'zero_grad did not reset gradients') loss_value.backward() for param in model.parameters(): if param.requires_grad: self.assertIsNotNone(param.grad, 'required parameter gradient not created') (orig_parameters, upd_parameters) = ({}, {}) orig_parameters = self._compute_reference_parameters('', orig_parameters, model, 0) model.update_parameters(learning_rate) upd_parameters = self._compute_reference_parameters('', upd_parameters, model, learning_rate) parameter_changed = False for (name, value) in orig_parameters.items(): if (param.requires_grad and (param.grad is not None)): unchanged = torch.allclose(upd_parameters[name], value) if (unchanged is False): parameter_changed = True self.assertTrue(parameter_changed, 'no parameter changed in training step') if (i == 0): orig_loss = loss_value.get_plain_text() curr_loss = loss_value.get_plain_text() self.assertTrue((curr_loss.item() < orig_loss.item()), 'loss has not decreased after training')<|docstring|>Tests training CrypTen models created directly using the crypten.nn.Module<|endoftext|>
75896f2000603640a2074cb22b00d92334a0d4ef401a45db1d7c67091f4a57bb
def test_from_pytorch_training(self): 'Tests the from_pytorch code path for training CrypTen models' import torch.nn as nn import torch.nn.functional as F class ExampleNet(nn.Module): def __init__(self): super(ExampleNet, self).__init__() self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=1) self.fc1 = nn.Linear(((16 * 13) * 13), 100) self.fc2 = nn.Linear(100, 2) def forward(self, x): out = self.conv1(x) out = F.relu(out) out = F.max_pool2d(out, 2) out = out.view(out.size(0), (- 1)) out = self.fc1(out) out = F.relu(out) out = self.fc2(out) return out model_plaintext = ExampleNet() batch_size = 5 x_orig = get_random_test_tensor(size=(batch_size, 1, 28, 28), is_float=True) y_orig = get_random_test_tensor(size=(batch_size, 1), is_float=True).gt(0).long() y_one_hot = onehot(y_orig, num_targets=2) x_train = AutogradCrypTensor(crypten.cryptensor(x_orig)) y_train = crypten.cryptensor(y_one_hot) dummy_input = torch.empty((1, 1, 28, 28)) for loss_name in ['BCELoss', 'CrossEntropyLoss', 'MSELoss']: loss = getattr(crypten.nn, loss_name)() model = crypten.nn.from_pytorch(model_plaintext, dummy_input) model.train() model.encrypt() num_epochs = 3 learning_rate = 0.001 for i in range(num_epochs): output = model(x_train) if (loss_name == 'MSELoss'): output_norm = output else: output_norm = output.softmax(1) loss_value = loss(output_norm, y_train) model.zero_grad() for param in model.parameters(): self.assertIsNone(param.grad, 'zero_grad did not reset gradients') loss_value.backward() for param in model.parameters(): if param.requires_grad: self.assertIsNotNone(param.grad, 'required parameter gradient not created') (orig_parameters, upd_parameters) = ({}, {}) orig_parameters = self._compute_reference_parameters('', orig_parameters, model, 0) model.update_parameters(learning_rate) upd_parameters = self._compute_reference_parameters('', upd_parameters, model, learning_rate) parameter_changed = False for (name, value) in orig_parameters.items(): if (param.requires_grad and (param.grad is not None)): unchanged = torch.allclose(upd_parameters[name], value) if (unchanged is False): parameter_changed = True self.assertTrue(parameter_changed, 'no parameter changed in training step') if (i == 0): orig_loss = loss_value.get_plain_text() curr_loss = loss_value.get_plain_text() self.assertTrue((curr_loss.item() < orig_loss.item()), 'loss has not decreased after training')
Tests the from_pytorch code path for training CrypTen models
test/test_nn.py
test_from_pytorch_training
youben11/CrypTen
2
python
def test_from_pytorch_training(self): import torch.nn as nn import torch.nn.functional as F class ExampleNet(nn.Module): def __init__(self): super(ExampleNet, self).__init__() self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=1) self.fc1 = nn.Linear(((16 * 13) * 13), 100) self.fc2 = nn.Linear(100, 2) def forward(self, x): out = self.conv1(x) out = F.relu(out) out = F.max_pool2d(out, 2) out = out.view(out.size(0), (- 1)) out = self.fc1(out) out = F.relu(out) out = self.fc2(out) return out model_plaintext = ExampleNet() batch_size = 5 x_orig = get_random_test_tensor(size=(batch_size, 1, 28, 28), is_float=True) y_orig = get_random_test_tensor(size=(batch_size, 1), is_float=True).gt(0).long() y_one_hot = onehot(y_orig, num_targets=2) x_train = AutogradCrypTensor(crypten.cryptensor(x_orig)) y_train = crypten.cryptensor(y_one_hot) dummy_input = torch.empty((1, 1, 28, 28)) for loss_name in ['BCELoss', 'CrossEntropyLoss', 'MSELoss']: loss = getattr(crypten.nn, loss_name)() model = crypten.nn.from_pytorch(model_plaintext, dummy_input) model.train() model.encrypt() num_epochs = 3 learning_rate = 0.001 for i in range(num_epochs): output = model(x_train) if (loss_name == 'MSELoss'): output_norm = output else: output_norm = output.softmax(1) loss_value = loss(output_norm, y_train) model.zero_grad() for param in model.parameters(): self.assertIsNone(param.grad, 'zero_grad did not reset gradients') loss_value.backward() for param in model.parameters(): if param.requires_grad: self.assertIsNotNone(param.grad, 'required parameter gradient not created') (orig_parameters, upd_parameters) = ({}, {}) orig_parameters = self._compute_reference_parameters('', orig_parameters, model, 0) model.update_parameters(learning_rate) upd_parameters = self._compute_reference_parameters('', upd_parameters, model, learning_rate) parameter_changed = False for (name, value) in orig_parameters.items(): if (param.requires_grad and (param.grad is not None)): unchanged = torch.allclose(upd_parameters[name], value) if (unchanged is False): parameter_changed = True self.assertTrue(parameter_changed, 'no parameter changed in training step') if (i == 0): orig_loss = loss_value.get_plain_text() curr_loss = loss_value.get_plain_text() self.assertTrue((curr_loss.item() < orig_loss.item()), 'loss has not decreased after training')
def test_from_pytorch_training(self): import torch.nn as nn import torch.nn.functional as F class ExampleNet(nn.Module): def __init__(self): super(ExampleNet, self).__init__() self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=1) self.fc1 = nn.Linear(((16 * 13) * 13), 100) self.fc2 = nn.Linear(100, 2) def forward(self, x): out = self.conv1(x) out = F.relu(out) out = F.max_pool2d(out, 2) out = out.view(out.size(0), (- 1)) out = self.fc1(out) out = F.relu(out) out = self.fc2(out) return out model_plaintext = ExampleNet() batch_size = 5 x_orig = get_random_test_tensor(size=(batch_size, 1, 28, 28), is_float=True) y_orig = get_random_test_tensor(size=(batch_size, 1), is_float=True).gt(0).long() y_one_hot = onehot(y_orig, num_targets=2) x_train = AutogradCrypTensor(crypten.cryptensor(x_orig)) y_train = crypten.cryptensor(y_one_hot) dummy_input = torch.empty((1, 1, 28, 28)) for loss_name in ['BCELoss', 'CrossEntropyLoss', 'MSELoss']: loss = getattr(crypten.nn, loss_name)() model = crypten.nn.from_pytorch(model_plaintext, dummy_input) model.train() model.encrypt() num_epochs = 3 learning_rate = 0.001 for i in range(num_epochs): output = model(x_train) if (loss_name == 'MSELoss'): output_norm = output else: output_norm = output.softmax(1) loss_value = loss(output_norm, y_train) model.zero_grad() for param in model.parameters(): self.assertIsNone(param.grad, 'zero_grad did not reset gradients') loss_value.backward() for param in model.parameters(): if param.requires_grad: self.assertIsNotNone(param.grad, 'required parameter gradient not created') (orig_parameters, upd_parameters) = ({}, {}) orig_parameters = self._compute_reference_parameters('', orig_parameters, model, 0) model.update_parameters(learning_rate) upd_parameters = self._compute_reference_parameters('', upd_parameters, model, learning_rate) parameter_changed = False for (name, value) in orig_parameters.items(): if (param.requires_grad and (param.grad is not None)): unchanged = torch.allclose(upd_parameters[name], value) if (unchanged is False): parameter_changed = True self.assertTrue(parameter_changed, 'no parameter changed in training step') if (i == 0): orig_loss = loss_value.get_plain_text() curr_loss = loss_value.get_plain_text() self.assertTrue((curr_loss.item() < orig_loss.item()), 'loss has not decreased after training')<|docstring|>Tests the from_pytorch code path for training CrypTen models<|endoftext|>
f9acb577fee3ea08965a4ce018452a89e5c706c2d5915ac22b81f048a8cd6eca
def test_batchnorm_module(self): 'Test module correctly sets and updates running stats' batchnorm_fn_and_size = (('BatchNorm1d', (500, 10, 3)), ('BatchNorm2d', (600, 7, 4, 20)), ('BatchNorm3d', (800, 5, 4, 8, 15))) for (batchnorm_fn, size) in batchnorm_fn_and_size: for is_training in (True, False): tensor = get_random_test_tensor(size=size, is_float=True) tensor.requires_grad = True encrypted_input = AutogradCrypTensor(crypten.cryptensor(tensor)) C = size[1] weight = get_random_test_tensor(size=[C], max_value=1, is_float=True) bias = get_random_test_tensor(size=[C], max_value=1, is_float=True) weight.requires_grad = True bias.requires_grad = True stats_dimensions = list(range(tensor.dim())) stats_dimensions.pop(1) enc_model = getattr(crypten.nn.module, batchnorm_fn)(C).encrypt() plain_model = getattr(torch.nn.modules, batchnorm_fn)(C) stats = ['running_var', 'running_mean'] for stat in stats: self._check(enc_model._buffers[stat], plain_model._buffers[stat], f'{stat} initial module value incorrect') plain_model.training = is_training enc_model.training = is_training enc_model.forward(encrypted_input) plain_model.forward(tensor) for stat in stats: self._check(enc_model._buffers[stat], plain_model._buffers[stat], f'{stat} momentum update in module incorrect')
Test module correctly sets and updates running stats
test/test_nn.py
test_batchnorm_module
youben11/CrypTen
2
python
def test_batchnorm_module(self): batchnorm_fn_and_size = (('BatchNorm1d', (500, 10, 3)), ('BatchNorm2d', (600, 7, 4, 20)), ('BatchNorm3d', (800, 5, 4, 8, 15))) for (batchnorm_fn, size) in batchnorm_fn_and_size: for is_training in (True, False): tensor = get_random_test_tensor(size=size, is_float=True) tensor.requires_grad = True encrypted_input = AutogradCrypTensor(crypten.cryptensor(tensor)) C = size[1] weight = get_random_test_tensor(size=[C], max_value=1, is_float=True) bias = get_random_test_tensor(size=[C], max_value=1, is_float=True) weight.requires_grad = True bias.requires_grad = True stats_dimensions = list(range(tensor.dim())) stats_dimensions.pop(1) enc_model = getattr(crypten.nn.module, batchnorm_fn)(C).encrypt() plain_model = getattr(torch.nn.modules, batchnorm_fn)(C) stats = ['running_var', 'running_mean'] for stat in stats: self._check(enc_model._buffers[stat], plain_model._buffers[stat], f'{stat} initial module value incorrect') plain_model.training = is_training enc_model.training = is_training enc_model.forward(encrypted_input) plain_model.forward(tensor) for stat in stats: self._check(enc_model._buffers[stat], plain_model._buffers[stat], f'{stat} momentum update in module incorrect')
def test_batchnorm_module(self): batchnorm_fn_and_size = (('BatchNorm1d', (500, 10, 3)), ('BatchNorm2d', (600, 7, 4, 20)), ('BatchNorm3d', (800, 5, 4, 8, 15))) for (batchnorm_fn, size) in batchnorm_fn_and_size: for is_training in (True, False): tensor = get_random_test_tensor(size=size, is_float=True) tensor.requires_grad = True encrypted_input = AutogradCrypTensor(crypten.cryptensor(tensor)) C = size[1] weight = get_random_test_tensor(size=[C], max_value=1, is_float=True) bias = get_random_test_tensor(size=[C], max_value=1, is_float=True) weight.requires_grad = True bias.requires_grad = True stats_dimensions = list(range(tensor.dim())) stats_dimensions.pop(1) enc_model = getattr(crypten.nn.module, batchnorm_fn)(C).encrypt() plain_model = getattr(torch.nn.modules, batchnorm_fn)(C) stats = ['running_var', 'running_mean'] for stat in stats: self._check(enc_model._buffers[stat], plain_model._buffers[stat], f'{stat} initial module value incorrect') plain_model.training = is_training enc_model.training = is_training enc_model.forward(encrypted_input) plain_model.forward(tensor) for stat in stats: self._check(enc_model._buffers[stat], plain_model._buffers[stat], f'{stat} momentum update in module incorrect')<|docstring|>Test module correctly sets and updates running stats<|endoftext|>
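The running-stat parity checked above follows from BatchNorm's exponential moving average, running_stat = (1 - momentum) * running_stat + momentum * batch_stat. A small plain-PyTorch sketch of that update (default momentum 0.1; the shapes are illustrative):

import torch

bn = torch.nn.BatchNorm1d(3)  # running_mean starts at zeros, momentum defaults to 0.1
x = torch.randn(500, 3)
bn.train()
bn(x)  # one forward pass in training mode updates the buffers
expected = (1 - bn.momentum) * torch.zeros(3) + bn.momentum * x.mean(dim=0)
assert torch.allclose(bn.running_mean, expected, atol=1e-5)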
5be02682d0908b0a16d18ff87abf7f4005cfcded237aa55afb9208e5510e8dcf
def __init__(self, log_level=10): '\n Use log_level=10 for Debug and log_level=20 for info\n ' self._logger = logging.getLogger(__class__.__name__) self._logger.setLevel(log_level)
Use log_level=10 for Debug and log_level=20 for info
cli/tools/__init__.py
__init__
Dilshod070/mlmltool
0
python
def __init__(self, log_level=10): self._logger = logging.getLogger(__class__.__name__) self._logger.setLevel(log_level)
def __init__(self, log_level=10): self._logger = logging.getLogger(__class__.__name__) self._logger.setLevel(log_level)<|docstring|>Use log_level=10 for Debug and log_level=20 for info<|endoftext|>
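For reference, the numeric levels in the docstring above are the standard logging constants, so callers can pass the named constants instead of magic numbers (a usage sketch; the surrounding class is not shown in this record):

import logging

assert logging.DEBUG == 10 and logging.INFO == 20
logger = logging.getLogger('mlmltool')
logger.setLevel(logging.DEBUG)  # equivalent to passing log_level=10 above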
59d220bbfaf2473c7d641f61fc4ef5e50a32437735baa8f8fce6d884faf26634
def _run_command(self, command: str, *args, wait: bool=True): '\n Runs shell command\n :param command: shell command\n :param args: arguments for shell command\n :param wait: whether to run sync or async\n ' self._logger.info(([command] + list(args))) proc = subprocess.Popen(([command] + list(args))) if wait: proc.wait()
Runs shell command :param command: shell command :param args: arguments for shell command :param wait: whether to run sync or async
cli/tools/__init__.py
_run_command
Dilshod070/mlmltool
0
python
def _run_command(self, command: str, *args, wait: bool=True): '\n Runs shell command\n :param command: shell command\n :param args: arguments for shell command\n :param wait: whether to run sync or async\n ' self._logger.info(([command] + list(args))) proc = subprocess.Popen(([command] + list(args))) if wait: proc.wait()
def _run_command(self, command: str, *args, wait: bool=True): '\n Runs shell command\n :param command: shell command\n :param args: arguments for shell command\n :param wait: whether to run sync or async\n ' self._logger.info(([command] + list(args))) proc = subprocess.Popen(([command] + list(args))) if wait: proc.wait()<|docstring|>Runs shell command :param command: shell command :param args: arguments for shell command :param wait: whether to run sync or async<|endoftext|>
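A standalone sketch of the same pattern as `_run_command` above: subprocess.Popen takes the command and its arguments as a list, and proc.wait() is what makes the call synchronous (the function name and example commands are illustrative):

import logging
import subprocess

logger = logging.getLogger(__name__)

def run_command(command, *args, wait=True):
    argv = [command] + list(args)
    logger.info(argv)
    proc = subprocess.Popen(argv)  # starts the process asynchronously
    if wait:
        proc.wait()  # block until the process exits
    return proc

run_command('echo', 'hello')            # synchronous: returns after `echo hello` exits
run_command('sleep', '5', wait=False)   # asynchronous: returns immediately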
84e71010f4a7c2e66d31aa2725f877ec9c1b4e35bc7b81a3ee2464567b2d0b03
def __init__(self, channel=None, latitude=None, longitude=None, qos=None, _configuration=None): 'CellSiteChannel - a model defined in Swagger' if (_configuration is None): _configuration = Configuration() self._configuration = _configuration self._channel = None self._latitude = None self._longitude = None self._qos = None self.discriminator = None if (channel is not None): self.channel = channel if (latitude is not None): self.latitude = latitude if (longitude is not None): self.longitude = longitude if (qos is not None): self.qos = qos
CellSiteChannel - a model defined in Swagger
Wigle/python-client/swagger_client/models/cell_site_channel.py
__init__
BillReyor/SSIDprobeCollector
1
python
def __init__(self, channel=None, latitude=None, longitude=None, qos=None, _configuration=None): if (_configuration is None): _configuration = Configuration() self._configuration = _configuration self._channel = None self._latitude = None self._longitude = None self._qos = None self.discriminator = None if (channel is not None): self.channel = channel if (latitude is not None): self.latitude = latitude if (longitude is not None): self.longitude = longitude if (qos is not None): self.qos = qos
def __init__(self, channel=None, latitude=None, longitude=None, qos=None, _configuration=None): if (_configuration is None): _configuration = Configuration() self._configuration = _configuration self._channel = None self._latitude = None self._longitude = None self._qos = None self.discriminator = None if (channel is not None): self.channel = channel if (latitude is not None): self.latitude = latitude if (longitude is not None): self.longitude = longitude if (qos is not None): self.qos = qos<|docstring|>CellSiteChannel - a model defined in Swagger<|endoftext|>
1efd083b7a06cc2b5801a5e6e02f01da757a7b7be123415a27d3c197fca94634
@property def channel(self): 'Gets the channel of this CellSiteChannel. # noqa: E501\n\n\n :return: The channel of this CellSiteChannel. # noqa: E501\n :rtype: int\n ' return self._channel
Gets the channel of this CellSiteChannel. # noqa: E501 :return: The channel of this CellSiteChannel. # noqa: E501 :rtype: int
Wigle/python-client/swagger_client/models/cell_site_channel.py
channel
BillReyor/SSIDprobeCollector
1
python
@property def channel(self): 'Gets the channel of this CellSiteChannel. # noqa: E501\n\n\n :return: The channel of this CellSiteChannel. # noqa: E501\n :rtype: int\n ' return self._channel
@property def channel(self): 'Gets the channel of this CellSiteChannel. # noqa: E501\n\n\n :return: The channel of this CellSiteChannel. # noqa: E501\n :rtype: int\n ' return self._channel<|docstring|>Gets the channel of this CellSiteChannel. # noqa: E501 :return: The channel of this CellSiteChannel. # noqa: E501 :rtype: int<|endoftext|>
9eb30a3369cf6a94d04a0c92a8e4a485e9abf1a3f5c4177230e3c30bb5a3d845
@channel.setter def channel(self, channel): 'Sets the channel of this CellSiteChannel.\n\n\n :param channel: The channel of this CellSiteChannel. # noqa: E501\n :type: int\n ' self._channel = channel
Sets the channel of this CellSiteChannel. :param channel: The channel of this CellSiteChannel. # noqa: E501 :type: int
Wigle/python-client/swagger_client/models/cell_site_channel.py
channel
BillReyor/SSIDprobeCollector
1
python
@channel.setter def channel(self, channel): 'Sets the channel of this CellSiteChannel.\n\n\n :param channel: The channel of this CellSiteChannel. # noqa: E501\n :type: int\n ' self._channel = channel
@channel.setter def channel(self, channel): 'Sets the channel of this CellSiteChannel.\n\n\n :param channel: The channel of this CellSiteChannel. # noqa: E501\n :type: int\n ' self._channel = channel<|docstring|>Sets the channel of this CellSiteChannel. :param channel: The channel of this CellSiteChannel. # noqa: E501 :type: int<|endoftext|>
0152a4d2ac029637983aad71c44ba8ed67181aeeca8df32928afd3a80b300073
@property def latitude(self): 'Gets the latitude of this CellSiteChannel. # noqa: E501\n\n\n :return: The latitude of this CellSiteChannel. # noqa: E501\n :rtype: float\n ' return self._latitude
Gets the latitude of this CellSiteChannel. # noqa: E501 :return: The latitude of this CellSiteChannel. # noqa: E501 :rtype: float
Wigle/python-client/swagger_client/models/cell_site_channel.py
latitude
BillReyor/SSIDprobeCollector
1
python
@property def latitude(self): 'Gets the latitude of this CellSiteChannel. # noqa: E501\n\n\n :return: The latitude of this CellSiteChannel. # noqa: E501\n :rtype: float\n ' return self._latitude
@property def latitude(self): 'Gets the latitude of this CellSiteChannel. # noqa: E501\n\n\n :return: The latitude of this CellSiteChannel. # noqa: E501\n :rtype: float\n ' return self._latitude<|docstring|>Gets the latitude of this CellSiteChannel. # noqa: E501 :return: The latitude of this CellSiteChannel. # noqa: E501 :rtype: float<|endoftext|>
fccff7cd1ee9ba688b7f12f8b97001b3ff25322867fb42c39e41691b43904907
@latitude.setter def latitude(self, latitude): 'Sets the latitude of this CellSiteChannel.\n\n\n :param latitude: The latitude of this CellSiteChannel. # noqa: E501\n :type: float\n ' self._latitude = latitude
Sets the latitude of this CellSiteChannel. :param latitude: The latitude of this CellSiteChannel. # noqa: E501 :type: float
Wigle/python-client/swagger_client/models/cell_site_channel.py
latitude
BillReyor/SSIDprobeCollector
1
python
@latitude.setter def latitude(self, latitude): 'Sets the latitude of this CellSiteChannel.\n\n\n :param latitude: The latitude of this CellSiteChannel. # noqa: E501\n :type: float\n ' self._latitude = latitude
@latitude.setter def latitude(self, latitude): 'Sets the latitude of this CellSiteChannel.\n\n\n :param latitude: The latitude of this CellSiteChannel. # noqa: E501\n :type: float\n ' self._latitude = latitude<|docstring|>Sets the latitude of this CellSiteChannel. :param latitude: The latitude of this CellSiteChannel. # noqa: E501 :type: float<|endoftext|>
63374f30f58bc94fb2386de19ae52b212a8ee8fad14c2b98b34f24805d653227
@property def longitude(self): 'Gets the longitude of this CellSiteChannel. # noqa: E501\n\n\n :return: The longitude of this CellSiteChannel. # noqa: E501\n :rtype: float\n ' return self._longitude
Gets the longitude of this CellSiteChannel. # noqa: E501 :return: The longitude of this CellSiteChannel. # noqa: E501 :rtype: float
Wigle/python-client/swagger_client/models/cell_site_channel.py
longitude
BillReyor/SSIDprobeCollector
1
python
@property def longitude(self): 'Gets the longitude of this CellSiteChannel. # noqa: E501\n\n\n :return: The longitude of this CellSiteChannel. # noqa: E501\n :rtype: float\n ' return self._longitude
@property def longitude(self): 'Gets the longitude of this CellSiteChannel. # noqa: E501\n\n\n :return: The longitude of this CellSiteChannel. # noqa: E501\n :rtype: float\n ' return self._longitude<|docstring|>Gets the longitude of this CellSiteChannel. # noqa: E501 :return: The longitude of this CellSiteChannel. # noqa: E501 :rtype: float<|endoftext|>
8f3297441f1b239dfd51d94022960ce8ce992da623b96439c49073509096ba76
@longitude.setter def longitude(self, longitude): 'Sets the longitude of this CellSiteChannel.\n\n\n :param longitude: The longitude of this CellSiteChannel. # noqa: E501\n :type: float\n ' self._longitude = longitude
Sets the longitude of this CellSiteChannel. :param longitude: The longitude of this CellSiteChannel. # noqa: E501 :type: float
Wigle/python-client/swagger_client/models/cell_site_channel.py
longitude
BillReyor/SSIDprobeCollector
1
python
@longitude.setter def longitude(self, longitude): 'Sets the longitude of this CellSiteChannel.\n\n\n :param longitude: The longitude of this CellSiteChannel. # noqa: E501\n :type: float\n ' self._longitude = longitude
@longitude.setter def longitude(self, longitude): 'Sets the longitude of this CellSiteChannel.\n\n\n :param longitude: The longitude of this CellSiteChannel. # noqa: E501\n :type: float\n ' self._longitude = longitude<|docstring|>Sets the longitude of this CellSiteChannel. :param longitude: The longitude of this CellSiteChannel. # noqa: E501 :type: float<|endoftext|>
a8ada354ffadb746d06cb7867a07a9945636b21b859545916b2ca1344b01de7c
@property def qos(self): 'Gets the qos of this CellSiteChannel. # noqa: E501\n\n\n :return: The qos of this CellSiteChannel. # noqa: E501\n :rtype: int\n ' return self._qos
Gets the qos of this CellSiteChannel. # noqa: E501 :return: The qos of this CellSiteChannel. # noqa: E501 :rtype: int
Wigle/python-client/swagger_client/models/cell_site_channel.py
qos
BillReyor/SSIDprobeCollector
1
python
@property def qos(self): 'Gets the qos of this CellSiteChannel. # noqa: E501\n\n\n :return: The qos of this CellSiteChannel. # noqa: E501\n :rtype: int\n ' return self._qos
@property def qos(self): 'Gets the qos of this CellSiteChannel. # noqa: E501\n\n\n :return: The qos of this CellSiteChannel. # noqa: E501\n :rtype: int\n ' return self._qos<|docstring|>Gets the qos of this CellSiteChannel. # noqa: E501 :return: The qos of this CellSiteChannel. # noqa: E501 :rtype: int<|endoftext|>
ad96be938a1b576cb0e1871d071cafdc1f94205ab12f90a616f6230fc122ab18
@qos.setter def qos(self, qos): 'Sets the qos of this CellSiteChannel.\n\n\n :param qos: The qos of this CellSiteChannel. # noqa: E501\n :type: int\n ' self._qos = qos
Sets the qos of this CellSiteChannel. :param qos: The qos of this CellSiteChannel. # noqa: E501 :type: int
Wigle/python-client/swagger_client/models/cell_site_channel.py
qos
BillReyor/SSIDprobeCollector
1
python
@qos.setter def qos(self, qos): 'Sets the qos of this CellSiteChannel.\n\n\n :param qos: The qos of this CellSiteChannel. # noqa: E501\n :type: int\n ' self._qos = qos
@qos.setter def qos(self, qos): 'Sets the qos of this CellSiteChannel.\n\n\n :param qos: The qos of this CellSiteChannel. # noqa: E501\n :type: int\n ' self._qos = qos<|docstring|>Sets the qos of this CellSiteChannel. :param qos: The qos of this CellSiteChannel. # noqa: E501 :type: int<|endoftext|>
efd746c332d5a2ee3d490dffd5287a3b799d8a97580a25ccb938a65caf8b4c29
def to_dict(self): 'Returns the model properties as a dict' result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value if issubclass(CellSiteChannel, dict): for (key, value) in self.items(): result[key] = value return result
Returns the model properties as a dict
Wigle/python-client/swagger_client/models/cell_site_channel.py
to_dict
BillReyor/SSIDprobeCollector
1
python
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value if issubclass(CellSiteChannel, dict): for (key, value) in self.items(): result[key] = value return result
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value if issubclass(CellSiteChannel, dict): for (key, value) in self.items(): result[key] = value return result<|docstring|>Returns the model properties as a dict<|endoftext|>
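A usage sketch for the generated Swagger model above, assuming the class defines the usual `swagger_types` mapping over its four attributes (the coordinate values are made up; key order follows `swagger_types`):

site = CellSiteChannel(channel=11, latitude=42.36, longitude=-71.06, qos=3)
print(site.to_dict())  # -> {'channel': 11, 'latitude': 42.36, 'longitude': -71.06, 'qos': 3}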
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99
def to_str(self): 'Returns the string representation of the model' return pprint.pformat(self.to_dict())
Returns the string representation of the model
Wigle/python-client/swagger_client/models/cell_site_channel.py
to_str
BillReyor/SSIDprobeCollector
1
python
def to_str(self): return pprint.pformat(self.to_dict())
def to_str(self): return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|>
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703
def __repr__(self): 'For `print` and `pprint`' return self.to_str()
For `print` and `pprint`
Wigle/python-client/swagger_client/models/cell_site_channel.py
__repr__
BillReyor/SSIDprobeCollector
1
python
def __repr__(self): return self.to_str()
def __repr__(self): return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|>
8065b5258c674b1f83c6aa2f6599da00cbeda3bc604db8f97ed697474982334a
def __eq__(self, other): 'Returns true if both objects are equal' if (not isinstance(other, CellSiteChannel)): return False return (self.to_dict() == other.to_dict())
Returns true if both objects are equal
Wigle/python-client/swagger_client/models/cell_site_channel.py
__eq__
BillReyor/SSIDprobeCollector
1
python
def __eq__(self, other): if (not isinstance(other, CellSiteChannel)): return False return (self.to_dict() == other.to_dict())
def __eq__(self, other): if (not isinstance(other, CellSiteChannel)): return False return (self.to_dict() == other.to_dict())<|docstring|>Returns true if both objects are equal<|endoftext|>
b3f9de9f2c79ba4213a6bfcfa63588bc31e5910ea0563db072ed9185bec8f191
def __ne__(self, other): 'Returns true if both objects are not equal' if (not isinstance(other, CellSiteChannel)): return True return (self.to_dict() != other.to_dict())
Returns true if both objects are not equal
Wigle/python-client/swagger_client/models/cell_site_channel.py
__ne__
BillReyor/SSIDprobeCollector
1
python
def __ne__(self, other): if (not isinstance(other, CellSiteChannel)): return True return (self.to_dict() != other.to_dict())
def __ne__(self, other): if (not isinstance(other, CellSiteChannel)): return True return (self.to_dict() != other.to_dict())<|docstring|>Returns true if both objects are not equal<|endoftext|>
8e5bf121d375f16923a9a6322dd450088b66ed5dd3d1e8d6fc20f23016020402
def eval_masks(args, model, dataset, root): 'Evaluate masks to produce mAP (and PSNR) scores.' root = os.path.join(root, 'masks') os.makedirs(root) maskloader = MaskLoader(dataset=dataset) image_ids = evaluation.utils.sample_linear(dataset.img_ids_test, args.masks_n_samples)[0] results = evaluation.evaluate(dataset, model, maskloader, vis_i=1, save_dir=root, save=True, vid=args.vid, image_ids=image_ids)
Evaluate masks to produce mAP (and PSNR) scores.
evaluate.py
eval_masks
dichotomies/NeuralDiff
5
python
def eval_masks(args, model, dataset, root): root = os.path.join(root, 'masks') os.makedirs(root) maskloader = MaskLoader(dataset=dataset) image_ids = evaluation.utils.sample_linear(dataset.img_ids_test, args.masks_n_samples)[0] results = evaluation.evaluate(dataset, model, maskloader, vis_i=1, save_dir=root, save=True, vid=args.vid, image_ids=image_ids)
def eval_masks(args, model, dataset, root): root = os.path.join(root, 'masks') os.makedirs(root) maskloader = MaskLoader(dataset=dataset) image_ids = evaluation.utils.sample_linear(dataset.img_ids_test, args.masks_n_samples)[0] results = evaluation.evaluate(dataset, model, maskloader, vis_i=1, save_dir=root, save=True, vid=args.vid, image_ids=image_ids)<|docstring|>Evaluate masks to produce mAP (and PSNR) scores.<|endoftext|>
2981229e35774246cac4bcef4b5a10e7179c2d90c626bedd56c9a791b1d70256
def eval_masks_average(args): 'Calculate average of `eval_masks` results for all 10 scenes.' scores = [] for vid in VIDEO_IDS: path_metrics = os.path.join('results', args.exp, vid, 'masks', 'metrics.txt') with open(path_metrics) as f: lines = f.readlines() (score_map, score_psnr) = [float(s) for s in lines[2].split('\t')[:2]] scores.append([score_map, score_psnr]) scores = np.array(scores).mean(axis=0) print('Average for all 10 scenes:') print(f'mAP: {(scores[0] * 100).round(2)}, PSNR: {scores[1].round(2)}')
Calculate average of `eval_masks` results for all 10 scenes.
evaluate.py
eval_masks_average
dichotomies/NeuralDiff
5
python
def eval_masks_average(args): scores = [] for vid in VIDEO_IDS: path_metrics = os.path.join('results', args.exp, vid, 'masks', 'metrics.txt') with open(path_metrics) as f: lines = f.readlines() (score_map, score_psnr) = [float(s) for s in lines[2].split('\t')[:2]] scores.append([score_map, score_psnr]) scores = np.array(scores).mean(axis=0) print('Average for all 10 scenes:') print(f'mAP: {(scores[0] * 100).round(2)}, PSNR: {scores[1].round(2)}')
def eval_masks_average(args): scores = [] for vid in VIDEO_IDS: path_metrics = os.path.join('results', args.exp, vid, 'masks', 'metrics.txt') with open(path_metrics) as f: lines = f.readlines() (score_map, score_psnr) = [float(s) for s in lines[2].split('\t')[:2]] scores.append([score_map, score_psnr]) scores = np.array(scores).mean(axis=0) print('Average for all 10 scenes:') print(f'mAP: {(scores[0] * 100).round(2)}, PSNR: {scores[1].round(2)}')<|docstring|>Calculate average of `eval_masks` results for all 10 scenes.<|endoftext|>
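A minimal sketch of the aggregation above, assuming each metrics.txt stores tab-separated mAP and PSNR as the first two fields of its third line (the experiment name and video ids are illustrative):

import numpy as np

def read_metrics(path):
    with open(path) as f:
        lines = f.readlines()
    score_map, score_psnr = (float(s) for s in lines[2].split('\t')[:2])
    return score_map, score_psnr

paths = [f'results/exp/{vid}/masks/metrics.txt' for vid in ['P01_01', 'P03_04']]
mean_map, mean_psnr = np.array([read_metrics(p) for p in paths]).mean(axis=0)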
9025447ef7eb4f7008af3c9bdf19720d02cd0199b95689a1ffdf62ae1d81b48c
def render_video(args, model, dataset, root, save_cache=False): 'Render a summary video like shown on the project page.' root = os.path.join(root, 'summary') os.makedirs(root) sid = SAMPLE_IDS[args.vid] top = evaluation.video.render(dataset, model, n_images=args.summary_n_samples) bot = evaluation.video.render(dataset, model, sid, n_images=args.summary_n_samples) if save_cache: evaluation.video.save_to_cache(args.vid, sid, root=root, top=top, bot=bot) ims_cat = [evaluation.video.convert_rgb(evaluation.video.cat_sample(top[k], bot[k])) for k in bot.keys()] utils.write_mp4(f'{root}/cat-{sid}-N{len(ims_cat)}', ims_cat)
Render a summary video like shown on the project page.
evaluate.py
render_video
dichotomies/NeuralDiff
5
python
def render_video(args, model, dataset, root, save_cache=False): root = os.path.join(root, 'summary') os.makedirs(root) sid = SAMPLE_IDS[args.vid] top = evaluation.video.render(dataset, model, n_images=args.summary_n_samples) bot = evaluation.video.render(dataset, model, sid, n_images=args.summary_n_samples) if save_cache: evaluation.video.save_to_cache(args.vid, sid, root=root, top=top, bot=bot) ims_cat = [evaluation.video.convert_rgb(evaluation.video.cat_sample(top[k], bot[k])) for k in bot.keys()] utils.write_mp4(f'{root}/cat-{sid}-N{len(ims_cat)}', ims_cat)
def render_video(args, model, dataset, root, save_cache=False): root = os.path.join(root, 'summary') os.makedirs(root) sid = SAMPLE_IDS[args.vid] top = evaluation.video.render(dataset, model, n_images=args.summary_n_samples) bot = evaluation.video.render(dataset, model, sid, n_images=args.summary_n_samples) if save_cache: evaluation.video.save_to_cache(args.vid, sid, root=root, top=top, bot=bot) ims_cat = [evaluation.video.convert_rgb(evaluation.video.cat_sample(top[k], bot[k])) for k in bot.keys()] utils.write_mp4(f'{root}/cat-{sid}-N{len(ims_cat)}', ims_cat)<|docstring|>Render a summary video like shown on the project page.<|endoftext|>
38142ea211dc3abe71f7c17079765d4fd43b783e989b4e61065057d91b04ffd7
def check_none(val, default): 'Check whether an input is None, and if so return a default object.\n\n Parameters\n ----------\n val : collection object or None\n An object to check whether is None.\n default : collection object\n What to default to if `val` is None.\n\n Returns\n -------\n collection object\n Either the original input item, or the default input.\n\n Notes\n -----\n This function is used to catch unused inputs (that end up as None), before they\n are passed into subsequent functions that presume collection objects.\n ' return (val if val else default)
Check whether an input is None, and if so return a default object. Parameters ---------- val : collection object or None An object to check whether is None. default : collection object What to default to if `val` is None. Returns ------- collection object Either the original input item, or the default input. Notes ----- This function is used to catch unused inputs (that end up as None), before they are passed into subsequent functions that presume collection objects.
lisc/urls/utils.py
check_none
ryanhammonds/lisc
1
python
def check_none(val, default): 'Check whether an input is None, and if so return a default object.\n\n Parameters\n ----------\n val : collection object or None\n An object to check whether is None.\n default : collection object\n What to default to if `val` is None.\n\n Returns\n -------\n collection object\n Either the original input item, or the default input.\n\n Notes\n -----\n This function is used to catch unused inputs (that end up as None), before they\n are passed into subsequent functions that presume collection objects.\n ' return (val if val else default)
def check_none(val, default): 'Check whether an input is None, and if so return a default object.\n\n Parameters\n ----------\n val : collection object or None\n An object to check whether is None.\n default : collection object\n What to default to if `val` is None.\n\n Returns\n -------\n collection object\n Either the original input item, or the default input.\n\n Notes\n -----\n This function is used to catch unused inputs (that end up as None), before they\n are passed into subsequent functions that presume collection objects.\n ' return (val if val else default)<|docstring|>Check whether an input is None, and if so return a default object. Parameters ---------- val : collection object or None An object to check whether is None. default : collection object What to default to if `val` is None. Returns ------- collection object Either the original input item, or the default input. Notes ----- This function is used to catch unused inputs (that end up as None), before they are passed into subsequent functions that presume collection objects.<|endoftext|>
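Because `check_none` tests truthiness rather than `val is None`, empty collections also fall back to the default; a short usage sketch:

check_none(None, [])          # -> []
check_none(['term'], [])      # -> ['term']
check_none([], ['fallback'])  # -> ['fallback'], even though [] is not None
check_none({}, {'db': 'x'})   # -> {'db': 'x'}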
8713c22df238354c48105a16406afa99ad65ff6ede3c2b86e100d571ecd90e71
def prepend(string, prefix): 'Add something to the beginning of another string.\n\n Parameters\n ----------\n string : str\n String to prepend to.\n prefix : str\n String to add to the beginning.\n\n Returns\n -------\n str\n String with the addition to the beginning.\n\n Notes\n -----\n This function deals with empty inputs, and returns an empty string in that case.\n ' return ((prefix + string) if string else string)
Add something to the beginning of another string. Parameters ---------- string : str String to prepend to. prefix : str String to add to the beginning. Returns ------- str String with the addition to the beginning. Notes ----- This function deals with empty inputs, and returns an empty string in that case.
lisc/urls/utils.py
prepend
ryanhammonds/lisc
1
python
def prepend(string, prefix): 'Add something to the beginning of another string.\n\n Parameters\n ----------\n string : str\n String to prepend to.\n prefix : str\n String to add to the beginning.\n\n Returns\n -------\n str\n String with the addition to the beginning.\n\n Notes\n -----\n This function deals with empty inputs, and returns an empty string in that case.\n ' return ((prefix + string) if string else string)
def prepend(string, prefix): 'Add something to the beginning of another string.\n\n Parameters\n ----------\n string : str\n String to prepend to.\n prefix : str\n String to add to the beginning.\n\n Returns\n -------\n str\n String with the addition to the beginning.\n\n Notes\n -----\n This function deals with empty inputs, and returns an empty string in that case.\n ' return ((prefix + string) if string else string)<|docstring|>Add something to the beginning of another string. Parameters ---------- string : str String to prepend to. prefix : str String to add to the beginning. Returns ------- str String with the addition to the beginning. Notes ----- This function deals with empty inputs, and returns an empty string in that case.<|endoftext|>
4a46560074a7b6218e96dc9a54010e5ef2a27f7ffdf26526966b6cc4fac82792
def make_segments(segments): 'Make the segments portion of a URL.\n\n Parameters\n ----------\n segments : list of str\n Segments to use to create the segments string for a URL.\n\n Returns\n -------\n str\n Segments for a URL.\n ' return prepend('/'.join(segments), '/')
Make the segments portion of a URL. Parameters ---------- segments : list of str Segments to use to create the segments string for a URL. Returns ------- str Segments for a URL.
lisc/urls/utils.py
make_segments
ryanhammonds/lisc
1
python
def make_segments(segments): 'Make the segments portion of a URL.\n\n Parameters\n ----------\n segments : list of str\n Segments to use to create the segments string for a URL.\n\n Returns\n -------\n str\n Segments for a URL.\n ' return prepend('/'.join(segments), '/')
def make_segments(segments): 'Make the segments portion of a URL.\n\n Parameters\n ----------\n segments : list of str\n Segments to use to create the segments string for a URL.\n\n Returns\n -------\n str\n Segments for a URL.\n ' return prepend('/'.join(segments), '/')<|docstring|>Make the segments portion of a URL. Parameters ---------- segments : list of str Segments to use to create the segments string for a URL. Returns ------- str Segments for a URL.<|endoftext|>
49827c99f46937d9e3e814080b27fd892ccc7a5af9e8895a8fca99c995c24e17
def make_settings(settings, prefix='?'): 'Make the settings portion of a URL.\n\n Parameters\n ----------\n settings : dict\n Settings to use to create the settings string for a URL.\n prefix : str\n String to add to the beginning.\n\n Returns\n -------\n str\n Setting for a URL.\n ' return prepend('&'.join([((ke + '=') + va) for (ke, va) in settings.items()]), prefix)
Make the settings portion of a URL. Parameters ---------- settings : dict Settings to use to create the settings string for a URL. prefix : str String to add to the beginning. Returns ------- str Setting for a URL.
lisc/urls/utils.py
make_settings
ryanhammonds/lisc
1
python
def make_settings(settings, prefix='?'): 'Make the settings portion of a URL.\n\n Parameters\n ----------\n settings : dict\n Settings to use to create the settings string for a URL.\n prefix : str\n String to add to the beginning.\n\n Returns\n -------\n str\n Setting for a URL.\n ' return prepend('&'.join([((ke + '=') + va) for (ke, va) in settings.items()]), prefix)
def make_settings(settings, prefix='?'): 'Make the settings portion of a URL.\n\n Parameters\n ----------\n settings : dict\n Settings to use to create the settings string for a URL.\n prefix : str\n String to add to the beginning.\n\n Returns\n -------\n str\n Setting for a URL.\n ' return prepend('&'.join([((ke + '=') + va) for (ke, va) in settings.items()]), prefix)<|docstring|>Make the settings portion of a URL. Parameters ---------- settings : dict Settings to use to create the settings string for a URL. prefix : str String to add to the beginning. Returns ------- str Setting for a URL.<|endoftext|>
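Putting the helpers above together, a full URL is just base + segments + settings; a sketch with a made-up base URL and parameters (note make_settings concatenates key and value, so values must already be strings):

base = 'https://api.example.com'
url = base + make_segments(['entrez', 'eutils']) + make_settings({'db': 'pubmed', 'retmax': '5'})
# -> 'https://api.example.com/entrez/eutils?db=pubmed&retmax=5'
make_segments([])  # -> '' (prepend leaves empty input unchanged)
make_settings({})  # -> ''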
af8886982bb75e97d3d125c607cbc3d8b1cfd3ea849c083223750df616e39f4a
def _compute_attention(attention_mechanism, cell_output, attention_state, attention_layer, prev_max_attentions): 'Computes the attention and alignments for a given attention_mechanism.' (alignments, next_attention_state, max_attentions) = attention_mechanism(cell_output, state=attention_state, prev_max_attentions=prev_max_attentions) expanded_alignments = array_ops.expand_dims(alignments, 1) context = math_ops.matmul(expanded_alignments, attention_mechanism.values) context = array_ops.squeeze(context, [1]) if (attention_layer is not None): attention = attention_layer(array_ops.concat([cell_output, context], 1)) else: attention = context return (attention, alignments, next_attention_state, max_attentions)
Computes the attention and alignments for a given attention_mechanism.
tacotron/models/attention.py
_compute_attention
huangdou123/huangdou
2,154
python
def _compute_attention(attention_mechanism, cell_output, attention_state, attention_layer, prev_max_attentions): (alignments, next_attention_state, max_attentions) = attention_mechanism(cell_output, state=attention_state, prev_max_attentions=prev_max_attentions) expanded_alignments = array_ops.expand_dims(alignments, 1) context = math_ops.matmul(expanded_alignments, attention_mechanism.values) context = array_ops.squeeze(context, [1]) if (attention_layer is not None): attention = attention_layer(array_ops.concat([cell_output, context], 1)) else: attention = context return (attention, alignments, next_attention_state, max_attentions)
def _compute_attention(attention_mechanism, cell_output, attention_state, attention_layer, prev_max_attentions): (alignments, next_attention_state, max_attentions) = attention_mechanism(cell_output, state=attention_state, prev_max_attentions=prev_max_attentions) expanded_alignments = array_ops.expand_dims(alignments, 1) context = math_ops.matmul(expanded_alignments, attention_mechanism.values) context = array_ops.squeeze(context, [1]) if (attention_layer is not None): attention = attention_layer(array_ops.concat([cell_output, context], 1)) else: attention = context return (attention, alignments, next_attention_state, max_attentions)<|docstring|>Computes the attention and alignments for a given attention_mechanism.<|endoftext|>
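A NumPy sketch of the context computation in `_compute_attention` above: the context vector is the alignment-weighted sum of the memory, done as a batched matmul (the shapes and random values are illustrative):

import numpy as np

batch, max_time, units = 2, 5, 8
alignments = np.random.dirichlet(np.ones(max_time), size=batch)  # [batch, max_time], rows sum to 1
values = np.random.randn(batch, max_time, units)                 # encoder outputs (the memory)
context = np.squeeze(np.expand_dims(alignments, 1) @ values, axis=1)  # [batch, 1, max_time] @ [batch, max_time, units]
assert context.shape == (batch, units)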
84e642753a35621d5fdf7106053b78adc012201037b3dce3666b59cbc8a9d82e
def _location_sensitive_score(W_query, W_fil, W_keys): "Implements Bahdanau-style (cumulative) scoring function.\n\tThis attention is described in:\n\t\tJ. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben-\n\t gio, “Attention-based models for speech recognition,” in Ad-\n\t vances in Neural Information Processing Systems, 2015, pp.\n\t 577–585.\n\n\t#############################################################################\n\t\t\t hybrid attention (content-based + location-based)\n\t\t\t\t\t\t\t f = F * α_{i-1}\n\t energy = dot(v_a, tanh(W_keys(h_enc) + W_query(h_dec) + W_fil(f) + b_a))\n\t#############################################################################\n\n\tArgs:\n\t\tW_query: Tensor, shape '[batch_size, 1, attention_dim]' to compare to location features.\n\t\tW_fil: processed previous alignments into location features, shape '[batch_size, max_time, attention_dim]'\n\t\tW_keys: Tensor, shape '[batch_size, max_time, attention_dim]', typically the encoder outputs.\n\tReturns:\n\t\tA '[batch_size, max_time]' attention score (energy)\n\t" dtype = W_query.dtype num_units = (W_keys.shape[(- 1)].value or array_ops.shape(W_keys)[(- 1)]) v_a = tf.get_variable('attention_variable_projection', shape=[num_units], dtype=dtype, initializer=tf.contrib.layers.xavier_initializer()) b_a = tf.get_variable('attention_bias', shape=[num_units], dtype=dtype, initializer=tf.zeros_initializer()) return tf.reduce_sum((v_a * tf.tanh((((W_keys + W_query) + W_fil) + b_a))), [2])
Implements Bahdanau-style (cumulative) scoring function. This attention is described in: J. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben- gio, “Attention-based models for speech recognition,” in Ad- vances in Neural Information Processing Systems, 2015, pp. 577–585. ############################################################################# hybrid attention (content-based + location-based) f = F * α_{i-1} energy = dot(v_a, tanh(W_keys(h_enc) + W_query(h_dec) + W_fil(f) + b_a)) ############################################################################# Args: W_query: Tensor, shape '[batch_size, 1, attention_dim]' to compare to location features. W_fil: processed previous alignments into location features, shape '[batch_size, max_time, attention_dim]' W_keys: Tensor, shape '[batch_size, max_time, attention_dim]', typically the encoder outputs. Returns: A '[batch_size, max_time]' attention score (energy)
tacotron/models/attention.py
_location_sensitive_score
huangdou123/huangdou
2,154
python
def _location_sensitive_score(W_query, W_fil, W_keys): "Implements Bahdanau-style (cumulative) scoring function.\n\tThis attention is described in:\n\t\tJ. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben-\n\t gio, “Attention-based models for speech recognition,” in Ad-\n\t vances in Neural Information Processing Systems, 2015, pp.\n\t 577–585.\n\n\t#############################################################################\n\t\t\t hybrid attention (content-based + location-based)\n\t\t\t\t\t\t\t f = F * α_{i-1}\n\t energy = dot(v_a, tanh(W_keys(h_enc) + W_query(h_dec) + W_fil(f) + b_a))\n\t#############################################################################\n\n\tArgs:\n\t\tW_query: Tensor, shape '[batch_size, 1, attention_dim]' to compare to location features.\n\t\tW_fil: processed previous alignments into location features, shape '[batch_size, max_time, attention_dim]'\n\t\tW_keys: Tensor, shape '[batch_size, max_time, attention_dim]', typically the encoder outputs.\n\tReturns:\n\t\tA '[batch_size, max_time]' attention score (energy)\n\t" dtype = W_query.dtype num_units = (W_keys.shape[(- 1)].value or array_ops.shape(W_keys)[(- 1)]) v_a = tf.get_variable('attention_variable_projection', shape=[num_units], dtype=dtype, initializer=tf.contrib.layers.xavier_initializer()) b_a = tf.get_variable('attention_bias', shape=[num_units], dtype=dtype, initializer=tf.zeros_initializer()) return tf.reduce_sum((v_a * tf.tanh((((W_keys + W_query) + W_fil) + b_a))), [2])
def _location_sensitive_score(W_query, W_fil, W_keys): "Implements Bahdanau-style (cumulative) scoring function.\n\tThis attention is described in:\n\t\tJ. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben-\n\t gio, “Attention-based models for speech recognition,” in Ad-\n\t vances in Neural Information Processing Systems, 2015, pp.\n\t 577–585.\n\n\t#############################################################################\n\t\t\t hybrid attention (content-based + location-based)\n\t\t\t\t\t\t\t f = F * α_{i-1}\n\t energy = dot(v_a, tanh(W_keys(h_enc) + W_query(h_dec) + W_fil(f) + b_a))\n\t#############################################################################\n\n\tArgs:\n\t\tW_query: Tensor, shape '[batch_size, 1, attention_dim]' to compare to location features.\n\t\tW_fil: processed previous alignments into location features, shape '[batch_size, max_time, attention_dim]'\n\t\tW_keys: Tensor, shape '[batch_size, max_time, attention_dim]', typically the encoder outputs.\n\tReturns:\n\t\tA '[batch_size, max_time]' attention score (energy)\n\t" dtype = W_query.dtype num_units = (W_keys.shape[(- 1)].value or array_ops.shape(W_keys)[(- 1)]) v_a = tf.get_variable('attention_variable_projection', shape=[num_units], dtype=dtype, initializer=tf.contrib.layers.xavier_initializer()) b_a = tf.get_variable('attention_bias', shape=[num_units], dtype=dtype, initializer=tf.zeros_initializer()) return tf.reduce_sum((v_a * tf.tanh((((W_keys + W_query) + W_fil) + b_a))), [2])<|docstring|>Implements Bahdanau-style (cumulative) scoring function. This attention is described in: J. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben- gio, “Attention-based models for speech recognition,” in Ad- vances in Neural Information Processing Systems, 2015, pp. 577–585. ############################################################################# hybrid attention (content-based + location-based) f = F * α_{i-1} energy = dot(v_a, tanh(W_keys(h_enc) + W_query(h_dec) + W_fil(f) + b_a)) ############################################################################# Args: W_query: Tensor, shape '[batch_size, 1, attention_dim]' to compare to location features. W_fil: processed previous alignments into location features, shape '[batch_size, max_time, attention_dim]' W_keys: Tensor, shape '[batch_size, max_time, attention_dim]', typically the encoder outputs. Returns: A '[batch_size, max_time]' attention score (energy)<|endoftext|>
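A NumPy sketch of the energy formula above with the three projections precomputed; W_query has time dimension 1 and broadcasts across max_time (shapes and random values are illustrative):

import numpy as np

batch, max_time, units = 2, 5, 8
W_query = np.random.randn(batch, 1, units)        # processed decoder state
W_fil = np.random.randn(batch, max_time, units)   # processed location features f = F * alpha_{i-1}
W_keys = np.random.randn(batch, max_time, units)  # processed encoder outputs
v_a, b_a = np.random.randn(units), np.zeros(units)
energy = np.sum(v_a * np.tanh(W_keys + W_query + W_fil + b_a), axis=2)  # [batch, max_time]
assert energy.shape == (batch, max_time)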
01ce2e0db624b3d54e64dd3832729985860aac8cbaec07009b5d1af0bd8925a1
def _smoothing_normalization(e): 'Applies a smoothing normalization function instead of softmax\n\tIntroduced in:\n\t\tJ. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben-\n\t gio, “Attention-based models for speech recognition,” in Ad-\n\t vances in Neural Information Processing Systems, 2015, pp.\n\t 577–585.\n\n\t############################################################################\n\t\t\t\t\t\tSmoothing normalization function\n\t\t\t\ta_{i, j} = sigmoid(e_{i, j}) / sum_j(sigmoid(e_{i, j}))\n\t############################################################################\n\n\tArgs:\n\t\te: matrix [batch_size, max_time(memory_time)]: expected to be energy (score)\n\t\t\tvalues of an attention mechanism\n\tReturns:\n\t\tmatrix [batch_size, max_time]: [0, 1] normalized alignments with possible\n\t\t\tattendance to multiple memory time steps.\n\t' return (tf.nn.sigmoid(e) / tf.reduce_sum(tf.nn.sigmoid(e), axis=(- 1), keepdims=True))
Applies a smoothing normalization function instead of softmax Introduced in: J. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben- gio, “Attention-based models for speech recognition,” in Ad- vances in Neural Information Processing Systems, 2015, pp. 577–585. ############################################################################ Smoothing normalization function a_{i, j} = sigmoid(e_{i, j}) / sum_j(sigmoid(e_{i, j})) ############################################################################ Args: e: matrix [batch_size, max_time(memory_time)]: expected to be energy (score) values of an attention mechanism Returns: matrix [batch_size, max_time]: [0, 1] normalized alignments with possible attendance to multiple memory time steps.
tacotron/models/attention.py
_smoothing_normalization
huangdou123/huangdou
2,154
python
def _smoothing_normalization(e): 'Applies a smoothing normalization function instead of softmax\n\tIntroduced in:\n\t\tJ. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben-\n\t gio, “Attention-based models for speech recognition,” in Ad-\n\t vances in Neural Information Processing Systems, 2015, pp.\n\t 577–585.\n\n\t############################################################################\n\t\t\t\t\t\tSmoothing normalization function\n\t\t\t\ta_{i, j} = sigmoid(e_{i, j}) / sum_j(sigmoid(e_{i, j}))\n\t############################################################################\n\n\tArgs:\n\t\te: matrix [batch_size, max_time(memory_time)]: expected to be energy (score)\n\t\t\tvalues of an attention mechanism\n\tReturns:\n\t\tmatrix [batch_size, max_time]: [0, 1] normalized alignments with possible\n\t\t\tattendance to multiple memory time steps.\n\t' return (tf.nn.sigmoid(e) / tf.reduce_sum(tf.nn.sigmoid(e), axis=(- 1), keepdims=True))
def _smoothing_normalization(e): 'Applies a smoothing normalization function instead of softmax\n\tIntroduced in:\n\t\tJ. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben-\n\t gio, “Attention-based models for speech recognition,” in Ad-\n\t vances in Neural Information Processing Systems, 2015, pp.\n\t 577–585.\n\n\t############################################################################\n\t\t\t\t\t\tSmoothing normalization function\n\t\t\t\ta_{i, j} = sigmoid(e_{i, j}) / sum_j(sigmoid(e_{i, j}))\n\t############################################################################\n\n\tArgs:\n\t\te: matrix [batch_size, max_time(memory_time)]: expected to be energy (score)\n\t\t\tvalues of an attention mechanism\n\tReturns:\n\t\tmatrix [batch_size, max_time]: [0, 1] normalized alignments with possible\n\t\t\tattendance to multiple memory time steps.\n\t' return (tf.nn.sigmoid(e) / tf.reduce_sum(tf.nn.sigmoid(e), axis=(- 1), keepdims=True))<|docstring|>Applies a smoothing normalization function instead of softmax Introduced in: J. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben- gio, “Attention-based models for speech recognition,” in Ad- vances in Neural Information Processing Systems, 2015, pp. 577–585. ############################################################################ Smoothing normalization function a_{i, j} = sigmoid(e_{i, j}) / sum_j(sigmoid(e_{i, j})) ############################################################################ Args: e: matrix [batch_size, max_time(memory_time)]: expected to be energy (score) values of an attention mechanism Returns: matrix [batch_size, max_time]: [0, 1] normalized alignments with possible attendance to multiple memory time steps.<|endoftext|>
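A small numeric contrast between the smoothing normalization above and softmax: because sigmoid saturates, large-but-different energies can keep several time steps active, whereas softmax concentrates almost all mass on the maximum (values are illustrative):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

e = np.array([[10.0, 5.0, 0.0]])  # [batch=1, max_time=3] energies
smooth = sigmoid(e) / sigmoid(e).sum(axis=-1, keepdims=True)
softmax = np.exp(e) / np.exp(e).sum(axis=-1, keepdims=True)
# smooth  ~ [[0.401, 0.398, 0.201]]  -- attention spread over several steps
# softmax ~ [[0.993, 0.007, 0.000]]  -- nearly one-hot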
b3df7a47f97f9e017dc0324cdb81a6551ac87278867448e3bba710463d753d6a
def __init__(self, num_units, memory, hparams, is_training, mask_encoder=True, memory_sequence_length=None, smoothing=False, cumulate_weights=True, name='LocationSensitiveAttention'): "Construct the Attention mechanism.\n\t\tArgs:\n\t\t\tnum_units: The depth of the query mechanism.\n\t\t\tmemory: The memory to query; usually the output of an RNN encoder. This\n\t\t\t\ttensor should be shaped `[batch_size, max_time, ...]`.\n\t\t\tmask_encoder (optional): Boolean, whether to mask encoder paddings.\n\t\t\tmemory_sequence_length (optional): Sequence lengths for the batch entries\n\t\t\t\tin memory. If provided, the memory tensor rows are masked with zeros\n\t\t\t\tfor values past the respective sequence lengths. Only relevant if mask_encoder = True.\n\t\t\tsmoothing (optional): Boolean. Determines which normalization function to use.\n\t\t\t\tDefault normalization function (probablity_fn) is softmax. If smoothing is\n\t\t\t\tenabled, we replace softmax with:\n\t\t\t\t\t\ta_{i, j} = sigmoid(e_{i, j}) / sum_j(sigmoid(e_{i, j}))\n\t\t\t\tIntroduced in:\n\t\t\t\t\tJ. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben-\n\t\t\t\t gio, “Attention-based models for speech recognition,” in Ad-\n\t\t\t\t vances in Neural Information Processing Systems, 2015, pp.\n\t\t\t\t 577–585.\n\t\t\t\tThis is mainly used if the model wants to attend to multiple input parts\n\t\t\t\tat the same decoding step. We probably won't be using it since multiple sound\n\t\t\t\tframes may depend on the same character/phone, probably not the way around.\n\t\t\t\tNote:\n\t\t\t\t\tWe still keep it implemented in case we want to test it. They used it in the\n\t\t\t\t\tpaper in the context of speech recognition, where one phoneme may depend on\n\t\t\t\t\tmultiple subsequent sound frames.\n\t\t\tname: Name to use when creating ops.\n\t\t" normalization_function = (_smoothing_normalization if (smoothing == True) else None) memory_length = (memory_sequence_length if (mask_encoder == True) else None) super(LocationSensitiveAttention, self).__init__(num_units=num_units, memory=memory, memory_sequence_length=memory_length, probability_fn=normalization_function, name=name) self.location_convolution = tf.layers.Conv1D(filters=hparams.attention_filters, kernel_size=hparams.attention_kernel, padding='same', use_bias=True, bias_initializer=tf.zeros_initializer(), name='location_features_convolution') self.location_layer = tf.layers.Dense(units=num_units, use_bias=False, dtype=tf.float32, name='location_features_layer') self._cumulate = cumulate_weights self.synthesis_constraint = (hparams.synthesis_constraint and (not is_training)) self.attention_win_size = tf.convert_to_tensor(hparams.attention_win_size, dtype=tf.int32) self.constraint_type = hparams.synthesis_constraint_type
Construct the Attention mechanism. Args: num_units: The depth of the query mechanism. memory: The memory to query; usually the output of an RNN encoder. This tensor should be shaped `[batch_size, max_time, ...]`. mask_encoder (optional): Boolean, whether to mask encoder paddings. memory_sequence_length (optional): Sequence lengths for the batch entries in memory. If provided, the memory tensor rows are masked with zeros for values past the respective sequence lengths. Only relevant if mask_encoder = True. smoothing (optional): Boolean. Determines which normalization function to use. Default normalization function (probability_fn) is softmax. If smoothing is enabled, we replace softmax with: a_{i, j} = sigmoid(e_{i, j}) / sum_j(sigmoid(e_{i, j})) Introduced in: J. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben- gio, “Attention-based models for speech recognition,” in Ad- vances in Neural Information Processing Systems, 2015, pp. 577–585. This is mainly used if the model wants to attend to multiple input parts at the same decoding step. We probably won't be using it since multiple sound frames may depend on the same character/phone, probably not the other way around. Note: We still keep it implemented in case we want to test it. They used it in the paper in the context of speech recognition, where one phoneme may depend on multiple subsequent sound frames. name: Name to use when creating ops.
tacotron/models/attention.py
__init__
huangdou123/huangdou
2,154
python
def __init__(self, num_units, memory, hparams, is_training, mask_encoder=True, memory_sequence_length=None, smoothing=False, cumulate_weights=True, name='LocationSensitiveAttention'): "Construct the Attention mechanism.\n\t\tArgs:\n\t\t\tnum_units: The depth of the query mechanism.\n\t\t\tmemory: The memory to query; usually the output of an RNN encoder. This\n\t\t\t\ttensor should be shaped `[batch_size, max_time, ...]`.\n\t\t\tmask_encoder (optional): Boolean, whether to mask encoder paddings.\n\t\t\tmemory_sequence_length (optional): Sequence lengths for the batch entries\n\t\t\t\tin memory. If provided, the memory tensor rows are masked with zeros\n\t\t\t\tfor values past the respective sequence lengths. Only relevant if mask_encoder = True.\n\t\t\tsmoothing (optional): Boolean. Determines which normalization function to use.\n\t\t\t\tDefault normalization function (probability_fn) is softmax. If smoothing is\n\t\t\t\tenabled, we replace softmax with:\n\t\t\t\t\t\ta_{i, j} = sigmoid(e_{i, j}) / sum_j(sigmoid(e_{i, j}))\n\t\t\t\tIntroduced in:\n\t\t\t\t\tJ. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben-\n\t\t\t\t gio, “Attention-based models for speech recognition,” in Ad-\n\t\t\t\t vances in Neural Information Processing Systems, 2015, pp.\n\t\t\t\t 577–585.\n\t\t\t\tThis is mainly used if the model wants to attend to multiple input parts\n\t\t\t\tat the same decoding step. We probably won't be using it since multiple sound\n\t\t\t\tframes may depend on the same character/phone, probably not the other way around.\n\t\t\t\tNote:\n\t\t\t\t\tWe still keep it implemented in case we want to test it. They used it in the\n\t\t\t\t\tpaper in the context of speech recognition, where one phoneme may depend on\n\t\t\t\t\tmultiple subsequent sound frames.\n\t\t\tname: Name to use when creating ops.\n\t\t" normalization_function = (_smoothing_normalization if (smoothing == True) else None) memory_length = (memory_sequence_length if (mask_encoder == True) else None) super(LocationSensitiveAttention, self).__init__(num_units=num_units, memory=memory, memory_sequence_length=memory_length, probability_fn=normalization_function, name=name) self.location_convolution = tf.layers.Conv1D(filters=hparams.attention_filters, kernel_size=hparams.attention_kernel, padding='same', use_bias=True, bias_initializer=tf.zeros_initializer(), name='location_features_convolution') self.location_layer = tf.layers.Dense(units=num_units, use_bias=False, dtype=tf.float32, name='location_features_layer') self._cumulate = cumulate_weights self.synthesis_constraint = (hparams.synthesis_constraint and (not is_training)) self.attention_win_size = tf.convert_to_tensor(hparams.attention_win_size, dtype=tf.int32) self.constraint_type = hparams.synthesis_constraint_type
def __init__(self, num_units, memory, hparams, is_training, mask_encoder=True, memory_sequence_length=None, smoothing=False, cumulate_weights=True, name='LocationSensitiveAttention'): "Construct the Attention mechanism.\n\t\tArgs:\n\t\t\tnum_units: The depth of the query mechanism.\n\t\t\tmemory: The memory to query; usually the output of an RNN encoder. This\n\t\t\t\ttensor should be shaped `[batch_size, max_time, ...]`.\n\t\t\tmask_encoder (optional): Boolean, whether to mask encoder paddings.\n\t\t\tmemory_sequence_length (optional): Sequence lengths for the batch entries\n\t\t\t\tin memory. If provided, the memory tensor rows are masked with zeros\n\t\t\t\tfor values past the respective sequence lengths. Only relevant if mask_encoder = True.\n\t\t\tsmoothing (optional): Boolean. Determines which normalization function to use.\n\t\t\t\tDefault normalization function (probability_fn) is softmax. If smoothing is\n\t\t\t\tenabled, we replace softmax with:\n\t\t\t\t\t\ta_{i, j} = sigmoid(e_{i, j}) / sum_j(sigmoid(e_{i, j}))\n\t\t\t\tIntroduced in:\n\t\t\t\t\tJ. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben-\n\t\t\t\t gio, “Attention-based models for speech recognition,” in Ad-\n\t\t\t\t vances in Neural Information Processing Systems, 2015, pp.\n\t\t\t\t 577–585.\n\t\t\t\tThis is mainly used if the model wants to attend to multiple input parts\n\t\t\t\tat the same decoding step. We probably won't be using it since multiple sound\n\t\t\t\tframes may depend on the same character/phone, probably not the other way around.\n\t\t\t\tNote:\n\t\t\t\t\tWe still keep it implemented in case we want to test it. They used it in the\n\t\t\t\t\tpaper in the context of speech recognition, where one phoneme may depend on\n\t\t\t\t\tmultiple subsequent sound frames.\n\t\t\tname: Name to use when creating ops.\n\t\t" normalization_function = (_smoothing_normalization if (smoothing == True) else None) memory_length = (memory_sequence_length if (mask_encoder == True) else None) super(LocationSensitiveAttention, self).__init__(num_units=num_units, memory=memory, memory_sequence_length=memory_length, probability_fn=normalization_function, name=name) self.location_convolution = tf.layers.Conv1D(filters=hparams.attention_filters, kernel_size=hparams.attention_kernel, padding='same', use_bias=True, bias_initializer=tf.zeros_initializer(), name='location_features_convolution') self.location_layer = tf.layers.Dense(units=num_units, use_bias=False, dtype=tf.float32, name='location_features_layer') self._cumulate = cumulate_weights self.synthesis_constraint = (hparams.synthesis_constraint and (not is_training)) self.attention_win_size = tf.convert_to_tensor(hparams.attention_win_size, dtype=tf.int32) self.constraint_type = hparams.synthesis_constraint_type<|docstring|>Construct the Attention mechanism. Args: num_units: The depth of the query mechanism. memory: The memory to query; usually the output of an RNN encoder. This tensor should be shaped `[batch_size, max_time, ...]`. mask_encoder (optional): Boolean, whether to mask encoder paddings. memory_sequence_length (optional): Sequence lengths for the batch entries in memory. If provided, the memory tensor rows are masked with zeros for values past the respective sequence lengths. Only relevant if mask_encoder = True. smoothing (optional): Boolean. Determines which normalization function to use. Default normalization function (probability_fn) is softmax. If smoothing is enabled, we replace softmax with: a_{i, j} = sigmoid(e_{i, j}) / sum_j(sigmoid(e_{i, j})) Introduced in: J. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben- gio, “Attention-based models for speech recognition,” in Ad- vances in Neural Information Processing Systems, 2015, pp. 577–585. This is mainly used if the model wants to attend to multiple input parts at the same decoding step. We probably won't be using it since multiple sound frames may depend on the same character/phone, probably not the other way around. Note: We still keep it implemented in case we want to test it. They used it in the paper in the context of speech recognition, where one phoneme may depend on multiple subsequent sound frames. name: Name to use when creating ops.<|endoftext|>
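A hedged construction sketch for the record above. The hparams attribute names come from the accesses inside the constructor, but the stub class, the tensor shapes, and all numeric values are assumptions, not code from the repo (which wires this up from its own hparams module).

import tensorflow as tf  # TF 1.x API, matching the tf.layers calls in the record

class StubHParams:  # hypothetical stand-in for the repo's hparams object
    attention_filters = 32
    attention_kernel = 31
    synthesis_constraint = False          # the constraint is only active outside training
    attention_win_size = 7
    synthesis_constraint_type = 'window'  # or 'monotonic'

encoder_outputs = tf.placeholder(tf.float32, [None, None, 512])  # [B, T_in, enc_dim]
input_lengths = tf.placeholder(tf.int32, [None])

attention_mechanism = LocationSensitiveAttention(
    num_units=128,
    memory=encoder_outputs,
    hparams=StubHParams(),
    is_training=True,
    memory_sequence_length=input_lengths,  # mask padded encoder steps
    smoothing=False,                       # True swaps softmax for the sigmoid normalization
    cumulate_weights=True)                 # Tacotron-2 style cumulative alignments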
e8e952b117bc3a20b91e545899411936232f1a088033dadb40de61f7e4b9b154
def __call__(self, query, state, prev_max_attentions): "Score the query based on the keys and values.\n\t\tArgs:\n\t\t\tquery: Tensor of dtype matching `self.values` and shape\n\t\t\t\t`[batch_size, query_depth]`.\n\t\t\tstate (previous alignments): Tensor of dtype matching `self.values` and shape\n\t\t\t\t`[batch_size, alignments_size]`\n\t\t\t\t(`alignments_size` is memory's `max_time`).\n\t\tReturns:\n\t\t\talignments: Tensor of dtype matching `self.values` and shape\n\t\t\t\t`[batch_size, alignments_size]` (`alignments_size` is memory's\n\t\t\t\t`max_time`).\n\t\t" previous_alignments = state with variable_scope.variable_scope(None, 'Location_Sensitive_Attention', [query]): processed_query = (self.query_layer(query) if self.query_layer else query) processed_query = tf.expand_dims(processed_query, 1) expanded_alignments = tf.expand_dims(previous_alignments, axis=2) f = self.location_convolution(expanded_alignments) processed_location_features = self.location_layer(f) energy = _location_sensitive_score(processed_query, processed_location_features, self.keys) if self.synthesis_constraint: Tx = tf.shape(energy)[(- 1)] if (self.constraint_type == 'monotonic'): key_masks = tf.sequence_mask(prev_max_attentions, Tx) reverse_masks = tf.sequence_mask(((Tx - self.attention_win_size) - prev_max_attentions), Tx)[(:, ::(- 1))] else: assert (self.constraint_type == 'window') key_masks = tf.sequence_mask((prev_max_attentions - ((self.attention_win_size // 2) + ((self.attention_win_size % 2) != 0))), Tx) reverse_masks = tf.sequence_mask(((Tx - (self.attention_win_size // 2)) - prev_max_attentions), Tx)[(:, ::(- 1))] masks = tf.logical_or(key_masks, reverse_masks) paddings = (tf.ones_like(energy) * ((- (2 ** 32)) + 1)) energy = tf.where(tf.equal(masks, False), energy, paddings) alignments = self._probability_fn(energy, previous_alignments) max_attentions = tf.argmax(alignments, (- 1), output_type=tf.int32) if self._cumulate: next_state = (alignments + previous_alignments) else: next_state = alignments return (alignments, next_state, max_attentions)
Score the query based on the keys and values. Args: query: Tensor of dtype matching `self.values` and shape `[batch_size, query_depth]`. state (previous alignments): Tensor of dtype matching `self.values` and shape `[batch_size, alignments_size]` (`alignments_size` is memory's `max_time`). Returns: alignments: Tensor of dtype matching `self.values` and shape `[batch_size, alignments_size]` (`alignments_size` is memory's `max_time`).
tacotron/models/attention.py
__call__
huangdou123/huangdou
2,154
python
def __call__(self, query, state, prev_max_attentions): "Score the query based on the keys and values.\n\t\tArgs:\n\t\t\tquery: Tensor of dtype matching `self.values` and shape\n\t\t\t\t`[batch_size, query_depth]`.\n\t\t\tstate (previous alignments): Tensor of dtype matching `self.values` and shape\n\t\t\t\t`[batch_size, alignments_size]`\n\t\t\t\t(`alignments_size` is memory's `max_time`).\n\t\tReturns:\n\t\t\talignments: Tensor of dtype matching `self.values` and shape\n\t\t\t\t`[batch_size, alignments_size]` (`alignments_size` is memory's\n\t\t\t\t`max_time`).\n\t\t" previous_alignments = state with variable_scope.variable_scope(None, 'Location_Sensitive_Attention', [query]): processed_query = (self.query_layer(query) if self.query_layer else query) processed_query = tf.expand_dims(processed_query, 1) expanded_alignments = tf.expand_dims(previous_alignments, axis=2) f = self.location_convolution(expanded_alignments) processed_location_features = self.location_layer(f) energy = _location_sensitive_score(processed_query, processed_location_features, self.keys) if self.synthesis_constraint: Tx = tf.shape(energy)[(- 1)] if (self.constraint_type == 'monotonic'): key_masks = tf.sequence_mask(prev_max_attentions, Tx) reverse_masks = tf.sequence_mask(((Tx - self.attention_win_size) - prev_max_attentions), Tx)[(:, ::(- 1))] else: assert (self.constraint_type == 'window') key_masks = tf.sequence_mask((prev_max_attentions - ((self.attention_win_size // 2) + ((self.attention_win_size % 2) != 0))), Tx) reverse_masks = tf.sequence_mask(((Tx - (self.attention_win_size // 2)) - prev_max_attentions), Tx)[(:, ::(- 1))] masks = tf.logical_or(key_masks, reverse_masks) paddings = (tf.ones_like(energy) * ((- (2 ** 32)) + 1)) energy = tf.where(tf.equal(masks, False), energy, paddings) alignments = self._probability_fn(energy, previous_alignments) max_attentions = tf.argmax(alignments, (- 1), output_type=tf.int32) if self._cumulate: next_state = (alignments + previous_alignments) else: next_state = alignments return (alignments, next_state, max_attentions)
def __call__(self, query, state, prev_max_attentions): "Score the query based on the keys and values.\n\t\tArgs:\n\t\t\tquery: Tensor of dtype matching `self.values` and shape\n\t\t\t\t`[batch_size, query_depth]`.\n\t\t\tstate (previous alignments): Tensor of dtype matching `self.values` and shape\n\t\t\t\t`[batch_size, alignments_size]`\n\t\t\t\t(`alignments_size` is memory's `max_time`).\n\t\tReturns:\n\t\t\talignments: Tensor of dtype matching `self.values` and shape\n\t\t\t\t`[batch_size, alignments_size]` (`alignments_size` is memory's\n\t\t\t\t`max_time`).\n\t\t" previous_alignments = state with variable_scope.variable_scope(None, 'Location_Sensitive_Attention', [query]): processed_query = (self.query_layer(query) if self.query_layer else query) processed_query = tf.expand_dims(processed_query, 1) expanded_alignments = tf.expand_dims(previous_alignments, axis=2) f = self.location_convolution(expanded_alignments) processed_location_features = self.location_layer(f) energy = _location_sensitive_score(processed_query, processed_location_features, self.keys) if self.synthesis_constraint: Tx = tf.shape(energy)[(- 1)] if (self.constraint_type == 'monotonic'): key_masks = tf.sequence_mask(prev_max_attentions, Tx) reverse_masks = tf.sequence_mask(((Tx - self.attention_win_size) - prev_max_attentions), Tx)[(:, ::(- 1))] else: assert (self.constraint_type == 'window') key_masks = tf.sequence_mask((prev_max_attentions - ((self.attention_win_size // 2) + ((self.attention_win_size % 2) != 0))), Tx) reverse_masks = tf.sequence_mask(((Tx - (self.attention_win_size // 2)) - prev_max_attentions), Tx)[(:, ::(- 1))] masks = tf.logical_or(key_masks, reverse_masks) paddings = (tf.ones_like(energy) * ((- (2 ** 32)) + 1)) energy = tf.where(tf.equal(masks, False), energy, paddings) alignments = self._probability_fn(energy, previous_alignments) max_attentions = tf.argmax(alignments, (- 1), output_type=tf.int32) if self._cumulate: next_state = (alignments + previous_alignments) else: next_state = alignments return (alignments, next_state, max_attentions)<|docstring|>Score the query based on the keys and values. Args: query: Tensor of dtype matching `self.values` and shape `[batch_size, query_depth]`. state (previous alignments): Tensor of dtype matching `self.values` and shape `[batch_size, alignments_size]` (`alignments_size` is memory's `max_time`). Returns: alignments: Tensor of dtype matching `self.values` and shape `[batch_size, alignments_size]` (`alignments_size` is memory's `max_time`).<|endoftext|>
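A NumPy sketch of the 'window' branch of the synthesis constraint in __call__ above, with illustrative sizes; the record does the same with tf.sequence_mask, tf.logical_or, and tf.where. Energies outside a window around the previous argmax are replaced by a huge negative value so the normalization effectively ignores them.

import numpy as np

Tx, win, prev_max = 10, 5, 4                                  # illustrative sizes
idx = np.arange(Tx)
key_masks = idx < prev_max - (win // 2 + int(win % 2 != 0))   # too far left of prev_max
reverse_masks = idx >= prev_max + win // 2                    # too far right of prev_max
masks = key_masks | reverse_masks

energy = np.random.randn(Tx)
paddings = np.full_like(energy, -(2.0 ** 32) + 1)
energy = np.where(~masks, energy, paddings)                   # keep only the window
print(np.flatnonzero(~masks))                                 # [1 2 3 4 5]: surviving encoder steps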
402f434b1c982001ee1d2aeaf7fc7475b6df415451d56bd85e27bf5d619b01a0
def __init__(self, *, id_: Optional[FhirId]=None, meta: Optional[Meta]=None, implicitRules: Optional[FhirUri]=None, language: Optional[CommonLanguagesCode]=None, text: Optional[Narrative]=None, contained: Optional[FhirList[ResourceContainer]]=None, extension: Optional[FhirList[ExtensionBase]]=None, modifierExtension: Optional[FhirList[ExtensionBase]]=None, url: Optional[FhirUri]=None, identifier: Optional[FhirList[Identifier]]=None, version: Optional[FhirString]=None, name: Optional[FhirString]=None, title: Optional[FhirString]=None, status: PublicationStatusCode, date: Optional[FhirDateTime]=None, publisher: Optional[FhirString]=None, contact: Optional[FhirList[ContactDetail]]=None, description: Optional[FhirMarkdown]=None, note: Optional[FhirList[Annotation]]=None, useContext: Optional[FhirList[UsageContext]]=None, jurisdiction: Optional[FhirList[CodeableConcept[JurisdictionValueSetCode]]]=None, copyright: Optional[FhirMarkdown]=None, approvalDate: Optional[FhirDate]=None, lastReviewDate: Optional[FhirDate]=None, effectivePeriod: Optional[Period]=None, topic: Optional[FhirList[CodeableConcept[DefinitionTopicCode]]]=None, author: Optional[FhirList[ContactDetail]]=None, editor: Optional[FhirList[ContactDetail]]=None, reviewer: Optional[FhirList[ContactDetail]]=None, endorser: Optional[FhirList[ContactDetail]]=None, relatedArtifact: Optional[FhirList[RelatedArtifact]]=None, synthesisType: Optional[CodeableConcept[SynthesisTypeCode]]=None, studyType: Optional[CodeableConcept[StudyTypeCode]]=None, population: Reference[EvidenceVariable], exposure: Optional[Reference[EvidenceVariable]]=None, outcome: Reference[EvidenceVariable], sampleSize: Optional[RiskEvidenceSynthesisSampleSize]=None, riskEstimate: Optional[RiskEvidenceSynthesisRiskEstimate]=None, certainty: Optional[FhirList[RiskEvidenceSynthesisCertainty]]=None) -> None: '\n The RiskEvidenceSynthesis resource describes the likelihood of an outcome in a\n population plus exposure state where the risk estimate is derived from a\n combination of research studies.\n If the element is present, it must have either a @value, an @id, or extensions\n\n :param id_: The logical id of the resource, as used in the URL for the resource. Once\n assigned, this value never changes.\n :param meta: The metadata about the resource. This is content that is maintained by the\n infrastructure. Changes to the content might not always be associated with\n version changes to the resource.\n :param implicitRules: A reference to a set of rules that were followed when the resource was\n constructed, and which must be understood when processing the content. Often,\n this is a reference to an implementation guide that defines the special rules\n along with other profiles etc.\n :param language: The base language in which the resource is written.\n :param text: A human-readable narrative that contains a summary of the resource and can be\n used to represent the content of the resource to a human. The narrative need\n not encode all the structured data, but is required to contain sufficient\n detail to make it "clinically safe" for a human to just read the narrative.\n Resource definitions may define what content should be represented in the\n narrative to ensure clinical safety.\n :param contained: These resources do not have an independent existence apart from the resource\n that contains them - they cannot be identified independently, and nor can they\n have their own independent transaction scope.\n :param extension: May be used to represent additional information that is not part of the basic\n definition of the resource. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n :param modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the resource and that modifies the understanding of the element\n that contains it and/or the understanding of the containing element\'s\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. Though any\n implementer is allowed to define an extension, there is a set of requirements\n that SHALL be met as part of the definition of the extension. Applications\n processing a resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n :param url: An absolute URI that is used to identify this risk evidence synthesis when it\n is referenced in a specification, model, design or an instance; also called\n its canonical identifier. This SHOULD be globally unique and SHOULD be a\n literal address at which an authoritative instance of this risk\n evidence synthesis is (or will be) published. This URL can be the target of a\n canonical reference. It SHALL remain the same when the risk evidence synthesis\n is stored on different servers.\n :param identifier: A formal identifier that is used to identify this risk evidence synthesis when\n it is represented in other formats, or referenced in a specification, model,\n design or an instance.\n :param version: The identifier that is used to identify this version of the risk evidence\n synthesis when it is referenced in a specification, model, design or instance.\n This is an arbitrary value managed by the risk evidence synthesis author and\n is not expected to be globally unique. For example, it might be a timestamp\n (e.g. yyyymmdd) if a managed version is not available. There is also no\n expectation that versions can be placed in a lexicographical sequence.\n :param name: A natural language name identifying the risk evidence synthesis. This name\n should be usable as an identifier for the module by machine processing\n applications such as code generation.\n :param title: A short, descriptive, user-friendly title for the risk evidence synthesis.\n :param status: The status of this risk evidence synthesis. Enables tracking the life-cycle of\n the content.\n :param date: The date (and optionally time) when the risk evidence synthesis was\n published. The date must change when the business version changes and it must\n change if the status code changes. In addition, it should change when the\n substantive content of the risk evidence synthesis changes.\n :param publisher: The name of the organization or individual that published the risk evidence\n synthesis.\n :param contact: Contact details to assist a user in finding and communicating with the\n publisher.\n :param description: A free text natural language description of the risk evidence synthesis from a\n consumer\'s perspective.\n :param note: A human-readable string to clarify or explain concepts about the resource.\n :param useContext: The content was developed with a focus and intent of supporting the contexts\n that are listed. These contexts may be general categories (gender, age, ...)\n or may be references to specific programs (insurance plans, studies, ...) and\n may be used to assist with indexing and searching for appropriate risk\n evidence synthesis instances.\n :param jurisdiction: A legal or geographic region in which the risk evidence synthesis is intended\n to be used.\n :param copyright: A copyright statement relating to the risk evidence synthesis and/or its\n contents. Copyright statements are generally legal restrictions on the use and\n publishing of the risk evidence synthesis.\n :param approvalDate: The date on which the resource content was approved by the publisher. Approval\n happens once when the content is officially approved for usage.\n :param lastReviewDate: The date on which the resource content was last reviewed. Review happens\n periodically after approval but does not change the original approval date.\n :param effectivePeriod: The period during which the risk evidence synthesis content was or is planned\n to be in active use.\n :param topic: Descriptive topics related to the content of the RiskEvidenceSynthesis. Topics\n provide a high-level categorization grouping types of EffectEvidenceSynthesiss\n that can be useful for filtering and searching.\n :param author: An individual or organization primarily involved in the creation and\n maintenance of the content.\n :param editor: An individual or organization primarily responsible for internal coherence of\n the content.\n :param reviewer: An individual or organization primarily responsible for review of some aspect\n of the content.\n :param endorser: An individual or organization responsible for officially endorsing the content\n for use in some setting.\n :param relatedArtifact: Related artifacts such as additional documentation, justification, or\n bibliographic references.\n :param synthesisType: Type of synthesis, e.g. meta-analysis.\n :param studyType: Type of study, e.g. randomized trial.\n :param population: A reference to an EvidenceVariable resource that defines the population for the\n research.\n :param exposure: A reference to an EvidenceVariable resource that defines the exposure for the\n research.\n :param outcome: A reference to an EvidenceVariable resource that defines the outcome for the\n research.\n :param sampleSize: A description of the size of the sample involved in the synthesis.\n :param riskEstimate: The estimated risk of the outcome.\n :param certainty: A description of the certainty of the risk estimate.\n ' super().__init__(resourceType='RiskEvidenceSynthesis', id_=id_, meta=meta, implicitRules=implicitRules, language=language, text=text, contained=contained, extension=extension, modifierExtension=modifierExtension, url=url, identifier=identifier, version=version, name=name, title=title, status=status, date=date, publisher=publisher, contact=contact, description=description, note=note, useContext=useContext, jurisdiction=jurisdiction, copyright=copyright, approvalDate=approvalDate, lastReviewDate=lastReviewDate, effectivePeriod=effectivePeriod, topic=topic, author=author, editor=editor, reviewer=reviewer, endorser=endorser, relatedArtifact=relatedArtifact, synthesisType=synthesisType, studyType=studyType, population=population, exposure=exposure, outcome=outcome, sampleSize=sampleSize, riskEstimate=riskEstimate, certainty=certainty)
The RiskEvidenceSynthesis resource describes the likelihood of an outcome in a population plus exposure state where the risk estimate is derived from a combination of research studies. If the element is present, it must have either a @value, an @id, or extensions :param id_: The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes. :param meta: The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource. :param implicitRules: A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. Often, this is a reference to an implementation guide that defines the special rules along with other profiles etc. :param language: The base language in which the resource is written. :param text: A human-readable narrative that contains a summary of the resource and can be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety. :param contained: These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope. :param extension: May be used to represent additional information that is not part of the basic definition of the resource. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. :param modifierExtension: May be used to represent additional information that is not part of the basic definition of the resource and that modifies the understanding of the element that contains it and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions. Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself). :param url: An absolute URI that is used to identify this risk evidence synthesis when it is referenced in a specification, model, design or an instance; also called its canonical identifier. This SHOULD be globally unique and SHOULD be a literal address at which an authoritative instance of this risk evidence synthesis is (or will be) published. This URL can be the target of a canonical reference. It SHALL remain the same when the risk evidence synthesis is stored on different servers. :param identifier: A formal identifier that is used to identify this risk evidence synthesis when it is represented in other formats, or referenced in a specification, model, design or an instance. :param version: The identifier that is used to identify this version of the risk evidence synthesis when it is referenced in a specification, model, design or instance. This is an arbitrary value managed by the risk evidence synthesis author and is not expected to be globally unique. For example, it might be a timestamp (e.g. yyyymmdd) if a managed version is not available. There is also no expectation that versions can be placed in a lexicographical sequence. :param name: A natural language name identifying the risk evidence synthesis. This name should be usable as an identifier for the module by machine processing applications such as code generation. :param title: A short, descriptive, user-friendly title for the risk evidence synthesis. :param status: The status of this risk evidence synthesis. Enables tracking the life-cycle of the content. :param date: The date (and optionally time) when the risk evidence synthesis was published. The date must change when the business version changes and it must change if the status code changes. In addition, it should change when the substantive content of the risk evidence synthesis changes. :param publisher: The name of the organization or individual that published the risk evidence synthesis. :param contact: Contact details to assist a user in finding and communicating with the publisher. :param description: A free text natural language description of the risk evidence synthesis from a consumer's perspective. :param note: A human-readable string to clarify or explain concepts about the resource. :param useContext: The content was developed with a focus and intent of supporting the contexts that are listed. These contexts may be general categories (gender, age, ...) or may be references to specific programs (insurance plans, studies, ...) and may be used to assist with indexing and searching for appropriate risk evidence synthesis instances. :param jurisdiction: A legal or geographic region in which the risk evidence synthesis is intended to be used. :param copyright: A copyright statement relating to the risk evidence synthesis and/or its contents. Copyright statements are generally legal restrictions on the use and publishing of the risk evidence synthesis. :param approvalDate: The date on which the resource content was approved by the publisher. Approval happens once when the content is officially approved for usage. :param lastReviewDate: The date on which the resource content was last reviewed. Review happens periodically after approval but does not change the original approval date. :param effectivePeriod: The period during which the risk evidence synthesis content was or is planned to be in active use. :param topic: Descriptive topics related to the content of the RiskEvidenceSynthesis. Topics provide a high-level categorization grouping types of EffectEvidenceSynthesiss that can be useful for filtering and searching. :param author: An individual or organization primarily involved in the creation and maintenance of the content. :param editor: An individual or organization primarily responsible for internal coherence of the content. :param reviewer: An individual or organization primarily responsible for review of some aspect of the content. :param endorser: An individual or organization responsible for officially endorsing the content for use in some setting. :param relatedArtifact: Related artifacts such as additional documentation, justification, or bibliographic references. :param synthesisType: Type of synthesis, e.g. meta-analysis. :param studyType: Type of study, e.g. randomized trial. :param population: A reference to an EvidenceVariable resource that defines the population for the research. :param exposure: A reference to an EvidenceVariable resource that defines the exposure for the research. :param outcome: A reference to an EvidenceVariable resource that defines the outcome for the research. :param sampleSize: A description of the size of the sample involved in the synthesis. :param riskEstimate: The estimated risk of the outcome. :param certainty: A description of the certainty of the risk estimate.
spark_auto_mapper_fhir/resources/risk_evidence_synthesis.py
__init__
icanbwell/SparkAutoMapper.FHIR
1
python
def __init__(self, *, id_: Optional[FhirId]=None, meta: Optional[Meta]=None, implicitRules: Optional[FhirUri]=None, language: Optional[CommonLanguagesCode]=None, text: Optional[Narrative]=None, contained: Optional[FhirList[ResourceContainer]]=None, extension: Optional[FhirList[ExtensionBase]]=None, modifierExtension: Optional[FhirList[ExtensionBase]]=None, url: Optional[FhirUri]=None, identifier: Optional[FhirList[Identifier]]=None, version: Optional[FhirString]=None, name: Optional[FhirString]=None, title: Optional[FhirString]=None, status: PublicationStatusCode, date: Optional[FhirDateTime]=None, publisher: Optional[FhirString]=None, contact: Optional[FhirList[ContactDetail]]=None, description: Optional[FhirMarkdown]=None, note: Optional[FhirList[Annotation]]=None, useContext: Optional[FhirList[UsageContext]]=None, jurisdiction: Optional[FhirList[CodeableConcept[JurisdictionValueSetCode]]]=None, copyright: Optional[FhirMarkdown]=None, approvalDate: Optional[FhirDate]=None, lastReviewDate: Optional[FhirDate]=None, effectivePeriod: Optional[Period]=None, topic: Optional[FhirList[CodeableConcept[DefinitionTopicCode]]]=None, author: Optional[FhirList[ContactDetail]]=None, editor: Optional[FhirList[ContactDetail]]=None, reviewer: Optional[FhirList[ContactDetail]]=None, endorser: Optional[FhirList[ContactDetail]]=None, relatedArtifact: Optional[FhirList[RelatedArtifact]]=None, synthesisType: Optional[CodeableConcept[SynthesisTypeCode]]=None, studyType: Optional[CodeableConcept[StudyTypeCode]]=None, population: Reference[EvidenceVariable], exposure: Optional[Reference[EvidenceVariable]]=None, outcome: Reference[EvidenceVariable], sampleSize: Optional[RiskEvidenceSynthesisSampleSize]=None, riskEstimate: Optional[RiskEvidenceSynthesisRiskEstimate]=None, certainty: Optional[FhirList[RiskEvidenceSynthesisCertainty]]=None) -> None: '\n The RiskEvidenceSynthesis resource describes the likelihood of an outcome in a\n population plus exposure state where the risk estimate is derived from a\n combination of research studies.\n If the element is present, it must have either a @value, an @id, or extensions\n\n :param id_: The logical id of the resource, as used in the URL for the resource. Once\n assigned, this value never changes.\n :param meta: The metadata about the resource. This is content that is maintained by the\n infrastructure. Changes to the content might not always be associated with\n version changes to the resource.\n :param implicitRules: A reference to a set of rules that were followed when the resource was\n constructed, and which must be understood when processing the content. Often,\n this is a reference to an implementation guide that defines the special rules\n along with other profiles etc.\n :param language: The base language in which the resource is written.\n :param text: A human-readable narrative that contains a summary of the resource and can be\n used to represent the content of the resource to a human. The narrative need\n not encode all the structured data, but is required to contain sufficient\n detail to make it "clinically safe" for a human to just read the narrative.\n Resource definitions may define what content should be represented in the\n narrative to ensure clinical safety.\n :param contained: These resources do not have an independent existence apart from the resource\n that contains them - they cannot be identified independently, and nor can they\n have their own independent transaction scope.\n :param extension: May be used to represent additional information that is not part of the basic\n definition of the resource. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n :param modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the resource and that modifies the understanding of the element\n that contains it and/or the understanding of the containing element\'s\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. Though any\n implementer is allowed to define an extension, there is a set of requirements\n that SHALL be met as part of the definition of the extension. Applications\n processing a resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n :param url: An absolute URI that is used to identify this risk evidence synthesis when it\n is referenced in a specification, model, design or an instance; also called\n its canonical identifier. This SHOULD be globally unique and SHOULD be a\n literal address at which an authoritative instance of this risk\n evidence synthesis is (or will be) published. This URL can be the target of a\n canonical reference. It SHALL remain the same when the risk evidence synthesis\n is stored on different servers.\n :param identifier: A formal identifier that is used to identify this risk evidence synthesis when\n it is represented in other formats, or referenced in a specification, model,\n design or an instance.\n :param version: The identifier that is used to identify this version of the risk evidence\n synthesis when it is referenced in a specification, model, design or instance.\n This is an arbitrary value managed by the risk evidence synthesis author and\n is not expected to be globally unique. For example, it might be a timestamp\n (e.g. yyyymmdd) if a managed version is not available. There is also no\n expectation that versions can be placed in a lexicographical sequence.\n :param name: A natural language name identifying the risk evidence synthesis. This name\n should be usable as an identifier for the module by machine processing\n applications such as code generation.\n :param title: A short, descriptive, user-friendly title for the risk evidence synthesis.\n :param status: The status of this risk evidence synthesis. Enables tracking the life-cycle of\n the content.\n :param date: The date (and optionally time) when the risk evidence synthesis was\n published. The date must change when the business version changes and it must\n change if the status code changes. In addition, it should change when the\n substantive content of the risk evidence synthesis changes.\n :param publisher: The name of the organization or individual that published the risk evidence\n synthesis.\n :param contact: Contact details to assist a user in finding and communicating with the\n publisher.\n :param description: A free text natural language description of the risk evidence synthesis from a\n consumer\'s perspective.\n :param note: A human-readable string to clarify or explain concepts about the resource.\n :param useContext: The content was developed with a focus and intent of supporting the contexts\n that are listed. These contexts may be general categories (gender, age, ...)\n or may be references to specific programs (insurance plans, studies, ...) and\n may be used to assist with indexing and searching for appropriate risk\n evidence synthesis instances.\n :param jurisdiction: A legal or geographic region in which the risk evidence synthesis is intended\n to be used.\n :param copyright: A copyright statement relating to the risk evidence synthesis and/or its\n contents. Copyright statements are generally legal restrictions on the use and\n publishing of the risk evidence synthesis.\n :param approvalDate: The date on which the resource content was approved by the publisher. Approval\n happens once when the content is officially approved for usage.\n :param lastReviewDate: The date on which the resource content was last reviewed. Review happens\n periodically after approval but does not change the original approval date.\n :param effectivePeriod: The period during which the risk evidence synthesis content was or is planned\n to be in active use.\n :param topic: Descriptive topics related to the content of the RiskEvidenceSynthesis. Topics\n provide a high-level categorization grouping types of EffectEvidenceSynthesiss\n that can be useful for filtering and searching.\n :param author: An individual or organization primarily involved in the creation and\n maintenance of the content.\n :param editor: An individual or organization primarily responsible for internal coherence of\n the content.\n :param reviewer: An individual or organization primarily responsible for review of some aspect\n of the content.\n :param endorser: An individual or organization responsible for officially endorsing the content\n for use in some setting.\n :param relatedArtifact: Related artifacts such as additional documentation, justification, or\n bibliographic references.\n :param synthesisType: Type of synthesis, e.g. meta-analysis.\n :param studyType: Type of study, e.g. randomized trial.\n :param population: A reference to an EvidenceVariable resource that defines the population for the\n research.\n :param exposure: A reference to an EvidenceVariable resource that defines the exposure for the\n research.\n :param outcome: A reference to an EvidenceVariable resource that defines the outcome for the\n research.\n :param sampleSize: A description of the size of the sample involved in the synthesis.\n :param riskEstimate: The estimated risk of the outcome.\n :param certainty: A description of the certainty of the risk estimate.\n ' super().__init__(resourceType='RiskEvidenceSynthesis', id_=id_, meta=meta, implicitRules=implicitRules, language=language, text=text, contained=contained, extension=extension, modifierExtension=modifierExtension, url=url, identifier=identifier, version=version, name=name, title=title, status=status, date=date, publisher=publisher, contact=contact, description=description, note=note, useContext=useContext, jurisdiction=jurisdiction, copyright=copyright, approvalDate=approvalDate, lastReviewDate=lastReviewDate, effectivePeriod=effectivePeriod, topic=topic, author=author, editor=editor, reviewer=reviewer, endorser=endorser, relatedArtifact=relatedArtifact, synthesisType=synthesisType, studyType=studyType, population=population, exposure=exposure, outcome=outcome, sampleSize=sampleSize, riskEstimate=riskEstimate, certainty=certainty)
def __init__(self, *, id_: Optional[FhirId]=None, meta: Optional[Meta]=None, implicitRules: Optional[FhirUri]=None, language: Optional[CommonLanguagesCode]=None, text: Optional[Narrative]=None, contained: Optional[FhirList[ResourceContainer]]=None, extension: Optional[FhirList[ExtensionBase]]=None, modifierExtension: Optional[FhirList[ExtensionBase]]=None, url: Optional[FhirUri]=None, identifier: Optional[FhirList[Identifier]]=None, version: Optional[FhirString]=None, name: Optional[FhirString]=None, title: Optional[FhirString]=None, status: PublicationStatusCode, date: Optional[FhirDateTime]=None, publisher: Optional[FhirString]=None, contact: Optional[FhirList[ContactDetail]]=None, description: Optional[FhirMarkdown]=None, note: Optional[FhirList[Annotation]]=None, useContext: Optional[FhirList[UsageContext]]=None, jurisdiction: Optional[FhirList[CodeableConcept[JurisdictionValueSetCode]]]=None, copyright: Optional[FhirMarkdown]=None, approvalDate: Optional[FhirDate]=None, lastReviewDate: Optional[FhirDate]=None, effectivePeriod: Optional[Period]=None, topic: Optional[FhirList[CodeableConcept[DefinitionTopicCode]]]=None, author: Optional[FhirList[ContactDetail]]=None, editor: Optional[FhirList[ContactDetail]]=None, reviewer: Optional[FhirList[ContactDetail]]=None, endorser: Optional[FhirList[ContactDetail]]=None, relatedArtifact: Optional[FhirList[RelatedArtifact]]=None, synthesisType: Optional[CodeableConcept[SynthesisTypeCode]]=None, studyType: Optional[CodeableConcept[StudyTypeCode]]=None, population: Reference[EvidenceVariable], exposure: Optional[Reference[EvidenceVariable]]=None, outcome: Reference[EvidenceVariable], sampleSize: Optional[RiskEvidenceSynthesisSampleSize]=None, riskEstimate: Optional[RiskEvidenceSynthesisRiskEstimate]=None, certainty: Optional[FhirList[RiskEvidenceSynthesisCertainty]]=None) -> None: '\n The RiskEvidenceSynthesis resource describes the likelihood of an outcome in a\n population plus exposure state where the risk estimate is derived from a\n combination of research studies.\n If the element is present, it must have either a @value, an @id, or extensions\n\n :param id_: The logical id of the resource, as used in the URL for the resource. Once\n assigned, this value never changes.\n :param meta: The metadata about the resource. This is content that is maintained by the\n infrastructure. Changes to the content might not always be associated with\n version changes to the resource.\n :param implicitRules: A reference to a set of rules that were followed when the resource was\n constructed, and which must be understood when processing the content. Often,\n this is a reference to an implementation guide that defines the special rules\n along with other profiles etc.\n :param language: The base language in which the resource is written.\n :param text: A human-readable narrative that contains a summary of the resource and can be\n used to represent the content of the resource to a human. 
The narrative need\n not encode all the structured data, but is required to contain sufficient\n detail to make it "clinically safe" for a human to just read the narrative.\n Resource definitions may define what content should be represented in the\n narrative to ensure clinical safety.\n :param contained: These resources do not have an independent existence apart from the resource\n that contains them - they cannot be identified independently, and nor can they\n have their own independent transaction scope.\n :param extension: May be used to represent additional information that is not part of the basic\n definition of the resource. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n :param modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the resource and that modifies the understanding of the element\n that contains it and/or the understanding of the containing element\'s\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. Though any\n implementer is allowed to define an extension, there is a set of requirements\n that SHALL be met as part of the definition of the extension. Applications\n processing a resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n :param url: An absolute URI that is used to identify this risk evidence synthesis when it\n is referenced in a specification, model, design or an instance; also called\n its canonical identifier. This SHOULD be globally unique and SHOULD be a\n literal address at which at which an authoritative instance of this risk\n evidence synthesis is (or will be) published. This URL can be the target of a\n canonical reference. It SHALL remain the same when the risk evidence synthesis\n is stored on different servers.\n :param identifier: A formal identifier that is used to identify this risk evidence synthesis when\n it is represented in other formats, or referenced in a specification, model,\n design or an instance.\n :param version: The identifier that is used to identify this version of the risk evidence\n synthesis when it is referenced in a specification, model, design or instance.\n This is an arbitrary value managed by the risk evidence synthesis author and\n is not expected to be globally unique. For example, it might be a timestamp\n (e.g. yyyymmdd) if a managed version is not available. There is also no\n expectation that versions can be placed in a lexicographical sequence.\n :param name: A natural language name identifying the risk evidence synthesis. This name\n should be usable as an identifier for the module by machine processing\n applications such as code generation.\n :param title: A short, descriptive, user-friendly title for the risk evidence synthesis.\n :param status: The status of this risk evidence synthesis. Enables tracking the life-cycle of\n the content.\n :param date: The date (and optionally time) when the risk evidence synthesis was\n published. 
The date must change when the business version changes and it must\n change if the status code changes. In addition, it should change when the\n substantive content of the risk evidence synthesis changes.\n :param publisher: The name of the organization or individual that published the risk evidence\n synthesis.\n :param contact: Contact details to assist a user in finding and communicating with the\n publisher.\n :param description: A free text natural language description of the risk evidence synthesis from a\n consumer\'s perspective.\n :param note: A human-readable string to clarify or explain concepts about the resource.\n :param useContext: The content was developed with a focus and intent of supporting the contexts\n that are listed. These contexts may be general categories (gender, age, ...)\n or may be references to specific programs (insurance plans, studies, ...) and\n may be used to assist with indexing and searching for appropriate risk\n evidence synthesis instances.\n :param jurisdiction: A legal or geographic region in which the risk evidence synthesis is intended\n to be used.\n :param copyright: A copyright statement relating to the risk evidence synthesis and/or its\n contents. Copyright statements are generally legal restrictions on the use and\n publishing of the risk evidence synthesis.\n :param approvalDate: The date on which the resource content was approved by the publisher. Approval\n happens once when the content is officially approved for usage.\n :param lastReviewDate: The date on which the resource content was last reviewed. Review happens\n periodically after approval but does not change the original approval date.\n :param effectivePeriod: The period during which the risk evidence synthesis content was or is planned\n to be in active use.\n :param topic: Descriptive topics related to the content of the RiskEvidenceSynthesis. 
Topics\n provide a high-level categorization grouping types of EffectEvidenceSynthesiss\n that can be useful for filtering and searching.\n :param author: An individiual or organization primarily involved in the creation and\n maintenance of the content.\n :param editor: An individual or organization primarily responsible for internal coherence of\n the content.\n :param reviewer: An individual or organization primarily responsible for review of some aspect\n of the content.\n :param endorser: An individual or organization responsible for officially endorsing the content\n for use in some setting.\n :param relatedArtifact: Related artifacts such as additional documentation, justification, or\n bibliographic references.\n :param synthesisType: Type of synthesis eg meta-analysis.\n :param studyType: Type of study eg randomized trial.\n :param population: A reference to a EvidenceVariable resource that defines the population for the\n research.\n :param exposure: A reference to a EvidenceVariable resource that defines the exposure for the\n research.\n :param outcome: A reference to a EvidenceVariable resomece that defines the outcome for the\n research.\n :param sampleSize: A description of the size of the sample involved in the synthesis.\n :param riskEstimate: The estimated risk of the outcome.\n :param certainty: A description of the certainty of the risk estimate.\n ' super().__init__(resourceType='RiskEvidenceSynthesis', id_=id_, meta=meta, implicitRules=implicitRules, language=language, text=text, contained=contained, extension=extension, modifierExtension=modifierExtension, url=url, identifier=identifier, version=version, name=name, title=title, status=status, date=date, publisher=publisher, contact=contact, description=description, note=note, useContext=useContext, jurisdiction=jurisdiction, copyright=copyright, approvalDate=approvalDate, lastReviewDate=lastReviewDate, effectivePeriod=effectivePeriod, topic=topic, author=author, editor=editor, reviewer=reviewer, endorser=endorser, relatedArtifact=relatedArtifact, synthesisType=synthesisType, studyType=studyType, population=population, exposure=exposure, outcome=outcome, sampleSize=sampleSize, riskEstimate=riskEstimate, certainty=certainty)<|docstring|>The RiskEvidenceSynthesis resource describes the likelihood of an outcome in a population plus exposure state where the risk estimate is derived from a combination of research studies. If the element is present, it must have either a @value, an @id, or extensions :param id_: The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes. :param meta: The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource. :param implicitRules: A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. Often, this is a reference to an implementation guide that defines the special rules along with other profiles etc. :param language: The base language in which the resource is written. :param text: A human-readable narrative that contains a summary of the resource and can be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. 
Resource definitions may define what content should be represented in the narrative to ensure clinical safety. :param contained: These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope. :param extension: May be used to represent additional information that is not part of the basic definition of the resource. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. :param modifierExtension: May be used to represent additional information that is not part of the basic definition of the resource and that modifies the understanding of the element that contains it and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions. Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself). :param url: An absolute URI that is used to identify this risk evidence synthesis when it is referenced in a specification, model, design or an instance; also called its canonical identifier. This SHOULD be globally unique and SHOULD be a literal address at which at which an authoritative instance of this risk evidence synthesis is (or will be) published. This URL can be the target of a canonical reference. It SHALL remain the same when the risk evidence synthesis is stored on different servers. :param identifier: A formal identifier that is used to identify this risk evidence synthesis when it is represented in other formats, or referenced in a specification, model, design or an instance. :param version: The identifier that is used to identify this version of the risk evidence synthesis when it is referenced in a specification, model, design or instance. This is an arbitrary value managed by the risk evidence synthesis author and is not expected to be globally unique. For example, it might be a timestamp (e.g. yyyymmdd) if a managed version is not available. There is also no expectation that versions can be placed in a lexicographical sequence. :param name: A natural language name identifying the risk evidence synthesis. This name should be usable as an identifier for the module by machine processing applications such as code generation. :param title: A short, descriptive, user-friendly title for the risk evidence synthesis. :param status: The status of this risk evidence synthesis. Enables tracking the life-cycle of the content. :param date: The date (and optionally time) when the risk evidence synthesis was published. The date must change when the business version changes and it must change if the status code changes. In addition, it should change when the substantive content of the risk evidence synthesis changes. :param publisher: The name of the organization or individual that published the risk evidence synthesis. 
:param contact: Contact details to assist a user in finding and communicating with the publisher. :param description: A free text natural language description of the risk evidence synthesis from a consumer's perspective. :param note: A human-readable string to clarify or explain concepts about the resource. :param useContext: The content was developed with a focus and intent of supporting the contexts that are listed. These contexts may be general categories (gender, age, ...) or may be references to specific programs (insurance plans, studies, ...) and may be used to assist with indexing and searching for appropriate risk evidence synthesis instances. :param jurisdiction: A legal or geographic region in which the risk evidence synthesis is intended to be used. :param copyright: A copyright statement relating to the risk evidence synthesis and/or its contents. Copyright statements are generally legal restrictions on the use and publishing of the risk evidence synthesis. :param approvalDate: The date on which the resource content was approved by the publisher. Approval happens once when the content is officially approved for usage. :param lastReviewDate: The date on which the resource content was last reviewed. Review happens periodically after approval but does not change the original approval date. :param effectivePeriod: The period during which the risk evidence synthesis content was or is planned to be in active use. :param topic: Descriptive topics related to the content of the RiskEvidenceSynthesis. Topics provide a high-level categorization grouping types of EffectEvidenceSyntheses that can be useful for filtering and searching. :param author: An individual or organization primarily involved in the creation and maintenance of the content. :param editor: An individual or organization primarily responsible for internal coherence of the content. :param reviewer: An individual or organization primarily responsible for review of some aspect of the content. :param endorser: An individual or organization responsible for officially endorsing the content for use in some setting. :param relatedArtifact: Related artifacts such as additional documentation, justification, or bibliographic references. :param synthesisType: Type of synthesis, e.g. meta-analysis. :param studyType: Type of study, e.g. randomized trial. :param population: A reference to an EvidenceVariable resource that defines the population for the research. :param exposure: A reference to an EvidenceVariable resource that defines the exposure for the research. :param outcome: A reference to an EvidenceVariable resource that defines the outcome for the research. :param sampleSize: A description of the size of the sample involved in the synthesis. :param riskEstimate: The estimated risk of the outcome. :param certainty: A description of the certainty of the risk estimate.<|endoftext|>
93aa22867e4422b26821cbbf9a334815f6366ff45424c6a3aab9c132ab4042d9
def key_from_ireq(ireq: InstallRequirement) -> str: 'Get a standardized key for an InstallRequirement.' if ((ireq.req is None) and (ireq.link is not None)): return str(ireq.link) else: return key_from_req(ireq.req)
Get a standardized key for an InstallRequirement.
piptools/utils.py
key_from_ireq
ssiano/pip-tools
2
python
def key_from_ireq(ireq: InstallRequirement) -> str: if ((ireq.req is None) and (ireq.link is not None)): return str(ireq.link) else: return key_from_req(ireq.req)
def key_from_ireq(ireq: InstallRequirement) -> str: if ((ireq.req is None) and (ireq.link is not None)): return str(ireq.link) else: return key_from_req(ireq.req)<|docstring|>Get a standardized key for an InstallRequirement.<|endoftext|>
1c99efb7a56979e9ff6de0741d131a2b190c221949019b40103eb39bb59796bc
def key_from_req(req: InstallRequirement) -> str: "Get an all-lowercase version of the requirement's name." if hasattr(req, 'key'): key = req.key else: key = req.name assert isinstance(key, str) key = key.replace('_', '-').lower() return key
Get an all-lowercase version of the requirement's name.
piptools/utils.py
key_from_req
ssiano/pip-tools
2
python
def key_from_req(req: InstallRequirement) -> str: if hasattr(req, 'key'): key = req.key else: key = req.name assert isinstance(key, str) key = key.replace('_', '-').lower() return key
def key_from_req(req: InstallRequirement) -> str: if hasattr(req, 'key'): key = req.key else: key = req.name assert isinstance(key, str) key = key.replace('_', '-').lower() return key<|docstring|>Get an all-lowercase version of the requirement's name.<|endoftext|>
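The normalization above is easy to check with a stand-in object; SimpleNamespace is a hypothetical substitute for pip's requirement classes, so treat this as a sketch rather than pip-tools' API:

from types import SimpleNamespace

def key_from_req(req):
    # prefer the legacy `.key` attribute when present, else fall back to `.name`
    key = req.key if hasattr(req, 'key') else req.name
    return key.replace('_', '-').lower()

print(key_from_req(SimpleNamespace(name='My_Package')))  # -> 'my-package'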
a97405221d3298816ece024f9f8d2719ce882bd8419ae3e7dab476afc32326f4
def is_url_requirement(ireq: InstallRequirement) -> bool: '\n Return True if requirement was specified as a path or URL.\n ireq.original_link will have been set by InstallRequirement.__init__\n ' return bool(ireq.original_link)
Return True if requirement was specified as a path or URL. ireq.original_link will have been set by InstallRequirement.__init__
piptools/utils.py
is_url_requirement
ssiano/pip-tools
2
python
def is_url_requirement(ireq: InstallRequirement) -> bool: '\n Return True if requirement was specified as a path or URL.\n ireq.original_link will have been set by InstallRequirement.__init__\n ' return bool(ireq.original_link)
def is_url_requirement(ireq: InstallRequirement) -> bool: '\n Return True if requirement was specified as a path or URL.\n ireq.original_link will have been set by InstallRequirement.__init__\n ' return bool(ireq.original_link)<|docstring|>Return True if requirement was specified as a path or URL. ireq.original_link will have been set by InstallRequirement.__init__<|endoftext|>
90e1300d9ba7ef928759b6a6cf298446f8d36b0c3fa344eae23f1705f5d639cc
def format_requirement(ireq: InstallRequirement, marker: Optional[Marker]=None, hashes: Optional[Set[str]]=None) -> str: '\n Generic formatter for pretty printing InstallRequirements to the terminal\n in a less verbose way than using its `__str__` method.\n ' if ireq.editable: line = f'-e {ireq.link.url}' elif is_url_requirement(ireq): line = ireq.link.url else: line = str(ireq.req).lower() if marker: line = f'{line} ; {marker}' if hashes: for hash_ in sorted(hashes): line += f''' \ --hash={hash_}''' return line
Generic formatter for pretty printing InstallRequirements to the terminal in a less verbose way than using its `__str__` method.
piptools/utils.py
format_requirement
ssiano/pip-tools
2
python
def format_requirement(ireq: InstallRequirement, marker: Optional[Marker]=None, hashes: Optional[Set[str]]=None) -> str: '\n Generic formatter for pretty printing InstallRequirements to the terminal\n in a less verbose way than using its `__str__` method.\n ' if ireq.editable: line = f'-e {ireq.link.url}' elif is_url_requirement(ireq): line = ireq.link.url else: line = str(ireq.req).lower() if marker: line = f'{line} ; {marker}' if hashes: for hash_ in sorted(hashes): line += f' \ --hash={hash_}' return line
def format_requirement(ireq: InstallRequirement, marker: Optional[Marker]=None, hashes: Optional[Set[str]]=None) -> str: '\n Generic formatter for pretty printing InstallRequirements to the terminal\n in a less verbose way than using its `__str__` method.\n ' if ireq.editable: line = f'-e {ireq.link.url}' elif is_url_requirement(ireq): line = ireq.link.url else: line = str(ireq.req).lower() if marker: line = f'{line} ; {marker}' if hashes: for hash_ in sorted(hashes): line += f' \ --hash={hash_}' return line<|docstring|>Generic formatter for pretty printing InstallRequirements to the terminal in a less verbose way than using its `__str__` method.<|endoftext|>
3081c1402b147cd0f26a025073914281fd4d53d8a49473c93af5783dce0402dd
def format_specifier(ireq: InstallRequirement) -> str: '\n Generic formatter for pretty printing the specifier part of\n InstallRequirements to the terminal.\n ' specs = (ireq.specifier if (ireq.req is not None) else SpecifierSet()) specs = sorted(specs, key=(lambda x: x.version)) return (','.join((str(s) for s in specs)) or '<any>')
Generic formatter for pretty printing the specifier part of InstallRequirements to the terminal.
piptools/utils.py
format_specifier
ssiano/pip-tools
2
python
def format_specifier(ireq: InstallRequirement) -> str: '\n Generic formatter for pretty printing the specifier part of\n InstallRequirements to the terminal.\n ' specs = (ireq.specifier if (ireq.req is not None) else SpecifierSet()) specs = sorted(specs, key=(lambda x: x.version)) return (','.join((str(s) for s in specs)) or '<any>')
def format_specifier(ireq: InstallRequirement) -> str: '\n Generic formatter for pretty printing the specifier part of\n InstallRequirements to the terminal.\n ' specs = (ireq.specifier if (ireq.req is not None) else SpecifierSet()) specs = sorted(specs, key=(lambda x: x.version)) return (','.join((str(s) for s in specs)) or '<any>')<|docstring|>Generic formatter for pretty printing the specifier part of InstallRequirements to the terminal.<|endoftext|>
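The same sort-and-join can be reproduced with packaging's SpecifierSet; the exact specifier type used inside pip-tools may differ, so this is an approximation:

from packaging.specifiers import SpecifierSet

specs = sorted(SpecifierSet('<2.0,>=1.4'), key=lambda s: s.version)
print(','.join(str(s) for s in specs) or '<any>')             # >=1.4,<2.0
print(','.join(str(s) for s in SpecifierSet('')) or '<any>')  # <any>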
d1d31a9d5ea879150e8f17aadfacde138fdbfe9afa175c8995ce299975b80141
def is_pinned_requirement(ireq: InstallRequirement) -> bool: '\n Returns whether an InstallRequirement is a "pinned" requirement.\n\n An InstallRequirement is considered pinned if:\n\n - Is not editable\n - It has exactly one specifier\n - That specifier is "=="\n - The version does not contain a wildcard\n\n Examples:\n django==1.8 # pinned\n django>1.8 # NOT pinned\n django~=1.8 # NOT pinned\n django==1.* # NOT pinned\n ' if ireq.editable: return False if ((ireq.req is None) or (len(ireq.specifier) != 1)): return False spec = next(iter(ireq.specifier)) return ((spec.operator in {'==', '==='}) and (not spec.version.endswith('.*')))
Returns whether an InstallRequirement is a "pinned" requirement. An InstallRequirement is considered pinned if: - Is not editable - It has exactly one specifier - That specifier is "==" - The version does not contain a wildcard Examples: django==1.8 # pinned django>1.8 # NOT pinned django~=1.8 # NOT pinned django==1.* # NOT pinned
piptools/utils.py
is_pinned_requirement
ssiano/pip-tools
2
python
def is_pinned_requirement(ireq: InstallRequirement) -> bool: '\n Returns whether an InstallRequirement is a "pinned" requirement.\n\n An InstallRequirement is considered pinned if:\n\n - Is not editable\n - It has exactly one specifier\n - That specifier is "=="\n - The version does not contain a wildcard\n\n Examples:\n django==1.8 # pinned\n django>1.8 # NOT pinned\n django~=1.8 # NOT pinned\n django==1.* # NOT pinned\n ' if ireq.editable: return False if ((ireq.req is None) or (len(ireq.specifier) != 1)): return False spec = next(iter(ireq.specifier)) return ((spec.operator in {'==', '==='}) and (not spec.version.endswith('.*')))
def is_pinned_requirement(ireq: InstallRequirement) -> bool: '\n Returns whether an InstallRequirement is a "pinned" requirement.\n\n An InstallRequirement is considered pinned if:\n\n - Is not editable\n - It has exactly one specifier\n - That specifier is "=="\n - The version does not contain a wildcard\n\n Examples:\n django==1.8 # pinned\n django>1.8 # NOT pinned\n django~=1.8 # NOT pinned\n django==1.* # NOT pinned\n ' if ireq.editable: return False if ((ireq.req is None) or (len(ireq.specifier) != 1)): return False spec = next(iter(ireq.specifier)) return ((spec.operator in {'==', '==='}) and (not spec.version.endswith('.*')))<|docstring|>Returns whether an InstallRequirement is a "pinned" requirement. An InstallRequirement is considered pinned if: - Is not editable - It has exactly one specifier - That specifier is "==" - The version does not contain a wildcard Examples: django==1.8 # pinned django>1.8 # NOT pinned django~=1.8 # NOT pinned django==1.* # NOT pinned<|endoftext|>
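The operator and wildcard test at the heart of this check can be exercised with packaging alone; a minimal sketch that mirrors the docstring examples without building real InstallRequirements:

from packaging.specifiers import SpecifierSet

def looks_pinned(spec_str):
    specs = SpecifierSet(spec_str)
    if len(specs) != 1:
        return False
    spec = next(iter(specs))
    return spec.operator in {'==', '==='} and not spec.version.endswith('.*')

for s in ('==1.8', '>1.8', '~=1.8', '==1.*'):
    print(s, looks_pinned(s))  # only '==1.8' prints True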
adb22cf621ec0d1f3024bd86b08154e8f6caf84defb6f2e58f7d0398fc5523fd
def as_tuple(ireq: InstallRequirement) -> Tuple[(str, str, Tuple[(str, ...)])]: '\n Pulls out the (name: str, version:str, extras:(str)) tuple from\n the pinned InstallRequirement.\n ' if (not is_pinned_requirement(ireq)): raise TypeError(f'Expected a pinned InstallRequirement, got {ireq}') name = key_from_ireq(ireq) version = next(iter(ireq.specifier)).version extras = tuple(sorted(ireq.extras)) return (name, version, extras)
Pulls out the (name: str, version:str, extras:(str)) tuple from the pinned InstallRequirement.
piptools/utils.py
as_tuple
ssiano/pip-tools
2
python
def as_tuple(ireq: InstallRequirement) -> Tuple[(str, str, Tuple[(str, ...)])]: '\n Pulls out the (name: str, version:str, extras:(str)) tuple from\n the pinned InstallRequirement.\n ' if (not is_pinned_requirement(ireq)): raise TypeError(f'Expected a pinned InstallRequirement, got {ireq}') name = key_from_ireq(ireq) version = next(iter(ireq.specifier)).version extras = tuple(sorted(ireq.extras)) return (name, version, extras)
def as_tuple(ireq: InstallRequirement) -> Tuple[(str, str, Tuple[(str, ...)])]: '\n Pulls out the (name: str, version:str, extras:(str)) tuple from\n the pinned InstallRequirement.\n ' if (not is_pinned_requirement(ireq)): raise TypeError(f'Expected a pinned InstallRequirement, got {ireq}') name = key_from_ireq(ireq) version = next(iter(ireq.specifier)).version extras = tuple(sorted(ireq.extras)) return (name, version, extras)<|docstring|>Pulls out the (name: str, version:str, extras:(str)) tuple from the pinned InstallRequirement.<|endoftext|>
7c396ab1ad8b640b66ae76ce7bd82ce8a948aaf8a768cedd5390acf34b62c7e4
def flat_map(fn: Callable[([_T], Iterable[_S])], collection: Iterable[_T]) -> Iterator[_S]: 'Map a function over a collection and flatten the result by one level' return itertools.chain.from_iterable(map(fn, collection))
Map a function over a collection and flatten the result by one level
piptools/utils.py
flat_map
ssiano/pip-tools
2
python
def flat_map(fn: Callable[([_T], Iterable[_S])], collection: Iterable[_T]) -> Iterator[_S]: return itertools.chain.from_iterable(map(fn, collection))
def flat_map(fn: Callable[([_T], Iterable[_S])], collection: Iterable[_T]) -> Iterator[_S]: return itertools.chain.from_iterable(map(fn, collection))<|docstring|>Map a function over a collection and flatten the result by one level<|endoftext|>
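A standalone copy makes the behaviour concrete:

import itertools

def flat_map(fn, collection):
    # map, then flatten exactly one level
    return itertools.chain.from_iterable(map(fn, collection))

print(list(flat_map(range, [1, 2, 3])))  # [0, 0, 1, 0, 1, 2]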
ebb3df47efba3d47e0ec42559d7e4045a6a590e665fd7ab323c586d180384e2b
def lookup_table_from_tuples(values: Iterable[Tuple[(_KT, _VT)]]) -> Dict[(_KT, Set[_VT])]: '\n Builds a dict-based lookup table (index) elegantly.\n ' lut: Dict[(_KT, Set[_VT])] = collections.defaultdict(set) for (k, v) in values: lut[k].add(v) return dict(lut)
Builds a dict-based lookup table (index) elegantly.
piptools/utils.py
lookup_table_from_tuples
ssiano/pip-tools
2
python
def lookup_table_from_tuples(values: Iterable[Tuple[(_KT, _VT)]]) -> Dict[(_KT, Set[_VT])]: '\n \n ' lut: Dict[(_KT, Set[_VT])] = collections.defaultdict(set) for (k, v) in values: lut[k].add(v) return dict(lut)
def lookup_table_from_tuples(values: Iterable[Tuple[(_KT, _VT)]]) -> Dict[(_KT, Set[_VT])]: '\n \n ' lut: Dict[(_KT, Set[_VT])] = collections.defaultdict(set) for (k, v) in values: lut[k].add(v) return dict(lut)<|docstring|>Builds a dict-based lookup table (index) elegantly.<|endoftext|>
51d79129fed6b69c8b90bf2a9a4c3d0de5c032e7bd499434ab842761ea368a0b
def lookup_table(values: Iterable[_VT], key: Callable[([_VT], _KT)]) -> Dict[(_KT, Set[_VT])]: '\n Builds a dict-based lookup table (index) elegantly.\n ' return lookup_table_from_tuples(((key(v), v) for v in values))
Builds a dict-based lookup table (index) elegantly.
piptools/utils.py
lookup_table
ssiano/pip-tools
2
python
def lookup_table(values: Iterable[_VT], key: Callable[([_VT], _KT)]) -> Dict[(_KT, Set[_VT])]: '\n \n ' return lookup_table_from_tuples(((key(v), v) for v in values))
def lookup_table(values: Iterable[_VT], key: Callable[([_VT], _KT)]) -> Dict[(_KT, Set[_VT])]: '\n \n ' return lookup_table_from_tuples(((key(v), v) for v in values))<|docstring|>Builds a dict-based lookup table (index) elegantly.<|endoftext|>
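The two lookup-table helpers compose, with lookup_table mapping a key function over the values before delegating; a short self-contained demo:

import collections

def lookup_table_from_tuples(values):
    lut = collections.defaultdict(set)
    for k, v in values:
        lut[k].add(v)
    return dict(lut)

def lookup_table(values, key):
    return lookup_table_from_tuples((key(v), v) for v in values)

print(lookup_table_from_tuples([('a', 1), ('a', 2), ('b', 3)]))
# {'a': {1, 2}, 'b': {3}}
print(lookup_table(['apple', 'avocado', 'banana'], key=lambda w: w[0]))
# {'a': {'apple', 'avocado'}, 'b': {'banana'}}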
eb90a70998efba68ab3467a36a0275abda25e3f4c5b69970f3794f86b349c751
def dedup(iterable: Iterable[_T]) -> Iterable[_T]: 'Deduplicate an iterable object like iter(set(iterable)) but\n order-preserved.\n ' return iter(dict.fromkeys(iterable))
Deduplicate an iterable object like iter(set(iterable)) but order-preserved.
piptools/utils.py
dedup
ssiano/pip-tools
2
python
def dedup(iterable: Iterable[_T]) -> Iterable[_T]: 'Deduplicate an iterable object like iter(set(iterable)) but\n order-preserved.\n ' return iter(dict.fromkeys(iterable))
def dedup(iterable: Iterable[_T]) -> Iterable[_T]: 'Deduplicate an iterable object like iter(set(iterable)) but\n order-preserved.\n ' return iter(dict.fromkeys(iterable))<|docstring|>Deduplicate an iterable object like iter(set(iterable)) but order-preserved.<|endoftext|>
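The trick relies on dict preserving insertion order (guaranteed since Python 3.7):

def dedup(iterable):
    return iter(dict.fromkeys(iterable))

print(list(dedup([3, 1, 3, 2, 1])))  # [3, 1, 2]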
93eb8f07c9fa97dac2b18a73de9d3cdb0bfbeb522797c617f5403a680df77e42
def drop_extras(ireq: InstallRequirement) -> None: 'Remove "extra" markers (PEP-508) from requirement.' if (ireq.markers is None): return ireq.markers._markers = _drop_extras(ireq.markers._markers) if (not ireq.markers._markers): ireq.markers = None
Remove "extra" markers (PEP-508) from requirement.
piptools/utils.py
drop_extras
ssiano/pip-tools
2
python
def drop_extras(ireq: InstallRequirement) -> None: if (ireq.markers is None): return ireq.markers._markers = _drop_extras(ireq.markers._markers) if (not ireq.markers._markers): ireq.markers = None
def drop_extras(ireq: InstallRequirement) -> None: if (ireq.markers is None): return ireq.markers._markers = _drop_extras(ireq.markers._markers) if (not ireq.markers._markers): ireq.markers = None<|docstring|>Remove "extra" markers (PEP-508) from requirement.<|endoftext|>
9333f707ad6b3b49bdeb91badac6cfa2103d486fe2a5d6fc990489a175cd4ddd
def get_hashes_from_ireq(ireq: InstallRequirement) -> Set[str]: '\n Given an InstallRequirement, return a set of string hashes in the format\n "{algorithm}:{hash}". Return an empty set if there are no hashes in the\n requirement options.\n ' result = set() for (algorithm, hexdigests) in ireq.hash_options.items(): for hash_ in hexdigests: result.add(f'{algorithm}:{hash_}') return result
Given an InstallRequirement, return a set of string hashes in the format "{algorithm}:{hash}". Return an empty set if there are no hashes in the requirement options.
piptools/utils.py
get_hashes_from_ireq
ssiano/pip-tools
2
python
def get_hashes_from_ireq(ireq: InstallRequirement) -> Set[str]: '\n Given an InstallRequirement, return a set of string hashes in the format\n "{algorithm}:{hash}". Return an empty set if there are no hashes in the\n requirement options.\n ' result = set() for (algorithm, hexdigests) in ireq.hash_options.items(): for hash_ in hexdigests: result.add(f'{algorithm}:{hash_}') return result
def get_hashes_from_ireq(ireq: InstallRequirement) -> Set[str]: '\n Given an InstallRequirement, return a set of string hashes in the format\n "{algorithm}:{hash}". Return an empty set if there are no hashes in the\n requirement options.\n ' result = set() for (algorithm, hexdigests) in ireq.hash_options.items(): for hash_ in hexdigests: result.add(f'{algorithm}:{hash_}') return result<|docstring|>Given an InstallRequirement, return a set of string hashes in the format "{algorithm}:{hash}". Return an empty set if there are no hashes in the requirement options.<|endoftext|>
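With a stand-in requirement object (SimpleNamespace is hypothetical, not pip's class) the output format is easy to see:

from types import SimpleNamespace

def get_hashes_from_ireq(ireq):
    # same "{algorithm}:{hash}" formatting as above, as a set comprehension
    return {f'{algorithm}:{digest}'
            for algorithm, hexdigests in ireq.hash_options.items()
            for digest in hexdigests}

ireq = SimpleNamespace(hash_options={'sha256': ['aaa111', 'bbb222']})
print(sorted(get_hashes_from_ireq(ireq)))  # ['sha256:aaa111', 'sha256:bbb222']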
bb931e9eb0dc729b267c7d5edd26e58e71cbb63e392013aa37fc5131a464ce16
def get_compile_command(click_ctx: click.Context) -> str: "\n Returns a normalized compile command depending on cli context.\n\n The command will be normalized by:\n - expanding options short to long\n - removing values that are already default\n - sorting the arguments\n - removing one-off arguments like '--upgrade'\n - removing arguments that don't change build behaviour like '--verbose'\n " from piptools.scripts.compile import cli compile_options = {option.name: option for option in cli.params} left_args = [] right_args = [] for (option_name, value) in click_ctx.params.items(): option = compile_options[option_name] if (option.nargs < 0): if any(((val.startswith('-') and (val != '-')) for val in value)): right_args.append('--') right_args.extend([shlex.quote(val) for val in value]) continue assert isinstance(option, click.Option) option_long_name = option.opts[(- 1)] if (option_long_name in COMPILE_EXCLUDE_OPTIONS): continue if ((option.default is None) and (not value)): continue if (option.default == value): continue if isinstance(value, LazyFile): value = value.name if (not isinstance(value, (tuple, list))): value = [value] for val in value: if option.is_flag: if option.secondary_opts: secondary_option_long_name = option.secondary_opts[(- 1)] arg = (option_long_name if val else secondary_option_long_name) else: arg = option_long_name left_args.append(shlex.quote(arg)) else: if (isinstance(val, str) and is_url(val)): val = redact_auth_from_url(val) if (option.name == 'pip_args_str'): left_args.append(f'{option_long_name}={repr(val)}') else: left_args.append(f'{option_long_name}={shlex.quote(str(val))}') return ' '.join(['pip-compile', *sorted(left_args), *sorted(right_args)])
Returns a normalized compile command depending on cli context. The command will be normalized by: - expanding options short to long - removing values that are already default - sorting the arguments - removing one-off arguments like '--upgrade' - removing arguments that don't change build behaviour like '--verbose'
piptools/utils.py
get_compile_command
ssiano/pip-tools
2
python
def get_compile_command(click_ctx: click.Context) -> str: "\n Returns a normalized compile command depending on cli context.\n\n The command will be normalized by:\n - expanding options short to long\n - removing values that are already default\n - sorting the arguments\n - removing one-off arguments like '--upgrade'\n - removing arguments that don't change build behaviour like '--verbose'\n " from piptools.scripts.compile import cli compile_options = {option.name: option for option in cli.params} left_args = [] right_args = [] for (option_name, value) in click_ctx.params.items(): option = compile_options[option_name] if (option.nargs < 0): if any(((val.startswith('-') and (val != '-')) for val in value)): right_args.append('--') right_args.extend([shlex.quote(val) for val in value]) continue assert isinstance(option, click.Option) option_long_name = option.opts[(- 1)] if (option_long_name in COMPILE_EXCLUDE_OPTIONS): continue if ((option.default is None) and (not value)): continue if (option.default == value): continue if isinstance(value, LazyFile): value = value.name if (not isinstance(value, (tuple, list))): value = [value] for val in value: if option.is_flag: if option.secondary_opts: secondary_option_long_name = option.secondary_opts[(- 1)] arg = (option_long_name if val else secondary_option_long_name) else: arg = option_long_name left_args.append(shlex.quote(arg)) else: if (isinstance(val, str) and is_url(val)): val = redact_auth_from_url(val) if (option.name == 'pip_args_str'): left_args.append(f'{option_long_name}={repr(val)}') else: left_args.append(f'{option_long_name}={shlex.quote(str(val))}') return ' '.join(['pip-compile', *sorted(left_args), *sorted(right_args)])
def get_compile_command(click_ctx: click.Context) -> str: "\n Returns a normalized compile command depending on cli context.\n\n The command will be normalized by:\n - expanding options short to long\n - removing values that are already default\n - sorting the arguments\n - removing one-off arguments like '--upgrade'\n - removing arguments that don't change build behaviour like '--verbose'\n " from piptools.scripts.compile import cli compile_options = {option.name: option for option in cli.params} left_args = [] right_args = [] for (option_name, value) in click_ctx.params.items(): option = compile_options[option_name] if (option.nargs < 0): if any(((val.startswith('-') and (val != '-')) for val in value)): right_args.append('--') right_args.extend([shlex.quote(val) for val in value]) continue assert isinstance(option, click.Option) option_long_name = option.opts[(- 1)] if (option_long_name in COMPILE_EXCLUDE_OPTIONS): continue if ((option.default is None) and (not value)): continue if (option.default == value): continue if isinstance(value, LazyFile): value = value.name if (not isinstance(value, (tuple, list))): value = [value] for val in value: if option.is_flag: if option.secondary_opts: secondary_option_long_name = option.secondary_opts[(- 1)] arg = (option_long_name if val else secondary_option_long_name) else: arg = option_long_name left_args.append(shlex.quote(arg)) else: if (isinstance(val, str) and is_url(val)): val = redact_auth_from_url(val) if (option.name == 'pip_args_str'): left_args.append(f'{option_long_name}={repr(val)}') else: left_args.append(f'{option_long_name}={shlex.quote(str(val))}') return ' '.join(['pip-compile', *sorted(left_args), *sorted(right_args)])<|docstring|>Returns a normalized compile command depending on cli context. The command will be normalized by: - expanding options short to long - removing values that are already default - sorting the arguments - removing one-off arguments like '--upgrade' - removing arguments that don't change build behaviour like '--verbose'<|endoftext|>
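The final assembly step, quoting and sorting flags while keeping positional arguments last, can be shown in isolation (the option values below are invented):

import shlex

left_args = ['--generate-hashes', '--output-file=' + shlex.quote('requirements.txt')]
right_args = [shlex.quote('requirements.in')]
print(' '.join(['pip-compile', *sorted(left_args), *sorted(right_args)]))
# pip-compile --generate-hashes --output-file=requirements.txt requirements.in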
7d3cf7f7ba7f8778d823e6fcac2f6a18cd241cd616a9fd4481c32f8e25978419
def get_required_pip_specification() -> SpecifierSet: '\n Returns pip version specifier requested by current pip-tools installation.\n ' project_dist = get_distribution('pip-tools') requirement = next((r for r in project_dist.requires() if (r.name == 'pip')), None) assert (requirement is not None), "'pip' is expected to be in the list of pip-tools requirements" return requirement.specifier
Returns pip version specifier requested by current pip-tools installation.
piptools/utils.py
get_required_pip_specification
ssiano/pip-tools
2
python
def get_required_pip_specification() -> SpecifierSet: '\n \n ' project_dist = get_distribution('pip-tools') requirement = next((r for r in project_dist.requires() if (r.name == 'pip')), None) assert (requirement is not None), "'pip' is expected to be in the list of pip-tools requirements" return requirement.specifier
def get_required_pip_specification() -> SpecifierSet: '\n \n ' project_dist = get_distribution('pip-tools') requirement = next((r for r in project_dist.requires() if (r.name == 'pip')), None) assert (requirement is not None), "'pip' is expected to be in the list of pip-tools requirements" return requirement.specifier<|docstring|>Returns pip version specifier requested by current pip-tools installation.<|endoftext|>
e2ff1c72c89952e81b3a941bedd8886b3131a041d010795a11d4b913ee854014
def get_pip_version_for_python_executable(python_executable: str) -> Version: '\n Returns pip version for the given python executable.\n ' str_version = run_python_snippet(python_executable, 'import pip;print(pip.__version__)') return Version(str_version)
Returns pip version for the given python executable.
piptools/utils.py
get_pip_version_for_python_executable
ssiano/pip-tools
2
python
def get_pip_version_for_python_executable(python_executable: str) -> Version: '\n \n ' str_version = run_python_snippet(python_executable, 'import pip;print(pip.__version__)') return Version(str_version)
def get_pip_version_for_python_executable(python_executable: str) -> Version: '\n \n ' str_version = run_python_snippet(python_executable, 'import pip;print(pip.__version__)') return Version(str_version)<|docstring|>Returns pip version for the given python executable.<|endoftext|>
c6952a921947229508ed97fabab35845d1b74bb3cb9def8188d76d82870e0099
def get_sys_path_for_python_executable(python_executable: str) -> List[str]: '\n Returns sys.path list for the given python executable.\n ' result = run_python_snippet(python_executable, 'import sys;import json;print(json.dumps(sys.path))') paths = json.loads(result) assert isinstance(paths, list) assert all((isinstance(i, str) for i in paths)) return [os.path.abspath(path) for path in paths]
Returns sys.path list for the given python executable.
piptools/utils.py
get_sys_path_for_python_executable
ssiano/pip-tools
2
python
def get_sys_path_for_python_executable(python_executable: str) -> List[str]: '\n \n ' result = run_python_snippet(python_executable, 'import sys;import json;print(json.dumps(sys.path))') paths = json.loads(result) assert isinstance(paths, list) assert all((isinstance(i, str) for i in paths)) return [os.path.abspath(path) for path in paths]
def get_sys_path_for_python_executable(python_executable: str) -> List[str]: '\n \n ' result = run_python_snippet(python_executable, 'import sys;import json;print(json.dumps(sys.path))') paths = json.loads(result) assert isinstance(paths, list) assert all((isinstance(i, str) for i in paths)) return [os.path.abspath(path) for path in paths]<|docstring|>Returns sys.path list for the given python executable.<|endoftext|>
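run_python_snippet is not shown in this record; assuming it shells out to the target interpreter, a subprocess equivalent would look like:

import json
import subprocess
import sys

snippet = 'import sys;import json;print(json.dumps(sys.path))'
out = subprocess.run([sys.executable, '-c', snippet],
                     capture_output=True, text=True, check=True).stdout
print(json.loads(out)[:3])  # first few sys.path entries for this interpreter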
7729409e782a067b2dff2bfb338b9091bf86203ad8384008bb78145e56e04e3c
def update_metadata(manuscript: BnF) -> None: '\n Update /m-k-manuscript-data/metadata/entry_metadata.csv with the current manuscript. Create a Pandas DataFrame\n indexed by entry. Create data columns, and remove the column that contains the entry objects. Save File.\n\n Input:\n manuscript -- Python object of the manuscript defined in digital_manuscript.py\n Output:\n None\n ' df = pd.DataFrame(columns=['entry'], data=manuscript.entries.values()) df['folio'] = df.entry.apply((lambda x: x.folio)) df['folio_display'] = df.entry.apply((lambda x: x.folio.lstrip('0'))) df['div_id'] = df.entry.apply((lambda x: x.identity)) df['categories'] = df.entry.apply((lambda x: ';'.join(x.categories))) df['heading_tc'] = df.entry.apply((lambda x: x.title['tc'])) df['heading_tcn'] = df.entry.apply((lambda x: x.title['tcn'])) df['heading_tl'] = df.entry.apply((lambda x: x.title['tl'])) df['margins'] = df.entry.apply((lambda x: len(x.margins))) df['del_tags'] = df.entry.apply((lambda x: '; '.join(x.del_tags))) for prop in properties: df[prop] = df.entry.apply((lambda x: '; '.join(x.get_prop(prop=prop, version='tc')))) df.drop(columns=['entry'], inplace=True) df.to_csv(f'{m_path}/metadata/entry_metadata.csv', index=False)
Update /m-k-manuscript-data/metadata/entry_metadata.csv with the current manuscript. Create a Pandas DataFrame indexed by entry. Create data columns, and remove the column that contains the entry objects. Save File. Input: manuscript -- Python object of the manuscript defined in digital_manuscript.py Output: None
ronikaufman_sp21_semantic-visualizations/update.py
update_metadata
cu-mkp/sandbox-projects
0
python
def update_metadata(manuscript: BnF) -> None: '\n Update /m-k-manuscript-data/metadata/entry_metadata.csv with the current manuscript. Create a Pandas DataFrame\n indexed by entry. Create data columns, and remove the column that contains the entry objects. Save File.\n\n Input:\n manuscript -- Python object of the manuscript defined in digital_manuscript.py\n Output:\n None\n ' df = pd.DataFrame(columns=['entry'], data=manuscript.entries.values()) df['folio'] = df.entry.apply((lambda x: x.folio)) df['folio_display'] = df.entry.apply((lambda x: x.folio.lstrip('0'))) df['div_id'] = df.entry.apply((lambda x: x.identity)) df['categories'] = df.entry.apply((lambda x: ';'.join(x.categories))) df['heading_tc'] = df.entry.apply((lambda x: x.title['tc'])) df['heading_tcn'] = df.entry.apply((lambda x: x.title['tcn'])) df['heading_tl'] = df.entry.apply((lambda x: x.title['tl'])) df['margins'] = df.entry.apply((lambda x: len(x.margins))) df['del_tags'] = df.entry.apply((lambda x: '; '.join(x.del_tags))) for prop in properties: df[prop] = df.entry.apply((lambda x: '; '.join(x.get_prop(prop=prop, version='tc')))) df.drop(columns=['entry'], inplace=True) df.to_csv(f'{m_path}/metadata/entry_metadata.csv', index=False)
def update_metadata(manuscript: BnF) -> None: '\n Update /m-k-manuscript-data/metadata/entry_metadata.csv with the current manuscript. Create a Pandas DataFrame\n indexed by entry. Create data columns, and remove the column that contains the entry objects. Save File.\n\n Input:\n manuscript -- Python object of the manuscript defined in digital_manuscript.py\n Output:\n None\n ' df = pd.DataFrame(columns=['entry'], data=manuscript.entries.values()) df['folio'] = df.entry.apply((lambda x: x.folio)) df['folio_display'] = df.entry.apply((lambda x: x.folio.lstrip('0'))) df['div_id'] = df.entry.apply((lambda x: x.identity)) df['categories'] = df.entry.apply((lambda x: ';'.join(x.categories))) df['heading_tc'] = df.entry.apply((lambda x: x.title['tc'])) df['heading_tcn'] = df.entry.apply((lambda x: x.title['tcn'])) df['heading_tl'] = df.entry.apply((lambda x: x.title['tl'])) df['margins'] = df.entry.apply((lambda x: len(x.margins))) df['del_tags'] = df.entry.apply((lambda x: '; '.join(x.del_tags))) for prop in properties: df[prop] = df.entry.apply((lambda x: '; '.join(x.get_prop(prop=prop, version='tc')))) df.drop(columns=['entry'], inplace=True) df.to_csv(f'{m_path}/metadata/entry_metadata.csv', index=False)<|docstring|>Update /m-k-manuscript-data/metadata/entry_metadata.csv with the current manuscript. Create a Pandas DataFrame indexed by entry. Create data columns, and remove the column that contains the entry objects. Save File. Input: manuscript -- Python object of the manuscript defined in digital_manuscript.py Output: None<|endoftext|>
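The underlying pattern, deriving flat columns from objects held in a DataFrame column and then dropping the object column, runs on toy data; Entry below is a made-up stand-in for the manuscript entry class:

import pandas as pd

class Entry:
    def __init__(self, folio, title):
        self.folio = folio
        self.title = title

df = pd.DataFrame({'entry': [Entry('004r', 'azure'), Entry('017v', 'varnish')]})
df['folio'] = df.entry.apply(lambda e: e.folio)
df['folio_display'] = df.folio.str.lstrip('0')
df['heading'] = df.entry.apply(lambda e: e.title)
df = df.drop(columns=['entry'])  # keep only the flat metadata columns
print(df.to_string(index=False))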
3bb2610ab679e9b49b7479da2610eed1cf8d83fa30352d3a44f4a8959c928ed7
def update_entries(manuscript: BnF) -> None: '\n Update /m-k-manuscript-data/entries/ with the current manuscript from /ms-xml/. For each version, delete all existing\n entries. Regenerate folio text entry by entry, and save the file.\n\n Input:\n manuscript -- Python object of the manuscript defined in digital_manuscript.py\n Output:\n None\n ' for path in [f'{m_path}/entries', f'{m_path}/entries/txt', f'{m_path}/entries/xml']: if (not os.path.exists(path)): os.mkdir(path) for version in versions: txt_path = f'{m_path}/entries/txt/{version}' xml_path = f'{m_path}/entries/xml/{version}' for path in [txt_path, xml_path]: if (not os.path.exists(path)): os.mkdir(path) for (identity, entry) in manuscript.entries.items(): if identity: filename_txt = f'{txt_path}/{version}_{entry.identity}.txt' filename_xml = f'{xml_path}/{version}_{entry.identity}.xml' content_txt = entry.text(version, xml=False) content_xml = entry.text(version, xml=True) f_txt = open(filename_txt, 'w') f_txt.write(content_txt) f_txt.close() f_xml = open(filename_xml, 'w') f_xml.write(content_xml) f_xml.close()
Update /m-k-manuscript-data/entries/ with the current manuscript from /ms-xml/. For each version, delete all existing entries. Regenerate folio text entry by entry, and save the file. Input: manuscript -- Python object of the manuscript defined in digital_manuscript.py Output: None
ronikaufman_sp21_semantic-visualizations/update.py
update_entries
cu-mkp/sandbox-projects
0
python
def update_entries(manuscript: BnF) -> None: '\n Update /m-k-manuscript-data/entries/ with the current manuscript from /ms-xml/. For each version, delete all existing\n entries. Regenerate folio text entry by entry, and save the file.\n\n Input:\n manuscript -- Python object of the manuscript defined in digital_manuscript.py\n Output:\n None\n ' for path in [f'{m_path}/entries', f'{m_path}/entries/txt', f'{m_path}/entries/xml']: if (not os.path.exists(path)): os.mkdir(path) for version in versions: txt_path = f'{m_path}/entries/txt/{version}' xml_path = f'{m_path}/entries/xml/{version}' for path in [txt_path, xml_path]: if (not os.path.exists(path)): os.mkdir(path) for (identity, entry) in manuscript.entries.items(): if identity: filename_txt = f'{txt_path}/{version}_{entry.identity}.txt' filename_xml = f'{xml_path}/{version}_{entry.identity}.xml' content_txt = entry.text(version, xml=False) content_xml = entry.text(version, xml=True) f_txt = open(filename_txt, 'w') f_txt.write(content_txt) f_txt.close() f_xml = open(filename_xml, 'w') f_xml.write(content_xml) f_xml.close()
def update_entries(manuscript: BnF) -> None: '\n Update /m-k-manuscript-data/entries/ with the current manuscript from /ms-xml/. For each version, delete all existing\n entries. Regenerate folio text entry by entry, and save the file.\n\n Input:\n manuscript -- Python object of the manuscript defined in digital_manuscript.py\n Output:\n None\n ' for path in [f'{m_path}/entries', f'{m_path}/entries/txt', f'{m_path}/entries/xml']: if (not os.path.exists(path)): os.mkdir(path) for version in versions: txt_path = f'{m_path}/entries/txt/{version}' xml_path = f'{m_path}/entries/xml/{version}' for path in [txt_path, xml_path]: if (not os.path.exists(path)): os.mkdir(path) for (identity, entry) in manuscript.entries.items(): if identity: filename_txt = f'{txt_path}/{version}_{entry.identity}.txt' filename_xml = f'{xml_path}/{version}_{entry.identity}.xml' content_txt = entry.text(version, xml=False) content_xml = entry.text(version, xml=True) f_txt = open(filename_txt, 'w') f_txt.write(content_txt) f_txt.close() f_xml = open(filename_xml, 'w') f_xml.write(content_xml) f_xml.close()<|docstring|>Update /m-k-manuscript-data/entries/ with the current manuscript from /ms-xml/. For each version, delete all existing entries. Regenerate folio text entry by entry, and save the file. Input: manuscript -- Python object of the manuscript defined in digital_manuscript.py Output: None<|endoftext|>
d63517e6089f147a7a846881c31bc0c68b07fd13977bc152a2c9ea97dd2aefd8
def update_all_folios(manuscript: BnF) -> None: '\n Update /m-k-manuscript-data/allFolios/ with the current manuscript from /ms-xml/.\n\n Input:\n manuscript -- Python object of the manuscript defined in digital_manuscript.py\n Output:\n None\n ' for b in [True, False]: for version in versions: text = '' folder = ('xml' if b else 'txt') for (identity, entry) in manuscript.entries.items(): new_text = entry.text(version, xml=b) text = (f'''{text} {new_text}''' if text else new_text) f = open(f'{m_path}/allFolios/{folder}/all_{version}.{folder}', 'w') f.write(text) f.close()
Update /m-k-manuscript-data/allFolios/ with the current manuscript from /ms-xml/. Input: manuscript -- Python object of the manuscript defined in digital_manuscript.py Output: None
ronikaufman_sp21_semantic-visualizations/update.py
update_all_folios
cu-mkp/sandbox-projects
0
python
def update_all_folios(manuscript: BnF) -> None: '\n Update /m-k-manuscript-data/allFolios/ with the current manuscript from /ms-xml/.\n\n Input:\n manuscript -- Python object of the manuscript defined in digital_manuscript.py\n Output:\n None\n ' for b in [True, False]: for version in versions: text = '' folder = ('xml' if b else 'txt') for (identity, entry) in manuscript.entries.items(): new_text = entry.text(version, xml=b) text = (f'{text} {new_text}' if text else new_text) f = open(f'{m_path}/allFolios/{folder}/all_{version}.{folder}', 'w') f.write(text) f.close()
def update_all_folios(manuscript: BnF) -> None: '\n Update /m-k-manuscript-data/allFolios/ with the current manuscript from /ms-xml/.\n\n Input:\n manuscript -- Python object of the manuscript defined in digital_manuscript.py\n Output:\n None\n ' for b in [True, False]: for version in versions: text = '' folder = ('xml' if b else 'txt') for (identity, entry) in manuscript.entries.items(): new_text = entry.text(version, xml=b) text = (f'{text} {new_text}' if text else new_text) f = open(f'{m_path}/allFolios/{folder}/all_{version}.{folder}', 'w') f.write(text) f.close()<|docstring|>Update /m-k-manuscript-data/allFolios/ with the current manuscript from /ms-xml/. Input: manuscript -- Python object of the manuscript defined in digital_manuscript.py Output: None<|endoftext|>
b9fb38fd24ca2e4955fa8ec811f1a4ab43991d058b817de663677901a5c328e6
def update_time(): ' Extract timestamp at the top of this file and update it. ' now_str = str(datetime.now()).split(' ')[0] lines = [] with open('./update.py', 'r') as f: lines = f.read().split('\n') lines[0] = f'# Last Updated | {now_str}' f = open('./update.py', 'w') f.write('\n'.join(lines)) f.close()
Extract timestamp at the top of this file and update it.
ronikaufman_sp21_semantic-visualizations/update.py
update_time
cu-mkp/sandbox-projects
0
python
def update_time(): ' ' now_str = str(datetime.now()).split(' ')[0] lines = [] with open('./update.py', 'r') as f: lines = f.read().split('\n') lines[0] = f'# Last Updated | {now_str}' f = open('./update.py', 'w') f.write('\n'.join(lines)) f.close()
def update_time(): ' ' now_str = str(datetime.now()).split(' ')[0] lines = [] with open('./update.py', 'r') as f: lines = f.read().split('\n') lines[0] = f'# Last Updated | {now_str}' f = open('./update.py', 'w') f.write('\n'.join(lines)) f.close()<|docstring|>Extract timestamp at the top of this file and update it.<|endoftext|>
f3565e334dca84ecd02cbf305aca284b4aa7d2502129706d70cc28a0f7284878
def main(): "\n testing to see how the assignment's datafile is organized:\n zip,eiaid,utility_name,state,service_type,ownership,comm_rate,ind_rate,res_rate\n zip = [0]\n name = [2]\n state = [3]\n comm_rate = [6]\n " cumulative_rate_sum = 0 num_of_rates = 0 file_to_use = '/home/cs241/assign02/rates.csv' with open(file_to_use) as open_file: first_line = open_file.readline().strip() next(open_file) for line in open_file: line = open_file.readline().strip() data = line.split(',') rate = float(data[6]) ult_name = data[2] num_of_rates += 1 cumulative_rate_sum += rate print(ult_name) print((cumulative_rate_sum / num_of_rates)) print(first_line)
testing to see how the assignment's datafile is organized: zip,eiaid,utility_name,state,service_type,ownership,comm_rate,ind_rate,res_rate zip = [0] name = [2] state = [3] comm_rate = [6]
test02.py
main
Millwr1ght/cs-241
0
python
def main(): "\n testing to see how the assignment's datafile is organized:\n zip,eiaid,utility_name,state,service_type,ownership,comm_rate,ind_rate,res_rate\n zip = [0]\n name = [2]\n state = [3]\n comm_rate = [6]\n " cumulative_rate_sum = 0 num_of_rates = 0 file_to_use = '/home/cs241/assign02/rates.csv' with open(file_to_use) as open_file: first_line = open_file.readline().strip() next(open_file) for line in open_file: line = open_file.readline().strip() data = line.split(',') rate = float(data[6]) ult_name = data[2] num_of_rates += 1 cumulative_rate_sum += rate print(ult_name) print((cumulative_rate_sum / num_of_rates)) print(first_line)
def main(): "\n testing to see how the assignment's datafile is organized:\n zip,eiaid,utility_name,state,service_type,ownership,comm_rate,ind_rate,res_rate\n zip = [0]\n name = [2]\n state = [3]\n comm_rate = [6]\n " cumulative_rate_sum = 0 num_of_rates = 0 file_to_use = '/home/cs241/assign02/rates.csv' with open(file_to_use) as open_file: first_line = open_file.readline().strip() next(open_file) for line in open_file: line = open_file.readline().strip() data = line.split(',') rate = float(data[6]) ult_name = data[2] num_of_rates += 1 cumulative_rate_sum += rate print(ult_name) print((cumulative_rate_sum / num_of_rates)) print(first_line)<|docstring|>testing to see how the assignment's datafile is organized: zip,eiaid,utility_name,state,service_type,ownership,comm_rate,ind_rate,res_rate zip = [0] name = [2] state = [3] comm_rate = [6]<|endoftext|>
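The csv module handles the header row and field splitting more robustly than manual readline calls; the two sample rows below are invented for illustration:

import csv
import io

sample = io.StringIO(
    'zip,eiaid,utility_name,state,service_type,ownership,comm_rate,ind_rate,res_rate\n'
    '85001,123,Example Power Co,AZ,Bundled,Investor,0.0712,0.0650,0.0891\n'
    '85002,456,Sample Utility,AZ,Bundled,Investor,0.0820,0.0700,0.0930\n'
)
reader = csv.DictReader(sample)  # consumes the header row automatically
rates = [float(row['comm_rate']) for row in reader]
print(sum(rates) / len(rates))  # 0.0766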
3941e7ac80fcea384bea4b1e551fedde66662e251de6c5e1d71f9fd4ebba089d
def parse_headers_and_body_with_django(headers, body): 'Parse `headers` and `body` with Django\'s :class:`MultiPartParser`.\n\n `MultiPartParser` is a curiously ugly and RFC non-compliant concoction.\n\n Amongst other things, it coerces all field names, field data, and\n filenames into Unicode strings using the "replace" error strategy, so be\n warned that your data may be silently mangled.\n\n It also, in 1.3.1 at least, does not recognise any transfer encodings at\n *all* because its header parsing code was broken.\n\n I\'m also fairly sure that it\'ll fall over on headers that span more than\n one line.\n\n In short, it\'s a piece of code that inspires little confidence, yet we\n must work with it, hence we need to round-trip test multipart handling\n with it.\n ' handler = MemoryFileUploadHandler() meta = {'HTTP_CONTENT_TYPE': headers['Content-Type'], 'HTTP_CONTENT_LENGTH': headers['Content-Length']} parser = MultiPartParser(META=meta, input_data=BytesIO(body), upload_handlers=[handler]) return parser.parse()
Parse `headers` and `body` with Django's :class:`MultiPartParser`. `MultiPartParser` is a curiously ugly and RFC non-compliant concoction. Amongst other things, it coerces all field names, field data, and filenames into Unicode strings using the "replace" error strategy, so be warned that your data may be silently mangled. It also, in 1.3.1 at least, does not recognise any transfer encodings at *all* because its header parsing code was broken. I'm also fairly sure that it'll fall over on headers that span more than one line. In short, it's a piece of code that inspires little confidence, yet we must work with it, hence we need to round-trip test multipart handling with it.
_modules/testing/django.py
parse_headers_and_body_with_django
Perceptyx/salt-formula-maas
6
python
def parse_headers_and_body_with_django(headers, body): 'Parse `headers` and `body` with Django\'s :class:`MultiPartParser`.\n\n `MultiPartParser` is a curiously ugly and RFC non-compliant concoction.\n\n Amongst other things, it coerces all field names, field data, and\n filenames into Unicode strings using the "replace" error strategy, so be\n warned that your data may be silently mangled.\n\n It also, in 1.3.1 at least, does not recognise any transfer encodings at\n *all* because its header parsing code was broken.\n\n I\'m also fairly sure that it\'ll fall over on headers that span more than\n one line.\n\n In short, it\'s a piece of code that inspires little confidence, yet we\n must work with it, hence we need to round-trip test multipart handling\n with it.\n ' handler = MemoryFileUploadHandler() meta = {'HTTP_CONTENT_TYPE': headers['Content-Type'], 'HTTP_CONTENT_LENGTH': headers['Content-Length']} parser = MultiPartParser(META=meta, input_data=BytesIO(body), upload_handlers=[handler]) return parser.parse()
def parse_headers_and_body_with_django(headers, body): 'Parse `headers` and `body` with Django\'s :class:`MultiPartParser`.\n\n `MultiPartParser` is a curiously ugly and RFC non-compliant concoction.\n\n Amongst other things, it coerces all field names, field data, and\n filenames into Unicode strings using the "replace" error strategy, so be\n warned that your data may be silently mangled.\n\n It also, in 1.3.1 at least, does not recognise any transfer encodings at\n *all* because its header parsing code was broken.\n\n I\'m also fairly sure that it\'ll fall over on headers that span more than\n one line.\n\n In short, it\'s a piece of code that inspires little confidence, yet we\n must work with it, hence we need to round-trip test multipart handling\n with it.\n ' handler = MemoryFileUploadHandler() meta = {'HTTP_CONTENT_TYPE': headers['Content-Type'], 'HTTP_CONTENT_LENGTH': headers['Content-Length']} parser = MultiPartParser(META=meta, input_data=BytesIO(body), upload_handlers=[handler]) return parser.parse()<|docstring|>Parse `headers` and `body` with Django's :class:`MultiPartParser`. `MultiPartParser` is a curiously ugly and RFC non-compliant concoction. Amongst other things, it coerces all field names, field data, and filenames into Unicode strings using the "replace" error strategy, so be warned that your data may be silently mangled. It also, in 1.3.1 at least, does not recognise any transfer encodings at *all* because its header parsing code was broken. I'm also fairly sure that it'll fall over on headers that span more than one line. In short, it's a piece of code that inspires little confidence, yet we must work with it, hence we need to round-trip test multipart handling with it.<|endoftext|>
e00f12e978af185d363abfecfe940b067a128d5728e1e341d9513a5e2f5f416a
def parse_headers_and_body_with_mimer(headers, body): "Use piston's Mimer functionality to handle the content.\n\n :return: The value of 'request.data' after using Piston's translate_mime on\n the input.\n " from piston import emitters ignore_unused(emitters) from piston.utils import translate_mime environ = {'wsgi.input': BytesIO(body)} for (name, value) in headers.items(): environ[name.upper().replace('-', '_')] = value environ['REQUEST_METHOD'] = 'POST' environ['SCRIPT_NAME'] = '' environ['PATH_INFO'] = '' os.environ['DJANGO_SETTINGS_MODULE'] = 'maas.development' from django.core.handlers.wsgi import WSGIRequest request = WSGIRequest(environ) translate_mime(request) return request.data
Use piston's Mimer functionality to handle the content. :return: The value of 'request.data' after using Piston's translate_mime on the input.
_modules/testing/django.py
parse_headers_and_body_with_mimer
Perceptyx/salt-formula-maas
6
python
def parse_headers_and_body_with_mimer(headers, body): "Use piston's Mimer functionality to handle the content.\n\n :return: The value of 'request.data' after using Piston's translate_mime on\n the input.\n " from piston import emitters ignore_unused(emitters) from piston.utils import translate_mime environ = {'wsgi.input': BytesIO(body)} for (name, value) in headers.items(): environ[name.upper().replace('-', '_')] = value environ['REQUEST_METHOD'] = 'POST' environ['SCRIPT_NAME'] = '' environ['PATH_INFO'] = '' os.environ['DJANGO_SETTINGS_MODULE'] = 'maas.development' from django.core.handlers.wsgi import WSGIRequest request = WSGIRequest(environ) translate_mime(request) return request.data
def parse_headers_and_body_with_mimer(headers, body): "Use piston's Mimer functionality to handle the content.\n\n :return: The value of 'request.data' after using Piston's translate_mime on\n the input.\n " from piston import emitters ignore_unused(emitters) from piston.utils import translate_mime environ = {'wsgi.input': BytesIO(body)} for (name, value) in headers.items(): environ[name.upper().replace('-', '_')] = value environ['REQUEST_METHOD'] = 'POST' environ['SCRIPT_NAME'] = '' environ['PATH_INFO'] = '' os.environ['DJANGO_SETTINGS_MODULE'] = 'maas.development' from django.core.handlers.wsgi import WSGIRequest request = WSGIRequest(environ) translate_mime(request) return request.data<|docstring|>Use piston's Mimer functionality to handle the content. :return: The value of 'request.data' after using Piston's translate_mime on the input.<|endoftext|>
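The header-to-WSGI-environ translation in the middle of this function is standard and can be checked without Django or Piston installed:

from io import BytesIO

headers = {'Content-Type': 'application/json', 'Content-Length': '2'}
environ = {'wsgi.input': BytesIO(b'{}'), 'REQUEST_METHOD': 'POST'}
for name, value in headers.items():
    # same uppercase-and-underscore mapping as in the function above
    environ[name.upper().replace('-', '_')] = value
print(sorted(k for k in environ if k != 'wsgi.input'))
# ['CONTENT_LENGTH', 'CONTENT_TYPE', 'REQUEST_METHOD']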
8287130fc609ec6b9d543947c9e3b890765e558c0e2fcf63320b090e0b9815d9
def process(self, data): 'Plot one SHM data.' obs = json.loads(data)['result'] nrow = obs['nrow'] ncol = obs['ncols'] lat_0 = obs['lat_0'] lon_0 = obs['lon_0'] dl = obs['cellsize'] data = np.array(obs['data']) lat = (lat_0 + (np.arange(ncol) * dl)) lon = (lon_0 + (np.arange(nrow) * dl)) (x, y) = np.meshgrid(lat, lon) r_latlon = np.stack((x.flatten(), y.flatten())) slm_latlon = np.array(obs['input_positions']['slm_latlon']) source_latlon = np.array(obs['input_positions']['sources_latlon']) wall_latlon = np.array(obs['input_positions']['walls_latlon']).T shm_area = np.array(obs['input_positions']['shm_area']) map_pos = source_latlon[:, 0] output_file('gmap.html') map_options = GMapOptions(lat=map_pos[0], lng=map_pos[1], zoom=18, map_type='roadmap', tilt=0) gmap_api_key = None if (gmap_api_key is None): raise NotImplementedError('Add gmaps api key in source.') p = gmap(gmap_api_key, map_options, title='Tivoli', height=700, width=1200) allvalues = data[self.freqindx].flatten() values = allvalues[(allvalues != None)].astype(float) normed_values = ((values - np.min(values)) / (np.max(values) - np.min(values))) colors = np.empty(allvalues.shape, dtype=object) colors.fill('#000000') colors.fill(None) colors[(allvalues != None)] = [to_hex(c) for c in cm.viridis(normed_values)] p.circle_cross(r_latlon[1, :][(allvalues != None)], r_latlon[0, :][(allvalues != None)], fill_color=colors[(allvalues != None)], size=10) p.circle_cross(slm_latlon[1, :], slm_latlon[0, :], legend='SLM', size=15, fill_color='green') if (wall_latlon.size > 0): p.multi_line(list(wall_latlon[:, :, 1]), list(wall_latlon[:, :, 0]), legend='Walls') p.circle_cross(source_latlon[1, :], source_latlon[0, :], legend='sources', size=15) p.circle_cross(shm_area[1, :], shm_area[0, :], legend='SHM area', size=15, fill_color='yellow') p.circle_cross(lon_0, lat_0, legend='reference', size=15, fill_color='black') save(p) webbrowser.open('gmap.html') print('Plotted.')
Plot one SHM data.
tools/display_maps.py
process
MONICA-Project/sound-heat-map
1
python
def process(self, data): obs = json.loads(data)['result'] nrow = obs['nrow'] ncol = obs['ncols'] lat_0 = obs['lat_0'] lon_0 = obs['lon_0'] dl = obs['cellsize'] data = np.array(obs['data']) lat = (lat_0 + (np.arange(ncol) * dl)) lon = (lon_0 + (np.arange(nrow) * dl)) (x, y) = np.meshgrid(lat, lon) r_latlon = np.stack((x.flatten(), y.flatten())) slm_latlon = np.array(obs['input_positions']['slm_latlon']) source_latlon = np.array(obs['input_positions']['sources_latlon']) wall_latlon = np.array(obs['input_positions']['walls_latlon']).T shm_area = np.array(obs['input_positions']['shm_area']) map_pos = source_latlon[:, 0] output_file('gmap.html') map_options = GMapOptions(lat=map_pos[0], lng=map_pos[1], zoom=18, map_type='roadmap', tilt=0) gmap_api_key = None if (gmap_api_key is None): raise NotImplementedError('Add gmaps api key in source.') p = gmap(gmap_api_key, map_options, title='Tivoli', height=700, width=1200) allvalues = data[self.freqindx].flatten() values = allvalues[(allvalues != None)].astype(float) normed_values = ((values - np.min(values)) / (np.max(values) - np.min(values))) colors = np.empty(allvalues.shape, dtype=object) colors.fill('#000000') colors.fill(None) colors[(allvalues != None)] = [to_hex(c) for c in cm.viridis(normed_values)] p.circle_cross(r_latlon[1, :][(allvalues != None)], r_latlon[0, :][(allvalues != None)], fill_color=colors[(allvalues != None)], size=10) p.circle_cross(slm_latlon[1, :], slm_latlon[0, :], legend='SLM', size=15, fill_color='green') if (wall_latlon.size > 0): p.multi_line(list(wall_latlon[:, :, 1]), list(wall_latlon[:, :, 0]), legend='Walls') p.circle_cross(source_latlon[1, :], source_latlon[0, :], legend='sources', size=15) p.circle_cross(shm_area[1, :], shm_area[0, :], legend='SHM area', size=15, fill_color='yellow') p.circle_cross(lon_0, lat_0, legend='reference', size=15, fill_color='black') save(p) webbrowser.open('gmap.html') print('Plotted.')
def process(self, data): obs = json.loads(data)['result'] nrow = obs['nrow'] ncol = obs['ncols'] lat_0 = obs['lat_0'] lon_0 = obs['lon_0'] dl = obs['cellsize'] data = np.array(obs['data']) lat = (lat_0 + (np.arange(ncol) * dl)) lon = (lon_0 + (np.arange(nrow) * dl)) (x, y) = np.meshgrid(lat, lon) r_latlon = np.stack((x.flatten(), y.flatten())) slm_latlon = np.array(obs['input_positions']['slm_latlon']) source_latlon = np.array(obs['input_positions']['sources_latlon']) wall_latlon = np.array(obs['input_positions']['walls_latlon']).T shm_area = np.array(obs['input_positions']['shm_area']) map_pos = source_latlon[:, 0] output_file('gmap.html') map_options = GMapOptions(lat=map_pos[0], lng=map_pos[1], zoom=18, map_type='roadmap', tilt=0) gmap_api_key = None if (gmap_api_key is None): raise NotImplementedError('Add gmaps api key in source.') p = gmap(gmap_api_key, map_options, title='Tivoli', height=700, width=1200) allvalues = data[self.freqindx].flatten() values = allvalues[(allvalues != None)].astype(float) normed_values = ((values - np.min(values)) / (np.max(values) - np.min(values))) colors = np.empty(allvalues.shape, dtype=object) colors.fill('#000000') colors.fill(None) colors[(allvalues != None)] = [to_hex(c) for c in cm.viridis(normed_values)] p.circle_cross(r_latlon[1, :][(allvalues != None)], r_latlon[0, :][(allvalues != None)], fill_color=colors[(allvalues != None)], size=10) p.circle_cross(slm_latlon[1, :], slm_latlon[0, :], legend='SLM', size=15, fill_color='green') if (wall_latlon.size > 0): p.multi_line(list(wall_latlon[:, :, 1]), list(wall_latlon[:, :, 0]), legend='Walls') p.circle_cross(source_latlon[1, :], source_latlon[0, :], legend='sources', size=15) p.circle_cross(shm_area[1, :], shm_area[0, :], legend='SHM area', size=15, fill_color='yellow') p.circle_cross(lon_0, lat_0, legend='reference', size=15, fill_color='black') save(p) webbrowser.open('gmap.html') print('Plotted.')<|docstring|>Plot one SHM data.<|endoftext|>
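The masking, normalisation, and colour-mapping step is the core of the plot and runs without Bokeh or a Maps API key; matplotlib is assumed to be available:

import numpy as np
from matplotlib import cm
from matplotlib.colors import to_hex

allvalues = np.array([0.5, None, 2.0, 3.5], dtype=object)
mask = allvalues != None  # elementwise comparison on an object array
values = allvalues[mask].astype(float)
normed = (values - np.min(values)) / (np.max(values) - np.min(values))
colors = np.empty(allvalues.shape, dtype=object)
colors[mask] = [to_hex(c) for c in cm.viridis(normed)]
print(colors)  # hex colours where data exists, None elsewhere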
9affd8894602bb65f425a2806ad51ace80e2090fe27d678f73f5b2ea757112c3
def get_files(start_path, a_type=None): '\n Retrieves a list of directory and sub-directory files containing duplicate\n files. This function simply checks the file name and does not check the file\n size to determine if a copied file is a true copy.\n ' if isinstance(a_type, list): if (len(a_type) > 1): a_type = (('\\.(' + '|'.join(a_type)) + ')$') else: a_type = f'\.{a_type[0]}$' else: a_type = '' reg_copies = re.compile('(Copy|Copy ?\\(\\d+\\)| \\(\\d+\\)){}'.format(a_type)) def _get_files(path: str): temp_list = [] try: files = os.scandir(path) for file in files: if file.is_dir(): temp_list.extend(_get_files(file.path)) elif reg_copies.search(file.name[(- 15):]): temp_list.append(file.path) except: pass return temp_list return _get_files(os.path.normpath(start_path))
Retrieves a list of directory and sub-directory files containing duplicate files. This function simply checks the file name and does not check the file size to determine if a copied file is a true copy.
deletion/functions.py
get_files
wrenzenzen/python-delete-folder-file-copies
0
python
def get_files(start_path, a_type=None): '\n Retrieves a list of directory and sub-directory files containing duplicate\n files. This function simply checks the file name and does not check the file\n size to determine if a copied file is a true copy.\n ' if isinstance(a_type, list): if (len(a_type) > 1): a_type = (('\\.(' + '|'.join(a_type)) + ')$') else: a_type = f'\.{a_type[0]}$' else: a_type = '' reg_copies = re.compile('(Copy|Copy ?\\(\\d+\\)| \\(\\d+\\)){}'.format(a_type)) def _get_files(path: str): temp_list = [] try: files = os.scandir(path) for file in files: if file.is_dir(): temp_list.extend(_get_files(file.path)) elif reg_copies.search(file.name[(- 15):]): temp_list.append(file.path) except: pass return temp_list return _get_files(os.path.normpath(start_path))
def get_files(start_path, a_type=None): '\n Retrieves a list of directory and sub-directory files containing duplicate\n files. This function simply checks the file name and does not check the file\n size to determine if a copied file is a true copy.\n ' if isinstance(a_type, list): if (len(a_type) > 1): a_type = (('\\.(' + '|'.join(a_type)) + ')$') else: a_type = f'\.{a_type[0]}$' else: a_type = '' reg_copies = re.compile('(Copy|Copy ?\\(\\d+\\)| \\(\\d+\\)){}'.format(a_type)) def _get_files(path: str): temp_list = [] try: files = os.scandir(path) for file in files: if file.is_dir(): temp_list.extend(_get_files(file.path)) elif reg_copies.search(file.name[(- 15):]): temp_list.append(file.path) except: pass return temp_list return _get_files(os.path.normpath(start_path))<|docstring|>Retrieves a list of directory and sub-directory files containing duplicate files. This function simply checks the file name and does not check the file size to determine if a copied file is a true copy.<|endoftext|>
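The regular expression that get_files builds matches copy-style suffixes ('Copy', 'Copy (2)', ' (3)') near the end of a file name, optionally constrained to a set of extensions, and only ever inspects the last 15 characters of each name. A short behavioral sketch with invented file names:

import re

# the pattern get_files builds for a_type == ['txt', 'png']
pattern = '(Copy|Copy ?\\(\\d+\\)| \\(\\d+\\)){}'.format('\\.(txt|png)$')
reg_copies = re.compile(pattern)

for name in ['report (2).txt', 'report Copy.txt', 'photo (3).png', 'report.txt']:
    print(name, '->', bool(reg_copies.search(name[-15:])))
# report (2).txt -> True, report Copy.txt -> True,
# photo (3).png -> True, report.txt -> False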
378ced0fda82d43a6c003834391a6cb53871aebe4a95992f3190b010687bb710
def delete_files(removal_list) -> None: '\n Deletes a list of files with a valid path \n ' print(f'Deleting ({len(removal_list)}) files') for a_file in removal_list: try: os.remove(a_file) print(a_file, '....deleted') except: print(a_file, '....could not delete')
Deletes a list of files with a valid path
deletion/functions.py
delete_files
wrenzenzen/python-delete-folder-file-copies
0
python
def delete_files(removal_list) -> None: '\n \n ' print(f'Deleting ({len(removal_list)}) files') for a_file in removal_list: try: os.remove(a_file) print(a_file, '....deleted') except: print(a_file, '....could not delete')
def delete_files(removal_list) -> None: '\n \n ' print(f'Deleting ({len(removal_list)}) files') for a_file in removal_list: try: os.remove(a_file) print(a_file, '....deleted') except: print(a_file, '....could not delete')<|docstring|>Deletes a list of files with a valid path<|endoftext|>
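Chained together, the two helpers above form the complete deletion pipeline; a hedged usage sketch (the directory path here is hypothetical):

# find files that look like copies under a directory, then delete them
copies = get_files('/tmp/example_dir', a_type=['txt'])
print(f'Found ({len(copies)}) candidate copies')
delete_files(copies)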
e39a3f8fef0fe3e6aba08683b4a48eb7e73c022e6049f9f8486e4ef793af08a2
def test_standard_prior_generator(): 'Test standard prior generator.' from mmrotate.core.anchor import build_prior_generator anchor_generator_cfg = dict(type='RotatedAnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8]) anchor_generator = build_prior_generator(anchor_generator_cfg) assert (anchor_generator.num_base_priors == anchor_generator.num_base_anchors) assert (anchor_generator.num_base_priors == [3, 3]) assert (anchor_generator is not None)
Test standard prior generator.
tests/test_utils/test_ranchor.py
test_standard_prior_generator
liufeinuaa/mmrotate
449
python
def test_standard_prior_generator(): from mmrotate.core.anchor import build_prior_generator anchor_generator_cfg = dict(type='RotatedAnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8]) anchor_generator = build_prior_generator(anchor_generator_cfg) assert (anchor_generator.num_base_priors == anchor_generator.num_base_anchors) assert (anchor_generator.num_base_priors == [3, 3]) assert (anchor_generator is not None)
def test_standard_prior_generator(): from mmrotate.core.anchor import build_prior_generator anchor_generator_cfg = dict(type='RotatedAnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8]) anchor_generator = build_prior_generator(anchor_generator_cfg) assert (anchor_generator.num_base_priors == anchor_generator.num_base_anchors) assert (anchor_generator.num_base_priors == [3, 3]) assert (anchor_generator is not None)<|docstring|>Test standard prior generator.<|endoftext|>
95e447eccf2944bed9692cff22ee6ded19c592a7eab170b97bb7baa0f3a20c30
def test_strides(): 'Test strides.' from mmrotate.core import RotatedAnchorGenerator self = RotatedAnchorGenerator([10], [1.0], [1.0], [10]) anchors = self.grid_priors([(2, 2)], device='cpu') expected_anchors = torch.tensor([[0.0, 0.0, 10.0, 10.0, 0.0], [10.0, 0.0, 10.0, 10.0, 0.0], [0.0, 10.0, 10.0, 10.0, 0.0], [10.0, 10.0, 10.0, 10.0, 0.0]]) assert torch.equal(anchors[0], expected_anchors) self = RotatedAnchorGenerator([(10, 20)], [1.0], [1.0], [10]) anchors = self.grid_priors([(2, 2)], device='cpu') expected_anchors = torch.tensor([[0.0, 0.0, 10.0, 10.0, 0.0], [10.0, 0.0, 10.0, 10.0, 0.0], [0.0, 20.0, 10.0, 10.0, 0.0], [10.0, 20.0, 10.0, 10.0, 0.0]]) assert torch.equal(anchors[0], expected_anchors)
Test strides.
tests/test_utils/test_ranchor.py
test_strides
liufeinuaa/mmrotate
449
python
def test_strides(): from mmrotate.core import RotatedAnchorGenerator self = RotatedAnchorGenerator([10], [1.0], [1.0], [10]) anchors = self.grid_priors([(2, 2)], device='cpu') expected_anchors = torch.tensor([[0.0, 0.0, 10.0, 10.0, 0.0], [10.0, 0.0, 10.0, 10.0, 0.0], [0.0, 10.0, 10.0, 10.0, 0.0], [10.0, 10.0, 10.0, 10.0, 0.0]]) assert torch.equal(anchors[0], expected_anchors) self = RotatedAnchorGenerator([(10, 20)], [1.0], [1.0], [10]) anchors = self.grid_priors([(2, 2)], device='cpu') expected_anchors = torch.tensor([[0.0, 0.0, 10.0, 10.0, 0.0], [10.0, 0.0, 10.0, 10.0, 0.0], [0.0, 20.0, 10.0, 10.0, 0.0], [10.0, 20.0, 10.0, 10.0, 0.0]]) assert torch.equal(anchors[0], expected_anchors)
def test_strides(): from mmrotate.core import RotatedAnchorGenerator self = RotatedAnchorGenerator([10], [1.0], [1.0], [10]) anchors = self.grid_priors([(2, 2)], device='cpu') expected_anchors = torch.tensor([[0.0, 0.0, 10.0, 10.0, 0.0], [10.0, 0.0, 10.0, 10.0, 0.0], [0.0, 10.0, 10.0, 10.0, 0.0], [10.0, 10.0, 10.0, 10.0, 0.0]]) assert torch.equal(anchors[0], expected_anchors) self = RotatedAnchorGenerator([(10, 20)], [1.0], [1.0], [10]) anchors = self.grid_priors([(2, 2)], device='cpu') expected_anchors = torch.tensor([[0.0, 0.0, 10.0, 10.0, 0.0], [10.0, 0.0, 10.0, 10.0, 0.0], [0.0, 20.0, 10.0, 10.0, 0.0], [10.0, 20.0, 10.0, 10.0, 0.0]]) assert torch.equal(anchors[0], expected_anchors)<|docstring|>Test strides.<|endoftext|>
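The expectation encoded in test_strides is simple arithmetic: on a 2x2 feature map, anchor centers sit at integer multiples of the per-axis stride, enumerated in row-major order. A quick numpy sketch of that layout, independent of mmrotate:

import numpy as np

stride_x, stride_y = 10, 20  # the second case in the test
xs = np.arange(2) * stride_x
ys = np.arange(2) * stride_y
centers = [(int(x), int(y)) for y in ys for x in xs]  # row-major, like expected_anchors
print(centers)  # [(0, 0), (10, 0), (0, 20), (10, 20)]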
a0d60bb3c102f67aa36308e1a7d024b4935c4053ee00520bc0d6726c41cc00bd
def __init__(self, delay_mean=100, delay_std_dev=10, min_delay=1, max_delay=500, reliability=0.9): "\n Every message sent into the channel is sent individually to all the receiving processes.\n All units are in milliseconds\n :param delay_mean: mean delay for a message to reach from in end to out end\n :param delay_std_dev: variation in delay for a message to reach from in end to out end\n :param min_delay: guarantee that the delay won't be less than this value\n :param max_delay: guarantee that the delay won't be more than this value\n :param reliability: The reliability with which a message is delivered. [0.0, 1.0]\n " self._in_end = None self._out_end = None self.delay_mean = delay_mean self.delay_std_dev = delay_std_dev self.started = asyncio.Queue() self.in_transit = set() self.reached = set() self.min_delay = min_delay self.max_delay = max_delay self.reliability = reliability self._back = None
Every message sent into the channel is sent individually to all the receiving processes. All units are in milliseconds :param delay_mean: mean delay for a message to reach from in end to out end :param delay_std_dev: variation in delay for a message to reach from in end to out end :param min_delay: guarantee that the delay won't be less than this value :param max_delay: guarantee that the delay won't be more than this value :param reliability: The reliability with which a message is delivered. [0.0, 1.0]
distalg/channel.py
__init__
abinashmeher999/Distributed-Algorithms
4
python
def __init__(self, delay_mean=100, delay_std_dev=10, min_delay=1, max_delay=500, reliability=0.9): "\n Every message sent into the channel is sent individually to all the receiving processes.\n All units are in milliseconds\n :param delay_mean: mean delay for a message to reach from in end to out end\n :param delay_std_dev: variation in delay for a message to reach from in end to out end\n :param min_delay: guarantee that the delay won't be less than this value\n :param max_delay: guarantee that the delay won't be more than this value\n :param reliability: The reliability with which a message is delivered. [0.0, 1.0]\n " self._in_end = None self._out_end = None self.delay_mean = delay_mean self.delay_std_dev = delay_std_dev self.started = asyncio.Queue() self.in_transit = set() self.reached = set() self.min_delay = min_delay self.max_delay = max_delay self.reliability = reliability self._back = None
def __init__(self, delay_mean=100, delay_std_dev=10, min_delay=1, max_delay=500, reliability=0.9): "\n Every message sent into the channel is sent individually to all the receiving processes.\n All units are in milliseconds\n :param delay_mean: mean delay for a message to reach from in end to out end\n :param delay_std_dev: variation in delay for a message to reach from in end to out end\n :param min_delay: guarantee that the delay won't be less than this value\n :param max_delay: guarantee that the delay won't be more than this value\n :param reliability: The reliability with which a message is delivered. [0.0, 1.0]\n " self._in_end = None self._out_end = None self.delay_mean = delay_mean self.delay_std_dev = delay_std_dev self.started = asyncio.Queue() self.in_transit = set() self.reached = set() self.min_delay = min_delay self.max_delay = max_delay self.reliability = reliability self._back = None<|docstring|>Every message sent into the channel is sent individually to all the receiving processes. All units are in milliseconds :param delay_mean: mean delay for a message to reach from in end to out end :param delay_std_dev: variation in delay for a message to reach from in end to out end :param min_delay: guarantee that the delay won't be less than this value :param max_delay: guarantee that the delay won't be more than this value :param reliability: The reliability with which a message is delivered. [0.0, 1.0]<|endoftext|>
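A minimal construction sketch for the channel above, assuming the surrounding module exposes Channel as defined:

# a jittery, lossy link: 100 ms +/- 10 ms delay, clamped to [1, 500] ms,
# delivering roughly 90% of messages
ch = Channel(delay_mean=100, delay_std_dev=10, min_delay=1, max_delay=500, reliability=0.9)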
ee76a7f4da05eca157a9e2dc81f4b5fa4d947e4c1bd178eed30325c76edcd2a3
async def __deliver(self, message): '\n :param message: The Message object to be delivered\n :return:\n ' sample = random.random() if (sample >= self.reliability): return self.in_transit.add(message) delay_time = random.gauss(self.delay_mean, self.delay_std_dev) clamped_delay_time = min(self.max_delay, max(self.min_delay, delay_time)) (await asyncio.sleep((clamped_delay_time / 1000))) self.in_transit.remove(message) (await self._out_end.incoming_msgs.put(message))
:param message: The Message object to be delivered :return:
distalg/channel.py
__deliver
abinashmeher999/Distributed-Algorithms
4
python
async def __deliver(self, message): '\n :param message: The Message object to be delivered\n :return:\n ' sample = random.random() if (sample >= self.reliability): return self.in_transit.add(message) delay_time = random.gauss(self.delay_mean, self.delay_std_dev) clamped_delay_time = min(self.max_delay, max(self.min_delay, delay_time)) (await asyncio.sleep((clamped_delay_time / 1000))) self.in_transit.remove(message) (await self._out_end.incoming_msgs.put(message))
async def __deliver(self, message): '\n :param message: The Message object to be delivered\n :return:\n ' sample = random.random() if (sample >= self.reliability): return self.in_transit.add(message) delay_time = random.gauss(self.delay_mean, self.delay_std_dev) clamped_delay_time = min(self.max_delay, max(self.min_delay, delay_time)) (await asyncio.sleep((clamped_delay_time / 1000))) self.in_transit.remove(message) (await self._out_end.incoming_msgs.put(message))<|docstring|>:param message: The Message object to be delivered :return:<|endoftext|>
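Delivery above makes two independent random draws: a Bernoulli trial against reliability decides whether the message survives at all, and a Gaussian sample clamped to [min_delay, max_delay] decides how long it takes. The same logic in isolation, with the defaults from __init__:

import random

reliability, delay_mean, delay_std_dev = 0.9, 100, 10
min_delay, max_delay = 1, 500

if random.random() < reliability:  # message survives the loss trial
    delay = random.gauss(delay_mean, delay_std_dev)
    clamped = min(max_delay, max(min_delay, delay))  # the clamp __deliver applies
    print(f'delivered after {clamped / 1000:.3f} s')
else:
    print('message dropped')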
d800211c9fbf15c2ab4454288e521ebc336a0af6fd11d9fde032700ca7173e10
def test_expert(self): '\n Test Expert difficulty (Tier 2)\n ' URL = 'http://127.0.0.1:8000/war/2' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 2) self.assertEqual(war_info['difficulty'], 'Expert') self.assertEqual(war_info['tier_multiplier'], '7.0') self.assertEqual(war_info['tier_rank'], '0.16%-0.50%') self.assertEqual(war_info['nodes']['16'], ['Vivified - I', 'Brute Force'])
Test Expert difficulty (Tier 2)
uma/api/tests/test_war.py
test_expert
Rexians/uma
3
python
def test_expert(self): '\n \n ' URL = 'http://127.0.0.1:8000/war/2' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 2) self.assertEqual(war_info['difficulty'], 'Expert') self.assertEqual(war_info['tier_multiplier'], '7.0') self.assertEqual(war_info['tier_rank'], '0.16%-0.50%') self.assertEqual(war_info['nodes']['16'], ['Vivified - I', 'Brute Force'])
def test_expert(self): '\n \n ' URL = 'http://127.0.0.1:8000/war/2' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 2) self.assertEqual(war_info['difficulty'], 'Expert') self.assertEqual(war_info['tier_multiplier'], '7.0') self.assertEqual(war_info['tier_rank'], '0.16%-0.50%') self.assertEqual(war_info['nodes']['16'], ['Vivified - I', 'Brute Force'])<|docstring|>Test Expert difficulty (Tier 2)<|endoftext|>
1a7b73ab4db8d1a5b3c283e4185f68865a3df8a4edd75becef9a8791f84e04d0
def test_challenger(self): '\n Test Challenger difficulty (Tier 4)\n ' URL = 'http://127.0.0.1:8000/war/4' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 4) self.assertEqual(war_info['difficulty'], 'Challenger') self.assertEqual(war_info['tier_multiplier'], '4.5') self.assertEqual(war_info['tier_rank'], '2%-3%') self.assertEqual(war_info['nodes']['3'], ['COMBAT DÉJÀ VU - PROWESS', 'Prowess Puncture - 2'])
Test Challenger difficulty (Tier 4)
uma/api/tests/test_war.py
test_challenger
Rexians/uma
3
python
def test_challenger(self): '\n \n ' URL = 'http://127.0.0.1:8000/war/4' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 4) self.assertEqual(war_info['difficulty'], 'Challenger') self.assertEqual(war_info['tier_multiplier'], '4.5') self.assertEqual(war_info['tier_rank'], '2%-3%') self.assertEqual(war_info['nodes']['3'], ['COMBAT DÉJÀ VU - PROWESS', 'Prowess Puncture - 2'])
def test_challenger(self): '\n \n ' URL = 'http://127.0.0.1:8000/war/4' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 4) self.assertEqual(war_info['difficulty'], 'Challenger') self.assertEqual(war_info['tier_multiplier'], '4.5') self.assertEqual(war_info['tier_rank'], '2%-3%') self.assertEqual(war_info['nodes']['3'], ['COMBAT DÉJÀ VU - PROWESS', 'Prowess Puncture - 2'])<|docstring|>Test Challenger difficulty (Tier 4)<|endoftext|>
3e3435d9b4ce87d6590cc003dbd1679f9f185911e88054eef00fda7b3deacf33
def test_hard(self): '\n Test Hard difficulty (Tier 8)\n ' URL = 'http://127.0.0.1:8000/war/8' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 8) self.assertEqual(war_info['difficulty'], 'Hard') self.assertEqual(war_info['tier_multiplier'], '3.0') self.assertEqual(war_info['tier_rank'], '10%-11%') self.assertEqual(war_info['nodes']['48'], ['Feat of Vigilance 1', 'Power Efficiency', 'Missing in Action 1'])
Test Hard difficulty (Tier 8)
uma/api/tests/test_war.py
test_hard
Rexians/uma
3
python
def test_hard(self): '\n \n ' URL = 'http://127.0.0.1:8000/war/8' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 8) self.assertEqual(war_info['difficulty'], 'Hard') self.assertEqual(war_info['tier_multiplier'], '3.0') self.assertEqual(war_info['tier_rank'], '10%-11%') self.assertEqual(war_info['nodes']['48'], ['Feat of Vigilance 1', 'Power Efficiency', 'Missing in Action 1'])
def test_hard(self): '\n \n ' URL = 'http://127.0.0.1:8000/war/8' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 8) self.assertEqual(war_info['difficulty'], 'Hard') self.assertEqual(war_info['tier_multiplier'], '3.0') self.assertEqual(war_info['tier_rank'], '10%-11%') self.assertEqual(war_info['nodes']['48'], ['Feat of Vigilance 1', 'Power Efficiency', 'Missing in Action 1'])<|docstring|>Test Hard difficulty (Tier 8)<|endoftext|>
53524ef7ae7351a976876f5ef10a07bc586034f0af2119e79c926b80cf7f2098
def test_intermediate(self): '\n Test Intermediate difficulty (Tier 10)\n ' URL = 'http://127.0.0.1:8000/war/10' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 10) self.assertEqual(war_info['difficulty'], 'Intermediate') self.assertEqual(war_info['tier_multiplier'], '2.4') self.assertEqual(war_info['tier_rank'], '14%-15%') self.assertEqual(war_info['nodes']['23'], ['Mix Master', 'Aggression: Prowess'])
Test Intermediate difficulty (Tier 10)
uma/api/tests/test_war.py
test_intermediate
Rexians/uma
3
python
def test_intermediate(self): '\n \n ' URL = 'http://127.0.0.1:8000/war/10' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 10) self.assertEqual(war_info['difficulty'], 'Intermediate') self.assertEqual(war_info['tier_multiplier'], '2.4') self.assertEqual(war_info['tier_rank'], '14%-15%') self.assertEqual(war_info['nodes']['23'], ['Mix Master', 'Aggression: Prowess'])
def test_intermediate(self): '\n \n ' URL = 'http://127.0.0.1:8000/war/10' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 10) self.assertEqual(war_info['difficulty'], 'Intermediate') self.assertEqual(war_info['tier_multiplier'], '2.4') self.assertEqual(war_info['tier_rank'], '14%-15%') self.assertEqual(war_info['nodes']['23'], ['Mix Master', 'Aggression: Prowess'])<|docstring|>Test Intermediate difficulty (Tier 10)<|endoftext|>
3f7da2cbb80747931bbffa362ce3d60d3c7a79b0b58ec39f9fb131b730334295
def test_normal(self): '\n Test Normal difficulty (Tier 15)\n ' URL = 'http://127.0.0.1:8000/war/15' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 15) self.assertEqual(war_info['difficulty'], 'Normal') self.assertEqual(war_info['tier_multiplier'], '1.8') self.assertEqual(war_info['tier_rank'], '36%-40%') self.assertEqual(war_info['nodes']['1'], ['Cornered'])
Test Normal difficulty (Tier 15)
uma/api/tests/test_war.py
test_normal
Rexians/uma
3
python
def test_normal(self): '\n \n ' URL = 'http://127.0.0.1:8000/war/15' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 15) self.assertEqual(war_info['difficulty'], 'Normal') self.assertEqual(war_info['tier_multiplier'], '1.8') self.assertEqual(war_info['tier_rank'], '36%-40%') self.assertEqual(war_info['nodes']['1'], ['Cornered'])
def test_normal(self): '\n \n ' URL = 'http://127.0.0.1:8000/war/15' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 15) self.assertEqual(war_info['difficulty'], 'Normal') self.assertEqual(war_info['tier_multiplier'], '1.8') self.assertEqual(war_info['tier_rank'], '36%-40%') self.assertEqual(war_info['nodes']['1'], ['Cornered'])<|docstring|>Test Normal difficulty (Tier 15)<|endoftext|>
d66d29735aeae889e400c74a1010f7bf2b8ce4890601de25fb25dfe8f97f5d9c
def test_easy(self): '\n Test Easy difficulty (Tier 21)\n ' URL = 'http://127.0.0.1:8000/war/21' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 21) self.assertEqual(war_info['difficulty'], 'Easy') self.assertEqual(war_info['tier_multiplier'], '1.1') self.assertEqual(war_info['tier_rank'], '81%-90%') self.assertEqual(war_info['nodes']['21'], ['Plagued Mind', 'Immunity (Bleed)'])
Test Easy difficulty (Tier 21)
uma/api/tests/test_war.py
test_easy
Rexians/uma
3
python
def test_easy(self): '\n \n ' URL = 'http://127.0.0.1:8000/war/21' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 21) self.assertEqual(war_info['difficulty'], 'Easy') self.assertEqual(war_info['tier_multiplier'], '1.1') self.assertEqual(war_info['tier_rank'], '81%-90%') self.assertEqual(war_info['nodes']['21'], ['Plagued Mind', 'Immunity (Bleed)'])
def test_easy(self): '\n \n ' URL = 'http://127.0.0.1:8000/war/21' war_info = requests.get(URL).json() self.assertEqual(war_info['tier'], 21) self.assertEqual(war_info['difficulty'], 'Easy') self.assertEqual(war_info['tier_multiplier'], '1.1') self.assertEqual(war_info['tier_rank'], '81%-90%') self.assertEqual(war_info['nodes']['21'], ['Plagued Mind', 'Immunity (Bleed)'])<|docstring|>Test Easy difficulty (Tier 21)<|endoftext|>
dfca7081304d9f7ee121250001c72204d546e989d91bc28b7e7fd4b20a19adea
def break_words(stuff): 'This function will break up words for us.' words = stuff.split(' ') return words
This function will break up words for us.
Python/Learning/Language/test.py
break_words
prynix/learning-programming
2
python
def break_words(stuff): words = stuff.split(' ') return words
def break_words(stuff): words = stuff.split(' ') return words<|docstring|>This function will break up words for us.<|endoftext|>
a6e1ee4f7d1a2f1a6dd45991cc6a984c31d5a336ea5a10fcae432c4895c5e829
def sort_words(words): 'Sorts the words.' return sorted(words)
Sorts the words.
Python/Learning/Language/test.py
sort_words
prynix/learning-programming
2
python
def sort_words(words): return sorted(words)
def sort_words(words): return sorted(words)<|docstring|>Sorts the words.<|endoftext|>
c44259c17df95394c6b3624ec562efd8a7e3869d24066edcaa8ff91760722b3e
def print_first_word(words): 'Prints the first word after popping it off.' word = words.pop(0) print(word)
Prints the first word after popping it off.
Python/Learning/Language/test.py
print_first_word
prynix/learning-programming
2
python
def print_first_word(words): word = words.pop(0) print(word)
def print_first_word(words): word = words.pop(0) print(word)<|docstring|>Prints the first word after popping it off.<|endoftext|>
56a89d52660c37cf5ce3a6b9e312eabb678a633846287bed0c22eda97a363bbb
def print_last_word(words): 'Prints the last word after popping it off.' word = words.pop((- 1)) print(word)
Prints the last word after popping it off.
Python/Learning/Language/test.py
print_last_word
prynix/learning-programming
2
python
def print_last_word(words): word = words.pop((- 1)) print(word)
def print_last_word(words): word = words.pop((- 1)) print(word)<|docstring|>Prints the last word after popping it off.<|endoftext|>
bcad118c4dbc01cee11c53f9fc6b12d2f5d8e9c9415041869f7d759679bc5231
def sort_sentence(sentence): 'Takes in a full sentence and returns the sorted words.' words = break_words(sentence) return sort_words(words)
Takes in a full sentence and returns the sorted words.
Python/Learning/Language/test.py
sort_sentence
prynix/learning-programming
2
python
def sort_sentence(sentence): words = break_words(sentence) return sort_words(words)
def sort_sentence(sentence): words = break_words(sentence) return sort_words(words)<|docstring|>Takes in a full sentence and returns the sorted words.<|endoftext|>
974a419d5234409764644af9b44a70547d1c22523eb9e2299e9e4337bfa5c9d8
def print_first_and_last(sentence): 'Prints the first and last words of the sentence.' words = break_words(sentence) print_first_word(words) print_last_word(words)
Prints the first and last words of the sentence.
Python/Learning/Language/test.py
print_first_and_last
prynix/learning-programming
2
python
def print_first_and_last(sentence): words = break_words(sentence) print_first_word(words) print_last_word(words)
def print_first_and_last(sentence): words = break_words(sentence) print_first_word(words) print_last_word(words)<|docstring|>Prints the first and last words of the sentence.<|endoftext|>
8909126637d367924b2eb58284cbc5d742bf2871da805e634e2583a50de3bb8e
def print_first_and_last_sorted(sentence): 'Sorts the words then prints the first and last one.' words = sort_sentence(sentence) print_first_word(words) print_last_word(words)
Sorts the words then prints the first and last one.
Python/Learning/Language/test.py
print_first_and_last_sorted
prynix/learning-programming
2
python
def print_first_and_last_sorted(sentence): words = sort_sentence(sentence) print_first_word(words) print_last_word(words)
def print_first_and_last_sorted(sentence): words = sort_sentence(sentence) print_first_word(words) print_last_word(words)<|docstring|>Sorts the words then prints the first and last one.<|endoftext|>
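Taken together, the helpers above form a tiny sentence toolkit; a short run-through with an invented sentence:

sentence = 'All good things come to those who wait.'
words = break_words(sentence)
print(words)                           # ['All', 'good', 'things', ...]
print(sort_words(words))               # alphabetical; uppercase sorts first
print_first_and_last(sentence)         # prints 'All' then 'wait.'
print_first_and_last_sorted(sentence)  # prints 'All' then 'who'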
21f26e0a2de48c40cae28ddf6758ee901ef47111b098d20955b07d18adc0d723
def __getitem__(self, key): "\n Make class['foo'] automatically filter for the parameter 'foo'\n Makes the model code much cleaner\n\n :param key: Parameter name\n :type key: str\n :return: `array` filtered after the parameter selected\n " return self.temp_array.sel(parameter=key)
Make class['foo'] automatically filter for the parameter 'foo' Makes the model code much cleaner :param key: Parameter name :type key: str :return: `array` filtered after the parameter selected
carculator/inventory.py
__getitem__
rena-nong/carculator
1
python
def __getitem__(self, key): "\n Make class['foo'] automatically filter for the parameter 'foo'\n Makes the model code much cleaner\n\n :param key: Parameter name\n :type key: str\n :return: `array` filtered after the parameter selected\n " return self.temp_array.sel(parameter=key)
def __getitem__(self, key): "\n Make class['foo'] automatically filter for the parameter 'foo'\n Makes the model code much cleaner\n\n :param key: Parameter name\n :type key: str\n :return: `array` filtered after the parameter selected\n " return self.temp_array.sel(parameter=key)<|docstring|>Make class['foo'] automatically filter for the parameter 'foo' Makes the model code much cleaner :param key: Parameter name :type key: str :return: `array` filtered after the parameter selected<|endoftext|>
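The indexing sugar above just delegates to xarray's label-based .sel; a self-contained sketch of the same idea with a stand-in class (class name and coordinates hypothetical):

import numpy as np
import xarray as xr

class TinyModel:
    def __init__(self, temp_array):
        self.temp_array = temp_array

    def __getitem__(self, key):
        # model['foo'] reads as: select the slice for parameter 'foo'
        return self.temp_array.sel(parameter=key)

arr = xr.DataArray(np.arange(6).reshape(2, 3),
                   coords={'parameter': ['mass', 'power'], 'year': [2020, 2030, 2040]},
                   dims=['parameter', 'year'])
model = TinyModel(arr)
print(model['power'])  # one call instead of model.temp_array.sel(parameter='power')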
e20b2b34cc737a0749a7432ed77839416e1318760028b61da27813ffe80ee0ad
def get_results_table(self, split, sensitivity=False): '\n Format an xarray.DataArray array to receive the results.\n\n :param split: "components" or "impact categories". Split by impact categories only applicable when "endpoint" level is applied.\n :return: xarray.DataArray\n ' if (split == 'components'): cat = ['direct - exhaust', 'direct - non-exhaust', 'energy chain', 'maintenance', 'glider', 'EoL', 'powertrain', 'energy storage', 'road'] dict_impact_cat = list(self.impact_categories.keys()) sizes = self.scope['size'] if isinstance(self.fleet, xr.core.dataarray.DataArray): sizes += ['fleet average'] if (sensitivity == False): response = xr.DataArray(np.zeros((self.B.shape[1], len(sizes), len(self.scope['powertrain']), len(self.scope['year']), len(cat), self.iterations)), coords=[dict_impact_cat, sizes, self.scope['powertrain'], self.scope['year'], cat, np.arange(0, self.iterations)], dims=['impact_category', 'size', 'powertrain', 'year', 'impact', 'value']) else: params = [a for a in self.array.value.values] response = xr.DataArray(np.zeros((self.B.shape[1], len(sizes), len(self.scope['powertrain']), len(self.scope['year']), self.iterations)), coords=[dict_impact_cat, sizes, self.scope['powertrain'], self.scope['year'], params], dims=['impact_category', 'size', 'powertrain', 'year', 'parameter']) return response
Format an xarray.DataArray array to receive the results. :param split: "components" or "impact categories". Split by impact categories only applicable when "endpoint" level is applied. :return: xarray.DataArray
carculator/inventory.py
get_results_table
rena-nong/carculator
1
python
def get_results_table(self, split, sensitivity=False): '\n Format an xarray.DataArray array to receive the results.\n\n :param split: "components" or "impact categories". Split by impact categories only applicable when "endpoint" level is applied.\n :return: xarray.DataArray\n ' if (split == 'components'): cat = ['direct - exhaust', 'direct - non-exhaust', 'energy chain', 'maintenance', 'glider', 'EoL', 'powertrain', 'energy storage', 'road'] dict_impact_cat = list(self.impact_categories.keys()) sizes = self.scope['size'] if isinstance(self.fleet, xr.core.dataarray.DataArray): sizes += ['fleet average'] if (sensitivity == False): response = xr.DataArray(np.zeros((self.B.shape[1], len(sizes), len(self.scope['powertrain']), len(self.scope['year']), len(cat), self.iterations)), coords=[dict_impact_cat, sizes, self.scope['powertrain'], self.scope['year'], cat, np.arange(0, self.iterations)], dims=['impact_category', 'size', 'powertrain', 'year', 'impact', 'value']) else: params = [a for a in self.array.value.values] response = xr.DataArray(np.zeros((self.B.shape[1], len(sizes), len(self.scope['powertrain']), len(self.scope['year']), self.iterations)), coords=[dict_impact_cat, sizes, self.scope['powertrain'], self.scope['year'], params], dims=['impact_category', 'size', 'powertrain', 'year', 'parameter']) return response
def get_results_table(self, split, sensitivity=False): '\n Format an xarray.DataArray array to receive the results.\n\n :param split: "components" or "impact categories". Split by impact categories only applicable when "endpoint" level is applied.\n :return: xarray.DataArray\n ' if (split == 'components'): cat = ['direct - exhaust', 'direct - non-exhaust', 'energy chain', 'maintenance', 'glider', 'EoL', 'powertrain', 'energy storage', 'road'] dict_impact_cat = list(self.impact_categories.keys()) sizes = self.scope['size'] if isinstance(self.fleet, xr.core.dataarray.DataArray): sizes += ['fleet average'] if (sensitivity == False): response = xr.DataArray(np.zeros((self.B.shape[1], len(sizes), len(self.scope['powertrain']), len(self.scope['year']), len(cat), self.iterations)), coords=[dict_impact_cat, sizes, self.scope['powertrain'], self.scope['year'], cat, np.arange(0, self.iterations)], dims=['impact_category', 'size', 'powertrain', 'year', 'impact', 'value']) else: params = [a for a in self.array.value.values] response = xr.DataArray(np.zeros((self.B.shape[1], len(sizes), len(self.scope['powertrain']), len(self.scope['year']), self.iterations)), coords=[dict_impact_cat, sizes, self.scope['powertrain'], self.scope['year'], params], dims=['impact_category', 'size', 'powertrain', 'year', 'parameter']) return response<|docstring|>Format an xarray.DataArray array to receive the results. :param split: "components" or "impact categories". Split by impact categories only applicable when "endpoint" level is applied. :return: xarray.DataArray<|endoftext|>
0ff7728556dd4a89237ad973a69723f3ba21f9044616f8238f7ebe919239ae6a
def get_split_indices(self): '\n Return list of indices to split the results into categories.\n\n :return: list of indices\n :rtype: list\n ' filename = 'dict_split.csv' filepath = (DATA_DIR / filename) if (not filepath.is_file()): raise FileNotFoundError('The dictionary of splits could not be found.') with open(filepath) as f: csv_list = [[val.strip() for val in r.split(';')] for r in f.readlines()] ((_, _, *header), *data) = csv_list csv_dict = {} for row in data: (key, sub_key, *values) = row if (key in csv_dict): if (sub_key in csv_dict[key]): csv_dict[key][sub_key].append({'search by': values[0], 'search for': values[1]}) else: csv_dict[key][sub_key] = [{'search by': values[0], 'search for': values[1]}] else: csv_dict[key] = {sub_key: [{'search by': values[0], 'search for': values[1]}]} flatten = itertools.chain.from_iterable d = {} l = [] d['direct - exhaust'] = [] d['direct - exhaust'].append(self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Methane, fossil', ('air',), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Cadmium', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Copper', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Chromium', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Nickel', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Selenium', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Zinc', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Chromium VI', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].extend(self.index_emissions) d['direct - exhaust'].extend(self.index_noise) l.append(d['direct - exhaust']) for cat in csv_dict['components']: d[cat] = list(flatten([self.get_index_of_flows([l['search for']], l['search by']) for l in csv_dict['components'][cat]])) l.append(d[cat]) list_ind = [d[x] for x in d] maxLen = max(map(len, list_ind)) for row in list_ind: while (len(row) < maxLen): row.extend([(len(self.inputs) - 1)]) return (list(d.keys()), list_ind)
Return list of indices to split the results into categories. :return: list of indices :rtype: list
carculator/inventory.py
get_split_indices
rena-nong/carculator
1
python
def get_split_indices(self): '\n Return list of indices to split the results into categories.\n\n :return: list of indices\n :rtype: list\n ' filename = 'dict_split.csv' filepath = (DATA_DIR / filename) if (not filepath.is_file()): raise FileNotFoundError('The dictionary of splits could not be found.') with open(filepath) as f: csv_list = [[val.strip() for val in r.split(';')] for r in f.readlines()] ((_, _, *header), *data) = csv_list csv_dict = {} for row in data: (key, sub_key, *values) = row if (key in csv_dict): if (sub_key in csv_dict[key]): csv_dict[key][sub_key].append({'search by': values[0], 'search for': values[1]}) else: csv_dict[key][sub_key] = [{'search by': values[0], 'search for': values[1]}] else: csv_dict[key] = {sub_key: [{'search by': values[0], 'search for': values[1]}]} flatten = itertools.chain.from_iterable d = {} l = [] d['direct - exhaust'] = [] d['direct - exhaust'].append(self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Methane, fossil', ('air',), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Cadmium', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Copper', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Chromium', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Nickel', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Selenium', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Zinc', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Chromium VI', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].extend(self.index_emissions) d['direct - exhaust'].extend(self.index_noise) l.append(d['direct - exhaust']) for cat in csv_dict['components']: d[cat] = list(flatten([self.get_index_of_flows([l['search for']], l['search by']) for l in csv_dict['components'][cat]])) l.append(d[cat]) list_ind = [d[x] for x in d] maxLen = max(map(len, list_ind)) for row in list_ind: while (len(row) < maxLen): row.extend([(len(self.inputs) - 1)]) return (list(d.keys()), list_ind)
def get_split_indices(self): '\n Return list of indices to split the results into categories.\n\n :return: list of indices\n :rtype: list\n ' filename = 'dict_split.csv' filepath = (DATA_DIR / filename) if (not filepath.is_file()): raise FileNotFoundError('The dictionary of splits could not be found.') with open(filepath) as f: csv_list = [[val.strip() for val in r.split(';')] for r in f.readlines()] ((_, _, *header), *data) = csv_list csv_dict = {} for row in data: (key, sub_key, *values) = row if (key in csv_dict): if (sub_key in csv_dict[key]): csv_dict[key][sub_key].append({'search by': values[0], 'search for': values[1]}) else: csv_dict[key][sub_key] = [{'search by': values[0], 'search for': values[1]}] else: csv_dict[key] = {sub_key: [{'search by': values[0], 'search for': values[1]}]} flatten = itertools.chain.from_iterable d = {} l = [] d['direct - exhaust'] = [] d['direct - exhaust'].append(self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Methane, fossil', ('air',), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Cadmium', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Copper', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Chromium', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Nickel', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Selenium', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Zinc', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].append(self.inputs[('Chromium VI', ('air', 'urban air close to ground'), 'kilogram')]) d['direct - exhaust'].extend(self.index_emissions) d['direct - exhaust'].extend(self.index_noise) l.append(d['direct - exhaust']) for cat in csv_dict['components']: d[cat] = list(flatten([self.get_index_of_flows([l['search for']], l['search by']) for l in csv_dict['components'][cat]])) l.append(d[cat]) list_ind = [d[x] for x in d] maxLen = max(map(len, list_ind)) for row in list_ind: while (len(row) < maxLen): row.extend([(len(self.inputs) - 1)]) return (list(d.keys()), list_ind)<|docstring|>Return list of indices to split the results into categories. :return: list of indices :rtype: list<|endoftext|>
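The tail of get_split_indices pads every index list out to the longest one with a sink index, len(self.inputs) - 1, so the ragged lists can later be treated as a rectangular array. The padding step on its own, with toy lists:

list_ind = [[0, 3], [5], [1, 2, 4]]
sink = 99  # stand-in for len(self.inputs) - 1
max_len = max(map(len, list_ind))
for row in list_ind:
    while len(row) < max_len:
        row.append(sink)
print(list_ind)  # [[0, 3, 99], [5, 99, 99], [1, 2, 4]]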
97168e118b31e67d5710a94e3e81ec74ea9b826a3e85ba1752f684d881a388a8
def get_A_matrix(self): '\n Load the A matrix. The A matrix contains exchanges of products (rows) between activities (columns).\n\n :return: A matrix with three dimensions of shape (number of values, number of products, number of activities).\n :rtype: numpy.ndarray\n\n ' filename = 'A_matrix.csv' filepath = Path(getframeinfo(currentframe()).filename).resolve().parent.joinpath(('data/' + filename)) if (not filepath.is_file()): raise FileNotFoundError('The technology matrix could not be found.') initial_A = np.genfromtxt(filepath, delimiter=';') new_A = np.identity(len(self.inputs)) new_A[0:np.shape(initial_A)[0], 0:np.shape(initial_A)[0]] = initial_A new_A = np.resize(new_A, (self.array.shape[1], new_A.shape[0], new_A.shape[1])) return new_A.astype('float32')
Load the A matrix. The A matrix contains exchanges of products (rows) between activities (columns). :return: A matrix with three dimensions of shape (number of values, number of products, number of activities). :rtype: numpy.ndarray
carculator/inventory.py
get_A_matrix
rena-nong/carculator
1
python
def get_A_matrix(self): '\n Load the A matrix. The A matrix contains exchanges of products (rows) between activities (columns).\n\n :return: A matrix with three dimensions of shape (number of values, number of products, number of activities).\n :rtype: numpy.ndarray\n\n ' filename = 'A_matrix.csv' filepath = Path(getframeinfo(currentframe()).filename).resolve().parent.joinpath(('data/' + filename)) if (not filepath.is_file()): raise FileNotFoundError('The technology matrix could not be found.') initial_A = np.genfromtxt(filepath, delimiter=';') new_A = np.identity(len(self.inputs)) new_A[0:np.shape(initial_A)[0], 0:np.shape(initial_A)[0]] = initial_A new_A = np.resize(new_A, (self.array.shape[1], new_A.shape[0], new_A.shape[1])) return new_A.astype('float32')
def get_A_matrix(self): '\n Load the A matrix. The A matrix contains exchanges of products (rows) between activities (columns).\n\n :return: A matrix with three dimensions of shape (number of values, number of products, number of activities).\n :rtype: numpy.ndarray\n\n ' filename = 'A_matrix.csv' filepath = Path(getframeinfo(currentframe()).filename).resolve().parent.joinpath(('data/' + filename)) if (not filepath.is_file()): raise FileNotFoundError('The technology matrix could not be found.') initial_A = np.genfromtxt(filepath, delimiter=';') new_A = np.identity(len(self.inputs)) new_A[0:np.shape(initial_A)[0], 0:np.shape(initial_A)[0]] = initial_A new_A = np.resize(new_A, (self.array.shape[1], new_A.shape[0], new_A.shape[1])) return new_A.astype('float32')<|docstring|>Load the A matrix. The A matrix contains exchanges of products (rows) between activities (columns). :return: A matrix with three dimensions of shape (number of values, number of products, number of activities). :rtype: numpy.ndarray<|endoftext|>
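get_A_matrix embeds the technology matrix read from disk into the top-left corner of a larger identity matrix, then replicates it along an iteration axis with np.resize (which tiles the flattened data, so every slice is an exact copy here). The same construction with a toy 2x2 block:

import numpy as np

initial_A = np.array([[1.0, -0.5], [0.0, 1.0]])  # toy exchange block
n_inputs, n_iter = 4, 3

new_A = np.identity(n_inputs)
r = initial_A.shape[0]
new_A[0:r, 0:r] = initial_A  # overwrite the top-left corner
new_A = np.resize(new_A, (n_iter, n_inputs, n_inputs))
print(new_A.shape)                      # (3, 4, 4)
print(np.allclose(new_A[0], new_A[2]))  # True: identical copies per iteration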
5f108e7335838f6d700d366c9c29ad961ca5fb9583458104f8c7429245d22e49
def get_B_matrix(self): '\n Load the B matrix. The B matrix contains impact assessment figures for a given impact assessment method,\n per unit of activity. Its length column-wise equals the length of the A matrix row-wise.\n Its length row-wise equals the number of impact assessment methods.\n\n :param method: only "recipe" and "ilcd" available at the moment.\n :param level: only "midpoint" available at the moment.\n :return: an array with impact values per unit of activity for each method.\n :rtype: numpy.ndarray\n\n ' if (self.method == 'recipe'): if (self.method_type == 'midpoint'): list_file_names = glob.glob((str(REMIND_FILES_DIR) + '/*recipe_midpoint*{}*.csv'.format(self.scenario))) list_file_names = sorted(list_file_names) B = np.zeros((len(list_file_names), 21, len(self.inputs))) elif (self.method_type == 'endpoint'): list_file_names = glob.glob((str(REMIND_FILES_DIR) + '/*recipe_endpoint*{}*.csv'.format(self.scenario))) list_file_names = sorted(list_file_names) B = np.zeros((len(list_file_names), 4, len(self.inputs))) else: raise TypeError("The LCIA method type should be either 'midpoint' or 'endpoint'.") else: list_file_names = glob.glob((str(REMIND_FILES_DIR) + '/*ilcd*{}*.csv'.format(self.scenario))) list_file_names = sorted(list_file_names) B = np.zeros((len(list_file_names), 19, len(self.inputs))) for (f, fp) in enumerate(list_file_names): initial_B = np.genfromtxt(fp, delimiter=';') new_B = np.zeros((np.shape(initial_B)[0], len(self.inputs))) new_B[0:np.shape(initial_B)[0], 0:np.shape(initial_B)[1]] = initial_B B[f, :, :] = new_B list_impact_categories = list(self.impact_categories.keys()) if (self.scenario != 'static'): response = xr.DataArray(B, coords=[[2005, 2010, 2020, 2030, 2040, 2050], list_impact_categories, list(self.inputs.keys())], dims=['year', 'category', 'activity']) else: response = xr.DataArray(B, coords=[[2020], list_impact_categories, list(self.inputs.keys())], dims=['year', 'category', 'activity']) return response
Load the B matrix. The B matrix contains impact assessment figures for a given impact assessment method, per unit of activity. Its length column-wise equals the length of the A matrix row-wise. Its length row-wise equals the number of impact assessment methods. :param method: only "recipe" and "ilcd" available at the moment. :param level: only "midpoint" available at the moment. :return: an array with impact values per unit of activity for each method. :rtype: numpy.ndarray
carculator/inventory.py
get_B_matrix
rena-nong/carculator
1
python
def get_B_matrix(self): '\n Load the B matrix. The B matrix contains impact assessment figures for a given impact assessment method,\n per unit of activity. Its length column-wise equals the length of the A matrix row-wise.\n Its length row-wise equals the number of impact assessment methods.\n\n :param method: only "recipe" and "ilcd" available at the moment.\n :param level: only "midpoint" available at the moment.\n :return: an array with impact values per unit of activity for each method.\n :rtype: numpy.ndarray\n\n ' if (self.method == 'recipe'): if (self.method_type == 'midpoint'): list_file_names = glob.glob((str(REMIND_FILES_DIR) + '/*recipe_midpoint*{}*.csv'.format(self.scenario))) list_file_names = sorted(list_file_names) B = np.zeros((len(list_file_names), 21, len(self.inputs))) elif (self.method_type == 'endpoint'): list_file_names = glob.glob((str(REMIND_FILES_DIR) + '/*recipe_endpoint*{}*.csv'.format(self.scenario))) list_file_names = sorted(list_file_names) B = np.zeros((len(list_file_names), 4, len(self.inputs))) else: raise TypeError("The LCIA method type should be either 'midpoint' or 'endpoint'.") else: list_file_names = glob.glob((str(REMIND_FILES_DIR) + '/*ilcd*{}*.csv'.format(self.scenario))) list_file_names = sorted(list_file_names) B = np.zeros((len(list_file_names), 19, len(self.inputs))) for (f, fp) in enumerate(list_file_names): initial_B = np.genfromtxt(fp, delimiter=';') new_B = np.zeros((np.shape(initial_B)[0], len(self.inputs))) new_B[0:np.shape(initial_B)[0], 0:np.shape(initial_B)[1]] = initial_B B[f, :, :] = new_B list_impact_categories = list(self.impact_categories.keys()) if (self.scenario != 'static'): response = xr.DataArray(B, coords=[[2005, 2010, 2020, 2030, 2040, 2050], list_impact_categories, list(self.inputs.keys())], dims=['year', 'category', 'activity']) else: response = xr.DataArray(B, coords=[[2020], list_impact_categories, list(self.inputs.keys())], dims=['year', 'category', 'activity']) return response
def get_B_matrix(self): '\n Load the B matrix. The B matrix contains impact assessment figures for a given impact assessment method,\n per unit of activity. Its length column-wise equals the length of the A matrix row-wise.\n Its length row-wise equals the number of impact assessment methods.\n\n :param method: only "recipe" and "ilcd" available at the moment.\n :param level: only "midpoint" available at the moment.\n :return: an array with impact values per unit of activity for each method.\n :rtype: numpy.ndarray\n\n ' if (self.method == 'recipe'): if (self.method_type == 'midpoint'): list_file_names = glob.glob((str(REMIND_FILES_DIR) + '/*recipe_midpoint*{}*.csv'.format(self.scenario))) list_file_names = sorted(list_file_names) B = np.zeros((len(list_file_names), 21, len(self.inputs))) elif (self.method_type == 'endpoint'): list_file_names = glob.glob((str(REMIND_FILES_DIR) + '/*recipe_endpoint*{}*.csv'.format(self.scenario))) list_file_names = sorted(list_file_names) B = np.zeros((len(list_file_names), 4, len(self.inputs))) else: raise TypeError("The LCIA method type should be either 'midpoint' or 'endpoint'.") else: list_file_names = glob.glob((str(REMIND_FILES_DIR) + '/*ilcd*{}*.csv'.format(self.scenario))) list_file_names = sorted(list_file_names) B = np.zeros((len(list_file_names), 19, len(self.inputs))) for (f, fp) in enumerate(list_file_names): initial_B = np.genfromtxt(fp, delimiter=';') new_B = np.zeros((np.shape(initial_B)[0], len(self.inputs))) new_B[0:np.shape(initial_B)[0], 0:np.shape(initial_B)[1]] = initial_B B[f, :, :] = new_B list_impact_categories = list(self.impact_categories.keys()) if (self.scenario != 'static'): response = xr.DataArray(B, coords=[[2005, 2010, 2020, 2030, 2040, 2050], list_impact_categories, list(self.inputs.keys())], dims=['year', 'category', 'activity']) else: response = xr.DataArray(B, coords=[[2020], list_impact_categories, list(self.inputs.keys())], dims=['year', 'category', 'activity']) return response<|docstring|>Load the B matrix. The B matrix contains impact assessment figures for a given impact assessment method, per unit of activity. Its length column-wise equals the length of the A matrix row-wise. Its length row-wise equals the number of impact assessment methods. :param method: only "recipe" and "ilcd" available at the moment. :param level: only "midpoint" available at the moment. :return: an array with impact values per unit of activity for each method. :rtype: numpy.ndarray<|endoftext|>
6cd859343dcc5ace1c765037fad40ecb78e7c1aa97fd166de4736129543d5381
def get_dict_input(self): '\n Load a dictionary with tuple ("name of activity", "location", "unit", "reference product") as key, row/column\n indices as values.\n\n :return: dictionary with `label:index` pairs.\n :rtype: dict\n\n ' filename = 'dict_inputs_A_matrix.csv' filepath = (DATA_DIR / filename) if (not filepath.is_file()): raise FileNotFoundError('The dictionary of activity labels could not be found.') csv_dict = {} count = 0 with open(filepath) as f: input_dict = csv.reader(f, delimiter=';') for row in input_dict: if ('(' in row[1]): new_str = row[1].replace('(', '') new_str = new_str.replace(')', '') new_str = [s.strip() for s in new_str.split(',') if s] t = () for s in new_str: if ('low population' in s): s = 'low population density, long-term' t += (s,) break else: t += (s.replace("'", ''),) csv_dict[(row[0], t, row[2])] = count else: csv_dict[(row[0], row[1], row[2], row[3])] = count count += 1 return csv_dict
Load a dictionary with tuple ("name of activity", "location", "unit", "reference product") as key, row/column indices as values. :return: dictionary with `label:index` pairs. :rtype: dict
carculator/inventory.py
get_dict_input
rena-nong/carculator
1
python
def get_dict_input(self): '\n Load a dictionary with tuple ("name of activity", "location", "unit", "reference product") as key, row/column\n indices as values.\n\n :return: dictionary with `label:index` pairs.\n :rtype: dict\n\n ' filename = 'dict_inputs_A_matrix.csv' filepath = (DATA_DIR / filename) if (not filepath.is_file()): raise FileNotFoundError('The dictionary of activity labels could not be found.') csv_dict = {} count = 0 with open(filepath) as f: input_dict = csv.reader(f, delimiter=';') for row in input_dict: if ('(' in row[1]): new_str = row[1].replace('(', '') new_str = new_str.replace(')', '') new_str = [s.strip() for s in new_str.split(',') if s] t = () for s in new_str: if ('low population' in s): s = 'low population density, long-term' t += (s,) break else: t += (s.replace("'", ''),) csv_dict[(row[0], t, row[2])] = count else: csv_dict[(row[0], row[1], row[2], row[3])] = count count += 1 return csv_dict
def get_dict_input(self): '\n Load a dictionary with tuple ("name of activity", "location", "unit", "reference product") as key, row/column\n indices as values.\n\n :return: dictionary with `label:index` pairs.\n :rtype: dict\n\n ' filename = 'dict_inputs_A_matrix.csv' filepath = (DATA_DIR / filename) if (not filepath.is_file()): raise FileNotFoundError('The dictionary of activity labels could not be found.') csv_dict = {} count = 0 with open(filepath) as f: input_dict = csv.reader(f, delimiter=';') for row in input_dict: if ('(' in row[1]): new_str = row[1].replace('(', '') new_str = new_str.replace(')', '') new_str = [s.strip() for s in new_str.split(',') if s] t = () for s in new_str: if ('low population' in s): s = 'low population density, long-term' t += (s,) break else: t += (s.replace("'", ''),) csv_dict[(row[0], t, row[2])] = count else: csv_dict[(row[0], row[1], row[2], row[3])] = count count += 1 return csv_dict<|docstring|>Load a dictionary with tuple ("name of activity", "location", "unit", "reference product") as key, row/column indices as values. :return: dictionary with `label:index` pairs. :rtype: dict<|endoftext|>
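The string-parsing branch of get_dict_input turns a label like "('air', 'urban air close to ground')" back into a tuple of compartment names, collapsing any low-population compartment (whose name itself contains a comma) to a single canonical label. The same steps in isolation:

raw = "('air', 'urban air close to ground')"
new_str = raw.replace('(', '').replace(')', '')
parts = [s.strip() for s in new_str.split(',') if s]
t = ()
for s in parts:
    if 'low population' in s:
        t += ('low population density, long-term',)
        break
    t += (s.replace("'", ''),)
print(t)  # ('air', 'urban air close to ground')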
a057e576b0fd85f23d0d5ae6e23388e566ba08b7ac2a4ca06adfc80617dac5e8
def get_dict_impact_categories(self): "\n Load a dictionary with available impact assessment methods as keys, and assessment level and categories as values.\n\n .. code-block:: python\n\n {'recipe': {'midpoint': ['freshwater ecotoxicity',\n 'human toxicity',\n 'marine ecotoxicity',\n 'terrestrial ecotoxicity',\n 'metal depletion',\n 'agricultural land occupation',\n 'climate change',\n 'fossil depletion',\n 'freshwater eutrophication',\n 'ionising radiation',\n 'marine eutrophication',\n 'natural land transformation',\n 'ozone depletion',\n 'particulate matter formation',\n 'photochemical oxidant formation',\n 'terrestrial acidification',\n 'urban land occupation',\n 'water depletion',\n 'human noise',\n 'primary energy, non-renewable',\n 'primary energy, renewable']\n }\n }\n\n :return: dictionary\n :rtype: dict\n " filename = 'dict_impact_categories.csv' filepath = (DATA_DIR / filename) if (not filepath.is_file()): raise FileNotFoundError('The dictionary of impact categories could not be found.') csv_dict = {} with open(filepath) as f: input_dict = csv.reader(f, delimiter=';') for row in input_dict: if ((row[0] == self.method) and (row[3] == self.method_type)): csv_dict[row[2]] = {'method': row[1], 'category': row[2], 'type': row[3], 'abbreviation': row[4], 'unit': row[5], 'source': row[6]} return csv_dict
Load a dictionary with available impact assessment methods as keys, and assessment level and categories as values. .. code-block:: python {'recipe': {'midpoint': ['freshwater ecotoxicity', 'human toxicity', 'marine ecotoxicity', 'terrestrial ecotoxicity', 'metal depletion', 'agricultural land occupation', 'climate change', 'fossil depletion', 'freshwater eutrophication', 'ionising radiation', 'marine eutrophication', 'natural land transformation', 'ozone depletion', 'particulate matter formation', 'photochemical oxidant formation', 'terrestrial acidification', 'urban land occupation', 'water depletion', 'human noise', 'primary energy, non-renewable', 'primary energy, renewable'] } } :return: dictionary :rtype: dict
carculator/inventory.py
get_dict_impact_categories
rena-nong/carculator
1
python
def get_dict_impact_categories(self): "\n Load a dictionary with available impact assessment methods as keys, and assessment level and categories as values.\n\n .. code-block:: python\n\n {'recipe': {'midpoint': ['freshwater ecotoxicity',\n 'human toxicity',\n 'marine ecotoxicity',\n 'terrestrial ecotoxicity',\n 'metal depletion',\n 'agricultural land occupation',\n 'climate change',\n 'fossil depletion',\n 'freshwater eutrophication',\n 'ionising radiation',\n 'marine eutrophication',\n 'natural land transformation',\n 'ozone depletion',\n 'particulate matter formation',\n 'photochemical oxidant formation',\n 'terrestrial acidification',\n 'urban land occupation',\n 'water depletion',\n 'human noise',\n 'primary energy, non-renewable',\n 'primary energy, renewable']\n }\n }\n\n :return: dictionary\n :rtype: dict\n " filename = 'dict_impact_categories.csv' filepath = (DATA_DIR / filename) if (not filepath.is_file()): raise FileNotFoundError('The dictionary of impact categories could not be found.') csv_dict = {} with open(filepath) as f: input_dict = csv.reader(f, delimiter=';') for row in input_dict: if ((row[0] == self.method) and (row[3] == self.method_type)): csv_dict[row[2]] = {'method': row[1], 'category': row[2], 'type': row[3], 'abbreviation': row[4], 'unit': row[5], 'source': row[6]} return csv_dict
def get_dict_impact_categories(self): "\n Load a dictionary with available impact assessment methods as keys, and assessment level and categories as values.\n\n .. code-block:: python\n\n {'recipe': {'midpoint': ['freshwater ecotoxicity',\n 'human toxicity',\n 'marine ecotoxicity',\n 'terrestrial ecotoxicity',\n 'metal depletion',\n 'agricultural land occupation',\n 'climate change',\n 'fossil depletion',\n 'freshwater eutrophication',\n 'ionising radiation',\n 'marine eutrophication',\n 'natural land transformation',\n 'ozone depletion',\n 'particulate matter formation',\n 'photochemical oxidant formation',\n 'terrestrial acidification',\n 'urban land occupation',\n 'water depletion',\n 'human noise',\n 'primary energy, non-renewable',\n 'primary energy, renewable']\n }\n }\n\n :return: dictionary\n :rtype: dict\n " filename = 'dict_impact_categories.csv' filepath = (DATA_DIR / filename) if (not filepath.is_file()): raise FileNotFoundError('The dictionary of impact categories could not be found.') csv_dict = {} with open(filepath) as f: input_dict = csv.reader(f, delimiter=';') for row in input_dict: if ((row[0] == self.method) and (row[3] == self.method_type)): csv_dict[row[2]] = {'method': row[1], 'category': row[2], 'type': row[3], 'abbreviation': row[4], 'unit': row[5], 'source': row[6]} return csv_dict<|docstring|>Load a dictionary with available impact assessment methods as keys, and assessment level and categories as values. .. code-block:: python {'recipe': {'midpoint': ['freshwater ecotoxicity', 'human toxicity', 'marine ecotoxicity', 'terrestrial ecotoxicity', 'metal depletion', 'agricultural land occupation', 'climate change', 'fossil depletion', 'freshwater eutrophication', 'ionising radiation', 'marine eutrophication', 'natural land transformation', 'ozone depletion', 'particulate matter formation', 'photochemical oxidant formation', 'terrestrial acidification', 'urban land occupation', 'water depletion', 'human noise', 'primary energy, non-renewable', 'primary energy, renewable'] } } :return: dictionary :rtype: dict<|endoftext|>
6c9546d26806f517049e2e4412f1fbe2d5b0444fac16a5bb6dd38bb21eb660be
def get_rev_dict_input(self): '\n Reverse the self.inputs dictionary.\n\n :return: reversed dictionary\n :rtype: dict\n ' return {v: k for (k, v) in self.inputs.items()}
Reverse the self.inputs dictionary. :return: reversed dictionary :rtype: dict
carculator/inventory.py
get_rev_dict_input
rena-nong/carculator
1
python
def get_rev_dict_input(self): '\n Reverse the self.inputs dictionary.\n\n :return: reversed dictionary\n :rtype: dict\n ' return {v: k for (k, v) in self.inputs.items()}
def get_rev_dict_input(self): '\n Reverse the self.inputs dictionary.\n\n :return: reversed dictionary\n :rtype: dict\n ' return {v: k for (k, v) in self.inputs.items()}<|docstring|>Reverse the self.inputs dictionary. :return: reversed dictionary :rtype: dict<|endoftext|>
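For reference, a minimal sketch of the reversal idiom used above, with a hypothetical two-entry inputs dictionary. The reversal is only lossless because the values (matrix indices) are unique.

inputs = {('activity A', 'GLO', 'kilogram', 'product A'): 0,
          ('activity B', 'GLO', 'kilogram', 'product B'): 1}
rev_inputs = {v: k for (k, v) in inputs.items()}
print(rev_inputs[0])  # ('activity A', 'GLO', 'kilogram', 'product A')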
21fa57095c8a5deb3e505b0fdecc68b64ff09217367a59fe61b65a61ab3f640e
def get_index_vehicle_from_array(self, items_to_look_for, items_to_look_for_also=None, method='or'): '\n Return list of row/column indices of self.array of labels that contain the string defined in `items_to_look_for`.\n\n :param items_to_look_for: string or list of strings to search for\n :param items_to_look_for_also: optional second string or list of strings, required when `method` is "and"\n :param method: "or" (default) or "and"; with "and", a label must match both search lists\n :return: list\n ' if (not isinstance(items_to_look_for, list)): items_to_look_for = [items_to_look_for] if (items_to_look_for_also is not None): if (not isinstance(items_to_look_for_also, list)): items_to_look_for_also = [items_to_look_for_also] list_vehicles = self.array.desired.values if (method == 'or'): return [c for (c, v) in enumerate(list_vehicles) if set(items_to_look_for).intersection(v)] if (method == 'and'): return [c for (c, v) in enumerate(list_vehicles) if (set(items_to_look_for).intersection(v) and set(items_to_look_for_also).intersection(v))]
Return list of row/column indices of self.array of labels that contain the string defined in `items_to_look_for`. :param items_to_look_for: string or list of strings to search for :param items_to_look_for_also: optional second string or list of strings, required when `method` is "and" :param method: "or" (default) or "and"; with "and", a label must match both search lists :return: list
carculator/inventory.py
get_index_vehicle_from_array
rena-nong/carculator
1
python
def get_index_vehicle_from_array(self, items_to_look_for, items_to_look_for_also=None, method='or'): '\n Return list of row/column indices of self.array of labels that contain the string defined in `items_to_look_for`.\n\n :param items_to_look_for: string or list of strings to search for\n :param items_to_look_for_also: optional second string or list of strings, required when `method` is "and"\n :param method: "or" (default) or "and"; with "and", a label must match both search lists\n :return: list\n ' if (not isinstance(items_to_look_for, list)): items_to_look_for = [items_to_look_for] if (items_to_look_for_also is not None): if (not isinstance(items_to_look_for_also, list)): items_to_look_for_also = [items_to_look_for_also] list_vehicles = self.array.desired.values if (method == 'or'): return [c for (c, v) in enumerate(list_vehicles) if set(items_to_look_for).intersection(v)] if (method == 'and'): return [c for (c, v) in enumerate(list_vehicles) if (set(items_to_look_for).intersection(v) and set(items_to_look_for_also).intersection(v))]
def get_index_vehicle_from_array(self, items_to_look_for, items_to_look_for_also=None, method='or'): '\n Return list of row/column indices of self.array of labels that contain the string defined in `items_to_look_for`.\n\n :param items_to_look_for: string or list of strings to search for\n :param items_to_look_for_also: optional second string or list of strings, required when `method` is "and"\n :param method: "or" (default) or "and"; with "and", a label must match both search lists\n :return: list\n ' if (not isinstance(items_to_look_for, list)): items_to_look_for = [items_to_look_for] if (items_to_look_for_also is not None): if (not isinstance(items_to_look_for_also, list)): items_to_look_for_also = [items_to_look_for_also] list_vehicles = self.array.desired.values if (method == 'or'): return [c for (c, v) in enumerate(list_vehicles) if set(items_to_look_for).intersection(v)] if (method == 'and'): return [c for (c, v) in enumerate(list_vehicles) if (set(items_to_look_for).intersection(v) and set(items_to_look_for_also).intersection(v))]<|docstring|>Return list of row/column indices of self.array of labels that contain the string defined in `items_to_look_for`. :param items_to_look_for: string or list of strings to search for :param items_to_look_for_also: optional second string or list of strings, required when `method` is "and" :param method: "or" (default) or "and"; with "and", a label must match both search lists :return: list<|endoftext|>
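A minimal sketch of the intersection-based matching above, with hypothetical vehicle labels (the real labels come from self.array.desired.values):

labels = [{'BEV', 'Large', '2020'}, {'ICEV-p', 'Medium', '2020'}, {'BEV', 'Medium', '2030'}]

def get_indices(look_for, look_for_also=None, method='or'):
    look_for = look_for if isinstance(look_for, list) else [look_for]
    if method == 'or':
        # keep labels sharing at least one element with the search list
        return [c for (c, v) in enumerate(labels) if set(look_for).intersection(v)]
    look_for_also = look_for_also if isinstance(look_for_also, list) else [look_for_also]
    # with 'and', a label must intersect both search lists
    return [c for (c, v) in enumerate(labels)
            if set(look_for).intersection(v) and set(look_for_also).intersection(v)]

print(get_indices('BEV'))                        # [0, 2]
print(get_indices('BEV', '2030', method='and'))  # [2]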
8047bec7133f47408b2e6754e250b4241a0f59c3d55976d84ed2f128900b451a
def get_index_of_flows(self, items_to_look_for, search_by='name'): '\n Return list of row/column indices of self.A of labels that contain the string defined in `items_to_look_for`.\n\n :param items_to_look_for: list of strings; every string must appear in the label\n :param search_by: "name" or "compartment" (for elementary flows)\n :return: list of row/column indices\n :rtype: list\n ' if (search_by == 'name'): return [int(self.inputs[c]) for c in self.inputs if all(((ele in c[0].lower()) for ele in items_to_look_for))] if (search_by == 'compartment'): return [int(self.inputs[c]) for c in self.inputs if all(((ele in c[1]) for ele in items_to_look_for))]
Return list of row/column indices of self.A of labels that contain the string defined in `items_to_look_for`. :param items_to_look_for: list of strings; every string must appear in the label :param search_by: "name" or "compartment" (for elementary flows) :return: list of row/column indices :rtype: list
carculator/inventory.py
get_index_of_flows
rena-nong/carculator
1
python
def get_index_of_flows(self, items_to_look_for, search_by='name'): '\n Return list of row/column indices of self.A of labels that contain the string defined in `items_to_look_for`.\n\n :param items_to_look_for: list of strings; every string must appear in the label\n :param search_by: "name" or "compartment" (for elementary flows)\n :return: list of row/column indices\n :rtype: list\n ' if (search_by == 'name'): return [int(self.inputs[c]) for c in self.inputs if all(((ele in c[0].lower()) for ele in items_to_look_for))] if (search_by == 'compartment'): return [int(self.inputs[c]) for c in self.inputs if all(((ele in c[1]) for ele in items_to_look_for))]
def get_index_of_flows(self, items_to_look_for, search_by='name'): '\n Return list of row/column indices of self.A of labels that contain the string defined in `items_to_look_for`.\n\n :param items_to_look_for: list of strings; every string must appear in the label\n :param search_by: "name" or "compartment" (for elementary flows)\n :return: list of row/column indices\n :rtype: list\n ' if (search_by == 'name'): return [int(self.inputs[c]) for c in self.inputs if all(((ele in c[0].lower()) for ele in items_to_look_for))] if (search_by == 'compartment'): return [int(self.inputs[c]) for c in self.inputs if all(((ele in c[1]) for ele in items_to_look_for))]<|docstring|>Return list of row/column indices of self.A of labels that contain the string defined in `items_to_look_for`. :param items_to_look_for: list of strings; every string must appear in the label :param search_by: "name" or "compartment" (for elementary flows) :return: list of row/column indices :rtype: list<|endoftext|>
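A minimal sketch of the all()-based substring matching above, with a hypothetical inputs mapping from flow labels to matrix indices. Note that search terms should be lowercase, since only the label name is lowercased before comparison:

inputs = {('Carbon dioxide, fossil', ('air',), 'kilogram'): 0,
          ('Carbon monoxide, fossil', ('air', 'urban air close to ground'), 'kilogram'): 1}

terms = ['carbon', 'dioxide']
# search_by='name': every term must be a substring of the lowercased flow name
matches = [int(idx) for (label, idx) in inputs.items()
           if all(ele in label[0].lower() for ele in terms)]
print(matches)  # [0]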
e97092b7829916b6672548788742c53defe93ba94b53a5f68fd3747b5ae48c2d
def export_lci(self, presamples=True, ecoinvent_compatibility=True, ecoinvent_version='3.7', db_name='carculator db', forbidden_activities=None, create_vehicle_datasets=True): '\n Export the inventory as a dictionary. Also return a list of arrays that contain pre-sampled random values if\n :meth:`stochastic` of :class:`CarModel` class has been called.\n\n :param presamples: bool. If True, pre-sampled arrays are returned along with the inventory.\n :param ecoinvent_compatibility: bool. If True, compatible with ecoinvent. If False, compatible with REMIND-ecoinvent.\n :param ecoinvent_version: str. "3.7", "3.6", "3.5" or "uvek"\n :param create_vehicle_datasets: bool. Whether vehicle datasets (as structured in ecoinvent) should be created too.\n :return: inventory, and optionally, list of arrays containing pre-sampled values.\n :rtype: list\n ' self.inputs = self.get_dict_input() self.bs = BackgroundSystemModel() self.country = self.get_country_of_use() self.add_additional_activities() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() if create_vehicle_datasets: self.add_additional_activities_for_export() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() self.create_electricity_market_for_fuel_prep() self.create_electricity_market_for_battery_production() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.set_inputs_in_A_matrix_for_export(self.array.values) else: self.create_electricity_market_for_fuel_prep() self.create_electricity_market_for_battery_production() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.set_inputs_in_A_matrix(self.array.values) if isinstance(self.fleet, xr.core.dataarray.DataArray): print('Building fleet average vehicles...') self.build_fleet_vehicles() self.rev_inputs = self.get_rev_dict_input() self.number_of_cars += (len(self.scope['year']) * len(self.scope['powertrain'])) if (not ecoinvent_compatibility): fuel_markets = [self.inputs[a] for a in self.inputs if ('fuel market for' in a[0])] electricity_inputs = [self.inputs[a] for a in self.inputs if ('electricity market for' in a[0])] self.A[np.ix_(range(self.A.shape[0]), electricity_inputs, fuel_markets)] = 0 if presamples: (lci, array) = ExportInventory(self.A, self.rev_inputs, db_name=db_name).write_lci(presamples=presamples, ecoinvent_compatibility=ecoinvent_compatibility, ecoinvent_version=ecoinvent_version, forbidden_activities=forbidden_activities, vehicle_specs=self.specs) return (lci, array) else: lci = ExportInventory(self.A, self.rev_inputs, db_name=db_name).write_lci(presamples=presamples, ecoinvent_compatibility=ecoinvent_compatibility, ecoinvent_version=ecoinvent_version, forbidden_activities=forbidden_activities, vehicle_specs=self.specs) return lci
Export the inventory as a dictionary. Also return a list of arrays that contain pre-sampled random values if :meth:`stochastic` of :class:`CarModel` class has been called. :param presamples: bool. If True, pre-sampled arrays are returned along with the inventory. :param ecoinvent_compatibility: bool. If True, compatible with ecoinvent. If False, compatible with REMIND-ecoinvent. :param ecoinvent_version: str. "3.7", "3.6", "3.5" or "uvek" :param create_vehicle_datasets: bool. Whether vehicle datasets (as structured in ecoinvent) should be created too. :return: inventory, and optionally, list of arrays containing pre-sampled values. :rtype: list
carculator/inventory.py
export_lci
rena-nong/carculator
1
python
def export_lci(self, presamples=True, ecoinvent_compatibility=True, ecoinvent_version='3.7', db_name='carculator db', forbidden_activities=None, create_vehicle_datasets=True): '\n Export the inventory as a dictionary. Also return a list of arrays that contain pre-sampled random values if\n :meth:`stochastic` of :class:`CarModel` class has been called.\n\n :param presamples: bool. If True, pre-sampled arrays are returned along with the inventory.\n :param ecoinvent_compatibility: bool. If True, compatible with ecoinvent. If False, compatible with REMIND-ecoinvent.\n :param ecoinvent_version: str. "3.7", "3.6", "3.5" or "uvek"\n :param create_vehicle_datasets: bool. Whether vehicle datasets (as structured in ecoinvent) should be created too.\n :return: inventory, and optionally, list of arrays containing pre-sampled values.\n :rtype: list\n ' self.inputs = self.get_dict_input() self.bs = BackgroundSystemModel() self.country = self.get_country_of_use() self.add_additional_activities() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() if create_vehicle_datasets: self.add_additional_activities_for_export() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() self.create_electricity_market_for_fuel_prep() self.create_electricity_market_for_battery_production() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.set_inputs_in_A_matrix_for_export(self.array.values) else: self.create_electricity_market_for_fuel_prep() self.create_electricity_market_for_battery_production() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.set_inputs_in_A_matrix(self.array.values) if isinstance(self.fleet, xr.core.dataarray.DataArray): print('Building fleet average vehicles...') self.build_fleet_vehicles() self.rev_inputs = self.get_rev_dict_input() self.number_of_cars += (len(self.scope['year']) * len(self.scope['powertrain'])) if (not ecoinvent_compatibility): fuel_markets = [self.inputs[a] for a in self.inputs if ('fuel market for' in a[0])] electricity_inputs = [self.inputs[a] for a in self.inputs if ('electricity market for' in a[0])] self.A[np.ix_(range(self.A.shape[0]), electricity_inputs, fuel_markets)] = 0 if presamples: (lci, array) = ExportInventory(self.A, self.rev_inputs, db_name=db_name).write_lci(presamples=presamples, ecoinvent_compatibility=ecoinvent_compatibility, ecoinvent_version=ecoinvent_version, forbidden_activities=forbidden_activities, vehicle_specs=self.specs) return (lci, array) else: lci = ExportInventory(self.A, self.rev_inputs, db_name=db_name).write_lci(presamples=presamples, ecoinvent_compatibility=ecoinvent_compatibility, ecoinvent_version=ecoinvent_version, forbidden_activities=forbidden_activities, vehicle_specs=self.specs) return lci
def export_lci(self, presamples=True, ecoinvent_compatibility=True, ecoinvent_version='3.7', db_name='carculator db', forbidden_activities=None, create_vehicle_datasets=True): '\n Export the inventory as a dictionary. Also return a list of arrays that contain pre-sampled random values if\n :meth:`stochastic` of :class:`CarModel` class has been called.\n\n :param presamples: bool. If True, pre-sampled arrays are returned along with the inventory.\n :param ecoinvent_compatibility: bool. If True, compatible with ecoinvent. If False, compatible with REMIND-ecoinvent.\n :param ecoinvent_version: str. "3.7", "3.6", "3.5" or "uvek"\n :param create_vehicle_datasets: bool. Whether vehicle datasets (as structured in ecoinvent) should be created too.\n :return: inventory, and optionally, list of arrays containing pre-sampled values.\n :rtype: list\n ' self.inputs = self.get_dict_input() self.bs = BackgroundSystemModel() self.country = self.get_country_of_use() self.add_additional_activities() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() if create_vehicle_datasets: self.add_additional_activities_for_export() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() self.create_electricity_market_for_fuel_prep() self.create_electricity_market_for_battery_production() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.set_inputs_in_A_matrix_for_export(self.array.values) else: self.create_electricity_market_for_fuel_prep() self.create_electricity_market_for_battery_production() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.set_inputs_in_A_matrix(self.array.values) if isinstance(self.fleet, xr.core.dataarray.DataArray): print('Building fleet average vehicles...') self.build_fleet_vehicles() self.rev_inputs = self.get_rev_dict_input() self.number_of_cars += (len(self.scope['year']) * len(self.scope['powertrain'])) if (not ecoinvent_compatibility): fuel_markets = [self.inputs[a] for a in self.inputs if ('fuel market for' in a[0])] electricity_inputs = [self.inputs[a] for a in self.inputs if ('electricity market for' in a[0])] self.A[np.ix_(range(self.A.shape[0]), electricity_inputs, fuel_markets)] = 0 if presamples: (lci, array) = ExportInventory(self.A, self.rev_inputs, db_name=db_name).write_lci(presamples=presamples, ecoinvent_compatibility=ecoinvent_compatibility, ecoinvent_version=ecoinvent_version, forbidden_activities=forbidden_activities, vehicle_specs=self.specs) return (lci, array) else: lci = ExportInventory(self.A, self.rev_inputs, db_name=db_name).write_lci(presamples=presamples, ecoinvent_compatibility=ecoinvent_compatibility, ecoinvent_version=ecoinvent_version, forbidden_activities=forbidden_activities, vehicle_specs=self.specs) return lci<|docstring|>Export the inventory as a dictionary. Also return a list of arrays that contain pre-sampled random values if :meth:`stochastic` of :class:`CarModel` class has been called. :param presamples: bool. If True, pre-sampled arrays are returned along with the inventory. :param ecoinvent_compatibility: bool. If True, compatible with ecoinvent. If False, compatible with REMIND-ecoinvent. :param ecoinvent_version: str. "3.7", "3.6", "3.5" or "uvek" :param create_vehicle_datasets: bool. Whether vehicle datasets (as structured in ecoinvent) should be created too. :return: inventory, and optionally, list of arrays containing pre-sampled values. :rtype: list<|endoftext|>
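A hypothetical usage sketch of the two return shapes of export_lci, assuming an already-configured InventoryCalculation-like object named ic (the name mirrors the export_lci_to_bw docstring below and is illustrative, not part of the API):

# presamples=True (the default): the inventory plus the pre-sampled arrays
lci, arrays = ic.export_lci(db_name='carculator db')

# presamples=False: the inventory dictionary alone
lci = ic.export_lci(presamples=False, ecoinvent_version='3.7')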
e25e4e6d027cab003e09a09fb081d30c99910ad5ab275881d6760f1066a7fdbe
def export_lci_to_bw(self, presamples=True, ecoinvent_compatibility=True, ecoinvent_version='3.7', db_name='carculator db', forbidden_activities=None, create_vehicle_datasets=True): '\n Export the inventory as a `brightway2` bw2io.importers.base_lci.LCIImporter object\n with the inventory in the `data` attribute.\n\n .. code-block:: python\n\n # get the inventory\n i, _ = ic.export_lci_to_bw()\n\n # import it in a Brightway2 project\n i.match_database(\'ecoinvent 3.6 cutoff\', fields=(\'name\', \'unit\', \'location\', \'reference product\'))\n i.match_database("biosphere3", fields=(\'name\', \'unit\', \'categories\'))\n i.match_database(fields=(\'name\', \'unit\', \'location\', \'reference product\'))\n i.match_database(fields=(\'name\', \'unit\', \'categories\'))\n\n # Create an additional biosphere database for the few flows that do not\n # exist in "biosphere3"\n i.create_new_biosphere("additional_biosphere", relink=True)\n\n # Check if all exchanges link\n i.statistics()\n\n # Register the database\n i.write_database()\n\n :return: LCIImporter object that can be directly registered in a `brightway2` project.\n :rtype: bw2io.importers.base_lci.LCIImporter\n ' self.inputs = self.get_dict_input() self.bs = BackgroundSystemModel() self.country = self.get_country_of_use() self.add_additional_activities() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() if create_vehicle_datasets: self.add_additional_activities_for_export() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() self.create_electricity_market_for_fuel_prep() self.create_electricity_market_for_battery_production() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.set_inputs_in_A_matrix_for_export(self.array.values) else: self.create_electricity_market_for_fuel_prep() self.create_electricity_market_for_battery_production() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.set_inputs_in_A_matrix(self.array.values) if isinstance(self.fleet, xr.core.dataarray.DataArray): print('Building fleet average vehicles...') self.build_fleet_vehicles() self.rev_inputs = self.get_rev_dict_input() self.number_of_cars += (len(self.scope['year']) * len(self.scope['powertrain'])) if (not ecoinvent_compatibility): fuel_markets = [self.inputs[a] for a in self.inputs if ('fuel market for' in a[0])] electricity_inputs = [self.inputs[a] for a in self.inputs if ('electricity market for' in a[0])] self.A[np.ix_(range(self.A.shape[0]), electricity_inputs, fuel_markets)] = 0 if presamples: (lci, array) = ExportInventory(self.A, self.rev_inputs, db_name=db_name).write_lci_to_bw(presamples=presamples, ecoinvent_compatibility=ecoinvent_compatibility, ecoinvent_version=ecoinvent_version, forbidden_activities=forbidden_activities, vehicle_specs=self.specs) return (lci, array) else: lci = ExportInventory(self.A, self.rev_inputs, db_name=db_name).write_lci_to_bw(presamples=presamples, ecoinvent_compatibility=ecoinvent_compatibility, ecoinvent_version=ecoinvent_version, forbidden_activities=forbidden_activities, vehicle_specs=self.specs) return lci
Export the inventory as a `brightway2` bw2io.importers.base_lci.LCIImporter object with the inventory in the `data` attribute. .. code-block:: python # get the inventory i, _ = ic.export_lci_to_bw() # import it in a Brightway2 project i.match_database('ecoinvent 3.6 cutoff', fields=('name', 'unit', 'location', 'reference product')) i.match_database("biosphere3", fields=('name', 'unit', 'categories')) i.match_database(fields=('name', 'unit', 'location', 'reference product')) i.match_database(fields=('name', 'unit', 'categories')) # Create an additional biosphere database for the few flows that do not # exist in "biosphere3" i.create_new_biosphere("additional_biosphere", relink=True) # Check if all exchanges link i.statistics() # Register the database i.write_database() :return: LCIImporter object that can be directly registered in a `brightway2` project. :rtype: bw2io.importers.base_lci.LCIImporter
carculator/inventory.py
export_lci_to_bw
rena-nong/carculator
1
python
def export_lci_to_bw(self, presamples=True, ecoinvent_compatibility=True, ecoinvent_version='3.7', db_name='carculator db', forbidden_activities=None, create_vehicle_datasets=True): '\n Export the inventory as a `brightway2` bw2io.importers.base_lci.LCIImporter object\n with the inventory in the `data` attribute.\n\n .. code-block:: python\n\n # get the inventory\n i, _ = ic.export_lci_to_bw()\n\n # import it in a Brightway2 project\n i.match_database(\'ecoinvent 3.6 cutoff\', fields=(\'name\', \'unit\', \'location\', \'reference product\'))\n i.match_database("biosphere3", fields=(\'name\', \'unit\', \'categories\'))\n i.match_database(fields=(\'name\', \'unit\', \'location\', \'reference product\'))\n i.match_database(fields=(\'name\', \'unit\', \'categories\'))\n\n # Create an additional biosphere database for the few flows that do not\n # exist in "biosphere3"\n i.create_new_biosphere("additional_biosphere", relink=True)\n\n # Check if all exchanges link\n i.statistics()\n\n # Register the database\n i.write_database()\n\n :return: LCIImporter object that can be directly registered in a `brightway2` project.\n :rtype: bw2io.importers.base_lci.LCIImporter\n ' self.inputs = self.get_dict_input() self.bs = BackgroundSystemModel() self.country = self.get_country_of_use() self.add_additional_activities() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() if create_vehicle_datasets: self.add_additional_activities_for_export() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() self.create_electricity_market_for_fuel_prep() self.create_electricity_market_for_battery_production() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.set_inputs_in_A_matrix_for_export(self.array.values) else: self.create_electricity_market_for_fuel_prep() self.create_electricity_market_for_battery_production() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.set_inputs_in_A_matrix(self.array.values) if isinstance(self.fleet, xr.core.dataarray.DataArray): print('Building fleet average vehicles...') self.build_fleet_vehicles() self.rev_inputs = self.get_rev_dict_input() self.number_of_cars += (len(self.scope['year']) * len(self.scope['powertrain'])) if (not ecoinvent_compatibility): fuel_markets = [self.inputs[a] for a in self.inputs if ('fuel market for' in a[0])] electricity_inputs = [self.inputs[a] for a in self.inputs if ('electricity market for' in a[0])] self.A[np.ix_(range(self.A.shape[0]), electricity_inputs, fuel_markets)] = 0 if presamples: (lci, array) = ExportInventory(self.A, self.rev_inputs, db_name=db_name).write_lci_to_bw(presamples=presamples, ecoinvent_compatibility=ecoinvent_compatibility, ecoinvent_version=ecoinvent_version, forbidden_activities=forbidden_activities, vehicle_specs=self.specs) return (lci, array) else: lci = ExportInventory(self.A, self.rev_inputs, db_name=db_name).write_lci_to_bw(presamples=presamples, ecoinvent_compatibility=ecoinvent_compatibility, ecoinvent_version=ecoinvent_version, forbidden_activities=forbidden_activities, vehicle_specs=self.specs) return lci
def export_lci_to_bw(self, presamples=True, ecoinvent_compatibility=True, ecoinvent_version='3.7', db_name='carculator db', forbidden_activities=None, create_vehicle_datasets=True): '\n Export the inventory as a `brightway2` bw2io.importers.base_lci.LCIImporter object\n with the inventory in the `data` attribute.\n\n .. code-block:: python\n\n # get the inventory\n i, _ = ic.export_lci_to_bw()\n\n # import it in a Brightway2 project\n i.match_database(\'ecoinvent 3.6 cutoff\', fields=(\'name\', \'unit\', \'location\', \'reference product\'))\n i.match_database("biosphere3", fields=(\'name\', \'unit\', \'categories\'))\n i.match_database(fields=(\'name\', \'unit\', \'location\', \'reference product\'))\n i.match_database(fields=(\'name\', \'unit\', \'categories\'))\n\n # Create an additional biosphere database for the few flows that do not\n # exist in "biosphere3"\n i.create_new_biosphere("additional_biosphere", relink=True)\n\n # Check if all exchanges link\n i.statistics()\n\n # Register the database\n i.write_database()\n\n :return: LCIImporter object that can be directly registered in a `brightway2` project.\n :rtype: bw2io.importers.base_lci.LCIImporter\n ' self.inputs = self.get_dict_input() self.bs = BackgroundSystemModel() self.country = self.get_country_of_use() self.add_additional_activities() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() if create_vehicle_datasets: self.add_additional_activities_for_export() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() self.create_electricity_market_for_fuel_prep() self.create_electricity_market_for_battery_production() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.set_inputs_in_A_matrix_for_export(self.array.values) else: self.create_electricity_market_for_fuel_prep() self.create_electricity_market_for_battery_production() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.set_inputs_in_A_matrix(self.array.values) if isinstance(self.fleet, xr.core.dataarray.DataArray): print('Building fleet average vehicles...') self.build_fleet_vehicles() self.rev_inputs = self.get_rev_dict_input() self.number_of_cars += (len(self.scope['year']) * len(self.scope['powertrain'])) if (not ecoinvent_compatibility): fuel_markets = [self.inputs[a] for a in self.inputs if ('fuel market for' in a[0])] electricity_inputs = [self.inputs[a] for a in self.inputs if ('electricity market for' in a[0])] self.A[np.ix_(range(self.A.shape[0]), electricity_inputs, fuel_markets)] = 0 if presamples: (lci, array) = ExportInventory(self.A, self.rev_inputs, db_name=db_name).write_lci_to_bw(presamples=presamples, ecoinvent_compatibility=ecoinvent_compatibility, ecoinvent_version=ecoinvent_version, forbidden_activities=forbidden_activities, vehicle_specs=self.specs) return (lci, array) else: lci = ExportInventory(self.A, self.rev_inputs, db_name=db_name).write_lci_to_bw(presamples=presamples, ecoinvent_compatibility=ecoinvent_compatibility, ecoinvent_version=ecoinvent_version, forbidden_activities=forbidden_activities, vehicle_specs=self.specs) return lci<|docstring|>Export the inventory as a `brightway2` bw2io.importers.base_lci.LCIImporter object with the inventory in the `data` attribute. .. code-block:: python # get the inventory i, _ = ic.export_lci_to_bw() # import it in a Brightway2 project i.match_database('ecoinvent 3.6 cutoff', fields=('name', 'unit', 'location', 'reference product')) i.match_database("biosphere3", fields=('name', 'unit', 'categories')) i.match_database(fields=('name', 'unit', 'location', 'reference product')) i.match_database(fields=('name', 'unit', 'categories')) # Create an additional biosphere database for the few flows that do not # exist in "biosphere3" i.create_new_biosphere("additional_biosphere", relink=True) # Check if all exchanges link i.statistics() # Register the database i.write_database() :return: LCIImporter object that can be directly registered in a `brightway2` project. :rtype: bw2io.importers.base_lci.LCIImporter<|endoftext|>
886d27c3ca7b316bf75dec46dc1667c6dc89f716996c038b10db74f52d5c65f3
def export_lci_to_excel(self, directory=None, ecoinvent_compatibility=True, ecoinvent_version='3.7', software_compatibility='brightway2', filename=None, forbidden_activities=None, create_vehicle_datasets=True, export_format='file'): '\n Export the inventory as an Excel file (if the destination software is Brightway2) or a CSV file (if the destination software is Simapro).\n Also return the file path where the file is stored.\n\n :param directory: directory where to save the file.\n :type directory: str\n :param ecoinvent_compatibility: If True, compatible with ecoinvent. If False, compatible with REMIND-ecoinvent.\n :param ecoinvent_version: "3.7", "3.6", "3.5" or "uvek"; the Simapro export supports only "3.5", "3.6" or "uvek"\n :param software_compatibility: "brightway2" or "simapro"\n :return: file path where the file is stored.\n :rtype: str\n ' if (software_compatibility not in ('brightway2', 'simapro')): raise NameError("The destination software argument is not valid. Choose between 'brightway2' or 'simapro'.") if (software_compatibility == 'simapro'): if (ecoinvent_version == '3.7'): print('Simapro-compatible inventory export is only available for ecoinvent 3.5, 3.6 or UVEK.') return ecoinvent_compatibility = True self.inputs = self.get_dict_input() self.bs = BackgroundSystemModel() self.country = self.get_country_of_use() self.add_additional_activities() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() if create_vehicle_datasets: self.add_additional_activities_for_export() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() self.create_electricity_market_for_fuel_prep() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.create_electricity_market_for_battery_production() self.set_inputs_in_A_matrix_for_export(self.array.values) else: self.create_electricity_market_for_fuel_prep() self.create_electricity_market_for_battery_production() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.set_inputs_in_A_matrix(self.array.values) if isinstance(self.fleet, xr.core.dataarray.DataArray): print('Building fleet average vehicles...') self.build_fleet_vehicles() self.rev_inputs = self.get_rev_dict_input() self.number_of_cars += (len(self.scope['year']) * len(self.scope['powertrain'])) if (not ecoinvent_compatibility): fuel_markets = [self.inputs[a] for a in self.inputs if ('fuel market for' in a[0])] electricity_inputs = [self.inputs[a] for a in self.inputs if ('electricity market for' in a[0])] self.A[np.ix_(range(self.A.shape[0]), electricity_inputs, fuel_markets)] = 0 fp = ExportInventory(self.A, self.rev_inputs, db_name=(filename or 'carculator db')).write_lci_to_excel(directory=directory, ecoinvent_compatibility=ecoinvent_compatibility, ecoinvent_version=ecoinvent_version, software_compatibility=software_compatibility, filename=filename, forbidden_activities=forbidden_activities, export_format=export_format, vehicle_specs=self.specs) return fp
Export the inventory as an Excel file (if the destination software is Brightway2) or a CSV file (if the destination software is Simapro). Also return the file path where the file is stored. :param directory: directory where to save the file. :type directory: str :param ecoinvent_compatibility: If True, compatible with ecoinvent. If False, compatible with REMIND-ecoinvent. :param ecoinvent_version: "3.7", "3.6", "3.5" or "uvek"; the Simapro export supports only "3.5", "3.6" or "uvek" :param software_compatibility: "brightway2" or "simapro" :return: file path where the file is stored. :rtype: str
carculator/inventory.py
export_lci_to_excel
rena-nong/carculator
1
python
def export_lci_to_excel(self, directory=None, ecoinvent_compatibility=True, ecoinvent_version='3.7', software_compatibility='brightway2', filename=None, forbidden_activities=None, create_vehicle_datasets=True, export_format='file'): '\n Export the inventory as an Excel file (if the destination software is Brightway2) or a CSV file (if the destination software is Simapro).\n Also return the file path where the file is stored.\n\n :param directory: directory where to save the file.\n :type directory: str\n :param ecoinvent_compatibility: If True, compatible with ecoinvent. If False, compatible with REMIND-ecoinvent.\n :param ecoinvent_version: "3.7", "3.6", "3.5" or "uvek"; the Simapro export supports only "3.5", "3.6" or "uvek"\n :param software_compatibility: "brightway2" or "simapro"\n :return: file path where the file is stored.\n :rtype: str\n ' if (software_compatibility not in ('brightway2', 'simapro')): raise NameError("The destination software argument is not valid. Choose between 'brightway2' or 'simapro'.") if (software_compatibility == 'simapro'): if (ecoinvent_version == '3.7'): print('Simapro-compatible inventory export is only available for ecoinvent 3.5, 3.6 or UVEK.') return ecoinvent_compatibility = True self.inputs = self.get_dict_input() self.bs = BackgroundSystemModel() self.country = self.get_country_of_use() self.add_additional_activities() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() if create_vehicle_datasets: self.add_additional_activities_for_export() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() self.create_electricity_market_for_fuel_prep() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.create_electricity_market_for_battery_production() self.set_inputs_in_A_matrix_for_export(self.array.values) else: self.create_electricity_market_for_fuel_prep() self.create_electricity_market_for_battery_production() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.set_inputs_in_A_matrix(self.array.values) if isinstance(self.fleet, xr.core.dataarray.DataArray): print('Building fleet average vehicles...') self.build_fleet_vehicles() self.rev_inputs = self.get_rev_dict_input() self.number_of_cars += (len(self.scope['year']) * len(self.scope['powertrain'])) if (not ecoinvent_compatibility): fuel_markets = [self.inputs[a] for a in self.inputs if ('fuel market for' in a[0])] electricity_inputs = [self.inputs[a] for a in self.inputs if ('electricity market for' in a[0])] self.A[np.ix_(range(self.A.shape[0]), electricity_inputs, fuel_markets)] = 0 fp = ExportInventory(self.A, self.rev_inputs, db_name=(filename or 'carculator db')).write_lci_to_excel(directory=directory, ecoinvent_compatibility=ecoinvent_compatibility, ecoinvent_version=ecoinvent_version, software_compatibility=software_compatibility, filename=filename, forbidden_activities=forbidden_activities, export_format=export_format, vehicle_specs=self.specs) return fp
def export_lci_to_excel(self, directory=None, ecoinvent_compatibility=True, ecoinvent_version='3.7', software_compatibility='brightway2', filename=None, forbidden_activities=None, create_vehicle_datasets=True, export_format='file'): '\n Export the inventory as an Excel file (if the destination software is Brightway2) or a CSV file (if the destination software is Simapro).\n Also return the file path where the file is stored.\n\n :param directory: directory where to save the file.\n :type directory: str\n :param ecoinvent_compatibility: If True, compatible with ecoinvent. If False, compatible with REMIND-ecoinvent.\n :param ecoinvent_version: "3.7", "3.6", "3.5" or "uvek"; the Simapro export supports only "3.5", "3.6" or "uvek"\n :param software_compatibility: "brightway2" or "simapro"\n :return: file path where the file is stored.\n :rtype: str\n ' if (software_compatibility not in ('brightway2', 'simapro')): raise NameError("The destination software argument is not valid. Choose between 'brightway2' or 'simapro'.") if (software_compatibility == 'simapro'): if (ecoinvent_version == '3.7'): print('Simapro-compatible inventory export is only available for ecoinvent 3.5, 3.6 or UVEK.') return ecoinvent_compatibility = True self.inputs = self.get_dict_input() self.bs = BackgroundSystemModel() self.country = self.get_country_of_use() self.add_additional_activities() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() if create_vehicle_datasets: self.add_additional_activities_for_export() self.rev_inputs = self.get_rev_dict_input() self.A = self.get_A_matrix() self.create_electricity_market_for_fuel_prep() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.create_electricity_market_for_battery_production() self.set_inputs_in_A_matrix_for_export(self.array.values) else: self.create_electricity_market_for_fuel_prep() self.create_electricity_market_for_battery_production() self.fuel_blends = {} self.fuel_dictionary = self.create_fuel_dictionary() self.define_fuel_blends() self.set_actual_range() self.set_inputs_in_A_matrix(self.array.values) if isinstance(self.fleet, xr.core.dataarray.DataArray): print('Building fleet average vehicles...') self.build_fleet_vehicles() self.rev_inputs = self.get_rev_dict_input() self.number_of_cars += (len(self.scope['year']) * len(self.scope['powertrain'])) if (not ecoinvent_compatibility): fuel_markets = [self.inputs[a] for a in self.inputs if ('fuel market for' in a[0])] electricity_inputs = [self.inputs[a] for a in self.inputs if ('electricity market for' in a[0])] self.A[np.ix_(range(self.A.shape[0]), electricity_inputs, fuel_markets)] = 0 fp = ExportInventory(self.A, self.rev_inputs, db_name=(filename or 'carculator db')).write_lci_to_excel(directory=directory, ecoinvent_compatibility=ecoinvent_compatibility, ecoinvent_version=ecoinvent_version, software_compatibility=software_compatibility, filename=filename, forbidden_activities=forbidden_activities, export_format=export_format, vehicle_specs=self.specs) return fp<|docstring|>Export the inventory as an Excel file (if the destination software is Brightway2) or a CSV file (if the destination software is Simapro). Also return the file path where the file is stored. :param directory: directory where to save the file. :type directory: str :param ecoinvent_compatibility: If True, compatible with ecoinvent. If False, compatible with REMIND-ecoinvent. :param ecoinvent_version: "3.7", "3.6", "3.5" or "uvek"; the Simapro export supports only "3.5", "3.6" or "uvek" :param software_compatibility: "brightway2" or "simapro" :return: file path where the file is stored. :rtype: str<|endoftext|>
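A hypothetical usage sketch for the two destination formats of export_lci_to_excel, again assuming an InventoryCalculation-like object ic; the directory and file name are illustrative:

# Excel file for a Brightway2 workflow (the default)
fp = ic.export_lci_to_excel(directory='/tmp', filename='carculator_db')

# CSV file for Simapro; only ecoinvent 3.5, 3.6 or UVEK are supported on this path
fp = ic.export_lci_to_excel(software_compatibility='simapro', ecoinvent_version='3.6')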
10ecc9b143fc48af8ff7ad6d02e3916d8413a71fc9ce904479de888172ad1c65
def define_electricity_mix_for_fuel_prep(self): '\n This function defines an electricity mix based either on a user-defined mix, or on default mixes for a given country.\n The mix is calculated as the average mix, weighted by the distribution of annually driven kilometers.\n :return: electricity mix shares per year, normalized to sum to 1\n :rtype: numpy.ndarray\n ' try: losses_to_low = float(self.bs.losses[self.country]['LV']) except KeyError: losses_to_low = float(self.bs.losses['RER']['LV']) if ('custom electricity mix' in self.background_configuration): mix = self.background_configuration['custom electricity mix'] if (np.shape(mix)[0] != len(self.scope['year'])): raise ValueError('The number of electricity mixes ({}) must match with the number of years ({}).'.format(np.shape(mix)[0], len(self.scope['year']))) if (not np.allclose(np.sum(mix, 1), np.ones(len(self.scope['year'])))): print('The sum of the electricity mix shares does not equal 1 for each year.') else: use_year = (self.array.values[self.array_inputs['lifetime kilometers']] / self.array.values[self.array_inputs['kilometers per year']]).reshape(self.iterations, len(self.scope['powertrain']), len(self.scope['size']), len(self.scope['year'])).mean(axis=(0, 1, 2)) if (self.country not in self.bs.electricity_mix.country.values): print('The electricity mix for {} could not be found. Average European electricity mix is used instead.'.format(self.country)) country = 'RER' else: country = self.country mix = [(self.bs.electricity_mix.sel(country=country, variable=['Hydro', 'Nuclear', 'Gas', 'Solar', 'Wind', 'Biomass', 'Coal', 'Oil', 'Geothermal', 'Waste', 'Biogas CCS', 'Biomass CCS', 'Coal CCS', 'Gas CCS', 'Wood CCS']).interp(year=np.arange(year, (year + use_year[y])), kwargs={'fill_value': 'extrapolate'}).mean(axis=0).values if ((year + use_year[y]) <= 2050) else self.bs.electricity_mix.sel(country=country, variable=['Hydro', 'Nuclear', 'Gas', 'Solar', 'Wind', 'Biomass', 'Coal', 'Oil', 'Geothermal', 'Waste', 'Biogas CCS', 'Biomass CCS', 'Coal CCS', 'Gas CCS', 'Wood CCS']).interp(year=np.arange(year, 2051), kwargs={'fill_value': 'extrapolate'}).mean(axis=0).values) for (y, year) in enumerate(self.scope['year'])] return (np.clip(mix, 0, 1) / np.clip(mix, 0, 1).sum(axis=1)[:, None])
This function defines an electricity mix based either on a user-defined mix, or on default mixes for a given country. The mix is calculated as the average mix, weighted by the distribution of annually driven kilometers. :return: electricity mix shares per year, normalized to sum to 1 :rtype: numpy.ndarray
carculator/inventory.py
define_electricity_mix_for_fuel_prep
rena-nong/carculator
1
python
def define_electricity_mix_for_fuel_prep(self): '\n This function defines an electricity mix based either on a user-defined mix, or on default mixes for a given country.\n The mix is calculated as the average mix, weighted by the distribution of annually driven kilometers.\n :return: electricity mix shares per year, normalized to sum to 1\n :rtype: numpy.ndarray\n ' try: losses_to_low = float(self.bs.losses[self.country]['LV']) except KeyError: losses_to_low = float(self.bs.losses['RER']['LV']) if ('custom electricity mix' in self.background_configuration): mix = self.background_configuration['custom electricity mix'] if (np.shape(mix)[0] != len(self.scope['year'])): raise ValueError('The number of electricity mixes ({}) must match with the number of years ({}).'.format(np.shape(mix)[0], len(self.scope['year']))) if (not np.allclose(np.sum(mix, 1), np.ones(len(self.scope['year'])))): print('The sum of the electricity mix shares does not equal 1 for each year.') else: use_year = (self.array.values[self.array_inputs['lifetime kilometers']] / self.array.values[self.array_inputs['kilometers per year']]).reshape(self.iterations, len(self.scope['powertrain']), len(self.scope['size']), len(self.scope['year'])).mean(axis=(0, 1, 2)) if (self.country not in self.bs.electricity_mix.country.values): print('The electricity mix for {} could not be found. Average European electricity mix is used instead.'.format(self.country)) country = 'RER' else: country = self.country mix = [(self.bs.electricity_mix.sel(country=country, variable=['Hydro', 'Nuclear', 'Gas', 'Solar', 'Wind', 'Biomass', 'Coal', 'Oil', 'Geothermal', 'Waste', 'Biogas CCS', 'Biomass CCS', 'Coal CCS', 'Gas CCS', 'Wood CCS']).interp(year=np.arange(year, (year + use_year[y])), kwargs={'fill_value': 'extrapolate'}).mean(axis=0).values if ((year + use_year[y]) <= 2050) else self.bs.electricity_mix.sel(country=country, variable=['Hydro', 'Nuclear', 'Gas', 'Solar', 'Wind', 'Biomass', 'Coal', 'Oil', 'Geothermal', 'Waste', 'Biogas CCS', 'Biomass CCS', 'Coal CCS', 'Gas CCS', 'Wood CCS']).interp(year=np.arange(year, 2051), kwargs={'fill_value': 'extrapolate'}).mean(axis=0).values) for (y, year) in enumerate(self.scope['year'])] return (np.clip(mix, 0, 1) / np.clip(mix, 0, 1).sum(axis=1)[:, None])
def define_electricity_mix_for_fuel_prep(self): '\n This function defines an electricity mix based either on a user-defined mix, or on default mixes for a given country.\n The mix is calculated as the average mix, weighted by the distribution of annually driven kilometers.\n :return: electricity mix shares per year, normalized to sum to 1\n :rtype: numpy.ndarray\n ' try: losses_to_low = float(self.bs.losses[self.country]['LV']) except KeyError: losses_to_low = float(self.bs.losses['RER']['LV']) if ('custom electricity mix' in self.background_configuration): mix = self.background_configuration['custom electricity mix'] if (np.shape(mix)[0] != len(self.scope['year'])): raise ValueError('The number of electricity mixes ({}) must match with the number of years ({}).'.format(np.shape(mix)[0], len(self.scope['year']))) if (not np.allclose(np.sum(mix, 1), np.ones(len(self.scope['year'])))): print('The sum of the electricity mix shares does not equal 1 for each year.') else: use_year = (self.array.values[self.array_inputs['lifetime kilometers']] / self.array.values[self.array_inputs['kilometers per year']]).reshape(self.iterations, len(self.scope['powertrain']), len(self.scope['size']), len(self.scope['year'])).mean(axis=(0, 1, 2)) if (self.country not in self.bs.electricity_mix.country.values): print('The electricity mix for {} could not be found. Average European electricity mix is used instead.'.format(self.country)) country = 'RER' else: country = self.country mix = [(self.bs.electricity_mix.sel(country=country, variable=['Hydro', 'Nuclear', 'Gas', 'Solar', 'Wind', 'Biomass', 'Coal', 'Oil', 'Geothermal', 'Waste', 'Biogas CCS', 'Biomass CCS', 'Coal CCS', 'Gas CCS', 'Wood CCS']).interp(year=np.arange(year, (year + use_year[y])), kwargs={'fill_value': 'extrapolate'}).mean(axis=0).values if ((year + use_year[y]) <= 2050) else self.bs.electricity_mix.sel(country=country, variable=['Hydro', 'Nuclear', 'Gas', 'Solar', 'Wind', 'Biomass', 'Coal', 'Oil', 'Geothermal', 'Waste', 'Biogas CCS', 'Biomass CCS', 'Coal CCS', 'Gas CCS', 'Wood CCS']).interp(year=np.arange(year, 2051), kwargs={'fill_value': 'extrapolate'}).mean(axis=0).values) for (y, year) in enumerate(self.scope['year'])] return (np.clip(mix, 0, 1) / np.clip(mix, 0, 1).sum(axis=1)[:, None])<|docstring|>This function defines an electricity mix based either on a user-defined mix, or on default mixes for a given country. The mix is calculated as the average mix, weighted by the distribution of annually driven kilometers. :return: electricity mix shares per year, normalized to sum to 1 :rtype: numpy.ndarray<|endoftext|>
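A minimal sketch of the clip-and-normalize step at the end of the function, with a hypothetical raw mix whose first-year shares do not quite sum to 1:

import numpy as np

mix = np.array([[0.5, 0.3, 0.25],   # hypothetical raw technology shares, year 1 (sums to 1.05)
                [0.6, 0.2, 0.2]])   # year 2 (sums to 1.0)
clipped = np.clip(mix, 0, 1)                         # guard against negative or >1 shares
normalized = clipped / clipped.sum(axis=1)[:, None]  # renormalize each year to sum to 1
print(normalized.sum(axis=1))  # [1. 1.]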