Dataset schema (column name, type, observed value-size range):

    nwo                 stringlengths    5 - 106
    sha                 stringlengths    40 - 40
    path                stringlengths    4 - 174
    language            stringclasses    1 value
    identifier          stringlengths    1 - 140
    parameters          stringlengths    0 - 87.7k
    argument_list       stringclasses    1 value
    return_statement    stringlengths    0 - 426k
    docstring           stringlengths    0 - 64.3k
    docstring_summary   stringlengths    0 - 26.3k
    docstring_tokens    list
    function            stringlengths    18 - 4.83M
    function_tokens     list
    url                 stringlengths    83 - 304
OWASP/ZSC
5bb9fed69efdc17996be4856b54af632aaed87b0
module/readline_windows/pyreadline/modes/notemacs.py
python
NotEmacsMode.previous_history
(self, e)
Move back through the history list, fetching the previous command.
def previous_history(self, e): # (C-p)
    '''Move back through the history list, fetching the previous command. '''
    self._history.previous_history(self.l_buffer)
[ "def", "previous_history", "(", "self", ",", "e", ")", ":", "# (C-p)", "self", ".", "_history", ".", "previous_history", "(", "self", ".", "l_buffer", ")" ]
https://github.com/OWASP/ZSC/blob/5bb9fed69efdc17996be4856b54af632aaed87b0/module/readline_windows/pyreadline/modes/notemacs.py#L142-L144
PlasmaPy/PlasmaPy
78d63e341216475ce3318e1409296480407c9019
plasmapy/formulary/braginskii.py
python
_nondim_resistivity
(hall, Z, particle, model, field_orientation)
return alpha_hat
Calculate dimensionless classical resistivity coefficients. This function is a switchboard / wrapper that calls the appropriate model-specific functions depending on which model is specified.
def _nondim_resistivity(hall, Z, particle, model, field_orientation):
    """
    Calculate dimensionless classical resistivity coefficients.

    This function is a switchboard / wrapper that calls the appropriate
    model-specific functions depending on which model is specified.
    """
    if model == "spitzer-harm" or model == "spitzer":
        alpha_hat = _nondim_resist_spitzer(Z, field_orientation)
    elif model == "braginskii":
        alpha_hat = _nondim_resist_braginskii(hall, Z, field_orientation)
    elif model == "ji-held":
        alpha_hat = _nondim_resist_ji_held(hall, Z, field_orientation)
    else:
        raise ValueError(f"Unrecognized model '{model}' in _nondim_resistivity")
    return alpha_hat
[ "def", "_nondim_resistivity", "(", "hall", ",", "Z", ",", "particle", ",", "model", ",", "field_orientation", ")", ":", "if", "model", "==", "\"spitzer-harm\"", "or", "model", "==", "\"spitzer\"", ":", "alpha_hat", "=", "_nondim_resist_spitzer", "(", "Z", ",", "field_orientation", ")", "elif", "model", "==", "\"braginskii\"", ":", "alpha_hat", "=", "_nondim_resist_braginskii", "(", "hall", ",", "Z", ",", "field_orientation", ")", "elif", "model", "==", "\"ji-held\"", ":", "alpha_hat", "=", "_nondim_resist_ji_held", "(", "hall", ",", "Z", ",", "field_orientation", ")", "else", ":", "raise", "ValueError", "(", "f\"Unrecognized model '{model}' in _nondim_resistivity\"", ")", "return", "alpha_hat" ]
https://github.com/PlasmaPy/PlasmaPy/blob/78d63e341216475ce3318e1409296480407c9019/plasmapy/formulary/braginskii.py#L1199-L1214
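The if/elif chain above is a plain string-keyed dispatch: the model name selects one coefficient routine, anything else raises. A minimal standalone sketch of the same switchboard written as a dispatch table (the stub handlers and values here are illustrative, not PlasmaPy code):

def _resist_spitzer(hall, Z, field_orientation):
    return ("spitzer", Z, field_orientation)        # stub for the real routine

def _resist_braginskii(hall, Z, field_orientation):
    return ("braginskii", hall, Z, field_orientation)

_DISPATCH = {
    "spitzer-harm": _resist_spitzer,
    "spitzer": _resist_spitzer,
    "braginskii": _resist_braginskii,
}

def nondim_resistivity_sketch(hall, Z, model, field_orientation):
    handler = _DISPATCH.get(model)
    if handler is None:
        raise ValueError(f"Unrecognized model '{model}'")
    return handler(hall, Z, field_orientation)

print(nondim_resistivity_sketch(0.5, 1, "braginskii", "par"))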
yt-project/yt
dc7b24f9b266703db4c843e329c6c8644d47b824
yt/visualization/volume_rendering/old_camera.py
python
StereoSphericalCamera._render
(self, double_check, num_threads, image, sampler, msg)
return image
def _render(self, double_check, num_threads, image, sampler, msg):
    ncells = sum(b.source_mask.size for b in self.volume.bricks)
    pbar = get_pbar("Ray casting " + msg, ncells)
    total_cells = 0
    if double_check:
        for brick in self.volume.bricks:
            for data in brick.my_data:
                if np.any(np.isnan(data)):
                    raise RuntimeError
    for brick in self.volume.traverse(self.front_center):
        sampler(brick, num_threads=num_threads)
        total_cells += brick.source_mask.size
        pbar.update(total_cells)
    pbar.finish()

    image = sampler.aimage.copy()
    image.shape = self.resolution[0], self.resolution[1], 4
    if not self.transfer_function.grey_opacity:
        image[:, :, 3] = 1.0
    image = image[1:-1, 1:-1, :]
    return image
[ "def", "_render", "(", "self", ",", "double_check", ",", "num_threads", ",", "image", ",", "sampler", ",", "msg", ")", ":", "ncells", "=", "sum", "(", "b", ".", "source_mask", ".", "size", "for", "b", "in", "self", ".", "volume", ".", "bricks", ")", "pbar", "=", "get_pbar", "(", "\"Ray casting \"", "+", "msg", ",", "ncells", ")", "total_cells", "=", "0", "if", "double_check", ":", "for", "brick", "in", "self", ".", "volume", ".", "bricks", ":", "for", "data", "in", "brick", ".", "my_data", ":", "if", "np", ".", "any", "(", "np", ".", "isnan", "(", "data", ")", ")", ":", "raise", "RuntimeError", "for", "brick", "in", "self", ".", "volume", ".", "traverse", "(", "self", ".", "front_center", ")", ":", "sampler", "(", "brick", ",", "num_threads", "=", "num_threads", ")", "total_cells", "+=", "brick", ".", "source_mask", ".", "size", "pbar", ".", "update", "(", "total_cells", ")", "pbar", ".", "finish", "(", ")", "image", "=", "sampler", ".", "aimage", ".", "copy", "(", ")", "image", ".", "shape", "=", "self", ".", "resolution", "[", "0", "]", ",", "self", ".", "resolution", "[", "1", "]", ",", "4", "if", "not", "self", ".", "transfer_function", ".", "grey_opacity", ":", "image", "[", ":", ",", ":", ",", "3", "]", "=", "1.0", "image", "=", "image", "[", "1", ":", "-", "1", ",", "1", ":", "-", "1", ",", ":", "]", "return", "image" ]
https://github.com/yt-project/yt/blob/dc7b24f9b266703db4c843e329c6c8644d47b824/yt/visualization/volume_rendering/old_camera.py#L2429-L2451
lfz/Guided-Denoise
8881ab768d16eaf87342da4ff7dc8271e183e205
Attackset/fgsm_v3_resv2_inresv2_random/nets/inception_v4.py
python
inception_v4_base
(inputs, final_endpoint='Mixed_7d', scope=None)
Creates the Inception V4 network up to the given final endpoint.

Args:
  inputs: a 4-D tensor of size [batch_size, height, width, 3].
  final_endpoint: specifies the endpoint to construct the network up to.
    It can be one of [ 'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
    'Mixed_3a', 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
    'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e',
    'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c',
    'Mixed_7d']
  scope: Optional variable_scope.

Returns:
  logits: the logits outputs of the model.
  end_points: the set of end_points from the inception model.

Raises:
  ValueError: if final_endpoint is not set to one of the predefined values,
def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
  """Creates the Inception V4 network up to the given final endpoint.

  Args:
    inputs: a 4-D tensor of size [batch_size, height, width, 3].
    final_endpoint: specifies the endpoint to construct the network up to.
      It can be one of [ 'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
      'Mixed_3a', 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
      'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e',
      'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c',
      'Mixed_7d']
    scope: Optional variable_scope.

  Returns:
    logits: the logits outputs of the model.
    end_points: the set of end_points from the inception model.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
  """
  end_points = {}

  def add_and_check_final(name, net):
    end_points[name] = net
    return name == final_endpoint

  with tf.variable_scope(scope, 'InceptionV4', [inputs]):
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1, padding='SAME'):
      # 299 x 299 x 3
      net = slim.conv2d(inputs, 32, [3, 3], stride=2,
                        padding='VALID', scope='Conv2d_1a_3x3')
      if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points
      # 149 x 149 x 32
      net = slim.conv2d(net, 32, [3, 3], padding='VALID',
                        scope='Conv2d_2a_3x3')
      if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points
      # 147 x 147 x 32
      net = slim.conv2d(net, 64, [3, 3], scope='Conv2d_2b_3x3')
      if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
      # 147 x 147 x 64
      with tf.variable_scope('Mixed_3a'):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                     scope='MaxPool_0a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, padding='VALID',
                                 scope='Conv2d_0a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1])
        if add_and_check_final('Mixed_3a', net): return net, end_points
      # 73 x 73 x 160
      with tf.variable_scope('Mixed_4a'):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
          branch_0 = slim.conv2d(branch_0, 96, [3, 3], padding='VALID',
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, 64, [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='Conv2d_0c_7x1')
          branch_1 = slim.conv2d(branch_1, 96, [3, 3], padding='VALID',
                                 scope='Conv2d_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1])
        if add_and_check_final('Mixed_4a', net): return net, end_points
      # 71 x 71 x 192
      with tf.variable_scope('Mixed_5a'):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, 192, [3, 3], stride=2, padding='VALID',
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                     scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1])
        if add_and_check_final('Mixed_5a', net): return net, end_points
      # 35 x 35 x 384
      # 4 x Inception-A blocks
      for idx in range(4):
        block_scope = 'Mixed_5' + chr(ord('b') + idx)
        net = block_inception_a(net, block_scope)
        if add_and_check_final(block_scope, net): return net, end_points
      # 35 x 35 x 384
      # Reduction-A block
      net = block_reduction_a(net, 'Mixed_6a')
      if add_and_check_final('Mixed_6a', net): return net, end_points
      # 17 x 17 x 1024
      # 7 x Inception-B blocks
      for idx in range(7):
        block_scope = 'Mixed_6' + chr(ord('b') + idx)
        net = block_inception_b(net, block_scope)
        if add_and_check_final(block_scope, net): return net, end_points
      # 17 x 17 x 1024
      # Reduction-B block
      net = block_reduction_b(net, 'Mixed_7a')
      if add_and_check_final('Mixed_7a', net): return net, end_points
      # 8 x 8 x 1536
      # 3 x Inception-C blocks
      for idx in range(3):
        block_scope = 'Mixed_7' + chr(ord('b') + idx)
        net = block_inception_c(net, block_scope)
        if add_and_check_final(block_scope, net): return net, end_points
  raise ValueError('Unknown final endpoint %s' % final_endpoint)
[ "def", "inception_v4_base", "(", "inputs", ",", "final_endpoint", "=", "'Mixed_7d'", ",", "scope", "=", "None", ")", ":", "end_points", "=", "{", "}", "def", "add_and_check_final", "(", "name", ",", "net", ")", ":", "end_points", "[", "name", "]", "=", "net", "return", "name", "==", "final_endpoint", "with", "tf", ".", "variable_scope", "(", "scope", ",", "'InceptionV4'", ",", "[", "inputs", "]", ")", ":", "with", "slim", ".", "arg_scope", "(", "[", "slim", ".", "conv2d", ",", "slim", ".", "max_pool2d", ",", "slim", ".", "avg_pool2d", "]", ",", "stride", "=", "1", ",", "padding", "=", "'SAME'", ")", ":", "# 299 x 299 x 3", "net", "=", "slim", ".", "conv2d", "(", "inputs", ",", "32", ",", "[", "3", ",", "3", "]", ",", "stride", "=", "2", ",", "padding", "=", "'VALID'", ",", "scope", "=", "'Conv2d_1a_3x3'", ")", "if", "add_and_check_final", "(", "'Conv2d_1a_3x3'", ",", "net", ")", ":", "return", "net", ",", "end_points", "# 149 x 149 x 32", "net", "=", "slim", ".", "conv2d", "(", "net", ",", "32", ",", "[", "3", ",", "3", "]", ",", "padding", "=", "'VALID'", ",", "scope", "=", "'Conv2d_2a_3x3'", ")", "if", "add_and_check_final", "(", "'Conv2d_2a_3x3'", ",", "net", ")", ":", "return", "net", ",", "end_points", "# 147 x 147 x 32", "net", "=", "slim", ".", "conv2d", "(", "net", ",", "64", ",", "[", "3", ",", "3", "]", ",", "scope", "=", "'Conv2d_2b_3x3'", ")", "if", "add_and_check_final", "(", "'Conv2d_2b_3x3'", ",", "net", ")", ":", "return", "net", ",", "end_points", "# 147 x 147 x 64", "with", "tf", ".", "variable_scope", "(", "'Mixed_3a'", ")", ":", "with", "tf", ".", "variable_scope", "(", "'Branch_0'", ")", ":", "branch_0", "=", "slim", ".", "max_pool2d", "(", "net", ",", "[", "3", ",", "3", "]", ",", "stride", "=", "2", ",", "padding", "=", "'VALID'", ",", "scope", "=", "'MaxPool_0a_3x3'", ")", "with", "tf", ".", "variable_scope", "(", "'Branch_1'", ")", ":", "branch_1", "=", "slim", ".", "conv2d", "(", "net", ",", "96", ",", "[", "3", ",", "3", "]", ",", "stride", "=", "2", ",", "padding", "=", "'VALID'", ",", "scope", "=", "'Conv2d_0a_3x3'", ")", "net", "=", "tf", ".", "concat", "(", "axis", "=", "3", ",", "values", "=", "[", "branch_0", ",", "branch_1", "]", ")", "if", "add_and_check_final", "(", "'Mixed_3a'", ",", "net", ")", ":", "return", "net", ",", "end_points", "# 73 x 73 x 160", "with", "tf", ".", "variable_scope", "(", "'Mixed_4a'", ")", ":", "with", "tf", ".", "variable_scope", "(", "'Branch_0'", ")", ":", "branch_0", "=", "slim", ".", "conv2d", "(", "net", ",", "64", ",", "[", "1", ",", "1", "]", ",", "scope", "=", "'Conv2d_0a_1x1'", ")", "branch_0", "=", "slim", ".", "conv2d", "(", "branch_0", ",", "96", ",", "[", "3", ",", "3", "]", ",", "padding", "=", "'VALID'", ",", "scope", "=", "'Conv2d_1a_3x3'", ")", "with", "tf", ".", "variable_scope", "(", "'Branch_1'", ")", ":", "branch_1", "=", "slim", ".", "conv2d", "(", "net", ",", "64", ",", "[", "1", ",", "1", "]", ",", "scope", "=", "'Conv2d_0a_1x1'", ")", "branch_1", "=", "slim", ".", "conv2d", "(", "branch_1", ",", "64", ",", "[", "1", ",", "7", "]", ",", "scope", "=", "'Conv2d_0b_1x7'", ")", "branch_1", "=", "slim", ".", "conv2d", "(", "branch_1", ",", "64", ",", "[", "7", ",", "1", "]", ",", "scope", "=", "'Conv2d_0c_7x1'", ")", "branch_1", "=", "slim", ".", "conv2d", "(", "branch_1", ",", "96", ",", "[", "3", ",", "3", "]", ",", "padding", "=", "'VALID'", ",", "scope", "=", "'Conv2d_1a_3x3'", ")", "net", "=", "tf", ".", "concat", "(", "axis", "=", "3", ",", "values", "=", "[", "branch_0", ",", "branch_1", "]", ")", 
"if", "add_and_check_final", "(", "'Mixed_4a'", ",", "net", ")", ":", "return", "net", ",", "end_points", "# 71 x 71 x 192", "with", "tf", ".", "variable_scope", "(", "'Mixed_5a'", ")", ":", "with", "tf", ".", "variable_scope", "(", "'Branch_0'", ")", ":", "branch_0", "=", "slim", ".", "conv2d", "(", "net", ",", "192", ",", "[", "3", ",", "3", "]", ",", "stride", "=", "2", ",", "padding", "=", "'VALID'", ",", "scope", "=", "'Conv2d_1a_3x3'", ")", "with", "tf", ".", "variable_scope", "(", "'Branch_1'", ")", ":", "branch_1", "=", "slim", ".", "max_pool2d", "(", "net", ",", "[", "3", ",", "3", "]", ",", "stride", "=", "2", ",", "padding", "=", "'VALID'", ",", "scope", "=", "'MaxPool_1a_3x3'", ")", "net", "=", "tf", ".", "concat", "(", "axis", "=", "3", ",", "values", "=", "[", "branch_0", ",", "branch_1", "]", ")", "if", "add_and_check_final", "(", "'Mixed_5a'", ",", "net", ")", ":", "return", "net", ",", "end_points", "# 35 x 35 x 384", "# 4 x Inception-A blocks", "for", "idx", "in", "range", "(", "4", ")", ":", "block_scope", "=", "'Mixed_5'", "+", "chr", "(", "ord", "(", "'b'", ")", "+", "idx", ")", "net", "=", "block_inception_a", "(", "net", ",", "block_scope", ")", "if", "add_and_check_final", "(", "block_scope", ",", "net", ")", ":", "return", "net", ",", "end_points", "# 35 x 35 x 384", "# Reduction-A block", "net", "=", "block_reduction_a", "(", "net", ",", "'Mixed_6a'", ")", "if", "add_and_check_final", "(", "'Mixed_6a'", ",", "net", ")", ":", "return", "net", ",", "end_points", "# 17 x 17 x 1024", "# 7 x Inception-B blocks", "for", "idx", "in", "range", "(", "7", ")", ":", "block_scope", "=", "'Mixed_6'", "+", "chr", "(", "ord", "(", "'b'", ")", "+", "idx", ")", "net", "=", "block_inception_b", "(", "net", ",", "block_scope", ")", "if", "add_and_check_final", "(", "block_scope", ",", "net", ")", ":", "return", "net", ",", "end_points", "# 17 x 17 x 1024", "# Reduction-B block", "net", "=", "block_reduction_b", "(", "net", ",", "'Mixed_7a'", ")", "if", "add_and_check_final", "(", "'Mixed_7a'", ",", "net", ")", ":", "return", "net", ",", "end_points", "# 8 x 8 x 1536", "# 3 x Inception-C blocks", "for", "idx", "in", "range", "(", "3", ")", ":", "block_scope", "=", "'Mixed_7'", "+", "chr", "(", "ord", "(", "'b'", ")", "+", "idx", ")", "net", "=", "block_inception_c", "(", "net", ",", "block_scope", ")", "if", "add_and_check_final", "(", "block_scope", ",", "net", ")", ":", "return", "net", ",", "end_points", "raise", "ValueError", "(", "'Unknown final endpoint %s'", "%", "final_endpoint", ")" ]
https://github.com/lfz/Guided-Denoise/blob/8881ab768d16eaf87342da4ff7dc8271e183e205/Attackset/fgsm_v3_resv2_inresv2_random/nets/inception_v4.py#L147-L254
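A hedged usage sketch: under TensorFlow 1.x with slim in scope (as this file assumes), requesting an intermediate endpoint stops construction early and returns every endpoint built so far.

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 299, 299, 3])
net, end_points = inception_v4_base(inputs, final_endpoint='Mixed_5a')
print(net.shape)           # (?, 35, 35, 384), per the inline comments above
print(sorted(end_points))  # 'Conv2d_1a_3x3' ... 'Mixed_5a'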
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/rings/valuation/inductive_valuation.py
python
NonFinalInductiveValuation._test_is_equivalence_irreducible
(self, **options)
r""" Test the correctness of :meth:`is_equivalence_irreducible`. EXAMPLES:: sage: R.<x> = QQ[] sage: v = GaussValuation(R, valuations.TrivialValuation(QQ)) sage: v._test_is_equivalence_irreducible()
r""" Test the correctness of :meth:`is_equivalence_irreducible`.
[ "r", "Test", "the", "correctness", "of", ":", "meth", ":", "is_equivalence_irreducible", "." ]
def _test_is_equivalence_irreducible(self, **options):
    r"""
    Test the correctness of :meth:`is_equivalence_irreducible`.

    EXAMPLES::

        sage: R.<x> = QQ[]
        sage: v = GaussValuation(R, valuations.TrivialValuation(QQ))
        sage: v._test_is_equivalence_irreducible()

    """
    tester = self._tester(**options)
    S = tester.some_elements(self.domain().some_elements())
    for f in S:
        if f.is_constant():
            continue
        is_equivalence_irreducible = self.is_equivalence_irreducible(f)
        F = self.equivalence_decomposition(f)
        tester.assertEqual(is_equivalence_irreducible, len(F)==0 or (len(F)==1 and F[0][1]==1))
        if self.is_equivalence_unit(f):
            tester.assertTrue(f.is_constant() or self.is_equivalence_irreducible(f))
    tester.assertTrue(self.is_equivalence_irreducible(self.phi()))
    tester.assertTrue(self.is_equivalence_irreducible(-self.phi()))
    tester.assertFalse(self.is_equivalence_irreducible(self.phi() ** 2))
[ "def", "_test_is_equivalence_irreducible", "(", "self", ",", "*", "*", "options", ")", ":", "tester", "=", "self", ".", "_tester", "(", "*", "*", "options", ")", "S", "=", "tester", ".", "some_elements", "(", "self", ".", "domain", "(", ")", ".", "some_elements", "(", ")", ")", "for", "f", "in", "S", ":", "if", "f", ".", "is_constant", "(", ")", ":", "continue", "is_equivalence_irreducible", "=", "self", ".", "is_equivalence_irreducible", "(", "f", ")", "F", "=", "self", ".", "equivalence_decomposition", "(", "f", ")", "tester", ".", "assertEqual", "(", "is_equivalence_irreducible", ",", "len", "(", "F", ")", "==", "0", "or", "(", "len", "(", "F", ")", "==", "1", "and", "F", "[", "0", "]", "[", "1", "]", "==", "1", ")", ")", "if", "self", ".", "is_equivalence_unit", "(", "f", ")", ":", "tester", ".", "assertTrue", "(", "f", ".", "is_constant", "(", ")", "or", "self", ".", "is_equivalence_irreducible", "(", "f", ")", ")", "tester", ".", "assertTrue", "(", "self", ".", "is_equivalence_irreducible", "(", "self", ".", "phi", "(", ")", ")", ")", "tester", ".", "assertTrue", "(", "self", ".", "is_equivalence_irreducible", "(", "-", "self", ".", "phi", "(", ")", ")", ")", "tester", ".", "assertFalse", "(", "self", ".", "is_equivalence_irreducible", "(", "self", ".", "phi", "(", ")", "**", "2", ")", ")" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/rings/valuation/inductive_valuation.py#L1583-L1607
biopython/biopython
2dd97e71762af7b046d7f7f8a4f1e38db6b06c86
Bio/Application/__init__.py
python
_Switch.__str__
(self)
Return the value of this option for the commandline. Includes a trailing space.
def __str__(self):
    """Return the value of this option for the commandline.

    Includes a trailing space.
    """
    assert not hasattr(self, "value")
    if self.is_set:
        return f"{self.names[0]} "
    else:
        return ""
[ "def", "__str__", "(", "self", ")", ":", "assert", "not", "hasattr", "(", "self", ",", "\"value\"", ")", "if", "self", ".", "is_set", ":", "return", "f\"{self.names[0]} \"", "else", ":", "return", "\"\"" ]
https://github.com/biopython/biopython/blob/2dd97e71762af7b046d7f7f8a4f1e38db6b06c86/Bio/Application/__init__.py#L699-L708
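The trailing space is the point: each set switch renders as "name " so concatenated command-line pieces need no extra separators. A standalone mimic of that convention (not Biopython's class; the clustalw2 command and flag are illustrative):

class SwitchSketch:
    def __init__(self, names, is_set=False):
        self.names = names
        self.is_set = is_set

    def __str__(self):
        # Set switches contribute "name "; unset ones contribute nothing.
        return f"{self.names[0]} " if self.is_set else ""

cmd = "clustalw2 " + str(SwitchSketch(["-align"], True)) + "-infile=x.fasta"
print(cmd)   # clustalw2 -align -infile=x.fasta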
Calysto/calysto_scheme
15bf81987870bcae1264e5a0a06feb9a8ee12b8b
calysto_scheme/scheme.py
python
b_handler2_4_d
(assertions, right, test_name, verbose, wrong, env, handler, k)
def b_handler2_4_d(assertions, right, test_name, verbose, wrong, env, handler, k):
    msg = get_exception_message(exception_reg)
    where = get_exception_info(exception_reg)
    assert_exp = (assertions).car
    proc_exp = aunparse((cdr_hat(assert_exp)).car)
    test_aexp = (cdr_hat(assert_exp)).cdr.car
    test_exp = aunparse(test_aexp)
    result_exp = (cdr_hat(assert_exp)).cdr.cdr.car
    traceback = get_traceback_string(List(symbol_exception, exception_reg))
    if (False if ((GreaterThan(string_length(msg), 0)) is False) else True):
        if (False if (((where) is (symbol_none)) is False) else True):
            printf("  Error: ~a \"~a\"\n", test_name, msg)
        else:
            printf("  Error: ~a \"~a\" at ~a\n", test_name, msg, where)
    else:
        if (False if (((where) is (symbol_none)) is False) else True):
            printf("  Error: ~a\n", test_name)
        else:
            printf("  Error: ~a at ~a\n", test_name, where)
    initialize_stack_trace_b()
    GLOBALS['k_reg'] = make_cont2(b_cont2_89_d, assertions, msg, proc_exp, right,
                                  test_aexp, test_exp, test_name, traceback,
                                  verbose, wrong, env, handler, k)
    GLOBALS['handler_reg'] = handler
    GLOBALS['env_reg'] = env
    GLOBALS['exp_reg'] = result_exp
    GLOBALS['pc'] = m
[ "def", "b_handler2_4_d", "(", "assertions", ",", "right", ",", "test_name", ",", "verbose", ",", "wrong", ",", "env", ",", "handler", ",", "k", ")", ":", "msg", "=", "get_exception_message", "(", "exception_reg", ")", "where", "=", "get_exception_info", "(", "exception_reg", ")", "assert_exp", "=", "(", "assertions", ")", ".", "car", "proc_exp", "=", "aunparse", "(", "(", "cdr_hat", "(", "assert_exp", ")", ")", ".", "car", ")", "test_aexp", "=", "(", "cdr_hat", "(", "assert_exp", ")", ")", ".", "cdr", ".", "car", "test_exp", "=", "aunparse", "(", "test_aexp", ")", "result_exp", "=", "(", "cdr_hat", "(", "assert_exp", ")", ")", ".", "cdr", ".", "cdr", ".", "car", "traceback", "=", "get_traceback_string", "(", "List", "(", "symbol_exception", ",", "exception_reg", ")", ")", "if", "(", "False", "if", "(", "(", "GreaterThan", "(", "string_length", "(", "msg", ")", ",", "0", ")", ")", "is", "False", ")", "else", "True", ")", ":", "if", "(", "False", "if", "(", "(", "(", "where", ")", "is", "(", "symbol_none", ")", ")", "is", "False", ")", "else", "True", ")", ":", "printf", "(", "\" Error: ~a \\\"~a\\\"\\n\"", ",", "test_name", ",", "msg", ")", "else", ":", "printf", "(", "\" Error: ~a \\\"~a\\\" at ~a\\n\"", ",", "test_name", ",", "msg", ",", "where", ")", "else", ":", "if", "(", "False", "if", "(", "(", "(", "where", ")", "is", "(", "symbol_none", ")", ")", "is", "False", ")", "else", "True", ")", ":", "printf", "(", "\" Error: ~a\\n\"", ",", "test_name", ")", "else", ":", "printf", "(", "\" Error: ~a at ~a\\n\"", ",", "test_name", ",", "where", ")", "initialize_stack_trace_b", "(", ")", "GLOBALS", "[", "'k_reg'", "]", "=", "make_cont2", "(", "b_cont2_89_d", ",", "assertions", ",", "msg", ",", "proc_exp", ",", "right", ",", "test_aexp", ",", "test_exp", ",", "test_name", ",", "traceback", ",", "verbose", ",", "wrong", ",", "env", ",", "handler", ",", "k", ")", "GLOBALS", "[", "'handler_reg'", "]", "=", "handler", "GLOBALS", "[", "'env_reg'", "]", "=", "env", "GLOBALS", "[", "'exp_reg'", "]", "=", "result_exp", "GLOBALS", "[", "'pc'", "]", "=", "m" ]
https://github.com/Calysto/calysto_scheme/blob/15bf81987870bcae1264e5a0a06feb9a8ee12b8b/calysto_scheme/scheme.py#L3528-L3552
llimllib/pymag-trees
61a685089d0888dda13d493a4537450167466dee
reingold_actual.py
python
DrawTree.right
(self)
return self.thread or len(self.children) and self.children[-1]
def right(self):
    return self.thread or len(self.children) and self.children[-1]
[ "def", "right", "(", "self", ")", ":", "return", "self", ".", "thread", "or", "len", "(", "self", ".", "children", ")", "and", "self", ".", "children", "[", "-", "1", "]" ]
https://github.com/llimllib/pymag-trees/blob/61a685089d0888dda13d493a4537450167466dee/reingold_actual.py#L13-L14
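This one-liner drives the Reingold-Tilford contour scan: follow the thread when the subtree has run out of real children, otherwise step to the rightmost child, and yield a falsy 0 when there is neither, which stops the scan. A standalone sketch with an illustrative Node class:

class Node:
    def __init__(self, children=None, thread=None):
        self.children = children or []
        self.thread = thread

    def right(self):
        # Thread wins; otherwise rightmost child; otherwise falsy 0.
        return self.thread or len(self.children) and self.children[-1]

leaf = Node()
threaded = Node(thread=leaf)               # exhausted subtree: follow the thread
parent = Node(children=[Node(), threaded])
assert threaded.right() is leaf
assert parent.right() is threaded          # rightmost child otherwise
assert leaf.right() == 0                   # falsy: the contour scan stops here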
jthsieh/DDPAE-video-prediction
219e68301d24615410260c3d33c80ae74f6f2dc3
models/base_model.py
python
BaseModel.setup
(self, is_train)
def setup(self, is_train):
    for _, net in self.nets.items():
        if is_train:
            net.train()
        else:
            net.eval()
[ "def", "setup", "(", "self", ",", "is_train", ")", ":", "for", "_", ",", "net", "in", "self", ".", "nets", ".", "items", "(", ")", ":", "if", "is_train", ":", "net", ".", "train", "(", ")", "else", ":", "net", ".", "eval", "(", ")" ]
https://github.com/jthsieh/DDPAE-video-prediction/blob/219e68301d24615410260c3d33c80ae74f6f2dc3/models/base_model.py#L39-L44
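A hedged usage sketch, assuming self.nets maps names to torch.nn.Module instances (which is how this base model stores its subnetworks): toggling the whole dict switches dropout and batch-norm behavior everywhere at once.

import torch.nn as nn

nets = {"encoder": nn.Sequential(nn.Linear(4, 8), nn.Dropout(0.5)),
        "decoder": nn.Linear(8, 4)}

for _, net in nets.items():     # the is_train=False branch above
    net.eval()

print(all(not net.training for net in nets.values()))   # True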
BitconFeng/Deep-Feature-video
fff73fbcd0e21d5db566d2b63c644e18b2732551
lib/rpn/rpn.py
python
get_rpn_testbatch
(roidb, cfg)
return data, label, im_info
return a dict of testbatch
:param roidb: ['image', 'flipped']
:return: data, label, im_info
def get_rpn_testbatch(roidb, cfg):
    """
    return a dict of testbatch
    :param roidb: ['image', 'flipped']
    :return: data, label, im_info
    """
    # assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs
    im_info = [np.array([roidb[i]['im_info']], dtype=np.float32)
               for i in range(len(roidb))]

    data = [{'data': im_array[i],
             'im_info': im_info[i]} for i in range(len(roidb))]
    label = {}

    return data, label, im_info
[ "def", "get_rpn_testbatch", "(", "roidb", ",", "cfg", ")", ":", "# assert len(roidb) == 1, 'Single batch only'", "imgs", ",", "roidb", "=", "get_image", "(", "roidb", ",", "cfg", ")", "im_array", "=", "imgs", "im_info", "=", "[", "np", ".", "array", "(", "[", "roidb", "[", "i", "]", "[", "'im_info'", "]", "]", ",", "dtype", "=", "np", ".", "float32", ")", "for", "i", "in", "range", "(", "len", "(", "roidb", ")", ")", "]", "data", "=", "[", "{", "'data'", ":", "im_array", "[", "i", "]", ",", "'im_info'", ":", "im_info", "[", "i", "]", "}", "for", "i", "in", "range", "(", "len", "(", "roidb", ")", ")", "]", "label", "=", "{", "}", "return", "data", ",", "label", ",", "im_info" ]
https://github.com/BitconFeng/Deep-Feature-video/blob/fff73fbcd0e21d5db566d2b63c644e18b2732551/lib/rpn/rpn.py#L34-L49
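A standalone sketch of the packing step (numpy only; the image array and roidb contents are illustrative stand-ins for what get_image returns): each image becomes one {'data', 'im_info'} dict, with im_info a float32 row of (height, width, scale).

import numpy as np

roidb = [{"im_info": [600, 800, 1.5]}]                    # one image
im_array = [np.zeros((1, 3, 600, 800), dtype=np.float32)]

im_info = [np.array([roidb[i]["im_info"]], dtype=np.float32)
           for i in range(len(roidb))]
data = [{"data": im_array[i], "im_info": im_info[i]}
        for i in range(len(roidb))]

print(data[0]["im_info"])   # [[600.  800.    1.5]]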
GoogleCloudPlatform/gsutil
5be882803e76608e2fd29cf8c504ccd1fe0a7746
gslib/tracker_file.py
python
CreateTrackerDirIfNeeded
()
return tracker_dir
Looks up or creates the gsutil tracker file directory.

This is the configured directory where gsutil keeps its resumable transfer
tracker files. This function creates it if it doesn't already exist.

Returns:
  The pathname to the tracker directory.
def CreateTrackerDirIfNeeded():
  """Looks up or creates the gsutil tracker file directory.

  This is the configured directory where gsutil keeps its resumable transfer
  tracker files. This function creates it if it doesn't already exist.

  Returns:
    The pathname to the tracker directory.
  """
  tracker_dir = config.get('GSUtil', 'resumable_tracker_dir',
                           os.path.join(GetGsutilStateDir(), 'tracker-files'))
  CreateDirIfNeeded(tracker_dir)
  return tracker_dir
[ "def", "CreateTrackerDirIfNeeded", "(", ")", ":", "tracker_dir", "=", "config", ".", "get", "(", "'GSUtil'", ",", "'resumable_tracker_dir'", ",", "os", ".", "path", ".", "join", "(", "GetGsutilStateDir", "(", ")", ",", "'tracker-files'", ")", ")", "CreateDirIfNeeded", "(", "tracker_dir", ")", "return", "tracker_dir" ]
https://github.com/GoogleCloudPlatform/gsutil/blob/5be882803e76608e2fd29cf8c504ccd1fe0a7746/gslib/tracker_file.py#L89-L101
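A standalone sketch of the same pattern: a config lookup with a computed default, then idempotent directory creation. The environment-variable lookup stands in for gsutil's boto config.get, and the paths are illustrative.

import os

def create_dir_if_needed(path):
    os.makedirs(path, exist_ok=True)    # no-op when it already exists
    return path

state_dir = os.path.expanduser("~/.gsutil")               # illustrative default
tracker_dir = os.environ.get("TRACKER_DIR",
                             os.path.join(state_dir, "tracker-files"))
print(create_dir_if_needed(tracker_dir))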
lixinsu/RCZoo
37fcb7962fbd4c751c561d4a0c84173881ea8339
reader/drqa/predictor.py
python
Predictor.predict
(self, document, question, candidates=None, top_n=1)
return results[0]
Predict a single document - question pair.
def predict(self, document, question, candidates=None, top_n=1):
    """Predict a single document - question pair."""
    results = self.predict_batch([(document, question, candidates,)], top_n)
    return results[0]
[ "def", "predict", "(", "self", ",", "document", ",", "question", ",", "candidates", "=", "None", ",", "top_n", "=", "1", ")", ":", "results", "=", "self", ".", "predict_batch", "(", "[", "(", "document", ",", "question", ",", "candidates", ",", ")", "]", ",", "top_n", ")", "return", "results", "[", "0", "]" ]
https://github.com/lixinsu/RCZoo/blob/37fcb7962fbd4c751c561d4a0c84173881ea8339/reader/drqa/predictor.py#L84-L87
pyvista/pyvista
012dbb95a9aae406c3cd4cd94fc8c477f871e426
pyvista/themes.py
python
DefaultTheme.colorbar_horizontal
(self)
return self._colorbar_horizontal
Return or set the default parameters of a horizontal colorbar.

Examples
--------
Set the default horizontal colorbar width to 0.6.

>>> import pyvista
>>> pyvista.global_theme.colorbar_horizontal.width = 0.6  # doctest:+SKIP

Set the default horizontal colorbar height to 0.2.

>>> pyvista.global_theme.colorbar_horizontal.height = 0.2  # doctest:+SKIP
def colorbar_horizontal(self) -> _ColorbarConfig:
    """Return or set the default parameters of a horizontal colorbar.

    Examples
    --------
    Set the default horizontal colorbar width to 0.6.

    >>> import pyvista
    >>> pyvista.global_theme.colorbar_horizontal.width = 0.6  # doctest:+SKIP

    Set the default horizontal colorbar height to 0.2.

    >>> pyvista.global_theme.colorbar_horizontal.height = 0.2  # doctest:+SKIP

    """
    return self._colorbar_horizontal
[ "def", "colorbar_horizontal", "(", "self", ")", "->", "_ColorbarConfig", ":", "return", "self", ".", "_colorbar_horizontal" ]
https://github.com/pyvista/pyvista/blob/012dbb95a9aae406c3cd4cd94fc8c477f871e426/pyvista/themes.py#L1682-L1697
enthought/mayavi
2103a273568b8f0bd62328801aafbd6252543ae8
mayavi/core/lut_manager.py
python
set_lut
(vtk_lut, lut_lst)
return vtk_lut
Setup the tvtk.LookupTable (`vtk_lut`) using the passed list of lut values.
def set_lut(vtk_lut, lut_lst):
    """Setup the tvtk.LookupTable (`vtk_lut`) using the passed list of
    lut values."""
    n_col = len(lut_lst)
    vtk_lut.number_of_colors = n_col
    vtk_lut.build()
    for i in range(0, n_col):
        lt = lut_lst[i]
        vtk_lut.set_table_value(i, lt[0], lt[1], lt[2], lt[3])
    return vtk_lut
[ "def", "set_lut", "(", "vtk_lut", ",", "lut_lst", ")", ":", "n_col", "=", "len", "(", "lut_lst", ")", "vtk_lut", ".", "number_of_colors", "=", "n_col", "vtk_lut", ".", "build", "(", ")", "for", "i", "in", "range", "(", "0", ",", "n_col", ")", ":", "lt", "=", "lut_lst", "[", "i", "]", "vtk_lut", ".", "set_table_value", "(", "i", ",", "lt", "[", "0", "]", ",", "lt", "[", "1", "]", ",", "lt", "[", "2", "]", ",", "lt", "[", "3", "]", ")", "return", "vtk_lut" ]
https://github.com/enthought/mayavi/blob/2103a273568b8f0bd62328801aafbd6252543ae8/mayavi/core/lut_manager.py#L53-L63
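A hedged usage sketch (requires tvtk, shipped with Mayavi): each entry of lut_lst is an (r, g, b, a) tuple in [0, 1], one per color of the table.

from tvtk.api import tvtk

lut = tvtk.LookupTable()
grey_ramp = [(i / 3.0, i / 3.0, i / 3.0, 1.0) for i in range(4)]  # 4 greys
set_lut(lut, grey_ramp)
print(lut.number_of_colors)   # 4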
mrlesmithjr/Ansible
d44f0dc0d942bdf3bf7334b307e6048f0ee16e36
roles/ansible-vsphere-management/scripts/pdns/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py
python
_bypass_ensure_directory
(path)
Sandbox-bypassing version of ensure_directory()
def _bypass_ensure_directory(path):
    """Sandbox-bypassing version of ensure_directory()"""
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    dirname, filename = split(path)
    if dirname and filename and not isdir(dirname):
        _bypass_ensure_directory(dirname)
        mkdir(dirname, 0o755)
[ "def", "_bypass_ensure_directory", "(", "path", ")", ":", "if", "not", "WRITE_SUPPORT", ":", "raise", "IOError", "(", "'\"os.mkdir\" not supported on this platform.'", ")", "dirname", ",", "filename", "=", "split", "(", "path", ")", "if", "dirname", "and", "filename", "and", "not", "isdir", "(", "dirname", ")", ":", "_bypass_ensure_directory", "(", "dirname", ")", "mkdir", "(", "dirname", ",", "0o755", ")" ]
https://github.com/mrlesmithjr/Ansible/blob/d44f0dc0d942bdf3bf7334b307e6048f0ee16e36/roles/ansible-vsphere-management/scripts/pdns/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py#L2948-L2955
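A standalone sketch of the recursion with the os helpers spelled out (pkg_resources imports split/isdir/mkdir at module level and guards with WRITE_SUPPORT). Note that the function ensures the *parent* directory of path exists; the leaf filename itself is never created.

import os
import tempfile
from os.path import split, isdir

def ensure_directory_sketch(path):
    dirname, filename = split(path)
    if dirname and filename and not isdir(dirname):
        ensure_directory_sketch(dirname)   # create ancestors first
        os.mkdir(dirname, 0o755)           # then this level, bottom-up

root = tempfile.mkdtemp()
ensure_directory_sketch(os.path.join(root, "a", "b", "file.txt"))
print(isdir(os.path.join(root, "a", "b")))   # True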
cloudera/impyla
0c736af4cad2bade9b8e313badc08ec50e81c948
impala/_thrift_gen/hive_metastore/ttypes.py
python
CommitTxnRequest.write
(self, oprot)
def write(self, oprot):
    if oprot._fast_encode is not None and self.thrift_spec is not None:
        oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
        return
    oprot.writeStructBegin('CommitTxnRequest')
    if self.txnid is not None:
        oprot.writeFieldBegin('txnid', TType.I64, 1)
        oprot.writeI64(self.txnid)
        oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
[ "def", "write", "(", "self", ",", "oprot", ")", ":", "if", "oprot", ".", "_fast_encode", "is", "not", "None", "and", "self", ".", "thrift_spec", "is", "not", "None", ":", "oprot", ".", "trans", ".", "write", "(", "oprot", ".", "_fast_encode", "(", "self", ",", "[", "self", ".", "__class__", ",", "self", ".", "thrift_spec", "]", ")", ")", "return", "oprot", ".", "writeStructBegin", "(", "'CommitTxnRequest'", ")", "if", "self", ".", "txnid", "is", "not", "None", ":", "oprot", ".", "writeFieldBegin", "(", "'txnid'", ",", "TType", ".", "I64", ",", "1", ")", "oprot", ".", "writeI64", "(", "self", ".", "txnid", ")", "oprot", ".", "writeFieldEnd", "(", ")", "oprot", ".", "writeFieldStop", "(", ")", "oprot", ".", "writeStructEnd", "(", ")" ]
https://github.com/cloudera/impyla/blob/0c736af4cad2bade9b8e313badc08ec50e81c948/impala/_thrift_gen/hive_metastore/ttypes.py#L7660-L7670
tobyyouup/conv_seq2seq
78a6e4e62a4c57a5caa9d584033a85e810fd726e
seq2seq/training/utils.py
python
TrainOptions.model_params
(self)
return self._model_params
Returns the model parameters
def model_params(self):
    """Returns the model parameters"""
    return self._model_params
[ "def", "model_params", "(", "self", ")", ":", "return", "self", ".", "_model_params" ]
https://github.com/tobyyouup/conv_seq2seq/blob/78a6e4e62a4c57a5caa9d584033a85e810fd726e/seq2seq/training/utils.py#L56-L58
nodesign/weio
1d67d705a5c36a2e825ad13feab910b0aca9a2e8
handlers/editorHandler.py
python
WeioEditorHandler.on_message
(self, data)
Parse JSON data coming from the browser into a Python object
def on_message(self, data):
    """Parse JSON data coming from the browser into a Python object."""
    req = json.loads(data)
    self.serve(req)
[ "def", "on_message", "(", "self", ",", "data", ")", ":", "req", "=", "json", ".", "loads", "(", "data", ")", "self", ".", "serve", "(", "req", ")" ]
https://github.com/nodesign/weio/blob/1d67d705a5c36a2e825ad13feab910b0aca9a2e8/handlers/editorHandler.py#L315-L318
mlrun/mlrun
4c120719d64327a34b7ee1ab08fb5e01b258b00a
mlrun/projects/project.py
python
MlrunProject.set_function
( self, func: typing.Union[str, mlrun.runtimes.BaseRuntime], name: str = "", kind: str = "", image: str = None, handler=None, with_repo: bool = None, requirements: typing.Union[str, typing.List[str]] = None, )
return function_object
update or add a function object to the project

function can be provided as an object (func) or a .py/.ipynb/.yaml url

support url prefixes::

    object (s3://, v3io://, ..)
    MLRun DB e.g. db://project/func:ver
    functions hub/market: e.g. hub://sklearn_classifier:master

examples::

    proj.set_function(func_object)
    proj.set_function('./src/mycode.py', 'ingest',
                      image='myrepo/ing:latest', with_repo=True)
    proj.set_function('http://.../mynb.ipynb', 'train')
    proj.set_function('./func.yaml')
    proj.set_function('hub://get_toy_data', 'getdata')

:param func:         function object or spec/code url
:param name:         name of the function (under the project)
:param kind:         runtime kind e.g. job, nuclio, spark, dask, mpijob default: job
:param image:        docker image to be used, can also be specified in the function object/yaml
:param handler:      default function handler to invoke (can only be set with .py/.ipynb files)
:param with_repo:    add (clone) the current repo to the build source
:param requirements: list of python packages or pip requirements file path

:returns: project object
def set_function(
    self,
    func: typing.Union[str, mlrun.runtimes.BaseRuntime],
    name: str = "",
    kind: str = "",
    image: str = None,
    handler=None,
    with_repo: bool = None,
    requirements: typing.Union[str, typing.List[str]] = None,
) -> mlrun.runtimes.BaseRuntime:
    """update or add a function object to the project

    function can be provided as an object (func) or a .py/.ipynb/.yaml url

    support url prefixes::

        object (s3://, v3io://, ..)
        MLRun DB e.g. db://project/func:ver
        functions hub/market: e.g. hub://sklearn_classifier:master

    examples::

        proj.set_function(func_object)
        proj.set_function('./src/mycode.py', 'ingest',
                          image='myrepo/ing:latest', with_repo=True)
        proj.set_function('http://.../mynb.ipynb', 'train')
        proj.set_function('./func.yaml')
        proj.set_function('hub://get_toy_data', 'getdata')

    :param func:         function object or spec/code url
    :param name:         name of the function (under the project)
    :param kind:         runtime kind e.g. job, nuclio, spark, dask, mpijob default: job
    :param image:        docker image to be used, can also be specified in the function object/yaml
    :param handler:      default function handler to invoke (can only be set with .py/.ipynb files)
    :param with_repo:    add (clone) the current repo to the build source
    :param requirements: list of python packages or pip requirements file path

    :returns: project object
    """
    if isinstance(func, str):
        # in hub or db functions name defaults to the function name
        if not name and not (func.startswith("db://") or func.startswith("hub://")):
            raise ValueError("function name must be specified")
        function_dict = {
            "url": func,
            "name": name,
            "kind": kind,
            "image": image,
            "handler": handler,
            "with_repo": with_repo,
            "requirements": requirements,
        }
        func = {k: v for k, v in function_dict.items() if v}
        name, function_object = _init_function_from_dict(func, self)
        func["name"] = name
    elif hasattr(func, "to_dict"):
        name, function_object = _init_function_from_obj(func, self, name=name)
        if handler:
            raise ValueError(
                "default handler cannot be set for existing function object"
            )
        if image:
            function_object.spec.image = image
        if with_repo:
            function_object.spec.build.source = "./"
        if requirements:
            function_object.with_requirements(requirements)
        if not name:
            raise ValueError("function name must be specified")
    else:
        raise ValueError("func must be a function url or object")

    self.spec.set_function(name, function_object, func)
    return function_object
[ "def", "set_function", "(", "self", ",", "func", ":", "typing", ".", "Union", "[", "str", ",", "mlrun", ".", "runtimes", ".", "BaseRuntime", "]", ",", "name", ":", "str", "=", "\"\"", ",", "kind", ":", "str", "=", "\"\"", ",", "image", ":", "str", "=", "None", ",", "handler", "=", "None", ",", "with_repo", ":", "bool", "=", "None", ",", "requirements", ":", "typing", ".", "Union", "[", "str", ",", "typing", ".", "List", "[", "str", "]", "]", "=", "None", ",", ")", "->", "mlrun", ".", "runtimes", ".", "BaseRuntime", ":", "if", "isinstance", "(", "func", ",", "str", ")", ":", "# in hub or db functions name defaults to the function name", "if", "not", "name", "and", "not", "(", "func", ".", "startswith", "(", "\"db://\"", ")", "or", "func", ".", "startswith", "(", "\"hub://\"", ")", ")", ":", "raise", "ValueError", "(", "\"function name must be specified\"", ")", "function_dict", "=", "{", "\"url\"", ":", "func", ",", "\"name\"", ":", "name", ",", "\"kind\"", ":", "kind", ",", "\"image\"", ":", "image", ",", "\"handler\"", ":", "handler", ",", "\"with_repo\"", ":", "with_repo", ",", "\"requirements\"", ":", "requirements", ",", "}", "func", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "function_dict", ".", "items", "(", ")", "if", "v", "}", "name", ",", "function_object", "=", "_init_function_from_dict", "(", "func", ",", "self", ")", "func", "[", "\"name\"", "]", "=", "name", "elif", "hasattr", "(", "func", ",", "\"to_dict\"", ")", ":", "name", ",", "function_object", "=", "_init_function_from_obj", "(", "func", ",", "self", ",", "name", "=", "name", ")", "if", "handler", ":", "raise", "ValueError", "(", "\"default handler cannot be set for existing function object\"", ")", "if", "image", ":", "function_object", ".", "spec", ".", "image", "=", "image", "if", "with_repo", ":", "function_object", ".", "spec", ".", "build", ".", "source", "=", "\"./\"", "if", "requirements", ":", "function_object", ".", "with_requirements", "(", "requirements", ")", "if", "not", "name", ":", "raise", "ValueError", "(", "\"function name must be specified\"", ")", "else", ":", "raise", "ValueError", "(", "\"func must be a function url or object\"", ")", "self", ".", "spec", ".", "set_function", "(", "name", ",", "function_object", ",", "func", ")", "return", "function_object" ]
https://github.com/mlrun/mlrun/blob/4c120719d64327a34b7ee1ab08fb5e01b258b00a/mlrun/projects/project.py#L1316-L1390
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/distutils/command/build_clib.py
python
build_clib.get_library_names
(self)
return lib_names
def get_library_names(self):
    # Assume the library list is valid -- 'check_library_list()' is
    # called from 'finalize_options()', so it should be!
    if not self.libraries:
        return None

    lib_names = []
    for (lib_name, build_info) in self.libraries:
        lib_names.append(lib_name)
    return lib_names
[ "def", "get_library_names", "(", "self", ")", ":", "# Assume the library list is valid -- 'check_library_list()' is", "# called from 'finalize_options()', so it should be!", "if", "not", "self", ".", "libraries", ":", "return", "None", "lib_names", "=", "[", "]", "for", "(", "lib_name", ",", "build_info", ")", "in", "self", ".", "libraries", ":", "lib_names", ".", "append", "(", "lib_name", ")", "return", "lib_names" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/distutils/command/build_clib.py#L154-L163
anfederico/Gemini
54fbf71ce42799bab116d9047ca3cb89ceff8272
gemini/gemini_core/gemini_master.py
python
Gemini.results
(self)
return percent_change
Print results of backtest to console
:return:
def results(self):
    """
    Print results of backtest to console
    :return:
    """
    ts = time.time()
    results_history = open("results_history.txt", 'a')
    results_history.write(
        datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') + '\n')

    title = "{:=^50}".format(
        " Results (freq {}) ".format(self.sim_params['data_frequency']))
    print(title + "\n")

    begin_price = self.data.iloc[0]['open']
    final_price = self.data.iloc[-1]['close']

    shares = self.account.initial_capital / self.data.iloc[0]['close']
    self.data['base_equity'] = [price * shares for price in self.data['close']]
    self.data['equity'] = [e for _, e in self.account.equity]

    # STRING FORMATS
    title_fmt = "{:-^40}"
    str_fmt = "{0:<13}: {1:.2f}{2}"

    # BENCHMARK
    percent_change = helpers.percent_change(self.data['base_equity'][0],
                                            self.data['base_equity'][-1])
    benchmark = self.data['base_equity'].pct_change()
    bench = [
        ("Capital", self.account.initial_capital, ""),
        ("Final Equity", self.data['base_equity'][-1], ""),
        ("Net profit",
         helpers.profit(self.account.initial_capital, percent_change),
         " ({:+.2f}%)".format(percent_change * 100)),
        ("Max Drawdown", max_drawdown(benchmark) * 100, "%"),
    ]
    print(title_fmt.format(" Benchmark "))
    results_history.write(title_fmt.format(" Benchmark ") + '\n')
    for r in bench:
        print(str_fmt.format(*r))
        results_history.write(str_fmt.format(*r) + '\n')

    # STRATEGY
    percent_change = helpers.percent_change(self.data['equity'][0],
                                            self.data['equity'][-1])
    open_fee = sum([t.fee for t in self.account.opened_trades])
    close_fee = sum([t.fee for t in self.account.closed_trades])

    # print trades
    # for t in self.account.opened_trades: print(t)

    returns = self.data['equity'].pct_change()
    strategy = [
        ("Capital", self.account.initial_capital, ""),
        ("Final Equity", self.data['equity'][-1], ""),
        ("Net profit",
         helpers.profit(self.account.initial_capital, percent_change),
         " ({:+.2f}%)".format(percent_change * 100)),
        ("Max Drawdown", max_drawdown(returns) * 100, "%"),
        ("Sharpe Ratio", sharpe_ratio(returns), ""),
        ("Sortino Ratio", sortino_ratio(returns), ""),
        ("Alpha", alpha(returns, benchmark), ""),
        ("Beta", beta(returns, benchmark), ""),
        ("Fees paid", open_fee + close_fee, ""),
    ]
    print(title_fmt.format(" Strategy "))
    results_history.write(title_fmt.format(" Strategy ") + '\n')
    for r in strategy:
        print(str_fmt.format(*r))
        results_history.write(str_fmt.format(*r) + '\n')

    # STATISTICS
    longs = len([t for t in self.account.opened_trades if t.type_ == 'Long'])
    sells = len([t for t in self.account.closed_trades if t.type_ == 'Long'])
    shorts = len([t for t in self.account.opened_trades if t.type_ == 'Short'])
    covers = len([t for t in self.account.closed_trades if t.type_ == 'Short'])

    stat = [
        ("Longs", longs, ""),
        ("Sells", sells, ""),
        ("Shorts", shorts, ""),
        ("Covers", covers, ""),
        ("Total Trades", longs + sells + shorts + covers, ""),
    ]
    str_fmt = "{0:<13}: {1:.0f}{2}"
    results_history.write(title_fmt.format(" Statistics ") + '\n')
    print(title_fmt.format(" Statistics "))
    for r in stat:
        print(str_fmt.format(*r))
        results_history.write(str_fmt.format(*r) + '\n')

    print("-" * len(title))
    results_history.write("-" * len(title) + '\n' * 3)

    tuple_results = bench + stat + strategy
    for element in tuple_results:
        element = element[0:2]

    return percent_change
[ "def", "results", "(", "self", ")", ":", "ts", "=", "time", ".", "time", "(", ")", "results_history", "=", "open", "(", "\"results_history.txt\"", ",", "'a'", ")", "results_history", ".", "write", "(", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "ts", ")", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ")", "+", "'\\n'", ")", "title", "=", "\"{:=^50}\"", ".", "format", "(", "\" Results (freq {}) \"", ".", "format", "(", "self", ".", "sim_params", "[", "'data_frequency'", "]", ")", ")", "print", "(", "title", "+", "\"\\n\"", ")", "begin_price", "=", "self", ".", "data", ".", "iloc", "[", "0", "]", "[", "'open'", "]", "final_price", "=", "self", ".", "data", ".", "iloc", "[", "-", "1", "]", "[", "'close'", "]", "shares", "=", "self", ".", "account", ".", "initial_capital", "/", "self", ".", "data", ".", "iloc", "[", "0", "]", "[", "'close'", "]", "self", ".", "data", "[", "'base_equity'", "]", "=", "[", "price", "*", "shares", "for", "price", "in", "self", ".", "data", "[", "'close'", "]", "]", "self", ".", "data", "[", "'equity'", "]", "=", "[", "e", "for", "_", ",", "e", "in", "self", ".", "account", ".", "equity", "]", "# STRING FORMATS", "title_fmt", "=", "\"{:-^40}\"", "str_fmt", "=", "\"{0:<13}: {1:.2f}{2}\"", "# BENCHMARK", "percent_change", "=", "helpers", ".", "percent_change", "(", "self", ".", "data", "[", "'base_equity'", "]", "[", "0", "]", ",", "self", ".", "data", "[", "'base_equity'", "]", "[", "-", "1", "]", ")", "benchmark", "=", "self", ".", "data", "[", "'base_equity'", "]", ".", "pct_change", "(", ")", "bench", "=", "[", "(", "\"Capital\"", ",", "self", ".", "account", ".", "initial_capital", ",", "\"\"", ")", ",", "(", "\"Final Equity\"", ",", "self", ".", "data", "[", "'base_equity'", "]", "[", "-", "1", "]", ",", "\"\"", ")", ",", "(", "\"Net profit\"", ",", "helpers", ".", "profit", "(", "self", ".", "account", ".", "initial_capital", ",", "percent_change", ")", ",", "\" ({:+.2f}%)\"", ".", "format", "(", "percent_change", "*", "100", ")", ")", ",", "(", "\"Max Drawdown\"", ",", "max_drawdown", "(", "benchmark", ")", "*", "100", ",", "\"%\"", ")", ",", "]", "print", "(", "title_fmt", ".", "format", "(", "\" Benchmark \"", ")", ")", "results_history", ".", "write", "(", "title_fmt", ".", "format", "(", "\" Benchmark \"", ")", "+", "'\\n'", ")", "for", "r", "in", "bench", ":", "print", "(", "str_fmt", ".", "format", "(", "*", "r", ")", ")", "results_history", ".", "write", "(", "str_fmt", ".", "format", "(", "*", "r", ")", "+", "'\\n'", ")", "# STRATEGY", "percent_change", "=", "helpers", ".", "percent_change", "(", "self", ".", "data", "[", "'equity'", "]", "[", "0", "]", ",", "self", ".", "data", "[", "'equity'", "]", "[", "-", "1", "]", ")", "open_fee", "=", "sum", "(", "[", "t", ".", "fee", "for", "t", "in", "self", ".", "account", ".", "opened_trades", "]", ")", "close_fee", "=", "sum", "(", "[", "t", ".", "fee", "for", "t", "in", "self", ".", "account", ".", "closed_trades", "]", ")", "# print trades", "# for t in self.account.opened_trades: print(t)", "returns", "=", "self", ".", "data", "[", "'equity'", "]", ".", "pct_change", "(", ")", "strategy", "=", "[", "(", "\"Capital\"", ",", "self", ".", "account", ".", "initial_capital", ",", "\"\"", ")", ",", "(", "\"Final Equity\"", ",", "self", ".", "data", "[", "'equity'", "]", "[", "-", "1", "]", ",", "\"\"", ")", ",", "(", "\"Net profit\"", ",", "helpers", ".", "profit", "(", "self", ".", "account", ".", "initial_capital", ",", "percent_change", ")", ",", "\" ({:+.2f}%)\"", ".", "format", "(", 
"percent_change", "*", "100", ")", ")", ",", "(", "\"Max Drawdown\"", ",", "max_drawdown", "(", "returns", ")", "*", "100", ",", "\"%\"", ")", ",", "(", "\"Sharpe Ratio\"", ",", "sharpe_ratio", "(", "returns", ")", ",", "\"\"", ")", ",", "(", "\"Sortino Ratio\"", ",", "sortino_ratio", "(", "returns", ")", ",", "\"\"", ")", ",", "(", "\"Alpha\"", ",", "alpha", "(", "returns", ",", "benchmark", ")", ",", "\"\"", ")", ",", "(", "\"Beta\"", ",", "beta", "(", "returns", ",", "benchmark", ")", ",", "\"\"", ")", ",", "(", "\"Fees paid\"", ",", "open_fee", "+", "close_fee", ",", "\"\"", ")", ",", "]", "print", "(", "title_fmt", ".", "format", "(", "\" Strategy \"", ")", ")", "results_history", ".", "write", "(", "title_fmt", ".", "format", "(", "\" Strategy \"", ")", "+", "'\\n'", ")", "for", "r", "in", "strategy", ":", "print", "(", "str_fmt", ".", "format", "(", "*", "r", ")", ")", "results_history", ".", "write", "(", "str_fmt", ".", "format", "(", "*", "r", ")", "+", "'\\n'", ")", "# STATISTICS", "longs", "=", "len", "(", "[", "t", "for", "t", "in", "self", ".", "account", ".", "opened_trades", "if", "t", ".", "type_", "==", "'Long'", "]", ")", "sells", "=", "len", "(", "[", "t", "for", "t", "in", "self", ".", "account", ".", "closed_trades", "if", "t", ".", "type_", "==", "'Long'", "]", ")", "shorts", "=", "len", "(", "[", "t", "for", "t", "in", "self", ".", "account", ".", "opened_trades", "if", "t", ".", "type_", "==", "'Short'", "]", ")", "covers", "=", "len", "(", "[", "t", "for", "t", "in", "self", ".", "account", ".", "closed_trades", "if", "t", ".", "type_", "==", "'Short'", "]", ")", "stat", "=", "[", "(", "\"Longs\"", ",", "longs", ",", "\"\"", ")", ",", "(", "\"Sells\"", ",", "sells", ",", "\"\"", ")", ",", "(", "\"Shorts\"", ",", "shorts", ",", "\"\"", ")", ",", "(", "\"Covers\"", ",", "covers", ",", "\"\"", ")", ",", "(", "\"Total Trades\"", ",", "longs", "+", "sells", "+", "shorts", "+", "covers", ",", "\"\"", ")", ",", "]", "str_fmt", "=", "\"{0:<13}: {1:.0f}{2}\"", "results_history", ".", "write", "(", "title_fmt", ".", "format", "(", "\" Statistics \"", ")", "+", "'\\n'", ")", "print", "(", "title_fmt", ".", "format", "(", "\" Statistics \"", ")", ")", "for", "r", "in", "stat", ":", "print", "(", "str_fmt", ".", "format", "(", "*", "r", ")", ")", "results_history", ".", "write", "(", "str_fmt", ".", "format", "(", "*", "r", ")", "+", "'\\n'", ")", "print", "(", "\"-\"", "*", "len", "(", "title", ")", ")", "results_history", ".", "write", "(", "\"-\"", "*", "len", "(", "title", ")", "+", "'\\n'", "*", "3", ")", "tuple_results", "=", "bench", "+", "stat", "+", "strategy", "for", "element", "in", "tuple_results", ":", "element", "=", "element", "[", "0", ":", "2", "]", "return", "percent_change" ]
https://github.com/anfederico/Gemini/blob/54fbf71ce42799bab116d9047ca3cb89ceff8272/gemini/gemini_core/gemini_master.py#L126-L239
hankcs/HanLP
6c02812969c4827d74b404c3ad4207f71ca9165a
hanlp/common/dataset.py
python
TransformableDataset.should_load_file
(self, data)
return isinstance(data, str)
Determines whether data is a filepath.

Args:
    data: Data to check.

Returns:
    ``True`` to indicate it's a filepath.
def should_load_file(self, data) -> bool:
    """Determines whether data is a filepath.

    Args:
        data: Data to check.

    Returns:
        ``True`` to indicate it's a filepath.
    """
    return isinstance(data, str)
[ "def", "should_load_file", "(", "self", ",", "data", ")", "->", "bool", ":", "return", "isinstance", "(", "data", ",", "str", ")" ]
https://github.com/hankcs/HanLP/blob/6c02812969c4827d74b404c3ad4207f71ca9165a/hanlp/common/dataset.py#L165-L174
diefenbach/django-lfs
3bbcb3453d324c181ec68d11d5d35115a60a2fd5
lfs/catalog/models.py
python
Product.get_images
(self)
return images
Returns all images of the product, including the main image.
def get_images(self):
    """
    Returns all images of the product, including the main image.
    """
    cache_key = "%s-product-images-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, self.id)
    images = cache.get(cache_key)

    if images is None:
        if self.is_variant() and not self.active_images:
            obj = self.parent
        else:
            obj = self

        images = obj.images.all()
        cache.set(cache_key, images)

    return images
[ "def", "get_images", "(", "self", ")", ":", "cache_key", "=", "\"%s-product-images-%s\"", "%", "(", "settings", ".", "CACHE_MIDDLEWARE_KEY_PREFIX", ",", "self", ".", "id", ")", "images", "=", "cache", ".", "get", "(", "cache_key", ")", "if", "images", "is", "None", ":", "if", "self", ".", "is_variant", "(", ")", "and", "not", "self", ".", "active_images", ":", "obj", "=", "self", ".", "parent", "else", ":", "obj", "=", "self", "images", "=", "obj", ".", "images", ".", "all", "(", ")", "cache", ".", "set", "(", "cache_key", ",", "images", ")", "return", "images" ]
https://github.com/diefenbach/django-lfs/blob/3bbcb3453d324c181ec68d11d5d35115a60a2fd5/lfs/catalog/models.py#L976-L992
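The method is a textbook cache-aside read: try the cache, compute on a miss, store, and return. A standalone sketch with a dict standing in for Django's cache backend (names and values are illustrative):

_cache = {}

def get_images_cached(product_id, load):
    key = "product-images-%s" % product_id
    images = _cache.get(key)
    if images is None:              # miss: load once, then memoize
        images = load(product_id)
        _cache[key] = images
    return images

first = get_images_cached(42, lambda pid: ["img-%s-0.jpg" % pid])
second = get_images_cached(42, lambda pid: [])   # hit: loader not called
print(first is second)   # True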
tp4a/teleport
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
server/www/packages/packages-darwin/x64/tornado/iostream.py
python
BaseIOStream._handle_events
(self, fd, events)
def _handle_events(self, fd, events):
    if self.closed():
        gen_log.warning("Got events for closed stream %s", fd)
        return
    try:
        if self._connecting:
            # Most IOLoops will report a write failed connect
            # with the WRITE event, but SelectIOLoop reports a
            # READ as well so we must check for connecting before
            # either.
            self._handle_connect()
        if self.closed():
            return
        if events & self.io_loop.READ:
            self._handle_read()
        if self.closed():
            return
        if events & self.io_loop.WRITE:
            self._handle_write()
        if self.closed():
            return
        if events & self.io_loop.ERROR:
            self.error = self.get_fd_error()
            # We may have queued up a user callback in _handle_read or
            # _handle_write, so don't close the IOStream until those
            # callbacks have had a chance to run.
            self.io_loop.add_callback(self.close)
            return
        state = self.io_loop.ERROR
        if self.reading():
            state |= self.io_loop.READ
        if self.writing():
            state |= self.io_loop.WRITE
        if state == self.io_loop.ERROR and self._read_buffer_size == 0:
            # If the connection is idle, listen for reads too so
            # we can tell if the connection is closed. If there is
            # data in the read buffer we won't run the close callback
            # yet anyway, so we don't need to listen in this case.
            state |= self.io_loop.READ
        if state != self._state:
            assert self._state is not None, \
                "shouldn't happen: _handle_events without self._state"
            self._state = state
            self.io_loop.update_handler(self.fileno(), self._state)
    except UnsatisfiableReadError as e:
        gen_log.info("Unsatisfiable read, closing connection: %s" % e)
        self.close(exc_info=e)
    except Exception as e:
        gen_log.error("Uncaught exception, closing connection.",
                      exc_info=True)
        self.close(exc_info=e)
        raise
[ "def", "_handle_events", "(", "self", ",", "fd", ",", "events", ")", ":", "if", "self", ".", "closed", "(", ")", ":", "gen_log", ".", "warning", "(", "\"Got events for closed stream %s\"", ",", "fd", ")", "return", "try", ":", "if", "self", ".", "_connecting", ":", "# Most IOLoops will report a write failed connect", "# with the WRITE event, but SelectIOLoop reports a", "# READ as well so we must check for connecting before", "# either.", "self", ".", "_handle_connect", "(", ")", "if", "self", ".", "closed", "(", ")", ":", "return", "if", "events", "&", "self", ".", "io_loop", ".", "READ", ":", "self", ".", "_handle_read", "(", ")", "if", "self", ".", "closed", "(", ")", ":", "return", "if", "events", "&", "self", ".", "io_loop", ".", "WRITE", ":", "self", ".", "_handle_write", "(", ")", "if", "self", ".", "closed", "(", ")", ":", "return", "if", "events", "&", "self", ".", "io_loop", ".", "ERROR", ":", "self", ".", "error", "=", "self", ".", "get_fd_error", "(", ")", "# We may have queued up a user callback in _handle_read or", "# _handle_write, so don't close the IOStream until those", "# callbacks have had a chance to run.", "self", ".", "io_loop", ".", "add_callback", "(", "self", ".", "close", ")", "return", "state", "=", "self", ".", "io_loop", ".", "ERROR", "if", "self", ".", "reading", "(", ")", ":", "state", "|=", "self", ".", "io_loop", ".", "READ", "if", "self", ".", "writing", "(", ")", ":", "state", "|=", "self", ".", "io_loop", ".", "WRITE", "if", "state", "==", "self", ".", "io_loop", ".", "ERROR", "and", "self", ".", "_read_buffer_size", "==", "0", ":", "# If the connection is idle, listen for reads too so", "# we can tell if the connection is closed. If there is", "# data in the read buffer we won't run the close callback", "# yet anyway, so we don't need to listen in this case.", "state", "|=", "self", ".", "io_loop", ".", "READ", "if", "state", "!=", "self", ".", "_state", ":", "assert", "self", ".", "_state", "is", "not", "None", ",", "\"shouldn't happen: _handle_events without self._state\"", "self", ".", "_state", "=", "state", "self", ".", "io_loop", ".", "update_handler", "(", "self", ".", "fileno", "(", ")", ",", "self", ".", "_state", ")", "except", "UnsatisfiableReadError", "as", "e", ":", "gen_log", ".", "info", "(", "\"Unsatisfiable read, closing connection: %s\"", "%", "e", ")", "self", ".", "close", "(", "exc_info", "=", "e", ")", "except", "Exception", "as", "e", ":", "gen_log", ".", "error", "(", "\"Uncaught exception, closing connection.\"", ",", "exc_info", "=", "True", ")", "self", ".", "close", "(", "exc_info", "=", "e", ")", "raise" ]
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-darwin/x64/tornado/iostream.py#L695-L746
Octavian-ai/clevr-graph
a7d5be1da0c0c1e1e0a4b3c95f7e793b0de31b50
gqa/functional.py
python
Architecture.get
(self, graph)
return Architecture(random.choice(StationProperties["architecture"]))
[]
def get(self, graph): return Architecture(random.choice(StationProperties["architecture"]))
[ "def", "get", "(", "self", ",", "graph", ")", ":", "return", "Architecture", "(", "random", ".", "choice", "(", "StationProperties", "[", "\"architecture\"", "]", ")", ")" ]
https://github.com/Octavian-ai/clevr-graph/blob/a7d5be1da0c0c1e1e0a4b3c95f7e793b0de31b50/gqa/functional.py#L112-L113
21dotco/two1-python
4e833300fd5a58363e3104ed4c097631e5d296d3
two1/bitcoin/script_interpreter.py
python
ScriptInterpreter._op_lessthan
(self)
Returns 1 if a is less than b, 0 otherwise.
Returns 1 if a is less than b, 0 otherwise.
[ "Returns", "1", "if", "a", "is", "less", "than", "b", "0", "otherwise", "." ]
def _op_lessthan(self): """ Returns 1 if a is less than b, 0 otherwise. """ self._do_binary_op(lambda a, b: int(a < b))
[ "def", "_op_lessthan", "(", "self", ")", ":", "self", ".", "_do_binary_op", "(", "lambda", "a", ",", "b", ":", "int", "(", "a", "<", "b", ")", ")" ]
https://github.com/21dotco/two1-python/blob/4e833300fd5a58363e3104ed4c097631e5d296d3/two1/bitcoin/script_interpreter.py#L625-L628
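For context, a binary opcode in a stack-based script interpreter pops two operands and pushes one result. A tiny standalone sketch of the `int(a < b)` rule used above (the stack handling is an assumption for illustration, not copied from two1's `_do_binary_op`):

stack = [2, 5]             # a was pushed first, then b
b, a = stack.pop(), stack.pop()
stack.append(int(a < b))   # OP_LESSTHAN: pushes 1 because 2 < 5
print(stack)               # [1]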
yuanxiaosc/Multiple-Relations-Extraction-Only-Look-Once
3d1fc216c6aedc0494e52f2ff142af856dcb673f
bert/create_pretraining_data.py
python
truncate_seq_pair
(tokens_a, tokens_b, max_num_tokens, rng)
Truncates a pair of sequences to a maximum sequence length.
Truncates a pair of sequences to a maximum sequence length.
[ "Truncates", "a", "pair", "of", "sequences", "to", "a", "maximum", "sequence", "length", "." ]
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng): """Truncates a pair of sequences to a maximum sequence length.""" while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_num_tokens: break trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b assert len(trunc_tokens) >= 1 # We want to sometimes truncate from the front and sometimes from the # back to add more randomness and avoid biases. if rng.random() < 0.5: del trunc_tokens[0] else: trunc_tokens.pop()
[ "def", "truncate_seq_pair", "(", "tokens_a", ",", "tokens_b", ",", "max_num_tokens", ",", "rng", ")", ":", "while", "True", ":", "total_length", "=", "len", "(", "tokens_a", ")", "+", "len", "(", "tokens_b", ")", "if", "total_length", "<=", "max_num_tokens", ":", "break", "trunc_tokens", "=", "tokens_a", "if", "len", "(", "tokens_a", ")", ">", "len", "(", "tokens_b", ")", "else", "tokens_b", "assert", "len", "(", "trunc_tokens", ")", ">=", "1", "# We want to sometimes truncate from the front and sometimes from the", "# back to add more randomness and avoid biases.", "if", "rng", ".", "random", "(", ")", "<", "0.5", ":", "del", "trunc_tokens", "[", "0", "]", "else", ":", "trunc_tokens", ".", "pop", "(", ")" ]
https://github.com/yuanxiaosc/Multiple-Relations-Extraction-Only-Look-Once/blob/3d1fc216c6aedc0494e52f2ff142af856dcb673f/bert/create_pretraining_data.py#L391-L406
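A quick, self-contained usage check of `truncate_seq_pair` above — it trims the longer list in place until the combined length fits (the function logic is copied from the record; the token values are made up):

import random

def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
    # verbatim logic from the record above
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_num_tokens:
            break
        trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        assert len(trunc_tokens) >= 1
        if rng.random() < 0.5:
            del trunc_tokens[0]   # sometimes trim the front...
        else:
            trunc_tokens.pop()    # ...sometimes the back

tokens_a, tokens_b = list("ABCDEFG"), list("123")
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens=8, rng=random.Random(0))
assert len(tokens_a) + len(tokens_b) <= 8   # 7 + 3 = 10, trimmed down to 8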
zhanlaoban/Transformers_for_Text_Classification
5e12b21616b29e445e11fe307948e5c55084bb0e
transformers/modeling_distilbert.py
python
MultiHeadSelfAttention.forward
(self, query, key, value, mask, head_mask = None)
Parameters ---------- query: torch.tensor(bs, seq_length, dim) key: torch.tensor(bs, seq_length, dim) value: torch.tensor(bs, seq_length, dim) mask: torch.tensor(bs, seq_length) Outputs ------- weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs, seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
Parameters ---------- query: torch.tensor(bs, seq_length, dim) key: torch.tensor(bs, seq_length, dim) value: torch.tensor(bs, seq_length, dim) mask: torch.tensor(bs, seq_length)
[ "Parameters", "----------", "query", ":", "torch", ".", "tensor", "(", "bs", "seq_length", "dim", ")", "key", ":", "torch", ".", "tensor", "(", "bs", "seq_length", "dim", ")", "value", ":", "torch", ".", "tensor", "(", "bs", "seq_length", "dim", ")", "mask", ":", "torch", ".", "tensor", "(", "bs", "seq_length", ")" ]
def forward(self, query, key, value, mask, head_mask = None): """ Parameters ---------- query: torch.tensor(bs, seq_length, dim) key: torch.tensor(bs, seq_length, dim) value: torch.tensor(bs, seq_length, dim) mask: torch.tensor(bs, seq_length) Outputs ------- weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs, seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True` """ bs, q_length, dim = query.size() k_length = key.size(1) # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim) # assert key.size() == value.size() dim_per_head = self.dim // self.n_heads mask_reshp = (bs, 1, 1, k_length) def shape(x): """ separate heads """ return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2) def unshape(x): """ group heads """ return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head) q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head) k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head) v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head) q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head) scores = torch.matmul(q, k.transpose(2,3)) # (bs, n_heads, q_length, k_length) mask = (mask==0).view(mask_reshp).expand_as(scores) # (bs, n_heads, q_length, k_length) scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, q_length, k_length) weights = nn.Softmax(dim=-1)(scores) # (bs, n_heads, q_length, k_length) weights = self.dropout(weights) # (bs, n_heads, q_length, k_length) # Mask heads if we want to if head_mask is not None: weights = weights * head_mask context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head) context = unshape(context) # (bs, q_length, dim) context = self.out_lin(context) # (bs, q_length, dim) if self.output_attentions: return (context, weights) else: return (context,)
[ "def", "forward", "(", "self", ",", "query", ",", "key", ",", "value", ",", "mask", ",", "head_mask", "=", "None", ")", ":", "bs", ",", "q_length", ",", "dim", "=", "query", ".", "size", "(", ")", "k_length", "=", "key", ".", "size", "(", "1", ")", "# assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)", "# assert key.size() == value.size()", "dim_per_head", "=", "self", ".", "dim", "//", "self", ".", "n_heads", "mask_reshp", "=", "(", "bs", ",", "1", ",", "1", ",", "k_length", ")", "def", "shape", "(", "x", ")", ":", "\"\"\" separate heads \"\"\"", "return", "x", ".", "view", "(", "bs", ",", "-", "1", ",", "self", ".", "n_heads", ",", "dim_per_head", ")", ".", "transpose", "(", "1", ",", "2", ")", "def", "unshape", "(", "x", ")", ":", "\"\"\" group heads \"\"\"", "return", "x", ".", "transpose", "(", "1", ",", "2", ")", ".", "contiguous", "(", ")", ".", "view", "(", "bs", ",", "-", "1", ",", "self", ".", "n_heads", "*", "dim_per_head", ")", "q", "=", "shape", "(", "self", ".", "q_lin", "(", "query", ")", ")", "# (bs, n_heads, q_length, dim_per_head)", "k", "=", "shape", "(", "self", ".", "k_lin", "(", "key", ")", ")", "# (bs, n_heads, k_length, dim_per_head)", "v", "=", "shape", "(", "self", ".", "v_lin", "(", "value", ")", ")", "# (bs, n_heads, k_length, dim_per_head)", "q", "=", "q", "/", "math", ".", "sqrt", "(", "dim_per_head", ")", "# (bs, n_heads, q_length, dim_per_head)", "scores", "=", "torch", ".", "matmul", "(", "q", ",", "k", ".", "transpose", "(", "2", ",", "3", ")", ")", "# (bs, n_heads, q_length, k_length)", "mask", "=", "(", "mask", "==", "0", ")", ".", "view", "(", "mask_reshp", ")", ".", "expand_as", "(", "scores", ")", "# (bs, n_heads, q_length, k_length)", "scores", ".", "masked_fill_", "(", "mask", ",", "-", "float", "(", "'inf'", ")", ")", "# (bs, n_heads, q_length, k_length)", "weights", "=", "nn", ".", "Softmax", "(", "dim", "=", "-", "1", ")", "(", "scores", ")", "# (bs, n_heads, q_length, k_length)", "weights", "=", "self", ".", "dropout", "(", "weights", ")", "# (bs, n_heads, q_length, k_length)", "# Mask heads if we want to", "if", "head_mask", "is", "not", "None", ":", "weights", "=", "weights", "*", "head_mask", "context", "=", "torch", ".", "matmul", "(", "weights", ",", "v", ")", "# (bs, n_heads, q_length, dim_per_head)", "context", "=", "unshape", "(", "context", ")", "# (bs, q_length, dim)", "context", "=", "self", ".", "out_lin", "(", "context", ")", "# (bs, q_length, dim)", "if", "self", ".", "output_attentions", ":", "return", "(", "context", ",", "weights", ")", "else", ":", "return", "(", "context", ",", ")" ]
https://github.com/zhanlaoban/Transformers_for_Text_Classification/blob/5e12b21616b29e445e11fe307948e5c55084bb0e/transformers/modeling_distilbert.py#L142-L198
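The core reshaping trick in the record above is the shape/unshape pair that splits the model dimension into attention heads and merges it back. A standalone PyTorch sketch of just that round trip (sizes are arbitrary):

import torch

bs, q_length, n_heads, dim_per_head = 2, 5, 4, 16
dim = n_heads * dim_per_head

x = torch.randn(bs, q_length, dim)
# shape: (bs, q_length, dim) -> (bs, n_heads, q_length, dim_per_head)
heads = x.view(bs, -1, n_heads, dim_per_head).transpose(1, 2)
# unshape: back to (bs, q_length, dim)
merged = heads.transpose(1, 2).contiguous().view(bs, -1, n_heads * dim_per_head)
assert torch.equal(x, merged)   # lossless round trip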
huawei-noah/Pretrained-Language-Model
d4694a134bdfacbaef8ff1d99735106bd3b3372b
TernaryBERT-MindSpore/src/cell_wrapper.py
python
ClipGradients.construct
(self, grads, clip_type, clip_value)
return new_grads
clip gradients
clip gradients
[ "clip", "gradients" ]
def construct(self, grads, clip_type, clip_value): """clip gradients""" if clip_type != 0 and clip_type != 1: return grads new_grads = () for grad in grads: dt = self.dtype(grad) if clip_type == 0: t = C.clip_by_value(grad, self.cast(F.tuple_to_array((-clip_value,)), dt), self.cast(F.tuple_to_array((clip_value,)), dt)) else: t = self.clip_by_norm(grad, self.cast(F.tuple_to_array((clip_value,)), dt)) new_grads = new_grads + (t,) return new_grads
[ "def", "construct", "(", "self", ",", "grads", ",", "clip_type", ",", "clip_value", ")", ":", "if", "clip_type", "!=", "0", "and", "clip_type", "!=", "1", ":", "return", "grads", "new_grads", "=", "(", ")", "for", "grad", "in", "grads", ":", "dt", "=", "self", ".", "dtype", "(", "grad", ")", "if", "clip_type", "==", "0", ":", "t", "=", "C", ".", "clip_by_value", "(", "grad", ",", "self", ".", "cast", "(", "F", ".", "tuple_to_array", "(", "(", "-", "clip_value", ",", ")", ")", ",", "dt", ")", ",", "self", ".", "cast", "(", "F", ".", "tuple_to_array", "(", "(", "clip_value", ",", ")", ")", ",", "dt", ")", ")", "else", ":", "t", "=", "self", ".", "clip_by_norm", "(", "grad", ",", "self", ".", "cast", "(", "F", ".", "tuple_to_array", "(", "(", "clip_value", ",", ")", ")", ",", "dt", ")", ")", "new_grads", "=", "new_grads", "+", "(", "t", ",", ")", "return", "new_grads" ]
https://github.com/huawei-noah/Pretrained-Language-Model/blob/d4694a134bdfacbaef8ff1d99735106bd3b3372b/TernaryBERT-MindSpore/src/cell_wrapper.py#L162-L178
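The record above switches between clip-by-value (clip_type 0) and clip-by-norm (clip_type 1). A NumPy sketch of the same two rules, independent of MindSpore (the clip-by-norm semantics are assumed to be the usual scale-down-when-over-budget rule):

import numpy as np

def clip_grad(grad, clip_type, clip_value):
    if clip_type == 0:                    # elementwise clamp
        return np.clip(grad, -clip_value, clip_value)
    norm = np.linalg.norm(grad)           # clip by global L2 norm
    return grad * (clip_value / norm) if norm > clip_value else grad

g = np.array([3.0, -4.0])                 # L2 norm = 5
print(clip_grad(g, 0, 1.0))               # [ 1. -1.]
print(clip_grad(g, 1, 1.0))               # [ 0.6 -0.8]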
cvlab-epfl/tf-lift
5341909002e0a3269a115dc7f9ff7b5330961052
networks/lift.py
python
Network._build_network
(self)
Define all the architecture here. Use the modules if necessary.
Define all the architecture here. Use the modules if necessary.
[ "Define", "all", "the", "architecture", "here", ".", "Use", "the", "modules", "if", "necessary", "." ]
def _build_network(self): """Define all the architecture here. Use the modules if necessary.""" # Import modules according to the configurations self.modules = {} for _key in ["kp", "ori", "desc"]: self.modules[_key] = importlib.import_module( "modules.{}".format( getattr(self.config, "module_" + _key))) # prepare dictionary for the output and parameters of each module self.outputs = {} self.params = {} self.allparams = {} for _key in self.modules: self.outputs[_key] = {} self.params[_key] = [] self.allparams[_key] = [] # create a joint params list # NOTE: params is a list, not a dict! self.params["joint"] = [] self.allparams["joint"] = [] # create outputs placeholder for crop and rot self.outputs["resize"] = {} self.outputs["crop"] = {} self.outputs["rot"] = {} # Actual Network definition with tf.variable_scope("lift"): # Graph construction depends on the subtask subtask = self.config.subtask # ---------------------------------------- # Initial resize for the keypoint module # Includes rotation when augmentations are used # if self.config.use_augmented_set: rot = self.inputs["aug_rot"] else: rot = None self._build_st( module="resize", xyz=None, cs=rot, names=["P1", "P2", "P3", "P4"], out_size=self.config.kp_input_size, reduce_ratio=float(get_patch_size_no_aug(self.config)) / float(get_patch_size(self.config)), ) # ---------------------------------------- # Keypoint Detector # # The keypoint detector takes each patch input and outputs (1) # "score": the score of the patch, (2) "xy": keypoint position in # side the patch. The score output is the soft-maximum (not a # softmax) of the scores. The position output from the network # should be in the form friendly to the spatial # transformer. Outputs are always dictionaries. # Rotate ground truth coordinates when augmenting rotations. aug_rot = self.inputs["aug_rot"] \ if self.config.augment_rotations else None xyz_gt_scaled = self.transform_xyz( self.inputs["xyz"], aug_rot, self.config.batch_size, self.scale_aug, transpose=True, names=["P1", "P2", "P3", "P4"]) self._build_module( module="kp", inputs=self.outputs["resize"], bypass=xyz_gt_scaled, names=["P1", "P2", "P3", "P4"], skip=subtask == "ori" or subtask == "desc", ) # For image based test self._build_module( module="kp", inputs=self.inputs["img"], bypass=self.inputs["img"], # This is a dummy names=["img"], skip=subtask != "kp", reuse=True, test_only=True, ) # ---------------------------------------- # The Crop Spatial Transformer # Output: use the same support region as for the descriptor # xyz_kp_scaled = self.transform_kp( self.outputs["kp"], aug_rot, self.config.batch_size, 1 / self.scale_aug, transpose=False, names=["P1", "P2", "P3"]) self._build_st( module="crop", xyz=xyz_kp_scaled, cs=aug_rot, names=["P1", "P2", "P3"], out_size=self.config.ori_input_size, reduce_ratio=float(self.config.desc_input_size) / float(get_patch_size(self.config)), ) # ---------------------------------------- # Orientation Estimator # # The orientation estimator takes the crop outputs as input and # outputs orientations for the spatial transformer to # use. Actually, since we output cos and sin, we can simply use the # *UNNORMALIZED* version of the two, normalize them, and directly # use it for our affine transform. In short it returns "cs": the # cos and the sin, but unnormalized. Outputs are always # dictionaries. # Bypass: just the GT angle if self.config.augment_rotations: rot = {} for name in ["P1", "P2", "P3"]: rot[name] = self.inputs["angle"][name] - \ self.inputs["aug_rot"][name]["angle"] else: rot = self.inputs["angle"] self._build_module( module="ori", inputs=self.outputs["crop"], bypass=rot, names=["P1", "P2", "P3"], skip=subtask == "kp" or subtask == "desc", ) # ---------------------------------------- # The Rot Spatial Transformer. # - No rotation augmentation: # Operates over the original patch with the ground truth angle when # bypassing. Otherwise, we combine the augmented angle and the # output of the orientation module. # We do not consider rotation augmentations for the descriptor. if self.config.augment_rotations: rot = self.chain_cs( self.inputs["aug_rot"], self.outputs["ori"], names=["P1", "P2", "P3"]) # rot = self.outputs["ori"] # xyz_desc_scaled = self.transform_kp( # self.outputs["kp"], # rot, # self.config.batch_size, # 1 / self.scale_aug, # transpose=False, # names=["P1", "P2", "P3"]) # elif self.config.use_augmented_set: else: rot = self.outputs["ori"] # xyz_desc_scaled = self.transform_kp( # self.outputs["kp"], # rot, # self.config.batch_size, # 1 / self.scale_aug, # transpose=False, # names=["P1", "P2", "P3"]) # else: # rot = None # xyz_desc_scaled = self.inputs["xyz"] self._build_st( module="rot", xyz=xyz_kp_scaled, cs=rot, names=["P1", "P2", "P3"], out_size=self.config.desc_input_size, reduce_ratio=float(self.config.desc_input_size) / float(get_patch_size(self.config)), ) # ---------------------------------------- # Feature Descriptor # # The descriptor simply computes the descriptors, given the patch. self._build_module( module="desc", inputs=self.outputs["rot"], bypass=self.outputs["rot"], names=["P1", "P2", "P3"], skip=False, )
[ "def", "_build_network", "(", "self", ")", ":", "# Import modules according to the configurations", "self", ".", "modules", "=", "{", "}", "for", "_key", "in", "[", "\"kp\"", ",", "\"ori\"", ",", "\"desc\"", "]", ":", "self", ".", "modules", "[", "_key", "]", "=", "importlib", ".", "import_module", "(", "\"modules.{}\"", ".", "format", "(", "getattr", "(", "self", ".", "config", ",", "\"module_\"", "+", "_key", ")", ")", ")", "# prepare dictionary for the output and parameters of each module", "self", ".", "outputs", "=", "{", "}", "self", ".", "params", "=", "{", "}", "self", ".", "allparams", "=", "{", "}", "for", "_key", "in", "self", ".", "modules", ":", "self", ".", "outputs", "[", "_key", "]", "=", "{", "}", "self", ".", "params", "[", "_key", "]", "=", "[", "]", "self", ".", "allparams", "[", "_key", "]", "=", "[", "]", "# create a joint params list", "# NOTE: params is a list, not a dict!", "self", ".", "params", "[", "\"joint\"", "]", "=", "[", "]", "self", ".", "allparams", "[", "\"joint\"", "]", "=", "[", "]", "# create outputs placeholder for crop and rot", "self", ".", "outputs", "[", "\"resize\"", "]", "=", "{", "}", "self", ".", "outputs", "[", "\"crop\"", "]", "=", "{", "}", "self", ".", "outputs", "[", "\"rot\"", "]", "=", "{", "}", "# Actual Network definition", "with", "tf", ".", "variable_scope", "(", "\"lift\"", ")", ":", "# Graph construction depends on the subtask", "subtask", "=", "self", ".", "config", ".", "subtask", "# ----------------------------------------", "# Initial resize for the keypoint module", "# Includes rotation when augmentations are used", "#", "if", "self", ".", "config", ".", "use_augmented_set", ":", "rot", "=", "self", ".", "inputs", "[", "\"aug_rot\"", "]", "else", ":", "rot", "=", "None", "self", ".", "_build_st", "(", "module", "=", "\"resize\"", ",", "xyz", "=", "None", ",", "cs", "=", "rot", ",", "names", "=", "[", "\"P1\"", ",", "\"P2\"", ",", "\"P3\"", ",", "\"P4\"", "]", ",", "out_size", "=", "self", ".", "config", ".", "kp_input_size", ",", "reduce_ratio", "=", "float", "(", "get_patch_size_no_aug", "(", "self", ".", "config", ")", ")", "/", "float", "(", "get_patch_size", "(", "self", ".", "config", ")", ")", ",", ")", "# ----------------------------------------", "# Keypoint Detector", "#", "# The keypoint detector takes each patch input and outputs (1)", "# \"score\": the score of the patch, (2) \"xy\": keypoint position in", "# side the patch. The score output is the soft-maximum (not a", "# softmax) of the scores. The position output from the network", "# should be in the form friendly to the spatial", "# transformer. 
Outputs are always dictionaries.", "# Rotate ground truth coordinates when augmenting rotations.", "aug_rot", "=", "self", ".", "inputs", "[", "\"aug_rot\"", "]", "if", "self", ".", "config", ".", "augment_rotations", "else", "None", "xyz_gt_scaled", "=", "self", ".", "transform_xyz", "(", "self", ".", "inputs", "[", "\"xyz\"", "]", ",", "aug_rot", ",", "self", ".", "config", ".", "batch_size", ",", "self", ".", "scale_aug", ",", "transpose", "=", "True", ",", "names", "=", "[", "\"P1\"", ",", "\"P2\"", ",", "\"P3\"", ",", "\"P4\"", "]", ")", "self", ".", "_build_module", "(", "module", "=", "\"kp\"", ",", "inputs", "=", "self", ".", "outputs", "[", "\"resize\"", "]", ",", "bypass", "=", "xyz_gt_scaled", ",", "names", "=", "[", "\"P1\"", ",", "\"P2\"", ",", "\"P3\"", ",", "\"P4\"", "]", ",", "skip", "=", "subtask", "==", "\"ori\"", "or", "subtask", "==", "\"desc\"", ",", ")", "# For image based test", "self", ".", "_build_module", "(", "module", "=", "\"kp\"", ",", "inputs", "=", "self", ".", "inputs", "[", "\"img\"", "]", ",", "bypass", "=", "self", ".", "inputs", "[", "\"img\"", "]", ",", "# This is a dummy", "names", "=", "[", "\"img\"", "]", ",", "skip", "=", "subtask", "!=", "\"kp\"", ",", "reuse", "=", "True", ",", "test_only", "=", "True", ",", ")", "# ----------------------------------------", "# The Crop Spatial Transformer", "# Output: use the same support region as for the descriptor", "#", "xyz_kp_scaled", "=", "self", ".", "transform_kp", "(", "self", ".", "outputs", "[", "\"kp\"", "]", ",", "aug_rot", ",", "self", ".", "config", ".", "batch_size", ",", "1", "/", "self", ".", "scale_aug", ",", "transpose", "=", "False", ",", "names", "=", "[", "\"P1\"", ",", "\"P2\"", ",", "\"P3\"", "]", ")", "self", ".", "_build_st", "(", "module", "=", "\"crop\"", ",", "xyz", "=", "xyz_kp_scaled", ",", "cs", "=", "aug_rot", ",", "names", "=", "[", "\"P1\"", ",", "\"P2\"", ",", "\"P3\"", "]", ",", "out_size", "=", "self", ".", "config", ".", "ori_input_size", ",", "reduce_ratio", "=", "float", "(", "self", ".", "config", ".", "desc_input_size", ")", "/", "float", "(", "get_patch_size", "(", "self", ".", "config", ")", ")", ",", ")", "# ----------------------------------------", "# Orientation Estimator", "#", "# The orientation estimator takes the crop outputs as input and", "# outputs orientations for the spatial transformer to", "# use. Actually, since we output cos and sin, we can simply use the", "# *UNNORMALIZED* version of the two, normalize them, and directly", "# use it for our affine transform. In short it returns \"cs\": the", "# cos and the sin, but unnormalized. 
Outputs are always", "# dictionaries.", "# Bypass: just the GT angle", "if", "self", ".", "config", ".", "augment_rotations", ":", "rot", "=", "{", "}", "for", "name", "in", "[", "\"P1\"", ",", "\"P2\"", ",", "\"P3\"", "]", ":", "rot", "[", "name", "]", "=", "self", ".", "inputs", "[", "\"angle\"", "]", "[", "name", "]", "-", "self", ".", "inputs", "[", "\"aug_rot\"", "]", "[", "name", "]", "[", "\"angle\"", "]", "else", ":", "rot", "=", "self", ".", "inputs", "[", "\"angle\"", "]", "self", ".", "_build_module", "(", "module", "=", "\"ori\"", ",", "inputs", "=", "self", ".", "outputs", "[", "\"crop\"", "]", ",", "bypass", "=", "rot", ",", "names", "=", "[", "\"P1\"", ",", "\"P2\"", ",", "\"P3\"", "]", ",", "skip", "=", "subtask", "==", "\"kp\"", "or", "subtask", "==", "\"desc\"", ",", ")", "# ----------------------------------------", "# The Rot Spatial Transformer.", "# - No rotation augmentation: ", "# Operates over the original patch with the ground truth angle when", "# bypassing. Otherwise, we combine the augmented angle and the", "# output of the orientation module.", "# We do not consider rotation augmentations for the descriptor.", "if", "self", ".", "config", ".", "augment_rotations", ":", "rot", "=", "self", ".", "chain_cs", "(", "self", ".", "inputs", "[", "\"aug_rot\"", "]", ",", "self", ".", "outputs", "[", "\"ori\"", "]", ",", "names", "=", "[", "\"P1\"", ",", "\"P2\"", ",", "\"P3\"", "]", ")", "# rot = self.outputs[\"ori\"]", "# xyz_desc_scaled = self.transform_kp(", "# self.outputs[\"kp\"],", "# rot,", "# self.config.batch_size,", "# 1 / self.scale_aug,", "# transpose=False,", "# names=[\"P1\", \"P2\", \"P3\"])", "# elif self.config.use_augmented_set:", "else", ":", "rot", "=", "self", ".", "outputs", "[", "\"ori\"", "]", "# xyz_desc_scaled = self.transform_kp(", "# self.outputs[\"kp\"],", "# rot,", "# self.config.batch_size,", "# 1 / self.scale_aug,", "# transpose=False,", "# names=[\"P1\", \"P2\", \"P3\"])", "# else:", "# rot = None", "# xyz_desc_scaled = self.inputs[\"xyz\"]", "self", ".", "_build_st", "(", "module", "=", "\"rot\"", ",", "xyz", "=", "xyz_kp_scaled", ",", "cs", "=", "rot", ",", "names", "=", "[", "\"P1\"", ",", "\"P2\"", ",", "\"P3\"", "]", ",", "out_size", "=", "self", ".", "config", ".", "desc_input_size", ",", "reduce_ratio", "=", "float", "(", "self", ".", "config", ".", "desc_input_size", ")", "/", "float", "(", "get_patch_size", "(", "self", ".", "config", ")", ")", ",", ")", "# ----------------------------------------", "# Feature Descriptor", "#", "# The descriptor simply computes the descriptors, given the patch.", "self", ".", "_build_module", "(", "module", "=", "\"desc\"", ",", "inputs", "=", "self", ".", "outputs", "[", "\"rot\"", "]", ",", "bypass", "=", "self", ".", "outputs", "[", "\"rot\"", "]", ",", "names", "=", "[", "\"P1\"", ",", "\"P2\"", ",", "\"P3\"", "]", ",", "skip", "=", "False", ",", ")" ]
https://github.com/cvlab-epfl/tf-lift/blob/5341909002e0a3269a115dc7f9ff7b5330961052/networks/lift.py#L414-L602
selinon/selinon
3613153566d454022a138639f0375c63f490c4cb
selinon/system.py
python
System._setup_nodes
(cls, system, nodes_definition, nodes_definition_file_name)
Configure nodes available in the system based on supplied configuration. :param system: system instance to be used :type system: selinon.system.System :param nodes_definition: a list of dictionaries holding flow configuration :type nodes_definition: dict :param nodes_definition_file_name: a name of nodes definition file (used in messages for better debugging) :type nodes_definition_file_name: str
Configure nodes available in the system based on supplied configuration.
[ "Configure", "nodes", "available", "in", "the", "system", "based", "on", "supplied", "configuration", "." ]
def _setup_nodes(cls, system, nodes_definition, nodes_definition_file_name): """Configure nodes available in the system based on supplied configuration. :param system: system instance to be used :type system: selinon.system.System :param nodes_definition: a list of dictionaries holding flow configuration :type nodes_definition: dict :param nodes_definition_file_name: a name of nodes definition file (used in messages for better debugging) :type nodes_definition_file_name: str """ def append_file_name_if_any(error_message): """Append file name to an error message.""" if nodes_definition_file_name: error_message += ", in file %r" % nodes_definition_file_name return error_message # known top-level YAML keys for YAML config files (note flows.yml could be merged to nodes.yml) known_yaml_keys = ('tasks', 'flows', 'storages', 'global', 'flow-definitions') unknown_conf = check_conf_keys(nodes_definition, known_conf_opts=known_yaml_keys) if unknown_conf: cls._logger.warning("Unknown configuration keys in the nodes definitions, " "will be skipped: %s", list(unknown_conf.keys())) for storage_dict in nodes_definition.get('storages', []): storage = Storage.from_dict(storage_dict) system.add_storage(storage) if 'global' in nodes_definition: GlobalConfig.from_dict(system, nodes_definition['global']) if 'tasks' not in nodes_definition or nodes_definition['tasks'] is None: raise ConfigurationError(append_file_name_if_any("No tasks defined in the system")) for task_dict in nodes_definition['tasks']: task = Task.from_dict(task_dict, system) task_class = system.class_of_task(task) if not task_class: task_class = TaskClass(task.class_name, task.import_path) system.task_classes.append(task_class) task_class.add_task(task) task.task_class = task_class system.add_task(task) if 'flows' not in nodes_definition or nodes_definition['flows'] is None: raise ConfigurationError(append_file_name_if_any("No flow listing defined in the system " "in nodes definition")) for flow_name in nodes_definition['flows']: flow = Flow(flow_name) system.add_flow(flow)
[ "def", "_setup_nodes", "(", "cls", ",", "system", ",", "nodes_definition", ",", "nodes_definition_file_name", ")", ":", "def", "append_file_name_if_any", "(", "error_message", ")", ":", "\"\"\"Append file name to an error message.\"\"\"", "if", "nodes_definition_file_name", ":", "error_message", "+=", "\", in file %r\"", "%", "nodes_definition_file_name", "return", "error_message", "# known top-level YAML keys for YAML config files (note flows.yml could be merged to nodes.yml)", "known_yaml_keys", "=", "(", "'tasks'", ",", "'flows'", ",", "'storages'", ",", "'global'", ",", "'flow-definitions'", ")", "unknown_conf", "=", "check_conf_keys", "(", "nodes_definition", ",", "known_conf_opts", "=", "known_yaml_keys", ")", "if", "unknown_conf", ":", "cls", ".", "_logger", ".", "warning", "(", "\"Unknown configuration keys in the nodes definitions, \"", "\"will be skipped: %s\"", ",", "list", "(", "unknown_conf", ".", "keys", "(", ")", ")", ")", "for", "storage_dict", "in", "nodes_definition", ".", "get", "(", "'storages'", ",", "[", "]", ")", ":", "storage", "=", "Storage", ".", "from_dict", "(", "storage_dict", ")", "system", ".", "add_storage", "(", "storage", ")", "if", "'global'", "in", "nodes_definition", ":", "GlobalConfig", ".", "from_dict", "(", "system", ",", "nodes_definition", "[", "'global'", "]", ")", "if", "'tasks'", "not", "in", "nodes_definition", "or", "nodes_definition", "[", "'tasks'", "]", "is", "None", ":", "raise", "ConfigurationError", "(", "append_file_name_if_any", "(", "\"No tasks defined in the system\"", ")", ")", "for", "task_dict", "in", "nodes_definition", "[", "'tasks'", "]", ":", "task", "=", "Task", ".", "from_dict", "(", "task_dict", ",", "system", ")", "task_class", "=", "system", ".", "class_of_task", "(", "task", ")", "if", "not", "task_class", ":", "task_class", "=", "TaskClass", "(", "task", ".", "class_name", ",", "task", ".", "import_path", ")", "system", ".", "task_classes", ".", "append", "(", "task_class", ")", "task_class", ".", "add_task", "(", "task", ")", "task", ".", "task_class", "=", "task_class", "system", ".", "add_task", "(", "task", ")", "if", "'flows'", "not", "in", "nodes_definition", "or", "nodes_definition", "[", "'flows'", "]", "is", "None", ":", "raise", "ConfigurationError", "(", "append_file_name_if_any", "(", "\"No flow listing defined in the system \"", "\"in nodes definition\"", ")", ")", "for", "flow_name", "in", "nodes_definition", "[", "'flows'", "]", ":", "flow", "=", "Flow", "(", "flow_name", ")", "system", ".", "add_flow", "(", "flow", ")" ]
https://github.com/selinon/selinon/blob/3613153566d454022a138639f0375c63f490c4cb/selinon/system.py#L1045-L1095
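Judging from the validation in `_setup_nodes` above, a nodes definition must at least carry non-empty 'tasks' and 'flows' keys. A hypothetical minimal configuration dict that would pass those checks (the field names inside the task entry are assumptions, not taken from the record):

nodes_definition = {
    # 'tasks' and 'flows' must be present and non-None,
    # otherwise ConfigurationError is raised.
    "tasks": [
        {"name": "HelloTask", "import": "myapp.tasks"},  # hypothetical fields
    ],
    "flows": ["hello_flow"],
}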
CalebBell/thermo
572a47d1b03d49fe609b8d5f826fa6a7cde00828
thermo/eos.py
python
IG.solve_T
(self, P, V, solution=None)
return P*V*R_inv
r'''Method to calculate `T` from a specified `P` and `V` for the ideal gas equation of state. .. math:: T = \frac{PV}{R} Parameters ---------- P : float Pressure, [Pa] V : float Molar volume, [m^3/mol] solution : str or None, optional Not used, [-] Returns ------- T : float Temperature, [K] Notes -----
r'''Method to calculate `T` from a specified `P` and `V` for the ideal gas equation of state.
[ "r", "Method", "to", "calculate", "T", "from", "a", "specified", "P", "and", "V", "for", "the", "ideal", "gas", "equation", "of", "state", "." ]
def solve_T(self, P, V, solution=None): r'''Method to calculate `T` from a specified `P` and `V` for the ideal gas equation of state. .. math:: T = \frac{PV}{R} Parameters ---------- P : float Pressure, [Pa] V : float Molar volume, [m^3/mol] solution : str or None, optional Not used, [-] Returns ------- T : float Temperature, [K] Notes ----- ''' self.no_T_spec = True return P*V*R_inv
[ "def", "solve_T", "(", "self", ",", "P", ",", "V", ",", "solution", "=", "None", ")", ":", "self", ".", "no_T_spec", "=", "True", "return", "P", "*", "V", "*", "R_inv" ]
https://github.com/CalebBell/thermo/blob/572a47d1b03d49fe609b8d5f826fa6a7cde00828/thermo/eos.py#L7250-L7275
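A one-line numeric check of the ideal-gas inversion above, T = PV/R, with R in J/(mol·K):

R = 8.31446261815324           # J/(mol*K)
P, V = 101325.0, 0.024465      # Pa and m^3/mol (about 1 atm, ambient molar volume)
T = P * V / R
print(T)                       # ~298.1 K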
eblot/pyftdi
72797e5ae0945d6f771f13102d7fccac255d83d7
pyftdi/jtag.py
python
JtagController.write_with_read
(self, out: BitSequence, use_last: bool = False)
return len(out)
Write the given BitSequence while reading the same number of bits into the FTDI read buffer. Returns the number of bits written.
Write the given BitSequence while reading the same number of bits into the FTDI read buffer. Returns the number of bits written.
[ "Write", "the", "given", "BitSequence", "while", "reading", "the", "same", "number", "of", "bits", "into", "the", "FTDI", "read", "buffer", ".", "Returns", "the", "number", "of", "bits", "written", "." ]
def write_with_read(self, out: BitSequence, use_last: bool = False) -> int: """Write the given BitSequence while reading the same number of bits into the FTDI read buffer. Returns the number of bits written.""" if not isinstance(out, BitSequence): raise JtagError('Expect a BitSequence') if use_last: (out, self._last) = (out[:-1], int(out[-1])) byte_count = len(out)//8 pos = 8*byte_count bit_count = len(out)-pos if not byte_count and not bit_count: raise JtagError("Nothing to shift") if byte_count: blen = byte_count-1 # print("RW OUT %s" % out[:pos]) cmd = bytearray((Ftdi.RW_BYTES_PVE_NVE_LSB, blen, (blen >> 8) & 0xff)) cmd.extend(out[:pos].tobytes(msby=True)) self._stack_cmd(cmd) # print("push %d bytes" % byte_count) if bit_count: # print("RW OUT %s" % out[pos:]) cmd = bytearray((Ftdi.RW_BITS_PVE_NVE_LSB, bit_count-1)) cmd.append(out[pos:].tobyte()) self._stack_cmd(cmd) # print("push %d bits" % bit_count) return len(out)
[ "def", "write_with_read", "(", "self", ",", "out", ":", "BitSequence", ",", "use_last", ":", "bool", "=", "False", ")", "->", "int", ":", "if", "not", "isinstance", "(", "out", ",", "BitSequence", ")", ":", "raise", "JtagError", "(", "'Expect a BitSequence'", ")", "if", "use_last", ":", "(", "out", ",", "self", ".", "_last", ")", "=", "(", "out", "[", ":", "-", "1", "]", ",", "int", "(", "out", "[", "-", "1", "]", ")", ")", "byte_count", "=", "len", "(", "out", ")", "//", "8", "pos", "=", "8", "*", "byte_count", "bit_count", "=", "len", "(", "out", ")", "-", "pos", "if", "not", "byte_count", "and", "not", "bit_count", ":", "raise", "JtagError", "(", "\"Nothing to shift\"", ")", "if", "byte_count", ":", "blen", "=", "byte_count", "-", "1", "# print(\"RW OUT %s\" % out[:pos])", "cmd", "=", "bytearray", "(", "(", "Ftdi", ".", "RW_BYTES_PVE_NVE_LSB", ",", "blen", ",", "(", "blen", ">>", "8", ")", "&", "0xff", ")", ")", "cmd", ".", "extend", "(", "out", "[", ":", "pos", "]", ".", "tobytes", "(", "msby", "=", "True", ")", ")", "self", ".", "_stack_cmd", "(", "cmd", ")", "# print(\"push %d bytes\" % byte_count)", "if", "bit_count", ":", "# print(\"RW OUT %s\" % out[pos:])", "cmd", "=", "bytearray", "(", "(", "Ftdi", ".", "RW_BITS_PVE_NVE_LSB", ",", "bit_count", "-", "1", ")", ")", "cmd", ".", "append", "(", "out", "[", "pos", ":", "]", ".", "tobyte", "(", ")", ")", "self", ".", "_stack_cmd", "(", "cmd", ")", "# print(\"push %d bits\" % bit_count)", "return", "len", "(", "out", ")" ]
https://github.com/eblot/pyftdi/blob/72797e5ae0945d6f771f13102d7fccac255d83d7/pyftdi/jtag.py#L299-L327
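The byte/bit bookkeeping in the record above is the part worth isolating: whole bytes are shifted with the byte command, the remainder with the bit command. A plain-Python sketch of that split (no FTDI hardware involved):

nbits = 13
byte_count = nbits // 8        # 1 full byte goes out via RW_BYTES_PVE_NVE_LSB
pos = 8 * byte_count           # bits consumed by the byte command
bit_count = nbits - pos        # 5 leftover bits go out via RW_BITS_PVE_NVE_LSB
print(byte_count, bit_count)   # 1 5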
VLSIDA/OpenRAM
f66aac3264598eeae31225c62b6a4af52412d407
compiler/pgates/pinv.py
python
pinv.create_ptx
(self)
Create the PMOS and NMOS netlist.
Create the PMOS and NMOS netlist.
[ "Create", "the", "PMOS", "and", "NMOS", "netlist", "." ]
def create_ptx(self): """ Create the PMOS and NMOS netlist. """ self.pmos_inst = self.add_inst(name="pinv_pmos", mod=self.pmos) self.connect_inst(["Z", "A", "vdd", "vdd"]) self.nmos_inst = self.add_inst(name="pinv_nmos", mod=self.nmos) self.connect_inst(["Z", "A", "gnd", "gnd"])
[ "def", "create_ptx", "(", "self", ")", ":", "self", ".", "pmos_inst", "=", "self", ".", "add_inst", "(", "name", "=", "\"pinv_pmos\"", ",", "mod", "=", "self", ".", "pmos", ")", "self", ".", "connect_inst", "(", "[", "\"Z\"", ",", "\"A\"", ",", "\"vdd\"", ",", "\"vdd\"", "]", ")", "self", ".", "nmos_inst", "=", "self", ".", "add_inst", "(", "name", "=", "\"pinv_nmos\"", ",", "mod", "=", "self", ".", "nmos", ")", "self", ".", "connect_inst", "(", "[", "\"Z\"", ",", "\"A\"", ",", "\"gnd\"", ",", "\"gnd\"", "]", ")" ]
https://github.com/VLSIDA/OpenRAM/blob/f66aac3264598eeae31225c62b6a4af52412d407/compiler/pgates/pinv.py#L222-L233
openstack/ironic
b392dc19bcd29cef5a69ec00d2f18a7a19a679e5
ironic/db/api.py
python
Connection.create_volume_connector
(self, connector_info)
Create a new volume connector. :param connector_info: Dictionary containing information about the connector. Example:: { 'uuid': '000000-..', 'type': 'wwnn', 'connector_id': '00:01:02:03:04:05:06', 'node_id': 2 } :returns: A volume connector. :raises: VolumeConnectorTypeAndIdAlreadyExists If a connector already exists with a matching type and connector_id. :raises: VolumeConnectorAlreadyExists If a volume connector with the same UUID already exists.
Create a new volume connector.
[ "Create", "a", "new", "volume", "connector", "." ]
def create_volume_connector(self, connector_info): """Create a new volume connector. :param connector_info: Dictionary containing information about the connector. Example:: { 'uuid': '000000-..', 'type': 'wwnn', 'connector_id': '00:01:02:03:04:05:06', 'node_id': 2 } :returns: A volume connector. :raises: VolumeConnectorTypeAndIdAlreadyExists If a connector already exists with a matching type and connector_id. :raises: VolumeConnectorAlreadyExists If a volume connector with the same UUID already exists. """
[ "def", "create_volume_connector", "(", "self", ",", "connector_info", ")", ":" ]
https://github.com/openstack/ironic/blob/b392dc19bcd29cef5a69ec00d2f18a7a19a679e5/ironic/db/api.py#L795-L813
ValvePython/steam
7aef9d2df57c2195f35bd85013e1b5ccb04624a5
steam/steamid.py
python
SteamID.instance
(self)
return (int(self) >> 32) & 0xFFffF
:rtype: :class:`int`
:rtype: :class:`int`
[ ":", "rtype", ":", ":", "class", ":", "int" ]
def instance(self): """ :rtype: :class:`int` """ return (int(self) >> 32) & 0xFFffF
[ "def", "instance", "(", "self", ")", ":", "return", "(", "int", "(", "self", ")", ">>", "32", ")", "&", "0xFFffF" ]
https://github.com/ValvePython/steam/blob/7aef9d2df57c2195f35bd85013e1b5ccb04624a5/steam/steamid.py#L98-L102
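The property above reads the 20-bit instance field from bits 32..51 of a 64-bit SteamID. A standalone check with a made-up ID whose instance field is 1 (bit layout per the usual SteamID packing: account 0..31, instance 32..51, type 52..55, universe 56..63):

steamid64 = (1 << 56) | (1 << 52) | (1 << 32) | 12345   # universe=1, type=1, instance=1, account=12345
instance = (steamid64 >> 32) & 0xFFFFF
print(instance)   # 1 -- the type and universe bits are masked away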
deepmind/dm_control
806a10e896e7c887635328bfa8352604ad0fedae
dm_control/suite/acrobot.py
python
Physics.orientations
(self)
return np.concatenate((self.horizontal(), self.vertical()))
Returns the sines and cosines of the pole angles.
Returns the sines and cosines of the pole angles.
[ "Returns", "the", "sines", "and", "cosines", "of", "the", "pole", "angles", "." ]
def orientations(self): """Returns the sines and cosines of the pole angles.""" return np.concatenate((self.horizontal(), self.vertical()))
[ "def", "orientations", "(", "self", ")", ":", "return", "np", ".", "concatenate", "(", "(", "self", ".", "horizontal", "(", ")", ",", "self", ".", "vertical", "(", ")", ")", ")" ]
https://github.com/deepmind/dm_control/blob/806a10e896e7c887635328bfa8352604ad0fedae/dm_control/suite/acrobot.py#L76-L78
wifiphisher/wifiphisher
90b095a7f66ecdab167af2e857196ae0d9702aed
wifiphisher/common/interfaces.py
python
NetworkManager.get_interface_mac
(self, interface_name)
return self._name_to_object[interface_name].mac_address
Return the MAC address of the interface :param self: A NetworkManager object :param interface_name: Name of an interface :type self: NetworkManager :type interface_name: str :return: Interface MAC address :rtype: str
Return the MAC address of the interface
[ "Return", "the", "MAC", "address", "of", "the", "interface" ]
def get_interface_mac(self, interface_name): """ Return the MAC address of the interface :param self: A NetworkManager object :param interface_name: Name of an interface :type self: NetworkManager :type interface_name: str :return: Interface MAC address :rtype: str """ return self._name_to_object[interface_name].mac_address
[ "def", "get_interface_mac", "(", "self", ",", "interface_name", ")", ":", "return", "self", ".", "_name_to_object", "[", "interface_name", "]", ".", "mac_address" ]
https://github.com/wifiphisher/wifiphisher/blob/90b095a7f66ecdab167af2e857196ae0d9702aed/wifiphisher/common/interfaces.py#L524-L536
biopython/biopython
2dd97e71762af7b046d7f7f8a4f1e38db6b06c86
Bio/Graphics/GenomeDiagram/_Track.py
python
Track.to_string
(self, verbose=0)
Return a formatted string with information about the track. Arguments: - verbose - Boolean indicating whether a short or complete account of the track is required
Return a formatted string with information about the track.
[ "Return", "a", "formatted", "string", "with", "information", "about", "the", "track", "." ]
def to_string(self, verbose=0): """Return a formatted string with information about the track. Arguments: - verbose - Boolean indicating whether a short or complete account of the track is required """ if not verbose: # Return the short description return f"{self}" # Use __str__ method instead else: # Return the long description outstr = [f"\n<{self.__class__}: {self.name}>"] outstr.append("%d sets" % len(self._sets)) for key in self._sets: outstr.append(f"set: {self._sets[key]}") return "\n".join(outstr)
[ "def", "to_string", "(", "self", ",", "verbose", "=", "0", ")", ":", "if", "not", "verbose", ":", "# Return the short description", "return", "f\"{self}\"", "# Use __str__ method instead", "else", ":", "# Return the long description", "outstr", "=", "[", "f\"\\n<{self.__class__}: {self.name}>\"", "]", "outstr", ".", "append", "(", "\"%d sets\"", "%", "len", "(", "self", ".", "_sets", ")", ")", "for", "key", "in", "self", ".", "_sets", ":", "outstr", ".", "append", "(", "f\"set: {self._sets[key]}\"", ")", "return", "\"\\n\"", ".", "join", "(", "outstr", ")" ]
https://github.com/biopython/biopython/blob/2dd97e71762af7b046d7f7f8a4f1e38db6b06c86/Bio/Graphics/GenomeDiagram/_Track.py#L260-L275
triaquae/triaquae
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
TriAquae/models/Ubuntu_12/ecdsa/numbertheory.py
python
lcm2
(a,b)
return (a*b)//gcd(a,b)
Least common multiple of two integers.
Least common multiple of two integers.
[ "Least", "common", "multiple", "of", "two", "integers", "." ]
def lcm2(a,b): """Least common multiple of two integers.""" return (a*b)//gcd(a,b)
[ "def", "lcm2", "(", "a", ",", "b", ")", ":", "return", "(", "a", "*", "b", ")", "//", "gcd", "(", "a", ",", "b", ")" ]
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/Ubuntu_12/ecdsa/numbertheory.py#L226-L229
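A quick standalone check of the lcm identity above, with math.gcd standing in for the module's own gcd:

from math import gcd

def lcm2(a, b):
    return (a * b) // gcd(a, b)

print(lcm2(4, 6))    # 12
print(lcm2(21, 6))   # 42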
Blosc/bloscpack
5efdadf5b6f61e995df1817943afb9629ce28c89
bloscpack/memory_io.py
python
CompressedMemorySink.write_bloscpack_header
(self)
[]
def write_bloscpack_header(self): # no op pass
[ "def", "write_bloscpack_header", "(", "self", ")", ":", "# no op", "pass" ]
https://github.com/Blosc/bloscpack/blob/5efdadf5b6f61e995df1817943afb9629ce28c89/bloscpack/memory_io.py#L85-L87
cuthbertLab/music21
bd30d4663e52955ed922c10fdf541419d8c67671
music21/chord/__init__.py
python
Chord.intervalVectorString
(self)
return Chord.formatVectorString(self.intervalVector)
Return the interval vector as a string representation. >>> c1 = chord.Chord(['c', 'e-', 'g']) >>> c1.intervalVectorString '<001110>'
Return the interval vector as a string representation.
[ "Return", "the", "interval", "vector", "as", "a", "string", "representation", "." ]
def intervalVectorString(self): ''' Return the interval vector as a string representation. >>> c1 = chord.Chord(['c', 'e-', 'g']) >>> c1.intervalVectorString '<001110>' ''' return Chord.formatVectorString(self.intervalVector)
[ "def", "intervalVectorString", "(", "self", ")", ":", "return", "Chord", ".", "formatVectorString", "(", "self", ".", "intervalVector", ")" ]
https://github.com/cuthbertLab/music21/blob/bd30d4663e52955ed922c10fdf541419d8c67671/music21/chord/__init__.py#L4579-L4587
youknowone/itunes-iap
2b744356b9cdeb7c6b6a01b84757c705e057bc1b
itunesiap/legacy.py
python
Request.verify_from
(self, url, verify_ssl=False)
return self.result
Try verification from given url.
Try verification from given url.
[ "Try", "verification", "from", "given", "url", "." ]
def verify_from(self, url, verify_ssl=False): """Try verification from given url.""" # If the password exists from kwargs, pass it up with the request, otherwise leave it alone post_body = json.dumps(self.request_content) try: self.response = requests.post(url, post_body, verify=verify_ssl) except requests.exceptions.RequestException: # pragma: no cover raise if self.response.status_code != 200: raise exceptions.ItunesServerNotAvailable(self.response.status_code, self.response.content) status = self.result['status'] if status != 0: e = exceptions.InvalidReceipt(self.result) e.receipt = self.result.get('receipt', None) raise e return self.result
[ "def", "verify_from", "(", "self", ",", "url", ",", "verify_ssl", "=", "False", ")", ":", "# If the password exists from kwargs, pass it up with the request, otherwise leave it alone", "post_body", "=", "json", ".", "dumps", "(", "self", ".", "request_content", ")", "try", ":", "self", ".", "response", "=", "requests", ".", "post", "(", "url", ",", "post_body", ",", "verify", "=", "verify_ssl", ")", "except", "requests", ".", "exceptions", ".", "RequestException", ":", "# pragma: no cover", "raise", "if", "self", ".", "response", ".", "status_code", "!=", "200", ":", "raise", "exceptions", ".", "ItunesServerNotAvailable", "(", "self", ".", "response", ".", "status_code", ",", "self", ".", "response", ".", "content", ")", "status", "=", "self", ".", "result", "[", "'status'", "]", "if", "status", "!=", "0", ":", "e", "=", "exceptions", ".", "InvalidReceipt", "(", "self", ".", "result", ")", "e", ".", "receipt", "=", "self", ".", "result", ".", "get", "(", "'receipt'", ",", "None", ")", "raise", "e", "return", "self", ".", "result" ]
https://github.com/youknowone/itunes-iap/blob/2b744356b9cdeb7c6b6a01b84757c705e057bc1b/itunesiap/legacy.py#L89-L106
pantsbuild/pex
473c6ac732ed4bc338b4b20a9ec930d1d722c9b4
pex/vendor/_vendored/setuptools/setuptools/dist.py
python
Feature.warn_deprecated
()
[]
def warn_deprecated(): msg = ( "Features are deprecated and will be removed in a future " "version. See https://github.com/pypa/setuptools/issues/65." ) warnings.warn(msg, DistDeprecationWarning, stacklevel=3)
[ "def", "warn_deprecated", "(", ")", ":", "msg", "=", "(", "\"Features are deprecated and will be removed in a future \"", "\"version. See https://github.com/pypa/setuptools/issues/65.\"", ")", "warnings", ".", "warn", "(", "msg", ",", "DistDeprecationWarning", ",", "stacklevel", "=", "3", ")" ]
https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/vendor/_vendored/setuptools/setuptools/dist.py#L1228-L1233
qiucheng025/zao-
3a5edf3607b3a523f95746bc69b688090c76d89a
lib/gui/display_analysis.py
python
Analysis.summarise_data
(session)
return session.full_summary
Summarise data in a LongRunningThread as it can take a while
Summarise data in a LongRunningThread as it can take a while
[ "Summarise", "data", "in", "a", "LongRunningThread", "as", "it", "can", "take", "a", "while" ]
def summarise_data(session): """ Summarise data in a LongRunningThread as it can take a while """ return session.full_summary
[ "def", "summarise_data", "(", "session", ")", ":", "return", "session", ".", "full_summary" ]
https://github.com/qiucheng025/zao-/blob/3a5edf3607b3a523f95746bc69b688090c76d89a/lib/gui/display_analysis.py#L157-L159
apache/incubator-spot
2d60a2adae7608b43e90ce1b9ec0adf24f6cc8eb
spot-oa/api/resources/flow.py
python
time_line
(ip,date)
return ImpalaEngine.execute_query_as_list(time_line_query)
[]
def time_line(ip,date): db = Configuration.db() time_line_query = (""" SELECT ip_threat,tstart,tend,srcip,dstip,proto, sport,dport,ipkt,ibyt FROM {0}.flow_timeline WHERE y={1} AND m={2} AND d={3} AND ip_threat = '{4}' """).format(db,date.year,date.month,date.day,ip) return ImpalaEngine.execute_query_as_list(time_line_query)
[ "def", "time_line", "(", "ip", ",", "date", ")", ":", "db", "=", "Configuration", ".", "db", "(", ")", "time_line_query", "=", "(", "\"\"\"\n SELECT\n ip_threat,tstart,tend,srcip,dstip,proto,\n\t\t sport,dport,ipkt,ibyt\n FROM {0}.flow_timeline\n WHERE\n y={1} AND m={2} AND d={3}\n AND ip_threat = '{4}'\n \"\"\"", ")", ".", "format", "(", "db", ",", "date", ".", "year", ",", "date", ".", "month", ",", "date", ".", "day", ",", "ip", ")", "return", "ImpalaEngine", ".", "execute_query_as_list", "(", "time_line_query", ")" ]
https://github.com/apache/incubator-spot/blob/2d60a2adae7608b43e90ce1b9ec0adf24f6cc8eb/spot-oa/api/resources/flow.py#L178-L191
merkremont/LineVodka
c2fa74107cecf00dd17416b62e4eb579e2c7bbaf
LineAlpha/LineThrift/ChannelService.py
python
Iface.getChannels
(self, lastSynced, locale)
Parameters: - lastSynced - locale
Parameters: - lastSynced - locale
[ "Parameters", ":", "-", "lastSynced", "-", "locale" ]
def getChannels(self, lastSynced, locale): """ Parameters: - lastSynced - locale """ pass
[ "def", "getChannels", "(", "self", ",", "lastSynced", ",", "locale", ")", ":", "pass" ]
https://github.com/merkremont/LineVodka/blob/c2fa74107cecf00dd17416b62e4eb579e2c7bbaf/LineAlpha/LineThrift/ChannelService.py#L75-L81
pyscf/pyscf
0adfb464333f5ceee07b664f291d4084801bae64
pyscf/fci/addons.py
python
fix_spin_
(fciobj, shift=PENALTY, ss=None, **kwargs)
return fciobj
r'''If FCI solver cannot stay on spin eigenfunction, this function can add a shift to the states which have wrong spin. .. math:: (H + shift*S^2) |\Psi\rangle = E |\Psi\rangle Args: fciobj : An instance of :class:`FCISolver` Kwargs: shift : float Level shift for states which have different spin ss : number S^2 expection value == s*(s+1) Returns A modified FCI object based on fciobj.
r'''If FCI solver cannot stay on spin eigenfunction, this function can add a shift to the states which have wrong spin.
[ "r", "If", "FCI", "solver", "cannot", "stay", "on", "spin", "eigenfunction", "this", "function", "can", "add", "a", "shift", "to", "the", "states", "which", "have", "wrong", "spin", "." ]
def fix_spin_(fciobj, shift=PENALTY, ss=None, **kwargs): r'''If FCI solver cannot stay on spin eigenfunction, this function can add a shift to the states which have wrong spin. .. math:: (H + shift*S^2) |\Psi\rangle = E |\Psi\rangle Args: fciobj : An instance of :class:`FCISolver` Kwargs: shift : float Level shift for states which have different spin ss : number S^2 expection value == s*(s+1) Returns A modified FCI object based on fciobj. ''' import types if 'ss_value' in kwargs: sys.stderr.write('fix_spin_: kwarg "ss_value" will be removed in future release. ' 'It was replaced by "ss"\n') ss_value = kwargs['ss_value'] else: ss_value = ss if (not isinstance(fciobj, types.ModuleType) and 'contract_2e' in getattr(fciobj, '__dict__', {})): del fciobj.contract_2e # To avoid initialize twice old_contract_2e = fciobj.contract_2e def contract_2e(eri, fcivec, norb, nelec, link_index=None, **kwargs): if isinstance(nelec, (int, numpy.number)): sz = (nelec % 2) * .5 else: sz = abs(nelec[0]-nelec[1]) * .5 if ss_value is None: ss = sz*(sz+1) else: ss = ss_value if ss < sz*(sz+1)+.1: # (S^2-ss)|Psi> to shift state other than the lowest state ci1 = fciobj.contract_ss(fcivec, norb, nelec).reshape(fcivec.shape) ci1 -= ss * fcivec else: # (S^2-ss)^2|Psi> to shift states except the given spin. # It still relies on the quality of initial guess tmp = fciobj.contract_ss(fcivec, norb, nelec).reshape(fcivec.shape) tmp -= ss * fcivec ci1 = -ss * tmp ci1 += fciobj.contract_ss(tmp, norb, nelec).reshape(fcivec.shape) tmp = None ci1 *= shift ci0 = old_contract_2e(eri, fcivec, norb, nelec, link_index, **kwargs) ci1 += ci0.reshape(fcivec.shape) return ci1 fciobj.davidson_only = True fciobj.contract_2e = contract_2e return fciobj
[ "def", "fix_spin_", "(", "fciobj", ",", "shift", "=", "PENALTY", ",", "ss", "=", "None", ",", "*", "*", "kwargs", ")", ":", "import", "types", "if", "'ss_value'", "in", "kwargs", ":", "sys", ".", "stderr", ".", "write", "(", "'fix_spin_: kwarg \"ss_value\" will be removed in future release. '", "'It was replaced by \"ss\"\\n'", ")", "ss_value", "=", "kwargs", "[", "'ss_value'", "]", "else", ":", "ss_value", "=", "ss", "if", "(", "not", "isinstance", "(", "fciobj", ",", "types", ".", "ModuleType", ")", "and", "'contract_2e'", "in", "getattr", "(", "fciobj", ",", "'__dict__'", ",", "{", "}", ")", ")", ":", "del", "fciobj", ".", "contract_2e", "# To avoid initialize twice", "old_contract_2e", "=", "fciobj", ".", "contract_2e", "def", "contract_2e", "(", "eri", ",", "fcivec", ",", "norb", ",", "nelec", ",", "link_index", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "nelec", ",", "(", "int", ",", "numpy", ".", "number", ")", ")", ":", "sz", "=", "(", "nelec", "%", "2", ")", "*", ".5", "else", ":", "sz", "=", "abs", "(", "nelec", "[", "0", "]", "-", "nelec", "[", "1", "]", ")", "*", ".5", "if", "ss_value", "is", "None", ":", "ss", "=", "sz", "*", "(", "sz", "+", "1", ")", "else", ":", "ss", "=", "ss_value", "if", "ss", "<", "sz", "*", "(", "sz", "+", "1", ")", "+", ".1", ":", "# (S^2-ss)|Psi> to shift state other than the lowest state", "ci1", "=", "fciobj", ".", "contract_ss", "(", "fcivec", ",", "norb", ",", "nelec", ")", ".", "reshape", "(", "fcivec", ".", "shape", ")", "ci1", "-=", "ss", "*", "fcivec", "else", ":", "# (S^2-ss)^2|Psi> to shift states except the given spin.", "# It still relies on the quality of initial guess", "tmp", "=", "fciobj", ".", "contract_ss", "(", "fcivec", ",", "norb", ",", "nelec", ")", ".", "reshape", "(", "fcivec", ".", "shape", ")", "tmp", "-=", "ss", "*", "fcivec", "ci1", "=", "-", "ss", "*", "tmp", "ci1", "+=", "fciobj", ".", "contract_ss", "(", "tmp", ",", "norb", ",", "nelec", ")", ".", "reshape", "(", "fcivec", ".", "shape", ")", "tmp", "=", "None", "ci1", "*=", "shift", "ci0", "=", "old_contract_2e", "(", "eri", ",", "fcivec", ",", "norb", ",", "nelec", ",", "link_index", ",", "*", "*", "kwargs", ")", "ci1", "+=", "ci0", ".", "reshape", "(", "fcivec", ".", "shape", ")", "return", "ci1", "fciobj", ".", "davidson_only", "=", "True", "fciobj", ".", "contract_2e", "=", "contract_2e", "return", "fciobj" ]
https://github.com/pyscf/pyscf/blob/0adfb464333f5ceee07b664f291d4084801bae64/pyscf/fci/addons.py#L621-L683
django/django
0a17666045de6739ae1c2ac695041823d5f827f7
django/utils/hashable.py
python
make_hashable
(value)
return value
Attempt to make value hashable or raise a TypeError if it fails. The returned value should generate the same hash for equal values.
Attempt to make value hashable or raise a TypeError if it fails.
[ "Attempt", "to", "make", "value", "hashable", "or", "raise", "a", "TypeError", "if", "it", "fails", "." ]
def make_hashable(value): """ Attempt to make value hashable or raise a TypeError if it fails. The returned value should generate the same hash for equal values. """ if isinstance(value, dict): return tuple([ (key, make_hashable(nested_value)) for key, nested_value in sorted(value.items()) ]) # Try hash to avoid converting a hashable iterable (e.g. string, frozenset) # to a tuple. try: hash(value) except TypeError: if is_iterable(value): return tuple(map(make_hashable, value)) # Non-hashable, non-iterable. raise return value
[ "def", "make_hashable", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "tuple", "(", "[", "(", "key", ",", "make_hashable", "(", "nested_value", ")", ")", "for", "key", ",", "nested_value", "in", "sorted", "(", "value", ".", "items", "(", ")", ")", "]", ")", "# Try hash to avoid converting a hashable iterable (e.g. string, frozenset)", "# to a tuple.", "try", ":", "hash", "(", "value", ")", "except", "TypeError", ":", "if", "is_iterable", "(", "value", ")", ":", "return", "tuple", "(", "map", "(", "make_hashable", ",", "value", ")", ")", "# Non-hashable, non-iterable.", "raise", "return", "value" ]
https://github.com/django/django/blob/0a17666045de6739ae1c2ac695041823d5f827f7/django/utils/hashable.py#L4-L24
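A quick, hedged illustration of the record above: the sketch below mirrors `make_hashable`'s recursion (dicts become sorted tuples of hashable pairs; other unhashable iterables become tuples) but swaps Django's `is_iterable` helper for a plain `iter()` probe so it runs standalone. The name `make_hashable_sketch` is illustrative, not part of the Django API.

```python
def make_hashable_sketch(value):
    # Dicts collapse to sorted (key, hashable value) tuples so that two
    # equal dicts always produce the same hash.
    if isinstance(value, dict):
        return tuple(
            (key, make_hashable_sketch(nested))
            for key, nested in sorted(value.items())
        )
    # Try hashing first so hashable iterables (str, frozenset) pass through.
    try:
        hash(value)
    except TypeError:
        try:
            iter(value)  # stand-in for django.utils.itercompat.is_iterable
        except TypeError:
            raise  # non-hashable and non-iterable: nothing we can do
        return tuple(map(make_hashable_sketch, value))
    return value


a = make_hashable_sketch({"b": [1, [2, 3]], "a": (4,)})
b = make_hashable_sketch({"a": (4,), "b": [1, [2, 3]]})
assert a == b and hash(a) == hash(b)
```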
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/groups/abelian_gps/values.py
python
AbelianGroupWithValuesElement.__pow__
(self, n)
return pow_self
Exponentiate ``self`` INPUT: - ``n`` -- integer. The exponent. TESTS:: sage: G.<a,b> = AbelianGroupWithValues([5,2], 2) sage: a^3 a^3 sage: (a^3).value() 125
Exponentiate ``self``
[ "Exponentiate", "self" ]
def __pow__(self, n): """ Exponentiate ``self`` INPUT: - ``n`` -- integer. The exponent. TESTS:: sage: G.<a,b> = AbelianGroupWithValues([5,2], 2) sage: a^3 a^3 sage: (a^3).value() 125 """ m = Integer(n) if n != m: raise TypeError('argument n (= '+str(n)+') must be an integer.') pow_self = AbelianGroupElement.__pow__(self, m) pow_self._value = pow(self.value(), m) return pow_self
[ "def", "__pow__", "(", "self", ",", "n", ")", ":", "m", "=", "Integer", "(", "n", ")", "if", "n", "!=", "m", ":", "raise", "TypeError", "(", "'argument n (= '", "+", "str", "(", "n", ")", "+", "') must be an integer.'", ")", "pow_self", "=", "AbelianGroupElement", ".", "__pow__", "(", "self", ",", "m", ")", "pow_self", ".", "_value", "=", "pow", "(", "self", ".", "value", "(", ")", ",", "m", ")", "return", "pow_self" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/groups/abelian_gps/values.py#L309-L330
libertysoft3/saidit
271c7d03adb369f82921d811360b00812e42da24
r2/r2/controllers/api.py
python
ApiController.POST_set_sr_style_enabled
(self, form, jquery, sr_style_enabled)
Update enabling of individual sr themes; refresh the page style
Update enabling of individual sr themes; refresh the page style
[ "Update", "enabling", "of", "individual", "sr", "themes", ";", "refresh", "the", "page", "style" ]
def POST_set_sr_style_enabled(self, form, jquery, sr_style_enabled):
        """Update enabling of individual sr themes; refresh the page style"""
        if feature.is_enabled('stylesheets_everywhere'):
            c.user.set_subreddit_style(c.site, sr_style_enabled)
            c.can_apply_styles = True
            sr = DefaultSR()
            if sr_style_enabled:
                sr = c.site
            elif (c.user.pref_default_theme_sr and
                    feature.is_enabled('stylesheets_everywhere')):
                sr = Subreddit._by_name(c.user.pref_default_theme_sr)
                if (not sr.can_view(c.user) or
                        not c.user.pref_enable_default_themes):
                    sr = DefaultSR()

            sr_stylesheet_url = Reddit.get_subreddit_stylesheet_url(sr)
            if not sr_stylesheet_url:
                sr_stylesheet_url = ""
                c.can_apply_styles = False
            jquery.apply_stylesheet_url(sr_stylesheet_url, sr_style_enabled)

            if not sr.header or header_url(sr.header) == g.default_header_url:
                jquery.remove_header_image()
            else:
                jquery.apply_header_image(header_url(sr.header),
                                          sr.header_size, sr.header_title)
[ "def", "POST_set_sr_style_enabled", "(", "self", ",", "form", ",", "jquery", ",", "sr_style_enabled", ")", ":", "if", "feature", ".", "is_enabled", "(", "'stylesheets_everywhere'", ")", ":", "c", ".", "user", ".", "set_subreddit_style", "(", "c", ".", "site", ",", "sr_style_enabled", ")", "c", ".", "can_apply_styles", "=", "True", "sr", "=", "DefaultSR", "(", ")", "if", "sr_style_enabled", ":", "sr", "=", "c", ".", "site", "elif", "(", "c", ".", "user", ".", "pref_default_theme_sr", "and", "feature", ".", "is_enabled", "(", "'stylesheets_everywhere'", ")", ")", ":", "sr", "=", "Subreddit", ".", "_by_name", "(", "c", ".", "user", ".", "pref_default_theme_sr", ")", "if", "(", "not", "sr", ".", "can_view", "(", "c", ".", "user", ")", "or", "not", "c", ".", "user", ".", "pref_enable_default_themes", ")", ":", "sr", "=", "DefaultSR", "(", ")", "sr_stylesheet_url", "=", "Reddit", ".", "get_subreddit_stylesheet_url", "(", "sr", ")", "if", "not", "sr_stylesheet_url", ":", "sr_stylesheet_url", "=", "\"\"", "c", ".", "can_apply_styles", "=", "False", "jquery", ".", "apply_stylesheet_url", "(", "sr_stylesheet_url", ",", "sr_style_enabled", ")", "if", "not", "sr", ".", "header", "or", "header_url", "(", "sr", ".", "header", ")", "==", "g", ".", "default_header_url", ":", "jquery", ".", "remove_header_image", "(", ")", "else", ":", "jquery", ".", "apply_header_image", "(", "header_url", "(", "sr", ".", "header", ")", ",", "sr", ".", "header_size", ",", "sr", ".", "header_title", ")" ]
https://github.com/libertysoft3/saidit/blob/271c7d03adb369f82921d811360b00812e42da24/r2/r2/controllers/api.py#L4494-L4520
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/apps/userreports/sql/adapter.py
python
MultiDBSqlAdapter.get_distinct_values
(self, column, limit)
return self.main_adapter.get_distinct_values(column, limit)
[]
def get_distinct_values(self, column, limit): return self.main_adapter.get_distinct_values(column, limit)
[ "def", "get_distinct_values", "(", "self", ",", "column", ",", "limit", ")", ":", "return", "self", ".", "main_adapter", ".", "get_distinct_values", "(", "column", ",", "limit", ")" ]
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/userreports/sql/adapter.py#L259-L260
SigmaHQ/sigma
6f7d28b52a6468b2430e8d7dfefb79dc01e2f1af
tools/sigma/backends/sumologic.py
python
SumoLogicBackend.generateAggregation
(self, agg)
[]
def generateAggregation(self, agg): # lnx_shell_priv_esc_prep.yml # print("DEBUG generateAggregation(): %s, %s, %s, %s" % (agg.aggfunc_notrans, agg.aggfield, agg.groupfield, agg.cond_op)) if agg.groupfield == 'host': agg.groupfield = 'hostname' if agg.aggfunc_notrans == 'count() by': agg.aggfunc_notrans = 'count by' if agg.aggfunc == SigmaAggregationParser.AGGFUNC_NEAR: raise NotImplementedError("The 'near' aggregation operator is not yet implemented for this backend") if self.keypresent: if not agg.groupfield: if agg.aggfield: agg.aggfunc_notrans = "count_distinct" return " \n| %s(%s) \n| where _count_distinct %s %s" % ( agg.aggfunc_notrans, agg.aggfield, agg.cond_op, agg.condition) else: return " \n| %s | where _count %s %s" % ( agg.aggfunc_notrans, agg.cond_op, agg.condition) elif agg.groupfield: if agg.aggfield: agg.aggfunc_notrans = "count_distinct" return " \n| %s(%s) by %s \n| where _count_distinct %s %s" % ( agg.aggfunc_notrans, agg.aggfield, agg.groupfield, agg.cond_op, agg.condition) else: return " \n| %s by %s \n| where _count %s %s" % ( agg.aggfunc_notrans, agg.groupfield, agg.cond_op, agg.condition) else: return " \n| %s | where _count %s %s" % (agg.aggfunc_notrans, agg.cond_op, agg.condition) else: if not agg.groupfield: if agg.aggfield: agg.aggfunc_notrans = "count_distinct" return " \n| parse \"[%s=*]\" as searched nodrop\n| %s(searched) \n| where _count_distinct %s %s" % ( agg.aggfield, agg.aggfunc_notrans, agg.cond_op, agg.condition) else: return " \n| %s | where _count %s %s" % ( agg.aggfunc_notrans, agg.cond_op, agg.condition) elif agg.groupfield: if agg.aggfield: agg.aggfunc_notrans = "count_distinct" return " \n| parse \"[%s=*]\" as searched nodrop\n| parse \"[%s=*]\" as grpd nodrop\n| %s(searched) by grpd \n| where _count_distinct %s %s" % ( agg.aggfield, agg.groupfield, agg.aggfunc_notrans, agg.cond_op, agg.condition) else: return " \n| parse \"[%s=*]\" as grpd nodrop\n| %s by grpd \n| where _count %s %s" % ( agg.groupfield, agg.aggfunc_notrans, agg.cond_op, agg.condition) else: return " \n| %s | where _count %s %s" % (agg.aggfunc_notrans, agg.cond_op, agg.condition)
[ "def", "generateAggregation", "(", "self", ",", "agg", ")", ":", "# lnx_shell_priv_esc_prep.yml", "# print(\"DEBUG generateAggregation(): %s, %s, %s, %s\" % (agg.aggfunc_notrans, agg.aggfield, agg.groupfield, agg.cond_op))", "if", "agg", ".", "groupfield", "==", "'host'", ":", "agg", ".", "groupfield", "=", "'hostname'", "if", "agg", ".", "aggfunc_notrans", "==", "'count() by'", ":", "agg", ".", "aggfunc_notrans", "=", "'count by'", "if", "agg", ".", "aggfunc", "==", "SigmaAggregationParser", ".", "AGGFUNC_NEAR", ":", "raise", "NotImplementedError", "(", "\"The 'near' aggregation operator is not yet implemented for this backend\"", ")", "if", "self", ".", "keypresent", ":", "if", "not", "agg", ".", "groupfield", ":", "if", "agg", ".", "aggfield", ":", "agg", ".", "aggfunc_notrans", "=", "\"count_distinct\"", "return", "\" \\n| %s(%s) \\n| where _count_distinct %s %s\"", "%", "(", "agg", ".", "aggfunc_notrans", ",", "agg", ".", "aggfield", ",", "agg", ".", "cond_op", ",", "agg", ".", "condition", ")", "else", ":", "return", "\" \\n| %s | where _count %s %s\"", "%", "(", "agg", ".", "aggfunc_notrans", ",", "agg", ".", "cond_op", ",", "agg", ".", "condition", ")", "elif", "agg", ".", "groupfield", ":", "if", "agg", ".", "aggfield", ":", "agg", ".", "aggfunc_notrans", "=", "\"count_distinct\"", "return", "\" \\n| %s(%s) by %s \\n| where _count_distinct %s %s\"", "%", "(", "agg", ".", "aggfunc_notrans", ",", "agg", ".", "aggfield", ",", "agg", ".", "groupfield", ",", "agg", ".", "cond_op", ",", "agg", ".", "condition", ")", "else", ":", "return", "\" \\n| %s by %s \\n| where _count %s %s\"", "%", "(", "agg", ".", "aggfunc_notrans", ",", "agg", ".", "groupfield", ",", "agg", ".", "cond_op", ",", "agg", ".", "condition", ")", "else", ":", "return", "\" \\n| %s | where _count %s %s\"", "%", "(", "agg", ".", "aggfunc_notrans", ",", "agg", ".", "cond_op", ",", "agg", ".", "condition", ")", "else", ":", "if", "not", "agg", ".", "groupfield", ":", "if", "agg", ".", "aggfield", ":", "agg", ".", "aggfunc_notrans", "=", "\"count_distinct\"", "return", "\" \\n| parse \\\"[%s=*]\\\" as searched nodrop\\n| %s(searched) \\n| where _count_distinct %s %s\"", "%", "(", "agg", ".", "aggfield", ",", "agg", ".", "aggfunc_notrans", ",", "agg", ".", "cond_op", ",", "agg", ".", "condition", ")", "else", ":", "return", "\" \\n| %s | where _count %s %s\"", "%", "(", "agg", ".", "aggfunc_notrans", ",", "agg", ".", "cond_op", ",", "agg", ".", "condition", ")", "elif", "agg", ".", "groupfield", ":", "if", "agg", ".", "aggfield", ":", "agg", ".", "aggfunc_notrans", "=", "\"count_distinct\"", "return", "\" \\n| parse \\\"[%s=*]\\\" as searched nodrop\\n| parse \\\"[%s=*]\\\" as grpd nodrop\\n| %s(searched) by grpd \\n| where _count_distinct %s %s\"", "%", "(", "agg", ".", "aggfield", ",", "agg", ".", "groupfield", ",", "agg", ".", "aggfunc_notrans", ",", "agg", ".", "cond_op", ",", "agg", ".", "condition", ")", "else", ":", "return", "\" \\n| parse \\\"[%s=*]\\\" as grpd nodrop\\n| %s by grpd \\n| where _count %s %s\"", "%", "(", "agg", ".", "groupfield", ",", "agg", ".", "aggfunc_notrans", ",", "agg", ".", "cond_op", ",", "agg", ".", "condition", ")", "else", ":", "return", "\" \\n| %s | where _count %s %s\"", "%", "(", "agg", ".", "aggfunc_notrans", ",", "agg", ".", "cond_op", ",", "agg", ".", "condition", ")" ]
https://github.com/SigmaHQ/sigma/blob/6f7d28b52a6468b2430e8d7dfefb79dc01e2f1af/tools/sigma/backends/sumologic.py#L58-L104
src-d/ml
db23fb14fc93dece05b434342def5f77b01c6cc3
sourced/ml/algorithms/id_splitter/features.py
python
prepare_features
(csv_path: str, use_header: bool, max_identifier_len: int, identifier_col: int, split_identifier_col: int, test_ratio: float, padding: str, shuffle: bool = True)
return X_train, X_test, y_train[:, :, None], y_test[:, :, None]
Prepare the features to train the identifier splitting task. :param csv_path: path to the CSV file. :param use_header: uses header as normal line (True) or treat as header line with column names. :param max_identifier_len: maximum length of raw identifiers. Skip identifiers that are longer. :param identifier_col: column in the CSV file for the raw identifier. :param split_identifier_col: column in the CSV file for the split identifier. :param shuffle: indicates whether to reorder the list of identifiers at random after reading it. :param test_ratio: Proportion of test samples used for evaluation. :param padding: position where to add padding values: after the input sequence if "post", before if "pre". :return: training and testing features to train the neural net for the splitting task.
Prepare the features to train the identifier splitting task.
[ "Prepare", "the", "features", "to", "train", "the", "identifier", "splitting", "task", "." ]
def prepare_features(csv_path: str, use_header: bool, max_identifier_len: int, identifier_col: int,
                     split_identifier_col: int, test_ratio: float, padding: str,
                     shuffle: bool = True) -> Tuple[numpy.array]:
    """
    Prepare the features to train the identifier splitting task.

    :param csv_path: path to the CSV file.
    :param use_header: uses header as normal line (True) or treat as header line with column names.
    :param max_identifier_len: maximum length of raw identifiers. Skip identifiers that are longer.
    :param identifier_col: column in the CSV file for the raw identifier.
    :param split_identifier_col: column in the CSV file for the split identifier.
    :param shuffle: indicates whether to reorder the list of identifiers at random after reading it.
    :param test_ratio: Proportion of test samples used for evaluation.
    :param padding: position where to add padding values: after the input sequence if "post",
                    before if "pre".
    :return: training and testing features to train the neural net for the splitting task.
    """
    from keras.preprocessing.sequence import pad_sequences
    log = logging.getLogger("prepare_features")

    # read data from the input file
    identifiers = read_identifiers(csv_path=csv_path, use_header=use_header,
                                   max_identifier_len=max_identifier_len,
                                   identifier_col=identifier_col,
                                   split_identifier_col=split_identifier_col, shuffle=shuffle)

    log.info("Converting identifiers to character indices")
    log.info("Number of identifiers: %d, Average length: %d characters" %
             (len(identifiers), numpy.mean([len(i) for i in identifiers])))

    char2ind = {c: i + 1 for i, c in enumerate(sorted(string.ascii_lowercase))}

    char_id_seq = []
    splits = []
    for identifier in identifiers:
        # iterate through the identifier and convert to array of char indices & boolean split array
        index_arr = []
        split_arr = []
        skip_char = False
        for char in identifier.strip():
            if char in char2ind:
                index_arr.append(char2ind[char])
                if skip_char:
                    skip_char = False
                    continue
                split_arr.append(0)
            elif char == " ":
                split_arr.append(1)
                skip_char = True
            else:
                log.warning("Unexpected symbol %s in identifier", char)
        assert len(index_arr) == len(split_arr)
        char_id_seq.append(index_arr)
        splits.append(split_arr)

    log.info("Number of subtokens: %d, Number of distinct characters: %d" %
             (sum(sum(split_arr) for split_arr in splits) + len(identifiers),
              len({i for index_arr in char_id_seq for i in index_arr})))

    log.info("Train/test splitting...")
    n_train = int((1 - test_ratio) * len(char_id_seq))
    X_train = char_id_seq[:n_train]
    X_test = char_id_seq[n_train:]
    y_train = splits[:n_train]
    y_test = splits[n_train:]
    log.info("Number of train samples: %s, number of test samples: %s" % (len(X_train), len(X_test)))

    log.info("Padding the sequences...")
    X_train = pad_sequences(X_train, maxlen=max_identifier_len, padding=padding)
    X_test = pad_sequences(X_test, maxlen=max_identifier_len, padding=padding)
    y_train = pad_sequences(y_train, maxlen=max_identifier_len, padding=padding)
    y_test = pad_sequences(y_test, maxlen=max_identifier_len, padding=padding)

    return X_train, X_test, y_train[:, :, None], y_test[:, :, None]
[ "def", "prepare_features", "(", "csv_path", ":", "str", ",", "use_header", ":", "bool", ",", "max_identifier_len", ":", "int", ",", "identifier_col", ":", "int", ",", "split_identifier_col", ":", "int", ",", "test_ratio", ":", "float", ",", "padding", ":", "str", ",", "shuffle", ":", "bool", "=", "True", ")", "->", "Tuple", "[", "numpy", ".", "array", "]", ":", "from", "keras", ".", "preprocessing", ".", "sequence", "import", "pad_sequences", "log", "=", "logging", ".", "getLogger", "(", "\"prepare_features\"", ")", "# read data from the input file", "identifiers", "=", "read_identifiers", "(", "csv_path", "=", "csv_path", ",", "use_header", "=", "use_header", ",", "max_identifier_len", "=", "max_identifier_len", ",", "identifier_col", "=", "identifier_col", ",", "split_identifier_col", "=", "split_identifier_col", ",", "shuffle", "=", "shuffle", ")", "log", ".", "info", "(", "\"Converting identifiers to character indices\"", ")", "log", ".", "info", "(", "\"Number of identifiers: %d, Average length: %d characters\"", "%", "(", "len", "(", "identifiers", ")", ",", "numpy", ".", "mean", "(", "[", "len", "(", "i", ")", "for", "i", "in", "identifiers", "]", ")", ")", ")", "char2ind", "=", "{", "c", ":", "i", "+", "1", "for", "i", ",", "c", "in", "enumerate", "(", "sorted", "(", "string", ".", "ascii_lowercase", ")", ")", "}", "char_id_seq", "=", "[", "]", "splits", "=", "[", "]", "for", "identifier", "in", "identifiers", ":", "# iterate through the identifier and convert to array of char indices & boolean split array", "index_arr", "=", "[", "]", "split_arr", "=", "[", "]", "skip_char", "=", "False", "for", "char", "in", "identifier", ".", "strip", "(", ")", ":", "if", "char", "in", "char2ind", ":", "index_arr", ".", "append", "(", "char2ind", "[", "char", "]", ")", "if", "skip_char", ":", "skip_char", "=", "False", "continue", "split_arr", ".", "append", "(", "0", ")", "elif", "char", "==", "\" \"", ":", "split_arr", ".", "append", "(", "1", ")", "skip_char", "=", "True", "else", ":", "log", ".", "warning", "(", "\"Unexpected symbol %s in identifier\"", ",", "char", ")", "assert", "len", "(", "index_arr", ")", "==", "len", "(", "split_arr", ")", "char_id_seq", ".", "append", "(", "index_arr", ")", "splits", ".", "append", "(", "split_arr", ")", "log", ".", "info", "(", "\"Number of subtokens: %d, Number of distinct characters: %d\"", "%", "(", "sum", "(", "sum", "(", "split_arr", ")", "for", "split_arr", "in", "splits", ")", "+", "len", "(", "identifiers", ")", ",", "len", "(", "{", "i", "for", "index_arr", "in", "char_id_seq", "for", "i", "in", "index_arr", "}", ")", ")", ")", "log", ".", "info", "(", "\"Train/test splitting...\"", ")", "n_train", "=", "int", "(", "(", "1", "-", "test_ratio", ")", "*", "len", "(", "char_id_seq", ")", ")", "X_train", "=", "char_id_seq", "[", ":", "n_train", "]", "X_test", "=", "char_id_seq", "[", "n_train", ":", "]", "y_train", "=", "splits", "[", ":", "n_train", "]", "y_test", "=", "splits", "[", "n_train", ":", "]", "log", ".", "info", "(", "\"Number of train samples: %s, number of test samples: %s\"", "%", "(", "len", "(", "X_train", ")", ",", "len", "(", "X_test", ")", ")", ")", "log", ".", "info", "(", "\"Padding the sequences...\"", ")", "X_train", "=", "pad_sequences", "(", "X_train", ",", "maxlen", "=", "max_identifier_len", ",", "padding", "=", "padding", ")", "X_test", "=", "pad_sequences", "(", "X_test", ",", "maxlen", "=", "max_identifier_len", ",", "padding", "=", "padding", ")", "y_train", "=", "pad_sequences", "(", "y_train", ",", "maxlen", 
"=", "max_identifier_len", ",", "padding", "=", "padding", ")", "y_test", "=", "pad_sequences", "(", "y_test", ",", "maxlen", "=", "max_identifier_len", ",", "padding", "=", "padding", ")", "return", "X_train", ",", "X_test", ",", "y_train", "[", ":", ",", ":", ",", "None", "]", ",", "y_test", "[", ":", ",", ":", ",", "None", "]" ]
https://github.com/src-d/ml/blob/db23fb14fc93dece05b434342def5f77b01c6cc3/sourced/ml/algorithms/id_splitter/features.py#L44-L118
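To make the labeling scheme above concrete, here is a hedged, standalone trace of the inner loop: letters map to 1-based alphabet indices, and a label of 1 marks the character that begins a new subtoken (the space itself is consumed via `skip_char`). The `encode` helper is illustrative, not part of the sourced/ml API, and the warning branch for unexpected symbols is dropped.

```python
import string

char2ind = {c: i + 1 for i, c in enumerate(sorted(string.ascii_lowercase))}


def encode(split_identifier):
    """Return (char indices, split labels) for one space-split identifier.

    A label of 1 marks the character that starts a new subtoken, matching
    the loop in prepare_features above.
    """
    index_arr, split_arr = [], []
    skip_char = False
    for char in split_identifier.strip():
        if char in char2ind:
            index_arr.append(char2ind[char])
            if skip_char:
                skip_char = False
                continue
            split_arr.append(0)
        elif char == " ":
            split_arr.append(1)
            skip_char = True
    return index_arr, split_arr


ids, labels = encode("foo bar")
assert ids == [6, 15, 15, 2, 1, 18]   # f o o b a r
assert labels == [0, 0, 0, 1, 0, 0]   # 'b' starts the second subtoken
```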
GRAND-Lab/ARGA
a970fa583d8c474b18f950da06bf91da03a647db
ARGA/arga/layers.py
python
dropout_sparse
(x, keep_prob, num_nonzero_elems)
return pre_out * (1./keep_prob)
Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)
Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)
[ "Dropout", "for", "sparse", "tensors", ".", "Currently", "fails", "for", "very", "large", "sparse", "tensors", "(", ">", "1M", "elements", ")" ]
def dropout_sparse(x, keep_prob, num_nonzero_elems): """Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements) """ noise_shape = [num_nonzero_elems] random_tensor = keep_prob random_tensor += tf.random_uniform(noise_shape) dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool) pre_out = tf.sparse_retain(x, dropout_mask) return pre_out * (1./keep_prob)
[ "def", "dropout_sparse", "(", "x", ",", "keep_prob", ",", "num_nonzero_elems", ")", ":", "noise_shape", "=", "[", "num_nonzero_elems", "]", "random_tensor", "=", "keep_prob", "random_tensor", "+=", "tf", ".", "random_uniform", "(", "noise_shape", ")", "dropout_mask", "=", "tf", ".", "cast", "(", "tf", ".", "floor", "(", "random_tensor", ")", ",", "dtype", "=", "tf", ".", "bool", ")", "pre_out", "=", "tf", ".", "sparse_retain", "(", "x", ",", "dropout_mask", ")", "return", "pre_out", "*", "(", "1.", "/", "keep_prob", ")" ]
https://github.com/GRAND-Lab/ARGA/blob/a970fa583d8c474b18f950da06bf91da03a647db/ARGA/arga/layers.py#L22-L30
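The record relies on a classic trick worth spelling out: `keep_prob + U[0, 1)` floors to 1 with probability `keep_prob` and to 0 otherwise, giving a Bernoulli mask without a comparison op. Below is a NumPy sketch of the same idea on dense arrays; the TF1 sparse plumbing (`tf.sparse_retain`) is omitted, and the function name is illustrative.

```python
import numpy as np

rng = np.random.default_rng(0)


def dropout_dense(x, keep_prob):
    # keep_prob + U[0,1) lies in [keep_prob, 1 + keep_prob); flooring it
    # gives 1 with probability keep_prob and 0 otherwise, which is the same
    # trick the TF1 code above uses before tf.sparse_retain.
    random_tensor = keep_prob + rng.random(x.shape)
    mask = np.floor(random_tensor).astype(bool)
    return np.where(mask, x / keep_prob, 0.0)  # inverted-dropout rescaling


x = np.ones((4, 4))
y = dropout_dense(x, keep_prob=0.5)
# Surviving entries are scaled to 2.0 so the expected value stays 1.0.
print(y)
```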
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/benchmarks/src/benchmarks/richards3-timed.py
python
IdleTask.__init__
(self,i,p,w,s,r)
[]
def __init__(self,i,p,w,s,r): Task.__init__(self,i,0,None,s,r)
[ "def", "__init__", "(", "self", ",", "i", ",", "p", ",", "w", ",", "s", ",", "r", ")", ":", "Task", ".", "__init__", "(", "self", ",", "i", ",", "0", ",", "None", ",", "s", ",", "r", ")" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/richards3-timed.py#L297-L298
Kronuz/esprima-python
809cb6e257b1d3d5b0d23f2bca7976e21f02fc3d
esprima/nodes.py
python
Identifier.__init__
(self, name)
[]
def __init__(self, name): self.type = Syntax.Identifier self.name = name
[ "def", "__init__", "(", "self", ",", "name", ")", ":", "self", ".", "type", "=", "Syntax", ".", "Identifier", "self", ".", "name", "=", "name" ]
https://github.com/Kronuz/esprima-python/blob/809cb6e257b1d3d5b0d23f2bca7976e21f02fc3d/esprima/nodes.py#L310-L312
ysrc/xunfeng
40d40ecf55910019b8b904ef70ae1eebb6b6d26f
vulscan/vulscan.py
python
install_kunpeng_plugin
()
[]
def install_kunpeng_plugin(): time_ = datetime.datetime.now() for plugin in kp.get_plugin_list(): level_list = ['紧急','高危','中危','低危','提示'] plugin_info = { '_id': plugin['references']['kpid'], 'name': 'Kunpeng -' + plugin['name'], 'info': plugin['remarks'] + ' ' + plugin['references']['cve'], 'level': level_list[int(plugin['level'])], 'type': plugin['type'], 'author': plugin['author'], 'url': plugin['references']['url'], 'source': 1, 'keyword': '', 'add_time': time_, 'filename': plugin['references']['kpid'], 'count': 0 } na_plugin.insert(plugin_info)
[ "def", "install_kunpeng_plugin", "(", ")", ":", "time_", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "for", "plugin", "in", "kp", ".", "get_plugin_list", "(", ")", ":", "level_list", "=", "[", "'紧急','高危", "'", ",'中危','低", "危", "','提示']", "", "", "", "", "", "plugin_info", "=", "{", "'_id'", ":", "plugin", "[", "'references'", "]", "[", "'kpid'", "]", ",", "'name'", ":", "'Kunpeng -'", "+", "plugin", "[", "'name'", "]", ",", "'info'", ":", "plugin", "[", "'remarks'", "]", "+", "' '", "+", "plugin", "[", "'references'", "]", "[", "'cve'", "]", ",", "'level'", ":", "level_list", "[", "int", "(", "plugin", "[", "'level'", "]", ")", "]", ",", "'type'", ":", "plugin", "[", "'type'", "]", ",", "'author'", ":", "plugin", "[", "'author'", "]", ",", "'url'", ":", "plugin", "[", "'references'", "]", "[", "'url'", "]", ",", "'source'", ":", "1", ",", "'keyword'", ":", "''", ",", "'add_time'", ":", "time_", ",", "'filename'", ":", "plugin", "[", "'references'", "]", "[", "'kpid'", "]", ",", "'count'", ":", "0", "}", "na_plugin", ".", "insert", "(", "plugin_info", ")" ]
https://github.com/ysrc/xunfeng/blob/40d40ecf55910019b8b904ef70ae1eebb6b6d26f/vulscan/vulscan.py#L248-L266
tensorflow/lingvo
ce10019243d954c3c3ebe739f7589b5eebfdf907
lingvo/core/py_utils.py
python
WeightInit.GaussianSqrtDim
(scale=1.0, seed=None)
return WeightInit._Params('gaussian_sqrt_dim', scale, seed)
scale * tf.random.normal(0, 1 / sqrt(dim0)).
scale * tf.random.normal(0, 1 / sqrt(dim0)).
[ "scale", "*", "tf", ".", "random", ".", "normal", "(", "0", "1", "/", "sqrt", "(", "dim0", "))", "." ]
def GaussianSqrtDim(scale=1.0, seed=None): """scale * tf.random.normal(0, 1 / sqrt(dim0)).""" return WeightInit._Params('gaussian_sqrt_dim', scale, seed)
[ "def", "GaussianSqrtDim", "(", "scale", "=", "1.0", ",", "seed", "=", "None", ")", ":", "return", "WeightInit", ".", "_Params", "(", "'gaussian_sqrt_dim'", ",", "scale", ",", "seed", ")" ]
https://github.com/tensorflow/lingvo/blob/ce10019243d954c3c3ebe739f7589b5eebfdf907/lingvo/core/py_utils.py#L1063-L1065
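Read literally, the docstring means the draw has standard deviation `scale / sqrt(dim0)`. A hedged NumPy equivalent follows; treating `dim0` as the first dimension of the weight shape (the fan-in) is an assumption about lingvo's convention, not something the record states.

```python
import numpy as np


def gaussian_sqrt_dim(shape, scale=1.0, seed=None):
    # Equivalent sampling: scale * N(0, 1/sqrt(dim0)), i.e. a normal draw
    # with standard deviation scale / sqrt(shape[0]).  Treating shape[0]
    # as the fan-in is an assumption, not taken from the record above.
    rng = np.random.default_rng(seed)
    std = scale / np.sqrt(shape[0])
    return rng.normal(0.0, std, size=shape)


w = gaussian_sqrt_dim((1024, 512), scale=1.0, seed=42)
print(w.std())  # close to 1/32 = 0.03125
```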
openstack/designate
bff3d5f6e31fe595a77143ec4ac779c187bf72a8
designate/objects/base.py
python
ListObjectMixin.__contains__
(self, value)
return value in self.objects
List membership test
List membership test
[ "List", "membership", "test" ]
def __contains__(self, value): """List membership test""" return value in self.objects
[ "def", "__contains__", "(", "self", ",", "value", ")", ":", "return", "value", "in", "self", ".", "objects" ]
https://github.com/openstack/designate/blob/bff3d5f6e31fe595a77143ec4ac779c187bf72a8/designate/objects/base.py#L386-L388
mdiazcl/fuzzbunch-debian
2b76c2249ade83a389ae3badb12a1bd09901fd2c
windows/Resources/Python/Core/Lib/cookielib.py
python
is_third_party
(request)
RFC 2965, section 3.3.6: An unverifiable transaction is to a third-party host if its request-host U does not domain-match the reach R of the request-host O in the origin transaction.
RFC 2965, section 3.3.6: An unverifiable transaction is to a third-party host if its request-host U does not domain-match the reach R of the request-host O in the origin transaction.
[ "RFC", "2965", "section", "3", ".", "3", ".", "6", ":", "An", "unverifiable", "transaction", "is", "to", "a", "third", "-", "party", "host", "if", "its", "request", "-", "host", "U", "does", "not", "domain", "-", "match", "the", "reach", "R", "of", "the", "request", "-", "host", "O", "in", "the", "origin", "transaction", "." ]
def is_third_party(request): """ RFC 2965, section 3.3.6: An unverifiable transaction is to a third-party host if its request- host U does not domain-match the reach R of the request-host O in the origin transaction. """ req_host = request_host(request) if not domain_match(req_host, reach(request.get_origin_req_host())): return True else: return False
[ "def", "is_third_party", "(", "request", ")", ":", "req_host", "=", "request_host", "(", "request", ")", "if", "not", "domain_match", "(", "req_host", ",", "reach", "(", "request", ".", "get_origin_req_host", "(", ")", ")", ")", ":", "return", "True", "else", ":", "return", "False" ]
https://github.com/mdiazcl/fuzzbunch-debian/blob/2b76c2249ade83a389ae3badb12a1bd09901fd2c/windows/Resources/Python/Core/Lib/cookielib.py#L635-L649
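For readers without RFC 2965 at hand, a simplified sketch of the two helpers this check leans on: `reach` (the host minus its first label, keeping the leading dot) and `domain_match` (suffix matching). cookielib's real implementations also handle IP addresses and single-label hosts; those rules are omitted, so this is illustrative only.

```python
def reach(host):
    # Simplified RFC 2965 "reach": for a multi-label host name, the reach
    # is the host minus its first label, with the leading dot kept.
    i = host.find(".")
    return host[i:] if i >= 0 else host


def domain_match(host, domain):
    # Simplified domain-match: exact match, or the host ends with a
    # dot-prefixed domain.
    return host == domain or (domain.startswith(".") and host.endswith(domain))


# www.example.com reaches .example.com, so ads.tracker.net is third-party:
assert domain_match("www.example.com", reach("www.example.com"))
assert not domain_match("ads.tracker.net", reach("www.example.com"))
```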
mozillazg/pypy
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
lib-python/2.7/atexit.py
python
register
(func, *targs, **kargs)
return func
register a function to be executed upon normal program termination func - function to be called at exit targs - optional arguments to pass to func kargs - optional keyword arguments to pass to func func is returned to facilitate usage as a decorator.
register a function to be executed upon normal program termination
[ "register", "a", "function", "to", "be", "executed", "upon", "normal", "program", "termination" ]
def register(func, *targs, **kargs): """register a function to be executed upon normal program termination func - function to be called at exit targs - optional arguments to pass to func kargs - optional keyword arguments to pass to func func is returned to facilitate usage as a decorator. """ _exithandlers.append((func, targs, kargs)) return func
[ "def", "register", "(", "func", ",", "*", "targs", ",", "*", "*", "kargs", ")", ":", "_exithandlers", ".", "append", "(", "(", "func", ",", "targs", ",", "kargs", ")", ")", "return", "func" ]
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/lib-python/2.7/atexit.py#L37-L47
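Because `register` returns `func`, it doubles as a decorator; with extra arguments it is called in the classic form. Both uses below are standard stdlib behavior, and handlers run in reverse registration order at normal interpreter exit.

```python
import atexit


@atexit.register
def goodbye():
    print("process exiting")


def close(name):
    print("closing", name)


# The classic call form, passing positional args through to the handler.
atexit.register(close, "resource-1")
# At normal exit this prints "closing resource-1" before "process exiting",
# since handlers run in reverse registration order.
```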
raffaele-forte/climber
5530a780446e35b1ce977bae140557050fe0b47c
Exscript/Account.py
python
Account.context
(self)
return Context(self)
When you need a 'with' context for an already-acquired account.
When you need a 'with' context for an already-acquired account.
[ "When", "you", "need", "a", "with", "context", "for", "an", "already", "-", "acquired", "account", "." ]
def context(self): """ When you need a 'with' context for an already-acquired account. """ return Context(self)
[ "def", "context", "(", "self", ")", ":", "return", "Context", "(", "self", ")" ]
https://github.com/raffaele-forte/climber/blob/5530a780446e35b1ce977bae140557050fe0b47c/Exscript/Account.py#L62-L66
CGATOxford/cgat
326aad4694bdfae8ddc194171bb5d73911243947
CGAT/CSV2DB.py
python
executewait
(dbhandle, statement, error, retry=False, wait=5, args=())
execute sql statement. Retry on error, if retry is True. Returns a cursor object.
execute sql statement.
[ "execute", "sql", "statement", "." ]
def executewait(dbhandle, statement, error, retry=False, wait=5, args=()): '''execute sql statement. Retry on error, if retry is True. Returns a cursor object. ''' cc = dbhandle.cursor() i = 20 while i > 0: try: cc.execute(statement, args) return cc except sqlite3.OperationalError as e: E.warn("import failed: msg=%s, statement=\n %s" % (str(e), statement)) # TODO: check for database locked msg if not retry: raise e if not re.search("locked", str(e)): raise e time.sleep(wait) i -= 1 continue break raise sqlite3.OperationalError("Database locked and too many retries")
[ "def", "executewait", "(", "dbhandle", ",", "statement", ",", "error", ",", "retry", "=", "False", ",", "wait", "=", "5", ",", "args", "=", "(", ")", ")", ":", "cc", "=", "dbhandle", ".", "cursor", "(", ")", "i", "=", "20", "while", "i", ">", "0", ":", "try", ":", "cc", ".", "execute", "(", "statement", ",", "args", ")", "return", "cc", "except", "sqlite3", ".", "OperationalError", "as", "e", ":", "E", ".", "warn", "(", "\"import failed: msg=%s, statement=\\n %s\"", "%", "(", "str", "(", "e", ")", ",", "statement", ")", ")", "# TODO: check for database locked msg", "if", "not", "retry", ":", "raise", "e", "if", "not", "re", ".", "search", "(", "\"locked\"", ",", "str", "(", "e", ")", ")", ":", "raise", "e", "time", ".", "sleep", "(", "wait", ")", "i", "-=", "1", "continue", "break", "raise", "sqlite3", ".", "OperationalError", "(", "\"Database locked and too many retries\"", ")" ]
https://github.com/CGATOxford/cgat/blob/326aad4694bdfae8ddc194171bb5d73911243947/CGAT/CSV2DB.py#L43-L71
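A condensed, standalone version of the same retry loop, usable without CGAT's `E` logger: retry only when sqlite reports the database as locked, re-raise anything else. `execute_with_retry` is an illustrative name, not part of the CGAT API.

```python
import re
import sqlite3
import time


def execute_with_retry(dbhandle, statement, args=(), retries=20, wait=5):
    # Minimal version of the retry loop above: retry only when sqlite
    # reports the database as locked, re-raise any other error.
    for attempt in range(retries):
        try:
            cc = dbhandle.cursor()
            cc.execute(statement, args)
            return cc
        except sqlite3.OperationalError as e:
            if not re.search("locked", str(e)):
                raise
            time.sleep(wait)
    raise sqlite3.OperationalError("Database locked and too many retries")


db = sqlite3.connect(":memory:")
execute_with_retry(db, "CREATE TABLE t (x INTEGER)")
execute_with_retry(db, "INSERT INTO t VALUES (?)", (1,))
print(execute_with_retry(db, "SELECT x FROM t").fetchall())  # [(1,)]
```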
exercism/python
f79d44ef6c9cf68d8c76cb94017a590f04391635
exercises/practice/luhn/.meta/example.py
python
Luhn.__init__
(self, card_num)
[]
def __init__(self, card_num): self.card_num = card_num self.checksum = -1 digits = card_num.replace(' ', '') length = len(digits) if digits.isdigit() and length > 1: self.checksum = 0 cadence = length % 2 for idx, digit in enumerate(digits): num = int(digit) if idx % 2 == cadence: num *= 2 if num > 9: num -= 9 self.checksum += num
[ "def", "__init__", "(", "self", ",", "card_num", ")", ":", "self", ".", "card_num", "=", "card_num", "self", ".", "checksum", "=", "-", "1", "digits", "=", "card_num", ".", "replace", "(", "' '", ",", "''", ")", "length", "=", "len", "(", "digits", ")", "if", "digits", ".", "isdigit", "(", ")", "and", "length", ">", "1", ":", "self", ".", "checksum", "=", "0", "cadence", "=", "length", "%", "2", "for", "idx", ",", "digit", "in", "enumerate", "(", "digits", ")", ":", "num", "=", "int", "(", "digit", ")", "if", "idx", "%", "2", "==", "cadence", ":", "num", "*=", "2", "if", "num", ">", "9", ":", "num", "-=", "9", "self", ".", "checksum", "+=", "num" ]
https://github.com/exercism/python/blob/f79d44ef6c9cf68d8c76cb94017a590f04391635/exercises/practice/luhn/.meta/example.py#L2-L16
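A worked check of the cadence logic above, as a standalone predicate: `len(digits) % 2` decides which positions get doubled so that, counting from the right, every second digit is doubled regardless of string length. The first test number is a commonly used valid Luhn example (checksum 80); the second fails the mod-10 test.

```python
def luhn_valid(card_num):
    digits = card_num.replace(" ", "")
    if not digits.isdigit() or len(digits) < 2:
        return False
    checksum = 0
    cadence = len(digits) % 2  # which positions get doubled, as above
    for idx, digit in enumerate(digits):
        num = int(digit)
        if idx % 2 == cadence:
            num *= 2
            if num > 9:
                num -= 9  # same as summing the two digits of num
        checksum += num
    return checksum % 10 == 0


assert luhn_valid("4539 3195 0343 6467")       # checksum 80
assert not luhn_valid("8273 1232 7352 0569")   # checksum 57, not divisible by 10
```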
django-parler/django-parler
577ca2f4a80713a9272c48db30e914a4d9332358
parler/fields.py
python
_validate_master
(new_class)
return shared_model
Check whether the 'master' field on a TranslatedFieldsModel is correctly configured.
Check whether the 'master' field on a TranslatedFieldsModel is correctly configured.
[ "Check", "whether", "the", "master", "field", "on", "a", "TranslatedFieldsModel", "is", "correctly", "configured", "." ]
def _validate_master(new_class): """ Check whether the 'master' field on a TranslatedFieldsModel is correctly configured. """ if not new_class.master or not isinstance(new_class.master, ForwardManyToOneDescriptor): raise ImproperlyConfigured( f"{new_class.__name__}.master should be a ForeignKey to the shared table." ) remote_field = new_class.master.field.remote_field shared_model = remote_field.model # Skip checks in migration if shared_model.__module__ == "__fake__": return shared_model try: meta = shared_model._parler_meta except AttributeError: raise TypeError( f"Translatable model {shared_model} does not appear to inherit from TranslatableModel" ) if meta is not None: if meta._has_translations_model(new_class): raise ImproperlyConfigured( f"The model '{shared_model.__name__}' already has an associated translation table!" ) if meta._has_translations_field(remote_field.related_name): raise ImproperlyConfigured( f"The model '{shared_model.__name__}' already has an associated translation field named '{remote_field.related_name}'!" ) return shared_model
[ "def", "_validate_master", "(", "new_class", ")", ":", "if", "not", "new_class", ".", "master", "or", "not", "isinstance", "(", "new_class", ".", "master", ",", "ForwardManyToOneDescriptor", ")", ":", "raise", "ImproperlyConfigured", "(", "f\"{new_class.__name__}.master should be a ForeignKey to the shared table.\"", ")", "remote_field", "=", "new_class", ".", "master", ".", "field", ".", "remote_field", "shared_model", "=", "remote_field", ".", "model", "# Skip checks in migration", "if", "shared_model", ".", "__module__", "==", "\"__fake__\"", ":", "return", "shared_model", "try", ":", "meta", "=", "shared_model", ".", "_parler_meta", "except", "AttributeError", ":", "raise", "TypeError", "(", "f\"Translatable model {shared_model} does not appear to inherit from TranslatableModel\"", ")", "if", "meta", "is", "not", "None", ":", "if", "meta", ".", "_has_translations_model", "(", "new_class", ")", ":", "raise", "ImproperlyConfigured", "(", "f\"The model '{shared_model.__name__}' already has an associated translation table!\"", ")", "if", "meta", ".", "_has_translations_field", "(", "remote_field", ".", "related_name", ")", ":", "raise", "ImproperlyConfigured", "(", "f\"The model '{shared_model.__name__}' already has an associated translation field named '{remote_field.related_name}'!\"", ")", "return", "shared_model" ]
https://github.com/django-parler/django-parler/blob/577ca2f4a80713a9272c48db30e914a4d9332358/parler/fields.py#L18-L51
pykaldi/pykaldi
b4e7a15a31286e57c01259edfda54d113b5ceb0e
kaldi/fstext/_api.py
python
_MutableFstBase.invert
(self)
return self
Inverts the FST's transduction. This operation destructively inverts the FST's transduction by exchanging input and output labels. Returns: self.
Inverts the FST's transduction.
[ "Inverts", "the", "FST", "s", "transduction", "." ]
def invert(self): """ Inverts the FST's transduction. This operation destructively inverts the FST's transduction by exchanging input and output labels. Returns: self. """ self._ops.invert(self) self._check_mutating_imethod() return self
[ "def", "invert", "(", "self", ")", ":", "self", ".", "_ops", ".", "invert", "(", "self", ")", "self", ".", "_check_mutating_imethod", "(", ")", "return", "self" ]
https://github.com/pykaldi/pykaldi/blob/b4e7a15a31286e57c01259edfda54d113b5ceb0e/kaldi/fstext/_api.py#L847-L859
faucetsdn/ryu
537f35f4b2bc634ef05e3f28373eb5e24609f989
ryu/services/protocols/bgp/application.py
python
RyuBGPSpeaker._add_routes
(self, settings)
Add BGP routes from given settings. All valid routes are loaded. Misconfigured routes are ignored and errors are logged.
Add BGP routes from given settings.
[ "Add", "BGP", "routes", "from", "given", "settings", "." ]
def _add_routes(self, settings):
        """
        Add BGP routes from given settings.

        All valid routes are loaded.
        Misconfigured routes are ignored and errors are logged.
        """
        for route_settings in settings:
            if 'prefix' in route_settings:
                prefix_add = self.speaker.prefix_add
            elif 'route_type' in route_settings:
                prefix_add = self.speaker.evpn_prefix_add
            elif 'flowspec_family' in route_settings:
                prefix_add = self.speaker.flowspec_prefix_add
            else:
                LOG.debug('Skip invalid route settings: %s', route_settings)
                continue

            LOG.debug('Adding route settings: %s', route_settings)
            try:
                prefix_add(**route_settings)
            except RuntimeConfigError as e:
                LOG.exception(e)
[ "def", "_add_routes", "(", "self", ",", "settings", ")", ":", "for", "route_settings", "in", "settings", ":", "if", "'prefix'", "in", "route_settings", ":", "prefix_add", "=", "self", ".", "speaker", ".", "prefix_add", "elif", "'route_type'", "in", "route_settings", ":", "prefix_add", "=", "self", ".", "speaker", ".", "evpn_prefix_add", "elif", "'flowspec_family'", "in", "route_settings", ":", "prefix_add", "=", "self", ".", "speaker", ".", "flowspec_prefix_add", "else", ":", "LOG", ".", "debug", "(", "'Skip invalid route settings: %s'", ",", "route_settings", ")", "continue", "LOG", ".", "debug", "(", "'Adding route settings: %s'", ",", "route_settings", ")", "try", ":", "prefix_add", "(", "*", "*", "route_settings", ")", "except", "RuntimeConfigError", "as", "e", ":", "LOG", ".", "exception", "(", "e", ")" ]
https://github.com/faucetsdn/ryu/blob/537f35f4b2bc634ef05e3f28373eb5e24609f989/ryu/services/protocols/bgp/application.py#L409-L431
aws-solutions/aws-instance-scheduler
d9208ddc4528536f20e14127ea0f49f8c52ea811
source/lambda/requesthandlers/scheduler_setup_handler.py
python
SchedulerSetupHandler.started_tags
(self)
return self.resource_properties.get(configuration.STARTED_TAGS, None)
Returns started tags as a string :return: started tags
Returns started tags as a string :return: started tags
[ "Returns", "started", "tags", "as", "a", "string", ":", "return", ":", "started", "tags" ]
def started_tags(self): """ Returns started tags as a string :return: started tags """ return self.resource_properties.get(configuration.STARTED_TAGS, None)
[ "def", "started_tags", "(", "self", ")", ":", "return", "self", ".", "resource_properties", ".", "get", "(", "configuration", ".", "STARTED_TAGS", ",", "None", ")" ]
https://github.com/aws-solutions/aws-instance-scheduler/blob/d9208ddc4528536f20e14127ea0f49f8c52ea811/source/lambda/requesthandlers/scheduler_setup_handler.py#L115-L120
robotframework/SeleniumLibrary
0d8caf35cd8325ff391c27fe814744060470018e
src/SeleniumLibrary/keywords/browsermanagement.py
python
BrowserManagementKeywords.get_browser_ids
(self)
return self.drivers.active_driver_ids
Returns the indexes of all active browsers as a list. Example: | @{browser_ids}= | Get Browser Ids | | | | FOR | ${id} | IN | @{browser_ids} | | | @{window_titles}= | Get Window Titles | browser=${id} | | | Log | Browser ${id} has these windows: ${window_titles} | | | END | | | | See `Switch Browser` for more information and examples. New in SeleniumLibrary 4.0
Returns the indexes of all active browsers as a list.
[ "Returns", "index", "of", "all", "active", "browser", "as", "list", "." ]
def get_browser_ids(self) -> List[str]:
        """Returns the indexes of all active browsers as a list.

        Example:
        | @{browser_ids}= | Get Browser Ids | | |
        | FOR | ${id} | IN | @{browser_ids} |
        | | @{window_titles}= | Get Window Titles | browser=${id} |
        | | Log | Browser ${id} has these windows: ${window_titles} | |
        | END | | | |

        See `Switch Browser` for more information and examples.

        New in SeleniumLibrary 4.0
        """
        return self.drivers.active_driver_ids
[ "def", "get_browser_ids", "(", "self", ")", "->", "List", "[", "str", "]", ":", "return", "self", ".", "drivers", ".", "active_driver_ids" ]
https://github.com/robotframework/SeleniumLibrary/blob/0d8caf35cd8325ff391c27fe814744060470018e/src/SeleniumLibrary/keywords/browsermanagement.py#L442-L456
theopolis/uefi-firmware-parser
0171b4639ca0825db4fac5240c996e8a733bc6a4
uefi_firmware/utils.py
python
aguid
(b, big=False)
return [a, b, c] + [_c for _c in d]
RFC4122 binary GUID as int array.
RFC4122 binary GUID as int array.
[ "RFC4122", "binary", "GUID", "as", "int", "array", "." ]
def aguid(b, big=False): '''RFC4122 binary GUID as int array.''' a, b, c, d = struct.unpack("%sIHH8s" % (">" if big else "<"), b) return [a, b, c] + [_c for _c in d]
[ "def", "aguid", "(", "b", ",", "big", "=", "False", ")", ":", "a", ",", "b", ",", "c", ",", "d", "=", "struct", ".", "unpack", "(", "\"%sIHH8s\"", "%", "(", "\">\"", "if", "big", "else", "\"<\"", ")", ",", "b", ")", "return", "[", "a", ",", "b", ",", "c", "]", "+", "[", "_c", "for", "_c", "in", "d", "]" ]
https://github.com/theopolis/uefi-firmware-parser/blob/0171b4639ca0825db4fac5240c996e8a733bc6a4/uefi_firmware/utils.py#L103-L106
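A hedged demo of the struct layout: a 4-byte unsigned int, two 2-byte unsigned shorts, then eight raw bytes, little-endian unless `big` is set. The demo assumes Python 3, where iterating `bytes` yields ints; under Python 2 the final eight entries would be one-character strings.

```python
import struct


def aguid(b, big=False):
    '''RFC4122 binary GUID as int array.'''
    a, b, c, d = struct.unpack("%sIHH8s" % (">" if big else "<"), b)
    return [a, b, c] + [_c for _c in d]


# 16 bytes: uint32=1, uint16=2, uint16=3, then raw bytes 4..11
raw = bytes([1, 0, 0, 0, 2, 0, 3, 0, 4, 5, 6, 7, 8, 9, 10, 11])
print(aguid(raw))            # [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
print(aguid(raw, big=True))  # [16777216, 512, 768, 4, 5, 6, 7, 8, 9, 10, 11]
```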
gem/oq-engine
1bdb88f3914e390abcbd285600bfd39477aae47c
openquake/hazardlib/geo/polygon.py
python
Polygon.wkt
(self)
return 'POLYGON((%s))' % ', '.join(pairs)
Generate WKT (Well-Known Text) to represent this polygon.
Generate WKT (Well-Known Text) to represent this polygon.
[ "Generate", "WKT", "(", "Well", "-", "Known", "Text", ")", "to", "represent", "this", "polygon", "." ]
def wkt(self): """ Generate WKT (Well-Known Text) to represent this polygon. """ pairs = ['%.5f %.5f' % (lon, lat) for lon, lat in zip(self.lons, self.lats)] # the polygon must form a closed loop; first and last coord pairs # are the same pairs.append(pairs[0]) return 'POLYGON((%s))' % ', '.join(pairs)
[ "def", "wkt", "(", "self", ")", ":", "pairs", "=", "[", "'%.5f %.5f'", "%", "(", "lon", ",", "lat", ")", "for", "lon", ",", "lat", "in", "zip", "(", "self", ".", "lons", ",", "self", ".", "lats", ")", "]", "# the polygon must form a closed loop; first and last coord pairs", "# are the same", "pairs", ".", "append", "(", "pairs", "[", "0", "]", ")", "return", "'POLYGON((%s))'", "%", "', '", ".", "join", "(", "pairs", ")" ]
https://github.com/gem/oq-engine/blob/1bdb88f3914e390abcbd285600bfd39477aae47c/openquake/hazardlib/geo/polygon.py#L74-L83
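A standalone restatement of the construction, mainly to highlight the closed-ring requirement: the first coordinate pair is appended again at the end, since WKT polygons must start and end on the same vertex. `polygon_wkt` is an illustrative name, not part of the oq-engine API.

```python
def polygon_wkt(lons, lats):
    # Same construction as the property above: format each vertex, then
    # repeat the first pair so the ring is closed, as WKT requires.
    pairs = ["%.5f %.5f" % (lon, lat) for lon, lat in zip(lons, lats)]
    pairs.append(pairs[0])
    return "POLYGON((%s))" % ", ".join(pairs)


print(polygon_wkt([0.0, 1.0, 1.0], [0.0, 0.0, 1.0]))
# POLYGON((0.00000 0.00000, 1.00000 0.00000, 1.00000 1.00000, 0.00000 0.00000))
```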
robcarver17/pysystemtrade
b0385705b7135c52d39cb6d2400feece881bcca9
systems/forecast_combine.py
python
ForecastCombine.calculation_of_raw_estimated_monthly_forecast_weights
(self, instrument_code)
return weight_func
Does an optimisation for a single instrument We do this if we can't do the special case of a fully pooled optimisation (both costs and returns pooled) Estimate the forecast weights for this instrument We store this intermediate step to expose the calculation object :param instrument_code: :type str: :returns: TxK pd.DataFrame containing weights, columns are trading rule variation names, T covers all
Does an optimisation for a single instrument
[ "Does", "an", "optimisation", "for", "a", "single", "instrument" ]
def calculation_of_raw_estimated_monthly_forecast_weights(self, instrument_code): """ Does an optimisation for a single instrument We do this if we can't do the special case of a fully pooled optimisation (both costs and returns pooled) Estimate the forecast weights for this instrument We store this intermediate step to expose the calculation object :param instrument_code: :type str: :returns: TxK pd.DataFrame containing weights, columns are trading rule variation names, T covers all """ self.log.terse("Calculating raw forecast weights for %s" % instrument_code) config = self.config # Get some useful stuff from the config weighting_params = copy(config.forecast_weight_estimate) # which function to use for calculation weighting_func = resolve_function(weighting_params.pop("func")) returns_pre_processor = self.returns_pre_processor_for_code(instrument_code) weight_func = weighting_func( returns_pre_processor, asset_name=instrument_code, log=self.log, **weighting_params ) return weight_func
[ "def", "calculation_of_raw_estimated_monthly_forecast_weights", "(", "self", ",", "instrument_code", ")", ":", "self", ".", "log", ".", "terse", "(", "\"Calculating raw forecast weights for %s\"", "%", "instrument_code", ")", "config", "=", "self", ".", "config", "# Get some useful stuff from the config", "weighting_params", "=", "copy", "(", "config", ".", "forecast_weight_estimate", ")", "# which function to use for calculation", "weighting_func", "=", "resolve_function", "(", "weighting_params", ".", "pop", "(", "\"func\"", ")", ")", "returns_pre_processor", "=", "self", ".", "returns_pre_processor_for_code", "(", "instrument_code", ")", "weight_func", "=", "weighting_func", "(", "returns_pre_processor", ",", "asset_name", "=", "instrument_code", ",", "log", "=", "self", ".", "log", ",", "*", "*", "weighting_params", ")", "return", "weight_func" ]
https://github.com/robcarver17/pysystemtrade/blob/b0385705b7135c52d39cb6d2400feece881bcca9/systems/forecast_combine.py#L564-L599
dask/dask-jobqueue
4980e746e9be15e5fe6736b6c496b8faea737fd7
dask_jobqueue/_version.py
python
git_pieces_from_vcs
(tag_prefix, root, verbose, run_command=run_command)
return pieces
Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree.
Get version from 'git describe' in the root of the source tree.
[ "Get", "version", "from", "git", "describe", "in", "the", "root", "of", "the", "source", "tree", "." ]
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command( GITS, [ "describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix, ], cwd=root, ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( full_tag, tag_prefix, ) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[ 0 ].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces
[ "def", "git_pieces_from_vcs", "(", "tag_prefix", ",", "root", ",", "verbose", ",", "run_command", "=", "run_command", ")", ":", "GITS", "=", "[", "\"git\"", "]", "if", "sys", ".", "platform", "==", "\"win32\"", ":", "GITS", "=", "[", "\"git.cmd\"", ",", "\"git.exe\"", "]", "out", ",", "rc", "=", "run_command", "(", "GITS", ",", "[", "\"rev-parse\"", ",", "\"--git-dir\"", "]", ",", "cwd", "=", "root", ",", "hide_stderr", "=", "True", ")", "if", "rc", "!=", "0", ":", "if", "verbose", ":", "print", "(", "\"Directory %s not under git control\"", "%", "root", ")", "raise", "NotThisMethod", "(", "\"'git rev-parse --git-dir' returned error\"", ")", "# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]", "# if there isn't one, this yields HEX[-dirty] (no NUM)", "describe_out", ",", "rc", "=", "run_command", "(", "GITS", ",", "[", "\"describe\"", ",", "\"--tags\"", ",", "\"--dirty\"", ",", "\"--always\"", ",", "\"--long\"", ",", "\"--match\"", ",", "\"%s*\"", "%", "tag_prefix", ",", "]", ",", "cwd", "=", "root", ",", ")", "# --long was added in git-1.5.5", "if", "describe_out", "is", "None", ":", "raise", "NotThisMethod", "(", "\"'git describe' failed\"", ")", "describe_out", "=", "describe_out", ".", "strip", "(", ")", "full_out", ",", "rc", "=", "run_command", "(", "GITS", ",", "[", "\"rev-parse\"", ",", "\"HEAD\"", "]", ",", "cwd", "=", "root", ")", "if", "full_out", "is", "None", ":", "raise", "NotThisMethod", "(", "\"'git rev-parse' failed\"", ")", "full_out", "=", "full_out", ".", "strip", "(", ")", "pieces", "=", "{", "}", "pieces", "[", "\"long\"", "]", "=", "full_out", "pieces", "[", "\"short\"", "]", "=", "full_out", "[", ":", "7", "]", "# maybe improved later", "pieces", "[", "\"error\"", "]", "=", "None", "# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]", "# TAG might have hyphens.", "git_describe", "=", "describe_out", "# look for -dirty suffix", "dirty", "=", "git_describe", ".", "endswith", "(", "\"-dirty\"", ")", "pieces", "[", "\"dirty\"", "]", "=", "dirty", "if", "dirty", ":", "git_describe", "=", "git_describe", "[", ":", "git_describe", ".", "rindex", "(", "\"-dirty\"", ")", "]", "# now we have TAG-NUM-gHEX or HEX", "if", "\"-\"", "in", "git_describe", ":", "# TAG-NUM-gHEX", "mo", "=", "re", ".", "search", "(", "r\"^(.+)-(\\d+)-g([0-9a-f]+)$\"", ",", "git_describe", ")", "if", "not", "mo", ":", "# unparseable. 
Maybe git-describe is misbehaving?", "pieces", "[", "\"error\"", "]", "=", "\"unable to parse git-describe output: '%s'\"", "%", "describe_out", "return", "pieces", "# tag", "full_tag", "=", "mo", ".", "group", "(", "1", ")", "if", "not", "full_tag", ".", "startswith", "(", "tag_prefix", ")", ":", "if", "verbose", ":", "fmt", "=", "\"tag '%s' doesn't start with prefix '%s'\"", "print", "(", "fmt", "%", "(", "full_tag", ",", "tag_prefix", ")", ")", "pieces", "[", "\"error\"", "]", "=", "\"tag '%s' doesn't start with prefix '%s'\"", "%", "(", "full_tag", ",", "tag_prefix", ",", ")", "return", "pieces", "pieces", "[", "\"closest-tag\"", "]", "=", "full_tag", "[", "len", "(", "tag_prefix", ")", ":", "]", "# distance: number of commits since tag", "pieces", "[", "\"distance\"", "]", "=", "int", "(", "mo", ".", "group", "(", "2", ")", ")", "# commit: short hex revision ID", "pieces", "[", "\"short\"", "]", "=", "mo", ".", "group", "(", "3", ")", "else", ":", "# HEX: no tags", "pieces", "[", "\"closest-tag\"", "]", "=", "None", "count_out", ",", "rc", "=", "run_command", "(", "GITS", ",", "[", "\"rev-list\"", ",", "\"HEAD\"", ",", "\"--count\"", "]", ",", "cwd", "=", "root", ")", "pieces", "[", "\"distance\"", "]", "=", "int", "(", "count_out", ")", "# total number of commits", "# commit date: see ISO-8601 comment in git_versions_from_keywords()", "date", "=", "run_command", "(", "GITS", ",", "[", "\"show\"", ",", "\"-s\"", ",", "\"--format=%ci\"", ",", "\"HEAD\"", "]", ",", "cwd", "=", "root", ")", "[", "0", "]", ".", "strip", "(", ")", "pieces", "[", "\"date\"", "]", "=", "date", ".", "strip", "(", ")", ".", "replace", "(", "\" \"", ",", "\"T\"", ",", "1", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ",", "1", ")", "return", "pieces" ]
https://github.com/dask/dask-jobqueue/blob/4980e746e9be15e5fe6736b6c496b8faea737fd7/dask_jobqueue/_version.py#L233-L330
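The parsing half of the function is easy to exercise on its own. The sketch below (illustrative, not part of versioneer's API) strips the `-dirty` suffix and applies the same `TAG-NUM-gHEX` regex; the bare-HEX fallback and all git invocations are omitted.

```python
import re


def parse_describe(describe_out, tag_prefix="v"):
    # Standalone version of the parsing half of git_pieces_from_vcs:
    # handles TAG-NUM-gHEX[-dirty]; the bare-HEX fallback is omitted.
    pieces = {"dirty": describe_out.endswith("-dirty")}
    if pieces["dirty"]:
        describe_out = describe_out[: describe_out.rindex("-dirty")]
    mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", describe_out)
    if not mo or not mo.group(1).startswith(tag_prefix):
        pieces["error"] = "unable to parse: %r" % describe_out
        return pieces
    pieces["closest-tag"] = mo.group(1)[len(tag_prefix):]
    pieces["distance"] = int(mo.group(2))  # commits since the tag
    pieces["short"] = mo.group(3)          # short commit hash
    return pieces


print(parse_describe("v0.7.1-14-g4980e74-dirty"))
# {'dirty': True, 'closest-tag': '0.7.1', 'distance': 14, 'short': '4980e74'}
```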
j-bennet/wharfee
a48536cba5d3830a29c63bb440b3e74a8a23431d
scripts/optionizer.py
python
format_option
(info)
return textwrap.dedent(result).strip()
Format code to create CommandOption. :param info: OptInfo :return: str
Format code to create CommandOption. :param info: OptInfo :return: str
[ "Format", "code", "to", "create", "CommandOption", ".", ":", "param", "info", ":", "OptInfo", ":", "return", ":", "str" ]
def format_option(info): """ Format code to create CommandOption. :param info: OptInfo :return: str """ tmpl = Template(""" CommandOption( CommandOption.{{ const_type }}, {{ short_name }}, {{ long_name }}, action='{{ action }}', dest='{{ dest }}',{% if default is not none %} default={{ default }},{% endif %} help='{{ help }}.' ), """) result = tmpl.render( const_type=info.type_str, short_name=maybe_quote(info.short_name), long_name=maybe_quote(info.long_name), action=info.action, dest=info.dest, default=maybe_quote(info.default), help=info.help.rstrip('.') ) return textwrap.dedent(result).strip()
[ "def", "format_option", "(", "info", ")", ":", "tmpl", "=", "Template", "(", "\"\"\"\n CommandOption(\n CommandOption.{{ const_type }},\n {{ short_name }},\n {{ long_name }},\n action='{{ action }}',\n dest='{{ dest }}',{% if default is not none %}\n default={{ default }},{% endif %}\n help='{{ help }}.'\n ),\n \"\"\"", ")", "result", "=", "tmpl", ".", "render", "(", "const_type", "=", "info", ".", "type_str", ",", "short_name", "=", "maybe_quote", "(", "info", ".", "short_name", ")", ",", "long_name", "=", "maybe_quote", "(", "info", ".", "long_name", ")", ",", "action", "=", "info", ".", "action", ",", "dest", "=", "info", ".", "dest", ",", "default", "=", "maybe_quote", "(", "info", ".", "default", ")", ",", "help", "=", "info", ".", "help", ".", "rstrip", "(", "'.'", ")", ")", "return", "textwrap", ".", "dedent", "(", "result", ")", ".", "strip", "(", ")" ]
https://github.com/j-bennet/wharfee/blob/a48536cba5d3830a29c63bb440b3e74a8a23431d/scripts/optionizer.py#L320-L346
collinsctk/PyQYT
7af3673955f94ff1b2df2f94220cd2dab2e252af
ExtentionPackages/pycparser/c_parser.py
python
CParser.p_postfix_expression_3
(self, p)
postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN | postfix_expression LPAREN RPAREN
postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN | postfix_expression LPAREN RPAREN
[ "postfix_expression", ":", "postfix_expression", "LPAREN", "argument_expression_list", "RPAREN", "|", "postfix_expression", "LPAREN", "RPAREN" ]
def p_postfix_expression_3(self, p): """ postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN | postfix_expression LPAREN RPAREN """ p[0] = c_ast.FuncCall(p[1], p[3] if len(p) == 5 else None, p[1].coord)
[ "def", "p_postfix_expression_3", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "c_ast", ".", "FuncCall", "(", "p", "[", "1", "]", ",", "p", "[", "3", "]", "if", "len", "(", "p", ")", "==", "5", "else", "None", ",", "p", "[", "1", "]", ".", "coord", ")" ]
https://github.com/collinsctk/PyQYT/blob/7af3673955f94ff1b2df2f94220cd2dab2e252af/ExtentionPackages/pycparser/c_parser.py#L1541-L1545
enthought/traits
d22ce1f096e2a6f87c78d7f1bb5bf0abab1a18ff
traits/trait_type.py
python
TraitType.clone
(self, default_value=NoDefaultSpecified, **metadata)
return new
Copy, optionally modifying default value and metadata. Clones the contents of this object into a new instance of the same class, and then modifies the cloned copy using the specified ``default_value`` and ``metadata``. Returns the cloned object as the result. Note that subclasses can change the signature of this method if needed, but should always call the 'super' method if possible. Parameters ---------- default_value : any The new default value for the trait. **metadata : dict A dictionary of metadata names and corresponding values as arbitrary keyword arguments. Returns ------- clone : TraitType Clone of self.
Copy, optionally modifying default value and metadata.
[ "Copy", "optionally", "modifying", "default", "value", "and", "metadata", "." ]
def clone(self, default_value=NoDefaultSpecified, **metadata): """ Copy, optionally modifying default value and metadata. Clones the contents of this object into a new instance of the same class, and then modifies the cloned copy using the specified ``default_value`` and ``metadata``. Returns the cloned object as the result. Note that subclasses can change the signature of this method if needed, but should always call the 'super' method if possible. Parameters ---------- default_value : any The new default value for the trait. **metadata : dict A dictionary of metadata names and corresponding values as arbitrary keyword arguments. Returns ------- clone : TraitType Clone of self. """ if "parent" not in metadata: metadata["parent"] = self new = self.__class__.__new__(self.__class__) new_dict = new.__dict__ new_dict.update(self.__dict__) if "editor" in new_dict: del new_dict["editor"] if "_metadata" in new_dict: new._metadata = new._metadata.copy() else: new._metadata = {} new._metadata.update(metadata) if default_value is not NoDefaultSpecified: new.default_value = default_value if self.validate is not None: try: new.default_value = self.validate( None, None, default_value ) except Exception: pass return new
[ "def", "clone", "(", "self", ",", "default_value", "=", "NoDefaultSpecified", ",", "*", "*", "metadata", ")", ":", "if", "\"parent\"", "not", "in", "metadata", ":", "metadata", "[", "\"parent\"", "]", "=", "self", "new", "=", "self", ".", "__class__", ".", "__new__", "(", "self", ".", "__class__", ")", "new_dict", "=", "new", ".", "__dict__", "new_dict", ".", "update", "(", "self", ".", "__dict__", ")", "if", "\"editor\"", "in", "new_dict", ":", "del", "new_dict", "[", "\"editor\"", "]", "if", "\"_metadata\"", "in", "new_dict", ":", "new", ".", "_metadata", "=", "new", ".", "_metadata", ".", "copy", "(", ")", "else", ":", "new", ".", "_metadata", "=", "{", "}", "new", ".", "_metadata", ".", "update", "(", "metadata", ")", "if", "default_value", "is", "not", "NoDefaultSpecified", ":", "new", ".", "default_value", "=", "default_value", "if", "self", ".", "validate", "is", "not", "None", ":", "try", ":", "new", ".", "default_value", "=", "self", ".", "validate", "(", "None", ",", "None", ",", "default_value", ")", "except", "Exception", ":", "pass", "return", "new" ]
https://github.com/enthought/traits/blob/d22ce1f096e2a6f87c78d7f1bb5bf0abab1a18ff/traits/trait_type.py#L265-L317
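A hedged usage sketch for clone: with a standard traits installation, cloning an Int trait swaps in a new default and records the parent in the metadata, exactly as the code above shows:

from traits.api import Int

base = Int(0)
loud = base.clone(42, desc='the answer')   # new default plus extra metadata

print(loud.default_value)                  # 42 (validated against Int)
print(loud._metadata['parent'] is base)    # True: clone records its parent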
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Lib/nntplib.py
python
NNTP.body
(self, id, file=None)
return self.artcmd('BODY ' + id, file)
Process a BODY command. Argument: - id: article number or message id - file: Filename string or file object to store the article in Returns: - resp: server response if successful - nr: article number - id: message id - list: the lines of the article's body or an empty list if file was used
Process a BODY command. Argument: - id: article number or message id - file: Filename string or file object to store the article in Returns: - resp: server response if successful - nr: article number - id: message id - list: the lines of the article's body or an empty list if file was used
[ "Process", "a", "BODY", "command", ".", "Argument", ":", "-", "id", ":", "article", "number", "or", "message", "id", "-", "file", ":", "Filename", "string", "or", "file", "object", "to", "store", "the", "article", "in", "Returns", ":", "-", "resp", ":", "server", "response", "if", "successful", "-", "nr", ":", "article", "number", "-", "id", ":", "message", "id", "-", "list", ":", "the", "lines", "of", "the", "article", "s", "body", "or", "an", "empty", "list", "if", "file", "was", "used" ]
def body(self, id, file=None): """Process a BODY command. Argument: - id: article number or message id - file: Filename string or file object to store the article in Returns: - resp: server response if successful - nr: article number - id: message id - list: the lines of the article's body or an empty list if file was used""" return self.artcmd('BODY ' + id, file)
[ "def", "body", "(", "self", ",", "id", ",", "file", "=", "None", ")", ":", "return", "self", ".", "artcmd", "(", "'BODY '", "+", "id", ",", "file", ")" ]
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/nntplib.py#L431-L442
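A sketch of NNTP.body in context (Python 2 nntplib, matching this snapshot); the server and group names are placeholders:

import nntplib

s = nntplib.NNTP('news.example.com')                     # hypothetical host
resp, count, first, last, name = s.group('comp.lang.python')
resp, nr, msg_id, lines = s.body(last)                   # body of the newest article
print('\n'.join(lines))
s.quit()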
BlueBrain/BluePyOpt
6d4185479bc6dddb3daad84fa27e0b8457d69652
bluepyopt/ephys/serializer.py
python
DictMixin.to_dict
(self)
return ret
create dictionary
create dictionary
[ "create", "dictionary" ]
def to_dict(self): '''create dictionary''' ret = {} for field in self.SERIALIZED_FIELDS: ret[field] = DictMixin._serializer(getattr(self, field)) ret['class'] = repr(self.__class__) return ret
[ "def", "to_dict", "(", "self", ")", ":", "ret", "=", "{", "}", "for", "field", "in", "self", ".", "SERIALIZED_FIELDS", ":", "ret", "[", "field", "]", "=", "DictMixin", ".", "_serializer", "(", "getattr", "(", "self", ",", "field", ")", ")", "ret", "[", "'class'", "]", "=", "repr", "(", "self", ".", "__class__", ")", "return", "ret" ]
https://github.com/BlueBrain/BluePyOpt/blob/6d4185479bc6dddb3daad84fa27e0b8457d69652/bluepyopt/ephys/serializer.py#L47-L53
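A minimal sketch of a DictMixin consumer, assuming the mixin's contract is just to list serializable attributes in SERIALIZED_FIELDS and that _serializer (not shown here) passes plain values through:

class Mechanism(DictMixin):
    SERIALIZED_FIELDS = ('name', 'suffix')

    def __init__(self, name, suffix):
        self.name, self.suffix = name, suffix

# Roughly: {'name': 'hh', 'suffix': '.apical', 'class': "<class '...Mechanism'>"}
print(Mechanism('hh', '.apical').to_dict())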
golismero/golismero
7d605b937e241f51c1ca4f47b20f755eeefb9d76
thirdparty_libs/nltk/misc/minimalset.py
python
MinimalSet.contexts
(self, minimum=2)
return [c for c in self._contexts if len(self._seen[c]) >= minimum]
Determine which contexts occurred with enough distinct targets. :param minimum: the minimum number of distinct target forms :type minimum: int :rtype list
Determine which contexts occurred with enough distinct targets.
[ "Determine", "which", "contexts", "occurred", "with", "enough", "distinct", "targets", "." ]
def contexts(self, minimum=2): """ Determine which contexts occurred with enough distinct targets. :param minimum: the minimum number of distinct target forms :type minimum: int :rtype list """ return [c for c in self._contexts if len(self._seen[c]) >= minimum]
[ "def", "contexts", "(", "self", ",", "minimum", "=", "2", ")", ":", "return", "[", "c", "for", "c", "in", "self", ".", "_contexts", "if", "len", "(", "self", ".", "_seen", "[", "c", "]", ")", ">=", "minimum", "]" ]
https://github.com/golismero/golismero/blob/7d605b937e241f51c1ca4f47b20f755eeefb9d76/thirdparty_libs/nltk/misc/minimalset.py#L58-L66
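A sketch of how contexts() filters: only contexts seen with at least `minimum` distinct targets survive. It assumes MinimalSet.add(context, target, display) from the same module:

from nltk.misc.minimalset import MinimalSet

ms = MinimalSet()
ms.add(('in', 'the'), 'NN', 'park/NN')
ms.add(('in', 'the'), 'VB', 'park/VB')   # second distinct target for this context
ms.add(('on', 'a'), 'NN', 'whim/NN')     # only one target, filtered out

print(ms.contexts(minimum=2))            # [('in', 'the')]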
DataBrewery/cubes
140133e8c2e3f2ff60631cc3ebc9966d16c1655e
cubes/metadata/cube.py
python
Cube.base_attributes
(self)
return [attr for attr in self.all_attributes if attr.is_base]
Returns a list of attributes that are not derived from other attributes, do not depend on other cube attributes, variables or parameters. Any attribute that has an expression (regardless of its contents, it might be a constant) is considered a derived attribute. The list also contains aggregate attributes that are base – for example, attributes that represent a pre-aggregated column in a table. .. versionadded:: 1.1
Returns a list of attributes that are not derived from other attributes, do not depend on other cube attributes, variables or parameters. Any attribute that has an expression (regardless of its contents, it might be a constant) is considered a derived attribute.
[ "Returns", "a", "list", "of", "attributes", "that", "are", "not", "derived", "from", "other", "attributes", "do", "not", "depend", "on", "other", "cube", "attributes", "variables", "or", "parameters", ".", "Any", "attribute", "that", "has", "an", "expression", "(", "regardless", "of", "it", "s", "contents", "it", "might", "be", "a", "constant", ")", "is", "considered", "derived", "attribute", "." ]
def base_attributes(self): """Returns a list of attributes that are not derived from other attributes, do not depend on other cube attributes, variables or parameters. Any attribute that has an expression (regardless of its contents, it might be a constant) is considered a derived attribute. The list also contains aggregate attributes that are base – for example, attributes that represent a pre-aggregated column in a table. .. versionadded:: 1.1 """ return [attr for attr in self.all_attributes if attr.is_base]
[ "def", "base_attributes", "(", "self", ")", ":", "return", "[", "attr", "for", "attr", "in", "self", ".", "all_attributes", "if", "attr", ".", "is_base", "]" ]
https://github.com/DataBrewery/cubes/blob/140133e8c2e3f2ff60631cc3ebc9966d16c1655e/cubes/metadata/cube.py#L377-L389
containerbuildsystem/atomic-reactor
159e76234d31348e46804e63fa392362706e1c60
atomic_reactor/util.py
python
create_tar_gz_archive
(file_name: str, file_content: str)
return tar.name
Create tar.gz archive with a single file with a specific content :param str file_name: Name of the file packed in archive :param str file_content: File content string :return: Absolute path to the file archive
Create tar.gz archive with a single file with a specific content
[ "Create", "tar", ".", "gz", "archive", "with", "a", "single", "file", "with", "a", "specific", "content" ]
def create_tar_gz_archive(file_name: str, file_content: str): """Create tar.gz archive with a single file with a specific content :param str file_name: Name of the file packed in archive :param str file_content: File content string :return: Absolute path to the file archive """ with tempfile.NamedTemporaryFile('wb', suffix='.tar.gz', delete=False) as f: with tarfile.open(fileobj=f, mode='w:gz') as tar: data = file_content.encode('utf-8') info = tarfile.TarInfo(name=file_name) info.size = len(data) tar.addfile(tarinfo=info, fileobj=io.BytesIO(data)) return tar.name
[ "def", "create_tar_gz_archive", "(", "file_name", ":", "str", ",", "file_content", ":", "str", ")", ":", "with", "tempfile", ".", "NamedTemporaryFile", "(", "'wb'", ",", "suffix", "=", "'.tar.gz'", ",", "delete", "=", "False", ")", "as", "f", ":", "with", "tarfile", ".", "open", "(", "fileobj", "=", "f", ",", "mode", "=", "'w:gz'", ")", "as", "tar", ":", "data", "=", "file_content", ".", "encode", "(", "'utf-8'", ")", "info", "=", "tarfile", ".", "TarInfo", "(", "name", "=", "file_name", ")", "info", ".", "size", "=", "len", "(", "data", ")", "tar", ".", "addfile", "(", "tarinfo", "=", "info", ",", "fileobj", "=", "io", ".", "BytesIO", "(", "data", ")", ")", "return", "tar", ".", "name" ]
https://github.com/containerbuildsystem/atomic-reactor/blob/159e76234d31348e46804e63fa392362706e1c60/atomic_reactor/util.py#L1998-L2013
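A quick round-trip check of create_tar_gz_archive using only the standard library: pack one file, then read it back from the archive path it returns.

import tarfile

path = create_tar_gz_archive('greeting.txt', 'hello\n')
with tarfile.open(path, 'r:gz') as tar:
    member = tar.getmember('greeting.txt')
    # Prints 'hello' - the single member holds exactly the content we packed.
    print(tar.extractfile(member).read().decode('utf-8'))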
wger-project/wger
3a17a2cf133d242d1f8c357faa53cf675a7b3223
wger/measurements/api/views.py
python
MeasurementViewSet.get_queryset
(self)
return Measurement.objects.filter(category__user=self.request.user)
Only allow access to appropriate objects
Only allow access to appropriate objects
[ "Only", "allow", "access", "to", "appropriate", "objects" ]
def get_queryset(self): """ Only allow access to appropriate objects """ return Measurement.objects.filter(category__user=self.request.user)
[ "def", "get_queryset", "(", "self", ")", ":", "return", "Measurement", ".", "objects", ".", "filter", "(", "category__user", "=", "self", ".", "request", ".", "user", ")" ]
https://github.com/wger-project/wger/blob/3a17a2cf133d242d1f8c357faa53cf675a7b3223/wger/measurements/api/views.py#L79-L83
OpenMined/PySyft
f181ca02d307d57bfff9477610358df1a12e3ac9
packages/syft/src/syft/core/tensor/smpc/mpc_tensor.py
python
MPCTensor.__pow__
(self, power: int)
return result
Compute integer power of a number iteratively using mul. - Divide power by 2 and multiply base to itself (if the power is even) - Decrement power by 1 to make it even and then follow the first step Args: power (int): integer power to raise the tensor to Returns: MPCTensor: Result of the pow operation Raises: RuntimeError: if negative power is given
Compute integer power of a number iteratively using mul.
[ "Compute", "integer", "power", "of", "a", "number", "iteratively", "using", "mul", "." ]
def __pow__(self, power: int) -> MPCTensor: """Compute integer power of a number iteratively using mul. - Divide power by 2 and multiply base to itself (if the power is even) - Decrement power by 1 to make it even and then follow the first step Args: power (int): integer power to raise the tensor to Returns: MPCTensor: Result of the pow operation Raises: RuntimeError: if negative power is given """ # TODO: Implement after we have reciprocal function. if power < 0: raise RuntimeError("Negative integer powers not supported yet.") base = self # TODO: should modify for general ring sizes. result = np.ones(shape=self.shape, dtype=np.int32) while power > 0: # If power is odd if power % 2 == 1: result = base * result result.block # Divide the power by 2 power = power // 2 # Multiply base to itself base = base * base base.block return result
[ "def", "__pow__", "(", "self", ",", "power", ":", "int", ")", "->", "MPCTensor", ":", "# TODO: Implement after we have reciprocal function.", "if", "power", "<", "0", ":", "raise", "RuntimeError", "(", "\"Negative integer powers not supported yet.\"", ")", "base", "=", "self", "# TODO: should modify for general ring sizes.", "result", "=", "np", ".", "ones", "(", "shape", "=", "self", ".", "shape", ",", "dtype", "=", "np", ".", "int32", ")", "while", "power", ">", "0", ":", "# If power is odd", "if", "power", "%", "2", "==", "1", ":", "result", "=", "base", "*", "result", "result", ".", "block", "# Divide the power by 2", "power", "=", "power", "//", "2", "# Multiply base to itself", "base", "=", "base", "*", "base", "base", ".", "block", "return", "result" ]
https://github.com/OpenMined/PySyft/blob/f181ca02d307d57bfff9477610358df1a12e3ac9/packages/syft/src/syft/core/tensor/smpc/mpc_tensor.py#L839-L875
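The loop above is classic exponentiation by squaring; the same control flow on plain integers, without the MPC machinery (no secret sharing, no .block), looks like this:

def int_pow(base, power):
    if power < 0:
        raise RuntimeError("Negative integer powers not supported yet.")
    result = 1
    while power > 0:
        if power % 2 == 1:      # odd exponent: fold one factor into result
            result = base * result
        power = power // 2      # halve the exponent
        base = base * base      # square the base
    return result

assert int_pow(3, 5) == 243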
imagr/imagr
e54bcf3f0f951babcd2fa153de2dd8556aa3506d
Imagr/gmacpyutil/systemconfig.py
python
SystemProfiler.GetDiskSerialNumber
(self)
Retrieves the primary disk serial number. Returns: string of serial number Raises: SystemProfilerError: when disk0 is not found on SATA bus.
Retrieves the primary disk serial number.
[ "Retrieves", "the", "primary", "disk", "serial", "number", "." ]
def GetDiskSerialNumber(self): """Retrieves the primary disk serial number. Returns: string of serial number Raises: SystemProfilerError: when disk0 is not found on SATA bus. """ # the order is important so we prefer SATA then RAID finally PATA sp_types = ['SPSerialATADataType', 'SPHardwareRAIDDataType', 'SPParallelATADataType'] for sp_type in sp_types: for data in self._GetSystemProfile(sp_type): if data.get('_dataType', None) == sp_type: for controller in data['_items']: for device in controller.get('_items', []): if device.get('bsd_name', '').find('disk0') > -1: logging.debug('device_serial: %s', device['device_serial']) return device['device_serial'] raise SystemProfilerError('Could not find disk0')
[ "def", "GetDiskSerialNumber", "(", "self", ")", ":", "# the order is important so we prefer SATA then RAID finally PATA", "sp_types", "=", "[", "'SPSerialATADataType'", ",", "'SPHardwareRAIDDataType'", ",", "'SPParallelATADataType'", "]", "for", "sp_type", "in", "sp_types", ":", "for", "data", "in", "self", ".", "_GetSystemProfile", "(", "sp_type", ")", ":", "if", "data", ".", "get", "(", "'_dataType'", ",", "None", ")", "==", "sp_type", ":", "for", "controller", "in", "data", "[", "'_items'", "]", ":", "for", "device", "in", "controller", ".", "get", "(", "'_items'", ",", "[", "]", ")", ":", "if", "device", ".", "get", "(", "'bsd_name'", ",", "''", ")", ".", "find", "(", "'disk0'", ")", ">", "-", "1", ":", "logging", ".", "debug", "(", "'device_serial: %s'", ",", "device", "[", "'device_serial'", "]", ")", "return", "device", "[", "'device_serial'", "]", "raise", "SystemProfilerError", "(", "'Could not find disk0'", ")" ]
https://github.com/imagr/imagr/blob/e54bcf3f0f951babcd2fa153de2dd8556aa3506d/Imagr/gmacpyutil/systemconfig.py#L305-L325
FederatedAI/FATE
32540492623568ecd1afcb367360133616e02fa3
python/federatedml/evaluation/evaluation.py
python
Evaluation.obtain_data
(self, data_list)
return data_list
[]
def obtain_data(self, data_list): return data_list
[ "def", "obtain_data", "(", "self", ",", "data_list", ")", ":", "return", "data_list" ]
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/federatedml/evaluation/evaluation.py#L287-L288
x0rz/EQGRP_Lost_in_Translation
6692b1486f562f027567a49523b8c151a4050988
windows/Resources/Python/Override/Lib/multiprocessing/__init__.py
python
RLock
()
return RLock()
Returns a recursive lock object
Returns a recursive lock object
[ "Returns", "a", "recursive", "lock", "object" ]
def RLock(): ''' Returns a recursive lock object ''' from multiprocessing.synchronize import RLock return RLock()
[ "def", "RLock", "(", ")", ":", "from", "multiprocessing", ".", "synchronize", "import", "RLock", "return", "RLock", "(", ")" ]
https://github.com/x0rz/EQGRP_Lost_in_Translation/blob/6692b1486f562f027567a49523b8c151a4050988/windows/Resources/Python/Override/Lib/multiprocessing/__init__.py#L178-L183
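A short demonstration of why the lock is recursive: the holder can re-acquire it without deadlocking, which a plain Lock would not allow.

from multiprocessing import RLock

lock = RLock()
with lock:
    with lock:                 # same owner re-enters freely
        print('nested acquire ok')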
triaquae/triaquae
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
TriAquae/models/Centos_5.9/paramiko/sftp_file.py
python
SFTPFile.check
(self, hash_algorithm, offset=0, length=0, block_size=0)
return data
Ask the server for a hash of a section of this file. This can be used to verify a successful upload or download, or for various rsync-like operations. The file is hashed from C{offset}, for C{length} bytes. If C{length} is 0, the remainder of the file is hashed. Thus, if both C{offset} and C{length} are zero, the entire file is hashed. Normally, C{block_size} will be 0 (the default), and this method will return a byte string representing the requested hash (for example, a string of length 16 for MD5, or 20 for SHA-1). If a non-zero C{block_size} is given, each chunk of the file (from C{offset} to C{offset + length}) of C{block_size} bytes is computed as a separate hash. The hash results are all concatenated and returned as a single string. For example, C{check('sha1', 0, 1024, 512)} will return a string of length 40. The first 20 bytes will be the SHA-1 of the first 512 bytes of the file, and the last 20 bytes will be the SHA-1 of the next 512 bytes. @param hash_algorithm: the name of the hash algorithm to use (normally C{"sha1"} or C{"md5"}) @type hash_algorithm: str @param offset: offset into the file to begin hashing (0 means to start from the beginning) @type offset: int or long @param length: number of bytes to hash (0 means continue to the end of the file) @type length: int or long @param block_size: number of bytes to hash per result (must not be less than 256; 0 means to compute only one hash of the entire segment) @type block_size: int @return: string of bytes representing the hash of each block, concatenated together @rtype: str @note: Many (most?) servers don't support this extension yet. @raise IOError: if the server doesn't support the "check-file" extension, or possibly doesn't support the hash algorithm requested @since: 1.4
Ask the server for a hash of a section of this file. This can be used to verify a successful upload or download, or for various rsync-like operations. The file is hashed from C{offset}, for C{length} bytes. If C{length} is 0, the remainder of the file is hashed. Thus, if both C{offset} and C{length} are zero, the entire file is hashed. Normally, C{block_size} will be 0 (the default), and this method will return a byte string representing the requested hash (for example, a string of length 16 for MD5, or 20 for SHA-1). If a non-zero C{block_size} is given, each chunk of the file (from C{offset} to C{offset + length}) of C{block_size} bytes is computed as a separate hash. The hash results are all concatenated and returned as a single string. For example, C{check('sha1', 0, 1024, 512)} will return a string of length 40. The first 20 bytes will be the SHA-1 of the first 512 bytes of the file, and the last 20 bytes will be the SHA-1 of the next 512 bytes.
[ "Ask", "the", "server", "for", "a", "hash", "of", "a", "section", "of", "this", "file", ".", "This", "can", "be", "used", "to", "verify", "a", "successful", "upload", "or", "download", "or", "for", "various", "rsync", "-", "like", "operations", ".", "The", "file", "is", "hashed", "from", "C", "{", "offset", "}", "for", "C", "{", "length", "}", "bytes", ".", "If", "C", "{", "length", "}", "is", "0", "the", "remainder", "of", "the", "file", "is", "hashed", ".", "Thus", "if", "both", "C", "{", "offset", "}", "and", "C", "{", "length", "}", "are", "zero", "the", "entire", "file", "is", "hashed", ".", "Normally", "C", "{", "block_size", "}", "will", "be", "0", "(", "the", "default", ")", "and", "this", "method", "will", "return", "a", "byte", "string", "representing", "the", "requested", "hash", "(", "for", "example", "a", "string", "of", "length", "16", "for", "MD5", "or", "20", "for", "SHA", "-", "1", ")", ".", "If", "a", "non", "-", "zero", "C", "{", "block_size", "}", "is", "given", "each", "chunk", "of", "the", "file", "(", "from", "C", "{", "offset", "}", "to", "C", "{", "offset", "+", "length", "}", ")", "of", "C", "{", "block_size", "}", "bytes", "is", "computed", "as", "a", "separate", "hash", ".", "The", "hash", "results", "are", "all", "concatenated", "and", "returned", "as", "a", "single", "string", ".", "For", "example", "C", "{", "check", "(", "sha1", "0", "1024", "512", ")", "}", "will", "return", "a", "string", "of", "length", "40", ".", "The", "first", "20", "bytes", "will", "be", "the", "SHA", "-", "1", "of", "the", "first", "512", "bytes", "of", "the", "file", "and", "the", "last", "20", "bytes", "will", "be", "the", "SHA", "-", "1", "of", "the", "next", "512", "bytes", "." ]
def check(self, hash_algorithm, offset=0, length=0, block_size=0): """ Ask the server for a hash of a section of this file. This can be used to verify a successful upload or download, or for various rsync-like operations. The file is hashed from C{offset}, for C{length} bytes. If C{length} is 0, the remainder of the file is hashed. Thus, if both C{offset} and C{length} are zero, the entire file is hashed. Normally, C{block_size} will be 0 (the default), and this method will return a byte string representing the requested hash (for example, a string of length 16 for MD5, or 20 for SHA-1). If a non-zero C{block_size} is given, each chunk of the file (from C{offset} to C{offset + length}) of C{block_size} bytes is computed as a separate hash. The hash results are all concatenated and returned as a single string. For example, C{check('sha1', 0, 1024, 512)} will return a string of length 40. The first 20 bytes will be the SHA-1 of the first 512 bytes of the file, and the last 20 bytes will be the SHA-1 of the next 512 bytes. @param hash_algorithm: the name of the hash algorithm to use (normally C{"sha1"} or C{"md5"}) @type hash_algorithm: str @param offset: offset into the file to begin hashing (0 means to start from the beginning) @type offset: int or long @param length: number of bytes to hash (0 means continue to the end of the file) @type length: int or long @param block_size: number of bytes to hash per result (must not be less than 256; 0 means to compute only one hash of the entire segment) @type block_size: int @return: string of bytes representing the hash of each block, concatenated together @rtype: str @note: Many (most?) servers don't support this extension yet. @raise IOError: if the server doesn't support the "check-file" extension, or possibly doesn't support the hash algorithm requested @since: 1.4 """ t, msg = self.sftp._request(CMD_EXTENDED, 'check-file', self.handle, hash_algorithm, long(offset), long(length), block_size) ext = msg.get_string() alg = msg.get_string() data = msg.get_remainder() return data
[ "def", "check", "(", "self", ",", "hash_algorithm", ",", "offset", "=", "0", ",", "length", "=", "0", ",", "block_size", "=", "0", ")", ":", "t", ",", "msg", "=", "self", ".", "sftp", ".", "_request", "(", "CMD_EXTENDED", ",", "'check-file'", ",", "self", ".", "handle", ",", "hash_algorithm", ",", "long", "(", "offset", ")", ",", "long", "(", "length", ")", ",", "block_size", ")", "ext", "=", "msg", ".", "get_string", "(", ")", "alg", "=", "msg", ".", "get_string", "(", ")", "data", "=", "msg", ".", "get_remainder", "(", ")", "return", "data" ]
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/Centos_5.9/paramiko/sftp_file.py#L302-L354
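A hedged sketch of calling check() through paramiko; as the docstring warns, many servers reject the check-file extension, so the IOError branch is the common outcome. Host and credentials are placeholders:

import binascii
import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('sftp.example.com', username='user', password='secret')
sftp = client.open_sftp()
f = sftp.open('data.bin', 'rb')
try:
    digest = f.check('md5', 0, 0, 0)   # one MD5 over the whole file
    print(binascii.hexlify(digest))
except IOError:
    print('server does not support check-file')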
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/config/area_registry.py
python
async_setup
(hass)
return True
Enable the Area Registry views.
Enable the Area Registry views.
[ "Enable", "the", "Area", "Registry", "views", "." ]
async def async_setup(hass): """Enable the Area Registry views.""" websocket_api.async_register_command(hass, websocket_list_areas) websocket_api.async_register_command(hass, websocket_create_area) websocket_api.async_register_command(hass, websocket_delete_area) websocket_api.async_register_command(hass, websocket_update_area) return True
[ "async", "def", "async_setup", "(", "hass", ")", ":", "websocket_api", ".", "async_register_command", "(", "hass", ",", "websocket_list_areas", ")", "websocket_api", ".", "async_register_command", "(", "hass", ",", "websocket_create_area", ")", "websocket_api", ".", "async_register_command", "(", "hass", ",", "websocket_delete_area", ")", "websocket_api", ".", "async_register_command", "(", "hass", ",", "websocket_update_area", ")", "return", "True" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/config/area_registry.py#L9-L15
JiYou/openstack
8607dd488bde0905044b303eb6e52bdea6806923
packages/source/quantum/quantum/rootwrap/wrapper.py
python
load_filters
(filters_path)
return filterlist
Load filters from a list of directories
Load filters from a list of directories
[ "Load", "filters", "from", "a", "list", "of", "directories" ]
def load_filters(filters_path): """Load filters from a list of directories""" filterlist = [] for filterdir in filters_path: if not os.path.isdir(filterdir): continue for filterfile in os.listdir(filterdir): filterconfig = ConfigParser.RawConfigParser() filterconfig.read(os.path.join(filterdir, filterfile)) for (name, value) in filterconfig.items("Filters"): filterdefinition = [string.strip(s) for s in value.split(',')] newfilter = build_filter(*filterdefinition) if newfilter is None: continue filterlist.append(newfilter) return filterlist
[ "def", "load_filters", "(", "filters_path", ")", ":", "filterlist", "=", "[", "]", "for", "filterdir", "in", "filters_path", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "filterdir", ")", ":", "continue", "for", "filterfile", "in", "os", ".", "listdir", "(", "filterdir", ")", ":", "filterconfig", "=", "ConfigParser", ".", "RawConfigParser", "(", ")", "filterconfig", ".", "read", "(", "os", ".", "path", ".", "join", "(", "filterdir", ",", "filterfile", ")", ")", "for", "(", "name", ",", "value", ")", "in", "filterconfig", ".", "items", "(", "\"Filters\"", ")", ":", "filterdefinition", "=", "[", "string", ".", "strip", "(", "s", ")", "for", "s", "in", "value", ".", "split", "(", "','", ")", "]", "newfilter", "=", "build_filter", "(", "*", "filterdefinition", ")", "if", "newfilter", "is", "None", ":", "continue", "filterlist", ".", "append", "(", "newfilter", ")", "return", "filterlist" ]
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/quantum/quantum/rootwrap/wrapper.py#L38-L53
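For orientation, the kind of file this loop parses is an INI with a [Filters] section, one comma-separated definition per option; the sketch below is hypothetical, since the accepted filter kinds and arguments depend on build_filter, which is not shown here:

# /etc/quantum/rootwrap.d/example.filters (hypothetical)
# [Filters]
# ip = CommandFilter, /sbin/ip, root
# kill_dnsmasq = KillFilter, root, /sbin/dnsmasq, -9
#
# Each value is split on ',' and passed to build_filter(*filterdefinition).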
FederatedAI/FATE
32540492623568ecd1afcb367360133616e02fa3
python/federatedml/ensemble/basic_algorithms/decision_tree/hetero/hetero_fast_decision_tree_host.py
python
HeteroFastDecisionTreeHost.set_tree_work_mode
(self, tree_type, target_host_id)
[]
def set_tree_work_mode(self, tree_type, target_host_id): self.tree_type, self.target_host_id = tree_type, target_host_id
[ "def", "set_tree_work_mode", "(", "self", ",", "tree_type", ",", "target_host_id", ")", ":", "self", ".", "tree_type", ",", "self", ".", "target_host_id", "=", "tree_type", ",", "target_host_id" ]
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/federatedml/ensemble/basic_algorithms/decision_tree/hetero/hetero_fast_decision_tree_host.py#L37-L38
IBM/watson-online-store
4c8b60883b319f07c3187d9cb433ef9c3ae29aea
watsononlinestore/watson_online_store.py
python
WatsonOnlineStore.handle_delete_from_cart
(self)
return False
Pulls cart_item from Watson context and deletes from Cloudant DB cart_item in context must be an int or delete will silently fail.
Pulls cart_item from Watson context and deletes from Cloudant DB
[ "Pulls", "cart_item", "from", "Watson", "context", "and", "deletes", "from", "Cloudant", "DB" ]
def handle_delete_from_cart(self): """Pulls cart_item from Watson context and deletes from Cloudant DB cart_item in context must be an int or delete will silently fail. """ email = self.customer.email shopping_list = self.cloudant_online_store.list_shopping_cart(email) try: item_num = int(self.context['cart_item']) except ValueError: LOG.exception("cart_item must be a number") return False for index, item in enumerate(shopping_list): if index+1 == item_num: self.cloudant_online_store.delete_item_shopping_cart(email, item) self.clear_shopping_cart() # no need for user input, return to Watson Dialogue return False
[ "def", "handle_delete_from_cart", "(", "self", ")", ":", "email", "=", "self", ".", "customer", ".", "email", "shopping_list", "=", "self", ".", "cloudant_online_store", ".", "list_shopping_cart", "(", "email", ")", "try", ":", "item_num", "=", "int", "(", "self", ".", "context", "[", "'cart_item'", "]", ")", "except", "ValueError", ":", "LOG", ".", "exception", "(", "\"cart_item must be a number\"", ")", "return", "False", "for", "index", ",", "item", "in", "enumerate", "(", "shopping_list", ")", ":", "if", "index", "+", "1", "==", "item_num", ":", "self", ".", "cloudant_online_store", ".", "delete_item_shopping_cart", "(", "email", ",", "item", ")", "self", ".", "clear_shopping_cart", "(", ")", "# no need for user input, return to Watson Dialogue", "return", "False" ]
https://github.com/IBM/watson-online-store/blob/4c8b60883b319f07c3187d9cb433ef9c3ae29aea/watsononlinestore/watson_online_store.py#L757-L777
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/config/config_entries.py
python
config_entries_progress
(hass, connection, msg)
List flows that are in progress but not started by a user. Example of a non-user initiated flow is a discovered Hue hub that requires user interaction to finish setup.
List flows that are in progress but not started by a user.
[ "List", "flows", "that", "are", "in", "progress", "but", "not", "started", "by", "a", "user", "." ]
def config_entries_progress(hass, connection, msg): """List flows that are in progress but not started by a user. Example of a non-user initiated flow is a discovered Hue hub that requires user interaction to finish setup. """ connection.send_result( msg["id"], [ flw for flw in hass.config_entries.flow.async_progress() if flw["context"]["source"] != config_entries.SOURCE_USER ], )
[ "def", "config_entries_progress", "(", "hass", ",", "connection", ",", "msg", ")", ":", "connection", ".", "send_result", "(", "msg", "[", "\"id\"", "]", ",", "[", "flw", "for", "flw", "in", "hass", ".", "config_entries", ".", "flow", ".", "async_progress", "(", ")", "if", "flw", "[", "\"context\"", "]", "[", "\"source\"", "]", "!=", "config_entries", ".", "SOURCE_USER", "]", ",", ")" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/config/config_entries.py#L220-L233
trakt/Plex-Trakt-Scrobbler
aeb0bfbe62fad4b06c164f1b95581da7f35dce0b
Trakttv.bundle/Contents/Libraries/Shared/CodernityDB/database.py
python
Database._update_indexes
(self, _rev, data)
return _id, new_rev
Performs update operation on all indexes in order
Performs update operation on all indexes in order
[ "Performs", "update", "operation", "on", "all", "indexes", "in", "order" ]
def _update_indexes(self, _rev, data): """ Performs update operation on all indexes in order """ _id, new_rev, db_data = self._update_id_index(_rev, data) for index in self.indexes[1:]: self._single_update_index(index, data, db_data, _id) return _id, new_rev
[ "def", "_update_indexes", "(", "self", ",", "_rev", ",", "data", ")", ":", "_id", ",", "new_rev", ",", "db_data", "=", "self", ".", "_update_id_index", "(", "_rev", ",", "data", ")", "for", "index", "in", "self", ".", "indexes", "[", "1", ":", "]", ":", "self", ".", "_single_update_index", "(", "index", ",", "data", ",", "db_data", ",", "_id", ")", "return", "_id", ",", "new_rev" ]
https://github.com/trakt/Plex-Trakt-Scrobbler/blob/aeb0bfbe62fad4b06c164f1b95581da7f35dce0b/Trakttv.bundle/Contents/Libraries/Shared/CodernityDB/database.py#L674-L681
mars-project/mars
6afd7ed86db77f29cc9470485698ef192ecc6d33
mars/dataframe/base/memory_usage.py
python
DataFrameMemoryUsage._tile_dataframe
(cls, op: "DataFrameMemoryUsage")
return new_op.new_series( [df], dtype=output.dtype, shape=output.shape, index_value=output.index_value, chunks=list(chunks_to_reduce[0, :]), nsplits=op._adapt_nsplits(df.nsplits), )
Tile dataframes using tree reduction
Tile dataframes using tree reduction
[ "Tile", "dataframes", "using", "tree", "reduction" ]
def _tile_dataframe(cls, op: "DataFrameMemoryUsage"): """ Tile dataframes using tree reduction """ df = op.inputs[0] output = op.outputs[0] is_range_index = isinstance(df.index_value.value, IndexValue.RangeIndex) # produce map chunks # allocate matrix of chunks chunks_to_reduce = np.empty(shape=df.chunk_shape, dtype=np.object) for c in df.chunks: new_op = op.copy().reset_key() new_op.stage = OperandStage.map if op.index and is_range_index: # when the index is ``pd.RangeIndex``, the size should be included # after all computations are done new_op.index = False else: # when the chunk is not the first chunk in the row, index size is not needed new_op.index = op.index and c.index[-1] == 0 new_shape = ( (c.shape[-1] + 1,) if c.index[-1] == 0 and op.index else (c.shape[-1],) ) chunks_to_reduce[c.index] = new_op.new_chunk( [c], index=(c.index[-1],), dtype=output.dtype, shape=new_shape, index_value=op._adapt_index(c.columns_value, c.index[-1]), ) # reduce chunks using tree reduction combine_size = options.combine_size while chunks_to_reduce.shape[0] > 1: # allocate matrix of chunks new_chunks_to_reduce = np.empty( ( ceildiv(chunks_to_reduce.shape[0], combine_size), chunks_to_reduce.shape[1], ), dtype=np.object, ) for idx in range(0, chunks_to_reduce.shape[0], combine_size): for idx2 in range(chunks_to_reduce.shape[1]): new_op = op.copy().reset_key() new_op.stage = OperandStage.reduce chunks = list(chunks_to_reduce[idx : idx + combine_size, idx2]) new_chunks_to_reduce[idx // combine_size, idx2] = new_op.new_chunk( chunks, index=(idx2,), dtype=output.dtype, shape=chunks[0].shape, index_value=chunks[0].index_value, ) chunks_to_reduce = new_chunks_to_reduce # handle RangeIndex at final outputs if op.index and is_range_index: chunks_to_reduce[ 0, 0 ].op.range_index_size = df.index_value.to_pandas().memory_usage() # return series with chunks and nsplits new_op = op.copy().reset_key() return new_op.new_series( [df], dtype=output.dtype, shape=output.shape, index_value=output.index_value, chunks=list(chunks_to_reduce[0, :]), nsplits=op._adapt_nsplits(df.nsplits), )
[ "def", "_tile_dataframe", "(", "cls", ",", "op", ":", "\"DataFrameMemoryUsage\"", ")", ":", "df", "=", "op", ".", "inputs", "[", "0", "]", "output", "=", "op", ".", "outputs", "[", "0", "]", "is_range_index", "=", "isinstance", "(", "df", ".", "index_value", ".", "value", ",", "IndexValue", ".", "RangeIndex", ")", "# produce map chunks", "# allocate matrix of chunks", "chunks_to_reduce", "=", "np", ".", "empty", "(", "shape", "=", "df", ".", "chunk_shape", ",", "dtype", "=", "np", ".", "object", ")", "for", "c", "in", "df", ".", "chunks", ":", "new_op", "=", "op", ".", "copy", "(", ")", ".", "reset_key", "(", ")", "new_op", ".", "stage", "=", "OperandStage", ".", "map", "if", "op", ".", "index", "and", "is_range_index", ":", "# when the index is ``pd.RangeIndex``, the size should be included", "# after all computations are done", "new_op", ".", "index", "=", "False", "else", ":", "# when the chunk is not the first chunk in the row, index size is not needed", "new_op", ".", "index", "=", "op", ".", "index", "and", "c", ".", "index", "[", "-", "1", "]", "==", "0", "new_shape", "=", "(", "(", "c", ".", "shape", "[", "-", "1", "]", "+", "1", ",", ")", "if", "c", ".", "index", "[", "-", "1", "]", "==", "0", "and", "op", ".", "index", "else", "(", "c", ".", "shape", "[", "-", "1", "]", ",", ")", ")", "chunks_to_reduce", "[", "c", ".", "index", "]", "=", "new_op", ".", "new_chunk", "(", "[", "c", "]", ",", "index", "=", "(", "c", ".", "index", "[", "-", "1", "]", ",", ")", ",", "dtype", "=", "output", ".", "dtype", ",", "shape", "=", "new_shape", ",", "index_value", "=", "op", ".", "_adapt_index", "(", "c", ".", "columns_value", ",", "c", ".", "index", "[", "-", "1", "]", ")", ",", ")", "# reduce chunks using tree reduction", "combine_size", "=", "options", ".", "combine_size", "while", "chunks_to_reduce", ".", "shape", "[", "0", "]", ">", "1", ":", "# allocate matrix of chunks", "new_chunks_to_reduce", "=", "np", ".", "empty", "(", "(", "ceildiv", "(", "chunks_to_reduce", ".", "shape", "[", "0", "]", ",", "combine_size", ")", ",", "chunks_to_reduce", ".", "shape", "[", "1", "]", ",", ")", ",", "dtype", "=", "np", ".", "object", ",", ")", "for", "idx", "in", "range", "(", "0", ",", "chunks_to_reduce", ".", "shape", "[", "0", "]", ",", "combine_size", ")", ":", "for", "idx2", "in", "range", "(", "chunks_to_reduce", ".", "shape", "[", "1", "]", ")", ":", "new_op", "=", "op", ".", "copy", "(", ")", ".", "reset_key", "(", ")", "new_op", ".", "stage", "=", "OperandStage", ".", "reduce", "chunks", "=", "list", "(", "chunks_to_reduce", "[", "idx", ":", "idx", "+", "combine_size", ",", "idx2", "]", ")", "new_chunks_to_reduce", "[", "idx", "//", "combine_size", ",", "idx2", "]", "=", "new_op", ".", "new_chunk", "(", "chunks", ",", "index", "=", "(", "idx2", ",", ")", ",", "dtype", "=", "output", ".", "dtype", ",", "shape", "=", "chunks", "[", "0", "]", ".", "shape", ",", "index_value", "=", "chunks", "[", "0", "]", ".", "index_value", ",", ")", "chunks_to_reduce", "=", "new_chunks_to_reduce", "# handle RangeIndex at final outputs", "if", "op", ".", "index", "and", "is_range_index", ":", "chunks_to_reduce", "[", "0", ",", "0", "]", ".", "op", ".", "range_index_size", "=", "df", ".", "index_value", ".", "to_pandas", "(", ")", ".", "memory_usage", "(", ")", "# return series with chunks and nsplits", "new_op", "=", "op", ".", "copy", "(", ")", ".", "reset_key", "(", ")", "return", "new_op", ".", "new_series", "(", "[", "df", "]", ",", "dtype", "=", "output", ".", "dtype", ",", "shape", "=", "output", ".", 
"shape", ",", "index_value", "=", "output", ".", "index_value", ",", "chunks", "=", "list", "(", "chunks_to_reduce", "[", "0", ",", ":", "]", ")", ",", "nsplits", "=", "op", ".", "_adapt_nsplits", "(", "df", ".", "nsplits", ")", ",", ")" ]
https://github.com/mars-project/mars/blob/6afd7ed86db77f29cc9470485698ef192ecc6d33/mars/dataframe/base/memory_usage.py#L164-L241
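The reduction loop above is a generic tree reduction; the same idea on plain lists, stripped of the chunk bookkeeping, is just repeated grouping by combine_size:

def tree_reduce(items, combine, combine_size=4):
    # Repeatedly collapse groups of combine_size until one value remains.
    while len(items) > 1:
        items = [combine(items[i:i + combine_size])
                 for i in range(0, len(items), combine_size)]
    return items[0]

print(tree_reduce(list(range(10)), sum))   # 45, reduced over two levels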
pysmt/pysmt
ade4dc2a825727615033a96d31c71e9f53ce4764
pysmt/solvers/yices.py
python
YicesConverter.walk_bv_ror
(self, formula, args, **kwargs)
return res
[]
def walk_bv_ror(self, formula, args, **kwargs): res = yicespy.yices_rotate_right(args[0], formula.bv_rotation_step()) self._check_term_result(res) return res
[ "def", "walk_bv_ror", "(", "self", ",", "formula", ",", "args", ",", "*", "*", "kwargs", ")", ":", "res", "=", "yicespy", ".", "yices_rotate_right", "(", "args", "[", "0", "]", ",", "formula", ".", "bv_rotation_step", "(", ")", ")", "self", ".", "_check_term_result", "(", "res", ")", "return", "res" ]
https://github.com/pysmt/pysmt/blob/ade4dc2a825727615033a96d31c71e9f53ce4764/pysmt/solvers/yices.py#L561-L564
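For reference, what rotate-right means on a fixed-width bit-vector, in plain Python:

def ror(value, step, width):
    step %= width
    mask = (1 << width) - 1
    # Bits shifted off the right end wrap around to the top.
    return ((value >> step) | (value << (width - step))) & mask

assert ror(0b0001, 1, 4) == 0b1000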
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/bin/x86/Debug/Lib/email/message.py
python
Message.set_charset
(self, charset)
Set the charset of the payload to a given character set. charset can be a Charset instance, a string naming a character set, or None. If it is a string it will be converted to a Charset instance. If charset is None, the charset parameter will be removed from the Content-Type field. Anything else will generate a TypeError. The message will be assumed to be of type text/* encoded with charset.input_charset. It will be converted to charset.output_charset and encoded properly, if needed, when generating the plain text representation of the message. MIME headers (MIME-Version, Content-Type, Content-Transfer-Encoding) will be added as needed.
Set the charset of the payload to a given character set.
[ "Set", "the", "charset", "of", "the", "payload", "to", "a", "given", "character", "set", "." ]
def set_charset(self, charset): """Set the charset of the payload to a given character set. charset can be a Charset instance, a string naming a character set, or None. If it is a string it will be converted to a Charset instance. If charset is None, the charset parameter will be removed from the Content-Type field. Anything else will generate a TypeError. The message will be assumed to be of type text/* encoded with charset.input_charset. It will be converted to charset.output_charset and encoded properly, if needed, when generating the plain text representation of the message. MIME headers (MIME-Version, Content-Type, Content-Transfer-Encoding) will be added as needed. """ if charset is None: self.del_param('charset') self._charset = None return if isinstance(charset, basestring): charset = email.charset.Charset(charset) if not isinstance(charset, email.charset.Charset): raise TypeError(charset) # BAW: should we accept strings that can serve as arguments to the # Charset constructor? self._charset = charset if 'MIME-Version' not in self: self.add_header('MIME-Version', '1.0') if 'Content-Type' not in self: self.add_header('Content-Type', 'text/plain', charset=charset.get_output_charset()) else: self.set_param('charset', charset.get_output_charset()) if isinstance(self._payload, unicode): self._payload = self._payload.encode(charset.output_charset) if str(charset) != charset.get_output_charset(): self._payload = charset.body_encode(self._payload) if 'Content-Transfer-Encoding' not in self: cte = charset.get_body_encoding() try: cte(self) except TypeError: self._payload = charset.body_encode(self._payload) self.add_header('Content-Transfer-Encoding', cte)
[ "def", "set_charset", "(", "self", ",", "charset", ")", ":", "if", "charset", "is", "None", ":", "self", ".", "del_param", "(", "'charset'", ")", "self", ".", "_charset", "=", "None", "return", "if", "isinstance", "(", "charset", ",", "basestring", ")", ":", "charset", "=", "email", ".", "charset", ".", "Charset", "(", "charset", ")", "if", "not", "isinstance", "(", "charset", ",", "email", ".", "charset", ".", "Charset", ")", ":", "raise", "TypeError", "(", "charset", ")", "# BAW: should we accept strings that can serve as arguments to the", "# Charset constructor?", "self", ".", "_charset", "=", "charset", "if", "'MIME-Version'", "not", "in", "self", ":", "self", ".", "add_header", "(", "'MIME-Version'", ",", "'1.0'", ")", "if", "'Content-Type'", "not", "in", "self", ":", "self", ".", "add_header", "(", "'Content-Type'", ",", "'text/plain'", ",", "charset", "=", "charset", ".", "get_output_charset", "(", ")", ")", "else", ":", "self", ".", "set_param", "(", "'charset'", ",", "charset", ".", "get_output_charset", "(", ")", ")", "if", "isinstance", "(", "self", ".", "_payload", ",", "unicode", ")", ":", "self", ".", "_payload", "=", "self", ".", "_payload", ".", "encode", "(", "charset", ".", "output_charset", ")", "if", "str", "(", "charset", ")", "!=", "charset", ".", "get_output_charset", "(", ")", ":", "self", ".", "_payload", "=", "charset", ".", "body_encode", "(", "self", ".", "_payload", ")", "if", "'Content-Transfer-Encoding'", "not", "in", "self", ":", "cte", "=", "charset", ".", "get_body_encoding", "(", ")", "try", ":", "cte", "(", "self", ")", "except", "TypeError", ":", "self", ".", "_payload", "=", "charset", ".", "body_encode", "(", "self", ".", "_payload", ")", "self", ".", "add_header", "(", "'Content-Transfer-Encoding'", ",", "cte", ")" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/bin/x86/Debug/Lib/email/message.py#L228-L271
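A hedged sketch of the effect (Python 2 email API, as in this snapshot): set_charset fills in the MIME headers and records the charset's preferred body encoding.

from email.message import Message

msg = Message()
msg.set_payload('hello world')
msg.set_charset('utf-8')

print(msg['Content-Type'])                # text/plain; charset="utf-8"
print(msg['Content-Transfer-Encoding'])   # base64, utf-8's body encoding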
zhaoweicai/Detectron-Cascade-RCNN
5a297fcc16eab6c26b7b1a9fe2767c626730f03b
detectron/utils/io.py
python
cache_url
(url_or_file, cache_dir)
return cache_file_path
Download the file specified by the URL to the cache_dir and return the path to the cached file. If the argument is not a URL, simply return it as is.
Download the file specified by the URL to the cache_dir and return the path to the cached file. If the argument is not a URL, simply return it as is.
[ "Download", "the", "file", "specified", "by", "the", "URL", "to", "the", "cache_dir", "and", "return", "the", "path", "to", "the", "cached", "file", ".", "If", "the", "argument", "is", "not", "a", "URL", "simply", "return", "it", "as", "is", "." ]
def cache_url(url_or_file, cache_dir): """Download the file specified by the URL to the cache_dir and return the path to the cached file. If the argument is not a URL, simply return it as is. """ is_url = re.match(r'^(?:http)s?://', url_or_file, re.IGNORECASE) is not None if not is_url: return url_or_file url = url_or_file assert url.startswith(_DETECTRON_S3_BASE_URL), \ ('Detectron only automatically caches URLs in the Detectron S3 ' 'bucket: {}').format(_DETECTRON_S3_BASE_URL) cache_file_path = url.replace(_DETECTRON_S3_BASE_URL, cache_dir) if os.path.exists(cache_file_path): assert_cache_file_is_ok(url, cache_file_path) return cache_file_path cache_file_dir = os.path.dirname(cache_file_path) if not os.path.exists(cache_file_dir): os.makedirs(cache_file_dir) logger.info('Downloading remote file {} to {}'.format(url, cache_file_path)) download_url(url, cache_file_path) assert_cache_file_is_ok(url, cache_file_path) return cache_file_path
[ "def", "cache_url", "(", "url_or_file", ",", "cache_dir", ")", ":", "is_url", "=", "re", ".", "match", "(", "r'^(?:http)s?://'", ",", "url_or_file", ",", "re", ".", "IGNORECASE", ")", "is", "not", "None", "if", "not", "is_url", ":", "return", "url_or_file", "url", "=", "url_or_file", "assert", "url", ".", "startswith", "(", "_DETECTRON_S3_BASE_URL", ")", ",", "(", "'Detectron only automatically caches URLs in the Detectron S3 '", "'bucket: {}'", ")", ".", "format", "(", "_DETECTRON_S3_BASE_URL", ")", "cache_file_path", "=", "url", ".", "replace", "(", "_DETECTRON_S3_BASE_URL", ",", "cache_dir", ")", "if", "os", ".", "path", ".", "exists", "(", "cache_file_path", ")", ":", "assert_cache_file_is_ok", "(", "url", ",", "cache_file_path", ")", "return", "cache_file_path", "cache_file_dir", "=", "os", ".", "path", ".", "dirname", "(", "cache_file_path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "cache_file_dir", ")", ":", "os", ".", "makedirs", "(", "cache_file_dir", ")", "logger", ".", "info", "(", "'Downloading remote file {} to {}'", ".", "format", "(", "url", ",", "cache_file_path", ")", ")", "download_url", "(", "url", ",", "cache_file_path", ")", "assert_cache_file_is_ok", "(", "url", ",", "cache_file_path", ")", "return", "cache_file_path" ]
https://github.com/zhaoweicai/Detectron-Cascade-RCNN/blob/5a297fcc16eab6c26b7b1a9fe2767c626730f03b/detectron/utils/io.py#L43-L70
GoogleCloudPlatform/professional-services
0c707aa97437f3d154035ef8548109b7882f71da
tools/hive-bigquery/hive_to_bigquery/bigquery_component.py
python
BigQueryComponent.create_table
(self, dataset_id, table_name, schema)
Creates BigQuery table. Args: dataset_id (str): BigQuery dataset id. table_name (str): BigQuery table name. schema (List[google.cloud.bigquery.schema.SchemaField]): Schema of the table to be created.
Creates BigQuery table.
[ "Creates", "BigQuery", "table", "." ]
def create_table(self, dataset_id, table_name, schema): """Creates BigQuery table. Args: dataset_id (str): BigQuery dataset id. table_name (str): BigQuery table name. schema (List[google.cloud.bigquery.schema.SchemaField]): Schema of the table to be created. """ dataset_ref = self.client.dataset(dataset_id) table_ref = dataset_ref.table(table_name) table = bigquery.Table(table_ref, schema) self.client.create_table(table)
[ "def", "create_table", "(", "self", ",", "dataset_id", ",", "table_name", ",", "schema", ")", ":", "dataset_ref", "=", "self", ".", "client", ".", "dataset", "(", "dataset_id", ")", "table_ref", "=", "dataset_ref", ".", "table", "(", "table_name", ")", "table", "=", "bigquery", ".", "Table", "(", "table_ref", ",", "schema", ")", "self", ".", "client", ".", "create_table", "(", "table", ")" ]
https://github.com/GoogleCloudPlatform/professional-services/blob/0c707aa97437f3d154035ef8548109b7882f71da/tools/hive-bigquery/hive_to_bigquery/bigquery_component.py#L118-L131
peering-manager/peering-manager
62c870fb9caa6dfc056feb77c595d45bc3c4988a
devices/crypto/juniper.py
python
__gap_encode
(pc, prev, encode)
return crypt
[]
def __gap_encode(pc, prev, encode): __ord = ord(pc) crypt = "" gaps = [] for mod in __reverse(encode): gaps.insert(0, int(__ord / mod)) __ord %= mod for gap in gaps: gap += ALPHA_NUM[prev] + 1 prev = NUM_ALPHA[gap % len(NUM_ALPHA)] crypt += prev return crypt
[ "def", "__gap_encode", "(", "pc", ",", "prev", ",", "encode", ")", ":", "__ord", "=", "ord", "(", "pc", ")", "crypt", "=", "\"\"", "gaps", "=", "[", "]", "for", "mod", "in", "__reverse", "(", "encode", ")", ":", "gaps", ".", "insert", "(", "0", ",", "int", "(", "__ord", "/", "mod", ")", ")", "__ord", "%=", "mod", "for", "gap", "in", "gaps", ":", "gap", "+=", "ALPHA_NUM", "[", "prev", "]", "+", "1", "prev", "=", "NUM_ALPHA", "[", "gap", "%", "len", "(", "NUM_ALPHA", ")", "]", "crypt", "+=", "prev", "return", "crypt" ]
https://github.com/peering-manager/peering-manager/blob/62c870fb9caa6dfc056feb77c595d45bc3c4988a/devices/crypto/juniper.py#L68-L82
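The first loop in __gap_encode is a mixed-radix decomposition of ord(pc): each modulus in the reversed encode tuple peels off one digit, so zipping the digits back against the original moduli reconstructs the code point. A standalone sketch, with (1, 4, 32) as a stand-in for one of the module's encode tuples:

def mixed_radix_digits(value, moduli):
    digits = []
    for mod in reversed(moduli):
        digits.insert(0, value // mod)
        value %= mod
    return digits

digits = mixed_radix_digits(ord('A'), (1, 4, 32))
print(digits)                                              # [1, 0, 2]
assert sum(d * m for d, m in zip(digits, (1, 4, 32))) == ord('A')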