Dataset schema:

column              type            min       max
nwo                 stringlengths   5         106
sha                 stringlengths   40        40
path                stringlengths   4         174
language            stringclasses   1 value
identifier          stringlengths   1         140
parameters          stringlengths   0         87.7k
argument_list       stringclasses   1 value
return_statement    stringlengths   0         426k
docstring           stringlengths   0         64.3k
docstring_summary   stringlengths   0         26.3k
docstring_tokens    list
function            stringlengths   18        4.83M
function_tokens     list
url                 stringlengths   83        304
elbayadm/attn2d
982653439dedc7306e484e00b3dfb90e2cd7c9e1
examples/pervasive/modules/archive/dilated_resnet.py
python
DilatedResnet.forward
(self, x, encoder_mask=None, decoder_mask=None, incremental_state=None)
return add_up
Input : N, Tt, Ts, C Output : N, Tt, Ts, C
Input : N, Tt, Ts, C Output : N, Tt, Ts, C
[ "Input", ":", "N", "Tt", "Ts", "C", "Output", ":", "N", "Tt", "Ts", "C" ]
def forward(self, x, encoder_mask=None, decoder_mask=None, incremental_state=None):
    """
    Input : N, Tt, Ts, C
    Output : N, Tt, Ts, C
    """
    if self.reduce_channels is not None:
        x = self.reduce_channels(x)
    add_up = self.add_up_scale * x
    for layer in self.residual_blocks:
        x = layer(x,
                  encoder_mask=encoder_mask,
                  decoder_mask=decoder_mask,
                  incremental_state=incremental_state)
        add_up += self.add_up_scale * x
    return add_up
[ "def", "forward", "(", "self", ",", "x", ",", "encoder_mask", "=", "None", ",", "decoder_mask", "=", "None", ",", "incremental_state", "=", "None", ")", ":", "if", "self", ".", "reduce_channels", "is", "not", "None", ":", "x", "=", "self", ".", "reduce_channels", "(", "x", ")", "add_up", "=", "self", ".", "add_up_scale", "*", "x", "for", "layer", "in", "self", ".", "residual_blocks", ":", "x", "=", "layer", "(", "x", ",", "encoder_mask", "=", "encoder_mask", ",", "decoder_mask", "=", "decoder_mask", ",", "incremental_state", "=", "incremental_state", ")", "add_up", "+=", "self", ".", "add_up_scale", "*", "x", "return", "add_up" ]
https://github.com/elbayadm/attn2d/blob/982653439dedc7306e484e00b3dfb90e2cd7c9e1/examples/pervasive/modules/archive/dilated_resnet.py#L50-L67
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/pip/_internal/vcs/subversion.py
python
Subversion.is_commit_id_equal
(cls, dest, name)
return False
Always assume the versions don't match
Always assume the versions don't match
[ "Always", "assume", "the", "versions", "don", "t", "match" ]
def is_commit_id_equal(cls, dest, name):
    """Always assume the versions don't match"""
    return False
[ "def", "is_commit_id_equal", "(", "cls", ",", "dest", ",", "name", ")", ":", "return", "False" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/pip/_internal/vcs/subversion.py#L229-L231
google/coursebuilder-core
08f809db3226d9269e30d5edd0edd33bd22041f4
coursebuilder/modules/analytics/clustering.py
python
_has_right_side
(dim)
return dim.get(DIM_HIGH) != None and dim.get(DIM_HIGH) != ''
Returns True if the value of dim[DIM_HIGH] is not None or ''.
Returns True if the value of dim[DIM_HIGH] is not None or ''.
[ "Returns", "True", "if", "the", "value", "of", "dim", "[", "DIM_HIGH", "]", "is", "not", "None", "or", "." ]
def _has_right_side(dim):
    """Returns True if the value of dim[DIM_HIGH] is not None or ''."""
    return dim.get(DIM_HIGH) != None and dim.get(DIM_HIGH) != ''
[ "def", "_has_right_side", "(", "dim", ")", ":", "return", "dim", ".", "get", "(", "DIM_HIGH", ")", "!=", "None", "and", "dim", ".", "get", "(", "DIM_HIGH", ")", "!=", "''" ]
https://github.com/google/coursebuilder-core/blob/08f809db3226d9269e30d5edd0edd33bd22041f4/coursebuilder/modules/analytics/clustering.py#L188-L190
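A small behavior sketch for the record above (hypothetical dict values; assumes _has_right_side and DIM_HIGH from clustering.py are in scope): both None and the empty string count as "no right side".

# Illustrative values only; DIM_HIGH is the key defined in clustering.py.
assert _has_right_side({DIM_HIGH: 10}) is True
assert _has_right_side({DIM_HIGH: ''}) is False
assert _has_right_side({}) is False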
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/studio/v2/flow/__init__.py
python
FlowInstance.delete
(self)
return self._proxy.delete()
Deletes the FlowInstance :returns: True if delete succeeds, False otherwise :rtype: bool
Deletes the FlowInstance
[ "Deletes", "the", "FlowInstance" ]
def delete(self):
    """
    Deletes the FlowInstance

    :returns: True if delete succeeds, False otherwise
    :rtype: bool
    """
    return self._proxy.delete()
[ "def", "delete", "(", "self", ")", ":", "return", "self", ".", "_proxy", ".", "delete", "(", ")" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/studio/v2/flow/__init__.py#L523-L530
KalleHallden/AutoTimer
2d954216700c4930baa154e28dbddc34609af7ce
env/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py
python
PyPIJSONLocator.get_distribution_names
(self)
Return all the distribution names known to this locator.
Return all the distribution names known to this locator.
[ "Return", "all", "the", "distribution", "names", "known", "to", "this", "locator", "." ]
def get_distribution_names(self):
    """
    Return all the distribution names known to this locator.
    """
    raise NotImplementedError('Not available from this locator')
[ "def", "get_distribution_names", "(", "self", ")", ":", "raise", "NotImplementedError", "(", "'Not available from this locator'", ")" ]
https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py#L461-L465
fredrik-johansson/mpmath
c11db84b3237bd8fc6721f5a0c5d7c0c98a24dc1
mpmath/math2.py
python
erfc
(x)
return 1.0 - _erf_taylor(x)
erfc of a real number.
erfc of a real number.
[ "erfc", "of", "a", "real", "number", "." ]
def erfc(x):
    """
    erfc of a real number.
    """
    x = float(x)
    if x != x:
        return x
    if x < 0.0:
        if x < -6.0:
            return 2.0
        return 2.0 - erfc(-x)
    if x > 9.0:
        return _erfc_asymp(x)
    if x >= 1.0:
        return _erfc_mid(x)
    return 1.0 - _erf_taylor(x)
[ "def", "erfc", "(", "x", ")", ":", "x", "=", "float", "(", "x", ")", "if", "x", "!=", "x", ":", "return", "x", "if", "x", "<", "0.0", ":", "if", "x", "<", "-", "6.0", ":", "return", "2.0", "return", "2.0", "-", "erfc", "(", "-", "x", ")", "if", "x", ">", "9.0", ":", "return", "_erfc_asymp", "(", "x", ")", "if", "x", ">=", "1.0", ":", "return", "_erfc_mid", "(", "x", ")", "return", "1.0", "-", "_erf_taylor", "(", "x", ")" ]
https://github.com/fredrik-johansson/mpmath/blob/c11db84b3237bd8fc6721f5a0c5d7c0c98a24dc1/mpmath/math2.py#L440-L455
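As a quick illustration of the record above, the pure-Python erfc can be spot-checked against the standard library's C implementation. A minimal sketch, assuming mpmath is installed; the 1e-9 tolerance is an arbitrary loose bound, not a documented accuracy guarantee.

import math
from mpmath.math2 import erfc  # the function shown in this record

# Spot-check a few points against math.erfc.
for x in (-2.0, 0.0, 0.5, 3.0, 10.0):
    assert abs(erfc(x) - math.erfc(x)) < 1e-9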
napari/napari
dbf4158e801fa7a429de8ef1cdee73bf6d64c61e
napari/utils/context/_expressions.py
python
parse_expression
(expr: str)
Parse string expression into an :class:`Expr` instance. Parameters ---------- expr : str Expression to parse. Returns ------- Expr Instance of `Expr`. Raises ------ SyntaxError If the provided string is not an expression (e.g. it's a statement), or if it uses any forbidden syntax components (e.g. Call, Attribute, Containers, Indexing, Slicing, f-strings, named expression, comprehensions.)
Parse string expression into an :class:`Expr` instance.
[ "Parse", "string", "expression", "into", "an", ":", "class", ":", "Expr", "instance", "." ]
def parse_expression(expr: str) -> Expr:
    """Parse string expression into an :class:`Expr` instance.

    Parameters
    ----------
    expr : str
        Expression to parse.

    Returns
    -------
    Expr
        Instance of `Expr`.

    Raises
    ------
    SyntaxError
        If the provided string is not an expression (e.g. it's a statement),
        or if it uses any forbidden syntax components (e.g. Call, Attribute,
        Containers, Indexing, Slicing, f-strings, named expression,
        comprehensions.)
    """
    try:
        # mode='eval' means the expr must consist of a single expression
        tree = ast.parse(expr, mode='eval')
        if not isinstance(tree, ast.Expression):
            raise SyntaxError  # pragma: no cover
        return ExprTranformer().visit(tree.body)
    except SyntaxError as e:
        raise SyntaxError(
            trans._(
                "{expr} is not a valid expression: ({error}).",
                deferred=True,
                expr=f"{expr!r}",
                error=e,
            )
        ) from None
[ "def", "parse_expression", "(", "expr", ":", "str", ")", "->", "Expr", ":", "try", ":", "# mode='eval' means the expr must consist of a single expression", "tree", "=", "ast", ".", "parse", "(", "expr", ",", "mode", "=", "'eval'", ")", "if", "not", "isinstance", "(", "tree", ",", "ast", ".", "Expression", ")", ":", "raise", "SyntaxError", "# pragma: no cover", "return", "ExprTranformer", "(", ")", ".", "visit", "(", "tree", ".", "body", ")", "except", "SyntaxError", "as", "e", ":", "raise", "SyntaxError", "(", "trans", ".", "_", "(", "\"{expr} is not a valid expression: ({error}).\"", ",", "deferred", "=", "True", ",", "expr", "=", "f\"{expr!r}\"", ",", "error", "=", "e", ",", ")", ")", "from", "None" ]
https://github.com/napari/napari/blob/dbf4158e801fa7a429de8ef1cdee73bf6d64c61e/napari/utils/context/_expressions.py#L59-L94
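A hedged usage sketch for the record above (assumes napari is installed; the module is private, so the import path shown in this record may differ between versions):

from napari.utils.context._expressions import parse_expression

expr = parse_expression("2 + 2 > 3")   # returns an Expr node
# Statements such as "x = 1" raise SyntaxError, per the docstring above.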
djaodjin/djaodjin-saas
17f66871bd571741b06d93876da401a05a2cc162
saas/utils.py
python
get_user_serializer
()
return import_string(settings.USER_SERIALIZER)
Returns the user serializer model that is active in this project.
Returns the user serializer model that is active in this project.
[ "Returns", "the", "user", "serializer", "model", "that", "is", "active", "in", "this", "project", "." ]
def get_user_serializer():
    """
    Returns the user serializer model that is active in this project.
    """
    from . import settings
    return import_string(settings.USER_SERIALIZER)
[ "def", "get_user_serializer", "(", ")", ":", "from", ".", "import", "settings", "return", "import_string", "(", "settings", ".", "USER_SERIALIZER", ")" ]
https://github.com/djaodjin/djaodjin-saas/blob/17f66871bd571741b06d93876da401a05a2cc162/saas/utils.py#L195-L200
NVIDIA/NeMo
5b0c0b4dec12d87d3cd960846de4105309ce938e
nemo/collections/asr/parts/submodules/multi_head_attention.py
python
MultiHeadAttention.__init__
(self, n_head, n_feat, dropout_rate)
Construct an MultiHeadedAttention object.
Construct an MultiHeadedAttention object.
[ "Construct", "an", "MultiHeadedAttention", "object", "." ]
def __init__(self, n_head, n_feat, dropout_rate):
    """Construct an MultiHeadedAttention object."""
    super(MultiHeadAttention, self).__init__()
    assert n_feat % n_head == 0
    # We assume d_v always equals d_k
    self.d_k = n_feat // n_head
    self.h = n_head
    self.linear_q = nn.Linear(n_feat, n_feat)
    self.linear_k = nn.Linear(n_feat, n_feat)
    self.linear_v = nn.Linear(n_feat, n_feat)
    self.linear_out = nn.Linear(n_feat, n_feat)
    self.dropout = nn.Dropout(p=dropout_rate)
[ "def", "__init__", "(", "self", ",", "n_head", ",", "n_feat", ",", "dropout_rate", ")", ":", "super", "(", "MultiHeadAttention", ",", "self", ")", ".", "__init__", "(", ")", "assert", "n_feat", "%", "n_head", "==", "0", "# We assume d_v always equals d_k", "self", ".", "d_k", "=", "n_feat", "//", "n_head", "self", ".", "h", "=", "n_head", "self", ".", "linear_q", "=", "nn", ".", "Linear", "(", "n_feat", ",", "n_feat", ")", "self", ".", "linear_k", "=", "nn", ".", "Linear", "(", "n_feat", ",", "n_feat", ")", "self", ".", "linear_v", "=", "nn", ".", "Linear", "(", "n_feat", ",", "n_feat", ")", "self", ".", "linear_out", "=", "nn", ".", "Linear", "(", "n_feat", ",", "n_feat", ")", "self", ".", "dropout", "=", "nn", ".", "Dropout", "(", "p", "=", "dropout_rate", ")" ]
https://github.com/NVIDIA/NeMo/blob/5b0c0b4dec12d87d3cd960846de4105309ce938e/nemo/collections/asr/parts/submodules/multi_head_attention.py#L55-L66
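For the record above, the key constraint is that n_feat must divide evenly into n_head heads of size d_k. A minimal shape sketch with assumed example sizes (not taken from NeMo itself):

import torch
import torch.nn as nn

n_feat, n_head = 512, 8             # assumed example sizes
d_k = n_feat // n_head              # 64, the per-head feature size
linear_q = nn.Linear(n_feat, n_feat)

x = torch.randn(2, 10, n_feat)              # (batch, time, n_feat)
q = linear_q(x).view(2, 10, n_head, d_k)    # split features into heads
print(q.shape)                              # torch.Size([2, 10, 8, 64])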
google/trax
d6cae2067dedd0490b78d831033607357e975015
trax/layers/base.py
python
Layer._settable_attrs
(self)
return ('weights', 'state', 'rng')
We only allow to set these attributes in Trax layers to prevent typos.
We only allow to set these attributes in Trax layers to prevent typos.
[ "We", "only", "allow", "to", "set", "these", "attributes", "in", "Trax", "layers", "to", "prevent", "typos", "." ]
def _settable_attrs(self):
    """We only allow to set these attributes in Trax layers to prevent typos."""
    return ('weights', 'state', 'rng')
[ "def", "_settable_attrs", "(", "self", ")", ":", "return", "(", "'weights'", ",", "'state'", ",", "'rng'", ")" ]
https://github.com/google/trax/blob/d6cae2067dedd0490b78d831033607357e975015/trax/layers/base.py#L676-L678
pyvista/pyvista
012dbb95a9aae406c3cd4cd94fc8c477f871e426
pyvista/plotting/charts.py
python
StackPlot.update
(self, x, ys)
Update the locations and/or size of the stacks (areas) in this plot. Parameters ---------- x : array_like The new x coordinates of the stacks (areas) to draw. ys : list or tuple of array_like The new sizes of the stacks (areas) to draw. Examples -------- Create a stack plot. >>> import pyvista >>> chart = pyvista.Chart2D() >>> plot = chart.stack([0, 1, 2], [[2, 1, 3],[1, 2, 1]]) >>> chart.show() Update the stack sizes. >>> plot.update([0, 1, 2], [[3, 1, 2], [0, 3, 1]]) >>> chart.show()
Update the locations and/or size of the stacks (areas) in this plot.
[ "Update", "the", "locations", "and", "/", "or", "size", "of", "the", "stacks", "(", "areas", ")", "in", "this", "plot", "." ]
def update(self, x, ys):
    """Update the locations and/or size of the stacks (areas) in this plot.

    Parameters
    ----------
    x : array_like
        The new x coordinates of the stacks (areas) to draw.
    ys : list or tuple of array_like
        The new sizes of the stacks (areas) to draw.

    Examples
    --------
    Create a stack plot.

    >>> import pyvista
    >>> chart = pyvista.Chart2D()
    >>> plot = chart.stack([0, 1, 2], [[2, 1, 3],[1, 2, 1]])
    >>> chart.show()

    Update the stack sizes.

    >>> plot.update([0, 1, 2], [[3, 1, 2], [0, 3, 1]])
    >>> chart.show()

    """
    if len(x) > 0:
        if not isinstance(ys[0], (Sequence, np.ndarray)):
            ys = (ys,)
        y_data = {f"y{i}": np.array(ys[i], copy=False) for i in range(len(ys))}
        self._table.update({"x": np.array(x, copy=False), **y_data})
        self.visible = True
    else:
        self.visible = False
[ "def", "update", "(", "self", ",", "x", ",", "ys", ")", ":", "if", "len", "(", "x", ")", ">", "0", ":", "if", "not", "isinstance", "(", "ys", "[", "0", "]", ",", "(", "Sequence", ",", "np", ".", "ndarray", ")", ")", ":", "ys", "=", "(", "ys", ",", ")", "y_data", "=", "{", "f\"y{i}\"", ":", "np", ".", "array", "(", "ys", "[", "i", "]", ",", "copy", "=", "False", ")", "for", "i", "in", "range", "(", "len", "(", "ys", ")", ")", "}", "self", ".", "_table", ".", "update", "(", "{", "\"x\"", ":", "np", ".", "array", "(", "x", ",", "copy", "=", "False", ")", ",", "*", "*", "y_data", "}", ")", "self", ".", "visible", "=", "True", "else", ":", "self", ".", "visible", "=", "False" ]
https://github.com/pyvista/pyvista/blob/012dbb95a9aae406c3cd4cd94fc8c477f871e426/pyvista/plotting/charts.py#L2669-L2702
pymedusa/Medusa
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
ext/boto/elasticache/layer1.py
python
ElastiCacheConnection.describe_reserved_cache_nodes
(self, reserved_cache_node_id=None, reserved_cache_nodes_offering_id=None, cache_node_type=None, duration=None, product_description=None, offering_type=None, max_records=None, marker=None)
return self._make_request( action='DescribeReservedCacheNodes', verb='POST', path='/', params=params)
The DescribeReservedCacheNodes operation returns information about reserved cache nodes for this account, or about a specified reserved cache node. :type reserved_cache_node_id: string :param reserved_cache_node_id: The reserved cache node identifier filter value. Use this parameter to show only the reservation that matches the specified reservation ID. :type reserved_cache_nodes_offering_id: string :param reserved_cache_nodes_offering_id: The offering identifier filter value. Use this parameter to show only purchased reservations matching the specified offering identifier. :type cache_node_type: string :param cache_node_type: The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type. :type duration: string :param duration: The duration filter value, specified in years or seconds. Use this parameter to show only reservations for this duration. Valid Values: `1 | 3 | 31536000 | 94608000` :type product_description: string :param product_description: The product description filter value. Use this parameter to show only those reservations matching the specified product description. :type offering_type: string :param offering_type: The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type. Valid values: `"Light Utilization" | "Medium Utilization" | "Heavy Utilization" ` :type max_records: integer :param max_records: The maximum number of records to include in the response. If more records exist than the specified `MaxRecords` value, a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: minimum 20; maximum 100. :type marker: string :param marker: An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
The DescribeReservedCacheNodes operation returns information about reserved cache nodes for this account, or about a specified reserved cache node.
[ "The", "DescribeReservedCacheNodes", "operation", "returns", "information", "about", "reserved", "cache", "nodes", "for", "this", "account", "or", "about", "a", "specified", "reserved", "cache", "node", "." ]
def describe_reserved_cache_nodes(self, reserved_cache_node_id=None, reserved_cache_nodes_offering_id=None, cache_node_type=None, duration=None, product_description=None, offering_type=None, max_records=None, marker=None): """ The DescribeReservedCacheNodes operation returns information about reserved cache nodes for this account, or about a specified reserved cache node. :type reserved_cache_node_id: string :param reserved_cache_node_id: The reserved cache node identifier filter value. Use this parameter to show only the reservation that matches the specified reservation ID. :type reserved_cache_nodes_offering_id: string :param reserved_cache_nodes_offering_id: The offering identifier filter value. Use this parameter to show only purchased reservations matching the specified offering identifier. :type cache_node_type: string :param cache_node_type: The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type. :type duration: string :param duration: The duration filter value, specified in years or seconds. Use this parameter to show only reservations for this duration. Valid Values: `1 | 3 | 31536000 | 94608000` :type product_description: string :param product_description: The product description filter value. Use this parameter to show only those reservations matching the specified product description. :type offering_type: string :param offering_type: The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type. Valid values: `"Light Utilization" | "Medium Utilization" | "Heavy Utilization" ` :type max_records: integer :param max_records: The maximum number of records to include in the response. If more records exist than the specified `MaxRecords` value, a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: minimum 20; maximum 100. :type marker: string :param marker: An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords . """ params = {} if reserved_cache_node_id is not None: params['ReservedCacheNodeId'] = reserved_cache_node_id if reserved_cache_nodes_offering_id is not None: params['ReservedCacheNodesOfferingId'] = reserved_cache_nodes_offering_id if cache_node_type is not None: params['CacheNodeType'] = cache_node_type if duration is not None: params['Duration'] = duration if product_description is not None: params['ProductDescription'] = product_description if offering_type is not None: params['OfferingType'] = offering_type if max_records is not None: params['MaxRecords'] = max_records if marker is not None: params['Marker'] = marker return self._make_request( action='DescribeReservedCacheNodes', verb='POST', path='/', params=params)
[ "def", "describe_reserved_cache_nodes", "(", "self", ",", "reserved_cache_node_id", "=", "None", ",", "reserved_cache_nodes_offering_id", "=", "None", ",", "cache_node_type", "=", "None", ",", "duration", "=", "None", ",", "product_description", "=", "None", ",", "offering_type", "=", "None", ",", "max_records", "=", "None", ",", "marker", "=", "None", ")", ":", "params", "=", "{", "}", "if", "reserved_cache_node_id", "is", "not", "None", ":", "params", "[", "'ReservedCacheNodeId'", "]", "=", "reserved_cache_node_id", "if", "reserved_cache_nodes_offering_id", "is", "not", "None", ":", "params", "[", "'ReservedCacheNodesOfferingId'", "]", "=", "reserved_cache_nodes_offering_id", "if", "cache_node_type", "is", "not", "None", ":", "params", "[", "'CacheNodeType'", "]", "=", "cache_node_type", "if", "duration", "is", "not", "None", ":", "params", "[", "'Duration'", "]", "=", "duration", "if", "product_description", "is", "not", "None", ":", "params", "[", "'ProductDescription'", "]", "=", "product_description", "if", "offering_type", "is", "not", "None", ":", "params", "[", "'OfferingType'", "]", "=", "offering_type", "if", "max_records", "is", "not", "None", ":", "params", "[", "'MaxRecords'", "]", "=", "max_records", "if", "marker", "is", "not", "None", ":", "params", "[", "'Marker'", "]", "=", "marker", "return", "self", ".", "_make_request", "(", "action", "=", "'DescribeReservedCacheNodes'", ",", "verb", "=", "'POST'", ",", "path", "=", "'/'", ",", "params", "=", "params", ")" ]
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext/boto/elasticache/layer1.py#L1007-L1087
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_vendored_deps/library/oc_group.py
python
OCGroup.needs_update
(self)
return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=['users'], debug=True)
verify an update is needed
verify an update is needed
[ "verify", "an", "update", "is", "needed" ]
def needs_update(self):
    ''' verify an update is needed '''
    return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=['users'], debug=True)
[ "def", "needs_update", "(", "self", ")", ":", "return", "not", "Utils", ".", "check_def_equal", "(", "self", ".", "config", ".", "data", ",", "self", ".", "group", ".", "yaml_dict", ",", "skip_keys", "=", "[", "'users'", "]", ",", "debug", "=", "True", ")" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_vendored_deps/library/oc_group.py#L1541-L1543
benedekrozemberczki/role2vec
d43fc337d00f8fe50475165c0e3695ba795df89a
src/walkers.py
python
alias_setup
(probs)
return J, q
Compute utility lists for non-uniform sampling from discrete distributions. Refer to https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ for details
Compute utility lists for non-uniform sampling from discrete distributions. Refer to https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ for details
[ "Compute", "utility", "lists", "for", "non", "-", "uniform", "sampling", "from", "discrete", "distributions", ".", "Refer", "to", "https", ":", "//", "hips", ".", "seas", ".", "harvard", ".", "edu", "/", "blog", "/", "2013", "/", "03", "/", "03", "/", "the", "-", "alias", "-", "method", "-", "efficient", "-", "sampling", "-", "with", "-", "many", "-", "discrete", "-", "outcomes", "/", "for", "details" ]
def alias_setup(probs):
    """
    Compute utility lists for non-uniform sampling from discrete distributions.
    Refer to https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
    for details
    """
    K = len(probs)
    q = np.zeros(K)
    J = np.zeros(K, dtype=np.int)

    smaller = []
    larger = []
    for kk, prob in enumerate(probs):
        q[kk] = K*prob
        if q[kk] < 1.0:
            smaller.append(kk)
        else:
            larger.append(kk)

    while len(smaller) > 0 and len(larger) > 0:
        small = smaller.pop()
        large = larger.pop()

        J[small] = large
        q[large] = q[large] + q[small] - 1.0
        if q[large] < 1.0:
            smaller.append(large)
        else:
            larger.append(large)
    return J, q
[ "def", "alias_setup", "(", "probs", ")", ":", "K", "=", "len", "(", "probs", ")", "q", "=", "np", ".", "zeros", "(", "K", ")", "J", "=", "np", ".", "zeros", "(", "K", ",", "dtype", "=", "np", ".", "int", ")", "smaller", "=", "[", "]", "larger", "=", "[", "]", "for", "kk", ",", "prob", "in", "enumerate", "(", "probs", ")", ":", "q", "[", "kk", "]", "=", "K", "*", "prob", "if", "q", "[", "kk", "]", "<", "1.0", ":", "smaller", ".", "append", "(", "kk", ")", "else", ":", "larger", ".", "append", "(", "kk", ")", "while", "len", "(", "smaller", ")", ">", "0", "and", "len", "(", "larger", ")", ">", "0", ":", "small", "=", "smaller", ".", "pop", "(", ")", "large", "=", "larger", ".", "pop", "(", ")", "J", "[", "small", "]", "=", "large", "q", "[", "large", "]", "=", "q", "[", "large", "]", "+", "q", "[", "small", "]", "-", "1.0", "if", "q", "[", "large", "]", "<", "1.0", ":", "smaller", ".", "append", "(", "large", ")", "else", ":", "larger", ".", "append", "(", "large", ")", "return", "J", ",", "q" ]
https://github.com/benedekrozemberczki/role2vec/blob/d43fc337d00f8fe50475165c0e3695ba795df89a/src/walkers.py#L118-L148
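The record above covers only the setup half of the alias method; drawing a sample needs a small companion step. A sketch of that standard counterpart (not part of the role2vec record; the name alias_draw is illustrative):

import numpy as np

def alias_draw(J, q):
    """Draw one index from the distribution encoded by alias_setup's (J, q)."""
    K = len(J)
    kk = np.random.randint(K)       # pick a column uniformly at random
    if np.random.rand() < q[kk]:    # keep the column itself ...
        return kk
    return J[kk]                    # ... or fall through to its alias

# Example: J, q = alias_setup([0.1, 0.2, 0.7]); samples = [alias_draw(J, q) for _ in range(5)]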
GalSim-developers/GalSim
a05d4ec3b8d8574f99d3b0606ad882cbba53f345
galsim/nfw_halo.py
python
NFWHalo.getMagnification
(self, pos, z_s, units=arcsec)
return self._getMagnification(pos_x, pos_y, z_s)
Calculate magnification of halo at specified positions. Parameters: pos: Position(s) of the source(s), assumed to be post-lensing! Valid ways to input this: - single `Position` instance - tuple of floats: (x,y) - list/array of `Position` instances - tuple of lists/arrays: ( xlist, ylist ) z_s: Source redshift(s). units: Angular units of coordinates. [default: galsim.arcsec] Returns: the magnification mu If the input ``pos`` is given a single position, mu is the magnification value. If the input ``pos`` is given a list/array of positions, mu is a NumPy array.
Calculate magnification of halo at specified positions.
[ "Calculate", "magnification", "of", "halo", "at", "specified", "positions", "." ]
def getMagnification(self, pos, z_s, units=arcsec):
    """Calculate magnification of halo at specified positions.

    Parameters:
        pos:        Position(s) of the source(s), assumed to be post-lensing!
                    Valid ways to input this:
                        - single `Position` instance
                        - tuple of floats: (x,y)
                        - list/array of `Position` instances
                        - tuple of lists/arrays: ( xlist, ylist )
        z_s:        Source redshift(s).
        units:      Angular units of coordinates. [default: galsim.arcsec]

    Returns:
        the magnification mu

        If the input ``pos`` is given a single position, mu is the magnification value.
        If the input ``pos`` is given a list/array of positions, mu is a NumPy array.
    """
    # Convert to numpy arrays for internal usage:
    pos_x, pos_y = utilities._convertPositions(pos, units, 'getMagnification')
    return self._getMagnification(pos_x, pos_y, z_s)
[ "def", "getMagnification", "(", "self", ",", "pos", ",", "z_s", ",", "units", "=", "arcsec", ")", ":", "# Convert to numpy arrays for internal usage:", "pos_x", ",", "pos_y", "=", "utilities", ".", "_convertPositions", "(", "pos", ",", "units", ",", "'getMagnification'", ")", "return", "self", ".", "_getMagnification", "(", "pos_x", ",", "pos_y", ",", "z_s", ")" ]
https://github.com/GalSim-developers/GalSim/blob/a05d4ec3b8d8574f99d3b0606ad882cbba53f345/galsim/nfw_halo.py#L391-L414
cunjian/pytorch_face_landmark
f575be168a24af6f4807c852173fdfedf6d2c67d
FaceBoxes/utils/box_utils.py
python
intersect
(box_a, box_b)
return inter[:, :, 0] * inter[:, :, 1]
We resize both tensors to [A,B,2] without new malloc: [A,2] -> [A,1,2] -> [A,B,2] [B,2] -> [1,B,2] -> [A,B,2] Then we compute the area of intersect between box_a and box_b. Args: box_a: (tensor) bounding boxes, Shape: [A,4]. box_b: (tensor) bounding boxes, Shape: [B,4]. Return: (tensor) intersection area, Shape: [A,B].
We resize both tensors to [A,B,2] without new malloc: [A,2] -> [A,1,2] -> [A,B,2] [B,2] -> [1,B,2] -> [A,B,2] Then we compute the area of intersect between box_a and box_b. Args: box_a: (tensor) bounding boxes, Shape: [A,4]. box_b: (tensor) bounding boxes, Shape: [B,4]. Return: (tensor) intersection area, Shape: [A,B].
[ "We", "resize", "both", "tensors", "to", "[", "A", "B", "2", "]", "without", "new", "malloc", ":", "[", "A", "2", "]", "-", ">", "[", "A", "1", "2", "]", "-", ">", "[", "A", "B", "2", "]", "[", "B", "2", "]", "-", ">", "[", "1", "B", "2", "]", "-", ">", "[", "A", "B", "2", "]", "Then", "we", "compute", "the", "area", "of", "intersect", "between", "box_a", "and", "box_b", ".", "Args", ":", "box_a", ":", "(", "tensor", ")", "bounding", "boxes", "Shape", ":", "[", "A", "4", "]", ".", "box_b", ":", "(", "tensor", ")", "bounding", "boxes", "Shape", ":", "[", "B", "4", "]", ".", "Return", ":", "(", "tensor", ")", "intersection", "area", "Shape", ":", "[", "A", "B", "]", "." ]
def intersect(box_a, box_b):
    """ We resize both tensors to [A,B,2] without new malloc:
    [A,2] -> [A,1,2] -> [A,B,2]
    [B,2] -> [1,B,2] -> [A,B,2]
    Then we compute the area of intersect between box_a and box_b.
    Args:
      box_a: (tensor) bounding boxes, Shape: [A,4].
      box_b: (tensor) bounding boxes, Shape: [B,4].
    Return:
      (tensor) intersection area, Shape: [A,B].
    """
    A = box_a.size(0)
    B = box_b.size(0)
    max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
                       box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
    min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
                       box_b[:, :2].unsqueeze(0).expand(A, B, 2))
    inter = torch.clamp((max_xy - min_xy), min=0)
    return inter[:, :, 0] * inter[:, :, 1]
[ "def", "intersect", "(", "box_a", ",", "box_b", ")", ":", "A", "=", "box_a", ".", "size", "(", "0", ")", "B", "=", "box_b", ".", "size", "(", "0", ")", "max_xy", "=", "torch", ".", "min", "(", "box_a", "[", ":", ",", "2", ":", "]", ".", "unsqueeze", "(", "1", ")", ".", "expand", "(", "A", ",", "B", ",", "2", ")", ",", "box_b", "[", ":", ",", "2", ":", "]", ".", "unsqueeze", "(", "0", ")", ".", "expand", "(", "A", ",", "B", ",", "2", ")", ")", "min_xy", "=", "torch", ".", "max", "(", "box_a", "[", ":", ",", ":", "2", "]", ".", "unsqueeze", "(", "1", ")", ".", "expand", "(", "A", ",", "B", ",", "2", ")", ",", "box_b", "[", ":", ",", ":", "2", "]", ".", "unsqueeze", "(", "0", ")", ".", "expand", "(", "A", ",", "B", ",", "2", ")", ")", "inter", "=", "torch", ".", "clamp", "(", "(", "max_xy", "-", "min_xy", ")", ",", "min", "=", "0", ")", "return", "inter", "[", ":", ",", ":", ",", "0", "]", "*", "inter", "[", ":", ",", ":", ",", "1", "]" ]
https://github.com/cunjian/pytorch_face_landmark/blob/f575be168a24af6f4807c852173fdfedf6d2c67d/FaceBoxes/utils/box_utils.py#L31-L49
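intersect() above is usually paired with an IoU computation. A minimal sketch of that follow-up step (assumes intersect from this record is in scope; the name jaccard is illustrative, and boxes are in [x1, y1, x2, y2] form):

import torch

def jaccard(box_a, box_b):
    """IoU for every pair: inter / (area_a + area_b - inter), shape [A, B]."""
    inter = intersect(box_a, box_b)                          # [A, B]
    area_a = ((box_a[:, 2] - box_a[:, 0]) *
              (box_a[:, 3] - box_a[:, 1])).unsqueeze(1)      # [A, 1]
    area_b = ((box_b[:, 2] - box_b[:, 0]) *
              (box_b[:, 3] - box_b[:, 1])).unsqueeze(0)      # [1, B]
    return inter / (area_a + area_b - inter)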
TobyPDE/FRRN
20041107a4625106d6acef1dcc903d4352220d3e
check_dependencies.py
python
check_python
()
Issues a warning if you're not running P2.7 or P3.4.
Issues a warning if you're not running P2.7 or P3.4.
[ "Issues", "a", "warning", "if", "you", "re", "not", "running", "P2", ".", "7", "or", "P3", ".", "4", "." ]
def check_python():
    """Issues a warning if you're not running P2.7 or P3.4."""
    version = sys.version[:3]
    if version != "2.7" and version != "3.4":
        logging.warning("You are running Python {}. We only officially support "
                        "Python 2.7 and 3.4. This software may "
                        "or may not run.".format(version))
    else:
        logging.info("Found supported Python version {}.".format(version))
[ "def", "check_python", "(", ")", ":", "version", "=", "sys", ".", "version", "[", ":", "3", "]", "if", "version", "!=", "\"2.7\"", "and", "version", "!=", "\"3.4\"", ":", "logging", ".", "warning", "(", "\"You are running Python {}. We only officially support \"", "\"Python 2.7 and 3.4. This software may \"", "\"or may not run.\"", ".", "format", "(", "version", ")", ")", "else", ":", "logging", ".", "info", "(", "\"Found supported Python version {}.\"", ".", "format", "(", "version", ")", ")" ]
https://github.com/TobyPDE/FRRN/blob/20041107a4625106d6acef1dcc903d4352220d3e/check_dependencies.py#L99-L107
pyparsing/pyparsing
1ccf846394a055924b810faaf9628dac53633848
examples/booleansearchparser.py
python
BooleanSearchParser.evaluateQuotes
(self, argument)
return self.GetQuotes(" ".join(search_terms), r)
Evaluate quoted strings First is does an 'and' on the individual search terms, then it asks the function GetQuoted to only return the subset of ID's that contain the literal string.
Evaluate quoted strings
[ "Evaluate", "quoted", "strings" ]
def evaluateQuotes(self, argument):
    """Evaluate quoted strings

    First is does an 'and' on the individual search terms, then it asks the
    function GetQuoted to only return the subset of ID's that contain the
    literal string.
    """
    # r = set()
    r = False
    search_terms = []
    for item in argument:
        search_terms.append(item[0])
        r = r and self.evaluate(item)
    return self.GetQuotes(" ".join(search_terms), r)
[ "def", "evaluateQuotes", "(", "self", ",", "argument", ")", ":", "# r = set()", "r", "=", "False", "search_terms", "=", "[", "]", "for", "item", "in", "argument", ":", "search_terms", ".", "append", "(", "item", "[", "0", "]", ")", "r", "=", "r", "and", "self", ".", "evaluate", "(", "item", ")", "return", "self", ".", "GetQuotes", "(", "\" \"", ".", "join", "(", "search_terms", ")", ",", "r", ")" ]
https://github.com/pyparsing/pyparsing/blob/1ccf846394a055924b810faaf9628dac53633848/examples/booleansearchparser.py#L220-L233
chainer/chainercv
7159616642e0be7c5b3ef380b848e16b7e99355b
chainercv/links/model/faster_rcnn/faster_rcnn_train_chain.py
python
FasterRCNNTrainChain.__init__
(self, faster_rcnn, rpn_sigma=3., roi_sigma=1., anchor_target_creator=AnchorTargetCreator(), proposal_target_creator=ProposalTargetCreator())
[]
def __init__(self, faster_rcnn, rpn_sigma=3., roi_sigma=1.,
             anchor_target_creator=AnchorTargetCreator(),
             proposal_target_creator=ProposalTargetCreator()):
    super(FasterRCNNTrainChain, self).__init__()
    with self.init_scope():
        self.faster_rcnn = faster_rcnn
    self.rpn_sigma = rpn_sigma
    self.roi_sigma = roi_sigma

    self.anchor_target_creator = anchor_target_creator
    self.proposal_target_creator = proposal_target_creator

    self.loc_normalize_mean = faster_rcnn.loc_normalize_mean
    self.loc_normalize_std = faster_rcnn.loc_normalize_std
[ "def", "__init__", "(", "self", ",", "faster_rcnn", ",", "rpn_sigma", "=", "3.", ",", "roi_sigma", "=", "1.", ",", "anchor_target_creator", "=", "AnchorTargetCreator", "(", ")", ",", "proposal_target_creator", "=", "ProposalTargetCreator", "(", ")", ")", ":", "super", "(", "FasterRCNNTrainChain", ",", "self", ")", ".", "__init__", "(", ")", "with", "self", ".", "init_scope", "(", ")", ":", "self", ".", "faster_rcnn", "=", "faster_rcnn", "self", ".", "rpn_sigma", "=", "rpn_sigma", "self", ".", "roi_sigma", "=", "roi_sigma", "self", ".", "anchor_target_creator", "=", "anchor_target_creator", "self", ".", "proposal_target_creator", "=", "proposal_target_creator", "self", ".", "loc_normalize_mean", "=", "faster_rcnn", ".", "loc_normalize_mean", "self", ".", "loc_normalize_std", "=", "faster_rcnn", ".", "loc_normalize_std" ]
https://github.com/chainer/chainercv/blob/7159616642e0be7c5b3ef380b848e16b7e99355b/chainercv/links/model/faster_rcnn/faster_rcnn_train_chain.py#L48-L61
mongodb/mongo-python-driver
c760f900f2e4109a247c2ffc8ad3549362007772
pymongo/client_session.py
python
_reraise_with_unknown_commit
(exc)
Re-raise an exception with the UnknownTransactionCommitResult label.
Re-raise an exception with the UnknownTransactionCommitResult label.
[ "Re", "-", "raise", "an", "exception", "with", "the", "UnknownTransactionCommitResult", "label", "." ]
def _reraise_with_unknown_commit(exc):
    """Re-raise an exception with the UnknownTransactionCommitResult label."""
    exc._add_error_label("UnknownTransactionCommitResult")
    raise
[ "def", "_reraise_with_unknown_commit", "(", "exc", ")", ":", "exc", ".", "_add_error_label", "(", "\"UnknownTransactionCommitResult\"", ")", "raise" ]
https://github.com/mongodb/mongo-python-driver/blob/c760f900f2e4109a247c2ffc8ad3549362007772/pymongo/client_session.py#L400-L403
NeuromorphicProcessorProject/snn_toolbox
a85ada7b5d060500703285ef8a68f06ea1ffda65
snntoolbox/simulation/backends/inisim/temporal_pattern.py
python
SpikeDense.build
(self, input_shape)
Creates the layer neurons and connections. Parameters ---------- input_shape: Union[list, tuple, Any] Keras tensor (future input to layer) or list/tuple of Keras tensors to reference for weight shape computations.
Creates the layer neurons and connections.
[ "Creates", "the", "layer", "neurons", "and", "connections", "." ]
def build(self, input_shape):
    """Creates the layer neurons and connections.

    Parameters
    ----------

    input_shape: Union[list, tuple, Any]
        Keras tensor (future input to layer) or list/tuple of Keras tensors
        to reference for weight shape computations.
    """

    Dense.build(self, input_shape)
    self.init_neurons(input_shape)
[ "def", "build", "(", "self", ",", "input_shape", ")", ":", "Dense", ".", "build", "(", "self", ",", "input_shape", ")", "self", ".", "init_neurons", "(", "input_shape", ")" ]
https://github.com/NeuromorphicProcessorProject/snn_toolbox/blob/a85ada7b5d060500703285ef8a68f06ea1ffda65/snntoolbox/simulation/backends/inisim/temporal_pattern.py#L243-L255
lesscpy/lesscpy
1172a1693df2f4bc929a88b1bebb920e666c0c9f
lesscpy/lessc/color.py
python
Color.darken
(self, color, diff, *args)
Darken a color args: color (str): color diff (str): percentage returns: str
Darken a color args: color (str): color diff (str): percentage returns: str
[ "Darken", "a", "color", "args", ":", "color", "(", "str", ")", ":", "color", "diff", "(", "str", ")", ":", "percentage", "returns", ":", "str" ]
def darken(self, color, diff, *args):
    """ Darken a color
    args:
        color (str): color
        diff (str): percentage
    returns:
        str
    """
    if color and diff:
        return self._ophsl(color, diff, 1, operator.sub)
    raise ValueError('Illegal color values')
[ "def", "darken", "(", "self", ",", "color", ",", "diff", ",", "*", "args", ")", ":", "if", "color", "and", "diff", ":", "return", "self", ".", "_ophsl", "(", "color", ",", "diff", ",", "1", ",", "operator", ".", "sub", ")", "raise", "ValueError", "(", "'Illegal color values'", ")" ]
https://github.com/lesscpy/lesscpy/blob/1172a1693df2f4bc929a88b1bebb920e666c0c9f/lesscpy/lessc/color.py#L240-L250
DataDog/integrations-core
934674b29d94b70ccc008f76ea172d0cdae05e1e
hyperv/datadog_checks/hyperv/config_models/defaults.py
python
instance_min_collection_interval
(field, value)
return 15
[]
def instance_min_collection_interval(field, value):
    return 15
[ "def", "instance_min_collection_interval", "(", "field", ",", "value", ")", ":", "return", "15" ]
https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/hyperv/datadog_checks/hyperv/config_models/defaults.py#L53-L54
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
bin/x86/Debug/scripting_engine/Lib/site-packages/lxml/html/html5parser.py
python
fragment_fromstring
(html, create_parent=False, guess_charset=False, parser=None)
return result
Parses a single HTML element; it is an error if there is more than one element, or if anything but whitespace precedes or follows the element. If create_parent is true (or is a tag name) then a parent node will be created to encapsulate the HTML in a single element. In this case, leading or trailing text is allowed.
Parses a single HTML element; it is an error if there is more than one element, or if anything but whitespace precedes or follows the element.
[ "Parses", "a", "single", "HTML", "element", ";", "it", "is", "an", "error", "if", "there", "is", "more", "than", "one", "element", "or", "if", "anything", "but", "whitespace", "precedes", "or", "follows", "the", "element", "." ]
def fragment_fromstring(html, create_parent=False,
                        guess_charset=False, parser=None):
    """Parses a single HTML element; it is an error if there is more than
    one element, or if anything but whitespace precedes or follows the
    element.

    If create_parent is true (or is a tag name) then a parent node
    will be created to encapsulate the HTML in a single element. In
    this case, leading or trailing text is allowed.
    """
    if not isinstance(html, _strings):
        raise TypeError('string required')

    accept_leading_text = bool(create_parent)

    elements = fragments_fromstring(
        html, guess_charset=guess_charset, parser=parser,
        no_leading_text=not accept_leading_text)

    if create_parent:
        if not isinstance(create_parent, _strings):
            create_parent = 'div'
        new_root = Element(create_parent)
        if elements:
            if isinstance(elements[0], _strings):
                new_root.text = elements[0]
                del elements[0]
            new_root.extend(elements)
        return new_root

    if not elements:
        raise etree.ParserError('No elements found')
    if len(elements) > 1:
        raise etree.ParserError('Multiple elements found')

    result = elements[0]
    if result.tail and result.tail.strip():
        raise etree.ParserError('Element followed by text: %r' % result.tail)
    result.tail = None
    return result
[ "def", "fragment_fromstring", "(", "html", ",", "create_parent", "=", "False", ",", "guess_charset", "=", "False", ",", "parser", "=", "None", ")", ":", "if", "not", "isinstance", "(", "html", ",", "_strings", ")", ":", "raise", "TypeError", "(", "'string required'", ")", "accept_leading_text", "=", "bool", "(", "create_parent", ")", "elements", "=", "fragments_fromstring", "(", "html", ",", "guess_charset", "=", "guess_charset", ",", "parser", "=", "parser", ",", "no_leading_text", "=", "not", "accept_leading_text", ")", "if", "create_parent", ":", "if", "not", "isinstance", "(", "create_parent", ",", "_strings", ")", ":", "create_parent", "=", "'div'", "new_root", "=", "Element", "(", "create_parent", ")", "if", "elements", ":", "if", "isinstance", "(", "elements", "[", "0", "]", ",", "_strings", ")", ":", "new_root", ".", "text", "=", "elements", "[", "0", "]", "del", "elements", "[", "0", "]", "new_root", ".", "extend", "(", "elements", ")", "return", "new_root", "if", "not", "elements", ":", "raise", "etree", ".", "ParserError", "(", "'No elements found'", ")", "if", "len", "(", "elements", ")", ">", "1", ":", "raise", "etree", ".", "ParserError", "(", "'Multiple elements found'", ")", "result", "=", "elements", "[", "0", "]", "if", "result", ".", "tail", "and", "result", ".", "tail", ".", "strip", "(", ")", ":", "raise", "etree", ".", "ParserError", "(", "'Element followed by text: %r'", "%", "result", ".", "tail", ")", "result", ".", "tail", "=", "None", "return", "result" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/bin/x86/Debug/scripting_engine/Lib/site-packages/lxml/html/html5parser.py#L92-L130
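A brief usage sketch for the record above (assumes lxml and html5lib are installed; the module path is the one shown in this record, lxml.html.html5parser):

from lxml.html.html5parser import fragment_fromstring

el = fragment_fromstring('<p>hello</p>')              # exactly one element: ok
wrapped = fragment_fromstring('text <b>x</b>',        # leading text is allowed here,
                              create_parent=True)     # result is wrapped in a <div>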
Fizzadar/pyinfra
ff0913d6a172966760b63fe59e55dff9ea852e0d
pyinfra/api/connectors/ssh.py
python
rsync
( state, host, src, dest, flags, print_output=False, print_input=False, sudo=False, sudo_user=None, **ignored_kwargs )
return True
[]
def rsync(
    state, host, src, dest, flags,
    print_output=False, print_input=False,
    sudo=False, sudo_user=None,
    **ignored_kwargs
):
    hostname = host.data.ssh_hostname or host.name

    user = ''
    if host.data.ssh_user:
        user = '{0}@'.format(host.data.ssh_user)

    ssh_flags = []

    port = host.data.ssh_port
    if port:
        ssh_flags.append('-p {0}'.format(port))

    ssh_key = host.data.ssh_key
    if ssh_key:
        ssh_flags.append('-i {0}'.format(ssh_key))

    remote_rsync_command = 'rsync'
    if sudo:
        remote_rsync_command = 'sudo rsync'
        if sudo_user:
            remote_rsync_command = 'sudo -u {0} rsync'.format(sudo_user)

    rsync_command = (
        'rsync {rsync_flags} '
        "--rsh 'ssh -o BatchMode=yes -o StrictHostKeyChecking=no {ssh_flags}' "
        "--rsync-path '{remote_rsync_command}' "
        '{src} {user}{hostname}:{dest}'
    ).format(
        rsync_flags=' '.join(flags),
        ssh_flags=' '.join(ssh_flags),
        remote_rsync_command=remote_rsync_command,
        user=user,
        hostname=hostname,
        src=src,
        dest=dest,
    )

    if print_input:
        click.echo('{0}>>> {1}'.format(host.print_prefix, rsync_command), err=True)

    return_code, combined_output = run_local_process(
        rsync_command,
        print_output=print_output,
        print_prefix=host.print_prefix,
    )

    status = return_code == 0

    if not status:
        _, stderr = split_combined_output(combined_output)
        raise IOError('\n'.join(stderr))

    return True
[ "def", "rsync", "(", "state", ",", "host", ",", "src", ",", "dest", ",", "flags", ",", "print_output", "=", "False", ",", "print_input", "=", "False", ",", "sudo", "=", "False", ",", "sudo_user", "=", "None", ",", "*", "*", "ignored_kwargs", ")", ":", "hostname", "=", "host", ".", "data", ".", "ssh_hostname", "or", "host", ".", "name", "user", "=", "''", "if", "host", ".", "data", ".", "ssh_user", ":", "user", "=", "'{0}@'", ".", "format", "(", "host", ".", "data", ".", "ssh_user", ")", "ssh_flags", "=", "[", "]", "port", "=", "host", ".", "data", ".", "ssh_port", "if", "port", ":", "ssh_flags", ".", "append", "(", "'-p {0}'", ".", "format", "(", "port", ")", ")", "ssh_key", "=", "host", ".", "data", ".", "ssh_key", "if", "ssh_key", ":", "ssh_flags", ".", "append", "(", "'-i {0}'", ".", "format", "(", "ssh_key", ")", ")", "remote_rsync_command", "=", "'rsync'", "if", "sudo", ":", "remote_rsync_command", "=", "'sudo rsync'", "if", "sudo_user", ":", "remote_rsync_command", "=", "'sudo -u {0} rsync'", ".", "format", "(", "sudo_user", ")", "rsync_command", "=", "(", "'rsync {rsync_flags} '", "\"--rsh 'ssh -o BatchMode=yes -o StrictHostKeyChecking=no {ssh_flags}' \"", "\"--rsync-path '{remote_rsync_command}' \"", "'{src} {user}{hostname}:{dest}'", ")", ".", "format", "(", "rsync_flags", "=", "' '", ".", "join", "(", "flags", ")", ",", "ssh_flags", "=", "' '", ".", "join", "(", "ssh_flags", ")", ",", "remote_rsync_command", "=", "remote_rsync_command", ",", "user", "=", "user", ",", "hostname", "=", "hostname", ",", "src", "=", "src", ",", "dest", "=", "dest", ",", ")", "if", "print_input", ":", "click", ".", "echo", "(", "'{0}>>> {1}'", ".", "format", "(", "host", ".", "print_prefix", ",", "rsync_command", ")", ",", "err", "=", "True", ")", "return_code", ",", "combined_output", "=", "run_local_process", "(", "rsync_command", ",", "print_output", "=", "print_output", ",", "print_prefix", "=", "host", ".", "print_prefix", ",", ")", "status", "=", "return_code", "==", "0", "if", "not", "status", ":", "_", ",", "stderr", "=", "split_combined_output", "(", "combined_output", ")", "raise", "IOError", "(", "'\\n'", ".", "join", "(", "stderr", ")", ")", "return", "True" ]
https://github.com/Fizzadar/pyinfra/blob/ff0913d6a172966760b63fe59e55dff9ea852e0d/pyinfra/api/connectors/ssh.py#L523-L579
facebookresearch/detectron2
cb92ae1763cd7d3777c243f07749574cdaec6cb8
projects/DensePose/densepose/data/build.py
python
has_inference_based_loaders
(cfg: CfgNode)
return len(cfg.BOOTSTRAP_DATASETS) > 0
Returns True, if at least one inferense-based loader must be instantiated for training
Returns True, if at least one inferense-based loader must be instantiated for training
[ "Returns", "True", "if", "at", "least", "one", "inferense", "-", "based", "loader", "must", "be", "instantiated", "for", "training" ]
def has_inference_based_loaders(cfg: CfgNode) -> bool:
    """
    Returns True, if at least one inferense-based loader must
    be instantiated for training
    """
    return len(cfg.BOOTSTRAP_DATASETS) > 0
[ "def", "has_inference_based_loaders", "(", "cfg", ":", "CfgNode", ")", "->", "bool", ":", "return", "len", "(", "cfg", ".", "BOOTSTRAP_DATASETS", ")", ">", "0" ]
https://github.com/facebookresearch/detectron2/blob/cb92ae1763cd7d3777c243f07749574cdaec6cb8/projects/DensePose/densepose/data/build.py#L679-L684
francisck/DanderSpritz_docs
86bb7caca5a957147f120b18bb5c31f299914904
Python/Core/Lib/rfc822.py
python
Message.__getitem__
(self, name)
return self.dict[name.lower()]
Get a specific header, as from a dictionary.
Get a specific header, as from a dictionary.
[ "Get", "a", "specific", "header", "as", "from", "a", "dictionary", "." ]
def __getitem__(self, name):
    """Get a specific header, as from a dictionary."""
    return self.dict[name.lower()]
[ "def", "__getitem__", "(", "self", ",", "name", ")", ":", "return", "self", ".", "dict", "[", "name", ".", "lower", "(", ")", "]" ]
https://github.com/francisck/DanderSpritz_docs/blob/86bb7caca5a957147f120b18bb5c31f299914904/Python/Core/Lib/rfc822.py#L387-L389
pypa/setuptools
9f37366aab9cd8f6baa23e6a77cfdb8daf97757e
pkg_resources/__init__.py
python
WorkingSet._build_master
(cls)
return ws
Prepare the master working set.
Prepare the master working set.
[ "Prepare", "the", "master", "working", "set", "." ]
def _build_master(cls):
    """
    Prepare the master working set.
    """
    ws = cls()
    try:
        from __main__ import __requires__
    except ImportError:
        # The main program does not list any requirements
        return ws

    # ensure the requirements are met
    try:
        ws.require(__requires__)
    except VersionConflict:
        return cls._build_from_requirements(__requires__)

    return ws
[ "def", "_build_master", "(", "cls", ")", ":", "ws", "=", "cls", "(", ")", "try", ":", "from", "__main__", "import", "__requires__", "except", "ImportError", ":", "# The main program does not list any requirements", "return", "ws", "# ensure the requirements are met", "try", ":", "ws", ".", "require", "(", "__requires__", ")", "except", "VersionConflict", ":", "return", "cls", ".", "_build_from_requirements", "(", "__requires__", ")", "return", "ws" ]
https://github.com/pypa/setuptools/blob/9f37366aab9cd8f6baa23e6a77cfdb8daf97757e/pkg_resources/__init__.py#L560-L577
awslabs/aws-data-wrangler
548f5197bacd91bd50ebc66a0173eff9c56f69b1
awswrangler/redshift.py
python
copy_from_files
( # pylint: disable=too-many-locals,too-many-arguments path: str, con: redshift_connector.Connection, table: str, schema: str, iam_role: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None, parquet_infer_sampling: float = 1.0, mode: str = "append", overwrite_method: str = "drop", diststyle: str = "AUTO", distkey: Optional[str] = None, sortstyle: str = "COMPOUND", sortkey: Optional[List[str]] = None, primary_keys: Optional[List[str]] = None, varchar_lengths_default: int = 256, varchar_lengths: Optional[Dict[str, int]] = None, serialize_to_json: bool = False, path_suffix: Optional[str] = None, path_ignore_suffix: Optional[str] = None, use_threads: Union[bool, int] = True, lock: bool = False, commit_transaction: bool = True, boto3_session: Optional[boto3.Session] = None, s3_additional_kwargs: Optional[Dict[str, str]] = None, )
Load Parquet files from S3 to a Table on Amazon Redshift (Through COPY command). https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html Note ---- If the table does not exist yet, it will be automatically created for you using the Parquet metadata to infer the columns data types. Note ---- In case of `use_threads=True` the number of threads that will be spawned will be gotten from os.cpu_count(). Parameters ---------- path : str S3 prefix (e.g. s3://bucket/prefix/) con : redshift_connector.Connection Use redshift_connector.connect() to use " "credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog. table : str Table name schema : str Schema name iam_role : str, optional AWS IAM role with the related permissions. aws_access_key_id : str, optional The access key for your AWS account. aws_secret_access_key : str, optional The secret key for your AWS account. aws_session_token : str, optional The session key for your AWS account. This is only needed when you are using temporary credentials. parquet_infer_sampling : float Random sample ratio of files that will have the metadata inspected. Must be `0.0 < sampling <= 1.0`. The higher, the more accurate. The lower, the faster. mode : str Append, overwrite or upsert. overwrite_method : str Drop, cascade, truncate, or delete. Only applicable in overwrite mode. "drop" - ``DROP ... RESTRICT`` - drops the table. Fails if there are any views that depend on it. "cascade" - ``DROP ... CASCADE`` - drops the table, and all views that depend on it. "truncate" - ``TRUNCATE ...`` - truncates the table, but immediatly commits current transaction & starts a new one, hence the overwrite happens in two transactions and is not atomic. "delete" - ``DELETE FROM ...`` - deletes all rows from the table. Slow relative to the other methods. diststyle : str Redshift distribution styles. Must be in ["AUTO", "EVEN", "ALL", "KEY"]. https://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html distkey : str, optional Specifies a column name or positional number for the distribution key. sortstyle : str Sorting can be "COMPOUND" or "INTERLEAVED". https://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data.html sortkey : List[str], optional List of columns to be sorted. primary_keys : List[str], optional Primary keys. varchar_lengths_default : int The size that will be set for all VARCHAR columns not specified with varchar_lengths. varchar_lengths : Dict[str, int], optional Dict of VARCHAR length by columns. (e.g. {"col1": 10, "col5": 200}). serialize_to_json : bool Should Wrangler add SERIALIZETOJSON parameter into the COPY command? SERIALIZETOJSON is necessary to load nested data https://docs.aws.amazon.com/redshift/latest/dg/ingest-super.html#copy_json path_suffix: Union[str, List[str], None] Suffix or List of suffixes to be scanned on s3 for the schema extraction (e.g. [".gz.parquet", ".snappy.parquet"]). Only has effect during the table creation. If None, will try to read all files. (default) path_ignore_suffix: Union[str, List[str], None] Suffix or List of suffixes for S3 keys to be ignored during the schema extraction. (e.g. [".csv", "_SUCCESS"]). Only has effect during the table creation. If None, will try to read all files. (default) use_threads : bool, int True to enable concurrent requests, False to disable multiple threads. If enabled os.cpu_count() will be used as the max number of threads. If integer is provided, specified number is used. 
lock : bool True to execute LOCK command inside the transaction to force serializable isolation. commit_transaction: bool Whether to commit the transaction. True by default. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receive None. s3_additional_kwargs: Forwarded to botocore requests. e.g. s3_additional_kwargs={'ServerSideEncryption': 'aws:kms', 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'} Returns ------- None None. Examples -------- >>> import awswrangler as wr >>> con = wr.redshift.connect("MY_GLUE_CONNECTION") >>> wr.redshift.copy_from_files( ... path="s3://bucket/my_parquet_files/", ... con=con, ... table="my_table", ... schema="public", ... iam_role="arn:aws:iam::XXX:role/XXX" ... ) >>> con.close()
Load Parquet files from S3 to a Table on Amazon Redshift (Through COPY command).
[ "Load", "Parquet", "files", "from", "S3", "to", "a", "Table", "on", "Amazon", "Redshift", "(", "Through", "COPY", "command", ")", "." ]
def copy_from_files( # pylint: disable=too-many-locals,too-many-arguments path: str, con: redshift_connector.Connection, table: str, schema: str, iam_role: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None, parquet_infer_sampling: float = 1.0, mode: str = "append", overwrite_method: str = "drop", diststyle: str = "AUTO", distkey: Optional[str] = None, sortstyle: str = "COMPOUND", sortkey: Optional[List[str]] = None, primary_keys: Optional[List[str]] = None, varchar_lengths_default: int = 256, varchar_lengths: Optional[Dict[str, int]] = None, serialize_to_json: bool = False, path_suffix: Optional[str] = None, path_ignore_suffix: Optional[str] = None, use_threads: Union[bool, int] = True, lock: bool = False, commit_transaction: bool = True, boto3_session: Optional[boto3.Session] = None, s3_additional_kwargs: Optional[Dict[str, str]] = None, ) -> None: """Load Parquet files from S3 to a Table on Amazon Redshift (Through COPY command). https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html Note ---- If the table does not exist yet, it will be automatically created for you using the Parquet metadata to infer the columns data types. Note ---- In case of `use_threads=True` the number of threads that will be spawned will be gotten from os.cpu_count(). Parameters ---------- path : str S3 prefix (e.g. s3://bucket/prefix/) con : redshift_connector.Connection Use redshift_connector.connect() to use " "credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog. table : str Table name schema : str Schema name iam_role : str, optional AWS IAM role with the related permissions. aws_access_key_id : str, optional The access key for your AWS account. aws_secret_access_key : str, optional The secret key for your AWS account. aws_session_token : str, optional The session key for your AWS account. This is only needed when you are using temporary credentials. parquet_infer_sampling : float Random sample ratio of files that will have the metadata inspected. Must be `0.0 < sampling <= 1.0`. The higher, the more accurate. The lower, the faster. mode : str Append, overwrite or upsert. overwrite_method : str Drop, cascade, truncate, or delete. Only applicable in overwrite mode. "drop" - ``DROP ... RESTRICT`` - drops the table. Fails if there are any views that depend on it. "cascade" - ``DROP ... CASCADE`` - drops the table, and all views that depend on it. "truncate" - ``TRUNCATE ...`` - truncates the table, but immediatly commits current transaction & starts a new one, hence the overwrite happens in two transactions and is not atomic. "delete" - ``DELETE FROM ...`` - deletes all rows from the table. Slow relative to the other methods. diststyle : str Redshift distribution styles. Must be in ["AUTO", "EVEN", "ALL", "KEY"]. https://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html distkey : str, optional Specifies a column name or positional number for the distribution key. sortstyle : str Sorting can be "COMPOUND" or "INTERLEAVED". https://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data.html sortkey : List[str], optional List of columns to be sorted. primary_keys : List[str], optional Primary keys. varchar_lengths_default : int The size that will be set for all VARCHAR columns not specified with varchar_lengths. varchar_lengths : Dict[str, int], optional Dict of VARCHAR length by columns. (e.g. {"col1": 10, "col5": 200}). 
serialize_to_json : bool Should Wrangler add SERIALIZETOJSON parameter into the COPY command? SERIALIZETOJSON is necessary to load nested data https://docs.aws.amazon.com/redshift/latest/dg/ingest-super.html#copy_json path_suffix: Union[str, List[str], None] Suffix or List of suffixes to be scanned on s3 for the schema extraction (e.g. [".gz.parquet", ".snappy.parquet"]). Only has effect during the table creation. If None, will try to read all files. (default) path_ignore_suffix: Union[str, List[str], None] Suffix or List of suffixes for S3 keys to be ignored during the schema extraction. (e.g. [".csv", "_SUCCESS"]). Only has effect during the table creation. If None, will try to read all files. (default) use_threads : bool, int True to enable concurrent requests, False to disable multiple threads. If enabled os.cpu_count() will be used as the max number of threads. If integer is provided, specified number is used. lock : bool True to execute LOCK command inside the transaction to force serializable isolation. commit_transaction: bool Whether to commit the transaction. True by default. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receive None. s3_additional_kwargs: Forwarded to botocore requests. e.g. s3_additional_kwargs={'ServerSideEncryption': 'aws:kms', 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'} Returns ------- None None. Examples -------- >>> import awswrangler as wr >>> con = wr.redshift.connect("MY_GLUE_CONNECTION") >>> wr.redshift.copy_from_files( ... path="s3://bucket/my_parquet_files/", ... con=con, ... table="my_table", ... schema="public", ... iam_role="arn:aws:iam::XXX:role/XXX" ... ) >>> con.close() """ autocommit_temp: bool = con.autocommit con.autocommit = False try: with con.cursor() as cursor: created_table, created_schema = _create_table( df=None, path=path, parquet_infer_sampling=parquet_infer_sampling, path_suffix=path_suffix, path_ignore_suffix=path_ignore_suffix, con=con, cursor=cursor, table=table, schema=schema, mode=mode, overwrite_method=overwrite_method, diststyle=diststyle, sortstyle=sortstyle, distkey=distkey, sortkey=sortkey, primary_keys=primary_keys, varchar_lengths_default=varchar_lengths_default, varchar_lengths=varchar_lengths, index=False, dtype=None, use_threads=use_threads, boto3_session=boto3_session, s3_additional_kwargs=s3_additional_kwargs, ) if lock and table == created_table: # Lock before copy if copying into target (not temp) table _lock(cursor, [table], schema=schema) _copy( cursor=cursor, path=path, table=created_table, schema=created_schema, iam_role=iam_role, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token, boto3_session=boto3_session, serialize_to_json=serialize_to_json, ) if table != created_table: # upsert if lock: _lock(cursor, [table], schema=schema) _upsert(cursor=cursor, schema=schema, table=table, temp_table=created_table, primary_keys=primary_keys) if commit_transaction: con.commit() except Exception as ex: con.rollback() _logger.error(ex) raise finally: con.autocommit = autocommit_temp
[ "def", "copy_from_files", "(", "# pylint: disable=too-many-locals,too-many-arguments", "path", ":", "str", ",", "con", ":", "redshift_connector", ".", "Connection", ",", "table", ":", "str", ",", "schema", ":", "str", ",", "iam_role", ":", "Optional", "[", "str", "]", "=", "None", ",", "aws_access_key_id", ":", "Optional", "[", "str", "]", "=", "None", ",", "aws_secret_access_key", ":", "Optional", "[", "str", "]", "=", "None", ",", "aws_session_token", ":", "Optional", "[", "str", "]", "=", "None", ",", "parquet_infer_sampling", ":", "float", "=", "1.0", ",", "mode", ":", "str", "=", "\"append\"", ",", "overwrite_method", ":", "str", "=", "\"drop\"", ",", "diststyle", ":", "str", "=", "\"AUTO\"", ",", "distkey", ":", "Optional", "[", "str", "]", "=", "None", ",", "sortstyle", ":", "str", "=", "\"COMPOUND\"", ",", "sortkey", ":", "Optional", "[", "List", "[", "str", "]", "]", "=", "None", ",", "primary_keys", ":", "Optional", "[", "List", "[", "str", "]", "]", "=", "None", ",", "varchar_lengths_default", ":", "int", "=", "256", ",", "varchar_lengths", ":", "Optional", "[", "Dict", "[", "str", ",", "int", "]", "]", "=", "None", ",", "serialize_to_json", ":", "bool", "=", "False", ",", "path_suffix", ":", "Optional", "[", "str", "]", "=", "None", ",", "path_ignore_suffix", ":", "Optional", "[", "str", "]", "=", "None", ",", "use_threads", ":", "Union", "[", "bool", ",", "int", "]", "=", "True", ",", "lock", ":", "bool", "=", "False", ",", "commit_transaction", ":", "bool", "=", "True", ",", "boto3_session", ":", "Optional", "[", "boto3", ".", "Session", "]", "=", "None", ",", "s3_additional_kwargs", ":", "Optional", "[", "Dict", "[", "str", ",", "str", "]", "]", "=", "None", ",", ")", "->", "None", ":", "autocommit_temp", ":", "bool", "=", "con", ".", "autocommit", "con", ".", "autocommit", "=", "False", "try", ":", "with", "con", ".", "cursor", "(", ")", "as", "cursor", ":", "created_table", ",", "created_schema", "=", "_create_table", "(", "df", "=", "None", ",", "path", "=", "path", ",", "parquet_infer_sampling", "=", "parquet_infer_sampling", ",", "path_suffix", "=", "path_suffix", ",", "path_ignore_suffix", "=", "path_ignore_suffix", ",", "con", "=", "con", ",", "cursor", "=", "cursor", ",", "table", "=", "table", ",", "schema", "=", "schema", ",", "mode", "=", "mode", ",", "overwrite_method", "=", "overwrite_method", ",", "diststyle", "=", "diststyle", ",", "sortstyle", "=", "sortstyle", ",", "distkey", "=", "distkey", ",", "sortkey", "=", "sortkey", ",", "primary_keys", "=", "primary_keys", ",", "varchar_lengths_default", "=", "varchar_lengths_default", ",", "varchar_lengths", "=", "varchar_lengths", ",", "index", "=", "False", ",", "dtype", "=", "None", ",", "use_threads", "=", "use_threads", ",", "boto3_session", "=", "boto3_session", ",", "s3_additional_kwargs", "=", "s3_additional_kwargs", ",", ")", "if", "lock", "and", "table", "==", "created_table", ":", "# Lock before copy if copying into target (not temp) table", "_lock", "(", "cursor", ",", "[", "table", "]", ",", "schema", "=", "schema", ")", "_copy", "(", "cursor", "=", "cursor", ",", "path", "=", "path", ",", "table", "=", "created_table", ",", "schema", "=", "created_schema", ",", "iam_role", "=", "iam_role", ",", "aws_access_key_id", "=", "aws_access_key_id", ",", "aws_secret_access_key", "=", "aws_secret_access_key", ",", "aws_session_token", "=", "aws_session_token", ",", "boto3_session", "=", "boto3_session", ",", "serialize_to_json", "=", "serialize_to_json", ",", ")", "if", "table", "!=", "created_table", ":", "# 
upsert", "if", "lock", ":", "_lock", "(", "cursor", ",", "[", "table", "]", ",", "schema", "=", "schema", ")", "_upsert", "(", "cursor", "=", "cursor", ",", "schema", "=", "schema", ",", "table", "=", "table", ",", "temp_table", "=", "created_table", ",", "primary_keys", "=", "primary_keys", ")", "if", "commit_transaction", ":", "con", ".", "commit", "(", ")", "except", "Exception", "as", "ex", ":", "con", ".", "rollback", "(", ")", "_logger", ".", "error", "(", "ex", ")", "raise", "finally", ":", "con", ".", "autocommit", "=", "autocommit_temp" ]
https://github.com/awslabs/aws-data-wrangler/blob/548f5197bacd91bd50ebc66a0173eff9c56f69b1/awswrangler/redshift.py#L1133-L1329
ctxis/canape
5f0e03424577296bcc60c2008a60a98ec5307e4b
CANAPE.Scripting/Lib/imputil.py
python
ImportManager.uninstall
(self)
Restore the previous import mechanism.
Restore the previous import mechanism.
[ "Restore", "the", "previous", "import", "mechanism", "." ]
def uninstall(self): "Restore the previous import mechanism." self.namespace['__import__'] = self.previous_importer
[ "def", "uninstall", "(", "self", ")", ":", "self", ".", "namespace", "[", "'__import__'", "]", "=", "self", ".", "previous_importer" ]
https://github.com/ctxis/canape/blob/5f0e03424577296bcc60c2008a60a98ec5307e4b/CANAPE.Scripting/Lib/imputil.py#L49-L51
windelbouwman/ppci
915c069e0667042c085ec42c78e9e3c9a5295324
ppci/lang/c/semantics.py
python
CSemantics.init_store
(self, init_cursor, value)
Store an initial value at position pointed by cursor.
Store an initial value at position pointed by cursor.
[ "Store", "an", "initial", "value", "at", "position", "pointed", "by", "cursor", "." ]
def init_store(self, init_cursor, value): """ Store an initial value at position pointed by cursor. """ if init_cursor.at_end(): self.warning("Excess elements!", value.location) # Determine if we need implicit init levels: target_typ = init_cursor.at_typ() while not self.equal_types(value.typ, target_typ): # If we are at a complex type, implicit descend otherwise cast: if target_typ.is_compound: init_cursor.enter_compound(target_typ, value.location, True) target_typ = init_cursor.at_typ() else: value = self.pointer(value) value = self.coerce(value, target_typ) break self.logger.debug("Storing %s at cursor %s", value, init_cursor) # Retrieve current value to check overwrite: previous_value = init_cursor.get_value() if previous_value: self.warning( "This overwrites other initial value.", value.location ) self.warning("previously defined here.", previous_value.location) init_cursor.set_value(value)
[ "def", "init_store", "(", "self", ",", "init_cursor", ",", "value", ")", ":", "if", "init_cursor", ".", "at_end", "(", ")", ":", "self", ".", "warning", "(", "\"Excess elements!\"", ",", "value", ".", "location", ")", "# Determine if we need implicit init levels:", "target_typ", "=", "init_cursor", ".", "at_typ", "(", ")", "while", "not", "self", ".", "equal_types", "(", "value", ".", "typ", ",", "target_typ", ")", ":", "# If we are at a complex type, implicit descend otherwise cast:", "if", "target_typ", ".", "is_compound", ":", "init_cursor", ".", "enter_compound", "(", "target_typ", ",", "value", ".", "location", ",", "True", ")", "target_typ", "=", "init_cursor", ".", "at_typ", "(", ")", "else", ":", "value", "=", "self", ".", "pointer", "(", "value", ")", "value", "=", "self", ".", "coerce", "(", "value", ",", "target_typ", ")", "break", "self", ".", "logger", ".", "debug", "(", "\"Storing %s at cursor %s\"", ",", "value", ",", "init_cursor", ")", "# Retrieve current value to check overwrite:", "previous_value", "=", "init_cursor", ".", "get_value", "(", ")", "if", "previous_value", ":", "self", ".", "warning", "(", "\"This overwrites other initial value.\"", ",", "value", ".", "location", ")", "self", ".", "warning", "(", "\"previously defined here.\"", ",", "previous_value", ".", "location", ")", "init_cursor", ".", "set_value", "(", "value", ")" ]
https://github.com/windelbouwman/ppci/blob/915c069e0667042c085ec42c78e9e3c9a5295324/ppci/lang/c/semantics.py#L201-L228
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
runtime/python/lib/python2.7/site-packages/docutils/utils/math/math2html.py
python
TaggedBit.complete
(self, contents, tag, breaklines = False)
return self
Set the constant and the tag
Set the constant and the tag
[ "Set", "the", "constant", "and", "the", "tag" ]
def complete(self, contents, tag, breaklines = False): "Set the constant and the tag" self.contents = contents self.output = TaggedOutput().settag(tag, breaklines) return self
[ "def", "complete", "(", "self", ",", "contents", ",", "tag", ",", "breaklines", "=", "False", ")", ":", "self", ".", "contents", "=", "contents", "self", ".", "output", "=", "TaggedOutput", "(", ")", ".", "settag", "(", "tag", ",", "breaklines", ")", "return", "self" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/docutils/utils/math/math2html.py#L2540-L2544
adafruit/Adafruit_Python_BluefruitLE
a01dec2c88fa38143afb855e1df4f9ac774156b7
Adafruit_BluefruitLE/services/colorific.py
python
Colorific.set_color
(self, r, g, b)
Set the red, green, blue color of the bulb.
Set the red, green, blue color of the bulb.
[ "Set", "the", "red", "green", "blue", "color", "of", "the", "bulb", "." ]
def set_color(self, r, g, b): """Set the red, green, blue color of the bulb.""" # See more details on the bulb's protocol from this guide: # https://learn.adafruit.com/reverse-engineering-a-bluetooth-low-energy-light-bulb/overview command = '\x58\x01\x03\x01\xFF\x00{0}{1}{2}'.format(chr(r & 0xFF), chr(g & 0xFF), chr(b & 0xFF)) self._color.write_value(command)
[ "def", "set_color", "(", "self", ",", "r", ",", "g", ",", "b", ")", ":", "# See more details on the bulb's protocol from this guide:", "# https://learn.adafruit.com/reverse-engineering-a-bluetooth-low-energy-light-bulb/overview", "command", "=", "'\\x58\\x01\\x03\\x01\\xFF\\x00{0}{1}{2}'", ".", "format", "(", "chr", "(", "r", "&", "0xFF", ")", ",", "chr", "(", "g", "&", "0xFF", ")", ",", "chr", "(", "b", "&", "0xFF", ")", ")", "self", ".", "_color", ".", "write_value", "(", "command", ")" ]
https://github.com/adafruit/Adafruit_Python_BluefruitLE/blob/a01dec2c88fa38143afb855e1df4f9ac774156b7/Adafruit_BluefruitLE/services/colorific.py#L47-L54
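A minimal usage sketch for the Colorific.set_color entry above: the record builds the bulb command as a chr()-concatenated str, and the snippet below rebuilds the same 0x58 packet layout with Python 3 bytes. The helper name build_color_command is invented for illustration.

    def build_color_command(r: int, g: int, b: int) -> bytes:
        # Fixed header taken from the set_color() source above, followed by
        # the red, green and blue values masked to one byte each.
        header = bytes([0x58, 0x01, 0x03, 0x01, 0xFF, 0x00])
        return header + bytes([r & 0xFF, g & 0xFF, b & 0xFF])

    assert build_color_command(255, 0, 16) == b"\x58\x01\x03\x01\xff\x00\xff\x00\x10"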
nltk/nltk
3f74ac55681667d7ef78b664557487145f51eb02
nltk/classify/naivebayes.py
python
NaiveBayesClassifier.__init__
(self, label_probdist, feature_probdist)
:param label_probdist: P(label), the probability distribution over labels. It is expressed as a ``ProbDistI`` whose samples are labels. I.e., P(label) = ``label_probdist.prob(label)``. :param feature_probdist: P(fname=fval|label), the probability distribution for feature values, given labels. It is expressed as a dictionary whose keys are ``(label, fname)`` pairs and whose values are ``ProbDistI`` objects over feature values. I.e., P(fname=fval|label) = ``feature_probdist[label,fname].prob(fval)``. If a given ``(label,fname)`` is not a key in ``feature_probdist``, then it is assumed that the corresponding P(fname=fval|label) is 0 for all values of ``fval``.
:param label_probdist: P(label), the probability distribution over labels. It is expressed as a ``ProbDistI`` whose samples are labels. I.e., P(label) = ``label_probdist.prob(label)``.
[ ":", "param", "label_probdist", ":", "P", "(", "label", ")", "the", "probability", "distribution", "over", "labels", ".", "It", "is", "expressed", "as", "a", "ProbDistI", "whose", "samples", "are", "labels", ".", "I", ".", "e", ".", "P", "(", "label", ")", "=", "label_probdist", ".", "prob", "(", "label", ")", "." ]
def __init__(self, label_probdist, feature_probdist): """ :param label_probdist: P(label), the probability distribution over labels. It is expressed as a ``ProbDistI`` whose samples are labels. I.e., P(label) = ``label_probdist.prob(label)``. :param feature_probdist: P(fname=fval|label), the probability distribution for feature values, given labels. It is expressed as a dictionary whose keys are ``(label, fname)`` pairs and whose values are ``ProbDistI`` objects over feature values. I.e., P(fname=fval|label) = ``feature_probdist[label,fname].prob(fval)``. If a given ``(label,fname)`` is not a key in ``feature_probdist``, then it is assumed that the corresponding P(fname=fval|label) is 0 for all values of ``fval``. """ self._label_probdist = label_probdist self._feature_probdist = feature_probdist self._labels = list(label_probdist.samples())
[ "def", "__init__", "(", "self", ",", "label_probdist", ",", "feature_probdist", ")", ":", "self", ".", "_label_probdist", "=", "label_probdist", "self", ".", "_feature_probdist", "=", "feature_probdist", "self", ".", "_labels", "=", "list", "(", "label_probdist", ".", "samples", "(", ")", ")" ]
https://github.com/nltk/nltk/blob/3f74ac55681667d7ef78b664557487145f51eb02/nltk/classify/naivebayes.py#L64-L83
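A minimal construction sketch for the NaiveBayesClassifier.__init__ entry above, assuming nltk is installed; it uses nltk.probability.DictionaryProbDist as the ProbDistI implementation, and the labels, feature name, and probabilities are invented.

    from nltk.probability import DictionaryProbDist
    from nltk.classify.naivebayes import NaiveBayesClassifier

    # P(label): a uniform prior over two made-up labels.
    label_probdist = DictionaryProbDist({"pos": 0.5, "neg": 0.5})

    # P(fname=fval|label): one distribution per (label, feature-name) pair.
    feature_probdist = {
        ("pos", "contains(good)"): DictionaryProbDist({True: 0.8, False: 0.2}),
        ("neg", "contains(good)"): DictionaryProbDist({True: 0.1, False: 0.9}),
    }

    classifier = NaiveBayesClassifier(label_probdist, feature_probdist)
    print(classifier.classify({"contains(good)": True}))  # -> 'pos'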
CGATOxford/cgat
326aad4694bdfae8ddc194171bb5d73911243947
CGAT/scripts/gtfs2graph.py
python
CounterGenes.run
(self, filename1, filename2)
count overlap between two gtf files.
count overlap between two gtf files.
[ "count", "overlap", "between", "two", "gtf", "files", "." ]
def run(self, filename1, filename2): """count overlap between two gtf files.""" E.info("counting started for %s versus %s" % (filename1, filename2)) idx2 = self.buildIndex(filename2) self._run(filename1, idx2)
[ "def", "run", "(", "self", ",", "filename1", ",", "filename2", ")", ":", "E", ".", "info", "(", "\"counting started for %s versus %s\"", "%", "(", "filename1", ",", "filename2", ")", ")", "idx2", "=", "self", ".", "buildIndex", "(", "filename2", ")", "self", ".", "_run", "(", "filename1", ",", "idx2", ")" ]
https://github.com/CGATOxford/cgat/blob/326aad4694bdfae8ddc194171bb5d73911243947/CGAT/scripts/gtfs2graph.py#L204-L210
sympy/sympy
d822fcba181155b85ff2b29fe525adbafb22b448
sympy/categories/diagram_drawing.py
python
DiagramGrid._build_skeleton
(morphisms)
return edges
Creates a dictionary which maps edges to corresponding morphisms. Thus for a morphism `f:A\rightarrow B`, the edge `(A, B)` will be associated with `f`. This function also adds to the list those edges which are formed by juxtaposition of two edges already in the list. These new edges are not associated with any morphism and are only added to assure that the diagram can be decomposed into triangles.
Creates a dictionary which maps edges to corresponding morphisms. Thus for a morphism `f:A\rightarrow B`, the edge `(A, B)` will be associated with `f`. This function also adds to the list those edges which are formed by juxtaposition of two edges already in the list. These new edges are not associated with any morphism and are only added to assure that the diagram can be decomposed into triangles.
[ "Creates", "a", "dictionary", "which", "maps", "edges", "to", "corresponding", "morphisms", ".", "Thus", "for", "a", "morphism", "f", ":", "A", "\\", "rightarrow", "B", "the", "edge", "(", "A", "B", ")", "will", "be", "associated", "with", "f", ".", "This", "function", "also", "adds", "to", "the", "list", "those", "edges", "which", "are", "formed", "by", "juxtaposition", "of", "two", "edges", "already", "in", "the", "list", ".", "These", "new", "edges", "are", "not", "associated", "with", "any", "morphism", "and", "are", "only", "added", "to", "assure", "that", "the", "diagram", "can", "be", "decomposed", "into", "triangles", "." ]
def _build_skeleton(morphisms): """ Creates a dictionary which maps edges to corresponding morphisms. Thus for a morphism `f:A\rightarrow B`, the edge `(A, B)` will be associated with `f`. This function also adds to the list those edges which are formed by juxtaposition of two edges already in the list. These new edges are not associated with any morphism and are only added to assure that the diagram can be decomposed into triangles. """ edges = {} # Create edges for morphisms. for morphism in morphisms: DiagramGrid._add_edge_append( edges, frozenset([morphism.domain, morphism.codomain]), morphism) # Create new edges by juxtaposing existing edges. edges1 = dict(edges) for w in edges1: for v in edges1: wv = DiagramGrid._juxtapose_edges(w, v) if wv and wv not in edges: edges[wv] = [] return edges
[ "def", "_build_skeleton", "(", "morphisms", ")", ":", "edges", "=", "{", "}", "# Create edges for morphisms.", "for", "morphism", "in", "morphisms", ":", "DiagramGrid", ".", "_add_edge_append", "(", "edges", ",", "frozenset", "(", "[", "morphism", ".", "domain", ",", "morphism", ".", "codomain", "]", ")", ",", "morphism", ")", "# Create new edges by juxtaposing existing edges.", "edges1", "=", "dict", "(", "edges", ")", "for", "w", "in", "edges1", ":", "for", "v", "in", "edges1", ":", "wv", "=", "DiagramGrid", ".", "_juxtapose_edges", "(", "w", ",", "v", ")", "if", "wv", "and", "wv", "not", "in", "edges", ":", "edges", "[", "wv", "]", "=", "[", "]", "return", "edges" ]
https://github.com/sympy/sympy/blob/d822fcba181155b85ff2b29fe525adbafb22b448/sympy/categories/diagram_drawing.py#L372-L396
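_build_skeleton is a private static helper of DiagramGrid, so a hedged way to exercise it is through the public layout entry point that calls it; the objects and morphism names below are invented, and sympy is assumed installed.

    from sympy.categories import Object, NamedMorphism, Diagram, DiagramGrid

    A, B, C = Object("A"), Object("B"), Object("C")
    f = NamedMorphism(A, B, "f")
    g = NamedMorphism(B, C, "g")

    # DiagramGrid builds the edge skeleton internally before laying out the grid.
    grid = DiagramGrid(Diagram([f, g]))
    print(grid.width, grid.height)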
taylorlu/Speaker-Diarization
ed0985950cbac6dc699bf54f58dbbc813110d3f7
uisrnn/utils.py
python
enforce_cluster_id_uniqueness
(cluster_ids)
return new_cluster_ids
Enforce uniqueness of cluster id across sequences. Args: cluster_ids: a list of 1-dim list/numpy.ndarray of strings Returns: a new list with same length of cluster_ids Raises: TypeError: if cluster_ids or its element has wrong type
Enforce uniqueness of cluster id across sequences.
[ "Enforce", "uniqueness", "of", "cluster", "id", "across", "sequences", "." ]
def enforce_cluster_id_uniqueness(cluster_ids): """Enforce uniqueness of cluster id across sequences. Args: cluster_ids: a list of 1-dim list/numpy.ndarray of strings Returns: a new list with same length of cluster_ids Raises: TypeError: if cluster_ids or its element has wrong type """ if not isinstance(cluster_ids, list): raise TypeError('cluster_ids must be a list') new_cluster_ids = [] for cluster_id in cluster_ids: sequence_id = generate_random_string() if isinstance(cluster_id, np.ndarray): cluster_id = cluster_id.tolist() if not isinstance(cluster_id, list): raise TypeError('Elements of cluster_ids must be list or numpy.ndarray') new_cluster_id = ['_'.join([sequence_id, s]) for s in cluster_id] new_cluster_ids.append(new_cluster_id) return new_cluster_ids
[ "def", "enforce_cluster_id_uniqueness", "(", "cluster_ids", ")", ":", "if", "not", "isinstance", "(", "cluster_ids", ",", "list", ")", ":", "raise", "TypeError", "(", "'cluster_ids must be a list'", ")", "new_cluster_ids", "=", "[", "]", "for", "cluster_id", "in", "cluster_ids", ":", "sequence_id", "=", "generate_random_string", "(", ")", "if", "isinstance", "(", "cluster_id", ",", "np", ".", "ndarray", ")", ":", "cluster_id", "=", "cluster_id", ".", "tolist", "(", ")", "if", "not", "isinstance", "(", "cluster_id", ",", "list", ")", ":", "raise", "TypeError", "(", "'Elements of cluster_ids must be list or numpy.ndarray'", ")", "new_cluster_id", "=", "[", "'_'", ".", "join", "(", "[", "sequence_id", ",", "s", "]", ")", "for", "s", "in", "cluster_id", "]", "new_cluster_ids", ".", "append", "(", "new_cluster_id", ")", "return", "new_cluster_ids" ]
https://github.com/taylorlu/Speaker-Diarization/blob/ed0985950cbac6dc699bf54f58dbbc813110d3f7/uisrnn/utils.py#L55-L78
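A small behaviour sketch for enforce_cluster_id_uniqueness, assuming the uisrnn package is importable; the label values are invented. Each input sequence gets its own random prefix, so identical labels from different sequences no longer collide.

    from uisrnn.utils import enforce_cluster_id_uniqueness

    cluster_ids = [["A", "B", "A"], ["A", "C"]]
    new_ids = enforce_cluster_id_uniqueness(cluster_ids)
    # Possible output (prefixes are random):
    # [['x1yz_A', 'x1yz_B', 'x1yz_A'], ['q8rt_A', 'q8rt_C']]
    print(new_ids)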
gradientinstitute/aboleth
53a3de23dce4d607ffec92be936e83d2dd7ebb3c
aboleth/layers.py
python
MaxPool2D._build
(self, X)
return Net, KL
Build the graph of this layer.
Build the graph of this layer.
[ "Build", "the", "graph", "of", "this", "layer", "." ]
def _build(self, X): """Build the graph of this layer.""" Net = tf.map_fn(lambda inputs: tf.nn.max_pool(inputs, ksize=self.ksize, strides=self.strides, padding=self.padding), X) KL = 0. return Net, KL
[ "def", "_build", "(", "self", ",", "X", ")", ":", "Net", "=", "tf", ".", "map_fn", "(", "lambda", "inputs", ":", "tf", ".", "nn", ".", "max_pool", "(", "inputs", ",", "ksize", "=", "self", ".", "ksize", ",", "strides", "=", "self", ".", "strides", ",", "padding", "=", "self", ".", "padding", ")", ",", "X", ")", "KL", "=", "0.", "return", "Net", ",", "KL" ]
https://github.com/gradientinstitute/aboleth/blob/53a3de23dce4d607ffec92be936e83d2dd7ebb3c/aboleth/layers.py#L228-L235
CLUEbenchmark/CLUE
5bd39732734afecb490cf18a5212e692dbf2c007
baselines/models/roberta_wwm_ext/run_classifier.py
python
input_fn_builder
(features, seq_length, is_training, drop_remainder)
return input_fn
Creates an `input_fn` closure to be passed to TPUEstimator.
Creates an `input_fn` closure to be passed to TPUEstimator.
[ "Creates", "an", "input_fn", "closure", "to", "be", "passed", "to", "TPUEstimator", "." ]
def input_fn_builder(features, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_input_ids = [] all_input_mask = [] all_segment_ids = [] all_label_ids = [] for feature in features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_label_ids.append(feature.label_id) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses tf.py_func which is # not TPU compatible. The right way to load data is with TFRecordReader. d = tf.data.Dataset.from_tensor_slices({ "input_ids": tf.constant( all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "segment_ids": tf.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32), "label_ids": tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32), }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) return d return input_fn
[ "def", "input_fn_builder", "(", "features", ",", "seq_length", ",", "is_training", ",", "drop_remainder", ")", ":", "all_input_ids", "=", "[", "]", "all_input_mask", "=", "[", "]", "all_segment_ids", "=", "[", "]", "all_label_ids", "=", "[", "]", "for", "feature", "in", "features", ":", "all_input_ids", ".", "append", "(", "feature", ".", "input_ids", ")", "all_input_mask", ".", "append", "(", "feature", ".", "input_mask", ")", "all_segment_ids", ".", "append", "(", "feature", ".", "segment_ids", ")", "all_label_ids", ".", "append", "(", "feature", ".", "label_id", ")", "def", "input_fn", "(", "params", ")", ":", "\"\"\"The actual input function.\"\"\"", "batch_size", "=", "params", "[", "\"batch_size\"", "]", "num_examples", "=", "len", "(", "features", ")", "# This is for demo purposes and does NOT scale to large data sets. We do", "# not use Dataset.from_generator() because that uses tf.py_func which is", "# not TPU compatible. The right way to load data is with TFRecordReader.", "d", "=", "tf", ".", "data", ".", "Dataset", ".", "from_tensor_slices", "(", "{", "\"input_ids\"", ":", "tf", ".", "constant", "(", "all_input_ids", ",", "shape", "=", "[", "num_examples", ",", "seq_length", "]", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "\"input_mask\"", ":", "tf", ".", "constant", "(", "all_input_mask", ",", "shape", "=", "[", "num_examples", ",", "seq_length", "]", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "\"segment_ids\"", ":", "tf", ".", "constant", "(", "all_segment_ids", ",", "shape", "=", "[", "num_examples", ",", "seq_length", "]", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "\"label_ids\"", ":", "tf", ".", "constant", "(", "all_label_ids", ",", "shape", "=", "[", "num_examples", "]", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "}", ")", "if", "is_training", ":", "d", "=", "d", ".", "repeat", "(", ")", "d", "=", "d", ".", "shuffle", "(", "buffer_size", "=", "100", ")", "d", "=", "d", ".", "batch", "(", "batch_size", "=", "batch_size", ",", "drop_remainder", "=", "drop_remainder", ")", "return", "d", "return", "input_fn" ]
https://github.com/CLUEbenchmark/CLUE/blob/5bd39732734afecb490cf18a5212e692dbf2c007/baselines/models/roberta_wwm_ext/run_classifier.py#L640-L689
xonsh/xonsh
b76d6f994f22a4078f602f8b386f4ec280c8461f
xonsh/completers/path.py
python
_normpath
(p)
return p
Wraps os.normpath() to avoid removing './' at the beginning and '/' at the end. On windows it does the same with backslashes
Wraps os.normpath() to avoid removing './' at the beginning and '/' at the end. On windows it does the same with backslashes
[ "Wraps", "os", ".", "normpath", "()", "to", "avoid", "removing", ".", "/", "at", "the", "beginning", "and", "/", "at", "the", "end", ".", "On", "windows", "it", "does", "the", "same", "with", "backslashes" ]
def _normpath(p): """ Wraps os.normpath() to avoid removing './' at the beginning and '/' at the end. On windows it does the same with backslashes """ initial_dotslash = p.startswith(os.curdir + os.sep) initial_dotslash |= xp.ON_WINDOWS and p.startswith(os.curdir + os.altsep) p = p.rstrip() trailing_slash = p.endswith(os.sep) trailing_slash |= xp.ON_WINDOWS and p.endswith(os.altsep) p = os.path.normpath(p) if initial_dotslash and p != ".": p = os.path.join(os.curdir, p) if trailing_slash: p = os.path.join(p, "") if xp.ON_WINDOWS and XSH.env.get("FORCE_POSIX_PATHS"): p = p.replace(os.sep, os.altsep) return p
[ "def", "_normpath", "(", "p", ")", ":", "initial_dotslash", "=", "p", ".", "startswith", "(", "os", ".", "curdir", "+", "os", ".", "sep", ")", "initial_dotslash", "|=", "xp", ".", "ON_WINDOWS", "and", "p", ".", "startswith", "(", "os", ".", "curdir", "+", "os", ".", "altsep", ")", "p", "=", "p", ".", "rstrip", "(", ")", "trailing_slash", "=", "p", ".", "endswith", "(", "os", ".", "sep", ")", "trailing_slash", "|=", "xp", ".", "ON_WINDOWS", "and", "p", ".", "endswith", "(", "os", ".", "altsep", ")", "p", "=", "os", ".", "path", ".", "normpath", "(", "p", ")", "if", "initial_dotslash", "and", "p", "!=", "\".\"", ":", "p", "=", "os", ".", "path", ".", "join", "(", "os", ".", "curdir", ",", "p", ")", "if", "trailing_slash", ":", "p", "=", "os", ".", "path", ".", "join", "(", "p", ",", "\"\"", ")", "if", "xp", ".", "ON_WINDOWS", "and", "XSH", ".", "env", ".", "get", "(", "\"FORCE_POSIX_PATHS\"", ")", ":", "p", "=", "p", ".", "replace", "(", "os", ".", "sep", ",", "os", ".", "altsep", ")", "return", "p" ]
https://github.com/xonsh/xonsh/blob/b76d6f994f22a4078f602f8b386f4ec280c8461f/xonsh/completers/path.py#L91-L108
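A behaviour sketch for _normpath on a POSIX system with FORCE_POSIX_PATHS unset; unlike plain os.path.normpath(), the leading './' and the trailing separator survive. The input path is invented and the import of this private helper is an assumption.

    import os
    from xonsh.completers.path import _normpath  # private helper; import path assumed

    print(_normpath("./foo//bar/"))         # -> './foo/bar/'
    print(os.path.normpath("./foo//bar/"))  # -> 'foo/bar'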
JBakamovic/cxxd
142c19649b036bd6f6bdcd4684de735ea11a6c94
services/source_code_model/indexer/symbol_database.py
python
SymbolDatabase.delete_all_entries
(self)
[]
def delete_all_entries(self): try: self.db_connection.cursor().execute('DELETE FROM symbol') self.db_connection.cursor().execute('DELETE FROM diagnostics') except: logging.error(sys.exc_info())
[ "def", "delete_all_entries", "(", "self", ")", ":", "try", ":", "self", ".", "db_connection", ".", "cursor", "(", ")", ".", "execute", "(", "'DELETE FROM symbol'", ")", "self", ".", "db_connection", ".", "cursor", "(", ")", ".", "execute", "(", "'DELETE FROM diagnostics'", ")", "except", ":", "logging", ".", "error", "(", "sys", ".", "exc_info", "(", ")", ")" ]
https://github.com/JBakamovic/cxxd/blob/142c19649b036bd6f6bdcd4684de735ea11a6c94/services/source_code_model/indexer/symbol_database.py#L312-L317
UDST/urbansim
0db75668ada0005352b7c7e0a405265f78ccadd7
urbansim/utils/misc.py
python
compute_range
(travel_data, attr, travel_time_attr, dist, agg=np.sum)
return travel_data.groupby(level=0).attr.apply(agg)
Compute a zone-based accessibility query using the urbansim format travel data dataframe. Parameters ---------- travel_data : dataframe The dataframe of urbansim format travel data. Has from_zone_id as first index, to_zone_id as second index, and different impedances between zones as columns. attr : series The attr to aggregate. Should be indexed by zone_id and the values will be aggregated. travel_time_attr : string The column name in travel_data to use as the impedance. dist : float The max distance to aggregate up to agg : function, optional, np.sum by default The numpy function to use for aggregation
Compute a zone-based accessibility query using the urbansim format travel data dataframe.
[ "Compute", "a", "zone", "-", "based", "accessibility", "query", "using", "the", "urbansim", "format", "travel", "data", "dataframe", "." ]
def compute_range(travel_data, attr, travel_time_attr, dist, agg=np.sum): """ Compute a zone-based accessibility query using the urbansim format travel data dataframe. Parameters ---------- travel_data : dataframe The dataframe of urbansim format travel data. Has from_zone_id as first index, to_zone_id as second index, and different impedances between zones as columns. attr : series The attr to aggregate. Should be indexed by zone_id and the values will be aggregated. travel_time_attr : string The column name in travel_data to use as the impedance. dist : float The max distance to aggregate up to agg : function, optional, np.sum by default The numpy function to use for aggregation """ travel_data = travel_data.reset_index(level=1) travel_data = travel_data[travel_data[travel_time_attr] < dist] travel_data["attr"] = attr.reindex(travel_data.to_zone_id, fill_value=0).values return travel_data.groupby(level=0).attr.apply(agg)
[ "def", "compute_range", "(", "travel_data", ",", "attr", ",", "travel_time_attr", ",", "dist", ",", "agg", "=", "np", ".", "sum", ")", ":", "travel_data", "=", "travel_data", ".", "reset_index", "(", "level", "=", "1", ")", "travel_data", "=", "travel_data", "[", "travel_data", "[", "travel_time_attr", "]", "<", "dist", "]", "travel_data", "[", "\"attr\"", "]", "=", "attr", ".", "reindex", "(", "travel_data", ".", "to_zone_id", ",", "fill_value", "=", "0", ")", ".", "values", "return", "travel_data", ".", "groupby", "(", "level", "=", "0", ")", ".", "attr", ".", "apply", "(", "agg", ")" ]
https://github.com/UDST/urbansim/blob/0db75668ada0005352b7c7e0a405265f78ccadd7/urbansim/utils/misc.py#L118-L142
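A self-contained sketch of the aggregation compute_range performs, using a tiny made-up travel-time table; the zone ids, times, and the 'am_peak_travel_time' column name are invented. It repeats the same steps inline so it runs with pandas alone, without urbansim.

    import numpy as np
    import pandas as pd

    travel_data = pd.DataFrame(
        {"am_peak_travel_time": [5.0, 20.0, 8.0]},
        index=pd.MultiIndex.from_tuples(
            [(1, 1), (1, 2), (2, 1)], names=["from_zone_id", "to_zone_id"]
        ),
    )
    jobs_by_zone = pd.Series({1: 100, 2: 250})

    # Same steps as compute_range(): keep pairs within 10 minutes, attach the
    # attribute of the destination zone, then aggregate per origin zone.
    td = travel_data.reset_index(level=1)
    td = td[td["am_peak_travel_time"] < 10]
    td["attr"] = jobs_by_zone.reindex(td.to_zone_id, fill_value=0).values
    print(td.groupby(level=0).attr.apply(np.sum))  # zone 1 -> 100, zone 2 -> 100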
aws/aws-parallelcluster
f1fe5679a01c524e7ea904c329bd6d17318c6cd9
awsbatch-cli/src/awsbatch/awsbout.py
python
main
()
Command entrypoint.
Command entrypoint.
[ "Command", "entrypoint", "." ]
def main(): """Command entrypoint.""" try: # parse input parameters and config file args = _get_parser().parse_args() _validate_parameters(args) log = config_logger(args.log_level) log.info("Input parameters: %s", args) config = AWSBatchCliConfig(log=log, cluster=args.cluster) boto3_factory = Boto3ClientFactory(region=config.region, proxy=config.proxy) AWSBoutCommand(log, boto3_factory).run( job_id=args.job_id, head=args.head, tail=args.tail, stream=args.stream, stream_period=args.stream_period ) except KeyboardInterrupt: print("Exiting...") sys.exit(0) except Exception as e: fail("Unexpected error. Command failed with exception: %s" % e)
[ "def", "main", "(", ")", ":", "try", ":", "# parse input parameters and config file", "args", "=", "_get_parser", "(", ")", ".", "parse_args", "(", ")", "_validate_parameters", "(", "args", ")", "log", "=", "config_logger", "(", "args", ".", "log_level", ")", "log", ".", "info", "(", "\"Input parameters: %s\"", ",", "args", ")", "config", "=", "AWSBatchCliConfig", "(", "log", "=", "log", ",", "cluster", "=", "args", ".", "cluster", ")", "boto3_factory", "=", "Boto3ClientFactory", "(", "region", "=", "config", ".", "region", ",", "proxy", "=", "config", ".", "proxy", ")", "AWSBoutCommand", "(", "log", ",", "boto3_factory", ")", ".", "run", "(", "job_id", "=", "args", ".", "job_id", ",", "head", "=", "args", ".", "head", ",", "tail", "=", "args", ".", "tail", ",", "stream", "=", "args", ".", "stream", ",", "stream_period", "=", "args", ".", "stream_period", ")", "except", "KeyboardInterrupt", ":", "print", "(", "\"Exiting...\"", ")", "sys", ".", "exit", "(", "0", ")", "except", "Exception", "as", "e", ":", "fail", "(", "\"Unexpected error. Command failed with exception: %s\"", "%", "e", ")" ]
https://github.com/aws/aws-parallelcluster/blob/f1fe5679a01c524e7ea904c329bd6d17318c6cd9/awsbatch-cli/src/awsbatch/awsbout.py#L194-L213
beeware/ouroboros
a29123c6fab6a807caffbb7587cf548e0c370296
ouroboros/calendar.py
python
Calendar.yeardays2calendar
(self, year, width=3)
return [months[i:i+width] for i in range(0, len(months), width) ]
Return the data for the specified year ready for formatting (similar to yeardatescalendar()). Entries in the week lists are (day number, weekday number) tuples. Day numbers outside this month are zero.
Return the data for the specified year ready for formatting (similar to yeardatescalendar()). Entries in the week lists are (day number, weekday number) tuples. Day numbers outside this month are zero.
[ "Return", "the", "data", "for", "the", "specified", "year", "ready", "for", "formatting", "(", "similar", "to", "yeardatescalendar", "()", ")", ".", "Entries", "in", "the", "week", "lists", "are", "(", "day", "number", "weekday", "number", ")", "tuples", ".", "Day", "numbers", "outside", "this", "month", "are", "zero", "." ]
def yeardays2calendar(self, year, width=3): """ Return the data for the specified year ready for formatting (similar to yeardatescalendar()). Entries in the week lists are (day number, weekday number) tuples. Day numbers outside this month are zero. """ months = [ self.monthdays2calendar(year, i) for i in range(January, January+12) ] return [months[i:i+width] for i in range(0, len(months), width) ]
[ "def", "yeardays2calendar", "(", "self", ",", "year", ",", "width", "=", "3", ")", ":", "months", "=", "[", "self", ".", "monthdays2calendar", "(", "year", ",", "i", ")", "for", "i", "in", "range", "(", "January", ",", "January", "+", "12", ")", "]", "return", "[", "months", "[", "i", ":", "i", "+", "width", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "months", ")", ",", "width", ")", "]" ]
https://github.com/beeware/ouroboros/blob/a29123c6fab6a807caffbb7587cf548e0c370296/ouroboros/calendar.py#L233-L244
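A quick check of the return structure described above, using CPython's own calendar module, which this ouroboros file mirrors; the year is arbitrary.

    import calendar

    rows = calendar.Calendar().yeardays2calendar(2024, width=3)
    print(len(rows), len(rows[0]))  # 4 rows of 3 months each
    january = rows[0][0]
    print(january[0][0])            # (1, 0): 1 Jan 2024 falls on a Monday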
HymanLiuTS/flaskTs
286648286976e85d9b9a5873632331efcafe0b21
flasky/lib/python2.7/site-packages/setuptools/msvc.py
python
SystemInfo._find_dot_net_versions
(self, bits=32)
return frameworkver
Find Microsoft .NET Framework versions. Parameters ---------- bits: int Platform number of bits: 32 or 64.
Find Microsoft .NET Framework versions.
[ "Find", "Microsoft", ".", "NET", "Framework", "versions", "." ]
def _find_dot_net_versions(self, bits=32): """ Find Microsoft .NET Framework versions. Parameters ---------- bits: int Platform number of bits: 32 or 64. """ # Find actual .NET version ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits) or '' # Set .NET versions for specified MSVC++ version if self.vc_ver >= 12.0: frameworkver = (ver, 'v4.0') elif self.vc_ver >= 10.0: frameworkver = ('v4.0.30319' if ver.lower()[:2] != 'v4' else ver, 'v3.5') elif self.vc_ver == 9.0: frameworkver = ('v3.5', 'v2.0.50727') if self.vc_ver == 8.0: frameworkver = ('v3.0', 'v2.0.50727') return frameworkver
[ "def", "_find_dot_net_versions", "(", "self", ",", "bits", "=", "32", ")", ":", "# Find actual .NET version", "ver", "=", "self", ".", "ri", ".", "lookup", "(", "self", ".", "ri", ".", "vc", ",", "'frameworkver%d'", "%", "bits", ")", "or", "''", "# Set .NET versions for specified MSVC++ version", "if", "self", ".", "vc_ver", ">=", "12.0", ":", "frameworkver", "=", "(", "ver", ",", "'v4.0'", ")", "elif", "self", ".", "vc_ver", ">=", "10.0", ":", "frameworkver", "=", "(", "'v4.0.30319'", "if", "ver", ".", "lower", "(", ")", "[", ":", "2", "]", "!=", "'v4'", "else", "ver", ",", "'v3.5'", ")", "elif", "self", ".", "vc_ver", "==", "9.0", ":", "frameworkver", "=", "(", "'v3.5'", ",", "'v2.0.50727'", ")", "if", "self", ".", "vc_ver", "==", "8.0", ":", "frameworkver", "=", "(", "'v3.0'", ",", "'v2.0.50727'", ")", "return", "frameworkver" ]
https://github.com/HymanLiuTS/flaskTs/blob/286648286976e85d9b9a5873632331efcafe0b21/flasky/lib/python2.7/site-packages/setuptools/msvc.py#L716-L738
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/lib/django-1.2/django/contrib/gis/db/models/query.py
python
GeoQuerySet._spatial_attribute
(self, att, settings, field_name=None, model_att=None)
return self.extra(select={model_att : fmt % settings['procedure_args']}, select_params=settings['select_params'])
DRY routine for calling a spatial stored procedure on a geometry column and attaching its output as an attribute of the model. Arguments: att: The name of the spatial attribute that holds the spatial SQL function to call. settings: Dictonary of internal settings to customize for the spatial procedure. Public Keyword Arguments: field_name: The name of the geographic field to call the spatial function on. May also be a lookup to a geometry field as part of a foreign key relation. model_att: The name of the model attribute to attach the output of the spatial function to.
DRY routine for calling a spatial stored procedure on a geometry column and attaching its output as an attribute of the model.
[ "DRY", "routine", "for", "calling", "a", "spatial", "stored", "procedure", "on", "a", "geometry", "column", "and", "attaching", "its", "output", "as", "an", "attribute", "of", "the", "model", "." ]
def _spatial_attribute(self, att, settings, field_name=None, model_att=None): """ DRY routine for calling a spatial stored procedure on a geometry column and attaching its output as an attribute of the model. Arguments: att: The name of the spatial attribute that holds the spatial SQL function to call. settings: Dictonary of internal settings to customize for the spatial procedure. Public Keyword Arguments: field_name: The name of the geographic field to call the spatial function on. May also be a lookup to a geometry field as part of a foreign key relation. model_att: The name of the model attribute to attach the output of the spatial function to. """ # Default settings. settings.setdefault('desc', None) settings.setdefault('geom_args', ()) settings.setdefault('geom_field', None) settings.setdefault('procedure_args', {}) settings.setdefault('procedure_fmt', '%(geo_col)s') settings.setdefault('select_params', []) connection = connections[self.db] backend = connection.ops # Performing setup for the spatial column, unless told not to. if settings.get('setup', True): default_args, geo_field = self._spatial_setup(att, desc=settings['desc'], field_name=field_name, geo_field_type=settings.get('geo_field_type', None)) for k, v in default_args.iteritems(): settings['procedure_args'].setdefault(k, v) else: geo_field = settings['geo_field'] # The attribute to attach to the model. if not isinstance(model_att, basestring): model_att = att # Special handling for any argument that is a geometry. for name in settings['geom_args']: # Using the field's get_placeholder() routine to get any needed # transformation SQL. geom = geo_field.get_prep_value(settings['procedure_args'][name]) params = geo_field.get_db_prep_lookup('contains', geom, connection=connection) geom_placeholder = geo_field.get_placeholder(geom, connection) # Replacing the procedure format with that of any needed # transformation SQL. old_fmt = '%%(%s)s' % name new_fmt = geom_placeholder % '%%s' settings['procedure_fmt'] = settings['procedure_fmt'].replace(old_fmt, new_fmt) settings['select_params'].extend(params) # Getting the format for the stored procedure. fmt = '%%(function)s(%s)' % settings['procedure_fmt'] # If the result of this function needs to be converted. if settings.get('select_field', False): sel_fld = settings['select_field'] if isinstance(sel_fld, GeomField) and backend.select: self.query.custom_select[model_att] = backend.select if connection.ops.oracle: sel_fld.empty_strings_allowed = False self.query.extra_select_fields[model_att] = sel_fld # Finally, setting the extra selection attribute with # the format string expanded with the stored procedure # arguments. return self.extra(select={model_att : fmt % settings['procedure_args']}, select_params=settings['select_params'])
[ "def", "_spatial_attribute", "(", "self", ",", "att", ",", "settings", ",", "field_name", "=", "None", ",", "model_att", "=", "None", ")", ":", "# Default settings.", "settings", ".", "setdefault", "(", "'desc'", ",", "None", ")", "settings", ".", "setdefault", "(", "'geom_args'", ",", "(", ")", ")", "settings", ".", "setdefault", "(", "'geom_field'", ",", "None", ")", "settings", ".", "setdefault", "(", "'procedure_args'", ",", "{", "}", ")", "settings", ".", "setdefault", "(", "'procedure_fmt'", ",", "'%(geo_col)s'", ")", "settings", ".", "setdefault", "(", "'select_params'", ",", "[", "]", ")", "connection", "=", "connections", "[", "self", ".", "db", "]", "backend", "=", "connection", ".", "ops", "# Performing setup for the spatial column, unless told not to.", "if", "settings", ".", "get", "(", "'setup'", ",", "True", ")", ":", "default_args", ",", "geo_field", "=", "self", ".", "_spatial_setup", "(", "att", ",", "desc", "=", "settings", "[", "'desc'", "]", ",", "field_name", "=", "field_name", ",", "geo_field_type", "=", "settings", ".", "get", "(", "'geo_field_type'", ",", "None", ")", ")", "for", "k", ",", "v", "in", "default_args", ".", "iteritems", "(", ")", ":", "settings", "[", "'procedure_args'", "]", ".", "setdefault", "(", "k", ",", "v", ")", "else", ":", "geo_field", "=", "settings", "[", "'geo_field'", "]", "# The attribute to attach to the model.", "if", "not", "isinstance", "(", "model_att", ",", "basestring", ")", ":", "model_att", "=", "att", "# Special handling for any argument that is a geometry.", "for", "name", "in", "settings", "[", "'geom_args'", "]", ":", "# Using the field's get_placeholder() routine to get any needed", "# transformation SQL.", "geom", "=", "geo_field", ".", "get_prep_value", "(", "settings", "[", "'procedure_args'", "]", "[", "name", "]", ")", "params", "=", "geo_field", ".", "get_db_prep_lookup", "(", "'contains'", ",", "geom", ",", "connection", "=", "connection", ")", "geom_placeholder", "=", "geo_field", ".", "get_placeholder", "(", "geom", ",", "connection", ")", "# Replacing the procedure format with that of any needed", "# transformation SQL.", "old_fmt", "=", "'%%(%s)s'", "%", "name", "new_fmt", "=", "geom_placeholder", "%", "'%%s'", "settings", "[", "'procedure_fmt'", "]", "=", "settings", "[", "'procedure_fmt'", "]", ".", "replace", "(", "old_fmt", ",", "new_fmt", ")", "settings", "[", "'select_params'", "]", ".", "extend", "(", "params", ")", "# Getting the format for the stored procedure.", "fmt", "=", "'%%(function)s(%s)'", "%", "settings", "[", "'procedure_fmt'", "]", "# If the result of this function needs to be converted.", "if", "settings", ".", "get", "(", "'select_field'", ",", "False", ")", ":", "sel_fld", "=", "settings", "[", "'select_field'", "]", "if", "isinstance", "(", "sel_fld", ",", "GeomField", ")", "and", "backend", ".", "select", ":", "self", ".", "query", ".", "custom_select", "[", "model_att", "]", "=", "backend", ".", "select", "if", "connection", ".", "ops", ".", "oracle", ":", "sel_fld", ".", "empty_strings_allowed", "=", "False", "self", ".", "query", ".", "extra_select_fields", "[", "model_att", "]", "=", "sel_fld", "# Finally, setting the extra selection attribute with", "# the format string expanded with the stored procedure", "# arguments.", "return", "self", ".", "extra", "(", "select", "=", "{", "model_att", ":", "fmt", "%", "settings", "[", "'procedure_args'", "]", "}", ",", "select_params", "=", "settings", "[", "'select_params'", "]", ")" ]
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/lib/django-1.2/django/contrib/gis/db/models/query.py#L492-L569
salopensource/sal
464414a2666e39bdf5b4b0033a84d5129c93c053
sal/plugin.py
python
BasePlugin.checkin_processor
(self, machine, report_data)
Process checkin data prior to recording in DB. The default implementation does nothing. Plugins can define a checkin processor method by overriding this. This processor is run at the conclusion of the client checkin, and includes the report data processed during that run. Args: machine (server.models.Machine): The machine checking in. report_data (dict): All of the report data.
Process checkin data prior to recording in DB.
[ "Process", "checkin", "data", "prior", "to", "recording", "in", "DB", "." ]
def checkin_processor(self, machine, report_data): """Process checkin data prior to recording in DB. The default implementation does nothing. Plugins can define a checkin processor method by overriding this. This processor is run at the conclusion of the client checkin, and includes the report data processed during that run. Args: machine (server.models.Machine): The machine checking in. report_data (dict): All of the report data. """ pass
[ "def", "checkin_processor", "(", "self", ",", "machine", ",", "report_data", ")", ":", "pass" ]
https://github.com/salopensource/sal/blob/464414a2666e39bdf5b4b0033a84d5129c93c053/sal/plugin.py#L266-L280
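A hedged sketch of overriding the checkin_processor hook described above; the plugin class name and the report_data keys are invented, and Sal's plugin registration machinery is omitted.

    from sal.plugin import BasePlugin

    class AuditCheckinPlugin(BasePlugin):
        def checkin_processor(self, machine, report_data):
            # Inspect the checkin before it is recorded; keys are illustrative.
            os_version = report_data.get("Machine", {}).get("operating_system")
            print("checkin from %s running %s" % (machine, os_version))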
facebookresearch/mobile-vision
f40401a44e86bb3ba9c1b66e7700e15f96b880cb
runtime_lut/code/api.py
python
OpLut.add_op
(self, op_record)
Add a new record to the OpLut Args: op_record: a LUTSchema instance to be added
Add a new record to the OpLut Args: op_record: a LUTSchema instance to be added
[ "Add", "a", "new", "record", "to", "the", "OpLut", "Args", ":", "op_record", ":", "a", "LUTSchema", "instance", "to", "be", "added" ]
def add_op(self, op_record): """ Add a new record to the OpLut Args: op_record: a LUTSchema instance to be added """ assert isinstance(op_record, LUTSchema) if self.find_op(op_record) != []: print("Operator already exists.") return self.ops.append(op_record)
[ "def", "add_op", "(", "self", ",", "op_record", ")", ":", "assert", "isinstance", "(", "op_record", ",", "LUTSchema", ")", "if", "self", ".", "find_op", "(", "op_record", ")", "!=", "[", "]", ":", "print", "(", "\"Operator already exists.\"", ")", "return", "self", ".", "ops", ".", "append", "(", "op_record", ")" ]
https://github.com/facebookresearch/mobile-vision/blob/f40401a44e86bb3ba9c1b66e7700e15f96b880cb/runtime_lut/code/api.py#L97-L108
tensorflow/lingvo
ce10019243d954c3c3ebe739f7589b5eebfdf907
lingvo/core/conv_layers_with_time_padding.py
python
CausalConv2DLayerWithPadding.zero_state
(self, batch_size)
return py_utils.NestedMap(context=context)
Returns the initial state given the batch size. Args: batch_size: the batch size. Returns: state0: A NestedMap of tensors including: - context: A Tensor of shape [b, filter_shape[0]-1, 1, c].
Returns the initial state given the batch size.
[ "Returns", "the", "initial", "state", "given", "the", "batch", "size", "." ]
def zero_state(self, batch_size): """Returns the initial state given the batch size. Args: batch_size: the batch size. Returns: state0: A NestedMap of tensors including: - context: A Tensor of shape [b, filter_shape[0]-1, 1, c]. """ p = self.params assert p.filter_shape[1] == 1, ( 'zero_state() only supports 1d causal convolution.') context = tf.zeros( shape=[batch_size] + [p.filter_shape[0] - 1, p.filter_shape[1], p.filter_shape[2]], dtype=py_utils.FPropDtype(p)) return py_utils.NestedMap(context=context)
[ "def", "zero_state", "(", "self", ",", "batch_size", ")", ":", "p", "=", "self", ".", "params", "assert", "p", ".", "filter_shape", "[", "1", "]", "==", "1", ",", "(", "'zero_state() only supports 1d causal convolution.'", ")", "context", "=", "tf", ".", "zeros", "(", "shape", "=", "[", "batch_size", "]", "+", "[", "p", ".", "filter_shape", "[", "0", "]", "-", "1", ",", "p", ".", "filter_shape", "[", "1", "]", ",", "p", ".", "filter_shape", "[", "2", "]", "]", ",", "dtype", "=", "py_utils", ".", "FPropDtype", "(", "p", ")", ")", "return", "py_utils", ".", "NestedMap", "(", "context", "=", "context", ")" ]
https://github.com/tensorflow/lingvo/blob/ce10019243d954c3c3ebe739f7589b5eebfdf907/lingvo/core/conv_layers_with_time_padding.py#L503-L521
napalm-automation/napalm
ad1ff72000d0de59f25c8847694f51a4ad5aca86
docs/conf.py
python
build_getters_support_matrix
(app)
Build the getters support matrix.
Build the getters support matrix.
[ "Build", "the", "getters", "support", "matrix", "." ]
def build_getters_support_matrix(app): """Build the getters support matrix.""" status = subprocess.call("./test.sh", stdout=sys.stdout, stderr=sys.stderr) if status != 0: print("Something bad happened when processing the test reports.") sys.exit(-1) drivers = set() matrix = { m: defaultdict(dict) for m in dir(NetworkDriver) if not (m.startswith("_") or m in EXCLUDE_METHODS) } regex_name = re.compile(r"(?P<driver>\w+)\/.*::test_(?P<getter>\w+)") filename = "./support/tests/report.json" with open(filename, "r") as f: data = json.loads(f.read()) for test in data["report"]["tests"]: match = regex_name.search(test["name"]) if match: driver = match.group("driver") drivers.add(driver) method = match.group("getter") else: continue if method in EXCLUDE_IN_REPORT: continue result = test["outcome"] if method in METHOD_ALIASES.keys(): method = METHOD_ALIASES[method] intermediate_result = matrix[method].get(driver, None) matrix[method][driver] = _merge_results(result, intermediate_result) sorted_methods = sorted(matrix.keys()) drivers = sorted(drivers) env = Environment(loader=FileSystemLoader(".")) template_file = env.get_template("matrix.j2") rendered_template = template_file.render( matrix=matrix, drivers=drivers, sorted_methods=sorted_methods ) with open("support/matrix.rst", "w") as f: f.write(rendered_template)
[ "def", "build_getters_support_matrix", "(", "app", ")", ":", "status", "=", "subprocess", ".", "call", "(", "\"./test.sh\"", ",", "stdout", "=", "sys", ".", "stdout", ",", "stderr", "=", "sys", ".", "stderr", ")", "if", "status", "!=", "0", ":", "print", "(", "\"Something bad happened when processing the test reports.\"", ")", "sys", ".", "exit", "(", "-", "1", ")", "drivers", "=", "set", "(", ")", "matrix", "=", "{", "m", ":", "defaultdict", "(", "dict", ")", "for", "m", "in", "dir", "(", "NetworkDriver", ")", "if", "not", "(", "m", ".", "startswith", "(", "\"_\"", ")", "or", "m", "in", "EXCLUDE_METHODS", ")", "}", "regex_name", "=", "re", ".", "compile", "(", "r\"(?P<driver>\\w+)\\/.*::test_(?P<getter>\\w+)\"", ")", "filename", "=", "\"./support/tests/report.json\"", "with", "open", "(", "filename", ",", "\"r\"", ")", "as", "f", ":", "data", "=", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ")", "for", "test", "in", "data", "[", "\"report\"", "]", "[", "\"tests\"", "]", ":", "match", "=", "regex_name", ".", "search", "(", "test", "[", "\"name\"", "]", ")", "if", "match", ":", "driver", "=", "match", ".", "group", "(", "\"driver\"", ")", "drivers", ".", "add", "(", "driver", ")", "method", "=", "match", ".", "group", "(", "\"getter\"", ")", "else", ":", "continue", "if", "method", "in", "EXCLUDE_IN_REPORT", ":", "continue", "result", "=", "test", "[", "\"outcome\"", "]", "if", "method", "in", "METHOD_ALIASES", ".", "keys", "(", ")", ":", "method", "=", "METHOD_ALIASES", "[", "method", "]", "intermediate_result", "=", "matrix", "[", "method", "]", ".", "get", "(", "driver", ",", "None", ")", "matrix", "[", "method", "]", "[", "driver", "]", "=", "_merge_results", "(", "result", ",", "intermediate_result", ")", "sorted_methods", "=", "sorted", "(", "matrix", ".", "keys", "(", ")", ")", "drivers", "=", "sorted", "(", "drivers", ")", "env", "=", "Environment", "(", "loader", "=", "FileSystemLoader", "(", "\".\"", ")", ")", "template_file", "=", "env", ".", "get_template", "(", "\"matrix.j2\"", ")", "rendered_template", "=", "template_file", ".", "render", "(", "matrix", "=", "matrix", ",", "drivers", "=", "drivers", ",", "sorted_methods", "=", "sorted_methods", ")", "with", "open", "(", "\"support/matrix.rst\"", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "rendered_template", ")" ]
https://github.com/napalm-automation/napalm/blob/ad1ff72000d0de59f25c8847694f51a4ad5aca86/docs/conf.py#L359-L406
bungnoid/glTools
8ff0899de43784a18bd4543285655e68e28fb5e5
utils/boundingBox.py
python
getBoundingBox
(geometry,worldSpace=True)
return glTools.utils.base.getMBoundingBox(geometry,worldSpace=worldSpace)
Return bounding box for the specified geometry. @param geometry: Geometry to return bounding box for @type geometry: str @param worldSpace: Calculate bounding box in world or local space @type worldSpace: bool
Return bounding box for the specified geometry.
[ "Return", "bounding", "box", "for", "the", "specified", "geometry", "." ]
def getBoundingBox(geometry,worldSpace=True): ''' Return bounding box for the specified geometry. @param geometry: Geometry to return bounding box for @type geometry: str @param worldSpace: Calculate bounding box in world or local space @type worldSpace: bool ''' return glTools.utils.base.getMBoundingBox(geometry,worldSpace=worldSpace)
[ "def", "getBoundingBox", "(", "geometry", ",", "worldSpace", "=", "True", ")", ":", "return", "glTools", ".", "utils", ".", "base", ".", "getMBoundingBox", "(", "geometry", ",", "worldSpace", "=", "worldSpace", ")" ]
https://github.com/bungnoid/glTools/blob/8ff0899de43784a18bd4543285655e68e28fb5e5/utils/boundingBox.py#L59-L67
lad1337/XDM
0c1b7009fe00f06f102a6f67c793478f515e7efe
site-packages/jinja2/utils.py
python
LRUCache.iteritems
(self)
return iter(self.items())
Iterate over all items.
Iterate over all items.
[ "Iterate", "over", "all", "items", "." ]
def iteritems(self): """Iterate over all items.""" return iter(self.items())
[ "def", "iteritems", "(", "self", ")", ":", "return", "iter", "(", "self", ".", "items", "(", ")", ")" ]
https://github.com/lad1337/XDM/blob/0c1b7009fe00f06f102a6f67c793478f515e7efe/site-packages/jinja2/utils.py#L500-L502
openstack/kuryr-kubernetes
513ada72685ef02cd9f3aca418ac1b2e0dc1b8ba
kuryr_kubernetes/os_vif_util.py
python
_make_vif_subnets
(neutron_port, subnets)
return list(vif_subnets.values())
Gets a list of os-vif Subnet objects for port. :param neutron_port: dict containing port information as returned by neutron client's 'show_port' or openstack.network.v2.port.Port object :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets :return: list of os-vif Subnet object
Gets a list of os-vif Subnet objects for port.
[ "Gets", "a", "list", "of", "os", "-", "vif", "Subnet", "objects", "for", "port", "." ]
def _make_vif_subnets(neutron_port, subnets): """Gets a list of os-vif Subnet objects for port. :param neutron_port: dict containing port information as returned by neutron client's 'show_port' or openstack.network.v2.port.Port object :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets :return: list of os-vif Subnet object """ vif_subnets = {} try: fixed_ips = neutron_port.get('fixed_ips', []) port_id = neutron_port.get('id') except TypeError: fixed_ips = neutron_port.fixed_ips port_id = neutron_port.get.id for neutron_fixed_ip in fixed_ips: subnet_id = neutron_fixed_ip['subnet_id'] ip_address = neutron_fixed_ip['ip_address'] if subnet_id not in subnets: continue try: subnet = vif_subnets[subnet_id] except KeyError: subnet = _make_vif_subnet(subnets, subnet_id) vif_subnets[subnet_id] = subnet subnet.ips.objects.append(osv_fixed_ip.FixedIP(address=ip_address)) if not vif_subnets: raise k_exc.IntegrityError(_( "No valid subnets found for port %(port_id)s") % { 'port_id': port_id}) return list(vif_subnets.values())
[ "def", "_make_vif_subnets", "(", "neutron_port", ",", "subnets", ")", ":", "vif_subnets", "=", "{", "}", "try", ":", "fixed_ips", "=", "neutron_port", ".", "get", "(", "'fixed_ips'", ",", "[", "]", ")", "port_id", "=", "neutron_port", ".", "get", "(", "'id'", ")", "except", "TypeError", ":", "fixed_ips", "=", "neutron_port", ".", "fixed_ips", "port_id", "=", "neutron_port", ".", "get", ".", "id", "for", "neutron_fixed_ip", "in", "fixed_ips", ":", "subnet_id", "=", "neutron_fixed_ip", "[", "'subnet_id'", "]", "ip_address", "=", "neutron_fixed_ip", "[", "'ip_address'", "]", "if", "subnet_id", "not", "in", "subnets", ":", "continue", "try", ":", "subnet", "=", "vif_subnets", "[", "subnet_id", "]", "except", "KeyError", ":", "subnet", "=", "_make_vif_subnet", "(", "subnets", ",", "subnet_id", ")", "vif_subnets", "[", "subnet_id", "]", "=", "subnet", "subnet", ".", "ips", ".", "objects", ".", "append", "(", "osv_fixed_ip", ".", "FixedIP", "(", "address", "=", "ip_address", ")", ")", "if", "not", "vif_subnets", ":", "raise", "k_exc", ".", "IntegrityError", "(", "_", "(", "\"No valid subnets found for port %(port_id)s\"", ")", "%", "{", "'port_id'", ":", "port_id", "}", ")", "return", "list", "(", "vif_subnets", ".", "values", "(", ")", ")" ]
https://github.com/openstack/kuryr-kubernetes/blob/513ada72685ef02cd9f3aca418ac1b2e0dc1b8ba/kuryr_kubernetes/os_vif_util.py#L126-L164
fofix/fofix
7730d1503c66562b901f62b33a5bd46c3d5e5c34
fofix/core/Font.py
python
Font.drawSquare
(self, w, h, tw, th)
New drawing relying only on pygame.font.render Use arrays to increase performance :param w: width :param h: height :param tw: texture width :param th: texture height
New drawing relying only on pygame.font.render Use arrays to increase performance
[ "New", "drawing", "relying", "only", "on", "pygame", ".", "font", ".", "render", "Use", "arrays", "to", "increase", "performance" ]
def drawSquare(self, w, h, tw, th): """ New drawing relying only on pygame.font.render Use arrays to increase performance :param w: width :param h: height :param tw: texture width :param th: texture height """ self.square_prim[1, 0] = self.square_prim[3, 0] = w self.square_prim[2, 1] = self.square_prim[3, 1] = h self.square_tex[0, 1] = self.square_tex[1, 1] = th self.square_tex[1, 0] = self.square_tex[3, 0] = tw cmgl.draw_arrays(GL_TRIANGLE_STRIP, vertices=self.square_prim, texcoords=self.square_tex)
[ "def", "drawSquare", "(", "self", ",", "w", ",", "h", ",", "tw", ",", "th", ")", ":", "self", ".", "square_prim", "[", "1", ",", "0", "]", "=", "self", ".", "square_prim", "[", "3", ",", "0", "]", "=", "w", "self", ".", "square_prim", "[", "2", ",", "1", "]", "=", "self", ".", "square_prim", "[", "3", ",", "1", "]", "=", "h", "self", ".", "square_tex", "[", "0", ",", "1", "]", "=", "self", ".", "square_tex", "[", "1", ",", "1", "]", "=", "th", "self", ".", "square_tex", "[", "1", ",", "0", "]", "=", "self", ".", "square_tex", "[", "3", ",", "0", "]", "=", "tw", "cmgl", ".", "draw_arrays", "(", "GL_TRIANGLE_STRIP", ",", "vertices", "=", "self", ".", "square_prim", ",", "texcoords", "=", "self", ".", "square_tex", ")" ]
https://github.com/fofix/fofix/blob/7730d1503c66562b901f62b33a5bd46c3d5e5c34/fofix/core/Font.py#L148-L162
fabioz/PyDev.Debugger
0f8c02a010fe5690405da1dd30ed72326191ce63
third_party/pep8/autopep8.py
python
get_encoding
()
return locale.getpreferredencoding() or sys.getdefaultencoding()
Return preferred encoding.
Return preferred encoding.
[ "Return", "preferred", "encoding", "." ]
def get_encoding(): """Return preferred encoding.""" return locale.getpreferredencoding() or sys.getdefaultencoding()
[ "def", "get_encoding", "(", ")", ":", "return", "locale", ".", "getpreferredencoding", "(", ")", "or", "sys", ".", "getdefaultencoding", "(", ")" ]
https://github.com/fabioz/PyDev.Debugger/blob/0f8c02a010fe5690405da1dd30ed72326191ce63/third_party/pep8/autopep8.py#L3752-L3754
microsoft/botbuilder-python
3d410365461dc434df59bdfeaa2f16d28d9df868
libraries/botbuilder-schema/botbuilder/schema/_models_py3.py
python
Activity.create_event_activity
()
return Activity(type=ActivityTypes.event)
Creates an instance of the :class:`Activity` class as an EventActivity object. :returns: The new event activity.
Creates an instance of the :class:`Activity` class as an EventActivity object.
[ "Creates", "an", "instance", "of", "the", ":", "class", ":", "Activity", "class", "as", "an", "EventActivity", "object", "." ]
def create_event_activity(): """ Creates an instance of the :class:`Activity` class as an EventActivity object. :returns: The new event activity. """ return Activity(type=ActivityTypes.event)
[ "def", "create_event_activity", "(", ")", ":", "return", "Activity", "(", "type", "=", "ActivityTypes", ".", "event", ")" ]
https://github.com/microsoft/botbuilder-python/blob/3d410365461dc434df59bdfeaa2f16d28d9df868/libraries/botbuilder-schema/botbuilder/schema/_models_py3.py#L584-L590
ales-tsurko/cells
4cf7e395cd433762bea70cdc863a346f3a6fe1d0
packaging/macos/python/lib/python3.7/importlib/abc.py
python
ResourceReader.open_resource
(self, resource)
Return an opened, file-like object for binary reading. The 'resource' argument is expected to represent only a file name and thus not contain any subdirectory components. If the resource cannot be found, FileNotFoundError is raised.
Return an opened, file-like object for binary reading.
[ "Return", "an", "opened", "file", "-", "like", "object", "for", "binary", "reading", "." ]
def open_resource(self, resource): """Return an opened, file-like object for binary reading. The 'resource' argument is expected to represent only a file name and thus not contain any subdirectory components. If the resource cannot be found, FileNotFoundError is raised. """ raise FileNotFoundError
[ "def", "open_resource", "(", "self", ",", "resource", ")", ":", "raise", "FileNotFoundError" ]
https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/importlib/abc.py#L355-L363
makerdao/pymaker
9245b3e22bcb257004d54337df6c2b0c9cbe42c8
pymaker/sai.py
python
Tub.approve
(self, approval_function)
Approve the `Tub` to access our GEM, SKR, SAI and GOV balances. For available approval functions (i.e. approval modes) see `directly` and `via_tx_manager` in `pymaker.approval`. Args: approval_function: Approval function (i.e. approval mode).
Approve the `Tub` to access our GEM, SKR, SAI and GOV balances.
[ "Approve", "the", "Tub", "to", "access", "our", "GEM", "SKR", "SAI", "and", "GOV", "balances", "." ]
def approve(self, approval_function): """Approve the `Tub` to access our GEM, SKR, SAI and GOV balances. For available approval functions (i.e. approval modes) see `directly` and `via_tx_manager` in `pymaker.approval`. Args: approval_function: Approval function (i.e. approval mode). """ assert(callable(approval_function)) approval_function(ERC20Token(web3=self.web3, address=self.gem()), self.address, 'Tub') approval_function(ERC20Token(web3=self.web3, address=self.skr()), self.address, 'Tub') approval_function(ERC20Token(web3=self.web3, address=self.sai()), self.address, 'Tub') approval_function(ERC20Token(web3=self.web3, address=self.gov()), self.address, 'Tub')
[ "def", "approve", "(", "self", ",", "approval_function", ")", ":", "assert", "(", "callable", "(", "approval_function", ")", ")", "approval_function", "(", "ERC20Token", "(", "web3", "=", "self", ".", "web3", ",", "address", "=", "self", ".", "gem", "(", ")", ")", ",", "self", ".", "address", ",", "'Tub'", ")", "approval_function", "(", "ERC20Token", "(", "web3", "=", "self", ".", "web3", ",", "address", "=", "self", ".", "skr", "(", ")", ")", ",", "self", ".", "address", ",", "'Tub'", ")", "approval_function", "(", "ERC20Token", "(", "web3", "=", "self", ".", "web3", ",", "address", "=", "self", ".", "sai", "(", ")", ")", ",", "self", ".", "address", ",", "'Tub'", ")", "approval_function", "(", "ERC20Token", "(", "web3", "=", "self", ".", "web3", ",", "address", "=", "self", ".", "gov", "(", ")", ")", ",", "self", ".", "address", ",", "'Tub'", ")" ]
https://github.com/makerdao/pymaker/blob/9245b3e22bcb257004d54337df6c2b0c9cbe42c8/pymaker/sai.py#L104-L118
cloudera/hue
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
desktop/core/ext-py/ply-3.11/example/ansic/cparse.py
python
p_direct_declarator_5
(t)
direct_declarator : direct_declarator LPAREN identifier_list RPAREN
direct_declarator : direct_declarator LPAREN identifier_list RPAREN
[ "direct_declarator", ":", "direct_declarator", "LPAREN", "identifier_list", "RPAREN" ]
def p_direct_declarator_5(t): 'direct_declarator : direct_declarator LPAREN identifier_list RPAREN ' pass
[ "def", "p_direct_declarator_5", "(", "t", ")", ":", "pass" ]
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/ply-3.11/example/ansic/cparse.py#L351-L353
rq/rq
c5a1ef17345e17269085e7f72858ac9bd6faf1dd
rq/local.py
python
LocalProxy.__getattr__
(self, name)
return getattr(self._get_current_object(), name)
[]
def __getattr__(self, name): if name == '__members__': return dir(self._get_current_object()) return getattr(self._get_current_object(), name)
[ "def", "__getattr__", "(", "self", ",", "name", ")", ":", "if", "name", "==", "'__members__'", ":", "return", "dir", "(", "self", ".", "_get_current_object", "(", ")", ")", "return", "getattr", "(", "self", ".", "_get_current_object", "(", ")", ",", "name", ")" ]
https://github.com/rq/rq/blob/c5a1ef17345e17269085e7f72858ac9bd6faf1dd/rq/local.py#L318-L321
facebookresearch/votenet
2f6d6d36ff98d96901182e935afe48ccee82d566
sunrgbd/sunrgbd_utils.py
python
SUNRGBD_Calibration.project_upright_camera_to_upright_depth
(self, pc)
return flip_axis_to_depth(pc)
[]
def project_upright_camera_to_upright_depth(self, pc): return flip_axis_to_depth(pc)
[ "def", "project_upright_camera_to_upright_depth", "(", "self", ",", "pc", ")", ":", "return", "flip_axis_to_depth", "(", "pc", ")" ]
https://github.com/facebookresearch/votenet/blob/2f6d6d36ff98d96901182e935afe48ccee82d566/sunrgbd/sunrgbd_utils.py#L119-L120
pabigot/pyxb
14737c23a125fd12c954823ad64fc4497816fae3
pyxb/xmlschema/structures.py
python
_NamedComponent_mixin.__new__
(cls, *args, **kw)
return rv
Pickling support. Normally, we just create a new instance of this class. However, if we're unpickling a reference in a loadable schema, we need to return the existing component instance by looking up the name in the component map of the desired namespace. We can tell the difference because no normal constructors that inherit from this have positional arguments; only invocations by unpickling with a value returned in __getnewargs__ do. This does require that the dependent namespace already have been validated (or that it be validated here). That shouldn't be a problem, except for the dependency loop resulting from use of xml:lang in the XMLSchema namespace. For that issue, see pyxb.namespace._XMLSchema.
Pickling support.
[ "Pickling", "support", "." ]
def __new__ (cls, *args, **kw): """Pickling support. Normally, we just create a new instance of this class. However, if we're unpickling a reference in a loadable schema, we need to return the existing component instance by looking up the name in the component map of the desired namespace. We can tell the difference because no normal constructors that inherit from this have positional arguments; only invocations by unpickling with a value returned in __getnewargs__ do. This does require that the dependent namespace already have been validated (or that it be validated here). That shouldn't be a problem, except for the dependency loop resulting from use of xml:lang in the XMLSchema namespace. For that issue, see pyxb.namespace._XMLSchema. """ if 0 == len(args): rv = super(_NamedComponent_mixin, cls).__new__(cls) return rv ( object_reference, scope, icls ) = args object_reference = _PickledAnonymousReference.FromPickled(object_reference) # Explicitly validate here: the lookup operations won't do so, # but will abort if the namespace hasn't been validated yet. object_reference.validateComponentModel() rv = None if isinstance(scope, (tuple, _PickledAnonymousReference)): # Scope is the expanded name of the complex type in which the # named value can be located. scope_ref = _PickledAnonymousReference.FromPickled(scope) if object_reference.namespace() != scope_ref.namespace(): scope_ref.validateComponentModel() assert 'typeDefinition' in scope_ref.namespace().categories() scope_ctd = scope_ref.typeDefinition() if scope_ctd is None: raise pyxb.SchemaValidationError('Unable to resolve local scope %s' % (scope_ref,)) if issubclass(icls, AttributeDeclaration): rv = scope_ctd.lookupScopedAttributeDeclaration(object_reference) elif issubclass(icls, ElementDeclaration): rv = scope_ctd.lookupScopedElementDeclaration(object_reference) if rv is None: raise pyxb.SchemaValidationError('Unable to resolve %s as %s in scope %s' % (object_reference, icls, scope_ref)) elif _ScopedDeclaration_mixin.ScopeIsGlobal(scope) or _ScopedDeclaration_mixin.ScopeIsIndeterminate(scope): if (issubclass(icls, SimpleTypeDefinition) or issubclass(icls, ComplexTypeDefinition)): rv = object_reference.typeDefinition() elif issubclass(icls, AttributeGroupDefinition): rv = object_reference.attributeGroupDefinition() elif issubclass(icls, ModelGroupDefinition): rv = object_reference.modelGroupDefinition() elif issubclass(icls, AttributeDeclaration): rv = object_reference.attributeDeclaration() elif issubclass(icls, ElementDeclaration): rv = object_reference.elementDeclaration() elif issubclass(icls, IdentityConstraintDefinition): rv = object_reference.identityConstraintDefinition() if rv is None: raise pyxb.SchemaValidationError('Unable to resolve %s as %s' % (object_reference, icls)) if rv is None: raise pyxb.SchemaValidationError('Unable to resolve reference %s, scope %s ns %s type %s, class %s' % (object_reference, scope, (scope is None and "<unknown>" or scope.targetNamespace()), type(scope), icls)) return rv
[ "def", "__new__", "(", "cls", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "if", "0", "==", "len", "(", "args", ")", ":", "rv", "=", "super", "(", "_NamedComponent_mixin", ",", "cls", ")", ".", "__new__", "(", "cls", ")", "return", "rv", "(", "object_reference", ",", "scope", ",", "icls", ")", "=", "args", "object_reference", "=", "_PickledAnonymousReference", ".", "FromPickled", "(", "object_reference", ")", "# Explicitly validate here: the lookup operations won't do so,", "# but will abort if the namespace hasn't been validated yet.", "object_reference", ".", "validateComponentModel", "(", ")", "rv", "=", "None", "if", "isinstance", "(", "scope", ",", "(", "tuple", ",", "_PickledAnonymousReference", ")", ")", ":", "# Scope is the expanded name of the complex type in which the", "# named value can be located.", "scope_ref", "=", "_PickledAnonymousReference", ".", "FromPickled", "(", "scope", ")", "if", "object_reference", ".", "namespace", "(", ")", "!=", "scope_ref", ".", "namespace", "(", ")", ":", "scope_ref", ".", "validateComponentModel", "(", ")", "assert", "'typeDefinition'", "in", "scope_ref", ".", "namespace", "(", ")", ".", "categories", "(", ")", "scope_ctd", "=", "scope_ref", ".", "typeDefinition", "(", ")", "if", "scope_ctd", "is", "None", ":", "raise", "pyxb", ".", "SchemaValidationError", "(", "'Unable to resolve local scope %s'", "%", "(", "scope_ref", ",", ")", ")", "if", "issubclass", "(", "icls", ",", "AttributeDeclaration", ")", ":", "rv", "=", "scope_ctd", ".", "lookupScopedAttributeDeclaration", "(", "object_reference", ")", "elif", "issubclass", "(", "icls", ",", "ElementDeclaration", ")", ":", "rv", "=", "scope_ctd", ".", "lookupScopedElementDeclaration", "(", "object_reference", ")", "if", "rv", "is", "None", ":", "raise", "pyxb", ".", "SchemaValidationError", "(", "'Unable to resolve %s as %s in scope %s'", "%", "(", "object_reference", ",", "icls", ",", "scope_ref", ")", ")", "elif", "_ScopedDeclaration_mixin", ".", "ScopeIsGlobal", "(", "scope", ")", "or", "_ScopedDeclaration_mixin", ".", "ScopeIsIndeterminate", "(", "scope", ")", ":", "if", "(", "issubclass", "(", "icls", ",", "SimpleTypeDefinition", ")", "or", "issubclass", "(", "icls", ",", "ComplexTypeDefinition", ")", ")", ":", "rv", "=", "object_reference", ".", "typeDefinition", "(", ")", "elif", "issubclass", "(", "icls", ",", "AttributeGroupDefinition", ")", ":", "rv", "=", "object_reference", ".", "attributeGroupDefinition", "(", ")", "elif", "issubclass", "(", "icls", ",", "ModelGroupDefinition", ")", ":", "rv", "=", "object_reference", ".", "modelGroupDefinition", "(", ")", "elif", "issubclass", "(", "icls", ",", "AttributeDeclaration", ")", ":", "rv", "=", "object_reference", ".", "attributeDeclaration", "(", ")", "elif", "issubclass", "(", "icls", ",", "ElementDeclaration", ")", ":", "rv", "=", "object_reference", ".", "elementDeclaration", "(", ")", "elif", "issubclass", "(", "icls", ",", "IdentityConstraintDefinition", ")", ":", "rv", "=", "object_reference", ".", "identityConstraintDefinition", "(", ")", "if", "rv", "is", "None", ":", "raise", "pyxb", ".", "SchemaValidationError", "(", "'Unable to resolve %s as %s'", "%", "(", "object_reference", ",", "icls", ")", ")", "if", "rv", "is", "None", ":", "raise", "pyxb", ".", "SchemaValidationError", "(", "'Unable to resolve reference %s, scope %s ns %s type %s, class %s'", "%", "(", "object_reference", ",", "scope", ",", "(", "scope", "is", "None", "and", "\"<unknown>\"", "or", "scope", ".", "targetNamespace", "(", ")", ")", ",", "type", "(", 
"scope", ")", ",", "icls", ")", ")", "return", "rv" ]
https://github.com/pabigot/pyxb/blob/14737c23a125fd12c954823ad64fc4497816fae3/pyxb/xmlschema/structures.py#L706-L768
googleapis/python-dialogflow
e48ea001b7c8a4a5c1fe4b162bad49ea397458e9
google/cloud/dialogflow_v2/services/intents/async_client.py
python
IntentsAsyncClient.list_intents
( self, request: Union[intent.ListIntentsRequest, dict] = None, *, parent: str = None, language_code: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), )
return response
r"""Returns the list of all intents in the specified agent. Args: request (Union[google.cloud.dialogflow_v2.types.ListIntentsRequest, dict]): The request object. The request message for [Intents.ListIntents][google.cloud.dialogflow.v2.Intents.ListIntents]. parent (:class:`str`): Required. The agent to list all intents from. Format: ``projects/<Project ID>/agent`` or ``projects/<Project ID>/locations/<Location ID>/agent``. Alternatively, you can specify the environment to list intents for. Format: ``projects/<Project ID>/agent/environments/<Environment ID>`` or ``projects/<Project ID>/locations/<Location ID>/agent/environments/<Environment ID>``. Note: training phrases of the intents will not be returned for non-draft environment. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. language_code (:class:`str`): Optional. The language used to access language-specific data. If not specified, the agent's default language is used. For more information, see `Multilingual intent and entity data <https://cloud.google.com/dialogflow/docs/agents-multilingual#intent-entity>`__. This corresponds to the ``language_code`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dialogflow_v2.services.intents.pagers.ListIntentsAsyncPager: The response message for [Intents.ListIntents][google.cloud.dialogflow.v2.Intents.ListIntents]. Iterating over this object will yield results and resolve additional pages automatically.
r"""Returns the list of all intents in the specified agent.
[ "r", "Returns", "the", "list", "of", "all", "intents", "in", "the", "specified", "agent", "." ]
async def list_intents( self, request: Union[intent.ListIntentsRequest, dict] = None, *, parent: str = None, language_code: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListIntentsAsyncPager: r"""Returns the list of all intents in the specified agent. Args: request (Union[google.cloud.dialogflow_v2.types.ListIntentsRequest, dict]): The request object. The request message for [Intents.ListIntents][google.cloud.dialogflow.v2.Intents.ListIntents]. parent (:class:`str`): Required. The agent to list all intents from. Format: ``projects/<Project ID>/agent`` or ``projects/<Project ID>/locations/<Location ID>/agent``. Alternatively, you can specify the environment to list intents for. Format: ``projects/<Project ID>/agent/environments/<Environment ID>`` or ``projects/<Project ID>/locations/<Location ID>/agent/environments/<Environment ID>``. Note: training phrases of the intents will not be returned for non-draft environment. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. language_code (:class:`str`): Optional. The language used to access language-specific data. If not specified, the agent's default language is used. For more information, see `Multilingual intent and entity data <https://cloud.google.com/dialogflow/docs/agents-multilingual#intent-entity>`__. This corresponds to the ``language_code`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dialogflow_v2.services.intents.pagers.ListIntentsAsyncPager: The response message for [Intents.ListIntents][google.cloud.dialogflow.v2.Intents.ListIntents]. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, language_code]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = intent.ListIntentsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if language_code is not None: request.language_code = language_code # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_intents, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListIntentsAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response
[ "async", "def", "list_intents", "(", "self", ",", "request", ":", "Union", "[", "intent", ".", "ListIntentsRequest", ",", "dict", "]", "=", "None", ",", "*", ",", "parent", ":", "str", "=", "None", ",", "language_code", ":", "str", "=", "None", ",", "retry", ":", "OptionalRetry", "=", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", ":", "float", "=", "None", ",", "metadata", ":", "Sequence", "[", "Tuple", "[", "str", ",", "str", "]", "]", "=", "(", ")", ",", ")", "->", "pagers", ".", "ListIntentsAsyncPager", ":", "# Create or coerce a protobuf request object.", "# Sanity check: If we got a request object, we should *not* have", "# gotten any keyword arguments that map to the request.", "has_flattened_params", "=", "any", "(", "[", "parent", ",", "language_code", "]", ")", "if", "request", "is", "not", "None", "and", "has_flattened_params", ":", "raise", "ValueError", "(", "\"If the `request` argument is set, then none of \"", "\"the individual field arguments should be set.\"", ")", "request", "=", "intent", ".", "ListIntentsRequest", "(", "request", ")", "# If we have keyword arguments corresponding to fields on the", "# request, apply these.", "if", "parent", "is", "not", "None", ":", "request", ".", "parent", "=", "parent", "if", "language_code", "is", "not", "None", ":", "request", ".", "language_code", "=", "language_code", "# Wrap the RPC method; this adds retry and timeout information,", "# and friendly error handling.", "rpc", "=", "gapic_v1", ".", "method_async", ".", "wrap_method", "(", "self", ".", "_client", ".", "_transport", ".", "list_intents", ",", "default_timeout", "=", "None", ",", "client_info", "=", "DEFAULT_CLIENT_INFO", ",", ")", "# Certain fields should be provided within the metadata header;", "# add these here.", "metadata", "=", "tuple", "(", "metadata", ")", "+", "(", "gapic_v1", ".", "routing_header", ".", "to_grpc_metadata", "(", "(", "(", "\"parent\"", ",", "request", ".", "parent", ")", ",", ")", ")", ",", ")", "# Send the request.", "response", "=", "await", "rpc", "(", "request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ",", ")", "# This method is paged; wrap the response in a pager, which provides", "# an `__aiter__` convenience method.", "response", "=", "pagers", ".", "ListIntentsAsyncPager", "(", "method", "=", "rpc", ",", "request", "=", "request", ",", "response", "=", "response", ",", "metadata", "=", "metadata", ",", ")", "# Done; return the response.", "return", "response" ]
https://github.com/googleapis/python-dialogflow/blob/e48ea001b7c8a4a5c1fe4b162bad49ea397458e9/google/cloud/dialogflow_v2/services/intents/async_client.py#L170-L271
windelbouwman/ppci
915c069e0667042c085ec42c78e9e3c9a5295324
ppci/programs/ir_program.py
python
IrProgram.to_x86
(self, **options)
return self._new('x86', [ob])
Compile to X86 machine code. Status: ...
Compile to X86 machine code.
[ "Compile", "to", "X86", "machine", "code", "." ]
def to_x86(self, **options): """ Compile to X86 machine code. Status: ... """ if options.get('win', ''): arch = get_arch('x86_64:wincc') else: arch = get_arch('x86_64') # todo: don't we want to be able to pass debug_db here? ppci_modules = [m for m in self.items] if self.debugdb: ob = ir_to_object(ppci_modules, arch, debug=True) else: ob = ir_to_object(ppci_modules, arch, debug=False) return self._new('x86', [ob])
[ "def", "to_x86", "(", "self", ",", "*", "*", "options", ")", ":", "if", "options", ".", "get", "(", "'win'", ",", "''", ")", ":", "arch", "=", "get_arch", "(", "'x86_64:wincc'", ")", "else", ":", "arch", "=", "get_arch", "(", "'x86_64'", ")", "# todo: don't we want to be able to pass debug_db here?", "ppci_modules", "=", "[", "m", "for", "m", "in", "self", ".", "items", "]", "if", "self", ".", "debugdb", ":", "ob", "=", "ir_to_object", "(", "ppci_modules", ",", "arch", ",", "debug", "=", "True", ")", "else", ":", "ob", "=", "ir_to_object", "(", "ppci_modules", ",", "arch", ",", "debug", "=", "False", ")", "return", "self", ".", "_new", "(", "'x86'", ",", "[", "ob", "]", ")" ]
https://github.com/windelbouwman/ppci/blob/915c069e0667042c085ec42c78e9e3c9a5295324/ppci/programs/ir_program.py#L41-L59
francisck/DanderSpritz_docs
86bb7caca5a957147f120b18bb5c31f299914904
Python/Core/Lib/lib-tk/Tkinter.py
python
IntVar.__init__
(self, master=None, value=None, name=None)
Construct an integer variable. MASTER can be given as master widget. VALUE is an optional value (defaults to 0) NAME is an optional Tcl name (defaults to PY_VARnum). If NAME matches an existing variable and VALUE is omitted then the existing value is retained.
Construct an integer variable. MASTER can be given as master widget. VALUE is an optional value (defaults to 0) NAME is an optional Tcl name (defaults to PY_VARnum). If NAME matches an existing variable and VALUE is omitted then the existing value is retained.
[ "Construct", "an", "integer", "variable", ".", "MASTER", "can", "be", "given", "as", "master", "widget", ".", "VALUE", "is", "an", "optional", "value", "(", "defaults", "to", "0", ")", "NAME", "is", "an", "optional", "Tcl", "name", "(", "defaults", "to", "PY_VARnum", ")", ".", "If", "NAME", "matches", "an", "existing", "variable", "and", "VALUE", "is", "omitted", "then", "the", "existing", "value", "is", "retained", "." ]
def __init__(self, master=None, value=None, name=None): """Construct an integer variable. MASTER can be given as master widget. VALUE is an optional value (defaults to 0) NAME is an optional Tcl name (defaults to PY_VARnum). If NAME matches an existing variable and VALUE is omitted then the existing value is retained. """ Variable.__init__(self, master, value, name)
[ "def", "__init__", "(", "self", ",", "master", "=", "None", ",", "value", "=", "None", ",", "name", "=", "None", ")", ":", "Variable", ".", "__init__", "(", "self", ",", "master", ",", "value", ",", "name", ")" ]
https://github.com/francisck/DanderSpritz_docs/blob/86bb7caca5a957147f120b18bb5c31f299914904/Python/Core/Lib/lib-tk/Tkinter.py#L291-L301
kubernetes-client/python
47b9da9de2d02b2b7a34fbe05afb44afd130d73a
kubernetes/client/models/v1beta1_flow_schema_list.py
python
V1beta1FlowSchemaList.__repr__
(self)
return self.to_str()
For `print` and `pprint`
For `print` and `pprint`
[ "For", "print", "and", "pprint" ]
def __repr__(self): """For `print` and `pprint`""" return self.to_str()
[ "def", "__repr__", "(", "self", ")", ":", "return", "self", ".", "to_str", "(", ")" ]
https://github.com/kubernetes-client/python/blob/47b9da9de2d02b2b7a34fbe05afb44afd130d73a/kubernetes/client/models/v1beta1_flow_schema_list.py#L189-L191
ysrc/xunfeng
40d40ecf55910019b8b904ef70ae1eebb6b6d26f
vulscan/vuldb/crack_supervisor_web.py
python
get_plugin_info
()
return plugin_info
[]
def get_plugin_info(): plugin_info = { "name": "Supervisor CVE-2017-11610", "info": "Supervisor 接口未授权访问、弱口令、代码执行漏洞", "level": "高危", "type": "弱口令", "author": "unknown", "url": "https://github.com/Medicean/VulApps/blob/master/s/supervisor/1/", "keyword": "port:9001", "source": 1 } return plugin_info
[ "def", "get_plugin_info", "(", ")", ":", "plugin_info", "=", "{", "\"name\"", ":", "\"Supervisor CVE-2017-11610\"", ",", "\"info\"", ":", "\"Supervisor 接口未授权访问、弱口令、代码执行漏洞\",", "", "\"level\"", ":", "\"高危\",", "", "\"type\"", ":", "\"弱口令\",", "", "\"author\"", ":", "\"unknown\"", ",", "\"url\"", ":", "\"https://github.com/Medicean/VulApps/blob/master/s/supervisor/1/\"", ",", "\"keyword\"", ":", "\"port:9001\"", ",", "\"source\"", ":", "1", "}", "return", "plugin_info" ]
https://github.com/ysrc/xunfeng/blob/40d40ecf55910019b8b904ef70ae1eebb6b6d26f/vulscan/vuldb/crack_supervisor_web.py#L8-L19
DEAP/deap
2f63dcf6aaa341b8fe5d66d99e9e003a21312fef
deap/benchmarks/gp.py
python
salustowicz_2d
(data)
return exp(-data[0]) * data[0]**3 * cos(data[0]) * sin(data[0]) * (cos(data[0]) * sin(data[0])**2 - 1) * (data[1] - 5)
Salustowicz benchmark function. .. list-table:: :widths: 10 50 :stub-columns: 1 * - Range - :math:`\mathbf{x} \in [0, 7]^2` * - Function - :math:`f(\mathbf{x}) = e^{-x_1} x_1^3 \cos(x_1) \sin(x_1) (\cos(x_1) \sin^2(x_1) - 1) (x_2 -5)`
Salustowicz benchmark function.
[ "Salustowicz", "benchmark", "function", "." ]
def salustowicz_2d(data): """Salustowicz benchmark function. .. list-table:: :widths: 10 50 :stub-columns: 1 * - Range - :math:`\mathbf{x} \in [0, 7]^2` * - Function - :math:`f(\mathbf{x}) = e^{-x_1} x_1^3 \cos(x_1) \sin(x_1) (\cos(x_1) \sin^2(x_1) - 1) (x_2 -5)` """ return exp(-data[0]) * data[0]**3 * cos(data[0]) * sin(data[0]) * (cos(data[0]) * sin(data[0])**2 - 1) * (data[1] - 5)
[ "def", "salustowicz_2d", "(", "data", ")", ":", "return", "exp", "(", "-", "data", "[", "0", "]", ")", "*", "data", "[", "0", "]", "**", "3", "*", "cos", "(", "data", "[", "0", "]", ")", "*", "sin", "(", "data", "[", "0", "]", ")", "*", "(", "cos", "(", "data", "[", "0", "]", ")", "*", "sin", "(", "data", "[", "0", "]", ")", "**", "2", "-", "1", ")", "*", "(", "data", "[", "1", "]", "-", "5", ")" ]
https://github.com/DEAP/deap/blob/2f63dcf6aaa341b8fe5d66d99e9e003a21312fef/deap/benchmarks/gp.py#L46-L58
rwth-i6/returnn
f2d718a197a280b0d5f0fd91a7fcb8658560dddb
returnn/import_/common.py
python
_normalize_pkg_name
(name)
return name
:param str name: :rtype: str
:param str name: :rtype: str
[ ":", "param", "str", "name", ":", ":", "rtype", ":", "str" ]
def _normalize_pkg_name(name): """ :param str name: :rtype: str """ name = name.replace(".", "_") name = name.replace("-", "_") return name
[ "def", "_normalize_pkg_name", "(", "name", ")", ":", "name", "=", "name", ".", "replace", "(", "\".\"", ",", "\"_\"", ")", "name", "=", "name", ".", "replace", "(", "\"-\"", ",", "\"_\"", ")", "return", "name" ]
https://github.com/rwth-i6/returnn/blob/f2d718a197a280b0d5f0fd91a7fcb8658560dddb/returnn/import_/common.py#L128-L135
rwth-i6/returnn
f2d718a197a280b0d5f0fd91a7fcb8658560dddb
returnn/util/basic.py
python
NativeCodeCompiler.__init__
(self, base_name, code_version, code, is_cpp=True, c_macro_defines=None, ld_flags=None, include_paths=(), include_deps=None, static_version_name=None, should_cleanup_old_all=True, should_cleanup_old_mydir=False, use_cxx11_abi=False, log_stream=None, verbose=False)
:param str base_name: base name for the module, e.g. "zero_out" :param int|tuple[int] code_version: check for the cache whether to reuse :param str code: the source code itself :param bool is_cpp: if False, C is assumed :param dict[str,str|int]|None c_macro_defines: e.g. {"TENSORFLOW": 1} :param list[str]|None ld_flags: e.g. ["-lblas"] :param list[str]|tuple[str] include_paths: :param list[str]|None include_deps: if provided and an existing lib file, we will check if any dependency is newer and we need to recompile. we could also do it automatically via -MD but that seems overkill and too slow. :param str|None static_version_name: normally, we use .../base_name/hash as the dir but this would use .../base_name/static_version_name. :param bool should_cleanup_old_all: whether we should look in the cache dir and check all ops if we can delete some old ones which are older than some limit (self._cleanup_time_limit_days) :param bool should_cleanup_old_mydir: whether we should delete our op dir before we compile there. :param typing.TextIO|None log_stream: file stream for print statements :param bool verbose: be slightly more verbose
:param str base_name: base name for the module, e.g. "zero_out" :param int|tuple[int] code_version: check for the cache whether to reuse :param str code: the source code itself :param bool is_cpp: if False, C is assumed :param dict[str,str|int]|None c_macro_defines: e.g. {"TENSORFLOW": 1} :param list[str]|None ld_flags: e.g. ["-lblas"] :param list[str]|tuple[str] include_paths: :param list[str]|None include_deps: if provided and an existing lib file, we will check if any dependency is newer and we need to recompile. we could also do it automatically via -MD but that seems overkill and too slow. :param str|None static_version_name: normally, we use .../base_name/hash as the dir but this would use .../base_name/static_version_name. :param bool should_cleanup_old_all: whether we should look in the cache dir and check all ops if we can delete some old ones which are older than some limit (self._cleanup_time_limit_days) :param bool should_cleanup_old_mydir: whether we should delete our op dir before we compile there. :param typing.TextIO|None log_stream: file stream for print statements :param bool verbose: be slightly more verbose
[ ":", "param", "str", "base_name", ":", "base", "name", "for", "the", "module", "e", ".", "g", ".", "zero_out", ":", "param", "int|tuple", "[", "int", "]", "code_version", ":", "check", "for", "the", "cache", "whether", "to", "reuse", ":", "param", "str", "code", ":", "the", "source", "code", "itself", ":", "param", "bool", "is_cpp", ":", "if", "False", "C", "is", "assumed", ":", "param", "dict", "[", "str", "str|int", "]", "|None", "c_macro_defines", ":", "e", ".", "g", ".", "{", "TENSORFLOW", ":", "1", "}", ":", "param", "list", "[", "str", "]", "|None", "ld_flags", ":", "e", ".", "g", ".", "[", "-", "lblas", "]", ":", "param", "list", "[", "str", "]", "|tuple", "[", "str", "]", "include_paths", ":", ":", "param", "list", "[", "str", "]", "|None", "include_deps", ":", "if", "provided", "and", "an", "existing", "lib", "file", "we", "will", "check", "if", "any", "dependency", "is", "newer", "and", "we", "need", "to", "recompile", ".", "we", "could", "also", "do", "it", "automatically", "via", "-", "MD", "but", "that", "seems", "overkill", "and", "too", "slow", ".", ":", "param", "str|None", "static_version_name", ":", "normally", "we", "use", "...", "/", "base_name", "/", "hash", "as", "the", "dir", "but", "this", "would", "use", "...", "/", "base_name", "/", "static_version_name", ".", ":", "param", "bool", "should_cleanup_old_all", ":", "whether", "we", "should", "look", "in", "the", "cache", "dir", "and", "check", "all", "ops", "if", "we", "can", "delete", "some", "old", "ones", "which", "are", "older", "than", "some", "limit", "(", "self", ".", "_cleanup_time_limit_days", ")", ":", "param", "bool", "should_cleanup_old_mydir", ":", "whether", "we", "should", "delete", "our", "op", "dir", "before", "we", "compile", "there", ".", ":", "param", "typing", ".", "TextIO|None", "log_stream", ":", "file", "stream", "for", "print", "statements", ":", "param", "bool", "verbose", ":", "be", "slightly", "more", "verbose" ]
def __init__(self, base_name, code_version, code, is_cpp=True, c_macro_defines=None, ld_flags=None, include_paths=(), include_deps=None, static_version_name=None, should_cleanup_old_all=True, should_cleanup_old_mydir=False, use_cxx11_abi=False, log_stream=None, verbose=False): """ :param str base_name: base name for the module, e.g. "zero_out" :param int|tuple[int] code_version: check for the cache whether to reuse :param str code: the source code itself :param bool is_cpp: if False, C is assumed :param dict[str,str|int]|None c_macro_defines: e.g. {"TENSORFLOW": 1} :param list[str]|None ld_flags: e.g. ["-lblas"] :param list[str]|tuple[str] include_paths: :param list[str]|None include_deps: if provided and an existing lib file, we will check if any dependency is newer and we need to recompile. we could also do it automatically via -MD but that seems overkill and too slow. :param str|None static_version_name: normally, we use .../base_name/hash as the dir but this would use .../base_name/static_version_name. :param bool should_cleanup_old_all: whether we should look in the cache dir and check all ops if we can delete some old ones which are older than some limit (self._cleanup_time_limit_days) :param bool should_cleanup_old_mydir: whether we should delete our op dir before we compile there. :param typing.TextIO|None log_stream: file stream for print statements :param bool verbose: be slightly more verbose """ if self.CollectedCompilers is not None: self.CollectedCompilers.append(self) self.verbose = verbose self.cache_dir = "%s/%s" % (get_cache_dir(), self.CacheDirName) self._include_paths = list(include_paths) self.base_name = base_name self.code_version = code_version self.code = code self.is_cpp = is_cpp self.c_macro_defines = c_macro_defines or {} self.ld_flags = ld_flags or [] self.include_deps = include_deps self.static_version_name = static_version_name self._code_hash = self._make_code_hash() self._info_dict = self._make_info_dict() self._hash = self._make_hash() self._ctypes_lib = None if should_cleanup_old_all: self._cleanup_old() self._should_cleanup_old_mydir = should_cleanup_old_mydir self.use_cxx11_abi = use_cxx11_abi self._log_stream = log_stream if self.verbose: print("%s: %r" % (self.__class__.__name__, self), file=log_stream)
[ "def", "__init__", "(", "self", ",", "base_name", ",", "code_version", ",", "code", ",", "is_cpp", "=", "True", ",", "c_macro_defines", "=", "None", ",", "ld_flags", "=", "None", ",", "include_paths", "=", "(", ")", ",", "include_deps", "=", "None", ",", "static_version_name", "=", "None", ",", "should_cleanup_old_all", "=", "True", ",", "should_cleanup_old_mydir", "=", "False", ",", "use_cxx11_abi", "=", "False", ",", "log_stream", "=", "None", ",", "verbose", "=", "False", ")", ":", "if", "self", ".", "CollectedCompilers", "is", "not", "None", ":", "self", ".", "CollectedCompilers", ".", "append", "(", "self", ")", "self", ".", "verbose", "=", "verbose", "self", ".", "cache_dir", "=", "\"%s/%s\"", "%", "(", "get_cache_dir", "(", ")", ",", "self", ".", "CacheDirName", ")", "self", ".", "_include_paths", "=", "list", "(", "include_paths", ")", "self", ".", "base_name", "=", "base_name", "self", ".", "code_version", "=", "code_version", "self", ".", "code", "=", "code", "self", ".", "is_cpp", "=", "is_cpp", "self", ".", "c_macro_defines", "=", "c_macro_defines", "or", "{", "}", "self", ".", "ld_flags", "=", "ld_flags", "or", "[", "]", "self", ".", "include_deps", "=", "include_deps", "self", ".", "static_version_name", "=", "static_version_name", "self", ".", "_code_hash", "=", "self", ".", "_make_code_hash", "(", ")", "self", ".", "_info_dict", "=", "self", ".", "_make_info_dict", "(", ")", "self", ".", "_hash", "=", "self", ".", "_make_hash", "(", ")", "self", ".", "_ctypes_lib", "=", "None", "if", "should_cleanup_old_all", ":", "self", ".", "_cleanup_old", "(", ")", "self", ".", "_should_cleanup_old_mydir", "=", "should_cleanup_old_mydir", "self", ".", "use_cxx11_abi", "=", "use_cxx11_abi", "self", ".", "_log_stream", "=", "log_stream", "if", "self", ".", "verbose", ":", "print", "(", "\"%s: %r\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "self", ")", ",", "file", "=", "log_stream", ")" ]
https://github.com/rwth-i6/returnn/blob/f2d718a197a280b0d5f0fd91a7fcb8658560dddb/returnn/util/basic.py#L3327-L3373
pymedusa/Medusa
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
ext/trakt/sync.py
python
Scrobbler.update
(self, progress)
Update the scrobbling progress of this :class:`Scrobbler`'s *media* object
Update the scrobbling progress of this :class:`Scrobbler`'s *media* object
[ "Update", "the", "scrobbling", "progress", "of", "this", ":", "class", ":", "Scrobbler", "s", "*", "media", "*", "object" ]
def update(self, progress): """Update the scrobbling progress of this :class:`Scrobbler`'s *media* object """ self.progress = progress self.start()
[ "def", "update", "(", "self", ",", "progress", ")", ":", "self", ".", "progress", "=", "progress", "self", ".", "start", "(", ")" ]
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext/trakt/sync.py#L475-L480
EricSteinberger/PokerRL
e02ea667061b96912e424231da071b6f20a262f7
PokerRL/game/_/rl_env/base/PokerEnv.py
python
PokerEnv._get_step_reward
(self, is_terminal)
return [(p.stack - p.starting_stack_this_episode) / self.REWARD_SCALAR for p in self.seats]
[]
def _get_step_reward(self, is_terminal): if not is_terminal: return np.zeros(shape=self.N_SEATS, dtype=np.float32) return [(p.stack - p.starting_stack_this_episode) / self.REWARD_SCALAR for p in self.seats]
[ "def", "_get_step_reward", "(", "self", ",", "is_terminal", ")", ":", "if", "not", "is_terminal", ":", "return", "np", ".", "zeros", "(", "shape", "=", "self", ".", "N_SEATS", ",", "dtype", "=", "np", ".", "float32", ")", "return", "[", "(", "p", ".", "stack", "-", "p", ".", "starting_stack_this_episode", ")", "/", "self", ".", "REWARD_SCALAR", "for", "p", "in", "self", ".", "seats", "]" ]
https://github.com/EricSteinberger/PokerRL/blob/e02ea667061b96912e424231da071b6f20a262f7/PokerRL/game/_/rl_env/base/PokerEnv.py#L1069-L1072
MaslowCNC/GroundControl
294a05dea5b9753383e24b07ea47d78e76e49422
CalibrationWidgets/computeCalibrationSteps.py
python
ComputeCalibrationSteps.on_Enter
(self)
This function runs when the step is entered
This function runs when the step is entered
[ "This", "function", "runs", "when", "the", "step", "is", "entered" ]
def on_Enter(self): ''' This function runs when the step is entered ''' self.setupListOfSteps() Clock.schedule_once(self.loadNextStep, 5)
[ "def", "on_Enter", "(", "self", ")", ":", "self", ".", "setupListOfSteps", "(", ")", "Clock", ".", "schedule_once", "(", "self", ".", "loadNextStep", ",", "5", ")" ]
https://github.com/MaslowCNC/GroundControl/blob/294a05dea5b9753383e24b07ea47d78e76e49422/CalibrationWidgets/computeCalibrationSteps.py#L16-L24
replit-archive/empythoned
977ec10ced29a3541a4973dc2b59910805695752
dist/lib/python2.7/lib2to3/refactor.py
python
RefactoringTool.processed_file
(self, new_text, filename, old_text=None, write=False, encoding=None)
Called when a file has been refactored, and there are changes.
Called when a file has been refactored, and there are changes.
[ "Called", "when", "a", "file", "has", "been", "refactored", "and", "there", "are", "changes", "." ]
def processed_file(self, new_text, filename, old_text=None, write=False, encoding=None): """ Called when a file has been refactored, and there are changes. """ self.files.append(filename) if old_text is None: old_text = self._read_python_source(filename)[0] if old_text is None: return equal = old_text == new_text self.print_output(old_text, new_text, filename, equal) if equal: self.log_debug("No changes to %s", filename) return if write: self.write_file(new_text, filename, old_text, encoding) else: self.log_debug("Not writing changes to %s", filename)
[ "def", "processed_file", "(", "self", ",", "new_text", ",", "filename", ",", "old_text", "=", "None", ",", "write", "=", "False", ",", "encoding", "=", "None", ")", ":", "self", ".", "files", ".", "append", "(", "filename", ")", "if", "old_text", "is", "None", ":", "old_text", "=", "self", ".", "_read_python_source", "(", "filename", ")", "[", "0", "]", "if", "old_text", "is", "None", ":", "return", "equal", "=", "old_text", "==", "new_text", "self", ".", "print_output", "(", "old_text", ",", "new_text", ",", "filename", ",", "equal", ")", "if", "equal", ":", "self", ".", "log_debug", "(", "\"No changes to %s\"", ",", "filename", ")", "return", "if", "write", ":", "self", ".", "write_file", "(", "new_text", ",", "filename", ",", "old_text", ",", "encoding", ")", "else", ":", "self", ".", "log_debug", "(", "\"Not writing changes to %s\"", ",", "filename", ")" ]
https://github.com/replit-archive/empythoned/blob/977ec10ced29a3541a4973dc2b59910805695752/dist/lib/python2.7/lib2to3/refactor.py#L502-L520
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/polys/factortools.py
python
dmp_zz_wang_lead_coeffs
(f, T, cs, E, H, A, u, K)
return f, HHH, CCC
Wang/EEZ: Compute correct leading coefficients.
Wang/EEZ: Compute correct leading coefficients.
[ "Wang", "/", "EEZ", ":", "Compute", "correct", "leading", "coefficients", "." ]
def dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K): """Wang/EEZ: Compute correct leading coefficients. """ C, J, v = [], [0]*len(E), u - 1 for h in H: c = dmp_one(v, K) d = dup_LC(h, K)*cs for i in reversed(xrange(len(E))): k, e, (t, _) = 0, E[i], T[i] while not (d % e): d, k = d//e, k + 1 if k != 0: c, J[i] = dmp_mul(c, dmp_pow(t, k, v, K), v, K), 1 C.append(c) if any(not j for j in J): raise ExtraneousFactors # pragma: no cover CC, HH = [], [] for c, h in zip(C, H): d = dmp_eval_tail(c, A, v, K) lc = dup_LC(h, K) if K.is_one(cs): cc = lc//d else: g = K.gcd(lc, d) d, cc = d//g, lc//g h, cs = dup_mul_ground(h, d, K), cs//d c = dmp_mul_ground(c, cc, v, K) CC.append(c) HH.append(h) if K.is_one(cs): return f, HH, CC CCC, HHH = [], [] for c, h in zip(CC, HH): CCC.append(dmp_mul_ground(c, cs, v, K)) HHH.append(dmp_mul_ground(h, cs, 0, K)) f = dmp_mul_ground(f, cs**(len(H) - 1), u, K) return f, HHH, CCC
[ "def", "dmp_zz_wang_lead_coeffs", "(", "f", ",", "T", ",", "cs", ",", "E", ",", "H", ",", "A", ",", "u", ",", "K", ")", ":", "C", ",", "J", ",", "v", "=", "[", "]", ",", "[", "0", "]", "*", "len", "(", "E", ")", ",", "u", "-", "1", "for", "h", "in", "H", ":", "c", "=", "dmp_one", "(", "v", ",", "K", ")", "d", "=", "dup_LC", "(", "h", ",", "K", ")", "*", "cs", "for", "i", "in", "reversed", "(", "xrange", "(", "len", "(", "E", ")", ")", ")", ":", "k", ",", "e", ",", "(", "t", ",", "_", ")", "=", "0", ",", "E", "[", "i", "]", ",", "T", "[", "i", "]", "while", "not", "(", "d", "%", "e", ")", ":", "d", ",", "k", "=", "d", "//", "e", ",", "k", "+", "1", "if", "k", "!=", "0", ":", "c", ",", "J", "[", "i", "]", "=", "dmp_mul", "(", "c", ",", "dmp_pow", "(", "t", ",", "k", ",", "v", ",", "K", ")", ",", "v", ",", "K", ")", ",", "1", "C", ".", "append", "(", "c", ")", "if", "any", "(", "not", "j", "for", "j", "in", "J", ")", ":", "raise", "ExtraneousFactors", "# pragma: no cover", "CC", ",", "HH", "=", "[", "]", ",", "[", "]", "for", "c", ",", "h", "in", "zip", "(", "C", ",", "H", ")", ":", "d", "=", "dmp_eval_tail", "(", "c", ",", "A", ",", "v", ",", "K", ")", "lc", "=", "dup_LC", "(", "h", ",", "K", ")", "if", "K", ".", "is_one", "(", "cs", ")", ":", "cc", "=", "lc", "//", "d", "else", ":", "g", "=", "K", ".", "gcd", "(", "lc", ",", "d", ")", "d", ",", "cc", "=", "d", "//", "g", ",", "lc", "//", "g", "h", ",", "cs", "=", "dup_mul_ground", "(", "h", ",", "d", ",", "K", ")", ",", "cs", "//", "d", "c", "=", "dmp_mul_ground", "(", "c", ",", "cc", ",", "v", ",", "K", ")", "CC", ".", "append", "(", "c", ")", "HH", ".", "append", "(", "h", ")", "if", "K", ".", "is_one", "(", "cs", ")", ":", "return", "f", ",", "HH", ",", "CC", "CCC", ",", "HHH", "=", "[", "]", ",", "[", "]", "for", "c", ",", "h", "in", "zip", "(", "CC", ",", "HH", ")", ":", "CCC", ".", "append", "(", "dmp_mul_ground", "(", "c", ",", "cs", ",", "v", ",", "K", ")", ")", "HHH", ".", "append", "(", "dmp_mul_ground", "(", "h", ",", "cs", ",", "0", ",", "K", ")", ")", "f", "=", "dmp_mul_ground", "(", "f", ",", "cs", "**", "(", "len", "(", "H", ")", "-", "1", ")", ",", "u", ",", "K", ")", "return", "f", ",", "HHH", ",", "CCC" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/polys/factortools.py#L672-L723
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/packages/IronPython.StdLib.2.7.4/content/Lib/code.py
python
InteractiveInterpreter.write
(self, data)
Write a string. The base implementation writes to sys.stderr; a subclass may replace this with a different implementation.
Write a string.
[ "Write", "a", "string", "." ]
def write(self, data): """Write a string. The base implementation writes to sys.stderr; a subclass may replace this with a different implementation. """ sys.stderr.write(data)
[ "def", "write", "(", "self", ",", "data", ")", ":", "sys", ".", "stderr", ".", "write", "(", "data", ")" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/packages/IronPython.StdLib.2.7.4/content/Lib/code.py#L164-L171
cloudera/hue
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
desktop/core/ext-py/ply-3.11/example/ansic/cparse.py
python
p_equality_expression_2
(t)
equality_expression : equality_expression EQ relational_expression
equality_expression : equality_expression EQ relational_expression
[ "equality_expression", ":", "equality_expression", "EQ", "relational_expression" ]
def p_equality_expression_2(t): 'equality_expression : equality_expression EQ relational_expression' pass
[ "def", "p_equality_expression_2", "(", "t", ")", ":", "pass" ]
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/ply-3.11/example/ansic/cparse.py#L817-L819
MushroomRL/mushroom-rl
a0eaa2cf8001e433419234a9fc48b64170e3f61c
mushroom_rl/approximators/parametric/cmac.py
python
CMAC.diff
(self, state, action=None)
return super().diff(phi, action)
Compute the derivative of the output w.r.t. ``state``, and ``action`` if provided. Args: state (np.ndarray): the state; action (np.ndarray, None): the action. Returns: The derivative of the output w.r.t. ``state``, and ``action`` if provided.
Compute the derivative of the output w.r.t. ``state``, and ``action`` if provided.
[ "Compute", "the", "derivative", "of", "the", "output", "w", ".", "r", ".", "t", ".", "state", "and", "action", "if", "provided", "." ]
def diff(self, state, action=None): """ Compute the derivative of the output w.r.t. ``state``, and ``action`` if provided. Args: state (np.ndarray): the state; action (np.ndarray, None): the action. Returns: The derivative of the output w.r.t. ``state``, and ``action`` if provided. """ phi = self._phi(state) return super().diff(phi, action)
[ "def", "diff", "(", "self", ",", "state", ",", "action", "=", "None", ")", ":", "phi", "=", "self", ".", "_phi", "(", "state", ")", "return", "super", "(", ")", ".", "diff", "(", "phi", ",", "action", ")" ]
https://github.com/MushroomRL/mushroom-rl/blob/a0eaa2cf8001e433419234a9fc48b64170e3f61c/mushroom_rl/approximators/parametric/cmac.py#L85-L101
google/grr
8ad8a4d2c5a93c92729206b7771af19d92d4f915
grr/core/grr_response_core/lib/casing.py
python
SnakeToCamel
(snake_str: str)
return words[0] + "".join(map(str.capitalize, words[1:]))
Convert a snake_case string representing one identifier to lowerCamelCase. The function uses a best-effort approach to convert the given string to a valid lowerCamelCase string, meaning that it converts even strings that use multiple consecutive underscores between words and/or that start/end with an underscore. Args: snake_str: A snake_case string representing a single identifier. Returns: A lowerCamelCase representation of the given string.
Convert a snake_case string representing one identifier to lowerCamelCase.
[ "Convert", "a", "snake_case", "string", "representing", "one", "identifier", "to", "lowerCamelCase", "." ]
def SnakeToCamel(snake_str: str) -> str: """Convert a snake_case string representing one identifier to lowerCamelCase. The function uses a best-effort approach to convert the given string to a valid lowerCamelCase string, meaning that it converts even strings that use multiple consecutive underscores between words and/or that start/end with an underscore. Args: snake_str: A snake_case string representing a single identifier. Returns: A lowerCamelCase representation of the given string. """ # Extract the words from the snake_case string. words = [word for word in snake_str.split("_") if word] if not words: return "" words[:] = map(str.lower, words) return words[0] + "".join(map(str.capitalize, words[1:]))
[ "def", "SnakeToCamel", "(", "snake_str", ":", "str", ")", "->", "str", ":", "# Extract the words from the snake_case string.", "words", "=", "[", "word", "for", "word", "in", "snake_str", ".", "split", "(", "\"_\"", ")", "if", "word", "]", "if", "not", "words", ":", "return", "\"\"", "words", "[", ":", "]", "=", "map", "(", "str", ".", "lower", ",", "words", ")", "return", "words", "[", "0", "]", "+", "\"\"", ".", "join", "(", "map", "(", "str", ".", "capitalize", ",", "words", "[", "1", ":", "]", ")", ")" ]
https://github.com/google/grr/blob/8ad8a4d2c5a93c92729206b7771af19d92d4f915/grr/core/grr_response_core/lib/casing.py#L6-L27
microsoft/unilm
65f15af2a307ebb64cfb25adf54375b002e6fe8d
xtune/src/transformers/modeling_tf_openai.py
python
TFOpenAIGPTMainLayer._prune_heads
(self, heads_to_prune)
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
[ "Prunes", "heads", "of", "the", "model", ".", "heads_to_prune", ":", "dict", "of", "{", "layer_num", ":", "list", "of", "heads", "to", "prune", "in", "this", "layer", "}" ]
def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ raise NotImplementedError
[ "def", "_prune_heads", "(", "self", ",", "heads_to_prune", ")", ":", "raise", "NotImplementedError" ]
https://github.com/microsoft/unilm/blob/65f15af2a307ebb64cfb25adf54375b002e6fe8d/xtune/src/transformers/modeling_tf_openai.py#L227-L231
Mailu/Mailu
1e53530164e9eaf77a89c322e34bff447ace5a28
core/admin/migrations/env.py
python
run_migrations_online
()
Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.
Run migrations in 'online' mode.
[ "Run", "migrations", "in", "online", "mode", "." ]
def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ # this callback is used to prevent an auto-migration from being generated # when there are no changes to the schema # reference: http://alembic.readthedocs.org/en/latest/cookbook.html def process_revision_directives(context, revision, directives): if getattr(config.cmd_opts, 'autogenerate', False): script = directives[0] if script.upgrade_ops.is_empty(): directives[:] = [] logger.info('No changes in schema detected.') engine = engine_from_config( config.get_section(config.config_ini_section), prefix = 'sqlalchemy.', poolclass = pool.NullPool ) @tenacity.retry( stop = tenacity.stop_after_attempt(100), wait = tenacity.wait_random(min=2, max=5), before = tenacity.before_log(logging.getLogger('tenacity.retry'), logging.DEBUG), before_sleep = tenacity.before_sleep_log(logging.getLogger('tenacity.retry'), logging.INFO), after = tenacity.after_log(logging.getLogger('tenacity.retry'), logging.DEBUG) ) def try_connect(db): return db.connect() with try_connect(engine) as connection: context.configure( connection = connection, target_metadata = target_metadata, process_revision_directives = process_revision_directives, **current_app.extensions['migrate'].configure_args ) with context.begin_transaction(): context.run_migrations() connection.close()
[ "def", "run_migrations_online", "(", ")", ":", "# this callback is used to prevent an auto-migration from being generated", "# when there are no changes to the schema", "# reference: http://alembic.readthedocs.org/en/latest/cookbook.html", "def", "process_revision_directives", "(", "context", ",", "revision", ",", "directives", ")", ":", "if", "getattr", "(", "config", ".", "cmd_opts", ",", "'autogenerate'", ",", "False", ")", ":", "script", "=", "directives", "[", "0", "]", "if", "script", ".", "upgrade_ops", ".", "is_empty", "(", ")", ":", "directives", "[", ":", "]", "=", "[", "]", "logger", ".", "info", "(", "'No changes in schema detected.'", ")", "engine", "=", "engine_from_config", "(", "config", ".", "get_section", "(", "config", ".", "config_ini_section", ")", ",", "prefix", "=", "'sqlalchemy.'", ",", "poolclass", "=", "pool", ".", "NullPool", ")", "@", "tenacity", ".", "retry", "(", "stop", "=", "tenacity", ".", "stop_after_attempt", "(", "100", ")", ",", "wait", "=", "tenacity", ".", "wait_random", "(", "min", "=", "2", ",", "max", "=", "5", ")", ",", "before", "=", "tenacity", ".", "before_log", "(", "logging", ".", "getLogger", "(", "'tenacity.retry'", ")", ",", "logging", ".", "DEBUG", ")", ",", "before_sleep", "=", "tenacity", ".", "before_sleep_log", "(", "logging", ".", "getLogger", "(", "'tenacity.retry'", ")", ",", "logging", ".", "INFO", ")", ",", "after", "=", "tenacity", ".", "after_log", "(", "logging", ".", "getLogger", "(", "'tenacity.retry'", ")", ",", "logging", ".", "DEBUG", ")", ")", "def", "try_connect", "(", "db", ")", ":", "return", "db", ".", "connect", "(", ")", "with", "try_connect", "(", "engine", ")", "as", "connection", ":", "context", ".", "configure", "(", "connection", "=", "connection", ",", "target_metadata", "=", "target_metadata", ",", "process_revision_directives", "=", "process_revision_directives", ",", "*", "*", "current_app", ".", "extensions", "[", "'migrate'", "]", ".", "configure_args", ")", "with", "context", ".", "begin_transaction", "(", ")", ":", "context", ".", "run_migrations", "(", ")", "connection", ".", "close", "(", ")" ]
https://github.com/Mailu/Mailu/blob/1e53530164e9eaf77a89c322e34bff447ace5a28/core/admin/migrations/env.py#L48-L94
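The notable detail in the record above is the tenacity-decorated try_connect helper, which retries the initial database connection up to 100 times with a random 2-5 second wait. Below is a minimal, self-contained sketch of that retry pattern only; the flaky_connect function and the attempt counter are invented stand-ins for a database that is still starting up.

import logging
import tenacity

logging.basicConfig(level=logging.INFO)
attempts = {"n": 0}  # hypothetical stand-in for a slow-to-start database

@tenacity.retry(
    stop=tenacity.stop_after_attempt(100),
    wait=tenacity.wait_random(min=2, max=5),
    before_sleep=tenacity.before_sleep_log(logging.getLogger("tenacity.retry"), logging.INFO),
)
def flaky_connect():
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise ConnectionError("database not ready yet")
    return "connection established"

print(flaky_connect())  # succeeds on the third attempt after two logged retries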
rhinstaller/anaconda
63edc8680f1b05cbfe11bef28703acba808c5174
pyanaconda/modules/payloads/source/harddrive/initialization.py
python
SetUpHardDriveSourceTask.run
(self)
Run Hard drive installation source setup. Always sets up two mount points: First for the device, and second for the ISO image or a bind for unpacked ISO. These depend on each other, and must be destroyed in the correct order again. :raise: SourceSetupError :return: named tuple with path to the install tree and name of ISO if set or empty string :rtype: SetupHardDriveResult instance
Run Hard drive installation source setup.
[ "Run", "Hard", "drive", "installation", "source", "setup", "." ]
def run(self): """Run Hard drive installation source setup. Always sets up two mount points: First for the device, and second for the ISO image or a bind for unpacked ISO. These depend on each other, and must be destroyed in the correct order again. :raise: SourceSetupError :return: named tuple with path to the install tree and name of ISO if set or empty string :rtype: SetupHardDriveResult instance """ log.debug("Setting up Hard drive source") for mount_point in [self._device_mount, self._iso_mount]: if os.path.ismount(mount_point): raise SourceSetupError("The mount point {} is already in use.".format( mount_point )) if not find_and_mount_device(self._partition, self._device_mount): raise SourceSetupError( "Could not mount device specified as {}".format(self._partition) ) full_path_on_mounted_device = os.path.normpath( "{}/{}".format(self._device_mount, self._directory) ) iso_name = find_and_mount_iso_image(full_path_on_mounted_device, self._iso_mount) if iso_name: log.debug("Using the ISO '%s' mounted at '%s'.", iso_name, self._iso_mount) return SetupHardDriveResult(self._iso_mount, iso_name) if verify_valid_repository(full_path_on_mounted_device): log.debug("Using the directory at '%s'.", full_path_on_mounted_device) return SetupHardDriveResult(full_path_on_mounted_device, "") # nothing found unmount the existing device unmount(self._device_mount) raise SourceSetupError( "Nothing useful found for Hard drive ISO source at partition={} directory={}".format( self._partition, self._directory))
[ "def", "run", "(", "self", ")", ":", "log", ".", "debug", "(", "\"Setting up Hard drive source\"", ")", "for", "mount_point", "in", "[", "self", ".", "_device_mount", ",", "self", ".", "_iso_mount", "]", ":", "if", "os", ".", "path", ".", "ismount", "(", "mount_point", ")", ":", "raise", "SourceSetupError", "(", "\"The mount point {} is already in use.\"", ".", "format", "(", "mount_point", ")", ")", "if", "not", "find_and_mount_device", "(", "self", ".", "_partition", ",", "self", ".", "_device_mount", ")", ":", "raise", "SourceSetupError", "(", "\"Could not mount device specified as {}\"", ".", "format", "(", "self", ".", "_partition", ")", ")", "full_path_on_mounted_device", "=", "os", ".", "path", ".", "normpath", "(", "\"{}/{}\"", ".", "format", "(", "self", ".", "_device_mount", ",", "self", ".", "_directory", ")", ")", "iso_name", "=", "find_and_mount_iso_image", "(", "full_path_on_mounted_device", ",", "self", ".", "_iso_mount", ")", "if", "iso_name", ":", "log", ".", "debug", "(", "\"Using the ISO '%s' mounted at '%s'.\"", ",", "iso_name", ",", "self", ".", "_iso_mount", ")", "return", "SetupHardDriveResult", "(", "self", ".", "_iso_mount", ",", "iso_name", ")", "if", "verify_valid_repository", "(", "full_path_on_mounted_device", ")", ":", "log", ".", "debug", "(", "\"Using the directory at '%s'.\"", ",", "full_path_on_mounted_device", ")", "return", "SetupHardDriveResult", "(", "full_path_on_mounted_device", ",", "\"\"", ")", "# nothing found unmount the existing device", "unmount", "(", "self", ".", "_device_mount", ")", "raise", "SourceSetupError", "(", "\"Nothing useful found for Hard drive ISO source at partition={} directory={}\"", ".", "format", "(", "self", ".", "_partition", ",", "self", ".", "_directory", ")", ")" ]
https://github.com/rhinstaller/anaconda/blob/63edc8680f1b05cbfe11bef28703acba808c5174/pyanaconda/modules/payloads/source/harddrive/initialization.py#L51-L93
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/full/http/cookiejar.py
python
Cookie.__str__
(self)
return "<Cookie %s for %s>" % (namevalue, limit)
[]
def __str__(self): if self.port is None: p = "" else: p = ":"+self.port limit = self.domain + p + self.path if self.value is not None: namevalue = "%s=%s" % (self.name, self.value) else: namevalue = self.name return "<Cookie %s for %s>" % (namevalue, limit)
[ "def", "__str__", "(", "self", ")", ":", "if", "self", ".", "port", "is", "None", ":", "p", "=", "\"\"", "else", ":", "p", "=", "\":\"", "+", "self", ".", "port", "limit", "=", "self", ".", "domain", "+", "p", "+", "self", ".", "path", "if", "self", ".", "value", "is", "not", "None", ":", "namevalue", "=", "\"%s=%s\"", "%", "(", "self", ".", "name", ",", "self", ".", "value", ")", "else", ":", "namevalue", "=", "self", ".", "name", "return", "\"<Cookie %s for %s>\"", "%", "(", "namevalue", ",", "limit", ")" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/http/cookiejar.py#L817-L825
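The __str__ above renders a cookie as "<Cookie name=value for domain[:port]path>". Re-stating the formatting logic against a stand-in object makes the output easy to see; the attribute values below are invented and SimpleNamespace is used only so the sketch runs without building a real http.cookiejar.Cookie.

from types import SimpleNamespace

def cookie_str(c):
    # Same formatting rules as Cookie.__str__ in the record above.
    p = "" if c.port is None else ":" + c.port
    limit = c.domain + p + c.path
    namevalue = "%s=%s" % (c.name, c.value) if c.value is not None else c.name
    return "<Cookie %s for %s>" % (namevalue, limit)

c = SimpleNamespace(name="session", value="abc123", domain=".example.com", port=None, path="/")
print(cookie_str(c))  # <Cookie session=abc123 for .example.com/>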
SteveDoyle2/pyNastran
eda651ac2d4883d95a34951f8a002ff94f642a1a
pyNastran/bdf/cards/material_deps.py
python
MATT2.uncross_reference
(self)
Removes cross-reference links
Removes cross-reference links
[ "Removes", "cross", "-", "reference", "links" ]
def uncross_reference(self) -> None: """Removes cross-reference links""" self.mid = self.Mid() self.g11_table = self.G11_table() self.g12_table = self.G12_table() self.g13_table = self.G13_table() self.g22_table = self.G22_table() self.g23_table = self.G23_table() self.g33_table = self.G33_table() self.rho_table = self.Rho_table() self.a1_table = self.A1_table() self.a2_table = self.A2_table() self.a3_table = self.A3_table() self.ge_table = self.Ge_table() self.st_table = self.St_table() self.sc_table = self.Sc_table() self.ss_table = self.Ss_table() self.mid_ref = None
[ "def", "uncross_reference", "(", "self", ")", "->", "None", ":", "self", ".", "mid", "=", "self", ".", "Mid", "(", ")", "self", ".", "g11_table", "=", "self", ".", "G11_table", "(", ")", "self", ".", "g12_table", "=", "self", ".", "G12_table", "(", ")", "self", ".", "g13_table", "=", "self", ".", "G13_table", "(", ")", "self", ".", "g22_table", "=", "self", ".", "G22_table", "(", ")", "self", ".", "g23_table", "=", "self", ".", "G23_table", "(", ")", "self", ".", "g33_table", "=", "self", ".", "G33_table", "(", ")", "self", ".", "rho_table", "=", "self", ".", "Rho_table", "(", ")", "self", ".", "a1_table", "=", "self", ".", "A1_table", "(", ")", "self", ".", "a2_table", "=", "self", ".", "A2_table", "(", ")", "self", ".", "a3_table", "=", "self", ".", "A3_table", "(", ")", "self", ".", "ge_table", "=", "self", ".", "Ge_table", "(", ")", "self", ".", "st_table", "=", "self", ".", "St_table", "(", ")", "self", ".", "sc_table", "=", "self", ".", "Sc_table", "(", ")", "self", ".", "ss_table", "=", "self", ".", "Ss_table", "(", ")", "self", ".", "mid_ref", "=", "None" ]
https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/bdf/cards/material_deps.py#L697-L714
Azure/azure-cli
6c1b085a0910c6c2139006fcbd8ade44006eb6dd
src/azure-cli/azure/cli/command_modules/apim/_format.py
python
_get_value_as_str
(item, *args)
Get a nested value from a dict. :param dict item: The dict object
Get a nested value from a dict. :param dict item: The dict object
[ "Get", "a", "nested", "value", "from", "a", "dict", ".", ":", "param", "dict", "item", ":", "The", "dict", "object" ]
def _get_value_as_str(item, *args): """Get a nested value from a dict. :param dict item: The dict object """ try: for arg in args: item = item[arg] return str(item) if item else ' ' except (KeyError, TypeError, IndexError): return ' '
[ "def", "_get_value_as_str", "(", "item", ",", "*", "args", ")", ":", "try", ":", "for", "arg", "in", "args", ":", "item", "=", "item", "[", "arg", "]", "return", "str", "(", "item", ")", "if", "item", "else", "' '", "except", "(", "KeyError", ",", "TypeError", ",", "IndexError", ")", ":", "return", "' '" ]
https://github.com/Azure/azure-cli/blob/6c1b085a0910c6c2139006fcbd8ade44006eb6dd/src/azure-cli/azure/cli/command_modules/apim/_format.py#L43-L52
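The helper above walks an arbitrary key path through a nested dict and falls back to a single space on any lookup failure, which keeps the CLI table output aligned. A small standalone check of that behaviour follows; the sample dict is invented.

def _get_value_as_str(item, *args):
    # Follow the key path; any missing key/index yields the ' ' placeholder.
    try:
        for arg in args:
            item = item[arg]
        return str(item) if item else ' '
    except (KeyError, TypeError, IndexError):
        return ' '

api = {"sku": {"name": "Developer", "capacity": 1}}
print(_get_value_as_str(api, "sku", "name"))     # Developer
print(_get_value_as_str(api, "sku", "missing"))  # ' '
print(_get_value_as_str(api, "tags", 0))         # ' '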
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
runtime/python/lib/python2.7/site-packages/docutils/writers/__init__.py
python
Writer.write
(self, document, destination)
return output
Process a document into its final form. Translate `document` (a Docutils document tree) into the Writer's native format, and write it out to its `destination` (a `docutils.io.Output` subclass object). Normally not overridden or extended in subclasses.
Process a document into its final form.
[ "Process", "a", "document", "into", "its", "final", "form", "." ]
def write(self, document, destination): """ Process a document into its final form. Translate `document` (a Docutils document tree) into the Writer's native format, and write it out to its `destination` (a `docutils.io.Output` subclass object). Normally not overridden or extended in subclasses. """ self.document = document self.language = languages.get_language( document.settings.language_code, document.reporter) self.destination = destination self.translate() output = self.destination.write(self.output) return output
[ "def", "write", "(", "self", ",", "document", ",", "destination", ")", ":", "self", ".", "document", "=", "document", "self", ".", "language", "=", "languages", ".", "get_language", "(", "document", ".", "settings", ".", "language_code", ",", "document", ".", "reporter", ")", "self", ".", "destination", "=", "destination", "self", ".", "translate", "(", ")", "output", "=", "self", ".", "destination", ".", "write", "(", "self", ".", "output", ")", "return", "output" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/docutils/writers/__init__.py#L65-L82
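Writer.write is normally driven by the docutils publisher rather than called directly; a typical way to see a concrete writer translate a document is docutils.core.publish_string. A minimal sketch, assuming a standard docutils install (the sample source text is invented):

from docutils.core import publish_string

# publish_string parses the reStructuredText source, then hands the document
# tree to the selected writer, whose write() method produces the final output.
html = publish_string(source="Hello, *docutils*!", writer_name="html")
print(html.decode("utf-8")[:80])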
ceph/teuthology
6fc2011361437a9dfe4e45b50de224392eed8abc
teuthology/report.py
python
ResultsReporter.get_run
(self, run_name, fields=None)
return response.json()
Query the results server for a run :param run_name: The name of the run :param fields: Optional. A list of fields to include in the result. Defaults to returning all fields.
Query the results server for a run
[ "Query", "the", "results", "server", "for", "a", "run" ]
def get_run(self, run_name, fields=None): """ Query the results server for a run :param run_name: The name of the run :param fields: Optional. A list of fields to include in the result. Defaults to returning all fields. """ uri = "{base}/runs/{name}".format(base=self.base_uri, name=run_name) if fields: uri += "?fields=" + ','.join(fields) response = self.session.get(uri) response.raise_for_status() return response.json()
[ "def", "get_run", "(", "self", ",", "run_name", ",", "fields", "=", "None", ")", ":", "uri", "=", "\"{base}/runs/{name}\"", ".", "format", "(", "base", "=", "self", ".", "base_uri", ",", "name", "=", "run_name", ")", "if", "fields", ":", "uri", "+=", "\"?fields=\"", "+", "','", ".", "join", "(", "fields", ")", "response", "=", "self", ".", "session", ".", "get", "(", "uri", ")", "response", ".", "raise_for_status", "(", ")", "return", "response", ".", "json", "(", ")" ]
https://github.com/ceph/teuthology/blob/6fc2011361437a9dfe4e45b50de224392eed8abc/teuthology/report.py#L376-L389
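Under the hood get_run is an authenticated-session GET against /runs/<name>, optionally narrowed with a ?fields= query string. A rough standalone equivalent using plain requests is sketched below; the server URL and run name are placeholders, not real endpoints.

import requests

base_uri = "http://paddles.example.com"                 # placeholder results server
run_name = "teuthology-2021-01-01_00:00:00-rados"       # placeholder run name
fields = ["status", "results"]

uri = "{base}/runs/{name}".format(base=base_uri, name=run_name)
if fields:
    uri += "?fields=" + ",".join(fields)

session = requests.Session()
response = session.get(uri)
response.raise_for_status()   # surfaces 4xx/5xx instead of silently returning junk
print(response.json())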
hakril/PythonForWindows
61e027a678d5b87aa64fcf8a37a6661a86236589
windows/winobject/bits.py
python
BitsCopyCallbackSetEvent.JobError
(self, this, job, error)
return True
[]
def JobError(self, this, job, error): job = BitsCopyJob(job) error = BitsCopyError(error) errcode, errctx = error.error print("Copy failed with error code <{0:#x}> (ctx={1})".format(errcode, errctx)) print("see <https://msdn.microsoft.com/en-us/library/windows/desktop/aa362823(v=vs.85).aspx>") self.event.set() return True
[ "def", "JobError", "(", "self", ",", "this", ",", "job", ",", "error", ")", ":", "job", "=", "BitsCopyJob", "(", "job", ")", "error", "=", "BitsCopyError", "(", "error", ")", "errcode", ",", "errctx", "=", "error", ".", "error", "print", "(", "\"Copy failed with error code <{0:#x}> (ctx={1})\"", ".", "format", "(", "errcode", ",", "errctx", ")", ")", "print", "(", "\"see <https://msdn.microsoft.com/en-us/library/windows/desktop/aa362823(v=vs.85).aspx>\"", ")", "self", ".", "event", ".", "set", "(", ")", "return", "True" ]
https://github.com/hakril/PythonForWindows/blob/61e027a678d5b87aa64fcf8a37a6661a86236589/windows/winobject/bits.py#L48-L55
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/twisted/twisted/news/nntp.py
python
NNTPClient.fetchGroup
(self, group)
Get group information for the specified group from the server. gotGroup() is called on success, getGroupFailed() on failure.
Get group information for the specified group from the server. gotGroup() is called on success, getGroupFailed() on failure.
[ "Get", "group", "information", "for", "the", "specified", "group", "from", "the", "server", ".", "gotGroup", "()", "is", "called", "on", "success", "getGroupFailed", "()", "on", "failure", "." ]
def fetchGroup(self, group): """ Get group information for the specified group from the server. gotGroup() is called on success, getGroupFailed() on failure. """ self.sendLine('GROUP %s' % (group,)) self._newState(None, self.getGroupFailed, self._headerGroup)
[ "def", "fetchGroup", "(", "self", ",", "group", ")", ":", "self", ".", "sendLine", "(", "'GROUP %s'", "%", "(", "group", ",", ")", ")", "self", ".", "_newState", "(", "None", ",", "self", ".", "getGroupFailed", ",", "self", ".", "_headerGroup", ")" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/twisted/twisted/news/nntp.py#L226-L232
pyscf/pyscf
0adfb464333f5ceee07b664f291d4084801bae64
pyscf/scf/hf.py
python
get_ovlp
(mol)
return mol.intor_symmetric('int1e_ovlp')
Overlap matrix
Overlap matrix
[ "Overlap", "matrix" ]
def get_ovlp(mol): '''Overlap matrix ''' return mol.intor_symmetric('int1e_ovlp')
[ "def", "get_ovlp", "(", "mol", ")", ":", "return", "mol", ".", "intor_symmetric", "(", "'int1e_ovlp'", ")" ]
https://github.com/pyscf/pyscf/blob/0adfb464333f5ceee07b664f291d4084801bae64/pyscf/scf/hf.py#L327-L330
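get_ovlp simply delegates to the int1e_ovlp integral driver, so the overlap matrix of a small molecule takes only a couple of lines. A sketch assuming a standard pyscf install; the geometry and basis are chosen arbitrarily for illustration.

from pyscf import gto, scf

# Minimal H2 molecule in a small basis; values are illustrative only.
mol = gto.M(atom="H 0 0 0; H 0 0 0.74", basis="sto-3g")
s = scf.hf.get_ovlp(mol)
print(s.shape)  # (2, 2) for two contracted s functions
print(s)        # symmetric overlap matrix with 1.0 on the diagonal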
sahana/eden
1696fa50e90ce967df69f66b571af45356cc18da
modules/s3/codecs/pdf.py
python
S3html2pdf.parse
(self, html)
return result
Entry point for class
Entry point for class
[ "Entry", "point", "for", "class" ]
def parse(self, html): """ Entry point for class """ result = self.select_tag(html) return result
[ "def", "parse", "(", "self", ",", "html", ")", ":", "result", "=", "self", ".", "select_tag", "(", "html", ")", "return", "result" ]
https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/modules/s3/codecs/pdf.py#L1834-L1840
bugcrowd/HUNT
ed3e1adee724bf6c98750f377f6c40cd656c82d3
Burp/lib/scanner_table_models.py
python
ScannerTableModels.set_scanner_table_model
(self, scanner_issue, issue_name, issue_param, vuln_param)
[]
def set_scanner_table_model(self, scanner_issue, issue_name, issue_param, vuln_param): key = issue_name + "." + vuln_param scanner_issue_id = str(scanner_issue.getRequestResponse()).split("@")[1] scanner_table_model = self.scanner_table_models[key] # Using the addRow() method requires that the data type being passed to be of type # Vector() or Object(). Passing a Python object of type list in addRow causes a type # conversion error of sorts which presents as an ArrayOutOfBoundsException. Therefore, # row is an instantiation of Object() to avoid this error. row = Object() row = [False, issue_param, scanner_issue.getHttpService().getHost(), scanner_issue.getPath(), scanner_issue_id] try: scanner_table_model.addRow(row) except Exception as e: print(e)
[ "def", "set_scanner_table_model", "(", "self", ",", "scanner_issue", ",", "issue_name", ",", "issue_param", ",", "vuln_param", ")", ":", "key", "=", "issue_name", "+", "\".\"", "+", "vuln_param", "scanner_issue_id", "=", "str", "(", "scanner_issue", ".", "getRequestResponse", "(", ")", ")", ".", "split", "(", "\"@\"", ")", "[", "1", "]", "scanner_table_model", "=", "self", ".", "scanner_table_models", "[", "key", "]", "# Using the addRow() method requires that the data type being passed to be of type", "# Vector() or Object(). Passing a Python object of type list in addRow causes a type", "# conversion error of sorts which presents as an ArrayOutOfBoundsException. Therefore,", "# row is an instantiation of Object() to avoid this error.", "row", "=", "Object", "(", ")", "row", "=", "[", "False", ",", "issue_param", ",", "scanner_issue", ".", "getHttpService", "(", ")", ".", "getHost", "(", ")", ",", "scanner_issue", ".", "getPath", "(", ")", ",", "scanner_issue_id", "]", "try", ":", "scanner_table_model", ".", "addRow", "(", "row", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")" ]
https://github.com/bugcrowd/HUNT/blob/ed3e1adee724bf6c98750f377f6c40cd656c82d3/Burp/lib/scanner_table_models.py#L26-L41
pinax/django-user-accounts
e83effdd4a23cd8d830169904c261ff6677ee3e6
account/auth_backends.py
python
EmailAuthenticationBackend.authenticate
(self, request, username=None, password=None, **kwargs)
Authenticate the user based email
Authenticate the user based email
[ "Authenticate", "the", "user", "based", "email" ]
def authenticate(self, request, username=None, password=None, **kwargs): """Authenticate the user based email""" qs = EmailAddress.objects.filter(Q(primary=True) | Q(verified=True)) if username is None or password is None: return None try: email_address = qs.get(email__iexact=username) except EmailAddress.DoesNotExist: return None user = email_address.user if user.check_password(password): return user
[ "def", "authenticate", "(", "self", ",", "request", ",", "username", "=", "None", ",", "password", "=", "None", ",", "*", "*", "kwargs", ")", ":", "qs", "=", "EmailAddress", ".", "objects", ".", "filter", "(", "Q", "(", "primary", "=", "True", ")", "|", "Q", "(", "verified", "=", "True", ")", ")", "if", "username", "is", "None", "or", "password", "is", "None", ":", "return", "None", "try", ":", "email_address", "=", "qs", ".", "get", "(", "email__iexact", "=", "username", ")", "except", "EmailAddress", ".", "DoesNotExist", ":", "return", "None", "user", "=", "email_address", ".", "user", "if", "user", ".", "check_password", "(", "password", ")", ":", "return", "user" ]
https://github.com/pinax/django-user-accounts/blob/e83effdd4a23cd8d830169904c261ff6677ee3e6/account/auth_backends.py#L48-L62
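For this backend to be consulted, Django has to be told about it; the usual approach is to list it in AUTHENTICATION_BACKENDS ahead of (or alongside) the default ModelBackend. A sketch of the relevant settings.py fragment, assuming the app is importable as "account" per the path in the record:

# settings.py (fragment)
AUTHENTICATION_BACKENDS = [
    "account.auth_backends.EmailAuthenticationBackend",  # email/password login
    "django.contrib.auth.backends.ModelBackend",         # username/password fallback
]

# later, e.g. inside a view where `request` is available:
from django.contrib.auth import authenticate
user = authenticate(request, username="user@example.com", password="secret")  # None if no match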
wzzheng/HDML
fa03ef0eb0be2256df7aab928bcc596c41d44fb3
lib/nn_Ops.py
python
weight_variable
(shape, name, wd=True)
return weight
A function to create weight variables :param shape: The shape of weight :param name: The name of the weight :param wd: Whether or not this variable should be weight decade :return: A weight-variable
A function to create weight variables :param shape: The shape of weight :param name: The name of the weight :param wd: Whether or not this variable should be weight decade :return: A weight-variable
[ "A", "function", "to", "create", "weight", "variables", ":", "param", "shape", ":", "The", "shape", "of", "weight", ":", "param", "name", ":", "The", "name", "of", "the", "weight", ":", "param", "wd", ":", "Whether", "or", "not", "this", "variable", "should", "be", "weight", "decade", ":", "return", ":", "A", "weight", "-", "variable" ]
def weight_variable(shape, name, wd=True): """ A function to create weight variables :param shape: The shape of weight :param name: The name of the weight :param wd: Whether or not this variable should be weight decade :return: A weight-variable """ initializer = tf.glorot_uniform_initializer()#tf.contrib.layers.xavier_initializer() # tf.truncated_normal_initializer(stddev=0.1) if wd: weight = tf.get_variable(name='weight'+name, shape=shape, initializer=initializer, collections=[tf.GraphKeys.WEIGHTS, tf.GraphKeys.GLOBAL_VARIABLES]) else: weight = tf.get_variable(name='weight' + name, shape=shape, initializer=initializer, collections=[tf.GraphKeys.GLOBAL_VARIABLES]) return weight
[ "def", "weight_variable", "(", "shape", ",", "name", ",", "wd", "=", "True", ")", ":", "initializer", "=", "tf", ".", "glorot_uniform_initializer", "(", ")", "#tf.contrib.layers.xavier_initializer() # tf.truncated_normal_initializer(stddev=0.1)", "if", "wd", ":", "weight", "=", "tf", ".", "get_variable", "(", "name", "=", "'weight'", "+", "name", ",", "shape", "=", "shape", ",", "initializer", "=", "initializer", ",", "collections", "=", "[", "tf", ".", "GraphKeys", ".", "WEIGHTS", ",", "tf", ".", "GraphKeys", ".", "GLOBAL_VARIABLES", "]", ")", "else", ":", "weight", "=", "tf", ".", "get_variable", "(", "name", "=", "'weight'", "+", "name", ",", "shape", "=", "shape", ",", "initializer", "=", "initializer", ",", "collections", "=", "[", "tf", ".", "GraphKeys", ".", "GLOBAL_VARIABLES", "]", ")", "return", "weight" ]
https://github.com/wzzheng/HDML/blob/fa03ef0eb0be2256df7aab928bcc596c41d44fb3/lib/nn_Ops.py#L10-L28
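The helper above targets the TensorFlow 1.x graph API (tf.get_variable plus explicit variable collections), so it must be used inside a graph/session context. The sketch below re-states the helper in compact form and shows one way it is typically called; it assumes TensorFlow 1.x (under TF2 the same names exist via tensorflow.compat.v1 after tf.disable_v2_behavior()), and the shapes and scope names are invented.

import tensorflow as tf  # assumes the TensorFlow 1.x graph API

def weight_variable(shape, name, wd=True):
    # Same behaviour as the record: Glorot-uniform init; if wd=True the variable
    # is also tracked in the WEIGHTS collection so a decay term can gather it.
    initializer = tf.glorot_uniform_initializer()
    collections = [tf.GraphKeys.GLOBAL_VARIABLES]
    if wd:
        collections = [tf.GraphKeys.WEIGHTS] + collections
    return tf.get_variable(name='weight' + name, shape=shape,
                           initializer=initializer, collections=collections)

w = weight_variable([128, 64], name='fc1', wd=True)   # subject to weight decay
b = weight_variable([64], name='fc1_bias', wd=False)  # excluded from decay

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w).shape)                        # (128, 64)
    print(tf.get_collection(tf.GraphKeys.WEIGHTS))  # contains only `w`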
ialbert/biostar-central
2dc7bd30691a50b2da9c2833ba354056bc686afa
biostar/forum/markdown.py
python
BiostarInlineLexer.output_anchor_link
(self, m)
return f'<a href="{link}">{title}</a>'
[]
def output_anchor_link(self, m): uid = m.group("uid") link = m.group(0) post = Post.objects.filter(uid=uid).first() title = post.root.title if post else "Post not found" return f'<a href="{link}">{title}</a>'
[ "def", "output_anchor_link", "(", "self", ",", "m", ")", ":", "uid", "=", "m", ".", "group", "(", "\"uid\"", ")", "link", "=", "m", ".", "group", "(", "0", ")", "post", "=", "Post", ".", "objects", ".", "filter", "(", "uid", "=", "uid", ")", ".", "first", "(", ")", "title", "=", "post", ".", "root", ".", "title", "if", "post", "else", "\"Post not found\"", "return", "f'<a href=\"{link}\">{title}</a>'" ]
https://github.com/ialbert/biostar-central/blob/2dc7bd30691a50b2da9c2833ba354056bc686afa/biostar/forum/markdown.py#L269-L274
openstack/trove
be86b79119d16ee77f596172f43b0c97cb2617bd
trove/guestagent/datastore/postgres/query.py
python
UserQuery.update_password
(cls, name, password, encrypt_password=None)
return cls.alter_user(name, password, encrypt_password)
Query to update the password for a user.
Query to update the password for a user.
[ "Query", "to", "update", "the", "password", "for", "a", "user", "." ]
def update_password(cls, name, password, encrypt_password=None): """Query to update the password for a user.""" return cls.alter_user(name, password, encrypt_password)
[ "def", "update_password", "(", "cls", ",", "name", ",", "password", ",", "encrypt_password", "=", "None", ")", ":", "return", "cls", ".", "alter_user", "(", "name", ",", "password", ",", "encrypt_password", ")" ]
https://github.com/openstack/trove/blob/be86b79119d16ee77f596172f43b0c97cb2617bd/trove/guestagent/datastore/postgres/query.py#L116-L119
tensorflow/kfac
fe90e36c3e0b42c73e4a34835a66f6d45e2a442d
kfac/python/ops/fisher_factors.py
python
ConvInputSUAKroneckerFactor.instantiate_inv_variables
(self)
Makes the internal "inverse" variable(s).
Makes the internal "inverse" variable(s).
[ "Makes", "the", "internal", "inverse", "variable", "(", "s", ")", "." ]
def instantiate_inv_variables(self): """Makes the internal "inverse" variable(s).""" for (exp, damping_id) in self._matpower_registrations: if exp != -1.: raise ValueError("ConvInputSUAKroneckerFactor only supports inverse" "computation") exp_string = scalar_or_tensor_to_string(exp) damping_func = self._damping_funcs_by_id[damping_id] damping_string = graph_func_to_string(damping_func) with tf.variable_scope(self._var_scope): matpower = tf.get_variable( "matpower_exp{}_damp{}".format(exp_string, damping_string), initializer=inverse_initializer, shape=self._cov_shape, trainable=False, dtype=self._dtype, use_resource=True) assert (exp, damping_id) not in self._matpower_by_exp_and_damping self._matpower_by_exp_and_damping[(exp, damping_id)] = matpower self._damping_var_by_id[damping_id] = tf.get_variable( "damping_var_{}_{}".format(exp_string, damping_string), initializer=tf.zeros_initializer(), shape=(), trainable=False, dtype=self._dtype, use_resource=True) if not ASSUME_ZERO_MEAN_ACTIVATIONS: self._cov_inv_mu_by_damping_id[damping_id] = tf.get_variable( "cov_inv_mu_{}_{}".format(exp_string, damping_string), initializer=tf.zeros_initializer(), shape=(self._in_channels, 1), trainable=False, dtype=self._dtype, use_resource=True) self._rank_one_update_scale_by_damping_id[damping_id] = tf.get_variable( "rank_one_update_scale_{}_{}".format(exp_string, damping_string), initializer=tf.zeros_initializer(), shape=(), trainable=False, dtype=self._dtype, use_resource=True)
[ "def", "instantiate_inv_variables", "(", "self", ")", ":", "for", "(", "exp", ",", "damping_id", ")", "in", "self", ".", "_matpower_registrations", ":", "if", "exp", "!=", "-", "1.", ":", "raise", "ValueError", "(", "\"ConvInputSUAKroneckerFactor only supports inverse\"", "\"computation\"", ")", "exp_string", "=", "scalar_or_tensor_to_string", "(", "exp", ")", "damping_func", "=", "self", ".", "_damping_funcs_by_id", "[", "damping_id", "]", "damping_string", "=", "graph_func_to_string", "(", "damping_func", ")", "with", "tf", ".", "variable_scope", "(", "self", ".", "_var_scope", ")", ":", "matpower", "=", "tf", ".", "get_variable", "(", "\"matpower_exp{}_damp{}\"", ".", "format", "(", "exp_string", ",", "damping_string", ")", ",", "initializer", "=", "inverse_initializer", ",", "shape", "=", "self", ".", "_cov_shape", ",", "trainable", "=", "False", ",", "dtype", "=", "self", ".", "_dtype", ",", "use_resource", "=", "True", ")", "assert", "(", "exp", ",", "damping_id", ")", "not", "in", "self", ".", "_matpower_by_exp_and_damping", "self", ".", "_matpower_by_exp_and_damping", "[", "(", "exp", ",", "damping_id", ")", "]", "=", "matpower", "self", ".", "_damping_var_by_id", "[", "damping_id", "]", "=", "tf", ".", "get_variable", "(", "\"damping_var_{}_{}\"", ".", "format", "(", "exp_string", ",", "damping_string", ")", ",", "initializer", "=", "tf", ".", "zeros_initializer", "(", ")", ",", "shape", "=", "(", ")", ",", "trainable", "=", "False", ",", "dtype", "=", "self", ".", "_dtype", ",", "use_resource", "=", "True", ")", "if", "not", "ASSUME_ZERO_MEAN_ACTIVATIONS", ":", "self", ".", "_cov_inv_mu_by_damping_id", "[", "damping_id", "]", "=", "tf", ".", "get_variable", "(", "\"cov_inv_mu_{}_{}\"", ".", "format", "(", "exp_string", ",", "damping_string", ")", ",", "initializer", "=", "tf", ".", "zeros_initializer", "(", ")", ",", "shape", "=", "(", "self", ".", "_in_channels", ",", "1", ")", ",", "trainable", "=", "False", ",", "dtype", "=", "self", ".", "_dtype", ",", "use_resource", "=", "True", ")", "self", ".", "_rank_one_update_scale_by_damping_id", "[", "damping_id", "]", "=", "tf", ".", "get_variable", "(", "\"rank_one_update_scale_{}_{}\"", ".", "format", "(", "exp_string", ",", "damping_string", ")", ",", "initializer", "=", "tf", ".", "zeros_initializer", "(", ")", ",", "shape", "=", "(", ")", ",", "trainable", "=", "False", ",", "dtype", "=", "self", ".", "_dtype", ",", "use_resource", "=", "True", ")" ]
https://github.com/tensorflow/kfac/blob/fe90e36c3e0b42c73e4a34835a66f6d45e2a442d/kfac/python/ops/fisher_factors.py#L2247-L2293
bikalims/bika.lims
35e4bbdb5a3912cae0b5eb13e51097c8b0486349
bika/lims/exportimport/setupdata/__init__.py
python
Invoice_Batches.Import
(self)
[]
def Import(self): folder = self.context.invoices for row in self.get_rows(3): obj = _createObjectByType("InvoiceBatch", folder, tmpID()) if not row['title']: message = _("InvoiceBatch has no Title") raise Exception(t(message)) if not row['start']: message = _("InvoiceBatch has no Start Date") raise Exception(t(message)) if not row['end']: message = _("InvoiceBatch has no End Date") raise Exception(t(message)) obj.edit( title=row['title'], BatchStartDate=row['start'], BatchEndDate=row['end'], ) renameAfterCreation(obj)
[ "def", "Import", "(", "self", ")", ":", "folder", "=", "self", ".", "context", ".", "invoices", "for", "row", "in", "self", ".", "get_rows", "(", "3", ")", ":", "obj", "=", "_createObjectByType", "(", "\"InvoiceBatch\"", ",", "folder", ",", "tmpID", "(", ")", ")", "if", "not", "row", "[", "'title'", "]", ":", "message", "=", "_", "(", "\"InvoiceBatch has no Title\"", ")", "raise", "Exception", "(", "t", "(", "message", ")", ")", "if", "not", "row", "[", "'start'", "]", ":", "message", "=", "_", "(", "\"InvoiceBatch has no Start Date\"", ")", "raise", "Exception", "(", "t", "(", "message", ")", ")", "if", "not", "row", "[", "'end'", "]", ":", "message", "=", "_", "(", "\"InvoiceBatch has no End Date\"", ")", "raise", "Exception", "(", "t", "(", "message", ")", ")", "obj", ".", "edit", "(", "title", "=", "row", "[", "'title'", "]", ",", "BatchStartDate", "=", "row", "[", "'start'", "]", ",", "BatchEndDate", "=", "row", "[", "'end'", "]", ",", ")", "renameAfterCreation", "(", "obj", ")" ]
https://github.com/bikalims/bika.lims/blob/35e4bbdb5a3912cae0b5eb13e51097c8b0486349/bika/lims/exportimport/setupdata/__init__.py#L2262-L2280
bikalims/bika.lims
35e4bbdb5a3912cae0b5eb13e51097c8b0486349
bika/lims/validators.py
python
InterimFieldsValidator.__call__
(self, value, *args, **kwargs)
return True
[]
def __call__(self, value, *args, **kwargs): instance = kwargs['instance'] fieldname = kwargs['field'].getName() request = kwargs.get('REQUEST', {}) form = request.form interim_fields = form.get(fieldname, []) translate = getToolByName(instance, 'translation_service').translate bsc = getToolByName(instance, 'bika_setup_catalog') # We run through the validator once per form submit, and check all # values # this value in request prevents running once per subfield value. key = instance.id + fieldname if instance.REQUEST.get(key, False): return True for x in range(len(interim_fields)): row = interim_fields[x] keys = row.keys() if 'title' not in keys: instance.REQUEST[key] = to_utf8( translate(_("Validation failed: title is required"))) return instance.REQUEST[key] if 'keyword' not in keys: instance.REQUEST[key] = to_utf8( translate(_("Validation failed: keyword is required"))) return instance.REQUEST[key] if not re.match(r"^[A-Za-z\w\d\-\_]+$", row['keyword']): instance.REQUEST[key] = _( "Validation failed: keyword contains invalid characters") return instance.REQUEST[key] # keywords and titles used once only in the submitted form keywords = {} titles = {} for field in interim_fields: if 'keyword' in field: if field['keyword'] in keywords: keywords[field['keyword']] += 1 else: keywords[field['keyword']] = 1 if 'title' in field: if field['title'] in titles: titles[field['title']] += 1 else: titles[field['title']] = 1 for k in [k for k in keywords.keys() if keywords[k] > 1]: msg = _("Validation failed: '${keyword}': duplicate keyword", mapping={'keyword': safe_unicode(k)}) instance.REQUEST[key] = to_utf8(translate(msg)) return instance.REQUEST[key] for t in [t for t in titles.keys() if titles[t] > 1]: msg = _("Validation failed: '${title}': duplicate title", mapping={'title': safe_unicode(t)}) instance.REQUEST[key] = to_utf8(translate(msg)) return instance.REQUEST[key] # check all keywords against all AnalysisService keywords for dups services = bsc(portal_type='AnalysisService', getKeyword=value) if services: msg = _("Validation failed: '${title}': " "This keyword is already in use by service '${used_by}'", mapping={'title': safe_unicode(value), 'used_by': safe_unicode(services[0].Title)}) instance.REQUEST[key] = to_utf8(translate(msg)) return instance.REQUEST[key] # any duplicated interimfield titles must share the same keyword # any duplicated interimfield keywords must share the same title calcs = bsc(portal_type='Calculation') keyword_titles = {} title_keywords = {} for calc in calcs: if calc.UID == instance.UID(): continue calc = calc.getObject() for field in calc.getInterimFields(): keyword_titles[field['keyword']] = field['title'] title_keywords[field['title']] = field['keyword'] for field in interim_fields: if field['keyword'] != value: continue if 'title' in field and \ field['title'] in title_keywords.keys() and \ title_keywords[field['title']] != field['keyword']: msg = _("Validation failed: column title '${title}' " "must have keyword '${keyword}'", mapping={'title': safe_unicode(field['title']), 'keyword': safe_unicode( title_keywords[field['title']])}) instance.REQUEST[key] = to_utf8(translate(msg)) return instance.REQUEST[key] if 'keyword' in field and \ field['keyword'] in keyword_titles.keys() and \ keyword_titles[field['keyword']] != field['title']: msg = _("Validation failed: keyword '${keyword}' " "must have column title '${title}'", mapping={'keyword': safe_unicode(field['keyword']), 'title': safe_unicode( keyword_titles[field['keyword']])}) instance.REQUEST[key] = to_utf8(translate(msg)) return instance.REQUEST[key] instance.REQUEST[key] = True return True
[ "def", "__call__", "(", "self", ",", "value", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "instance", "=", "kwargs", "[", "'instance'", "]", "fieldname", "=", "kwargs", "[", "'field'", "]", ".", "getName", "(", ")", "request", "=", "kwargs", ".", "get", "(", "'REQUEST'", ",", "{", "}", ")", "form", "=", "request", ".", "form", "interim_fields", "=", "form", ".", "get", "(", "fieldname", ",", "[", "]", ")", "translate", "=", "getToolByName", "(", "instance", ",", "'translation_service'", ")", ".", "translate", "bsc", "=", "getToolByName", "(", "instance", ",", "'bika_setup_catalog'", ")", "# We run through the validator once per form submit, and check all", "# values", "# this value in request prevents running once per subfield value.", "key", "=", "instance", ".", "id", "+", "fieldname", "if", "instance", ".", "REQUEST", ".", "get", "(", "key", ",", "False", ")", ":", "return", "True", "for", "x", "in", "range", "(", "len", "(", "interim_fields", ")", ")", ":", "row", "=", "interim_fields", "[", "x", "]", "keys", "=", "row", ".", "keys", "(", ")", "if", "'title'", "not", "in", "keys", ":", "instance", ".", "REQUEST", "[", "key", "]", "=", "to_utf8", "(", "translate", "(", "_", "(", "\"Validation failed: title is required\"", ")", ")", ")", "return", "instance", ".", "REQUEST", "[", "key", "]", "if", "'keyword'", "not", "in", "keys", ":", "instance", ".", "REQUEST", "[", "key", "]", "=", "to_utf8", "(", "translate", "(", "_", "(", "\"Validation failed: keyword is required\"", ")", ")", ")", "return", "instance", ".", "REQUEST", "[", "key", "]", "if", "not", "re", ".", "match", "(", "r\"^[A-Za-z\\w\\d\\-\\_]+$\"", ",", "row", "[", "'keyword'", "]", ")", ":", "instance", ".", "REQUEST", "[", "key", "]", "=", "_", "(", "\"Validation failed: keyword contains invalid characters\"", ")", "return", "instance", ".", "REQUEST", "[", "key", "]", "# keywords and titles used once only in the submitted form", "keywords", "=", "{", "}", "titles", "=", "{", "}", "for", "field", "in", "interim_fields", ":", "if", "'keyword'", "in", "field", ":", "if", "field", "[", "'keyword'", "]", "in", "keywords", ":", "keywords", "[", "field", "[", "'keyword'", "]", "]", "+=", "1", "else", ":", "keywords", "[", "field", "[", "'keyword'", "]", "]", "=", "1", "if", "'title'", "in", "field", ":", "if", "field", "[", "'title'", "]", "in", "titles", ":", "titles", "[", "field", "[", "'title'", "]", "]", "+=", "1", "else", ":", "titles", "[", "field", "[", "'title'", "]", "]", "=", "1", "for", "k", "in", "[", "k", "for", "k", "in", "keywords", ".", "keys", "(", ")", "if", "keywords", "[", "k", "]", ">", "1", "]", ":", "msg", "=", "_", "(", "\"Validation failed: '${keyword}': duplicate keyword\"", ",", "mapping", "=", "{", "'keyword'", ":", "safe_unicode", "(", "k", ")", "}", ")", "instance", ".", "REQUEST", "[", "key", "]", "=", "to_utf8", "(", "translate", "(", "msg", ")", ")", "return", "instance", ".", "REQUEST", "[", "key", "]", "for", "t", "in", "[", "t", "for", "t", "in", "titles", ".", "keys", "(", ")", "if", "titles", "[", "t", "]", ">", "1", "]", ":", "msg", "=", "_", "(", "\"Validation failed: '${title}': duplicate title\"", ",", "mapping", "=", "{", "'title'", ":", "safe_unicode", "(", "t", ")", "}", ")", "instance", ".", "REQUEST", "[", "key", "]", "=", "to_utf8", "(", "translate", "(", "msg", ")", ")", "return", "instance", ".", "REQUEST", "[", "key", "]", "# check all keywords against all AnalysisService keywords for dups", "services", "=", "bsc", "(", "portal_type", "=", "'AnalysisService'", ",", "getKeyword", "=", "value", ")", "if", "services", ":", "msg", "=", "_", "(", "\"Validation failed: '${title}': \"", "\"This keyword is already in use by service '${used_by}'\"", ",", "mapping", "=", "{", "'title'", ":", "safe_unicode", "(", "value", ")", ",", "'used_by'", ":", "safe_unicode", "(", "services", "[", "0", "]", ".", "Title", ")", "}", ")", "instance", ".", "REQUEST", "[", "key", "]", "=", "to_utf8", "(", "translate", "(", "msg", ")", ")", "return", "instance", ".", "REQUEST", "[", "key", "]", "# any duplicated interimfield titles must share the same keyword", "# any duplicated interimfield keywords must share the same title", "calcs", "=", "bsc", "(", "portal_type", "=", "'Calculation'", ")", "keyword_titles", "=", "{", "}", "title_keywords", "=", "{", "}", "for", "calc", "in", "calcs", ":", "if", "calc", ".", "UID", "==", "instance", ".", "UID", "(", ")", ":", "continue", "calc", "=", "calc", ".", "getObject", "(", ")", "for", "field", "in", "calc", ".", "getInterimFields", "(", ")", ":", "keyword_titles", "[", "field", "[", "'keyword'", "]", "]", "=", "field", "[", "'title'", "]", "title_keywords", "[", "field", "[", "'title'", "]", "]", "=", "field", "[", "'keyword'", "]", "for", "field", "in", "interim_fields", ":", "if", "field", "[", "'keyword'", "]", "!=", "value", ":", "continue", "if", "'title'", "in", "field", "and", "field", "[", "'title'", "]", "in", "title_keywords", ".", "keys", "(", ")", "and", "title_keywords", "[", "field", "[", "'title'", "]", "]", "!=", "field", "[", "'keyword'", "]", ":", "msg", "=", "_", "(", "\"Validation failed: column title '${title}' \"", "\"must have keyword '${keyword}'\"", ",", "mapping", "=", "{", "'title'", ":", "safe_unicode", "(", "field", "[", "'title'", "]", ")", ",", "'keyword'", ":", "safe_unicode", "(", "title_keywords", "[", "field", "[", "'title'", "]", "]", ")", "}", ")", "instance", ".", "REQUEST", "[", "key", "]", "=", "to_utf8", "(", "translate", "(", "msg", ")", ")", "return", "instance", ".", "REQUEST", "[", "key", "]", "if", "'keyword'", "in", "field", "and", "field", "[", "'keyword'", "]", "in", "keyword_titles", ".", "keys", "(", ")", "and", "keyword_titles", "[", "field", "[", "'keyword'", "]", "]", "!=", "field", "[", "'title'", "]", ":", "msg", "=", "_", "(", "\"Validation failed: keyword '${keyword}' \"", "\"must have column title '${title}'\"", ",", "mapping", "=", "{", "'keyword'", ":", "safe_unicode", "(", "field", "[", "'keyword'", "]", ")", ",", "'title'", ":", "safe_unicode", "(", "keyword_titles", "[", "field", "[", "'keyword'", "]", "]", ")", "}", ")", "instance", ".", "REQUEST", "[", "key", "]", "=", "to_utf8", "(", "translate", "(", "msg", ")", ")", "return", "instance", ".", "REQUEST", "[", "key", "]", "instance", ".", "REQUEST", "[", "key", "]", "=", "True", "return", "True" ]
https://github.com/bikalims/bika.lims/blob/35e4bbdb5a3912cae0b5eb13e51097c8b0486349/bika/lims/validators.py#L247-L352
JiYou/openstack
8607dd488bde0905044b303eb6e52bdea6806923
chap19/monitor/python-monitorclient-1.1/build/lib.linux-x86_64-2.7/monitorclient/openstack/common/memorycache.py
python
Client.incr
(self, key, delta=1)
return new_value
Increments the value for a key.
Increments the value for a key.
[ "Increments", "the", "value", "for", "a", "key", "." ]
def incr(self, key, delta=1): """Increments the value for a key.""" value = self.get(key) if value is None: return None new_value = int(value) + delta self.cache[key] = (self.cache[key][0], str(new_value)) return new_value
[ "def", "incr", "(", "self", ",", "key", ",", "delta", "=", "1", ")", ":", "value", "=", "self", ".", "get", "(", "key", ")", "if", "value", "is", "None", ":", "return", "None", "new_value", "=", "int", "(", "value", ")", "+", "delta", "self", ".", "cache", "[", "key", "]", "=", "(", "self", ".", "cache", "[", "key", "]", "[", "0", "]", ",", "str", "(", "new_value", ")", ")", "return", "new_value" ]
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/chap19/monitor/python-monitorclient-1.1/build/lib.linux-x86_64-2.7/monitorclient/openstack/common/memorycache.py#L84-L91
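incr in the record above silently returns None when the key was never set, and otherwise stores the incremented value back as a string alongside its original timeout. The standalone dict-backed sketch below mirrors those semantics; the TinyCache class and the sample keys are invented for illustration.

import time

class TinyCache:
    def __init__(self):
        self.cache = {}

    def set(self, key, value, time_=0):
        # Store (absolute timeout, value-as-string); 0 means "never expires".
        timeout = time.time() + time_ if time_ else 0
        self.cache[key] = (timeout, str(value))

    def get(self, key):
        entry = self.cache.get(key)
        return entry[1] if entry else None

    def incr(self, key, delta=1):
        # Same behaviour as the record: None for unknown keys, otherwise
        # increment and write the new value back as a string.
        value = self.get(key)
        if value is None:
            return None
        new_value = int(value) + delta
        self.cache[key] = (self.cache[key][0], str(new_value))
        return new_value

c = TinyCache()
print(c.incr("hits"))     # None - key was never set
c.set("hits", 1)
print(c.incr("hits"))     # 2
print(c.incr("hits", 5))  # 7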