repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
Stranger6667/postmarker
postmarker/models/stats.py
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/stats.py#L67-L71
def clicks(self, tag=None, fromdate=None, todate=None): """ Gets total counts of unique links that were clicked. """ return self.call("GET", "/stats/outbound/clicks", tag=tag, fromdate=fromdate, todate=todate)
[ "def", "clicks", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound/clicks\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ")" ]
Gets total counts of unique links that were clicked.
[ "Gets", "total", "counts", "of", "unique", "links", "that", "were", "clicked", "." ]
python
train
django-ses/django-ses
django_ses/__init__.py
https://github.com/django-ses/django-ses/blob/2f0fd8e3fdc76d3512982c0bb8e2f6e93e09fa3c/django_ses/__init__.py#L77-L96
def open(self): """Create a connection to the AWS API server. This can be reused for sending multiple emails. """ if self.connection: return False try: self.connection = SESConnection( aws_access_key_id=self._access_key_id, aws_secret_access_key=self._access_key, region=self._region, proxy=self._proxy, proxy_port=self._proxy_port, proxy_user=self._proxy_user, proxy_pass=self._proxy_pass, ) except Exception: if not self.fail_silently: raise
[ "def", "open", "(", "self", ")", ":", "if", "self", ".", "connection", ":", "return", "False", "try", ":", "self", ".", "connection", "=", "SESConnection", "(", "aws_access_key_id", "=", "self", ".", "_access_key_id", ",", "aws_secret_access_key", "=", "self", ".", "_access_key", ",", "region", "=", "self", ".", "_region", ",", "proxy", "=", "self", ".", "_proxy", ",", "proxy_port", "=", "self", ".", "_proxy_port", ",", "proxy_user", "=", "self", ".", "_proxy_user", ",", "proxy_pass", "=", "self", ".", "_proxy_pass", ",", ")", "except", "Exception", ":", "if", "not", "self", ".", "fail_silently", ":", "raise" ]
Create a connection to the AWS API server. This can be reused for sending multiple emails.
[ "Create", "a", "connection", "to", "the", "AWS", "API", "server", ".", "This", "can", "be", "reused", "for", "sending", "multiple", "emails", "." ]
python
train
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/_taggers.py
https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_taggers.py#L415-L451
def multi_token_match(stream_item, aligner_data): ''' iterate through tokens looking for near-exact matches to strings in si.ratings...mentions ''' tagger_id = _get_tagger_id(stream_item, aligner_data) sentences = stream_item.body.sentences.get(tagger_id) if not sentences: return ## construct a list of tuples, where the first part of each tuple ## is a tuple of cleansed strings, and the second part is the ## Token object from which it came. tokens = map(lambda tok: (cleanse(tok.token.decode('utf8')).split(' '), tok), itertools.chain(*[sent.tokens for sent in sentences])) required_annotator_id = aligner_data['annotator_id'] for annotator_id, ratings in stream_item.ratings.items(): if (required_annotator_id is None) or (annotator_id == required_annotator_id): for rating in ratings: label = Label(annotator=rating.annotator, target=rating.target) num_tokens_matched = 0 for tok in look_ahead_match(rating, tokens): if aligner_data.get('update_labels'): tok.labels.pop(annotator_id, None) add_annotation(tok, label) num_tokens_matched += 1 if num_tokens_matched == 0: logger.warning('multi_token_match didn\'t actually match ' 'entity %r in stream_id %r', rating.target.target_id, stream_item.stream_id) else: logger.debug('matched %d tokens for %r in %r', num_tokens_matched, rating.target.target_id, stream_item.stream_id)
[ "def", "multi_token_match", "(", "stream_item", ",", "aligner_data", ")", ":", "tagger_id", "=", "_get_tagger_id", "(", "stream_item", ",", "aligner_data", ")", "sentences", "=", "stream_item", ".", "body", ".", "sentences", ".", "get", "(", "tagger_id", ")", "if", "not", "sentences", ":", "return", "## construct a list of tuples, where the first part of each tuple", "## is a tuple of cleansed strings, and the second part is the", "## Token object from which it came.", "tokens", "=", "map", "(", "lambda", "tok", ":", "(", "cleanse", "(", "tok", ".", "token", ".", "decode", "(", "'utf8'", ")", ")", ".", "split", "(", "' '", ")", ",", "tok", ")", ",", "itertools", ".", "chain", "(", "*", "[", "sent", ".", "tokens", "for", "sent", "in", "sentences", "]", ")", ")", "required_annotator_id", "=", "aligner_data", "[", "'annotator_id'", "]", "for", "annotator_id", ",", "ratings", "in", "stream_item", ".", "ratings", ".", "items", "(", ")", ":", "if", "(", "required_annotator_id", "is", "None", ")", "or", "(", "annotator_id", "==", "required_annotator_id", ")", ":", "for", "rating", "in", "ratings", ":", "label", "=", "Label", "(", "annotator", "=", "rating", ".", "annotator", ",", "target", "=", "rating", ".", "target", ")", "num_tokens_matched", "=", "0", "for", "tok", "in", "look_ahead_match", "(", "rating", ",", "tokens", ")", ":", "if", "aligner_data", ".", "get", "(", "'update_labels'", ")", ":", "tok", ".", "labels", ".", "pop", "(", "annotator_id", ",", "None", ")", "add_annotation", "(", "tok", ",", "label", ")", "num_tokens_matched", "+=", "1", "if", "num_tokens_matched", "==", "0", ":", "logger", ".", "warning", "(", "'multi_token_match didn\\'t actually match '", "'entity %r in stream_id %r'", ",", "rating", ".", "target", ".", "target_id", ",", "stream_item", ".", "stream_id", ")", "else", ":", "logger", ".", "debug", "(", "'matched %d tokens for %r in %r'", ",", "num_tokens_matched", ",", "rating", ".", "target", ".", "target_id", ",", "stream_item", ".", "stream_id", ")" ]
iterate through tokens looking for near-exact matches to strings in si.ratings...mentions
[ "iterate", "through", "tokens", "looking", "for", "near", "-", "exact", "matches", "to", "strings", "in", "si", ".", "ratings", "...", "mentions" ]
python
test
TestInABox/stackInABox
stackinabox/util/requests_mock/core.py
https://github.com/TestInABox/stackInABox/blob/63ee457401e9a88d987f85f513eb512dcb12d984/stackinabox/util/requests_mock/core.py#L226-L228
def requests_post(url, data=None, json=None, **kwargs): """Requests-mock requests.post wrapper.""" return requests_request('post', url, data=data, json=json, **kwargs)
[ "def", "requests_post", "(", "url", ",", "data", "=", "None", ",", "json", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "requests_request", "(", "'post'", ",", "url", ",", "data", "=", "data", ",", "json", "=", "json", ",", "*", "*", "kwargs", ")" ]
Requests-mock requests.post wrapper.
[ "Requests", "-", "mock", "requests", ".", "post", "wrapper", "." ]
python
train
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1886-L1889
def p_genvarlist(self, p): 'genvarlist : genvarlist COMMA genvar' p[0] = p[1] + (p[3],) p.set_lineno(0, p.lineno(1))
[ "def", "p_genvarlist", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "(", "p", "[", "3", "]", ",", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
genvarlist : genvarlist COMMA genvar
[ "genvarlist", ":", "genvarlist", "COMMA", "genvar" ]
python
train
HewlettPackard/python-hpOneView
hpOneView/resources/uncategorized/os_deployment_servers.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/uncategorized/os_deployment_servers.py#L153-L171
def delete(self, resource, force=False, timeout=-1): """ Deletes a Deployment Server object based on its UUID or URI. Args: resource (dict): Object to delete. force: If set to true, the operation completes despite any problems with network connectivity or errors on the resource itself. The default is false. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: bool: Indicates if the volume was successfully deleted. """ return self._client.delete(resource, force=force, timeout=timeout)
[ "def", "delete", "(", "self", ",", "resource", ",", "force", "=", "False", ",", "timeout", "=", "-", "1", ")", ":", "return", "self", ".", "_client", ".", "delete", "(", "resource", ",", "force", "=", "force", ",", "timeout", "=", "timeout", ")" ]
Deletes a Deployment Server object based on its UUID or URI. Args: resource (dict): Object to delete. force: If set to true, the operation completes despite any problems with network connectivity or errors on the resource itself. The default is false. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: bool: Indicates if the volume was successfully deleted.
[ "Deletes", "a", "Deployment", "Server", "object", "based", "on", "its", "UUID", "or", "URI", "." ]
python
train
pyviz/holoviews
holoviews/core/traversal.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/traversal.py#L40-L95
def unique_dimkeys(obj, default_dim='Frame'): """ Finds all common dimension keys in the object including subsets of dimensions. If there are is no common subset of dimensions, None is returned. Returns the list of dimensions followed by the list of unique keys. """ from .ndmapping import NdMapping, item_check from .spaces import HoloMap key_dims = obj.traverse(lambda x: (tuple(x.kdims), list(x.data.keys())), (HoloMap,)) if not key_dims: return [Dimension(default_dim)], [(0,)] dim_groups, keys = zip(*sorted(key_dims, key=lambda x: -len(x[0]))) dgroups = [frozenset(d.name for d in dg) for dg in dim_groups] subset = all(g1 <= g2 or g1 >= g2 for g1 in dgroups for g2 in dgroups) # Find unique keys if subset: dims = merge_dimensions(dim_groups) all_dims = sorted(dims, key=lambda x: dim_groups[0].index(x)) else: # Handle condition when HoloMap/DynamicMap dimensions do not overlap hmaps = obj.traverse(lambda x: x, ['HoloMap']) if hmaps: raise ValueError('When combining HoloMaps into a composite plot ' 'their dimensions must be subsets of each other.') dimensions = merge_dimensions(dim_groups) dim_keys = {} for dims, keys in key_dims: for key in keys: for d, k in zip(dims, key): dim_keys[d.name] = k if dim_keys: keys = [tuple(dim_keys.get(dim.name) for dim in dimensions)] else: keys = [] return merge_dimensions(dim_groups), keys ndims = len(all_dims) unique_keys = [] for group, keys in zip(dim_groups, keys): dim_idxs = [all_dims.index(dim) for dim in group] for key in keys: padded_key = create_ndkey(ndims, dim_idxs, key) matches = [item for item in unique_keys if padded_key == tuple(k if k is None else i for i, k in zip(item, padded_key))] if not matches: unique_keys.append(padded_key) with item_check(False): sorted_keys = NdMapping({key: None for key in unique_keys}, kdims=all_dims).data.keys() return all_dims, list(sorted_keys)
[ "def", "unique_dimkeys", "(", "obj", ",", "default_dim", "=", "'Frame'", ")", ":", "from", ".", "ndmapping", "import", "NdMapping", ",", "item_check", "from", ".", "spaces", "import", "HoloMap", "key_dims", "=", "obj", ".", "traverse", "(", "lambda", "x", ":", "(", "tuple", "(", "x", ".", "kdims", ")", ",", "list", "(", "x", ".", "data", ".", "keys", "(", ")", ")", ")", ",", "(", "HoloMap", ",", ")", ")", "if", "not", "key_dims", ":", "return", "[", "Dimension", "(", "default_dim", ")", "]", ",", "[", "(", "0", ",", ")", "]", "dim_groups", ",", "keys", "=", "zip", "(", "*", "sorted", "(", "key_dims", ",", "key", "=", "lambda", "x", ":", "-", "len", "(", "x", "[", "0", "]", ")", ")", ")", "dgroups", "=", "[", "frozenset", "(", "d", ".", "name", "for", "d", "in", "dg", ")", "for", "dg", "in", "dim_groups", "]", "subset", "=", "all", "(", "g1", "<=", "g2", "or", "g1", ">=", "g2", "for", "g1", "in", "dgroups", "for", "g2", "in", "dgroups", ")", "# Find unique keys", "if", "subset", ":", "dims", "=", "merge_dimensions", "(", "dim_groups", ")", "all_dims", "=", "sorted", "(", "dims", ",", "key", "=", "lambda", "x", ":", "dim_groups", "[", "0", "]", ".", "index", "(", "x", ")", ")", "else", ":", "# Handle condition when HoloMap/DynamicMap dimensions do not overlap", "hmaps", "=", "obj", ".", "traverse", "(", "lambda", "x", ":", "x", ",", "[", "'HoloMap'", "]", ")", "if", "hmaps", ":", "raise", "ValueError", "(", "'When combining HoloMaps into a composite plot '", "'their dimensions must be subsets of each other.'", ")", "dimensions", "=", "merge_dimensions", "(", "dim_groups", ")", "dim_keys", "=", "{", "}", "for", "dims", ",", "keys", "in", "key_dims", ":", "for", "key", "in", "keys", ":", "for", "d", ",", "k", "in", "zip", "(", "dims", ",", "key", ")", ":", "dim_keys", "[", "d", ".", "name", "]", "=", "k", "if", "dim_keys", ":", "keys", "=", "[", "tuple", "(", "dim_keys", ".", "get", "(", "dim", ".", "name", ")", "for", "dim", "in", "dimensions", ")", "]", "else", ":", "keys", "=", "[", "]", "return", "merge_dimensions", "(", "dim_groups", ")", ",", "keys", "ndims", "=", "len", "(", "all_dims", ")", "unique_keys", "=", "[", "]", "for", "group", ",", "keys", "in", "zip", "(", "dim_groups", ",", "keys", ")", ":", "dim_idxs", "=", "[", "all_dims", ".", "index", "(", "dim", ")", "for", "dim", "in", "group", "]", "for", "key", "in", "keys", ":", "padded_key", "=", "create_ndkey", "(", "ndims", ",", "dim_idxs", ",", "key", ")", "matches", "=", "[", "item", "for", "item", "in", "unique_keys", "if", "padded_key", "==", "tuple", "(", "k", "if", "k", "is", "None", "else", "i", "for", "i", ",", "k", "in", "zip", "(", "item", ",", "padded_key", ")", ")", "]", "if", "not", "matches", ":", "unique_keys", ".", "append", "(", "padded_key", ")", "with", "item_check", "(", "False", ")", ":", "sorted_keys", "=", "NdMapping", "(", "{", "key", ":", "None", "for", "key", "in", "unique_keys", "}", ",", "kdims", "=", "all_dims", ")", ".", "data", ".", "keys", "(", ")", "return", "all_dims", ",", "list", "(", "sorted_keys", ")" ]
Finds all common dimension keys in the object including subsets of dimensions. If there are is no common subset of dimensions, None is returned. Returns the list of dimensions followed by the list of unique keys.
[ "Finds", "all", "common", "dimension", "keys", "in", "the", "object", "including", "subsets", "of", "dimensions", ".", "If", "there", "are", "is", "no", "common", "subset", "of", "dimensions", "None", "is", "returned", "." ]
python
train
pennersr/django-allauth
allauth/socialaccount/models.py
https://github.com/pennersr/django-allauth/blob/f70cb3d622f992f15fe9b57098e0b328445b664e/allauth/socialaccount/models.py#L254-L285
def lookup(self): """ Lookup existing account, if any. """ assert not self.is_existing try: a = SocialAccount.objects.get(provider=self.account.provider, uid=self.account.uid) # Update account a.extra_data = self.account.extra_data self.account = a self.user = self.account.user a.save() # Update token if app_settings.STORE_TOKENS and self.token: assert not self.token.pk try: t = SocialToken.objects.get(account=self.account, app=self.token.app) t.token = self.token.token if self.token.token_secret: # only update the refresh token if we got one # many oauth2 providers do not resend the refresh token t.token_secret = self.token.token_secret t.expires_at = self.token.expires_at t.save() self.token = t except SocialToken.DoesNotExist: self.token.account = a self.token.save() except SocialAccount.DoesNotExist: pass
[ "def", "lookup", "(", "self", ")", ":", "assert", "not", "self", ".", "is_existing", "try", ":", "a", "=", "SocialAccount", ".", "objects", ".", "get", "(", "provider", "=", "self", ".", "account", ".", "provider", ",", "uid", "=", "self", ".", "account", ".", "uid", ")", "# Update account", "a", ".", "extra_data", "=", "self", ".", "account", ".", "extra_data", "self", ".", "account", "=", "a", "self", ".", "user", "=", "self", ".", "account", ".", "user", "a", ".", "save", "(", ")", "# Update token", "if", "app_settings", ".", "STORE_TOKENS", "and", "self", ".", "token", ":", "assert", "not", "self", ".", "token", ".", "pk", "try", ":", "t", "=", "SocialToken", ".", "objects", ".", "get", "(", "account", "=", "self", ".", "account", ",", "app", "=", "self", ".", "token", ".", "app", ")", "t", ".", "token", "=", "self", ".", "token", ".", "token", "if", "self", ".", "token", ".", "token_secret", ":", "# only update the refresh token if we got one", "# many oauth2 providers do not resend the refresh token", "t", ".", "token_secret", "=", "self", ".", "token", ".", "token_secret", "t", ".", "expires_at", "=", "self", ".", "token", ".", "expires_at", "t", ".", "save", "(", ")", "self", ".", "token", "=", "t", "except", "SocialToken", ".", "DoesNotExist", ":", "self", ".", "token", ".", "account", "=", "a", "self", ".", "token", ".", "save", "(", ")", "except", "SocialAccount", ".", "DoesNotExist", ":", "pass" ]
Lookup existing account, if any.
[ "Lookup", "existing", "account", "if", "any", "." ]
python
train
bitcraze/crazyflie-lib-python
cflib/positioning/position_hl_commander.py
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/positioning/position_hl_commander.py#L82-L110
def take_off(self, height=DEFAULT, velocity=DEFAULT): """ Takes off, that is starts the motors, goes straight up and hovers. Do not call this function if you use the with keyword. Take off is done automatically when the context is created. :param height: the height (meters) to hover at. None uses the default height set when constructed. :param velocity: the velocity (meters/second) when taking off :return: """ if self._is_flying: raise Exception('Already flying') if not self._cf.is_connected(): raise Exception('Crazyflie is not connected') self._is_flying = True self._reset_position_estimator() self._activate_controller() self._activate_high_level_commander() self._hl_commander = self._cf.high_level_commander height = self._height(height) duration_s = height / self._velocity(velocity) self._hl_commander.takeoff(height, duration_s) time.sleep(duration_s) self._z = height
[ "def", "take_off", "(", "self", ",", "height", "=", "DEFAULT", ",", "velocity", "=", "DEFAULT", ")", ":", "if", "self", ".", "_is_flying", ":", "raise", "Exception", "(", "'Already flying'", ")", "if", "not", "self", ".", "_cf", ".", "is_connected", "(", ")", ":", "raise", "Exception", "(", "'Crazyflie is not connected'", ")", "self", ".", "_is_flying", "=", "True", "self", ".", "_reset_position_estimator", "(", ")", "self", ".", "_activate_controller", "(", ")", "self", ".", "_activate_high_level_commander", "(", ")", "self", ".", "_hl_commander", "=", "self", ".", "_cf", ".", "high_level_commander", "height", "=", "self", ".", "_height", "(", "height", ")", "duration_s", "=", "height", "/", "self", ".", "_velocity", "(", "velocity", ")", "self", ".", "_hl_commander", ".", "takeoff", "(", "height", ",", "duration_s", ")", "time", ".", "sleep", "(", "duration_s", ")", "self", ".", "_z", "=", "height" ]
Takes off, that is starts the motors, goes straight up and hovers. Do not call this function if you use the with keyword. Take off is done automatically when the context is created. :param height: the height (meters) to hover at. None uses the default height set when constructed. :param velocity: the velocity (meters/second) when taking off :return:
[ "Takes", "off", "that", "is", "starts", "the", "motors", "goes", "straight", "up", "and", "hovers", ".", "Do", "not", "call", "this", "function", "if", "you", "use", "the", "with", "keyword", ".", "Take", "off", "is", "done", "automatically", "when", "the", "context", "is", "created", "." ]
python
train
skorch-dev/skorch
skorch/classifier.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/skorch/classifier.py#L321-L362
def predict_proba(self, X): """Where applicable, return probability estimates for samples. If the module's forward method returns multiple outputs as a tuple, it is assumed that the first output contains the relevant information and the other values are ignored. If all values are relevant, consider using :func:`~skorch.NeuralNet.forward` instead. Parameters ---------- X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. Returns ------- y_proba : numpy ndarray """ y_probas = [] bce_logits_loss = isinstance( self.criterion_, torch.nn.BCEWithLogitsLoss) for yp in self.forward_iter(X, training=False): yp = yp[0] if isinstance(yp, tuple) else yp if bce_logits_loss: yp = torch.sigmoid(yp) y_probas.append(to_numpy(yp)) y_proba = np.concatenate(y_probas, 0) return y_proba
[ "def", "predict_proba", "(", "self", ",", "X", ")", ":", "y_probas", "=", "[", "]", "bce_logits_loss", "=", "isinstance", "(", "self", ".", "criterion_", ",", "torch", ".", "nn", ".", "BCEWithLogitsLoss", ")", "for", "yp", "in", "self", ".", "forward_iter", "(", "X", ",", "training", "=", "False", ")", ":", "yp", "=", "yp", "[", "0", "]", "if", "isinstance", "(", "yp", ",", "tuple", ")", "else", "yp", "if", "bce_logits_loss", ":", "yp", "=", "torch", ".", "sigmoid", "(", "yp", ")", "y_probas", ".", "append", "(", "to_numpy", "(", "yp", ")", ")", "y_proba", "=", "np", ".", "concatenate", "(", "y_probas", ",", "0", ")", "return", "y_proba" ]
Where applicable, return probability estimates for samples. If the module's forward method returns multiple outputs as a tuple, it is assumed that the first output contains the relevant information and the other values are ignored. If all values are relevant, consider using :func:`~skorch.NeuralNet.forward` instead. Parameters ---------- X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. Returns ------- y_proba : numpy ndarray
[ "Where", "applicable", "return", "probability", "estimates", "for", "samples", "." ]
python
train
saltstack/salt
salt/states/win_path.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/win_path.py#L66-L234
def exists(name, index=None): ''' Add the directory to the system PATH at index location index Position where the directory should be placed in the PATH. This is 0-indexed, so 0 means to prepend at the very start of the PATH. .. note:: If the index is not specified, and the directory needs to be added to the PATH, then the directory will be appended to the PATH, and this state will not enforce its location within the PATH. Examples: .. code-block:: yaml 'C:\\python27': win_path.exists 'C:\\sysinternals': win_path.exists: - index: 0 'C:\\mystuff': win_path.exists: - index: -1 ''' try: name = os.path.normpath(salt.utils.stringutils.to_unicode(name)) except TypeError: name = six.text_type(name) ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} if index is not None and not isinstance(index, six.integer_types): ret['comment'] = 'Index must be an integer' ret['result'] = False return ret def _get_path_lowercase(): return [x.lower() for x in __salt__['win_path.get_path']()] def _index(path=None): if path is None: path = _get_path_lowercase() try: pos = path.index(name.lower()) except ValueError: return None else: if index is not None and index < 0: # Since a negative index was used, convert the index to a # negative index to make the changes dict easier to read, as # well as making comparisons manageable. return -(len(path) - pos) else: return pos def _changes(old, new): return {'index': {'old': old, 'new': new}} pre_path = _get_path_lowercase() num_dirs = len(pre_path) if index is not None: if index > num_dirs: ret.setdefault('warnings', []).append( 'There are only {0} directories in the PATH, using an index ' 'of {0} instead of {1}.'.format(num_dirs, index) ) index = num_dirs elif index <= -num_dirs: ret.setdefault('warnings', []).append( 'There are only {0} directories in the PATH, using an index ' 'of 0 instead of {1}.'.format(num_dirs, index) ) index = 0 old_index = _index(pre_path) comments = [] if old_index is not None: # Directory exists in PATH if index is None: # We're not enforcing the index, and the directory is in the PATH. # There's nothing to do here. comments.append('{0} already exists in the PATH.'.format(name)) return _format_comments(ret, comments) else: if index == old_index: comments.append( '{0} already exists in the PATH at index {1}.'.format( name, index ) ) return _format_comments(ret, comments) else: if __opts__['test']: ret['result'] = None comments.append( '{0} would be moved from index {1} to {2}.'.format( name, old_index, index ) ) ret['changes'] = _changes(old_index, index) return _format_comments(ret, comments) else: # Directory does not exist in PATH if __opts__['test']: ret['result'] = None comments.append( '{0} would be added to the PATH{1}.'.format( name, ' at index {0}'.format(index) if index is not None else '' ) ) ret['changes'] = _changes(old_index, index) return _format_comments(ret, comments) try: ret['result'] = __salt__['win_path.add'](name, index=index, rehash=False) except Exception as exc: comments.append('Encountered error: {0}.'.format(exc)) ret['result'] = False if ret['result']: ret['result'] = __salt__['win_path.rehash']() if not ret['result']: comments.append( 'Updated registry with new PATH, but failed to rehash.' ) new_index = _index() if ret['result']: # If we have not already determined a False result based on the return # from either win_path.add or win_path.rehash, check the new_index. 
ret['result'] = new_index is not None \ if index is None \ else index == new_index if index is not None and old_index is not None: comments.append( '{0} {1} from index {2} to {3}.'.format( 'Moved' if ret['result'] else 'Failed to move', name, old_index, index ) ) else: comments.append( '{0} {1} to the PATH{2}.'.format( 'Added' if ret['result'] else 'Failed to add', name, ' at index {0}'.format(index) if index is not None else '' ) ) if old_index != new_index: ret['changes'] = _changes(old_index, new_index) return _format_comments(ret, comments)
[ "def", "exists", "(", "name", ",", "index", "=", "None", ")", ":", "try", ":", "name", "=", "os", ".", "path", ".", "normpath", "(", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "name", ")", ")", "except", "TypeError", ":", "name", "=", "six", ".", "text_type", "(", "name", ")", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "if", "index", "is", "not", "None", "and", "not", "isinstance", "(", "index", ",", "six", ".", "integer_types", ")", ":", "ret", "[", "'comment'", "]", "=", "'Index must be an integer'", "ret", "[", "'result'", "]", "=", "False", "return", "ret", "def", "_get_path_lowercase", "(", ")", ":", "return", "[", "x", ".", "lower", "(", ")", "for", "x", "in", "__salt__", "[", "'win_path.get_path'", "]", "(", ")", "]", "def", "_index", "(", "path", "=", "None", ")", ":", "if", "path", "is", "None", ":", "path", "=", "_get_path_lowercase", "(", ")", "try", ":", "pos", "=", "path", ".", "index", "(", "name", ".", "lower", "(", ")", ")", "except", "ValueError", ":", "return", "None", "else", ":", "if", "index", "is", "not", "None", "and", "index", "<", "0", ":", "# Since a negative index was used, convert the index to a", "# negative index to make the changes dict easier to read, as", "# well as making comparisons manageable.", "return", "-", "(", "len", "(", "path", ")", "-", "pos", ")", "else", ":", "return", "pos", "def", "_changes", "(", "old", ",", "new", ")", ":", "return", "{", "'index'", ":", "{", "'old'", ":", "old", ",", "'new'", ":", "new", "}", "}", "pre_path", "=", "_get_path_lowercase", "(", ")", "num_dirs", "=", "len", "(", "pre_path", ")", "if", "index", "is", "not", "None", ":", "if", "index", ">", "num_dirs", ":", "ret", ".", "setdefault", "(", "'warnings'", ",", "[", "]", ")", ".", "append", "(", "'There are only {0} directories in the PATH, using an index '", "'of {0} instead of {1}.'", ".", "format", "(", "num_dirs", ",", "index", ")", ")", "index", "=", "num_dirs", "elif", "index", "<=", "-", "num_dirs", ":", "ret", ".", "setdefault", "(", "'warnings'", ",", "[", "]", ")", ".", "append", "(", "'There are only {0} directories in the PATH, using an index '", "'of 0 instead of {1}.'", ".", "format", "(", "num_dirs", ",", "index", ")", ")", "index", "=", "0", "old_index", "=", "_index", "(", "pre_path", ")", "comments", "=", "[", "]", "if", "old_index", "is", "not", "None", ":", "# Directory exists in PATH", "if", "index", "is", "None", ":", "# We're not enforcing the index, and the directory is in the PATH.", "# There's nothing to do here.", "comments", ".", "append", "(", "'{0} already exists in the PATH.'", ".", "format", "(", "name", ")", ")", "return", "_format_comments", "(", "ret", ",", "comments", ")", "else", ":", "if", "index", "==", "old_index", ":", "comments", ".", "append", "(", "'{0} already exists in the PATH at index {1}.'", ".", "format", "(", "name", ",", "index", ")", ")", "return", "_format_comments", "(", "ret", ",", "comments", ")", "else", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "comments", ".", "append", "(", "'{0} would be moved from index {1} to {2}.'", ".", "format", "(", "name", ",", "old_index", ",", "index", ")", ")", "ret", "[", "'changes'", "]", "=", "_changes", "(", "old_index", ",", "index", ")", "return", "_format_comments", "(", "ret", ",", "comments", ")", "else", ":", "# Directory does not exist in PATH", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", 
"'result'", "]", "=", "None", "comments", ".", "append", "(", "'{0} would be added to the PATH{1}.'", ".", "format", "(", "name", ",", "' at index {0}'", ".", "format", "(", "index", ")", "if", "index", "is", "not", "None", "else", "''", ")", ")", "ret", "[", "'changes'", "]", "=", "_changes", "(", "old_index", ",", "index", ")", "return", "_format_comments", "(", "ret", ",", "comments", ")", "try", ":", "ret", "[", "'result'", "]", "=", "__salt__", "[", "'win_path.add'", "]", "(", "name", ",", "index", "=", "index", ",", "rehash", "=", "False", ")", "except", "Exception", "as", "exc", ":", "comments", ".", "append", "(", "'Encountered error: {0}.'", ".", "format", "(", "exc", ")", ")", "ret", "[", "'result'", "]", "=", "False", "if", "ret", "[", "'result'", "]", ":", "ret", "[", "'result'", "]", "=", "__salt__", "[", "'win_path.rehash'", "]", "(", ")", "if", "not", "ret", "[", "'result'", "]", ":", "comments", ".", "append", "(", "'Updated registry with new PATH, but failed to rehash.'", ")", "new_index", "=", "_index", "(", ")", "if", "ret", "[", "'result'", "]", ":", "# If we have not already determined a False result based on the return", "# from either win_path.add or win_path.rehash, check the new_index.", "ret", "[", "'result'", "]", "=", "new_index", "is", "not", "None", "if", "index", "is", "None", "else", "index", "==", "new_index", "if", "index", "is", "not", "None", "and", "old_index", "is", "not", "None", ":", "comments", ".", "append", "(", "'{0} {1} from index {2} to {3}.'", ".", "format", "(", "'Moved'", "if", "ret", "[", "'result'", "]", "else", "'Failed to move'", ",", "name", ",", "old_index", ",", "index", ")", ")", "else", ":", "comments", ".", "append", "(", "'{0} {1} to the PATH{2}.'", ".", "format", "(", "'Added'", "if", "ret", "[", "'result'", "]", "else", "'Failed to add'", ",", "name", ",", "' at index {0}'", ".", "format", "(", "index", ")", "if", "index", "is", "not", "None", "else", "''", ")", ")", "if", "old_index", "!=", "new_index", ":", "ret", "[", "'changes'", "]", "=", "_changes", "(", "old_index", ",", "new_index", ")", "return", "_format_comments", "(", "ret", ",", "comments", ")" ]
Add the directory to the system PATH at index location index Position where the directory should be placed in the PATH. This is 0-indexed, so 0 means to prepend at the very start of the PATH. .. note:: If the index is not specified, and the directory needs to be added to the PATH, then the directory will be appended to the PATH, and this state will not enforce its location within the PATH. Examples: .. code-block:: yaml 'C:\\python27': win_path.exists 'C:\\sysinternals': win_path.exists: - index: 0 'C:\\mystuff': win_path.exists: - index: -1
[ "Add", "the", "directory", "to", "the", "system", "PATH", "at", "index", "location" ]
python
train
ultrabug/uhashring
uhashring/ring.py
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring.py#L140-L168
def _get(self, key, what): """Generic getter magic method. The node with the nearest but not less hash value is returned. :param key: the key to look for. :param what: the information to look for in, allowed values: - instance (default): associated node instance - nodename: node name - pos: index of the given key in the ring - tuple: ketama compatible (pos, name) tuple - weight: node weight """ if not self.runtime._ring: return None pos = self._get_pos(key) if what == 'pos': return pos nodename = self.runtime._ring[self.runtime._keys[pos]] if what in ['hostname', 'instance', 'port', 'weight']: return self.runtime._nodes[nodename][what] elif what == 'dict': return self.runtime._nodes[nodename] elif what == 'nodename': return nodename elif what == 'tuple': return (self.runtime._keys[pos], nodename)
[ "def", "_get", "(", "self", ",", "key", ",", "what", ")", ":", "if", "not", "self", ".", "runtime", ".", "_ring", ":", "return", "None", "pos", "=", "self", ".", "_get_pos", "(", "key", ")", "if", "what", "==", "'pos'", ":", "return", "pos", "nodename", "=", "self", ".", "runtime", ".", "_ring", "[", "self", ".", "runtime", ".", "_keys", "[", "pos", "]", "]", "if", "what", "in", "[", "'hostname'", ",", "'instance'", ",", "'port'", ",", "'weight'", "]", ":", "return", "self", ".", "runtime", ".", "_nodes", "[", "nodename", "]", "[", "what", "]", "elif", "what", "==", "'dict'", ":", "return", "self", ".", "runtime", ".", "_nodes", "[", "nodename", "]", "elif", "what", "==", "'nodename'", ":", "return", "nodename", "elif", "what", "==", "'tuple'", ":", "return", "(", "self", ".", "runtime", ".", "_keys", "[", "pos", "]", ",", "nodename", ")" ]
Generic getter magic method. The node with the nearest but not less hash value is returned. :param key: the key to look for. :param what: the information to look for in, allowed values: - instance (default): associated node instance - nodename: node name - pos: index of the given key in the ring - tuple: ketama compatible (pos, name) tuple - weight: node weight
[ "Generic", "getter", "magic", "method", "." ]
python
train
opendatateam/udata
udata/core/dataset/commands.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/commands.py#L30-L64
def licenses(source=DEFAULT_LICENSE_FILE): '''Feed the licenses from a JSON file''' if source.startswith('http'): json_licenses = requests.get(source).json() else: with open(source) as fp: json_licenses = json.load(fp) if len(json_licenses): log.info('Dropping existing licenses') License.drop_collection() for json_license in json_licenses: flags = [] for field, flag in FLAGS_MAP.items(): if json_license.get(field, False): flags.append(flag) license = License.objects.create( id=json_license['id'], title=json_license['title'], url=json_license['url'] or None, maintainer=json_license['maintainer'] or None, flags=flags, active=json_license.get('active', False), alternate_urls=json_license.get('alternate_urls', []), alternate_titles=json_license.get('alternate_titles', []), ) log.info('Added license "%s"', license.title) try: License.objects.get(id=DEFAULT_LICENSE['id']) except License.DoesNotExist: License.objects.create(**DEFAULT_LICENSE) log.info('Added license "%s"', DEFAULT_LICENSE['title']) success('Done')
[ "def", "licenses", "(", "source", "=", "DEFAULT_LICENSE_FILE", ")", ":", "if", "source", ".", "startswith", "(", "'http'", ")", ":", "json_licenses", "=", "requests", ".", "get", "(", "source", ")", ".", "json", "(", ")", "else", ":", "with", "open", "(", "source", ")", "as", "fp", ":", "json_licenses", "=", "json", ".", "load", "(", "fp", ")", "if", "len", "(", "json_licenses", ")", ":", "log", ".", "info", "(", "'Dropping existing licenses'", ")", "License", ".", "drop_collection", "(", ")", "for", "json_license", "in", "json_licenses", ":", "flags", "=", "[", "]", "for", "field", ",", "flag", "in", "FLAGS_MAP", ".", "items", "(", ")", ":", "if", "json_license", ".", "get", "(", "field", ",", "False", ")", ":", "flags", ".", "append", "(", "flag", ")", "license", "=", "License", ".", "objects", ".", "create", "(", "id", "=", "json_license", "[", "'id'", "]", ",", "title", "=", "json_license", "[", "'title'", "]", ",", "url", "=", "json_license", "[", "'url'", "]", "or", "None", ",", "maintainer", "=", "json_license", "[", "'maintainer'", "]", "or", "None", ",", "flags", "=", "flags", ",", "active", "=", "json_license", ".", "get", "(", "'active'", ",", "False", ")", ",", "alternate_urls", "=", "json_license", ".", "get", "(", "'alternate_urls'", ",", "[", "]", ")", ",", "alternate_titles", "=", "json_license", ".", "get", "(", "'alternate_titles'", ",", "[", "]", ")", ",", ")", "log", ".", "info", "(", "'Added license \"%s\"'", ",", "license", ".", "title", ")", "try", ":", "License", ".", "objects", ".", "get", "(", "id", "=", "DEFAULT_LICENSE", "[", "'id'", "]", ")", "except", "License", ".", "DoesNotExist", ":", "License", ".", "objects", ".", "create", "(", "*", "*", "DEFAULT_LICENSE", ")", "log", ".", "info", "(", "'Added license \"%s\"'", ",", "DEFAULT_LICENSE", "[", "'title'", "]", ")", "success", "(", "'Done'", ")" ]
Feed the licenses from a JSON file
[ "Feed", "the", "licenses", "from", "a", "JSON", "file" ]
python
train
titusjan/argos
argos/utils/cls.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/utils/cls.py#L373-L387
def check_class(obj, target_class, allow_none = False): """ Checks that the obj is a (sub)type of target_class. Raises a TypeError if this is not the case. :param obj: object whos type is to be checked :type obj: any type :param target_class: target type/class :type target_class: any class or type :param allow_none: if true obj may be None :type allow_none: boolean """ if not isinstance(obj, target_class): if not (allow_none and obj is None): raise TypeError("obj must be a of type {}, got: {}" .format(target_class, type(obj)))
[ "def", "check_class", "(", "obj", ",", "target_class", ",", "allow_none", "=", "False", ")", ":", "if", "not", "isinstance", "(", "obj", ",", "target_class", ")", ":", "if", "not", "(", "allow_none", "and", "obj", "is", "None", ")", ":", "raise", "TypeError", "(", "\"obj must be a of type {}, got: {}\"", ".", "format", "(", "target_class", ",", "type", "(", "obj", ")", ")", ")" ]
Checks that the obj is a (sub)type of target_class. Raises a TypeError if this is not the case. :param obj: object whos type is to be checked :type obj: any type :param target_class: target type/class :type target_class: any class or type :param allow_none: if true obj may be None :type allow_none: boolean
[ "Checks", "that", "the", "obj", "is", "a", "(", "sub", ")", "type", "of", "target_class", ".", "Raises", "a", "TypeError", "if", "this", "is", "not", "the", "case", "." ]
python
train
shreyaspotnis/rampage
rampage/ramps.py
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L270-L277
def del_unused_keyframes(self): """Scans through list of keyframes in the channel and removes those which are not in self.key_frame_list.""" skl = self.key_frame_list.sorted_key_list() unused_keys = [k for k in self.dct['keys'] if k not in skl] for k in unused_keys: del self.dct['keys'][k]
[ "def", "del_unused_keyframes", "(", "self", ")", ":", "skl", "=", "self", ".", "key_frame_list", ".", "sorted_key_list", "(", ")", "unused_keys", "=", "[", "k", "for", "k", "in", "self", ".", "dct", "[", "'keys'", "]", "if", "k", "not", "in", "skl", "]", "for", "k", "in", "unused_keys", ":", "del", "self", ".", "dct", "[", "'keys'", "]", "[", "k", "]" ]
Scans through list of keyframes in the channel and removes those which are not in self.key_frame_list.
[ "Scans", "through", "list", "of", "keyframes", "in", "the", "channel", "and", "removes", "those", "which", "are", "not", "in", "self", ".", "key_frame_list", "." ]
python
train
kytos/kytos-utils
kytos/utils/napps.py
https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/napps.py#L325-L337
def _extract(filename): """Extract package to a temporary folder. Return: pathlib.Path: Temp dir with package contents. """ random_string = '{:0d}'.format(randint(0, 10**6)) tmp = '/tmp/kytos-napp-' + Path(filename).stem + '-' + random_string os.mkdir(tmp) with tarfile.open(filename, 'r:xz') as tar: tar.extractall(tmp) return Path(tmp)
[ "def", "_extract", "(", "filename", ")", ":", "random_string", "=", "'{:0d}'", ".", "format", "(", "randint", "(", "0", ",", "10", "**", "6", ")", ")", "tmp", "=", "'/tmp/kytos-napp-'", "+", "Path", "(", "filename", ")", ".", "stem", "+", "'-'", "+", "random_string", "os", ".", "mkdir", "(", "tmp", ")", "with", "tarfile", ".", "open", "(", "filename", ",", "'r:xz'", ")", "as", "tar", ":", "tar", ".", "extractall", "(", "tmp", ")", "return", "Path", "(", "tmp", ")" ]
Extract package to a temporary folder. Return: pathlib.Path: Temp dir with package contents.
[ "Extract", "package", "to", "a", "temporary", "folder", "." ]
python
train
tensorpack/tensorpack
tensorpack/utils/concurrency.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/utils/concurrency.py#L208-L219
def mask_sigint(): """ Returns: If called in main thread, returns a context where ``SIGINT`` is ignored, and yield True. Otherwise yield False. """ if is_main_thread(): sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN) yield True signal.signal(signal.SIGINT, sigint_handler) else: yield False
[ "def", "mask_sigint", "(", ")", ":", "if", "is_main_thread", "(", ")", ":", "sigint_handler", "=", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "signal", ".", "SIG_IGN", ")", "yield", "True", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "sigint_handler", ")", "else", ":", "yield", "False" ]
Returns: If called in main thread, returns a context where ``SIGINT`` is ignored, and yield True. Otherwise yield False.
[ "Returns", ":", "If", "called", "in", "main", "thread", "returns", "a", "context", "where", "SIGINT", "is", "ignored", "and", "yield", "True", ".", "Otherwise", "yield", "False", "." ]
python
train
davenquinn/Attitude
attitude/error/axes.py
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/error/axes.py#L67-L77
def angular_errors(hyp_axes): """ Minimum and maximum angular errors corresponding to 1st and 2nd axes of PCA distribution. Ordered as [minimum, maximum] angular error. """ # Not quite sure why this is sqrt but it is empirically correct ax = N.sqrt(hyp_axes) return tuple(N.arctan2(ax[-1],ax[:-1]))
[ "def", "angular_errors", "(", "hyp_axes", ")", ":", "# Not quite sure why this is sqrt but it is empirically correct", "ax", "=", "N", ".", "sqrt", "(", "hyp_axes", ")", "return", "tuple", "(", "N", ".", "arctan2", "(", "ax", "[", "-", "1", "]", ",", "ax", "[", ":", "-", "1", "]", ")", ")" ]
Minimum and maximum angular errors corresponding to 1st and 2nd axes of PCA distribution. Ordered as [minimum, maximum] angular error.
[ "Minimum", "and", "maximum", "angular", "errors", "corresponding", "to", "1st", "and", "2nd", "axes", "of", "PCA", "distribution", "." ]
python
train
boriel/zxbasic
zxbparser.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L468-L515
def p_start(p): """ start : program """ global ast, data_ast user_data = make_label('.ZXBASIC_USER_DATA', 0) make_label('.ZXBASIC_USER_DATA_LEN', 0) if PRINT_IS_USED: zxbpp.ID_TABLE.define('___PRINT_IS_USED___', 1) # zxbasmpp.ID_TABLE.define('___PRINT_IS_USED___', 1) if zxblex.IN_STATE: p.type = 'NEWLINE' p_error(p) sys.exit(1) ast = p[0] = p[1] __end = make_sentence('END', make_number(0, lineno=p.lexer.lineno)) if not is_null(ast): ast.appendChild(__end) else: ast = __end SYMBOL_TABLE.check_labels() SYMBOL_TABLE.check_classes() if gl.has_errors: return __DEBUG__('Checking pending labels', 1) if not api.check.check_pending_labels(ast): return __DEBUG__('Checking pending calls', 1) if not api.check.check_pending_calls(): return data_ast = make_sentence('BLOCK', user_data) # Appends variable declarations at the end. for var in SYMBOL_TABLE.vars_: data_ast.appendChild(make_var_declaration(var)) # Appends arrays declarations at the end. for var in SYMBOL_TABLE.arrays: data_ast.appendChild(make_array_declaration(var))
[ "def", "p_start", "(", "p", ")", ":", "global", "ast", ",", "data_ast", "user_data", "=", "make_label", "(", "'.ZXBASIC_USER_DATA'", ",", "0", ")", "make_label", "(", "'.ZXBASIC_USER_DATA_LEN'", ",", "0", ")", "if", "PRINT_IS_USED", ":", "zxbpp", ".", "ID_TABLE", ".", "define", "(", "'___PRINT_IS_USED___'", ",", "1", ")", "# zxbasmpp.ID_TABLE.define('___PRINT_IS_USED___', 1)", "if", "zxblex", ".", "IN_STATE", ":", "p", ".", "type", "=", "'NEWLINE'", "p_error", "(", "p", ")", "sys", ".", "exit", "(", "1", ")", "ast", "=", "p", "[", "0", "]", "=", "p", "[", "1", "]", "__end", "=", "make_sentence", "(", "'END'", ",", "make_number", "(", "0", ",", "lineno", "=", "p", ".", "lexer", ".", "lineno", ")", ")", "if", "not", "is_null", "(", "ast", ")", ":", "ast", ".", "appendChild", "(", "__end", ")", "else", ":", "ast", "=", "__end", "SYMBOL_TABLE", ".", "check_labels", "(", ")", "SYMBOL_TABLE", ".", "check_classes", "(", ")", "if", "gl", ".", "has_errors", ":", "return", "__DEBUG__", "(", "'Checking pending labels'", ",", "1", ")", "if", "not", "api", ".", "check", ".", "check_pending_labels", "(", "ast", ")", ":", "return", "__DEBUG__", "(", "'Checking pending calls'", ",", "1", ")", "if", "not", "api", ".", "check", ".", "check_pending_calls", "(", ")", ":", "return", "data_ast", "=", "make_sentence", "(", "'BLOCK'", ",", "user_data", ")", "# Appends variable declarations at the end.", "for", "var", "in", "SYMBOL_TABLE", ".", "vars_", ":", "data_ast", ".", "appendChild", "(", "make_var_declaration", "(", "var", ")", ")", "# Appends arrays declarations at the end.", "for", "var", "in", "SYMBOL_TABLE", ".", "arrays", ":", "data_ast", ".", "appendChild", "(", "make_array_declaration", "(", "var", ")", ")" ]
start : program
[ "start", ":", "program" ]
python
train
ouroboroscoding/format-oc-python
FormatOC/__init__.py
https://github.com/ouroboroscoding/format-oc-python/blob/c160b46fe4ff2c92333c776991c712de23991225/FormatOC/__init__.py#L836-L859
def toDict(self): """To Dict Returns the Hashed Node as a dictionary in the same format as is used in constructing it Returns: dict """ # Init the dictionary we will return dRet = {} # Add the hash key dRet['__hash__'] = self._key.toDict() # Get the parents dict and add it to the return dRet.update(super(HashNode,self).toDict()) # Get the nodes dict and also add it to the return dRet.update(self._node.toDict()) # Return return dRet
[ "def", "toDict", "(", "self", ")", ":", "# Init the dictionary we will return", "dRet", "=", "{", "}", "# Add the hash key", "dRet", "[", "'__hash__'", "]", "=", "self", ".", "_key", ".", "toDict", "(", ")", "# Get the parents dict and add it to the return", "dRet", ".", "update", "(", "super", "(", "HashNode", ",", "self", ")", ".", "toDict", "(", ")", ")", "# Get the nodes dict and also add it to the return", "dRet", ".", "update", "(", "self", ".", "_node", ".", "toDict", "(", ")", ")", "# Return", "return", "dRet" ]
To Dict Returns the Hashed Node as a dictionary in the same format as is used in constructing it Returns: dict
[ "To", "Dict" ]
python
train
edx/edx-drf-extensions
edx_rest_framework_extensions/auth/bearer/authentication.py
https://github.com/edx/edx-drf-extensions/blob/2f4c1682b8471bf894ea566a43fd9f91ba219f83/edx_rest_framework_extensions/auth/bearer/authentication.py#L85-L115
def get_user_info(self, token): """ Retrieves the user info from the OAuth provider. Arguments: token (str): OAuth2 access token. Returns: dict Raises: UserInfoRetrievalFailed: Retrieval of user info from the remote server failed. """ url = self.get_user_info_url() try: headers = {'Authorization': 'Bearer {}'.format(token)} response = requests.get(url, headers=headers) except requests.RequestException: logger.exception('Failed to retrieve user info due to a request exception.') raise UserInfoRetrievalFailed if response.status_code == 200: return self.process_user_info_response(response.json()) else: msg = 'Failed to retrieve user info. Server [{server}] responded with status [{status}].'.format( server=url, status=response.status_code ) raise UserInfoRetrievalFailed(msg)
[ "def", "get_user_info", "(", "self", ",", "token", ")", ":", "url", "=", "self", ".", "get_user_info_url", "(", ")", "try", ":", "headers", "=", "{", "'Authorization'", ":", "'Bearer {}'", ".", "format", "(", "token", ")", "}", "response", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ")", "except", "requests", ".", "RequestException", ":", "logger", ".", "exception", "(", "'Failed to retrieve user info due to a request exception.'", ")", "raise", "UserInfoRetrievalFailed", "if", "response", ".", "status_code", "==", "200", ":", "return", "self", ".", "process_user_info_response", "(", "response", ".", "json", "(", ")", ")", "else", ":", "msg", "=", "'Failed to retrieve user info. Server [{server}] responded with status [{status}].'", ".", "format", "(", "server", "=", "url", ",", "status", "=", "response", ".", "status_code", ")", "raise", "UserInfoRetrievalFailed", "(", "msg", ")" ]
Retrieves the user info from the OAuth provider. Arguments: token (str): OAuth2 access token. Returns: dict Raises: UserInfoRetrievalFailed: Retrieval of user info from the remote server failed.
[ "Retrieves", "the", "user", "info", "from", "the", "OAuth", "provider", "." ]
python
train
AshleySetter/optoanalysis
optoanalysis/optoanalysis/optoanalysis.py
https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L2881-L2924
def IIR_filter_design(CentralFreq, bandwidth, transitionWidth, SampleFreq, GainStop=40, GainPass=0.01): """ Function to calculate the coefficients of an IIR filter, IMPORTANT NOTE: make_butterworth_bandpass_b_a and make_butterworth_b_a can produce IIR filters with higher sample rates and are prefereable due to this. Parameters ---------- CentralFreq : float Central frequency of the IIR filter to be designed bandwidth : float The width of the passband to be created about the central frequency transitionWidth : float The width of the transition band between the pass-band and stop-band SampleFreq : float The sample frequency (rate) of the data to be filtered GainStop : float, optional The dB of attenuation within the stopband (i.e. outside the passband) GainPass : float, optional The dB attenuation inside the passband (ideally close to 0 for a bandpass filter) Returns ------- b : ndarray coefficients multiplying the current and past inputs (feedforward coefficients) a : ndarray coefficients multiplying the past outputs (feedback coefficients) """ NyquistFreq = SampleFreq / 2 if (CentralFreq + bandwidth / 2 + transitionWidth > NyquistFreq): raise ValueError( "Need a higher Sample Frequency for this Central Freq, Bandwidth and transition Width") CentralFreqNormed = CentralFreq / NyquistFreq bandwidthNormed = bandwidth / NyquistFreq transitionWidthNormed = transitionWidth / NyquistFreq bandpass = [CentralFreqNormed - bandwidthNormed / 2, CentralFreqNormed + bandwidthNormed / 2] bandstop = [CentralFreqNormed - bandwidthNormed / 2 - transitionWidthNormed, CentralFreqNormed + bandwidthNormed / 2 + transitionWidthNormed] print(bandpass, bandstop) b, a = scipy.signal.iirdesign(bandpass, bandstop, GainPass, GainStop) return b, a
[ "def", "IIR_filter_design", "(", "CentralFreq", ",", "bandwidth", ",", "transitionWidth", ",", "SampleFreq", ",", "GainStop", "=", "40", ",", "GainPass", "=", "0.01", ")", ":", "NyquistFreq", "=", "SampleFreq", "/", "2", "if", "(", "CentralFreq", "+", "bandwidth", "/", "2", "+", "transitionWidth", ">", "NyquistFreq", ")", ":", "raise", "ValueError", "(", "\"Need a higher Sample Frequency for this Central Freq, Bandwidth and transition Width\"", ")", "CentralFreqNormed", "=", "CentralFreq", "/", "NyquistFreq", "bandwidthNormed", "=", "bandwidth", "/", "NyquistFreq", "transitionWidthNormed", "=", "transitionWidth", "/", "NyquistFreq", "bandpass", "=", "[", "CentralFreqNormed", "-", "bandwidthNormed", "/", "2", ",", "CentralFreqNormed", "+", "bandwidthNormed", "/", "2", "]", "bandstop", "=", "[", "CentralFreqNormed", "-", "bandwidthNormed", "/", "2", "-", "transitionWidthNormed", ",", "CentralFreqNormed", "+", "bandwidthNormed", "/", "2", "+", "transitionWidthNormed", "]", "print", "(", "bandpass", ",", "bandstop", ")", "b", ",", "a", "=", "scipy", ".", "signal", ".", "iirdesign", "(", "bandpass", ",", "bandstop", ",", "GainPass", ",", "GainStop", ")", "return", "b", ",", "a" ]
Function to calculate the coefficients of an IIR filter, IMPORTANT NOTE: make_butterworth_bandpass_b_a and make_butterworth_b_a can produce IIR filters with higher sample rates and are prefereable due to this. Parameters ---------- CentralFreq : float Central frequency of the IIR filter to be designed bandwidth : float The width of the passband to be created about the central frequency transitionWidth : float The width of the transition band between the pass-band and stop-band SampleFreq : float The sample frequency (rate) of the data to be filtered GainStop : float, optional The dB of attenuation within the stopband (i.e. outside the passband) GainPass : float, optional The dB attenuation inside the passband (ideally close to 0 for a bandpass filter) Returns ------- b : ndarray coefficients multiplying the current and past inputs (feedforward coefficients) a : ndarray coefficients multiplying the past outputs (feedback coefficients)
[ "Function", "to", "calculate", "the", "coefficients", "of", "an", "IIR", "filter", "IMPORTANT", "NOTE", ":", "make_butterworth_bandpass_b_a", "and", "make_butterworth_b_a", "can", "produce", "IIR", "filters", "with", "higher", "sample", "rates", "and", "are", "prefereable", "due", "to", "this", "." ]
python
train
materialsproject/pymatgen
pymatgen/analysis/local_env.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/local_env.py#L2042-L2109
def get_q2(self, thetas=None, phis=None): """ Calculates the value of the bond orientational order parameter of weight l=2. If the function is called with non-empty lists of polar and azimuthal angles the corresponding trigonometric terms are computed afresh. Otherwise, it is expected that the compute_trigonometric_terms function has been just called. Args: thetas ([float]): polar angles of all neighbors in radians. phis ([float]): azimuth angles of all neighbors in radians. Returns: float: bond orientational order parameter of weight l=2 corresponding to the input angles thetas and phis. """ if thetas is not None and phis is not None: self.compute_trigonometric_terms(thetas, phis) nnn = len(self._pow_sin_t[1]) nnn_range = range(nnn) sqrt_15_2pi = sqrt(15.0 / (2.0 * pi)) sqrt_5_pi = sqrt(5.0 / pi) pre_y_2_2 = [0.25 * sqrt_15_2pi * val for val in self._pow_sin_t[2]] pre_y_2_1 = [0.5 * sqrt_15_2pi * val[0] * val[1] for val in zip(self._pow_sin_t[1], self._pow_cos_t[1])] acc = 0.0 # Y_2_-2 real = imag = 0.0 for i in nnn_range: real += pre_y_2_2[i] * self._cos_n_p[2][i] imag -= pre_y_2_2[i] * self._sin_n_p[2][i] acc += (real * real + imag * imag) # Y_2_-1 real = imag = 0.0 for i in nnn_range: real += pre_y_2_1[i] * self._cos_n_p[1][i] imag -= pre_y_2_1[i] * self._sin_n_p[1][i] acc += (real * real + imag * imag) # Y_2_0 real = imag = 0.0 for i in nnn_range: real += 0.25 * sqrt_5_pi * (3.0 * self._pow_cos_t[2][i] - 1.0) acc += (real * real) # Y_2_1 real = imag = 0.0 for i in nnn_range: real -= pre_y_2_1[i] * self._cos_n_p[1][i] imag -= pre_y_2_1[i] * self._sin_n_p[1][i] acc += (real * real + imag * imag) # Y_2_2 real = imag = 0.0 for i in nnn_range: real += pre_y_2_2[i] * self._cos_n_p[2][i] imag += pre_y_2_2[i] * self._sin_n_p[2][i] acc += (real * real + imag * imag) q2 = sqrt(4.0 * pi * acc / (5.0 * float(nnn * nnn))) return q2
[ "def", "get_q2", "(", "self", ",", "thetas", "=", "None", ",", "phis", "=", "None", ")", ":", "if", "thetas", "is", "not", "None", "and", "phis", "is", "not", "None", ":", "self", ".", "compute_trigonometric_terms", "(", "thetas", ",", "phis", ")", "nnn", "=", "len", "(", "self", ".", "_pow_sin_t", "[", "1", "]", ")", "nnn_range", "=", "range", "(", "nnn", ")", "sqrt_15_2pi", "=", "sqrt", "(", "15.0", "/", "(", "2.0", "*", "pi", ")", ")", "sqrt_5_pi", "=", "sqrt", "(", "5.0", "/", "pi", ")", "pre_y_2_2", "=", "[", "0.25", "*", "sqrt_15_2pi", "*", "val", "for", "val", "in", "self", ".", "_pow_sin_t", "[", "2", "]", "]", "pre_y_2_1", "=", "[", "0.5", "*", "sqrt_15_2pi", "*", "val", "[", "0", "]", "*", "val", "[", "1", "]", "for", "val", "in", "zip", "(", "self", ".", "_pow_sin_t", "[", "1", "]", ",", "self", ".", "_pow_cos_t", "[", "1", "]", ")", "]", "acc", "=", "0.0", "# Y_2_-2", "real", "=", "imag", "=", "0.0", "for", "i", "in", "nnn_range", ":", "real", "+=", "pre_y_2_2", "[", "i", "]", "*", "self", ".", "_cos_n_p", "[", "2", "]", "[", "i", "]", "imag", "-=", "pre_y_2_2", "[", "i", "]", "*", "self", ".", "_sin_n_p", "[", "2", "]", "[", "i", "]", "acc", "+=", "(", "real", "*", "real", "+", "imag", "*", "imag", ")", "# Y_2_-1", "real", "=", "imag", "=", "0.0", "for", "i", "in", "nnn_range", ":", "real", "+=", "pre_y_2_1", "[", "i", "]", "*", "self", ".", "_cos_n_p", "[", "1", "]", "[", "i", "]", "imag", "-=", "pre_y_2_1", "[", "i", "]", "*", "self", ".", "_sin_n_p", "[", "1", "]", "[", "i", "]", "acc", "+=", "(", "real", "*", "real", "+", "imag", "*", "imag", ")", "# Y_2_0", "real", "=", "imag", "=", "0.0", "for", "i", "in", "nnn_range", ":", "real", "+=", "0.25", "*", "sqrt_5_pi", "*", "(", "3.0", "*", "self", ".", "_pow_cos_t", "[", "2", "]", "[", "i", "]", "-", "1.0", ")", "acc", "+=", "(", "real", "*", "real", ")", "# Y_2_1", "real", "=", "imag", "=", "0.0", "for", "i", "in", "nnn_range", ":", "real", "-=", "pre_y_2_1", "[", "i", "]", "*", "self", ".", "_cos_n_p", "[", "1", "]", "[", "i", "]", "imag", "-=", "pre_y_2_1", "[", "i", "]", "*", "self", ".", "_sin_n_p", "[", "1", "]", "[", "i", "]", "acc", "+=", "(", "real", "*", "real", "+", "imag", "*", "imag", ")", "# Y_2_2", "real", "=", "imag", "=", "0.0", "for", "i", "in", "nnn_range", ":", "real", "+=", "pre_y_2_2", "[", "i", "]", "*", "self", ".", "_cos_n_p", "[", "2", "]", "[", "i", "]", "imag", "+=", "pre_y_2_2", "[", "i", "]", "*", "self", ".", "_sin_n_p", "[", "2", "]", "[", "i", "]", "acc", "+=", "(", "real", "*", "real", "+", "imag", "*", "imag", ")", "q2", "=", "sqrt", "(", "4.0", "*", "pi", "*", "acc", "/", "(", "5.0", "*", "float", "(", "nnn", "*", "nnn", ")", ")", ")", "return", "q2" ]
Calculates the value of the bond orientational order parameter of weight l=2. If the function is called with non-empty lists of polar and azimuthal angles the corresponding trigonometric terms are computed afresh. Otherwise, it is expected that the compute_trigonometric_terms function has been just called. Args: thetas ([float]): polar angles of all neighbors in radians. phis ([float]): azimuth angles of all neighbors in radians. Returns: float: bond orientational order parameter of weight l=2 corresponding to the input angles thetas and phis.
[ "Calculates", "the", "value", "of", "the", "bond", "orientational", "order", "parameter", "of", "weight", "l", "=", "2", ".", "If", "the", "function", "is", "called", "with", "non", "-", "empty", "lists", "of", "polar", "and", "azimuthal", "angles", "the", "corresponding", "trigonometric", "terms", "are", "computed", "afresh", ".", "Otherwise", "it", "is", "expected", "that", "the", "compute_trigonometric_terms", "function", "has", "been", "just", "called", "." ]
python
train
onnx/onnxmltools
onnxmltools/utils/main.py
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/utils/main.py#L78-L96
def set_model_version(model, version): """ Sets the version of the ONNX model. :param model: instance of an ONNX model :param version: integer containing the version of the model Example: :: from onnxmltools.utils import set_model_version onnx_model = load_model("SqueezeNet.onnx") set_model_version(onnx_model, 1) """ if model is None or not isinstance(model, onnx_proto.ModelProto): raise ValueError("Model is not a valid ONNX model.") if not convert_utils.is_numeric_type(version): raise ValueError("Version must be a numeric type.") model.model_version = version
[ "def", "set_model_version", "(", "model", ",", "version", ")", ":", "if", "model", "is", "None", "or", "not", "isinstance", "(", "model", ",", "onnx_proto", ".", "ModelProto", ")", ":", "raise", "ValueError", "(", "\"Model is not a valid ONNX model.\"", ")", "if", "not", "convert_utils", ".", "is_numeric_type", "(", "version", ")", ":", "raise", "ValueError", "(", "\"Version must be a numeric type.\"", ")", "model", ".", "model_version", "=", "version" ]
Sets the version of the ONNX model. :param model: instance of an ONNX model :param version: integer containing the version of the model Example: :: from onnxmltools.utils import set_model_version onnx_model = load_model("SqueezeNet.onnx") set_model_version(onnx_model, 1)
[ "Sets", "the", "version", "of", "the", "ONNX", "model", "." ]
python
train
saltstack/salt-pylint
saltpylint/strings.py
https://github.com/saltstack/salt-pylint/blob/524a419d3bfc7dbd91c9c85040bc64935a275b24/saltpylint/strings.py#L247-L258
def process_non_raw_string_token(self, prefix, string_body, start_row): ''' check for bad escapes in a non-raw string. prefix: lowercase string of eg 'ur' string prefix markers. string_body: the un-parsed body of the string, not including the quote marks. start_row: integer line number in the source. ''' if 'u' in prefix: if string_body.find('\\0') != -1: self.add_message('null-byte-unicode-literal', line=start_row)
[ "def", "process_non_raw_string_token", "(", "self", ",", "prefix", ",", "string_body", ",", "start_row", ")", ":", "if", "'u'", "in", "prefix", ":", "if", "string_body", ".", "find", "(", "'\\\\0'", ")", "!=", "-", "1", ":", "self", ".", "add_message", "(", "'null-byte-unicode-literal'", ",", "line", "=", "start_row", ")" ]
check for bad escapes in a non-raw string. prefix: lowercase string of eg 'ur' string prefix markers. string_body: the un-parsed body of the string, not including the quote marks. start_row: integer line number in the source.
[ "check", "for", "bad", "escapes", "in", "a", "non", "-", "raw", "string", "." ]
python
train
kislyuk/aegea
aegea/packages/github3/repos/repo.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/repos/repo.py#L314-L338
def archive(self, format, path='', ref='master'): """Get the tarball or zipball archive for this repo at ref. See: http://developer.github.com/v3/repos/contents/#get-archive-link :param str format: (required), accepted values: ('tarball', 'zipball') :param path: (optional), path where the file should be saved to, default is the filename provided in the headers and will be written in the current directory. it can take a file-like object as well :type path: str, file :param str ref: (optional) :returns: bool -- True if successful, False otherwise """ resp = None if format in ('tarball', 'zipball'): url = self._build_url(format, ref, base_url=self._api) resp = self._get(url, allow_redirects=True, stream=True) if resp and self._boolean(resp, 200, 404): stream_response_to_file(resp, path) return True return False
[ "def", "archive", "(", "self", ",", "format", ",", "path", "=", "''", ",", "ref", "=", "'master'", ")", ":", "resp", "=", "None", "if", "format", "in", "(", "'tarball'", ",", "'zipball'", ")", ":", "url", "=", "self", ".", "_build_url", "(", "format", ",", "ref", ",", "base_url", "=", "self", ".", "_api", ")", "resp", "=", "self", ".", "_get", "(", "url", ",", "allow_redirects", "=", "True", ",", "stream", "=", "True", ")", "if", "resp", "and", "self", ".", "_boolean", "(", "resp", ",", "200", ",", "404", ")", ":", "stream_response_to_file", "(", "resp", ",", "path", ")", "return", "True", "return", "False" ]
Get the tarball or zipball archive for this repo at ref. See: http://developer.github.com/v3/repos/contents/#get-archive-link :param str format: (required), accepted values: ('tarball', 'zipball') :param path: (optional), path where the file should be saved to, default is the filename provided in the headers and will be written in the current directory. it can take a file-like object as well :type path: str, file :param str ref: (optional) :returns: bool -- True if successful, False otherwise
[ "Get", "the", "tarball", "or", "zipball", "archive", "for", "this", "repo", "at", "ref", "." ]
python
train
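A short, hedged usage sketch for the archive method above; `repo` is assumed to be a Repository instance obtained elsewhere from this github3 wrapper, and the output filename is invented.

# `repo` is assumed to be an already-constructed Repository object.
if repo.archive('tarball', path='myproject-master.tar.gz', ref='master'):
    print('tarball saved to myproject-master.tar.gz')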
CalebBell/fpi
fpi/saltation.py
https://github.com/CalebBell/fpi/blob/6e6da3b9d0c17e10cc0886c97bc1bb8aeba2cca5/fpi/saltation.py#L455-L518
def Geldart_Ling(mp, rhog, D, mug): r'''Calculates saltation velocity of the gas for pneumatic conveying, according to [1]_ as described in [2]_ and [3]_. if Gs/D < 47000, use equation 1, otherwise use equation 2. .. math:: V_{salt} = 1.5G_s^{0.465}D^{-0.01} \mu^{0.055}\rho_f^{-0.42} V_{salt} = 8.7G_s^{0.302}D^{0.153} \mu^{0.055}\rho_f^{-0.42} Fr_s = 15\mu^{0.25}\left(\frac{d_p}{D}\right)^{0.1} Fr_s = \frac{V_{salt}}{\sqrt{gD}} \mu = \frac{m_p}{\frac{\pi}{4}D^2V \rho_f} G_s = \frac{m_p}{A} Parameters ---------- mp : float Solid mass flow rate, [kg/s] rhog : float Gas density, [kg/m^3] D : float Diameter of pipe, [m] mug : float Gas viscosity, [Pa*S] Returns ------- V : float Saltation velocity of gas, [m/s] Notes ----- Model is rearanged to be explicit in terms of saltation velocity internally. Examples -------- >>> Geldart_Ling(1., 1.2, 0.1, 2E-5) 7.467495862402707 References ---------- .. [1] Weber, M. 1981. Principles of hydraulic and pneumatic conveying in pipes. Bulk Solids Handling 1: 57-63. .. [2] Rabinovich, Evgeny, and Haim Kalman. "Threshold Velocities of Particle-Fluid Flows in Horizontal Pipes and Ducts: Literature Review." Reviews in Chemical Engineering 27, no. 5-6 (January 1, 2011). doi:10.1515/REVCE.2011.011. .. [3] Gomes, L. M., and A. L. Amarante Mesquita. "On the Prediction of Pickup and Saltation Velocities in Pneumatic Conveying." Brazilian Journal of Chemical Engineering 31, no. 1 (March 2014): 35-46. doi:10.1590/S0104-66322014000100005 ''' Gs = mp/(pi/4*D**2) if Gs/D <= 47000: V = 1.5*Gs**0.465*D**-0.01*mug**0.055*rhog**-0.42 else: V = 8.7*Gs**0.302*D**0.153*mug**0.055*rhog**-0.42 return V
[ "def", "Geldart_Ling", "(", "mp", ",", "rhog", ",", "D", ",", "mug", ")", ":", "Gs", "=", "mp", "/", "(", "pi", "/", "4", "*", "D", "**", "2", ")", "if", "Gs", "/", "D", "<=", "47000", ":", "V", "=", "1.5", "*", "Gs", "**", "0.465", "*", "D", "**", "-", "0.01", "*", "mug", "**", "0.055", "*", "rhog", "**", "-", "0.42", "else", ":", "V", "=", "8.7", "*", "Gs", "**", "0.302", "*", "D", "**", "0.153", "*", "mug", "**", "0.055", "*", "rhog", "**", "-", "0.42", "return", "V" ]
r'''Calculates saltation velocity of the gas for pneumatic conveying, according to [1]_ as described in [2]_ and [3]_. if Gs/D < 47000, use equation 1, otherwise use equation 2. .. math:: V_{salt} = 1.5G_s^{0.465}D^{-0.01} \mu^{0.055}\rho_f^{-0.42} V_{salt} = 8.7G_s^{0.302}D^{0.153} \mu^{0.055}\rho_f^{-0.42} Fr_s = 15\mu^{0.25}\left(\frac{d_p}{D}\right)^{0.1} Fr_s = \frac{V_{salt}}{\sqrt{gD}} \mu = \frac{m_p}{\frac{\pi}{4}D^2V \rho_f} G_s = \frac{m_p}{A} Parameters ---------- mp : float Solid mass flow rate, [kg/s] rhog : float Gas density, [kg/m^3] D : float Diameter of pipe, [m] mug : float Gas viscosity, [Pa*S] Returns ------- V : float Saltation velocity of gas, [m/s] Notes ----- Model is rearanged to be explicit in terms of saltation velocity internally. Examples -------- >>> Geldart_Ling(1., 1.2, 0.1, 2E-5) 7.467495862402707 References ---------- .. [1] Weber, M. 1981. Principles of hydraulic and pneumatic conveying in pipes. Bulk Solids Handling 1: 57-63. .. [2] Rabinovich, Evgeny, and Haim Kalman. "Threshold Velocities of Particle-Fluid Flows in Horizontal Pipes and Ducts: Literature Review." Reviews in Chemical Engineering 27, no. 5-6 (January 1, 2011). doi:10.1515/REVCE.2011.011. .. [3] Gomes, L. M., and A. L. Amarante Mesquita. "On the Prediction of Pickup and Saltation Velocities in Pneumatic Conveying." Brazilian Journal of Chemical Engineering 31, no. 1 (March 2014): 35-46. doi:10.1590/S0104-66322014000100005
[ "r", "Calculates", "saltation", "velocity", "of", "the", "gas", "for", "pneumatic", "conveying", "according", "to", "[", "1", "]", "_", "as", "described", "in", "[", "2", "]", "_", "and", "[", "3", "]", "_", "." ]
python
train
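The doctest value in the Geldart_Ling record can be reproduced step by step; the figures in the comments are rounded and use only quantities already given in the docstring.

from math import pi

mp, rhog, D, mug = 1.0, 1.2, 0.1, 2e-5
Gs = mp / (pi / 4 * D**2)            # solids flux, roughly 127.3 kg/(m^2*s)
print(Gs / D <= 47000)               # True (about 1273), so the first correlation applies
V = 1.5 * Gs**0.465 * D**-0.01 * mug**0.055 * rhog**-0.42
print(round(V, 6))                   # 7.467496 m/s, matching the doctest above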
jrigden/pyPodcastParser
pyPodcastParser/Podcast.py
https://github.com/jrigden/pyPodcastParser/blob/b21e027bb56ec77986d76fc1990f4e420c6de869/pyPodcastParser/Podcast.py#L315-L321
def set_itunes_complete(self): """Parses complete from itunes tags and sets value""" try: self.itunes_complete = self.soup.find('itunes:complete').string self.itunes_complete = self.itunes_complete.lower() except AttributeError: self.itunes_complete = None
[ "def", "set_itunes_complete", "(", "self", ")", ":", "try", ":", "self", ".", "itunes_complete", "=", "self", ".", "soup", ".", "find", "(", "'itunes:complete'", ")", ".", "string", "self", ".", "itunes_complete", "=", "self", ".", "itunes_complete", ".", "lower", "(", ")", "except", "AttributeError", ":", "self", ".", "itunes_complete", "=", "None" ]
Parses complete from itunes tags and sets value
[ "Parses", "complete", "from", "itunes", "tags", "and", "sets", "value" ]
python
train
pantsbuild/pants
src/python/pants/init/extension_loader.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/init/extension_loader.py#L40-L96
def load_plugins(build_configuration, plugins, working_set): """Load named plugins from the current working_set into the supplied build_configuration "Loading" a plugin here refers to calling registration methods -- it is assumed each plugin is already on the path and an error will be thrown if it is not. Plugins should define their entrypoints in the `pantsbuild.plugin` group when configuring their distribution. Like source backends, the `build_file_aliases`, `global_subsystems` and `register_goals` methods are called if those entry points are defined. * Plugins are loaded in the order they are provided. * This is important as loading can add, remove or replace existing tasks installed by other plugins. If a plugin needs to assert that another plugin is registered before it, it can define an entrypoint "load_after" which can return a list of plugins which must have been loaded before it can be loaded. This does not change the order or what plugins are loaded in any way -- it is purely an assertion to guard against misconfiguration. :param BuildConfiguration build_configuration: The BuildConfiguration (for adding aliases). :param list<str> plugins: A list of plugin names optionally with versions, in requirement format. eg ['widgetpublish', 'widgetgen==1.2']. :param WorkingSet working_set: A pkg_resources.WorkingSet to load plugins from. """ loaded = {} for plugin in plugins: req = Requirement.parse(plugin) dist = working_set.find(req) if not dist: raise PluginNotFound('Could not find plugin: {}'.format(req)) entries = dist.get_entry_map().get('pantsbuild.plugin', {}) if 'load_after' in entries: deps = entries['load_after'].load()() for dep_name in deps: dep = Requirement.parse(dep_name) if dep.key not in loaded: raise PluginLoadOrderError('Plugin {0} must be loaded after {1}'.format(plugin, dep)) if 'build_file_aliases' in entries: aliases = entries['build_file_aliases'].load()() build_configuration.register_aliases(aliases) if 'register_goals' in entries: entries['register_goals'].load()() if 'global_subsystems' in entries: subsystems = entries['global_subsystems'].load()() build_configuration.register_optionables(subsystems) if 'rules' in entries: rules = entries['rules'].load()() build_configuration.register_rules(rules) loaded[dist.as_requirement().key] = dist
[ "def", "load_plugins", "(", "build_configuration", ",", "plugins", ",", "working_set", ")", ":", "loaded", "=", "{", "}", "for", "plugin", "in", "plugins", ":", "req", "=", "Requirement", ".", "parse", "(", "plugin", ")", "dist", "=", "working_set", ".", "find", "(", "req", ")", "if", "not", "dist", ":", "raise", "PluginNotFound", "(", "'Could not find plugin: {}'", ".", "format", "(", "req", ")", ")", "entries", "=", "dist", ".", "get_entry_map", "(", ")", ".", "get", "(", "'pantsbuild.plugin'", ",", "{", "}", ")", "if", "'load_after'", "in", "entries", ":", "deps", "=", "entries", "[", "'load_after'", "]", ".", "load", "(", ")", "(", ")", "for", "dep_name", "in", "deps", ":", "dep", "=", "Requirement", ".", "parse", "(", "dep_name", ")", "if", "dep", ".", "key", "not", "in", "loaded", ":", "raise", "PluginLoadOrderError", "(", "'Plugin {0} must be loaded after {1}'", ".", "format", "(", "plugin", ",", "dep", ")", ")", "if", "'build_file_aliases'", "in", "entries", ":", "aliases", "=", "entries", "[", "'build_file_aliases'", "]", ".", "load", "(", ")", "(", ")", "build_configuration", ".", "register_aliases", "(", "aliases", ")", "if", "'register_goals'", "in", "entries", ":", "entries", "[", "'register_goals'", "]", ".", "load", "(", ")", "(", ")", "if", "'global_subsystems'", "in", "entries", ":", "subsystems", "=", "entries", "[", "'global_subsystems'", "]", ".", "load", "(", ")", "(", ")", "build_configuration", ".", "register_optionables", "(", "subsystems", ")", "if", "'rules'", "in", "entries", ":", "rules", "=", "entries", "[", "'rules'", "]", ".", "load", "(", ")", "(", ")", "build_configuration", ".", "register_rules", "(", "rules", ")", "loaded", "[", "dist", ".", "as_requirement", "(", ")", ".", "key", "]", "=", "dist" ]
Load named plugins from the current working_set into the supplied build_configuration "Loading" a plugin here refers to calling registration methods -- it is assumed each plugin is already on the path and an error will be thrown if it is not. Plugins should define their entrypoints in the `pantsbuild.plugin` group when configuring their distribution. Like source backends, the `build_file_aliases`, `global_subsystems` and `register_goals` methods are called if those entry points are defined. * Plugins are loaded in the order they are provided. * This is important as loading can add, remove or replace existing tasks installed by other plugins. If a plugin needs to assert that another plugin is registered before it, it can define an entrypoint "load_after" which can return a list of plugins which must have been loaded before it can be loaded. This does not change the order or what plugins are loaded in any way -- it is purely an assertion to guard against misconfiguration. :param BuildConfiguration build_configuration: The BuildConfiguration (for adding aliases). :param list<str> plugins: A list of plugin names optionally with versions, in requirement format. eg ['widgetpublish', 'widgetgen==1.2']. :param WorkingSet working_set: A pkg_resources.WorkingSet to load plugins from.
[ "Load", "named", "plugins", "from", "the", "current", "working_set", "into", "the", "supplied", "build_configuration" ]
python
train
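To make the entry-point mechanism in load_plugins concrete, here is a sketch of what a plugin distribution might declare in its setup.py; the project and module names are invented, but the `pantsbuild.plugin` group and the entry-point keys come straight from the docstring and code above.

from setuptools import setup

setup(
    name='widgetpublish',                     # hypothetical plugin named in the docstring example
    version='1.2',
    packages=['widgetpublish'],
    entry_points={
        'pantsbuild.plugin': [
            'load_after = widgetpublish.register:load_after',
            'build_file_aliases = widgetpublish.register:build_file_aliases',
            'register_goals = widgetpublish.register:register_goals',
        ],
    },
)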
insightindustry/validator-collection
validator_collection/validators.py
https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/validators.py#L1883-L1968
def writeable(value, allow_empty = False, **kwargs): """Validate that ``value`` is a path to a writeable file. .. caution:: This validator does **NOT** work correctly on a Windows file system. This is due to the vagaries of how Windows manages its file system and the various ways in which it can manage file permission. If called on a Windows file system, this validator will raise :class:`NotImplementedError() <python:NotImplementedError>`. .. caution:: **Use of this validator is an anti-pattern and should be used with caution.** Validating the writability of a file *before* attempting to write to it exposes your code to a bug called `TOCTOU <https://en.wikipedia.org/wiki/Time_of_check_to_time_of_use>`_. This particular class of bug can expose your code to **security vulnerabilities** and so this validator should only be used if you are an advanced user. A better pattern to use when writing to file is to apply the principle of EAFP ("easier to ask forgiveness than permission"), and simply attempt to write to the file using a ``try ... except`` block: .. code-block:: python try: with open('path/to/filename.txt', mode = 'a') as file_object: # write to file here except (OSError, IOError) as error: # Handle an error if unable to write. .. note:: This validator relies on :func:`os.access() <python:os.access>` to check whether ``value`` is writeable. This function has certain limitations, most especially that: * It will **ignore** file-locking (yielding a false-positive) if the file is locked. * It focuses on *local operating system permissions*, which means if trying to access a path over a network you might get a false positive or false negative (because network paths may have more complicated authentication methods). :param value: The path to a file on the local filesystem whose writeability is to be validated. :type value: Path-like object :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :returns: Validated absolute path or :obj:`None <python:None>` :rtype: Path-like object or :obj:`None <python:None>` :raises EmptyValueError: if ``allow_empty`` is ``False`` and ``value`` is empty :raises NotImplementedError: if used on a Windows system :raises NotPathlikeError: if ``value`` is not a path-like object :raises NotWriteableError: if ``value`` cannot be opened for writing """ if not value and not allow_empty: raise errors.EmptyValueError('value (%s) was empty' % value) elif not value: return None value = path(value, force_run = True) if sys.platform in ['win32', 'cygwin']: raise NotImplementedError('not supported on Windows') is_valid = os.access(value, mode = os.W_OK) if not is_valid: raise errors.NotWriteableError('writing not allowed for file at %s' % value) return value
[ "def", "writeable", "(", "value", ",", "allow_empty", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "value", "and", "not", "allow_empty", ":", "raise", "errors", ".", "EmptyValueError", "(", "'value (%s) was empty'", "%", "value", ")", "elif", "not", "value", ":", "return", "None", "value", "=", "path", "(", "value", ",", "force_run", "=", "True", ")", "if", "sys", ".", "platform", "in", "[", "'win32'", ",", "'cygwin'", "]", ":", "raise", "NotImplementedError", "(", "'not supported on Windows'", ")", "is_valid", "=", "os", ".", "access", "(", "value", ",", "mode", "=", "os", ".", "W_OK", ")", "if", "not", "is_valid", ":", "raise", "errors", ".", "NotWriteableError", "(", "'writing not allowed for file at %s'", "%", "value", ")", "return", "value" ]
Validate that ``value`` is a path to a writeable file. .. caution:: This validator does **NOT** work correctly on a Windows file system. This is due to the vagaries of how Windows manages its file system and the various ways in which it can manage file permission. If called on a Windows file system, this validator will raise :class:`NotImplementedError() <python:NotImplementedError>`. .. caution:: **Use of this validator is an anti-pattern and should be used with caution.** Validating the writability of a file *before* attempting to write to it exposes your code to a bug called `TOCTOU <https://en.wikipedia.org/wiki/Time_of_check_to_time_of_use>`_. This particular class of bug can expose your code to **security vulnerabilities** and so this validator should only be used if you are an advanced user. A better pattern to use when writing to file is to apply the principle of EAFP ("easier to ask forgiveness than permission"), and simply attempt to write to the file using a ``try ... except`` block: .. code-block:: python try: with open('path/to/filename.txt', mode = 'a') as file_object: # write to file here except (OSError, IOError) as error: # Handle an error if unable to write. .. note:: This validator relies on :func:`os.access() <python:os.access>` to check whether ``value`` is writeable. This function has certain limitations, most especially that: * It will **ignore** file-locking (yielding a false-positive) if the file is locked. * It focuses on *local operating system permissions*, which means if trying to access a path over a network you might get a false positive or false negative (because network paths may have more complicated authentication methods). :param value: The path to a file on the local filesystem whose writeability is to be validated. :type value: Path-like object :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :returns: Validated absolute path or :obj:`None <python:None>` :rtype: Path-like object or :obj:`None <python:None>` :raises EmptyValueError: if ``allow_empty`` is ``False`` and ``value`` is empty :raises NotImplementedError: if used on a Windows system :raises NotPathlikeError: if ``value`` is not a path-like object :raises NotWriteableError: if ``value`` cannot be opened for writing
[ "Validate", "that", "value", "is", "a", "path", "to", "a", "writeable", "file", "." ]
python
train
gitpython-developers/GitPython
git/remote.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/remote.py#L751-L792
def fetch(self, refspec=None, progress=None, **kwargs): """Fetch the latest changes for this remote :param refspec: A "refspec" is used by fetch and push to describe the mapping between remote ref and local ref. They are combined with a colon in the format <src>:<dst>, preceded by an optional plus sign, +. For example: git fetch $URL refs/heads/master:refs/heads/origin means "grab the master branch head from the $URL and store it as my origin branch head". And git push $URL refs/heads/master:refs/heads/to-upstream means "publish my master branch head as to-upstream branch at $URL". See also git-push(1). Taken from the git manual Fetch supports multiple refspecs (as the underlying git-fetch does) - supplying a list rather than a string for 'refspec' will make use of this facility. :param progress: See 'push' method :param kwargs: Additional arguments to be passed to git-fetch :return: IterableList(FetchInfo, ...) list of FetchInfo instances providing detailed information about the fetch results :note: As fetch does not provide progress information to non-ttys, we cannot make it available here unfortunately as in the 'push' method.""" if refspec is None: # No argument refspec, then ensure the repo's config has a fetch refspec. self._assert_refspec() kwargs = add_progress(kwargs, self.repo.git, progress) if isinstance(refspec, list): args = refspec else: args = [refspec] proc = self.repo.git.fetch(self, *args, as_process=True, with_stdout=False, universal_newlines=True, v=True, **kwargs) res = self._get_fetch_info_from_stderr(proc, progress) if hasattr(self.repo.odb, 'update_cache'): self.repo.odb.update_cache() return res
[ "def", "fetch", "(", "self", ",", "refspec", "=", "None", ",", "progress", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "refspec", "is", "None", ":", "# No argument refspec, then ensure the repo's config has a fetch refspec.", "self", ".", "_assert_refspec", "(", ")", "kwargs", "=", "add_progress", "(", "kwargs", ",", "self", ".", "repo", ".", "git", ",", "progress", ")", "if", "isinstance", "(", "refspec", ",", "list", ")", ":", "args", "=", "refspec", "else", ":", "args", "=", "[", "refspec", "]", "proc", "=", "self", ".", "repo", ".", "git", ".", "fetch", "(", "self", ",", "*", "args", ",", "as_process", "=", "True", ",", "with_stdout", "=", "False", ",", "universal_newlines", "=", "True", ",", "v", "=", "True", ",", "*", "*", "kwargs", ")", "res", "=", "self", ".", "_get_fetch_info_from_stderr", "(", "proc", ",", "progress", ")", "if", "hasattr", "(", "self", ".", "repo", ".", "odb", ",", "'update_cache'", ")", ":", "self", ".", "repo", ".", "odb", ".", "update_cache", "(", ")", "return", "res" ]
Fetch the latest changes for this remote :param refspec: A "refspec" is used by fetch and push to describe the mapping between remote ref and local ref. They are combined with a colon in the format <src>:<dst>, preceded by an optional plus sign, +. For example: git fetch $URL refs/heads/master:refs/heads/origin means "grab the master branch head from the $URL and store it as my origin branch head". And git push $URL refs/heads/master:refs/heads/to-upstream means "publish my master branch head as to-upstream branch at $URL". See also git-push(1). Taken from the git manual Fetch supports multiple refspecs (as the underlying git-fetch does) - supplying a list rather than a string for 'refspec' will make use of this facility. :param progress: See 'push' method :param kwargs: Additional arguments to be passed to git-fetch :return: IterableList(FetchInfo, ...) list of FetchInfo instances providing detailed information about the fetch results :note: As fetch does not provide progress information to non-ttys, we cannot make it available here unfortunately as in the 'push' method.
[ "Fetch", "the", "latest", "changes", "for", "this", "remote" ]
python
train
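A small usage sketch for Remote.fetch above; the clone path is a placeholder and the refspec mirrors the one quoted in the docstring.

import git

repo = git.Repo('/path/to/clone')            # hypothetical local working copy
origin = repo.remotes.origin

# A single refspec; a list of refspecs is also accepted, as the docstring notes.
for info in origin.fetch('refs/heads/master:refs/remotes/origin/master'):
    print(info.ref, info.commit.hexsha)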
uber/tchannel-python
tchannel/_queue.py
https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/_queue.py#L159-L190
def get(self): """Gets the next item from the queue. Returns a Future that resolves to the next item once it is available. """ io_loop = IOLoop.current() new_get = Future() with self._lock: get, self._get = self._get, new_get answer = Future() def _on_node(future): if future.exception(): # pragma: no cover (never happens) return answer.set_exc_info(future.exc_info()) node = future.result() value = node.value new_hole, node.next = node.next, None new_get.set_result(new_hole) answer.set_result(value) def _on_get(future): if future.exception(): # pragma: no cover (never happens) return answer.set_exc_info(future.exc_info()) hole = future.result() io_loop.add_future(hole, _on_node) io_loop.add_future(get, _on_get) return answer
[ "def", "get", "(", "self", ")", ":", "io_loop", "=", "IOLoop", ".", "current", "(", ")", "new_get", "=", "Future", "(", ")", "with", "self", ".", "_lock", ":", "get", ",", "self", ".", "_get", "=", "self", ".", "_get", ",", "new_get", "answer", "=", "Future", "(", ")", "def", "_on_node", "(", "future", ")", ":", "if", "future", ".", "exception", "(", ")", ":", "# pragma: no cover (never happens)", "return", "answer", ".", "set_exc_info", "(", "future", ".", "exc_info", "(", ")", ")", "node", "=", "future", ".", "result", "(", ")", "value", "=", "node", ".", "value", "new_hole", ",", "node", ".", "next", "=", "node", ".", "next", ",", "None", "new_get", ".", "set_result", "(", "new_hole", ")", "answer", ".", "set_result", "(", "value", ")", "def", "_on_get", "(", "future", ")", ":", "if", "future", ".", "exception", "(", ")", ":", "# pragma: no cover (never happens)", "return", "answer", ".", "set_exc_info", "(", "future", ".", "exc_info", "(", ")", ")", "hole", "=", "future", ".", "result", "(", ")", "io_loop", ".", "add_future", "(", "hole", ",", "_on_node", ")", "io_loop", ".", "add_future", "(", "get", ",", "_on_get", ")", "return", "answer" ]
Gets the next item from the queue. Returns a Future that resolves to the next item once it is available.
[ "Gets", "the", "next", "item", "from", "the", "queue", "." ]
python
train
gem/oq-engine
openquake/calculators/extract.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/extract.py#L710-L726
def build_damage_array(data, damage_dt): """ :param data: an array of shape (A, L, 1, D) or (A, L, 2, D) :param damage_dt: a damage composite data type loss_type -> states :returns: a composite array of length N and dtype damage_dt """ A, L, MS, D = data.shape dmg = numpy.zeros(A, damage_dt) for a in range(A): for l, lt in enumerate(damage_dt.names): std = any(f for f in damage_dt[lt].names if f.endswith('_stdv')) if MS == 1 or not std: # there is only the mean value dmg[lt][a] = tuple(data[a, l, 0]) else: # there are both mean and stddev # data[a, l].T has shape (D, 2) dmg[lt][a] = tuple(numpy.concatenate(data[a, l].T)) return dmg
[ "def", "build_damage_array", "(", "data", ",", "damage_dt", ")", ":", "A", ",", "L", ",", "MS", ",", "D", "=", "data", ".", "shape", "dmg", "=", "numpy", ".", "zeros", "(", "A", ",", "damage_dt", ")", "for", "a", "in", "range", "(", "A", ")", ":", "for", "l", ",", "lt", "in", "enumerate", "(", "damage_dt", ".", "names", ")", ":", "std", "=", "any", "(", "f", "for", "f", "in", "damage_dt", "[", "lt", "]", ".", "names", "if", "f", ".", "endswith", "(", "'_stdv'", ")", ")", "if", "MS", "==", "1", "or", "not", "std", ":", "# there is only the mean value", "dmg", "[", "lt", "]", "[", "a", "]", "=", "tuple", "(", "data", "[", "a", ",", "l", ",", "0", "]", ")", "else", ":", "# there are both mean and stddev", "# data[a, l].T has shape (D, 2)", "dmg", "[", "lt", "]", "[", "a", "]", "=", "tuple", "(", "numpy", ".", "concatenate", "(", "data", "[", "a", ",", "l", "]", ".", "T", ")", ")", "return", "dmg" ]
:param data: an array of shape (A, L, 1, D) or (A, L, 2, D) :param damage_dt: a damage composite data type loss_type -> states :returns: a composite array of length N and dtype damage_dt
[ ":", "param", "data", ":", "an", "array", "of", "shape", "(", "A", "L", "1", "D", ")", "or", "(", "A", "L", "2", "D", ")", ":", "param", "damage_dt", ":", "a", "damage", "composite", "data", "type", "loss_type", "-", ">", "states", ":", "returns", ":", "a", "composite", "array", "of", "length", "N", "and", "dtype", "damage_dt" ]
python
train
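An illustrative call of build_damage_array above; the loss type, damage states and data values are invented, and only mean values are used so the MS == 1 branch is exercised.

import numpy

states = [('no_damage_mean', numpy.float32),
          ('moderate_mean', numpy.float32),
          ('collapse_mean', numpy.float32)]           # no '_stdv' fields -> mean-only branch
damage_dt = numpy.dtype([('structural', states)])     # one loss type with D = 3 states

data = numpy.random.random((4, 1, 1, 3))               # shape (A=4, L=1, MS=1, D=3)
dmg = build_damage_array(data, damage_dt)
print(dmg.shape, dmg.dtype.names)                      # (4,) ('structural',)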
bionikspoon/pureyaml
pureyaml/_compat/total_ordering.py
https://github.com/bionikspoon/pureyaml/blob/784830b907ca14525c4cecdb6ae35306f6f8a877/pureyaml/_compat/total_ordering.py#L62-L67
def _lt_from_gt(self, other): """Return a < b. Computed by @total_ordering from (not a > b) and (a != b).""" op_result = self.__gt__(other) if op_result is NotImplemented: return NotImplemented return not op_result and self != other
[ "def", "_lt_from_gt", "(", "self", ",", "other", ")", ":", "op_result", "=", "self", ".", "__gt__", "(", "other", ")", "if", "op_result", "is", "NotImplemented", ":", "return", "NotImplemented", "return", "not", "op_result", "and", "self", "!=", "other" ]
Return a < b. Computed by @total_ordering from (not a > b) and (a != b).
[ "Return", "a", "<", "b", ".", "Computed", "by" ]
python
train
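A sketch of how a total_ordering backport would attach this helper to a class that only defines __gt__ and __eq__; the Version class is invented for illustration.

class Version(object):
    def __init__(self, n):
        self.n = n

    def __eq__(self, other):
        return self.n == other.n

    def __gt__(self, other):
        return self.n > other.n

    # what @total_ordering effectively does for the missing operator:
    __lt__ = _lt_from_gt

assert Version(1) < Version(2)          # (not 1 > 2) and (1 != 2) -> True
assert not (Version(3) < Version(3))    # equal objects compare neither < nor >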
spyder-ide/spyder
spyder/app/mainwindow.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/app/mainwindow.py#L2043-L2059
def load_last_visible_toolbars(self): """Loads the last visible toolbars from the .ini file.""" toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[]) if toolbars_names: dic = {} for toolbar in self.toolbarslist: dic[toolbar.objectName()] = toolbar toolbars = [] for name in toolbars_names: if name in dic: toolbars.append(dic[name]) self.visible_toolbars = toolbars else: self.get_visible_toolbars() self._update_show_toolbars_action()
[ "def", "load_last_visible_toolbars", "(", "self", ")", ":", "toolbars_names", "=", "CONF", ".", "get", "(", "'main'", ",", "'last_visible_toolbars'", ",", "default", "=", "[", "]", ")", "if", "toolbars_names", ":", "dic", "=", "{", "}", "for", "toolbar", "in", "self", ".", "toolbarslist", ":", "dic", "[", "toolbar", ".", "objectName", "(", ")", "]", "=", "toolbar", "toolbars", "=", "[", "]", "for", "name", "in", "toolbars_names", ":", "if", "name", "in", "dic", ":", "toolbars", ".", "append", "(", "dic", "[", "name", "]", ")", "self", ".", "visible_toolbars", "=", "toolbars", "else", ":", "self", ".", "get_visible_toolbars", "(", ")", "self", ".", "_update_show_toolbars_action", "(", ")" ]
Loads the last visible toolbars from the .ini file.
[ "Loads", "the", "last", "visible", "toolbars", "from", "the", ".", "ini", "file", "." ]
python
train
swharden/PyOriginTools
PyOriginTools/workbook.py
https://github.com/swharden/PyOriginTools/blob/536fb8e11234ffdc27e26b1800e0358179ca7d26/PyOriginTools/workbook.py#L162-L186
def push(self,bookName=None,sheetName=None,overwrite=False): """pull this OR.SHEET into a real book/sheet in Origin""" # tons of validation if bookName: self.bookName=bookName if sheetName: self.sheetName=sheetName if not self.sheetName in OR.sheetNames(bookName): print("can't find [%s]%s!"%(bookName,sheetName)) return # clear out out sheet by deleting EVERY column poSheet=OR.getSheet(bookName,sheetName) # CPyWorksheetPageI if not poSheet: print("WARNING: didn't get posheet",poSheet,bookName,sheetName) for poCol in [x for x in poSheet if x.IsValid()]: poCol.Destroy() # create columns and assign properties to each for i in range(len(self.colNames)): poSheet.InsertCol(i,self.colNames[i]) poSheet.Columns(i).SetName(self.colNames[i]) poSheet.Columns(i).SetLongName(self.colDesc[i]) poSheet.Columns(i).SetUnits(self.colUnits[i]) poSheet.Columns(i).SetComments(self.colComments[i]) poSheet.Columns(i).SetType(self.colTypes[i]) poSheet.Columns(i).SetData(self.colData[i])
[ "def", "push", "(", "self", ",", "bookName", "=", "None", ",", "sheetName", "=", "None", ",", "overwrite", "=", "False", ")", ":", "# tons of validation", "if", "bookName", ":", "self", ".", "bookName", "=", "bookName", "if", "sheetName", ":", "self", ".", "sheetName", "=", "sheetName", "if", "not", "self", ".", "sheetName", "in", "OR", ".", "sheetNames", "(", "bookName", ")", ":", "print", "(", "\"can't find [%s]%s!\"", "%", "(", "bookName", ",", "sheetName", ")", ")", "return", "# clear out out sheet by deleting EVERY column", "poSheet", "=", "OR", ".", "getSheet", "(", "bookName", ",", "sheetName", ")", "# CPyWorksheetPageI", "if", "not", "poSheet", ":", "print", "(", "\"WARNING: didn't get posheet\"", ",", "poSheet", ",", "bookName", ",", "sheetName", ")", "for", "poCol", "in", "[", "x", "for", "x", "in", "poSheet", "if", "x", ".", "IsValid", "(", ")", "]", ":", "poCol", ".", "Destroy", "(", ")", "# create columns and assign properties to each", "for", "i", "in", "range", "(", "len", "(", "self", ".", "colNames", ")", ")", ":", "poSheet", ".", "InsertCol", "(", "i", ",", "self", ".", "colNames", "[", "i", "]", ")", "poSheet", ".", "Columns", "(", "i", ")", ".", "SetName", "(", "self", ".", "colNames", "[", "i", "]", ")", "poSheet", ".", "Columns", "(", "i", ")", ".", "SetLongName", "(", "self", ".", "colDesc", "[", "i", "]", ")", "poSheet", ".", "Columns", "(", "i", ")", ".", "SetUnits", "(", "self", ".", "colUnits", "[", "i", "]", ")", "poSheet", ".", "Columns", "(", "i", ")", ".", "SetComments", "(", "self", ".", "colComments", "[", "i", "]", ")", "poSheet", ".", "Columns", "(", "i", ")", ".", "SetType", "(", "self", ".", "colTypes", "[", "i", "]", ")", "poSheet", ".", "Columns", "(", "i", ")", ".", "SetData", "(", "self", ".", "colData", "[", "i", "]", ")" ]
push this OR.SHEET into a real book/sheet in Origin
[ "pull", "this", "OR", ".", "SHEET", "into", "a", "real", "book", "/", "sheet", "in", "Origin" ]
python
train
xapple/plumbing
plumbing/databases/access_database.py
https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/databases/access_database.py#L134-L138
def table_as_df(self, table_name): """Return a table as a dataframe.""" self.table_must_exist(table_name) query = "SELECT * FROM `%s`" % table_name.lower() return pandas.read_sql(query, self.own_conn)
[ "def", "table_as_df", "(", "self", ",", "table_name", ")", ":", "self", ".", "table_must_exist", "(", "table_name", ")", "query", "=", "\"SELECT * FROM `%s`\"", "%", "table_name", ".", "lower", "(", ")", "return", "pandas", ".", "read_sql", "(", "query", ",", "self", ".", "own_conn", ")" ]
Return a table as a dataframe.
[ "Return", "a", "table", "as", "a", "dataframe", "." ]
python
train
jaywink/federation
federation/entities/diaspora/mappers.py
https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/entities/diaspora/mappers.py#L158-L233
def transform_attributes(attrs, cls): """Transform some attribute keys. :param attrs: Properties from the XML :type attrs: dict :param cls: Class of the entity :type cls: class """ transformed = {} for key, value in attrs.items(): if value is None: value = "" if key == "text": transformed["raw_content"] = value elif key == "author": if cls == DiasporaProfile: # Diaspora Profile XML message contains no GUID. We need the guid. Fetch it. profile = retrieve_and_parse_profile(value) transformed['id'] = value transformed["guid"] = profile.guid else: transformed["actor_id"] = value transformed["handle"] = value elif key == 'guid': if cls != DiasporaProfile: transformed["id"] = value transformed["guid"] = value elif key in ("root_author", "recipient"): transformed["target_id"] = value transformed["target_handle"] = value elif key in ("target_guid", "root_guid", "parent_guid"): transformed["target_id"] = value transformed["target_guid"] = value elif key in ("first_name", "last_name"): values = [attrs.get('first_name'), attrs.get('last_name')] values = [v for v in values if v] transformed["name"] = " ".join(values) elif key == "image_url": if "image_urls" not in transformed: transformed["image_urls"] = {} transformed["image_urls"]["large"] = value elif key == "image_url_small": if "image_urls" not in transformed: transformed["image_urls"] = {} transformed["image_urls"]["small"] = value elif key == "image_url_medium": if "image_urls" not in transformed: transformed["image_urls"] = {} transformed["image_urls"]["medium"] = value elif key == "tag_string": if value: transformed["tag_list"] = value.replace("#", "").split(" ") elif key == "bio": transformed["raw_content"] = value elif key == "searchable": transformed["public"] = True if value == "true" else False elif key in ["target_type"] and cls == DiasporaRetraction: transformed["entity_type"] = DiasporaRetraction.entity_type_from_remote(value) elif key == "remote_photo_path": transformed["remote_path"] = value elif key == "remote_photo_name": transformed["remote_name"] = value elif key == "status_message_guid": transformed["linked_guid"] = value transformed["linked_type"] = "Post" elif key == "author_signature": transformed["signature"] = value elif key in BOOLEAN_KEYS: transformed[key] = True if value == "true" else False elif key in DATETIME_KEYS: transformed[key] = datetime.strptime(value, "%Y-%m-%dT%H:%M:%SZ") elif key in INTEGER_KEYS: transformed[key] = int(value) else: transformed[key] = value return transformed
[ "def", "transform_attributes", "(", "attrs", ",", "cls", ")", ":", "transformed", "=", "{", "}", "for", "key", ",", "value", "in", "attrs", ".", "items", "(", ")", ":", "if", "value", "is", "None", ":", "value", "=", "\"\"", "if", "key", "==", "\"text\"", ":", "transformed", "[", "\"raw_content\"", "]", "=", "value", "elif", "key", "==", "\"author\"", ":", "if", "cls", "==", "DiasporaProfile", ":", "# Diaspora Profile XML message contains no GUID. We need the guid. Fetch it.", "profile", "=", "retrieve_and_parse_profile", "(", "value", ")", "transformed", "[", "'id'", "]", "=", "value", "transformed", "[", "\"guid\"", "]", "=", "profile", ".", "guid", "else", ":", "transformed", "[", "\"actor_id\"", "]", "=", "value", "transformed", "[", "\"handle\"", "]", "=", "value", "elif", "key", "==", "'guid'", ":", "if", "cls", "!=", "DiasporaProfile", ":", "transformed", "[", "\"id\"", "]", "=", "value", "transformed", "[", "\"guid\"", "]", "=", "value", "elif", "key", "in", "(", "\"root_author\"", ",", "\"recipient\"", ")", ":", "transformed", "[", "\"target_id\"", "]", "=", "value", "transformed", "[", "\"target_handle\"", "]", "=", "value", "elif", "key", "in", "(", "\"target_guid\"", ",", "\"root_guid\"", ",", "\"parent_guid\"", ")", ":", "transformed", "[", "\"target_id\"", "]", "=", "value", "transformed", "[", "\"target_guid\"", "]", "=", "value", "elif", "key", "in", "(", "\"first_name\"", ",", "\"last_name\"", ")", ":", "values", "=", "[", "attrs", ".", "get", "(", "'first_name'", ")", ",", "attrs", ".", "get", "(", "'last_name'", ")", "]", "values", "=", "[", "v", "for", "v", "in", "values", "if", "v", "]", "transformed", "[", "\"name\"", "]", "=", "\" \"", ".", "join", "(", "values", ")", "elif", "key", "==", "\"image_url\"", ":", "if", "\"image_urls\"", "not", "in", "transformed", ":", "transformed", "[", "\"image_urls\"", "]", "=", "{", "}", "transformed", "[", "\"image_urls\"", "]", "[", "\"large\"", "]", "=", "value", "elif", "key", "==", "\"image_url_small\"", ":", "if", "\"image_urls\"", "not", "in", "transformed", ":", "transformed", "[", "\"image_urls\"", "]", "=", "{", "}", "transformed", "[", "\"image_urls\"", "]", "[", "\"small\"", "]", "=", "value", "elif", "key", "==", "\"image_url_medium\"", ":", "if", "\"image_urls\"", "not", "in", "transformed", ":", "transformed", "[", "\"image_urls\"", "]", "=", "{", "}", "transformed", "[", "\"image_urls\"", "]", "[", "\"medium\"", "]", "=", "value", "elif", "key", "==", "\"tag_string\"", ":", "if", "value", ":", "transformed", "[", "\"tag_list\"", "]", "=", "value", ".", "replace", "(", "\"#\"", ",", "\"\"", ")", ".", "split", "(", "\" \"", ")", "elif", "key", "==", "\"bio\"", ":", "transformed", "[", "\"raw_content\"", "]", "=", "value", "elif", "key", "==", "\"searchable\"", ":", "transformed", "[", "\"public\"", "]", "=", "True", "if", "value", "==", "\"true\"", "else", "False", "elif", "key", "in", "[", "\"target_type\"", "]", "and", "cls", "==", "DiasporaRetraction", ":", "transformed", "[", "\"entity_type\"", "]", "=", "DiasporaRetraction", ".", "entity_type_from_remote", "(", "value", ")", "elif", "key", "==", "\"remote_photo_path\"", ":", "transformed", "[", "\"remote_path\"", "]", "=", "value", "elif", "key", "==", "\"remote_photo_name\"", ":", "transformed", "[", "\"remote_name\"", "]", "=", "value", "elif", "key", "==", "\"status_message_guid\"", ":", "transformed", "[", "\"linked_guid\"", "]", "=", "value", "transformed", "[", "\"linked_type\"", "]", "=", "\"Post\"", "elif", "key", "==", "\"author_signature\"", ":", 
"transformed", "[", "\"signature\"", "]", "=", "value", "elif", "key", "in", "BOOLEAN_KEYS", ":", "transformed", "[", "key", "]", "=", "True", "if", "value", "==", "\"true\"", "else", "False", "elif", "key", "in", "DATETIME_KEYS", ":", "transformed", "[", "key", "]", "=", "datetime", ".", "strptime", "(", "value", ",", "\"%Y-%m-%dT%H:%M:%SZ\"", ")", "elif", "key", "in", "INTEGER_KEYS", ":", "transformed", "[", "key", "]", "=", "int", "(", "value", ")", "else", ":", "transformed", "[", "key", "]", "=", "value", "return", "transformed" ]
Transform some attribute keys. :param attrs: Properties from the XML :type attrs: dict :param cls: Class of the entity :type cls: class
[ "Transform", "some", "attribute", "keys", "." ]
python
train
langloisjp/tstore
tstore/pgtablestorage.py
https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/pgtablestorage.py#L472-L492
def parse_dburl(dburl): """Parse DB URL. Return (scheme, user, password, host, dbname) pg://user:pass@host/dbname >>> parse_dburl("pg://user:pass@host/name") ('pg', 'user', 'pass', 'host', 'name') >>> parse_dburl("dbm:///dbfile") ('dbm', '', '', '', 'dbfile') >>> parse_dburl("pg://user:@/name") ('pg', 'user', '', '', 'name') """ res = urlparse.urlparse(dburl) if '@' in res.netloc: (creds, host) = res.netloc.split('@') else: creds = ':' host = res.netloc (user, password) = creds.split(':') return (res.scheme, user, password, host, res.path[1:])
[ "def", "parse_dburl", "(", "dburl", ")", ":", "res", "=", "urlparse", ".", "urlparse", "(", "dburl", ")", "if", "'@'", "in", "res", ".", "netloc", ":", "(", "creds", ",", "host", ")", "=", "res", ".", "netloc", ".", "split", "(", "'@'", ")", "else", ":", "creds", "=", "':'", "host", "=", "res", ".", "netloc", "(", "user", ",", "password", ")", "=", "creds", ".", "split", "(", "':'", ")", "return", "(", "res", ".", "scheme", ",", "user", ",", "password", ",", "host", ",", "res", ".", "path", "[", "1", ":", "]", ")" ]
Parse DB URL. Return (scheme, user, password, host, dbname) pg://user:pass@host/dbname >>> parse_dburl("pg://user:pass@host/name") ('pg', 'user', 'pass', 'host', 'name') >>> parse_dburl("dbm:///dbfile") ('dbm', '', '', '', 'dbfile') >>> parse_dburl("pg://user:@/name") ('pg', 'user', '', '', 'name')
[ "Parse", "DB", "URL", ".", "Return", "(", "scheme", "user", "password", "host", "dbname", ")", "pg", ":", "//", "user", ":", "pass@host", "/", "dbname" ]
python
train
gtaylor/django-athumb
athumb/pial/engines/base.py
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/pial/engines/base.py#L106-L125
def write(self, image, dest_fobj, quality=95, format=None): """ Wrapper for ``_write`` :param Image image: This is your engine's ``Image`` object. For PIL it's PIL.Image. :keyword int quality: A quality level as a percent. The lower, the higher the compression, the worse the artifacts. :keyword str format: The format to save to. If omitted, guess based on the extension. We recommend specifying this. Typical values are 'JPEG', 'GIF', 'PNG'. Other formats largely depend on your choice of Engine. """ if isinstance(format, basestring) and format.lower() == 'jpg': # This mistake is made all the time. Let's just effectively alias # this, since it's commonly used. format = 'JPEG' raw_data = self._get_raw_data(image, format, quality) dest_fobj.write(raw_data)
[ "def", "write", "(", "self", ",", "image", ",", "dest_fobj", ",", "quality", "=", "95", ",", "format", "=", "None", ")", ":", "if", "isinstance", "(", "format", ",", "basestring", ")", "and", "format", ".", "lower", "(", ")", "==", "'jpg'", ":", "# This mistake is made all the time. Let's just effectively alias", "# this, since it's commonly used.", "format", "=", "'JPEG'", "raw_data", "=", "self", ".", "_get_raw_data", "(", "image", ",", "format", ",", "quality", ")", "dest_fobj", ".", "write", "(", "raw_data", ")" ]
Wrapper for ``_write`` :param Image image: This is your engine's ``Image`` object. For PIL it's PIL.Image. :keyword int quality: A quality level as a percent. The lower, the higher the compression, the worse the artifacts. :keyword str format: The format to save to. If omitted, guess based on the extension. We recommend specifying this. Typical values are 'JPEG', 'GIF', 'PNG'. Other formats largely depend on your choice of Engine.
[ "Wrapper", "for", "_write" ]
python
train
inveniosoftware/invenio-github
invenio_github/api.py
https://github.com/inveniosoftware/invenio-github/blob/ec42fd6a06079310dcbe2c46d9fd79d5197bbe26/invenio_github/api.py#L354-L361
def title(self): """Extract title from a release.""" if self.event: if self.release['name']: return u'{0}: {1}'.format( self.repository['full_name'], self.release['name'] ) return u'{0} {1}'.format(self.repo_model.name, self.model.tag)
[ "def", "title", "(", "self", ")", ":", "if", "self", ".", "event", ":", "if", "self", ".", "release", "[", "'name'", "]", ":", "return", "u'{0}: {1}'", ".", "format", "(", "self", ".", "repository", "[", "'full_name'", "]", ",", "self", ".", "release", "[", "'name'", "]", ")", "return", "u'{0} {1}'", ".", "format", "(", "self", ".", "repo_model", ".", "name", ",", "self", ".", "model", ".", "tag", ")" ]
Extract title from a release.
[ "Extract", "title", "from", "a", "release", "." ]
python
train
googledatalab/pydatalab
google/datalab/contrib/mlworkbench/_local_predict.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_local_predict.py#L56-L87
def _tf_predict(model_dir, input_csvlines): """Prediction with a tf savedmodel. Args: model_dir: directory that contains a saved model input_csvlines: list of csv strings Returns: Dict in the form tensor_name:prediction_list. Note that the value is always a list, even if there was only 1 row in input_csvlines. """ with tf.Graph().as_default(), tf.Session() as sess: input_alias_map, output_alias_map = _tf_load_model(sess, model_dir) csv_tensor_name = list(input_alias_map.values())[0] results = sess.run(fetches=output_alias_map, feed_dict={csv_tensor_name: input_csvlines}) # convert any scalar values to a list. This may happen when there is one # example in input_csvlines and the model uses tf.squeeze on the output # tensor. if len(input_csvlines) == 1: for k, v in six.iteritems(results): if not isinstance(v, (list, np.ndarray)): results[k] = [v] # Convert bytes to string. In python3 the results may be bytes. for k, v in six.iteritems(results): if any(isinstance(x, bytes) for x in v): results[k] = [x.decode('utf-8') for x in v] return results
[ "def", "_tf_predict", "(", "model_dir", ",", "input_csvlines", ")", ":", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", ",", "tf", ".", "Session", "(", ")", "as", "sess", ":", "input_alias_map", ",", "output_alias_map", "=", "_tf_load_model", "(", "sess", ",", "model_dir", ")", "csv_tensor_name", "=", "list", "(", "input_alias_map", ".", "values", "(", ")", ")", "[", "0", "]", "results", "=", "sess", ".", "run", "(", "fetches", "=", "output_alias_map", ",", "feed_dict", "=", "{", "csv_tensor_name", ":", "input_csvlines", "}", ")", "# convert any scalar values to a list. This may happen when there is one", "# example in input_csvlines and the model uses tf.squeeze on the output", "# tensor.", "if", "len", "(", "input_csvlines", ")", "==", "1", ":", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "results", ")", ":", "if", "not", "isinstance", "(", "v", ",", "(", "list", ",", "np", ".", "ndarray", ")", ")", ":", "results", "[", "k", "]", "=", "[", "v", "]", "# Convert bytes to string. In python3 the results may be bytes.", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "results", ")", ":", "if", "any", "(", "isinstance", "(", "x", ",", "bytes", ")", "for", "x", "in", "v", ")", ":", "results", "[", "k", "]", "=", "[", "x", ".", "decode", "(", "'utf-8'", ")", "for", "x", "in", "v", "]", "return", "results" ]
Prediction with a tf savedmodel. Args: model_dir: directory that contains a saved model input_csvlines: list of csv strings Returns: Dict in the form tensor_name:prediction_list. Note that the value is always a list, even if there was only 1 row in input_csvlines.
[ "Prediction", "with", "a", "tf", "savedmodel", "." ]
python
train
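An illustrative call of _tf_predict above; the SavedModel directory and CSV rows are placeholders, and the model is assumed to expose a single CSV-string input tensor as the code requires.

predictions = _tf_predict('./exported_model', ['5.1,3.5,1.4,0.2', '6.2,2.9,4.3,1.3'])
for tensor_name, values in predictions.items():
    print(tensor_name, values)    # each value is a list with one element per input row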
ethereum/py-evm
eth/tools/_utils/mappings.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/tools/_utils/mappings.py#L24-L49
def is_cleanly_mergable(*dicts: Dict[Any, Any]) -> bool: """Check that nothing will be overwritten when dictionaries are merged using `deep_merge`. Examples: >>> is_cleanly_mergable({"a": 1}, {"b": 2}, {"c": 3}) True >>> is_cleanly_mergable({"a": 1}, {"b": 2}, {"a": 0, c": 3}) False >>> is_cleanly_mergable({"a": 1, "b": {"ba": 2}}, {"c": 3, {"b": {"bb": 4}}) True >>> is_cleanly_mergable({"a": 1, "b": {"ba": 2}}, {"b": {"ba": 4}}) False """ if len(dicts) <= 1: return True elif len(dicts) == 2: if not all(isinstance(d, Mapping) for d in dicts): return False else: shared_keys = set(dicts[0].keys()) & set(dicts[1].keys()) return all(is_cleanly_mergable(dicts[0][key], dicts[1][key]) for key in shared_keys) else: dict_combinations = itertools.combinations(dicts, 2) return all(is_cleanly_mergable(*combination) for combination in dict_combinations)
[ "def", "is_cleanly_mergable", "(", "*", "dicts", ":", "Dict", "[", "Any", ",", "Any", "]", ")", "->", "bool", ":", "if", "len", "(", "dicts", ")", "<=", "1", ":", "return", "True", "elif", "len", "(", "dicts", ")", "==", "2", ":", "if", "not", "all", "(", "isinstance", "(", "d", ",", "Mapping", ")", "for", "d", "in", "dicts", ")", ":", "return", "False", "else", ":", "shared_keys", "=", "set", "(", "dicts", "[", "0", "]", ".", "keys", "(", ")", ")", "&", "set", "(", "dicts", "[", "1", "]", ".", "keys", "(", ")", ")", "return", "all", "(", "is_cleanly_mergable", "(", "dicts", "[", "0", "]", "[", "key", "]", ",", "dicts", "[", "1", "]", "[", "key", "]", ")", "for", "key", "in", "shared_keys", ")", "else", ":", "dict_combinations", "=", "itertools", ".", "combinations", "(", "dicts", ",", "2", ")", "return", "all", "(", "is_cleanly_mergable", "(", "*", "combination", ")", "for", "combination", "in", "dict_combinations", ")" ]
Check that nothing will be overwritten when dictionaries are merged using `deep_merge`.

    Examples:

        >>> is_cleanly_mergable({"a": 1}, {"b": 2}, {"c": 3})
        True
        >>> is_cleanly_mergable({"a": 1}, {"b": 2}, {"a": 0, "c": 3})
        False
        >>> is_cleanly_mergable({"a": 1, "b": {"ba": 2}}, {"c": 3, "b": {"bb": 4}})
        True
        >>> is_cleanly_mergable({"a": 1, "b": {"ba": 2}}, {"b": {"ba": 4}})
        False
[ "Check", "that", "nothing", "will", "be", "overwritten", "when", "dictionaries", "are", "merged", "using", "deep_merge", "." ]
python
train
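A minimal sketch of how the clean-merge check above might be exercised, assuming `is_cleanly_mergable` is in scope (it relies on `itertools` and `Mapping` internally); the sample dictionaries are invented.

    a = {"eips": {"eip150": True}}
    b = {"eips": {"eip158": False}}
    c = {"eips": {"eip150": False}}

    print(is_cleanly_mergable(a, b))      # disjoint nested keys -> True
    print(is_cleanly_mergable(a, b, c))   # "eip150" would be overwritten by a merge -> False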
Skyscanner/pages
pages/ui_component.py
https://github.com/Skyscanner/pages/blob/f80471ef01f84b11e4d751dff1e6398ae1e230b8/pages/ui_component.py#L68-L80
def locate(self): """ Lazily locates the element on the DOM if the WebElement instance is not available already. Returns a WebElement object. It also caches the element if caching has been set through cache(). """ if self._web_element: return self._web_element else: locator_type, locator_value = self.__locator element = self.driver.find_element(by=locator_type, value=locator_value) self._cache_web_element(element) # cache the element if allowed return element
[ "def", "locate", "(", "self", ")", ":", "if", "self", ".", "_web_element", ":", "return", "self", ".", "_web_element", "else", ":", "locator_type", ",", "locator_value", "=", "self", ".", "__locator", "element", "=", "self", ".", "driver", ".", "find_element", "(", "by", "=", "locator_type", ",", "value", "=", "locator_value", ")", "self", ".", "_cache_web_element", "(", "element", ")", "# cache the element if allowed", "return", "element" ]
Lazily locates the element on the DOM if the WebElement instance is not available already. Returns a WebElement object. It also caches the element if caching has been set through cache().
[ "Lazily", "locates", "the", "element", "on", "the", "DOM", "if", "the", "WebElement", "instance", "is", "not", "available", "already", ".", "Returns", "a", "WebElement", "object", ".", "It", "also", "caches", "the", "element", "if", "caching", "has", "been", "set", "through", "cache", "()", "." ]
python
test
PolyJIT/benchbuild
benchbuild/utils/versions.py
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/versions.py#L40-L58
def get_git_hash(from_url): """ Get the git commit hash of HEAD from :from_url. Args: from_url: The file system url of our git repository. Returns: git commit hash of HEAD, or empty string. """ from benchbuild.utils.cmd import git if from_url is None: return "" if not path.exists(from_url): return "" with local.cwd(from_url): return git("rev-parse", "HEAD", retcode=None)
[ "def", "get_git_hash", "(", "from_url", ")", ":", "from", "benchbuild", ".", "utils", ".", "cmd", "import", "git", "if", "from_url", "is", "None", ":", "return", "\"\"", "if", "not", "path", ".", "exists", "(", "from_url", ")", ":", "return", "\"\"", "with", "local", ".", "cwd", "(", "from_url", ")", ":", "return", "git", "(", "\"rev-parse\"", ",", "\"HEAD\"", ",", "retcode", "=", "None", ")" ]
Get the git commit hash of HEAD from :from_url. Args: from_url: The file system url of our git repository. Returns: git commit hash of HEAD, or empty string.
[ "Get", "the", "git", "commit", "hash", "of", "HEAD", "from", ":", "from_url", "." ]
python
train
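A hedged usage sketch for `get_git_hash`; the checkout path is hypothetical, and the empty-string fallback is simply the behaviour the docstring above promises.

    # Hypothetical working copy -- any directory containing a git checkout would do.
    commit = get_git_hash("/home/user/src/benchbuild")
    if commit:
        print("building against", commit.strip())
    else:
        print("no git checkout found; continuing with an unversioned build")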
apache/incubator-superset
superset/jinja_context.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/jinja_context.py#L85-L125
def filter_values(column, default=None): """ Gets a values for a particular filter as a list This is useful if: - you want to use a filter box to filter a query where the name of filter box column doesn't match the one in the select statement - you want to have the ability for filter inside the main query for speed purposes This searches for "filters" and "extra_filters" in form_data for a match Usage example: SELECT action, count(*) as times FROM logs WHERE action in ( {{ "'" + "','".join(filter_values('action_type')) + "'" }} ) GROUP BY 1 :param column: column/filter name to lookup :type column: str :param default: default value to return if there's no matching columns :type default: str :return: returns a list of filter values :type: list """ form_data = json.loads(request.form.get('form_data', '{}')) return_val = [] for filter_type in ['filters', 'extra_filters']: if filter_type not in form_data: continue for f in form_data[filter_type]: if f['col'] == column: for v in f['val']: return_val.append(v) if return_val: return return_val if default: return [default] else: return []
[ "def", "filter_values", "(", "column", ",", "default", "=", "None", ")", ":", "form_data", "=", "json", ".", "loads", "(", "request", ".", "form", ".", "get", "(", "'form_data'", ",", "'{}'", ")", ")", "return_val", "=", "[", "]", "for", "filter_type", "in", "[", "'filters'", ",", "'extra_filters'", "]", ":", "if", "filter_type", "not", "in", "form_data", ":", "continue", "for", "f", "in", "form_data", "[", "filter_type", "]", ":", "if", "f", "[", "'col'", "]", "==", "column", ":", "for", "v", "in", "f", "[", "'val'", "]", ":", "return_val", ".", "append", "(", "v", ")", "if", "return_val", ":", "return", "return_val", "if", "default", ":", "return", "[", "default", "]", "else", ":", "return", "[", "]" ]
Gets a values for a particular filter as a list This is useful if: - you want to use a filter box to filter a query where the name of filter box column doesn't match the one in the select statement - you want to have the ability for filter inside the main query for speed purposes This searches for "filters" and "extra_filters" in form_data for a match Usage example: SELECT action, count(*) as times FROM logs WHERE action in ( {{ "'" + "','".join(filter_values('action_type')) + "'" }} ) GROUP BY 1 :param column: column/filter name to lookup :type column: str :param default: default value to return if there's no matching columns :type default: str :return: returns a list of filter values :type: list
[ "Gets", "a", "values", "for", "a", "particular", "filter", "as", "a", "list" ]
python
train
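To make the lookup concrete, a small standalone sketch of the `form_data` shape that `filter_values` searches; the column names and values are invented, and the selection is reproduced directly instead of going through a Flask request.

    # Invented payload: both "filters" and "extra_filters" entries are scanned for a matching "col".
    form_data = {
        "filters": [
            {"col": "action_type", "op": "in", "val": ["login", "logout"]},
        ],
        "extra_filters": [
            {"col": "country", "op": "in", "val": ["CA"]},
        ],
    }

    # Same scan the function performs: both filter lists, matching on "col".
    matches = [v
               for key in ("filters", "extra_filters")
               for f in form_data.get(key, [])
               if f["col"] == "action_type"
               for v in f["val"]]
    print(matches)  # ['login', 'logout']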
mfcovington/django-project-home-templatetags
project_home_tags/templatetags/project_home.py
https://github.com/mfcovington/django-project-home-templatetags/blob/abc660906086088792c5e5e7be6ecd151c2ccddb/project_home_tags/templatetags/project_home.py#L86-L122
def project_home_breadcrumb_bs3(label): """A template tag to return the project's home URL and label formatted as a Bootstrap 3 breadcrumb. PROJECT_HOME_NAMESPACE must be defined in settings, for example: PROJECT_HOME_NAMESPACE = 'project_name:index_view' Usage Example: {% load project_home_tags %} <ol class="breadcrumb"> {% project_home_breadcrumb_bs3 %} {# <--- #} <li><a href="{% url 'app:namespace' %}">List of Objects</a></li> <li class="active">Object Detail</li> </ol> This gets converted into: <ol class="breadcrumb"> <li><a href="{% url 'project_name:index_view' %}">Home</a></li> {# <--- #} <li><a href="{% url 'app:namespace' %}">List of Objects</a></li> <li class="active">Object Detail</li> </ol> By default, the link's text is 'Home'. A project-wide label can be defined with PROJECT_HOME_LABEL in settings. Both the default and the project-wide label can be overridden by passing a string to the template tag. For example: {% project_home_breadcrumb_bs3 'Custom Label' %} """ url = home_url() if url: return format_html( '<li><a href="{}">{}</a></li>', url, label) else: return format_html('<li>{}</li>', label)
[ "def", "project_home_breadcrumb_bs3", "(", "label", ")", ":", "url", "=", "home_url", "(", ")", "if", "url", ":", "return", "format_html", "(", "'<li><a href=\"{}\">{}</a></li>'", ",", "url", ",", "label", ")", "else", ":", "return", "format_html", "(", "'<li>{}</li>'", ",", "label", ")" ]
A template tag to return the project's home URL and label formatted as a Bootstrap 3 breadcrumb. PROJECT_HOME_NAMESPACE must be defined in settings, for example: PROJECT_HOME_NAMESPACE = 'project_name:index_view' Usage Example: {% load project_home_tags %} <ol class="breadcrumb"> {% project_home_breadcrumb_bs3 %} {# <--- #} <li><a href="{% url 'app:namespace' %}">List of Objects</a></li> <li class="active">Object Detail</li> </ol> This gets converted into: <ol class="breadcrumb"> <li><a href="{% url 'project_name:index_view' %}">Home</a></li> {# <--- #} <li><a href="{% url 'app:namespace' %}">List of Objects</a></li> <li class="active">Object Detail</li> </ol> By default, the link's text is 'Home'. A project-wide label can be defined with PROJECT_HOME_LABEL in settings. Both the default and the project-wide label can be overridden by passing a string to the template tag. For example: {% project_home_breadcrumb_bs3 'Custom Label' %}
[ "A", "template", "tag", "to", "return", "the", "project", "s", "home", "URL", "and", "label", "formatted", "as", "a", "Bootstrap", "3", "breadcrumb", "." ]
python
test
mitsei/dlkit
dlkit/handcar/repository/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/managers.py#L2703-L2726
def get_composition_search_session(self, proxy): """Gets a composition search session. arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionSearchSession) - a CompositionSearchSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_search() is false compliance: optional - This method must be implemented if supports_composition_search() is true. """ if not self.supports_composition_search(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.CompositionSearchSession(proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
[ "def", "get_composition_search_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_composition_search", "(", ")", ":", "raise", "Unimplemented", "(", ")", "try", ":", "from", ".", "import", "sessions", "except", "ImportError", ":", "raise", "# OperationFailed()", "proxy", "=", "self", ".", "_convert_proxy", "(", "proxy", ")", "try", ":", "session", "=", "sessions", ".", "CompositionSearchSession", "(", "proxy", ",", "runtime", "=", "self", ".", "_runtime", ")", "except", "AttributeError", ":", "raise", "# OperationFailed()", "return", "session" ]
Gets a composition search session. arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionSearchSession) - a CompositionSearchSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_search() is false compliance: optional - This method must be implemented if supports_composition_search() is true.
[ "Gets", "a", "composition", "search", "session", "." ]
python
train
GibbsConsulting/django-plotly-dash
demo/demo/plotly_apps.py
https://github.com/GibbsConsulting/django-plotly-dash/blob/773ed081fc2ea3cc7607590322a14686a7a79bc5/demo/demo/plotly_apps.py#L101-L118
def callback_c(*args, **kwargs): 'Update the output following a change of the input selection' #da = kwargs['dash_app'] session_state = kwargs['session_state'] calls_so_far = session_state.get('calls_so_far', 0) session_state['calls_so_far'] = calls_so_far + 1 user_counts = session_state.get('user_counts', None) user_name = str(kwargs['user']) if user_counts is None: user_counts = {user_name:1} session_state['user_counts'] = user_counts else: user_counts[user_name] = user_counts.get(user_name, 0) + 1 return "Args are [%s] and kwargs are %s" %(",".join(args), str(kwargs))
[ "def", "callback_c", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "#da = kwargs['dash_app']", "session_state", "=", "kwargs", "[", "'session_state'", "]", "calls_so_far", "=", "session_state", ".", "get", "(", "'calls_so_far'", ",", "0", ")", "session_state", "[", "'calls_so_far'", "]", "=", "calls_so_far", "+", "1", "user_counts", "=", "session_state", ".", "get", "(", "'user_counts'", ",", "None", ")", "user_name", "=", "str", "(", "kwargs", "[", "'user'", "]", ")", "if", "user_counts", "is", "None", ":", "user_counts", "=", "{", "user_name", ":", "1", "}", "session_state", "[", "'user_counts'", "]", "=", "user_counts", "else", ":", "user_counts", "[", "user_name", "]", "=", "user_counts", ".", "get", "(", "user_name", ",", "0", ")", "+", "1", "return", "\"Args are [%s] and kwargs are %s\"", "%", "(", "\",\"", ".", "join", "(", "args", ")", ",", "str", "(", "kwargs", ")", ")" ]
Update the output following a change of the input selection
[ "Update", "the", "output", "following", "a", "change", "of", "the", "input", "selection" ]
python
train
StanfordVL/robosuite
robosuite/models/base.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/models/base.py#L113-L127
def save_model(self, fname, pretty=False): """ Saves the xml to file. Args: fname: output file location pretty: attempts!! to pretty print the output """ with open(fname, "w") as f: xml_str = ET.tostring(self.root, encoding="unicode") if pretty: # TODO: get a better pretty print library parsed_xml = xml.dom.minidom.parseString(xml_str) xml_str = parsed_xml.toprettyxml(newl="") f.write(xml_str)
[ "def", "save_model", "(", "self", ",", "fname", ",", "pretty", "=", "False", ")", ":", "with", "open", "(", "fname", ",", "\"w\"", ")", "as", "f", ":", "xml_str", "=", "ET", ".", "tostring", "(", "self", ".", "root", ",", "encoding", "=", "\"unicode\"", ")", "if", "pretty", ":", "# TODO: get a better pretty print library", "parsed_xml", "=", "xml", ".", "dom", ".", "minidom", ".", "parseString", "(", "xml_str", ")", "xml_str", "=", "parsed_xml", ".", "toprettyxml", "(", "newl", "=", "\"\"", ")", "f", ".", "write", "(", "xml_str", ")" ]
Saves the xml to file. Args: fname: output file location pretty: attempts!! to pretty print the output
[ "Saves", "the", "xml", "to", "file", "." ]
python
train
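A standalone sketch of the same pretty-printing approach used by `save_model` (serialize the ElementTree, then reparse with `xml.dom.minidom`); the tiny element tree is made up, and `newl=""` mirrors the call above, so indentation is kept but line breaks are suppressed.

    import xml.dom.minidom
    import xml.etree.ElementTree as ET

    root = ET.Element("mujoco", attrib={"model": "example"})   # invented content
    ET.SubElement(root, "worldbody")

    xml_str = ET.tostring(root, encoding="unicode")
    pretty = xml.dom.minidom.parseString(xml_str).toprettyxml(newl="")
    print(pretty)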
lsst-sqre/sqre-apikit
apikit/convenience.py
https://github.com/lsst-sqre/sqre-apikit/blob/ff505b63d2e29303ff7f05f2bd5eabd0f6d7026e/apikit/convenience.py#L104-L146
def add_metadata_route(app, route): """ Creates a /metadata route that returns service metadata. Also creates a /v{api_version}/metadata route, and those routes with ".json" appended. If route is specified, prepends it (or each component) to the front of the route. Parameters ---------- app : :class:`flask.Flask` instance Flask application for the microservice you're adding metadata to. route : `None`, `str`, or list of `str`, optional The 'route' parameter must be None, a string, or a list of strings. If supplied, each string will be prepended to the metadata route. Returns ------- Nothing, but decorates app with `/metadata` and `/v{app_version}/metadata` routes. """ errstr = add_metadata_route.__doc__ if route is None: route = [""] if isinstance(route, str): route = [route] if not isinstance(route, list): raise TypeError(errstr) if not all(isinstance(item, str) for item in route): raise TypeError(errstr) api_version = app.config["API_VERSION"] for rcomp in route: # Make canonical rcomp = "/" + rcomp.strip("/") if rcomp == "/": rcomp = "" for rbase in ["/metadata", "/v" + api_version + "/metadata"]: for rext in ["", ".json"]: rte = rcomp + rbase + rext with app.app_context(): app.add_url_rule(rte, '_return_metadata', _return_metadata)
[ "def", "add_metadata_route", "(", "app", ",", "route", ")", ":", "errstr", "=", "add_metadata_route", ".", "__doc__", "if", "route", "is", "None", ":", "route", "=", "[", "\"\"", "]", "if", "isinstance", "(", "route", ",", "str", ")", ":", "route", "=", "[", "route", "]", "if", "not", "isinstance", "(", "route", ",", "list", ")", ":", "raise", "TypeError", "(", "errstr", ")", "if", "not", "all", "(", "isinstance", "(", "item", ",", "str", ")", "for", "item", "in", "route", ")", ":", "raise", "TypeError", "(", "errstr", ")", "api_version", "=", "app", ".", "config", "[", "\"API_VERSION\"", "]", "for", "rcomp", "in", "route", ":", "# Make canonical", "rcomp", "=", "\"/\"", "+", "rcomp", ".", "strip", "(", "\"/\"", ")", "if", "rcomp", "==", "\"/\"", ":", "rcomp", "=", "\"\"", "for", "rbase", "in", "[", "\"/metadata\"", ",", "\"/v\"", "+", "api_version", "+", "\"/metadata\"", "]", ":", "for", "rext", "in", "[", "\"\"", ",", "\".json\"", "]", ":", "rte", "=", "rcomp", "+", "rbase", "+", "rext", "with", "app", ".", "app_context", "(", ")", ":", "app", ".", "add_url_rule", "(", "rte", ",", "'_return_metadata'", ",", "_return_metadata", ")" ]
Creates a /metadata route that returns service metadata. Also creates a /v{api_version}/metadata route, and those routes with ".json" appended. If route is specified, prepends it (or each component) to the front of the route. Parameters ---------- app : :class:`flask.Flask` instance Flask application for the microservice you're adding metadata to. route : `None`, `str`, or list of `str`, optional The 'route' parameter must be None, a string, or a list of strings. If supplied, each string will be prepended to the metadata route. Returns ------- Nothing, but decorates app with `/metadata` and `/v{app_version}/metadata` routes.
[ "Creates", "a", "/", "metadata", "route", "that", "returns", "service", "metadata", ".", "Also", "creates", "a", "/", "v", "{", "api_version", "}", "/", "metadata", "route", "and", "those", "routes", "with", ".", "json", "appended", ".", "If", "route", "is", "specified", "prepends", "it", "(", "or", "each", "component", ")", "to", "the", "front", "of", "the", "route", "." ]
python
train
saulpw/visidata
visidata/canvas.py
https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/canvas.py#L566-L568
def scaleY(self, y): 'returns plotter y coordinate' return round(self.plotviewBox.ymin+(y-self.visibleBox.ymin)*self.yScaler)
[ "def", "scaleY", "(", "self", ",", "y", ")", ":", "return", "round", "(", "self", ".", "plotviewBox", ".", "ymin", "+", "(", "y", "-", "self", ".", "visibleBox", ".", "ymin", ")", "*", "self", ".", "yScaler", ")" ]
returns plotter y coordinate
[ "returns", "plotter", "y", "coordinate" ]
python
train
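A worked example of the linear mapping in `scaleY`, with made-up canvas numbers standing in for `plotviewBox.ymin`, `visibleBox.ymin` and `yScaler`.

    plotview_ymin = 10      # top of the plotting area, in plotter pixels (invented)
    visible_ymin = 100.0    # smallest data y currently visible (invented)
    y_scaler = 0.5          # plotter pixels per data unit (invented)

    y = 140.0
    plotter_y = round(plotview_ymin + (y - visible_ymin) * y_scaler)
    print(plotter_y)  # 10 + 40 * 0.5 = 30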
PyMySQL/mysqlclient-python
MySQLdb/connections.py
https://github.com/PyMySQL/mysqlclient-python/blob/b66971ee36be96b772ae7fdec79ccc1611376f3c/MySQLdb/connections.py#L309-L318
def show_warnings(self): """Return detailed information about warnings as a sequence of tuples of (Level, Code, Message). This is only supported in MySQL-4.1 and up. If your server is an earlier version, an empty sequence is returned.""" if self._server_version < (4,1): return () self.query("SHOW WARNINGS") r = self.store_result() warnings = r.fetch_row(0) return warnings
[ "def", "show_warnings", "(", "self", ")", ":", "if", "self", ".", "_server_version", "<", "(", "4", ",", "1", ")", ":", "return", "(", ")", "self", ".", "query", "(", "\"SHOW WARNINGS\"", ")", "r", "=", "self", ".", "store_result", "(", ")", "warnings", "=", "r", ".", "fetch_row", "(", "0", ")", "return", "warnings" ]
Return detailed information about warnings as a sequence of tuples of (Level, Code, Message). This is only supported in MySQL-4.1 and up. If your server is an earlier version, an empty sequence is returned.
[ "Return", "detailed", "information", "about", "warnings", "as", "a", "sequence", "of", "tuples", "of", "(", "Level", "Code", "Message", ")", ".", "This", "is", "only", "supported", "in", "MySQL", "-", "4", ".", "1", "and", "up", ".", "If", "your", "server", "is", "an", "earlier", "version", "an", "empty", "sequence", "is", "returned", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/psutil/_psosx.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/psutil/_psosx.py#L196-L201
def get_ext_memory_info(self): """Return a tuple with the process' RSS and VMS size.""" rss, vms, pfaults, pageins = _psutil_osx.get_process_memory_info(self.pid) return self._nt_ext_mem(rss, vms, pfaults * _PAGESIZE, pageins * _PAGESIZE)
[ "def", "get_ext_memory_info", "(", "self", ")", ":", "rss", ",", "vms", ",", "pfaults", ",", "pageins", "=", "_psutil_osx", ".", "get_process_memory_info", "(", "self", ".", "pid", ")", "return", "self", ".", "_nt_ext_mem", "(", "rss", ",", "vms", ",", "pfaults", "*", "_PAGESIZE", ",", "pageins", "*", "_PAGESIZE", ")" ]
Return a tuple with the process' RSS and VMS size.
[ "Return", "a", "tuple", "with", "the", "process", "RSS", "and", "VMS", "size", "." ]
python
test
pywbem/pywbem
pywbem/_utils.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/_utils.py#L131-L147
def _stacklevel_above_module(mod_name): """ Return the stack level (with 1 = caller of this function) of the first caller that is not defined in the specified module (e.g. "pywbem.cim_obj"). The returned stack level can be used directly by the caller of this function as an argument for the stacklevel parameter of warnings.warn(). """ stacklevel = 2 # start with caller of our caller frame = inspect.stack()[stacklevel][0] # stack() level is 0-based while True: if frame.f_globals.get('__name__', None) != mod_name: break stacklevel += 1 frame = frame.f_back del frame return stacklevel
[ "def", "_stacklevel_above_module", "(", "mod_name", ")", ":", "stacklevel", "=", "2", "# start with caller of our caller", "frame", "=", "inspect", ".", "stack", "(", ")", "[", "stacklevel", "]", "[", "0", "]", "# stack() level is 0-based", "while", "True", ":", "if", "frame", ".", "f_globals", ".", "get", "(", "'__name__'", ",", "None", ")", "!=", "mod_name", ":", "break", "stacklevel", "+=", "1", "frame", "=", "frame", ".", "f_back", "del", "frame", "return", "stacklevel" ]
Return the stack level (with 1 = caller of this function) of the first caller that is not defined in the specified module (e.g. "pywbem.cim_obj"). The returned stack level can be used directly by the caller of this function as an argument for the stacklevel parameter of warnings.warn().
[ "Return", "the", "stack", "level", "(", "with", "1", "=", "caller", "of", "this", "function", ")", "of", "the", "first", "caller", "that", "is", "not", "defined", "in", "the", "specified", "module", "(", "e", ".", "g", ".", "pywbem", ".", "cim_obj", ")", "." ]
python
train
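A hedged sketch of the intended use of `_stacklevel_above_module`: computing a stacklevel so the warning is attributed to the first caller outside a given module. The module name and warning text are invented, and the helper is assumed to be in scope.

    import warnings

    def deprecated_helper():
        # Skip every frame defined inside "mypackage.internal" (hypothetical module name)
        # so the warning points at external calling code.
        level = _stacklevel_above_module("mypackage.internal")
        warnings.warn("deprecated_helper() is deprecated", DeprecationWarning, stacklevel=level)

    deprecated_helper()  # the emitted warning is attributed to this line, not the helper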
HPENetworking/PYHPEIMC
pyhpeimc/plat/alarms.py
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/pyhpeimc/plat/alarms.py#L158-L175
def acknowledge_alarm(alarm_id, auth, url): """ Function tasks input of str of alarm ID and sends to REST API. Function will acknowledge designated alarm in the IMC alarm database. :param alarm_id: str of alarm ID param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: integer HTTP response code :rtype int """ f_url = url + "/imcrs/fault/alarm/acknowledge/"+str(alarm_id) response = requests.put(f_url, auth=auth, headers=HEADERS) try: return response.status_code except requests.exceptions.RequestException as error: return "Error:\n" + str(error) + ' get_alarms: An Error has occured'
[ "def", "acknowledge_alarm", "(", "alarm_id", ",", "auth", ",", "url", ")", ":", "f_url", "=", "url", "+", "\"/imcrs/fault/alarm/acknowledge/\"", "+", "str", "(", "alarm_id", ")", "response", "=", "requests", ".", "put", "(", "f_url", ",", "auth", "=", "auth", ",", "headers", "=", "HEADERS", ")", "try", ":", "return", "response", ".", "status_code", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "error", ":", "return", "\"Error:\\n\"", "+", "str", "(", "error", ")", "+", "' get_alarms: An Error has occured'" ]
Function tasks input of str of alarm ID and sends to REST API. Function will acknowledge designated alarm in the IMC alarm database. :param alarm_id: str of alarm ID param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: integer HTTP response code :rtype int
[ "Function", "tasks", "input", "of", "str", "of", "alarm", "ID", "and", "sends", "to", "REST", "API", ".", "Function", "will", "acknowledge", "designated", "alarm", "in", "the", "IMC", "alarm", "database", ".", ":", "param", "alarm_id", ":", "str", "of", "alarm", "ID", "param", "auth", ":", "requests", "auth", "object", "#usually", "auth", ".", "creds", "from", "auth", "pyhpeimc", ".", "auth", ".", "class" ]
python
train
saltstack/salt
salt/utils/openstack/neutron.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/neutron.py#L700-L739
def create_ipsec_site_connection(self, name, ipsecpolicy, ikepolicy, vpnservice, peer_cidrs, peer_address, peer_id, psk, admin_state_up=True, **kwargs): ''' Creates a new IPsecSiteConnection ''' ipsecpolicy_id = self._find_ipsecpolicy_id(ipsecpolicy) ikepolicy_id = self._find_ikepolicy_id(ikepolicy) vpnservice_id = self._find_vpnservice_id(vpnservice) body = {'psk': psk, 'ipsecpolicy_id': ipsecpolicy_id, 'admin_state_up': admin_state_up, 'peer_cidrs': [peer_cidrs], 'ikepolicy_id': ikepolicy_id, 'vpnservice_id': vpnservice_id, 'peer_address': peer_address, 'peer_id': peer_id, 'name': name} if 'initiator' in kwargs: body['initiator'] = kwargs['initiator'] if 'mtu' in kwargs: body['mtu'] = kwargs['mtu'] if 'dpd_action' in kwargs: body['dpd'] = {'action': kwargs['dpd_action']} if 'dpd_interval' in kwargs: if 'dpd' not in body: body['dpd'] = {} body['dpd']['interval'] = kwargs['dpd_interval'] if 'dpd_timeout' in kwargs: if 'dpd' not in body: body['dpd'] = {} body['dpd']['timeout'] = kwargs['dpd_timeout'] return self.network_conn.create_ipsec_site_connection( body={'ipsec_site_connection': body})
[ "def", "create_ipsec_site_connection", "(", "self", ",", "name", ",", "ipsecpolicy", ",", "ikepolicy", ",", "vpnservice", ",", "peer_cidrs", ",", "peer_address", ",", "peer_id", ",", "psk", ",", "admin_state_up", "=", "True", ",", "*", "*", "kwargs", ")", ":", "ipsecpolicy_id", "=", "self", ".", "_find_ipsecpolicy_id", "(", "ipsecpolicy", ")", "ikepolicy_id", "=", "self", ".", "_find_ikepolicy_id", "(", "ikepolicy", ")", "vpnservice_id", "=", "self", ".", "_find_vpnservice_id", "(", "vpnservice", ")", "body", "=", "{", "'psk'", ":", "psk", ",", "'ipsecpolicy_id'", ":", "ipsecpolicy_id", ",", "'admin_state_up'", ":", "admin_state_up", ",", "'peer_cidrs'", ":", "[", "peer_cidrs", "]", ",", "'ikepolicy_id'", ":", "ikepolicy_id", ",", "'vpnservice_id'", ":", "vpnservice_id", ",", "'peer_address'", ":", "peer_address", ",", "'peer_id'", ":", "peer_id", ",", "'name'", ":", "name", "}", "if", "'initiator'", "in", "kwargs", ":", "body", "[", "'initiator'", "]", "=", "kwargs", "[", "'initiator'", "]", "if", "'mtu'", "in", "kwargs", ":", "body", "[", "'mtu'", "]", "=", "kwargs", "[", "'mtu'", "]", "if", "'dpd_action'", "in", "kwargs", ":", "body", "[", "'dpd'", "]", "=", "{", "'action'", ":", "kwargs", "[", "'dpd_action'", "]", "}", "if", "'dpd_interval'", "in", "kwargs", ":", "if", "'dpd'", "not", "in", "body", ":", "body", "[", "'dpd'", "]", "=", "{", "}", "body", "[", "'dpd'", "]", "[", "'interval'", "]", "=", "kwargs", "[", "'dpd_interval'", "]", "if", "'dpd_timeout'", "in", "kwargs", ":", "if", "'dpd'", "not", "in", "body", ":", "body", "[", "'dpd'", "]", "=", "{", "}", "body", "[", "'dpd'", "]", "[", "'timeout'", "]", "=", "kwargs", "[", "'dpd_timeout'", "]", "return", "self", ".", "network_conn", ".", "create_ipsec_site_connection", "(", "body", "=", "{", "'ipsec_site_connection'", ":", "body", "}", ")" ]
Creates a new IPsecSiteConnection
[ "Creates", "a", "new", "IPsecSiteConnection" ]
python
train
RPi-Distro/python-gpiozero
gpiozero/spi_devices.py
https://github.com/RPi-Distro/python-gpiozero/blob/7b67374fd0c8c4fde5586d9bad9531f076db9c0c/gpiozero/spi_devices.py#L93-L107
def _words_to_int(self, words, expected_bits=None): """ Given a sequence of words which each fit in the internal SPI interface's number of bits per word, returns the value obtained by concatenating each word into a single bit-string. If *expected_bits* is specified, it limits the size of the output to the specified number of bits (by masking off bits above the expected number). If unspecified, no limit will be applied. """ if expected_bits is None: expected_bits = len(words) * self._spi.bits_per_word shifts = range(0, expected_bits, self._spi.bits_per_word)[::-1] mask = 2 ** expected_bits - 1 return reduce(or_, (word << shift for word, shift in zip(words, shifts))) & mask
[ "def", "_words_to_int", "(", "self", ",", "words", ",", "expected_bits", "=", "None", ")", ":", "if", "expected_bits", "is", "None", ":", "expected_bits", "=", "len", "(", "words", ")", "*", "self", ".", "_spi", ".", "bits_per_word", "shifts", "=", "range", "(", "0", ",", "expected_bits", ",", "self", ".", "_spi", ".", "bits_per_word", ")", "[", ":", ":", "-", "1", "]", "mask", "=", "2", "**", "expected_bits", "-", "1", "return", "reduce", "(", "or_", ",", "(", "word", "<<", "shift", "for", "word", ",", "shift", "in", "zip", "(", "words", ",", "shifts", ")", ")", ")", "&", "mask" ]
Given a sequence of words which each fit in the internal SPI interface's number of bits per word, returns the value obtained by concatenating each word into a single bit-string. If *expected_bits* is specified, it limits the size of the output to the specified number of bits (by masking off bits above the expected number). If unspecified, no limit will be applied.
[ "Given", "a", "sequence", "of", "words", "which", "each", "fit", "in", "the", "internal", "SPI", "interface", "s", "number", "of", "bits", "per", "word", "returns", "the", "value", "obtained", "by", "concatenating", "each", "word", "into", "a", "single", "bit", "-", "string", "." ]
python
train
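The bit concatenation in `_words_to_int` can be illustrated without SPI hardware; the standalone helper below mirrors the same reduce/shift/mask arithmetic with an explicit bits-per-word argument (the names here are invented, not part of gpiozero).

    from functools import reduce
    from operator import or_

    def words_to_int(words, bits_per_word, expected_bits=None):
        # Concatenate fixed-width words into one integer, most significant word first.
        if expected_bits is None:
            expected_bits = len(words) * bits_per_word
        shifts = range(0, expected_bits, bits_per_word)[::-1]
        mask = 2 ** expected_bits - 1
        return reduce(or_, (w << s for w, s in zip(words, shifts))) & mask

    print(hex(words_to_int([0x12, 0x34], 8)))                    # 0x1234
    print(hex(words_to_int([0x12, 0x34], 8, expected_bits=10)))  # masked to 10 bits -> 0x234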
ThreatConnect-Inc/tcex
app_init/playbook_utility/app.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/app_init/playbook_utility/app.py#L25-L48
def run(self): """Run the App main logic. This method should contain the core logic of the App. """ # read inputs indent = int(self.tcex.playbook.read(self.args.indent)) json_data = self.tcex.playbook.read(self.args.json_data) # get the playbook variable type json_data_type = self.tcex.playbook.variable_type(self.args.json_data) # convert string input to dict if json_data_type in ['String']: json_data = json.loads(json_data) # generate the new "pretty" json (this will be used as an option variable) try: self.pretty_json = json.dumps(json_data, indent=indent, sort_keys=self.args.sort_keys) except Exception: self.tcex.exit(1, 'Failed parsing JSON data.') # set the App exit message self.exit_message = 'JSON prettified.'
[ "def", "run", "(", "self", ")", ":", "# read inputs", "indent", "=", "int", "(", "self", ".", "tcex", ".", "playbook", ".", "read", "(", "self", ".", "args", ".", "indent", ")", ")", "json_data", "=", "self", ".", "tcex", ".", "playbook", ".", "read", "(", "self", ".", "args", ".", "json_data", ")", "# get the playbook variable type", "json_data_type", "=", "self", ".", "tcex", ".", "playbook", ".", "variable_type", "(", "self", ".", "args", ".", "json_data", ")", "# convert string input to dict", "if", "json_data_type", "in", "[", "'String'", "]", ":", "json_data", "=", "json", ".", "loads", "(", "json_data", ")", "# generate the new \"pretty\" json (this will be used as an option variable)", "try", ":", "self", ".", "pretty_json", "=", "json", ".", "dumps", "(", "json_data", ",", "indent", "=", "indent", ",", "sort_keys", "=", "self", ".", "args", ".", "sort_keys", ")", "except", "Exception", ":", "self", ".", "tcex", ".", "exit", "(", "1", ",", "'Failed parsing JSON data.'", ")", "# set the App exit message", "self", ".", "exit_message", "=", "'JSON prettified.'" ]
Run the App main logic. This method should contain the core logic of the App.
[ "Run", "the", "App", "main", "logic", "." ]
python
train
marrow/mailer
marrow/mailer/message.py
https://github.com/marrow/mailer/blob/3995ef98a3f7feb75f1aeb652e6afe40a5c94def/marrow/mailer/message.py#L254-L337
def attach(self, name, data=None, maintype=None, subtype=None, inline=False, filename=None, filename_charset='', filename_language='', encoding=None): """Attach a file to this message. :param name: Path to the file to attach if data is None, or the name of the file if the ``data`` argument is given :param data: Contents of the file to attach, or None if the data is to be read from the file pointed to by the ``name`` argument :type data: bytes or a file-like object :param maintype: First part of the MIME type of the file -- will be automatically guessed if not given :param subtype: Second part of the MIME type of the file -- will be automatically guessed if not given :param inline: Whether to set the Content-Disposition for the file to "inline" (True) or "attachment" (False) :param filename: The file name of the attached file as seen by the user in his/her mail client. :param filename_charset: Charset used for the filename paramenter. Allows for attachment names with characters from UTF-8 or Latin 1. See RFC 2231. :param filename_language: Used to specify what language the filename is in. See RFC 2231. :param encoding: Value of the Content-Encoding MIME header (e.g. "gzip" in case of .tar.gz, but usually empty) """ self._dirty = True if not maintype: maintype, guessed_encoding = guess_type(name) encoding = encoding or guessed_encoding if not maintype: maintype, subtype = 'application', 'octet-stream' else: maintype, _, subtype = maintype.partition('/') part = MIMENonMultipart(maintype, subtype) part.add_header('Content-Transfer-Encoding', 'base64') if encoding: part.add_header('Content-Encoding', encoding) if data is None: with open(name, 'rb') as fp: value = fp.read() name = os.path.basename(name) elif isinstance(data, bytes): value = data elif hasattr(data, 'read'): value = data.read() else: raise TypeError("Unable to read attachment contents") part.set_payload(base64.encodestring(value)) if not filename: filename = name filename = os.path.basename(filename) if filename_charset or filename_language: if not filename_charset: filename_charset = 'utf-8' # See https://docs.python.org/2/library/email.message.html#email.message.Message.add_header # for more information. # add_header() in the email module expects its arguments to be ASCII strings. Go ahead and handle # the case where these arguments come in as unicode strings, since encoding ASCII strings # as UTF-8 can't hurt. if sys.version_info < (3, 0): filename=(filename_charset.encode('utf-8'), filename_language.encode('utf-8'), filename.encode('utf-8')) else: filename=(filename_charset, filename_language, filename) if inline: if sys.version_info < (3, 0): part.add_header('Content-Disposition'.encode('utf-8'), 'inline'.encode('utf-8'), filename=filename) part.add_header('Content-ID'.encode('utf-8'), '<%s>'.encode('utf-8') % filename) else: part.add_header('Content-Disposition', 'inline', filename=filename) part.add_header('Content-ID', '<%s>' % filename) self.embedded.append(part) else: if sys.version_info < (3, 0): part.add_header('Content-Disposition'.encode('utf-8'), 'attachment'.encode('utf-8'), filename=filename) else: part.add_header('Content-Disposition', 'attachment', filename=filename) self.attachments.append(part)
[ "def", "attach", "(", "self", ",", "name", ",", "data", "=", "None", ",", "maintype", "=", "None", ",", "subtype", "=", "None", ",", "inline", "=", "False", ",", "filename", "=", "None", ",", "filename_charset", "=", "''", ",", "filename_language", "=", "''", ",", "encoding", "=", "None", ")", ":", "self", ".", "_dirty", "=", "True", "if", "not", "maintype", ":", "maintype", ",", "guessed_encoding", "=", "guess_type", "(", "name", ")", "encoding", "=", "encoding", "or", "guessed_encoding", "if", "not", "maintype", ":", "maintype", ",", "subtype", "=", "'application'", ",", "'octet-stream'", "else", ":", "maintype", ",", "_", ",", "subtype", "=", "maintype", ".", "partition", "(", "'/'", ")", "part", "=", "MIMENonMultipart", "(", "maintype", ",", "subtype", ")", "part", ".", "add_header", "(", "'Content-Transfer-Encoding'", ",", "'base64'", ")", "if", "encoding", ":", "part", ".", "add_header", "(", "'Content-Encoding'", ",", "encoding", ")", "if", "data", "is", "None", ":", "with", "open", "(", "name", ",", "'rb'", ")", "as", "fp", ":", "value", "=", "fp", ".", "read", "(", ")", "name", "=", "os", ".", "path", ".", "basename", "(", "name", ")", "elif", "isinstance", "(", "data", ",", "bytes", ")", ":", "value", "=", "data", "elif", "hasattr", "(", "data", ",", "'read'", ")", ":", "value", "=", "data", ".", "read", "(", ")", "else", ":", "raise", "TypeError", "(", "\"Unable to read attachment contents\"", ")", "part", ".", "set_payload", "(", "base64", ".", "encodestring", "(", "value", ")", ")", "if", "not", "filename", ":", "filename", "=", "name", "filename", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "if", "filename_charset", "or", "filename_language", ":", "if", "not", "filename_charset", ":", "filename_charset", "=", "'utf-8'", "# See https://docs.python.org/2/library/email.message.html#email.message.Message.add_header", "# for more information.", "# add_header() in the email module expects its arguments to be ASCII strings. 
Go ahead and handle", "# the case where these arguments come in as unicode strings, since encoding ASCII strings", "# as UTF-8 can't hurt.", "if", "sys", ".", "version_info", "<", "(", "3", ",", "0", ")", ":", "filename", "=", "(", "filename_charset", ".", "encode", "(", "'utf-8'", ")", ",", "filename_language", ".", "encode", "(", "'utf-8'", ")", ",", "filename", ".", "encode", "(", "'utf-8'", ")", ")", "else", ":", "filename", "=", "(", "filename_charset", ",", "filename_language", ",", "filename", ")", "if", "inline", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "0", ")", ":", "part", ".", "add_header", "(", "'Content-Disposition'", ".", "encode", "(", "'utf-8'", ")", ",", "'inline'", ".", "encode", "(", "'utf-8'", ")", ",", "filename", "=", "filename", ")", "part", ".", "add_header", "(", "'Content-ID'", ".", "encode", "(", "'utf-8'", ")", ",", "'<%s>'", ".", "encode", "(", "'utf-8'", ")", "%", "filename", ")", "else", ":", "part", ".", "add_header", "(", "'Content-Disposition'", ",", "'inline'", ",", "filename", "=", "filename", ")", "part", ".", "add_header", "(", "'Content-ID'", ",", "'<%s>'", "%", "filename", ")", "self", ".", "embedded", ".", "append", "(", "part", ")", "else", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "0", ")", ":", "part", ".", "add_header", "(", "'Content-Disposition'", ".", "encode", "(", "'utf-8'", ")", ",", "'attachment'", ".", "encode", "(", "'utf-8'", ")", ",", "filename", "=", "filename", ")", "else", ":", "part", ".", "add_header", "(", "'Content-Disposition'", ",", "'attachment'", ",", "filename", "=", "filename", ")", "self", ".", "attachments", ".", "append", "(", "part", ")" ]
Attach a file to this message. :param name: Path to the file to attach if data is None, or the name of the file if the ``data`` argument is given :param data: Contents of the file to attach, or None if the data is to be read from the file pointed to by the ``name`` argument :type data: bytes or a file-like object :param maintype: First part of the MIME type of the file -- will be automatically guessed if not given :param subtype: Second part of the MIME type of the file -- will be automatically guessed if not given :param inline: Whether to set the Content-Disposition for the file to "inline" (True) or "attachment" (False) :param filename: The file name of the attached file as seen by the user in his/her mail client. :param filename_charset: Charset used for the filename paramenter. Allows for attachment names with characters from UTF-8 or Latin 1. See RFC 2231. :param filename_language: Used to specify what language the filename is in. See RFC 2231. :param encoding: Value of the Content-Encoding MIME header (e.g. "gzip" in case of .tar.gz, but usually empty)
[ "Attach", "a", "file", "to", "this", "message", "." ]
python
train
tensorflow/datasets
tensorflow_datasets/core/download/resource.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L193-L196
def get_dl_dirname(url): """Returns name of temp dir for given url.""" checksum = hashlib.sha256(tf.compat.as_bytes(url)).hexdigest() return get_dl_fname(url, checksum)
[ "def", "get_dl_dirname", "(", "url", ")", ":", "checksum", "=", "hashlib", ".", "sha256", "(", "tf", ".", "compat", ".", "as_bytes", "(", "url", ")", ")", ".", "hexdigest", "(", ")", "return", "get_dl_fname", "(", "url", ",", "checksum", ")" ]
Returns name of temp dir for given url.
[ "Returns", "name", "of", "temp", "dir", "for", "given", "url", "." ]
python
train
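The checksum step behind `get_dl_dirname` can be sketched without TensorFlow; the URL is hypothetical, and the exact `get_dl_fname` formatting is not shown in the record above, so only the hashing is demonstrated.

    import hashlib

    url = "https://example.com/dataset.zip"   # hypothetical download URL
    checksum = hashlib.sha256(url.encode("utf-8")).hexdigest()
    print(checksum[:16])  # a stable, URL-derived prefix suitable for a temp dir name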
Duke-GCB/DukeDSClient
ddsc/cmdparser.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/cmdparser.py#L257-L268
def _add_include_arg(arg_parser): """ Adds optional repeatable include parameter to a parser. :param arg_parser: ArgumentParser parser to add this argument to. """ arg_parser.add_argument("--include", metavar='Path', action='append', type=to_unicode, dest='include_paths', help="Specifies a single path to include. This argument can be repeated.", default=[])
[ "def", "_add_include_arg", "(", "arg_parser", ")", ":", "arg_parser", ".", "add_argument", "(", "\"--include\"", ",", "metavar", "=", "'Path'", ",", "action", "=", "'append'", ",", "type", "=", "to_unicode", ",", "dest", "=", "'include_paths'", ",", "help", "=", "\"Specifies a single path to include. This argument can be repeated.\"", ",", "default", "=", "[", "]", ")" ]
Adds optional repeatable include parameter to a parser. :param arg_parser: ArgumentParser parser to add this argument to.
[ "Adds", "optional", "repeatable", "include", "parameter", "to", "a", "parser", ".", ":", "param", "arg_parser", ":", "ArgumentParser", "parser", "to", "add", "this", "argument", "to", "." ]
python
train
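A self-contained sketch of the same repeatable-option pattern with argparse; `to_unicode` is replaced by plain `str` here because that helper is not shown in the record, and the paths are invented.

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--include", metavar="Path", action="append", type=str,
                        dest="include_paths", default=[],
                        help="Specifies a single path to include. This argument can be repeated.")

    args = parser.parse_args(["--include", "data/a.txt", "--include", "data/b.txt"])
    print(args.include_paths)  # ['data/a.txt', 'data/b.txt']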
erget/StereoVision
stereovision/blockmatchers.py
https://github.com/erget/StereoVision/blob/1adff45e291362f52188e0fd0211265845a4461a/stereovision/blockmatchers.py#L190-L204
def get_disparity(self, pair): """ Compute disparity from image pair (left, right). First, convert images to grayscale if needed. Then pass to the ``_block_matcher`` for stereo matching. """ gray = [] if pair[0].ndim == 3: for side in pair: gray.append(cv2.cvtColor(side, cv2.COLOR_BGR2GRAY)) else: gray = pair return self._block_matcher.compute(gray[0], gray[1], disptype=cv2.CV_32F)
[ "def", "get_disparity", "(", "self", ",", "pair", ")", ":", "gray", "=", "[", "]", "if", "pair", "[", "0", "]", ".", "ndim", "==", "3", ":", "for", "side", "in", "pair", ":", "gray", ".", "append", "(", "cv2", ".", "cvtColor", "(", "side", ",", "cv2", ".", "COLOR_BGR2GRAY", ")", ")", "else", ":", "gray", "=", "pair", "return", "self", ".", "_block_matcher", ".", "compute", "(", "gray", "[", "0", "]", ",", "gray", "[", "1", "]", ",", "disptype", "=", "cv2", ".", "CV_32F", ")" ]
Compute disparity from image pair (left, right). First, convert images to grayscale if needed. Then pass to the ``_block_matcher`` for stereo matching.
[ "Compute", "disparity", "from", "image", "pair", "(", "left", "right", ")", "." ]
python
train
sethmlarson/virtualbox-python
virtualbox/library_ext/vbox.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library_ext/vbox.py#L75-L83
def register_on_snapshot_taken(self, callback): """Set the callback function to consume on snapshot taken events. Callback receives a ISnapshotTakenEvent object. Returns the callback_id """ event_type = library.VBoxEventType.on_snapshot_taken return self.event_source.register_callback(callback, event_type)
[ "def", "register_on_snapshot_taken", "(", "self", ",", "callback", ")", ":", "event_type", "=", "library", ".", "VBoxEventType", ".", "on_snapshot_taken", "return", "self", ".", "event_source", ".", "register_callback", "(", "callback", ",", "event_type", ")" ]
Set the callback function to consume on snapshot taken events. Callback receives a ISnapshotTakenEvent object. Returns the callback_id
[ "Set", "the", "callback", "function", "to", "consume", "on", "snapshot", "taken", "events", "." ]
python
train
sci-bots/svg-model
docs/generate_modules.py
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/docs/generate_modules.py#L230-L257
def main(): """ Parse and check the command line arguments. """ parser = optparse.OptionParser(usage="""usage: %prog [options] <package path> [exclude paths, ...] Note: By default this script will not overwrite already created files.""") parser.add_option("-n", "--doc-header", action="store", dest="header", help="Documentation Header (default=Project)", default="Project") parser.add_option("-d", "--dest-dir", action="store", dest="destdir", help="Output destination directory", default="") parser.add_option("-s", "--suffix", action="store", dest="suffix", help="module suffix (default=txt)", default="txt") parser.add_option("-m", "--maxdepth", action="store", dest="maxdepth", help="Maximum depth of submodules to show in the TOC (default=4)", type="int", default=4) parser.add_option("-r", "--dry-run", action="store_true", dest="dryrun", help="Run the script without creating the files") parser.add_option("-f", "--force", action="store_true", dest="force", help="Overwrite all the files") parser.add_option("-t", "--no-toc", action="store_true", dest="notoc", help="Don't create the table of content file") (opts, args) = parser.parse_args() if not args: parser.error("package path is required.") else: rootpath, excludes = args[0], args[1:] if os.path.isdir(rootpath): # check if the output destination is a valid directory if opts.destdir and os.path.isdir(opts.destdir): excludes = normalize_excludes(rootpath, excludes) recurse_tree(rootpath, excludes, opts) else: print '%s is not a valid output destination directory.' % opts.destdir else: print '%s is not a valid directory.' % rootpath
[ "def", "main", "(", ")", ":", "parser", "=", "optparse", ".", "OptionParser", "(", "usage", "=", "\"\"\"usage: %prog [options] <package path> [exclude paths, ...]\n\nNote: By default this script will not overwrite already created files.\"\"\"", ")", "parser", ".", "add_option", "(", "\"-n\"", ",", "\"--doc-header\"", ",", "action", "=", "\"store\"", ",", "dest", "=", "\"header\"", ",", "help", "=", "\"Documentation Header (default=Project)\"", ",", "default", "=", "\"Project\"", ")", "parser", ".", "add_option", "(", "\"-d\"", ",", "\"--dest-dir\"", ",", "action", "=", "\"store\"", ",", "dest", "=", "\"destdir\"", ",", "help", "=", "\"Output destination directory\"", ",", "default", "=", "\"\"", ")", "parser", ".", "add_option", "(", "\"-s\"", ",", "\"--suffix\"", ",", "action", "=", "\"store\"", ",", "dest", "=", "\"suffix\"", ",", "help", "=", "\"module suffix (default=txt)\"", ",", "default", "=", "\"txt\"", ")", "parser", ".", "add_option", "(", "\"-m\"", ",", "\"--maxdepth\"", ",", "action", "=", "\"store\"", ",", "dest", "=", "\"maxdepth\"", ",", "help", "=", "\"Maximum depth of submodules to show in the TOC (default=4)\"", ",", "type", "=", "\"int\"", ",", "default", "=", "4", ")", "parser", ".", "add_option", "(", "\"-r\"", ",", "\"--dry-run\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"dryrun\"", ",", "help", "=", "\"Run the script without creating the files\"", ")", "parser", ".", "add_option", "(", "\"-f\"", ",", "\"--force\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"force\"", ",", "help", "=", "\"Overwrite all the files\"", ")", "parser", ".", "add_option", "(", "\"-t\"", ",", "\"--no-toc\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"notoc\"", ",", "help", "=", "\"Don't create the table of content file\"", ")", "(", "opts", ",", "args", ")", "=", "parser", ".", "parse_args", "(", ")", "if", "not", "args", ":", "parser", ".", "error", "(", "\"package path is required.\"", ")", "else", ":", "rootpath", ",", "excludes", "=", "args", "[", "0", "]", ",", "args", "[", "1", ":", "]", "if", "os", ".", "path", ".", "isdir", "(", "rootpath", ")", ":", "# check if the output destination is a valid directory", "if", "opts", ".", "destdir", "and", "os", ".", "path", ".", "isdir", "(", "opts", ".", "destdir", ")", ":", "excludes", "=", "normalize_excludes", "(", "rootpath", ",", "excludes", ")", "recurse_tree", "(", "rootpath", ",", "excludes", ",", "opts", ")", "else", ":", "print", "'%s is not a valid output destination directory.'", "%", "opts", ".", "destdir", "else", ":", "print", "'%s is not a valid directory.'", "%", "rootpath" ]
Parse and check the command line arguments.
[ "Parse", "and", "check", "the", "command", "line", "arguments", "." ]
python
train
salu133445/pypianoroll
pypianoroll/multitrack.py
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L282-L299
def get_active_length(self): """ Return the maximum active length (i.e., without trailing silence) among the pianorolls of all tracks. The unit is time step. Returns ------- active_length : int The maximum active length (i.e., without trailing silence) among the pianorolls of all tracks. The unit is time step. """ active_length = 0 for track in self.tracks: now_length = track.get_active_length() if active_length < track.get_active_length(): active_length = now_length return active_length
[ "def", "get_active_length", "(", "self", ")", ":", "active_length", "=", "0", "for", "track", "in", "self", ".", "tracks", ":", "now_length", "=", "track", ".", "get_active_length", "(", ")", "if", "active_length", "<", "track", ".", "get_active_length", "(", ")", ":", "active_length", "=", "now_length", "return", "active_length" ]
Return the maximum active length (i.e., without trailing silence) among the pianorolls of all tracks. The unit is time step. Returns ------- active_length : int The maximum active length (i.e., without trailing silence) among the pianorolls of all tracks. The unit is time step.
[ "Return", "the", "maximum", "active", "length", "(", "i", ".", "e", ".", "without", "trailing", "silence", ")", "among", "the", "pianorolls", "of", "all", "tracks", ".", "The", "unit", "is", "time", "step", "." ]
python
train
Azure/blobxfer
blobxfer/operations/azure/__init__.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/operations/azure/__init__.py#L188-L208
def _key_is_sas(key): # type: (str) -> bool """Determine if key is a sas :param str key: key to parse :rtype: bool :return: if key is a sas """ # keys starting with ? are sas keys as ? is not in the base-64 # character range if key.startswith('?'): return True else: # & is not in the base-64 character range, so technically # the presence of this character means the key is a sas. however, # perform a stronger check for the sig= parameter. tmp = key.split('&') if len(tmp) == 1: return False elif any(x.startswith('sig=') for x in tmp): return True return False
[ "def", "_key_is_sas", "(", "key", ")", ":", "# type: (str) -> bool", "# keys starting with ? are sas keys as ? is not in the base-64", "# character range", "if", "key", ".", "startswith", "(", "'?'", ")", ":", "return", "True", "else", ":", "# & is not in the base-64 character range, so technically", "# the presence of this character means the key is a sas. however,", "# perform a stronger check for the sig= parameter.", "tmp", "=", "key", ".", "split", "(", "'&'", ")", "if", "len", "(", "tmp", ")", "==", "1", ":", "return", "False", "elif", "any", "(", "x", ".", "startswith", "(", "'sig='", ")", "for", "x", "in", "tmp", ")", ":", "return", "True", "return", "False" ]
Determine if key is a sas :param str key: key to parse :rtype: bool :return: if key is a sas
[ "Determine", "if", "key", "is", "a", "sas", ":", "param", "str", "key", ":", "key", "to", "parse", ":", "rtype", ":", "bool", ":", "return", ":", "if", "key", "is", "a", "sas" ]
python
train
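A few illustrative inputs for the `_key_is_sas` heuristic (the credentials are fake and shortened); they exercise the three branches: a leading '?', a `sig=` query component, and a plain account key with no '&'.

    print(_key_is_sas("?sv=2020-08-04&ss=b&sig=abc123"))   # True: starts with '?'
    print(_key_is_sas("sv=2020-08-04&ss=b&sig=abc123"))    # True: contains a sig= component
    print(_key_is_sas("dGhpc2lzYWZha2VhY2NvdW50a2V5"))     # False: looks like a base-64 account key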
pyupio/pyup
pyup/bot.py
https://github.com/pyupio/pyup/blob/b20fa88e03cfdf5dc409a9f00d27629188171c31/pyup/bot.py#L275-L294
def is_bot_the_only_committer(self, pr): """ Checks if the bot is the only committer for the given pull request. :param update: Update to check :return: bool - True if conflict found """ committer = self.provider.get_pull_request_committer( self.user_repo, pr) # flatten the list and remove duplicates committer_set = set([c.login for c in committer]) # it's impossible to get the bots login if this is an integration, just check that # there's only one commit in the commit history. if self.integration or getattr(self.provider, 'name', '') == 'gitlab': return len(committer_set) == 1 # check that there's exactly one committer in this PRs commit history and # that the committer is the bot return len(committer_set) == 1 and self.provider.is_same_user(self.bot, committer[0])
[ "def", "is_bot_the_only_committer", "(", "self", ",", "pr", ")", ":", "committer", "=", "self", ".", "provider", ".", "get_pull_request_committer", "(", "self", ".", "user_repo", ",", "pr", ")", "# flatten the list and remove duplicates", "committer_set", "=", "set", "(", "[", "c", ".", "login", "for", "c", "in", "committer", "]", ")", "# it's impossible to get the bots login if this is an integration, just check that", "# there's only one commit in the commit history.", "if", "self", ".", "integration", "or", "getattr", "(", "self", ".", "provider", ",", "'name'", ",", "''", ")", "==", "'gitlab'", ":", "return", "len", "(", "committer_set", ")", "==", "1", "# check that there's exactly one committer in this PRs commit history and", "# that the committer is the bot", "return", "len", "(", "committer_set", ")", "==", "1", "and", "self", ".", "provider", ".", "is_same_user", "(", "self", ".", "bot", ",", "committer", "[", "0", "]", ")" ]
Checks if the bot is the only committer for the given pull request. :param update: Update to check :return: bool - True if conflict found
[ "Checks", "if", "the", "bot", "is", "the", "only", "committer", "for", "the", "given", "pull", "request", ".", ":", "param", "update", ":", "Update", "to", "check", ":", "return", ":", "bool", "-", "True", "if", "conflict", "found" ]
python
train
enkore/i3pystatus
i3pystatus/core/io.py
https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/core/io.py#L193-L203
def parse_line(self, line): """Parse a single line of JSON and write modified JSON back.""" prefix = "" # ignore comma at start of lines if line.startswith(","): line, prefix = line[1:], "," j = json.loads(line) yield j self.io.write_line(prefix + json.dumps(j))
[ "def", "parse_line", "(", "self", ",", "line", ")", ":", "prefix", "=", "\"\"", "# ignore comma at start of lines", "if", "line", ".", "startswith", "(", "\",\"", ")", ":", "line", ",", "prefix", "=", "line", "[", "1", ":", "]", ",", "\",\"", "j", "=", "json", ".", "loads", "(", "line", ")", "yield", "j", "self", ".", "io", ".", "write_line", "(", "prefix", "+", "json", ".", "dumps", "(", "j", ")", ")" ]
Parse a single line of JSON and write modified JSON back.
[ "Parse", "a", "single", "line", "of", "JSON", "and", "write", "modified", "JSON", "back", "." ]
python
train
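A rough standalone sketch of the protocol `parse_line` speaks: each status line is JSON, optionally prefixed by a comma in the i3bar stream; it is parsed, handed to the caller for mutation, then written back out. The class and its IO wrapper are dropped here, and the sample block is invented.

    import json

    def parse_status_line(line, write):
        prefix = ""
        if line.startswith(","):          # i3bar separates successive status lines with commas
            line, prefix = line[1:], ","
        blocks = json.loads(line)
        yield blocks                      # the caller may modify the block list here
        write(prefix + json.dumps(blocks))

    out = []
    gen = parse_status_line(',[{"full_text": "CPU 12%"}]', out.append)
    blocks = next(gen)
    blocks[0]["full_text"] = "CPU 99%"
    try:
        next(gen)
    except StopIteration:
        pass
    print(out[0])  # ,[{"full_text": "CPU 99%"}]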
OLC-Bioinformatics/sipprverse
sixteenS/sixteens_full.py
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sixteenS/sixteens_full.py#L235-L259
def makeblastdb(self): """ Makes blast database files from targets as necessary """ # Iterate through the samples to set the bait file. for sample in self.runmetadata.samples: if sample.general.bestassemblyfile != 'NA': # Remove the file extension db = os.path.splitext(sample[self.analysistype].baitfile)[0] # Add '.nhr' for searching below nhr = '{}.nhr'.format(db) # Check for already existing database files if not os.path.isfile(str(nhr)): # Create the databases command = 'makeblastdb -in {} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {}'\ .format(sample[self.analysistype].baitfile, db) out, err = run_subprocess(command) write_to_logfile(command, command, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr)
[ "def", "makeblastdb", "(", "self", ")", ":", "# Iterate through the samples to set the bait file.", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", ":", "# Remove the file extension", "db", "=", "os", ".", "path", ".", "splitext", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "baitfile", ")", "[", "0", "]", "# Add '.nhr' for searching below", "nhr", "=", "'{}.nhr'", ".", "format", "(", "db", ")", "# Check for already existing database files", "if", "not", "os", ".", "path", ".", "isfile", "(", "str", "(", "nhr", ")", ")", ":", "# Create the databases", "command", "=", "'makeblastdb -in {} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {}'", ".", "format", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "baitfile", ",", "db", ")", "out", ",", "err", "=", "run_subprocess", "(", "command", ")", "write_to_logfile", "(", "command", ",", "command", ",", "self", ".", "logfile", ",", "sample", ".", "general", ".", "logout", ",", "sample", ".", "general", ".", "logerr", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "logout", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "logerr", ")", "write_to_logfile", "(", "out", ",", "err", ",", "self", ".", "logfile", ",", "sample", ".", "general", ".", "logout", ",", "sample", ".", "general", ".", "logerr", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "logout", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "logerr", ")" ]
Makes blast database files from targets as necessary
[ "Makes", "blast", "database", "files", "from", "targets", "as", "necessary" ]
python
train
iotile/coretools
transport_plugins/bled112/iotile_transport_bled112/bled112.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/bled112/iotile_transport_bled112/bled112.py#L140-L147
def reset_scan_stats(self):
        """Clears the scan event statistics and updates the last reset time"""
        self._scan_event_count = 0
        self._v1_scan_count = 0
        self._v1_scan_response_count = 0
        self._v2_scan_count = 0
        self._device_scan_counts = {}
        self._last_reset_time = time.time()
[ "def", "reset_scan_stats", "(", "self", ")", ":", "self", ".", "_scan_event_count", "=", "0", "self", ".", "_v1_scan_count", "=", "0", "self", ".", "_v1_scan_response_count", "=", "0", "self", ".", "_v2_scan_count", "=", "0", "self", ".", "_device_scan_counts", "=", "{", "}", "self", ".", "_last_reset_time", "=", "time", ".", "time", "(", ")" ]
Clears the scan event statistics and updates the last reset time
[ "Clears", "the", "scan", "event", "statistics", "and", "updates", "the", "last", "reset", "time" ]
python
train
nvbn/thefuck
thefuck/shells/fish.py
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/shells/fish.py#L107-L112
def info(self):
        """Returns the name and version of the current shell"""
        proc = Popen(['fish', '--version'], stdout=PIPE, stderr=DEVNULL)
        version = proc.stdout.read().decode('utf-8').split()[-1]
        return u'Fish Shell {}'.format(version)
[ "def", "info", "(", "self", ")", ":", "proc", "=", "Popen", "(", "[", "'fish'", ",", "'--version'", "]", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "DEVNULL", ")", "version", "=", "proc", ".", "stdout", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ".", "split", "(", ")", "[", "-", "1", "]", "return", "u'Fish Shell {}'", ".", "format", "(", "version", ")" ]
Returns the name and version of the current shell
[ "Returns", "the", "name", "and", "version", "of", "the", "current", "shell" ]
python
train
tilezen/mapbox-vector-tile
mapbox_vector_tile/optimise.py
https://github.com/tilezen/mapbox-vector-tile/blob/7327b8cff0aa2de1d5233e556bf00429ba2126a0/mapbox_vector_tile/optimise.py#L226-L247
def optimise_tile(tile_bytes): """ Decode a sequence of bytes as an MVT tile and reorder the string table of its layers and the order of its multilinestrings to save a few bytes. """ t = tile() t.ParseFromString(tile_bytes) for layer in t.layers: sto = StringTableOptimiser() for feature in layer.features: # (multi)linestrings only if feature.type == 2: optimise_multilinestring(feature.geometry) sto.add_tags(feature.tags) sto.update_string_table(layer) return t.SerializeToString()
[ "def", "optimise_tile", "(", "tile_bytes", ")", ":", "t", "=", "tile", "(", ")", "t", ".", "ParseFromString", "(", "tile_bytes", ")", "for", "layer", "in", "t", ".", "layers", ":", "sto", "=", "StringTableOptimiser", "(", ")", "for", "feature", "in", "layer", ".", "features", ":", "# (multi)linestrings only", "if", "feature", ".", "type", "==", "2", ":", "optimise_multilinestring", "(", "feature", ".", "geometry", ")", "sto", ".", "add_tags", "(", "feature", ".", "tags", ")", "sto", ".", "update_string_table", "(", "layer", ")", "return", "t", ".", "SerializeToString", "(", ")" ]
Decode a sequence of bytes as an MVT tile and reorder the string table of its layers and the order of its multilinestrings to save a few bytes.
[ "Decode", "a", "sequence", "of", "bytes", "as", "an", "MVT", "tile", "and", "reorder", "the", "string", "table", "of", "its", "layers", "and", "the", "order", "of", "its", "multilinestrings", "to", "save", "a", "few", "bytes", "." ]
python
train
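A hedged usage sketch for the optimise_tile entry above: the function takes raw MVT bytes and returns re-encoded bytes, so a round trip is read, optimise, compare sizes. The import path is assumed from the file path listed in the entry, and the tile filename is hypothetical.

from mapbox_vector_tile.optimise import optimise_tile   # import path assumed from the entry's file path

with open("12_654_1583.mvt", "rb") as fh:   # hypothetical tile file on disk
    tile_bytes = fh.read()

smaller = optimise_tile(tile_bytes)
print(len(tile_bytes), "->", len(smaller))   # byte counts before and after reordering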
fastai/fastai
fastai/callbacks/general_sched.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callbacks/general_sched.py#L40-L46
def on_batch_end(self, train, **kwargs:Any)->None:
        "Take a step in lr,mom sched, start next stepper when the current one is complete."
        if train:
            if self.idx_s >= len(self.scheds): return {'stop_training': True, 'stop_epoch': True}
            sched = self.scheds[self.idx_s]
            for k,v in sched.items(): self.opt.set_stat(k, v.step())
            if list(sched.values())[0].is_done: self.idx_s += 1
[ "def", "on_batch_end", "(", "self", ",", "train", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "None", ":", "if", "train", ":", "if", "self", ".", "idx_s", ">=", "len", "(", "self", ".", "scheds", ")", ":", "return", "{", "'stop_training'", ":", "True", ",", "'stop_epoch'", ":", "True", "}", "sched", "=", "self", ".", "scheds", "[", "self", ".", "idx_s", "]", "for", "k", ",", "v", "in", "sched", ".", "items", "(", ")", ":", "self", ".", "opt", ".", "set_stat", "(", "k", ",", "v", ".", "step", "(", ")", ")", "if", "list", "(", "sched", ".", "values", "(", ")", ")", "[", "0", "]", ".", "is_done", ":", "self", ".", "idx_s", "+=", "1" ]
Take a step in lr,mom sched, start next stepper when the current one is complete.
[ "Take", "a", "step", "in", "lr", "mom", "sched", "start", "next", "stepper", "when", "the", "current", "one", "is", "complete", "." ]
python
train
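The fastai callback above steps every stat in the current schedule each batch and moves to the next schedule once the first stepper reports is_done. A toy standalone sketch of that control flow, with a counter-based Stepper standing in for fastai's real annealing steppers (names and values here are illustrative only).

class Stepper:
    # minimal stand-in for a fastai parameter stepper
    def __init__(self, values):
        self.values, self.i = values, 0
    def step(self):
        value = self.values[self.i]
        self.i += 1
        return value
    @property
    def is_done(self):
        return self.i >= len(self.values)

scheds = [{"lr": Stepper([0.1, 0.2])}, {"lr": Stepper([0.05])}]
idx_s = 0
for batch in range(4):
    if idx_s >= len(scheds):
        print("stop training")          # mirrors the {'stop_training': True} return above
        break
    sched = scheds[idx_s]
    for k, v in sched.items():
        print("batch", batch, k, v.step())
    if list(sched.values())[0].is_done:
        idx_s += 1                      # advance to the next phase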
toejough/pimento
pimento/__init__.py
https://github.com/toejough/pimento/blob/cdb00a93976733aa5521f8504152cedeedfc711a/pimento/__init__.py#L298-L311
def _exact_match(response, matches, insensitive, fuzzy):
    '''
    returns an exact match, if it exists, given parameters for the match
    '''
    for match in matches:
        if response == match:
            return match
        elif insensitive and response.lower() == match.lower():
            return match
        elif fuzzy and _exact_fuzzy_match(response, match, insensitive):
            return match
    else:
        return None
[ "def", "_exact_match", "(", "response", ",", "matches", ",", "insensitive", ",", "fuzzy", ")", ":", "for", "match", "in", "matches", ":", "if", "response", "==", "match", ":", "return", "match", "elif", "insensitive", "and", "response", ".", "lower", "(", ")", "==", "match", ".", "lower", "(", ")", ":", "return", "match", "elif", "fuzzy", "and", "_exact_fuzzy_match", "(", "response", ",", "match", ",", "insensitive", ")", ":", "return", "match", "else", ":", "return", "None" ]
returns an exact match, if it exists, given parameters for the match
[ "returns", "an", "exact", "match", "if", "it", "exists", "given", "parameters", "for", "the", "match" ]
python
train
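A standalone illustration of the matching order in the _exact_match entry above, leaving out the fuzzy branch (it depends on _exact_fuzzy_match, which is not shown in the entry): exact equality is tried first, then a case-insensitive comparison when requested.

def exact_match(response, matches, insensitive=False):
    # simplified re-implementation for illustration; the fuzzy branch is omitted
    for match in matches:
        if response == match:
            return match
        if insensitive and response.lower() == match.lower():
            return match
    return None

print(exact_match("Red", ["red", "green"]))                     # None
print(exact_match("Red", ["red", "green"], insensitive=True))   # red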
saltstack/salt
salt/modules/ps.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L416-L435
def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict())
[ "def", "virtual_memory", "(", ")", ":", "if", "psutil", ".", "version_info", "<", "(", "0", ",", "6", ",", "0", ")", ":", "msg", "=", "'virtual_memory is only available in psutil 0.6.0 or greater'", "raise", "CommandExecutionError", "(", "msg", ")", "return", "dict", "(", "psutil", ".", "virtual_memory", "(", ")", ".", "_asdict", "(", ")", ")" ]
.. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory
[ "..", "versionadded", "::", "2014", ".", "7", ".", "0" ]
python
train
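Outside of Salt, the same dictionary comes straight from psutil; a short sketch guarded by the same version check used in the entry above (psutil must be installed).

import psutil

if psutil.version_info < (0, 6, 0):
    raise RuntimeError("virtual_memory is only available in psutil 0.6.0 or greater")

mem = dict(psutil.virtual_memory()._asdict())   # namedtuple -> plain dict, as in the entry
print(mem["total"], mem["percent"])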
Esri/ArcREST
src/arcrest/manageorg/_portals.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_portals.py#L1367-L1408
def cost(self, tileStorage=0, fileStorage=0, featureStorage=0, generatedTileCount=0, loadedTileCount=0, enrichVariableCount=0, enrichReportCount=0, serviceAreaCount=0, geocodeCount=0): """ returns the cost values for a given portal Inputs: tileStorage - int - numbe of tiles to store in MBs fileStorage - int - size of file to store in MBs featureStorage - int - size in MBs generateTileCount - int - number of tiles to genearte on site loadedTileCount -int- cost to host a certian number of tiles enrichVariableCount - int - cost to enrich data enrichReportCount - int - cost to generate an enrichment report serviceAreaCount - int - cost to generate x number of service areas geocodeCount - int - cost to generate x number of addresses """ params = { "f" : "json", "tileStorage": tileStorage, "fileStorage": fileStorage, "featureStorage": featureStorage, "generatedTileCount": generatedTileCount, "loadedTileCount":loadedTileCount, "enrichVariableCount": enrichVariableCount, "enrichReportCount" : enrichReportCount, "serviceAreaCount" : serviceAreaCount, "geocodeCount" : geocodeCount } url = self._url + "/cost" return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "cost", "(", "self", ",", "tileStorage", "=", "0", ",", "fileStorage", "=", "0", ",", "featureStorage", "=", "0", ",", "generatedTileCount", "=", "0", ",", "loadedTileCount", "=", "0", ",", "enrichVariableCount", "=", "0", ",", "enrichReportCount", "=", "0", ",", "serviceAreaCount", "=", "0", ",", "geocodeCount", "=", "0", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"tileStorage\"", ":", "tileStorage", ",", "\"fileStorage\"", ":", "fileStorage", ",", "\"featureStorage\"", ":", "featureStorage", ",", "\"generatedTileCount\"", ":", "generatedTileCount", ",", "\"loadedTileCount\"", ":", "loadedTileCount", ",", "\"enrichVariableCount\"", ":", "enrichVariableCount", ",", "\"enrichReportCount\"", ":", "enrichReportCount", ",", "\"serviceAreaCount\"", ":", "serviceAreaCount", ",", "\"geocodeCount\"", ":", "geocodeCount", "}", "url", "=", "self", ".", "_url", "+", "\"/cost\"", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
returns the cost values for a given portal Inputs: tileStorage - int - number of tiles to store in MBs fileStorage - int - size of file to store in MBs featureStorage - int - size in MBs generatedTileCount - int - number of tiles to generate on site loadedTileCount - int - cost to host a certain number of tiles enrichVariableCount - int - cost to enrich data enrichReportCount - int - cost to generate an enrichment report serviceAreaCount - int - cost to generate x number of service areas geocodeCount - int - cost to generate x number of addresses
[ "returns", "the", "cost", "values", "for", "a", "given", "portal", "Inputs", ":", "tileStorage", "-", "int", "-", "numbe", "of", "tiles", "to", "store", "in", "MBs", "fileStorage", "-", "int", "-", "size", "of", "file", "to", "store", "in", "MBs", "featureStorage", "-", "int", "-", "size", "in", "MBs", "generateTileCount", "-", "int", "-", "number", "of", "tiles", "to", "genearte", "on", "site", "loadedTileCount", "-", "int", "-", "cost", "to", "host", "a", "certian", "number", "of", "tiles", "enrichVariableCount", "-", "int", "-", "cost", "to", "enrich", "data", "enrichReportCount", "-", "int", "-", "cost", "to", "generate", "an", "enrichment", "report", "serviceAreaCount", "-", "int", "-", "cost", "to", "generate", "x", "number", "of", "service", "areas", "geocodeCount", "-", "int", "-", "cost", "to", "generate", "x", "number", "of", "addresses" ]
python
train
CEA-COSMIC/ModOpt
modopt/opt/proximity.py
https://github.com/CEA-COSMIC/ModOpt/blob/019b189cb897cbb4d210c44a100daaa08468830c/modopt/opt/proximity.py#L90-L105
def _cost_method(self, *args, **kwargs): """Calculate positivity component of the cost This method returns 0 as the posivituty does not contribute to the cost. Returns ------- float zero """ if 'verbose' in kwargs and kwargs['verbose']: print(' - Min (X):', np.min(args[0])) return 0.0
[ "def", "_cost_method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "'verbose'", "in", "kwargs", "and", "kwargs", "[", "'verbose'", "]", ":", "print", "(", "' - Min (X):'", ",", "np", ".", "min", "(", "args", "[", "0", "]", ")", ")", "return", "0.0" ]
Calculate positivity component of the cost This method returns 0 as the positivity constraint does not contribute to the cost. Returns ------- float zero
[ "Calculate", "positivity", "component", "of", "the", "cost" ]
python
train
googledatalab/pydatalab
solutionbox/ml_workbench/tensorflow/transform.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/transform.py#L373-L389
def encode_csv(data_dict, column_names):
  """Builds a csv string.

  Args:
    data_dict: dict of {column_name: 1 value}
    column_names: list of column names

  Returns:
    A csv string version of data_dict
  """
  import csv
  import six

  values = [str(data_dict[x]) for x in column_names]
  str_buff = six.StringIO()
  writer = csv.writer(str_buff, lineterminator='')
  writer.writerow(values)
  return str_buff.getvalue()
[ "def", "encode_csv", "(", "data_dict", ",", "column_names", ")", ":", "import", "csv", "import", "six", "values", "=", "[", "str", "(", "data_dict", "[", "x", "]", ")", "for", "x", "in", "column_names", "]", "str_buff", "=", "six", ".", "StringIO", "(", ")", "writer", "=", "csv", ".", "writer", "(", "str_buff", ",", "lineterminator", "=", "''", ")", "writer", ".", "writerow", "(", "values", ")", "return", "str_buff", ".", "getvalue", "(", ")" ]
Builds a csv string. Args: data_dict: dict of {column_name: 1 value} column_names: list of column names Returns: A csv string version of data_dict
[ "Builds", "a", "csv", "string", "." ]
python
train
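The encode_csv entry above routes the values through csv.writer so quoting and escaping are handled for free. A standalone re-run of the same logic, using io.StringIO in place of six.StringIO (equivalent on Python 3):

import csv
import io

def encode_csv(data_dict, column_names):
    values = [str(data_dict[x]) for x in column_names]
    buff = io.StringIO()
    csv.writer(buff, lineterminator='').writerow(values)
    return buff.getvalue()

print(encode_csv({"name": "a,b", "count": 3}, ["name", "count"]))   # prints: "a,b",3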
tensorflow/mesh
mesh_tensorflow/auto_mtf/graph_interface.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L259-L272
def get_tensor_mtf_dimension_names(self, tensor_name):
    """The Mesh TensorFlow dimensions associated with a tensor.

    Args:
      tensor_name: a string, name of a tensor in the graph.

    Returns:
      a [string], the names of Mesh TensorFlow dimensions.
    """
    tensor = self._name_to_tensor(tensor_name)
    if isinstance(tensor, mtf.Tensor):
      return tensor.shape.dimension_names
    else:  # tf.Tensor
      return []
[ "def", "get_tensor_mtf_dimension_names", "(", "self", ",", "tensor_name", ")", ":", "tensor", "=", "self", ".", "_name_to_tensor", "(", "tensor_name", ")", "if", "isinstance", "(", "tensor", ",", "mtf", ".", "Tensor", ")", ":", "return", "tensor", ".", "shape", ".", "dimension_names", "else", ":", "# tf.Tensor", "return", "[", "]" ]
The Mesh TensorFlow dimensions associated with a tensor. Args: tensor_name: a string, name of a tensor in the graph. Returns: a [string], the names of Mesh TensorFlow dimensions.
[ "The", "Mesh", "TensorFlow", "dimensions", "associated", "with", "a", "tensor", "." ]
python
train
merll/docker-map
dockermap/dep.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/dep.py#L108-L124
def get_dependencies(self, item):
        """
        Performs a dependency check on the given item.

        :param item: Node to start the dependency check with.
        :return: The result on merged dependencies down the hierarchy.
        """
        def _get_sub_dependency(sub_item):
            e = self._deps.get(sub_item)
            if e is None:
                return self.get_default()
            if e.dependencies is NotInitialized:
                e.dependencies = self.merge_dependency(sub_item, _get_sub_dependency, e.parent)
            return e.dependencies

        return _get_sub_dependency(item)
[ "def", "get_dependencies", "(", "self", ",", "item", ")", ":", "def", "_get_sub_dependency", "(", "sub_item", ")", ":", "e", "=", "self", ".", "_deps", ".", "get", "(", "sub_item", ")", "if", "e", "is", "None", ":", "return", "self", ".", "get_default", "(", ")", "if", "e", ".", "dependencies", "is", "NotInitialized", ":", "e", ".", "dependencies", "=", "self", ".", "merge_dependency", "(", "sub_item", ",", "_get_sub_dependency", ",", "e", ".", "parent", ")", "return", "e", ".", "dependencies", "return", "_get_sub_dependency", "(", "item", ")" ]
Performs a dependency check on the given item. :param item: Node to start the dependency check with. :return: The result on merged dependencies down the hierarchy.
[ "Performs", "a", "dependency", "check", "on", "the", "given", "item", "." ]
python
train
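The docker-map entry above memoises merged dependencies per node so repeated lookups are cheap. A toy sketch of the same compute-once pattern over a plain dict of parent links; the names and the list-based merge are illustrative, not docker-map's real merge_dependency.

NotInitialized = object()

class Entry:
    def __init__(self, parent):
        self.parent = parent
        self.dependencies = NotInitialized   # filled in lazily, then reused

deps = {"app": Entry("db"), "db": Entry(None)}

def get_dependencies(item):
    def resolve(name):
        entry = deps.get(name)
        if entry is None:
            return []                              # default for unknown nodes
        if entry.dependencies is NotInitialized:   # compute once, cache on the entry
            if entry.parent is None:
                entry.dependencies = []
            else:
                entry.dependencies = resolve(entry.parent) + [entry.parent]
        return entry.dependencies
    return resolve(item)

print(get_dependencies("app"))   # ['db']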
ToFuProject/tofu
tofu/pathfile.py
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/pathfile.py#L73-L121
def get_PolyFromPolyFileObj(PolyFileObj, SavePathInp=None, units='m', comments='#', skiprows=0, shape0=2): """ Return a polygon as a np.ndarray, extracted from a txt file or from a ToFu object, with appropriate units Useful for :meth:`tofu.plugins.AUG.Ves._create()` Parameters ---------- PolyFileObj : str / :mod:`tofu.geom` object / np.ndarray The source where the polygon is to be found, either: - str: the name of a file containing the coorindates of a polygon to be loaded with :meth:`numpy.loadtxt()` - A :mod:`tofu.geom` object: with attribute 'Poly' - np.ndarray: an 2-dimensional array containing the 2D cartesian coordinates of a polygon SavePathInp : str / None The absolute path where the input file is stored units : str Flag indicating in which units the polygon coordinates is expressed in the input file / object / array (will be converted to meters) comments : str Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name skiprows : int Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name shape0 : int Specifies whether the loaded array is a (2,N) or (3,N) array (transposed it if necessary) Returns ------- Poly : np.ndarray (2,N) np.ndarray containing the 2D cartesian coordinates of the polygon, where N is the number of points addInfo : dict Dictionaryb containing information on the origin of the polygon, for the record (e.g.: the name and absolute path of the file from which it was extracted) """ assert type(PolyFileObj) in [list,str] or hasattr(PolyFileObj,"Poly") or np.asarray(PolyFileObj).ndim==2, "Arg PolyFileObj must be str (PathFileExt), a ToFu object with attribute Poly or an iterable convertible to 2d np.ndarray !" # Load PolyFileObj if file and check shape addInfo = {} if type(PolyFileObj) in [list,str]: PathFileExt = get_FileFromInfos(Path=SavePathInp, Name=PolyFileObj) # Include PathFileExt in ID for tracability addInfo = {'Input':PathFileExt} PolyFileObj = np.loadtxt(PathFileExt, dtype=float, comments=comments, delimiter=None, converters=None, skiprows=skiprows, usecols=None, unpack=False, ndmin=2) elif hasattr(PolyFileObj,"Poly"): addInfo = {'Input':PolyFileObj.Id.SaveName} PolyFileObj = PolyFileObj.Poly Poly = np.asarray(PolyFileObj) assert Poly.ndim==2 and shape0 in Poly.shape and max(Poly.shape)>=3 and not np.any(np.isnan(Poly)), "Arg np.asarray(PolyFileObj) must be a (2,N) or (N,2) np.ndarray with non NaNs !" Poly = Poly if Poly.shape[0]==shape0 else Poly.T Poly = convert_units(Poly, In=units, Out='m') return Poly, addInfo
[ "def", "get_PolyFromPolyFileObj", "(", "PolyFileObj", ",", "SavePathInp", "=", "None", ",", "units", "=", "'m'", ",", "comments", "=", "'#'", ",", "skiprows", "=", "0", ",", "shape0", "=", "2", ")", ":", "assert", "type", "(", "PolyFileObj", ")", "in", "[", "list", ",", "str", "]", "or", "hasattr", "(", "PolyFileObj", ",", "\"Poly\"", ")", "or", "np", ".", "asarray", "(", "PolyFileObj", ")", ".", "ndim", "==", "2", ",", "\"Arg PolyFileObj must be str (PathFileExt), a ToFu object with attribute Poly or an iterable convertible to 2d np.ndarray !\"", "# Load PolyFileObj if file and check shape", "addInfo", "=", "{", "}", "if", "type", "(", "PolyFileObj", ")", "in", "[", "list", ",", "str", "]", ":", "PathFileExt", "=", "get_FileFromInfos", "(", "Path", "=", "SavePathInp", ",", "Name", "=", "PolyFileObj", ")", "# Include PathFileExt in ID for tracability", "addInfo", "=", "{", "'Input'", ":", "PathFileExt", "}", "PolyFileObj", "=", "np", ".", "loadtxt", "(", "PathFileExt", ",", "dtype", "=", "float", ",", "comments", "=", "comments", ",", "delimiter", "=", "None", ",", "converters", "=", "None", ",", "skiprows", "=", "skiprows", ",", "usecols", "=", "None", ",", "unpack", "=", "False", ",", "ndmin", "=", "2", ")", "elif", "hasattr", "(", "PolyFileObj", ",", "\"Poly\"", ")", ":", "addInfo", "=", "{", "'Input'", ":", "PolyFileObj", ".", "Id", ".", "SaveName", "}", "PolyFileObj", "=", "PolyFileObj", ".", "Poly", "Poly", "=", "np", ".", "asarray", "(", "PolyFileObj", ")", "assert", "Poly", ".", "ndim", "==", "2", "and", "shape0", "in", "Poly", ".", "shape", "and", "max", "(", "Poly", ".", "shape", ")", ">=", "3", "and", "not", "np", ".", "any", "(", "np", ".", "isnan", "(", "Poly", ")", ")", ",", "\"Arg np.asarray(PolyFileObj) must be a (2,N) or (N,2) np.ndarray with non NaNs !\"", "Poly", "=", "Poly", "if", "Poly", ".", "shape", "[", "0", "]", "==", "shape0", "else", "Poly", ".", "T", "Poly", "=", "convert_units", "(", "Poly", ",", "In", "=", "units", ",", "Out", "=", "'m'", ")", "return", "Poly", ",", "addInfo" ]
Return a polygon as a np.ndarray, extracted from a txt file or from a ToFu object, with appropriate units Useful for :meth:`tofu.plugins.AUG.Ves._create()` Parameters ---------- PolyFileObj : str / :mod:`tofu.geom` object / np.ndarray The source where the polygon is to be found, either: - str: the name of a file containing the coordinates of a polygon to be loaded with :meth:`numpy.loadtxt()` - A :mod:`tofu.geom` object: with attribute 'Poly' - np.ndarray: a 2-dimensional array containing the 2D cartesian coordinates of a polygon SavePathInp : str / None The absolute path where the input file is stored units : str Flag indicating in which units the polygon coordinates are expressed in the input file / object / array (will be converted to meters) comments : str Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name skiprows : int Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name shape0 : int Specifies whether the loaded array is a (2,N) or (3,N) array (transposes it if necessary) Returns ------- Poly : np.ndarray (2,N) np.ndarray containing the 2D cartesian coordinates of the polygon, where N is the number of points addInfo : dict Dictionary containing information on the origin of the polygon, for the record (e.g.: the name and absolute path of the file from which it was extracted)
[ "Return", "a", "polygon", "as", "a", "np", ".", "ndarray", "extracted", "from", "a", "txt", "file", "or", "from", "a", "ToFu", "object", "with", "appropriate", "units" ]
python
train
molmod/molmod
molmod/molecular_graphs.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/molecular_graphs.py#L66-L73
def _check_symbols(self, symbols):
        """the size must be the same as the length of the array numbers and all elements must be strings"""
        if len(symbols) != self.size:
            raise TypeError("The number of symbols in the graph does not "
                            "match the length of the atomic numbers array.")
        for symbol in symbols:
            if not isinstance(symbol, str):
                raise TypeError("All symbols must be strings.")
[ "def", "_check_symbols", "(", "self", ",", "symbols", ")", ":", "if", "len", "(", "symbols", ")", "!=", "self", ".", "size", ":", "raise", "TypeError", "(", "\"The number of symbols in the graph does not \"", "\"match the length of the atomic numbers array.\"", ")", "for", "symbol", "in", "symbols", ":", "if", "not", "isinstance", "(", "symbol", ",", "str", ")", ":", "raise", "TypeError", "(", "\"All symbols must be strings.\"", ")" ]
the size must be the same as the length of the array numbers and all elements must be strings
[ "the", "size", "must", "be", "the", "same", "as", "the", "length", "of", "the", "array", "numbers", "and", "all", "elements", "must", "be", "strings" ]
python
train
markovmodel/msmtools
msmtools/flux/api.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/flux/api.py#L162-L230
def flux_matrix(T, pi, qminus, qplus, netflux=True): r"""Compute the TPT flux network for the reaction A-->B. Parameters ---------- T : (M, M) ndarray transition matrix pi : (M,) ndarray Stationary distribution corresponding to T qminus : (M,) ndarray Backward comittor qplus : (M,) ndarray Forward committor netflux : boolean True: net flux matrix will be computed False: gross flux matrix will be computed Returns ------- flux : (M, M) ndarray Matrix of flux values between pairs of states. Notes ----- Computation of the flux network relies on transition path theory (TPT) [1]. Here we use discrete transition path theory [2] in the transition matrix formulation [3]. See also -------- committor.forward_committor, committor.backward_committor Notes ----- Computation of the flux network relies on transition path theory (TPT). The central object used in transition path theory is the forward and backward comittor function. The TPT (gross) flux is defined as .. math:: f_{ij}=\left \{ \begin{array}{rl} \pi_i q_i^{(-)} p_{ij} q_j^{(+)} & i \neq j \\ 0 & i=j\ \end{array} \right . The TPT net flux is then defined as .. math:: f_{ij}=\max\{f_{ij} - f_{ji}, 0\} \:\:\:\forall i,j. References ---------- .. [1] W. E and E. Vanden-Eijnden. Towards a theory of transition paths. J. Stat. Phys. 123: 503-523 (2006) .. [2] P. Metzner, C. Schuette and E. Vanden-Eijnden. Transition Path Theory for Markov Jump Processes. Multiscale Model Simul 7: 1192-1219 (2009) .. [3] F. Noe, Ch. Schuette, E. Vanden-Eijnden, L. Reich and T. Weikl: Constructing the Full Ensemble of Folding Pathways from Short Off-Equilibrium Simulations. Proc. Natl. Acad. Sci. USA, 106, 19011-19016 (2009) """ if issparse(T): return sparse.tpt.flux_matrix(T, pi, qminus, qplus, netflux=netflux) elif isdense(T): return dense.tpt.flux_matrix(T, pi, qminus, qplus, netflux=netflux) else: raise _type_not_supported
[ "def", "flux_matrix", "(", "T", ",", "pi", ",", "qminus", ",", "qplus", ",", "netflux", "=", "True", ")", ":", "if", "issparse", "(", "T", ")", ":", "return", "sparse", ".", "tpt", ".", "flux_matrix", "(", "T", ",", "pi", ",", "qminus", ",", "qplus", ",", "netflux", "=", "netflux", ")", "elif", "isdense", "(", "T", ")", ":", "return", "dense", ".", "tpt", ".", "flux_matrix", "(", "T", ",", "pi", ",", "qminus", ",", "qplus", ",", "netflux", "=", "netflux", ")", "else", ":", "raise", "_type_not_supported" ]
r"""Compute the TPT flux network for the reaction A-->B. Parameters ---------- T : (M, M) ndarray transition matrix pi : (M,) ndarray Stationary distribution corresponding to T qminus : (M,) ndarray Backward comittor qplus : (M,) ndarray Forward committor netflux : boolean True: net flux matrix will be computed False: gross flux matrix will be computed Returns ------- flux : (M, M) ndarray Matrix of flux values between pairs of states. Notes ----- Computation of the flux network relies on transition path theory (TPT) [1]. Here we use discrete transition path theory [2] in the transition matrix formulation [3]. See also -------- committor.forward_committor, committor.backward_committor Notes ----- Computation of the flux network relies on transition path theory (TPT). The central object used in transition path theory is the forward and backward comittor function. The TPT (gross) flux is defined as .. math:: f_{ij}=\left \{ \begin{array}{rl} \pi_i q_i^{(-)} p_{ij} q_j^{(+)} & i \neq j \\ 0 & i=j\ \end{array} \right . The TPT net flux is then defined as .. math:: f_{ij}=\max\{f_{ij} - f_{ji}, 0\} \:\:\:\forall i,j. References ---------- .. [1] W. E and E. Vanden-Eijnden. Towards a theory of transition paths. J. Stat. Phys. 123: 503-523 (2006) .. [2] P. Metzner, C. Schuette and E. Vanden-Eijnden. Transition Path Theory for Markov Jump Processes. Multiscale Model Simul 7: 1192-1219 (2009) .. [3] F. Noe, Ch. Schuette, E. Vanden-Eijnden, L. Reich and T. Weikl: Constructing the Full Ensemble of Folding Pathways from Short Off-Equilibrium Simulations. Proc. Natl. Acad. Sci. USA, 106, 19011-19016 (2009)
[ "r", "Compute", "the", "TPT", "flux", "network", "for", "the", "reaction", "A", "--", ">", "B", "." ]
python
train
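The docstring above spells out the gross and net flux formulas; a small dense NumPy sketch of exactly those two lines, independent of msmtools' dense/sparse dispatch (the committors and stationary vector below are illustrative numbers, not computed ones).

import numpy as np

def flux_matrix_dense(T, pi, qminus, qplus, netflux=True):
    # gross flux: f_ij = pi_i * qminus_i * T_ij * qplus_j for i != j, zero on the diagonal
    flux = pi[:, None] * qminus[:, None] * T * qplus[None, :]
    np.fill_diagonal(flux, 0.0)
    if netflux:
        flux = np.maximum(flux - flux.T, 0.0)   # net flux: max(f_ij - f_ji, 0)
    return flux

T = np.array([[0.80, 0.15, 0.05],
              [0.10, 0.75, 0.15],
              [0.05, 0.10, 0.85]])
pi = np.array([0.3, 0.3, 0.4])
qminus = np.array([1.0, 0.5, 0.0])
qplus = np.array([0.0, 0.5, 1.0])
print(flux_matrix_dense(T, pi, qminus, qplus))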
danilobellini/audiolazy
audiolazy/lazy_itertools.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_itertools.py#L69-L76
def accumulate(iterable):
    " Return series of accumulated sums. "
    iterator = iter(iterable)
    sum_data = next(iterator)
    yield sum_data
    for el in iterator:
        sum_data += el
        yield sum_data
[ "def", "accumulate", "(", "iterable", ")", ":", "iterator", "=", "iter", "(", "iterable", ")", "sum_data", "=", "next", "(", "iterator", ")", "yield", "sum_data", "for", "el", "in", "iterator", ":", "sum_data", "+=", "el", "yield", "sum_data" ]
Return series of accumulated sums.
[ "Return", "series", "of", "accumulated", "sums", "." ]
python
train
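The accumulate entry above works for anything supporting +=, not just numbers; a quick standalone run. Note that an empty iterable makes next() raise, so callers are expected to pass at least one element.

def accumulate(iterable):
    " Return series of accumulated sums. "
    iterator = iter(iterable)
    sum_data = next(iterator)
    yield sum_data
    for el in iterator:
        sum_data += el
        yield sum_data

print(list(accumulate([1, 2, 3, 4])))      # [1, 3, 6, 10]
print(list(accumulate(["a", "b", "c"])))   # ['a', 'ab', 'abc']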
jxtech/wechatpy
wechatpy/client/api/invoice.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/invoice.py#L91-L113
def set_auth_field(self, user_field, biz_field): """ 设置授权页字段信息 详情请参考 https://mp.weixin.qq.com/wiki?id=mp1497082828_r1cI2 :param user_field: 授权页个人发票字段 :type user_field: dict :param biz_field: 授权页单位发票字段 :type biz_field: dict """ return self._post( 'setbizattr', params={ 'action': 'set_auth_field', }, data={ 'auth_field': { 'user_field': user_field, 'biz_field': biz_field, }, }, )
[ "def", "set_auth_field", "(", "self", ",", "user_field", ",", "biz_field", ")", ":", "return", "self", ".", "_post", "(", "'setbizattr'", ",", "params", "=", "{", "'action'", ":", "'set_auth_field'", ",", "}", ",", "data", "=", "{", "'auth_field'", ":", "{", "'user_field'", ":", "user_field", ",", "'biz_field'", ":", "biz_field", ",", "}", ",", "}", ",", ")" ]
Set the authorization page field information. For details see https://mp.weixin.qq.com/wiki?id=mp1497082828_r1cI2 :param user_field: personal invoice fields on the authorization page :type user_field: dict :param biz_field: organization invoice fields on the authorization page :type biz_field: dict
[ "设置授权页字段信息", "详情请参考", "https", ":", "//", "mp", ".", "weixin", ".", "qq", ".", "com", "/", "wiki?id", "=", "mp1497082828_r1cI2" ]
python
train
mojaie/chorus
chorus/util/geometry.py
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/util/geometry.py#L149-L160
def p_seg(p1, p2, cw, interval, trim=0, align=0):
    """ parallel segment
    Args:
        p1, p2: point(x, y)
        cw: m_seg rad True: -π/2, False: π/2
        interval: m_seg dist
        trim: t_seg trim
        align: t_seg align
    """
    case = {True: pi / -2, False: pi / 2}
    p1m, p2m = m_seg(p1, p2, case[cw], interval)
    return t_seg(p1m, p2m, trim, align)
[ "def", "p_seg", "(", "p1", ",", "p2", ",", "cw", ",", "interval", ",", "trim", "=", "0", ",", "align", "=", "0", ")", ":", "case", "=", "{", "True", ":", "pi", "/", "-", "2", ",", "False", ":", "pi", "/", "2", "}", "p1m", ",", "p2m", "=", "m_seg", "(", "p1", ",", "p2", ",", "case", "[", "cw", "]", ",", "interval", ")", "return", "t_seg", "(", "p1m", ",", "p2m", ",", "trim", ",", "align", ")" ]
parallel segment Args: p1, p2: point(x, y) cw: m_seg rad True: -π/2, False: π/2 interval: m_seg dist trim: t_seg trim align: t_seg align
[ "parallel", "segment", "Args", ":", "p1", "p2", ":", "point", "(", "x", "y", ")", "cw", ":", "m_seg", "rad", "True", ":", "-", "π", "/", "2", "False", ":", "π", "/", "2", "interval", ":", "m_seg", "dist", "trim", ":", "t_seg", "trim", "align", ":", "t_seg", "align" ]
python
train
moonso/vcftoolbox
vcftoolbox/parse_variant.py
https://github.com/moonso/vcftoolbox/blob/438fb1d85a83812c389774b94802eb5921c89e3a/vcftoolbox/parse_variant.py#L90-L111
def get_vep_info(vep_string, vep_header):
    """Make the vep annotations into dictionaries

    A vep dictionary will have the vep column names as keys and
    the vep annotations as values.
    The dictionaries are stored in a list

    Args:
        vep_string (string): A string with the CSQ annotation
        vep_header (list): A list with the vep header

    Return:
        vep_annotations (list): A list of vep dicts
    """
    vep_annotations = [
        dict(zip(vep_header, vep_annotation.split('|')))
        for vep_annotation in vep_string.split(',')
    ]

    return vep_annotations
[ "def", "get_vep_info", "(", "vep_string", ",", "vep_header", ")", ":", "vep_annotations", "=", "[", "dict", "(", "zip", "(", "vep_header", ",", "vep_annotation", ".", "split", "(", "'|'", ")", ")", ")", "for", "vep_annotation", "in", "vep_string", ".", "split", "(", "','", ")", "]", "return", "vep_annotations" ]
Make the vep annotations into dictionaries A vep dictionary will have the vep column names as keys and the vep annotations as values. The dictionaries are stored in a list Args: vep_string (string): A string with the CSQ annotation vep_header (list): A list with the vep header Return: vep_annotations (list): A list of vep dicts
[ "Make", "the", "vep", "annotations", "into", "a", "dictionaries", "A", "vep", "dictionary", "will", "have", "the", "vep", "column", "names", "as", "keys", "and", "the", "vep", "annotations", "as", "values", ".", "The", "dictionaries", "are", "stored", "in", "a", "list" ]
python
train
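The get_vep_info entry above zips each comma-separated CSQ record against the header after splitting on '|'. A standalone run with a made-up three-field header (real VEP headers are much longer):

def get_vep_info(vep_string, vep_header):
    return [dict(zip(vep_header, annotation.split('|')))
            for annotation in vep_string.split(',')]

header = ["Allele", "Consequence", "SYMBOL"]                 # hypothetical CSQ header
csq = "A|missense_variant|BRCA1,A|intron_variant|BRCA1"
for record in get_vep_info(csq, header):
    print(record["SYMBOL"], record["Consequence"])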
census-instrumentation/opencensus-python
contrib/opencensus-ext-django/opencensus/ext/django/middleware.py
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-django/opencensus/ext/django/middleware.py#L188-L204
def process_view(self, request, view_func, *args, **kwargs): """Process view is executed before the view function, here we get the function name add set it as the span name. """ # Do not trace if the url is blacklisted if utils.disable_tracing_url(request.path, self.blacklist_paths): return try: # Get the current span and set the span name to the current # function name of the request. tracer = _get_current_tracer() span = tracer.current_span() span.name = utils.get_func_name(view_func) except Exception: # pragma: NO COVER log.error('Failed to trace request', exc_info=True)
[ "def", "process_view", "(", "self", ",", "request", ",", "view_func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Do not trace if the url is blacklisted", "if", "utils", ".", "disable_tracing_url", "(", "request", ".", "path", ",", "self", ".", "blacklist_paths", ")", ":", "return", "try", ":", "# Get the current span and set the span name to the current", "# function name of the request.", "tracer", "=", "_get_current_tracer", "(", ")", "span", "=", "tracer", ".", "current_span", "(", ")", "span", ".", "name", "=", "utils", ".", "get_func_name", "(", "view_func", ")", "except", "Exception", ":", "# pragma: NO COVER", "log", ".", "error", "(", "'Failed to trace request'", ",", "exc_info", "=", "True", ")" ]
Process view is executed before the view function, here we get the function name add set it as the span name.
[ "Process", "view", "is", "executed", "before", "the", "view", "function", "here", "we", "get", "the", "function", "name", "add", "set", "it", "as", "the", "span", "name", "." ]
python
train
jldantas/libmft
libmft/api.py
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L903-L943
def _compute_full_path(self, fn_parent_ref, fn_parent_seq): '''Based on the parent reference and sequence, computes the full path. The majority of the files in a filesystem has a very small amount of parent directories. By definition, a filesystem is expected to have much smaller amount of directories than files. As such we use a function with the minimal amount of arguments to find a parent, that way we can cache the results easily and speed up the overall code. Args: fn_parent_ref (int): Parent reference number fn_parent_seq (int): Parent sequence number Returns: tuple(bool, str): A tuple where the first element is a boolean that is ``True`` if the the file is orphan and ``False`` if not. The second element is a string with the full path without the file name ''' names = [] root_id = 5 index, seq = fn_parent_ref, fn_parent_seq is_orphan = False #search until hit the root entry while index != root_id: try: parent_entry = self[index] #if the sequence number is wrong, something changed = orphan if seq != parent_entry.header.seq_number: is_orphan = True break else: parent_fn_attr = parent_entry.get_main_filename_attr() index, seq = parent_fn_attr.content.parent_ref, parent_fn_attr.content.parent_seq names.append(parent_fn_attr.content.name) except ValueError as e: #if the entry itself no longer exists = orphan is_orphan = True break return (is_orphan, "\\".join(reversed(names)))
[ "def", "_compute_full_path", "(", "self", ",", "fn_parent_ref", ",", "fn_parent_seq", ")", ":", "names", "=", "[", "]", "root_id", "=", "5", "index", ",", "seq", "=", "fn_parent_ref", ",", "fn_parent_seq", "is_orphan", "=", "False", "#search until hit the root entry", "while", "index", "!=", "root_id", ":", "try", ":", "parent_entry", "=", "self", "[", "index", "]", "#if the sequence number is wrong, something changed = orphan", "if", "seq", "!=", "parent_entry", ".", "header", ".", "seq_number", ":", "is_orphan", "=", "True", "break", "else", ":", "parent_fn_attr", "=", "parent_entry", ".", "get_main_filename_attr", "(", ")", "index", ",", "seq", "=", "parent_fn_attr", ".", "content", ".", "parent_ref", ",", "parent_fn_attr", ".", "content", ".", "parent_seq", "names", ".", "append", "(", "parent_fn_attr", ".", "content", ".", "name", ")", "except", "ValueError", "as", "e", ":", "#if the entry itself no longer exists = orphan", "is_orphan", "=", "True", "break", "return", "(", "is_orphan", ",", "\"\\\\\"", ".", "join", "(", "reversed", "(", "names", ")", ")", ")" ]
Based on the parent reference and sequence, computes the full path. The majority of the files in a filesystem have a very small number of parent directories. By definition, a filesystem is expected to have a much smaller number of directories than files. As such we use a function with the minimal amount of arguments to find a parent, that way we can cache the results easily and speed up the overall code. Args: fn_parent_ref (int): Parent reference number fn_parent_seq (int): Parent sequence number Returns: tuple(bool, str): A tuple where the first element is a boolean that is ``True`` if the file is orphan and ``False`` if not. The second element is a string with the full path without the file name
[ "Based", "on", "the", "parent", "reference", "and", "sequence", "computes", "the", "full", "path", "." ]
python
train
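The libmft entry above walks parent references up to the root entry (record 5) and flags the file as orphan when a parent is missing or its sequence number no longer matches. A toy sketch of that loop over a dict standing in for the MFT (indices, sequence numbers and names are made up).

ROOT_ID = 5
# toy stand-in for the MFT: index -> (sequence_number, parent_index, parent_sequence, name)
mft = {
    40: (2, 5, 1, "Documents"),
    63: (4, 40, 2, "Reports"),
}

def compute_full_path(fn_parent_ref, fn_parent_seq):
    names, is_orphan = [], False
    index, seq = fn_parent_ref, fn_parent_seq
    while index != ROOT_ID:
        entry = mft.get(index)
        if entry is None or entry[0] != seq:   # deleted or reused parent entry -> orphan
            is_orphan = True
            break
        _, parent_index, parent_seq, name = entry
        names.append(name)
        index, seq = parent_index, parent_seq
    return is_orphan, "\\".join(reversed(names))

print(compute_full_path(63, 4))   # (False, 'Documents\\Reports')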
dedupeio/dedupe
dedupe/core.py
https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/core.py#L65-L79
def randomPairsMatch(n_records_A, n_records_B, sample_size):
    """
    Return random combinations of indices for record list A and B
    """
    n = int(n_records_A * n_records_B)

    if sample_size >= n:
        random_pairs = numpy.arange(n)
    else:
        random_pairs = numpy.array(random.sample(range(n), sample_size),
                                   dtype=int)

    i, j = numpy.unravel_index(random_pairs, (n_records_A, n_records_B))

    return zip(i, j)
[ "def", "randomPairsMatch", "(", "n_records_A", ",", "n_records_B", ",", "sample_size", ")", ":", "n", "=", "int", "(", "n_records_A", "*", "n_records_B", ")", "if", "sample_size", ">=", "n", ":", "random_pairs", "=", "numpy", ".", "arange", "(", "n", ")", "else", ":", "random_pairs", "=", "numpy", ".", "array", "(", "random", ".", "sample", "(", "range", "(", "n", ")", ",", "sample_size", ")", ",", "dtype", "=", "int", ")", "i", ",", "j", "=", "numpy", ".", "unravel_index", "(", "random_pairs", ",", "(", "n_records_A", ",", "n_records_B", ")", ")", "return", "zip", "(", "i", ",", "j", ")" ]
Return random combinations of indices for record list A and B
[ "Return", "random", "combinations", "of", "indices", "for", "record", "list", "A", "and", "B" ]
python
train
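The dedupe entry above samples flat indices from the n_A * n_B grid and unravels them into (i, j) record pairs, which guarantees the sampled pairs are distinct. A standalone run (requires NumPy):

import random
import numpy

def random_pairs_match(n_records_A, n_records_B, sample_size):
    n = int(n_records_A * n_records_B)
    if sample_size >= n:
        flat = numpy.arange(n)                # take every combination
    else:
        flat = numpy.array(random.sample(range(n), sample_size), dtype=int)
    i, j = numpy.unravel_index(flat, (n_records_A, n_records_B))
    return zip(i, j)

print(list(random_pairs_match(3, 4, 5)))   # five distinct (row_A, row_B) index pairs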
scanny/python-pptx
pptx/oxml/text.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/text.py#L356-L362
def content_children(self):
        """
        A sequence containing the text-container child elements of this
        ``<a:p>`` element, i.e. (a:r|a:br|a:fld).
        """
        text_types = {CT_RegularTextRun, CT_TextLineBreak, CT_TextField}
        return tuple(elm for elm in self if type(elm) in text_types)
[ "def", "content_children", "(", "self", ")", ":", "text_types", "=", "{", "CT_RegularTextRun", ",", "CT_TextLineBreak", ",", "CT_TextField", "}", "return", "tuple", "(", "elm", "for", "elm", "in", "self", "if", "type", "(", "elm", ")", "in", "text_types", ")" ]
A sequence containing the text-container child elements of this ``<a:p>`` element, i.e. (a:r|a:br|a:fld).
[ "A", "sequence", "containing", "the", "text", "-", "container", "child", "elements", "of", "this", "<a", ":", "p", ">", "element", "i", ".", "e", ".", "(", "a", ":", "r|a", ":", "br|a", ":", "fld", ")", "." ]
python
train
mfcloud/python-zvm-sdk
smtLayer/changeVM.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/smtLayer/changeVM.py#L558-L663
def addLOADDEV(rh): """ Sets the LOADDEV statement in the virtual machine's directory entry. Input: Request Handle with the following properties: function - 'CHANGEVM' subfunction - 'ADDLOADDEV' userid - userid of the virtual machine parms['boot'] - Boot program number parms['addr'] - Logical block address of the boot record parms['lun'] - One to eight-byte logical unit number of the FCP-I/O device. parms['wwpn'] - World-Wide Port Number parms['scpDataType'] - SCP data type parms['scpData'] - Designates information to be passed to the program is loaded during guest IPL. Note that any of the parms may be left blank, in which case we will not update them. Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter changeVM.addLOADDEV") # scpDataType and scpData must appear or disappear concurrently if ('scpData' in rh.parms and 'scpDataType' not in rh.parms): msg = msgs.msg['0014'][1] % (modId, "scpData", "scpDataType") rh.printLn("ES", msg) rh.updateResults(msgs.msg['0014'][0]) return if ('scpDataType' in rh.parms and 'scpData' not in rh.parms): if rh.parms['scpDataType'].lower() == "delete": scpDataType = 1 else: # scpDataType and scpData must appear or disappear # concurrently unless we're deleting data msg = msgs.msg['0014'][1] % (modId, "scpDataType", "scpData") rh.printLn("ES", msg) rh.updateResults(msgs.msg['0014'][0]) return scpData = "" if 'scpDataType' in rh.parms: if rh.parms['scpDataType'].lower() == "hex": scpData = rh.parms['scpData'] scpDataType = 3 elif rh.parms['scpDataType'].lower() == "ebcdic": scpData = rh.parms['scpData'] scpDataType = 2 # scpDataType not hex, ebcdic or delete elif rh.parms['scpDataType'].lower() != "delete": msg = msgs.msg['0016'][1] % (modId, rh.parms['scpDataType']) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0016'][0]) return else: # Not specified, 0 for do nothing scpDataType = 0 scpData = "" if 'boot' not in rh.parms: boot = "" else: boot = rh.parms['boot'] if 'addr' not in rh.parms: block = "" else: block = rh.parms['addr'] if 'lun' not in rh.parms: lun = "" else: lun = rh.parms['lun'] # Make sure it doesn't have the 0x prefix lun.replace("0x", "") if 'wwpn' not in rh.parms: wwpn = "" else: wwpn = rh.parms['wwpn'] # Make sure it doesn't have the 0x prefix wwpn.replace("0x", "") parms = [ "-T", rh.userid, "-b", boot, "-k", block, "-l", lun, "-p", wwpn, "-s", str(scpDataType)] if scpData != "": parms.extend(["-d", scpData]) results = invokeSMCLI(rh, "Image_SCSI_Characteristics_Define_DM", parms) # SMAPI API failed. if results['overallRC'] != 0: rh.printLn("ES", results['response']) rh.updateResults(results) rh.printSysLog("Exit changeVM.addLOADDEV, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC']
[ "def", "addLOADDEV", "(", "rh", ")", ":", "rh", ".", "printSysLog", "(", "\"Enter changeVM.addLOADDEV\"", ")", "# scpDataType and scpData must appear or disappear concurrently", "if", "(", "'scpData'", "in", "rh", ".", "parms", "and", "'scpDataType'", "not", "in", "rh", ".", "parms", ")", ":", "msg", "=", "msgs", ".", "msg", "[", "'0014'", "]", "[", "1", "]", "%", "(", "modId", ",", "\"scpData\"", ",", "\"scpDataType\"", ")", "rh", ".", "printLn", "(", "\"ES\"", ",", "msg", ")", "rh", ".", "updateResults", "(", "msgs", ".", "msg", "[", "'0014'", "]", "[", "0", "]", ")", "return", "if", "(", "'scpDataType'", "in", "rh", ".", "parms", "and", "'scpData'", "not", "in", "rh", ".", "parms", ")", ":", "if", "rh", ".", "parms", "[", "'scpDataType'", "]", ".", "lower", "(", ")", "==", "\"delete\"", ":", "scpDataType", "=", "1", "else", ":", "# scpDataType and scpData must appear or disappear", "# concurrently unless we're deleting data", "msg", "=", "msgs", ".", "msg", "[", "'0014'", "]", "[", "1", "]", "%", "(", "modId", ",", "\"scpDataType\"", ",", "\"scpData\"", ")", "rh", ".", "printLn", "(", "\"ES\"", ",", "msg", ")", "rh", ".", "updateResults", "(", "msgs", ".", "msg", "[", "'0014'", "]", "[", "0", "]", ")", "return", "scpData", "=", "\"\"", "if", "'scpDataType'", "in", "rh", ".", "parms", ":", "if", "rh", ".", "parms", "[", "'scpDataType'", "]", ".", "lower", "(", ")", "==", "\"hex\"", ":", "scpData", "=", "rh", ".", "parms", "[", "'scpData'", "]", "scpDataType", "=", "3", "elif", "rh", ".", "parms", "[", "'scpDataType'", "]", ".", "lower", "(", ")", "==", "\"ebcdic\"", ":", "scpData", "=", "rh", ".", "parms", "[", "'scpData'", "]", "scpDataType", "=", "2", "# scpDataType not hex, ebcdic or delete", "elif", "rh", ".", "parms", "[", "'scpDataType'", "]", ".", "lower", "(", ")", "!=", "\"delete\"", ":", "msg", "=", "msgs", ".", "msg", "[", "'0016'", "]", "[", "1", "]", "%", "(", "modId", ",", "rh", ".", "parms", "[", "'scpDataType'", "]", ")", "rh", ".", "printLn", "(", "\"ES\"", ",", "msg", ")", "rh", ".", "updateResults", "(", "msgs", ".", "msg", "[", "'0016'", "]", "[", "0", "]", ")", "return", "else", ":", "# Not specified, 0 for do nothing", "scpDataType", "=", "0", "scpData", "=", "\"\"", "if", "'boot'", "not", "in", "rh", ".", "parms", ":", "boot", "=", "\"\"", "else", ":", "boot", "=", "rh", ".", "parms", "[", "'boot'", "]", "if", "'addr'", "not", "in", "rh", ".", "parms", ":", "block", "=", "\"\"", "else", ":", "block", "=", "rh", ".", "parms", "[", "'addr'", "]", "if", "'lun'", "not", "in", "rh", ".", "parms", ":", "lun", "=", "\"\"", "else", ":", "lun", "=", "rh", ".", "parms", "[", "'lun'", "]", "# Make sure it doesn't have the 0x prefix", "lun", ".", "replace", "(", "\"0x\"", ",", "\"\"", ")", "if", "'wwpn'", "not", "in", "rh", ".", "parms", ":", "wwpn", "=", "\"\"", "else", ":", "wwpn", "=", "rh", ".", "parms", "[", "'wwpn'", "]", "# Make sure it doesn't have the 0x prefix", "wwpn", ".", "replace", "(", "\"0x\"", ",", "\"\"", ")", "parms", "=", "[", "\"-T\"", ",", "rh", ".", "userid", ",", "\"-b\"", ",", "boot", ",", "\"-k\"", ",", "block", ",", "\"-l\"", ",", "lun", ",", "\"-p\"", ",", "wwpn", ",", "\"-s\"", ",", "str", "(", "scpDataType", ")", "]", "if", "scpData", "!=", "\"\"", ":", "parms", ".", "extend", "(", "[", "\"-d\"", ",", "scpData", "]", ")", "results", "=", "invokeSMCLI", "(", "rh", ",", "\"Image_SCSI_Characteristics_Define_DM\"", ",", "parms", ")", "# SMAPI API failed.", "if", "results", "[", "'overallRC'", "]", "!=", "0", ":", "rh", ".", "printLn", "(", "\"ES\"", 
",", "results", "[", "'response'", "]", ")", "rh", ".", "updateResults", "(", "results", ")", "rh", ".", "printSysLog", "(", "\"Exit changeVM.addLOADDEV, rc: \"", "+", "str", "(", "rh", ".", "results", "[", "'overallRC'", "]", ")", ")", "return", "rh", ".", "results", "[", "'overallRC'", "]" ]
Sets the LOADDEV statement in the virtual machine's directory entry. Input: Request Handle with the following properties: function - 'CHANGEVM' subfunction - 'ADDLOADDEV' userid - userid of the virtual machine parms['boot'] - Boot program number parms['addr'] - Logical block address of the boot record parms['lun'] - One to eight-byte logical unit number of the FCP-I/O device. parms['wwpn'] - World-Wide Port Number parms['scpDataType'] - SCP data type parms['scpData'] - Designates information to be passed to the program that is loaded during guest IPL. Note that any of the parms may be left blank, in which case we will not update them. Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error
[ "Sets", "the", "LOADDEV", "statement", "in", "the", "virtual", "machine", "s", "directory", "entry", "." ]
python
train
openfisca/openfisca-survey-manager
openfisca_survey_manager/scenarios.py
https://github.com/openfisca/openfisca-survey-manager/blob/bed6c65dc5e4ec2bdc9cda5b865fefd9e3d0c358/openfisca_survey_manager/scenarios.py#L1210-L1217
def _set_id_variable_by_entity_key(self) -> Dict[str, str]:
        '''Identify and set the good ids for the different entities'''
        if self.id_variable_by_entity_key is None:
            self.id_variable_by_entity_key = dict(
                (entity.key, entity.key + '_id') for entity in self.tax_benefit_system.entities)
            log.debug("Use default id_variable names:\n {}".format(self.id_variable_by_entity_key))
        return self.id_variable_by_entity_key
[ "def", "_set_id_variable_by_entity_key", "(", "self", ")", "->", "Dict", "[", "str", ",", "str", "]", ":", "if", "self", ".", "id_variable_by_entity_key", "is", "None", ":", "self", ".", "id_variable_by_entity_key", "=", "dict", "(", "(", "entity", ".", "key", ",", "entity", ".", "key", "+", "'_id'", ")", "for", "entity", "in", "self", ".", "tax_benefit_system", ".", "entities", ")", "log", ".", "debug", "(", "\"Use default id_variable names:\\n {}\"", ".", "format", "(", "self", ".", "id_variable_by_entity_key", ")", ")", "return", "self", ".", "id_variable_by_entity_key" ]
Identify and set the good ids for the different entities
[ "Identify", "and", "set", "the", "good", "ids", "for", "the", "different", "entities" ]
python
train
titusjan/argos
argos/config/intcti.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/config/intcti.py#L73-L77
def debugInfo(self):
        """ Returns the string with debugging information """
        return ("enabled = {}, min = {}, max = {}, step = {}, specVal = {}"
                .format(self.enabled, self.minValue, self.maxValue,
                        self.stepSize, self.specialValueText))
[ "def", "debugInfo", "(", "self", ")", ":", "return", "(", "\"enabled = {}, min = {}, max = {}, step = {}, specVal = {}\"", ".", "format", "(", "self", ".", "enabled", ",", "self", ".", "minValue", ",", "self", ".", "maxValue", ",", "self", ".", "stepSize", ",", "self", ".", "specialValueText", ")", ")" ]
Returns the string with debugging information
[ "Returns", "the", "string", "with", "debugging", "information" ]
python
train
genialis/resolwe
resolwe/elastic/management/commands/elastic_index.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/management/commands/elastic_index.py#L23-L31
def handle(self, *args, **options):
        """Command handle."""
        verbosity = int(options['verbosity'])

        if self.has_filter(options):
            self.filter_indices(options, verbosity)
        else:
            # Process all indices.
            index_builder.build()
[ "def", "handle", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "verbosity", "=", "int", "(", "options", "[", "'verbosity'", "]", ")", "if", "self", ".", "has_filter", "(", "options", ")", ":", "self", ".", "filter_indices", "(", "options", ",", "verbosity", ")", "else", ":", "# Process all indices.", "index_builder", ".", "build", "(", ")" ]
Command handle.
[ "Command", "handle", "." ]
python
train
johncosta/django-like-button
like_button/templatetags/like_button.py
https://github.com/johncosta/django-like-button/blob/c93a1be9c041d76e8de9a26f424ad4f836ab97bd/like_button/templatetags/like_button.py#L59-L94
def like_button_tag(context): """ This tag will check to see if they have the FACEBOOK_APP_ID setup correctly in the django settings, if so then it will pass the data along to the intercom_tag template to be displayed. If something isn't perfect we will return False, which will then not install the javascript since it isn't needed. s """ if FACEBOOK_APP_ID is None: log.warning("FACEBOOK_APP_ID isn't setup correctly in your settings") # make sure INTERCOM_APPID is setup correct and user is authenticated if FACEBOOK_APP_ID: request = context.get('request', None) if request: path_to_like = ( "http://" + request.get_host() + request.get_full_path()) show_send = true_false_converter(FACEBOOK_SHOW_SEND) like_width = FACEBOOK_LIKE_WIDTH show_faces = true_false_converter(FACEBOOK_SHOW_FACES) font = FACEBOOK_FONT return {"LIKE_BUTTON_IS_VALID": True, "path_to_like": path_to_like, "show_send": show_send, "like_width": like_width, "show_faces": show_faces, "font": font, "like_layout": FACEBOOK_LIKE_LAYOUT} # if it is here, it isn't a valid setup, return False to not show the tag. return {"LIKE_BUTTON_IS_VALID": False}
[ "def", "like_button_tag", "(", "context", ")", ":", "if", "FACEBOOK_APP_ID", "is", "None", ":", "log", ".", "warning", "(", "\"FACEBOOK_APP_ID isn't setup correctly in your settings\"", ")", "# make sure INTERCOM_APPID is setup correct and user is authenticated", "if", "FACEBOOK_APP_ID", ":", "request", "=", "context", ".", "get", "(", "'request'", ",", "None", ")", "if", "request", ":", "path_to_like", "=", "(", "\"http://\"", "+", "request", ".", "get_host", "(", ")", "+", "request", ".", "get_full_path", "(", ")", ")", "show_send", "=", "true_false_converter", "(", "FACEBOOK_SHOW_SEND", ")", "like_width", "=", "FACEBOOK_LIKE_WIDTH", "show_faces", "=", "true_false_converter", "(", "FACEBOOK_SHOW_FACES", ")", "font", "=", "FACEBOOK_FONT", "return", "{", "\"LIKE_BUTTON_IS_VALID\"", ":", "True", ",", "\"path_to_like\"", ":", "path_to_like", ",", "\"show_send\"", ":", "show_send", ",", "\"like_width\"", ":", "like_width", ",", "\"show_faces\"", ":", "show_faces", ",", "\"font\"", ":", "font", ",", "\"like_layout\"", ":", "FACEBOOK_LIKE_LAYOUT", "}", "# if it is here, it isn't a valid setup, return False to not show the tag.", "return", "{", "\"LIKE_BUTTON_IS_VALID\"", ":", "False", "}" ]
This tag will check to see if they have the FACEBOOK_APP_ID set up correctly in the django settings; if so, it will pass the data along to the intercom_tag template to be displayed. If something isn't perfect we will return False, which will then not install the javascript since it isn't needed.
[ "This", "tag", "will", "check", "to", "see", "if", "they", "have", "the", "FACEBOOK_APP_ID", "setup", "correctly", "in", "the", "django", "settings", "if", "so", "then", "it", "will", "pass", "the", "data", "along", "to", "the", "intercom_tag", "template", "to", "be", "displayed", "." ]
python
train
onicagroup/runway
runway/commands/modules_command.py
https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/commands/modules_command.py#L54-L86
def determine_module_class(path, class_path): """Determine type of module and return deployment module class.""" if not class_path: # First check directory name for type-indicating suffix basename = os.path.basename(path) if basename.endswith('.sls'): class_path = 'runway.module.serverless.Serverless' elif basename.endswith('.tf'): class_path = 'runway.module.terraform.Terraform' elif basename.endswith('.cdk'): class_path = 'runway.module.cdk.CloudDevelopmentKit' elif basename.endswith('.cfn'): class_path = 'runway.module.cloudformation.CloudFormation' if not class_path: # Fallback to autodetection if os.path.isfile(os.path.join(path, 'serverless.yml')): class_path = 'runway.module.serverless.Serverless' elif glob.glob(os.path.join(path, '*.tf')): class_path = 'runway.module.terraform.Terraform' elif os.path.isfile(os.path.join(path, 'cdk.json')) \ and os.path.isfile(os.path.join(path, 'package.json')): class_path = 'runway.module.cdk.CloudDevelopmentKit' elif glob.glob(os.path.join(path, '*.env')) or ( glob.glob(os.path.join(path, '*.yaml'))) or ( glob.glob(os.path.join(path, '*.yml'))): class_path = 'runway.module.cloudformation.CloudFormation' if not class_path: LOGGER.error('No module class found for %s', os.path.basename(path)) sys.exit(1) return load_object_from_string(class_path)
[ "def", "determine_module_class", "(", "path", ",", "class_path", ")", ":", "if", "not", "class_path", ":", "# First check directory name for type-indicating suffix", "basename", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "if", "basename", ".", "endswith", "(", "'.sls'", ")", ":", "class_path", "=", "'runway.module.serverless.Serverless'", "elif", "basename", ".", "endswith", "(", "'.tf'", ")", ":", "class_path", "=", "'runway.module.terraform.Terraform'", "elif", "basename", ".", "endswith", "(", "'.cdk'", ")", ":", "class_path", "=", "'runway.module.cdk.CloudDevelopmentKit'", "elif", "basename", ".", "endswith", "(", "'.cfn'", ")", ":", "class_path", "=", "'runway.module.cloudformation.CloudFormation'", "if", "not", "class_path", ":", "# Fallback to autodetection", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'serverless.yml'", ")", ")", ":", "class_path", "=", "'runway.module.serverless.Serverless'", "elif", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'*.tf'", ")", ")", ":", "class_path", "=", "'runway.module.terraform.Terraform'", "elif", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'cdk.json'", ")", ")", "and", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'package.json'", ")", ")", ":", "class_path", "=", "'runway.module.cdk.CloudDevelopmentKit'", "elif", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'*.env'", ")", ")", "or", "(", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'*.yaml'", ")", ")", ")", "or", "(", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'*.yml'", ")", ")", ")", ":", "class_path", "=", "'runway.module.cloudformation.CloudFormation'", "if", "not", "class_path", ":", "LOGGER", ".", "error", "(", "'No module class found for %s'", ",", "os", ".", "path", ".", "basename", "(", "path", ")", ")", "sys", ".", "exit", "(", "1", ")", "return", "load_object_from_string", "(", "class_path", ")" ]
Determine type of module and return deployment module class.
[ "Determine", "type", "of", "module", "and", "return", "deployment", "module", "class", "." ]
python
train
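The runway entry above checks the directory-name suffix first and only then falls back to marker files. A compact sketch of just the suffix step, with the class paths copied from the entry (runway itself is not imported here):

import os

SUFFIX_TO_CLASS = {
    # copied from the suffix branch in the entry above
    ".sls": "runway.module.serverless.Serverless",
    ".tf": "runway.module.terraform.Terraform",
    ".cdk": "runway.module.cdk.CloudDevelopmentKit",
    ".cfn": "runway.module.cloudformation.CloudFormation",
}

def class_from_suffix(path):
    basename = os.path.basename(path)
    for suffix, class_path in SUFFIX_TO_CLASS.items():
        if basename.endswith(suffix):
            return class_path
    return None   # caller would fall through to the marker-file checks

print(class_from_suffix("modules/network.cfn"))   # runway.module.cloudformation.CloudFormation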
deepmind/pysc2
pysc2/lib/renderer_human.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/renderer_human.py#L1062-L1076
def draw_help(self, surf): """Draw the help dialog.""" if not self._help: return def write(loc, text): surf.write_screen(self._font_large, colors.black, loc, text) surf.surf.fill(colors.white * 0.8) write((1, 1), "Shortcuts:") max_len = max(len(s) for s, _ in self.shortcuts) for i, (hotkey, description) in enumerate(self.shortcuts, start=2): write((2, i), hotkey) write((3 + max_len * 0.7, i), description)
[ "def", "draw_help", "(", "self", ",", "surf", ")", ":", "if", "not", "self", ".", "_help", ":", "return", "def", "write", "(", "loc", ",", "text", ")", ":", "surf", ".", "write_screen", "(", "self", ".", "_font_large", ",", "colors", ".", "black", ",", "loc", ",", "text", ")", "surf", ".", "surf", ".", "fill", "(", "colors", ".", "white", "*", "0.8", ")", "write", "(", "(", "1", ",", "1", ")", ",", "\"Shortcuts:\"", ")", "max_len", "=", "max", "(", "len", "(", "s", ")", "for", "s", ",", "_", "in", "self", ".", "shortcuts", ")", "for", "i", ",", "(", "hotkey", ",", "description", ")", "in", "enumerate", "(", "self", ".", "shortcuts", ",", "start", "=", "2", ")", ":", "write", "(", "(", "2", ",", "i", ")", ",", "hotkey", ")", "write", "(", "(", "3", "+", "max_len", "*", "0.7", ",", "i", ")", ",", "description", ")" ]
Draw the help dialog.
[ "Draw", "the", "help", "dialog", "." ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L4044-L4048
def user_requests(self, id, **kwargs):
        "https://developer.zendesk.com/rest_api/docs/core/requests#list-requests"
        api_path = "/api/v2/users/{id}/requests.json"
        api_path = api_path.format(id=id)
        return self.call(api_path, **kwargs)
[ "def", "user_requests", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/users/{id}/requests.json\"", "api_path", "=", "api_path", ".", "format", "(", "id", "=", "id", ")", "return", "self", ".", "call", "(", "api_path", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/requests#list-requests
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "requests#list", "-", "requests" ]
python
train