Dataset schema:

  repo              string    (lengths 7 to 54)
  path              string    (lengths 4 to 192)
  url               string    (lengths 87 to 284)
  code              string    (lengths 78 to 104k)
  code_tokens       sequence
  docstring         string    (lengths 1 to 46.9k)
  docstring_tokens  sequence
  language          string    (1 distinct value)
  partition         string    (3 distinct values)
robotools/fontParts
Lib/fontParts/base/normalizers.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/normalizers.py#L319-L337
def normalizeGlyphUnicode(value):
    """
    Normalizes glyph unicode.

    * **value** must be an int or hex (represented as a string).
    * **value** must be in a unicode range.
    * Returned value will be an ``int``.
    """
    if not isinstance(value, (int, basestring)) or isinstance(value, bool):
        raise TypeError("Glyph unicode must be a int or hex string, not %s."
                        % type(value).__name__)
    if isinstance(value, basestring):
        try:
            value = int(value, 16)
        except ValueError:
            raise ValueError("Glyph unicode hex must be a valid hex string.")
    if value < 0 or value > 1114111:
        raise ValueError("Glyph unicode must be in the Unicode range.")
    return value
[ "def", "normalizeGlyphUnicode", "(", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "(", "int", ",", "basestring", ")", ")", "or", "isinstance", "(", "value", ",", "bool", ")", ":", "raise", "TypeError", "(", "\"Glyph unicode must be a int or hex string, not %s.\"", "%", "type", "(", "value", ")", ".", "__name__", ")", "if", "isinstance", "(", "value", ",", "basestring", ")", ":", "try", ":", "value", "=", "int", "(", "value", ",", "16", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Glyph unicode hex must be a valid hex string.\"", ")", "if", "value", "<", "0", "or", "value", ">", "1114111", ":", "raise", "ValueError", "(", "\"Glyph unicode must be in the Unicode range.\"", ")", "return", "value" ]
Normalizes glyph unicode.

* **value** must be an int or hex (represented as a string).
* **value** must be in a unicode range.
* Returned value will be an ``int``.
[ "Normalizes", "glyph", "unicode", "." ]
python
train
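A quick usage sketch for the normalizer above. `basestring` is Python 2 (fontParts ships a py23 shim); this standalone rendering substitutes `str` for illustration and is not fontParts' actual code.

# Hedged sketch: Python 3 rendering of normalizeGlyphUnicode,
# with `str` standing in for the Python 2 `basestring` above.
def normalize_glyph_unicode(value):
    if not isinstance(value, (int, str)) or isinstance(value, bool):
        raise TypeError("Glyph unicode must be an int or hex string.")
    if isinstance(value, str):
        value = int(value, 16)  # raises ValueError on a bad hex string
    if not 0 <= value <= 1114111:  # U+0000 .. U+10FFFF
        raise ValueError("Glyph unicode must be in the Unicode range.")
    return value

print(normalize_glyph_unicode(65))      # 65 (U+0041, 'A')
print(normalize_glyph_unicode("0041"))  # 65 as well, parsed as hex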
iotile/coretools
iotilecore/iotile/core/utilities/schema_verify/bool_verify.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/schema_verify/bool_verify.py#L17-L36
def verify(self, obj):
    """Verify that the object conforms to this verifier's schema

    Args:
        obj (object): A python object to verify

    Raises:
        ValidationError: If there is a problem verifying the dictionary, a
            ValidationError is thrown with at least the reason key set indicating
            the reason for the lack of validation.
    """
    if not isinstance(obj, bool):
        raise ValidationError("Object is not a bool", reason='object is not a bool', object=obj)

    if self._require_value is not None and obj != self._require_value:
        raise ValidationError("Boolean is not equal to specified literal",
                              reason='boolean value %s should be %s'
                              % (str(obj), str(self._require_value)))

    return obj
[ "def", "verify", "(", "self", ",", "obj", ")", ":", "if", "not", "isinstance", "(", "obj", ",", "bool", ")", ":", "raise", "ValidationError", "(", "\"Object is not a bool\"", ",", "reason", "=", "'object is not a bool'", ",", "object", "=", "obj", ")", "if", "self", ".", "_require_value", "is", "not", "None", "and", "obj", "!=", "self", ".", "_require_value", ":", "raise", "ValidationError", "(", "\"Boolean is not equal to specified literal\"", ",", "reason", "=", "'boolean value %s should be %s'", "%", "(", "str", "(", "obj", ")", ",", "str", "(", "self", ".", "_require_value", ")", ")", ")", "return", "obj" ]
Verify that the object conforms to this verifier's schema

Args:
    obj (object): A python object to verify

Raises:
    ValidationError: If there is a problem verifying the dictionary, a
        ValidationError is thrown with at least the reason key set indicating
        the reason for the lack of validation.
[ "Verify", "that", "the", "object", "conforms", "to", "this", "verifier", "s", "schema" ]
python
train
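A minimal standalone sketch of the verifier pattern; the ValidationError and class scaffolding below are assumed stand-ins, not iotile's real schema_verify classes.

class ValidationError(Exception):
    """Stand-in for iotile's ValidationError; carries keyword details."""
    def __init__(self, msg, **kwargs):
        super().__init__(msg)
        self.params = kwargs

class BooleanVerifier:
    def __init__(self, require_value=None):
        self._require_value = require_value

    def verify(self, obj):
        if not isinstance(obj, bool):
            raise ValidationError("Object is not a bool",
                                  reason='object is not a bool', object=obj)
        if self._require_value is not None and obj != self._require_value:
            raise ValidationError("Boolean is not equal to specified literal",
                                  reason='boolean value %s should be %s'
                                  % (obj, self._require_value))
        return obj

print(BooleanVerifier(require_value=True).verify(True))  # True
# BooleanVerifier(require_value=True).verify(False) raises ValidationError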
vintasoftware/django-role-permissions
rolepermissions/roles.py
https://github.com/vintasoftware/django-role-permissions/blob/28924361e689e994e0c3575e18104a1a5abd8de6/rolepermissions/roles.py#L183-L192
def get_or_create_permission(codename, name=camel_or_snake_to_title):
    """
    Get a Permission object from a permission name.

    @:param codename: permission code name
    @:param name: human-readable permissions name (str) or callable that takes
        codename as argument and returns str
    """
    user_ct = ContentType.objects.get_for_model(get_user_model())
    return Permission.objects.get_or_create(content_type=user_ct, codename=codename,
                                            defaults={'name': name(codename) if callable(name) else name})
[ "def", "get_or_create_permission", "(", "codename", ",", "name", "=", "camel_or_snake_to_title", ")", ":", "user_ct", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "get_user_model", "(", ")", ")", "return", "Permission", ".", "objects", ".", "get_or_create", "(", "content_type", "=", "user_ct", ",", "codename", "=", "codename", ",", "defaults", "=", "{", "'name'", ":", "name", "(", "codename", ")", "if", "callable", "(", "name", ")", "else", "name", "}", ")" ]
Get a Permission object from a permission name.

@:param codename: permission code name
@:param name: human-readable permissions name (str) or callable that takes
    codename as argument and returns str
[ "Get", "a", "Permission", "object", "from", "a", "permission", "name", "." ]
python
train
twisted/mantissa
xmantissa/signup.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/signup.py#L170-L182
def handleRequestForUser(self, username, url):
    """
    User C{username} wants to reset their password.  Create an attempt
    item, and send them an email if the username is valid
    """
    attempt = self.newAttemptForUser(username)
    account = self.accountByAddress(username)
    if account is None:
        # do we want to disclose this to the user?
        return
    email = self.getExternalEmail(account)
    if email is not None:
        self.sendEmail(url, attempt, email)
[ "def", "handleRequestForUser", "(", "self", ",", "username", ",", "url", ")", ":", "attempt", "=", "self", ".", "newAttemptForUser", "(", "username", ")", "account", "=", "self", ".", "accountByAddress", "(", "username", ")", "if", "account", "is", "None", ":", "# do we want to disclose this to the user?", "return", "email", "=", "self", ".", "getExternalEmail", "(", "account", ")", "if", "email", "is", "not", "None", ":", "self", ".", "sendEmail", "(", "url", ",", "attempt", ",", "email", ")" ]
User C{username} wants to reset their password. Create an attempt item, and send them an email if the username is valid
[ "User", "C", "{", "username", "}", "wants", "to", "reset", "their", "password", ".", "Create", "an", "attempt", "item", "and", "send", "them", "an", "email", "if", "the", "username", "is", "valid" ]
python
train
marcinmiklitz/pywindow
pywindow/molecular.py
https://github.com/marcinmiklitz/pywindow/blob/e5264812157224f22a691741ca2e0aefdc9bd2eb/pywindow/molecular.py#L806-L828
def load_file(cls, filepath):
    """
    Create a :class:`MolecularSystem` from an input file.

    Recognized input file formats: XYZ, PDB and MOL (V3000).

    Parameters
    ----------
    filepath : :class:`str`
        The input's filepath.

    Returns
    -------
    :class:`pywindow.molecular.MolecularSystem`
        :class:`MolecularSystem`
    """
    obj = cls()
    obj.system = obj._Input.load_file(filepath)
    obj.filename = os.path.basename(filepath)
    obj.system_id = obj.filename.split(".")[0]
    obj.name, ext = os.path.splitext(obj.filename)
    return obj
[ "def", "load_file", "(", "cls", ",", "filepath", ")", ":", "obj", "=", "cls", "(", ")", "obj", ".", "system", "=", "obj", ".", "_Input", ".", "load_file", "(", "filepath", ")", "obj", ".", "filename", "=", "os", ".", "path", ".", "basename", "(", "filepath", ")", "obj", ".", "system_id", "=", "obj", ".", "filename", ".", "split", "(", "\".\"", ")", "[", "0", "]", "obj", ".", "name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "obj", ".", "filename", ")", "return", "obj" ]
Create a :class:`MolecularSystem` from an input file.

Recognized input file formats: XYZ, PDB and MOL (V3000).

Parameters
----------
filepath : :class:`str`
    The input's filepath.

Returns
-------
:class:`pywindow.molecular.MolecularSystem`
    :class:`MolecularSystem`
[ "Create", "a", ":", "class", ":", "MolecularSystem", "from", "an", "input", "file", "." ]
python
train
tensorflow/cleverhans
examples/RL-attack/model.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/RL-attack/model.py#L38-L95
def dueling_model(img_in, num_actions, scope, noisy=False, reuse=False,
                  concat_softmax=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8,
                                       stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4,
                                       stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3,
                                       stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                state_hidden = noisy_dense(out, name='noisy_fc1', size=512,
                                           activation_fn=tf.nn.relu)
                state_score = noisy_dense(state_hidden, name='noisy_fc2', size=1)
            else:
                state_hidden = layers.fully_connected(
                    out, num_outputs=512, activation_fn=tf.nn.relu
                )
                state_score = layers.fully_connected(state_hidden, num_outputs=1,
                                                     activation_fn=None)

        with tf.variable_scope("action_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                actions_hidden = noisy_dense(out, name='noisy_fc1', size=512,
                                             activation_fn=tf.nn.relu)
                action_scores = noisy_dense(actions_hidden, name='noisy_fc2',
                                            size=num_actions)
            else:
                actions_hidden = layers.fully_connected(
                    out, num_outputs=512, activation_fn=tf.nn.relu
                )
                action_scores = layers.fully_connected(
                    actions_hidden, num_outputs=num_actions, activation_fn=None
                )
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(
                action_scores_mean, 1
            )

        return state_score + action_scores
[ "def", "dueling_model", "(", "img_in", ",", "num_actions", ",", "scope", ",", "noisy", "=", "False", ",", "reuse", "=", "False", ",", "concat_softmax", "=", "False", ")", ":", "with", "tf", ".", "variable_scope", "(", "scope", ",", "reuse", "=", "reuse", ")", ":", "out", "=", "img_in", "with", "tf", ".", "variable_scope", "(", "\"convnet\"", ")", ":", "# original architecture", "out", "=", "layers", ".", "convolution2d", "(", "out", ",", "num_outputs", "=", "32", ",", "kernel_size", "=", "8", ",", "stride", "=", "4", ",", "activation_fn", "=", "tf", ".", "nn", ".", "relu", ")", "out", "=", "layers", ".", "convolution2d", "(", "out", ",", "num_outputs", "=", "64", ",", "kernel_size", "=", "4", ",", "stride", "=", "2", ",", "activation_fn", "=", "tf", ".", "nn", ".", "relu", ")", "out", "=", "layers", ".", "convolution2d", "(", "out", ",", "num_outputs", "=", "64", ",", "kernel_size", "=", "3", ",", "stride", "=", "1", ",", "activation_fn", "=", "tf", ".", "nn", ".", "relu", ")", "out", "=", "layers", ".", "flatten", "(", "out", ")", "with", "tf", ".", "variable_scope", "(", "\"state_value\"", ")", ":", "if", "noisy", ":", "# Apply noisy network on fully connected layers", "# ref: https://arxiv.org/abs/1706.10295", "state_hidden", "=", "noisy_dense", "(", "out", ",", "name", "=", "'noisy_fc1'", ",", "size", "=", "512", ",", "activation_fn", "=", "tf", ".", "nn", ".", "relu", ")", "state_score", "=", "noisy_dense", "(", "state_hidden", ",", "name", "=", "'noisy_fc2'", ",", "size", "=", "1", ")", "else", ":", "state_hidden", "=", "layers", ".", "fully_connected", "(", "out", ",", "num_outputs", "=", "512", ",", "activation_fn", "=", "tf", ".", "nn", ".", "relu", ")", "state_score", "=", "layers", ".", "fully_connected", "(", "state_hidden", ",", "num_outputs", "=", "1", ",", "activation_fn", "=", "None", ")", "with", "tf", ".", "variable_scope", "(", "\"action_value\"", ")", ":", "if", "noisy", ":", "# Apply noisy network on fully connected layers", "# ref: https://arxiv.org/abs/1706.10295", "actions_hidden", "=", "noisy_dense", "(", "out", ",", "name", "=", "'noisy_fc1'", ",", "size", "=", "512", ",", "activation_fn", "=", "tf", ".", "nn", ".", "relu", ")", "action_scores", "=", "noisy_dense", "(", "actions_hidden", ",", "name", "=", "'noisy_fc2'", ",", "size", "=", "num_actions", ")", "else", ":", "actions_hidden", "=", "layers", ".", "fully_connected", "(", "out", ",", "num_outputs", "=", "512", ",", "activation_fn", "=", "tf", ".", "nn", ".", "relu", ")", "action_scores", "=", "layers", ".", "fully_connected", "(", "actions_hidden", ",", "num_outputs", "=", "num_actions", ",", "activation_fn", "=", "None", ")", "action_scores_mean", "=", "tf", ".", "reduce_mean", "(", "action_scores", ",", "1", ")", "action_scores", "=", "action_scores", "-", "tf", ".", "expand_dims", "(", "action_scores_mean", ",", "1", ")", "return", "state_score", "+", "action_scores" ]
As described in https://arxiv.org/abs/1511.06581
[ "As", "described", "in", "https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1511", ".", "06581" ]
python
train
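The core of the dueling architecture is the aggregation Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)), computed in the last three lines above. A tiny NumPy sketch of just that step, with illustrative values rather than the model's outputs:

import numpy as np

state_score = np.array([[2.0]])               # V(s), shape (batch, 1)
action_scores = np.array([[1.0, 3.0, 5.0]])   # A(s, a), shape (batch, actions)
# mean-center the advantages, then shift by the state value
advantage = action_scores - action_scores.mean(axis=1, keepdims=True)
q_values = state_score + advantage
print(q_values)  # [[0. 2. 4.]]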
benmack/eo-box
eobox/raster/utils.py
https://github.com/benmack/eo-box/blob/a291450c766bf50ea06adcdeb5729a4aad790ed5/eobox/raster/utils.py#L13-L40
def dtype_checker_df(df, dtype, return_=None):
    """Check if there are NaN values or values outside of a given datatype range.

    Arguments:
        df {dataframe} -- A dataframe.
        dtype {str} -- The datatype to check for.

    Keyword Arguments:
        return_ {str} -- Returns a boolean dataframe with the values not in the
            range of the dtype ('all'), the row ('rowsums') or column ('colsums')
            sums of that dataframe or an exit code 1 (None, default) if any of
            the values is not in the range.

    Returns:
        [int or DataFrame or Series] -- If no value is out of the range exit
            code 0 is returned, else depends on return_.
    """
    dtype_range = dtype_ranges[dtype]
    df_out_of_range = (df < dtype_range[0]) | (df > dtype_range[1]) | (~np.isfinite(df))
    if df_out_of_range.any().any():
        if return_ == "colsums":
            df_out_of_range = df_out_of_range.apply(sum, axis=0)  # column
        elif return_ == "rowsums":
            df_out_of_range = df_out_of_range.apply(sum, axis=1)  # row
        elif return_ == "all":
            df_out_of_range = df_out_of_range
        else:
            df_out_of_range = 1
    else:
        df_out_of_range = 0
    return df_out_of_range
[ "def", "dtype_checker_df", "(", "df", ",", "dtype", ",", "return_", "=", "None", ")", ":", "dtype_range", "=", "dtype_ranges", "[", "dtype", "]", "df_out_of_range", "=", "(", "df", "<", "dtype_range", "[", "0", "]", ")", "|", "(", "df", ">", "dtype_range", "[", "1", "]", ")", "|", "(", "~", "np", ".", "isfinite", "(", "df", ")", ")", "if", "df_out_of_range", ".", "any", "(", ")", ".", "any", "(", ")", ":", "if", "return_", "==", "\"colsums\"", ":", "df_out_of_range", "=", "df_out_of_range", ".", "apply", "(", "sum", ",", "axis", "=", "0", ")", "# column", "elif", "return_", "==", "\"rowsums\"", ":", "df_out_of_range", "=", "df_out_of_range", ".", "apply", "(", "sum", ",", "axis", "=", "1", ")", "# row", "elif", "return_", "==", "\"all\"", ":", "df_out_of_range", "=", "df_out_of_range", "else", ":", "df_out_of_range", "=", "1", "else", ":", "df_out_of_range", "=", "0", "return", "df_out_of_range" ]
Check if there are NaN values or values outside of a given datatype range.

Arguments:
    df {dataframe} -- A dataframe.
    dtype {str} -- The datatype to check for.

Keyword Arguments:
    return_ {str} -- Returns a boolean dataframe with the values not in the
        range of the dtype ('all'), the row ('rowsums') or column ('colsums')
        sums of that dataframe or an exit code 1 (None, default) if any of the
        values is not in the range.

Returns:
    [int or DataFrame or Series] -- If no value is out of the range exit code 0
        is returned, else depends on return_.
[ "Check", "if", "there", "are", "NaN", "values", "of", "values", "outside", "of", "a", "given", "datatype", "range", "." ]
python
train
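The function depends on a module-level `dtype_ranges` mapping from eobox. A self-contained rerun of the core mask logic, with an assumed one-entry mapping standing in for the real one:

import numpy as np
import pandas as pd

dtype_ranges = {"uint8": (0, 255)}  # assumed stand-in for eobox's mapping

df = pd.DataFrame({"a": [0.0, 300.0], "b": [np.nan, 5.0]})
lo, hi = dtype_ranges["uint8"]
# True where a value is below/above the dtype range or not finite (NaN, inf)
out_of_range = (df < lo) | (df > hi) | (~np.isfinite(df))
print(out_of_range.sum(axis=0))  # 'colsums': a has 1 offender (300), b has 1 (NaN)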
Kami/python-yubico-client
yubico_client/yubico.py
https://github.com/Kami/python-yubico-client/blob/3334b2ee1b5b996af3ef6be57a4ea52b8e45e764/yubico_client/yubico.py#L347-L365
def _init_request_urls(self, api_urls):
    """
    Returns a list of the API URLs.
    """
    if not isinstance(api_urls, (str, list, tuple)):
        raise TypeError('api_urls needs to be string or iterable!')

    if isinstance(api_urls, str):
        api_urls = (api_urls,)

    api_urls = list(api_urls)

    for url in api_urls:
        if not url.startswith('http://') and \
           not url.startswith('https://'):
            raise ValueError(('URL "%s" contains an invalid or missing'
                              ' scheme' % (url)))

    return list(api_urls)
[ "def", "_init_request_urls", "(", "self", ",", "api_urls", ")", ":", "if", "not", "isinstance", "(", "api_urls", ",", "(", "str", ",", "list", ",", "tuple", ")", ")", ":", "raise", "TypeError", "(", "'api_urls needs to be string or iterable!'", ")", "if", "isinstance", "(", "api_urls", ",", "str", ")", ":", "api_urls", "=", "(", "api_urls", ",", ")", "api_urls", "=", "list", "(", "api_urls", ")", "for", "url", "in", "api_urls", ":", "if", "not", "url", ".", "startswith", "(", "'http://'", ")", "and", "not", "url", ".", "startswith", "(", "'https://'", ")", ":", "raise", "ValueError", "(", "(", "'URL \"%s\" contains an invalid or missing'", "' scheme'", "%", "(", "url", ")", ")", ")", "return", "list", "(", "api_urls", ")" ]
Returns a list of the API URLs.
[ "Returns", "a", "list", "of", "the", "API", "URLs", "." ]
python
train
django-fluent/django-fluent-contents
fluent_contents/management/commands/find_contentitem_urls.py
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/management/commands/find_contentitem_urls.py#L119-L146
def extract_html_urls(self, html):
    """
    Take all ``<img src="..">`` from the HTML
    """
    p = HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
    dom = p.parse(html)
    urls = []

    for img in dom.getElementsByTagName('img'):
        src = img.getAttribute('src')
        if src:
            urls.append(unquote_utf8(src))

        srcset = img.getAttribute('srcset')
        if srcset:
            urls += self.extract_srcset(srcset)

    for source in dom.getElementsByTagName('source'):
        srcset = source.getAttribute('srcset')
        if srcset:
            urls += self.extract_srcset(srcset)

    for source in dom.getElementsByTagName('a'):
        href = source.getAttribute('href')
        if href:
            urls.append(unquote_utf8(href))

    return urls
[ "def", "extract_html_urls", "(", "self", ",", "html", ")", ":", "p", "=", "HTMLParser", "(", "tree", "=", "treebuilders", ".", "getTreeBuilder", "(", "\"dom\"", ")", ")", "dom", "=", "p", ".", "parse", "(", "html", ")", "urls", "=", "[", "]", "for", "img", "in", "dom", ".", "getElementsByTagName", "(", "'img'", ")", ":", "src", "=", "img", ".", "getAttribute", "(", "'src'", ")", "if", "src", ":", "urls", ".", "append", "(", "unquote_utf8", "(", "src", ")", ")", "srcset", "=", "img", ".", "getAttribute", "(", "'srcset'", ")", "if", "srcset", ":", "urls", "+=", "self", ".", "extract_srcset", "(", "srcset", ")", "for", "source", "in", "dom", ".", "getElementsByTagName", "(", "'source'", ")", ":", "srcset", "=", "source", ".", "getAttribute", "(", "'srcset'", ")", "if", "srcset", ":", "urls", "+=", "self", ".", "extract_srcset", "(", "srcset", ")", "for", "source", "in", "dom", ".", "getElementsByTagName", "(", "'a'", ")", ":", "href", "=", "source", ".", "getAttribute", "(", "'href'", ")", "if", "href", ":", "urls", ".", "append", "(", "unquote_utf8", "(", "href", ")", ")", "return", "urls" ]
Take all ``<img src="..">`` from the HTML
[ "Take", "all", "<img", "src", "=", "..", ">", "from", "the", "HTML" ]
python
train
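A runnable sketch of the same html5lib DOM walk on a small snippet; only html5lib is used here, and the command's `unquote_utf8` and srcset helpers are omitted:

import html5lib
from html5lib import treebuilders

parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
dom = parser.parse('<p><img src="/a.png"><a href="/page">link</a></p>')
# collect img src and anchor href attributes from the parsed DOM
urls = [img.getAttribute("src") for img in dom.getElementsByTagName("img")]
urls += [a.getAttribute("href") for a in dom.getElementsByTagName("a")]
print(urls)  # ['/a.png', '/page']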
tensorflow/cleverhans
cleverhans/utils_tf.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L647-L665
def jacobian_graph(predictions, x, nb_classes):
    """
    Create the Jacobian graph to be run later in a TF session
    :param predictions: the model's symbolic output (linear output, pre-softmax)
    :param x: the input placeholder
    :param nb_classes: the number of classes the model has
    :return:
    """
    # This function will return a list of TF gradients
    list_derivatives = []

    # Define the TF graph elements to compute our derivatives for each class
    for class_ind in xrange(nb_classes):
        derivatives, = tf.gradients(predictions[:, class_ind], x)
        list_derivatives.append(derivatives)

    return list_derivatives
[ "def", "jacobian_graph", "(", "predictions", ",", "x", ",", "nb_classes", ")", ":", "# This function will return a list of TF gradients", "list_derivatives", "=", "[", "]", "# Define the TF graph elements to compute our derivatives for each class", "for", "class_ind", "in", "xrange", "(", "nb_classes", ")", ":", "derivatives", ",", "=", "tf", ".", "gradients", "(", "predictions", "[", ":", ",", "class_ind", "]", ",", "x", ")", "list_derivatives", ".", "append", "(", "derivatives", ")", "return", "list_derivatives" ]
Create the Jacobian graph to be run later in a TF session

:param predictions: the model's symbolic output (linear output, pre-softmax)
:param x: the input placeholder
:param nb_classes: the number of classes the model has
:return:
[ "Create", "the", "Jacobian", "graph", "to", "be", "ran", "later", "in", "a", "TF", "session", ":", "param", "predictions", ":", "the", "model", "s", "symbolic", "output", "(", "linear", "output", "pre", "-", "softmax", ")", ":", "param", "x", ":", "the", "input", "placeholder", ":", "param", "nb_classes", ":", "the", "number", "of", "classes", "the", "model", "has", ":", "return", ":" ]
python
train
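The function targets TF 1.x graph mode (`tf.gradients`, `xrange`). A hedged TF 2.x sketch of the same per-class gradient idea, using GradientTape on a toy linear model rather than cleverhans' code:

import tensorflow as tf  # assumes TF 2.x eager mode

x = tf.Variable([[1.0, 2.0]])
w = tf.constant([[0.5, -1.0, 2.0], [1.5, 0.0, -0.5]])
with tf.GradientTape(persistent=True) as tape:
    predictions = tf.matmul(x, w)                 # stand-in model output
    per_class = [predictions[:, i] for i in range(3)]
# one gradient per class, i.e. one row of the Jacobian each
jacobian_rows = [tape.gradient(t, x) for t in per_class]
print([g.numpy().tolist() for g in jacobian_rows])
# d predictions[:, i] / d x equals column i of w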
iotile/coretools
iotilegateway/iotilegateway/supervisor/client.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilegateway/iotilegateway/supervisor/client.py#L615-L625
def get_headline(self, name):
    """Get stored messages for a service.

    Args:
        name (string): The name of the service to get messages from.

    Returns:
        ServiceMessage: the headline or None if no headline has been set
    """
    return self._loop.run_coroutine(self._client.get_headline(name))
[ "def", "get_headline", "(", "self", ",", "name", ")", ":", "return", "self", ".", "_loop", ".", "run_coroutine", "(", "self", ".", "_client", ".", "get_headline", "(", "name", ")", ")" ]
Get stored messages for a service.

Args:
    name (string): The name of the service to get messages from.

Returns:
    ServiceMessage: the headline or None if no headline has been set
[ "Get", "stored", "messages", "for", "a", "service", "." ]
python
train
Alignak-monitoring/alignak
alignak/objects/host.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L1438-L1464
def explode(self, hostgroups, contactgroups):
    """Explode hosts with hostgroups, contactgroups::

    * Add contact from contactgroups to host contacts
    * Add host into their hostgroups as hostgroup members

    :param hostgroups: Hostgroups to explode
    :type hostgroups: alignak.objects.hostgroup.Hostgroups
    :param contactgroups: Contactgroups to explode
    :type contactgroups: alignak.objects.contactgroup.Contactgroups
    :return: None
    """
    for template in list(self.templates.values()):
        # items::explode_contact_groups_into_contacts
        # take all contacts from our contact_groups into our contact property
        self.explode_contact_groups_into_contacts(template, contactgroups)

    # Register host in the hostgroups
    for host in self:
        # items::explode_contact_groups_into_contacts
        # take all contacts from our contact_groups into our contact property
        self.explode_contact_groups_into_contacts(host, contactgroups)

        if hasattr(host, 'host_name') and hasattr(host, 'hostgroups'):
            hname = host.host_name
            for hostgroup in host.hostgroups:
                hostgroups.add_member(hname, hostgroup.strip())
[ "def", "explode", "(", "self", ",", "hostgroups", ",", "contactgroups", ")", ":", "for", "template", "in", "list", "(", "self", ".", "templates", ".", "values", "(", ")", ")", ":", "# items::explode_contact_groups_into_contacts", "# take all contacts from our contact_groups into our contact property", "self", ".", "explode_contact_groups_into_contacts", "(", "template", ",", "contactgroups", ")", "# Register host in the hostgroups", "for", "host", "in", "self", ":", "# items::explode_contact_groups_into_contacts", "# take all contacts from our contact_groups into our contact property", "self", ".", "explode_contact_groups_into_contacts", "(", "host", ",", "contactgroups", ")", "if", "hasattr", "(", "host", ",", "'host_name'", ")", "and", "hasattr", "(", "host", ",", "'hostgroups'", ")", ":", "hname", "=", "host", ".", "host_name", "for", "hostgroup", "in", "host", ".", "hostgroups", ":", "hostgroups", ".", "add_member", "(", "hname", ",", "hostgroup", ".", "strip", "(", ")", ")" ]
Explode hosts with hostgroups, contactgroups::

* Add contact from contactgroups to host contacts
* Add host into their hostgroups as hostgroup members

:param hostgroups: Hostgroups to explode
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:param contactgroups: Contactgroups to explode
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: None
[ "Explode", "hosts", "with", "hostgroups", "contactgroups", "::" ]
python
train
fermiPy/fermipy
fermipy/diffuse/job_library.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/job_library.py#L418-L437
def build_job_configs(self, args):
    """Hook to build job configurations
    """
    job_configs = {}

    gmm = make_ring_dicts(library=args['library'], basedir='.')

    for galkey in gmm.galkeys():
        ring_dict = gmm.ring_dict(galkey)
        for ring_key, ring_info in ring_dict.items():
            output_file = ring_info.merged_gasmap
            file_string = ""
            for fname in ring_info.files:
                file_string += " %s" % fname
            logfile = make_nfs_path(output_file.replace('.fits', '.log'))
            job_configs[ring_key] = dict(output=output_file,
                                         args=file_string,
                                         logfile=logfile)

    return job_configs
[ "def", "build_job_configs", "(", "self", ",", "args", ")", ":", "job_configs", "=", "{", "}", "gmm", "=", "make_ring_dicts", "(", "library", "=", "args", "[", "'library'", "]", ",", "basedir", "=", "'.'", ")", "for", "galkey", "in", "gmm", ".", "galkeys", "(", ")", ":", "ring_dict", "=", "gmm", ".", "ring_dict", "(", "galkey", ")", "for", "ring_key", ",", "ring_info", "in", "ring_dict", ".", "items", "(", ")", ":", "output_file", "=", "ring_info", ".", "merged_gasmap", "file_string", "=", "\"\"", "for", "fname", "in", "ring_info", ".", "files", ":", "file_string", "+=", "\" %s\"", "%", "fname", "logfile", "=", "make_nfs_path", "(", "output_file", ".", "replace", "(", "'.fits'", ",", "'.log'", ")", ")", "job_configs", "[", "ring_key", "]", "=", "dict", "(", "output", "=", "output_file", ",", "args", "=", "file_string", ",", "logfile", "=", "logfile", ")", "return", "job_configs" ]
Hook to build job configurations
[ "Hook", "to", "build", "job", "configurations" ]
python
train
mjirik/imcut
imcut/pycut.py
https://github.com/mjirik/imcut/blob/1b38e7cd18a7a38fe683c1cabe1222fe5fa03aa3/imcut/pycut.py#L1539-L1555
def get_node_msindex(msinds, node_seed):
    """
    Convert seeds-like selection of voxel to multiscale index.
    :param msinds: ndarray with indexes
    :param node_seed: ndarray with 1 where selected pixel is, or list of
        indexes in this array
    :return: multiscale index of first found seed
    """
    if type(node_seed) == np.ndarray:
        seed_indexes = np.nonzero(node_seed)
    elif type(node_seed) == list:
        seed_indexes = node_seed
    else:
        seed_indexes = [node_seed]

    selected_nodes_msinds = msinds[seed_indexes]
    node_msindex = selected_nodes_msinds[0]
    return node_msindex
[ "def", "get_node_msindex", "(", "msinds", ",", "node_seed", ")", ":", "if", "type", "(", "node_seed", ")", "==", "np", ".", "ndarray", ":", "seed_indexes", "=", "np", ".", "nonzero", "(", "node_seed", ")", "elif", "type", "(", "node_seed", ")", "==", "list", ":", "seed_indexes", "=", "node_seed", "else", ":", "seed_indexes", "=", "[", "node_seed", "]", "selected_nodes_msinds", "=", "msinds", "[", "seed_indexes", "]", "node_msindex", "=", "selected_nodes_msinds", "[", "0", "]", "return", "node_msindex" ]
Convert seeds-like selection of voxel to multiscale index.

:param msinds: ndarray with indexes
:param node_seed: ndarray with 1 where selected pixel is, or list of indexes in this array
:return: multiscale index of first found seed
[ "Convert", "seeds", "-", "like", "selection", "of", "voxel", "to", "multiscale", "index", ".", ":", "param", "msinds", ":", "ndarray", "with", "indexes", ":", "param", "node_seed", ":", "ndarray", "with", "1", "where", "selected", "pixel", "is", "or", "list", "of", "indexes", "in", "this", "array", ":", "return", ":", "multiscale", "index", "of", "first", "found", "seed" ]
python
train
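A runnable example of the ndarray branch: a seeds-style mask selects a voxel, and the multiscale index at the first selected position is returned.

import numpy as np

msinds = np.arange(9).reshape(3, 3)   # toy multiscale index volume
seed = np.zeros((3, 3), dtype=int)
seed[1, 2] = 1                        # mark one voxel as selected
seed_indexes = np.nonzero(seed)       # positions where the mask is nonzero
print(msinds[seed_indexes][0])        # 5, i.e. msinds[1, 2]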
cyrus-/cypy
cypy/cg.py
https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/cg.py#L149-L161
def lines_once(cls, code, **kwargs):
    """One-off code generation using :meth:`lines`.

    If keyword args are provided, initialized using :meth:`with_id_processor`.
    """
    if kwargs:
        g = cls.with_id_processor()
        g._append_context(kwargs)
    else:
        g = cls()
    g.lines(code)
    return g.code
[ "def", "lines_once", "(", "cls", ",", "code", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ":", "g", "=", "cls", ".", "with_id_processor", "(", ")", "g", ".", "_append_context", "(", "kwargs", ")", "else", ":", "g", "=", "cls", "(", ")", "g", ".", "lines", "(", "code", ")", "return", "g", ".", "code" ]
One-off code generation using :meth:`lines`. If keyword args are provided, initialized using :meth:`with_id_processor`.
[ "One", "-", "off", "code", "generation", "using", ":", "meth", ":", "lines", ".", "If", "keyword", "args", "are", "provided", "initialized", "using", ":", "meth", ":", "with_id_processor", "." ]
python
train
materialsproject/pymatgen
pymatgen/analysis/graphs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/graphs.py#L1696-L1748
def add_edge(self, from_index, to_index,
             weight=None, warn_duplicates=True,
             edge_properties=None):
    """
    Add edge to graph.

    Since physically a 'bond' (or other connection
    between sites) doesn't have a direction, from_index,
    from_jimage can be swapped with to_index, to_jimage.

    However, images will always be shifted so that
    from_index < to_index and from_jimage becomes (0, 0, 0).

    :param from_index: index of site connecting from
    :param to_index: index of site connecting to
    :param weight (float): e.g. bond length
    :param warn_duplicates (bool): if True, will warn if
        trying to add duplicate edges (duplicate edges will not
        be added in either case)
    :param edge_properties (dict): any other information to
        store on graph edges, similar to Structure's site_properties
    :return:
    """
    # this is not necessary for the class to work, but
    # just makes it neater
    if to_index < from_index:
        to_index, from_index = from_index, to_index

    # sanitize types
    from_index, to_index = int(from_index), int(to_index)

    # check we're not trying to add a duplicate edge
    # there should only ever be at most one edge
    # between two sites
    existing_edge_data = self.graph.get_edge_data(from_index, to_index)
    if existing_edge_data and warn_duplicates:
        warnings.warn("Trying to add an edge that already exists from "
                      "site {} to site {}.".format(from_index, to_index))
        return

    # generic container for additional edge properties,
    # similar to site properties
    edge_properties = edge_properties or {}

    if weight:
        self.graph.add_edge(from_index, to_index,
                            weight=weight, **edge_properties)
    else:
        self.graph.add_edge(from_index, to_index, **edge_properties)
[ "def", "add_edge", "(", "self", ",", "from_index", ",", "to_index", ",", "weight", "=", "None", ",", "warn_duplicates", "=", "True", ",", "edge_properties", "=", "None", ")", ":", "# this is not necessary for the class to work, but", "# just makes it neater", "if", "to_index", "<", "from_index", ":", "to_index", ",", "from_index", "=", "from_index", ",", "to_index", "# sanitize types", "from_index", ",", "to_index", "=", "int", "(", "from_index", ")", ",", "int", "(", "to_index", ")", "# check we're not trying to add a duplicate edge", "# there should only ever be at most one edge", "# between two sites", "existing_edge_data", "=", "self", ".", "graph", ".", "get_edge_data", "(", "from_index", ",", "to_index", ")", "if", "existing_edge_data", "and", "warn_duplicates", ":", "warnings", ".", "warn", "(", "\"Trying to add an edge that already exists from \"", "\"site {} to site {}.\"", ".", "format", "(", "from_index", ",", "to_index", ")", ")", "return", "# generic container for additional edge properties,", "# similar to site properties", "edge_properties", "=", "edge_properties", "or", "{", "}", "if", "weight", ":", "self", ".", "graph", ".", "add_edge", "(", "from_index", ",", "to_index", ",", "weight", "=", "weight", ",", "*", "*", "edge_properties", ")", "else", ":", "self", ".", "graph", ".", "add_edge", "(", "from_index", ",", "to_index", ",", "*", "*", "edge_properties", ")" ]
Add edge to graph.

Since physically a 'bond' (or other connection between sites) doesn't have a
direction, from_index, from_jimage can be swapped with to_index, to_jimage.

However, images will always be shifted so that from_index < to_index and
from_jimage becomes (0, 0, 0).

:param from_index: index of site connecting from
:param to_index: index of site connecting to
:param weight (float): e.g. bond length
:param warn_duplicates (bool): if True, will warn if trying to add duplicate
    edges (duplicate edges will not be added in either case)
:param edge_properties (dict): any other information to store on graph edges,
    similar to Structure's site_properties
:return:
[ "Add", "edge", "to", "graph", "." ]
python
train
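The class wraps a networkx graph; a hedged sketch of the canonical-ordering and duplicate-check idea on a bare nx.Graph, not pymatgen's actual class:

import networkx as nx

g = nx.Graph()
from_index, to_index = 5, 2
if to_index < from_index:            # canonicalize so from_index < to_index
    to_index, from_index = from_index, to_index
if g.get_edge_data(from_index, to_index) is None:  # skip duplicates
    g.add_edge(from_index, to_index, weight=1.5)
print(list(g.edges(data=True)))      # [(2, 5, {'weight': 1.5})]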
pjuren/pyokit
src/pyokit/statistics/beta.py
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/statistics/beta.py#L110-L115
def beta_pdf(x, a, b):
    """Beta distribution probability density function."""
    bc = 1 / beta(a, b)
    fc = x ** (a - 1)
    sc = (1 - x) ** (b - 1)
    return bc * fc * sc
[ "def", "beta_pdf", "(", "x", ",", "a", ",", "b", ")", ":", "bc", "=", "1", "/", "beta", "(", "a", ",", "b", ")", "fc", "=", "x", "**", "(", "a", "-", "1", ")", "sc", "=", "(", "1", "-", "x", ")", "**", "(", "b", "-", "1", ")", "return", "bc", "*", "fc", "*", "sc" ]
Beta distribution probability density function.
[ "Beta", "distirbution", "probability", "density", "function", "." ]
python
train
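A quick numeric check, assuming `beta` here is the Euler beta function (e.g. scipy.special.beta): for Beta(2, 2) the density at x = 0.5 is 1.5.

from scipy.special import beta  # assumed source of the `beta` used above

def beta_pdf(x, a, b):
    # pdf(x) = x^(a-1) (1-x)^(b-1) / B(a, b)
    return (x ** (a - 1)) * ((1 - x) ** (b - 1)) / beta(a, b)

print(beta_pdf(0.5, 2, 2))  # 1.5, since B(2, 2) = 1/6 and 0.25 / (1/6) = 1.5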
PGower/PyCanvas
pycanvas/apis/group_categories.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/group_categories.py#L290-L317
def list_users_in_group_category(self, group_category_id, search_term=None, unassigned=None):
    """
    List users in group category.

    Returns a list of users in the group category.
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - group_category_id
    """ID"""
    path["group_category_id"] = group_category_id

    # OPTIONAL - search_term
    """The partial name or full ID of the users to match and return in the results
    list. Must be at least 3 characters."""
    if search_term is not None:
        params["search_term"] = search_term

    # OPTIONAL - unassigned
    """Set this value to true if you wish only to search unassigned users in the
    group category."""
    if unassigned is not None:
        params["unassigned"] = unassigned

    self.logger.debug("GET /api/v1/group_categories/{group_category_id}/users with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/group_categories/{group_category_id}/users".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_users_in_group_category", "(", "self", ",", "group_category_id", ",", "search_term", "=", "None", ",", "unassigned", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - group_category_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"group_category_id\"", "]", "=", "group_category_id", "# OPTIONAL - search_term\r", "\"\"\"The partial name or full ID of the users to match and return in the results\r\n list. Must be at least 3 characters.\"\"\"", "if", "search_term", "is", "not", "None", ":", "params", "[", "\"search_term\"", "]", "=", "search_term", "# OPTIONAL - unassigned\r", "\"\"\"Set this value to true if you wish only to search unassigned users in the\r\n group category.\"\"\"", "if", "unassigned", "is", "not", "None", ":", "params", "[", "\"unassigned\"", "]", "=", "unassigned", "self", ".", "logger", ".", "debug", "(", "\"GET /api/v1/group_categories/{group_category_id}/users with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"GET\"", ",", "\"/api/v1/group_categories/{group_category_id}/users\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "all_pages", "=", "True", ")" ]
List users in group category. Returns a list of users in the group category.
[ "List", "users", "in", "group", "category", ".", "Returns", "a", "list", "of", "users", "in", "the", "group", "category", "." ]
python
train
ninuxorg/nodeshot
nodeshot/community/participation/views.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/community/participation/views.py#L15-L23
def initial(self, request, *args, **kwargs):
    """
    Custom initial method:
        * ensure node exists and store it in an instance attribute
        * change queryset to return only comments of current node
    """
    super(NodeRelationViewMixin, self).initial(request, *args, **kwargs)
    self.node = get_object_or_404(Node, **{'slug': self.kwargs['slug']})
    self.queryset = self.model.objects.filter(node_id=self.node.id)
[ "def", "initial", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "NodeRelationViewMixin", ",", "self", ")", ".", "initial", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "node", "=", "get_object_or_404", "(", "Node", ",", "*", "*", "{", "'slug'", ":", "self", ".", "kwargs", "[", "'slug'", "]", "}", ")", "self", ".", "queryset", "=", "self", ".", "model", ".", "objects", ".", "filter", "(", "node_id", "=", "self", ".", "node", ".", "id", ")" ]
Custom initial method:
    * ensure node exists and store it in an instance attribute
    * change queryset to return only comments of current node
[ "Custom", "initial", "method", ":", "*", "ensure", "node", "exists", "and", "store", "it", "in", "an", "instance", "attribute", "*", "change", "queryset", "to", "return", "only", "comments", "of", "current", "node" ]
python
train
boriel/zxbasic
arch/zx48k/backend/__init__.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__init__.py#L946-L995
def _cast(ins):
    """ Convert data from typeA to typeB (only numeric data types)
    """
    # Signed and unsigned types are the same in the Z80
    tA = ins.quad[2]  # From TypeA
    tB = ins.quad[3]  # To TypeB

    YY_TYPES[tA]  # Type sizes
    xsB = sB = YY_TYPES[tB]  # Type sizes

    output = []
    if tA in ('u8', 'i8'):
        output.extend(_8bit_oper(ins.quad[4]))
    elif tA in ('u16', 'i16'):
        output.extend(_16bit_oper(ins.quad[4]))
    elif tA in ('u32', 'i32'):
        output.extend(_32bit_oper(ins.quad[4]))
    elif tA == 'f16':
        output.extend(_f16_oper(ins.quad[4]))
    elif tA == 'f':
        output.extend(_float_oper(ins.quad[4]))
    else:
        raise errors.GenericError(
            'Internal error: invalid typecast from %s to %s' % (tA, tB))

    if tB in ('u8', 'i8'):  # It was a byte
        output.extend(to_byte(tA))
    elif tB in ('u16', 'i16'):
        output.extend(to_word(tA))
    elif tB in ('u32', 'i32'):
        output.extend(to_long(tA))
    elif tB == 'f16':
        output.extend(to_fixed(tA))
    elif tB == 'f':
        output.extend(to_float(tA))

    xsB += sB % 2  # make it even (round up)

    if xsB > 4:
        output.extend(_fpush())
    else:
        if xsB > 2:
            output.append('push de')  # Fixed or 32 bit Integer

        if sB > 1:
            output.append('push hl')  # 16 bit Integer
        else:
            output.append('push af')  # 8 bit Integer

    return output
[ "def", "_cast", "(", "ins", ")", ":", "# Signed and unsigned types are the same in the Z80", "tA", "=", "ins", ".", "quad", "[", "2", "]", "# From TypeA", "tB", "=", "ins", ".", "quad", "[", "3", "]", "# To TypeB", "YY_TYPES", "[", "tA", "]", "# Type sizes", "xsB", "=", "sB", "=", "YY_TYPES", "[", "tB", "]", "# Type sizes", "output", "=", "[", "]", "if", "tA", "in", "(", "'u8'", ",", "'i8'", ")", ":", "output", ".", "extend", "(", "_8bit_oper", "(", "ins", ".", "quad", "[", "4", "]", ")", ")", "elif", "tA", "in", "(", "'u16'", ",", "'i16'", ")", ":", "output", ".", "extend", "(", "_16bit_oper", "(", "ins", ".", "quad", "[", "4", "]", ")", ")", "elif", "tA", "in", "(", "'u32'", ",", "'i32'", ")", ":", "output", ".", "extend", "(", "_32bit_oper", "(", "ins", ".", "quad", "[", "4", "]", ")", ")", "elif", "tA", "==", "'f16'", ":", "output", ".", "extend", "(", "_f16_oper", "(", "ins", ".", "quad", "[", "4", "]", ")", ")", "elif", "tA", "==", "'f'", ":", "output", ".", "extend", "(", "_float_oper", "(", "ins", ".", "quad", "[", "4", "]", ")", ")", "else", ":", "raise", "errors", ".", "GenericError", "(", "'Internal error: invalid typecast from %s to %s'", "%", "(", "tA", ",", "tB", ")", ")", "if", "tB", "in", "(", "'u8'", ",", "'i8'", ")", ":", "# It was a byte", "output", ".", "extend", "(", "to_byte", "(", "tA", ")", ")", "elif", "tB", "in", "(", "'u16'", ",", "'i16'", ")", ":", "output", ".", "extend", "(", "to_word", "(", "tA", ")", ")", "elif", "tB", "in", "(", "'u32'", ",", "'i32'", ")", ":", "output", ".", "extend", "(", "to_long", "(", "tA", ")", ")", "elif", "tB", "==", "'f16'", ":", "output", ".", "extend", "(", "to_fixed", "(", "tA", ")", ")", "elif", "tB", "==", "'f'", ":", "output", ".", "extend", "(", "to_float", "(", "tA", ")", ")", "xsB", "+=", "sB", "%", "2", "# make it even (round up)", "if", "xsB", ">", "4", ":", "output", ".", "extend", "(", "_fpush", "(", ")", ")", "else", ":", "if", "xsB", ">", "2", ":", "output", ".", "append", "(", "'push de'", ")", "# Fixed or 32 bit Integer", "if", "sB", ">", "1", ":", "output", ".", "append", "(", "'push hl'", ")", "# 16 bit Integer", "else", ":", "output", ".", "append", "(", "'push af'", ")", "# 8 bit Integer", "return", "output" ]
Convert data from typeA to typeB (only numeric data types)
[ "Convert", "data", "from", "typeA", "to", "typeB", "(", "only", "numeric", "data", "types", ")" ]
python
train
librosa/librosa
librosa/filters.py
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/filters.py#L229-L359
def chroma(sr, n_fft, n_chroma=12, A440=440.0, ctroct=5.0,
           octwidth=2, norm=2, base_c=True, dtype=np.float32):
    """Create a Filterbank matrix to convert STFT to chroma

    Parameters
    ----------
    sr : number > 0 [scalar]
        audio sampling rate

    n_fft : int > 0 [scalar]
        number of FFT bins

    n_chroma : int > 0 [scalar]
        number of chroma bins

    A440 : float > 0 [scalar]
        Reference frequency for A440

    ctroct : float > 0 [scalar]

    octwidth : float > 0 or None [scalar]
        `ctroct` and `octwidth` specify a dominance window -
        a Gaussian weighting centered on `ctroct` (in octs, A0 = 27.5Hz)
        and with a gaussian half-width of `octwidth`.
        Set `octwidth` to `None` to use a flat weighting.

    norm : float > 0 or np.inf
        Normalization factor for each filter

    base_c : bool
        If True, the filter bank will start at 'C'.
        If False, the filter bank will start at 'A'.

    dtype : np.dtype
        The data type of the output basis.
        By default, uses 32-bit (single-precision) floating point.

    Returns
    -------
    wts : ndarray [shape=(n_chroma, 1 + n_fft / 2)]
        Chroma filter matrix

    See Also
    --------
    util.normalize
    feature.chroma_stft

    Notes
    -----
    This function caches at level 10.

    Examples
    --------
    Build a simple chroma filter bank

    >>> chromafb = librosa.filters.chroma(22050, 4096)
    array([[ 1.689e-05,  3.024e-04, ...,  4.639e-17,  5.327e-17],
           [ 1.716e-05,  2.652e-04, ...,  2.674e-25,  3.176e-25],
           ...,
           [ 1.578e-05,  3.619e-04, ...,  8.577e-06,  9.205e-06],
           [ 1.643e-05,  3.355e-04, ...,  1.474e-10,  1.636e-10]])

    Use quarter-tones instead of semitones

    >>> librosa.filters.chroma(22050, 4096, n_chroma=24)
    array([[ 1.194e-05,  2.138e-04, ...,  6.297e-64,  1.115e-63],
           [ 1.206e-05,  2.009e-04, ...,  1.546e-79,  2.929e-79],
           ...,
           [ 1.162e-05,  2.372e-04, ...,  6.417e-38,  9.923e-38],
           [ 1.180e-05,  2.260e-04, ...,  4.697e-50,  7.772e-50]])

    Equally weight all octaves

    >>> librosa.filters.chroma(22050, 4096, octwidth=None)
    array([[ 3.036e-01,  2.604e-01, ...,  2.445e-16,  2.809e-16],
           [ 3.084e-01,  2.283e-01, ...,  1.409e-24,  1.675e-24],
           ...,
           [ 2.836e-01,  3.116e-01, ...,  4.520e-05,  4.854e-05],
           [ 2.953e-01,  2.888e-01, ...,  7.768e-10,  8.629e-10]])

    >>> import matplotlib.pyplot as plt
    >>> plt.figure()
    >>> librosa.display.specshow(chromafb, x_axis='linear')
    >>> plt.ylabel('Chroma filter')
    >>> plt.title('Chroma filter bank')
    >>> plt.colorbar()
    >>> plt.tight_layout()
    """
    wts = np.zeros((n_chroma, n_fft))

    # Get the FFT bins, not counting the DC component
    frequencies = np.linspace(0, sr, n_fft, endpoint=False)[1:]

    frqbins = n_chroma * hz_to_octs(frequencies, A440)

    # make up a value for the 0 Hz bin = 1.5 octaves below bin 1
    # (so chroma is 50% rotated from bin 1, and bin width is broad)
    frqbins = np.concatenate(([frqbins[0] - 1.5 * n_chroma], frqbins))

    binwidthbins = np.concatenate((np.maximum(frqbins[1:] - frqbins[:-1],
                                              1.0), [1]))

    D = np.subtract.outer(frqbins, np.arange(0, n_chroma, dtype='d')).T

    n_chroma2 = np.round(float(n_chroma) / 2)

    # Project into range -n_chroma/2 .. n_chroma/2
    # add on fixed offset of 10*n_chroma to ensure all values passed to
    # rem are positive
    D = np.remainder(D + n_chroma2 + 10*n_chroma, n_chroma) - n_chroma2

    # Gaussian bumps - 2*D to make them narrower
    wts = np.exp(-0.5 * (2*D / np.tile(binwidthbins, (n_chroma, 1)))**2)

    # normalize each column
    wts = util.normalize(wts, norm=norm, axis=0)

    # Maybe apply scaling for fft bins
    if octwidth is not None:
        wts *= np.tile(
            np.exp(-0.5 * (((frqbins/n_chroma - ctroct)/octwidth)**2)),
            (n_chroma, 1))

    if base_c:
        wts = np.roll(wts, -3, axis=0)

    # remove aliasing columns, copy to ensure row-contiguity
    return np.ascontiguousarray(wts[:, :int(1 + n_fft/2)], dtype=dtype)
[ "def", "chroma", "(", "sr", ",", "n_fft", ",", "n_chroma", "=", "12", ",", "A440", "=", "440.0", ",", "ctroct", "=", "5.0", ",", "octwidth", "=", "2", ",", "norm", "=", "2", ",", "base_c", "=", "True", ",", "dtype", "=", "np", ".", "float32", ")", ":", "wts", "=", "np", ".", "zeros", "(", "(", "n_chroma", ",", "n_fft", ")", ")", "# Get the FFT bins, not counting the DC component", "frequencies", "=", "np", ".", "linspace", "(", "0", ",", "sr", ",", "n_fft", ",", "endpoint", "=", "False", ")", "[", "1", ":", "]", "frqbins", "=", "n_chroma", "*", "hz_to_octs", "(", "frequencies", ",", "A440", ")", "# make up a value for the 0 Hz bin = 1.5 octaves below bin 1", "# (so chroma is 50% rotated from bin 1, and bin width is broad)", "frqbins", "=", "np", ".", "concatenate", "(", "(", "[", "frqbins", "[", "0", "]", "-", "1.5", "*", "n_chroma", "]", ",", "frqbins", ")", ")", "binwidthbins", "=", "np", ".", "concatenate", "(", "(", "np", ".", "maximum", "(", "frqbins", "[", "1", ":", "]", "-", "frqbins", "[", ":", "-", "1", "]", ",", "1.0", ")", ",", "[", "1", "]", ")", ")", "D", "=", "np", ".", "subtract", ".", "outer", "(", "frqbins", ",", "np", ".", "arange", "(", "0", ",", "n_chroma", ",", "dtype", "=", "'d'", ")", ")", ".", "T", "n_chroma2", "=", "np", ".", "round", "(", "float", "(", "n_chroma", ")", "/", "2", ")", "# Project into range -n_chroma/2 .. n_chroma/2", "# add on fixed offset of 10*n_chroma to ensure all values passed to", "# rem are positive", "D", "=", "np", ".", "remainder", "(", "D", "+", "n_chroma2", "+", "10", "*", "n_chroma", ",", "n_chroma", ")", "-", "n_chroma2", "# Gaussian bumps - 2*D to make them narrower", "wts", "=", "np", ".", "exp", "(", "-", "0.5", "*", "(", "2", "*", "D", "/", "np", ".", "tile", "(", "binwidthbins", ",", "(", "n_chroma", ",", "1", ")", ")", ")", "**", "2", ")", "# normalize each column", "wts", "=", "util", ".", "normalize", "(", "wts", ",", "norm", "=", "norm", ",", "axis", "=", "0", ")", "# Maybe apply scaling for fft bins", "if", "octwidth", "is", "not", "None", ":", "wts", "*=", "np", ".", "tile", "(", "np", ".", "exp", "(", "-", "0.5", "*", "(", "(", "(", "frqbins", "/", "n_chroma", "-", "ctroct", ")", "/", "octwidth", ")", "**", "2", ")", ")", ",", "(", "n_chroma", ",", "1", ")", ")", "if", "base_c", ":", "wts", "=", "np", ".", "roll", "(", "wts", ",", "-", "3", ",", "axis", "=", "0", ")", "# remove aliasing columns, copy to ensure row-contiguity", "return", "np", ".", "ascontiguousarray", "(", "wts", "[", ":", ",", ":", "int", "(", "1", "+", "n_fft", "/", "2", ")", "]", ",", "dtype", "=", "dtype", ")" ]
Create a Filterbank matrix to convert STFT to chroma

Parameters
----------
sr : number > 0 [scalar]
    audio sampling rate

n_fft : int > 0 [scalar]
    number of FFT bins

n_chroma : int > 0 [scalar]
    number of chroma bins

A440 : float > 0 [scalar]
    Reference frequency for A440

ctroct : float > 0 [scalar]

octwidth : float > 0 or None [scalar]
    `ctroct` and `octwidth` specify a dominance window -
    a Gaussian weighting centered on `ctroct` (in octs, A0 = 27.5Hz)
    and with a gaussian half-width of `octwidth`.
    Set `octwidth` to `None` to use a flat weighting.

norm : float > 0 or np.inf
    Normalization factor for each filter

base_c : bool
    If True, the filter bank will start at 'C'.
    If False, the filter bank will start at 'A'.

dtype : np.dtype
    The data type of the output basis.
    By default, uses 32-bit (single-precision) floating point.

Returns
-------
wts : ndarray [shape=(n_chroma, 1 + n_fft / 2)]
    Chroma filter matrix

See Also
--------
util.normalize
feature.chroma_stft

Notes
-----
This function caches at level 10.

Examples
--------
Build a simple chroma filter bank

>>> chromafb = librosa.filters.chroma(22050, 4096)
array([[ 1.689e-05,  3.024e-04, ...,  4.639e-17,  5.327e-17],
       [ 1.716e-05,  2.652e-04, ...,  2.674e-25,  3.176e-25],
       ...,
       [ 1.578e-05,  3.619e-04, ...,  8.577e-06,  9.205e-06],
       [ 1.643e-05,  3.355e-04, ...,  1.474e-10,  1.636e-10]])

Use quarter-tones instead of semitones

>>> librosa.filters.chroma(22050, 4096, n_chroma=24)
array([[ 1.194e-05,  2.138e-04, ...,  6.297e-64,  1.115e-63],
       [ 1.206e-05,  2.009e-04, ...,  1.546e-79,  2.929e-79],
       ...,
       [ 1.162e-05,  2.372e-04, ...,  6.417e-38,  9.923e-38],
       [ 1.180e-05,  2.260e-04, ...,  4.697e-50,  7.772e-50]])

Equally weight all octaves

>>> librosa.filters.chroma(22050, 4096, octwidth=None)
array([[ 3.036e-01,  2.604e-01, ...,  2.445e-16,  2.809e-16],
       [ 3.084e-01,  2.283e-01, ...,  1.409e-24,  1.675e-24],
       ...,
       [ 2.836e-01,  3.116e-01, ...,  4.520e-05,  4.854e-05],
       [ 2.953e-01,  2.888e-01, ...,  7.768e-10,  8.629e-10]])

>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(chromafb, x_axis='linear')
>>> plt.ylabel('Chroma filter')
>>> plt.title('Chroma filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
[ "Create", "a", "Filterbank", "matrix", "to", "convert", "STFT", "to", "chroma" ]
python
test
helixyte/everest
everest/traversalpath.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/traversalpath.py#L51-L56
def pop(self):
    """
    Removes the last traversal path node from this traversal path.
    """
    node = self.nodes.pop()
    self.__keys.remove(node.key)
[ "def", "pop", "(", "self", ")", ":", "node", "=", "self", ".", "nodes", ".", "pop", "(", ")", "self", ".", "__keys", ".", "remove", "(", "node", ".", "key", ")" ]
Removes the last traversal path node from this traversal path.
[ "Removes", "the", "last", "traversal", "path", "node", "from", "this", "traversal", "path", "." ]
python
train
Azure/azure-sdk-for-python
azure-batch/azure/batch/custom/patch.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-batch/azure/batch/custom/patch.py#L172-L186
def _handle_output(results_queue):
    """Scan output for exceptions.

    If there is an output from an add task collection call, add it to the results.

    :param results_queue: Queue containing results of attempted add_collection's
    :type results_queue: collections.deque
    :return: list of TaskAddResults
    :rtype: list[~TaskAddResult]
    """
    results = []
    while results_queue:
        queue_item = results_queue.pop()
        results.append(queue_item)
    return results
[ "def", "_handle_output", "(", "results_queue", ")", ":", "results", "=", "[", "]", "while", "results_queue", ":", "queue_item", "=", "results_queue", ".", "pop", "(", ")", "results", ".", "append", "(", "queue_item", ")", "return", "results" ]
Scan output for exceptions.

If there is an output from an add task collection call, add it to the results.

:param results_queue: Queue containing results of attempted add_collection's
:type results_queue: collections.deque
:return: list of TaskAddResults
:rtype: list[~TaskAddResult]
[ "Scan", "output", "for", "exceptions" ]
python
test
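Because items are popped from the right end of the deque, the returned list is in reverse insertion order; a small runnable check:

from collections import deque

results_queue = deque(["r1", "r2", "r3"])
results = []
while results_queue:
    results.append(results_queue.pop())  # pop() takes from the right
print(results)  # ['r3', 'r2', 'r1']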
Cognexa/cxflow
cxflow/cli/common.py
https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/common.py#L66-L90
def create_dataset(config: dict, output_dir: Optional[str]=None) -> AbstractDataset:
    """
    Create a dataset object according to the given config.

    Dataset config section and the `output_dir` are passed to the constructor
    in a single YAML-encoded string.

    :param config: config dict with dataset config
    :param output_dir: path to the training output dir or None
    :return: dataset object
    """
    logging.info('Creating dataset')

    dataset_config = make_simple(config)['dataset']
    assert 'class' in dataset_config, '`dataset.class` not present in the config'
    dataset_module, dataset_class = parse_fully_qualified_name(dataset_config['class'])

    if 'output_dir' in dataset_config:
        raise ValueError('The `output_dir` key is reserved and can not be used in dataset configuration.')

    dataset_config = {'output_dir': output_dir, **config['dataset']}
    del dataset_config['class']

    dataset = create_object(dataset_module, dataset_class, args=(yaml_to_str(dataset_config),))
    logging.info('\t%s created', type(dataset).__name__)
    return dataset
[ "def", "create_dataset", "(", "config", ":", "dict", ",", "output_dir", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "AbstractDataset", ":", "logging", ".", "info", "(", "'Creating dataset'", ")", "dataset_config", "=", "make_simple", "(", "config", ")", "[", "'dataset'", "]", "assert", "'class'", "in", "dataset_config", ",", "'`dataset.class` not present in the config'", "dataset_module", ",", "dataset_class", "=", "parse_fully_qualified_name", "(", "dataset_config", "[", "'class'", "]", ")", "if", "'output_dir'", "in", "dataset_config", ":", "raise", "ValueError", "(", "'The `output_dir` key is reserved and can not be used in dataset configuration.'", ")", "dataset_config", "=", "{", "'output_dir'", ":", "output_dir", ",", "*", "*", "config", "[", "'dataset'", "]", "}", "del", "dataset_config", "[", "'class'", "]", "dataset", "=", "create_object", "(", "dataset_module", ",", "dataset_class", ",", "args", "=", "(", "yaml_to_str", "(", "dataset_config", ")", ",", ")", ")", "logging", ".", "info", "(", "'\\t%s created'", ",", "type", "(", "dataset", ")", ".", "__name__", ")", "return", "dataset" ]
Create a dataset object according to the given config.

Dataset config section and the `output_dir` are passed to the constructor in a
single YAML-encoded string.

:param config: config dict with dataset config
:param output_dir: path to the training output dir or None
:return: dataset object
[ "Create", "a", "dataset", "object", "according", "to", "the", "given", "config", "." ]
python
train
mgbarrero/xbob.db.atvskeystroke
xbob/db/atvskeystroke/query.py
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/query.py#L249-L253
def protocol(self, name):
    """Returns the protocol object in the database given a certain name.
    Raises an error if that does not exist."""
    return self.query(Protocol).filter(Protocol.name == name).one()
[ "def", "protocol", "(", "self", ",", "name", ")", ":", "return", "self", ".", "query", "(", "Protocol", ")", ".", "filter", "(", "Protocol", ".", "name", "==", "name", ")", ".", "one", "(", ")" ]
Returns the protocol object in the database given a certain name. Raises an error if that does not exist.
[ "Returns", "the", "protocol", "object", "in", "the", "database", "given", "a", "certain", "name", ".", "Raises", "an", "error", "if", "that", "does", "not", "exist", "." ]
python
train
okpy/ok-client
client/protocols/backup.py
https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/protocols/backup.py#L223-L250
def send_messages(self, access_token, messages, timeout, current):
    """Send messages to server, along with user authentication."""
    is_submit = current and self.args.submit and not self.args.revise
    is_revision = current and self.args.revise
    data = {
        'assignment': self.assignment.endpoint,
        'messages': messages,
        'submit': is_submit
    }
    if is_revision:
        address = self.REVISION_ENDPOINT.format(server=self.assignment.server_url)
    else:
        address = self.BACKUP_ENDPOINT.format(server=self.assignment.server_url)
    address_params = {
        'client_name': 'ok-client',
        'client_version': client.__version__,
    }
    headers = {'Authorization': 'Bearer {}'.format(access_token)}

    log.info('Sending messages to %s', address)
    response = requests.post(address, headers=headers,
                             params=address_params, json=data, timeout=timeout)
    response.raise_for_status()
    return response.json()
[ "def", "send_messages", "(", "self", ",", "access_token", ",", "messages", ",", "timeout", ",", "current", ")", ":", "is_submit", "=", "current", "and", "self", ".", "args", ".", "submit", "and", "not", "self", ".", "args", ".", "revise", "is_revision", "=", "current", "and", "self", ".", "args", ".", "revise", "data", "=", "{", "'assignment'", ":", "self", ".", "assignment", ".", "endpoint", ",", "'messages'", ":", "messages", ",", "'submit'", ":", "is_submit", "}", "if", "is_revision", ":", "address", "=", "self", ".", "REVISION_ENDPOINT", ".", "format", "(", "server", "=", "self", ".", "assignment", ".", "server_url", ")", "else", ":", "address", "=", "self", ".", "BACKUP_ENDPOINT", ".", "format", "(", "server", "=", "self", ".", "assignment", ".", "server_url", ")", "address_params", "=", "{", "'client_name'", ":", "'ok-client'", ",", "'client_version'", ":", "client", ".", "__version__", ",", "}", "headers", "=", "{", "'Authorization'", ":", "'Bearer {}'", ".", "format", "(", "access_token", ")", "}", "log", ".", "info", "(", "'Sending messages to %s'", ",", "address", ")", "response", "=", "requests", ".", "post", "(", "address", ",", "headers", "=", "headers", ",", "params", "=", "address_params", ",", "json", "=", "data", ",", "timeout", "=", "timeout", ")", "response", ".", "raise_for_status", "(", ")", "return", "response", ".", "json", "(", ")" ]
Send messages to server, along with user authentication.
[ "Send", "messages", "to", "server", "along", "with", "user", "authentication", "." ]
python
train
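A standalone sketch of the authenticated POST that send_messages builds; the URL, token and assignment endpoint below are placeholders, not real ok-server values:
    import requests
    resp = requests.post(
        'https://okpy.example/api/v3/backups/',
        headers={'Authorization': 'Bearer FAKE_TOKEN'},
        params={'client_name': 'ok-client', 'client_version': 'v1.0.0'},
        json={'assignment': 'cal/cs61a/fa19/lab01', 'messages': {}, 'submit': False},
        timeout=10)
    resp.raise_for_status()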
kakwa/ldapcherry
ldapcherry/roles.py
https://github.com/kakwa/ldapcherry/blob/b5e7cb6a44065abc30d164e72981b3713a172dda/ldapcherry/roles.py#L302-L327
def get_roles(self, groups): """get list of roles and list of standalone groups""" roles = set([]) parentroles = set([]) notroles = set([]) tmp = set([]) usedgroups = {} unusedgroups = {} ret = {} # determine roles membership for role in self.roles: if self._check_member( role, groups, notroles, tmp, parentroles, usedgroups): roles.add(role) # determine standalone groups not matching any roles for b in groups: for g in groups[b]: if b not in usedgroups or g not in usedgroups[b]: if b not in unusedgroups: unusedgroups[b] = set([]) unusedgroups[b].add(g) ret['roles'] = roles ret['unusedgroups'] = unusedgroups return ret
[ "def", "get_roles", "(", "self", ",", "groups", ")", ":", "roles", "=", "set", "(", "[", "]", ")", "parentroles", "=", "set", "(", "[", "]", ")", "notroles", "=", "set", "(", "[", "]", ")", "tmp", "=", "set", "(", "[", "]", ")", "usedgroups", "=", "{", "}", "unusedgroups", "=", "{", "}", "ret", "=", "{", "}", "# determine roles membership", "for", "role", "in", "self", ".", "roles", ":", "if", "self", ".", "_check_member", "(", "role", ",", "groups", ",", "notroles", ",", "tmp", ",", "parentroles", ",", "usedgroups", ")", ":", "roles", ".", "add", "(", "role", ")", "# determine standalone groups not matching any roles", "for", "b", "in", "groups", ":", "for", "g", "in", "groups", "[", "b", "]", ":", "if", "b", "not", "in", "usedgroups", "or", "g", "not", "in", "usedgroups", "[", "b", "]", ":", "if", "b", "not", "in", "unusedgroups", ":", "unusedgroups", "[", "b", "]", "=", "set", "(", "[", "]", ")", "unusedgroups", "[", "b", "]", ".", "add", "(", "g", ")", "ret", "[", "'roles'", "]", "=", "roles", "ret", "[", "'unusedgroups'", "]", "=", "unusedgroups", "return", "ret" ]
get list of roles and list of standalone groups
[ "get", "list", "of", "roles", "and", "list", "of", "standalone", "groups" ]
python
train
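A hedged sketch of the input and output shapes; the backend and group names are invented, and roles_obj is assumed to be a configured instance of this class:
    groups = {'ldap': {'cn=admins,dc=example', 'cn=dev,dc=example'}}
    result = roles_obj.get_roles(groups)
    # result == {'roles': {...matched role names...},
    #            'unusedgroups': {'ldap': {...groups no role consumed...}}}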
RedHatInsights/insights-core
insights/parsers/ps.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/ps.py#L104-L123
def users(self, proc): """ Searches for all users running a given command. Returns: dict: each username as a key to a list of PIDs (as strings) that are running the given process. ``{}`` if neither ``USER`` nor ``UID`` is found or ``proc`` is not found. .. note:: 'proc' must match the entire command and arguments. """ ret = {} if self.first_column in ['USER', 'UID']: for row in self.data: if proc == row[self.command_name]: if row[self.first_column] not in ret: ret[row[self.first_column]] = [] ret[row[self.first_column]].append(row["PID"]) return ret
[ "def", "users", "(", "self", ",", "proc", ")", ":", "ret", "=", "{", "}", "if", "self", ".", "first_column", "in", "[", "'USER'", ",", "'UID'", "]", ":", "for", "row", "in", "self", ".", "data", ":", "if", "proc", "==", "row", "[", "self", ".", "command_name", "]", ":", "if", "row", "[", "self", ".", "first_column", "]", "not", "in", "ret", ":", "ret", "[", "row", "[", "self", ".", "first_column", "]", "]", "=", "[", "]", "ret", "[", "row", "[", "self", ".", "first_column", "]", "]", ".", "append", "(", "row", "[", "\"PID\"", "]", ")", "return", "ret" ]
Searches for all users running a given command. Returns: dict: each username as a key to a list of PIDs (as strings) that are running the given process. ``{}`` if neither ``USER`` nor ``UID`` is found or ``proc`` is not found. .. note:: 'proc' must match the entire command and arguments.
[ "Searches", "for", "all", "users", "running", "a", "given", "command", "." ]
python
train
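Usage sketch, assuming ps is a parsed instance of this class; the command string is illustrative and must match the full command plus arguments:
    pids_by_user = ps.users('/usr/sbin/sshd -D')
    # e.g. {'root': ['1034', '2201']}; {} if the first column is not USER/UID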
merll/docker-map
dockermap/map/state/base.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/state/base.py#L107-L125
def inspect(self): """ Fetches information about the container from the client. """ policy = self.policy config_id = self.config_id if self.config_id.config_type == ItemType.VOLUME: if self.container_map.use_attached_parent_name: container_name = policy.aname(config_id.map_name, config_id.instance_name, config_id.config_name) else: container_name = policy.aname(config_id.map_name, config_id.instance_name) else: container_name = policy.cname(config_id.map_name, config_id.config_name, config_id.instance_name) self.container_name = container_name if container_name in policy.container_names[self.client_name]: self.detail = self.client.inspect_container(container_name) else: self.detail = NOT_FOUND
[ "def", "inspect", "(", "self", ")", ":", "policy", "=", "self", ".", "policy", "config_id", "=", "self", ".", "config_id", "if", "self", ".", "config_id", ".", "config_type", "==", "ItemType", ".", "VOLUME", ":", "if", "self", ".", "container_map", ".", "use_attached_parent_name", ":", "container_name", "=", "policy", ".", "aname", "(", "config_id", ".", "map_name", ",", "config_id", ".", "instance_name", ",", "config_id", ".", "config_name", ")", "else", ":", "container_name", "=", "policy", ".", "aname", "(", "config_id", ".", "map_name", ",", "config_id", ".", "instance_name", ")", "else", ":", "container_name", "=", "policy", ".", "cname", "(", "config_id", ".", "map_name", ",", "config_id", ".", "config_name", ",", "config_id", ".", "instance_name", ")", "self", ".", "container_name", "=", "container_name", "if", "container_name", "in", "policy", ".", "container_names", "[", "self", ".", "client_name", "]", ":", "self", ".", "detail", "=", "self", ".", "client", ".", "inspect_container", "(", "container_name", ")", "else", ":", "self", ".", "detail", "=", "NOT_FOUND" ]
Fetches information about the container from the client.
[ "Fetches", "information", "about", "the", "container", "from", "the", "client", "." ]
python
train
jobovy/galpy
galpy/df/streamdf.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/df/streamdf.py#L1256-L1359
def _determine_stream_spreadLB(self,simple=_USESIMPLE, ro=None,vo=None, R0=None,Zsun=None,vsun=None): """Determine the spread in the stream in observable coordinates""" if not hasattr(self,'_allErrCovs'): self._determine_stream_spread(simple=simple) if ro is None: ro= self._ro if vo is None: vo= self._vo if R0 is None: R0= self._R0 if Zsun is None: Zsun= self._Zsun if vsun is None: vsun= self._vsun allErrCovsLB= numpy.empty_like(self._allErrCovs) obs= [R0,0.,Zsun] obs.extend(vsun) obskwargs= {} obskwargs['ro']= ro obskwargs['vo']= vo obskwargs['obs']= obs self._ErrCovsLBScale= [180.,90., self._progenitor.dist(**obskwargs), numpy.fabs(self._progenitor.vlos(**obskwargs)), numpy.sqrt(self._progenitor.pmll(**obskwargs)**2. +self._progenitor.pmbb(**obskwargs)**2.), numpy.sqrt(self._progenitor.pmll(**obskwargs)**2. +self._progenitor.pmbb(**obskwargs)**2.)] allErrCovsEigvalLB= numpy.empty((len(self._thetasTrack),6)) allErrCovsEigvecLB= numpy.empty_like(self._allErrCovs) eigDir= numpy.array([numpy.array([1.,0.,0.,0.,0.,0.]) for ii in range(6)]) for ii in range(self._nTrackChunks): tjacXY= bovy_coords.galcenrect_to_XYZ_jac(*self._ObsTrackXY[ii]) tjacLB= bovy_coords.lbd_to_XYZ_jac(*self._ObsTrackLB[ii], degree=True) tjacLB[:3,:]/= ro tjacLB[3:,:]/= vo for jj in range(6): tjacLB[:,jj]*= self._ErrCovsLBScale[jj] tjac= numpy.dot(numpy.linalg.inv(tjacLB),tjacXY) allErrCovsLB[ii]=\ numpy.dot(tjac,numpy.dot(self._allErrCovsXY[ii],tjac.T)) #Eigen decomposition for interpolation teig= numpy.linalg.eig(allErrCovsLB[ii]) #Sort them to match them up later sortIndx= numpy.argsort(teig[0]) allErrCovsEigvalLB[ii]= teig[0][sortIndx] #Make sure the eigenvectors point in the same direction for jj in range(6): if numpy.sum(eigDir[jj]*teig[1][:,sortIndx[jj]]) < 0.: teig[1][:,sortIndx[jj]]*= -1. eigDir[jj]= teig[1][:,sortIndx[jj]] allErrCovsEigvecLB[ii]= teig[1][:,sortIndx] self._allErrCovsLBUnscaled= allErrCovsLB #Interpolate the allErrCovsLB covariance matrices along the interpolated track #Interpolate the eigenvalues interpAllErrCovsEigvalLB=\ [interpolate.InterpolatedUnivariateSpline(self._thetasTrack, allErrCovsEigvalLB[:,ii], k=3) for ii in range(6)] #Now build the interpolated allErrCovsXY using slerp interpolatedAllErrCovsLB= numpy.empty((len(self._interpolatedThetasTrack), 6,6)) interpolatedEigval=\ numpy.array([interpAllErrCovsEigvalLB[ii](self._interpolatedThetasTrack) for ii in range(6)]) #6,ninterp #Interpolate in chunks interpolatedEigvec= numpy.empty((len(self._interpolatedThetasTrack), 6,6)) for ii in range(self._nTrackChunks-1): slerpOmegas=\ [numpy.arccos(numpy.sum(allErrCovsEigvecLB[ii,:,jj]*allErrCovsEigvecLB[ii+1,:,jj])) for jj in range(6)] slerpts= (self._interpolatedThetasTrack-self._thetasTrack[ii])/\ (self._thetasTrack[ii+1]-self._thetasTrack[ii]) slerpIndx= (slerpts >= 0.)*(slerpts <= 1.) for jj in range(6): for kk in range(6): interpolatedEigvec[slerpIndx,kk,jj]=\ (numpy.sin((1-slerpts[slerpIndx])*slerpOmegas[jj])*allErrCovsEigvecLB[ii,kk,jj] +numpy.sin(slerpts[slerpIndx]*slerpOmegas[jj])*allErrCovsEigvecLB[ii+1,kk,jj])/numpy.sin(slerpOmegas[jj]) for ii in range(len(self._interpolatedThetasTrack)): interpolatedAllErrCovsLB[ii]=\ numpy.dot(interpolatedEigvec[ii], numpy.dot(numpy.diag(interpolatedEigval[:,ii]), interpolatedEigvec[ii].T)) self._interpolatedAllErrCovsLBUnscaled= interpolatedAllErrCovsLB #Also calculate the (l,b,..) -> (X,Y,..) Jacobian at all of the interpolated and not interpolated points trackLogDetJacLB= numpy.empty_like(self._thetasTrack) interpolatedTrackLogDetJacLB=\ numpy.empty_like(self._interpolatedThetasTrack) for ii in range(self._nTrackChunks): tjacLB= bovy_coords.lbd_to_XYZ_jac(*self._ObsTrackLB[ii], degree=True) trackLogDetJacLB[ii]= numpy.log(numpy.linalg.det(tjacLB)) self._trackLogDetJacLB= trackLogDetJacLB for ii in range(len(self._interpolatedThetasTrack)): tjacLB=\ bovy_coords.lbd_to_XYZ_jac(*self._interpolatedObsTrackLB[ii], degree=True) interpolatedTrackLogDetJacLB[ii]=\ numpy.log(numpy.linalg.det(tjacLB)) self._interpolatedTrackLogDetJacLB= interpolatedTrackLogDetJacLB return None
[ "def", "_determine_stream_spreadLB", "(", "self", ",", "simple", "=", "_USESIMPLE", ",", "ro", "=", "None", ",", "vo", "=", "None", ",", "R0", "=", "None", ",", "Zsun", "=", "None", ",", "vsun", "=", "None", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_allErrCovs'", ")", ":", "self", ".", "_determine_stream_spread", "(", "simple", "=", "simple", ")", "if", "ro", "is", "None", ":", "ro", "=", "self", ".", "_ro", "if", "vo", "is", "None", ":", "vo", "=", "self", ".", "_vo", "if", "R0", "is", "None", ":", "R0", "=", "self", ".", "_R0", "if", "Zsun", "is", "None", ":", "Zsun", "=", "self", ".", "_Zsun", "if", "vsun", "is", "None", ":", "vsun", "=", "self", ".", "_vsun", "allErrCovsLB", "=", "numpy", ".", "empty_like", "(", "self", ".", "_allErrCovs", ")", "obs", "=", "[", "R0", ",", "0.", ",", "Zsun", "]", "obs", ".", "extend", "(", "vsun", ")", "obskwargs", "=", "{", "}", "obskwargs", "[", "'ro'", "]", "=", "ro", "obskwargs", "[", "'vo'", "]", "=", "vo", "obskwargs", "[", "'obs'", "]", "=", "obs", "self", ".", "_ErrCovsLBScale", "=", "[", "180.", ",", "90.", ",", "self", ".", "_progenitor", ".", "dist", "(", "*", "*", "obskwargs", ")", ",", "numpy", ".", "fabs", "(", "self", ".", "_progenitor", ".", "vlos", "(", "*", "*", "obskwargs", ")", ")", ",", "numpy", ".", "sqrt", "(", "self", ".", "_progenitor", ".", "pmll", "(", "*", "*", "obskwargs", ")", "**", "2.", "+", "self", ".", "_progenitor", ".", "pmbb", "(", "*", "*", "obskwargs", ")", "**", "2.", ")", ",", "numpy", ".", "sqrt", "(", "self", ".", "_progenitor", ".", "pmll", "(", "*", "*", "obskwargs", ")", "**", "2.", "+", "self", ".", "_progenitor", ".", "pmbb", "(", "*", "*", "obskwargs", ")", "**", "2.", ")", "]", "allErrCovsEigvalLB", "=", "numpy", ".", "empty", "(", "(", "len", "(", "self", ".", "_thetasTrack", ")", ",", "6", ")", ")", "allErrCovsEigvecLB", "=", "numpy", ".", "empty_like", "(", "self", ".", "_allErrCovs", ")", "eigDir", "=", "numpy", ".", "array", "(", "[", "numpy", ".", "array", "(", "[", "1.", ",", "0.", ",", "0.", ",", "0.", ",", "0.", ",", "0.", "]", ")", "for", "ii", "in", "range", "(", "6", ")", "]", ")", "for", "ii", "in", "range", "(", "self", ".", "_nTrackChunks", ")", ":", "tjacXY", "=", "bovy_coords", ".", "galcenrect_to_XYZ_jac", "(", "*", "self", ".", "_ObsTrackXY", "[", "ii", "]", ")", "tjacLB", "=", "bovy_coords", ".", "lbd_to_XYZ_jac", "(", "*", "self", ".", "_ObsTrackLB", "[", "ii", "]", ",", "degree", "=", "True", ")", "tjacLB", "[", ":", "3", ",", ":", "]", "/=", "ro", "tjacLB", "[", "3", ":", ",", ":", "]", "/=", "vo", "for", "jj", "in", "range", "(", "6", ")", ":", "tjacLB", "[", ":", ",", "jj", "]", "*=", "self", ".", "_ErrCovsLBScale", "[", "jj", "]", "tjac", "=", "numpy", ".", "dot", "(", "numpy", ".", "linalg", ".", "inv", "(", "tjacLB", ")", ",", "tjacXY", ")", "allErrCovsLB", "[", "ii", "]", "=", "numpy", ".", "dot", "(", "tjac", ",", "numpy", ".", "dot", "(", "self", ".", "_allErrCovsXY", "[", "ii", "]", ",", "tjac", ".", "T", ")", ")", "#Eigen decomposition for interpolation", "teig", "=", "numpy", ".", "linalg", ".", "eig", "(", "allErrCovsLB", "[", "ii", "]", ")", "#Sort them to match them up later", "sortIndx", "=", "numpy", ".", "argsort", "(", "teig", "[", "0", "]", ")", "allErrCovsEigvalLB", "[", "ii", "]", "=", "teig", "[", "0", "]", "[", "sortIndx", "]", "#Make sure the eigenvectors point in the same direction", "for", "jj", "in", "range", "(", "6", ")", ":", "if", "numpy", ".", "sum", "(", "eigDir", "[", "jj", "]", "*", "teig", "[", "1", "]", "[", ":", ",", "sortIndx", 
"[", "jj", "]", "]", ")", "<", "0.", ":", "teig", "[", "1", "]", "[", ":", ",", "sortIndx", "[", "jj", "]", "]", "*=", "-", "1.", "eigDir", "[", "jj", "]", "=", "teig", "[", "1", "]", "[", ":", ",", "sortIndx", "[", "jj", "]", "]", "allErrCovsEigvecLB", "[", "ii", "]", "=", "teig", "[", "1", "]", "[", ":", ",", "sortIndx", "]", "self", ".", "_allErrCovsLBUnscaled", "=", "allErrCovsLB", "#Interpolate the allErrCovsLB covariance matrices along the interpolated track", "#Interpolate the eigenvalues", "interpAllErrCovsEigvalLB", "=", "[", "interpolate", ".", "InterpolatedUnivariateSpline", "(", "self", ".", "_thetasTrack", ",", "allErrCovsEigvalLB", "[", ":", ",", "ii", "]", ",", "k", "=", "3", ")", "for", "ii", "in", "range", "(", "6", ")", "]", "#Now build the interpolated allErrCovsXY using slerp", "interpolatedAllErrCovsLB", "=", "numpy", ".", "empty", "(", "(", "len", "(", "self", ".", "_interpolatedThetasTrack", ")", ",", "6", ",", "6", ")", ")", "interpolatedEigval", "=", "numpy", ".", "array", "(", "[", "interpAllErrCovsEigvalLB", "[", "ii", "]", "(", "self", ".", "_interpolatedThetasTrack", ")", "for", "ii", "in", "range", "(", "6", ")", "]", ")", "#6,ninterp", "#Interpolate in chunks", "interpolatedEigvec", "=", "numpy", ".", "empty", "(", "(", "len", "(", "self", ".", "_interpolatedThetasTrack", ")", ",", "6", ",", "6", ")", ")", "for", "ii", "in", "range", "(", "self", ".", "_nTrackChunks", "-", "1", ")", ":", "slerpOmegas", "=", "[", "numpy", ".", "arccos", "(", "numpy", ".", "sum", "(", "allErrCovsEigvecLB", "[", "ii", ",", ":", ",", "jj", "]", "*", "allErrCovsEigvecLB", "[", "ii", "+", "1", ",", ":", ",", "jj", "]", ")", ")", "for", "jj", "in", "range", "(", "6", ")", "]", "slerpts", "=", "(", "self", ".", "_interpolatedThetasTrack", "-", "self", ".", "_thetasTrack", "[", "ii", "]", ")", "/", "(", "self", ".", "_thetasTrack", "[", "ii", "+", "1", "]", "-", "self", ".", "_thetasTrack", "[", "ii", "]", ")", "slerpIndx", "=", "(", "slerpts", ">=", "0.", ")", "*", "(", "slerpts", "<=", "1.", ")", "for", "jj", "in", "range", "(", "6", ")", ":", "for", "kk", "in", "range", "(", "6", ")", ":", "interpolatedEigvec", "[", "slerpIndx", ",", "kk", ",", "jj", "]", "=", "(", "numpy", ".", "sin", "(", "(", "1", "-", "slerpts", "[", "slerpIndx", "]", ")", "*", "slerpOmegas", "[", "jj", "]", ")", "*", "allErrCovsEigvecLB", "[", "ii", ",", "kk", ",", "jj", "]", "+", "numpy", ".", "sin", "(", "slerpts", "[", "slerpIndx", "]", "*", "slerpOmegas", "[", "jj", "]", ")", "*", "allErrCovsEigvecLB", "[", "ii", "+", "1", ",", "kk", ",", "jj", "]", ")", "/", "numpy", ".", "sin", "(", "slerpOmegas", "[", "jj", "]", ")", "for", "ii", "in", "range", "(", "len", "(", "self", ".", "_interpolatedThetasTrack", ")", ")", ":", "interpolatedAllErrCovsLB", "[", "ii", "]", "=", "numpy", ".", "dot", "(", "interpolatedEigvec", "[", "ii", "]", ",", "numpy", ".", "dot", "(", "numpy", ".", "diag", "(", "interpolatedEigval", "[", ":", ",", "ii", "]", ")", ",", "interpolatedEigvec", "[", "ii", "]", ".", "T", ")", ")", "self", ".", "_interpolatedAllErrCovsLBUnscaled", "=", "interpolatedAllErrCovsLB", "#Also calculate the (l,b,..) -> (X,Y,..) 
Jacobian at all of the interpolated and not interpolated points", "trackLogDetJacLB", "=", "numpy", ".", "empty_like", "(", "self", ".", "_thetasTrack", ")", "interpolatedTrackLogDetJacLB", "=", "numpy", ".", "empty_like", "(", "self", ".", "_interpolatedThetasTrack", ")", "for", "ii", "in", "range", "(", "self", ".", "_nTrackChunks", ")", ":", "tjacLB", "=", "bovy_coords", ".", "lbd_to_XYZ_jac", "(", "*", "self", ".", "_ObsTrackLB", "[", "ii", "]", ",", "degree", "=", "True", ")", "trackLogDetJacLB", "[", "ii", "]", "=", "numpy", ".", "log", "(", "numpy", ".", "linalg", ".", "det", "(", "tjacLB", ")", ")", "self", ".", "_trackLogDetJacLB", "=", "trackLogDetJacLB", "for", "ii", "in", "range", "(", "len", "(", "self", ".", "_interpolatedThetasTrack", ")", ")", ":", "tjacLB", "=", "bovy_coords", ".", "lbd_to_XYZ_jac", "(", "*", "self", ".", "_interpolatedObsTrackLB", "[", "ii", "]", ",", "degree", "=", "True", ")", "interpolatedTrackLogDetJacLB", "[", "ii", "]", "=", "numpy", ".", "log", "(", "numpy", ".", "linalg", ".", "det", "(", "tjacLB", ")", ")", "self", ".", "_interpolatedTrackLogDetJacLB", "=", "interpolatedTrackLogDetJacLB", "return", "None" ]
Determine the spread in the stream in observable coordinates
[ "Determine", "the", "spread", "in", "the", "stream", "in", "observable", "coordinates" ]
python
train
gwastro/pycbc
pycbc/events/ranking.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/events/ranking.py#L90-L109
def get_newsnr_sgveto(trigs): """ Calculate newsnr re-weigthed by the sine-gaussian veto Parameters ---------- trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) Dictionary-like object holding single detector trigger information. 'chisq_dof', 'snr', 'sg_chisq' and 'chisq' are required keys Returns ------- numpy.ndarray Array of newsnr values """ dof = 2. * trigs['chisq_dof'][:] - 2. nsnr_sg = newsnr_sgveto(trigs['snr'][:], trigs['chisq'][:] / dof, trigs['sg_chisq'][:]) return numpy.array(nsnr_sg, ndmin=1, dtype=numpy.float32)
[ "def", "get_newsnr_sgveto", "(", "trigs", ")", ":", "dof", "=", "2.", "*", "trigs", "[", "'chisq_dof'", "]", "[", ":", "]", "-", "2.", "nsnr_sg", "=", "newsnr_sgveto", "(", "trigs", "[", "'snr'", "]", "[", ":", "]", ",", "trigs", "[", "'chisq'", "]", "[", ":", "]", "/", "dof", ",", "trigs", "[", "'sg_chisq'", "]", "[", ":", "]", ")", "return", "numpy", ".", "array", "(", "nsnr_sg", ",", "ndmin", "=", "1", ",", "dtype", "=", "numpy", ".", "float32", ")" ]
Calculate newsnr re-weighted by the sine-gaussian veto Parameters ---------- trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) Dictionary-like object holding single detector trigger information. 'chisq_dof', 'snr', 'sg_chisq' and 'chisq' are required keys Returns ------- numpy.ndarray Array of newsnr values
[ "Calculate", "newsnr", "re", "-", "weighted", "by", "the", "sine", "-", "gaussian", "veto" ]
python
train
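A worked example of the reduced chi-squared step above, assuming the module-level newsnr_sgveto this function wraps is importable:
    import numpy
    trigs = {'snr': numpy.array([8.0]), 'chisq': numpy.array([20.0]),
             'chisq_dof': numpy.array([11.0]), 'sg_chisq': numpy.array([1.0])}
    # dof = 2 * 11 - 2 = 20, so newsnr_sgveto sees a reduced chisq of 20.0 / 20 = 1.0
    nsnr = get_newsnr_sgveto(trigs)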
OSSOS/MOP
src/ossos/core/ossos/storage.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/storage.py#L1020-L1066
def get_hdu(uri, cutout=None): """Get a at the given uri from VOSpace, possibly doing a cutout. If the cutout is flips the image then we also must flip the datasec keywords. Also, we must offset the datasec to reflect the cutout area being used. @param uri: The URI in VOSpace of the image to HDU to retrieve. @param cutout: A CADC data service CUTOUT paramter to be used when retrieving the observation. @return: fits.HDU """ try: # the filename is based on the Simple FITS images file. filename = os.path.basename(uri) if os.access(filename, os.F_OK) and cutout is None: logger.debug("File already on disk: {}".format(filename)) hdu_list = fits.open(filename, scale_back=True) hdu_list.verify('silentfix+ignore') else: logger.debug("Pulling: {}{} from VOSpace".format(uri, cutout)) fpt = tempfile.NamedTemporaryFile(suffix='.fits') cutout = cutout is not None and cutout or "" copy(uri+cutout, fpt.name) fpt.seek(0, 2) fpt.seek(0) logger.debug("Read from vospace completed. Building fits object.") hdu_list = fits.open(fpt, scale_back=False) hdu_list.verify('silentfix+ignore') logger.debug("Got image from vospace") try: hdu_list[0].header['DATASEC'] = reset_datasec(cutout, hdu_list[0].header['DATASEC'], hdu_list[0].header['NAXIS1'], hdu_list[0].header['NAXIS2']) except Exception as e: logging.debug("error converting datasec: {}".format(str(e))) for hdu in hdu_list: logging.debug("Adding converter to {}".format(hdu)) hdu.converter = CoordinateConverter(0, 0) try: hdu.wcs = WCS(hdu.header) except Exception as ex: logger.error("Failed trying to initialize the WCS: {}".format(ex)) except Exception as ex: raise ex return hdu_list
[ "def", "get_hdu", "(", "uri", ",", "cutout", "=", "None", ")", ":", "try", ":", "# the filename is based on the Simple FITS images file.", "filename", "=", "os", ".", "path", ".", "basename", "(", "uri", ")", "if", "os", ".", "access", "(", "filename", ",", "os", ".", "F_OK", ")", "and", "cutout", "is", "None", ":", "logger", ".", "debug", "(", "\"File already on disk: {}\"", ".", "format", "(", "filename", ")", ")", "hdu_list", "=", "fits", ".", "open", "(", "filename", ",", "scale_back", "=", "True", ")", "hdu_list", ".", "verify", "(", "'silentfix+ignore'", ")", "else", ":", "logger", ".", "debug", "(", "\"Pulling: {}{} from VOSpace\"", ".", "format", "(", "uri", ",", "cutout", ")", ")", "fpt", "=", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "'.fits'", ")", "cutout", "=", "cutout", "is", "not", "None", "and", "cutout", "or", "\"\"", "copy", "(", "uri", "+", "cutout", ",", "fpt", ".", "name", ")", "fpt", ".", "seek", "(", "0", ",", "2", ")", "fpt", ".", "seek", "(", "0", ")", "logger", ".", "debug", "(", "\"Read from vospace completed. Building fits object.\"", ")", "hdu_list", "=", "fits", ".", "open", "(", "fpt", ",", "scale_back", "=", "False", ")", "hdu_list", ".", "verify", "(", "'silentfix+ignore'", ")", "logger", ".", "debug", "(", "\"Got image from vospace\"", ")", "try", ":", "hdu_list", "[", "0", "]", ".", "header", "[", "'DATASEC'", "]", "=", "reset_datasec", "(", "cutout", ",", "hdu_list", "[", "0", "]", ".", "header", "[", "'DATASEC'", "]", ",", "hdu_list", "[", "0", "]", ".", "header", "[", "'NAXIS1'", "]", ",", "hdu_list", "[", "0", "]", ".", "header", "[", "'NAXIS2'", "]", ")", "except", "Exception", "as", "e", ":", "logging", ".", "debug", "(", "\"error converting datasec: {}\"", ".", "format", "(", "str", "(", "e", ")", ")", ")", "for", "hdu", "in", "hdu_list", ":", "logging", ".", "debug", "(", "\"Adding converter to {}\"", ".", "format", "(", "hdu", ")", ")", "hdu", ".", "converter", "=", "CoordinateConverter", "(", "0", ",", "0", ")", "try", ":", "hdu", ".", "wcs", "=", "WCS", "(", "hdu", ".", "header", ")", "except", "Exception", "as", "ex", ":", "logger", ".", "error", "(", "\"Failed trying to initialize the WCS: {}\"", ".", "format", "(", "ex", ")", ")", "except", "Exception", "as", "ex", ":", "raise", "ex", "return", "hdu_list" ]
Get an HDU at the given uri from VOSpace, possibly doing a cutout. If the cutout flips the image then we must also flip the datasec keywords. Also, we must offset the datasec to reflect the cutout area being used. @param uri: The URI in VOSpace of the image HDU to retrieve. @param cutout: A CADC data service CUTOUT parameter to be used when retrieving the observation. @return: fits.HDU
[ "Get", "an", "HDU", "at", "the", "given", "uri", "from", "VOSpace", "possibly", "doing", "a", "cutout", "." ]
python
train
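Usage sketch; the VOSpace URI and the CADC cutout string below are hypothetical:
    hdulist = get_hdu('vos:OSSOS/dbimages/1234567/1234567p.fits',
                      cutout='[1][100:200,100:200]')
    print(hdulist[0].header.get('DATASEC'))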
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_threshold_monitor.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_threshold_monitor.py#L422-L433
def threshold_monitor_hidden_threshold_monitor_Cpu_poll(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor") threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor") Cpu = ET.SubElement(threshold_monitor, "Cpu") poll = ET.SubElement(Cpu, "poll") poll.text = kwargs.pop('poll') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "threshold_monitor_hidden_threshold_monitor_Cpu_poll", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "threshold_monitor_hidden", "=", "ET", ".", "SubElement", "(", "config", ",", "\"threshold-monitor-hidden\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-threshold-monitor\"", ")", "threshold_monitor", "=", "ET", ".", "SubElement", "(", "threshold_monitor_hidden", ",", "\"threshold-monitor\"", ")", "Cpu", "=", "ET", ".", "SubElement", "(", "threshold_monitor", ",", "\"Cpu\"", ")", "poll", "=", "ET", ".", "SubElement", "(", "Cpu", ",", "\"poll\"", ")", "poll", ".", "text", "=", "kwargs", ".", "pop", "(", "'poll'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
bastikr/boolean.py
boolean/boolean.py
https://github.com/bastikr/boolean.py/blob/e984df480afc60605e9501a0d3d54d667e8f7dbf/boolean/boolean.py#L1061-L1073
def cancel(self): """ Cancel itself and following NOTs as far as possible. Returns the simplified expression. """ expr = self while True: arg = expr.args[0] if not isinstance(arg, self.__class__): return expr expr = arg.args[0] if not isinstance(expr, self.__class__): return expr
[ "def", "cancel", "(", "self", ")", ":", "expr", "=", "self", "while", "True", ":", "arg", "=", "expr", ".", "args", "[", "0", "]", "if", "not", "isinstance", "(", "arg", ",", "self", ".", "__class__", ")", ":", "return", "expr", "expr", "=", "arg", ".", "args", "[", "0", "]", "if", "not", "isinstance", "(", "expr", ",", "self", ".", "__class__", ")", ":", "return", "expr" ]
Cancel itself and following NOTs as far as possible. Returns the simplified expression.
[ "Cancel", "itself", "and", "following", "NOTs", "as", "far", "as", "possible", ".", "Returns", "the", "simplified", "expression", "." ]
python
train
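A sketch of NOT-cancellation with the boolean.py public API, assuming parse() accepts ~ for NOT and leaves the expression unsimplified by default:
    import boolean
    algebra = boolean.BooleanAlgebra()
    expr = algebra.parse('~~~x')   # NOT(NOT(NOT(x)))
    print(expr.cancel())           # the stacked NOTs collapse to ~x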
oasis-open/cti-taxii-client
taxii2client/__init__.py
https://github.com/oasis-open/cti-taxii-client/blob/b4c037fb61d8b8892af34423e2c67c81218d6f8e/taxii2client/__init__.py#L235-L239
def refresh(self, accept=MEDIA_TYPE_TAXII_V20): """Updates Status information""" response = self.__raw = self._conn.get(self.url, headers={"Accept": accept}) self._populate_fields(**response)
[ "def", "refresh", "(", "self", ",", "accept", "=", "MEDIA_TYPE_TAXII_V20", ")", ":", "response", "=", "self", ".", "__raw", "=", "self", ".", "_conn", ".", "get", "(", "self", ".", "url", ",", "headers", "=", "{", "\"Accept\"", ":", "accept", "}", ")", "self", ".", "_populate_fields", "(", "*", "*", "response", ")" ]
Updates Status information
[ "Updates", "Status", "information" ]
python
valid
airspeed-velocity/asv
asv/extern/asizeof.py
https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/extern/asizeof.py#L511-L527
def _refs(obj, named, *ats, **kwds): '''Return specific attribute objects of an object. ''' if named: for a in ats: # cf. inspect.getmembers() if hasattr(obj, a): yield _NamedRef(a, getattr(obj, a)) if kwds: # kwds are _dir2() args for a, o in _dir2(obj, **kwds): yield _NamedRef(a, o) else: for a in ats: # cf. inspect.getmembers() if hasattr(obj, a): yield getattr(obj, a) if kwds: # kwds are _dir2() args for _, o in _dir2(obj, **kwds): yield o
[ "def", "_refs", "(", "obj", ",", "named", ",", "*", "ats", ",", "*", "*", "kwds", ")", ":", "if", "named", ":", "for", "a", "in", "ats", ":", "# cf. inspect.getmembers()", "if", "hasattr", "(", "obj", ",", "a", ")", ":", "yield", "_NamedRef", "(", "a", ",", "getattr", "(", "obj", ",", "a", ")", ")", "if", "kwds", ":", "# kwds are _dir2() args", "for", "a", ",", "o", "in", "_dir2", "(", "obj", ",", "*", "*", "kwds", ")", ":", "yield", "_NamedRef", "(", "a", ",", "o", ")", "else", ":", "for", "a", "in", "ats", ":", "# cf. inspect.getmembers()", "if", "hasattr", "(", "obj", ",", "a", ")", ":", "yield", "getattr", "(", "obj", ",", "a", ")", "if", "kwds", ":", "# kwds are _dir2() args", "for", "_", ",", "o", "in", "_dir2", "(", "obj", ",", "*", "*", "kwds", ")", ":", "yield", "o" ]
Return specific attribute objects of an object.
[ "Return", "specific", "attribute", "objects", "of", "an", "object", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/structural/titancna.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/titancna.py#L86-L95
def _run_select_solution(ploidy_outdirs, work_dir, data): """Select optimal """ out_file = os.path.join(work_dir, "optimalClusters.txt") if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: ploidy_inputs = " ".join(["--ploidyRun%s=%s" % (p, d) for p, d in ploidy_outdirs]) cmd = "titanCNA_selectSolution.R {ploidy_inputs} --outFile={tx_out_file}" do.run(cmd.format(**locals()), "TitanCNA: select optimal solution") return out_file
[ "def", "_run_select_solution", "(", "ploidy_outdirs", ",", "work_dir", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"optimalClusters.txt\"", ")", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out_file", ":", "ploidy_inputs", "=", "\" \"", ".", "join", "(", "[", "\"--ploidyRun%s=%s\"", "%", "(", "p", ",", "d", ")", "for", "p", ",", "d", "in", "ploidy_outdirs", "]", ")", "cmd", "=", "\"titanCNA_selectSolution.R {ploidy_inputs} --outFile={tx_out_file}\"", "do", ".", "run", "(", "cmd", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "\"TitanCNA: select optimal solution\"", ")", "return", "out_file" ]
Select the optimal TitanCNA solution across ploidy runs
[ "Select", "the", "optimal", "TitanCNA", "solution", "across", "ploidy", "runs" ]
python
train
datahq/dataflows
setup.py
https://github.com/datahq/dataflows/blob/2c5e5e01e09c8b44e0ff36d85b3f2f4dcf4e8465/setup.py#L12-L17
def read(*paths): """Read a text file.""" basedir = os.path.dirname(__file__) fullpath = os.path.join(basedir, *paths) contents = io.open(fullpath, encoding='utf-8').read().strip() return contents
[ "def", "read", "(", "*", "paths", ")", ":", "basedir", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "fullpath", "=", "os", ".", "path", ".", "join", "(", "basedir", ",", "*", "paths", ")", "contents", "=", "io", ".", "open", "(", "fullpath", ",", "encoding", "=", "'utf-8'", ")", ".", "read", "(", ")", ".", "strip", "(", ")", "return", "contents" ]
Read a text file.
[ "Read", "a", "text", "file", "." ]
python
train
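Typical setup.py usage of this helper; README.md is assumed to sit next to setup.py:
    long_description = read('README.md')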
letuananh/chirptext
chirptext/dekomecab.py
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/dekomecab.py#L57-L70
def run_mecab_process(content, *args, **kwargs): ''' Use subprocess to run mecab ''' encoding = 'utf-8' if 'encoding' not in kwargs else kwargs['encoding'] mecab_loc = kwargs['mecab_loc'] if 'mecab_loc' in kwargs else None if mecab_loc is None: mecab_loc = MECAB_LOC proc_args = [mecab_loc] if args: proc_args.extend(args) output = subprocess.run(proc_args, input=content.encode(encoding), stdout=subprocess.PIPE) output_string = os.linesep.join(output.stdout.decode(encoding).splitlines()) return output_string
[ "def", "run_mecab_process", "(", "content", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "encoding", "=", "'utf-8'", "if", "'encoding'", "not", "in", "kwargs", "else", "kwargs", "[", "'encoding'", "]", "mecab_loc", "=", "kwargs", "[", "'mecab_loc'", "]", "if", "'mecab_loc'", "in", "kwargs", "else", "None", "if", "mecab_loc", "is", "None", ":", "mecab_loc", "=", "MECAB_LOC", "proc_args", "=", "[", "mecab_loc", "]", "if", "args", ":", "proc_args", ".", "extend", "(", "args", ")", "output", "=", "subprocess", ".", "run", "(", "proc_args", ",", "input", "=", "content", ".", "encode", "(", "encoding", ")", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "output_string", "=", "os", ".", "linesep", ".", "join", "(", "output", ".", "stdout", ".", "decode", "(", "encoding", ")", ".", "splitlines", "(", ")", ")", "return", "output_string" ]
Use subprocess to run mecab
[ "Use", "subprocess", "to", "run", "mecab" ]
python
train
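Usage sketch; it needs a local MeCab binary, and the path and the -Owakati flag are assumptions about the environment:
    tokens = run_mecab_process('猫が好きです', '-Owakati',
                               mecab_loc='/usr/local/bin/mecab')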
Dentosal/python-sc2
sc2/unit.py
https://github.com/Dentosal/python-sc2/blob/608bd25f04e89d39cef68b40101d8e9a8a7f1634/sc2/unit.py#L204-L208
def health_percentage(self) -> Union[int, float]: """ Does not include shields """ if self._proto.health_max == 0: return 0 return self._proto.health / self._proto.health_max
[ "def", "health_percentage", "(", "self", ")", "->", "Union", "[", "int", ",", "float", "]", ":", "if", "self", ".", "_proto", ".", "health_max", "==", "0", ":", "return", "0", "return", "self", ".", "_proto", ".", "health", "/", "self", ".", "_proto", ".", "health_max" ]
Does not include shields
[ "Does", "not", "include", "shields" ]
python
train
saltstack/salt
salt/modules/nagios.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nagios.py#L131-L189
def retcode_pillar(pillar_name): ''' Run one or more nagios plugins from pillar data and get the result of cmd.retcode The pillar have to be in this format:: ------ webserver: Ping_google: - check_icmp: 8.8.8.8 - check_icmp: google.com Load: - check_load: -w 0.8 -c 1 APT: - check_apt ------- webserver is the role to check, the next keys are the group and the items the check with the arguments if needed You must to group different checks(one o more) and always it will return the highest value of all the checks CLI Example: .. code-block:: bash salt '*' nagios.retcode webserver ''' groups = __salt__['pillar.get'](pillar_name) check = {} data = {} for group in groups: commands = groups[group] for command in commands: # Check if is a dict to get the arguments # in command if not set the arguments to empty string if isinstance(command, dict): plugin = next(six.iterkeys(command)) args = command[plugin] else: plugin = command args = '' check.update(retcode(plugin, args, group)) current_value = 0 new_value = int(check[group]['status']) if group in data: current_value = int(data[group]['status']) if (new_value > current_value) or (group not in data): if group not in data: data[group] = {} data[group]['status'] = new_value return data
[ "def", "retcode_pillar", "(", "pillar_name", ")", ":", "groups", "=", "__salt__", "[", "'pillar.get'", "]", "(", "pillar_name", ")", "check", "=", "{", "}", "data", "=", "{", "}", "for", "group", "in", "groups", ":", "commands", "=", "groups", "[", "group", "]", "for", "command", "in", "commands", ":", "# Check if is a dict to get the arguments", "# in command if not set the arguments to empty string", "if", "isinstance", "(", "command", ",", "dict", ")", ":", "plugin", "=", "next", "(", "six", ".", "iterkeys", "(", "command", ")", ")", "args", "=", "command", "[", "plugin", "]", "else", ":", "plugin", "=", "command", "args", "=", "''", "check", ".", "update", "(", "retcode", "(", "plugin", ",", "args", ",", "group", ")", ")", "current_value", "=", "0", "new_value", "=", "int", "(", "check", "[", "group", "]", "[", "'status'", "]", ")", "if", "group", "in", "data", ":", "current_value", "=", "int", "(", "data", "[", "group", "]", "[", "'status'", "]", ")", "if", "(", "new_value", ">", "current_value", ")", "or", "(", "group", "not", "in", "data", ")", ":", "if", "group", "not", "in", "data", ":", "data", "[", "group", "]", "=", "{", "}", "data", "[", "group", "]", "[", "'status'", "]", "=", "new_value", "return", "data" ]
Run one or more nagios plugins from pillar data and get the result of cmd.retcode The pillar data has to be in this format:: ------ webserver: Ping_google: - check_icmp: 8.8.8.8 - check_icmp: google.com Load: - check_load: -w 0.8 -c 1 APT: - check_apt ------- webserver is the role to check; the next keys are the groups, and the items are the checks to run with their arguments if needed You must group the checks (one or more), and this will always return the highest value of all the checks CLI Example: .. code-block:: bash salt '*' nagios.retcode_pillar webserver
[ "Run", "one", "or", "more", "nagios", "plugins", "from", "pillar", "data", "and", "get", "the", "result", "of", "cmd", ".", "retcode", "The", "pillar", "data", "has", "to", "be", "in", "this", "format", "::" ]
python
train
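The pillar layout from the docstring, rendered as the Python structure that pillar.get('webserver') would return:
    {'Ping_google': [{'check_icmp': '8.8.8.8'}, {'check_icmp': 'google.com'}],
     'Load': [{'check_load': '-w 0.8 -c 1'}],
     'APT': ['check_apt']}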
miyakogi/wdom
wdom/web_node.py
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/web_node.py#L168-L184
def removeClass(self, *classes: str) -> None: """[Not Standard] Remove classes from this node.""" _remove_cl = [] for class_ in classes: if class_ not in self.classList: if class_ in self.get_class_list(): logger.warning( 'tried to remove class-level class: ' '{}'.format(class_) ) else: logger.warning( 'tried to remove non-existing class: {}'.format(class_) ) else: _remove_cl.append(class_) self.classList.remove(*_remove_cl)
[ "def", "removeClass", "(", "self", ",", "*", "classes", ":", "str", ")", "->", "None", ":", "_remove_cl", "=", "[", "]", "for", "class_", "in", "classes", ":", "if", "class_", "not", "in", "self", ".", "classList", ":", "if", "class_", "in", "self", ".", "get_class_list", "(", ")", ":", "logger", ".", "warning", "(", "'tried to remove class-level class: '", "'{}'", ".", "format", "(", "class_", ")", ")", "else", ":", "logger", ".", "warning", "(", "'tried to remove non-existing class: {}'", ".", "format", "(", "class_", ")", ")", "else", ":", "_remove_cl", ".", "append", "(", "class_", ")", "self", ".", "classList", ".", "remove", "(", "*", "_remove_cl", ")" ]
[Not Standard] Remove classes from this node.
[ "[", "Not", "Standard", "]", "Remove", "classes", "from", "this", "node", "." ]
python
train
messagebird/python-rest-api
messagebird/client.py
https://github.com/messagebird/python-rest-api/blob/fb7864f178135f92d09af803bee93270e99f3963/messagebird/client.py#L126-L129
def lookup(self, phonenumber, params=None): """Do a new lookup.""" if params is None: params = {} return Lookup().load(self.request('lookup/' + str(phonenumber), 'GET', params))
[ "def", "lookup", "(", "self", ",", "phonenumber", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "return", "Lookup", "(", ")", ".", "load", "(", "self", ".", "request", "(", "'lookup/'", "+", "str", "(", "phonenumber", ")", ",", "'GET'", ",", "params", ")", ")" ]
Do a new lookup.
[ "Do", "a", "new", "lookup", "." ]
python
train
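Usage sketch with the public messagebird client; the access key and phone number are placeholders:
    import messagebird
    client = messagebird.Client('YOUR_ACCESS_KEY')
    result = client.lookup('31612345678')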
prajwalkr/track-it
trackit/trackers.py
https://github.com/prajwalkr/track-it/blob/01a91dba8e7bc169976e0b13faacf7dd7330237b/trackit/trackers.py#L221-L234
def wait_till_page_load(self,driver,max_wait_time): ''' This method pauses execution until the page is loaded fully, including data delayed by JavaScript ''' sleepCount = max_wait_time # wait for a fixed max_wait_time only # A fully loaded page contains the tracking number while self.tracking_no not in driver.page_source and 'Invalid Input' not in driver.page_source: sleep(1) sleepCount -= 1 if sleepCount == 0: raise Exception('Request timed out!')
[ "def", "wait_till_page_load", "(", "self", ",", "driver", ",", "max_wait_time", ")", ":", "sleepCount", "=", "max_wait_time", "# wait for a fixed max_wait_time only ", "# A fully loaded page contains the tracking number", "while", "self", ".", "tracking_no", "not", "in", "driver", ".", "page_source", "and", "'Invalid Input'", "not", "in", "driver", ".", "page_source", ":", "sleep", "(", "1", ")", "sleepCount", "-=", "1", "if", "sleepCount", "==", "0", ":", "raise", "Exception", "(", "'Request timed out!'", ")" ]
This method pauses execution until the page is loaded fully, including data delayed by JavaScript
[ "This", "method", "pauses", "execution", "until", "the", "page", "is", "loaded", "fully", "including", "data", "delayed", "by", "JavaScript" ]
python
train
jay-johnson/spylunking
spylunking/wait_for_exit.py
https://github.com/jay-johnson/spylunking/blob/95cc86776f04ec5935cf04e291cf18798345d6cb/spylunking/wait_for_exit.py#L6-L70
def wait_for_exit( log, debug=False): """wait_for_exit Sleep to allow the thread to pick up final messages before exiting and stopping the Splunk HTTP publisher. You can decrease this delay (in seconds) by reducing the splunk_sleep_interval or by exporting the env var: export SPLUNK_SLEEP_INTERVAL=0.5 If you set the timer to 0 then it will be a blocking HTTP POST sent to Splunk for each log message. This creates a blocking logger in your application that will wait until each log's HTTP POST was received before continuing. Note: Reducing this Splunk sleep timer could result in losing messages that were stuck in the queue when the parent process exits. The multiprocessing Splunk Publisher was built to do this, but will not work in certain frameworks like Celery as it requires access to spawn daemon processes to prevent this 'message loss' case during exiting. Applications using this library should ensure there are no critical log messages stuck in a queue when stopping a long-running process. :param log: created logger :param debug: bool to debug with prints """ debug = debug or SPLUNK_DEBUG for i in log.root.handlers: handler_class_name = i.__class__.__name__.lower() if debug: print(( ' - wait_for_exit handler={}').format( handler_class_name)) if ('splunkpublisher' == handler_class_name or 'mpsplunkpublisher' == handler_class_name): if hasattr(i, 'sleep_interval'): total_sleep = i.sleep_interval + 2.0 if os.getenv( 'PUBLISHER_EXIT_DELAY', False): total_sleep = float(os.getenv( 'PUBLISHER_EXIT_DELAY', total_sleep)) if debug: print(( ' - wait_for_exit ' 'handler={} wait={}s').format( handler_class_name, total_sleep)) time.sleep(total_sleep) if debug: print(( 'done waiting for exit')) return else: print(( ' - wait_for_exit handler={} has no ' 'sleep_interval').format( handler_class_name))
[ "def", "wait_for_exit", "(", "log", ",", "debug", "=", "False", ")", ":", "debug", "=", "debug", "or", "SPLUNK_DEBUG", "for", "i", "in", "log", ".", "root", ".", "handlers", ":", "handler_class_name", "=", "i", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", "if", "debug", ":", "print", "(", "(", "' - wait_for_exit handler={}'", ")", ".", "format", "(", "handler_class_name", ")", ")", "if", "(", "'splunkpublisher'", "==", "handler_class_name", "or", "'mpsplunkpublisher'", "==", "handler_class_name", ")", ":", "if", "hasattr", "(", "i", ",", "'sleep_interval'", ")", ":", "total_sleep", "=", "i", ".", "sleep_interval", "+", "2.0", "if", "os", ".", "getenv", "(", "'PUBLISHER_EXIT_DELAY'", ",", "False", ")", ":", "total_sleep", "=", "float", "(", "os", ".", "getenv", "(", "'PUBLISHER_EXIT_DELAY'", ",", "total_sleep", ")", ")", "if", "debug", ":", "print", "(", "(", "' - wait_for_exit '", "'handler={} wait={}s'", ")", ".", "format", "(", "handler_class_name", ",", "total_sleep", ")", ")", "time", ".", "sleep", "(", "total_sleep", ")", "if", "debug", ":", "print", "(", "(", "'done waiting for exit'", ")", ")", "return", "else", ":", "print", "(", "(", "' - wait_for_exit handler={} has no '", "'sleep_interval'", ")", ".", "format", "(", "handler_class_name", ")", ")" ]
wait_for_exit Sleep to allow the thread to pick up final messages before exiting and stopping the Splunk HTTP publisher. You can decrease this delay (in seconds) by reducing the splunk_sleep_interval or by exporting the env var: export SPLUNK_SLEEP_INTERVAL=0.5 If you set the timer to 0 then it will be a blocking HTTP POST sent to Splunk for each log message. This creates a blocking logger in your application that will wait until each log's HTTP POST was received before continuing. Note: Reducing this Splunk sleep timer could result in losing messages that were stuck in the queue when the parent process exits. The multiprocessing Splunk Publisher was built to do this, but will not work in certain frameworks like Celery as it requires access to spawn daemon processes to prevent this 'message loss' case during exiting. Applications using this library should ensure there are no critical log messages stuck in a queue when stopping a long-running process. :param log: created logger :param debug: bool to debug with prints
[ "wait_for_exit" ]
python
train
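Sketch of the intended call pattern, assuming log was configured with a spylunking SplunkPublisher handler:
    log.info('final message before shutdown')
    wait_for_exit(log)   # sleeps handler.sleep_interval + 2.0s so queued posts can flush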
carpyncho/feets
feets/datasets/ogle3.py
https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/datasets/ogle3.py#L155-L246
def fetch_OGLE3(ogle3_id, data_home=None, metadata=None, download_if_missing=True): """Retrieve a lighte curve from OGLE-3 database Parameters ---------- ogle3_id : str The id of the source (see: ``load_OGLE3_catalog()`` for available sources. data_home : optional, default: None Specify another download and cache folder for the datasets. By default all feets data is stored in '~/feets' subfolders. metadata : bool | None If it's True, the row of the dataframe from ``load_OGLE3_catalog()`` with the metadata of the source are added to the result. download_if_missing : optional, True by default If False, raise a IOError if the data is not locally available instead of trying to download the data from the source site. Returns ------- A Data object. Examples -------- .. code-block:: pycon >>> ds = fetch_OGLE3("OGLE-BLG-LPV-232377") >>> ds Data(id='OGLE-BLG-LPV-232377', ds_name='OGLE-III', bands=('I', 'V')) >>> ds.bands ('I', 'V') >>> ds.data.I LightCurve(time[100], magnitude[100], error[100]) >>> ds.data.I.magnitude array([ 13.816, 13.826, 13.818, 13.812, 13.8 , 13.827, 13.797, 13.82 , 13.804, 13.783, 13.823, 13.8 , 13.84 , 13.817, 13.802, 13.824, 13.822, 13.81 , 13.844, 13.848, 13.813, 13.836, 13.83 , 13.83 , 13.837, 13.811, 13.814, 13.82 , 13.826, 13.822, 13.821, 13.817, 13.813, 13.809, 13.817, 13.836, 13.804, 13.801, 13.813, 13.823, 13.818, 13.831, 13.833, 13.814, 13.814, 13.812, 13.822, 13.814, 13.818, 13.817, 13.8 , 13.804, 13.799, 13.809, 13.815, 13.846, 13.796, 13.791, 13.804, 13.853, 13.839, 13.816, 13.825, 13.81 , 13.8 , 13.807, 13.819, 13.829, 13.844, 13.84 , 13.842, 13.818, 13.801, 13.804, 13.814, 13.821, 13.821, 13.822, 13.82 , 13.803, 13.813, 13.826, 13.855, 13.865, 13.854, 13.828, 13.809, 13.828, 13.833, 13.829, 13.816, 13.82 , 13.827, 13.834, 13.811, 13.817, 13.808, 13.834, 13.814, 13.829]) """ # retrieve the data dir for ogle store_path = _get_OGLE3_data_home(data_home) # the data dir for this lightcurve file_path = os.path.join(store_path, "{}.tar".format(ogle3_id)) # members of the two bands of ogle3 members = {"I": "./{}.I.dat".format(ogle3_id), "V": "./{}.V.dat".format(ogle3_id)} # the url of the lightcurve if download_if_missing: url = URL.format(ogle3_id) base.fetch(url, file_path) bands = [] data = {} with tarfile.TarFile(file_path) as tfp: members_names = tfp.getnames() for band_name, member_name in members.items(): if member_name in members_names: member = tfp.getmember(member_name) src = tfp.extractfile(member) lc = _check_dim(np.loadtxt(src)) data[band_name] = {"time": lc[:, 0], "magnitude": lc[:, 1], "error": lc[:, 2]} bands.append(band_name) if metadata: cat = load_OGLE3_catalog() metadata = cat[cat.ID == ogle3_id].iloc[0].to_dict() del cat return Data( id=ogle3_id, metadata=metadata, ds_name="OGLE-III", description=DESCR, bands=bands, data=data)
[ "def", "fetch_OGLE3", "(", "ogle3_id", ",", "data_home", "=", "None", ",", "metadata", "=", "None", ",", "download_if_missing", "=", "True", ")", ":", "# retrieve the data dir for ogle", "store_path", "=", "_get_OGLE3_data_home", "(", "data_home", ")", "# the data dir for this lightcurve", "file_path", "=", "os", ".", "path", ".", "join", "(", "store_path", ",", "\"{}.tar\"", ".", "format", "(", "ogle3_id", ")", ")", "# members of the two bands of ogle3", "members", "=", "{", "\"I\"", ":", "\"./{}.I.dat\"", ".", "format", "(", "ogle3_id", ")", ",", "\"V\"", ":", "\"./{}.V.dat\"", ".", "format", "(", "ogle3_id", ")", "}", "# the url of the lightcurve", "if", "download_if_missing", ":", "url", "=", "URL", ".", "format", "(", "ogle3_id", ")", "base", ".", "fetch", "(", "url", ",", "file_path", ")", "bands", "=", "[", "]", "data", "=", "{", "}", "with", "tarfile", ".", "TarFile", "(", "file_path", ")", "as", "tfp", ":", "members_names", "=", "tfp", ".", "getnames", "(", ")", "for", "band_name", ",", "member_name", "in", "members", ".", "items", "(", ")", ":", "if", "member_name", "in", "members_names", ":", "member", "=", "tfp", ".", "getmember", "(", "member_name", ")", "src", "=", "tfp", ".", "extractfile", "(", "member", ")", "lc", "=", "_check_dim", "(", "np", ".", "loadtxt", "(", "src", ")", ")", "data", "[", "band_name", "]", "=", "{", "\"time\"", ":", "lc", "[", ":", ",", "0", "]", ",", "\"magnitude\"", ":", "lc", "[", ":", ",", "1", "]", ",", "\"error\"", ":", "lc", "[", ":", ",", "2", "]", "}", "bands", ".", "append", "(", "band_name", ")", "if", "metadata", ":", "cat", "=", "load_OGLE3_catalog", "(", ")", "metadata", "=", "cat", "[", "cat", ".", "ID", "==", "ogle3_id", "]", ".", "iloc", "[", "0", "]", ".", "to_dict", "(", ")", "del", "cat", "return", "Data", "(", "id", "=", "ogle3_id", ",", "metadata", "=", "metadata", ",", "ds_name", "=", "\"OGLE-III\"", ",", "description", "=", "DESCR", ",", "bands", "=", "bands", ",", "data", "=", "data", ")" ]
Retrieve a light curve from the OGLE-3 database Parameters ---------- ogle3_id : str The id of the source (see: ``load_OGLE3_catalog()`` for available sources.) data_home : optional, default: None Specify another download and cache folder for the datasets. By default all feets data is stored in '~/feets' subfolders. metadata : bool | None If it's True, the row of the dataframe from ``load_OGLE3_catalog()`` with the metadata of the source is added to the result. download_if_missing : optional, True by default If False, raise an IOError if the data is not locally available instead of trying to download the data from the source site. Returns ------- A Data object. Examples -------- .. code-block:: pycon >>> ds = fetch_OGLE3("OGLE-BLG-LPV-232377") >>> ds Data(id='OGLE-BLG-LPV-232377', ds_name='OGLE-III', bands=('I', 'V')) >>> ds.bands ('I', 'V') >>> ds.data.I LightCurve(time[100], magnitude[100], error[100]) >>> ds.data.I.magnitude array([ 13.816, 13.826, 13.818, 13.812, 13.8 , 13.827, 13.797, 13.82 , 13.804, 13.783, 13.823, 13.8 , 13.84 , 13.817, 13.802, 13.824, 13.822, 13.81 , 13.844, 13.848, 13.813, 13.836, 13.83 , 13.83 , 13.837, 13.811, 13.814, 13.82 , 13.826, 13.822, 13.821, 13.817, 13.813, 13.809, 13.817, 13.836, 13.804, 13.801, 13.813, 13.823, 13.818, 13.831, 13.833, 13.814, 13.814, 13.812, 13.822, 13.814, 13.818, 13.817, 13.8 , 13.804, 13.799, 13.809, 13.815, 13.846, 13.796, 13.791, 13.804, 13.853, 13.839, 13.816, 13.825, 13.81 , 13.8 , 13.807, 13.819, 13.829, 13.844, 13.84 , 13.842, 13.818, 13.801, 13.804, 13.814, 13.821, 13.821, 13.822, 13.82 , 13.803, 13.813, 13.826, 13.855, 13.865, 13.854, 13.828, 13.809, 13.828, 13.833, 13.829, 13.816, 13.82 , 13.827, 13.834, 13.811, 13.817, 13.808, 13.834, 13.814, 13.829])
[ "Retrieve", "a", "light", "curve", "from", "the", "OGLE", "-", "3", "database" ]
python
train
yyuu/botornado
boto/s3/bucket.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/s3/bucket.py#L208-L245
def list(self, prefix='', delimiter='', marker='', headers=None): """ List key objects within a bucket. This returns an instance of an BucketListResultSet that automatically handles all of the result paging, etc. from S3. You just need to keep iterating until there are no more results. Called with no arguments, this will return an iterator object across all keys within the bucket. The Key objects returned by the iterator are obtained by parsing the results of a GET on the bucket, also known as the List Objects request. The XML returned by this request contains only a subset of the information about each key. Certain metadata fields such as Content-Type and user metadata are not available in the XML. Therefore, if you want these additional metadata fields you will have to do a HEAD request on the Key in the bucket. :type prefix: string :param prefix: allows you to limit the listing to a particular prefix. For example, if you call the method with prefix='/foo/' then the iterator will only cycle through the keys that begin with the string '/foo/'. :type delimiter: string :param delimiter: can be used in conjunction with the prefix to allow you to organize and browse your keys hierarchically. See: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/ for more details. :type marker: string :param marker: The "marker" of where you are in the result set :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` :return: an instance of a BucketListResultSet that handles paging, etc """ return BucketListResultSet(self, prefix, delimiter, marker, headers)
[ "def", "list", "(", "self", ",", "prefix", "=", "''", ",", "delimiter", "=", "''", ",", "marker", "=", "''", ",", "headers", "=", "None", ")", ":", "return", "BucketListResultSet", "(", "self", ",", "prefix", ",", "delimiter", ",", "marker", ",", "headers", ")" ]
List key objects within a bucket. This returns an instance of a BucketListResultSet that automatically handles all of the result paging, etc. from S3. You just need to keep iterating until there are no more results. Called with no arguments, this will return an iterator object across all keys within the bucket. The Key objects returned by the iterator are obtained by parsing the results of a GET on the bucket, also known as the List Objects request. The XML returned by this request contains only a subset of the information about each key. Certain metadata fields such as Content-Type and user metadata are not available in the XML. Therefore, if you want these additional metadata fields you will have to do a HEAD request on the Key in the bucket. :type prefix: string :param prefix: allows you to limit the listing to a particular prefix. For example, if you call the method with prefix='/foo/' then the iterator will only cycle through the keys that begin with the string '/foo/'. :type delimiter: string :param delimiter: can be used in conjunction with the prefix to allow you to organize and browse your keys hierarchically. See: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/ for more details. :type marker: string :param marker: The "marker" of where you are in the result set :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` :return: an instance of a BucketListResultSet that handles paging, etc
[ "List", "key", "objects", "within", "a", "bucket", ".", "This", "returns", "an", "instance", "of", "a", "BucketListResultSet", "that", "automatically", "handles", "all", "of", "the", "result", "paging", "etc", ".", "from", "S3", ".", "You", "just", "need", "to", "keep", "iterating", "until", "there", "are", "no", "more", "results", ".", "Called", "with", "no", "arguments", "this", "will", "return", "an", "iterator", "object", "across", "all", "keys", "within", "the", "bucket", "." ]
python
train
dls-controls/pymalcolm
malcolm/core/notifier.py
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/core/notifier.py#L136-L170
def notify_changes(self, changes): # type: (List[List]) -> CallbackResponses """Set our data and notify anyone listening Args: changes (list): [[path, optional data]] where path is the path to what has changed, and data is the unserialized object that has changed Returns: list: [(callback, Response)] that need to be called """ ret = [] child_changes = {} for change in changes: # Add any changes that our children need to know about self._add_child_change(change, child_changes) # If we have update subscribers, serialize at this level if self.update_requests: serialized = serialize_object(self.data) for request in self.update_requests: ret.append(request.update_response(serialized)) # If we have delta subscribers, serialize the changes if self.delta_requests: for change in changes: change[-1] = serialize_object(change[-1]) for request in self.delta_requests: ret.append(request.delta_response(changes)) # Now notify our children for name, child_changes in child_changes.items(): ret += self.children[name].notify_changes(child_changes) return ret
[ "def", "notify_changes", "(", "self", ",", "changes", ")", ":", "# type: (List[List]) -> CallbackResponses", "ret", "=", "[", "]", "child_changes", "=", "{", "}", "for", "change", "in", "changes", ":", "# Add any changes that our children need to know about", "self", ".", "_add_child_change", "(", "change", ",", "child_changes", ")", "# If we have update subscribers, serialize at this level", "if", "self", ".", "update_requests", ":", "serialized", "=", "serialize_object", "(", "self", ".", "data", ")", "for", "request", "in", "self", ".", "update_requests", ":", "ret", ".", "append", "(", "request", ".", "update_response", "(", "serialized", ")", ")", "# If we have delta subscribers, serialize the changes", "if", "self", ".", "delta_requests", ":", "for", "change", "in", "changes", ":", "change", "[", "-", "1", "]", "=", "serialize_object", "(", "change", "[", "-", "1", "]", ")", "for", "request", "in", "self", ".", "delta_requests", ":", "ret", ".", "append", "(", "request", ".", "delta_response", "(", "changes", ")", ")", "# Now notify our children", "for", "name", ",", "child_changes", "in", "child_changes", ".", "items", "(", ")", ":", "ret", "+=", "self", ".", "children", "[", "name", "]", ".", "notify_changes", "(", "child_changes", ")", "return", "ret" ]
Set our data and notify anyone listening Args: changes (list): [[path, optional data]] where path is the path to what has changed, and data is the unserialized object that has changed Returns: list: [(callback, Response)] that need to be called
[ "Set", "our", "data", "and", "notify", "anyone", "listening" ]
python
train
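A minimal sketch of driving notify_changes, assuming the notifier's data is a nested structure and that a path is a list of keys into it (both inferred from the docstring, not confirmed here). Per the docstring, each returned entry pairs a callback with the Response it should deliver:

# Report that data['attributes']['value'] changed to 42.
for callback, response in notifier.notify_changes([[['attributes', 'value'], 42]]):
    callback(response)  # deliver each queued Response to its subscriber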
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/widgets/reftrackwidget.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L324-L333
def get_taskfileinfo_selection(self, ): """Return a taskfileinfo that the user chose from the available options :returns: the chosen taskfileinfo :rtype: :class:`jukeboxcore.filesys.TaskFileInfo` :raises: None """ sel = OptionSelector(self.reftrack) sel.exec_() return sel.selected
[ "def", "get_taskfileinfo_selection", "(", "self", ",", ")", ":", "sel", "=", "OptionSelector", "(", "self", ".", "reftrack", ")", "sel", ".", "exec_", "(", ")", "return", "sel", ".", "selected" ]
Return a taskfileinfo that the user chose from the available options :returns: the chosen taskfileinfo :rtype: :class:`jukeboxcore.filesys.TaskFileInfo` :raises: None
[ "Return", "a", "taskfileinfo", "that", "the", "user", "chose", "from", "the", "available", "options" ]
python
train
alphatwirl/alphatwirl
alphatwirl/concurrently/CommunicationChannel.py
https://github.com/alphatwirl/alphatwirl/blob/5138eeba6cd8a334ba52d6c2c022b33c61e3ba38/alphatwirl/concurrently/CommunicationChannel.py#L114-L120
def begin(self): """begin """ if self.isopen: return self.dropbox.open() self.isopen = True
[ "def", "begin", "(", "self", ")", ":", "if", "self", ".", "isopen", ":", "return", "self", ".", "dropbox", ".", "open", "(", ")", "self", ".", "isopen", "=", "True" ]
begin
[ "begin" ]
python
valid
limodou/uliweb
uliweb/lib/werkzeug/serving.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/serving.py#L334-L341
def generate_adhoc_ssl_context(): """Generates an adhoc SSL context for the development server.""" from OpenSSL import SSL cert, pkey = generate_adhoc_ssl_pair() ctx = SSL.Context(SSL.SSLv23_METHOD) ctx.use_privatekey(pkey) ctx.use_certificate(cert) return ctx
[ "def", "generate_adhoc_ssl_context", "(", ")", ":", "from", "OpenSSL", "import", "SSL", "cert", ",", "pkey", "=", "generate_adhoc_ssl_pair", "(", ")", "ctx", "=", "SSL", ".", "Context", "(", "SSL", ".", "SSLv23_METHOD", ")", "ctx", ".", "use_privatekey", "(", "pkey", ")", "ctx", ".", "use_certificate", "(", "cert", ")", "return", "ctx" ]
Generates an adhoc SSL context for the development server.
[ "Generates", "an", "adhoc", "SSL", "context", "for", "the", "development", "server", "." ]
python
train
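A hedged sketch of wiring the adhoc context into Werkzeug's development server; `application` stands in for any WSGI app defined elsewhere:

from werkzeug.serving import run_simple

ctx = generate_adhoc_ssl_context()
# Serve the (hypothetical) WSGI app over HTTPS with the throwaway certificate.
run_simple('localhost', 4433, application, ssl_context=ctx)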
gamechanger/mongothon
mongothon/model.py
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L273-L276
def class_method(cls, f): """Decorator which dynamically binds class methods to the model for later use.""" setattr(cls, f.__name__, classmethod(f)) return f
[ "def", "class_method", "(", "cls", ",", "f", ")", ":", "setattr", "(", "cls", ",", "f", ".", "__name__", ",", "classmethod", "(", "f", ")", ")", "return", "f" ]
Decorator which dynamically binds class methods to the model for later use.
[ "Decorator", "which", "dynamically", "binds", "class", "methods", "to", "the", "model", "for", "later", "use", "." ]
python
train
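A hedged usage sketch of the decorator above; `User` stands in for any Mongothon model created elsewhere, and a pymongo-style find_one is assumed to be available on the model:

@User.class_method
def find_by_email(cls, email):
    # `cls` is the model class itself, so registered finders compose naturally.
    return cls.find_one({'email': email})

user = User.find_by_email('alice@example.com')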
mushkevych/scheduler
synergy/scheduler/state_machine_freerun.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/scheduler/state_machine_freerun.py#L142-L147
def _process_terminal_state(self, freerun_entry, uow, flow_request=None): """ method that takes care of processing unit_of_work records in STATE_PROCESSED, STATE_NOOP, STATE_INVALID, STATE_CANCELED states""" msg = 'UOW for {0} found in state {1}.'.format(freerun_entry.schedulable_name, uow.state) self._log_message(INFO, freerun_entry, msg) self.insert_and_publish_uow(freerun_entry, flow_request, reset_uow=True)
[ "def", "_process_terminal_state", "(", "self", ",", "freerun_entry", ",", "uow", ",", "flow_request", "=", "None", ")", ":", "msg", "=", "'UOW for {0} found in state {1}.'", ".", "format", "(", "freerun_entry", ".", "schedulable_name", ",", "uow", ".", "state", ")", "self", ".", "_log_message", "(", "INFO", ",", "freerun_entry", ",", "msg", ")", "self", ".", "insert_and_publish_uow", "(", "freerun_entry", ",", "flow_request", ",", "reset_uow", "=", "True", ")" ]
method that takes care of processing unit_of_work records in STATE_PROCESSED, STATE_NOOP, STATE_INVALID, STATE_CANCELED states
[ "method", "that", "takes", "care", "of", "processing", "unit_of_work", "records", "in", "STATE_PROCESSED", "STATE_NOOP", "STATE_INVALID", "STATE_CANCELED", "states" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py#L4027-L4039
def get_stp_mst_detail_output_cist_migrate_time(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_stp_mst_detail = ET.Element("get_stp_mst_detail") config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, "output") cist = ET.SubElement(output, "cist") migrate_time = ET.SubElement(cist, "migrate-time") migrate_time.text = kwargs.pop('migrate_time') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_stp_mst_detail_output_cist_migrate_time", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_stp_mst_detail", "=", "ET", ".", "Element", "(", "\"get_stp_mst_detail\"", ")", "config", "=", "get_stp_mst_detail", "output", "=", "ET", ".", "SubElement", "(", "get_stp_mst_detail", ",", "\"output\"", ")", "cist", "=", "ET", ".", "SubElement", "(", "output", ",", "\"cist\"", ")", "migrate_time", "=", "ET", ".", "SubElement", "(", "cist", ",", "\"migrate-time\"", ")", "migrate_time", ".", "text", "=", "kwargs", ".", "pop", "(", "'migrate_time'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
fp12/achallonge
challonge/participant.py
https://github.com/fp12/achallonge/blob/25780b3c48b66400a50ff9f884e4287afd4c89e4/challonge/participant.py#L135-L148
async def undo_check_in(self): """ Undo the check in for this participant |methcoro| Warning: |unstable| Raises: APIException """ res = await self.connection('POST', 'tournaments/{}/participants/{}/undo_check_in'.format(self._tournament_id, self._id)) self._refresh_from_json(res)
[ "async", "def", "undo_check_in", "(", "self", ")", ":", "res", "=", "await", "self", ".", "connection", "(", "'POST'", ",", "'tournaments/{}/participants/{}/undo_check_in'", ".", "format", "(", "self", ".", "_tournament_id", ",", "self", ".", "_id", ")", ")", "self", ".", "_refresh_from_json", "(", "res", ")" ]
Undo the check in for this participant |methcoro| Warning: |unstable| Raises: APIException
[ "Undo", "the", "check", "in", "for", "this", "participant" ]
python
train
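In practice the coroutine is awaited from inside an event loop; a one-line sketch, where the participant object is assumed to come from the achallonge tournament API:

# Inside an `async def` somewhere:
await participant.undo_check_in()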
jpscaletti/pyceo
pyceo/parser.py
https://github.com/jpscaletti/pyceo/blob/7f37eaf8e557d25f8e54634176139e0aad84b8df/pyceo/parser.py#L62-L69
def is_key(sarg): """Check if `sarg` is a key (eg. -foo, --foo) or a negative number (eg. -33). """ if not sarg.startswith("-"): return False if sarg.startswith("--"): return True return not sarg.lstrip("-").isnumeric()
[ "def", "is_key", "(", "sarg", ")", ":", "if", "not", "sarg", ".", "startswith", "(", "\"-\"", ")", ":", "return", "False", "if", "sarg", ".", "startswith", "(", "\"--\"", ")", ":", "return", "True", "return", "not", "sarg", ".", "lstrip", "(", "\"-\"", ")", ".", "isnumeric", "(", ")" ]
Check if `sarg` is a key (eg. -foo, --foo) or a negative number (eg. -33).
[ "Check", "if", "sarg", "is", "a", "key", "(", "eg", ".", "-", "foo", "--", "foo", ")", "or", "a", "negative", "number", "(", "eg", ".", "-", "33", ")", "." ]
python
train
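The key/negative-number distinction in a few doctest-style checks derived directly from the code above:

assert is_key('--foo')       # long option
assert is_key('-foo')        # short option
assert not is_key('-33')     # negative number, not a key
assert not is_key('foo')     # positional argument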
bububa/pyTOP
pyTOP/fenxiao.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/fenxiao.py#L52-L60
def get(self, session, discount_id=None, ext_fields=None): '''taobao.fenxiao.discounts.get Get discount information. Query discount information.''' request = TOPRequest('taobao.fenxiao.discounts.get') if discount_id!=None: request['discount_id'] = discount_id if ext_fields!=None: request['ext_fields'] = ext_fields self.create(self.execute(request, session)) return self.discounts
[ "def", "get", "(", "self", ",", "session", ",", "discount_id", "=", "None", ",", "ext_fields", "=", "None", ")", ":", "request", "=", "TOPRequest", "(", "'taobao.fenxiao.discounts.get'", ")", "if", "discount_id", "!=", "None", ":", "request", "[", "'discount_id'", "]", "=", "discount_id", "if", "ext_fields", "!=", "None", ":", "request", "[", "'ext_fields'", "]", "=", "ext_fields", "self", ".", "create", "(", "self", ".", "execute", "(", "request", ",", "session", ")", ")", "return", "self", ".", "discounts" ]
taobao.fenxiao.discounts.get Get discount information. Query discount information.
[ "taobao", ".", "fenxiao", ".", "discounts", ".", "get", "获取折扣信息", "查询折扣信息" ]
python
train
OnroerendErfgoed/oe_utils
oe_utils/range_parser.py
https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/range_parser.py#L101-L143
def set_link_headers(self, request, total_count): """ Sets Link headers on the response. When the Range header is present in the request no Link headers will be added. 4 links will be added: first, prev, next, last. If the current page is already the first page, the prev link will not be present. If the current page is already the last page, the next link will not be present. :param request: A request object :param total_count: The total amount of items available before paging """ response = request.response if request.headers.get('Range'): # Don't set the Link headers when custom ranges were used. return settings = request.registry.settings page_param = settings.get('oe.paging.page.queryparam', 'pagina') per_page_param = settings.get('oe.paging.per_page.queryparam', 'per_pagina') url = request.path_url try: queryparams = request.params.mixed() except AttributeError: queryparams = request.params page_size = self.get_page_size() current_page = self.start // page_size + 1 queryparams[per_page_param] = page_size links = { 'first': 1, 'last': int(math.ceil(float(total_count) / page_size)) } if current_page != links['first']: links['prev'] = current_page - 1 if current_page != links['last']: links['next'] = current_page + 1 response.headers['Link'] = self._make_link_headers(links, page_param, queryparams, url)
[ "def", "set_link_headers", "(", "self", ",", "request", ",", "total_count", ")", ":", "response", "=", "request", ".", "response", "if", "request", ".", "headers", ".", "get", "(", "'Range'", ")", ":", "# Don't set the Link headers when custom ranges were used.", "return", "settings", "=", "request", ".", "registry", ".", "settings", "page_param", "=", "settings", ".", "get", "(", "'oe.paging.page.queryparam'", ",", "'pagina'", ")", "per_page_param", "=", "settings", ".", "get", "(", "'oe.paging.per_page.queryparam'", ",", "'per_pagina'", ")", "url", "=", "request", ".", "path_url", "try", ":", "queryparams", "=", "request", ".", "params", ".", "mixed", "(", ")", "except", "AttributeError", ":", "queryparams", "=", "request", ".", "params", "page_size", "=", "self", ".", "get_page_size", "(", ")", "current_page", "=", "self", ".", "start", "//", "page_size", "+", "1", "queryparams", "[", "per_page_param", "]", "=", "page_size", "links", "=", "{", "'first'", ":", "1", ",", "'last'", ":", "int", "(", "math", ".", "ceil", "(", "float", "(", "total_count", ")", "/", "page_size", ")", ")", "}", "if", "current_page", "!=", "links", "[", "'first'", "]", ":", "links", "[", "'prev'", "]", "=", "current_page", "-", "1", "if", "current_page", "!=", "links", "[", "'last'", "]", ":", "links", "[", "'next'", "]", "=", "current_page", "+", "1", "response", ".", "headers", "[", "'Link'", "]", "=", "self", ".", "_make_link_headers", "(", "links", ",", "page_param", ",", "queryparams", ",", "url", ")" ]
Sets Link headers on the response. When the Range header is present in the request no Link headers will be added. 4 links will be added: first, prev, next, last. If the current page is already the first page, the prev link will not be present. If the current page is already the last page, the next link will not be present. :param request: A request object :param total_count: The total amount of items available before paging
[ "Sets", "Link", "headers", "on", "the", "response", "." ]
python
train
crate/crash
src/crate/crash/tabulate.py
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/tabulate.py#L770-L1055
def tabulate(tabular_data, headers=(), tablefmt="simple", floatfmt="g", numalign="decimal", stralign="left", missingval=""): """Format a fixed width table for pretty printing. >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]])) --- --------- 1 2.34 -56 8.999 2 10001 --- --------- The first required argument (`tabular_data`) can be a list-of-lists (or another iterable of iterables), a list of named tuples, a dictionary of iterables, an iterable of dictionaries, a two-dimensional NumPy array, NumPy record array, or a Pandas' dataframe. Table headers ------------- To print nice column headers, supply the second argument (`headers`): - `headers` can be an explicit list of column headers - if `headers="firstrow"`, then the first row of data is used - if `headers="keys"`, then dictionary keys or column indices are used Otherwise a headerless table is produced. If the number of headers is less than the number of columns, they are supposed to be names of the last columns. This is consistent with the plain-text format of R and Pandas' dataframes. >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]], ... headers="firstrow")) sex age ----- ----- ----- Alice F 24 Bob M 19 Column alignment ---------------- `tabulate` tries to detect column types automatically, and aligns the values properly. By default it aligns decimal points of the numbers (or flushes integer numbers to the right), and flushes everything else to the left. Possible column alignments (`numalign`, `stralign`) are: "right", "center", "left", "decimal" (only for `numalign`), and None (to disable alignment). Table formats ------------- `floatfmt` is a format specification used for columns which contain numeric data with a decimal point. `None` values are replaced with a `missingval` string: >>> print(tabulate([["spam", 1, None], ... ["eggs", 42, 3.14], ... ["other", None, 2.7]], missingval="?")) ----- -- ---- spam 1 ? eggs 42 3.14 other ? 2.7 ----- -- ---- Various plain-text table formats (`tablefmt`) are supported: 'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki', 'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of currently supported formats. "plain" format doesn't use any pseudographics to draw tables, it separates columns with a double space: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "plain")) strings numbers spam 41.9999 eggs 451 >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain")) spam 41.9999 eggs 451 "simple" format is like Pandoc simple_tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "simple")) strings numbers --------- --------- spam 41.9999 eggs 451 >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple")) ---- -------- spam 41.9999 eggs 451 ---- -------- "grid" is similar to tables produced by Emacs table.el package or Pandoc grid_tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "grid")) +-----------+-----------+ | strings | numbers | +===========+===========+ | spam | 41.9999 | +-----------+-----------+ | eggs | 451 | +-----------+-----------+ >>> print(tabulate([["this\\nis\\na multiline\\ntext", "41.9999", "foo\\nbar"], ["NULL", "451.0", ""]], ... 
["text", "numbers", "other"], "grid")) +-------------+----------+-------+ | text | numbers | other | +=============+==========+=======+ | this | 41.9999 | foo | | is | | bar | | a multiline | | | | text | | | +-------------+----------+-------+ | NULL | 451 | | +-------------+----------+-------+ >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid")) +------+----------+ | spam | 41.9999 | +------+----------+ | eggs | 451 | +------+----------+ "fancy_grid" draws a grid using box-drawing characters: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "fancy_grid")) ╒═══════════╤═══════════╕ │ strings │ numbers │ ╞═══════════╪═══════════╡ │ spam │ 41.9999 │ ├───────────┼───────────┤ │ eggs │ 451 │ ╘═══════════╧═══════════╛ "pipe" is like tables in PHP Markdown Extra extension or Pandoc pipe_tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "pipe")) | strings | numbers | |:----------|----------:| | spam | 41.9999 | | eggs | 451 | >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe")) |:-----|---------:| | spam | 41.9999 | | eggs | 451 | "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They are slightly different from "pipe" format by not using colons to define column alignment, and using a "+" sign to indicate line intersections: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "orgtbl")) | strings | numbers | |-----------+-----------| | spam | 41.9999 | | eggs | 451 | >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl")) | spam | 41.9999 | | eggs | 451 | "rst" is like a simple table format from reStructuredText; please note that reStructuredText accepts also "grid" tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "rst")) ========= ========= strings numbers ========= ========= spam 41.9999 eggs 451 ========= ========= >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst")) ==== ======== spam 41.9999 eggs 451 ==== ======== "mediawiki" produces a table markup used in Wikipedia and on other MediaWiki-based sites: >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], ... headers="firstrow", tablefmt="mediawiki")) {| class="wikitable" style="text-align: left;" |+ <!-- caption --> |- ! strings !! align="right"| numbers |- | spam || align="right"| 41.9999 |- | eggs || align="right"| 451 |} "html" produces HTML markup: >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], ... 
headers="firstrow", tablefmt="html")) <table> <tr><th>strings </th><th style="text-align: right;"> numbers</th></tr> <tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr> <tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr> </table> "latex" produces a tabular environment of LaTeX document markup: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex")) \\begin{tabular}{lr} \\hline spam & 41.9999 \\\\ eggs & 451 \\\\ \\hline \\end{tabular} "latex_booktabs" produces a tabular environment of LaTeX document markup using the booktabs.sty package: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs")) \\begin{tabular}{lr} \\toprule spam & 41.9999 \\\\ eggs & 451 \\\\ \\bottomrule \end{tabular} """ if tabular_data is None: tabular_data = [] list_of_lists, headers = _normalize_tabular_data(tabular_data, headers) # optimization: look for ANSI control codes once, # enable smart width functions only if a control code is found plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \ ['\t'.join(map(_text_type, row)) for row in list_of_lists]) has_invisible = re.search(_invisible_codes, plain_text) enable_widechars = wcwidth is not None and WIDE_CHARS_MODE is_multiline = _is_multiline(plain_text) width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline) # format rows and columns, convert numeric values to strings cols = list(zip(*list_of_lists)) coltypes = list(map(_column_type, cols)) cols = [[_format(v, ct, floatfmt, missingval, has_invisible) for v in c] for c, ct in zip(cols, coltypes)] # align columns aligns = [numalign if ct in [int, float] else stralign for ct in coltypes] minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0] * len(cols) cols = [_align_column(c, a, minw, has_invisible, enable_widechars, is_multiline) for c, a, minw in zip(cols, aligns, minwidths)] if headers: # align headers and add headers t_cols = cols or [['']] * len(headers) t_aligns = aligns or [stralign] * len(headers) minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)] headers = [_align_header(h, a, minw, width_fn(h), enable_widechars, is_multiline) for h, a, minw in zip(headers, t_aligns, minwidths)] rows = list(zip(*cols)) else: minwidths = [width_fn(c[0]) for c in cols] rows = list(zip(*cols)) if not isinstance(tablefmt, TableFormat): tablefmt = _table_formats.get(tablefmt, _table_formats["simple"]) return _format_table(tablefmt, headers, rows, minwidths, aligns, is_multiline)
[ "def", "tabulate", "(", "tabular_data", ",", "headers", "=", "(", ")", ",", "tablefmt", "=", "\"simple\"", ",", "floatfmt", "=", "\"g\"", ",", "numalign", "=", "\"decimal\"", ",", "stralign", "=", "\"left\"", ",", "missingval", "=", "\"\"", ")", ":", "if", "tabular_data", "is", "None", ":", "tabular_data", "=", "[", "]", "list_of_lists", ",", "headers", "=", "_normalize_tabular_data", "(", "tabular_data", ",", "headers", ")", "# optimization: look for ANSI control codes once,", "# enable smart width functions only if a control code is found", "plain_text", "=", "'\\n'", ".", "join", "(", "[", "'\\t'", ".", "join", "(", "map", "(", "_text_type", ",", "headers", ")", ")", "]", "+", "[", "'\\t'", ".", "join", "(", "map", "(", "_text_type", ",", "row", ")", ")", "for", "row", "in", "list_of_lists", "]", ")", "has_invisible", "=", "re", ".", "search", "(", "_invisible_codes", ",", "plain_text", ")", "enable_widechars", "=", "wcwidth", "is", "not", "None", "and", "WIDE_CHARS_MODE", "is_multiline", "=", "_is_multiline", "(", "plain_text", ")", "width_fn", "=", "_choose_width_fn", "(", "has_invisible", ",", "enable_widechars", ",", "is_multiline", ")", "# format rows and columns, convert numeric values to strings", "cols", "=", "list", "(", "zip", "(", "*", "list_of_lists", ")", ")", "coltypes", "=", "list", "(", "map", "(", "_column_type", ",", "cols", ")", ")", "cols", "=", "[", "[", "_format", "(", "v", ",", "ct", ",", "floatfmt", ",", "missingval", ",", "has_invisible", ")", "for", "v", "in", "c", "]", "for", "c", ",", "ct", "in", "zip", "(", "cols", ",", "coltypes", ")", "]", "# align columns", "aligns", "=", "[", "numalign", "if", "ct", "in", "[", "int", ",", "float", "]", "else", "stralign", "for", "ct", "in", "coltypes", "]", "minwidths", "=", "[", "width_fn", "(", "h", ")", "+", "MIN_PADDING", "for", "h", "in", "headers", "]", "if", "headers", "else", "[", "0", "]", "*", "len", "(", "cols", ")", "cols", "=", "[", "_align_column", "(", "c", ",", "a", ",", "minw", ",", "has_invisible", ",", "enable_widechars", ",", "is_multiline", ")", "for", "c", ",", "a", ",", "minw", "in", "zip", "(", "cols", ",", "aligns", ",", "minwidths", ")", "]", "if", "headers", ":", "# align headers and add headers", "t_cols", "=", "cols", "or", "[", "[", "''", "]", "]", "*", "len", "(", "headers", ")", "t_aligns", "=", "aligns", "or", "[", "stralign", "]", "*", "len", "(", "headers", ")", "minwidths", "=", "[", "max", "(", "minw", ",", "width_fn", "(", "c", "[", "0", "]", ")", ")", "for", "minw", ",", "c", "in", "zip", "(", "minwidths", ",", "t_cols", ")", "]", "headers", "=", "[", "_align_header", "(", "h", ",", "a", ",", "minw", ",", "width_fn", "(", "h", ")", ",", "enable_widechars", ",", "is_multiline", ")", "for", "h", ",", "a", ",", "minw", "in", "zip", "(", "headers", ",", "t_aligns", ",", "minwidths", ")", "]", "rows", "=", "list", "(", "zip", "(", "*", "cols", ")", ")", "else", ":", "minwidths", "=", "[", "width_fn", "(", "c", "[", "0", "]", ")", "for", "c", "in", "cols", "]", "rows", "=", "list", "(", "zip", "(", "*", "cols", ")", ")", "if", "not", "isinstance", "(", "tablefmt", ",", "TableFormat", ")", ":", "tablefmt", "=", "_table_formats", ".", "get", "(", "tablefmt", ",", "_table_formats", "[", "\"simple\"", "]", ")", "return", "_format_table", "(", "tablefmt", ",", "headers", ",", "rows", ",", "minwidths", ",", "aligns", ",", "is_multiline", ")" ]
Format a fixed width table for pretty printing. >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]])) --- --------- 1 2.34 -56 8.999 2 10001 --- --------- The first required argument (`tabular_data`) can be a list-of-lists (or another iterable of iterables), a list of named tuples, a dictionary of iterables, an iterable of dictionaries, a two-dimensional NumPy array, NumPy record array, or a Pandas' dataframe. Table headers ------------- To print nice column headers, supply the second argument (`headers`): - `headers` can be an explicit list of column headers - if `headers="firstrow"`, then the first row of data is used - if `headers="keys"`, then dictionary keys or column indices are used Otherwise a headerless table is produced. If the number of headers is less than the number of columns, they are supposed to be names of the last columns. This is consistent with the plain-text format of R and Pandas' dataframes. >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]], ... headers="firstrow")) sex age ----- ----- ----- Alice F 24 Bob M 19 Column alignment ---------------- `tabulate` tries to detect column types automatically, and aligns the values properly. By default it aligns decimal points of the numbers (or flushes integer numbers to the right), and flushes everything else to the left. Possible column alignments (`numalign`, `stralign`) are: "right", "center", "left", "decimal" (only for `numalign`), and None (to disable alignment). Table formats ------------- `floatfmt` is a format specification used for columns which contain numeric data with a decimal point. `None` values are replaced with a `missingval` string: >>> print(tabulate([["spam", 1, None], ... ["eggs", 42, 3.14], ... ["other", None, 2.7]], missingval="?")) ----- -- ---- spam 1 ? eggs 42 3.14 other ? 2.7 ----- -- ---- Various plain-text table formats (`tablefmt`) are supported: 'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki', 'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of currently supported formats. "plain" format doesn't use any pseudographics to draw tables, it separates columns with a double space: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "plain")) strings numbers spam 41.9999 eggs 451 >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain")) spam 41.9999 eggs 451 "simple" format is like Pandoc simple_tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "simple")) strings numbers --------- --------- spam 41.9999 eggs 451 >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple")) ---- -------- spam 41.9999 eggs 451 ---- -------- "grid" is similar to tables produced by Emacs table.el package or Pandoc grid_tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "grid")) +-----------+-----------+ | strings | numbers | +===========+===========+ | spam | 41.9999 | +-----------+-----------+ | eggs | 451 | +-----------+-----------+ >>> print(tabulate([["this\\nis\\na multiline\\ntext", "41.9999", "foo\\nbar"], ["NULL", "451.0", ""]], ... 
["text", "numbers", "other"], "grid")) +-------------+----------+-------+ | text | numbers | other | +=============+==========+=======+ | this | 41.9999 | foo | | is | | bar | | a multiline | | | | text | | | +-------------+----------+-------+ | NULL | 451 | | +-------------+----------+-------+ >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid")) +------+----------+ | spam | 41.9999 | +------+----------+ | eggs | 451 | +------+----------+ "fancy_grid" draws a grid using box-drawing characters: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "fancy_grid")) ╒═══════════╤═══════════╕ │ strings │ numbers │ ╞═══════════╪═══════════╡ │ spam │ 41.9999 │ ├───────────┼───────────┤ │ eggs │ 451 │ ╘═══════════╧═══════════╛ "pipe" is like tables in PHP Markdown Extra extension or Pandoc pipe_tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "pipe")) | strings | numbers | |:----------|----------:| | spam | 41.9999 | | eggs | 451 | >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe")) |:-----|---------:| | spam | 41.9999 | | eggs | 451 | "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They are slightly different from "pipe" format by not using colons to define column alignment, and using a "+" sign to indicate line intersections: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "orgtbl")) | strings | numbers | |-----------+-----------| | spam | 41.9999 | | eggs | 451 | >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl")) | spam | 41.9999 | | eggs | 451 | "rst" is like a simple table format from reStructuredText; please note that reStructuredText accepts also "grid" tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "rst")) ========= ========= strings numbers ========= ========= spam 41.9999 eggs 451 ========= ========= >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst")) ==== ======== spam 41.9999 eggs 451 ==== ======== "mediawiki" produces a table markup used in Wikipedia and on other MediaWiki-based sites: >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], ... headers="firstrow", tablefmt="mediawiki")) {| class="wikitable" style="text-align: left;" |+ <!-- caption --> |- ! strings !! align="right"| numbers |- | spam || align="right"| 41.9999 |- | eggs || align="right"| 451 |} "html" produces HTML markup: >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], ... headers="firstrow", tablefmt="html")) <table> <tr><th>strings </th><th style="text-align: right;"> numbers</th></tr> <tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr> <tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr> </table> "latex" produces a tabular environment of LaTeX document markup: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex")) \\begin{tabular}{lr} \\hline spam & 41.9999 \\\\ eggs & 451 \\\\ \\hline \\end{tabular} "latex_booktabs" produces a tabular environment of LaTeX document markup using the booktabs.sty package: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs")) \\begin{tabular}{lr} \\toprule spam & 41.9999 \\\\ eggs & 451 \\\\ \\bottomrule \end{tabular}
[ "Format", "a", "fixed", "width", "table", "for", "pretty", "printing", "." ]
python
train
bblfsh/client-python
bblfsh/compat.py
https://github.com/bblfsh/client-python/blob/815835d191d5e385973f3c685849cc3b46aa20a5/bblfsh/compat.py#L270-L275
def filter_nodes(n: Node, query: str) -> CompatNodeIterator: """ Utility function. Same as filter() but will only filter for nodes (i. e. it will exclude scalars and positions). """ return CompatNodeIterator(filter(n, query)._nodeit, only_nodes=True)
[ "def", "filter_nodes", "(", "n", ":", "Node", ",", "query", ":", "str", ")", "->", "CompatNodeIterator", ":", "return", "CompatNodeIterator", "(", "filter", "(", "n", ",", "query", ")", ".", "_nodeit", ",", "only_nodes", "=", "True", ")" ]
Utility function. Same as filter() but will only filter for nodes (i. e. it will exclude scalars and positions).
[ "Utility", "function", ".", "Same", "as", "filter", "()", "but", "will", "only", "filter", "for", "nodes", "(", "i", ".", "e", ".", "it", "will", "exclude", "scalars", "and", "positions", ")", "." ]
python
train
data61/clkhash
clkhash/randomnames.py
https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/randomnames.py#L106-L129
def generate_random_person(self, n): # type: (int) -> Iterable[Tuple[str, str, str, str]] """ Generator that yields details on a person with plausible name, sex and age. :yields: Generated data for one person tuple - (id: str, name: str('First Last'), birthdate: str('YYYY/MM/DD'), sex: str('M' | 'F') ) """ assert self.all_male_first_names is not None assert self.all_female_first_names is not None assert self.all_last_names is not None for i in range(n): sex = 'M' if random.random() > 0.5 else 'F' dob = random_date(self.earliest_birthday, self.latest_birthday).strftime("%Y/%m/%d") first_name = random.choice(self.all_male_first_names) if sex == 'M' else random.choice( self.all_female_first_names) last_name = random.choice(self.all_last_names) yield ( str(i), first_name + ' ' + last_name, dob, sex )
[ "def", "generate_random_person", "(", "self", ",", "n", ")", ":", "# type: (int) -> Iterable[Tuple[str, str, str, str]]", "assert", "self", ".", "all_male_first_names", "is", "not", "None", "assert", "self", ".", "all_female_first_names", "is", "not", "None", "assert", "self", ".", "all_last_names", "is", "not", "None", "for", "i", "in", "range", "(", "n", ")", ":", "sex", "=", "'M'", "if", "random", ".", "random", "(", ")", ">", "0.5", "else", "'F'", "dob", "=", "random_date", "(", "self", ".", "earliest_birthday", ",", "self", ".", "latest_birthday", ")", ".", "strftime", "(", "\"%Y/%m/%d\"", ")", "first_name", "=", "random", ".", "choice", "(", "self", ".", "all_male_first_names", ")", "if", "sex", "==", "'M'", "else", "random", ".", "choice", "(", "self", ".", "all_female_first_names", ")", "last_name", "=", "random", ".", "choice", "(", "self", ".", "all_last_names", ")", "yield", "(", "str", "(", "i", ")", ",", "first_name", "+", "' '", "+", "last_name", ",", "dob", ",", "sex", ")" ]
Generator that yields details on a person with plausible name, sex and age. :yields: Generated data for one person tuple - (id: str, name: str('First Last'), birthdate: str('YYYY/MM/DD'), sex: str('M' | 'F') )
[ "Generator", "that", "yields", "details", "on", "a", "person", "with", "plausible", "name", "sex", "and", "age", "." ]
python
train
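Consuming the generator; the surrounding instance (here called `names`, a hypothetical name) is assumed to have loaded its first/last name lists already:

for pid, name, dob, sex in names.generate_random_person(3):
    print(pid, name, dob, sex)
# e.g. 0 Alice Jones 1962/07/01 F  -- note the dates come out as YYYY/MM/DD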
HDI-Project/mit-d3m
mit_d3m/config.py
https://github.com/HDI-Project/mit-d3m/blob/3ab44eb5db8de8e28a29ca4b695a7a4becf45275/mit_d3m/config.py#L10-L60
def build_config(dataset, datasets_dir, phase, problem=None, output_dir='data/output'): """ root@d3m-example-pod:/# cat /input/185_baseball/test_config.json { "problem_schema": "/input/TEST/problem_TEST/problemDoc.json", "problem_root": "/input/TEST/problem_TEST", "dataset_schema": "/input/TEST/dataset_TEST/datasetDoc.json", "test_data_root": "/input/TEST/dataset_TEST", "results_root": "/output/predictions", "executables_root": "/output/executables", "temp_storage_root": "/output/supporting_files" } root@d3m-example-pod:/# cat /input/185_baseball/search_config.json { "problem_schema": "/input/TRAIN/problem_TRAIN/problemDoc.json", "problem_root": "/input/TRAIN/problem_TRAIN", "dataset_schema": "/input/TRAIN/dataset_TRAIN/datasetDoc.json", "training_data_root": "/input/TRAIN/dataset_TRAIN", "pipeline_logs_root": "/output/pipelines", "executables_root": "/output/executables", "user_problems_root": "/output/user_problems", "temp_storage_root": "/output/supporting_files" } """ if problem: full_phase = phase + '_' + problem else: full_phase = phase root_dir = os.path.join(datasets_dir, dataset, full_phase) problem_root = os.path.join(root_dir, 'problem_' + phase) data_root = os.path.join(root_dir, 'dataset_' + phase) config = { 'problem_root': problem_root, 'problem_schema': os.path.join(problem_root, 'problemDoc.json'), 'dataset_schema': os.path.join(data_root, 'datasetDoc.json'), 'executables_root': os.path.join(output_dir, 'executables'), 'temp_storage_root': os.path.join(output_dir, 'supporting_files'), } if phase == 'TRAIN': config['training_data_root'] = data_root config['pipeline_logs_root'] = os.path.join(output_dir, 'pipelines') else: config['test_data_root'] = data_root config['results_root'] = os.path.join(output_dir, 'predictions') return config
[ "def", "build_config", "(", "dataset", ",", "datasets_dir", ",", "phase", ",", "problem", "=", "None", ",", "output_dir", "=", "'data/output'", ")", ":", "if", "problem", ":", "full_phase", "=", "phase", "+", "'_'", "+", "problem", "else", ":", "full_phase", "=", "phase", "root_dir", "=", "os", ".", "path", ".", "join", "(", "datasets_dir", ",", "dataset", ",", "full_phase", ")", "problem_root", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "'problem_'", "+", "phase", ")", "data_root", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "'dataset_'", "+", "phase", ")", "config", "=", "{", "'problem_root'", ":", "problem_root", ",", "'problem_schema'", ":", "os", ".", "path", ".", "join", "(", "problem_root", ",", "'problemDoc.json'", ")", ",", "'dataset_schema'", ":", "os", ".", "path", ".", "join", "(", "data_root", ",", "'datasetDoc.json'", ")", ",", "'executables_root'", ":", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'executables'", ")", ",", "'temp_storage_root'", ":", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'supporting_files'", ")", ",", "}", "if", "phase", "==", "'TRAIN'", ":", "config", "[", "'training_data_root'", "]", "=", "data_root", "config", "[", "'pipeline_logs_root'", "]", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'pipelines'", ")", "else", ":", "config", "[", "'test_data_root'", "]", "=", "data_root", "config", "[", "'results_root'", "]", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'predictions'", ")", "return", "config" ]
root@d3m-example-pod:/# cat /input/185_baseball/test_config.json { "problem_schema": "/input/TEST/problem_TEST/problemDoc.json", "problem_root": "/input/TEST/problem_TEST", "dataset_schema": "/input/TEST/dataset_TEST/datasetDoc.json", "test_data_root": "/input/TEST/dataset_TEST", "results_root": "/output/predictions", "executables_root": "/output/executables", "temp_storage_root": "/output/supporting_files" } root@d3m-example-pod:/# cat /input/185_baseball/search_config.json { "problem_schema": "/input/TRAIN/problem_TRAIN/problemDoc.json", "problem_root": "/input/TRAIN/problem_TRAIN", "dataset_schema": "/input/TRAIN/dataset_TRAIN/datasetDoc.json", "training_data_root": "/input/TRAIN/dataset_TRAIN", "pipeline_logs_root": "/output/pipelines", "executables_root": "/output/executables", "user_problems_root": "/output/user_problems", "temp_storage_root": "/output/supporting_files" }
[ "root@d3m", "-", "example", "-", "pod", ":", "/", "#", "cat", "/", "input", "/", "185_baseball", "/", "test_config", ".", "json", "{", "problem_schema", ":", "/", "input", "/", "TEST", "/", "problem_TEST", "/", "problemDoc", ".", "json", "problem_root", ":", "/", "input", "/", "TEST", "/", "problem_TEST", "dataset_schema", ":", "/", "input", "/", "TEST", "/", "dataset_TEST", "/", "datasetDoc", ".", "json", "test_data_root", ":", "/", "input", "/", "TEST", "/", "dataset_TEST", "results_root", ":", "/", "output", "/", "predictions", "executables_root", ":", "/", "output", "/", "executables", "temp_storage_root", ":", "/", "output", "/", "supporting_files", "}", "root@d3m", "-", "example", "-", "pod", ":", "/", "#", "cat", "/", "input", "/", "185_baseball", "/", "search_config", ".", "json", "{", "problem_schema", ":", "/", "input", "/", "TRAIN", "/", "problem_TRAIN", "/", "problemDoc", ".", "json", "problem_root", ":", "/", "input", "/", "TRAIN", "/", "problem_TRAIN", "dataset_schema", ":", "/", "input", "/", "TRAIN", "/", "dataset_TRAIN", "/", "datasetDoc", ".", "json", "training_data_root", ":", "/", "input", "/", "TRAIN", "/", "dataset_TRAIN", "pipeline_logs_root", ":", "/", "output", "/", "pipelines", "executables_root", ":", "/", "output", "/", "executables", "user_problems_root", ":", "/", "output", "/", "user_problems", "temp_storage_root", ":", "/", "output", "/", "supporting_files", "}" ]
python
train
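A small illustration of the paths produced for the TRAIN phase; the dataset name and datasets directory are hypothetical, the output paths follow the default output_dir:

config = build_config('185_baseball', '/data/datasets', 'TRAIN')
# config['problem_schema'] ->
#     '/data/datasets/185_baseball/TRAIN/problem_TRAIN/problemDoc.json'
# config['training_data_root'] ->
#     '/data/datasets/185_baseball/TRAIN/dataset_TRAIN'
# config['pipeline_logs_root'] -> 'data/output/pipelines'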
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L1321-L1327
def system_global_search(input_params={}, always_retry=True, **kwargs): """ Invokes the /system/globalSearch API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method:-/system/globalSearch """ return DXHTTPRequest('/system/globalSearch', input_params, always_retry=always_retry, **kwargs)
[ "def", "system_global_search", "(", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/system/globalSearch'", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", "*", "*", "kwargs", ")" ]
Invokes the /system/globalSearch API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method:-/system/globalSearch
[ "Invokes", "the", "/", "system", "/", "globalSearch", "API", "method", "." ]
python
train
mjirik/imtools
imtools/sample_data.py
https://github.com/mjirik/imtools/blob/eb29fa59df0e0684d8334eb3bc5ef36ea46d1d3a/imtools/sample_data.py#L227-L253
def get_conda_path(): """ Return anaconda or miniconda directory :return: anaconda directory """ dstdir = '' # try: import subprocess import re # conda info --root works only for the root environment # p = subprocess.Popen(['conda', 'info', '--root'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) p = subprocess.Popen(['conda', 'info', '-e'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() dstdir = out.strip() dstdir = re.search("\*(.*)\n", dstdir).group(1).strip() # except: # import traceback # traceback.print_exc() # import os.path as op # conda_pth = op.expanduser('~/anaconda/bin') # if not op.exists(conda_pth): # conda_pth = op.expanduser('~/miniconda/bin') # return conda_pth return dstdir
[ "def", "get_conda_path", "(", ")", ":", "dstdir", "=", "''", "# try:", "import", "subprocess", "import", "re", "# cond info --root work only for root environment", "# p = subprocess.Popen(['conda', 'info', '--root'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)", "p", "=", "subprocess", ".", "Popen", "(", "[", "'conda'", ",", "'info'", ",", "'-e'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "out", ",", "err", "=", "p", ".", "communicate", "(", ")", "dstdir", "=", "out", ".", "strip", "(", ")", "dstdir", "=", "re", ".", "search", "(", "\"\\*(.*)\\n\"", ",", "dstdir", ")", ".", "group", "(", "1", ")", ".", "strip", "(", ")", "# except:", "# import traceback", "# traceback.print_exc()", "# import os.path as op", "# conda_pth = op.expanduser('~/anaconda/bin')", "# if not op.exists(conda_pth):", "# conda_pth = op.expanduser('~/miniconda/bin')", "# return conda_pth", "return", "dstdir" ]
Return anaconda or miniconda directory :return: anaconda directory
[ "Return", "anaconda", "or", "miniconda", "directory", ":", "return", ":", "anaconda", "directory" ]
python
train
tbielawa/bitmath
bitmath/integrations.py
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/integrations.py#L92-L104
def update(self, pbar): """Updates the widget with the current NIST/SI speed. Basically, this calculates the average rate of update and figures out how to make a "pretty" prefix unit""" if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6: scaled = bitmath.Byte() else: speed = pbar.currval / pbar.seconds_elapsed scaled = bitmath.Byte(speed).best_prefix(system=self.system) return scaled.format(self.format)
[ "def", "update", "(", "self", ",", "pbar", ")", ":", "if", "pbar", ".", "seconds_elapsed", "<", "2e-6", "or", "pbar", ".", "currval", "<", "2e-6", ":", "scaled", "=", "bitmath", ".", "Byte", "(", ")", "else", ":", "speed", "=", "pbar", ".", "currval", "/", "pbar", ".", "seconds_elapsed", "scaled", "=", "bitmath", ".", "Byte", "(", "speed", ")", ".", "best_prefix", "(", "system", "=", "self", ".", "system", ")", "return", "scaled", ".", "format", "(", "self", ".", "format", ")" ]
Updates the widget with the current NIST/SI speed. Basically, this calculates the average rate of update and figures out how to make a "pretty" prefix unit
[ "Updates", "the", "widget", "with", "the", "current", "NIST", "/", "SI", "speed", "." ]
python
train
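The heart of the widget is bitmath's best_prefix(); the same calculation standalone, outside any progress bar:

import bitmath

speed = 1536000 / 2.0                           # bytes transferred / seconds elapsed
scaled = bitmath.Byte(speed).best_prefix(system=bitmath.NIST)
print(scaled.format('{value:.2f} {unit}/s'))    # -> 750.00 KiB/s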
sentinel-hub/eo-learn
docs/source/conf.py
https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/docs/source/conf.py#L221-L249
def process_readme(): """ Function which will process README.md file and divide it into INTRO.md and INSTALL.md, which will be used in documentation """ with open('../../README.md', 'r') as file: readme = file.read() readme = readme.replace('# eo-learn', '# Introduction').replace('docs/source/', '') readme = readme.replace('**`', '**').replace('`**', '**') chapters = [[]] for line in readme.split('\n'): if line.strip().startswith('## '): chapters.append([]) if line.startswith('<img'): line = '<p></p>' chapters[-1].append(line) chapters = ['\n'.join(chapter) for chapter in chapters] intro = '\n'.join([chapter for chapter in chapters if not (chapter.startswith('## Install') or chapter.startswith('## Documentation'))]) install = '\n'.join([chapter for chapter in chapters if chapter.startswith('## Install')]) with open(os.path.join(MARKDOWNS_FOLDER, 'INTRO.md'), 'w') as file: file.write(intro) with open(os.path.join(MARKDOWNS_FOLDER, 'INSTALL.md'), 'w') as file: file.write(install)
[ "def", "process_readme", "(", ")", ":", "with", "open", "(", "'../../README.md'", ",", "'r'", ")", "as", "file", ":", "readme", "=", "file", ".", "read", "(", ")", "readme", "=", "readme", ".", "replace", "(", "'# eo-learn'", ",", "'# Introduction'", ")", ".", "replace", "(", "'docs/source/'", ",", "''", ")", "readme", "=", "readme", ".", "replace", "(", "'**`'", ",", "'**'", ")", ".", "replace", "(", "'`**'", ",", "'**'", ")", "chapters", "=", "[", "[", "]", "]", "for", "line", "in", "readme", ".", "split", "(", "'\\n'", ")", ":", "if", "line", ".", "strip", "(", ")", ".", "startswith", "(", "'## '", ")", ":", "chapters", ".", "append", "(", "[", "]", ")", "if", "line", ".", "startswith", "(", "'<img'", ")", ":", "line", "=", "'<p></p>'", "chapters", "[", "-", "1", "]", ".", "append", "(", "line", ")", "chapters", "=", "[", "'\\n'", ".", "join", "(", "chapter", ")", "for", "chapter", "in", "chapters", "]", "intro", "=", "'\\n'", ".", "join", "(", "[", "chapter", "for", "chapter", "in", "chapters", "if", "not", "(", "chapter", ".", "startswith", "(", "'## Install'", ")", "or", "chapter", ".", "startswith", "(", "'## Documentation'", ")", ")", "]", ")", "install", "=", "'\\n'", ".", "join", "(", "[", "chapter", "for", "chapter", "in", "chapters", "if", "chapter", ".", "startswith", "(", "'## Install'", ")", "]", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "MARKDOWNS_FOLDER", ",", "'INTRO.md'", ")", ",", "'w'", ")", "as", "file", ":", "file", ".", "write", "(", "intro", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "MARKDOWNS_FOLDER", ",", "'INSTALL.md'", ")", ",", "'w'", ")", "as", "file", ":", "file", ".", "write", "(", "install", ")" ]
Function which will process README.md file and divide it into INTRO.md and INSTALL.md, which will be used in documentation
[ "Function", "which", "will", "process", "README", ".", "md", "file", "and", "divide", "it", "into", "INTRO", ".", "md", "and", "INSTALL", ".", "md", "which", "will", "be", "used", "in", "documentation" ]
python
train
gwastro/pycbc
pycbc/inference/models/gaussian_noise.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/models/gaussian_noise.py#L423-L443
def det_optimal_snrsq(self, det): """Returns the optimal SNR squared in the given detector. Parameters ---------- det : str The name of the detector. Returns ------- float : The optimal SNR squared. """ # try to get it from current stats try: return getattr(self._current_stats, '{}_optimal_snrsq'.format(det)) except AttributeError: # hasn't been calculated yet; call loglr to do so self._loglr() # now try returning again return getattr(self._current_stats, '{}_optimal_snrsq'.format(det))
[ "def", "det_optimal_snrsq", "(", "self", ",", "det", ")", ":", "# try to get it from current stats", "try", ":", "return", "getattr", "(", "self", ".", "_current_stats", ",", "'{}_optimal_snrsq'", ".", "format", "(", "det", ")", ")", "except", "AttributeError", ":", "# hasn't been calculated yet; call loglr to do so", "self", ".", "_loglr", "(", ")", "# now try returning again", "return", "getattr", "(", "self", ".", "_current_stats", ",", "'{}_optimal_snrsq'", ".", "format", "(", "det", ")", ")" ]
Returns the optimal SNR squared in the given detector. Parameters ---------- det : str The name of the detector. Returns ------- float : The optimal SNR squared.
[ "Returns", "the", "opitmal", "SNR", "squared", "in", "the", "given", "detector", "." ]
python
train
projectshift/shift-boiler
boiler/cli/user.py
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/cli/user.py#L19-L29
def find_user(search_params): """ Find user Attempts to find a user by a set of search params. You must be in application context. """ user = None params = {prop: value for prop, value in search_params.items() if value} if 'id' in params or 'email' in params: user = user_service.first(**params) return user
[ "def", "find_user", "(", "search_params", ")", ":", "user", "=", "None", "params", "=", "{", "prop", ":", "value", "for", "prop", ",", "value", "in", "search_params", ".", "items", "(", ")", "if", "value", "}", "if", "'id'", "in", "params", "or", "'email'", "in", "params", ":", "user", "=", "user_service", ".", "first", "(", "*", "*", "params", ")", "return", "user" ]
Find user Attempts to find a user by a set of search params. You must be in application context.
[ "Find", "user", "Attempts", "to", "find", "a", "user", "by", "a", "set", "of", "search", "params", ".", "You", "must", "be", "in", "application", "context", "." ]
python
train
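A quick sketch of the filtering behaviour, read straight off the code above -- empty values are dropped first, and only an 'id' or 'email' key triggers a lookup:

user = find_user(dict(id=None, email='alice@example.com'))
# -> first user matching the email, or None

user = find_user(dict(username='alice'))
# -> None: no 'id' or 'email' key survives, so no query is made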
twisted/mantissa
xmantissa/smtp.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/smtp.py#L190-L207
def parseAddress(address): """ Parse the given RFC 2821 email address into a structured object. @type address: C{str} @param address: The address to parse. @rtype: L{Address} @raise xmantissa.error.ArgumentError: The given string was not a valid RFC 2821 address. """ parts = [] parser = _AddressParser() end = parser(parts, address) if end != len(address): raise InvalidTrailingBytes() return parts[0]
[ "def", "parseAddress", "(", "address", ")", ":", "parts", "=", "[", "]", "parser", "=", "_AddressParser", "(", ")", "end", "=", "parser", "(", "parts", ",", "address", ")", "if", "end", "!=", "len", "(", "address", ")", ":", "raise", "InvalidTrailingBytes", "(", ")", "return", "parts", "[", "0", "]" ]
Parse the given RFC 2821 email address into a structured object. @type address: C{str} @param address: The address to parse. @rtype: L{Address} @raise xmantissa.error.ArgumentError: The given string was not a valid RFC 2821 address.
[ "Parse", "the", "given", "RFC", "2821", "email", "address", "into", "a", "structured", "object", "." ]
python
train
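A two-line sketch of the contract: the whole string must be consumed, otherwise the trailing-bytes error is raised (assuming the RFC 2821 grammar stops at the space, which the address syntax suggests):

addr = parseAddress('alice@example.com')     # -> an Address instance
parseAddress('alice@example.com extra')      # raises InvalidTrailingBytes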
mdickinson/bigfloat
bigfloat/core.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L1560-L1570
def log10(x, context=None): """ Return the base-ten logarithm of x. """ return _apply_function_in_current_context( BigFloat, mpfr.mpfr_log10, (BigFloat._implicit_convert(x),), context, )
[ "def", "log10", "(", "x", ",", "context", "=", "None", ")", ":", "return", "_apply_function_in_current_context", "(", "BigFloat", ",", "mpfr", ".", "mpfr_log10", ",", "(", "BigFloat", ".", "_implicit_convert", "(", "x", ")", ",", ")", ",", "context", ",", ")" ]
Return the base-ten logarithm of x.
[ "Return", "the", "base", "-", "ten", "logarithm", "of", "x", "." ]
python
train
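A short sketch of the wrapper in use; bigfloat's `precision` context manager sets the working precision of the current context:

from bigfloat import precision

with precision(53):
    print(log10(1000))   # -> 3 (exactly representable at this precision)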
blockstack/blockstack-core
blockstack/lib/atlas.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L3271-L3289
def store_zonefile_data( self, fetched_zfhash, zonefile_data, min_block_height, peer_hostport, con, path ): """ Store the fetched zonefile (as a serialized string) to storage and cache it locally. Update internal state to mark it present Return True on success Return False on error """ rc = add_atlas_zonefile_data( zonefile_data, self.zonefile_dir ) if not rc: log.error("%s: Failed to store zonefile %s" % (self.hostport, fetched_zfhash)) else: # stored! remember it log.debug("%s: got %s from %s" % (self.hostport, fetched_zfhash, peer_hostport)) # update internal state self.set_zonefile_present(fetched_zfhash, min_block_height, con=con, path=path) return rc
[ "def", "store_zonefile_data", "(", "self", ",", "fetched_zfhash", ",", "zonefile_data", ",", "min_block_height", ",", "peer_hostport", ",", "con", ",", "path", ")", ":", "rc", "=", "add_atlas_zonefile_data", "(", "zonefile_data", ",", "self", ".", "zonefile_dir", ")", "if", "not", "rc", ":", "log", ".", "error", "(", "\"%s: Failed to store zonefile %s\"", "%", "(", "self", ".", "hostport", ",", "fetched_zfhash", ")", ")", "else", ":", "# stored! remember it", "log", ".", "debug", "(", "\"%s: got %s from %s\"", "%", "(", "self", ".", "hostport", ",", "fetched_zfhash", ",", "peer_hostport", ")", ")", "# update internal state", "self", ".", "set_zonefile_present", "(", "fetched_zfhash", ",", "min_block_height", ",", "con", "=", "con", ",", "path", "=", "path", ")", "return", "rc" ]
Store the fetched zonefile (as a serialized string) to storage and cache it locally. Update internal state to mark it present Return True on success Return False on error
[ "Store", "the", "fetched", "zonefile", "(", "as", "a", "serialized", "string", ")", "to", "storage", "and", "cache", "it", "locally", ".", "Update", "internal", "state", "to", "mark", "it", "present", "Return", "True", "on", "success", "Return", "False", "on", "error" ]
python
train
vallis/libstempo
libstempo/spharmORFbasis.py
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/spharmORFbasis.py#L281-L297
def real_rotated_Gammas(m,l,phi1,phi2,theta1,theta2,gamma_ml): """ This function returns the real-valued form of the Overlap Reduction Functions, see Eqs 47 in Mingarelli et al, 2013. """ if m>0: ans=(1./sqrt(2))*(rotated_Gamma_ml(m,l,phi1,phi2,theta1,theta2,gamma_ml) + \ (-1)**m*rotated_Gamma_ml(-m,l,phi1,phi2,theta1,theta2,gamma_ml)) return ans.real if m==0: return rotated_Gamma_ml(0,l,phi1,phi2,theta1,theta2,gamma_ml).real if m<0: ans=(1./sqrt(2)/complex(0.,1))*(rotated_Gamma_ml(-m,l,phi1,phi2,theta1,theta2,gamma_ml) - \ (-1)**m*rotated_Gamma_ml(m,l,phi1,phi2,theta1,theta2,gamma_ml)) return ans.real
[ "def", "real_rotated_Gammas", "(", "m", ",", "l", ",", "phi1", ",", "phi2", ",", "theta1", ",", "theta2", ",", "gamma_ml", ")", ":", "if", "m", ">", "0", ":", "ans", "=", "(", "1.", "/", "sqrt", "(", "2", ")", ")", "*", "(", "rotated_Gamma_ml", "(", "m", ",", "l", ",", "phi1", ",", "phi2", ",", "theta1", ",", "theta2", ",", "gamma_ml", ")", "+", "(", "-", "1", ")", "**", "m", "*", "rotated_Gamma_ml", "(", "-", "m", ",", "l", ",", "phi1", ",", "phi2", ",", "theta1", ",", "theta2", ",", "gamma_ml", ")", ")", "return", "ans", ".", "real", "if", "m", "==", "0", ":", "return", "rotated_Gamma_ml", "(", "0", ",", "l", ",", "phi1", ",", "phi2", ",", "theta1", ",", "theta2", ",", "gamma_ml", ")", ".", "real", "if", "m", "<", "0", ":", "ans", "=", "(", "1.", "/", "sqrt", "(", "2", ")", "/", "complex", "(", "0.", ",", "1", ")", ")", "*", "(", "rotated_Gamma_ml", "(", "-", "m", ",", "l", ",", "phi1", ",", "phi2", ",", "theta1", ",", "theta2", ",", "gamma_ml", ")", "-", "(", "-", "1", ")", "**", "m", "*", "rotated_Gamma_ml", "(", "m", ",", "l", ",", "phi1", ",", "phi2", ",", "theta1", ",", "theta2", ",", "gamma_ml", ")", ")", "return", "ans", ".", "real" ]
This function returns the real-valued form of the Overlap Reduction Functions, see Eqs 47 in Mingarelli et al, 2013.
[ "This", "function", "returns", "the", "real", "-", "valued", "form", "of", "the", "Overlap", "Reduction", "Functions", "see", "Eqs", "47", "in", "Mingarelli", "et", "al", "2013", "." ]
python
train
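A standalone sketch of the real-form combination real_rotated_Gammas applies above, written for a generic complex coefficient function c(m); the toy coefficients are hypothetical and merely stand in for the rotated Gamma_ml values.

from math import sqrt

def real_form(c, m):
    # m > 0 and m < 0 mix the +m and -m coefficients; m == 0 is already real.
    if m > 0:
        return ((c(m) + (-1) ** m * c(-m)) / sqrt(2)).real
    if m == 0:
        return c(0).real
    return ((c(-m) - (-1) ** m * c(m)) / (sqrt(2) * 1j)).real

c = lambda m: complex(m, m * m)  # toy coefficients
print(real_form(c, 2), real_form(c, 0), real_form(c, -2))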
CxAalto/gtfspy
gtfspy/filter.py
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/filter.py#L234-L267
def _filter_by_calendar(self): """ update calendar table's services :param copy_db_conn: :param start_date: :param end_date: :return: """ if (self.start_date is not None) and (self.end_date is not None): logging.info("Making date extract") start_date_query = "UPDATE calendar " \ "SET start_date='{start_date}' " \ "WHERE start_date<'{start_date}' ".format(start_date=self.start_date) self.copy_db_conn.execute(start_date_query) end_date_query = "UPDATE calendar " \ "SET end_date='{end_date_to_include}' " \ "WHERE end_date>'{end_date_to_include}' " \ .format(end_date_to_include=self.end_date_to_include_str) self.copy_db_conn.execute(end_date_query) # then recursively delete further data: self.copy_db_conn.execute(DELETE_TRIPS_NOT_IN_DAYS_SQL) self.copy_db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL) self.copy_db_conn.execute(DELETE_STOP_TIMES_NOT_REFERENCED_IN_TRIPS_SQL) delete_stops_not_in_stop_times_and_not_as_parent_stop(self.copy_db_conn) self.copy_db_conn.execute(DELETE_STOP_DISTANCE_ENTRIES_WITH_NONEXISTENT_STOPS_SQL) self.copy_db_conn.execute(DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL) self.copy_db_conn.execute(DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL) self.copy_db_conn.commit() return FILTERED else: return NOT_FILTERED
[ "def", "_filter_by_calendar", "(", "self", ")", ":", "if", "(", "self", ".", "start_date", "is", "not", "None", ")", "and", "(", "self", ".", "end_date", "is", "not", "None", ")", ":", "logging", ".", "info", "(", "\"Making date extract\"", ")", "start_date_query", "=", "\"UPDATE calendar \"", "\"SET start_date='{start_date}' \"", "\"WHERE start_date<'{start_date}' \"", ".", "format", "(", "start_date", "=", "self", ".", "start_date", ")", "self", ".", "copy_db_conn", ".", "execute", "(", "start_date_query", ")", "end_date_query", "=", "\"UPDATE calendar \"", "\"SET end_date='{end_date_to_include}' \"", "\"WHERE end_date>'{end_date_to_include}' \"", ".", "format", "(", "end_date_to_include", "=", "self", ".", "end_date_to_include_str", ")", "self", ".", "copy_db_conn", ".", "execute", "(", "end_date_query", ")", "# then recursively delete further data:", "self", ".", "copy_db_conn", ".", "execute", "(", "DELETE_TRIPS_NOT_IN_DAYS_SQL", ")", "self", ".", "copy_db_conn", ".", "execute", "(", "DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL", ")", "self", ".", "copy_db_conn", ".", "execute", "(", "DELETE_STOP_TIMES_NOT_REFERENCED_IN_TRIPS_SQL", ")", "delete_stops_not_in_stop_times_and_not_as_parent_stop", "(", "self", ".", "copy_db_conn", ")", "self", ".", "copy_db_conn", ".", "execute", "(", "DELETE_STOP_DISTANCE_ENTRIES_WITH_NONEXISTENT_STOPS_SQL", ")", "self", ".", "copy_db_conn", ".", "execute", "(", "DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL", ")", "self", ".", "copy_db_conn", ".", "execute", "(", "DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL", ")", "self", ".", "copy_db_conn", ".", "commit", "(", ")", "return", "FILTERED", "else", ":", "return", "NOT_FILTERED" ]
update calendar table's services :param copy_db_conn: :param start_date: :param end_date: :return:
[ "update", "calendar", "table", "s", "services", ":", "param", "copy_db_conn", ":", ":", "param", "start_date", ":", ":", "param", "end_date", ":", ":", "return", ":" ]
python
valid
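A self-contained sketch of the date-clamping UPDATEs in _filter_by_calendar above, run against a toy sqlite3 calendar table. The table contents are hypothetical, and parameter binding replaces the string formatting used in the original; GTFS stores dates as YYYYMMDD strings, so lexicographic comparison matches date order.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE calendar (service_id TEXT, start_date TEXT, end_date TEXT)")
conn.execute("INSERT INTO calendar VALUES ('weekday', '20190101', '20191231')")

start, end_inclusive = "20190601", "20190630"
# Clamp service windows that start before or end after the filter range.
conn.execute("UPDATE calendar SET start_date=? WHERE start_date<?", (start, start))
conn.execute("UPDATE calendar SET end_date=? WHERE end_date>?", (end_inclusive, end_inclusive))
print(conn.execute("SELECT * FROM calendar").fetchone())
# -> ('weekday', '20190601', '20190630')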
Loudr/asana-hub
asana_hub/json_data.py
https://github.com/Loudr/asana-hub/blob/af996ce890ed23d8ede5bf68dcd318e3438829cb/asana_hub/json_data.py#L68-L112
def apply(self, key, value, prompt=None, on_load=lambda a: a, on_save=lambda a: a): """Applies a setting value to a key, if the value is not `None`. Returns without prompting if either of the following: * `value` is not `None` * already present in the dictionary Args: prompt: May either be a string to prompt via `raw_input` or a method (callable) that returns the value. on_load: lambda. Value is passed through here after loaded. on_save: lambda. Value is saved as this value. """ # Reset value if flag exists without value if value == '': value = None if key and self.data.has_key(key): del self.data[key] # If value is explicitly set from args. if value is not None: value = on_load(value) if key: self.data[key] = on_save(value) return value elif not key or not self.has_key(key): if callable(prompt): value = prompt() elif prompt is not None: value = raw_input(prompt + ": ") if value is None: if self.data.has_key(key): del self.data[key] return None self.data[key] = on_save(value) return value return on_load(self.data[key])
[ "def", "apply", "(", "self", ",", "key", ",", "value", ",", "prompt", "=", "None", ",", "on_load", "=", "lambda", "a", ":", "a", ",", "on_save", "=", "lambda", "a", ":", "a", ")", ":", "# Reset value if flag exists without value", "if", "value", "==", "''", ":", "value", "=", "None", "if", "key", "and", "self", ".", "data", ".", "has_key", "(", "key", ")", ":", "del", "self", ".", "data", "[", "key", "]", "# If value is explicitly set from args.", "if", "value", "is", "not", "None", ":", "value", "=", "on_load", "(", "value", ")", "if", "key", ":", "self", ".", "data", "[", "key", "]", "=", "on_save", "(", "value", ")", "return", "value", "elif", "not", "key", "or", "not", "self", ".", "has_key", "(", "key", ")", ":", "if", "callable", "(", "prompt", ")", ":", "value", "=", "prompt", "(", ")", "elif", "prompt", "is", "not", "None", ":", "value", "=", "raw_input", "(", "prompt", "+", "\": \"", ")", "if", "value", "is", "None", ":", "if", "self", ".", "data", ".", "has_key", "(", "key", ")", ":", "del", "self", ".", "data", "[", "key", "]", "return", "None", "self", ".", "data", "[", "key", "]", "=", "on_save", "(", "value", ")", "return", "value", "return", "on_load", "(", "self", ".", "data", "[", "key", "]", ")" ]
Applies a setting value to a key, if the value is not `None`. Returns without prompting if either of the following: * `value` is not `None` * already present in the dictionary Args: prompt: May either be a string to prompt via `raw_input` or a method (callable) that returns the value. on_load: lambda. Value is passed through here after loaded. on_save: lambda. Value is saved as this value.
[ "Applies", "a", "setting", "value", "to", "a", "key", "if", "the", "value", "is", "not", "None", "." ]
python
test
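A reduced sketch of the precedence order the apply method above encodes: an explicit value wins and is stored, then the previously stored value, then the prompt. The Settings class here is hypothetical and drops the on_load/on_save hooks.

class Settings(object):
    def __init__(self):
        self.data = {}

    def apply(self, key, value, prompt=None):
        if value is not None:          # explicit value always wins
            self.data[key] = value
            return value
        if key in self.data:           # otherwise reuse what was stored
            return self.data[key]
        if callable(prompt):           # last resort: ask and remember
            value = prompt()
            if value is not None:
                self.data[key] = value
        return value

s = Settings()
print(s.apply("project", "asana-hub"))        # -> 'asana-hub'
print(s.apply("project", None, lambda: "x"))  # -> 'asana-hub' (stored value wins)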
RIPE-NCC/ripe-atlas-cousteau
ripe/atlas/cousteau/measurement.py
https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/measurement.py#L100-L132
def v2_translator(self, option): """ This is a temporary function that helps move from the v1 API to v2 without breaking already running scripts and keeps backwards compatibility. Translates an option name from the v1 API to its renamed v2 equivalent. """ new_option = option new_value = getattr(self, option) renaming_pairs = { "dontfrag": "dont_fragment", "maxhops": "max_hops", "firsthop": "first_hop", "use_NSID": "set_nsid_bit", "cd": "set_cd_bit", "do": "set_do_bit", "qbuf": "include_qbuf", "recursion_desired": "set_rd_bit", "noabuf": "include_abuf" } if option in renaming_pairs.keys(): warninglog = ( "DeprecationWarning: {0} option has been deprecated and " "renamed to {1}." ).format(option, renaming_pairs[option]) print(warninglog) new_option = renaming_pairs[option] # noabuf was changed to include_abuf so we need a double-negative if option == "noabuf": new_value = not new_value return new_option, new_value
[ "def", "v2_translator", "(", "self", ",", "option", ")", ":", "new_option", "=", "option", "new_value", "=", "getattr", "(", "self", ",", "option", ")", "renaming_pairs", "=", "{", "\"dontfrag\"", ":", "\"dont_fragment\"", ",", "\"maxhops\"", ":", "\"max_hops\"", ",", "\"firsthop\"", ":", "\"first_hop\"", ",", "\"use_NSID\"", ":", "\"set_nsid_bit\"", ",", "\"cd\"", ":", "\"set_cd_bit\"", ",", "\"do\"", ":", "\"set_do_bit\"", ",", "\"qbuf\"", ":", "\"include_qbuf\"", ",", "\"recursion_desired\"", ":", "\"set_rd_bit\"", ",", "\"noabuf\"", ":", "\"include_abuf\"", "}", "if", "option", "in", "renaming_pairs", ".", "keys", "(", ")", ":", "warninglog", "=", "(", "\"DeprecationWarning: {0} option has been deprecated and \"", "\"renamed to {1}.\"", ")", ".", "format", "(", "option", ",", "renaming_pairs", "[", "option", "]", ")", "print", "(", "warninglog", ")", "new_option", "=", "renaming_pairs", "[", "option", "]", "# noabuf was changed to include_abuf so we need a double-negative", "if", "option", "==", "\"noabuf\"", ":", "new_value", "=", "not", "new_value", "return", "new_option", ",", "new_value" ]
This is a temporary function that helps move from the v1 API to v2 without breaking already running scripts and keeps backwards compatibility. Translates an option name from the v1 API to its renamed v2 equivalent.
[ "This", "is", "a", "temporary", "function", "that", "helps", "move", "from", "v1", "API", "to", "v2", "without", "breaking", "already", "running", "script", "and", "keep", "backwards", "compatibility", ".", "Translates", "option", "name", "from", "API", "v1", "to", "renamed", "one", "of", "v2", "API", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/textio.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/textio.py#L516-L532
def printable(data): """ Replace unprintable characters with dots. @type data: str @param data: Binary data. @rtype: str @return: Printable text. """ result = '' for c in data: if 32 < ord(c) < 128: result += c else: result += '.' return result
[ "def", "printable", "(", "data", ")", ":", "result", "=", "''", "for", "c", "in", "data", ":", "if", "32", "<", "ord", "(", "c", ")", "<", "128", ":", "result", "+=", "c", "else", ":", "result", "+=", "'.'", "return", "result" ]
Replace unprintable characters with dots. @type data: str @param data: Binary data. @rtype: str @return: Printable text.
[ "Replace", "unprintable", "characters", "with", "dots", "." ]
python
train
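A usage sketch for printable above. Note two quirks of the 32 < ord(c) < 128 test: the space character (32) is itself replaced by a dot, and DEL (127) passes through. The function also assumes Python 2, where iterating a str yields one-character strings.

print(printable('\x01Hello, world\n'))
# -> '.Hello,.world.'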
Jarn/jarn.viewdoc
jarn/viewdoc/viewdoc.py
https://github.com/Jarn/jarn.viewdoc/blob/59ae82fd1658889c41096c1d8c08dcb1047dc349/jarn/viewdoc/viewdoc.py#L314-L324
def convert_string(self, rest): """Convert a reST string to an HTML string. """ try: html = publish_string(rest, writer_name='html') except SystemExit as e: err_exit('HTML conversion failed with error: %s' % e.code) else: if sys.version_info[0] >= 3: return html.decode('utf-8') return html
[ "def", "convert_string", "(", "self", ",", "rest", ")", ":", "try", ":", "html", "=", "publish_string", "(", "rest", ",", "writer_name", "=", "'html'", ")", "except", "SystemExit", "as", "e", ":", "err_exit", "(", "'HTML conversion failed with error: %s'", "%", "e", ".", "code", ")", "else", ":", "if", "sys", ".", "version_info", "[", "0", "]", ">=", "3", ":", "return", "html", ".", "decode", "(", "'utf-8'", ")", "return", "html" ]
Convert a reST string to an HTML string.
[ "Convert", "a", "reST", "string", "to", "an", "HTML", "string", "." ]
python
train
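A hedged usage sketch of the docutils call that convert_string above wraps; publish_string with writer_name='html' is the documented docutils API, and it returns bytes on Python 3, which is why the method decodes.

from docutils.core import publish_string

html = publish_string("Hello *world*", writer_name="html")
print(html.decode("utf-8")[:40])  # the XML prolog of the generated page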
cdgriffith/Reusables
reusables/cli.py
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/cli.py#L29-L44
def cmd(command, ignore_stderr=False, raise_on_return=False, timeout=None, encoding="utf-8"): """ Run a shell command and have it automatically decoded and printed :param command: Command to run as str :param ignore_stderr: To not print stderr :param raise_on_return: Run CompletedProcess.check_returncode() :param timeout: timeout to pass to communicate if python 3 :param encoding: How the output should be decoded """ result = run(command, timeout=timeout, shell=True) if raise_on_return: result.check_returncode() print(result.stdout.decode(encoding)) if not ignore_stderr and result.stderr: print(result.stderr.decode(encoding))
[ "def", "cmd", "(", "command", ",", "ignore_stderr", "=", "False", ",", "raise_on_return", "=", "False", ",", "timeout", "=", "None", ",", "encoding", "=", "\"utf-8\"", ")", ":", "result", "=", "run", "(", "command", ",", "timeout", "=", "timeout", ",", "shell", "=", "True", ")", "if", "raise_on_return", ":", "result", ".", "check_returncode", "(", ")", "print", "(", "result", ".", "stdout", ".", "decode", "(", "encoding", ")", ")", "if", "not", "ignore_stderr", "and", "result", ".", "stderr", ":", "print", "(", "result", ".", "stderr", ".", "decode", "(", "encoding", ")", ")" ]
Run a shell command and have it automatically decoded and printed :param command: Command to run as str :param ignore_stderr: To not print stderr :param raise_on_return: Run CompletedProcess.check_returncode() :param timeout: timeout to pass to communicate if python 3 :param encoding: How the output should be decoded
[ "Run", "a", "shell", "command", "and", "have", "it", "automatically", "decoded", "and", "printed" ]
python
train
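The run called by cmd above is a reusables wrapper, not subprocess.run directly; a minimal stdlib-only sketch of the same behavior (Python 3.7+ for capture_output) might look like this.

import subprocess

def cmd_sketch(command, ignore_stderr=False, raise_on_return=False,
               timeout=None, encoding="utf-8"):
    # Capture both streams so the decode/print steps below have data to work on.
    result = subprocess.run(command, shell=True, timeout=timeout,
                            capture_output=True)
    if raise_on_return:
        result.check_returncode()
    print(result.stdout.decode(encoding))
    if not ignore_stderr and result.stderr:
        print(result.stderr.decode(encoding))

cmd_sketch("echo hello")  # prints: hello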
nugget/python-insteonplm
insteonplm/tools.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/tools.py#L792-L832
async def do_del_aldb(self, args): """Delete device All-Link record. WARNING THIS METHOD CAN DAMAGE YOUR DEVICE IF USED INCORRECTLY. Please ensure the memory id is appropriate for the device. You must load the ALDB of the device before using this method. The memory id must be an existing memory id in the ALDB or this method will return an error. If you are looking to create a new link between two devices, use the `link_devices` command or the `start_all_linking` command. Usage: del_aldb addr memory Required Parameters: addr: Insteon address of the device memory: record ID of the record to delete (i.e. 0fff) """ params = args.split() addr = None mem_bytes = None memory = None try: addr = Address(params[0]) mem_bytes = binascii.unhexlify(params[1]) memory = int.from_bytes(mem_bytes, byteorder='big') _LOGGING.info('address: %s', addr) _LOGGING.info('memory: %04x', memory) except IndexError: _LOGGING.error('Device address and memory are required.') self.do_help('del_aldb') except ValueError: _LOGGING.error('Value error - Check parameters') self.do_help('del_aldb') if addr and memory: await self.tools.del_aldb(addr, memory)
[ "async", "def", "do_del_aldb", "(", "self", ",", "args", ")", ":", "params", "=", "args", ".", "split", "(", ")", "addr", "=", "None", "mem_bytes", "=", "None", "memory", "=", "None", "try", ":", "addr", "=", "Address", "(", "params", "[", "0", "]", ")", "mem_bytes", "=", "binascii", ".", "unhexlify", "(", "params", "[", "1", "]", ")", "memory", "=", "int", ".", "from_bytes", "(", "mem_bytes", ",", "byteorder", "=", "'big'", ")", "_LOGGING", ".", "info", "(", "'address: %s'", ",", "addr", ")", "_LOGGING", ".", "info", "(", "'memory: %04x'", ",", "memory", ")", "except", "IndexError", ":", "_LOGGING", ".", "error", "(", "'Device address and memory are required.'", ")", "self", ".", "do_help", "(", "'del_aldb'", ")", "except", "ValueError", ":", "_LOGGING", ".", "error", "(", "'Value error - Check parameters'", ")", "self", ".", "do_help", "(", "'write_aldb'", ")", "if", "addr", "and", "memory", ":", "await", "self", ".", "tools", ".", "del_aldb", "(", "addr", ",", "memory", ")" ]
Delete device All-Link record. WARNING THIS METHOD CAN DAMAGE YOUR DEVICE IF USED INCORRECTLY. Please ensure the memory id is appropriate for the device. You must load the ALDB of the device before using this method. The memory id must be an existing memory id in the ALDB or this method will return an error. If you are looking to create a new link between two devices, use the `link_devices` command or the `start_all_linking` command. Usage: del_aldb addr memory Required Parameters: addr: Insteon address of the device memory: record ID of the record to delete (i.e. 0fff)
[ "Delete", "device", "All", "-", "Link", "record", "." ]
python
train
toumorokoshi/sprinter
sprinter/environment.py
https://github.com/toumorokoshi/sprinter/blob/846697a7a087e69c61d075232e754d6975a64152/sprinter/environment.py#L433-L467
def _finalize(self): """ command to run at the end of sprinter's run """ self.logger.info("Finalizing...") self.write_manifest() if self.directory.rewrite_config: # always ensure .rc is written (sourcing .env) self.directory.add_to_rc('') # prepend brew for global installs if system.is_osx() and self.main_manifest.is_affirmative('config', 'use_global_packagemanagers'): self.directory.add_to_env('__sprinter_prepend_path "%s" PATH' % '/usr/local/bin') self.directory.add_to_env('__sprinter_prepend_path "%s" PATH' % self.directory.bin_path()) self.directory.add_to_env('__sprinter_prepend_path "%s" LIBRARY_PATH' % self.directory.lib_path()) self.directory.add_to_env('__sprinter_prepend_path "%s" C_INCLUDE_PATH' % self.directory.include_path()) self.directory.finalize() self.injections.commit() self.global_injections.commit() if not os.path.exists(os.path.join(self.root, ".global")): self.logger.debug("Global directory doesn't exist! creating...") os.makedirs(os.path.join(self.root, ".global")) self.logger.debug("Writing shell util file...") with open(self.shell_util_path, 'w+') as fh: fh.write(shell_utils_template) if self.error_occured: raise SprinterException("Error occurred!") if self.message_success(): self.logger.info(self.message_success()) self.logger.info("Done!") self.logger.info("NOTE: Please remember to open new shells/terminals to use the modified environment")
[ "def", "_finalize", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Finalizing...\"", ")", "self", ".", "write_manifest", "(", ")", "if", "self", ".", "directory", ".", "rewrite_config", ":", "# always ensure .rc is written (sourcing .env)", "self", ".", "directory", ".", "add_to_rc", "(", "''", ")", "# prepend brew for global installs", "if", "system", ".", "is_osx", "(", ")", "and", "self", ".", "main_manifest", ".", "is_affirmative", "(", "'config'", ",", "'use_global_packagemanagers'", ")", ":", "self", ".", "directory", ".", "add_to_env", "(", "'__sprinter_prepend_path \"%s\" PATH'", "%", "'/usr/local/bin'", ")", "self", ".", "directory", ".", "add_to_env", "(", "'__sprinter_prepend_path \"%s\" PATH'", "%", "self", ".", "directory", ".", "bin_path", "(", ")", ")", "self", ".", "directory", ".", "add_to_env", "(", "'__sprinter_prepend_path \"%s\" LIBRARY_PATH'", "%", "self", ".", "directory", ".", "lib_path", "(", ")", ")", "self", ".", "directory", ".", "add_to_env", "(", "'__sprinter_prepend_path \"%s\" C_INCLUDE_PATH'", "%", "self", ".", "directory", ".", "include_path", "(", ")", ")", "self", ".", "directory", ".", "finalize", "(", ")", "self", ".", "injections", ".", "commit", "(", ")", "self", ".", "global_injections", ".", "commit", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "self", ".", "root", ",", "\".global\"", ")", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Global directory doesn't exist! creating...\"", ")", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "self", ".", "root", ",", "\".global\"", ")", ")", "self", ".", "logger", ".", "debug", "(", "\"Writing shell util file...\"", ")", "with", "open", "(", "self", ".", "shell_util_path", ",", "'w+'", ")", "as", "fh", ":", "fh", ".", "write", "(", "shell_utils_template", ")", "if", "self", ".", "error_occured", ":", "raise", "SprinterException", "(", "\"Error occured!\"", ")", "if", "self", ".", "message_success", "(", ")", ":", "self", ".", "logger", ".", "info", "(", "self", ".", "message_success", "(", ")", ")", "self", ".", "logger", ".", "info", "(", "\"Done!\"", ")", "self", ".", "logger", ".", "info", "(", "\"NOTE: Please remember to open new shells/terminals to use the modified environment\"", ")" ]
command to run at the end of sprinter's run
[ "command", "to", "run", "at", "the", "end", "of", "sprinter", "s", "run" ]
python
train
summernote/django-summernote
django_summernote/utils.py
https://github.com/summernote/django-summernote/blob/bc7fbbf065d88a909fe3e1533c84110e0dd132bc/django_summernote/utils.py#L180-L199
def get_attachment_model(): """ Returns the Attachment model that is active in this project. """ try: from .models import AbstractAttachment klass = apps.get_model(config["attachment_model"]) if not issubclass(klass, AbstractAttachment): raise ImproperlyConfigured( "SUMMERNOTE_CONFIG['attachment_model'] refers to model '%s' that is not " "inherited from 'django_summernote.models.AbstractAttachment'" % config["attachment_model"] ) return klass except ValueError: raise ImproperlyConfigured("SUMMERNOTE_CONFIG['attachment_model'] must be of the form 'app_label.model_name'") except LookupError: raise ImproperlyConfigured( "SUMMERNOTE_CONFIG['attachment_model'] refers to model '%s' that has not been installed" % config["attachment_model"] )
[ "def", "get_attachment_model", "(", ")", ":", "try", ":", "from", ".", "models", "import", "AbstractAttachment", "klass", "=", "apps", ".", "get_model", "(", "config", "[", "\"attachment_model\"", "]", ")", "if", "not", "issubclass", "(", "klass", ",", "AbstractAttachment", ")", ":", "raise", "ImproperlyConfigured", "(", "\"SUMMERNOTE_CONFIG['attachment_model'] refers to model '%s' that is not \"", "\"inherited from 'django_summernote.models.AbstractAttachment'\"", "%", "config", "[", "\"attachment_model\"", "]", ")", "return", "klass", "except", "ValueError", ":", "raise", "ImproperlyConfigured", "(", "\"SUMMERNOTE_CONFIG['attachment_model'] must be of the form 'app_label.model_name'\"", ")", "except", "LookupError", ":", "raise", "ImproperlyConfigured", "(", "\"SUMMERNOTE_CONFIG['attachment_model'] refers to model '%s' that has not been installed\"", "%", "config", "[", "\"attachment_model\"", "]", ")" ]
Returns the Attachment model that is active in this project.
[ "Returns", "the", "Attachment", "model", "that", "is", "active", "in", "this", "project", "." ]
python
train
CalebBell/fluids
fluids/nrlmsise00/nrlmsise_00.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/nrlmsise00/nrlmsise_00.py#L278-L303
def splint(xa, ya, y2a, n, x, y): ''' /* CALCULATE CUBIC SPLINE INTERP VALUE * ADAPTED FROM NUMERICAL RECIPES BY PRESS ET AL. * XA,YA: ARRAYS OF TABULATED FUNCTION IN ASCENDING ORDER BY X * Y2A: ARRAY OF SECOND DERIVATIVES * N: SIZE OF ARRAYS XA,YA,Y2A * X: ABSCISSA FOR INTERPOLATION * Y: OUTPUT VALUE */ ''' klo = 0 khi = n-1 while((khi-klo)>1): k=int((khi+klo)/2); if (xa[k]>x): khi = k else: klo = k h = xa[khi] - xa[klo]; a = (xa[khi] - x)/h; b = (x - xa[klo])/h; yi = a * ya[klo] + b * ya[khi] + ((a*a*a - a) * y2a[klo] + (b*b*b - b) * y2a[khi]) * h * h/6.0; y[0] = yi #may not need this return
[ "def", "splint", "(", "xa", ",", "ya", ",", "y2a", ",", "n", ",", "x", ",", "y", ")", ":", "klo", "=", "0", "khi", "=", "n", "-", "1", "while", "(", "(", "khi", "-", "klo", ")", ">", "1", ")", ":", "k", "=", "int", "(", "(", "khi", "+", "klo", ")", "/", "2", ")", "if", "(", "xa", "[", "k", "]", ">", "x", ")", ":", "khi", "=", "k", "else", ":", "klo", "=", "k", "h", "=", "xa", "[", "khi", "]", "-", "xa", "[", "klo", "]", "a", "=", "(", "xa", "[", "khi", "]", "-", "x", ")", "/", "h", "b", "=", "(", "x", "-", "xa", "[", "klo", "]", ")", "/", "h", "yi", "=", "a", "*", "ya", "[", "klo", "]", "+", "b", "*", "ya", "[", "khi", "]", "+", "(", "(", "a", "*", "a", "*", "a", "-", "a", ")", "*", "y2a", "[", "klo", "]", "+", "(", "b", "*", "b", "*", "b", "-", "b", ")", "*", "y2a", "[", "khi", "]", ")", "*", "h", "*", "h", "/", "6.0", "y", "[", "0", "]", "=", "yi", "#may not need this", "return" ]
/* CALCULATE CUBIC SPLINE INTERP VALUE * ADAPTED FROM NUMERICAL RECIPES BY PRESS ET AL. * XA,YA: ARRAYS OF TABULATED FUNCTION IN ASCENDING ORDER BY X * Y2A: ARRAY OF SECOND DERIVATIVES * N: SIZE OF ARRAYS XA,YA,Y2A * X: ABSCISSA FOR INTERPOLATION * Y: OUTPUT VALUE */
[ "/", "*", "CALCULATE", "CUBIC", "SPLINE", "INTERP", "VALUE", "*", "ADAPTED", "FROM", "NUMERICAL", "RECIPES", "BY", "PRESS", "ET", "AL", ".", "*", "XA", "YA", ":", "ARRAYS", "OF", "TABULATED", "FUNCTION", "IN", "ASCENDING", "ORDER", "BY", "X", "*", "Y2A", ":", "ARRAY", "OF", "SECOND", "DERIVATIVES", "*", "N", ":", "SIZE", "OF", "ARRAYS", "XA", "YA", "Y2A", "*", "X", ":", "ABSCISSA", "FOR", "INTERPOLATION", "*", "Y", ":", "OUTPUT", "VALUE", "*", "/" ]
python
train
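A quick check of splint above on y = x**2, whose second derivative is exactly 2 everywhere, so the cubic interpolant reproduces the function exactly; y is a one-element output list, mirroring the adapted C interface. This assumes splint from the record above is in scope.

xa = [0.0, 1.0, 2.0, 3.0]
ya = [x * x for x in xa]
y2a = [2.0] * 4   # exact second derivatives of x**2
y = [0.0]         # output slot
splint(xa, ya, y2a, 4, 1.5, y)
print(y[0])       # -> 2.25, i.e. 1.5**2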
google/prettytensor
prettytensor/tutorial/data_utils.py
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/tutorial/data_utils.py#L37-L46
def maybe_download(url, filename): """Download the data from Yann's website, unless it's already here.""" if not os.path.exists(WORK_DIRECTORY): os.mkdir(WORK_DIRECTORY) filepath = os.path.join(WORK_DIRECTORY, filename) if not os.path.exists(filepath): filepath, _ = request.urlretrieve(url + filename, filepath) statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') return filepath
[ "def", "maybe_download", "(", "url", ",", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "WORK_DIRECTORY", ")", ":", "os", ".", "mkdir", "(", "WORK_DIRECTORY", ")", "filepath", "=", "os", ".", "path", ".", "join", "(", "WORK_DIRECTORY", ",", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "filepath", ")", ":", "filepath", ",", "_", "=", "request", ".", "urlretrieve", "(", "url", "+", "filename", ",", "filepath", ")", "statinfo", "=", "os", ".", "stat", "(", "filepath", ")", "print", "(", "'Successfully downloaded'", ",", "filename", ",", "statinfo", ".", "st_size", ",", "'bytes.'", ")", "return", "filepath" ]
Download the data from Yann's website, unless it's already here.
[ "Download", "the", "data", "from", "Yann", "s", "website", "unless", "it", "s", "already", "here", "." ]
python
train
SeleniumHQ/selenium
py/selenium/webdriver/remote/webelement.py
https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/remote/webelement.py#L674-L700
def find_element(self, by=By.ID, value=None): """ Find an element given a By strategy and locator. Prefer the find_element_by_* methods when possible. :Usage: :: element = element.find_element(By.ID, 'foo') :rtype: WebElement """ if self._w3c: if by == By.ID: by = By.CSS_SELECTOR value = '[id="%s"]' % value elif by == By.TAG_NAME: by = By.CSS_SELECTOR elif by == By.CLASS_NAME: by = By.CSS_SELECTOR value = ".%s" % value elif by == By.NAME: by = By.CSS_SELECTOR value = '[name="%s"]' % value return self._execute(Command.FIND_CHILD_ELEMENT, {"using": by, "value": value})['value']
[ "def", "find_element", "(", "self", ",", "by", "=", "By", ".", "ID", ",", "value", "=", "None", ")", ":", "if", "self", ".", "_w3c", ":", "if", "by", "==", "By", ".", "ID", ":", "by", "=", "By", ".", "CSS_SELECTOR", "value", "=", "'[id=\"%s\"]'", "%", "value", "elif", "by", "==", "By", ".", "TAG_NAME", ":", "by", "=", "By", ".", "CSS_SELECTOR", "elif", "by", "==", "By", ".", "CLASS_NAME", ":", "by", "=", "By", ".", "CSS_SELECTOR", "value", "=", "\".%s\"", "%", "value", "elif", "by", "==", "By", ".", "NAME", ":", "by", "=", "By", ".", "CSS_SELECTOR", "value", "=", "'[name=\"%s\"]'", "%", "value", "return", "self", ".", "_execute", "(", "Command", ".", "FIND_CHILD_ELEMENT", ",", "{", "\"using\"", ":", "by", ",", "\"value\"", ":", "value", "}", ")", "[", "'value'", "]" ]
Find an element given a By strategy and locator. Prefer the find_element_by_* methods when possible. :Usage: :: element = element.find_element(By.ID, 'foo') :rtype: WebElement
[ "Find", "an", "element", "given", "a", "By", "strategy", "and", "locator", ".", "Prefer", "the", "find_element_by_", "*", "methods", "when", "possible", "." ]
python
train
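A usage sketch for find_element above; By comes from selenium's documented locator module, and parent_element stands for an already-located WebElement (an assumption, not part of this record). Under W3C, the By.ID call is rewritten internally to the CSS selector '[id="foo"]' before dispatch.

from selenium.webdriver.common.by import By

# parent_element is assumed to be a WebElement obtained earlier from the driver.
child = parent_element.find_element(By.ID, "foo")
link = parent_element.find_element(By.CSS_SELECTOR, "a.nav")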
openwisp/netjsonconfig
netjsonconfig/backends/openwrt/openwrt.py
https://github.com/openwisp/netjsonconfig/blob/c23ce9732720856e2f6dc54060db71a8182c7d4b/netjsonconfig/backends/openwrt/openwrt.py#L30-L49
def _generate_contents(self, tar): """ Adds configuration files to tarfile instance. :param tar: tarfile instance :returns: None """ uci = self.render(files=False) # create a list with all the packages (and remove empty entries) packages = packages_pattern.split(uci) if '' in packages: packages.remove('') # create an UCI file for each configuration package used for package in packages: lines = package.split('\n') package_name = lines[0] text_contents = '\n'.join(lines[2:]) self._add_file(tar=tar, name='{0}{1}'.format(config_path, package_name), contents=text_contents)
[ "def", "_generate_contents", "(", "self", ",", "tar", ")", ":", "uci", "=", "self", ".", "render", "(", "files", "=", "False", ")", "# create a list with all the packages (and remove empty entries)", "packages", "=", "packages_pattern", ".", "split", "(", "uci", ")", "if", "''", "in", "packages", ":", "packages", ".", "remove", "(", "''", ")", "# create an UCI file for each configuration package used", "for", "package", "in", "packages", ":", "lines", "=", "package", ".", "split", "(", "'\\n'", ")", "package_name", "=", "lines", "[", "0", "]", "text_contents", "=", "'\\n'", ".", "join", "(", "lines", "[", "2", ":", "]", ")", "self", ".", "_add_file", "(", "tar", "=", "tar", ",", "name", "=", "'{0}{1}'", ".", "format", "(", "config_path", ",", "package_name", ")", ",", "contents", "=", "text_contents", ")" ]
Adds configuration files to tarfile instance. :param tar: tarfile instance :returns: None
[ "Adds", "configuration", "files", "to", "tarfile", "instance", "." ]
python
valid
dmwm/DBS
Server/Python/src/dbs/business/DBSAcquisitionEra.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/business/DBSAcquisitionEra.py#L57-L83
def insertAcquisitionEra(self, businput): """ Input dictionary has to have the following keys: acquisition_era_name, creation_date, create_by, start_date, end_date. it builds the correct dictionary for dao input and executes the dao """ conn = self.dbi.connection() tran = conn.begin() try: businput["acquisition_era_id"] = self.sm.increment(conn, "SEQ_AQE", tran) businput["acquisition_era_name"] = businput["acquisition_era_name"] #self.logger.warning(businput) self.acqin.execute(conn, businput, tran) tran.commit() tran = None except KeyError as ke: dbsExceptionHandler('dbsException-invalid-input', "Invalid input:"+ke.args[0]) except Exception as ex: if str(ex).lower().find("unique constraint") != -1 or str(ex).lower().find("duplicate") != -1: dbsExceptionHandler('dbsException-invalid-input2', "Invalid input: acquisition_era_name already exists in DB", serverError="%s" %ex) else: raise finally: if tran: tran.rollback() if conn: conn.close()
[ "def", "insertAcquisitionEra", "(", "self", ",", "businput", ")", ":", "conn", "=", "self", ".", "dbi", ".", "connection", "(", ")", "tran", "=", "conn", ".", "begin", "(", ")", "try", ":", "businput", "[", "\"acquisition_era_id\"", "]", "=", "self", ".", "sm", ".", "increment", "(", "conn", ",", "\"SEQ_AQE\"", ",", "tran", ")", "businput", "[", "\"acquisition_era_name\"", "]", "=", "businput", "[", "\"acquisition_era_name\"", "]", "#self.logger.warning(businput)", "self", ".", "acqin", ".", "execute", "(", "conn", ",", "businput", ",", "tran", ")", "tran", ".", "commit", "(", ")", "tran", "=", "None", "except", "KeyError", "as", "ke", ":", "dbsExceptionHandler", "(", "'dbsException-invalid-input'", ",", "\"Invalid input:\"", "+", "ke", ".", "args", "[", "0", "]", ")", "except", "Exception", "as", "ex", ":", "if", "str", "(", "ex", ")", ".", "lower", "(", ")", ".", "find", "(", "\"unique constraint\"", ")", "!=", "-", "1", "or", "str", "(", "ex", ")", ".", "lower", "(", ")", ".", "find", "(", "\"duplicate\"", ")", "!=", "-", "1", ":", "dbsExceptionHandler", "(", "'dbsException-invalid-input2'", ",", "\"Invalid input: acquisition_era_name already exists in DB\"", ",", "serverError", "=", "\"%s\"", "%", "ex", ")", "else", ":", "raise", "finally", ":", "if", "tran", ":", "tran", ".", "rollback", "(", ")", "if", "conn", ":", "conn", ".", "close", "(", ")" ]
Input dictionary has to have the following keys: acquisition_era_name, creation_date, create_by, start_date, end_date. it builds the correct dictionary for dao input and executes the dao
[ "Input", "dictionary", "has", "to", "have", "the", "following", "keys", ":", "acquisition_era_name", "creation_date", "create_by", "start_date", "end_date", ".", "it", "builds", "the", "correct", "dictionary", "for", "dao", "input", "and", "executes", "the", "dao" ]
python
train
fastai/fastai
fastai/datasets.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/datasets.py#L140-L142
def get_key(cls, key): "Get the path to `key` in the config file." return cls.get().get(key, cls.DEFAULT_CONFIG.get(key,None))
[ "def", "get_key", "(", "cls", ",", "key", ")", ":", "return", "cls", ".", "get", "(", ")", ".", "get", "(", "key", ",", "cls", ".", "DEFAULT_CONFIG", ".", "get", "(", "key", ",", "None", ")", ")" ]
Get the path to `key` in the config file.
[ "Get", "the", "path", "to", "key", "in", "the", "config", "file", "." ]
python
train
nicolargo/glances
glances/outputs/glances_curses_browser.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/outputs/glances_curses_browser.py#L134-L144
def cursor_down(self, stats): """Set the cursor to position N+1 in the list.""" if self.cursor_position + 1 < self.get_pagelines(stats): self.cursor_position += 1 else: if self._current_page + 1 < self._page_max: self._current_page += 1 else: self._current_page = 0 self.cursor_position = 0
[ "def", "cursor_down", "(", "self", ",", "stats", ")", ":", "if", "self", ".", "cursor_position", "+", "1", "<", "self", ".", "get_pagelines", "(", "stats", ")", ":", "self", ".", "cursor_position", "+=", "1", "else", ":", "if", "self", ".", "_current_page", "+", "1", "<", "self", ".", "_page_max", ":", "self", ".", "_current_page", "+=", "1", "else", ":", "self", ".", "_current_page", "=", "0", "self", ".", "cursor_position", "=", "0" ]
Set the cursor to position N+1 in the list.
[ "Set", "the", "cursor", "to", "position", "N", "-", "1", "in", "the", "list", "." ]
python
train
Azure/msrest-for-python
msrest/service_client.py
https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/service_client.py#L302-L316
def send_formdata(self, request, headers=None, content=None, **config): """Send data as a multipart form-data request. We only deal with file-like objects or strings at this point. The request is not yet streamed. This method is deprecated, and shouldn't be used anymore. :param ClientRequest request: The request object to be sent. :param dict headers: Any headers to add to the request. :param dict content: Dictionary of the fields of the formdata. :param config: Any specific config overrides. """ request.headers = headers request.add_formdata(content) return self.send(request, **config)
[ "def", "send_formdata", "(", "self", ",", "request", ",", "headers", "=", "None", ",", "content", "=", "None", ",", "*", "*", "config", ")", ":", "request", ".", "headers", "=", "headers", "request", ".", "add_formdata", "(", "content", ")", "return", "self", ".", "send", "(", "request", ",", "*", "*", "config", ")" ]
Send data as a multipart form-data request. We only deal with file-like objects or strings at this point. The request is not yet streamed. This method is deprecated, and shouldn't be used anymore. :param ClientRequest request: The request object to be sent. :param dict headers: Any headers to add to the request. :param dict content: Dictionary of the fields of the formdata. :param config: Any specific config overrides.
[ "Send", "data", "as", "a", "multipart", "form", "-", "data", "request", ".", "We", "only", "deal", "with", "file", "-", "like", "objects", "or", "strings", "at", "this", "point", ".", "The", "requests", "is", "not", "yet", "streamed", "." ]
python
train
jamieleshaw/lurklib
lurklib/channel.py
https://github.com/jamieleshaw/lurklib/blob/a861f35d880140422103dd78ec3239814e85fd7e/lurklib/channel.py#L139-L174
def cmode(self, channel, modes=''): """ Sets or gets the channel mode. Required arguments: * channel - Channel to set/get modes of. Optional arguments: * modes='' - Modes to set. If not specified return the modes of the channel. """ with self.lock: self.is_in_channel(channel) if not modes: self.send('MODE %s' % channel) modes = '' mode_set_time = None while self.readable(): msg = self._recv(rm_colon=True, \ expected_replies=('324', '329')) if msg[0] == '324': modes = msg[2].split()[1].replace('+', '', 1) elif msg[0] == '329': mode_set_time = self._m_time.localtime( \ int(msg[2].split()[1])) return modes, mode_set_time else: self.send('MODE %s %s' % (channel, modes)) if self.readable(): msg = self._recv(expected_replies=('MODE',), \ ignore_unexpected_replies=True) if msg[0]: mode = msg[2] self.parse_cmode_string(mode, msg[1]) if not self.hide_called_events: self.stepback()
[ "def", "cmode", "(", "self", ",", "channel", ",", "modes", "=", "''", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "is_in_channel", "(", "channel", ")", "if", "not", "modes", ":", "self", ".", "send", "(", "'MODE %s'", "%", "channel", ")", "modes", "=", "''", "mode_set_time", "=", "None", "while", "self", ".", "readable", "(", ")", ":", "msg", "=", "self", ".", "_recv", "(", "rm_colon", "=", "True", ",", "expected_replies", "=", "(", "'324'", ",", "'329'", ")", ")", "if", "msg", "[", "0", "]", "==", "'324'", ":", "modes", "=", "msg", "[", "2", "]", ".", "split", "(", ")", "[", "1", "]", ".", "replace", "(", "'+'", ",", "''", ",", "1", ")", "elif", "msg", "[", "0", "]", "==", "'329'", ":", "mode_set_time", "=", "self", ".", "_m_time", ".", "localtime", "(", "int", "(", "msg", "[", "2", "]", ".", "split", "(", ")", "[", "1", "]", ")", ")", "return", "modes", ",", "mode_set_time", "else", ":", "self", ".", "send", "(", "'MODE %s %s'", "%", "(", "channel", ",", "modes", ")", ")", "if", "self", ".", "readable", "(", ")", ":", "msg", "=", "self", ".", "_recv", "(", "expected_replies", "=", "(", "'MODE'", ",", ")", ",", "ignore_unexpected_replies", "=", "True", ")", "if", "msg", "[", "0", "]", ":", "mode", "=", "msg", "[", "2", "]", "self", ".", "parse_cmode_string", "(", "mode", ",", "msg", "[", "1", "]", ")", "if", "not", "self", ".", "hide_called_events", ":", "self", ".", "stepback", "(", ")" ]
Sets or gets the channel mode. Required arguments: * channel - Channel to set/get modes of. Optional arguments: * modes='' - Modes to set. If not specified return the modes of the channel.
[ "Sets", "or", "gets", "the", "channel", "mode", ".", "Required", "arguments", ":", "*", "channel", "-", "Channel", "to", "set", "/", "get", "modes", "of", ".", "Optional", "arguments", ":", "*", "modes", "=", "-", "Modes", "to", "set", ".", "If", "not", "specified", "return", "the", "modes", "of", "the", "channel", "." ]
python
train
rags/pynt-contrib
pyntcontrib/__init__.py
https://github.com/rags/pynt-contrib/blob/912315f2df9ea9b4b61abc923cad8807eed54cba/pyntcontrib/__init__.py#L11-L25
def safe_cd(path): """ Changes to a directory, yields, and changes back. Additionally any error will also change the directory back. Usage: >>> with safe_cd('some/repo'): ... call('git status') """ starting_directory = os.getcwd() try: os.chdir(path) yield finally: os.chdir(starting_directory)
[ "def", "safe_cd", "(", "path", ")", ":", "starting_directory", "=", "os", ".", "getcwd", "(", ")", "try", ":", "os", ".", "chdir", "(", "path", ")", "yield", "finally", ":", "os", ".", "chdir", "(", "starting_directory", ")" ]
Changes to a directory, yields, and changes back. Additionally any error will also change the directory back. Usage: >>> with safe_cd('some/repo'): ... call('git status')
[ "Changes", "to", "a", "directory", "yields", "and", "changes", "back", ".", "Additionally", "any", "error", "will", "also", "change", "the", "directory", "back", "." ]
python
train
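The flattened code field above loses any decorator line, and a bare generator cannot be used as a with-block; the @contextmanager wrapper below is an assumption recovered from the yield-based body and the docstring's own usage example.

import os
from contextlib import contextmanager

@contextmanager
def safe_cd(path):
    starting_directory = os.getcwd()
    try:
        os.chdir(path)
        yield
    finally:
        # Restore the original directory even if the with-body raised.
        os.chdir(starting_directory)

with safe_cd("/tmp"):
    print(os.getcwd())  # -> /tmp (or the resolved equivalent)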
facetoe/zenpy
zenpy/lib/api.py
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api.py#L1459-L1467
def count(self, view, include=None): """ Return a ViewCount for a view. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param view: View or view id """ return self._get(self._build_url(self.endpoint.count(id=view, include=include)))
[ "def", "count", "(", "self", ",", "view", ",", "include", "=", "None", ")", ":", "return", "self", ".", "_get", "(", "self", ".", "_build_url", "(", "self", ".", "endpoint", ".", "count", "(", "id", "=", "view", ",", "include", "=", "include", ")", ")", ")" ]
Return a ViewCount for a view. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param view: View or view id
[ "Return", "a", "ViewCount", "for", "a", "view", "." ]
python
train
RI-imaging/qpsphere
qpsphere/imagefit/__init__.py
https://github.com/RI-imaging/qpsphere/blob/3cfa0e9fb8e81be8c820abbeccd47242e7972ac1/qpsphere/imagefit/__init__.py#L4-L53
def analyze(qpi, model, n0, r0, c0=None, imagekw={}, ret_center=False, ret_pha_offset=False, ret_qpi=False): """Fit refractive index and radius to a phase image of a sphere Parameters ---------- qpi: QPImage Quantitative phase image information model: str Name of the light-scattering model (see :const:`qpsphere.models.available`) n0: float Approximate refractive index of the sphere r0: float Approximate radius of the sphere [m] c0: tuple of (float, float) Approximate center position in ndarray index coordinates [px]; if set to `None` (default), the center of the image is used. imagekw: dict Additional keyword arguments to :func:`qpsphere.imagefit.alg.match_phase`. ret_center: bool Return the center coordinate of the sphere ret_pha_offset: bool If True, return the phase image background offset. ret_qpi: bool If True, return the modeled data as a :class:`qpimage.QPImage`. Returns ------- n: float Computed refractive index r: float Computed radius [m] c: tuple of floats Only returned if `ret_center` is True Center position of the sphere [px] pha_offset: float Only returned if `ret_pha_offset` is True Phase image background offset qpi_sim: qpimage.QPImage Only returned if `ret_qpi` is True Modeled data """ res = match_phase(qpi, model=model, n0=n0, r0=r0, c0=c0, ret_center=ret_center, ret_pha_offset=ret_pha_offset, ret_qpi=ret_qpi, **imagekw) return res
[ "def", "analyze", "(", "qpi", ",", "model", ",", "n0", ",", "r0", ",", "c0", "=", "None", ",", "imagekw", "=", "{", "}", ",", "ret_center", "=", "False", ",", "ret_pha_offset", "=", "False", ",", "ret_qpi", "=", "False", ")", ":", "res", "=", "match_phase", "(", "qpi", ",", "model", "=", "model", ",", "n0", "=", "n0", ",", "r0", "=", "r0", ",", "c0", "=", "c0", ",", "ret_center", "=", "ret_center", ",", "ret_pha_offset", "=", "ret_pha_offset", ",", "ret_qpi", "=", "ret_qpi", ",", "*", "*", "imagekw", ")", "return", "res" ]
Fit refractive index and radius to a phase image of a sphere Parameters ---------- qpi: QPImage Quantitative phase image information model: str Name of the light-scattering model (see :const:`qpsphere.models.available`) n0: float Approximate refractive index of the sphere r0: float Approximate radius of the sphere [m] c0: tuple of (float, float) Approximate center position in ndarray index coordinates [px]; if set to `None` (default), the center of the image is used. imagekw: dict Additional keyword arguments to :func:`qpsphere.imagefit.alg.match_phase`. ret_center: bool Return the center coordinate of the sphere ret_pha_offset: bool If True, return the phase image background offset. ret_qpi: bool If True, return the modeled data as a :class:`qpimage.QPImage`. Returns ------- n: float Computed refractive index r: float Computed radius [m] c: tuple of floats Only returned if `ret_center` is True Center position of the sphere [px] pha_offset: float Only returned if `ret_pha_offset` is True Phase image background offset qpi_sim: qpimage.QPImage Only returned if `ret_qpi` is True Modeled data
[ "Fit", "refractive", "index", "and", "radius", "to", "a", "phase", "image", "of", "a", "sphere" ]
python
train
iotile/coretools
iotilesensorgraph/iotile/sg/node.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/node.py#L59-L73
def format_trigger(self, stream): """Create a user understandable string like count(stream) >= X. Args: stream (DataStream): The stream to use to format ourselves. Returns: str: The formatted string """ src = u'value' if self.use_count: src = u'count' return u"{}({}) {} {}".format(src, stream, self.comp_string, self.reference)
[ "def", "format_trigger", "(", "self", ",", "stream", ")", ":", "src", "=", "u'value'", "if", "self", ".", "use_count", ":", "src", "=", "u'count'", "return", "u\"{}({}) {} {}\"", ".", "format", "(", "src", ",", "stream", ",", "self", ".", "comp_string", ",", "self", ".", "reference", ")" ]
Create a user understandable string like count(stream) >= X. Args: stream (DataStream): The stream to use to format ourselves. Returns: str: The formatted string
[ "Create", "a", "user", "understandable", "string", "like", "count", "(", "stream", ")", ">", "=", "X", "." ]
python
train
nickoala/telepot
telepot/__init__.py
https://github.com/nickoala/telepot/blob/3792fde251d0f1d5a6ca16c8ad1a71f89360c41d/telepot/__init__.py#L945-L950
def uploadStickerFile(self, user_id, png_sticker): """ See: https://core.telegram.org/bots/api#uploadstickerfile """ p = _strip(locals(), more=['png_sticker']) return self._api_request_with_file('uploadStickerFile', _rectify(p), 'png_sticker', png_sticker)
[ "def", "uploadStickerFile", "(", "self", ",", "user_id", ",", "png_sticker", ")", ":", "p", "=", "_strip", "(", "locals", "(", ")", ",", "more", "=", "[", "'png_sticker'", "]", ")", "return", "self", ".", "_api_request_with_file", "(", "'uploadStickerFile'", ",", "_rectify", "(", "p", ")", ",", "'png_sticker'", ",", "png_sticker", ")" ]
See: https://core.telegram.org/bots/api#uploadstickerfile
[ "See", ":", "https", ":", "//", "core", ".", "telegram", ".", "org", "/", "bots", "/", "api#uploadstickerfile" ]
python
train
JonathanRaiman/pytreebank
pytreebank/labeled_trees.py
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L92-L101
def lowercase(self): """ Lowercase all strings in this tree. Works recursively and in-place. """ if len(self.children) > 0: for child in self.children: child.lowercase() else: self.text = self.text.lower()
[ "def", "lowercase", "(", "self", ")", ":", "if", "len", "(", "self", ".", "children", ")", ">", "0", ":", "for", "child", "in", "self", ".", "children", ":", "child", ".", "lowercase", "(", ")", "else", ":", "self", ".", "text", "=", "self", ".", "text", ".", "lower", "(", ")" ]
Lowercase all strings in this tree. Works recursively and in-place.
[ "Lowercase", "all", "strings", "in", "this", "tree", ".", "Works", "recursively", "and", "in", "-", "place", "." ]
python
train
weld-project/weld
python/numpy/weldnumpy/weldnumpy.py
https://github.com/weld-project/weld/blob/8ddd6db6b28878bef0892da44b1d2002b564389c/python/numpy/weldnumpy/weldnumpy.py#L55-L63
def get_supported_unary_ops(): ''' Returns a dictionary of the Weld supported unary ops, with values being their Weld symbol. ''' unary_ops = {} unary_ops[np.exp.__name__] = 'exp' unary_ops[np.log.__name__] = 'log' unary_ops[np.sqrt.__name__] = 'sqrt' return unary_ops
[ "def", "get_supported_unary_ops", "(", ")", ":", "unary_ops", "=", "{", "}", "unary_ops", "[", "np", ".", "exp", ".", "__name__", "]", "=", "'exp'", "unary_ops", "[", "np", ".", "log", ".", "__name__", "]", "=", "'log'", "unary_ops", "[", "np", ".", "sqrt", ".", "__name__", "]", "=", "'sqrt'", "return", "unary_ops" ]
Returns a dictionary of the Weld supported unary ops, with values being their Weld symbol.
[ "Returns", "a", "dictionary", "of", "the", "Weld", "supported", "unary", "ops", "with", "values", "being", "their", "Weld", "symbol", "." ]
python
train
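A quick usage sketch for get_supported_unary_ops above: the keys are numpy ufunc __name__ strings, so a ufunc object maps directly to its Weld symbol.

import numpy as np

ops = get_supported_unary_ops()
print(ops[np.sqrt.__name__])   # -> 'sqrt'
print(np.log.__name__ in ops)  # -> True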
optimizely/python-sdk
optimizely/lib/pymmh3.py
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/lib/pymmh3.py#L97-L403
def hash128( key, seed = 0x0, x64arch = True ): ''' Implements 128bit murmur3 hash. ''' def hash128_x64( key, seed ): ''' Implements 128bit murmur3 hash for x64. ''' def fmix( k ): k ^= k >> 33 k = ( k * 0xff51afd7ed558ccd ) & 0xFFFFFFFFFFFFFFFF k ^= k >> 33 k = ( k * 0xc4ceb9fe1a85ec53 ) & 0xFFFFFFFFFFFFFFFF k ^= k >> 33 return k length = len( key ) nblocks = int( length / 16 ) h1 = seed h2 = seed c1 = 0x87c37b91114253d5 c2 = 0x4cf5ad432745937f #body for block_start in xrange( 0, nblocks * 8, 8 ): # ??? big endian? k1 = key[ 2 * block_start + 7 ] << 56 | \ key[ 2 * block_start + 6 ] << 48 | \ key[ 2 * block_start + 5 ] << 40 | \ key[ 2 * block_start + 4 ] << 32 | \ key[ 2 * block_start + 3 ] << 24 | \ key[ 2 * block_start + 2 ] << 16 | \ key[ 2 * block_start + 1 ] << 8 | \ key[ 2 * block_start + 0 ] k2 = key[ 2 * block_start + 15 ] << 56 | \ key[ 2 * block_start + 14 ] << 48 | \ key[ 2 * block_start + 13 ] << 40 | \ key[ 2 * block_start + 12 ] << 32 | \ key[ 2 * block_start + 11 ] << 24 | \ key[ 2 * block_start + 10 ] << 16 | \ key[ 2 * block_start + 9 ] << 8 | \ key[ 2 * block_start + 8 ] k1 = ( c1 * k1 ) & 0xFFFFFFFFFFFFFFFF k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 k1 = ( c2 * k1 ) & 0xFFFFFFFFFFFFFFFF h1 ^= k1 h1 = ( h1 << 27 | h1 >> 37 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF h1 = ( h1 * 5 + 0x52dce729 ) & 0xFFFFFFFFFFFFFFFF k2 = ( c2 * k2 ) & 0xFFFFFFFFFFFFFFFF k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 k2 = ( c1 * k2 ) & 0xFFFFFFFFFFFFFFFF h2 ^= k2 h2 = ( h2 << 31 | h2 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF h2 = ( h2 * 5 + 0x38495ab5 ) & 0xFFFFFFFFFFFFFFFF #tail tail_index = nblocks * 16 k1 = 0 k2 = 0 tail_size = length & 15 if tail_size >= 15: k2 ^= key[ tail_index + 14 ] << 48 if tail_size >= 14: k2 ^= key[ tail_index + 13 ] << 40 if tail_size >= 13: k2 ^= key[ tail_index + 12 ] << 32 if tail_size >= 12: k2 ^= key[ tail_index + 11 ] << 24 if tail_size >= 11: k2 ^= key[ tail_index + 10 ] << 16 if tail_size >= 10: k2 ^= key[ tail_index + 9 ] << 8 if tail_size >= 9: k2 ^= key[ tail_index + 8 ] if tail_size > 8: k2 = ( k2 * c2 ) & 0xFFFFFFFFFFFFFFFF k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 k2 = ( k2 * c1 ) & 0xFFFFFFFFFFFFFFFF h2 ^= k2 if tail_size >= 8: k1 ^= key[ tail_index + 7 ] << 56 if tail_size >= 7: k1 ^= key[ tail_index + 6 ] << 48 if tail_size >= 6: k1 ^= key[ tail_index + 5 ] << 40 if tail_size >= 5: k1 ^= key[ tail_index + 4 ] << 32 if tail_size >= 4: k1 ^= key[ tail_index + 3 ] << 24 if tail_size >= 3: k1 ^= key[ tail_index + 2 ] << 16 if tail_size >= 2: k1 ^= key[ tail_index + 1 ] << 8 if tail_size >= 1: k1 ^= key[ tail_index + 0 ] if tail_size > 0: k1 = ( k1 * c1 ) & 0xFFFFFFFFFFFFFFFF k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 k1 = ( k1 * c2 ) & 0xFFFFFFFFFFFFFFFF h1 ^= k1 #finalization h1 ^= length h2 ^= length h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF h1 = fmix( h1 ) h2 = fmix( h2 ) h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF return ( h2 << 64 | h1 ) def hash128_x86( key, seed ): ''' Implements 128bit murmur3 hash for x86. ''' def fmix( h ): h ^= h >> 16 h = ( h * 0x85ebca6b ) & 0xFFFFFFFF h ^= h >> 13 h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF h ^= h >> 16 return h length = len( key ) nblocks = int( length / 16 ) h1 = seed h2 = seed h3 = seed h4 = seed c1 = 0x239b961b c2 = 0xab0e9789 c3 = 0x38b34ae5 c4 = 0xa1e38b93 #body for block_start in xrange( 0, nblocks * 16, 16 ): k1 = key[ block_start + 3 ] << 24 | \ key[ block_start + 2 ] << 16 | \ key[ block_start + 1 ] << 8 | \ key[ block_start + 0 ] k2 = key[ block_start + 7 ] << 24 | \ key[ block_start + 6 ] << 16 | \ key[ block_start + 5 ] << 8 | \ key[ block_start + 4 ] k3 = key[ block_start + 11 ] << 24 | \ key[ block_start + 10 ] << 16 | \ key[ block_start + 9 ] << 8 | \ key[ block_start + 8 ] k4 = key[ block_start + 15 ] << 24 | \ key[ block_start + 14 ] << 16 | \ key[ block_start + 13 ] << 8 | \ key[ block_start + 12 ] k1 = ( c1 * k1 ) & 0xFFFFFFFF k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32 k1 = ( c2 * k1 ) & 0xFFFFFFFF h1 ^= k1 h1 = ( h1 << 19 | h1 >> 13 ) & 0xFFFFFFFF # inlined ROTL32 h1 = ( h1 + h2 ) & 0xFFFFFFFF h1 = ( h1 * 5 + 0x561ccd1b ) & 0xFFFFFFFF k2 = ( c2 * k2 ) & 0xFFFFFFFF k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32 k2 = ( c3 * k2 ) & 0xFFFFFFFF h2 ^= k2 h2 = ( h2 << 17 | h2 >> 15 ) & 0xFFFFFFFF # inlined ROTL32 h2 = ( h2 + h3 ) & 0xFFFFFFFF h2 = ( h2 * 5 + 0x0bcaa747 ) & 0xFFFFFFFF k3 = ( c3 * k3 ) & 0xFFFFFFFF k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF # inlined ROTL32 k3 = ( c4 * k3 ) & 0xFFFFFFFF h3 ^= k3 h3 = ( h3 << 15 | h3 >> 17 ) & 0xFFFFFFFF # inlined ROTL32 h3 = ( h3 + h4 ) & 0xFFFFFFFF h3 = ( h3 * 5 + 0x96cd1c35 ) & 0xFFFFFFFF k4 = ( c4 * k4 ) & 0xFFFFFFFF k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32 k4 = ( c1 * k4 ) & 0xFFFFFFFF h4 ^= k4 h4 = ( h4 << 13 | h4 >> 19 ) & 0xFFFFFFFF # inlined ROTL32 h4 = ( h1 + h4 ) & 0xFFFFFFFF h4 = ( h4 * 5 + 0x32ac3b17 ) & 0xFFFFFFFF #tail tail_index = nblocks * 16 k1 = 0 k2 = 0 k3 = 0 k4 = 0 tail_size = length & 15 if tail_size >= 15: k4 ^= key[ tail_index + 14 ] << 16 if tail_size >= 14: k4 ^= key[ tail_index + 13 ] << 8 if tail_size >= 13: k4 ^= key[ tail_index + 12 ] if tail_size > 12: k4 = ( k4 * c4 ) & 0xFFFFFFFF k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32 k4 = ( k4 * c1 ) & 0xFFFFFFFF h4 ^= k4 if tail_size >= 12: k3 ^= key[ tail_index + 11 ] << 24 if tail_size >= 11: k3 ^= key[ tail_index + 10 ] << 16 if tail_size >= 10: k3 ^= key[ tail_index + 9 ] << 8 if tail_size >= 9: k3 ^= key[ tail_index + 8 ] if tail_size > 8: k3 = ( k3 * c3 ) & 0xFFFFFFFF k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF # inlined ROTL32 k3 = ( k3 * c4 ) & 0xFFFFFFFF h3 ^= k3 if tail_size >= 8: k2 ^= key[ tail_index + 7 ] << 24 if tail_size >= 7: k2 ^= key[ tail_index + 6 ] << 16 if tail_size >= 6: k2 ^= key[ tail_index + 5 ] << 8 if tail_size >= 5: k2 ^= key[ tail_index + 4 ] if tail_size > 4: k2 = ( k2 * c2 ) & 0xFFFFFFFF k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32 k2 = ( k2 * c3 ) & 0xFFFFFFFF h2 ^= k2 if tail_size >= 4: k1 ^= key[ tail_index + 3 ] << 24 if tail_size >= 3: k1 ^= key[ tail_index + 2 ] << 16 if tail_size >= 2: k1 ^= key[ tail_index + 1 ] << 8 if tail_size >= 1: k1 ^= key[ tail_index + 0 ] if tail_size > 0: k1 = ( k1 * c1 ) & 0xFFFFFFFF k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32 k1 = ( k1 * c2 ) & 0xFFFFFFFF h1 ^= k1 #finalization h1 ^= length h2 ^= length h3 ^= length h4 ^= length h1 = ( h1 + h2 ) & 0xFFFFFFFF h1 = ( h1 + h3 ) & 0xFFFFFFFF h1 = ( h1 + h4 ) & 0xFFFFFFFF h2 = ( h1 + h2 ) & 0xFFFFFFFF h3 = ( h1 + h3 ) & 0xFFFFFFFF h4 = ( h1 + h4 ) & 0xFFFFFFFF h1 = fmix( h1 ) h2 = fmix( h2 ) h3 = fmix( h3 ) h4 = fmix( h4 ) h1 = ( h1 + h2 ) & 0xFFFFFFFF h1 = ( h1 + h3 ) & 0xFFFFFFFF h1 = ( h1 + h4 ) & 0xFFFFFFFF h2 = ( h1 + h2 ) & 0xFFFFFFFF h3 = ( h1 + h3 ) & 0xFFFFFFFF h4 = ( h1 + h4 ) & 0xFFFFFFFF return ( h4 << 96 | h3 << 64 | h2 << 32 | h1 ) key = bytearray( xencode(key) ) if x64arch: return hash128_x64( key, seed ) else: return hash128_x86( key, seed )
[ "def", "hash128", "(", "key", ",", "seed", "=", "0x0", ",", "x64arch", "=", "True", ")", ":", "def", "hash128_x64", "(", "key", ",", "seed", ")", ":", "''' Implements 128bit murmur3 hash for x64. '''", "def", "fmix", "(", "k", ")", ":", "k", "^=", "k", ">>", "33", "k", "=", "(", "k", "*", "0xff51afd7ed558ccd", ")", "&", "0xFFFFFFFFFFFFFFFF", "k", "^=", "k", ">>", "33", "k", "=", "(", "k", "*", "0xc4ceb9fe1a85ec53", ")", "&", "0xFFFFFFFFFFFFFFFF", "k", "^=", "k", ">>", "33", "return", "k", "length", "=", "len", "(", "key", ")", "nblocks", "=", "int", "(", "length", "/", "16", ")", "h1", "=", "seed", "h2", "=", "seed", "c1", "=", "0x87c37b91114253d5", "c2", "=", "0x4cf5ad432745937f", "#body", "for", "block_start", "in", "xrange", "(", "0", ",", "nblocks", "*", "8", ",", "8", ")", ":", "# ??? big endian?", "k1", "=", "key", "[", "2", "*", "block_start", "+", "7", "]", "<<", "56", "|", "key", "[", "2", "*", "block_start", "+", "6", "]", "<<", "48", "|", "key", "[", "2", "*", "block_start", "+", "5", "]", "<<", "40", "|", "key", "[", "2", "*", "block_start", "+", "4", "]", "<<", "32", "|", "key", "[", "2", "*", "block_start", "+", "3", "]", "<<", "24", "|", "key", "[", "2", "*", "block_start", "+", "2", "]", "<<", "16", "|", "key", "[", "2", "*", "block_start", "+", "1", "]", "<<", "8", "|", "key", "[", "2", "*", "block_start", "+", "0", "]", "k2", "=", "key", "[", "2", "*", "block_start", "+", "15", "]", "<<", "56", "|", "key", "[", "2", "*", "block_start", "+", "14", "]", "<<", "48", "|", "key", "[", "2", "*", "block_start", "+", "13", "]", "<<", "40", "|", "key", "[", "2", "*", "block_start", "+", "12", "]", "<<", "32", "|", "key", "[", "2", "*", "block_start", "+", "11", "]", "<<", "24", "|", "key", "[", "2", "*", "block_start", "+", "10", "]", "<<", "16", "|", "key", "[", "2", "*", "block_start", "+", "9", "]", "<<", "8", "|", "key", "[", "2", "*", "block_start", "+", "8", "]", "k1", "=", "(", "c1", "*", "k1", ")", "&", "0xFFFFFFFFFFFFFFFF", "k1", "=", "(", "k1", "<<", "31", "|", "k1", ">>", "33", ")", "&", "0xFFFFFFFFFFFFFFFF", "# inlined ROTL64", "k1", "=", "(", "c2", "*", "k1", ")", "&", "0xFFFFFFFFFFFFFFFF", "h1", "^=", "k1", "h1", "=", "(", "h1", "<<", "27", "|", "h1", ">>", "37", ")", "&", "0xFFFFFFFFFFFFFFFF", "# inlined ROTL64", "h1", "=", "(", "h1", "+", "h2", ")", "&", "0xFFFFFFFFFFFFFFFF", "h1", "=", "(", "h1", "*", "5", "+", "0x52dce729", ")", "&", "0xFFFFFFFFFFFFFFFF", "k2", "=", "(", "c2", "*", "k2", ")", "&", "0xFFFFFFFFFFFFFFFF", "k2", "=", "(", "k2", "<<", "33", "|", "k2", ">>", "31", ")", "&", "0xFFFFFFFFFFFFFFFF", "# inlined ROTL64", "k2", "=", "(", "c1", "*", "k2", ")", "&", "0xFFFFFFFFFFFFFFFF", "h2", "^=", "k2", "h2", "=", "(", "h2", "<<", "31", "|", "h2", ">>", "33", ")", "&", "0xFFFFFFFFFFFFFFFF", "# inlined ROTL64", "h2", "=", "(", "h1", "+", "h2", ")", "&", "0xFFFFFFFFFFFFFFFF", "h2", "=", "(", "h2", "*", "5", "+", "0x38495ab5", ")", "&", "0xFFFFFFFFFFFFFFFF", "#tail", "tail_index", "=", "nblocks", "*", "16", "k1", "=", "0", "k2", "=", "0", "tail_size", "=", "length", "&", "15", "if", "tail_size", ">=", "15", ":", "k2", "^=", "key", "[", "tail_index", "+", "14", "]", "<<", "48", "if", "tail_size", ">=", "14", ":", "k2", "^=", "key", "[", "tail_index", "+", "13", "]", "<<", "40", "if", "tail_size", ">=", "13", ":", "k2", "^=", "key", "[", "tail_index", "+", "12", "]", "<<", "32", "if", "tail_size", ">=", "12", ":", "k2", "^=", "key", "[", "tail_index", "+", "11", "]", "<<", "24", "if", "tail_size", ">=", "11", ":", "k2", "^=", "key", "[", "tail_index", "+", "10", 
"]", "<<", "16", "if", "tail_size", ">=", "10", ":", "k2", "^=", "key", "[", "tail_index", "+", "9", "]", "<<", "8", "if", "tail_size", ">=", "9", ":", "k2", "^=", "key", "[", "tail_index", "+", "8", "]", "if", "tail_size", ">", "8", ":", "k2", "=", "(", "k2", "*", "c2", ")", "&", "0xFFFFFFFFFFFFFFFF", "k2", "=", "(", "k2", "<<", "33", "|", "k2", ">>", "31", ")", "&", "0xFFFFFFFFFFFFFFFF", "# inlined ROTL64", "k2", "=", "(", "k2", "*", "c1", ")", "&", "0xFFFFFFFFFFFFFFFF", "h2", "^=", "k2", "if", "tail_size", ">=", "8", ":", "k1", "^=", "key", "[", "tail_index", "+", "7", "]", "<<", "56", "if", "tail_size", ">=", "7", ":", "k1", "^=", "key", "[", "tail_index", "+", "6", "]", "<<", "48", "if", "tail_size", ">=", "6", ":", "k1", "^=", "key", "[", "tail_index", "+", "5", "]", "<<", "40", "if", "tail_size", ">=", "5", ":", "k1", "^=", "key", "[", "tail_index", "+", "4", "]", "<<", "32", "if", "tail_size", ">=", "4", ":", "k1", "^=", "key", "[", "tail_index", "+", "3", "]", "<<", "24", "if", "tail_size", ">=", "3", ":", "k1", "^=", "key", "[", "tail_index", "+", "2", "]", "<<", "16", "if", "tail_size", ">=", "2", ":", "k1", "^=", "key", "[", "tail_index", "+", "1", "]", "<<", "8", "if", "tail_size", ">=", "1", ":", "k1", "^=", "key", "[", "tail_index", "+", "0", "]", "if", "tail_size", ">", "0", ":", "k1", "=", "(", "k1", "*", "c1", ")", "&", "0xFFFFFFFFFFFFFFFF", "k1", "=", "(", "k1", "<<", "31", "|", "k1", ">>", "33", ")", "&", "0xFFFFFFFFFFFFFFFF", "# inlined ROTL64", "k1", "=", "(", "k1", "*", "c2", ")", "&", "0xFFFFFFFFFFFFFFFF", "h1", "^=", "k1", "#finalization", "h1", "^=", "length", "h2", "^=", "length", "h1", "=", "(", "h1", "+", "h2", ")", "&", "0xFFFFFFFFFFFFFFFF", "h2", "=", "(", "h1", "+", "h2", ")", "&", "0xFFFFFFFFFFFFFFFF", "h1", "=", "fmix", "(", "h1", ")", "h2", "=", "fmix", "(", "h2", ")", "h1", "=", "(", "h1", "+", "h2", ")", "&", "0xFFFFFFFFFFFFFFFF", "h2", "=", "(", "h1", "+", "h2", ")", "&", "0xFFFFFFFFFFFFFFFF", "return", "(", "h2", "<<", "64", "|", "h1", ")", "def", "hash128_x86", "(", "key", ",", "seed", ")", ":", "''' Implements 128bit murmur3 hash for x86. 
'''", "def", "fmix", "(", "h", ")", ":", "h", "^=", "h", ">>", "16", "h", "=", "(", "h", "*", "0x85ebca6b", ")", "&", "0xFFFFFFFF", "h", "^=", "h", ">>", "13", "h", "=", "(", "h", "*", "0xc2b2ae35", ")", "&", "0xFFFFFFFF", "h", "^=", "h", ">>", "16", "return", "h", "length", "=", "len", "(", "key", ")", "nblocks", "=", "int", "(", "length", "/", "16", ")", "h1", "=", "seed", "h2", "=", "seed", "h3", "=", "seed", "h4", "=", "seed", "c1", "=", "0x239b961b", "c2", "=", "0xab0e9789", "c3", "=", "0x38b34ae5", "c4", "=", "0xa1e38b93", "#body", "for", "block_start", "in", "xrange", "(", "0", ",", "nblocks", "*", "16", ",", "16", ")", ":", "k1", "=", "key", "[", "block_start", "+", "3", "]", "<<", "24", "|", "key", "[", "block_start", "+", "2", "]", "<<", "16", "|", "key", "[", "block_start", "+", "1", "]", "<<", "8", "|", "key", "[", "block_start", "+", "0", "]", "k2", "=", "key", "[", "block_start", "+", "7", "]", "<<", "24", "|", "key", "[", "block_start", "+", "6", "]", "<<", "16", "|", "key", "[", "block_start", "+", "5", "]", "<<", "8", "|", "key", "[", "block_start", "+", "4", "]", "k3", "=", "key", "[", "block_start", "+", "11", "]", "<<", "24", "|", "key", "[", "block_start", "+", "10", "]", "<<", "16", "|", "key", "[", "block_start", "+", "9", "]", "<<", "8", "|", "key", "[", "block_start", "+", "8", "]", "k4", "=", "key", "[", "block_start", "+", "15", "]", "<<", "24", "|", "key", "[", "block_start", "+", "14", "]", "<<", "16", "|", "key", "[", "block_start", "+", "13", "]", "<<", "8", "|", "key", "[", "block_start", "+", "12", "]", "k1", "=", "(", "c1", "*", "k1", ")", "&", "0xFFFFFFFF", "k1", "=", "(", "k1", "<<", "15", "|", "k1", ">>", "17", ")", "&", "0xFFFFFFFF", "# inlined ROTL32", "k1", "=", "(", "c2", "*", "k1", ")", "&", "0xFFFFFFFF", "h1", "^=", "k1", "h1", "=", "(", "h1", "<<", "19", "|", "h1", ">>", "13", ")", "&", "0xFFFFFFFF", "# inlined ROTL32", "h1", "=", "(", "h1", "+", "h2", ")", "&", "0xFFFFFFFF", "h1", "=", "(", "h1", "*", "5", "+", "0x561ccd1b", ")", "&", "0xFFFFFFFF", "k2", "=", "(", "c2", "*", "k2", ")", "&", "0xFFFFFFFF", "k2", "=", "(", "k2", "<<", "16", "|", "k2", ">>", "16", ")", "&", "0xFFFFFFFF", "# inlined ROTL32", "k2", "=", "(", "c3", "*", "k2", ")", "&", "0xFFFFFFFF", "h2", "^=", "k2", "h2", "=", "(", "h2", "<<", "17", "|", "h2", ">>", "15", ")", "&", "0xFFFFFFFF", "# inlined ROTL32", "h2", "=", "(", "h2", "+", "h3", ")", "&", "0xFFFFFFFF", "h2", "=", "(", "h2", "*", "5", "+", "0x0bcaa747", ")", "&", "0xFFFFFFFF", "k3", "=", "(", "c3", "*", "k3", ")", "&", "0xFFFFFFFF", "k3", "=", "(", "k3", "<<", "17", "|", "k3", ">>", "15", ")", "&", "0xFFFFFFFF", "# inlined ROTL32", "k3", "=", "(", "c4", "*", "k3", ")", "&", "0xFFFFFFFF", "h3", "^=", "k3", "h3", "=", "(", "h3", "<<", "15", "|", "h3", ">>", "17", ")", "&", "0xFFFFFFFF", "# inlined ROTL32", "h3", "=", "(", "h3", "+", "h4", ")", "&", "0xFFFFFFFF", "h3", "=", "(", "h3", "*", "5", "+", "0x96cd1c35", ")", "&", "0xFFFFFFFF", "k4", "=", "(", "c4", "*", "k4", ")", "&", "0xFFFFFFFF", "k4", "=", "(", "k4", "<<", "18", "|", "k4", ">>", "14", ")", "&", "0xFFFFFFFF", "# inlined ROTL32", "k4", "=", "(", "c1", "*", "k4", ")", "&", "0xFFFFFFFF", "h4", "^=", "k4", "h4", "=", "(", "h4", "<<", "13", "|", "h4", ">>", "19", ")", "&", "0xFFFFFFFF", "# inlined ROTL32", "h4", "=", "(", "h1", "+", "h4", ")", "&", "0xFFFFFFFF", "h4", "=", "(", "h4", "*", "5", "+", "0x32ac3b17", ")", "&", "0xFFFFFFFF", "#tail", "tail_index", "=", "nblocks", "*", "16", "k1", "=", "0", "k2", "=", "0", "k3", "=", "0", "k4", "=", "0", "tail_size", 
"=", "length", "&", "15", "if", "tail_size", ">=", "15", ":", "k4", "^=", "key", "[", "tail_index", "+", "14", "]", "<<", "16", "if", "tail_size", ">=", "14", ":", "k4", "^=", "key", "[", "tail_index", "+", "13", "]", "<<", "8", "if", "tail_size", ">=", "13", ":", "k4", "^=", "key", "[", "tail_index", "+", "12", "]", "if", "tail_size", ">", "12", ":", "k4", "=", "(", "k4", "*", "c4", ")", "&", "0xFFFFFFFF", "k4", "=", "(", "k4", "<<", "18", "|", "k4", ">>", "14", ")", "&", "0xFFFFFFFF", "# inlined ROTL32", "k4", "=", "(", "k4", "*", "c1", ")", "&", "0xFFFFFFFF", "h4", "^=", "k4", "if", "tail_size", ">=", "12", ":", "k3", "^=", "key", "[", "tail_index", "+", "11", "]", "<<", "24", "if", "tail_size", ">=", "11", ":", "k3", "^=", "key", "[", "tail_index", "+", "10", "]", "<<", "16", "if", "tail_size", ">=", "10", ":", "k3", "^=", "key", "[", "tail_index", "+", "9", "]", "<<", "8", "if", "tail_size", ">=", "9", ":", "k3", "^=", "key", "[", "tail_index", "+", "8", "]", "if", "tail_size", ">", "8", ":", "k3", "=", "(", "k3", "*", "c3", ")", "&", "0xFFFFFFFF", "k3", "=", "(", "k3", "<<", "17", "|", "k3", ">>", "15", ")", "&", "0xFFFFFFFF", "# inlined ROTL32", "k3", "=", "(", "k3", "*", "c4", ")", "&", "0xFFFFFFFF", "h3", "^=", "k3", "if", "tail_size", ">=", "8", ":", "k2", "^=", "key", "[", "tail_index", "+", "7", "]", "<<", "24", "if", "tail_size", ">=", "7", ":", "k2", "^=", "key", "[", "tail_index", "+", "6", "]", "<<", "16", "if", "tail_size", ">=", "6", ":", "k2", "^=", "key", "[", "tail_index", "+", "5", "]", "<<", "8", "if", "tail_size", ">=", "5", ":", "k2", "^=", "key", "[", "tail_index", "+", "4", "]", "if", "tail_size", ">", "4", ":", "k2", "=", "(", "k2", "*", "c2", ")", "&", "0xFFFFFFFF", "k2", "=", "(", "k2", "<<", "16", "|", "k2", ">>", "16", ")", "&", "0xFFFFFFFF", "# inlined ROTL32", "k2", "=", "(", "k2", "*", "c3", ")", "&", "0xFFFFFFFF", "h2", "^=", "k2", "if", "tail_size", ">=", "4", ":", "k1", "^=", "key", "[", "tail_index", "+", "3", "]", "<<", "24", "if", "tail_size", ">=", "3", ":", "k1", "^=", "key", "[", "tail_index", "+", "2", "]", "<<", "16", "if", "tail_size", ">=", "2", ":", "k1", "^=", "key", "[", "tail_index", "+", "1", "]", "<<", "8", "if", "tail_size", ">=", "1", ":", "k1", "^=", "key", "[", "tail_index", "+", "0", "]", "if", "tail_size", ">", "0", ":", "k1", "=", "(", "k1", "*", "c1", ")", "&", "0xFFFFFFFF", "k1", "=", "(", "k1", "<<", "15", "|", "k1", ">>", "17", ")", "&", "0xFFFFFFFF", "# inlined ROTL32", "k1", "=", "(", "k1", "*", "c2", ")", "&", "0xFFFFFFFF", "h1", "^=", "k1", "#finalization", "h1", "^=", "length", "h2", "^=", "length", "h3", "^=", "length", "h4", "^=", "length", "h1", "=", "(", "h1", "+", "h2", ")", "&", "0xFFFFFFFF", "h1", "=", "(", "h1", "+", "h3", ")", "&", "0xFFFFFFFF", "h1", "=", "(", "h1", "+", "h4", ")", "&", "0xFFFFFFFF", "h2", "=", "(", "h1", "+", "h2", ")", "&", "0xFFFFFFFF", "h3", "=", "(", "h1", "+", "h3", ")", "&", "0xFFFFFFFF", "h4", "=", "(", "h1", "+", "h4", ")", "&", "0xFFFFFFFF", "h1", "=", "fmix", "(", "h1", ")", "h2", "=", "fmix", "(", "h2", ")", "h3", "=", "fmix", "(", "h3", ")", "h4", "=", "fmix", "(", "h4", ")", "h1", "=", "(", "h1", "+", "h2", ")", "&", "0xFFFFFFFF", "h1", "=", "(", "h1", "+", "h3", ")", "&", "0xFFFFFFFF", "h1", "=", "(", "h1", "+", "h4", ")", "&", "0xFFFFFFFF", "h2", "=", "(", "h1", "+", "h2", ")", "&", "0xFFFFFFFF", "h3", "=", "(", "h1", "+", "h3", ")", "&", "0xFFFFFFFF", "h4", "=", "(", "h1", "+", "h4", ")", "&", "0xFFFFFFFF", "return", "(", "h4", "<<", "96", "|", "h3", "<<", "64", "|", "h2", 
"<<", "32", "|", "h1", ")", "key", "=", "bytearray", "(", "xencode", "(", "key", ")", ")", "if", "x64arch", ":", "return", "hash128_x64", "(", "key", ",", "seed", ")", "else", ":", "return", "hash128_x86", "(", "key", ",", "seed", ")" ]
Implements 128bit murmur3 hash.
[ "Implements", "128bit", "murmur3", "hash", "." ]
python
train
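A quick usage sketch for the record above (not part of the dataset; it assumes hash128 and the xencode helper it references are importable from a pymmh3-style module, and it runs under the Python 2 semantics the code uses). The return statements show the 128-bit digest is packed as a single integer, so the halves can be peeled off with masks and shifts:

# Hedged example: hash a short string on both code paths and split the
# single 128-bit integer back into its 64-bit halves.
digest = hash128( 'foo', seed = 0x0, x64arch = True )    # x64 variant
digest_x86 = hash128( 'foo', seed = 0x0, x64arch = False )  # x86 variant
low64 = digest & 0xFFFFFFFFFFFFFFFF   # h1 (low half of the x64 result)
high64 = digest >> 64                 # h2 (high half of the x64 result)
print( '%032x' % digest )
print( '%032x' % digest_x86 )

The x64 and x86 paths are different hash functions by design, so the two digests are not expected to match for the same input.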
andrewsnowden/dota2py
dota2py/summary.py
https://github.com/andrewsnowden/dota2py/blob/67637f4b9c160ea90c11b7e81545baf350affa7a/dota2py/summary.py#L268-L273
def parse_say_text(self, event):
    """
    All chat
    """
    if event.chat and event.format == "DOTA_Chat_All":
        self.chatlog.append((event.prefix, event.text))
[ "def", "parse_say_text", "(", "self", ",", "event", ")", ":", "if", "event", ".", "chat", "and", "event", ".", "format", "==", "\"DOTA_Chat_All\"", ":", "self", ".", "chatlog", ".", "append", "(", "(", "event", ".", "prefix", ",", "event", ".", "text", ")", ")" ]
All chat
[ "All", "chat" ]
python
train
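A minimal, hypothetical driver for the method above. The real event objects come from dota2py's replay parser; the duck-typed stand-in here only mimics the four attributes the method reads, and it assumes parse_say_text is visible at module scope so it can be attached to a fake class:

from collections import namedtuple

# Hypothetical stand-in for a say-text event; only the fields the method reads.
FakeEvent = namedtuple("FakeEvent", ["chat", "format", "prefix", "text"])

class FakeSummary(object):
    def __init__(self):
        self.chatlog = []
    parse_say_text = parse_say_text  # reuse the function from the record above

s = FakeSummary()
s.parse_say_text(FakeEvent(chat=True, format="DOTA_Chat_All",
                           prefix="Player", text="gg wp"))
assert s.chatlog == [("Player", "gg wp")]
s.parse_say_text(FakeEvent(chat=True, format="DOTA_Chat_Team",
                           prefix="Player", text="mid missing"))
assert s.chatlog == [("Player", "gg wp")]   # non-all-chat events are ignored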
nerdvegas/rez
src/rez/resolver.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/resolver.py#L158-L302
def _get_cached_solve(self):
    """Find a memcached resolve.

    If there is NOT a resolve timestamp:
    - fetch a non-timestamped memcache entry;
    - if no entry, then fail;
    - if packages have changed, then:
      - delete the entry;
      - fail;
    - if no packages in the entry have been released since, then
      - use the entry and return;
    - else:
      - delete the entry;
      - fail.

    If there IS a resolve timestamp (let us call this T):
    - fetch a non-timestamped memcache entry;
    - if entry then:
      - if no packages have changed, then:
        - if no packages in the entry have been released since:
          - if no packages in the entry were released after T, then
            - use the entry and return;
        - else:
          - delete the entry;
      - else:
        - delete the entry;
    - fetch a timestamped (T) memcache entry;
    - if no entry, then fail;
    - if packages have changed, then:
      - delete the entry;
      - fail;
    - else:
      - use the entry.

    This behaviour exists specifically so that resolves that use a
    timestamp but set that to the current time, can be reused by other
    resolves if nothing has changed. Older resolves however, can only be
    reused if the timestamp matches exactly (but this might happen a lot -
    consider a workflow where a work area is tied down to a particular
    timestamp in order to 'lock' it from any further software releases).
    """
    if not (self.caching and self.memcached_servers):
        return None

    # these caches avoids some potentially repeated file stats
    variant_states = {}
    last_release_times = {}

    def _hit(data):
        solver_dict, _, _ = data
        return solver_dict

    def _miss():
        self._print("No cache key retrieved")
        return None

    def _delete_cache_entry(key):
        with self._memcached_client() as client:
            client.delete(key)
        self._print("Discarded entry: %r", key)

    def _retrieve(timestamped):
        key = self._memcache_key(timestamped=timestamped)
        self._print("Retrieving memcache key: %r", key)
        with self._memcached_client() as client:
            data = client.get(key)
        return key, data

    def _packages_changed(key, data):
        solver_dict, _, variant_states_dict = data
        for variant_handle in solver_dict.get("variant_handles", []):
            variant = self._get_variant(variant_handle)
            old_state = variant_states_dict.get(variant.name)

            new_state = variant_states.get(variant)
            if new_state is None:
                try:
                    repo = variant.resource._repository
                    new_state = repo.get_variant_state_handle(variant.resource)
                except (IOError, OSError) as e:
                    # if, ie a package file was deleted on disk, then
                    # an IOError or OSError will be raised when we try to
                    # read from it - assume that the packages have changed!
                    self._print("Error loading %r (assuming cached state "
                                "changed): %s", variant.qualified_name, e)
                    return True

                variant_states[variant] = new_state

            if old_state != new_state:
                self._print("%r has been modified", variant.qualified_name)
                return True
        return False

    def _releases_since_solve(key, data):
        _, release_times_dict, _ = data
        for package_name, release_time in release_times_dict.iteritems():
            time_ = last_release_times.get(package_name)
            if time_ is None:
                time_ = get_last_release_time(package_name, self.package_paths)
                last_release_times[package_name] = time_

            if time_ != release_time:
                self._print(
                    "A newer version of %r (%d) has been released since the "
                    "resolve was cached (latest release in cache was %d) "
                    "(entry: %r)", package_name, time_, release_time, key)
                return True
        return False

    def _timestamp_is_earlier(key, data):
        _, release_times_dict, _ = data
        for package_name, release_time in release_times_dict.iteritems():
            if self.timestamp < release_time:
                self._print("Resolve timestamp (%d) is earlier than %r in "
                            "solve (%d) (entry: %r)", self.timestamp,
                            package_name, release_time, key)
                return True
        return False

    key, data = _retrieve(False)

    if self.timestamp:
        if data:
            if _packages_changed(key, data) or _releases_since_solve(key, data):
                _delete_cache_entry(key)
            elif not _timestamp_is_earlier(key, data):
                return _hit(data)

        key, data = _retrieve(True)
        if not data:
            return _miss()
        if _packages_changed(key, data):
            _delete_cache_entry(key)
            return _miss()
        else:
            return _hit(data)
    else:
        if not data:
            return _miss()

        if _packages_changed(key, data) or _releases_since_solve(key, data):
            _delete_cache_entry(key)
            return _miss()
        else:
            return _hit(data)
[ "def", "_get_cached_solve", "(", "self", ")", ":", "if", "not", "(", "self", ".", "caching", "and", "self", ".", "memcached_servers", ")", ":", "return", "None", "# these caches avoids some potentially repeated file stats", "variant_states", "=", "{", "}", "last_release_times", "=", "{", "}", "def", "_hit", "(", "data", ")", ":", "solver_dict", ",", "_", ",", "_", "=", "data", "return", "solver_dict", "def", "_miss", "(", ")", ":", "self", ".", "_print", "(", "\"No cache key retrieved\"", ")", "return", "None", "def", "_delete_cache_entry", "(", "key", ")", ":", "with", "self", ".", "_memcached_client", "(", ")", "as", "client", ":", "client", ".", "delete", "(", "key", ")", "self", ".", "_print", "(", "\"Discarded entry: %r\"", ",", "key", ")", "def", "_retrieve", "(", "timestamped", ")", ":", "key", "=", "self", ".", "_memcache_key", "(", "timestamped", "=", "timestamped", ")", "self", ".", "_print", "(", "\"Retrieving memcache key: %r\"", ",", "key", ")", "with", "self", ".", "_memcached_client", "(", ")", "as", "client", ":", "data", "=", "client", ".", "get", "(", "key", ")", "return", "key", ",", "data", "def", "_packages_changed", "(", "key", ",", "data", ")", ":", "solver_dict", ",", "_", ",", "variant_states_dict", "=", "data", "for", "variant_handle", "in", "solver_dict", ".", "get", "(", "\"variant_handles\"", ",", "[", "]", ")", ":", "variant", "=", "self", ".", "_get_variant", "(", "variant_handle", ")", "old_state", "=", "variant_states_dict", ".", "get", "(", "variant", ".", "name", ")", "new_state", "=", "variant_states", ".", "get", "(", "variant", ")", "if", "new_state", "is", "None", ":", "try", ":", "repo", "=", "variant", ".", "resource", ".", "_repository", "new_state", "=", "repo", ".", "get_variant_state_handle", "(", "variant", ".", "resource", ")", "except", "(", "IOError", ",", "OSError", ")", "as", "e", ":", "# if, ie a package file was deleted on disk, then", "# an IOError or OSError will be raised when we try to", "# read from it - assume that the packages have changed!", "self", ".", "_print", "(", "\"Error loading %r (assuming cached state \"", "\"changed): %s\"", ",", "variant", ".", "qualified_name", ",", "e", ")", "return", "True", "variant_states", "[", "variant", "]", "=", "new_state", "if", "old_state", "!=", "new_state", ":", "self", ".", "_print", "(", "\"%r has been modified\"", ",", "variant", ".", "qualified_name", ")", "return", "True", "return", "False", "def", "_releases_since_solve", "(", "key", ",", "data", ")", ":", "_", ",", "release_times_dict", ",", "_", "=", "data", "for", "package_name", ",", "release_time", "in", "release_times_dict", ".", "iteritems", "(", ")", ":", "time_", "=", "last_release_times", ".", "get", "(", "package_name", ")", "if", "time_", "is", "None", ":", "time_", "=", "get_last_release_time", "(", "package_name", ",", "self", ".", "package_paths", ")", "last_release_times", "[", "package_name", "]", "=", "time_", "if", "time_", "!=", "release_time", ":", "self", ".", "_print", "(", "\"A newer version of %r (%d) has been released since the \"", "\"resolve was cached (latest release in cache was %d) \"", "\"(entry: %r)\"", ",", "package_name", ",", "time_", ",", "release_time", ",", "key", ")", "return", "True", "return", "False", "def", "_timestamp_is_earlier", "(", "key", ",", "data", ")", ":", "_", ",", "release_times_dict", ",", "_", "=", "data", "for", "package_name", ",", "release_time", "in", "release_times_dict", ".", "iteritems", "(", ")", ":", "if", "self", ".", "timestamp", "<", "release_time", ":", 
"self", ".", "_print", "(", "\"Resolve timestamp (%d) is earlier than %r in \"", "\"solve (%d) (entry: %r)\"", ",", "self", ".", "timestamp", ",", "package_name", ",", "release_time", ",", "key", ")", "return", "True", "return", "False", "key", ",", "data", "=", "_retrieve", "(", "False", ")", "if", "self", ".", "timestamp", ":", "if", "data", ":", "if", "_packages_changed", "(", "key", ",", "data", ")", "or", "_releases_since_solve", "(", "key", ",", "data", ")", ":", "_delete_cache_entry", "(", "key", ")", "elif", "not", "_timestamp_is_earlier", "(", "key", ",", "data", ")", ":", "return", "_hit", "(", "data", ")", "key", ",", "data", "=", "_retrieve", "(", "True", ")", "if", "not", "data", ":", "return", "_miss", "(", ")", "if", "_packages_changed", "(", "key", ",", "data", ")", ":", "_delete_cache_entry", "(", "key", ")", "return", "_miss", "(", ")", "else", ":", "return", "_hit", "(", "data", ")", "else", ":", "if", "not", "data", ":", "return", "_miss", "(", ")", "if", "_packages_changed", "(", "key", ",", "data", ")", "or", "_releases_since_solve", "(", "key", ",", "data", ")", ":", "_delete_cache_entry", "(", "key", ")", "return", "_miss", "(", ")", "else", ":", "return", "_hit", "(", "data", ")" ]
Find a memcached resolve.

If there is NOT a resolve timestamp:
- fetch a non-timestamped memcache entry;
- if no entry, then fail;
- if packages have changed, then:
  - delete the entry;
  - fail;
- if no packages in the entry have been released since, then
  - use the entry and return;
- else:
  - delete the entry;
  - fail.

If there IS a resolve timestamp (let us call this T):
- fetch a non-timestamped memcache entry;
- if entry then:
  - if no packages have changed, then:
    - if no packages in the entry have been released since:
      - if no packages in the entry were released after T, then
        - use the entry and return;
    - else:
      - delete the entry;
  - else:
    - delete the entry;
- fetch a timestamped (T) memcache entry;
- if no entry, then fail;
- if packages have changed, then:
  - delete the entry;
  - fail;
- else:
  - use the entry.

This behaviour exists specifically so that resolves that use a timestamp
but set that to the current time, can be reused by other resolves if
nothing has changed. Older resolves however, can only be reused if the
timestamp matches exactly (but this might happen a lot - consider a
workflow where a work area is tied down to a particular timestamp in
order to 'lock' it from any further software releases).
[ "Find", "a", "memcached", "resolve", "." ]
python
train
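The two-phase lookup that the docstring above describes can be hard to follow inline, so here is a distilled, self-contained sketch of just the control flow. All names are hypothetical: memcache access, package-state checks and release-time checks are replaced with plain callables, and this is not rez's actual API.

def cached_solve(get_entry, changed, released_since, released_after,
                 timestamp=None):
    """Hypothetical distillation of the lookup order described above."""
    # Phase 1: the shared, non-timestamped entry.
    entry = get_entry(timestamped=False)
    if timestamp is None:
        # No resolve timestamp: the shared entry is the only candidate.
        if entry is None or changed(entry) or released_since(entry):
            return None                  # miss (a stale entry gets deleted)
        return entry                     # hit
    # Timestamped resolve: the shared entry is reusable only if nothing
    # changed AND nothing in it was released after the timestamp.
    if entry is not None and not changed(entry):
        if not released_since(entry) and not released_after(entry, timestamp):
            return entry                 # hit
    # Phase 2: fall back to the entry keyed on this exact timestamp.
    entry = get_entry(timestamped=True)
    if entry is None or changed(entry):
        return None                      # miss
    return entry                         # hit

This mirrors why a resolve stamped with the current time can reuse the shared entry (nothing was released after it), while an older timestamp falls through to the exact-timestamp key.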