repo: string (7 to 54 chars)
path: string (4 to 192 chars)
url: string (87 to 284 chars)
code: string (78 to 104k chars)
code_tokens: list
docstring: string (1 to 46.9k chars)
docstring_tokens: list
language: string (1 distinct value)
partition: string (3 distinct values)
wonambi-python/wonambi
wonambi/detect/spindle.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/detect/spindle.py#L2130-L2158
def _realwavelets(s_freq, freqs, dur, width):
    """Create real wavelets, for UCSD.

    Parameters
    ----------
    s_freq : int
        sampling frequency
    freqs : ndarray
        vector with frequencies of interest
    dur : float
        duration of the wavelets in s
    width : float
        parameter controlling gaussian shape

    Returns
    -------
    ndarray
        wavelets
    """
    x = arange(-dur / 2, dur / 2, 1 / s_freq)
    wavelets = empty((len(freqs), len(x)))
    g = exp(-(pi * x ** 2) / width ** 2)

    for i, one_freq in enumerate(freqs):
        y = cos(2 * pi * x * one_freq)
        wavelets[i, :] = y * g

    return wavelets
[ "def", "_realwavelets", "(", "s_freq", ",", "freqs", ",", "dur", ",", "width", ")", ":", "x", "=", "arange", "(", "-", "dur", "/", "2", ",", "dur", "/", "2", ",", "1", "/", "s_freq", ")", "wavelets", "=", "empty", "(", "(", "len", "(", "freqs", ")", ",", "len", "(", "x", ")", ")", ")", "g", "=", "exp", "(", "-", "(", "pi", "*", "x", "**", "2", ")", "/", "width", "**", "2", ")", "for", "i", ",", "one_freq", "in", "enumerate", "(", "freqs", ")", ":", "y", "=", "cos", "(", "2", "*", "pi", "*", "x", "*", "one_freq", ")", "wavelets", "[", "i", ",", ":", "]", "=", "y", "*", "g", "return", "wavelets" ]
Create real wavelets, for UCSD.

Parameters
----------
s_freq : int
    sampling frequency
freqs : ndarray
    vector with frequencies of interest
dur : float
    duration of the wavelets in s
width : float
    parameter controlling gaussian shape

Returns
-------
ndarray
    wavelets
[ "Create", "real", "wavelets", "for", "UCSD", "." ]
python
train
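A minimal usage sketch for the helper above; the import path is taken from the record's URL, and since _realwavelets is a private helper the call is illustrative only:

import numpy as np
from wonambi.detect.spindle import _realwavelets  # private helper; illustrative

s_freq = 256                 # sampling frequency in Hz
freqs = np.arange(10, 16)    # frequencies of interest (spindle band)
# 1 s long wavelets: each row is cos(2*pi*f*x) windowed by a Gaussian
wavelets = _realwavelets(s_freq, freqs, dur=1.0, width=0.5)
print(wavelets.shape)        # (6, 256): one wavelet per frequency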
ashleysommer/sanicpluginsframework
spf/plugin.py
https://github.com/ashleysommer/sanicpluginsframework/blob/2cb1656d9334f04c30c738074784b0450c1b893e/spf/plugin.py#L154-L174
def static(self, uri, file_or_directory, *args, **kwargs):
    """Register a static route serving a file or directory

    :param uri: endpoint at which the static files will be accessible.
    :type uri: str
    :param file_or_directory: path to the file or directory to serve.
    :type file_or_directory: str
    :param args: captures all of the positional arguments passed in
    :type args: tuple(Any)
    :param kwargs: captures the keyword arguments passed in
    :type kwargs: dict(Any)
    """
    kwargs.setdefault('pattern', r'/?.+')
    kwargs.setdefault('use_modified_since', True)
    kwargs.setdefault('use_content_range', False)
    kwargs.setdefault('stream_large_files', False)
    kwargs.setdefault('name', 'static')
    kwargs.setdefault('host', None)
    kwargs.setdefault('strict_slashes', None)
    self._static.append(FutureStatic(uri, file_or_directory, args, kwargs))
[ "def", "static", "(", "self", ",", "uri", ",", "file_or_directory", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'pattern'", ",", "r'/?.+'", ")", "kwargs", ".", "setdefault", "(", "'use_modified_since'", ",", "True", ")", "kwargs", ".", "setdefault", "(", "'use_content_range'", ",", "False", ")", "kwargs", ".", "setdefault", "(", "'stream_large_files'", ",", "False", ")", "kwargs", ".", "setdefault", "(", "'name'", ",", "'static'", ")", "kwargs", ".", "setdefault", "(", "'host'", ",", "None", ")", "kwargs", ".", "setdefault", "(", "'strict_slashes'", ",", "None", ")", "self", ".", "_static", ".", "append", "(", "FutureStatic", "(", "uri", ",", "file_or_directory", ",", "args", ",", "kwargs", ")", ")" ]
Register a static route serving a file or directory

:param uri: endpoint at which the static files will be accessible.
:type uri: str
:param file_or_directory: path to the file or directory to serve.
:type file_or_directory: str
:param args: captures all of the positional arguments passed in
:type args: tuple(Any)
:param kwargs: captures the keyword arguments passed in
:type kwargs: dict(Any)
[ "Create", "a", "websocket", "route", "from", "a", "decorated", "function", ":", "param", "uri", ":", "endpoint", "at", "which", "the", "socket", "endpoint", "will", "be", "accessible", ".", ":", "type", "uri", ":", "str", ":", "param", "args", ":", "captures", "all", "of", "the", "positional", "arguments", "passed", "in", ":", "type", "args", ":", "tuple", "(", "Any", ")", ":", "param", "kwargs", ":", "captures", "the", "keyword", "arguments", "passed", "in", ":", "type", "kwargs", ":", "dict", "(", "Any", ")", ":", "return", ":", "The", "exception", "function", "to", "use", "as", "the", "decorator", ":", "rtype", ":", "fn" ]
python
train
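A hedged sketch of how this registration method might be called; the SanicPlugin class name and import path are assumptions based on the repository layout, not confirmed by the record:

from spf.plugin import SanicPlugin  # import and class name assumed

plugin = SanicPlugin()
# Serve ./public under /assets; unset options such as 'pattern' and
# 'name' are filled in by the setdefault() calls inside static().
plugin.static('/assets', './public', use_content_range=True)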
idlesign/torrentool
torrentool/torrent.py
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/torrent.py#L428-L436
def from_file(cls, filepath):
    """Alternative constructor to get Torrent object from file.

    :param str filepath:
    :rtype: Torrent
    """
    torrent = cls(Bencode.read_file(filepath))
    torrent._filepath = filepath
    return torrent
[ "def", "from_file", "(", "cls", ",", "filepath", ")", ":", "torrent", "=", "cls", "(", "Bencode", ".", "read_file", "(", "filepath", ")", ")", "torrent", ".", "_filepath", "=", "filepath", "return", "torrent" ]
Alternative constructor to get Torrent object from file.

:param str filepath:
:rtype: Torrent
[ "Alternative", "constructor", "to", "get", "Torrent", "object", "from", "file", "." ]
python
train
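Basic use of the alternative constructor, per the docstring above (the file path is hypothetical):

from torrentool.torrent import Torrent

# Parse an existing .torrent file into a Torrent object.
torrent = Torrent.from_file('/path/to/some.torrent')  # path hypothetical
# The source path is remembered on the instance for later use.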
wandb/client
wandb/vendor/prompt_toolkit/shortcuts.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/shortcuts.py#L514-L548
def prompt(message='', **kwargs):
    """
    Get input from the user and return it. This is a wrapper around a lot of
    ``prompt_toolkit`` functionality and can be a replacement for
    `raw_input` (or GNU readline).

    If you want to keep your history across several calls, create one
    :class:`~prompt_toolkit.history.History` instance and pass it every time.

    This function accepts many keyword arguments. Except for the following,
    they are a proxy to the arguments of :func:`.create_prompt_application`.

    :param patch_stdout: Replace ``sys.stdout`` by a proxy that ensures that
        print statements from other threads won't destroy the prompt. (They
        will be printed above the prompt instead.)
    :param return_asyncio_coroutine: When True, return an asyncio coroutine.
        (Python >3.3)
    :param true_color: When True, use 24bit colors instead of 256 colors.
    :param refresh_interval: (number; in seconds) When given, refresh the UI
        every so many seconds.
    """
    patch_stdout = kwargs.pop('patch_stdout', False)
    return_asyncio_coroutine = kwargs.pop('return_asyncio_coroutine', False)
    true_color = kwargs.pop('true_color', False)
    refresh_interval = kwargs.pop('refresh_interval', 0)
    eventloop = kwargs.pop('eventloop', None)

    application = create_prompt_application(message, **kwargs)

    return run_application(application,
                           patch_stdout=patch_stdout,
                           return_asyncio_coroutine=return_asyncio_coroutine,
                           true_color=true_color,
                           refresh_interval=refresh_interval,
                           eventloop=eventloop)
[ "def", "prompt", "(", "message", "=", "''", ",", "*", "*", "kwargs", ")", ":", "patch_stdout", "=", "kwargs", ".", "pop", "(", "'patch_stdout'", ",", "False", ")", "return_asyncio_coroutine", "=", "kwargs", ".", "pop", "(", "'return_asyncio_coroutine'", ",", "False", ")", "true_color", "=", "kwargs", ".", "pop", "(", "'true_color'", ",", "False", ")", "refresh_interval", "=", "kwargs", ".", "pop", "(", "'refresh_interval'", ",", "0", ")", "eventloop", "=", "kwargs", ".", "pop", "(", "'eventloop'", ",", "None", ")", "application", "=", "create_prompt_application", "(", "message", ",", "*", "*", "kwargs", ")", "return", "run_application", "(", "application", ",", "patch_stdout", "=", "patch_stdout", ",", "return_asyncio_coroutine", "=", "return_asyncio_coroutine", ",", "true_color", "=", "true_color", ",", "refresh_interval", "=", "refresh_interval", ",", "eventloop", "=", "eventloop", ")" ]
Get input from the user and return it. This is a wrapper around a lot of
``prompt_toolkit`` functionality and can be a replacement for `raw_input`
(or GNU readline).

If you want to keep your history across several calls, create one
:class:`~prompt_toolkit.history.History` instance and pass it every time.

This function accepts many keyword arguments. Except for the following,
they are a proxy to the arguments of :func:`.create_prompt_application`.

:param patch_stdout: Replace ``sys.stdout`` by a proxy that ensures that
    print statements from other threads won't destroy the prompt. (They
    will be printed above the prompt instead.)
:param return_asyncio_coroutine: When True, return an asyncio coroutine.
    (Python >3.3)
:param true_color: When True, use 24bit colors instead of 256 colors.
:param refresh_interval: (number; in seconds) When given, refresh the UI
    every so many seconds.
[ "Get", "input", "from", "the", "user", "and", "return", "it", "." ]
python
train
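A minimal sketch of calling the wrapper, importing it via the vendored path shown in the record's URL:

from wandb.vendor.prompt_toolkit.shortcuts import prompt

# patch_stdout keeps prints from other threads above the prompt line.
answer = prompt('Your name: ', patch_stdout=True)
print('Hello,', answer)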
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/itsdangerous.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/itsdangerous.py#L322-L342
def derive_key(self):
    """This method is called to derive the key.  If you're unhappy with
    the default key derivation choices you can override them here.
    Keep in mind that the key derivation in itsdangerous is not intended
    to be used as a security method to make a complex key out of a short
    password.  Instead you should use large random secret keys.
    """
    salt = want_bytes(self.salt)
    if self.key_derivation == 'concat':
        return self.digest_method(salt + self.secret_key).digest()
    elif self.key_derivation == 'django-concat':
        return self.digest_method(salt + b'signer' + self.secret_key).digest()
    elif self.key_derivation == 'hmac':
        mac = hmac.new(self.secret_key, digestmod=self.digest_method)
        mac.update(salt)
        return mac.digest()
    elif self.key_derivation == 'none':
        return self.secret_key
    else:
        raise TypeError('Unknown key derivation method')
[ "def", "derive_key", "(", "self", ")", ":", "salt", "=", "want_bytes", "(", "self", ".", "salt", ")", "if", "self", ".", "key_derivation", "==", "'concat'", ":", "return", "self", ".", "digest_method", "(", "salt", "+", "self", ".", "secret_key", ")", ".", "digest", "(", ")", "elif", "self", ".", "key_derivation", "==", "'django-concat'", ":", "return", "self", ".", "digest_method", "(", "salt", "+", "b'signer'", "+", "self", ".", "secret_key", ")", ".", "digest", "(", ")", "elif", "self", ".", "key_derivation", "==", "'hmac'", ":", "mac", "=", "hmac", ".", "new", "(", "self", ".", "secret_key", ",", "digestmod", "=", "self", ".", "digest_method", ")", "mac", ".", "update", "(", "salt", ")", "return", "mac", ".", "digest", "(", ")", "elif", "self", ".", "key_derivation", "==", "'none'", ":", "return", "self", ".", "secret_key", "else", ":", "raise", "TypeError", "(", "'Unknown key derivation method'", ")" ]
This method is called to derive the key. If you're unhappy with the default key derivation choices you can override them here. Keep in mind that the key derivation in itsdangerous is not intended to be used as a security method to make a complex key out of a short password. Instead you should use large random secret keys.
[ "This", "method", "is", "called", "to", "derive", "the", "key", ".", "If", "you", "re", "unhappy", "with", "the", "default", "key", "derivation", "choices", "you", "can", "override", "them", "here", ".", "Keep", "in", "mind", "that", "the", "key", "derivation", "in", "itsdangerous", "is", "not", "intended", "to", "be", "used", "as", "a", "security", "method", "to", "make", "a", "complex", "key", "out", "of", "a", "short", "password", ".", "Instead", "you", "should", "use", "large", "random", "secret", "keys", "." ]
python
test
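For illustration, here is what derive_key() computes in its 'django-concat' branch, reproduced with hashlib under the assumption of a SHA-1 digest_method and the 'itsdangerous.Signer' salt (both believed to be the Signer defaults in this era of the library):

import hashlib

secret_key = b'a-large-random-secret-key'
salt = b'itsdangerous.Signer'  # assumed default Signer salt
# 'django-concat' branch: digest(salt + b'signer' + secret_key)
derived = hashlib.sha1(salt + b'signer' + secret_key).digest()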
Celeo/Pycord
pycord/__init__.py
https://github.com/Celeo/Pycord/blob/15c38e39b508c89c35f7f6d7009fe8e9f161a94e/pycord/__init__.py#L374-L399
def connect_to_websocket(self):
    """Call this method to make the connection to the Discord websocket

    This method is not blocking, so you'll probably want to call it after
    initializing your Pycord object, and then move on with your code. When
    you want to block on just maintaining the websocket connection, then
    call ``keep_running``, and it'll block until your application is
    interrupted.

    Args:
        None
    """
    self.logger.info('Making websocket connection')
    try:
        if hasattr(self, '_ws'):
            self._ws.close()
    except Exception:
        self.logger.debug('Couldn\'t terminate previous websocket connection')
    self._ws = websocket.WebSocketApp(
        self._get_websocket_address() + '?v=6&encoding=json',
        on_message=self._ws_on_message,
        on_error=self._ws_on_error,
        on_close=self._ws_on_close
    )
    self._ws.on_open = self._ws_on_open
    self._ws_run_forever_wrapper = WebSocketRunForeverWrapper(self.logger, self._ws)
    self._ws_run_forever_wrapper.start()
[ "def", "connect_to_websocket", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "'Making websocket connection'", ")", "try", ":", "if", "hasattr", "(", "self", ",", "'_ws'", ")", ":", "self", ".", "_ws", ".", "close", "(", ")", "except", ":", "self", ".", "logger", ".", "debug", "(", "'Couldn\\'t terminate previous websocket connection'", ")", "self", ".", "_ws", "=", "websocket", ".", "WebSocketApp", "(", "self", ".", "_get_websocket_address", "(", ")", "+", "'?v=6&encoding=json'", ",", "on_message", "=", "self", ".", "_ws_on_message", ",", "on_error", "=", "self", ".", "_ws_on_error", ",", "on_close", "=", "self", ".", "_ws_on_close", ")", "self", ".", "_ws", ".", "on_open", "=", "self", ".", "_ws_on_open", "self", ".", "_ws_run_forever_wrapper", "=", "WebSocketRunForeverWrapper", "(", "self", ".", "logger", ",", "self", ".", "_ws", ")", "self", ".", "_ws_run_forever_wrapper", ".", "start", "(", ")" ]
Call this method to make the connection to the Discord websocket

This method is not blocking, so you'll probably want to call it after
initializing your Pycord object, and then move on with your code. When you
want to block on just maintaining the websocket connection, then call
``keep_running``, and it'll block until your application is interrupted.

Args:
    None
[ "Call", "this", "method", "to", "make", "the", "connection", "to", "the", "Discord", "websocket" ]
python
train
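A sketch of the call pattern the docstring describes; the Pycord constructor signature here is an assumption, not taken from the record:

from pycord import Pycord

bot = Pycord('my-bot-token')  # constructor arguments assumed
bot.connect_to_websocket()    # non-blocking: starts the websocket thread
bot.keep_running()            # block until the application is interrupted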
ff0000/scarlet
scarlet/versioning/models.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/versioning/models.py#L171-L187
def _delete_reverses(self):
    """
    Delete all objects that would have been cloned
    on a clone command. This is done separately because
    there may be m2m and other relationships that
    would have not been deleted otherwise.
    """
    for reverse in self.clone_related:
        self._delete_reverse(reverse)

    for field in self._meta.local_many_to_many:
        if field.rel.through and \
                field.rel.through._meta.auto_created and \
                field.name not in self.clone_related:
            man = getattr(self, field.name)
            man.clear()
[ "def", "_delete_reverses", "(", "self", ")", ":", "for", "reverse", "in", "self", ".", "clone_related", ":", "self", ".", "_delete_reverse", "(", "reverse", ")", "for", "field", "in", "self", ".", "_meta", ".", "local_many_to_many", ":", "if", "field", ".", "rel", ".", "through", "and", "field", ".", "rel", ".", "through", ".", "_meta", ".", "auto_created", "and", "not", "field", ".", "name", "in", "self", ".", "clone_related", ":", "man", "=", "getattr", "(", "self", ",", "field", ".", "name", ")", "man", ".", "clear", "(", ")" ]
Delete all objects that would have been cloned on a clone command. This is done separately because there may be m2m and other relationships that would have not been deleted otherwise.
[ "Delete", "all", "objects", "that", "would", "have", "been", "cloned", "on", "a", "clone", "command", ".", "This", "is", "done", "separately", "because", "there", "may", "be", "m2m", "and", "other", "relationships", "that", "would", "have", "not", "been", "deleted", "otherwise", "." ]
python
train
LISE-B26/pylabcontrol
pylabcontrol/core/script_iterator.py
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/pylabcontrol/core/script_iterator.py#L351-L359
def _receive_signal(self, progress_subscript):
    """
    this function takes care of signals emitted by the subscripts

    Args:
        progress_subscript: progress of subscript
    """
    self.progress = self._estimate_progress()
    self.updateProgress.emit(int(self.progress))
[ "def", "_receive_signal", "(", "self", ",", "progress_subscript", ")", ":", "self", ".", "progress", "=", "self", ".", "_estimate_progress", "(", ")", "self", ".", "updateProgress", ".", "emit", "(", "int", "(", "self", ".", "progress", ")", ")" ]
this function takes care of signals emitted by the subscripts

Args:
    progress_subscript: progress of subscript
[ "this", "function", "takes", "care", "of", "signals", "emitted", "by", "the", "subscripts", "Args", ":", "progress_subscript", ":", "progress", "of", "subscript" ]
python
train
dlanger/inlinestyler
inlinestyler/converter.py
https://github.com/dlanger/inlinestyler/blob/335c4fbab892f0ed67466a6beaea6a91f395ad12/inlinestyler/converter.py#L66-L74
def styleattribute(self, element):
    """
    returns css.CSSStyleDeclaration of inline styles, for html: @style
    """
    css_text = element.get('style')
    if css_text:
        return cssutils.css.CSSStyleDeclaration(cssText=css_text)
    else:
        return None
[ "def", "styleattribute", "(", "self", ",", "element", ")", ":", "css_text", "=", "element", ".", "get", "(", "'style'", ")", "if", "css_text", ":", "return", "cssutils", ".", "css", ".", "CSSStyleDeclaration", "(", "cssText", "=", "css_text", ")", "else", ":", "return", "None" ]
returns css.CSSStyleDeclaration of inline styles, for html: @style
[ "returns", "css", ".", "CSSStyleDeclaration", "of", "inline", "styles", "for", "html", ":" ]
python
train
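A standalone illustration of the cssutils object this method returns, parsing an inline style string directly:

import cssutils

decl = cssutils.css.CSSStyleDeclaration(cssText='color: red; margin: 0')
print(decl.getPropertyValue('color'))  # 'red'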
google/grr
grr/server/grr_response_server/data_store.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/data_store.py#L289-L345
def QueueClaimRecords(self,
                      queue_id,
                      item_rdf_type,
                      limit=10000,
                      timeout="30m",
                      start_time=None,
                      record_filter=lambda x: False,
                      max_filtered=1000):
    """Claims records from a queue. See server/aff4_objects/queue.py."""
    now = rdfvalue.RDFDatetime.Now()
    expiration = rdfvalue.RDFDatetime.Now() + rdfvalue.Duration(timeout)

    after_urn = None
    if start_time:
        after_urn, _, _ = DataStore.CollectionMakeURN(
            queue_id, start_time.AsMicrosecondsSinceEpoch(), 0, subpath="Records")

    results = []
    filtered_count = 0

    for subject, values in DB.ScanAttributes(
            str(queue_id.Add("Records")),
            [DataStore.COLLECTION_ATTRIBUTE, DataStore.QUEUE_LOCK_ATTRIBUTE],
            max_records=4 * limit,
            after_urn=after_urn):
        if DataStore.COLLECTION_ATTRIBUTE not in values:
            # Unlikely case, but could happen if, say, a thread called
            # RefreshClaims so late that another thread already deleted the
            # record. Go ahead and clean this up.
            self.DeleteAttributes(subject, [DataStore.QUEUE_LOCK_ATTRIBUTE])
            continue
        if DataStore.QUEUE_LOCK_ATTRIBUTE in values:
            timestamp = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(
                values[DataStore.QUEUE_LOCK_ATTRIBUTE][1])
            if timestamp > now:
                continue
        rdf_value = item_rdf_type.FromSerializedString(
            values[DataStore.COLLECTION_ATTRIBUTE][1])
        if record_filter(rdf_value):
            filtered_count += 1
            if max_filtered and filtered_count >= max_filtered:
                break
            continue
        results.append(
            Record(
                queue_id=queue_id,
                timestamp=values[DataStore.COLLECTION_ATTRIBUTE][0],
                suffix=int(subject[-6:], 16),
                subpath="Records",
                value=rdf_value))
        self.Set(subject, DataStore.QUEUE_LOCK_ATTRIBUTE, expiration)
        filtered_count = 0
        if len(results) >= limit:
            break
    return results
[ "def", "QueueClaimRecords", "(", "self", ",", "queue_id", ",", "item_rdf_type", ",", "limit", "=", "10000", ",", "timeout", "=", "\"30m\"", ",", "start_time", "=", "None", ",", "record_filter", "=", "lambda", "x", ":", "False", ",", "max_filtered", "=", "1000", ")", ":", "now", "=", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", "expiration", "=", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", "+", "rdfvalue", ".", "Duration", "(", "timeout", ")", "after_urn", "=", "None", "if", "start_time", ":", "after_urn", ",", "_", ",", "_", "=", "DataStore", ".", "CollectionMakeURN", "(", "queue_id", ",", "start_time", ".", "AsMicrosecondsSinceEpoch", "(", ")", ",", "0", ",", "subpath", "=", "\"Records\"", ")", "results", "=", "[", "]", "filtered_count", "=", "0", "for", "subject", ",", "values", "in", "DB", ".", "ScanAttributes", "(", "str", "(", "queue_id", ".", "Add", "(", "\"Records\"", ")", ")", ",", "[", "DataStore", ".", "COLLECTION_ATTRIBUTE", ",", "DataStore", ".", "QUEUE_LOCK_ATTRIBUTE", "]", ",", "max_records", "=", "4", "*", "limit", ",", "after_urn", "=", "after_urn", ")", ":", "if", "DataStore", ".", "COLLECTION_ATTRIBUTE", "not", "in", "values", ":", "# Unlikely case, but could happen if, say, a thread called RefreshClaims", "# so late that another thread already deleted the record. Go ahead and", "# clean this up.", "self", ".", "DeleteAttributes", "(", "subject", ",", "[", "DataStore", ".", "QUEUE_LOCK_ATTRIBUTE", "]", ")", "continue", "if", "DataStore", ".", "QUEUE_LOCK_ATTRIBUTE", "in", "values", ":", "timestamp", "=", "rdfvalue", ".", "RDFDatetime", ".", "FromMicrosecondsSinceEpoch", "(", "values", "[", "DataStore", ".", "QUEUE_LOCK_ATTRIBUTE", "]", "[", "1", "]", ")", "if", "timestamp", ">", "now", ":", "continue", "rdf_value", "=", "item_rdf_type", ".", "FromSerializedString", "(", "values", "[", "DataStore", ".", "COLLECTION_ATTRIBUTE", "]", "[", "1", "]", ")", "if", "record_filter", "(", "rdf_value", ")", ":", "filtered_count", "+=", "1", "if", "max_filtered", "and", "filtered_count", ">=", "max_filtered", ":", "break", "continue", "results", ".", "append", "(", "Record", "(", "queue_id", "=", "queue_id", ",", "timestamp", "=", "values", "[", "DataStore", ".", "COLLECTION_ATTRIBUTE", "]", "[", "0", "]", ",", "suffix", "=", "int", "(", "subject", "[", "-", "6", ":", "]", ",", "16", ")", ",", "subpath", "=", "\"Records\"", ",", "value", "=", "rdf_value", ")", ")", "self", ".", "Set", "(", "subject", ",", "DataStore", ".", "QUEUE_LOCK_ATTRIBUTE", ",", "expiration", ")", "filtered_count", "=", "0", "if", "len", "(", "results", ")", ">=", "limit", ":", "break", "return", "results" ]
Claims records from a queue. See server/aff4_objects/queue.py.
[ "Claims", "records", "from", "a", "queue", ".", "See", "server", "/", "aff4_objects", "/", "queue", ".", "py", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/envs/env_problem.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/env_problem.py#L199-L235
def _verify_same_spaces(self):
    """Verifies that all the envs have the same observation and action space."""

    # Pre-conditions: self._envs is initialized.

    if self._envs is None:
        raise ValueError("Environments not initialized.")

    if not isinstance(self._envs, list):
        tf.logging.warning("Not checking observation and action space "
                           "compatibility across envs, since there is just one.")
        return

    # NOTE: We compare string representations of observation_space and
    # action_space because compositional classes like space.Tuple don't return
    # true on object comparison.

    if not all(
            str(env.observation_space) == str(self.observation_space)
            for env in self._envs):
        err_str = ("All environments should have the same observation space, but "
                   "don't.")
        tf.logging.error(err_str)
        # Log all observation spaces.
        for i, env in enumerate(self._envs):
            tf.logging.error("Env[%d] has observation space [%s]", i,
                             env.observation_space)
        raise ValueError(err_str)

    if not all(
            str(env.action_space) == str(self.action_space)
            for env in self._envs):
        err_str = "All environments should have the same action space, but don't."
        tf.logging.error(err_str)
        # Log all action spaces.
        for i, env in enumerate(self._envs):
            tf.logging.error("Env[%d] has action space [%s]", i, env.action_space)
        raise ValueError(err_str)
[ "def", "_verify_same_spaces", "(", "self", ")", ":", "# Pre-conditions: self._envs is initialized.", "if", "self", ".", "_envs", "is", "None", ":", "raise", "ValueError", "(", "\"Environments not initialized.\"", ")", "if", "not", "isinstance", "(", "self", ".", "_envs", ",", "list", ")", ":", "tf", ".", "logging", ".", "warning", "(", "\"Not checking observation and action space \"", "\"compatibility across envs, since there is just one.\"", ")", "return", "# NOTE: We compare string representations of observation_space and", "# action_space because compositional classes like space.Tuple don't return", "# true on object comparison.", "if", "not", "all", "(", "str", "(", "env", ".", "observation_space", ")", "==", "str", "(", "self", ".", "observation_space", ")", "for", "env", "in", "self", ".", "_envs", ")", ":", "err_str", "=", "(", "\"All environments should have the same observation space, but \"", "\"don't.\"", ")", "tf", ".", "logging", ".", "error", "(", "err_str", ")", "# Log all observation spaces.", "for", "i", ",", "env", "in", "enumerate", "(", "self", ".", "_envs", ")", ":", "tf", ".", "logging", ".", "error", "(", "\"Env[%d] has observation space [%s]\"", ",", "i", ",", "env", ".", "observation_space", ")", "raise", "ValueError", "(", "err_str", ")", "if", "not", "all", "(", "str", "(", "env", ".", "action_space", ")", "==", "str", "(", "self", ".", "action_space", ")", "for", "env", "in", "self", ".", "_envs", ")", ":", "err_str", "=", "\"All environments should have the same action space, but don't.\"", "tf", ".", "logging", ".", "error", "(", "err_str", ")", "# Log all action spaces.", "for", "i", ",", "env", "in", "enumerate", "(", "self", ".", "_envs", ")", ":", "tf", ".", "logging", ".", "error", "(", "\"Env[%d] has action space [%s]\"", ",", "i", ",", "env", ".", "action_space", ")", "raise", "ValueError", "(", "err_str", ")" ]
Verifies that all the envs have the same observation and action space.
[ "Verifies", "that", "all", "the", "envs", "have", "the", "same", "observation", "and", "action", "space", "." ]
python
train
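A small sketch of why the check above compares str() of the spaces, per the NOTE in the code: equality of composite gym spaces is version-dependent, while their string representations are stable for identical structures. The gym package is assumed available:

from gym import spaces

a = spaces.Tuple((spaces.Discrete(2), spaces.Discrete(3)))
b = spaces.Tuple((spaces.Discrete(2), spaces.Discrete(3)))
# Textual comparison is the conservative choice across gym versions.
print(str(a) == str(b))  # True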
senaite/senaite.api
src/senaite/api/__init__.py
https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L458-L496
def get_object_by_path(path, default=_marker):
    """Find an object by a given physical path or absolute_url

    :param path: The physical path of the object to find
    :type path: string
    :returns: Found Object or None
    """

    # nothing to do here
    if not path:
        if default is not _marker:
            return default
        fail("get_object_by_path first argument must be a path; {} received"
             .format(path))

    pc = get_portal_catalog()
    portal = get_portal()
    portal_path = get_path(portal)
    portal_url = get_url(portal)

    # ensure we have a physical path
    if path.startswith(portal_url):
        request = get_request()
        path = "/".join(request.physicalPathFromURL(path))

    if not path.startswith(portal_path):
        if default is not _marker:
            return default
        fail("Not a physical path inside the portal.")

    if path == portal_path:
        return portal

    res = pc(path=dict(query=path, depth=0))
    if not res:
        if default is not _marker:
            return default
        fail("Object at path '{}' not found".format(path))
    return get_object(res[0])
[ "def", "get_object_by_path", "(", "path", ",", "default", "=", "_marker", ")", ":", "# nothing to do here", "if", "not", "path", ":", "if", "default", "is", "not", "_marker", ":", "return", "default", "fail", "(", "\"get_object_by_path first argument must be a path; {} received\"", ".", "format", "(", "path", ")", ")", "pc", "=", "get_portal_catalog", "(", ")", "portal", "=", "get_portal", "(", ")", "portal_path", "=", "get_path", "(", "portal", ")", "portal_url", "=", "get_url", "(", "portal", ")", "# ensure we have a physical path", "if", "path", ".", "startswith", "(", "portal_url", ")", ":", "request", "=", "get_request", "(", ")", "path", "=", "\"/\"", ".", "join", "(", "request", ".", "physicalPathFromURL", "(", "path", ")", ")", "if", "not", "path", ".", "startswith", "(", "portal_path", ")", ":", "if", "default", "is", "not", "_marker", ":", "return", "default", "fail", "(", "\"Not a physical path inside the portal.\"", ")", "if", "path", "==", "portal_path", ":", "return", "portal", "res", "=", "pc", "(", "path", "=", "dict", "(", "query", "=", "path", ",", "depth", "=", "0", ")", ")", "if", "not", "res", ":", "if", "default", "is", "not", "_marker", ":", "return", "default", "fail", "(", "\"Object at path '{}' not found\"", ".", "format", "(", "path", ")", ")", "return", "get_object", "(", "res", "[", "0", "]", ")" ]
Find an object by a given physical path or absolute_url

:param path: The physical path of the object to find
:type path: string
:returns: Found Object or None
[ "Find", "an", "object", "by", "a", "given", "physical", "path", "or", "absolute_url" ]
python
train
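Usage per the docstring, inside a running senaite/Plone site; the object path below is hypothetical:

from senaite import api

obj = api.get_object_by_path('/senaite/clients/client-1', default=None)  # path hypothetical
if obj is None:
    print('No object at that path')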
majerteam/sqla_inspect
sqla_inspect/py3o.py
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/py3o.py#L228-L250
def _get_formatted_val(self, obj, attribute, column):
    """
    Return the formatted value of the attribute "attribute" of the obj "obj"
    regarding the column's description

    :param obj obj: The instance we manage
    :param str attribute: The string defining the path to access the end
        attribute we want to manage
    :param dict column: The column description dictionary
    :returns: The associated value
    """
    attr_path = attribute.split('.')
    val = None
    tmp_val = obj
    for attr in attr_path:
        tmp_val = getattr(tmp_val, attr, None)
        if tmp_val is None:
            break

    if tmp_val is not None:
        val = tmp_val

    value = format_value(column, val, self.config_key)
    return format_py3o_val(value)
[ "def", "_get_formatted_val", "(", "self", ",", "obj", ",", "attribute", ",", "column", ")", ":", "attr_path", "=", "attribute", ".", "split", "(", "'.'", ")", "val", "=", "None", "tmp_val", "=", "obj", "for", "attr", "in", "attr_path", ":", "tmp_val", "=", "getattr", "(", "tmp_val", ",", "attr", ",", "None", ")", "if", "tmp_val", "is", "None", ":", "break", "if", "tmp_val", "is", "not", "None", ":", "val", "=", "tmp_val", "value", "=", "format_value", "(", "column", ",", "val", ",", "self", ".", "config_key", ")", "return", "format_py3o_val", "(", "value", ")" ]
Return the formatted value of the attribute "attribute" of the obj "obj"
regarding the column's description

:param obj obj: The instance we manage
:param str attribute: The string defining the path to access the end
    attribute we want to manage
:param dict column: The column description dictionary
:returns: The associated value
[ "Return", "the", "formatted", "value", "of", "the", "attribute", "attribute", "of", "the", "obj", "obj", "regarding", "the", "column", "s", "description" ]
python
train
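A standalone sketch of the dotted-path traversal this helper performs before formatting; the classes here are hypothetical:

class Company:
    name = 'ACME'

class Invoice:
    company = Company()

def resolve(obj, attribute):
    # Walk 'a.b.c' one attribute at a time, stopping at the first None.
    val = obj
    for attr in attribute.split('.'):
        val = getattr(val, attr, None)
        if val is None:
            break
    return val

print(resolve(Invoice(), 'company.name'))  # 'ACME'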
materialsproject/pymatgen
pymatgen/io/qchem/outputs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/qchem/outputs.py#L374-L402
def _read_mulliken(self):
    """
    Parses Mulliken charges. Also parses spins given an unrestricted SCF.
    """
    if self.data.get('unrestricted', []):
        header_pattern = r"\-+\s+Ground-State Mulliken Net Atomic Charges\s+Atom\s+Charge \(a\.u\.\)\s+Spin\s\(a\.u\.\)\s+\-+"
        table_pattern = r"\s+\d+\s\w+\s+([\d\-\.]+)\s+([\d\-\.]+)"
        footer_pattern = r"\s\s\-+\s+Sum of atomic charges"
    else:
        header_pattern = r"\-+\s+Ground-State Mulliken Net Atomic Charges\s+Atom\s+Charge \(a\.u\.\)\s+\-+"
        table_pattern = r"\s+\d+\s\w+\s+([\d\-\.]+)"
        footer_pattern = r"\s\s\-+\s+Sum of atomic charges"

    temp_mulliken = read_table_pattern(self.text, header_pattern,
                                       table_pattern, footer_pattern)
    real_mulliken = []
    for one_mulliken in temp_mulliken:
        if self.data.get('unrestricted', []):
            temp = np.zeros(shape=(len(one_mulliken), 2))
            for ii, entry in enumerate(one_mulliken):
                temp[ii, 0] = float(entry[0])
                temp[ii, 1] = float(entry[1])
        else:
            temp = np.zeros(len(one_mulliken))
            for ii, entry in enumerate(one_mulliken):
                temp[ii] = float(entry[0])
        real_mulliken += [temp]

    self.data["Mulliken"] = real_mulliken
[ "def", "_read_mulliken", "(", "self", ")", ":", "if", "self", ".", "data", ".", "get", "(", "'unrestricted'", ",", "[", "]", ")", ":", "header_pattern", "=", "r\"\\-+\\s+Ground-State Mulliken Net Atomic Charges\\s+Atom\\s+Charge \\(a\\.u\\.\\)\\s+Spin\\s\\(a\\.u\\.\\)\\s+\\-+\"", "table_pattern", "=", "r\"\\s+\\d+\\s\\w+\\s+([\\d\\-\\.]+)\\s+([\\d\\-\\.]+)\"", "footer_pattern", "=", "r\"\\s\\s\\-+\\s+Sum of atomic charges\"", "else", ":", "header_pattern", "=", "r\"\\-+\\s+Ground-State Mulliken Net Atomic Charges\\s+Atom\\s+Charge \\(a\\.u\\.\\)\\s+\\-+\"", "table_pattern", "=", "r\"\\s+\\d+\\s\\w+\\s+([\\d\\-\\.]+)\"", "footer_pattern", "=", "r\"\\s\\s\\-+\\s+Sum of atomic charges\"", "temp_mulliken", "=", "read_table_pattern", "(", "self", ".", "text", ",", "header_pattern", ",", "table_pattern", ",", "footer_pattern", ")", "real_mulliken", "=", "[", "]", "for", "one_mulliken", "in", "temp_mulliken", ":", "if", "self", ".", "data", ".", "get", "(", "'unrestricted'", ",", "[", "]", ")", ":", "temp", "=", "np", ".", "zeros", "(", "shape", "=", "(", "len", "(", "one_mulliken", ")", ",", "2", ")", ")", "for", "ii", ",", "entry", "in", "enumerate", "(", "one_mulliken", ")", ":", "temp", "[", "ii", ",", "0", "]", "=", "float", "(", "entry", "[", "0", "]", ")", "temp", "[", "ii", ",", "1", "]", "=", "float", "(", "entry", "[", "1", "]", ")", "else", ":", "temp", "=", "np", ".", "zeros", "(", "len", "(", "one_mulliken", ")", ")", "for", "ii", ",", "entry", "in", "enumerate", "(", "one_mulliken", ")", ":", "temp", "[", "ii", "]", "=", "float", "(", "entry", "[", "0", "]", ")", "real_mulliken", "+=", "[", "temp", "]", "self", ".", "data", "[", "\"Mulliken\"", "]", "=", "real_mulliken" ]
Parses Mulliken charges. Also parses spins given an unrestricted SCF.
[ "Parses", "Mulliken", "charges", ".", "Also", "parses", "spins", "given", "an", "unrestricted", "SCF", "." ]
python
train
wummel/dosage
scripts/smackjeeves.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/scripts/smackjeeves.py#L295-L307
def get_results():
    """Parse all search result pages."""
    base = "http://www.smackjeeves.com/search.php?submit=Search+for+Webcomics&search_mode=webcomics&comic_title=&special=all&last_update=3&style_all=on&genre_all=on&format_all=on&sort_by=2&start=%d"
    session = requests.Session()
    # store info in a dictionary {name -> url, number of comics, adult flag, bounce flag}
    res = {}
    # a search for an empty string returned 286 result pages
    result_pages = 286
    print("Parsing", result_pages, "search result pages...", file=sys.stderr)
    for i in range(0, result_pages):
        print(i + 1, file=sys.stderr, end=" ")
        handle_url(base % (i * 12), session, res)
    save_result(res, json_file)
[ "def", "get_results", "(", ")", ":", "base", "=", "\"http://www.smackjeeves.com/search.php?submit=Search+for+Webcomics&search_mode=webcomics&comic_title=&special=all&last_update=3&style_all=on&genre_all=on&format_all=on&sort_by=2&start=%d\"", "session", "=", "requests", ".", "Session", "(", ")", "# store info in a dictionary {name -> url, number of comics, adult flag, bounce flag}", "res", "=", "{", "}", "# a search for an empty string returned 286 result pages", "result_pages", "=", "286", "print", "(", "\"Parsing\"", ",", "result_pages", ",", "\"search result pages...\"", ",", "file", "=", "sys", ".", "stderr", ")", "for", "i", "in", "range", "(", "0", ",", "result_pages", ")", ":", "print", "(", "i", "+", "1", ",", "file", "=", "sys", ".", "stderr", ",", "end", "=", "\" \"", ")", "handle_url", "(", "base", "%", "(", "i", "*", "12", ")", ",", "session", ",", "res", ")", "save_result", "(", "res", ",", "json_file", ")" ]
Parse all search result pages.
[ "Parse", "all", "search", "result", "pages", "." ]
python
train
horejsek/python-fastjsonschema
fastjsonschema/draft04.py
https://github.com/horejsek/python-fastjsonschema/blob/8c38d0f91fa5d928ff629080cdb75ab23f96590f/fastjsonschema/draft04.py#L151-L177
def generate_one_of(self):
    """
    Means that the value has to be valid by exactly one of those definitions.
    It can't be valid by two or more of them.

    .. code-block:: python

        {
            'oneOf': [
                {'type': 'number', 'multipleOf': 3},
                {'type': 'number', 'multipleOf': 5},
            ],
        }

    Valid values for this definition are 3, 5, 6, ... but not 15 for example.
    """
    self.l('{variable}_one_of_count = 0')
    for definition_item in self._definition['oneOf']:
        # When we know it's failing (one of means exactly once), we do not
        # need to do another expensive try-except.
        with self.l('if {variable}_one_of_count < 2:'):
            with self.l('try:'):
                self.generate_func_code_block(definition_item, self._variable,
                                              self._variable_name,
                                              clear_variables=True)
                self.l('{variable}_one_of_count += 1')
            self.l('except JsonSchemaException: pass')
    with self.l('if {variable}_one_of_count != 1:'):
        self.l('raise JsonSchemaException("{name} must be valid exactly by one of oneOf definition")')
[ "def", "generate_one_of", "(", "self", ")", ":", "self", ".", "l", "(", "'{variable}_one_of_count = 0'", ")", "for", "definition_item", "in", "self", ".", "_definition", "[", "'oneOf'", "]", ":", "# When we know it's failing (one of means exactly once), we do not need to do another expensive try-except.", "with", "self", ".", "l", "(", "'if {variable}_one_of_count < 2:'", ")", ":", "with", "self", ".", "l", "(", "'try:'", ")", ":", "self", ".", "generate_func_code_block", "(", "definition_item", ",", "self", ".", "_variable", ",", "self", ".", "_variable_name", ",", "clear_variables", "=", "True", ")", "self", ".", "l", "(", "'{variable}_one_of_count += 1'", ")", "self", ".", "l", "(", "'except JsonSchemaException: pass'", ")", "with", "self", ".", "l", "(", "'if {variable}_one_of_count != 1:'", ")", ":", "self", ".", "l", "(", "'raise JsonSchemaException(\"{name} must be valid exactly by one of oneOf definition\")'", ")" ]
Means that the value has to be valid by exactly one of those definitions.
It can't be valid by two or more of them.

.. code-block:: python

    {
        'oneOf': [
            {'type': 'number', 'multipleOf': 3},
            {'type': 'number', 'multipleOf': 5},
        ],
    }

Valid values for this definition are 3, 5, 6, ... but not 15 for example.
[ "Means", "that", "value", "have", "to", "be", "valid", "by", "only", "one", "of", "those", "definitions", ".", "It", "can", "t", "be", "valid", "by", "two", "or", "more", "of", "them", "." ]
python
train
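The behaviour the generated code enforces, exercised through the public fastjsonschema entry point:

import fastjsonschema

validate = fastjsonschema.compile({
    'oneOf': [
        {'type': 'number', 'multipleOf': 3},
        {'type': 'number', 'multipleOf': 5},
    ],
})
validate(9)     # valid: multiple of 3 only
validate(10)    # valid: multiple of 5 only
try:
    validate(15)  # multiple of both 3 and 5: matches two schemas
except fastjsonschema.JsonSchemaException as exc:
    print(exc)    # must be valid exactly by one of oneOf definition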
fermiPy/fermipy
fermipy/gtanalysis.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L2349-L2436
def profile_norm(self, name, logemin=None, logemax=None, reoptimize=False,
                 xvals=None, npts=None, fix_shape=True, savestate=True,
                 **kwargs):
    """Profile the normalization of a source.

    Parameters
    ----------
    name : str
        Source name.

    reoptimize : bool
        Re-optimize free parameters in the model at each point
        in the profile likelihood scan.
    """
    self.logger.debug('Profiling %s', name)

    if savestate:
        saved_state = LikelihoodState(self.like)

    if fix_shape:
        self.free_sources(False, pars='shape', loglevel=logging.DEBUG)

    if npts is None:
        npts = self.config['gtlike']['llscan_npts']

    # Find the source
    name = self.roi.get_source_by_name(name).name
    parName = self.like.normPar(name).getName()

    loge_bounds = self.loge_bounds
    if logemin is not None or logemax is not None:
        self.set_energy_range(logemin, logemax)

    # Find a sequence of values for the normalization scan
    if xvals is None:
        if reoptimize:
            xvals = self._find_scan_pts_reopt(name, npts=npts, **kwargs)
        else:
            xvals = self._find_scan_pts(name, npts=9)

            lnlp = self.profile(name, parName, reoptimize=False, xvals=xvals)
            lims = utils.get_parameter_limits(lnlp['xvals'], lnlp['dloglike'],
                                              cl_limit=0.99)

            if not np.isfinite(lims['ul']):
                self.logger.warning('Upper limit not found. '
                                    'Refitting normalization.')
                self.like.optimize(0)
                xvals = self._find_scan_pts(name, npts=npts)
                lnlp = self.profile(name, parName, reoptimize=False,
                                    xvals=xvals)
                lims = utils.get_parameter_limits(lnlp['xvals'],
                                                  lnlp['dloglike'],
                                                  cl_limit=0.99)

            if np.isfinite(lims['ll']):
                xhi = np.linspace(lims['x0'], lims['ul'], npts - npts // 2)
                xlo = np.linspace(lims['ll'], lims['x0'], npts // 2)
                xvals = np.concatenate((xlo[:-1], xhi))
                xvals = np.insert(xvals, 0, 0.0)
            elif np.abs(lnlp['dloglike'][0] - lims['lnlmax']) > 0.1:
                lims['ll'] = 0.0
                xhi = np.linspace(lims['x0'], lims['ul'],
                                  (npts + 1) - (npts + 1) // 2)
                xlo = np.linspace(lims['ll'], lims['x0'], (npts + 1) // 2)
                xvals = np.concatenate((xlo[:-1], xhi))
            else:
                xvals = np.linspace(0, lims['ul'], npts)

    o = self.profile(name, parName, reoptimize=reoptimize, xvals=xvals,
                     savestate=savestate, **kwargs)

    if savestate:
        saved_state.restore()

    if logemin is not None or logemax is not None:
        self.set_energy_range(*loge_bounds)

    self.logger.debug('Finished')

    return o
[ "def", "profile_norm", "(", "self", ",", "name", ",", "logemin", "=", "None", ",", "logemax", "=", "None", ",", "reoptimize", "=", "False", ",", "xvals", "=", "None", ",", "npts", "=", "None", ",", "fix_shape", "=", "True", ",", "savestate", "=", "True", ",", "*", "*", "kwargs", ")", ":", "self", ".", "logger", ".", "debug", "(", "'Profiling %s'", ",", "name", ")", "if", "savestate", ":", "saved_state", "=", "LikelihoodState", "(", "self", ".", "like", ")", "if", "fix_shape", ":", "self", ".", "free_sources", "(", "False", ",", "pars", "=", "'shape'", ",", "loglevel", "=", "logging", ".", "DEBUG", ")", "if", "npts", "is", "None", ":", "npts", "=", "self", ".", "config", "[", "'gtlike'", "]", "[", "'llscan_npts'", "]", "# Find the source", "name", "=", "self", ".", "roi", ".", "get_source_by_name", "(", "name", ")", ".", "name", "parName", "=", "self", ".", "like", ".", "normPar", "(", "name", ")", ".", "getName", "(", ")", "loge_bounds", "=", "self", ".", "loge_bounds", "if", "logemin", "is", "not", "None", "or", "logemax", "is", "not", "None", ":", "self", ".", "set_energy_range", "(", "logemin", ",", "logemax", ")", "# Find a sequence of values for the normalization scan", "if", "xvals", "is", "None", ":", "if", "reoptimize", ":", "xvals", "=", "self", ".", "_find_scan_pts_reopt", "(", "name", ",", "npts", "=", "npts", ",", "*", "*", "kwargs", ")", "else", ":", "xvals", "=", "self", ".", "_find_scan_pts", "(", "name", ",", "npts", "=", "9", ")", "lnlp", "=", "self", ".", "profile", "(", "name", ",", "parName", ",", "reoptimize", "=", "False", ",", "xvals", "=", "xvals", ")", "lims", "=", "utils", ".", "get_parameter_limits", "(", "lnlp", "[", "'xvals'", "]", ",", "lnlp", "[", "'dloglike'", "]", ",", "cl_limit", "=", "0.99", ")", "if", "not", "np", ".", "isfinite", "(", "lims", "[", "'ul'", "]", ")", ":", "self", ".", "logger", ".", "warning", "(", "'Upper limit not found. 
'", "'Refitting normalization.'", ")", "self", ".", "like", ".", "optimize", "(", "0", ")", "xvals", "=", "self", ".", "_find_scan_pts", "(", "name", ",", "npts", "=", "npts", ")", "lnlp", "=", "self", ".", "profile", "(", "name", ",", "parName", ",", "reoptimize", "=", "False", ",", "xvals", "=", "xvals", ")", "lims", "=", "utils", ".", "get_parameter_limits", "(", "lnlp", "[", "'xvals'", "]", ",", "lnlp", "[", "'dloglike'", "]", ",", "cl_limit", "=", "0.99", ")", "if", "np", ".", "isfinite", "(", "lims", "[", "'ll'", "]", ")", ":", "xhi", "=", "np", ".", "linspace", "(", "lims", "[", "'x0'", "]", ",", "lims", "[", "'ul'", "]", ",", "npts", "-", "npts", "//", "2", ")", "xlo", "=", "np", ".", "linspace", "(", "lims", "[", "'ll'", "]", ",", "lims", "[", "'x0'", "]", ",", "npts", "//", "2", ")", "xvals", "=", "np", ".", "concatenate", "(", "(", "xlo", "[", ":", "-", "1", "]", ",", "xhi", ")", ")", "xvals", "=", "np", ".", "insert", "(", "xvals", ",", "0", ",", "0.0", ")", "elif", "np", ".", "abs", "(", "lnlp", "[", "'dloglike'", "]", "[", "0", "]", "-", "lims", "[", "'lnlmax'", "]", ")", ">", "0.1", ":", "lims", "[", "'ll'", "]", "=", "0.0", "xhi", "=", "np", ".", "linspace", "(", "lims", "[", "'x0'", "]", ",", "lims", "[", "'ul'", "]", ",", "(", "npts", "+", "1", ")", "-", "(", "npts", "+", "1", ")", "//", "2", ")", "xlo", "=", "np", ".", "linspace", "(", "lims", "[", "'ll'", "]", ",", "lims", "[", "'x0'", "]", ",", "(", "npts", "+", "1", ")", "//", "2", ")", "xvals", "=", "np", ".", "concatenate", "(", "(", "xlo", "[", ":", "-", "1", "]", ",", "xhi", ")", ")", "else", ":", "xvals", "=", "np", ".", "linspace", "(", "0", ",", "lims", "[", "'ul'", "]", ",", "npts", ")", "o", "=", "self", ".", "profile", "(", "name", ",", "parName", ",", "reoptimize", "=", "reoptimize", ",", "xvals", "=", "xvals", ",", "savestate", "=", "savestate", ",", "*", "*", "kwargs", ")", "if", "savestate", ":", "saved_state", ".", "restore", "(", ")", "if", "logemin", "is", "not", "None", "or", "logemax", "is", "not", "None", ":", "self", ".", "set_energy_range", "(", "*", "loge_bounds", ")", "self", ".", "logger", ".", "debug", "(", "'Finished'", ")", "return", "o" ]
Profile the normalization of a source.

Parameters
----------
name : str
    Source name.

reoptimize : bool
    Re-optimize free parameters in the model at each point
    in the profile likelihood scan.
[ "Profile", "the", "normalization", "of", "a", "source", "." ]
python
train
nerdvegas/rez
src/rez/vendor/yaml/__init__.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/yaml/__init__.py#L64-L73
def load(stream, Loader=Loader):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.
    """
    loader = Loader(stream)
    try:
        return loader.get_single_data()
    finally:
        loader.dispose()
[ "def", "load", "(", "stream", ",", "Loader", "=", "Loader", ")", ":", "loader", "=", "Loader", "(", "stream", ")", "try", ":", "return", "loader", ".", "get_single_data", "(", ")", "finally", ":", "loader", ".", "dispose", "(", ")" ]
Parse the first YAML document in a stream and produce the corresponding Python object.
[ "Parse", "the", "first", "YAML", "document", "in", "a", "stream", "and", "produce", "the", "corresponding", "Python", "object", "." ]
python
train
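Basic single-document use; note that this vendored copy defaults the Loader argument, so no explicit Loader is needed here:

from rez.vendor import yaml  # vendored module per the record URL

data = yaml.load('a: 1\nb: [2, 3]')
print(data)  # {'a': 1, 'b': [2, 3]}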
bububa/pyTOP
pyTOP/increment.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/increment.py#L50-L56
def customer_permit(self, session):
    '''taobao.increment.customer.permit
    Enable the incremental message service. Lets an app enable the
    incremental message service for its own users.'''
    request = TOPRequest('taobao.increment.customer.permit')
    self.create(self.execute(request, session),
                fields=['app_customer', ],
                models={'app_customer': AppCustomer})
    return self.app_customer
[ "def", "customer_permit", "(", "self", ",", "session", ")", ":", "request", "=", "TOPRequest", "(", "'taobao.increment.customer.permit'", ")", "self", ".", "create", "(", "self", ".", "execute", "(", "request", ",", "session", ")", ",", "fields", "=", "[", "'app_customer'", ",", "]", ",", "models", "=", "{", "'app_customer'", ":", "AppCustomer", "}", ")", "return", "self", ".", "app_customer" ]
taobao.increment.customer.permit
Enable the incremental message service. Lets an app enable the incremental
message service for its own users.
[ "taobao", ".", "increment", ".", "customer", ".", "permit", "开通增量消息服务", "提供app为自己的用户开通增量消息服务功能" ]
python
train
tanghaibao/jcvi
jcvi/variation/str.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/str.py#L1427-L1482
def lobstrindex(args):
    """
    %prog lobstrindex hg38.trf.bed hg38.upper.fa

    Make lobSTR index. Make sure the FASTA contain only upper case (so use
    fasta.format --upper to convert from UCSC fasta). The bed file is generated
    by str().
    """
    p = OptionParser(lobstrindex.__doc__)
    p.add_option("--notreds", default=False, action="store_true",
                 help="Remove TREDs from the bed file")
    p.set_home("lobstr")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    trfbed, fastafile = args
    pf = fastafile.split(".")[0]
    lhome = opts.lobstr_home
    mkdir(pf)

    if opts.notreds:
        newbedfile = trfbed + ".new"
        newbed = open(newbedfile, "w")
        fp = open(trfbed)
        retained = total = 0
        seen = set()
        for row in fp:
            r = STRLine(row)
            total += 1
            name = r.longname
            if name in seen:
                continue
            seen.add(name)
            print(r, file=newbed)
            retained += 1
        newbed.close()
        logging.debug("Retained: {0}".format(percentage(retained, total)))
    else:
        newbedfile = trfbed

    mm = MakeManager()
    cmd = "python {0}/scripts/lobstr_index.py".format(lhome)
    cmd += " --str {0} --ref {1} --out {2}".format(newbedfile, fastafile, pf)
    mm.add((newbedfile, fastafile), op.join(pf, "lobSTR_ref.fasta.rsa"), cmd)

    tabfile = "{0}/index.tab".format(pf)
    cmd = "python {0}/scripts/GetSTRInfo.py".format(lhome)
    cmd += " {0} {1} > {2}".format(newbedfile, fastafile, tabfile)
    mm.add((newbedfile, fastafile), tabfile, cmd)

    infofile = "{0}/index.info".format(pf)
    cmd = "cp {0} {1}".format(newbedfile, infofile)
    mm.add(trfbed, infofile, cmd)
    mm.write()
[ "def", "lobstrindex", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "lobstrindex", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--notreds\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Remove TREDs from the bed file\"", ")", "p", ".", "set_home", "(", "\"lobstr\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "trfbed", ",", "fastafile", "=", "args", "pf", "=", "fastafile", ".", "split", "(", "\".\"", ")", "[", "0", "]", "lhome", "=", "opts", ".", "lobstr_home", "mkdir", "(", "pf", ")", "if", "opts", ".", "notreds", ":", "newbedfile", "=", "trfbed", "+", "\".new\"", "newbed", "=", "open", "(", "newbedfile", ",", "\"w\"", ")", "fp", "=", "open", "(", "trfbed", ")", "retained", "=", "total", "=", "0", "seen", "=", "set", "(", ")", "for", "row", "in", "fp", ":", "r", "=", "STRLine", "(", "row", ")", "total", "+=", "1", "name", "=", "r", ".", "longname", "if", "name", "in", "seen", ":", "continue", "seen", ".", "add", "(", "name", ")", "print", "(", "r", ",", "file", "=", "newbed", ")", "retained", "+=", "1", "newbed", ".", "close", "(", ")", "logging", ".", "debug", "(", "\"Retained: {0}\"", ".", "format", "(", "percentage", "(", "retained", ",", "total", ")", ")", ")", "else", ":", "newbedfile", "=", "trfbed", "mm", "=", "MakeManager", "(", ")", "cmd", "=", "\"python {0}/scripts/lobstr_index.py\"", ".", "format", "(", "lhome", ")", "cmd", "+=", "\" --str {0} --ref {1} --out {2}\"", ".", "format", "(", "newbedfile", ",", "fastafile", ",", "pf", ")", "mm", ".", "add", "(", "(", "newbedfile", ",", "fastafile", ")", ",", "op", ".", "join", "(", "pf", ",", "\"lobSTR_ref.fasta.rsa\"", ")", ",", "cmd", ")", "tabfile", "=", "\"{0}/index.tab\"", ".", "format", "(", "pf", ")", "cmd", "=", "\"python {0}/scripts/GetSTRInfo.py\"", ".", "format", "(", "lhome", ")", "cmd", "+=", "\" {0} {1} > {2}\"", ".", "format", "(", "newbedfile", ",", "fastafile", ",", "tabfile", ")", "mm", ".", "add", "(", "(", "newbedfile", ",", "fastafile", ")", ",", "tabfile", ",", "cmd", ")", "infofile", "=", "\"{0}/index.info\"", ".", "format", "(", "pf", ")", "cmd", "=", "\"cp {0} {1}\"", ".", "format", "(", "newbedfile", ",", "infofile", ")", "mm", ".", "add", "(", "trfbed", ",", "infofile", ",", "cmd", ")", "mm", ".", "write", "(", ")" ]
%prog lobstrindex hg38.trf.bed hg38.upper.fa

Make lobSTR index. Make sure the FASTA contain only upper case (so use
fasta.format --upper to convert from UCSC fasta). The bed file is generated
by str().
[ "%prog", "lobstrindex", "hg38", ".", "trf", ".", "bed", "hg38", ".", "upper", ".", "fa" ]
python
train
prawn-cake/vk-requests
vk_requests/session.py
https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/session.py#L85-L134
def do_login(self, http_session):
    """Do vk login

    :param http_session: vk_requests.utils.VerboseHTTPSession: http session
    """
    response = http_session.get(self.LOGIN_URL)
    action_url = parse_form_action_url(response.text)

    # Stop login if action url is not found
    if not action_url:
        logger.debug(response.text)
        raise VkParseError("Can't parse form action url")

    login_form_data = {'email': self._login, 'pass': self._password}
    login_response = http_session.post(action_url, login_form_data)
    logger.debug('Cookies: %s', http_session.cookies)

    response_url_query = parse_url_query_params(
        login_response.url, fragment=False)
    logger.debug('response_url_query: %s', response_url_query)
    act = response_url_query.get('act')

    # Check response url query params firstly
    if 'sid' in response_url_query:
        self.require_auth_captcha(
            response=login_response,
            query_params=response_url_query,
            login_form_data=login_form_data,
            http_session=http_session)

    elif act == 'authcheck':
        self.require_2fa(html=login_response.text,
                         http_session=http_session)

    elif act == 'security_check':
        self.require_phone_number(html=login_response.text,
                                  session=http_session)

    session_cookies = ('remixsid' in http_session.cookies,
                       'remixsid6' in http_session.cookies)
    if any(session_cookies):
        logger.info('VK session is established')
        return True
    else:
        message = 'Authorization error: incorrect password or ' \
                  'authentication code'
        logger.error(message)
        raise VkAuthError(message)
[ "def", "do_login", "(", "self", ",", "http_session", ")", ":", "response", "=", "http_session", ".", "get", "(", "self", ".", "LOGIN_URL", ")", "action_url", "=", "parse_form_action_url", "(", "response", ".", "text", ")", "# Stop login it action url is not found", "if", "not", "action_url", ":", "logger", ".", "debug", "(", "response", ".", "text", ")", "raise", "VkParseError", "(", "\"Can't parse form action url\"", ")", "login_form_data", "=", "{", "'email'", ":", "self", ".", "_login", ",", "'pass'", ":", "self", ".", "_password", "}", "login_response", "=", "http_session", ".", "post", "(", "action_url", ",", "login_form_data", ")", "logger", ".", "debug", "(", "'Cookies: %s'", ",", "http_session", ".", "cookies", ")", "response_url_query", "=", "parse_url_query_params", "(", "login_response", ".", "url", ",", "fragment", "=", "False", ")", "logger", ".", "debug", "(", "'response_url_query: %s'", ",", "response_url_query", ")", "act", "=", "response_url_query", ".", "get", "(", "'act'", ")", "# Check response url query params firstly", "if", "'sid'", "in", "response_url_query", ":", "self", ".", "require_auth_captcha", "(", "response", "=", "login_response", ",", "query_params", "=", "response_url_query", ",", "login_form_data", "=", "login_form_data", ",", "http_session", "=", "http_session", ")", "elif", "act", "==", "'authcheck'", ":", "self", ".", "require_2fa", "(", "html", "=", "login_response", ".", "text", ",", "http_session", "=", "http_session", ")", "elif", "act", "==", "'security_check'", ":", "self", ".", "require_phone_number", "(", "html", "=", "login_response", ".", "text", ",", "session", "=", "http_session", ")", "session_cookies", "=", "(", "'remixsid'", "in", "http_session", ".", "cookies", ",", "'remixsid6'", "in", "http_session", ".", "cookies", ")", "if", "any", "(", "session_cookies", ")", ":", "logger", ".", "info", "(", "'VK session is established'", ")", "return", "True", "else", ":", "message", "=", "'Authorization error: incorrect password or '", "'authentication code'", "logger", ".", "error", "(", "message", ")", "raise", "VkAuthError", "(", "message", ")" ]
Do vk login

:param http_session: vk_requests.utils.VerboseHTTPSession: http session
[ "Do", "vk", "login" ]
python
train
usc-isi-i2/etk
etk/extractors/table_extractor.py
https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/table_extractor.py#L67-L73
def add_glossary(self, glossary: List[str], attr_name: str) -> None:
    """
    Adds a glossary for the given attribute name

    :param glossary: a list of possible mentions of the attribute name
    :param attr_name: the attribute name (field name)
    """
    self.glossaries[attr_name] = glossary
[ "def", "add_glossary", "(", "self", ",", "glossary", ":", "List", "[", "str", "]", ",", "attr_name", ":", "str", ")", "->", "None", ":", "self", ".", "glossaries", "[", "attr_name", "]", "=", "glossary" ]
Adds a glossary for the given attribute name

:param glossary: a list of possible mentions of the attribute name
:param attr_name: the attribute name (field name)
[ "Adds", "a", "glossary", "for", "the", "given", "attribute", "name", ":", "param", "glossary", ":", "a", "list", "of", "possible", "mentions", "of", "the", "attribute", "name", ":", "param", "attr_name", ":", "the", "attribute", "name", "(", "field", "name", ")" ]
python
train
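A sketch of attaching a glossary before extraction; the extractor class name is assumed from the module, and the mention list is hypothetical:

from etk.extractors.table_extractor import EntityTableDataExtraction  # class name assumed

extractor = EntityTableDataExtraction()
# Map several surface forms onto the 'price' field.
extractor.add_glossary(['price', 'cost', 'amount'], 'price')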
openstack/horizon
openstack_dashboard/utils/config_types.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/utils/config_types.py#L117-L154
def validate(self, result, spec):  # noqa Yes, it's too complex.
    """Validate that the result has the correct structure."""
    if spec is None:
        # None matches anything.
        return
    if isinstance(spec, dict):
        if not isinstance(result, dict):
            raise ValueError('Dictionary expected, but %r found.' % result)
        if spec:
            spec_value = next(iter(spec.values()))  # Yay Python 3!
            for value in result.values():
                self.validate(value, spec_value)
            spec_key = next(iter(spec.keys()))
            for key in result:
                self.validate(key, spec_key)
    if isinstance(spec, list):
        if not isinstance(result, list):
            raise ValueError('List expected, but %r found.' % result)
        if spec:
            for value in result:
                self.validate(value, spec[0])
    if isinstance(spec, tuple):
        if not isinstance(result, tuple):
            raise ValueError('Tuple expected, but %r found.' % result)
        if len(result) != len(spec):
            raise ValueError('Expected %d elements in tuple %r.' %
                             (len(spec), result))
        for s, value in zip(spec, result):
            self.validate(value, s)
    if isinstance(spec, six.string_types):
        if not isinstance(result, six.string_types):
            raise ValueError('String expected, but %r found.' % result)
    if isinstance(spec, int):
        if not isinstance(result, int):
            raise ValueError('Integer expected, but %r found.' % result)
    if isinstance(spec, bool):
        if not isinstance(result, bool):
            raise ValueError('Boolean expected, but %r found.' % result)
[ "def", "validate", "(", "self", ",", "result", ",", "spec", ")", ":", "# noqa Yes, it's too complex.", "if", "spec", "is", "None", ":", "# None matches anything.", "return", "if", "isinstance", "(", "spec", ",", "dict", ")", ":", "if", "not", "isinstance", "(", "result", ",", "dict", ")", ":", "raise", "ValueError", "(", "'Dictionary expected, but %r found.'", "%", "result", ")", "if", "spec", ":", "spec_value", "=", "next", "(", "iter", "(", "spec", ".", "values", "(", ")", ")", ")", "# Yay Python 3!", "for", "value", "in", "result", ".", "values", "(", ")", ":", "self", ".", "validate", "(", "value", ",", "spec_value", ")", "spec_key", "=", "next", "(", "iter", "(", "spec", ".", "keys", "(", ")", ")", ")", "for", "key", "in", "result", ":", "self", ".", "validate", "(", "key", ",", "spec_key", ")", "if", "isinstance", "(", "spec", ",", "list", ")", ":", "if", "not", "isinstance", "(", "result", ",", "list", ")", ":", "raise", "ValueError", "(", "'List expected, but %r found.'", "%", "result", ")", "if", "spec", ":", "for", "value", "in", "result", ":", "self", ".", "validate", "(", "value", ",", "spec", "[", "0", "]", ")", "if", "isinstance", "(", "spec", ",", "tuple", ")", ":", "if", "not", "isinstance", "(", "result", ",", "tuple", ")", ":", "raise", "ValueError", "(", "'Tuple expected, but %r found.'", "%", "result", ")", "if", "len", "(", "result", ")", "!=", "len", "(", "spec", ")", ":", "raise", "ValueError", "(", "'Expected %d elements in tuple %r.'", "%", "(", "len", "(", "spec", ")", ",", "result", ")", ")", "for", "s", ",", "value", "in", "zip", "(", "spec", ",", "result", ")", ":", "self", ".", "validate", "(", "value", ",", "s", ")", "if", "isinstance", "(", "spec", ",", "six", ".", "string_types", ")", ":", "if", "not", "isinstance", "(", "result", ",", "six", ".", "string_types", ")", ":", "raise", "ValueError", "(", "'String expected, but %r found.'", "%", "result", ")", "if", "isinstance", "(", "spec", ",", "int", ")", ":", "if", "not", "isinstance", "(", "result", ",", "int", ")", ":", "raise", "ValueError", "(", "'Integer expected, but %r found.'", "%", "result", ")", "if", "isinstance", "(", "spec", ",", "bool", ")", ":", "if", "not", "isinstance", "(", "result", ",", "bool", ")", ":", "raise", "ValueError", "(", "'Boolean expected, but %r found.'", "%", "result", ")" ]
Validate that the result has the correct structure.
[ "Validate", "that", "the", "result", "has", "the", "correct", "structure", "." ]
python
train
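The validate record above drives a recursive structure check from a one-element spec: one spec key/value validates every key/value of the result. A standalone sketch of the dict and list branches (a simplified re-implementation, not the Horizon method itself):

def validate(result, spec):
    # None in the spec matches anything
    if spec is None:
        return
    if isinstance(spec, dict):
        if not isinstance(result, dict):
            raise ValueError('Dictionary expected, but %r found.' % result)
        if spec:
            spec_key = next(iter(spec.keys()))
            spec_value = next(iter(spec.values()))
            for key, value in result.items():
                validate(key, spec_key)      # every key against one spec key
                validate(value, spec_value)  # every value against one spec value
    elif isinstance(spec, list):
        if not isinstance(result, list):
            raise ValueError('List expected, but %r found.' % result)
        if spec:
            for value in result:
                validate(value, spec[0])

validate({'web': ['http', 'https']}, {'': ['']})   # passes silently
try:
    validate({'web': 'http'}, {'': ['']})
except ValueError as exc:
    print(exc)                                     # List expected, but 'http' found.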
robotpy/pyfrc
lib/pyfrc/physics/core.py
https://github.com/robotpy/pyfrc/blob/7672ea3f17c8d4b702a9f18a7372d95feee7e37d/lib/pyfrc/physics/core.py#L332-L338
def get_position(self): """ :returns: Robot's current position on the field as `(x,y,angle)`. `x` and `y` are specified in feet, `angle` is in radians """ with self._lock: return self.x, self.y, self.angle
[ "def", "get_position", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "return", "self", ".", "x", ",", "self", ".", "y", ",", "self", ".", "angle" ]
:returns: Robot's current position on the field as `(x,y,angle)`. `x` and `y` are specified in feet, `angle` is in radians
[ ":", "returns", ":", "Robot", "s", "current", "position", "on", "the", "field", "as", "(", "x", "y", "angle", ")", ".", "x", "and", "y", "are", "specified", "in", "feet", "angle", "is", "in", "radians" ]
python
train
lago-project/lago
lago/subnet_lease.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/subnet_lease.py#L196-L217
def _lease_valid(self, lease): """ Check if the given lease exist and still has a prefix that owns it. If the lease exist but its prefix isn't, remove the lease from this store. Args: lease (lago.subnet_lease.Lease): Object representation of the lease Returns: str or None: If the lease and its prefix exists, return the path to the uuid of the prefix, else return None. """ if not lease.exist: return None if lease.has_env: return lease.uuid_path else: self._release(lease) return None
[ "def", "_lease_valid", "(", "self", ",", "lease", ")", ":", "if", "not", "lease", ".", "exist", ":", "return", "None", "if", "lease", ".", "has_env", ":", "return", "lease", ".", "uuid_path", "else", ":", "self", ".", "_release", "(", "lease", ")", "return", "None" ]
Check if the given lease exist and still has a prefix that owns it. If the lease exist but its prefix isn't, remove the lease from this store. Args: lease (lago.subnet_lease.Lease): Object representation of the lease Returns: str or None: If the lease and its prefix exists, return the path to the uuid of the prefix, else return None.
[ "Check", "if", "the", "given", "lease", "exist", "and", "still", "has", "a", "prefix", "that", "owns", "it", ".", "If", "the", "lease", "exist", "but", "its", "prefix", "isn", "t", "remove", "the", "lease", "from", "this", "store", "." ]
python
train
lpantano/seqcluster
seqcluster/function/coral.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/function/coral.py#L123-L163
def create_features(bam_in, loci_file, reference, out_dir): """ Use feature extraction module from CoRaL """ lenvec_plus = op.join(out_dir, 'genomic_lenvec.plus') lenvec_minus = op.join(out_dir, 'genomic_lenvec.minus') compute_genomic_cmd = ("compute_genomic_lenvectors " "{bam_in} {lenvec_plus} " "{lenvec_minus} " "{min_len} " "{max_len} ") index_genomic_cmd = ("index_genomic_lenvectors " "{lenvec} ") genomic_lenvec = op.join(out_dir, 'genomic_lenvec') feat_len_file = op.join(out_dir, 'feat_lengths.txt') compute_locus_cmd = ("compute_locus_lenvectors " "{loci_file} " "{genomic_lenvec} " "{min_len} " "{max_len} " "> {feat_len_file}") cov_S_file = op.join(out_dir, 'loci.cov_anti') coverage_anti_cmd = ("coverageBed -S -counts -b " "{bam_in} -a {loci_file} " "> {cov_S_file}") feat_posentropy = op.join(out_dir, 'feat_posentropy.txt') entropy_cmd = ("compute_locus_entropy.rb " "{counts_reads} " "> {feat_posentropy}") with utils.chdir(out_dir): run(compute_genomic_cmd.format(min_len=min_trimmed_read_len, max_len=max_trimmed_read_len, **locals()), "Run compute_genomic") run(index_genomic_cmd.format(lenvec=lenvec_plus), "Run index in plus") run(index_genomic_cmd.format(lenvec=lenvec_minus), "Run index in minus") run(compute_locus_cmd.format(min_len=min_trimmed_read_len, max_len=max_trimmed_read_len, **locals()), "Run compute locus") run(coverage_anti_cmd.format(**locals()), "Run coverage antisense") feat_antisense = _order_antisense_column(cov_S_file, min_trimmed_read_len) counts_reads = _reads_per_position(bam_in, loci_file, out_dir) run(entropy_cmd.format(**locals()), "Run entropy") rnafold = calculate_structure(loci_file, reference)
[ "def", "create_features", "(", "bam_in", ",", "loci_file", ",", "reference", ",", "out_dir", ")", ":", "lenvec_plus", "=", "op", ".", "join", "(", "out_dir", ",", "'genomic_lenvec.plus'", ")", "lenvec_minus", "=", "op", ".", "join", "(", "out_dir", ",", "'genomic_lenvec.minus'", ")", "compute_genomic_cmd", "=", "(", "\"compute_genomic_lenvectors \"", "\"{bam_in} {lenvec_plus} \"", "\"{lenvec_minus} \"", "\"{min_len} \"", "\"{max_len} \"", ")", "index_genomic_cmd", "=", "(", "\"index_genomic_lenvectors \"", "\"{lenvec} \"", ")", "genomic_lenvec", "=", "op", ".", "join", "(", "out_dir", ",", "'genomic_lenvec'", ")", "feat_len_file", "=", "op", ".", "join", "(", "out_dir", ",", "'feat_lengths.txt'", ")", "compute_locus_cmd", "=", "(", "\"compute_locus_lenvectors \"", "\"{loci_file} \"", "\"{genomic_lenvec} \"", "\"{min_len} \"", "\"{max_len} \"", "\"> {feat_len_file}\"", ")", "cov_S_file", "=", "op", ".", "join", "(", "out_dir", ",", "'loci.cov_anti'", ")", "coverage_anti_cmd", "=", "(", "\"coverageBed -S -counts -b \"", "\"{bam_in} -a {loci_file} \"", "\"> {cov_S_file}\"", ")", "feat_posentropy", "=", "op", ".", "join", "(", "out_dir", ",", "'feat_posentropy.txt'", ")", "entropy_cmd", "=", "(", "\"compute_locus_entropy.rb \"", "\"{counts_reads} \"", "\"> {feat_posentropy}\"", ")", "with", "utils", ".", "chdir", "(", "out_dir", ")", ":", "run", "(", "compute_genomic_cmd", ".", "format", "(", "min_len", "=", "min_trimmed_read_len", ",", "max_len", "=", "max_trimmed_read_len", ",", "*", "*", "locals", "(", ")", ")", ",", "\"Run compute_genomic\"", ")", "run", "(", "index_genomic_cmd", ".", "format", "(", "lenvec", "=", "lenvec_plus", ")", ",", "\"Run index in plus\"", ")", "run", "(", "index_genomic_cmd", ".", "format", "(", "lenvec", "=", "lenvec_minus", ")", ",", "\"Run index in minus\"", ")", "run", "(", "compute_locus_cmd", ".", "format", "(", "min_len", "=", "min_trimmed_read_len", ",", "max_len", "=", "max_trimmed_read_len", ",", "*", "*", "locals", "(", ")", ")", ",", "\"Run compute locus\"", ")", "run", "(", "coverage_anti_cmd", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "\"Run coverage antisense\"", ")", "feat_antisense", "=", "_order_antisense_column", "(", "cov_S_file", ",", "min_trimmed_read_len", ")", "counts_reads", "=", "_reads_per_position", "(", "bam_in", ",", "loci_file", ",", "out_dir", ")", "run", "(", "entropy_cmd", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "\"Run entropy\"", ")", "rnafold", "=", "calculate_structure", "(", "loci_file", ",", "reference", ")" ]
Use feature extraction module from CoRaL
[ "Use", "feature", "extraction", "module", "from", "CoRaL" ]
python
train
angr/claripy
claripy/backends/__init__.py
https://github.com/angr/claripy/blob/4ed61924880af1ea8fb778047d896ec0156412a6/claripy/backends/__init__.py#L437-L445
def add(self, s, c, track=False): """ This function adds constraints to the backend solver. :param c: A sequence of ASTs :param s: A backend solver object :param bool track: True to enable constraint tracking, which is used in unsat_core() """ return self._add(s, self.convert_list(c), track=track)
[ "def", "add", "(", "self", ",", "s", ",", "c", ",", "track", "=", "False", ")", ":", "return", "self", ".", "_add", "(", "s", ",", "self", ".", "convert_list", "(", "c", ")", ",", "track", "=", "track", ")" ]
This function adds constraints to the backend solver. :param c: A sequence of ASTs :param s: A backend solver object :param bool track: True to enable constraint tracking, which is used in unsat_core()
[ "This", "function", "adds", "constraints", "to", "the", "backend", "solver", "." ]
python
train
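For context on the claripy add record above, this is roughly how the backend-level add() gets exercised through claripy's public frontend (a sketch; requires the claripy package, and the constraint values are made up):

import claripy

x = claripy.BVS('x', 32)    # a 32-bit symbolic bitvector
s = claripy.Solver()
s.add(x > 100)              # the frontend converts the ASTs and forwards
s.add(x < 200)              # them to the active backend solver
print(s.eval(x, 1))         # one concrete value satisfying both constraints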
saltstack/salt
salt/modules/aliases.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aliases.py#L40-L61
def __parse_aliases(): ''' Parse the aliases file, and return a list of line components: [ (alias1, target1, comment1), (alias2, target2, comment2), ] ''' afn = __get_aliases_filename() ret = [] if not os.path.isfile(afn): return ret with salt.utils.files.fopen(afn, 'r') as ifile: for line in ifile: line = salt.utils.stringutils.to_unicode(line) match = __ALIAS_RE.match(line) if match: ret.append(match.groups()) else: ret.append((None, None, line.strip())) return ret
[ "def", "__parse_aliases", "(", ")", ":", "afn", "=", "__get_aliases_filename", "(", ")", "ret", "=", "[", "]", "if", "not", "os", ".", "path", ".", "isfile", "(", "afn", ")", ":", "return", "ret", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "afn", ",", "'r'", ")", "as", "ifile", ":", "for", "line", "in", "ifile", ":", "line", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "line", ")", "match", "=", "__ALIAS_RE", ".", "match", "(", "line", ")", "if", "match", ":", "ret", ".", "append", "(", "match", ".", "groups", "(", ")", ")", "else", ":", "ret", ".", "append", "(", "(", "None", ",", "None", ",", "line", ".", "strip", "(", ")", ")", ")", "return", "ret" ]
Parse the aliases file, and return a list of line components: [ (alias1, target1, comment1), (alias2, target2, comment2), ]
[ "Parse", "the", "aliases", "file", "and", "return", "a", "list", "of", "line", "components", ":" ]
python
train
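The __parse_aliases record above matches each line against __ALIAS_RE, which is not shown. A standalone sketch with an illustrative regex (an assumption, not Salt's actual pattern):

import re

# Illustrative pattern: "alias: target  # optional comment"
ALIAS_RE = re.compile(r'^\s*([^#:\s]+)\s*:\s*(.+?)\s*(?:#\s*(.*))?$')

def parse_aliases(lines):
    ret = []
    for line in lines:
        match = ALIAS_RE.match(line)
        if match:
            ret.append(match.groups())              # (alias, target, comment)
        else:
            ret.append((None, None, line.strip()))  # comment or blank line
    return ret

print(parse_aliases(['root: admin@example.com', '# just a comment']))
# [('root', 'admin@example.com', None), (None, None, '# just a comment')]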
w1ll1am23/pyeconet
src/pyeconet/api.py
https://github.com/w1ll1am23/pyeconet/blob/05abf965f67c7445355508a38f11992d13adac4f/src/pyeconet/api.py#L114-L123
def get_vacations(): """ Pull the accounts vacations. """ arequest = requests.get(VACATIONS_URL, headers=HEADERS) status_code = str(arequest.status_code) if status_code == '401': _LOGGER.error("Token expired.") return False return arequest.json()
[ "def", "get_vacations", "(", ")", ":", "arequest", "=", "requests", ".", "get", "(", "VACATIONS_URL", ",", "headers", "=", "HEADERS", ")", "status_code", "=", "str", "(", "arequest", ".", "status_code", ")", "if", "status_code", "==", "'401'", ":", "_LOGGER", ".", "error", "(", "\"Token expired.\"", ")", "return", "False", "return", "arequest", ".", "json", "(", ")" ]
Pull the accounts vacations.
[ "Pull", "the", "accounts", "vacations", "." ]
python
valid
rapidpro/expressions
python/temba_expressions/functions/excel.py
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L164-L189
def datedif(ctx, start_date, end_date, unit): """ Calculates the number of days, months, or years between two dates. """ start_date = conversions.to_date(start_date, ctx) end_date = conversions.to_date(end_date, ctx) unit = conversions.to_string(unit, ctx).lower() if start_date > end_date: raise ValueError("Start date cannot be after end date") if unit == 'y': return relativedelta(end_date, start_date).years elif unit == 'm': delta = relativedelta(end_date, start_date) return 12 * delta.years + delta.months elif unit == 'd': return (end_date - start_date).days elif unit == 'md': return relativedelta(end_date, start_date).days elif unit == 'ym': return relativedelta(end_date, start_date).months elif unit == 'yd': return (end_date - start_date.replace(year=end_date.year)).days raise ValueError("Invalid unit value: %s" % unit)
[ "def", "datedif", "(", "ctx", ",", "start_date", ",", "end_date", ",", "unit", ")", ":", "start_date", "=", "conversions", ".", "to_date", "(", "start_date", ",", "ctx", ")", "end_date", "=", "conversions", ".", "to_date", "(", "end_date", ",", "ctx", ")", "unit", "=", "conversions", ".", "to_string", "(", "unit", ",", "ctx", ")", ".", "lower", "(", ")", "if", "start_date", ">", "end_date", ":", "raise", "ValueError", "(", "\"Start date cannot be after end date\"", ")", "if", "unit", "==", "'y'", ":", "return", "relativedelta", "(", "end_date", ",", "start_date", ")", ".", "years", "elif", "unit", "==", "'m'", ":", "delta", "=", "relativedelta", "(", "end_date", ",", "start_date", ")", "return", "12", "*", "delta", ".", "years", "+", "delta", ".", "months", "elif", "unit", "==", "'d'", ":", "return", "(", "end_date", "-", "start_date", ")", ".", "days", "elif", "unit", "==", "'md'", ":", "return", "relativedelta", "(", "end_date", ",", "start_date", ")", ".", "days", "elif", "unit", "==", "'ym'", ":", "return", "relativedelta", "(", "end_date", ",", "start_date", ")", ".", "months", "elif", "unit", "==", "'yd'", ":", "return", "(", "end_date", "-", "start_date", ".", "replace", "(", "year", "=", "end_date", ".", "year", ")", ")", ".", "days", "raise", "ValueError", "(", "\"Invalid unit value: %s\"", "%", "unit", ")" ]
Calculates the number of days, months, or years between two dates.
[ "Calculates", "the", "number", "of", "days", "months", "or", "years", "between", "two", "dates", "." ]
python
train
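To make the unit codes in the datedif record above concrete, here is what each one returns for a single pair of dates (a sketch using dateutil directly, mirroring the same relativedelta logic):

from datetime import date
from dateutil.relativedelta import relativedelta

start, end = date(2023, 1, 1), date(2024, 3, 1)
delta = relativedelta(end, start)                  # years=1, months=2, days=0

print(delta.years)                                 # 'y'  -> 1
print(12 * delta.years + delta.months)             # 'm'  -> 14
print((end - start).days)                          # 'd'  -> 425
print(delta.days)                                  # 'md' -> 0
print(delta.months)                                # 'ym' -> 2
print((end - start.replace(year=end.year)).days)   # 'yd' -> 60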
saltstack/salt
salt/modules/znc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/znc.py#L34-L58
def _makepass(password, hasher='sha256'): ''' Create a znc compatible hashed password ''' # Setup the hasher if hasher == 'sha256': h = hashlib.sha256(password) elif hasher == 'md5': h = hashlib.md5(password) else: return NotImplemented c = "abcdefghijklmnopqrstuvwxyz" \ "ABCDEFGHIJKLMNOPQRSTUVWXYZ" \ "0123456789!?.,:;/*-+_()" r = { 'Method': h.name, 'Salt': ''.join(random.SystemRandom().choice(c) for x in range(20)), } # Salt the password hash h.update(r['Salt']) r['Hash'] = h.hexdigest() return r
[ "def", "_makepass", "(", "password", ",", "hasher", "=", "'sha256'", ")", ":", "# Setup the hasher", "if", "hasher", "==", "'sha256'", ":", "h", "=", "hashlib", ".", "sha256", "(", "password", ")", "elif", "hasher", "==", "'md5'", ":", "h", "=", "hashlib", ".", "md5", "(", "password", ")", "else", ":", "return", "NotImplemented", "c", "=", "\"abcdefghijklmnopqrstuvwxyz\"", "\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"", "\"0123456789!?.,:;/*-+_()\"", "r", "=", "{", "'Method'", ":", "h", ".", "name", ",", "'Salt'", ":", "''", ".", "join", "(", "random", ".", "SystemRandom", "(", ")", ".", "choice", "(", "c", ")", "for", "x", "in", "range", "(", "20", ")", ")", ",", "}", "# Salt the password hash", "h", ".", "update", "(", "r", "[", "'Salt'", "]", ")", "r", "[", "'Hash'", "]", "=", "h", ".", "hexdigest", "(", ")", "return", "r" ]
Create a znc compatible hashed password
[ "Create", "a", "znc", "compatible", "hashed", "password" ]
python
train
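The _makepass record above passes a str straight to hashlib, which only works on Python 2. A Python 3 sketch of the same salt-then-hash scheme (the helper name is hypothetical, and whether ZNC accepts the resulting dict as-is is not verified here):

import hashlib
import random
import string

def makepass(password, hasher='sha256'):
    # hashlib requires bytes on Python 3
    h = hashlib.new(hasher, password.encode('utf-8'))
    charset = string.ascii_letters + string.digits + '!?.,:;/*-+_()'
    salt = ''.join(random.SystemRandom().choice(charset) for _ in range(20))
    h.update(salt.encode('utf-8'))   # salt the password hash
    return {'Method': h.name, 'Salt': salt, 'Hash': h.hexdigest()}

print(makepass('hunter2'))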
miguelgrinberg/python-socketio
socketio/asyncio_server.py
https://github.com/miguelgrinberg/python-socketio/blob/c0c1bf8d21e3597389b18938550a0724dd9676b7/socketio/asyncio_server.py#L205-L218
async def close_room(self, room, namespace=None): """Close a room. This function removes all the clients from the given room. :param room: Room name. :param namespace: The Socket.IO namespace for the event. If this argument is omitted the default namespace is used. Note: this method is a coroutine. """ namespace = namespace or '/' self.logger.info('room %s is closing [%s]', room, namespace) await self.manager.close_room(room, namespace)
[ "async", "def", "close_room", "(", "self", ",", "room", ",", "namespace", "=", "None", ")", ":", "namespace", "=", "namespace", "or", "'/'", "self", ".", "logger", ".", "info", "(", "'room %s is closing [%s]'", ",", "room", ",", "namespace", ")", "await", "self", ".", "manager", ".", "close_room", "(", "room", ",", "namespace", ")" ]
Close a room. This function removes all the clients from the given room. :param room: Room name. :param namespace: The Socket.IO namespace for the event. If this argument is omitted the default namespace is used. Note: this method is a coroutine.
[ "Close", "a", "room", "." ]
python
train
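A typical call site for the close_room record above, inside an async event handler (the event name, room name, and handler body are made up):

import socketio

sio = socketio.AsyncServer()

@sio.event
async def end_session(sid, data):
    # removes every client from the room in the default namespace
    await sio.close_room('chat')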
SteveMcGrath/pySecurityCenter
securitycenter/sc4.py
https://github.com/SteveMcGrath/pySecurityCenter/blob/f0b10b1bcd4fd23a8d4d09ca6774cdf5e1cfd880/securitycenter/sc4.py#L641-L671
def plugin_counts(self): """plugin_counts Returns the plugin counts as dictionary with the last updated info if its available. """ ret = { 'total': 0, } # As ususal, we need data before we can actually do anything ;) data = self.raw_query('plugin', 'init') # For backwards compatability purposes, we will be handling this a bit # differently than I would like. We are going to check to see if each # value exists and override the default value of 0. The only value that # I know existed in bost 4.2 and 4.4 is pluginCount, the rest aren't # listed in the API docs, however return back from my experimentation. ret['total'] = data['pluginCount'] if 'lastUpdates' in data: for item in ['active', 'passive', 'compliance', 'custom', 'event']: itemdata = {} if item in data['lastUpdates']: itemdata = data['lastUpdates'][item] if item in data: itemdata['count'] = data[item] else: itemdata['count'] = 0 ret[item] = itemdata return ret
[ "def", "plugin_counts", "(", "self", ")", ":", "ret", "=", "{", "'total'", ":", "0", ",", "}", "# As ususal, we need data before we can actually do anything ;)", "data", "=", "self", ".", "raw_query", "(", "'plugin'", ",", "'init'", ")", "# For backwards compatability purposes, we will be handling this a bit", "# differently than I would like. We are going to check to see if each", "# value exists and override the default value of 0. The only value that", "# I know existed in bost 4.2 and 4.4 is pluginCount, the rest aren't", "# listed in the API docs, however return back from my experimentation.", "ret", "[", "'total'", "]", "=", "data", "[", "'pluginCount'", "]", "if", "'lastUpdates'", "in", "data", ":", "for", "item", "in", "[", "'active'", ",", "'passive'", ",", "'compliance'", ",", "'custom'", ",", "'event'", "]", ":", "itemdata", "=", "{", "}", "if", "item", "in", "data", "[", "'lastUpdates'", "]", ":", "itemdata", "=", "data", "[", "'lastUpdates'", "]", "[", "item", "]", "if", "item", "in", "data", ":", "itemdata", "[", "'count'", "]", "=", "data", "[", "item", "]", "else", ":", "itemdata", "[", "'count'", "]", "=", "0", "ret", "[", "item", "]", "=", "itemdata", "return", "ret" ]
plugin_counts Returns the plugin counts as dictionary with the last updated info if its available.
[ "plugin_counts", "Returns", "the", "plugin", "counts", "as", "dictionary", "with", "the", "last", "updated", "info", "if", "its", "available", "." ]
python
train
cnt-dev/cnt.rulebase
cnt/rulebase/rules/sentence_segmentation/sentence_segmenter.py
https://github.com/cnt-dev/cnt.rulebase/blob/d1c767c356d8ee05b23ec5b04aaac84784ee547c/cnt/rulebase/rules/sentence_segmentation/sentence_segmenter.py#L108-L170
def result(self) -> workflow.IntervalGeneratorType: """ Generate intervals indicating the valid sentences. """ config = cast(SentenceSegementationConfig, self.config) index = -1 labels = None while True: # 1. Find the start of the sentence. start = -1 while True: # Check the ``labels`` generated from step (2). if labels is None: # https://www.python.org/dev/peps/pep-0479/ try: index, labels = next(self.index_labels_generator) except StopIteration: return # Check if we found a valid sentence char. if labels[SentenceValidCharacterLabeler]: start = index break # Trigger next(...) action. labels = None index = -1 # 2. Find the ending. end = -1 try: while True: index, labels = next(self.index_labels_generator) # Detected invalid char. if config.enable_strict_sentence_charset and \ not labels[SentenceValidCharacterLabeler] and \ not labels[WhitespaceLabeler]: end = index break # Detected sentence ending. if self._labels_indicate_sentence_ending(labels): # Consume the ending span. while True: index, labels = next(self.index_labels_generator) is_ending = (self._labels_indicate_sentence_ending(labels) or (config.extend_ending_with_delimiters and labels[DelimitersLabeler])) if not is_ending: end = index break # yeah we found the ending. break except StopIteration: end = len(self.input_sequence) # Trigger next(...) action. labels = None index = -1 yield start, end
[ "def", "result", "(", "self", ")", "->", "workflow", ".", "IntervalGeneratorType", ":", "config", "=", "cast", "(", "SentenceSegementationConfig", ",", "self", ".", "config", ")", "index", "=", "-", "1", "labels", "=", "None", "while", "True", ":", "# 1. Find the start of the sentence.", "start", "=", "-", "1", "while", "True", ":", "# Check the ``labels`` generated from step (2).", "if", "labels", "is", "None", ":", "# https://www.python.org/dev/peps/pep-0479/", "try", ":", "index", ",", "labels", "=", "next", "(", "self", ".", "index_labels_generator", ")", "except", "StopIteration", ":", "return", "# Check if we found a valid sentence char.", "if", "labels", "[", "SentenceValidCharacterLabeler", "]", ":", "start", "=", "index", "break", "# Trigger next(...) action.", "labels", "=", "None", "index", "=", "-", "1", "# 2. Find the ending.", "end", "=", "-", "1", "try", ":", "while", "True", ":", "index", ",", "labels", "=", "next", "(", "self", ".", "index_labels_generator", ")", "# Detected invalid char.", "if", "config", ".", "enable_strict_sentence_charset", "and", "not", "labels", "[", "SentenceValidCharacterLabeler", "]", "and", "not", "labels", "[", "WhitespaceLabeler", "]", ":", "end", "=", "index", "break", "# Detected sentence ending.", "if", "self", ".", "_labels_indicate_sentence_ending", "(", "labels", ")", ":", "# Consume the ending span.", "while", "True", ":", "index", ",", "labels", "=", "next", "(", "self", ".", "index_labels_generator", ")", "is_ending", "=", "(", "self", ".", "_labels_indicate_sentence_ending", "(", "labels", ")", "or", "(", "config", ".", "extend_ending_with_delimiters", "and", "labels", "[", "DelimitersLabeler", "]", ")", ")", "if", "not", "is_ending", ":", "end", "=", "index", "break", "# yeah we found the ending.", "break", "except", "StopIteration", ":", "end", "=", "len", "(", "self", ".", "input_sequence", ")", "# Trigger next(...) action.", "labels", "=", "None", "index", "=", "-", "1", "yield", "start", ",", "end" ]
Generate intervals indicating the valid sentences.
[ "Generate", "intervals", "indicating", "the", "valid", "sentences", "." ]
python
train
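The result record above yields (start, end) interval pairs rather than the sentences themselves. A much-simplified standalone sketch of producing and consuming such intervals (the delimiter set and sample text are made up; the real workflow is driven by labelers, not a character set):

def intervals(text, delimiters='。!?'):
    start = None
    for idx, char in enumerate(text):
        if char not in delimiters and start is None:
            start = idx                # first char of a new sentence
        elif char in delimiters and start is not None:
            yield start, idx + 1       # extend the end past the delimiter
            start = None
    if start is not None:
        yield start, len(text)         # input exhausted mid-sentence

text = '你好。今天怎么样?'
for begin, end in intervals(text):
    print(text[begin:end])             # 你好。 then 今天怎么样?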
mitsei/dlkit
dlkit/services/repository.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/repository.py#L1810-L1818
def use_plenary_asset_composition_view(self): """Pass through to provider AssetCompositionSession.use_plenary_asset_composition_view""" self._object_views['asset_composition'] = PLENARY # self._get_provider_session('asset_composition_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_plenary_asset_composition_view() except AttributeError: pass
[ "def", "use_plenary_asset_composition_view", "(", "self", ")", ":", "self", ".", "_object_views", "[", "'asset_composition'", "]", "=", "PLENARY", "# self._get_provider_session('asset_composition_session') # To make sure the session is tracked", "for", "session", "in", "self", ".", "_get_provider_sessions", "(", ")", ":", "try", ":", "session", ".", "use_plenary_asset_composition_view", "(", ")", "except", "AttributeError", ":", "pass" ]
Pass through to provider AssetCompositionSession.use_plenary_asset_composition_view
[ "Pass", "through", "to", "provider", "AssetCompositionSession", ".", "use_plenary_asset_composition_view" ]
python
train
vtkiorg/vtki
vtki/qt_plotting.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/qt_plotting.py#L418-L420
def scale_axes_dialog(self, show=True): """ Open scale axes dialog """ return ScaleAxesDialog(self.app_window, self, show=show)
[ "def", "scale_axes_dialog", "(", "self", ",", "show", "=", "True", ")", ":", "return", "ScaleAxesDialog", "(", "self", ".", "app_window", ",", "self", ",", "show", "=", "show", ")" ]
Open scale axes dialog
[ "Open", "scale", "axes", "dialog" ]
python
train
iotile/coretools
iotilecore/iotile/core/hw/reports/flexible_dictionary.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/reports/flexible_dictionary.py#L28-L80
def FromReadings(cls, uuid, readings, events, report_id=IOTileReading.InvalidReadingID, selector=0xFFFF, streamer=0x100, sent_timestamp=0, received_time=None): """Create a flexible dictionary report from a list of readings and events. Args: uuid (int): The uuid of the device that this report came from readings (list of IOTileReading): A list of IOTileReading objects containing the data in the report events (list of IOTileEvent): A list of the events contained in the report. report_id (int): The id of the report. If not provided it defaults to IOTileReading.InvalidReadingID. Note that you can specify anything you want for the report id but for actual IOTile devices the report id will always be greater than the id of all of the readings contained in the report since devices generate ids sequentially. selector (int): The streamer selector of this report. This can be anything but if the report came from a device, it would correspond with the query the device used to pick readings to go into the report. streamer (int): The streamer id that this reading was sent from. sent_timestamp (int): The device's uptime that sent this report. received_time(datetime): The UTC time when this report was received from an IOTile device. If it is being created now, received_time defaults to datetime.utcnow(). Returns: FlexibleDictionaryReport: A report containing the readings and events passed in. """ lowest_id = IOTileReading.InvalidReadingID highest_id = IOTileReading.InvalidReadingID for item in itertools.chain(iter(readings), iter(events)): if item.reading_id == IOTileReading.InvalidReadingID: continue if lowest_id == IOTileReading.InvalidReadingID or item.reading_id < lowest_id: lowest_id = item.reading_id if highest_id == IOTileReading.InvalidReadingID or item.reading_id > highest_id: highest_id = item.reading_id reading_list = [x.asdict() for x in readings] event_list = [x.asdict() for x in events] report_dict = { "format": cls.FORMAT_TAG, "device": uuid, "streamer_index": streamer, "streamer_selector": selector, "incremental_id": report_id, "lowest_id": lowest_id, "highest_id": highest_id, "device_sent_timestamp": sent_timestamp, "events": event_list, "data": reading_list } encoded = msgpack.packb(report_dict, default=_encode_datetime, use_bin_type=True) return FlexibleDictionaryReport(encoded, signed=False, encrypted=False, received_time=received_time)
[ "def", "FromReadings", "(", "cls", ",", "uuid", ",", "readings", ",", "events", ",", "report_id", "=", "IOTileReading", ".", "InvalidReadingID", ",", "selector", "=", "0xFFFF", ",", "streamer", "=", "0x100", ",", "sent_timestamp", "=", "0", ",", "received_time", "=", "None", ")", ":", "lowest_id", "=", "IOTileReading", ".", "InvalidReadingID", "highest_id", "=", "IOTileReading", ".", "InvalidReadingID", "for", "item", "in", "itertools", ".", "chain", "(", "iter", "(", "readings", ")", ",", "iter", "(", "events", ")", ")", ":", "if", "item", ".", "reading_id", "==", "IOTileReading", ".", "InvalidReadingID", ":", "continue", "if", "lowest_id", "==", "IOTileReading", ".", "InvalidReadingID", "or", "item", ".", "reading_id", "<", "lowest_id", ":", "lowest_id", "=", "item", ".", "reading_id", "if", "highest_id", "==", "IOTileReading", ".", "InvalidReadingID", "or", "item", ".", "reading_id", ">", "highest_id", ":", "highest_id", "=", "item", ".", "reading_id", "reading_list", "=", "[", "x", ".", "asdict", "(", ")", "for", "x", "in", "readings", "]", "event_list", "=", "[", "x", ".", "asdict", "(", ")", "for", "x", "in", "events", "]", "report_dict", "=", "{", "\"format\"", ":", "cls", ".", "FORMAT_TAG", ",", "\"device\"", ":", "uuid", ",", "\"streamer_index\"", ":", "streamer", ",", "\"streamer_selector\"", ":", "selector", ",", "\"incremental_id\"", ":", "report_id", ",", "\"lowest_id\"", ":", "lowest_id", ",", "\"highest_id\"", ":", "highest_id", ",", "\"device_sent_timestamp\"", ":", "sent_timestamp", ",", "\"events\"", ":", "event_list", ",", "\"data\"", ":", "reading_list", "}", "encoded", "=", "msgpack", ".", "packb", "(", "report_dict", ",", "default", "=", "_encode_datetime", ",", "use_bin_type", "=", "True", ")", "return", "FlexibleDictionaryReport", "(", "encoded", ",", "signed", "=", "False", ",", "encrypted", "=", "False", ",", "received_time", "=", "received_time", ")" ]
Create a flexible dictionary report from a list of readings and events. Args: uuid (int): The uuid of the device that this report came from readings (list of IOTileReading): A list of IOTileReading objects containing the data in the report events (list of IOTileEvent): A list of the events contained in the report. report_id (int): The id of the report. If not provided it defaults to IOTileReading.InvalidReadingID. Note that you can specify anything you want for the report id but for actual IOTile devices the report id will always be greater than the id of all of the readings contained in the report since devices generate ids sequentially. selector (int): The streamer selector of this report. This can be anything but if the report came from a device, it would correspond with the query the device used to pick readings to go into the report. streamer (int): The streamer id that this reading was sent from. sent_timestamp (int): The device's uptime that sent this report. received_time(datetime): The UTC time when this report was received from an IOTile device. If it is being created now, received_time defaults to datetime.utcnow(). Returns: FlexibleDictionaryReport: A report containing the readings and events passed in.
[ "Create", "a", "flexible", "dictionary", "report", "from", "a", "list", "of", "readings", "and", "events", "." ]
python
train
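The FromReadings record above finishes by msgpack-encoding the report dict with a datetime hook. The _encode_datetime helper is not shown, so the stand-in below (an assumption) serializes datetimes as ISO-8601 strings:

import datetime
import msgpack

def encode_datetime(obj):
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    raise TypeError('cannot serialize %r' % obj)

report = {'device': 42,
          'device_sent_timestamp': 0,
          'received': datetime.datetime.utcnow(),
          'data': []}
packed = msgpack.packb(report, default=encode_datetime, use_bin_type=True)
print(msgpack.unpackb(packed, raw=False))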
neithere/argh
argh/assembling.py
https://github.com/neithere/argh/blob/dcd3253f2994400a6a58a700c118c53765bc50a4/argh/assembling.py#L188-L318
def set_default_command(parser, function): """ Sets default command (i.e. a function) for given parser. If `parser.description` is empty and the function has a docstring, it is used as the description. .. note:: An attempt to set default command to a parser which already has subparsers (e.g. added with :func:`~argh.assembling.add_commands`) results in a `AssemblingError`. .. note:: If there are both explicitly declared arguments (e.g. via :func:`~argh.decorators.arg`) and ones inferred from the function signature (e.g. via :func:`~argh.decorators.command`), declared ones will be merged into inferred ones. If an argument does not conform function signature, `AssemblingError` is raised. .. note:: If the parser was created with ``add_help=True`` (which is by default), option name ``-h`` is silently removed from any argument. """ if parser._subparsers: _require_support_for_default_command_with_subparsers() spec = get_arg_spec(function) declared_args = getattr(function, ATTR_ARGS, []) inferred_args = list(_get_args_from_signature(function)) if inferred_args and declared_args: # We've got a mixture of declared and inferred arguments # a mapping of "dest" strings to argument declarations. # # * a "dest" string is a normalized form of argument name, i.e.: # # '-f', '--foo' → 'foo' # 'foo-bar' → 'foo_bar' # # * argument declaration is a dictionary representing an argument; # it is obtained either from _get_args_from_signature() or from # an @arg decorator (as is). # dests = OrderedDict() for argspec in inferred_args: dest = _get_parser_param_kwargs(parser, argspec)['dest'] dests[dest] = argspec for declared_kw in declared_args: # an argument is declared via decorator dest = _get_dest(parser, declared_kw) if dest in dests: # the argument is already known from function signature # # now make sure that this declared arg conforms to the function # signature and therefore only refines an inferred arg: # # @arg('my-foo') maps to func(my_foo) # @arg('--my-bar') maps to func(my_bar=...) # either both arguments are positional or both are optional decl_positional = _is_positional(declared_kw['option_strings']) infr_positional = _is_positional(dests[dest]['option_strings']) if decl_positional != infr_positional: kinds = {True: 'positional', False: 'optional'} raise AssemblingError( '{func}: argument "{dest}" declared as {kind_i} ' '(in function signature) and {kind_d} (via decorator)' .format( func=function.__name__, dest=dest, kind_i=kinds[infr_positional], kind_d=kinds[decl_positional], )) # merge explicit argument declaration into the inferred one # (e.g. `help=...`) dests[dest].update(**declared_kw) else: # the argument is not in function signature varkw = getattr(spec, 'varkw', getattr(spec, 'keywords', [])) if varkw: # function accepts **kwargs; the argument goes into it dests[dest] = declared_kw else: # there's no way we can map the argument declaration # to function signature xs = (dests[x]['option_strings'] for x in dests) raise AssemblingError( '{func}: argument {flags} does not fit ' 'function signature: {sig}'.format( flags=', '.join(declared_kw['option_strings']), func=function.__name__, sig=', '.join('/'.join(x) for x in xs))) # pack the modified data back into a list inferred_args = dests.values() command_args = inferred_args or declared_args # add types, actions, etc. (e.g. default=3 implies type=int) command_args = [_guess(x) for x in command_args] for draft in command_args: draft = draft.copy() if 'help' not in draft: draft.update(help=DEFAULT_ARGUMENT_TEMPLATE) dest_or_opt_strings = draft.pop('option_strings') if parser.add_help and '-h' in dest_or_opt_strings: dest_or_opt_strings = [x for x in dest_or_opt_strings if x != '-h'] completer = draft.pop('completer', None) try: action = parser.add_argument(*dest_or_opt_strings, **draft) if COMPLETION_ENABLED and completer: action.completer = completer except Exception as e: raise type(e)('{func}: cannot add arg {args}: {msg}'.format( args='/'.join(dest_or_opt_strings), func=function.__name__, msg=e)) if function.__doc__ and not parser.description: parser.description = function.__doc__ parser.set_defaults(**{ DEST_FUNCTION: function, })
[ "def", "set_default_command", "(", "parser", ",", "function", ")", ":", "if", "parser", ".", "_subparsers", ":", "_require_support_for_default_command_with_subparsers", "(", ")", "spec", "=", "get_arg_spec", "(", "function", ")", "declared_args", "=", "getattr", "(", "function", ",", "ATTR_ARGS", ",", "[", "]", ")", "inferred_args", "=", "list", "(", "_get_args_from_signature", "(", "function", ")", ")", "if", "inferred_args", "and", "declared_args", ":", "# We've got a mixture of declared and inferred arguments", "# a mapping of \"dest\" strings to argument declarations.", "#", "# * a \"dest\" string is a normalized form of argument name, i.e.:", "#", "# '-f', '--foo' → 'foo'", "# 'foo-bar' → 'foo_bar'", "#", "# * argument declaration is a dictionary representing an argument;", "# it is obtained either from _get_args_from_signature() or from", "# an @arg decorator (as is).", "#", "dests", "=", "OrderedDict", "(", ")", "for", "argspec", "in", "inferred_args", ":", "dest", "=", "_get_parser_param_kwargs", "(", "parser", ",", "argspec", ")", "[", "'dest'", "]", "dests", "[", "dest", "]", "=", "argspec", "for", "declared_kw", "in", "declared_args", ":", "# an argument is declared via decorator", "dest", "=", "_get_dest", "(", "parser", ",", "declared_kw", ")", "if", "dest", "in", "dests", ":", "# the argument is already known from function signature", "#", "# now make sure that this declared arg conforms to the function", "# signature and therefore only refines an inferred arg:", "#", "# @arg('my-foo') maps to func(my_foo)", "# @arg('--my-bar') maps to func(my_bar=...)", "# either both arguments are positional or both are optional", "decl_positional", "=", "_is_positional", "(", "declared_kw", "[", "'option_strings'", "]", ")", "infr_positional", "=", "_is_positional", "(", "dests", "[", "dest", "]", "[", "'option_strings'", "]", ")", "if", "decl_positional", "!=", "infr_positional", ":", "kinds", "=", "{", "True", ":", "'positional'", ",", "False", ":", "'optional'", "}", "raise", "AssemblingError", "(", "'{func}: argument \"{dest}\" declared as {kind_i} '", "'(in function signature) and {kind_d} (via decorator)'", ".", "format", "(", "func", "=", "function", ".", "__name__", ",", "dest", "=", "dest", ",", "kind_i", "=", "kinds", "[", "infr_positional", "]", ",", "kind_d", "=", "kinds", "[", "decl_positional", "]", ",", ")", ")", "# merge explicit argument declaration into the inferred one", "# (e.g. 
`help=...`)", "dests", "[", "dest", "]", ".", "update", "(", "*", "*", "declared_kw", ")", "else", ":", "# the argument is not in function signature", "varkw", "=", "getattr", "(", "spec", ",", "'varkw'", ",", "getattr", "(", "spec", ",", "'keywords'", ",", "[", "]", ")", ")", "if", "varkw", ":", "# function accepts **kwargs; the argument goes into it", "dests", "[", "dest", "]", "=", "declared_kw", "else", ":", "# there's no way we can map the argument declaration", "# to function signature", "xs", "=", "(", "dests", "[", "x", "]", "[", "'option_strings'", "]", "for", "x", "in", "dests", ")", "raise", "AssemblingError", "(", "'{func}: argument {flags} does not fit '", "'function signature: {sig}'", ".", "format", "(", "flags", "=", "', '", ".", "join", "(", "declared_kw", "[", "'option_strings'", "]", ")", ",", "func", "=", "function", ".", "__name__", ",", "sig", "=", "', '", ".", "join", "(", "'/'", ".", "join", "(", "x", ")", "for", "x", "in", "xs", ")", ")", ")", "# pack the modified data back into a list", "inferred_args", "=", "dests", ".", "values", "(", ")", "command_args", "=", "inferred_args", "or", "declared_args", "# add types, actions, etc. (e.g. default=3 implies type=int)", "command_args", "=", "[", "_guess", "(", "x", ")", "for", "x", "in", "command_args", "]", "for", "draft", "in", "command_args", ":", "draft", "=", "draft", ".", "copy", "(", ")", "if", "'help'", "not", "in", "draft", ":", "draft", ".", "update", "(", "help", "=", "DEFAULT_ARGUMENT_TEMPLATE", ")", "dest_or_opt_strings", "=", "draft", ".", "pop", "(", "'option_strings'", ")", "if", "parser", ".", "add_help", "and", "'-h'", "in", "dest_or_opt_strings", ":", "dest_or_opt_strings", "=", "[", "x", "for", "x", "in", "dest_or_opt_strings", "if", "x", "!=", "'-h'", "]", "completer", "=", "draft", ".", "pop", "(", "'completer'", ",", "None", ")", "try", ":", "action", "=", "parser", ".", "add_argument", "(", "*", "dest_or_opt_strings", ",", "*", "*", "draft", ")", "if", "COMPLETION_ENABLED", "and", "completer", ":", "action", ".", "completer", "=", "completer", "except", "Exception", "as", "e", ":", "raise", "type", "(", "e", ")", "(", "'{func}: cannot add arg {args}: {msg}'", ".", "format", "(", "args", "=", "'/'", ".", "join", "(", "dest_or_opt_strings", ")", ",", "func", "=", "function", ".", "__name__", ",", "msg", "=", "e", ")", ")", "if", "function", ".", "__doc__", "and", "not", "parser", ".", "description", ":", "parser", ".", "description", "=", "function", ".", "__doc__", "parser", ".", "set_defaults", "(", "*", "*", "{", "DEST_FUNCTION", ":", "function", ",", "}", ")" ]
Sets default command (i.e. a function) for given parser. If `parser.description` is empty and the function has a docstring, it is used as the description. .. note:: An attempt to set default command to a parser which already has subparsers (e.g. added with :func:`~argh.assembling.add_commands`) results in a `AssemblingError`. .. note:: If there are both explicitly declared arguments (e.g. via :func:`~argh.decorators.arg`) and ones inferred from the function signature (e.g. via :func:`~argh.decorators.command`), declared ones will be merged into inferred ones. If an argument does not conform function signature, `AssemblingError` is raised. .. note:: If the parser was created with ``add_help=True`` (which is by default), option name ``-h`` is silently removed from any argument.
[ "Sets", "default", "command", "(", "i", ".", "e", ".", "a", "function", ")", "for", "given", "parser", "." ]
python
test
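How the set_default_command record above is reached from argh's public entry points (a sketch CLI script; run it as e.g. python app.py /tmp --verbose):

import argparse
import argh

def main(path, verbose=False):
    """Print the path that was given."""     # becomes parser.description
    return 'path=%s verbose=%s' % (path, verbose)

parser = argparse.ArgumentParser()
argh.set_default_command(parser, main)       # infers 'path' and '--verbose'
argh.dispatch(parser)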
noxdafox/clipspy
clips/agenda.py
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/agenda.py#L269-L287
def matches(self, verbosity=Verbosity.TERSE): """Shows partial matches and activations. Returns a tuple containing the combined sum of the matches for each pattern, the combined sum of partial matches and the number of activations. The verbosity parameter controls how much to output: * Verbosity.VERBOSE: detailed matches are printed to stdout * Verbosity.SUCCINT: a brief description is printed to stdout * Verbosity.TERSE: (default) nothing is printed to stdout """ data = clips.data.DataObject(self._env) lib.EnvMatches(self._env, self._rule, verbosity, data.byref) return tuple(data.value)
[ "def", "matches", "(", "self", ",", "verbosity", "=", "Verbosity", ".", "TERSE", ")", ":", "data", "=", "clips", ".", "data", ".", "DataObject", "(", "self", ".", "_env", ")", "lib", ".", "EnvMatches", "(", "self", ".", "_env", ",", "self", ".", "_rule", ",", "verbosity", ",", "data", ".", "byref", ")", "return", "tuple", "(", "data", ".", "value", ")" ]
Shows partial matches and activations. Returns a tuple containing the combined sum of the matches for each pattern, the combined sum of partial matches and the number of activations. The verbosity parameter controls how much to output: * Verbosity.VERBOSE: detailed matches are printed to stdout * Verbosity.SUCCINT: a brief description is printed to stdout * Verbosity.TERSE: (default) nothing is printed to stdout
[ "Shows", "partial", "matches", "and", "activations", "." ]
python
train
cloudtools/stacker
stacker/hooks/ecs.py
https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/hooks/ecs.py#L15-L45
def create_clusters(provider, context, **kwargs): """Creates ECS clusters. Expects a "clusters" argument, which should contain a list of cluster names to create. Args: provider (:class:`stacker.providers.base.BaseProvider`): provider instance context (:class:`stacker.context.Context`): context instance Returns: boolean for whether or not the hook succeeded. """ conn = get_session(provider.region).client('ecs') try: clusters = kwargs["clusters"] except KeyError: logger.error("setup_clusters hook missing \"clusters\" argument") return False if isinstance(clusters, basestring): clusters = [clusters] cluster_info = {} for cluster in clusters: logger.debug("Creating ECS cluster: %s", cluster) r = conn.create_cluster(clusterName=cluster) cluster_info[r["cluster"]["clusterName"]] = r return {"clusters": cluster_info}
[ "def", "create_clusters", "(", "provider", ",", "context", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "get_session", "(", "provider", ".", "region", ")", ".", "client", "(", "'ecs'", ")", "try", ":", "clusters", "=", "kwargs", "[", "\"clusters\"", "]", "except", "KeyError", ":", "logger", ".", "error", "(", "\"setup_clusters hook missing \\\"clusters\\\" argument\"", ")", "return", "False", "if", "isinstance", "(", "clusters", ",", "basestring", ")", ":", "clusters", "=", "[", "clusters", "]", "cluster_info", "=", "{", "}", "for", "cluster", "in", "clusters", ":", "logger", ".", "debug", "(", "\"Creating ECS cluster: %s\"", ",", "cluster", ")", "r", "=", "conn", ".", "create_cluster", "(", "clusterName", "=", "cluster", ")", "cluster_info", "[", "r", "[", "\"cluster\"", "]", "[", "\"clusterName\"", "]", "]", "=", "r", "return", "{", "\"clusters\"", ":", "cluster_info", "}" ]
Creates ECS clusters. Expects a "clusters" argument, which should contain a list of cluster names to create. Args: provider (:class:`stacker.providers.base.BaseProvider`): provider instance context (:class:`stacker.context.Context`): context instance Returns: boolean for whether or not the hook succeeded.
[ "Creates", "ECS", "clusters", "." ]
python
train
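The boto3 call at the core of the create_clusters record above, on its own (a sketch; needs AWS credentials, and the region and cluster names are made up):

import boto3

ecs = boto3.client('ecs', region_name='us-east-1')
cluster_info = {}
for name in ['web', 'worker']:
    response = ecs.create_cluster(clusterName=name)   # idempotent per name
    cluster_info[response['cluster']['clusterName']] = response
print(sorted(cluster_info))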
apache/spark
python/pyspark/rdd.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L150-L160
def ignore_unicode_prefix(f): """ Ignore the 'u' prefix of string in doc tests, to make it works in both python 2 and 3 """ if sys.version >= '3': # the representation of unicode string in Python 3 does not have prefix 'u', # so remove the prefix 'u' for doc tests literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE) f.__doc__ = literal_re.sub(r'\1\2', f.__doc__) return f
[ "def", "ignore_unicode_prefix", "(", "f", ")", ":", "if", "sys", ".", "version", ">=", "'3'", ":", "# the representation of unicode string in Python 3 does not have prefix 'u',", "# so remove the prefix 'u' for doc tests", "literal_re", "=", "re", ".", "compile", "(", "r\"(\\W|^)[uU](['])\"", ",", "re", ".", "UNICODE", ")", "f", ".", "__doc__", "=", "literal_re", ".", "sub", "(", "r'\\1\\2'", ",", "f", ".", "__doc__", ")", "return", "f" ]
Ignore the 'u' prefix of string in doc tests, to make it works in both python 2 and 3
[ "Ignore", "the", "u", "prefix", "of", "string", "in", "doc", "tests", "to", "make", "it", "works", "in", "both", "python", "2", "and", "3" ]
python
train
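The substitution in the ignore_unicode_prefix record above, shown on its own: the u prefix that Python 2 repr() adds to unicode literals is stripped from docstrings so the same doctest passes on both major versions:

import re

literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
doc = ">>> rdd.first()\nu'hello'"
print(literal_re.sub(r'\1\2', doc))   # prints the doctest with 'hello' instead of u'hello'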
stitchfix/pyxley
pyxley/charts/mg/graphic.py
https://github.com/stitchfix/pyxley/blob/2dab00022d977d986169cd8a629b3a2f91be893f/pyxley/charts/mg/graphic.py#L74-L87
def chart_type(self, value): """Set the MetricsGraphics chart type. Allowed charts are: line, histogram, point, and bar Args: value (str): chart type. Raises: ValueError: Not a valid chart type. """ if value not in self._allowed_charts: raise ValueError("Not a valid chart type") self.options["chart_type"] = value
[ "def", "chart_type", "(", "self", ",", "value", ")", ":", "if", "value", "not", "in", "self", ".", "_allowed_charts", ":", "raise", "ValueError", "(", "\"Not a valid chart type\"", ")", "self", ".", "options", "[", "\"chart_type\"", "]", "=", "value" ]
Set the MetricsGraphics chart type. Allowed charts are: line, histogram, point, and bar Args: value (str): chart type. Raises: ValueError: Not a valid chart type.
[ "Set", "the", "MetricsGraphics", "chart", "type", ".", "Allowed", "charts", "are", ":", "line", "histogram", "point", "and", "bar" ]
python
train
Microsoft/LightGBM
python-package/lightgbm/basic.py
https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/basic.py#L2063-L2090
def save_model(self, filename, num_iteration=None, start_iteration=0): """Save Booster to file. Parameters ---------- filename : string Filename to save Booster. num_iteration : int or None, optional (default=None) Index of the iteration that should be saved. If None, if the best iteration exists, it is saved; otherwise, all iterations are saved. If <= 0, all iterations are saved. start_iteration : int, optional (default=0) Start index of the iteration that should be saved. Returns ------- self : Booster Returns self. """ if num_iteration is None: num_iteration = self.best_iteration _safe_call(_LIB.LGBM_BoosterSaveModel( self.handle, ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), c_str(filename))) _dump_pandas_categorical(self.pandas_categorical, filename) return self
[ "def", "save_model", "(", "self", ",", "filename", ",", "num_iteration", "=", "None", ",", "start_iteration", "=", "0", ")", ":", "if", "num_iteration", "is", "None", ":", "num_iteration", "=", "self", ".", "best_iteration", "_safe_call", "(", "_LIB", ".", "LGBM_BoosterSaveModel", "(", "self", ".", "handle", ",", "ctypes", ".", "c_int", "(", "start_iteration", ")", ",", "ctypes", ".", "c_int", "(", "num_iteration", ")", ",", "c_str", "(", "filename", ")", ")", ")", "_dump_pandas_categorical", "(", "self", ".", "pandas_categorical", ",", "filename", ")", "return", "self" ]
Save Booster to file. Parameters ---------- filename : string Filename to save Booster. num_iteration : int or None, optional (default=None) Index of the iteration that should be saved. If None, if the best iteration exists, it is saved; otherwise, all iterations are saved. If <= 0, all iterations are saved. start_iteration : int, optional (default=0) Start index of the iteration that should be saved. Returns ------- self : Booster Returns self.
[ "Save", "Booster", "to", "file", "." ]
python
train
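A save/load round trip around the save_model record above (a sketch; the tiny random dataset exists only to make the snippet self-contained):

import lightgbm as lgb
import numpy as np

data = lgb.Dataset(np.random.rand(100, 3), label=np.random.rand(100))
booster = lgb.train({'objective': 'regression', 'verbosity': -1},
                    data, num_boost_round=10)
booster.save_model('model.txt')                        # all iterations
booster.save_model('model_head.txt', num_iteration=5)  # first 5 iterations only
restored = lgb.Booster(model_file='model.txt')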
pypa/pipenv
pipenv/patched/notpip/_internal/cache.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/cache.py#L132-L151
def get_path_for_link(self, link): # type: (Link) -> str """Return a directory to store cached wheels for link Because there are M wheels for any one sdist, we provide a directory to cache them in, and then consult that directory when looking up cache hits. We only insert things into the cache if they have plausible version numbers, so that we don't contaminate the cache with things that were not unique. E.g. ./package might have dozens of installs done for it and build a version of 0.0...and if we built and cached a wheel, we'd end up using the same wheel even if the source has been edited. :param link: The link of the sdist for which this will cache wheels. """ parts = self._get_cache_path_parts(link) # Store wheels within the root cache_dir return os.path.join(self.cache_dir, "wheels", *parts)
[ "def", "get_path_for_link", "(", "self", ",", "link", ")", ":", "# type: (Link) -> str", "parts", "=", "self", ".", "_get_cache_path_parts", "(", "link", ")", "# Store wheels within the root cache_dir", "return", "os", ".", "path", ".", "join", "(", "self", ".", "cache_dir", ",", "\"wheels\"", ",", "*", "parts", ")" ]
Return a directory to store cached wheels for link Because there are M wheels for any one sdist, we provide a directory to cache them in, and then consult that directory when looking up cache hits. We only insert things into the cache if they have plausible version numbers, so that we don't contaminate the cache with things that were not unique. E.g. ./package might have dozens of installs done for it and build a version of 0.0...and if we built and cached a wheel, we'd end up using the same wheel even if the source has been edited. :param link: The link of the sdist for which this will cache wheels.
[ "Return", "a", "directory", "to", "store", "cached", "wheels", "for", "link" ]
python
train
SeabornGames/Meta
seaborn_meta/calling_function.py
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/calling_function.py#L290-L303
def trace_error(function_index=2): """ This will return the line number and line text of the last error :param function_index: int to tell what frame to look from :return: int, str of the line number and line text """ info = function_info(function_index) traces = traceback.format_stack(limit=10) for trace in traces: file_, line_number, line_text = trace.split(',', 2) if file_ == ' File "%s"' % info['file'] and\ line_number != 'line %s' % info['line_number']: return line_number.split()[-1], line_text.strip() return None, None
[ "def", "trace_error", "(", "function_index", "=", "2", ")", ":", "info", "=", "function_info", "(", "function_index", ")", "traces", "=", "traceback", ".", "format_stack", "(", "limit", "=", "10", ")", "for", "trace", "in", "traces", ":", "file_", ",", "line_number", ",", "line_text", "=", "trace", ".", "split", "(", "','", ",", "2", ")", "if", "file_", "==", "' File \"%s\"'", "%", "info", "[", "'file'", "]", "and", "line_number", "!=", "'line %s'", "%", "info", "[", "'line_number'", "]", ":", "return", "line_number", ".", "split", "(", ")", "[", "-", "1", "]", ",", "line_text", ".", "strip", "(", ")", "return", "None", ",", "None" ]
This will return the line number and line text of the last error :param function_index: int to tell what frame to look from :return: int, str of the line number and line text
[ "This", "will", "return", "the", "line", "number", "and", "line", "text", "of", "the", "last", "error", ":", "param", "function_index", ":", "int", "to", "tell", "what", "frame", "to", "look", "from", ":", "return", ":", "int", "str", "of", "the", "line", "number", "and", "line", "text" ]
python
train
marl/jams
jams/schema.py
https://github.com/marl/jams/blob/b16778399b9528efbd71434842a079f7691a7a66/jams/schema.py#L35-L47
def add_namespace(filename): '''Add a namespace definition to our working set. Namespace files consist of partial JSON schemas defining the behavior of the `value` and `confidence` fields of an Annotation. Parameters ---------- filename : str Path to json file defining the namespace object ''' with open(filename, mode='r') as fileobj: __NAMESPACE__.update(json.load(fileobj))
[ "def", "add_namespace", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "mode", "=", "'r'", ")", "as", "fileobj", ":", "__NAMESPACE__", ".", "update", "(", "json", ".", "load", "(", "fileobj", ")", ")" ]
Add a namespace definition to our working set. Namespace files consist of partial JSON schemas defining the behavior of the `value` and `confidence` fields of an Annotation. Parameters ---------- filename : str Path to json file defining the namespace object
[ "Add", "a", "namespace", "definition", "to", "our", "working", "set", "." ]
python
valid
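Registering a custom namespace with the add_namespace record above (a sketch; the file name and the minimal schema body are assumptions about what a namespace file contains):

import json
import jams

with open('my_namespace.json', 'w') as fdesc:
    json.dump({'my_namespace': {'value': {'type': 'string'},
                                'dense': False}}, fdesc)

jams.schema.add_namespace('my_namespace.json')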
readbeyond/aeneas
aeneas/textfile.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/textfile.py#L879-L896
def _read_parsed(self, lines): """ Read text fragments from a parsed format text file. :param list lines: the lines of the parsed text file :param dict parameters: additional parameters for parsing (e.g., class/id regex strings) """ self.log(u"Parsing fragments from parsed text format") pairs = [] for line in lines: pieces = line.split(gc.PARSED_TEXT_SEPARATOR) if len(pieces) == 2: identifier = pieces[0].strip() text = pieces[1].strip() if len(identifier) > 0: pairs.append((identifier, [text])) self._create_text_fragments(pairs)
[ "def", "_read_parsed", "(", "self", ",", "lines", ")", ":", "self", ".", "log", "(", "u\"Parsing fragments from parsed text format\"", ")", "pairs", "=", "[", "]", "for", "line", "in", "lines", ":", "pieces", "=", "line", ".", "split", "(", "gc", ".", "PARSED_TEXT_SEPARATOR", ")", "if", "len", "(", "pieces", ")", "==", "2", ":", "identifier", "=", "pieces", "[", "0", "]", ".", "strip", "(", ")", "text", "=", "pieces", "[", "1", "]", ".", "strip", "(", ")", "if", "len", "(", "identifier", ")", ">", "0", ":", "pairs", ".", "append", "(", "(", "identifier", ",", "[", "text", "]", ")", ")", "self", ".", "_create_text_fragments", "(", "pairs", ")" ]
Read text fragments from a parsed format text file. :param list lines: the lines of the parsed text file :param dict parameters: additional parameters for parsing (e.g., class/id regex strings)
[ "Read", "text", "fragments", "from", "a", "parsed", "format", "text", "file", "." ]
python
train
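A self-contained sketch of the same splitting logic; the '|' separator is an assumption standing in for gc.PARSED_TEXT_SEPARATOR, and malformed lines are silently skipped just as in the original:

PARSED_TEXT_SEPARATOR = '|'  # assumed stand-in for gc.PARSED_TEXT_SEPARATOR

lines = ['f001|Hello world', 'f002|Second fragment', 'no separator here']
pairs = []
for line in lines:
    pieces = line.split(PARSED_TEXT_SEPARATOR)
    if len(pieces) == 2:
        identifier = pieces[0].strip()
        text = pieces[1].strip()
        if len(identifier) > 0:
            pairs.append((identifier, [text]))
print(pairs)  # [('f001', ['Hello world']), ('f002', ['Second fragment'])]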
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_span.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_span.py#L128-L141
def monitor_session_span_command_dest_vlan_val(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") monitor = ET.SubElement(config, "monitor", xmlns="urn:brocade.com:mgmt:brocade-span") session = ET.SubElement(monitor, "session") session_number_key = ET.SubElement(session, "session-number") session_number_key.text = kwargs.pop('session_number') span_command = ET.SubElement(session, "span-command") dest_vlan_val = ET.SubElement(span_command, "dest-vlan-val") dest_vlan_val.text = kwargs.pop('dest_vlan_val') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "monitor_session_span_command_dest_vlan_val", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "monitor", "=", "ET", ".", "SubElement", "(", "config", ",", "\"monitor\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-span\"", ")", "session", "=", "ET", ".", "SubElement", "(", "monitor", ",", "\"session\"", ")", "session_number_key", "=", "ET", ".", "SubElement", "(", "session", ",", "\"session-number\"", ")", "session_number_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'session_number'", ")", "span_command", "=", "ET", ".", "SubElement", "(", "session", ",", "\"span-command\"", ")", "dest_vlan_val", "=", "ET", ".", "SubElement", "(", "span_command", ",", "\"dest-vlan-val\"", ")", "dest_vlan_val", ".", "text", "=", "kwargs", ".", "pop", "(", "'dest_vlan_val'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
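The generated method is a thin ElementTree builder; a standalone sketch of the XML it emits, with made-up session and VLAN values:

import xml.etree.ElementTree as ET

config = ET.Element("config")
monitor = ET.SubElement(config, "monitor", xmlns="urn:brocade.com:mgmt:brocade-span")
session = ET.SubElement(monitor, "session")
ET.SubElement(session, "session-number").text = "1"        # assumed session number
span_command = ET.SubElement(session, "span-command")
ET.SubElement(span_command, "dest-vlan-val").text = "100"  # assumed VLAN id
print(ET.tostring(config).decode())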
Arello-Mobile/swagger2rst
swg2rst/utils/exampilators.py
https://github.com/Arello-Mobile/swagger2rst/blob/e519f70701477dcc9f0bb237ee5b8e08e848701b/swg2rst/utils/exampilators.py#L169-L186
def get_response_example(cls, operation, response): """ Get example for response object by operation object :param Operation operation: operation object :param Response response: response object """ path = "#/paths/'{}'/{}/responses/{}".format( operation.path, operation.method, response.name) kwargs = dict(paths=[path]) if response.type in PRIMITIVE_TYPES: result = cls.get_example_value_for_primitive_type( response.type, response.properties, response.type_format, **kwargs) else: schema = SchemaObjects.get(response.type) result = cls.get_example_by_schema(schema, **kwargs) return result
[ "def", "get_response_example", "(", "cls", ",", "operation", ",", "response", ")", ":", "path", "=", "\"#/paths/'{}'/{}/responses/{}\"", ".", "format", "(", "operation", ".", "path", ",", "operation", ".", "method", ",", "response", ".", "name", ")", "kwargs", "=", "dict", "(", "paths", "=", "[", "path", "]", ")", "if", "response", ".", "type", "in", "PRIMITIVE_TYPES", ":", "result", "=", "cls", ".", "get_example_value_for_primitive_type", "(", "response", ".", "type", ",", "response", ".", "properties", ",", "response", ".", "type_format", ",", "*", "*", "kwargs", ")", "else", ":", "schema", "=", "SchemaObjects", ".", "get", "(", "response", ".", "type", ")", "result", "=", "cls", ".", "get_example_by_schema", "(", "schema", ",", "*", "*", "kwargs", ")", "return", "result" ]
Get example for response object by operation object :param Operation operation: operation object :param Response response: response object
[ "Get", "example", "for", "response", "object", "by", "operation", "object" ]
python
train
RedHatInsights/insights-core
insights/contrib/pyparsing.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/pyparsing.py#L3683-L3755
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): """Helper method for constructing grammars of expressions made up of operators working in a precedence hierarchy. Operators may be unary or binary, left- or right-associative. Parse actions can also be attached to operator expressions. Parameters: - baseExpr - expression representing the most basic element for the nested - opList - list of tuples, one for each operator precedence level in the expression grammar; each tuple is of the form (opExpr, numTerms, rightLeftAssoc, parseAction), where: - opExpr is the pyparsing expression for the operator; may also be a string, which will be converted to a Literal; if numTerms is 3, opExpr is a tuple of two expressions, for the two operators separating the 3 terms - numTerms is the number of terms for this operator (must be 1, 2, or 3) - rightLeftAssoc is the indicator whether the operator is right or left associative, using the pyparsing-defined constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. - parseAction is the parse action to be associated with expressions matching this operator expression (the parse action tuple member may be omitted) - lpar - expression for matching left-parentheses (default=Suppress('(')) - rpar - expression for matching right-parentheses (default=Suppress(')')) """ ret = Forward() lastExpr = baseExpr | ( lpar + ret + rpar ) for i,operDef in enumerate(opList): opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr if arity == 3: if opExpr is None or len(opExpr) != 2: raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") opExpr1, opExpr2 = opExpr thisExpr = Forward().setName(termName) if rightLeftAssoc == opAssoc.LEFT: if arity == 1: matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) elif arity == 2: if opExpr is not None: matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) else: matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) elif arity == 3: matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") elif rightLeftAssoc == opAssoc.RIGHT: if arity == 1: # try to avoid LR with this extra test if not isinstance(opExpr, Optional): opExpr = Optional(opExpr) matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) elif arity == 2: if opExpr is not None: matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) else: matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) elif arity == 3: matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") else: raise ValueError("operator must indicate right or left associativity") if pa: matchExpr.setParseAction( pa ) thisExpr <<= ( matchExpr.setName(termName) | lastExpr ) lastExpr = thisExpr ret <<= lastExpr return ret
[ "def", "infixNotation", "(", "baseExpr", ",", "opList", ",", "lpar", "=", "Suppress", "(", "'('", ")", ",", "rpar", "=", "Suppress", "(", "')'", ")", ")", ":", "ret", "=", "Forward", "(", ")", "lastExpr", "=", "baseExpr", "|", "(", "lpar", "+", "ret", "+", "rpar", ")", "for", "i", ",", "operDef", "in", "enumerate", "(", "opList", ")", ":", "opExpr", ",", "arity", ",", "rightLeftAssoc", ",", "pa", "=", "(", "operDef", "+", "(", "None", ",", ")", ")", "[", ":", "4", "]", "termName", "=", "\"%s term\"", "%", "opExpr", "if", "arity", "<", "3", "else", "\"%s%s term\"", "%", "opExpr", "if", "arity", "==", "3", ":", "if", "opExpr", "is", "None", "or", "len", "(", "opExpr", ")", "!=", "2", ":", "raise", "ValueError", "(", "\"if numterms=3, opExpr must be a tuple or list of two expressions\"", ")", "opExpr1", ",", "opExpr2", "=", "opExpr", "thisExpr", "=", "Forward", "(", ")", ".", "setName", "(", "termName", ")", "if", "rightLeftAssoc", "==", "opAssoc", ".", "LEFT", ":", "if", "arity", "==", "1", ":", "matchExpr", "=", "FollowedBy", "(", "lastExpr", "+", "opExpr", ")", "+", "Group", "(", "lastExpr", "+", "OneOrMore", "(", "opExpr", ")", ")", "elif", "arity", "==", "2", ":", "if", "opExpr", "is", "not", "None", ":", "matchExpr", "=", "FollowedBy", "(", "lastExpr", "+", "opExpr", "+", "lastExpr", ")", "+", "Group", "(", "lastExpr", "+", "OneOrMore", "(", "opExpr", "+", "lastExpr", ")", ")", "else", ":", "matchExpr", "=", "FollowedBy", "(", "lastExpr", "+", "lastExpr", ")", "+", "Group", "(", "lastExpr", "+", "OneOrMore", "(", "lastExpr", ")", ")", "elif", "arity", "==", "3", ":", "matchExpr", "=", "FollowedBy", "(", "lastExpr", "+", "opExpr1", "+", "lastExpr", "+", "opExpr2", "+", "lastExpr", ")", "+", "Group", "(", "lastExpr", "+", "opExpr1", "+", "lastExpr", "+", "opExpr2", "+", "lastExpr", ")", "else", ":", "raise", "ValueError", "(", "\"operator must be unary (1), binary (2), or ternary (3)\"", ")", "elif", "rightLeftAssoc", "==", "opAssoc", ".", "RIGHT", ":", "if", "arity", "==", "1", ":", "# try to avoid LR with this extra test", "if", "not", "isinstance", "(", "opExpr", ",", "Optional", ")", ":", "opExpr", "=", "Optional", "(", "opExpr", ")", "matchExpr", "=", "FollowedBy", "(", "opExpr", ".", "expr", "+", "thisExpr", ")", "+", "Group", "(", "opExpr", "+", "thisExpr", ")", "elif", "arity", "==", "2", ":", "if", "opExpr", "is", "not", "None", ":", "matchExpr", "=", "FollowedBy", "(", "lastExpr", "+", "opExpr", "+", "thisExpr", ")", "+", "Group", "(", "lastExpr", "+", "OneOrMore", "(", "opExpr", "+", "thisExpr", ")", ")", "else", ":", "matchExpr", "=", "FollowedBy", "(", "lastExpr", "+", "thisExpr", ")", "+", "Group", "(", "lastExpr", "+", "OneOrMore", "(", "thisExpr", ")", ")", "elif", "arity", "==", "3", ":", "matchExpr", "=", "FollowedBy", "(", "lastExpr", "+", "opExpr1", "+", "thisExpr", "+", "opExpr2", "+", "thisExpr", ")", "+", "Group", "(", "lastExpr", "+", "opExpr1", "+", "thisExpr", "+", "opExpr2", "+", "thisExpr", ")", "else", ":", "raise", "ValueError", "(", "\"operator must be unary (1), binary (2), or ternary (3)\"", ")", "else", ":", "raise", "ValueError", "(", "\"operator must indicate right or left associativity\"", ")", "if", "pa", ":", "matchExpr", ".", "setParseAction", "(", "pa", ")", "thisExpr", "<<=", "(", "matchExpr", ".", "setName", "(", "termName", ")", "|", "lastExpr", ")", "lastExpr", "=", "thisExpr", "ret", "<<=", "lastExpr", "return", "ret" ]
Helper method for constructing grammars of expressions made up of operators working in a precedence hierarchy. Operators may be unary or binary, left- or right-associative. Parse actions can also be attached to operator expressions. Parameters: - baseExpr - expression representing the most basic element for the nested - opList - list of tuples, one for each operator precedence level in the expression grammar; each tuple is of the form (opExpr, numTerms, rightLeftAssoc, parseAction), where: - opExpr is the pyparsing expression for the operator; may also be a string, which will be converted to a Literal; if numTerms is 3, opExpr is a tuple of two expressions, for the two operators separating the 3 terms - numTerms is the number of terms for this operator (must be 1, 2, or 3) - rightLeftAssoc is the indicator whether the operator is right or left associative, using the pyparsing-defined constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. - parseAction is the parse action to be associated with expressions matching this operator expression (the parse action tuple member may be omitted) - lpar - expression for matching left-parentheses (default=Suppress('(')) - rpar - expression for matching right-parentheses (default=Suppress(')'))
[ "Helper", "method", "for", "constructing", "grammars", "of", "expressions", "made", "up", "of", "operators", "working", "in", "a", "precedence", "hierarchy", ".", "Operators", "may", "be", "unary", "or", "binary", "left", "-", "or", "right", "-", "associative", ".", "Parse", "actions", "can", "also", "be", "attached", "to", "operator", "expressions", "." ]
python
train
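A small usage example with standard pyparsing names (infixNotation, opAssoc, oneOf, Word and nums are all part of pyparsing's public API); it builds a tiny arithmetic grammar and shows the grouping the precedence levels impose:

from pyparsing import Word, infixNotation, nums, oneOf, opAssoc

integer = Word(nums)
expr = infixNotation(integer, [
    (oneOf('* /'), 2, opAssoc.LEFT),  # higher-precedence level listed first
    (oneOf('+ -'), 2, opAssoc.LEFT),
])
print(expr.parseString('1+2*3'))  # [['1', '+', ['2', '*', '3']]]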
ryan-roemer/django-cloud-browser
cloud_browser/cloud/fs.py
https://github.com/ryan-roemer/django-cloud-browser/blob/b06cdd24885a6309e843ed924dbf1705b67e7f48/cloud_browser/cloud/fs.py#L65-L80
def from_path(cls, container, path): """Create object from path.""" from datetime import datetime path = path.strip(SEP) full_path = os.path.join(container.base_path, path) last_modified = datetime.fromtimestamp(os.path.getmtime(full_path)) obj_type = cls.type_cls.SUBDIR if is_dir(full_path)\ else cls.type_cls.FILE return cls(container, name=path, size=os.path.getsize(full_path), content_type=None, last_modified=last_modified, obj_type=obj_type)
[ "def", "from_path", "(", "cls", ",", "container", ",", "path", ")", ":", "from", "datetime", "import", "datetime", "path", "=", "path", ".", "strip", "(", "SEP", ")", "full_path", "=", "os", ".", "path", ".", "join", "(", "container", ".", "base_path", ",", "path", ")", "last_modified", "=", "datetime", ".", "fromtimestamp", "(", "os", ".", "path", ".", "getmtime", "(", "full_path", ")", ")", "obj_type", "=", "cls", ".", "type_cls", ".", "SUBDIR", "if", "is_dir", "(", "full_path", ")", "else", "cls", ".", "type_cls", ".", "FILE", "return", "cls", "(", "container", ",", "name", "=", "path", ",", "size", "=", "os", ".", "path", ".", "getsize", "(", "full_path", ")", ",", "content_type", "=", "None", ",", "last_modified", "=", "last_modified", ",", "obj_type", "=", "obj_type", ")" ]
Create object from path.
[ "Create", "object", "from", "path", "." ]
python
train
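The classmethod composes ordinary os.path metadata; a minimal sketch of that core, run against the current directory:

import os
from datetime import datetime

full_path = '.'
last_modified = datetime.fromtimestamp(os.path.getmtime(full_path))
obj_type = 'SUBDIR' if os.path.isdir(full_path) else 'FILE'
print(full_path, os.path.getsize(full_path), last_modified, obj_type)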
OSSOS/MOP
src/ossos/core/ossos/storage.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/storage.py#L138-L212
def cone_search(ra, dec, dra=0.01, ddec=0.01, mjdate=None, calibration_level=2, use_ssos=True, collection='CFHTMEGAPIPE'): """Do a QUERY on the TAP service for all observations that are part of OSSOS (*P05/*P016) where taken after mjd and have calibration 'observable'. @param ra: RA center of search cont @type ra: Quantity @param dec: float degrees @type dec: Quantity @param dra: float degrees @type dra: Quantity @param ddec: float degrees @type ddec: Quantity @param calibration_level: What calibration level must the found image have, @param mjdate: what data must the observation be to @param collection: name of the data collection to be searched. @param use_ssos: USE the SSOIS server to find comparison? False -> Use CAOM2 TAP query. ke on. this is a CAOM2 parameter of CADC, 2 means calibrated data. """ if use_ssos: ssois_server = "http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/cadcbin/ssos/fixedssos.pl" params = dict(pos="{0:f},{1:f}".format(ra.to(units.degree).value, dec.to(units.degree).value)) result = requests.get(ssois_server, params=params) table = ascii.read(result.text, format='tab') table = table[table['Telescope/Instrument'] == 'CFHT/MegaCam'] column_name_mapping = {'Image': 'collectionID', 'Filter': 'filter', 'Exptime': 'exptime'} # rename the columns for key in column_name_mapping: table[key].name = column_name_mapping[key] table['collectionID'] = [x[:-1] for x in table['collectionID']] # compute the mjdate from the time string. table['mjdate'] = Time(table['Date/Time']).mjd return table data = dict(QUERY=(" SELECT Observation.observationID as collectionID, " " Plane.time_bounds_lower AS mjdate, " " Plane.time_exposure AS exptime, " " Plane.energy_bandpassName as filter" " FROM caom2.Observation AS Observation " " JOIN caom2.Plane AS Plane " " ON Observation.obsID = Plane.obsID " " WHERE ( Observation.collection = '{}' ) " " AND Plane.calibrationLevel > {} " " AND ( Plane.energy_bandpassName LIKE 'r.%' OR Plane.energy_bandpassName LIKE 'gri.%' ) " " AND ( Observation.proposal_id LIKE '%P05' or Observation.proposal_id LIKE '%P06' )" " AND Observation.target_name NOT LIKE 'WP%'"), REQUEST="doQuery", LANG="ADQL", FORMAT="tsv") data["QUERY"] = data["QUERY"].format(calibration_level, collection) data["QUERY"] += (" AND " " CONTAINS( BOX('ICRS', {}, {}, {}, {}), " " Plane.position_bounds ) = 1 ").format(ra.to(units.degree).value, dec.to(units.degree).value, dra.to(units.degree).value, ddec.to(units.degree).value) if mjdate is not None: data["QUERY"] += " AND Plane.time_bounds_lower < {} AND Plane.time_bounds_cval2 > {} ".format( mjdate + 1.0 / 24.0, mjdate - 1 / 24.0) result = requests.get(TAP_WEB_SERVICE, params=data, verify=False) logger.debug("Doing TAP Query using url: %s" % (str(result.url))) table_reader = ascii.get_reader(Reader=ascii.Basic) table_reader.header.splitter.delimiter = '\t' table_reader.data.splitter.delimiter = '\t' table = table_reader.read(result.text) logger.debug(str(table)) return table
[ "def", "cone_search", "(", "ra", ",", "dec", ",", "dra", "=", "0.01", ",", "ddec", "=", "0.01", ",", "mjdate", "=", "None", ",", "calibration_level", "=", "2", ",", "use_ssos", "=", "True", ",", "collection", "=", "'CFHTMEGAPIPE'", ")", ":", "if", "use_ssos", ":", "ssois_server", "=", "\"http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/cadcbin/ssos/fixedssos.pl\"", "params", "=", "dict", "(", "pos", "=", "\"{0:f},{1:f}\"", ".", "format", "(", "ra", ".", "to", "(", "units", ".", "degree", ")", ".", "value", ",", "dec", ".", "to", "(", "units", ".", "degree", ")", ".", "value", ")", ")", "result", "=", "requests", ".", "get", "(", "ssois_server", ",", "params", "=", "params", ")", "table", "=", "ascii", ".", "read", "(", "result", ".", "text", ",", "format", "=", "'tab'", ")", "table", "=", "table", "[", "table", "[", "'Telescope/Instrument'", "]", "==", "'CFHT/MegaCam'", "]", "column_name_mapping", "=", "{", "'Image'", ":", "'collectionID'", ",", "'Filter'", ":", "'filter'", ",", "'Exptime'", ":", "'exptime'", "}", "# rename the columns", "for", "key", "in", "column_name_mapping", ":", "table", "[", "key", "]", ".", "name", "=", "column_name_mapping", "[", "key", "]", "table", "[", "'collectionID'", "]", "=", "[", "x", "[", ":", "-", "1", "]", "for", "x", "in", "table", "[", "'collectionID'", "]", "]", "# compute the mjdate from the time string.", "table", "[", "'mjdate'", "]", "=", "Time", "(", "table", "[", "'Date/Time'", "]", ")", ".", "mjd", "return", "table", "data", "=", "dict", "(", "QUERY", "=", "(", "\" SELECT Observation.observationID as collectionID, \"", "\" Plane.time_bounds_lower AS mjdate, \"", "\" Plane.time_exposure AS exptime, \"", "\" Plane.energy_bandpassName as filter\"", "\" FROM caom2.Observation AS Observation \"", "\" JOIN caom2.Plane AS Plane \"", "\" ON Observation.obsID = Plane.obsID \"", "\" WHERE ( Observation.collection = '{}' ) \"", "\" AND Plane.calibrationLevel > {} \"", "\" AND ( Plane.energy_bandpassName LIKE 'r.%' OR Plane.energy_bandpassName LIKE 'gri.%' ) \"", "\" AND ( Observation.proposal_id LIKE '%P05' or Observation.proposal_id LIKE '%P06' )\"", "\" AND Observation.target_name NOT LIKE 'WP%'\"", ")", ",", "REQUEST", "=", "\"doQuery\"", ",", "LANG", "=", "\"ADQL\"", ",", "FORMAT", "=", "\"tsv\"", ")", "data", "[", "\"QUERY\"", "]", "=", "data", "[", "\"QUERY\"", "]", ".", "format", "(", "calibration_level", ",", "collection", ")", "data", "[", "\"QUERY\"", "]", "+=", "(", "\" AND \"", "\" CONTAINS( BOX('ICRS', {}, {}, {}, {}), \"", "\" Plane.position_bounds ) = 1 \"", ")", ".", "format", "(", "ra", ".", "to", "(", "units", ".", "degree", ")", ".", "value", ",", "dec", ".", "to", "(", "units", ".", "degree", ")", ".", "value", ",", "dra", ".", "to", "(", "units", ".", "degree", ")", ".", "value", ",", "ddec", ".", "to", "(", "units", ".", "degree", ")", ".", "value", ")", "if", "mjdate", "is", "not", "None", ":", "data", "[", "\"QUERY\"", "]", "+=", "\" AND Plane.time_bounds_lower < {} AND Plane.time_bounds_cval2 > {} \"", ".", "format", "(", "mjdate", "+", "1.0", "/", "24.0", ",", "mjdate", "-", "1", "/", "24.0", ")", "result", "=", "requests", ".", "get", "(", "TAP_WEB_SERVICE", ",", "params", "=", "data", ",", "verify", "=", "False", ")", "logger", ".", "debug", "(", "\"Doing TAP Query using url: %s\"", "%", "(", "str", "(", "result", ".", "url", ")", ")", ")", "table_reader", "=", "ascii", ".", "get_reader", "(", "Reader", "=", "ascii", ".", "Basic", ")", "table_reader", ".", "header", ".", "splitter", ".", "delimiter", "=", "'\\t'", "table_reader", 
".", "data", ".", "splitter", ".", "delimiter", "=", "'\\t'", "table", "=", "table_reader", ".", "read", "(", "result", ".", "text", ")", "logger", ".", "debug", "(", "str", "(", "table", ")", ")", "return", "table" ]
Do a QUERY on the TAP service for all observations that are part of OSSOS (*P05/*P016) where taken after mjd and have calibration 'observable'. @param ra: RA center of search cone @type ra: Quantity @param dec: Dec center of search cone @type dec: Quantity @param dra: RA half-width of search cone, in degrees @type dra: Quantity @param ddec: Dec half-width of search cone, in degrees @type ddec: Quantity @param calibration_level: what calibration level the found image must have; this is a CAOM2 parameter of CADC, 2 means calibrated data. @param mjdate: what date the observation must be taken near @param collection: name of the data collection to be searched. @param use_ssos: use the SSOIS server to find comparisons? False -> use a CAOM2 TAP query.
[ "Do", "a", "QUERY", "on", "the", "TAP", "service", "for", "all", "observations", "that", "are", "part", "of", "OSSOS", "(", "*", "P05", "/", "*", "P016", ")", "where", "taken", "after", "mjd", "and", "have", "calibration", "observable", "." ]
python
train
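A hedged sketch of the SSOIS branch only, assuming requests and astropy are installed; note it performs a live HTTP request against the CADC service, and the coordinates are arbitrary test values:

import requests
from astropy import units
from astropy.io import ascii

ra = 180.0 * units.degree   # arbitrary position
dec = 0.0 * units.degree
ssois_server = "http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/cadcbin/ssos/fixedssos.pl"
params = dict(pos="{0:f},{1:f}".format(ra.to(units.degree).value,
                                       dec.to(units.degree).value))
result = requests.get(ssois_server, params=params)
table = ascii.read(result.text, format='tab')
print(table.colnames)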
dineshappavoo/virtdc
vmonere/sockets/vmonere_sender.socket.py
https://github.com/dineshappavoo/virtdc/blob/e61872cdc860092ab7affac8f13a7ca1f9e46a49/vmonere/sockets/vmonere_sender.socket.py#L68-L90
def report_usage_to_host(host_ip, vmid): #base value cpu_usage = 0.0 os_mem_usage = 0.0 task_mem_usage = 0.0 io_usage = 0.0 cpu_usage = get_cpu_usage() os_mem_usage = get_os_mem_usage() task_mem_usage = get_task_mem_usage() io_usage = get_io_usage() usage = str(vmid.strip())+' | '+str(cpu_usage)+' | '+str(os_mem_usage)+' | '+str(task_mem_usage)+' | '+str(io_usage) #usage = "'cpu |sdbfsj |sdfsdhf |sdfvsdvfgdfvj'" #cmd = 'python /var/lib/virtdc/vmonere/host/vmonere_listener.py '+usage '''cmd = '/bin/ssh -n -q -o StrictHostKeyChecking=no root@host_ip \"/bin/nohup /bin/python /var/lib/virtdc/vmonere/host/vmonere_listener.py '+usage+' &\"' cmd = cmd.replace("host_ip",str(host_ip).strip())''' #report usage via socket start_client_socket(host_ip, usage)
[ "def", "report_usage_to_host", "(", "host_ip", ",", "vmid", ")", ":", "#base value", "cpu_usage", "=", "0.0", "os_mem_usage", "=", "0.0", "task_mem_usage", "=", "0.0", "io_usage", "=", "0.0", "cpu_usage", "=", "get_cpu_usage", "(", ")", "os_mem_usage", "=", "get_os_mem_usage", "(", ")", "task_mem_usage", "=", "get_task_mem_usage", "(", ")", "io_usage", "=", "get_io_usage", "(", ")", "usage", "=", "str", "(", "vmid", ".", "strip", "(", ")", ")", "+", "' | '", "+", "str", "(", "cpu_usage", ")", "+", "' | '", "+", "str", "(", "os_mem_usage", ")", "+", "' | '", "+", "str", "(", "task_mem_usage", ")", "+", "' | '", "+", "str", "(", "io_usage", ")", "#usage = \"'cpu |sdbfsj |sdfsdhf |sdfvsdvfgdfvj'\"", "#cmd = 'python /var/lib/virtdc/vmonere/host/vmonere_listener.py '+usage", "#report usage via socket", "start_client_socket", "(", "host_ip", ",", "usage", ")" ]
cmd = '/bin/ssh -n -q -o StrictHostKeyChecking=no root@host_ip \"/bin/nohup /bin/python /var/lib/virtdc/vmonere/host/vmonere_listener.py '+usage+' &\"' cmd = cmd.replace("host_ip",str(host_ip).strip())
[ "cmd", "=", "/", "bin", "/", "ssh", "-", "n", "-", "q", "-", "o", "StrictHostKeyChecking", "=", "no", "root" ]
python
train
yoavaviram/python-amazon-simple-product-api
amazon/api.py
https://github.com/yoavaviram/python-amazon-simple-product-api/blob/f1cb0e209145fcfac9444e4c733dd19deb59d31a/amazon/api.py#L795-L816
def creators(self): """Creators. Creators are not the authors. These are usually editors, translators, narrators, etc. :return: Returns a list of creators where each is a tuple containing: 1. The creators name (string). 2. The creators role (string). """ # return tuples of name and role result = [] creators = self._safe_get_element('ItemAttributes.Creator') if creators is not None: for creator in creators: role = creator.attrib['Role'] if \ 'Role' in creator.attrib else None result.append((creator.text, role)) return result
[ "def", "creators", "(", "self", ")", ":", "# return tuples of name and role", "result", "=", "[", "]", "creators", "=", "self", ".", "_safe_get_element", "(", "'ItemAttributes.Creator'", ")", "if", "creators", "is", "not", "None", ":", "for", "creator", "in", "creators", ":", "role", "=", "creator", ".", "attrib", "[", "'Role'", "]", "if", "'Role'", "in", "creator", ".", "attrib", "else", "None", "result", ".", "append", "(", "(", "creator", ".", "text", ",", "role", ")", ")", "return", "result" ]
Creators. Creators are not the authors. These are usually editors, translators, narrators, etc. :return: Returns a list of creators where each is a tuple containing: 1. The creators name (string). 2. The creators role (string).
[ "Creators", "." ]
python
train
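A standalone illustration of the Role-attribute handling with a made-up Amazon-style XML fragment, using ElementTree instead of the wrapped lxml tree the API returns:

import xml.etree.ElementTree as ET

xml = ('<ItemAttributes>'
       '<Creator Role="Editor">Jane Doe</Creator>'
       '<Creator>John Roe</Creator>'
       '</ItemAttributes>')
result = []
for creator in ET.fromstring(xml).findall('Creator'):
    role = creator.attrib['Role'] if 'Role' in creator.attrib else None
    result.append((creator.text, role))
print(result)  # [('Jane Doe', 'Editor'), ('John Roe', None)]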
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAFetch/Fetcher.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/Fetcher.py#L118-L201
def QA_quotation(code, start, end, frequence, market, source=DATASOURCE.TDX, output=OUTPUT_FORMAT.DATAFRAME): """一个统一的获取k线的方法 如果使用mongo,从本地数据库获取,失败则在线获取 Arguments: code {str/list} -- 期货/股票的代码 start {str} -- 开始日期 end {str} -- 结束日期 frequence {enum} -- 频率 QA.FREQUENCE market {enum} -- 市场 QA.MARKET_TYPE source {enum} -- 来源 QA.DATASOURCE output {enum} -- 输出类型 QA.OUTPUT_FORMAT """ res = None if market == MARKET_TYPE.STOCK_CN: if frequence == FREQUENCE.DAY: if source == DATASOURCE.MONGO: try: res = QAQueryAdv.QA_fetch_stock_day_adv(code, start, end) except: res = None if source == DATASOURCE.TDX or res == None: res = QATdx.QA_fetch_get_stock_day(code, start, end, '00') res = QA_DataStruct_Stock_day(res.set_index(['date', 'code'])) elif source == DATASOURCE.TUSHARE: res = QATushare.QA_fetch_get_stock_day(code, start, end, '00') elif frequence in [FREQUENCE.ONE_MIN, FREQUENCE.FIVE_MIN, FREQUENCE.FIFTEEN_MIN, FREQUENCE.THIRTY_MIN, FREQUENCE.SIXTY_MIN]: if source == DATASOURCE.MONGO: try: res = QAQueryAdv.QA_fetch_stock_min_adv( code, start, end, frequence=frequence) except: res = None if source == DATASOURCE.TDX or res == None: res = QATdx.QA_fetch_get_stock_min( code, start, end, frequence=frequence) res = QA_DataStruct_Stock_min( res.set_index(['datetime', 'code'])) elif market == MARKET_TYPE.FUTURE_CN: if frequence == FREQUENCE.DAY: if source == DATASOURCE.MONGO: try: res = QAQueryAdv.QA_fetch_future_day_adv(code, start, end) except: res = None if source == DATASOURCE.TDX or res == None: res = QATdx.QA_fetch_get_future_day(code, start, end) res = QA_DataStruct_Future_day(res.set_index(['date', 'code'])) elif frequence in [FREQUENCE.ONE_MIN, FREQUENCE.FIVE_MIN, FREQUENCE.FIFTEEN_MIN, FREQUENCE.THIRTY_MIN, FREQUENCE.SIXTY_MIN]: if source == DATASOURCE.MONGO: try: res = QAQueryAdv.QA_fetch_future_min_adv( code, start, end, frequence=frequence) except: res = None if source == DATASOURCE.TDX or res == None: res = QATdx.QA_fetch_get_future_min( code, start, end, frequence=frequence) res = QA_DataStruct_Future_min( res.set_index(['datetime', 'code'])) # 指数代码和股票代码是冲突重复的, sh000001 上证指数 000001 是不同的 elif market == MARKET_TYPE.INDEX_CN: if frequence == FREQUENCE.DAY: if source == DATASOURCE.MONGO: res = QAQueryAdv.QA_fetch_index_day_adv(code, start, end) elif market == MARKET_TYPE.OPTION_CN: if source == DATASOURCE.MONGO: #res = QAQueryAdv.QA_fetch_option_day_adv(code, start, end) raise NotImplementedError('CURRENT NOT FINISH THIS METHOD') # print(type(res)) if output is OUTPUT_FORMAT.DATAFRAME: return res.data elif output is OUTPUT_FORMAT.DATASTRUCT: return res elif output is OUTPUT_FORMAT.NDARRAY: return res.to_numpy() elif output is OUTPUT_FORMAT.JSON: return res.to_json() elif output is OUTPUT_FORMAT.LIST: return res.to_list()
[ "def", "QA_quotation", "(", "code", ",", "start", ",", "end", ",", "frequence", ",", "market", ",", "source", "=", "DATASOURCE", ".", "TDX", ",", "output", "=", "OUTPUT_FORMAT", ".", "DATAFRAME", ")", ":", "res", "=", "None", "if", "market", "==", "MARKET_TYPE", ".", "STOCK_CN", ":", "if", "frequence", "==", "FREQUENCE", ".", "DAY", ":", "if", "source", "==", "DATASOURCE", ".", "MONGO", ":", "try", ":", "res", "=", "QAQueryAdv", ".", "QA_fetch_stock_day_adv", "(", "code", ",", "start", ",", "end", ")", "except", ":", "res", "=", "None", "if", "source", "==", "DATASOURCE", ".", "TDX", "or", "res", "==", "None", ":", "res", "=", "QATdx", ".", "QA_fetch_get_stock_day", "(", "code", ",", "start", ",", "end", ",", "'00'", ")", "res", "=", "QA_DataStruct_Stock_day", "(", "res", ".", "set_index", "(", "[", "'date'", ",", "'code'", "]", ")", ")", "elif", "source", "==", "DATASOURCE", ".", "TUSHARE", ":", "res", "=", "QATushare", ".", "QA_fetch_get_stock_day", "(", "code", ",", "start", ",", "end", ",", "'00'", ")", "elif", "frequence", "in", "[", "FREQUENCE", ".", "ONE_MIN", ",", "FREQUENCE", ".", "FIVE_MIN", ",", "FREQUENCE", ".", "FIFTEEN_MIN", ",", "FREQUENCE", ".", "THIRTY_MIN", ",", "FREQUENCE", ".", "SIXTY_MIN", "]", ":", "if", "source", "==", "DATASOURCE", ".", "MONGO", ":", "try", ":", "res", "=", "QAQueryAdv", ".", "QA_fetch_stock_min_adv", "(", "code", ",", "start", ",", "end", ",", "frequence", "=", "frequence", ")", "except", ":", "res", "=", "None", "if", "source", "==", "DATASOURCE", ".", "TDX", "or", "res", "==", "None", ":", "res", "=", "QATdx", ".", "QA_fetch_get_stock_min", "(", "code", ",", "start", ",", "end", ",", "frequence", "=", "frequence", ")", "res", "=", "QA_DataStruct_Stock_min", "(", "res", ".", "set_index", "(", "[", "'datetime'", ",", "'code'", "]", ")", ")", "elif", "market", "==", "MARKET_TYPE", ".", "FUTURE_CN", ":", "if", "frequence", "==", "FREQUENCE", ".", "DAY", ":", "if", "source", "==", "DATASOURCE", ".", "MONGO", ":", "try", ":", "res", "=", "QAQueryAdv", ".", "QA_fetch_future_day_adv", "(", "code", ",", "start", ",", "end", ")", "except", ":", "res", "=", "None", "if", "source", "==", "DATASOURCE", ".", "TDX", "or", "res", "==", "None", ":", "res", "=", "QATdx", ".", "QA_fetch_get_future_day", "(", "code", ",", "start", ",", "end", ")", "res", "=", "QA_DataStruct_Future_day", "(", "res", ".", "set_index", "(", "[", "'date'", ",", "'code'", "]", ")", ")", "elif", "frequence", "in", "[", "FREQUENCE", ".", "ONE_MIN", ",", "FREQUENCE", ".", "FIVE_MIN", ",", "FREQUENCE", ".", "FIFTEEN_MIN", ",", "FREQUENCE", ".", "THIRTY_MIN", ",", "FREQUENCE", ".", "SIXTY_MIN", "]", ":", "if", "source", "==", "DATASOURCE", ".", "MONGO", ":", "try", ":", "res", "=", "QAQueryAdv", ".", "QA_fetch_future_min_adv", "(", "code", ",", "start", ",", "end", ",", "frequence", "=", "frequence", ")", "except", ":", "res", "=", "None", "if", "source", "==", "DATASOURCE", ".", "TDX", "or", "res", "==", "None", ":", "res", "=", "QATdx", ".", "QA_fetch_get_future_min", "(", "code", ",", "start", ",", "end", ",", "frequence", "=", "frequence", ")", "res", "=", "QA_DataStruct_Future_min", "(", "res", ".", "set_index", "(", "[", "'datetime'", ",", "'code'", "]", ")", ")", "# 指数代码和股票代码是冲突重复的, sh000001 上证指数 000001 是不同的", "elif", "market", "==", "MARKET_TYPE", ".", "INDEX_CN", ":", "if", "frequence", "==", "FREQUENCE", ".", "DAY", ":", "if", "source", "==", "DATASOURCE", ".", "MONGO", ":", "res", "=", "QAQueryAdv", ".", "QA_fetch_index_day_adv", "(", "code", ",", "start", ",", "end", ")", 
"elif", "market", "==", "MARKET_TYPE", ".", "OPTION_CN", ":", "if", "source", "==", "DATASOURCE", ".", "MONGO", ":", "#res = QAQueryAdv.QA_fetch_option_day_adv(code, start, end)", "raise", "NotImplementedError", "(", "'CURRENT NOT FINISH THIS METHOD'", ")", "# print(type(res))", "if", "output", "is", "OUTPUT_FORMAT", ".", "DATAFRAME", ":", "return", "res", ".", "data", "elif", "output", "is", "OUTPUT_FORMAT", ".", "DATASTRUCT", ":", "return", "res", "elif", "output", "is", "OUTPUT_FORMAT", ".", "NDARRAY", ":", "return", "res", ".", "to_numpy", "(", ")", "elif", "output", "is", "OUTPUT_FORMAT", ".", "JSON", ":", "return", "res", ".", "to_json", "(", ")", "elif", "output", "is", "OUTPUT_FORMAT", ".", "LIST", ":", "return", "res", ".", "to_list", "(", ")" ]
A unified method for fetching k-line (candlestick) data. If the source is mongo, fetch from the local database; on failure, fall back to fetching online. Arguments: code {str/list} -- futures/stock code start {str} -- start date end {str} -- end date frequence {enum} -- frequency QA.FREQUENCE market {enum} -- market QA.MARKET_TYPE source {enum} -- data source QA.DATASOURCE output {enum} -- output type QA.OUTPUT_FORMAT
[ "一个统一的获取k线的方法", "如果使用mongo", "从本地数据库获取", "失败则在线获取" ]
python
train
PeerAssets/pypeerassets
pypeerassets/transactions.py
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/transactions.py#L259-L267
def tx_output(network: str, value: Decimal, n: int, script: ScriptSig) -> TxOut: '''create TxOut object''' network_params = net_query(network) return TxOut(network=network_params, value=int(value * network_params.to_unit), n=n, script_pubkey=script)
[ "def", "tx_output", "(", "network", ":", "str", ",", "value", ":", "Decimal", ",", "n", ":", "int", ",", "script", ":", "ScriptSig", ")", "->", "TxOut", ":", "network_params", "=", "net_query", "(", "network", ")", "return", "TxOut", "(", "network", "=", "network_params", ",", "value", "=", "int", "(", "value", "*", "network_params", ".", "to_unit", ")", ",", "n", "=", "n", ",", "script_pubkey", "=", "script", ")" ]
create TxOut object
[ "create", "TxOut", "object" ]
python
train
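The only arithmetic in tx_output is the coin-to-base-unit conversion; a sketch with an assumed to_unit of 10**6, where the real value would come from net_query(network):

from decimal import Decimal

to_unit = Decimal(10) ** 6   # assumed units-per-coin, for illustration only
value = Decimal('12.5')      # amount in coins
print(int(value * to_unit))  # 12500000 base units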
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/datastore_range_iterators.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/datastore_range_iterators.py#L405-L419
def to_json(self): """Serializes all states into json form. Returns: all states in json-compatible map. """ cursor = self._get_cursor() cursor_object = False if cursor and isinstance(cursor, datastore_query.Cursor): cursor = cursor.to_websafe_string() cursor_object = True return {"key_range": self._key_range.to_json(), "query_spec": self._query_spec.to_json(), "cursor": cursor, "cursor_object": cursor_object}
[ "def", "to_json", "(", "self", ")", ":", "cursor", "=", "self", ".", "_get_cursor", "(", ")", "cursor_object", "=", "False", "if", "cursor", "and", "isinstance", "(", "cursor", ",", "datastore_query", ".", "Cursor", ")", ":", "cursor", "=", "cursor", ".", "to_websafe_string", "(", ")", "cursor_object", "=", "True", "return", "{", "\"key_range\"", ":", "self", ".", "_key_range", ".", "to_json", "(", ")", ",", "\"query_spec\"", ":", "self", ".", "_query_spec", ".", "to_json", "(", ")", ",", "\"cursor\"", ":", "cursor", ",", "\"cursor_object\"", ":", "cursor_object", "}" ]
Serializes all states into json form. Returns: all states in json-compatible map.
[ "Serializes", "all", "states", "into", "json", "form", "." ]
python
train
openid/python-openid
openid/server/server.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/server/server.py#L1262-L1295
def getAssociation(self, assoc_handle, dumb, checkExpiration=True): """Get the association with the specified handle. @type assoc_handle: str @param dumb: Is this association used with dumb mode? @type dumb: bool @returns: the association, or None if no valid association with that handle was found. @returntype: L{openid.association.Association} """ # Hmm. We've created an interface that deals almost entirely with # assoc_handles. The only place outside the Signatory that uses this # (and thus the only place that ever sees Association objects) is # when creating a response to an association request, as it must have # the association's secret. if assoc_handle is None: raise ValueError("assoc_handle must not be None") if dumb: key = self._dumb_key else: key = self._normal_key assoc = self.store.getAssociation(key, assoc_handle) if assoc is not None and assoc.expiresIn <= 0: logging.info("requested %sdumb key %r is expired (by %s seconds)" % ((not dumb) and 'not-' or '', assoc_handle, assoc.expiresIn)) if checkExpiration: self.store.removeAssociation(key, assoc_handle) assoc = None return assoc
[ "def", "getAssociation", "(", "self", ",", "assoc_handle", ",", "dumb", ",", "checkExpiration", "=", "True", ")", ":", "# Hmm. We've created an interface that deals almost entirely with", "# assoc_handles. The only place outside the Signatory that uses this", "# (and thus the only place that ever sees Association objects) is", "# when creating a response to an association request, as it must have", "# the association's secret.", "if", "assoc_handle", "is", "None", ":", "raise", "ValueError", "(", "\"assoc_handle must not be None\"", ")", "if", "dumb", ":", "key", "=", "self", ".", "_dumb_key", "else", ":", "key", "=", "self", ".", "_normal_key", "assoc", "=", "self", ".", "store", ".", "getAssociation", "(", "key", ",", "assoc_handle", ")", "if", "assoc", "is", "not", "None", "and", "assoc", ".", "expiresIn", "<=", "0", ":", "logging", ".", "info", "(", "\"requested %sdumb key %r is expired (by %s seconds)\"", "%", "(", "(", "not", "dumb", ")", "and", "'not-'", "or", "''", ",", "assoc_handle", ",", "assoc", ".", "expiresIn", ")", ")", "if", "checkExpiration", ":", "self", ".", "store", ".", "removeAssociation", "(", "key", ",", "assoc_handle", ")", "assoc", "=", "None", "return", "assoc" ]
Get the association with the specified handle. @type assoc_handle: str @param dumb: Is this association used with dumb mode? @type dumb: bool @returns: the association, or None if no valid association with that handle was found. @returntype: L{openid.association.Association}
[ "Get", "the", "association", "with", "the", "specified", "handle", "." ]
python
train
orbingol/NURBS-Python
geomdl/multi.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/multi.py#L1080-L1111
def select_color(cpcolor, evalcolor, idx=0): """ Selects item color for plotting. :param cpcolor: color for control points grid item :type cpcolor: str, list, tuple :param evalcolor: color for evaluated points grid item :type evalcolor: str, list, tuple :param idx: index of the current geometry object :type idx: int :return: a list of color values :rtype: list """ # Random colors by default color = utilities.color_generator() # Constant color for control points grid if isinstance(cpcolor, str): color[0] = cpcolor # User-defined color for control points grid if isinstance(cpcolor, (list, tuple)): color[0] = cpcolor[idx] # Constant color for evaluated points grid if isinstance(evalcolor, str): color[1] = evalcolor # User-defined color for evaluated points grid if isinstance(evalcolor, (list, tuple)): color[1] = evalcolor[idx] return color
[ "def", "select_color", "(", "cpcolor", ",", "evalcolor", ",", "idx", "=", "0", ")", ":", "# Random colors by default", "color", "=", "utilities", ".", "color_generator", "(", ")", "# Constant color for control points grid", "if", "isinstance", "(", "cpcolor", ",", "str", ")", ":", "color", "[", "0", "]", "=", "cpcolor", "# User-defined color for control points grid", "if", "isinstance", "(", "cpcolor", ",", "(", "list", ",", "tuple", ")", ")", ":", "color", "[", "0", "]", "=", "cpcolor", "[", "idx", "]", "# Constant color for evaluated points grid", "if", "isinstance", "(", "evalcolor", ",", "str", ")", ":", "color", "[", "1", "]", "=", "evalcolor", "# User-defined color for evaluated points grid", "if", "isinstance", "(", "evalcolor", ",", "(", "list", ",", "tuple", ")", ")", ":", "color", "[", "1", "]", "=", "evalcolor", "[", "idx", "]", "return", "color" ]
Selects item color for plotting. :param cpcolor: color for control points grid item :type cpcolor: str, list, tuple :param evalcolor: color for evaluated points grid item :type evalcolor: str, list, tuple :param idx: index of the current geometry object :type idx: int :return: a list of color values :rtype: list
[ "Selects", "item", "color", "for", "plotting", "." ]
python
train
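A runnable sketch of select_color; the two-element default list stands in for utilities.color_generator(), which is not reproduced here:

def select_color(cpcolor, evalcolor, idx=0):
    color = ['black', 'green']  # stand-in for utilities.color_generator()
    if isinstance(cpcolor, str):
        color[0] = cpcolor
    if isinstance(cpcolor, (list, tuple)):
        color[0] = cpcolor[idx]
    if isinstance(evalcolor, str):
        color[1] = evalcolor
    if isinstance(evalcolor, (list, tuple)):
        color[1] = evalcolor[idx]
    return color

print(select_color('red', ['blue', 'cyan'], idx=1))  # ['red', 'cyan']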
rchatterjee/pwmodels
src/pwmodel/models.py
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/models.py#L25-L81
def create_model(modelfunc, fname='', listw=[], outfname='', limit=int(3e6), min_pwlen=6, topk=10000, sep=r'\s+'): """:modelfunc: is a function that takes a word and returns its splits. for ngram model this function returns all the ngrams of a word, for PCFG it will return splits of the password. @modelfunc: func: string -> [list of strings] @fname: name of the file to read from @listw: list of passwords. Used passwords from both the files and listw if provided. @outfname: the file to write down the model. """ def length_filter(pw): pw = ''.join(c for c in pw if c in VALID_CHARS) return len(pw) >= min_pwlen pws = [] if fname: pws = helper.open_get_line(fname, limit=limit, pw_filter=length_filter, sep=sep) big_dict = defaultdict(int) total_f, total_e = 0, 0 # Add topk passwords from the input dataset to the list topk_pws = [] for pw, c in itertools.chain(pws, listw): for ng in modelfunc(pw): big_dict[ng] += c total_f += c total_e += 1 if len(big_dict) % 100000 == 0: print(("Dictionary size: {} (Total_freq: {}; Total_pws: {}"\ .format(len(big_dict), total_f, total_e))) if len(topk_pws) >= topk: heapq.heappushpop(topk_pws, (c, pw)) else: heapq.heappush(topk_pws, (c, pw)) # Adding topk password to deal with probability reduction of popular # passwords. Mostly effective for n-gram models print("topk={}".format(topk)) if topk > 0: for c, pw in topk_pws: tpw = helper.START + pw + helper.END big_dict[tpw] += c total_f += c total_e += 1 big_dict[NPWS_W] = total_e big_dict[TOTALF_W] = total_f nDawg = dawg.IntCompletionDAWG(big_dict) if not outfname: outfname = 'tmpmodel.dawg.gz' elif not outfname.endswith('.gz'): outfname += '.gz' pathlib.Path(outfname).parent.mkdir(parents=True, exist_ok=True) helper.save_dawg(nDawg, outfname) return nDawg
[ "def", "create_model", "(", "modelfunc", ",", "fname", "=", "''", ",", "listw", "=", "[", "]", ",", "outfname", "=", "''", ",", "limit", "=", "int", "(", "3e6", ")", ",", "min_pwlen", "=", "6", ",", "topk", "=", "10000", ",", "sep", "=", "r'\\s+'", ")", ":", "def", "length_filter", "(", "pw", ")", ":", "pw", "=", "''", ".", "join", "(", "c", "for", "c", "in", "pw", "if", "c", "in", "VALID_CHARS", ")", "return", "len", "(", "pw", ")", ">=", "min_pwlen", "pws", "=", "[", "]", "if", "fname", ":", "pws", "=", "helper", ".", "open_get_line", "(", "fname", ",", "limit", "=", "limit", ",", "pw_filter", "=", "length_filter", ",", "sep", "=", "sep", ")", "big_dict", "=", "defaultdict", "(", "int", ")", "total_f", ",", "total_e", "=", "0", ",", "0", "# Add topk passwords from the input dataset to the list", "topk_pws", "=", "[", "]", "for", "pw", ",", "c", "in", "itertools", ".", "chain", "(", "pws", ",", "listw", ")", ":", "for", "ng", "in", "modelfunc", "(", "pw", ")", ":", "big_dict", "[", "ng", "]", "+=", "c", "total_f", "+=", "c", "total_e", "+=", "1", "if", "len", "(", "big_dict", ")", "%", "100000", "==", "0", ":", "print", "(", "(", "\"Dictionary size: {} (Total_freq: {}; Total_pws: {}\"", ".", "format", "(", "len", "(", "big_dict", ")", ",", "total_f", ",", "total_e", ")", ")", ")", "if", "len", "(", "topk_pws", ")", ">=", "topk", ":", "heapq", ".", "heappushpop", "(", "topk_pws", ",", "(", "c", ",", "pw", ")", ")", "else", ":", "heapq", ".", "heappush", "(", "topk_pws", ",", "(", "c", ",", "pw", ")", ")", "# Adding topk password to deal with probability reduction of popular", "# passwords. Mostly effective for n-gram models", "print", "(", "\"topk={}\"", ".", "format", "(", "topk", ")", ")", "if", "topk", ">", "0", ":", "for", "c", ",", "pw", "in", "topk_pws", ":", "tpw", "=", "helper", ".", "START", "+", "pw", "+", "helper", ".", "END", "big_dict", "[", "tpw", "]", "+=", "c", "total_f", "+=", "c", "total_e", "+=", "1", "big_dict", "[", "NPWS_W", "]", "=", "total_e", "big_dict", "[", "TOTALF_W", "]", "=", "total_f", "nDawg", "=", "dawg", ".", "IntCompletionDAWG", "(", "big_dict", ")", "if", "not", "outfname", ":", "outfname", "=", "'tmpmodel.dawg.gz'", "elif", "not", "outfname", ".", "endswith", "(", "'.gz'", ")", ":", "outfname", "+=", "'.gz'", "pathlib", ".", "Path", "(", "outfname", ")", ".", "parent", ".", "mkdir", "(", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "helper", ".", "save_dawg", "(", "nDawg", ",", "outfname", ")", "return", "nDawg" ]
:modelfunc: is a function that takes a word and returns its splits. for ngram model this function returns all the ngrams of a word, for PCFG it will return splits of the password. @modelfunc: func: string -> [list of strings] @fname: name of the file to read from @listw: list of passwords. Used passwords from both the files and listw if provided. @outfname: the file to write down the model.
[ ":", "modelfunc", ":", "is", "a", "function", "that", "takes", "a", "word", "and", "returns", "its", "splits", ".", "for", "ngram", "model", "this", "function", "returns", "all", "the", "ngrams", "of", "a", "word", "for", "PCFG", "it", "will", "return", "splits", "of", "the", "password", "." ]
python
train
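The counting core (n-gram accumulation plus a bounded top-k heap) in isolation, with a toy modelfunc and made-up frequencies; the DAWG serialization step is omitted:

import heapq
from collections import defaultdict

def trigrams(pw):  # toy modelfunc
    return [pw[i:i + 3] for i in range(len(pw) - 2)]

big_dict = defaultdict(int)
topk, topk_pws = 2, []
for pw, c in [('password', 100), ('letmein', 40), ('dragon', 25)]:
    for ng in trigrams(pw):
        big_dict[ng] += c
    if len(topk_pws) >= topk:
        heapq.heappushpop(topk_pws, (c, pw))  # keep only the k most frequent
    else:
        heapq.heappush(topk_pws, (c, pw))
print(sorted(topk_pws, reverse=True))  # [(100, 'password'), (40, 'letmein')]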
blockstack/blockstack-core
api/resolver.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/api/resolver.py#L250-L281
def get_users(username): """ Fetch data from username in .id namespace """ reply = {} log.debug('Begin /v[x]/users/' + username) if username is None: reply['error'] = "No username given" return jsonify(reply), 404 if ',' in username: reply['error'] = 'Multiple username queries are no longer supported.' return jsonify(reply), 401 if "." not in username: fqa = "{}.{}".format(username, 'id') else: fqa = username profile = get_profile(fqa) reply[username] = profile if 'error' in profile: status_code = 200 if 'status_code' in profile: status_code = profile['status_code'] del profile['status_code'] return jsonify(reply), status_code else: return jsonify(reply), 200
[ "def", "get_users", "(", "username", ")", ":", "reply", "=", "{", "}", "log", ".", "debug", "(", "'Begin /v[x]/users/'", "+", "username", ")", "if", "username", "is", "None", ":", "reply", "[", "'error'", "]", "=", "\"No username given\"", "return", "jsonify", "(", "reply", ")", ",", "404", "if", "','", "in", "username", ":", "reply", "[", "'error'", "]", "=", "'Multiple username queries are no longer supported.'", "return", "jsonify", "(", "reply", ")", ",", "401", "if", "\".\"", "not", "in", "username", ":", "fqa", "=", "\"{}.{}\"", ".", "format", "(", "username", ",", "'id'", ")", "else", ":", "fqa", "=", "username", "profile", "=", "get_profile", "(", "fqa", ")", "reply", "[", "username", "]", "=", "profile", "if", "'error'", "in", "profile", ":", "status_code", "=", "200", "if", "'status_code'", "in", "profile", ":", "status_code", "=", "profile", "[", "'status_code'", "]", "del", "profile", "[", "'status_code'", "]", "return", "jsonify", "(", "reply", ")", ",", "status_code", "else", ":", "return", "jsonify", "(", "reply", ")", ",", "200" ]
Fetch data from username in .id namespace
[ "Fetch", "data", "from", "username", "in", ".", "id", "namespace" ]
python
train
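The namespace-qualification step on its own: a bare username is assumed to live in the .id namespace, while fully qualified names pass through unchanged:

username = 'alice'
fqa = username if '.' in username else '{}.{}'.format(username, 'id')
print(fqa)  # alice.id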
twilio/twilio-python
twilio/rest/api/v2010/account/new_key.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/new_key.py#L83-L92
def get_instance(self, payload): """ Build an instance of NewKeyInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.new_key.NewKeyInstance :rtype: twilio.rest.api.v2010.account.new_key.NewKeyInstance """ return NewKeyInstance(self._version, payload, account_sid=self._solution['account_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "NewKeyInstance", "(", "self", ".", "_version", ",", "payload", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", ")" ]
Build an instance of NewKeyInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.new_key.NewKeyInstance :rtype: twilio.rest.api.v2010.account.new_key.NewKeyInstance
[ "Build", "an", "instance", "of", "NewKeyInstance" ]
python
train
gpiantoni/bidso
bidso/files.py
https://github.com/gpiantoni/bidso/blob/af163b921ec4e3d70802de07f174de184491cfce/bidso/files.py#L37-L87
def get_filename(self, base_dir=None, modality=None): """Construct filename based on the attributes. Parameters ---------- base_dir : Path path of the root directory. If specified, the return value is a Path, with base_dir / sub-XXX / (ses-XXX /) modality / filename otherwise the return value is a string. modality : str overwrite value for modality (i.e. the directory inside subject/session). This is necessary because sometimes the modality attribute is ambiguous. Returns ------- str or Path str of the filename if base_dir is not specified, otherwise the full Path """ filename = 'sub-' + self.subject if self.session is not None: filename += '_ses-' + self.session if self.task is not None: filename += '_task-' + self.task if self.run is not None and self.direction is None: filename += '_run-' + self.run if self.acquisition is not None: filename += '_acq-' + self.acquisition if self.direction is not None: filename += '_dir-' + self.direction if self.run is not None and self.direction is not None: filename += '_run-' + self.run if self.modality is not None: filename += '_' + self.modality if self.extension is not None: filename += self.extension if base_dir is None: return filename else: dir_name = base_dir / ('sub-' + self.subject) if self.session is not None: dir_name /= 'ses-' + self.session if modality is not None: dir_name /= modality else: dir_name = add_modality(dir_name, self.modality) return dir_name / filename
[ "def", "get_filename", "(", "self", ",", "base_dir", "=", "None", ",", "modality", "=", "None", ")", ":", "filename", "=", "'sub-'", "+", "self", ".", "subject", "if", "self", ".", "session", "is", "not", "None", ":", "filename", "+=", "'_ses-'", "+", "self", ".", "session", "if", "self", ".", "task", "is", "not", "None", ":", "filename", "+=", "'_task-'", "+", "self", ".", "task", "if", "self", ".", "run", "is", "not", "None", "and", "self", ".", "direction", "is", "None", ":", "filename", "+=", "'_run-'", "+", "self", ".", "run", "if", "self", ".", "acquisition", "is", "not", "None", ":", "filename", "+=", "'_acq-'", "+", "self", ".", "acquisition", "if", "self", ".", "direction", "is", "not", "None", ":", "filename", "+=", "'_dir-'", "+", "self", ".", "direction", "if", "self", ".", "run", "is", "not", "None", "and", "self", ".", "direction", "is", "not", "None", ":", "filename", "+=", "'_run-'", "+", "self", ".", "run", "if", "self", ".", "modality", "is", "not", "None", ":", "filename", "+=", "'_'", "+", "self", ".", "modality", "if", "self", ".", "extension", "is", "not", "None", ":", "filename", "+=", "self", ".", "extension", "if", "base_dir", "is", "None", ":", "return", "filename", "else", ":", "dir_name", "=", "base_dir", "/", "(", "'sub-'", "+", "self", ".", "subject", ")", "if", "self", ".", "session", "is", "not", "None", ":", "dir_name", "/=", "'ses-'", "+", "self", ".", "session", "if", "modality", "is", "not", "None", ":", "dir_name", "/=", "modality", "else", ":", "dir_name", "=", "add_modality", "(", "dir_name", ",", "self", ".", "modality", ")", "return", "dir_name", "/", "filename" ]
Construct filename based on the attributes. Parameters ---------- base_dir : Path path of the root directory. If specified, the return value is a Path, with base_dir / sub-XXX / (ses-XXX /) modality / filename otherwise the return value is a string. modality : str overwrite value for modality (i.e. the directory inside subject/session). This is necessary because sometimes the modality attribute is ambiguous. Returns ------- str or Path str of the filename if base_dir is not specified, otherwise the full Path
[ "Construct", "filename", "based", "on", "the", "attributes", "." ]
python
train
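A flattened sketch of the BIDS-style name assembly with made-up entity values, mirroring the concatenation order in get_filename:

subject, session, task, run = '01', 'a', 'rest', '1'
modality, extension = 'bold', '.nii.gz'
filename = 'sub-' + subject
filename += '_ses-' + session
filename += '_task-' + task
filename += '_run-' + run
filename += '_' + modality
filename += extension
print(filename)  # sub-01_ses-a_task-rest_run-1_bold.nii.gz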
juju/charm-helpers
charmhelpers/contrib/mellanox/infiniband.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/mellanox/infiniband.py#L137-L153
def ipoib_interfaces(): """Return a list of IPOIB capable ethernet interfaces""" interfaces = [] for interface in network_interfaces(): try: driver = re.search('^driver: (.+)$', subprocess.check_output([ 'ethtool', '-i', interface]), re.M).group(1) if driver in IPOIB_DRIVERS: interfaces.append(interface) except Exception: log("Skipping interface %s" % interface, level=INFO) continue return interfaces
[ "def", "ipoib_interfaces", "(", ")", ":", "interfaces", "=", "[", "]", "for", "interface", "in", "network_interfaces", "(", ")", ":", "try", ":", "driver", "=", "re", ".", "search", "(", "'^driver: (.+)$'", ",", "subprocess", ".", "check_output", "(", "[", "'ethtool'", ",", "'-i'", ",", "interface", "]", ")", ",", "re", ".", "M", ")", ".", "group", "(", "1", ")", "if", "driver", "in", "IPOIB_DRIVERS", ":", "interfaces", ".", "append", "(", "interface", ")", "except", "Exception", ":", "log", "(", "\"Skipping interface %s\"", "%", "interface", ",", "level", "=", "INFO", ")", "continue", "return", "interfaces" ]
Return a list of IPOIB capable ethernet interfaces
[ "Return", "a", "list", "of", "IPOIB", "capable", "ethernet", "interfaces" ]
python
train
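The driver-detection regex in isolation, fed a hypothetical ethtool -i output; the original passes the bytes returned by subprocess.check_output to re.search with a str pattern, which only works on Python 2, so this sketch uses bytes throughout:

import re

sample = b'driver: mlx4_en\nversion: 4.0-0\nbus-info: 0000:03:00.0\n'  # hypothetical output
driver = re.search(b'^driver: (.+)$', sample, re.M).group(1)
print(driver)  # b'mlx4_en'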
awacha/sastool
sastool/fitting/fitfunctions/basic.py
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/basic.py#L110-L124
def Exponential(x, a, tau, y0): """Exponential function Inputs: ------- ``x``: independent variable ``a``: scaling factor ``tau``: time constant ``y0``: additive constant Formula: -------- ``a*exp(x/tau)+y0`` """ return np.exp(x / tau) * a + y0
[ "def", "Exponential", "(", "x", ",", "a", ",", "tau", ",", "y0", ")", ":", "return", "np", ".", "exp", "(", "x", "/", "tau", ")", "*", "a", "+", "y0" ]
Exponential function Inputs: ------- ``x``: independent variable ``a``: scaling factor ``tau``: time constant ``y0``: additive constant Formula: -------- ``a*exp(x/tau)+y0``
[ "Exponential", "function" ]
python
train
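A quick numpy check of the formula with arbitrary parameter values:

import numpy as np

def Exponential(x, a, tau, y0):
    return np.exp(x / tau) * a + y0

x = np.linspace(0.0, 1.0, 5)
print(Exponential(x, a=2.0, tau=0.5, y0=1.0))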
emory-libraries/eulcommon
eulcommon/djangoextras/auth/decorators.py
https://github.com/emory-libraries/eulcommon/blob/dc63a9b3b5e38205178235e0d716d1b28158d3a9/eulcommon/djangoextras/auth/decorators.py#L132-L147
def permission_required_with_ajax(perm, login_url=None): """ Decorator for views that checks whether a user has a particular permission enabled, redirecting to the log-in page if necessary, but returns a special response for ajax requests. See :meth:`eulcore.django.auth.decorators.user_passes_test_with_ajax`. Usage is the same as :meth:`django.contrib.auth.decorators.permission_required` :: @permission_required_with_ajax('polls.can_vote', login_url='/loginpage/') def my_view(request): ... """ return user_passes_test_with_ajax(lambda u: u.has_perm(perm), login_url=login_url)
[ "def", "permission_required_with_ajax", "(", "perm", ",", "login_url", "=", "None", ")", ":", "return", "user_passes_test_with_ajax", "(", "lambda", "u", ":", "u", ".", "has_perm", "(", "perm", ")", ",", "login_url", "=", "login_url", ")" ]
Decorator for views that checks whether a user has a particular permission enabled, redirecting to the log-in page if necessary, but returns a special response for ajax requests. See :meth:`eulcore.django.auth.decorators.user_passes_test_with_ajax`. Usage is the same as :meth:`django.contrib.auth.decorators.permission_required` :: @permission_required_with_ajax('polls.can_vote', login_url='/loginpage/') def my_view(request): ...
[ "Decorator", "for", "views", "that", "checks", "whether", "a", "user", "has", "a", "particular", "permission", "enabled", "redirecting", "to", "the", "log", "-", "in", "page", "if", "necessary", "but", "returns", "a", "special", "response", "for", "ajax", "requests", ".", "See", ":", "meth", ":", "eulcore", ".", "django", ".", "auth", ".", "decorators", ".", "user_passes_test_with_ajax", "." ]
python
train
jazzband/django-ddp
dddp/__init__.py
https://github.com/jazzband/django-ddp/blob/1e1954b06fe140346acea43582515991685e4e01/dddp/__init__.py#L180-L185
def autodiscover(): """Import all `ddp` submodules from `settings.INSTALLED_APPS`.""" from django.utils.module_loading import autodiscover_modules from dddp.api import API autodiscover_modules('ddp', register_to=API) return API
[ "def", "autodiscover", "(", ")", ":", "from", "django", ".", "utils", ".", "module_loading", "import", "autodiscover_modules", "from", "dddp", ".", "api", "import", "API", "autodiscover_modules", "(", "'ddp'", ",", "register_to", "=", "API", ")", "return", "API" ]
Import all `ddp` submodules from `settings.INSTALLED_APPS`.
[ "Import", "all", "ddp", "submodules", "from", "settings", ".", "INSTALLED_APPS", "." ]
python
test
offu/WeRoBot
werobot/robot.py
https://github.com/offu/WeRoBot/blob/fd42109105b03f9acf45ebd9dcabb9d5cff98f3c/werobot/robot.py#L556-L573
def parse_message( self, body, timestamp=None, nonce=None, msg_signature=None ): """ Parse the received Raw XML, decrypt it if necessary, and return a WeRoBot Message. :param body: the Body of the request sent by the WeChat server. :return: WeRoBot Message """ message_dict = parse_xml(body) if "Encrypt" in message_dict: xml = self.crypto.decrypt_message( timestamp=timestamp, nonce=nonce, msg_signature=msg_signature, encrypt_msg=message_dict["Encrypt"] ) message_dict = parse_xml(xml) return process_message(message_dict)
[ "def", "parse_message", "(", "self", ",", "body", ",", "timestamp", "=", "None", ",", "nonce", "=", "None", ",", "msg_signature", "=", "None", ")", ":", "message_dict", "=", "parse_xml", "(", "body", ")", "if", "\"Encrypt\"", "in", "message_dict", ":", "xml", "=", "self", ".", "crypto", ".", "decrypt_message", "(", "timestamp", "=", "timestamp", ",", "nonce", "=", "nonce", ",", "msg_signature", "=", "msg_signature", ",", "encrypt_msg", "=", "message_dict", "[", "\"Encrypt\"", "]", ")", "message_dict", "=", "parse_xml", "(", "xml", ")", "return", "process_message", "(", "message_dict", ")" ]
Parse the received Raw XML, decrypt it if necessary, and return a WeRoBot Message. :param body: the Body of the request sent by the WeChat server. :return: WeRoBot Message
[ "Parse", "the", "received", "Raw", "XML", ",", "decrypt", "it", "if", "necessary", ",", "and", "return", "a", "WeRoBot", "Message", ".", ":", "param", "body", ":", "the", "Body", "of", "the", "request", "sent", "by", "the", "WeChat", "server", ".", ":", "return", ":", "WeRoBot", "Message" ]
python
train
quantmind/pulsar
pulsar/utils/config.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/config.py#L184-L195
def update(self, data, default=False): """Update this :attr:`Config` with ``data``. :param data: must be a ``Mapping``-like object exposing the ``items`` method for iterating through key-value pairs. :param default: if ``True`` the updated :attr:`settings` will also set their :attr:`~Setting.default` attribute with the updating value (provided it is a valid one). """ for name, value in data.items(): if value is not None: self.set(name, value, default)
[ "def", "update", "(", "self", ",", "data", ",", "default", "=", "False", ")", ":", "for", "name", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "value", "is", "not", "None", ":", "self", ".", "set", "(", "name", ",", "value", ",", "default", ")" ]
Update this :attr:`Config` with ``data``. :param data: must be a ``Mapping``-like object exposing the ``items`` method for iterating through key-value pairs. :param default: if ``True`` the updated :attr:`settings` will also set their :attr:`~Setting.default` attribute with the updating value (provided it is a valid one).
[ "Update", "this", ":", "attr", ":", "Config", "with", "data", "." ]
python
train
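A stand-in sketch of the ``None``-filtering behaviour described above. DemoConfig is a hypothetical stub with the same update()/set() surface, not pulsar's actual Config class.

class DemoConfig:
    """Hypothetical stub mimicking the update()/set() contract."""
    def __init__(self):
        self.settings = {}

    def set(self, name, value, default=False):
        self.settings[name] = value

    def update(self, data, default=False):
        # None values are skipped, exactly as in the method above
        for name, value in data.items():
            if value is not None:
                self.set(name, value, default)

cfg = DemoConfig()
cfg.update({'workers': 4, 'timeout': None})
print(cfg.settings)  # {'workers': 4} -- 'timeout' was ignored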
payu-org/payu
payu/calendar.py
https://github.com/payu-org/payu/blob/1442a9a226012eff248b8097cc1eaabc3e224867/payu/calendar.py#L7-L16
def int_to_date(date): """ Convert an int of form yyyymmdd to a python date object. """ year = date // 10**4 month = date % 10**4 // 10**2 day = date % 10**2 return datetime.date(year, month, day)
[ "def", "int_to_date", "(", "date", ")", ":", "year", "=", "date", "//", "10", "**", "4", "month", "=", "date", "%", "10", "**", "4", "//", "10", "**", "2", "day", "=", "date", "%", "10", "**", "2", "return", "datetime", ".", "date", "(", "year", ",", "month", ",", "day", ")" ]
Convert an int of form yyyymmdd to a python date object.
[ "Convert", "an", "int", "of", "form", "yyyymmdd", "to", "a", "python", "date", "object", "." ]
python
train
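The integer arithmetic is easy to verify by hand: for 20240229, 20240229 // 10**4 = 2024, 20240229 % 10**4 // 10**2 = 2, and 20240229 % 10**2 = 29.

import datetime

def int_to_date(date):
    year = date // 10**4
    month = date % 10**4 // 10**2
    day = date % 10**2
    return datetime.date(year, month, day)

print(int_to_date(20240229))  # 2024-02-29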
brunato/lograptor
lograptor/cache.py
https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/cache.py#L139-L164
def gethost(self, ip_addr): """ Do reverse lookup on an ip address """ # Handle silly fake ipv6 addresses try: if ip_addr[:7] == '::ffff:': ip_addr = ip_addr[7:] except TypeError: pass if ip_addr[0] in string.letters: return ip_addr try: return self.hostsmap[ip_addr] except KeyError: pass try: name = socket.gethostbyaddr(ip_addr)[0] except socket.error: name = ip_addr self.hostsmap[ip_addr] = name return name
[ "def", "gethost", "(", "self", ",", "ip_addr", ")", ":", "# Handle silly fake ipv6 addresses", "try", ":", "if", "ip_addr", "[", ":", "7", "]", "==", "'::ffff:'", ":", "ip_addr", "=", "ip_addr", "[", "7", ":", "]", "except", "TypeError", ":", "pass", "if", "ip_addr", "[", "0", "]", "in", "string", ".", "letters", ":", "return", "ip_addr", "try", ":", "return", "self", ".", "hostsmap", "[", "ip_addr", "]", "except", "KeyError", ":", "pass", "try", ":", "name", "=", "socket", ".", "gethostbyaddr", "(", "ip_addr", ")", "[", "0", "]", "except", "socket", ".", "error", ":", "name", "=", "ip_addr", "self", ".", "hostsmap", "[", "ip_addr", "]", "=", "name", "return", "name" ]
Do reverse lookup on an ip address
[ "Do", "reverse", "lookup", "on", "an", "ip", "address" ]
python
train
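Note that ``string.letters`` exists only on Python 2 (``string.ascii_letters`` is the Python 3 spelling). The IPv4-mapped-IPv6 prefix stripping at the top of gethost() is easy to exercise on its own:

# Strip the '::ffff:' prefix from IPv4-mapped IPv6 addresses,
# as the gethost() method above does before any lookup
ip_addr = '::ffff:192.0.2.10'
if ip_addr[:7] == '::ffff:':
    ip_addr = ip_addr[7:]
print(ip_addr)  # 192.0.2.10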
xtrementl/focus
focus/environment/__init__.py
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/environment/__init__.py#L125-L136
def _setup_task(self, load): """ Sets up the ``Task`` object and loads active file for task. `load` Set to ``True`` to load task after setup. """ if not self._task: self._task = Task(self._data_dir) if load: self._task.load()
[ "def", "_setup_task", "(", "self", ",", "load", ")", ":", "if", "not", "self", ".", "_task", ":", "self", ".", "_task", "=", "Task", "(", "self", ".", "_data_dir", ")", "if", "load", ":", "self", ".", "_task", ".", "load", "(", ")" ]
Sets up the ``Task`` object and loads active file for task. `load` Set to ``True`` to load task after setup.
[ "Sets", "up", "the", "Task", "object", "and", "loads", "active", "file", "for", "task", "." ]
python
train
artizirk/python-axp209
axp209.py
https://github.com/artizirk/python-axp209/blob/dc48015b23ea3695bf1ee076355c96ea434b77e4/axp209.py#L207-L214
def battery_discharge_current(self): """ Returns current in mA """ msb = self.bus.read_byte_data(AXP209_ADDRESS, BATTERY_DISCHARGE_CURRENT_MSB_REG) lsb = self.bus.read_byte_data(AXP209_ADDRESS, BATTERY_DISCHARGE_CURRENT_LSB_REG) # 13bits discharge_bin = msb << 5 | lsb & 0x1f # 0 mV -> 000h, 0.5 mA/bit 1FFFh -> 1800 mA return discharge_bin * 0.5
[ "def", "battery_discharge_current", "(", "self", ")", ":", "msb", "=", "self", ".", "bus", ".", "read_byte_data", "(", "AXP209_ADDRESS", ",", "BATTERY_DISCHARGE_CURRENT_MSB_REG", ")", "lsb", "=", "self", ".", "bus", ".", "read_byte_data", "(", "AXP209_ADDRESS", ",", "BATTERY_DISCHARGE_CURRENT_LSB_REG", ")", "# 13bits", "discharge_bin", "=", "msb", "<<", "5", "|", "lsb", "&", "0x1f", "# 0 mV -> 000h,\t0.5 mA/bit\t1FFFh -> 1800 mA", "return", "discharge_bin", "*", "0.5" ]
Returns current in mA
[ "Returns", "current", "in", "mA" ]
python
train
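A sketch of the 13-bit register assembly with hypothetical register values, so no I2C bus is required:

# Hypothetical MSB/LSB reads; on hardware these come from the I2C bus
msb, lsb = 0x12, 0x2A

# Upper 8 bits shifted left by 5, lower 5 bits masked in -> 13-bit value
raw = msb << 5 | (lsb & 0x1f)
print(raw)        # 586
print(raw * 0.5)  # 293.0 mA at 0.5 mA per bit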
edx/edx-enterprise
enterprise/utils.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/utils.py#L446-L467
def update_query_parameters(url, query_parameters): """ Return url with updated query parameters. Arguments: url (str): Original url whose query parameters need to be updated. query_parameters (dict): A dictionary containing query parameters to be added to course selection url. Returns: (str): Url with the updated query parameters. """ scheme, netloc, path, query_string, fragment = urlsplit(url) url_params = parse_qs(query_string) # Update url query parameters url_params.update(query_parameters) return urlunsplit( (scheme, netloc, path, urlencode(sorted(url_params.items()), doseq=True), fragment), )
[ "def", "update_query_parameters", "(", "url", ",", "query_parameters", ")", ":", "scheme", ",", "netloc", ",", "path", ",", "query_string", ",", "fragment", "=", "urlsplit", "(", "url", ")", "url_params", "=", "parse_qs", "(", "query_string", ")", "# Update url query parameters", "url_params", ".", "update", "(", "query_parameters", ")", "return", "urlunsplit", "(", "(", "scheme", ",", "netloc", ",", "path", ",", "urlencode", "(", "sorted", "(", "url_params", ".", "items", "(", ")", ")", ",", "doseq", "=", "True", ")", ",", "fragment", ")", ",", ")" ]
Return url with updated query parameters. Arguments: url (str): Original url whose query parameters need to be updated. query_parameters (dict): A dictionary containing query parameters to be added to course selection url. Returns: (str): Url with the updated query parameters.
[ "Return", "url", "with", "updated", "query", "parameters", "." ]
python
valid
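The function is pure URL manipulation, so it can be exercised directly. The record does not show its imports; the Python 3 stdlib urllib.parse spellings are used here.

from urllib.parse import urlsplit, parse_qs, urlencode, urlunsplit

def update_query_parameters(url, query_parameters):
    scheme, netloc, path, query_string, fragment = urlsplit(url)
    url_params = parse_qs(query_string)
    url_params.update(query_parameters)
    return urlunsplit(
        (scheme, netloc, path, urlencode(sorted(url_params.items()), doseq=True), fragment),
    )

# parse_qs returns list values, so new parameters are given as lists too
print(update_query_parameters('https://example.com/courses?page=2', {'audit': ['true']}))
# https://example.com/courses?audit=true&page=2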
Opentrons/opentrons
api/src/opentrons/protocol_api/contexts.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/protocol_api/contexts.py#L146-L166
def temp_connect(self, hardware: hc.API): """ Connect temporarily to the specified hardware controller. This should be used as a context manager: .. code-block :: python with ctx.temp_connect(hw): # do some tasks ctx.home() # after the with block, the context is connected to the same # hardware control API it was connected to before, even if # an error occurred in the code inside the with block """ old_hw = self._hw_manager.hardware try: self._hw_manager.set_hw(hardware) yield self finally: self._hw_manager.set_hw(old_hw)
[ "def", "temp_connect", "(", "self", ",", "hardware", ":", "hc", ".", "API", ")", ":", "old_hw", "=", "self", ".", "_hw_manager", ".", "hardware", "try", ":", "self", ".", "_hw_manager", ".", "set_hw", "(", "hardware", ")", "yield", "self", "finally", ":", "self", ".", "_hw_manager", ".", "set_hw", "(", "old_hw", ")" ]
Connect temporarily to the specified hardware controller. This should be used as a context manager: .. code-block :: python with ctx.temp_connect(hw): # do some tasks ctx.home() # after the with block, the context is connected to the same # hardware control API it was connected to before, even if # an error occurred in the code inside the with block
[ "Connect", "temporarily", "to", "the", "specified", "hardware", "controller", "." ]
python
train
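The save/swap/restore pattern generalizes beyond Opentrons' hardware manager; a minimal self-contained sketch with a hypothetical manager stub:

from contextlib import contextmanager

class DemoHwManager:
    """Hypothetical stub with the same set_hw()/hardware surface."""
    def __init__(self, hw):
        self.hardware = hw
    def set_hw(self, hw):
        self.hardware = hw

@contextmanager
def temp_connect(manager, hardware):
    old_hw = manager.hardware
    try:
        manager.set_hw(hardware)   # swap in the temporary controller
        yield manager
    finally:
        manager.set_hw(old_hw)     # restored even if the block raises

mgr = DemoHwManager('simulator')
with temp_connect(mgr, 'real-robot'):
    print(mgr.hardware)  # real-robot
print(mgr.hardware)      # simulator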
blockstack/blockstack-core
blockstack/lib/operations/__init__.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/operations/__init__.py#L360-L370
def op_get_consensus_fields( op_name ): """ Get the set of consensus-generating fields for an operation. """ global SERIALIZE_FIELDS if op_name not in SERIALIZE_FIELDS.keys(): raise Exception("No such operation '%s'" % op_name ) fields = SERIALIZE_FIELDS[op_name][:] return fields
[ "def", "op_get_consensus_fields", "(", "op_name", ")", ":", "global", "SERIALIZE_FIELDS", "if", "op_name", "not", "in", "SERIALIZE_FIELDS", ".", "keys", "(", ")", ":", "raise", "Exception", "(", "\"No such operation '%s'\"", "%", "op_name", ")", "fields", "=", "SERIALIZE_FIELDS", "[", "op_name", "]", "[", ":", "]", "return", "fields" ]
Get the set of consensus-generating fields for an operation.
[ "Get", "the", "set", "of", "consensus", "-", "generating", "fields", "for", "an", "operation", "." ]
python
train
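The slice ``[:]`` returns a defensive copy so callers cannot mutate the module-level table; a sketch with an illustrative table (the operation and field names are hypothetical, not the real SERIALIZE_FIELDS contents):

# Illustrative table; the real SERIALIZE_FIELDS lives at module level
SERIALIZE_FIELDS = {'NAME_REGISTRATION': ['name', 'value_hash']}

fields = SERIALIZE_FIELDS['NAME_REGISTRATION'][:]  # copy, not alias
fields.append('extra')
print(SERIALIZE_FIELDS['NAME_REGISTRATION'])  # ['name', 'value_hash'] -- unchanged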
brocade/pynos
pynos/versions/base/interface.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/interface.py#L299-L344
def remove_port_channel(self, **kwargs): """ Remove a port channel interface. Args: port_int (str): port-channel number (1, 2, 3, etc). callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `port_int` is not passed. ValueError: if `port_int` is invalid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.channel_group(name='225/0/20', ... int_type='tengigabitethernet', ... port_int='1', channel_type='standard', mode='active') ... output = dev.interface.remove_port_channel( ... port_int='1') """ port_int = kwargs.pop('port_int') callback = kwargs.pop('callback', self._callback) if re.search('^[0-9]{1,4}$', port_int) is None: raise ValueError('%s must be in the format of x for port channel ' 'interfaces.' % repr(port_int)) port_channel = getattr(self._interface, 'interface_port_channel_name') port_channel_args = dict(name=port_int) config = port_channel(**port_channel_args) delete_channel = config.find('.//*port-channel') delete_channel.set('operation', 'delete') return callback(config)
[ "def", "remove_port_channel", "(", "self", ",", "*", "*", "kwargs", ")", ":", "port_int", "=", "kwargs", ".", "pop", "(", "'port_int'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "if", "re", ".", "search", "(", "'^[0-9]{1,4}$'", ",", "port_int", ")", "is", "None", ":", "raise", "ValueError", "(", "'%s must be in the format of x for port channel '", "'interfaces.'", "%", "repr", "(", "port_int", ")", ")", "port_channel", "=", "getattr", "(", "self", ".", "_interface", ",", "'interface_port_channel_name'", ")", "port_channel_args", "=", "dict", "(", "name", "=", "port_int", ")", "config", "=", "port_channel", "(", "*", "*", "port_channel_args", ")", "delete_channel", "=", "config", ".", "find", "(", "'.//*port-channel'", ")", "delete_channel", ".", "set", "(", "'operation'", ",", "'delete'", ")", "return", "callback", "(", "config", ")" ]
Remove a port channel interface. Args: port_int (str): port-channel number (1, 2, 3, etc). callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `port_int` is not passed. ValueError: if `port_int` is invalid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.channel_group(name='225/0/20', ... int_type='tengigabitethernet', ... port_int='1', channel_type='standard', mode='active') ... output = dev.interface.remove_port_channel( ... port_int='1')
[ "Remove", "a", "port", "channel", "interface", "." ]
python
train
rfosterslo/wagtailplus
wagtailplus/wagtailrelations/models.py
https://github.com/rfosterslo/wagtailplus/blob/22cac857175d8a6f77e470751831c14a92ccd768/wagtailplus/wagtailrelations/models.py#L343-L356
def for_category(self, category, live_only=False): """ Returns queryset of EntryTag instances for specified category. :param category: the Category instance. :param live_only: flag to include only "live" entries. :rtype: django.db.models.query.QuerySet. """ filters = {'tag': category.tag} if live_only: filters.update({'entry__live': True}) return self.filter(**filters)
[ "def", "for_category", "(", "self", ",", "category", ",", "live_only", "=", "False", ")", ":", "filters", "=", "{", "'tag'", ":", "category", ".", "tag", "}", "if", "live_only", ":", "filters", ".", "update", "(", "{", "'entry__live'", ":", "True", "}", ")", "return", "self", ".", "filter", "(", "*", "*", "filters", ")" ]
Returns queryset of EntryTag instances for specified category. :param category: the Category instance. :param live_only: flag to include only "live" entries. :rtype: django.db.models.query.QuerySet.
[ "Returns", "queryset", "of", "EntryTag", "instances", "for", "specified", "category", "." ]
python
train
bunq/sdk_python
bunq/sdk/model/generated/endpoint.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/endpoint.py#L29642-L29788
def update(cls, first_name=None, middle_name=None, last_name=None, public_nick_name=None, address_main=None, address_postal=None, avatar_uuid=None, tax_resident=None, document_type=None, document_number=None, document_country_of_issuance=None, document_front_attachment_id=None, document_back_attachment_id=None, date_of_birth=None, place_of_birth=None, country_of_birth=None, nationality=None, language=None, region=None, gender=None, status=None, sub_status=None, legal_guardian_alias=None, session_timeout=None, card_ids=None, card_limits=None, daily_limit_without_confirmation_login=None, notification_filters=None, display_name=None, custom_headers=None): """ Modify a specific person object's data. :type user_person_id: int :param first_name: The person's first name. :type first_name: str :param middle_name: The person's middle name. :type middle_name: str :param last_name: The person's last name. :type last_name: str :param public_nick_name: The person's public nick name. :type public_nick_name: str :param address_main: The user's main address. :type address_main: object_.Address :param address_postal: The person's postal address. :type address_postal: object_.Address :param avatar_uuid: The public UUID of the user's avatar. :type avatar_uuid: str :param tax_resident: The user's tax residence numbers for different countries. :type tax_resident: list[object_.TaxResident] :param document_type: The type of identification document the person registered with. :type document_type: str :param document_number: The identification document number the person registered with. :type document_number: str :param document_country_of_issuance: The country which issued the identification document the person registered with. :type document_country_of_issuance: str :param document_front_attachment_id: The reference to the uploaded picture/scan of the front side of the identification document. :type document_front_attachment_id: int :param document_back_attachment_id: The reference to the uploaded picture/scan of the back side of the identification document. :type document_back_attachment_id: int :param date_of_birth: The person's date of birth. Accepts ISO8601 date formats. :type date_of_birth: str :param place_of_birth: The person's place of birth. :type place_of_birth: str :param country_of_birth: The person's country of birth. Formatted as an ISO 3166-1 alpha-2 country code. :type country_of_birth: str :param nationality: The person's nationality. Formatted as an ISO 3166-1 alpha-2 country code. :type nationality: str :param language: The person's preferred language. Formatted as an ISO 639-1 language code plus an ISO 3166-1 alpha-2 country code, separated by an underscore. :type language: str :param region: The person's preferred region. Formatted as an ISO 639-1 language code plus an ISO 3166-1 alpha-2 country code, separated by an underscore. :type region: str :param gender: The person's gender. Can be: MALE, FEMALE and UNKNOWN. :type gender: str :param status: The user status. You are not allowed to update the status via PUT. :type status: str :param sub_status: The user sub-status. Can be updated to SUBMIT if status is RECOVERY. :type sub_status: str :param legal_guardian_alias: The legal guardian of the user. Required for minors. :type legal_guardian_alias: object_.Pointer :param session_timeout: The setting for the session timeout of the user in seconds. :type session_timeout: int :param card_ids: Card ids used for centralized card limits. :type card_ids: list[object_.BunqId] :param card_limits: The centralized limits for user's cards. :type card_limits: list[object_.CardLimit] :param daily_limit_without_confirmation_login: The amount the user can pay in the session without asking for credentials. :type daily_limit_without_confirmation_login: object_.Amount :param notification_filters: The types of notifications that will result in a push notification or URL callback for this UserPerson. :type notification_filters: list[object_.NotificationFilter] :param display_name: The person's legal name. Available legal names can be listed via the 'user/{user_id}/legal-name' endpoint. :type display_name: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt """ if custom_headers is None: custom_headers = {} api_client = client.ApiClient(cls._get_api_context()) request_map = { cls.FIELD_FIRST_NAME: first_name, cls.FIELD_MIDDLE_NAME: middle_name, cls.FIELD_LAST_NAME: last_name, cls.FIELD_PUBLIC_NICK_NAME: public_nick_name, cls.FIELD_ADDRESS_MAIN: address_main, cls.FIELD_ADDRESS_POSTAL: address_postal, cls.FIELD_AVATAR_UUID: avatar_uuid, cls.FIELD_TAX_RESIDENT: tax_resident, cls.FIELD_DOCUMENT_TYPE: document_type, cls.FIELD_DOCUMENT_NUMBER: document_number, cls.FIELD_DOCUMENT_COUNTRY_OF_ISSUANCE: document_country_of_issuance, cls.FIELD_DOCUMENT_FRONT_ATTACHMENT_ID: document_front_attachment_id, cls.FIELD_DOCUMENT_BACK_ATTACHMENT_ID: document_back_attachment_id, cls.FIELD_DATE_OF_BIRTH: date_of_birth, cls.FIELD_PLACE_OF_BIRTH: place_of_birth, cls.FIELD_COUNTRY_OF_BIRTH: country_of_birth, cls.FIELD_NATIONALITY: nationality, cls.FIELD_LANGUAGE: language, cls.FIELD_REGION: region, cls.FIELD_GENDER: gender, cls.FIELD_STATUS: status, cls.FIELD_SUB_STATUS: sub_status, cls.FIELD_LEGAL_GUARDIAN_ALIAS: legal_guardian_alias, cls.FIELD_SESSION_TIMEOUT: session_timeout, cls.FIELD_CARD_IDS: card_ids, cls.FIELD_CARD_LIMITS: card_limits, cls.FIELD_DAILY_LIMIT_WITHOUT_CONFIRMATION_LOGIN: daily_limit_without_confirmation_login, cls.FIELD_NOTIFICATION_FILTERS: notification_filters, cls.FIELD_DISPLAY_NAME: display_name } request_map_string = converter.class_to_json(request_map) request_map_string = cls._remove_field_for_request(request_map_string) request_bytes = request_map_string.encode() endpoint_url = cls._ENDPOINT_URL_UPDATE.format(cls._determine_user_id()) response_raw = api_client.put(endpoint_url, request_bytes, custom_headers) return BunqResponseInt.cast_from_bunq_response( cls._process_for_id(response_raw) )
[ "def", "update", "(", "cls", ",", "first_name", "=", "None", ",", "middle_name", "=", "None", ",", "last_name", "=", "None", ",", "public_nick_name", "=", "None", ",", "address_main", "=", "None", ",", "address_postal", "=", "None", ",", "avatar_uuid", "=", "None", ",", "tax_resident", "=", "None", ",", "document_type", "=", "None", ",", "document_number", "=", "None", ",", "document_country_of_issuance", "=", "None", ",", "document_front_attachment_id", "=", "None", ",", "document_back_attachment_id", "=", "None", ",", "date_of_birth", "=", "None", ",", "place_of_birth", "=", "None", ",", "country_of_birth", "=", "None", ",", "nationality", "=", "None", ",", "language", "=", "None", ",", "region", "=", "None", ",", "gender", "=", "None", ",", "status", "=", "None", ",", "sub_status", "=", "None", ",", "legal_guardian_alias", "=", "None", ",", "session_timeout", "=", "None", ",", "card_ids", "=", "None", ",", "card_limits", "=", "None", ",", "daily_limit_without_confirmation_login", "=", "None", ",", "notification_filters", "=", "None", ",", "display_name", "=", "None", ",", "custom_headers", "=", "None", ")", ":", "if", "custom_headers", "is", "None", ":", "custom_headers", "=", "{", "}", "api_client", "=", "client", ".", "ApiClient", "(", "cls", ".", "_get_api_context", "(", ")", ")", "request_map", "=", "{", "cls", ".", "FIELD_FIRST_NAME", ":", "first_name", ",", "cls", ".", "FIELD_MIDDLE_NAME", ":", "middle_name", ",", "cls", ".", "FIELD_LAST_NAME", ":", "last_name", ",", "cls", ".", "FIELD_PUBLIC_NICK_NAME", ":", "public_nick_name", ",", "cls", ".", "FIELD_ADDRESS_MAIN", ":", "address_main", ",", "cls", ".", "FIELD_ADDRESS_POSTAL", ":", "address_postal", ",", "cls", ".", "FIELD_AVATAR_UUID", ":", "avatar_uuid", ",", "cls", ".", "FIELD_TAX_RESIDENT", ":", "tax_resident", ",", "cls", ".", "FIELD_DOCUMENT_TYPE", ":", "document_type", ",", "cls", ".", "FIELD_DOCUMENT_NUMBER", ":", "document_number", ",", "cls", ".", "FIELD_DOCUMENT_COUNTRY_OF_ISSUANCE", ":", "document_country_of_issuance", ",", "cls", ".", "FIELD_DOCUMENT_FRONT_ATTACHMENT_ID", ":", "document_front_attachment_id", ",", "cls", ".", "FIELD_DOCUMENT_BACK_ATTACHMENT_ID", ":", "document_back_attachment_id", ",", "cls", ".", "FIELD_DATE_OF_BIRTH", ":", "date_of_birth", ",", "cls", ".", "FIELD_PLACE_OF_BIRTH", ":", "place_of_birth", ",", "cls", ".", "FIELD_COUNTRY_OF_BIRTH", ":", "country_of_birth", ",", "cls", ".", "FIELD_NATIONALITY", ":", "nationality", ",", "cls", ".", "FIELD_LANGUAGE", ":", "language", ",", "cls", ".", "FIELD_REGION", ":", "region", ",", "cls", ".", "FIELD_GENDER", ":", "gender", ",", "cls", ".", "FIELD_STATUS", ":", "status", ",", "cls", ".", "FIELD_SUB_STATUS", ":", "sub_status", ",", "cls", ".", "FIELD_LEGAL_GUARDIAN_ALIAS", ":", "legal_guardian_alias", ",", "cls", ".", "FIELD_SESSION_TIMEOUT", ":", "session_timeout", ",", "cls", ".", "FIELD_CARD_IDS", ":", "card_ids", ",", "cls", ".", "FIELD_CARD_LIMITS", ":", "card_limits", ",", "cls", ".", "FIELD_DAILY_LIMIT_WITHOUT_CONFIRMATION_LOGIN", ":", "daily_limit_without_confirmation_login", ",", "cls", ".", "FIELD_NOTIFICATION_FILTERS", ":", "notification_filters", ",", "cls", ".", "FIELD_DISPLAY_NAME", ":", "display_name", "}", "request_map_string", "=", "converter", ".", "class_to_json", "(", "request_map", ")", "request_map_string", "=", "cls", ".", "_remove_field_for_request", "(", "request_map_string", ")", "request_bytes", "=", "request_map_string", ".", "encode", "(", ")", "endpoint_url", "=", "cls", ".", "_ENDPOINT_URL_UPDATE", ".", "format", "(", "cls", ".", "_determine_user_id", "(", ")", ")", "response_raw", "=", "api_client", ".", "put", "(", "endpoint_url", ",", "request_bytes", ",", "custom_headers", ")", "return", "BunqResponseInt", ".", "cast_from_bunq_response", "(", "cls", ".", "_process_for_id", "(", "response_raw", ")", ")", "]"
Modify a specific person object's data. :type user_person_id: int :param first_name: The person's first name. :type first_name: str :param middle_name: The person's middle name. :type middle_name: str :param last_name: The person's last name. :type last_name: str :param public_nick_name: The person's public nick name. :type public_nick_name: str :param address_main: The user's main address. :type address_main: object_.Address :param address_postal: The person's postal address. :type address_postal: object_.Address :param avatar_uuid: The public UUID of the user's avatar. :type avatar_uuid: str :param tax_resident: The user's tax residence numbers for different countries. :type tax_resident: list[object_.TaxResident] :param document_type: The type of identification document the person registered with. :type document_type: str :param document_number: The identification document number the person registered with. :type document_number: str :param document_country_of_issuance: The country which issued the identification document the person registered with. :type document_country_of_issuance: str :param document_front_attachment_id: The reference to the uploaded picture/scan of the front side of the identification document. :type document_front_attachment_id: int :param document_back_attachment_id: The reference to the uploaded picture/scan of the back side of the identification document. :type document_back_attachment_id: int :param date_of_birth: The person's date of birth. Accepts ISO8601 date formats. :type date_of_birth: str :param place_of_birth: The person's place of birth. :type place_of_birth: str :param country_of_birth: The person's country of birth. Formatted as an ISO 3166-1 alpha-2 country code. :type country_of_birth: str :param nationality: The person's nationality. Formatted as an ISO 3166-1 alpha-2 country code. :type nationality: str :param language: The person's preferred language. Formatted as an ISO 639-1 language code plus an ISO 3166-1 alpha-2 country code, separated by an underscore. :type language: str :param region: The person's preferred region. Formatted as an ISO 639-1 language code plus an ISO 3166-1 alpha-2 country code, separated by an underscore. :type region: str :param gender: The person's gender. Can be: MALE, FEMALE and UNKNOWN. :type gender: str :param status: The user status. You are not allowed to update the status via PUT. :type status: str :param sub_status: The user sub-status. Can be updated to SUBMIT if status is RECOVERY. :type sub_status: str :param legal_guardian_alias: The legal guardian of the user. Required for minors. :type legal_guardian_alias: object_.Pointer :param session_timeout: The setting for the session timeout of the user in seconds. :type session_timeout: int :param card_ids: Card ids used for centralized card limits. :type card_ids: list[object_.BunqId] :param card_limits: The centralized limits for user's cards. :type card_limits: list[object_.CardLimit] :param daily_limit_without_confirmation_login: The amount the user can pay in the session without asking for credentials. :type daily_limit_without_confirmation_login: object_.Amount :param notification_filters: The types of notifications that will result in a push notification or URL callback for this UserPerson. :type notification_filters: list[object_.NotificationFilter] :param display_name: The person's legal name. Available legal names can be listed via the 'user/{user_id}/legal-name' endpoint. :type display_name: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt
[ "Modify", "a", "specific", "person", "object", "s", "data", "." ]
python
train
riga/tfdeploy
tfdeploy.py
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1070-L1078
def RandomShuffle(a, seed): """ Random shuffle op. """ if seed: np.random.seed(seed) r = a.copy() np.random.shuffle(r) return r,
[ "def", "RandomShuffle", "(", "a", ",", "seed", ")", ":", "if", "seed", ":", "np", ".", "random", ".", "seed", "(", "seed", ")", "r", "=", "a", ".", "copy", "(", ")", "np", ".", "random", ".", "shuffle", "(", "r", ")", "return", "r", "," ]
Random shuffle op.
[ "Random", "shuffle", "op", "." ]
python
train
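A quick demonstration that the input array is left untouched and the shuffle is reproducible under a fixed seed:

import numpy as np

a = np.arange(5)
np.random.seed(42)   # seeding makes the shuffle reproducible
r = a.copy()         # shuffle a copy, as the op above does
np.random.shuffle(r)
print(a)  # [0 1 2 3 4] -- unchanged
print(r)  # a permutation of a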
wndhydrnt/python-oauth2
oauth2/store/dbapi/__init__.py
https://github.com/wndhydrnt/python-oauth2/blob/abe3bf5f27bda2ff737cab387b040e2e6e85c2e2/oauth2/store/dbapi/__init__.py#L118-L138
def fetch_by_refresh_token(self, refresh_token): """ Retrieves an access token by its refresh token. :param refresh_token: The refresh token of an access token as a `str`. :return: An instance of :class:`oauth2.datatype.AccessToken`. :raises: :class:`oauth2.error.AccessTokenNotFound` if no access token could be retrieved. """ row = self.fetchone(self.fetch_by_refresh_token_query, refresh_token) if row is None: raise AccessTokenNotFound scopes = self._fetch_scopes(access_token_id=row[0]) data = self._fetch_data(access_token_id=row[0]) return self._row_to_token(data=data, scopes=scopes, row=row)
[ "def", "fetch_by_refresh_token", "(", "self", ",", "refresh_token", ")", ":", "row", "=", "self", ".", "fetchone", "(", "self", ".", "fetch_by_refresh_token_query", ",", "refresh_token", ")", "if", "row", "is", "None", ":", "raise", "AccessTokenNotFound", "scopes", "=", "self", ".", "_fetch_scopes", "(", "access_token_id", "=", "row", "[", "0", "]", ")", "data", "=", "self", ".", "_fetch_data", "(", "access_token_id", "=", "row", "[", "0", "]", ")", "return", "self", ".", "_row_to_token", "(", "data", "=", "data", ",", "scopes", "=", "scopes", ",", "row", "=", "row", ")" ]
Retrieves an access token by its refresh token. :param refresh_token: The refresh token of an access token as a `str`. :return: An instance of :class:`oauth2.datatype.AccessToken`. :raises: :class:`oauth2.error.AccessTokenNotFound` if no access token could be retrieved.
[ "Retrieves", "an", "access", "token", "by", "its", "refresh", "token", "." ]
python
train
bird-house/twitcher
twitcher/tokengenerator.py
https://github.com/bird-house/twitcher/blob/e6a36b3aeeacf44eec537434b0fb87c09ab54b5f/twitcher/tokengenerator.py#L22-L34
def create_access_token(self, valid_in_hours=1, data=None): """ Creates an access token. TODO: check valid in hours TODO: maybe specify how often a token can be used """ data = data or {} token = AccessToken( token=self.generate(), expires_at=expires_at(hours=valid_in_hours), data=data) return token
[ "def", "create_access_token", "(", "self", ",", "valid_in_hours", "=", "1", ",", "data", "=", "None", ")", ":", "data", "=", "data", "or", "{", "}", "token", "=", "AccessToken", "(", "token", "=", "self", ".", "generate", "(", ")", ",", "expires_at", "=", "expires_at", "(", "hours", "=", "valid_in_hours", ")", ",", "data", "=", "data", ")", "return", "token" ]
Creates an access token. TODO: check valid in hours TODO: maybe specify how often a token can be used
[ "Creates", "an", "access", "token", "." ]
python
valid
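The ``expires_at`` helper is not shown in this record; one plausible shape, offered purely as an assumption for illustration (the real helper may differ):

import datetime

def expires_at(hours):
    # Hypothetical helper: an absolute expiry time `hours` from now
    return datetime.datetime.utcnow() + datetime.timedelta(hours=hours)

print(expires_at(hours=1))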
miLibris/flask-rest-jsonapi
flask_rest_jsonapi/data_layers/filtering/alchemy.py
https://github.com/miLibris/flask-rest-jsonapi/blob/ecc8f2cd2b54cc0bfae7acd6cffcda0ba1140c43/flask_rest_jsonapi/data_layers/filtering/alchemy.py#L41-L62
def resolve(self): """Create filter for a particular node of the filter tree""" if 'or' not in self.filter_ and 'and' not in self.filter_ and 'not' not in self.filter_: value = self.value if isinstance(value, dict): value = Node(self.related_model, value, self.resource, self.related_schema).resolve() if '__' in self.filter_.get('name', ''): value = {self.filter_['name'].split('__')[1]: value} if isinstance(value, dict): return getattr(self.column, self.operator)(**value) else: return getattr(self.column, self.operator)(value) if 'or' in self.filter_: return or_(Node(self.model, filt, self.resource, self.schema).resolve() for filt in self.filter_['or']) if 'and' in self.filter_: return and_(Node(self.model, filt, self.resource, self.schema).resolve() for filt in self.filter_['and']) if 'not' in self.filter_: return not_(Node(self.model, self.filter_['not'], self.resource, self.schema).resolve())
[ "def", "resolve", "(", "self", ")", ":", "if", "'or'", "not", "in", "self", ".", "filter_", "and", "'and'", "not", "in", "self", ".", "filter_", "and", "'not'", "not", "in", "self", ".", "filter_", ":", "value", "=", "self", ".", "value", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "value", "=", "Node", "(", "self", ".", "related_model", ",", "value", ",", "self", ".", "resource", ",", "self", ".", "related_schema", ")", ".", "resolve", "(", ")", "if", "'__'", "in", "self", ".", "filter_", ".", "get", "(", "'name'", ",", "''", ")", ":", "value", "=", "{", "self", ".", "filter_", "[", "'name'", "]", ".", "split", "(", "'__'", ")", "[", "1", "]", ":", "value", "}", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "getattr", "(", "self", ".", "column", ",", "self", ".", "operator", ")", "(", "*", "*", "value", ")", "else", ":", "return", "getattr", "(", "self", ".", "column", ",", "self", ".", "operator", ")", "(", "value", ")", "if", "'or'", "in", "self", ".", "filter_", ":", "return", "or_", "(", "Node", "(", "self", ".", "model", ",", "filt", ",", "self", ".", "resource", ",", "self", ".", "schema", ")", ".", "resolve", "(", ")", "for", "filt", "in", "self", ".", "filter_", "[", "'or'", "]", ")", "if", "'and'", "in", "self", ".", "filter_", ":", "return", "and_", "(", "Node", "(", "self", ".", "model", ",", "filt", ",", "self", ".", "resource", ",", "self", ".", "schema", ")", ".", "resolve", "(", ")", "for", "filt", "in", "self", ".", "filter_", "[", "'and'", "]", ")", "if", "'not'", "in", "self", ".", "filter_", ":", "return", "not_", "(", "Node", "(", "self", ".", "model", ",", "self", ".", "filter_", "[", "'not'", "]", ",", "self", ".", "resource", ",", "self", ".", "schema", ")", ".", "resolve", "(", ")", ")" ]
Create filter for a particular node of the filter tree
[ "Create", "filter", "for", "a", "particular", "node", "of", "the", "filter", "tree" ]
python
train
blockstack/blockstack-core
blockstack/lib/atlas.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1191-L1213
def atlasdb_get_old_peers( now, con=None, path=None ): """ Get peers older than now - PEER_LIFETIME """ with AtlasDBOpen(con=con, path=path) as dbcon: if now is None: now = time.time() expire = now - atlas_peer_max_age() sql = "SELECT * FROM peers WHERE discovery_time < ?"; args = (expire,) cur = dbcon.cursor() res = atlasdb_query_execute( cur, sql, args ) rows = [] for row in res: tmp = {} tmp.update(row) rows.append(tmp) return rows
[ "def", "atlasdb_get_old_peers", "(", "now", ",", "con", "=", "None", ",", "path", "=", "None", ")", ":", "with", "AtlasDBOpen", "(", "con", "=", "con", ",", "path", "=", "path", ")", "as", "dbcon", ":", "if", "now", "is", "None", ":", "now", "=", "time", ".", "time", "(", ")", "expire", "=", "now", "-", "atlas_peer_max_age", "(", ")", "sql", "=", "\"SELECT * FROM peers WHERE discovery_time < ?\"", "args", "=", "(", "expire", ",", ")", "cur", "=", "dbcon", ".", "cursor", "(", ")", "res", "=", "atlasdb_query_execute", "(", "cur", ",", "sql", ",", "args", ")", "rows", "=", "[", "]", "for", "row", "in", "res", ":", "tmp", "=", "{", "}", "tmp", ".", "update", "(", "row", ")", "rows", ".", "append", "(", "tmp", ")", "return", "rows" ]
Get peers older than now - PEER_LIFETIME
[ "Get", "peers", "older", "than", "now", "-", "PEER_LIFETIME" ]
python
train
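The row-copying idiom works because ``sqlite3.Row`` exposes keys() like a mapping, so ``dict.update`` accepts it. A self-contained sketch with a minimal assumed schema (the real peers table has more columns):

import sqlite3
import time

con = sqlite3.connect(':memory:')
con.row_factory = sqlite3.Row     # rows now behave like mappings
con.execute('CREATE TABLE peers (peer_hostport TEXT, discovery_time INTEGER)')
con.execute("INSERT INTO peers VALUES ('node.example.com:6264', 0)")

expire = time.time() - 3600       # stand-in for now - max peer age
rows = []
for row in con.execute('SELECT * FROM peers WHERE discovery_time < ?', (expire,)):
    tmp = {}
    tmp.update(row)               # copy the Row into a plain dict
    rows.append(tmp)
print(rows)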
osrg/ryu
ryu/lib/ovs/bridge.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/ovs/bridge.py#L425-L433
def add_gre_port(self, name, remote_ip, local_ip=None, key=None, ofport=None): """ Creates a GRE tunnel port. See the description of ``add_tunnel_port()``. """ self.add_tunnel_port(name, 'gre', remote_ip, local_ip=local_ip, key=key, ofport=ofport)
[ "def", "add_gre_port", "(", "self", ",", "name", ",", "remote_ip", ",", "local_ip", "=", "None", ",", "key", "=", "None", ",", "ofport", "=", "None", ")", ":", "self", ".", "add_tunnel_port", "(", "name", ",", "'gre'", ",", "remote_ip", ",", "local_ip", "=", "local_ip", ",", "key", "=", "key", ",", "ofport", "=", "ofport", ")" ]
Creates a GRE tunnel port. See the description of ``add_tunnel_port()``.
[ "Creates", "a", "GRE", "tunnel", "port", "." ]
python
train
robertmartin8/PyPortfolioOpt
pypfopt/risk_models.py
https://github.com/robertmartin8/PyPortfolioOpt/blob/dfad1256cb6995c7fbd7a025eedb54b1ca04b2fc/pypfopt/risk_models.py#L182-L195
def format_and_annualise(self, raw_cov_array): """ Helper method which annualises the output of shrinkage calculations, and formats the result into a dataframe :param raw_cov_array: raw covariance matrix of daily returns :type raw_cov_array: np.ndarray :return: annualised covariance matrix :rtype: pd.DataFrame """ assets = self.X.columns return ( pd.DataFrame(raw_cov_array, index=assets, columns=assets) * self.frequency )
[ "def", "format_and_annualise", "(", "self", ",", "raw_cov_array", ")", ":", "assets", "=", "self", ".", "X", ".", "columns", "return", "(", "pd", ".", "DataFrame", "(", "raw_cov_array", ",", "index", "=", "assets", ",", "columns", "=", "assets", ")", "*", "self", ".", "frequency", ")" ]
Helper method which annualises the output of shrinkage calculations, and formats the result into a dataframe :param raw_cov_array: raw covariance matrix of daily returns :type raw_cov_array: np.ndarray :return: annualised covariance matrix :rtype: pd.DataFrame
[ "Helper", "method", "which", "annualises", "the", "output", "of", "shrinkage", "calculations", "and", "formats", "the", "result", "into", "a", "dataframe" ]
python
train
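Numerically, annualising just scales the daily covariance by the frequency; with illustrative numbers and 252 trading days per year:

import numpy as np
import pandas as pd

raw_cov_array = np.array([[1e-4, 2e-5],
                          [2e-5, 2e-4]])   # daily covariance (illustrative)
assets = ['AAA', 'BBB']
frequency = 252                            # trading days per year

annualised = pd.DataFrame(raw_cov_array, index=assets, columns=assets) * frequency
print(annualised)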
Neurita/boyle
boyle/utils/logger.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/logger.py#L13-L31
def setup_logging(log_config_file=op.join(op.dirname(__file__), 'logger.yml'), log_default_level=LOG_LEVEL, env_key=MODULE_NAME.upper() + '_LOG_CFG'): """Setup logging configuration.""" path = log_config_file value = os.getenv(env_key, None) if value: path = value if op.exists(path): log_cfg = yaml.load(read(path).format(MODULE_NAME)) logging.config.dictConfig(log_cfg) #print('Started logging using config file {0}.'.format(path)) else: logging.basicConfig(level=log_default_level) #print('Started default logging. Could not find config file ' # 'in {0}.'.format(path)) log = logging.getLogger(__name__) log.debug('Start logging.')
[ "def", "setup_logging", "(", "log_config_file", "=", "op", ".", "join", "(", "op", ".", "dirname", "(", "__file__", ")", ",", "'logger.yml'", ")", ",", "log_default_level", "=", "LOG_LEVEL", ",", "env_key", "=", "MODULE_NAME", ".", "upper", "(", ")", "+", "'_LOG_CFG'", ")", ":", "path", "=", "log_config_file", "value", "=", "os", ".", "getenv", "(", "env_key", ",", "None", ")", "if", "value", ":", "path", "=", "value", "if", "op", ".", "exists", "(", "path", ")", ":", "log_cfg", "=", "yaml", ".", "load", "(", "read", "(", "path", ")", ".", "format", "(", "MODULE_NAME", ")", ")", "logging", ".", "config", ".", "dictConfig", "(", "log_cfg", ")", "#print('Started logging using config file {0}.'.format(path))", "else", ":", "logging", ".", "basicConfig", "(", "level", "=", "log_default_level", ")", "#print('Started default logging. Could not find config file '", "# 'in {0}.'.format(path))", "log", "=", "logging", ".", "getLogger", "(", "__name__", ")", "log", ".", "debug", "(", "'Start logging.'", ")" ]
Setup logging configuration.
[ "Setup", "logging", "configuration", "." ]
python
valid
tcalmant/ipopo
pelix/ipopo/waiting.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/waiting.py#L172-L197
def handle_ipopo_event(self, event): # type: (IPopoEvent) -> None """ Handles an iPOPO event :param event: iPOPO event bean """ kind = event.get_kind() if kind == IPopoEvent.REGISTERED: # A factory has been registered try: with use_ipopo(self.__context) as ipopo: factory = event.get_factory_name() with self.__lock: # Copy the list of components names for this factory components = self.__queue[factory].copy() for component in components: self._try_instantiate(ipopo, factory, component) except BundleException: # iPOPO not yet started pass except KeyError: # No components for this new factory pass
[ "def", "handle_ipopo_event", "(", "self", ",", "event", ")", ":", "# type: (IPopoEvent) -> None", "kind", "=", "event", ".", "get_kind", "(", ")", "if", "kind", "==", "IPopoEvent", ".", "REGISTERED", ":", "# A factory has been registered", "try", ":", "with", "use_ipopo", "(", "self", ".", "__context", ")", "as", "ipopo", ":", "factory", "=", "event", ".", "get_factory_name", "(", ")", "with", "self", ".", "__lock", ":", "# Copy the list of components names for this factory", "components", "=", "self", ".", "__queue", "[", "factory", "]", ".", "copy", "(", ")", "for", "component", "in", "components", ":", "self", ".", "_try_instantiate", "(", "ipopo", ",", "factory", ",", "component", ")", "except", "BundleException", ":", "# iPOPO not yet started", "pass", "except", "KeyError", ":", "# No components for this new factory", "pass" ]
Handles an iPOPO event :param event: iPOPO event bean
[ "Handles", "an", "iPOPO", "event" ]
python
train
log2timeline/plaso
plaso/output/json_out.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/output/json_out.py#L31-L50
def WriteEventBody(self, event): """Writes the body of an event object to the output. Args: event (EventObject): event. """ inode = getattr(event, 'inode', None) if inode is None: event.inode = 0 json_dict = self._JSON_SERIALIZER.WriteSerializedDict(event) json_string = json.dumps(json_dict, sort_keys=True) if self._event_counter != 0: self._output_writer.Write(', ') line = '"event_{0:d}": {1:s}\n'.format(self._event_counter, json_string) self._output_writer.Write(line) self._event_counter += 1
[ "def", "WriteEventBody", "(", "self", ",", "event", ")", ":", "inode", "=", "getattr", "(", "event", ",", "'inode'", ",", "None", ")", "if", "inode", "is", "None", ":", "event", ".", "inode", "=", "0", "json_dict", "=", "self", ".", "_JSON_SERIALIZER", ".", "WriteSerializedDict", "(", "event", ")", "json_string", "=", "json", ".", "dumps", "(", "json_dict", ",", "sort_keys", "=", "True", ")", "if", "self", ".", "_event_counter", "!=", "0", ":", "self", ".", "_output_writer", ".", "Write", "(", "', '", ")", "line", "=", "'\"event_{0:d}\": {1:s}\\n'", ".", "format", "(", "self", ".", "_event_counter", ",", "json_string", ")", "self", ".", "_output_writer", ".", "Write", "(", "line", ")", "self", ".", "_event_counter", "+=", "1" ]
Writes the body of an event object to the output. Args: event (EventObject): event.
[ "Writes", "the", "body", "of", "an", "event", "object", "to", "the", "output", "." ]
python
train
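The formatting logic alone is easy to preview with a toy event dict (field names illustrative, not plaso's real serialized form):

import json

json_dict = {'timestamp': 0, 'message': 'test'}  # illustrative serialized event
event_counter = 2

# Keys are sorted for stable output; events are numbered as they arrive
line = '"event_{0:d}": {1:s}\n'.format(event_counter,
                                       json.dumps(json_dict, sort_keys=True))
print(line)  # "event_2": {"message": "test", "timestamp": 0}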
Miserlou/SoundScrape
soundscrape/soundscrape.py
https://github.com/Miserlou/SoundScrape/blob/efc63b99ce7e78b352e2ba22d5e51f83445546d7/soundscrape/soundscrape.py#L307-L366
def download_track(track, album_name=u'', keep_previews=False, folders=False, filenames=[], custom_path=''): """ Given a track, force scrape it. """ hard_track_url = get_hard_track_url(track['id']) # We have no info on this track whatsoever. if not 'title' in track: return None if not keep_previews: if (track.get('duration', 0) < track.get('full_duration', 0)): puts_safe(colored.yellow("Skipping preview track") + colored.white(": " + track['title'])) return None # May not have a "full name" name = track['user'].get('full_name', '') if name == '': name = track['user']['username'] filename = sanitize_filename(name + ' - ' + track['title'] + '.mp3') if folders: name_path = join(custom_path, name) if not exists(name_path): mkdir(name_path) filename = join(name_path, filename) else: filename = join(custom_path, filename) if exists(filename): puts_safe(colored.yellow("Track already downloaded: ") + colored.white(track['title'])) return None # Skip already downloaded track. if filename in filenames: return None if hard_track_url: puts_safe(colored.green("Scraping") + colored.white(": " + track['title'])) else: # Region coded? puts_safe(colored.yellow("Unable to download") + colored.white(": " + track['title'])) return None filename = download_file(hard_track_url, filename) tagged = tag_file(filename, artist=name, title=track['title'], year=track['created_at'][:4], genre=track['genre'], album=album_name, artwork_url=track['artwork_url']) if not tagged: wav_filename = filename[:-3] + 'wav' os.rename(filename, wav_filename) filename = wav_filename return filename
[ "def", "download_track", "(", "track", ",", "album_name", "=", "u''", ",", "keep_previews", "=", "False", ",", "folders", "=", "False", ",", "filenames", "=", "[", "]", ",", "custom_path", "=", "''", ")", ":", "hard_track_url", "=", "get_hard_track_url", "(", "track", "[", "'id'", "]", ")", "# We have no info on this track whatsoever.", "if", "not", "'title'", "in", "track", ":", "return", "None", "if", "not", "keep_previews", ":", "if", "(", "track", ".", "get", "(", "'duration'", ",", "0", ")", "<", "track", ".", "get", "(", "'full_duration'", ",", "0", ")", ")", ":", "puts_safe", "(", "colored", ".", "yellow", "(", "\"Skipping preview track\"", ")", "+", "colored", ".", "white", "(", "\": \"", "+", "track", "[", "'title'", "]", ")", ")", "return", "None", "# May not have a \"full name\"", "name", "=", "track", "[", "'user'", "]", ".", "get", "(", "'full_name'", ",", "''", ")", "if", "name", "==", "''", ":", "name", "=", "track", "[", "'user'", "]", "[", "'username'", "]", "filename", "=", "sanitize_filename", "(", "name", "+", "' - '", "+", "track", "[", "'title'", "]", "+", "'.mp3'", ")", "if", "folders", ":", "name_path", "=", "join", "(", "custom_path", ",", "name", ")", "if", "not", "exists", "(", "name_path", ")", ":", "mkdir", "(", "name_path", ")", "filename", "=", "join", "(", "name_path", ",", "filename", ")", "else", ":", "filename", "=", "join", "(", "custom_path", ",", "filename", ")", "if", "exists", "(", "filename", ")", ":", "puts_safe", "(", "colored", ".", "yellow", "(", "\"Track already downloaded: \"", ")", "+", "colored", ".", "white", "(", "track", "[", "'title'", "]", ")", ")", "return", "None", "# Skip already downloaded track.", "if", "filename", "in", "filenames", ":", "return", "None", "if", "hard_track_url", ":", "puts_safe", "(", "colored", ".", "green", "(", "\"Scraping\"", ")", "+", "colored", ".", "white", "(", "\": \"", "+", "track", "[", "'title'", "]", ")", ")", "else", ":", "# Region coded?", "puts_safe", "(", "colored", ".", "yellow", "(", "\"Unable to download\"", ")", "+", "colored", ".", "white", "(", "\": \"", "+", "track", "[", "'title'", "]", ")", ")", "return", "None", "filename", "=", "download_file", "(", "hard_track_url", ",", "filename", ")", "tagged", "=", "tag_file", "(", "filename", ",", "artist", "=", "name", ",", "title", "=", "track", "[", "'title'", "]", ",", "year", "=", "track", "[", "'created_at'", "]", "[", ":", "4", "]", ",", "genre", "=", "track", "[", "'genre'", "]", ",", "album", "=", "album_name", ",", "artwork_url", "=", "track", "[", "'artwork_url'", "]", ")", "if", "not", "tagged", ":", "wav_filename", "=", "filename", "[", ":", "-", "3", "]", "+", "'wav'", "os", ".", "rename", "(", "filename", ",", "wav_filename", ")", "filename", "=", "wav_filename", "return", "filename" ]
Given a track, force scrape it.
[ "Given", "a", "track", "force", "scrape", "it", "." ]
python
train
dev-pipeline/dev-pipeline-build
lib/devpipeline_build/builder.py
https://github.com/dev-pipeline/dev-pipeline-build/blob/52e3e22b1c433fb7c3902acc46d6f3ac2c6fc426/lib/devpipeline_build/builder.py#L93-L106
def _make_builder(config, current_target): """ Create and return a Builder for a component. Arguments config - The configuration to use. current_target - The target component the builder should be created for. """ tool_key = devpipeline_core.toolsupport.choose_tool_key( current_target, _BUILD_TOOL_KEYS ) return devpipeline_core.toolsupport.tool_builder( config, tool_key, devpipeline_build.BUILDERS, current_target )
[ "def", "_make_builder", "(", "config", ",", "current_target", ")", ":", "tool_key", "=", "devpipeline_core", ".", "toolsupport", ".", "choose_tool_key", "(", "current_target", ",", "_BUILD_TOOL_KEYS", ")", "return", "devpipeline_core", ".", "toolsupport", ".", "tool_builder", "(", "config", ",", "tool_key", ",", "devpipeline_build", ".", "BUILDERS", ",", "current_target", ")" ]
Create and return a Builder for a component. Arguments config - The configuration to use. current_target - The target component the builder should be created for.
[ "Create", "and", "return", "a", "Builder", "for", "a", "component", "." ]
python
train
PyCQA/pylint
pylint/checkers/refactoring.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/refactoring.py#L611-L614
def _check_exception_inherit_from_stopiteration(exc): """Return True if the exception node in argument inherits from StopIteration""" stopiteration_qname = "{}.StopIteration".format(utils.EXCEPTIONS_MODULE) return any(_class.qname() == stopiteration_qname for _class in exc.mro())
[ "def", "_check_exception_inherit_from_stopiteration", "(", "exc", ")", ":", "stopiteration_qname", "=", "\"{}.StopIteration\"", ".", "format", "(", "utils", ".", "EXCEPTIONS_MODULE", ")", "return", "any", "(", "_class", ".", "qname", "(", ")", "==", "stopiteration_qname", "for", "_class", "in", "exc", ".", "mro", "(", ")", ")" ]
Return True if the exception node in argument inherits from StopIteration
[ "Return", "True", "if", "the", "exception", "node", "in", "argument", "inherits", "from", "StopIteration" ]
python
test
ranaroussi/qtpylib
qtpylib/indicators.py
https://github.com/ranaroussi/qtpylib/blob/0dbbc465fafd9cb9b0f4d10e1e07fae4e15032dd/qtpylib/indicators.py#L339-L349
def vwap(bars): """ calculate vwap of entire time series (input can be pandas series or numpy array) bars are usually mid [ (h+l)/2 ] or typical [ (h+l+c)/3 ] """ typical = ((bars['high'] + bars['low'] + bars['close']) / 3).values volume = bars['volume'].values return pd.Series(index=bars.index, data=np.cumsum(volume * typical) / np.cumsum(volume))
[ "def", "vwap", "(", "bars", ")", ":", "typical", "=", "(", "(", "bars", "[", "'high'", "]", "+", "bars", "[", "'low'", "]", "+", "bars", "[", "'close'", "]", ")", "/", "3", ")", ".", "values", "volume", "=", "bars", "[", "'volume'", "]", ".", "values", "return", "pd", ".", "Series", "(", "index", "=", "bars", ".", "index", ",", "data", "=", "np", ".", "cumsum", "(", "volume", "*", "typical", ")", "/", "np", ".", "cumsum", "(", "volume", ")", ")" ]
calculate vwap of entire time series (input can be pandas series or numpy array) bars are usually mid [ (h+l)/2 ] or typical [ (h+l+c)/3 ]
[ "calculate", "vwap", "of", "entire", "time", "series", "(", "input", "can", "be", "pandas", "series", "or", "numpy", "array", ")", "bars", "are", "usually", "mid", "[", "(", "h", "+", "l", ")", "/", "2", "]", "or", "typical", "[", "(", "h", "+", "l", "+", "c", ")", "/", "3", "]" ]
python
train
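With a tiny bar frame the cumulative formulation is easy to verify by hand: after two bars, VWAP = (100*9.5 + 200*10.5) / 300 = 10.1666...

import numpy as np
import pandas as pd

bars = pd.DataFrame({'high':   [10.0, 11.0],
                     'low':    [9.0, 10.0],
                     'close':  [9.5, 10.5],
                     'volume': [100, 200]})

typical = ((bars['high'] + bars['low'] + bars['close']) / 3).values
volume = bars['volume'].values
vwap = pd.Series(index=bars.index,
                 data=np.cumsum(volume * typical) / np.cumsum(volume))
print(vwap)  # 9.5 after the first bar, ~10.1667 after the second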
genialis/resolwe-runtime-utils
resolwe_runtime_utils.py
https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L146-L173
def save_dir(key, dir_path, *refs): """Convert the given parameters to a special JSON object. JSON object is of the form: { key: {"dir": dir_path}}, or { key: {"dir": dir_path, "refs": [refs[0], refs[1], ... ]}} """ if not os.path.isdir(dir_path): return error( "Output '{}' set to a missing directory: '{}'.".format(key, dir_path) ) result = {key: {"dir": dir_path}} if refs: missing_refs = [ ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref)) ] if len(missing_refs) > 0: return error( "Output '{}' set to missing references: '{}'.".format( key, ', '.join(missing_refs) ) ) result[key]["refs"] = refs return json.dumps(result)
[ "def", "save_dir", "(", "key", ",", "dir_path", ",", "*", "refs", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "dir_path", ")", ":", "return", "error", "(", "\"Output '{}' set to a missing directory: '{}'.\"", ".", "format", "(", "key", ",", "dir_path", ")", ")", "result", "=", "{", "key", ":", "{", "\"dir\"", ":", "dir_path", "}", "}", "if", "refs", ":", "missing_refs", "=", "[", "ref", "for", "ref", "in", "refs", "if", "not", "(", "os", ".", "path", ".", "isfile", "(", "ref", ")", "or", "os", ".", "path", ".", "isdir", "(", "ref", ")", ")", "]", "if", "len", "(", "missing_refs", ")", ">", "0", ":", "return", "error", "(", "\"Output '{}' set to missing references: '{}'.\"", ".", "format", "(", "key", ",", "', '", ".", "join", "(", "missing_refs", ")", ")", ")", "result", "[", "key", "]", "[", "\"refs\"", "]", "=", "refs", "return", "json", ".", "dumps", "(", "result", ")" ]
Convert the given parameters to a special JSON object. JSON object is of the form: { key: {"dir": dir_path}}, or { key: {"dir": dir_path, "refs": [refs[0], refs[1], ... ]}}
[ "Convert", "the", "given", "parameters", "to", "a", "special", "JSON", "object", "." ]
python
train
rigetti/pyquil
pyquil/quil.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/quil.py#L194-L211
def gate(self, name, params, qubits): """ Add a gate to the program. .. note:: The matrix elements along each axis are ordered by bitstring. For two qubits the order is ``00, 01, 10, 11``, where the bits **are ordered in reverse** by the qubit index, i.e., for qubits 0 and 1 the bitstring ``01`` indicates that qubit 0 is in the state 1. See also :ref:`the related documentation section in the QVM Overview <basis-ordering>`. :param string name: The name of the gate. :param list params: Parameters to send to the gate. :param list qubits: Qubits that the gate operates on. :return: The Program instance :rtype: Program """ return self.inst(Gate(name, params, [unpack_qubit(q) for q in qubits]))
[ "def", "gate", "(", "self", ",", "name", ",", "params", ",", "qubits", ")", ":", "return", "self", ".", "inst", "(", "Gate", "(", "name", ",", "params", ",", "[", "unpack_qubit", "(", "q", ")", "for", "q", "in", "qubits", "]", ")", ")" ]
Add a gate to the program. .. note:: The matrix elements along each axis are ordered by bitstring. For two qubits the order is ``00, 01, 10, 11``, where the bits **are ordered in reverse** by the qubit index, i.e., for qubits 0 and 1 the bitstring ``01`` indicates that qubit 0 is in the state 1. See also :ref:`the related documentation section in the QVM Overview <basis-ordering>`. :param string name: The name of the gate. :param list params: Parameters to send to the gate. :param list qubits: Qubits that the gate operates on. :return: The Program instance :rtype: Program
[ "Add", "a", "gate", "to", "the", "program", "." ]
python
train
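Typical usage of the method above, assuming a pyquil 2.x install (gate names and angles are illustrative):

import numpy as np
from pyquil import Program

p = Program()
p.gate('RX', [np.pi / 2], [0])   # RX(pi/2) on qubit 0
p.gate('CNOT', [], [0, 1])       # parameter-free two-qubit gate
print(p)                         # prints the accumulated Quil text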
bhmm/bhmm
bhmm/output_models/gaussian.py
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/output_models/gaussian.py#L382-L418
def generate_observation_trajectory(self, s_t): """ Generate synthetic observation data from a given state sequence. Parameters ---------- s_t : numpy.array with shape (T,) of int type s_t[t] is the hidden state sampled at time t Returns ------- o_t : numpy.array with shape (T,) of type dtype o_t[t] is the observation associated with state s_t[t] Examples -------- Generate an observation model and synthetic state trajectory. >>> nobs = 1000 >>> output_model = GaussianOutputModel(nstates=3, means=[-1, 0, +1], sigmas=[0.5, 1, 2]) >>> s_t = np.random.randint(0, output_model.nstates, size=[nobs]) Generate a synthetic trajectory >>> o_t = output_model.generate_observation_trajectory(s_t) """ # Determine number of samples to generate. T = s_t.shape[0] o_t = np.zeros([T], dtype=config.dtype) for t in range(T): s = s_t[t] o_t[t] = self.sigmas[s] * np.random.randn() + self.means[s] return o_t
[ "def", "generate_observation_trajectory", "(", "self", ",", "s_t", ")", ":", "# Determine number of samples to generate.", "T", "=", "s_t", ".", "shape", "[", "0", "]", "o_t", "=", "np", ".", "zeros", "(", "[", "T", "]", ",", "dtype", "=", "config", ".", "dtype", ")", "for", "t", "in", "range", "(", "T", ")", ":", "s", "=", "s_t", "[", "t", "]", "o_t", "[", "t", "]", "=", "self", ".", "sigmas", "[", "s", "]", "*", "np", ".", "random", ".", "randn", "(", ")", "+", "self", ".", "means", "[", "s", "]", "return", "o_t" ]
Generate synthetic observation data from a given state sequence. Parameters ---------- s_t : numpy.array with shape (T,) of int type s_t[t] is the hidden state sampled at time t Returns ------- o_t : numpy.array with shape (T,) of type dtype o_t[t] is the observation associated with state s_t[t] Examples -------- Generate an observation model and synthetic state trajectory. >>> nobs = 1000 >>> output_model = GaussianOutputModel(nstates=3, means=[-1, 0, +1], sigmas=[0.5, 1, 2]) >>> s_t = np.random.randint(0, output_model.nstates, size=[nobs]) Generate a synthetic trajectory >>> o_t = output_model.generate_observation_trajectory(s_t)
[ "Generate", "synthetic", "observation", "data", "from", "a", "given", "state", "sequence", "." ]
python
train
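The per-timestep loop above can be expressed as a single vectorized NumPy expression; this is a sketch of the same sampling rule, not the library's code:

    import numpy as np

    means = np.array([-1.0, 0.0, 1.0])
    sigmas = np.array([0.5, 1.0, 2.0])
    s_t = np.random.randint(0, 3, size=1000)                    # hidden state path
    o_t = sigmas[s_t] * np.random.randn(len(s_t)) + means[s_t]  # emissions per state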
globocom/GloboNetworkAPI-client-python
networkapiclient/ApiVrf.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiVrf.py#L66-L75
def delete(self, ids): """ Method to delete VRFs by their IDs :param ids: Identifiers of VRFs :return: None """ url = build_uri_with_ids('api/v3/vrf/%s/', ids) return super(ApiVrf, self).delete(url)
[ "def", "delete", "(", "self", ",", "ids", ")", ":", "url", "=", "build_uri_with_ids", "(", "'api/v3/vrf/%s/'", ",", "ids", ")", "return", "super", "(", "ApiVrf", ",", "self", ")", ".", "delete", "(", "url", ")" ]
Method to delete VRFs by their IDs :param ids: Identifiers of VRFs :return: None
[ "Method", "to", "delete", "vrf", "s", "by", "their", "id", "s" ]
python
train
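A hedged usage sketch; the constructor arguments and URL are hypothetical, and build_uri_with_ids interpolates the given identifiers into 'api/v3/vrf/%s/':

    client = ApiVrf('https://networkapi.example.com/', 'user', 'password')
    client.delete([1, 2, 3])  # issues a DELETE for the three VRF ids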
google/grumpy
third_party/stdlib/threading.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/threading.py#L309-L371
def wait(self, timeout=None): """Wait until notified or until a timeout occurs. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. This method releases the underlying lock, and then blocks until it is awakened by a notify() or notifyAll() call for the same condition variable in another thread, or until the optional timeout occurs. Once awakened or timed out, it re-acquires the lock and returns. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). When the underlying lock is an RLock, it is not released using its release() method, since this may not actually unlock the lock when it was acquired multiple times recursively. Instead, an internal interface of the RLock class is used, which really unlocks it even when it has been recursively acquired several times. Another internal interface is then used to restore the recursion level when the lock is reacquired. """ if not self._is_owned(): raise RuntimeError("cannot wait on un-acquired lock") waiter = _allocate_lock() waiter.acquire() self.__waiters.append(waiter) saved_state = self._release_save() try: # restore state no matter what (e.g., KeyboardInterrupt) if timeout is None: waiter.acquire() if __debug__: self._note("%s.wait(): got it", self) else: # Balancing act: We can't afford a pure busy loop, so we # have to sleep; but if we sleep the whole timeout time, # we'll be unresponsive. The scheme here sleeps very # little at first, longer as time goes on, but never longer # than 20 times per second (or the timeout time remaining). endtime = _time() + timeout delay = 0.0005 # 500 us -> initial delay of 1 ms while True: gotit = waiter.acquire(0) if gotit: break remaining = endtime - _time() if remaining <= 0: break delay = min(delay * 2, remaining, .05) _sleep(delay) if not gotit: if __debug__: self._note("%s.wait(%s): timed out", self, timeout) try: self.__waiters.remove(waiter) except ValueError: pass else: if __debug__: self._note("%s.wait(%s): got it", self, timeout) finally: self._acquire_restore(saved_state)
[ "def", "wait", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "not", "self", ".", "_is_owned", "(", ")", ":", "raise", "RuntimeError", "(", "\"cannot wait on un-acquired lock\"", ")", "waiter", "=", "_allocate_lock", "(", ")", "waiter", ".", "acquire", "(", ")", "self", ".", "__waiters", ".", "append", "(", "waiter", ")", "saved_state", "=", "self", ".", "_release_save", "(", ")", "try", ":", "# restore state no matter what (e.g., KeyboardInterrupt)", "if", "timeout", "is", "None", ":", "waiter", ".", "acquire", "(", ")", "if", "__debug__", ":", "self", ".", "_note", "(", "\"%s.wait(): got it\"", ",", "self", ")", "else", ":", "# Balancing act: We can't afford a pure busy loop, so we", "# have to sleep; but if we sleep the whole timeout time,", "# we'll be unresponsive. The scheme here sleeps very", "# little at first, longer as time goes on, but never longer", "# than 20 times per second (or the timeout time remaining).", "endtime", "=", "_time", "(", ")", "+", "timeout", "delay", "=", "0.0005", "# 500 us -> initial delay of 1 ms", "while", "True", ":", "gotit", "=", "waiter", ".", "acquire", "(", "0", ")", "if", "gotit", ":", "break", "remaining", "=", "endtime", "-", "_time", "(", ")", "if", "remaining", "<=", "0", ":", "break", "delay", "=", "min", "(", "delay", "*", "2", ",", "remaining", ",", ".05", ")", "_sleep", "(", "delay", ")", "if", "not", "gotit", ":", "if", "__debug__", ":", "self", ".", "_note", "(", "\"%s.wait(%s): timed out\"", ",", "self", ",", "timeout", ")", "try", ":", "self", ".", "__waiters", ".", "remove", "(", "waiter", ")", "except", "ValueError", ":", "pass", "else", ":", "if", "__debug__", ":", "self", ".", "_note", "(", "\"%s.wait(%s): got it\"", ",", "self", ",", "timeout", ")", "finally", ":", "self", ".", "_acquire_restore", "(", "saved_state", ")" ]
Wait until notified or until a timeout occurs. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. This method releases the underlying lock, and then blocks until it is awakened by a notify() or notifyAll() call for the same condition variable in another thread, or until the optional timeout occurs. Once awakened or timed out, it re-acquires the lock and returns. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). When the underlying lock is an RLock, it is not released using its release() method, since this may not actually unlock the lock when it was acquired multiple times recursively. Instead, an internal interface of the RLock class is used, which really unlocks it even when it has been recursively acquired several times. Another internal interface is then used to restore the recursion level when the lock is reacquired.
[ "Wait", "until", "notified", "or", "until", "a", "timeout", "occurs", "." ]
python
valid
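Condition.wait is meant to be called inside a predicate loop, since a waiter can wake without the condition it cares about actually holding; a minimal producer/consumer sketch:

    import threading

    cond = threading.Condition()
    items = []

    def consumer():
        with cond:
            while not items:           # re-check the predicate after every wakeup
                cond.wait(timeout=1.0)
            print(items.pop())

    def producer():
        with cond:
            items.append("work")
            cond.notify()              # wake one waiter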
ejeschke/ginga
ginga/rv/plugins/TVMark.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/TVMark.py#L678-L686
def set_marktype_cb(self, w, index): """Set type of marking.""" self.marktype = self._mark_options[index] # Mark size is not used for point if self.marktype != 'point': self.w.mark_size.set_enabled(True) else: self.w.mark_size.set_enabled(False)
[ "def", "set_marktype_cb", "(", "self", ",", "w", ",", "index", ")", ":", "self", ".", "marktype", "=", "self", ".", "_mark_options", "[", "index", "]", "# Mark size is not used for point", "if", "self", ".", "marktype", "!=", "'point'", ":", "self", ".", "w", ".", "mark_size", ".", "set_enabled", "(", "True", ")", "else", ":", "self", ".", "w", ".", "mark_size", ".", "set_enabled", "(", "False", ")" ]
Set type of marking.
[ "Set", "type", "of", "marking", "." ]
python
train
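The callback reduces to a widget enable/disable toggle keyed on the selected option; a compact sketch with hypothetical names:

    def on_marktype_changed(options, index, size_widget):
        marktype = options[index]
        size_widget.set_enabled(marktype != 'point')  # point markers have no size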
zarr-developers/zarr
zarr/hierarchy.py
https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/hierarchy.py#L858-L861
def empty(self, name, **kwargs): """Create an array. Keyword arguments as per :func:`zarr.creation.empty`.""" return self._write_op(self._empty_nosync, name, **kwargs)
[ "def", "empty", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_write_op", "(", "self", ".", "_empty_nosync", ",", "name", ",", "*", "*", "kwargs", ")" ]
Create an array. Keyword arguments as per :func:`zarr.creation.empty`.
[ "Create", "an", "array", ".", "Keyword", "arguments", "as", "per", ":", "func", ":", "zarr", ".", "creation", ".", "empty", "." ]
python
train
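A short usage sketch for Group.empty; keyword arguments such as shape, chunks, and dtype are forwarded to zarr.creation.empty:

    import zarr

    root = zarr.group()
    z = root.empty('foo', shape=(1000, 1000), chunks=(100, 100), dtype='f8')
    print(z.shape, z.chunks)  # (1000, 1000) (100, 100)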