Dataset columns:
  repo              string (lengths 7 to 54)
  path              string (lengths 4 to 192)
  url               string (lengths 87 to 284)
  code              string (lengths 78 to 104k)
  code_tokens       list
  docstring         string (lengths 1 to 46.9k)
  docstring_tokens  list
  language          string (1 class)
  partition         string (3 classes)
blaix/tdubs
tdubs/verifications.py
https://github.com/blaix/tdubs/blob/5df4ee32bb973dbf52baa4f10640505394089b78/tdubs/verifications.py#L50-L61
def called_with(self, *args, **kwargs):
    """Return True if the spy was called with the specified args/kwargs.

    Otherwise raise VerificationError.
    """
    expected_call = Call(*args, **kwargs)
    if expected_call in calls(self.spy):
        return True
    raise VerificationError(
        "expected %s to be called with %s, but it wasn't" % (
            self.spy, expected_call.formatted_args))
[ "def", "called_with", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "expected_call", "=", "Call", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "expected_call", "in", "calls", "(", "self", ".", "spy", ")", ":", "return", "True", "raise", "VerificationError", "(", "\"expected %s to be called with %s, but it wasn't\"", "%", "(", "self", ".", "spy", ",", "expected_call", ".", "formatted_args", ")", ")" ]
Return True if the spy was called with the specified args/kwargs. Otherwise raise VerificationError.
[ "Return", "True", "if", "the", "spy", "was", "called", "with", "the", "specified", "args", "/", "kwargs", "." ]
python
train
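A minimal, self-contained sketch of the verification pattern above, using stand-in Call and calls helpers; these names are illustrative assumptions, not the real tdubs internals:

class VerificationError(AssertionError):
    pass

class Call:
    """Stand-in for tdubs' Call: records and compares args/kwargs."""
    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs
        self.formatted_args = '(%s)' % ', '.join(
            [repr(a) for a in args] +
            ['%s=%r' % kv for kv in sorted(kwargs.items())])

    def __eq__(self, other):
        return (self.args, self.kwargs) == (other.args, other.kwargs)

def calls(spy):
    # In tdubs this would read the spy's recorded calls; here the "spy"
    # is simply a list of Call objects.
    return spy

spy = [Call(1, x=2)]  # pretend the spy was called once with (1, x=2)
assert Call(1, x=2) in calls(spy)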
smarie/python-parsyfiles
parsyfiles/plugins_base/support_for_collections.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/plugins_base/support_for_collections.py#L421-L432
def get_default_collection_parsers(parser_finder: ParserFinder,
                                   conversion_finder: ConversionFinder) -> List[AnyParser]:
    """
    Utility method to return the default parsers able to parse a dictionary from a file.
    :return:
    """
    return [SingleFileParserFunction(parser_function=read_dict_or_list_from_json,
                                     streaming_mode=True,
                                     custom_name='read_dict_or_list_from_json',
                                     supported_exts={'.json'},
                                     supported_types={dict, list},
                                     function_args={'conversion_finder': conversion_finder}),
            MultifileCollectionParser(parser_finder)
            ]
[ "def", "get_default_collection_parsers", "(", "parser_finder", ":", "ParserFinder", ",", "conversion_finder", ":", "ConversionFinder", ")", "->", "List", "[", "AnyParser", "]", ":", "return", "[", "SingleFileParserFunction", "(", "parser_function", "=", "read_dict_or_list_from_json", ",", "streaming_mode", "=", "True", ",", "custom_name", "=", "'read_dict_or_list_from_json'", ",", "supported_exts", "=", "{", "'.json'", "}", ",", "supported_types", "=", "{", "dict", ",", "list", "}", ",", "function_args", "=", "{", "'conversion_finder'", ":", "conversion_finder", "}", ")", ",", "MultifileCollectionParser", "(", "parser_finder", ")", "]" ]
Utility method to return the default parsers able to parse a dictionary from a file. :return:
[ "Utility", "method", "to", "return", "the", "default", "parsers", "able", "to", "parse", "a", "dictionary", "from", "a", "file", ".", ":", "return", ":" ]
python
train
inasafe/inasafe
safe/gui/widgets/dock.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/widgets/dock.py#L872-L880
def show_busy(self):
    """Hide the question group box and enable the busy cursor."""
    self.progress_bar.show()
    self.question_group.setEnabled(False)
    self.question_group.setVisible(False)
    enable_busy_cursor()
    self.repaint()
    qApp.processEvents()
    self.busy = True
[ "def", "show_busy", "(", "self", ")", ":", "self", ".", "progress_bar", ".", "show", "(", ")", "self", ".", "question_group", ".", "setEnabled", "(", "False", ")", "self", ".", "question_group", ".", "setVisible", "(", "False", ")", "enable_busy_cursor", "(", ")", "self", ".", "repaint", "(", ")", "qApp", ".", "processEvents", "(", ")", "self", ".", "busy", "=", "True" ]
Hide the question group box and enable the busy cursor.
[ "Hide", "the", "question", "group", "box", "and", "enable", "the", "busy", "cursor", "." ]
python
train
Clivern/PyLogging
pylogging/pylogging.py
https://github.com/Clivern/PyLogging/blob/46a1442ec63796302ec7fe3d49bd06a0f7a2fe70/pylogging/pylogging.py#L209-L213
def _execFilters(self, type, msg):
    """ Execute Registered Filters """
    for filter in self.FILTERS:
        msg = filter(type, msg)
    return msg
[ "def", "_execFilters", "(", "self", ",", "type", ",", "msg", ")", ":", "for", "filter", "in", "self", ".", "FILTERS", ":", "msg", "=", "filter", "(", "type", ",", "msg", ")", "return", "msg" ]
Execute Registered Filters
[ "Execute", "Registered", "Filters" ]
python
train
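A short, self-contained sketch of the same filter-chain idea: each registered callable takes the message type and the message, and returns a (possibly transformed) message that feeds the next filter. The names below are illustrative, not the PyLogging API:

FILTERS = [
    lambda type, msg: msg.strip(),                   # normalize whitespace
    lambda type, msg: msg.replace('secret', '***'),  # redact
    lambda type, msg: '[%s] %s' % (type, msg),       # prefix with type
]

def exec_filters(type, msg):
    for f in FILTERS:
        msg = f(type, msg)
    return msg

print(exec_filters('INFO', '  user token is secret  '))
# [INFO] user token is ***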
nerdvegas/rez
src/rez/rex.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/rex.py#L564-L571
def apply_environ(self):
    """Apply changes to target environ.
    """
    if self.manager is None:
        raise RezSystemError("You must call 'set_manager' on a Python rex "
                             "interpreter before using it.")
    self.target_environ.update(self.manager.environ)
[ "def", "apply_environ", "(", "self", ")", ":", "if", "self", ".", "manager", "is", "None", ":", "raise", "RezSystemError", "(", "\"You must call 'set_manager' on a Python rex \"", "\"interpreter before using it.\"", ")", "self", ".", "target_environ", ".", "update", "(", "self", ".", "manager", ".", "environ", ")" ]
Apply changes to target environ.
[ "Apply", "changes", "to", "target", "environ", "." ]
python
train
fudge-py/fudge
fudge/__init__.py
https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L1112-L1139
def returns_fake(self, *args, **kwargs):
    """Set the last call to return a new :class:`fudge.Fake`.

    Any given arguments are passed to the :class:`fudge.Fake` constructor

    Take note that this is different from the cascading nature of other
    methods.  This will return an instance of the *new* Fake, not self, so
    you should be careful to store its return value in a new variable.

    I.E.::

        >>> session = Fake('session')
        >>> query = session.provides('query').returns_fake(name="Query")
        >>> assert query is not session
        >>> query = query.provides('one').returns(['object'])

        >>> session.query().one()
        ['object']

    """
    exp = self._get_current_call()
    endpoint = kwargs.get('name', exp.call_name)
    name = self._endpoint_name(endpoint)
    kwargs['name'] = '%s()' % name
    fake = self.__class__(*args, **kwargs)
    exp.return_val = fake
    return fake
[ "def", "returns_fake", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "exp", "=", "self", ".", "_get_current_call", "(", ")", "endpoint", "=", "kwargs", ".", "get", "(", "'name'", ",", "exp", ".", "call_name", ")", "name", "=", "self", ".", "_endpoint_name", "(", "endpoint", ")", "kwargs", "[", "'name'", "]", "=", "'%s()'", "%", "name", "fake", "=", "self", ".", "__class__", "(", "*", "args", ",", "*", "*", "kwargs", ")", "exp", ".", "return_val", "=", "fake", "return", "fake" ]
Set the last call to return a new :class:`fudge.Fake`. Any given arguments are passed to the :class:`fudge.Fake` constructor Take note that this is different from the cascading nature of other methods. This will return an instance of the *new* Fake, not self, so you should be careful to store its return value in a new variable. I.E.:: >>> session = Fake('session') >>> query = session.provides('query').returns_fake(name="Query") >>> assert query is not session >>> query = query.provides('one').returns(['object']) >>> session.query().one() ['object']
[ "Set", "the", "last", "call", "to", "return", "a", "new", ":", "class", ":", "fudge", ".", "Fake", "." ]
python
train
NicolasLM/spinach
spinach/brokers/redis.py
https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/redis.py#L203-L213
def register_periodic_tasks(self, tasks: Iterable[Task]):
    """Register tasks that need to be scheduled periodically."""
    tasks = [task.serialize() for task in tasks]
    self._number_periodic_tasks = len(tasks)
    self._run_script(
        self._register_periodic_tasks,
        math.ceil(datetime.now(timezone.utc).timestamp()),
        self._to_namespaced(PERIODIC_TASKS_HASH_KEY),
        self._to_namespaced(PERIODIC_TASKS_QUEUE_KEY),
        *tasks
    )
[ "def", "register_periodic_tasks", "(", "self", ",", "tasks", ":", "Iterable", "[", "Task", "]", ")", ":", "tasks", "=", "[", "task", ".", "serialize", "(", ")", "for", "task", "in", "tasks", "]", "self", ".", "_number_periodic_tasks", "=", "len", "(", "tasks", ")", "self", ".", "_run_script", "(", "self", ".", "_register_periodic_tasks", ",", "math", ".", "ceil", "(", "datetime", ".", "now", "(", "timezone", ".", "utc", ")", ".", "timestamp", "(", ")", ")", ",", "self", ".", "_to_namespaced", "(", "PERIODIC_TASKS_HASH_KEY", ")", ",", "self", ".", "_to_namespaced", "(", "PERIODIC_TASKS_QUEUE_KEY", ")", ",", "*", "tasks", ")" ]
Register tasks that need to be scheduled periodically.
[ "Register", "tasks", "that", "need", "to", "be", "scheduled", "periodically", "." ]
python
train
jupyterhub/jupyter-server-proxy
contrib/rstudio/jupyter_rsession_proxy/__init__.py
https://github.com/jupyterhub/jupyter-server-proxy/blob/f12a090babe3c6e37a777b7e54c7b415de5c7e18/contrib/rstudio/jupyter_rsession_proxy/__init__.py#L8-L40
def setup_shiny():
    '''Manage a Shiny instance.'''
    name = 'shiny'

    def _get_shiny_cmd(port):
        conf = dedent("""
            run_as {user};
            server {{
                listen {port};
                location / {{
                    site_dir {site_dir};
                    log_dir {site_dir}/logs;
                    directory_index on;
                }}
            }}
        """).format(
            user=getpass.getuser(),
            port=str(port),
            site_dir=os.getcwd()
        )

        f = tempfile.NamedTemporaryFile(mode='w', delete=False)
        f.write(conf)
        f.close()
        return ['shiny-server', f.name]

    return {
        'command': _get_shiny_cmd,
        'launcher_entry': {
            'title': 'Shiny',
            'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                      'icons', 'shiny.svg')
        }
    }
[ "def", "setup_shiny", "(", ")", ":", "name", "=", "'shiny'", "def", "_get_shiny_cmd", "(", "port", ")", ":", "conf", "=", "dedent", "(", "\"\"\"\n run_as {user};\n server {{\n listen {port};\n location / {{\n site_dir {site_dir};\n log_dir {site_dir}/logs;\n directory_index on;\n }}\n }}\n \"\"\"", ")", ".", "format", "(", "user", "=", "getpass", ".", "getuser", "(", ")", ",", "port", "=", "str", "(", "port", ")", ",", "site_dir", "=", "os", ".", "getcwd", "(", ")", ")", "f", "=", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "'w'", ",", "delete", "=", "False", ")", "f", ".", "write", "(", "conf", ")", "f", ".", "close", "(", ")", "return", "[", "'shiny-server'", ",", "f", ".", "name", "]", "return", "{", "'command'", ":", "_get_shiny_cmd", ",", "'launcher_entry'", ":", "{", "'title'", ":", "'Shiny'", ",", "'icon_path'", ":", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", ",", "'icons'", ",", "'shiny.svg'", ")", "}", "}" ]
Manage a Shiny instance.
[ "Manage", "a", "Shiny", "instance", "." ]
python
train
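The helper above just renders a shiny-server config and writes it to a temp file; a stdlib-only sketch of that rendering step, standalone rather than importing the proxy package (the port 3838 is an example value):

import getpass
import os
from textwrap import dedent

conf = dedent("""
    run_as {user};
    server {{
        listen {port};
        location / {{
            site_dir {site_dir};
            log_dir {site_dir}/logs;
            directory_index on;
        }}
    }}
""").format(user=getpass.getuser(), port=3838, site_dir=os.getcwd())

print(conf)  # the config text shiny-server would be launched with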
cggh/scikit-allel
allel/model/ndarray.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L714-L724
def count_hom_ref(self, axis=None):
    """Count homozygous reference genotypes.

    Parameters
    ----------

    axis : int, optional
        Axis over which to count, or None to perform overall count.

    """
    b = self.is_hom_ref()
    return np.sum(b, axis=axis)
[ "def", "count_hom_ref", "(", "self", ",", "axis", "=", "None", ")", ":", "b", "=", "self", ".", "is_hom_ref", "(", ")", "return", "np", ".", "sum", "(", "b", ",", "axis", "=", "axis", ")" ]
Count homozygous reference genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count.
[ "Count", "homozygous", "reference", "genotypes", "." ]
python
train
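The method reduces a boolean mask with np.sum; a small self-contained sketch of that axis semantics on a hand-made mask (plain numpy, not a real GenotypeArray):

import numpy as np

# True where a (variant, sample) call is homozygous reference.
is_hom_ref = np.array([[True, False],
                       [True, True],
                       [False, False]])

print(np.sum(is_hom_ref))          # 3       -> overall count (axis=None)
print(np.sum(is_hom_ref, axis=0))  # [2 1]   -> per-sample counts
print(np.sum(is_hom_ref, axis=1))  # [1 2 0] -> per-variant counts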
python-cmd2/cmd2
examples/pirate.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/examples/pirate.py#L93-L98
def do_yo(self, args):
    """Compose a yo-ho-ho type chant with flexible options."""
    chant = ['yo'] + ['ho'] * args.ho
    separator = ', ' if args.commas else ' '
    chant = separator.join(chant)
    self.poutput('{0} and a bottle of {1}'.format(chant, args.beverage))
[ "def", "do_yo", "(", "self", ",", "args", ")", ":", "chant", "=", "[", "'yo'", "]", "+", "[", "'ho'", "]", "*", "args", ".", "ho", "separator", "=", "', '", "if", "args", ".", "commas", "else", "' '", "chant", "=", "separator", ".", "join", "(", "chant", ")", "self", ".", "poutput", "(", "'{0} and a bottle of {1}'", ".", "format", "(", "chant", ",", "args", ".", "beverage", ")", ")" ]
Compose a yo-ho-ho type chant with flexible options.
[ "Compose", "a", "yo", "-", "ho", "-", "ho", "type", "chant", "with", "flexible", "options", "." ]
python
train
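The chant construction is plain list arithmetic; a standalone sketch with the cmd2 plumbing stripped out (argparse.Namespace stands in for the parsed args):

from argparse import Namespace

def yo(args):
    chant = ['yo'] + ['ho'] * args.ho
    separator = ', ' if args.commas else ' '
    return '{0} and a bottle of {1}'.format(separator.join(chant), args.beverage)

print(yo(Namespace(ho=2, commas=True, beverage='rum')))
# yo, ho, ho and a bottle of rum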
gtaylor/django-dynamodb-sessions
dynamodb_sessions/backends/dynamodb.py
https://github.com/gtaylor/django-dynamodb-sessions/blob/434031aa483b26b0b7b5acbdf683bbe1575956f1/dynamodb_sessions/backends/dynamodb.py#L181-L194
def delete(self, session_key=None):
    """
    Deletes the current session, or the one specified in ``session_key``.

    :keyword str session_key: Optionally, override the session key
        to delete.
    """
    if session_key is None:
        if self.session_key is None:
            return
        session_key = self.session_key
    self.table.delete_item(Key={'session_key': session_key})
[ "def", "delete", "(", "self", ",", "session_key", "=", "None", ")", ":", "if", "session_key", "is", "None", ":", "if", "self", ".", "session_key", "is", "None", ":", "return", "session_key", "=", "self", ".", "session_key", "self", ".", "table", ".", "delete_item", "(", "Key", "=", "{", "'session_key'", ":", "session_key", "}", ")" ]
Deletes the current session, or the one specified in ``session_key``. :keyword str session_key: Optionally, override the session key to delete.
[ "Deletes", "the", "current", "session", "or", "the", "one", "specified", "in", "session_key", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/variation/validate.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L93-L102
def _pick_lead_item(items):
    """Choose lead item for a set of samples.

    Picks tumors for tumor/normal pairs and first sample for batch groups.
    """
    paired = vcfutils.get_paired(items)
    if paired:
        return paired.tumor_data
    else:
        return list(items)[0]
[ "def", "_pick_lead_item", "(", "items", ")", ":", "paired", "=", "vcfutils", ".", "get_paired", "(", "items", ")", "if", "paired", ":", "return", "paired", ".", "tumor_data", "else", ":", "return", "list", "(", "items", ")", "[", "0", "]" ]
Choose lead item for a set of samples. Picks tumors for tumor/normal pairs and first sample for batch groups.
[ "Choose", "lead", "item", "for", "a", "set", "of", "samples", "." ]
python
train
log2timeline/plaso
plaso/analyzers/hashing_analyzer.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/analyzers/hashing_analyzer.py#L64-L77
def SetHasherNames(self, hasher_names_string):
    """Sets the hashers that should be enabled.

    Args:
      hasher_names_string (str): comma separated names of hashers to enable.
    """
    hasher_names = hashers_manager.HashersManager.GetHasherNamesFromString(
        hasher_names_string)

    debug_hasher_names = ', '.join(hasher_names)
    logger.debug('Got hasher names: {0:s}'.format(debug_hasher_names))

    self._hashers = hashers_manager.HashersManager.GetHashers(hasher_names)
    self._hasher_names_string = hasher_names_string
[ "def", "SetHasherNames", "(", "self", ",", "hasher_names_string", ")", ":", "hasher_names", "=", "hashers_manager", ".", "HashersManager", ".", "GetHasherNamesFromString", "(", "hasher_names_string", ")", "debug_hasher_names", "=", "', '", ".", "join", "(", "hasher_names", ")", "logger", ".", "debug", "(", "'Got hasher names: {0:s}'", ".", "format", "(", "debug_hasher_names", ")", ")", "self", ".", "_hashers", "=", "hashers_manager", ".", "HashersManager", ".", "GetHashers", "(", "hasher_names", ")", "self", ".", "_hasher_names_string", "=", "hasher_names_string" ]
Sets the hashers that should be enabled. Args: hasher_names_string (str): comma separated names of hashers to enable.
[ "Sets", "the", "hashers", "that", "should", "be", "enabled", "." ]
python
train
objectrocket/python-client
objectrocket/acls.py
https://github.com/objectrocket/python-client/blob/a65868c7511ff49a5fbe304e53bf592b7fc6d5ef/objectrocket/acls.py#L93-L108
def delete(self, instance, acl):
    """Delete an ACL by ID belonging to the instance specified by name.

    :param str instance: The name of the instance on which the ACL exists.
    :param str acl: The ID of the ACL to delete.
    """
    base_url = self._url.format(instance=instance)
    url = '{base}{aclid}/'.format(base=base_url, aclid=acl)
    response = requests.delete(url, **self._default_request_kwargs)

    if response.status_code == 200:
        logger.info('Successfully deleted ACL {}'.format(acl))
    else:
        logger.info('Failed to delete ACL {}'.format(acl))
        logger.info('Response: [{0}] {1}'.format(response.status_code,
                                                 response.content))
        raise errors.ObjectRocketException('Failed to delete ACL.')
[ "def", "delete", "(", "self", ",", "instance", ",", "acl", ")", ":", "base_url", "=", "self", ".", "_url", ".", "format", "(", "instance", "=", "instance", ")", "url", "=", "'{base}{aclid}/'", ".", "format", "(", "base", "=", "base_url", ",", "aclid", "=", "acl", ")", "response", "=", "requests", ".", "delete", "(", "url", ",", "*", "*", "self", ".", "_default_request_kwargs", ")", "if", "response", ".", "status_code", "==", "200", ":", "logger", ".", "info", "(", "'Successfully deleted ACL {}'", ".", "format", "(", "acl", ")", ")", "else", ":", "logger", ".", "info", "(", "'Failed to delete ACL {}'", ".", "format", "(", "acl", ")", ")", "logger", ".", "info", "(", "'Response: [{0}] {1}'", ".", "format", "(", "response", ".", "status_code", ",", "response", ".", "content", ")", ")", "raise", "errors", ".", "ObjectRocketException", "(", "'Failed to delete ACL.'", ")" ]
Delete an ACL by ID belonging to the instance specified by name. :param str instance: The name of the instance on which the ACL exists. :param str acl: The ID of the ACL to delete.
[ "Delete", "an", "ACL", "by", "ID", "belonging", "to", "the", "instance", "specified", "by", "name", "." ]
python
train
casacore/python-casacore
casacore/tables/tablecolumn.py
https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/tablecolumn.py#L149-L152
def putcell(self, rownr, value):
    """Put a value into one or more table cells.
    (see :func:`table.putcell`)"""
    return self._table.putcell(self._column, rownr, value)
[ "def", "putcell", "(", "self", ",", "rownr", ",", "value", ")", ":", "return", "self", ".", "_table", ".", "putcell", "(", "self", ".", "_column", ",", "rownr", ",", "value", ")" ]
Put a value into one or more table cells. (see :func:`table.putcell`)
[ "Put", "a", "value", "into", "one", "or", "more", "table", "cells", ".", "(", "see", ":", "func", ":", "table", ".", "putcell", ")" ]
python
train
bcbio/bcbio-nextgen
bcbio/install.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L699-L730
def _datatarget_defaults(args, default_args):
    """Set data installation targets, handling defaults.

    Sets variation, rnaseq, smallrna as default targets if we're not
    isolated to a single method. Provides back compatibility for
    toolplus specifications.
    """
    default_data = default_args.get("datatarget", [])
    # back-compatible toolplus specifications
    for x in default_args.get("toolplus", []):
        val = None
        if x == "data":
            val = "gemini"
        elif x in ["cadd", "dbnsfp", "dbscsnv", "kraken", "gnomad"]:
            val = x
        if val and val not in default_data:
            default_data.append(val)
    new_val = getattr(args, "datatarget")
    for x in default_data:
        if x not in new_val:
            new_val.append(x)
    has_std_target = False
    std_targets = ["variation", "rnaseq", "smallrna"]
    for target in std_targets:
        if target in new_val:
            has_std_target = True
            break
    if not has_std_target:
        new_val = new_val + std_targets
    setattr(args, "datatarget", new_val)
    return args
[ "def", "_datatarget_defaults", "(", "args", ",", "default_args", ")", ":", "default_data", "=", "default_args", ".", "get", "(", "\"datatarget\"", ",", "[", "]", ")", "# back-compatible toolplus specifications", "for", "x", "in", "default_args", ".", "get", "(", "\"toolplus\"", ",", "[", "]", ")", ":", "val", "=", "None", "if", "x", "==", "\"data\"", ":", "val", "=", "\"gemini\"", "elif", "x", "in", "[", "\"cadd\"", ",", "\"dbnsfp\"", ",", "\"dbscsnv\"", ",", "\"kraken\"", ",", "\"gnomad\"", "]", ":", "val", "=", "x", "if", "val", "and", "val", "not", "in", "default_data", ":", "default_data", ".", "append", "(", "val", ")", "new_val", "=", "getattr", "(", "args", ",", "\"datatarget\"", ")", "for", "x", "in", "default_data", ":", "if", "x", "not", "in", "new_val", ":", "new_val", ".", "append", "(", "x", ")", "has_std_target", "=", "False", "std_targets", "=", "[", "\"variation\"", ",", "\"rnaseq\"", ",", "\"smallrna\"", "]", "for", "target", "in", "std_targets", ":", "if", "target", "in", "new_val", ":", "has_std_target", "=", "True", "break", "if", "not", "has_std_target", ":", "new_val", "=", "new_val", "+", "std_targets", "setattr", "(", "args", ",", "\"datatarget\"", ",", "new_val", ")", "return", "args" ]
Set data installation targets, handling defaults. Sets variation, rnaseq, smallrna as default targets if we're not isolated to a single method. Provides back compatibility for toolplus specifications.
[ "Set", "data", "installation", "targets", "handling", "defaults", "." ]
python
train
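With the function above in scope, its default-merging behaviour can be exercised standalone, since it only touches an attribute-style args object (argparse.Namespace works; the target names below are example inputs):

from argparse import Namespace

args = _datatarget_defaults(
    Namespace(datatarget=["vep"]),
    {"toolplus": ["data", "cadd"]},  # legacy toolplus spec
)
print(args.datatarget)
# ['vep', 'gemini', 'cadd', 'variation', 'rnaseq', 'smallrna']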
apache/airflow
airflow/contrib/hooks/sftp_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sftp_hook.py#L92-L115
def get_conn(self):
    """
    Returns an SFTP connection object
    """
    if self.conn is None:
        cnopts = pysftp.CnOpts()
        if self.no_host_key_check:
            cnopts.hostkeys = None
        cnopts.compression = self.compress
        conn_params = {
            'host': self.remote_host,
            'port': self.port,
            'username': self.username,
            'cnopts': cnopts
        }
        if self.password and self.password.strip():
            conn_params['password'] = self.password
        if self.key_file:
            conn_params['private_key'] = self.key_file
        if self.private_key_pass:
            conn_params['private_key_pass'] = self.private_key_pass

        self.conn = pysftp.Connection(**conn_params)
    return self.conn
[ "def", "get_conn", "(", "self", ")", ":", "if", "self", ".", "conn", "is", "None", ":", "cnopts", "=", "pysftp", ".", "CnOpts", "(", ")", "if", "self", ".", "no_host_key_check", ":", "cnopts", ".", "hostkeys", "=", "None", "cnopts", ".", "compression", "=", "self", ".", "compress", "conn_params", "=", "{", "'host'", ":", "self", ".", "remote_host", ",", "'port'", ":", "self", ".", "port", ",", "'username'", ":", "self", ".", "username", ",", "'cnopts'", ":", "cnopts", "}", "if", "self", ".", "password", "and", "self", ".", "password", ".", "strip", "(", ")", ":", "conn_params", "[", "'password'", "]", "=", "self", ".", "password", "if", "self", ".", "key_file", ":", "conn_params", "[", "'private_key'", "]", "=", "self", ".", "key_file", "if", "self", ".", "private_key_pass", ":", "conn_params", "[", "'private_key_pass'", "]", "=", "self", ".", "private_key_pass", "self", ".", "conn", "=", "pysftp", ".", "Connection", "(", "*", "*", "conn_params", ")", "return", "self", ".", "conn" ]
Returns an SFTP connection object
[ "Returns", "an", "SFTP", "connection", "object" ]
python
test
trevisanj/f311
f311/hapi.py
https://github.com/trevisanj/f311/blob/9e502a3d1e1f74d4290a8a0bae9a34ef8d7b29f7/f311/hapi.py#L2265-L2295
def describeTable(TableName):
    """
    INPUT PARAMETERS:
        TableName: name of the table to describe
    OUTPUT PARAMETERS:
        none
    ---
    DESCRIPTION:
        Print information about table, including parameter names,
        formats and wavenumber range.
    ---
    EXAMPLE OF USAGE:
        describeTable('sampletab')
    ---
    """
    print('-----------------------------------------')
    print(TableName + ' summary:')
    try:
        print('-----------------------------------------')
        print('Comment: \n' + LOCAL_TABLE_CACHE[TableName]['header']['comment'])
    except:
        pass
    print('Number of rows: ' + str(LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']))
    print('Table type: ' + str(LOCAL_TABLE_CACHE[TableName]['header']['table_type']))
    print('-----------------------------------------')
    print('            PAR_NAME           PAR_FORMAT')
    print('')
    for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
        par_format = LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]
        print('%20s %20s' % (par_name, par_format))
    print('-----------------------------------------')
[ "def", "describeTable", "(", "TableName", ")", ":", "print", "(", "'-----------------------------------------'", ")", "print", "(", "TableName", "+", "' summary:'", ")", "try", ":", "print", "(", "'-----------------------------------------'", ")", "print", "(", "'Comment: \\n'", "+", "LOCAL_TABLE_CACHE", "[", "TableName", "]", "[", "'header'", "]", "[", "'comment'", "]", ")", "except", ":", "pass", "print", "(", "'Number of rows: '", "+", "str", "(", "LOCAL_TABLE_CACHE", "[", "TableName", "]", "[", "'header'", "]", "[", "'number_of_rows'", "]", ")", ")", "print", "(", "'Table type: '", "+", "str", "(", "LOCAL_TABLE_CACHE", "[", "TableName", "]", "[", "'header'", "]", "[", "'table_type'", "]", ")", ")", "print", "(", "'-----------------------------------------'", ")", "print", "(", "' PAR_NAME PAR_FORMAT'", ")", "print", "(", "''", ")", "for", "par_name", "in", "LOCAL_TABLE_CACHE", "[", "TableName", "]", "[", "'header'", "]", "[", "'order'", "]", ":", "par_format", "=", "LOCAL_TABLE_CACHE", "[", "TableName", "]", "[", "'header'", "]", "[", "'format'", "]", "[", "par_name", "]", "print", "(", "'%20s %20s'", "%", "(", "par_name", ",", "par_format", ")", ")", "print", "(", "'-----------------------------------------'", ")" ]
INPUT PARAMETERS: TableName: name of the table to describe OUTPUT PARAMETERS: none --- DESCRIPTION: Print information about table, including parameter names, formats and wavenumber range. --- EXAMPLE OF USAGE: describeTable('sampletab') ---
[ "INPUT", "PARAMETERS", ":", "TableName", ":", "name", "of", "the", "table", "to", "describe", "OUTPUT", "PARAMETERS", ":", "none", "---", "DESCRIPTION", ":", "Print", "information", "about", "table", "including", "parameter", "names", "formats", "and", "wavenumber", "range", ".", "---", "EXAMPLE", "OF", "USAGE", ":", "describeTable", "(", "sampletab", ")", "---" ]
python
train
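describeTable only reads the table header out of the module-level LOCAL_TABLE_CACHE, so with the function copied into a script it can be driven by a hand-built entry (the field values below are made up for illustration):

LOCAL_TABLE_CACHE = {
    'sampletab': {
        'header': {
            'comment': 'demo table',
            'number_of_rows': 3,
            'table_type': 'column-fixed',
            'order': ['nu', 'sw'],
            'format': {'nu': '%12.6f', 'sw': '%10.3E'},
        }
    }
}

describeTable('sampletab')  # prints the header summary and the two parameters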
jjgomera/iapws
iapws/iapws08.py
https://github.com/jjgomera/iapws/blob/1e5812aab38212fb8a63736f61cdcfa427d223b1/iapws/iapws08.py#L275-L323
def _waterSupp(cls, T, P):
    """Get properties of pure water using the supplementary release
    SR7-09, Table 4, page 6"""
    tau = (T-273.15)/40
    pi = (P-0.101325)/100

    J = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3,
         3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7]
    K = [0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
         2, 3, 4, 5, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 0, 1]
    G = [0.101342743139674e3, 0.100015695367145e6, -0.254457654203630e4,
         0.284517778446287e3, -0.333146754253611e2, 0.420263108803084e1,
         -0.546428511471039, 0.590578347909402e1, -0.270983805184062e3,
         0.776153611613101e3, -0.196512550881220e3, 0.289796526294175e2,
         -0.213290083518327e1, -0.123577859330390e5, 0.145503645404680e4,
         -0.756558385769359e3, 0.273479662323528e3, -0.555604063817218e2,
         0.434420671917197e1, 0.736741204151612e3, -0.672507783145070e3,
         0.499360390819152e3, -0.239545330654412e3, 0.488012518593872e2,
         -0.166307106208905e1, -0.148185936433658e3, 0.397968445406972e3,
         -0.301815380621876e3, 0.152196371733841e3, -0.263748377232802e2,
         0.580259125842571e2, -0.194618310617595e3, 0.120520654902025e3,
         -0.552723052340152e2, 0.648190668077221e1, -0.189843846514172e2,
         0.635113936641785e2, -0.222897317140459e2, 0.817060541818112e1,
         0.305081646487967e1, -0.963108119393062e1]

    g, gt, gp, gtt, gtp, gpp = 0, 0, 0, 0, 0, 0
    for j, k, gi in zip(J, K, G):
        g += gi*tau**j*pi**k
        if j >= 1:
            gt += gi*j*tau**(j-1)*pi**k
        if k >= 1:
            gp += k*gi*tau**j*pi**(k-1)
        if j >= 2:
            gtt += j*(j-1)*gi*tau**(j-2)*pi**k
        if j >= 1 and k >= 1:
            gtp += j*k*gi*tau**(j-1)*pi**(k-1)
        if k >= 2:
            gpp += k*(k-1)*gi*tau**j*pi**(k-2)

    prop = {}
    prop["g"] = g*1e-3
    prop["gt"] = gt/40*1e-3
    prop["gp"] = gp/100*1e-6
    prop["gtt"] = gtt/40**2*1e-3
    prop["gtp"] = gtp/40/100*1e-6
    prop["gpp"] = gpp/100**2*1e-6
    prop["gs"] = 0
    prop["gsp"] = 0
    return prop
[ "def", "_waterSupp", "(", "cls", ",", "T", ",", "P", ")", ":", "tau", "=", "(", "T", "-", "273.15", ")", "/", "40", "pi", "=", "(", "P", "-", "0.101325", ")", "/", "100", "J", "=", "[", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "2", ",", "2", ",", "2", ",", "2", ",", "2", ",", "2", ",", "3", ",", "3", ",", "3", ",", "3", ",", "3", ",", "3", ",", "4", ",", "4", ",", "4", ",", "4", ",", "4", ",", "5", ",", "5", ",", "5", ",", "5", ",", "5", ",", "6", ",", "6", ",", "6", ",", "6", ",", "7", ",", "7", "]", "K", "=", "[", "0", ",", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "6", ",", "0", ",", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "0", ",", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "0", ",", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "0", ",", "1", ",", "2", ",", "3", ",", "4", ",", "0", ",", "1", ",", "2", ",", "3", ",", "4", ",", "0", ",", "1", ",", "2", ",", "3", ",", "0", ",", "1", "]", "G", "=", "[", "0.101342743139674e3", ",", "0.100015695367145e6", ",", "-", "0.254457654203630e4", ",", "0.284517778446287e3", ",", "-", "0.333146754253611e2", ",", "0.420263108803084e1", ",", "-", "0.546428511471039", ",", "0.590578347909402e1", ",", "-", "0.270983805184062e3", ",", "0.776153611613101e3", ",", "-", "0.196512550881220e3", ",", "0.289796526294175e2", ",", "-", "0.213290083518327e1", ",", "-", "0.123577859330390e5", ",", "0.145503645404680e4", ",", "-", "0.756558385769359e3", ",", "0.273479662323528e3", ",", "-", "0.555604063817218e2", ",", "0.434420671917197e1", ",", "0.736741204151612e3", ",", "-", "0.672507783145070e3", ",", "0.499360390819152e3", ",", "-", "0.239545330654412e3", ",", "0.488012518593872e2", ",", "-", "0.166307106208905e1", ",", "-", "0.148185936433658e3", ",", "0.397968445406972e3", ",", "-", "0.301815380621876e3", ",", "0.152196371733841e3", ",", "-", "0.263748377232802e2", ",", "0.580259125842571e2", ",", "-", "0.194618310617595e3", ",", "0.120520654902025e3", ",", "-", "0.552723052340152e2", ",", "0.648190668077221e1", ",", "-", "0.189843846514172e2", ",", "0.635113936641785e2", ",", "-", "0.222897317140459e2", ",", "0.817060541818112e1", ",", "0.305081646487967e1", ",", "-", "0.963108119393062e1", "]", "g", ",", "gt", ",", "gp", ",", "gtt", ",", "gtp", ",", "gpp", "=", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", "for", "j", ",", "k", ",", "gi", "in", "zip", "(", "J", ",", "K", ",", "G", ")", ":", "g", "+=", "gi", "*", "tau", "**", "j", "*", "pi", "**", "k", "if", "j", ">=", "1", ":", "gt", "+=", "gi", "*", "j", "*", "tau", "**", "(", "j", "-", "1", ")", "*", "pi", "**", "k", "if", "k", ">=", "1", ":", "gp", "+=", "k", "*", "gi", "*", "tau", "**", "j", "*", "pi", "**", "(", "k", "-", "1", ")", "if", "j", ">=", "2", ":", "gtt", "+=", "j", "*", "(", "j", "-", "1", ")", "*", "gi", "*", "tau", "**", "(", "j", "-", "2", ")", "*", "pi", "**", "k", "if", "j", ">=", "1", "and", "k", ">=", "1", ":", "gtp", "+=", "j", "*", "k", "*", "gi", "*", "tau", "**", "(", "j", "-", "1", ")", "*", "pi", "**", "(", "k", "-", "1", ")", "if", "k", ">=", "2", ":", "gpp", "+=", "k", "*", "(", "k", "-", "1", ")", "*", "gi", "*", "tau", "**", "j", "*", "pi", "**", "(", "k", "-", "2", ")", "prop", "=", "{", "}", "prop", "[", "\"g\"", "]", "=", "g", "*", "1e-3", "prop", "[", "\"gt\"", "]", "=", "gt", "/", "40", "*", "1e-3", "prop", "[", "\"gp\"", "]", "=", "gp", "/", "100", "*", "1e-6", "prop", "[", "\"gtt\"", "]", "=", "gtt", "/", "40", "**", "2", "*", "1e-3", "prop", 
"[", "\"gtp\"", "]", "=", "gtp", "/", "40", "/", "100", "*", "1e-6", "prop", "[", "\"gpp\"", "]", "=", "gpp", "/", "100", "**", "2", "*", "1e-6", "prop", "[", "\"gs\"", "]", "=", "0", "prop", "[", "\"gsp\"", "]", "=", "0", "return", "prop" ]
Get properties of pure water using the supplementary release SR7-09, Table4 pag 6
[ "Get", "properties", "of", "pure", "water", "using", "the", "supplementary", "release", "SR7", "-", "09", "Table4", "pag", "6" ]
python
train
Min-ops/cruddy
cruddy/scripts/cli.py
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/scripts/cli.py#L142-L147
def delete(handler, item_id, id_name):
    """Delete an item"""
    data = {'operation': 'delete',
            'id': item_id,
            'id_name': id_name}
    handler.invoke(data)
[ "def", "delete", "(", "handler", ",", "item_id", ",", "id_name", ")", ":", "data", "=", "{", "'operation'", ":", "'delete'", ",", "'id'", ":", "item_id", ",", "'id_name'", ":", "id_name", "}", "handler", ".", "invoke", "(", "data", ")" ]
Delete an item
[ "Delete", "an", "item" ]
python
train
saltstack/salt
salt/modules/cpan.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cpan.py#L152-L214
def show(module):
    '''
    Show information about a specific Perl module

    CLI Example:

    .. code-block:: bash

        salt '*' cpan.show Template::Alloy
    '''
    ret = {}
    ret['name'] = module

    # This section parses out details from CPAN, if possible
    cmd = 'cpan -D {0}'.format(module)
    out = __salt__['cmd.run'](cmd).splitlines()
    mode = 'skip'
    info = []
    for line in out:
        if line.startswith('-------------'):
            mode = 'parse'
            continue
        if mode == 'skip':
            continue
        info.append(line)

    if len(info) == 6:
        # If the module is not installed, we'll be short a line
        info.insert(2, '')

    if len(info) < 6:
        # This must not be a real package
        ret['error'] = 'This package does not seem to exist'
        return ret

    ret['description'] = info[0].strip()
    ret['cpan file'] = info[1].strip()
    if info[2].strip():
        ret['installed file'] = info[2].strip()
    else:
        ret['installed file'] = None
    comps = info[3].split(':')
    if len(comps) > 1:
        ret['installed version'] = comps[1].strip()
    if 'installed version' not in ret or not ret['installed version']:
        ret['installed version'] = None
    comps = info[4].split(':')
    comps = comps[1].split()
    ret['cpan version'] = comps[0].strip()
    ret['author name'] = info[5].strip()
    ret['author email'] = info[6].strip()

    # Check and see if there are cpan build directories
    config = show_config()
    build_dir = config.get('build_dir', None)
    if build_dir is not None:
        ret['cpan build dirs'] = []
        builds = os.listdir(build_dir)
        pfile = module.replace('::', '-')
        for file_ in builds:
            if file_.startswith(pfile):
                ret['cpan build dirs'].append(os.path.join(build_dir, file_))

    return ret
[ "def", "show", "(", "module", ")", ":", "ret", "=", "{", "}", "ret", "[", "'name'", "]", "=", "module", "# This section parses out details from CPAN, if possible", "cmd", "=", "'cpan -D {0}'", ".", "format", "(", "module", ")", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ")", ".", "splitlines", "(", ")", "mode", "=", "'skip'", "info", "=", "[", "]", "for", "line", "in", "out", ":", "if", "line", ".", "startswith", "(", "'-------------'", ")", ":", "mode", "=", "'parse'", "continue", "if", "mode", "==", "'skip'", ":", "continue", "info", ".", "append", "(", "line", ")", "if", "len", "(", "info", ")", "==", "6", ":", "# If the module is not installed, we'll be short a line", "info", ".", "insert", "(", "2", ",", "''", ")", "if", "len", "(", "info", ")", "<", "6", ":", "# This must not be a real package", "ret", "[", "'error'", "]", "=", "'This package does not seem to exist'", "return", "ret", "ret", "[", "'description'", "]", "=", "info", "[", "0", "]", ".", "strip", "(", ")", "ret", "[", "'cpan file'", "]", "=", "info", "[", "1", "]", ".", "strip", "(", ")", "if", "info", "[", "2", "]", ".", "strip", "(", ")", ":", "ret", "[", "'installed file'", "]", "=", "info", "[", "2", "]", ".", "strip", "(", ")", "else", ":", "ret", "[", "'installed file'", "]", "=", "None", "comps", "=", "info", "[", "3", "]", ".", "split", "(", "':'", ")", "if", "len", "(", "comps", ")", ">", "1", ":", "ret", "[", "'installed version'", "]", "=", "comps", "[", "1", "]", ".", "strip", "(", ")", "if", "'installed version'", "not", "in", "ret", "or", "not", "ret", "[", "'installed version'", "]", ":", "ret", "[", "'installed version'", "]", "=", "None", "comps", "=", "info", "[", "4", "]", ".", "split", "(", "':'", ")", "comps", "=", "comps", "[", "1", "]", ".", "split", "(", ")", "ret", "[", "'cpan version'", "]", "=", "comps", "[", "0", "]", ".", "strip", "(", ")", "ret", "[", "'author name'", "]", "=", "info", "[", "5", "]", ".", "strip", "(", ")", "ret", "[", "'author email'", "]", "=", "info", "[", "6", "]", ".", "strip", "(", ")", "# Check and see if there are cpan build directories", "config", "=", "show_config", "(", ")", "build_dir", "=", "config", ".", "get", "(", "'build_dir'", ",", "None", ")", "if", "build_dir", "is", "not", "None", ":", "ret", "[", "'cpan build dirs'", "]", "=", "[", "]", "builds", "=", "os", ".", "listdir", "(", "build_dir", ")", "pfile", "=", "module", ".", "replace", "(", "'::'", ",", "'-'", ")", "for", "file_", "in", "builds", ":", "if", "file_", ".", "startswith", "(", "pfile", ")", ":", "ret", "[", "'cpan build dirs'", "]", ".", "append", "(", "os", ".", "path", ".", "join", "(", "build_dir", ",", "file_", ")", ")", "return", "ret" ]
Show information about a specific Perl module CLI Example: .. code-block:: bash salt '*' cpan.show Template::Alloy
[ "Show", "information", "about", "a", "specific", "Perl", "module" ]
python
train
hyperledger/indy-sdk
wrappers/python/indy/crypto.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/crypto.py#L199-L253
async def auth_crypt(wallet_handle: int, sender_vk: str, recipient_vk: str, msg: bytes) -> bytes:
    """
    **** THIS FUNCTION WILL BE DEPRECATED USE pack_message INSTEAD ****

    Encrypt a message by authenticated-encryption scheme.

    Sender can encrypt a confidential message specifically for Recipient, using Sender's public key.
    Using Recipient's public key, Sender can compute a shared secret key.
    Using Sender's public key and his secret key, Recipient can compute the exact same shared secret key.
    That shared secret key can be used to verify that the encrypted message was not tampered with,
    before eventually decrypting it.

    Note to use DID keys with this function you can call indy_key_for_did to get key id (verkey)
    for specific DID.

    :param wallet_handle: wallet handler (created by open_wallet).
    :param sender_vk: id (verkey) of my key. The key must be created by calling
        indy_create_key or indy_create_and_store_my_did
    :param recipient_vk: id (verkey) of their key
    :param msg: a message to be signed
    :return: encrypted message as an array of bytes
    """
    logger = logging.getLogger(__name__)
    logger.debug("auth_crypt: >>> wallet_handle: %r,sender_vk: %r, recipient_vk: %r, msg: %r",
                 wallet_handle,
                 sender_vk,
                 recipient_vk,
                 msg)

    def transform_cb(arr_ptr: POINTER(c_uint8), arr_len: c_uint32):
        return bytes(arr_ptr[:arr_len]),

    if not hasattr(auth_crypt, "cb"):
        logger.debug("auth_crypt: Creating callback")
        auth_crypt.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, POINTER(c_uint8), c_uint32),
                                  transform_cb)

    c_wallet_handle = c_int32(wallet_handle)
    c_sender_vk = c_char_p(sender_vk.encode('utf-8'))
    c_recipient_vk = c_char_p(recipient_vk.encode('utf-8'))
    c_msg_len = c_uint32(len(msg))

    res = await do_call('indy_crypto_auth_crypt',
                        c_wallet_handle,
                        c_sender_vk,
                        c_recipient_vk,
                        msg,
                        c_msg_len,
                        auth_crypt.cb)

    logger.debug("auth_crypt: <<< res: %r", res)
    return res
[ "async", "def", "auth_crypt", "(", "wallet_handle", ":", "int", ",", "sender_vk", ":", "str", ",", "recipient_vk", ":", "str", ",", "msg", ":", "bytes", ")", "->", "bytes", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "\"auth_crypt: >>> wallet_handle: %r,sender_vk: %r, recipient_vk: %r, msg: %r\"", ",", "wallet_handle", ",", "sender_vk", ",", "recipient_vk", ",", "msg", ")", "def", "transform_cb", "(", "arr_ptr", ":", "POINTER", "(", "c_uint8", ")", ",", "arr_len", ":", "c_uint32", ")", ":", "return", "bytes", "(", "arr_ptr", "[", ":", "arr_len", "]", ")", ",", "if", "not", "hasattr", "(", "auth_crypt", ",", "\"cb\"", ")", ":", "logger", ".", "debug", "(", "\"auth_crypt: Creating callback\"", ")", "auth_crypt", ".", "cb", "=", "create_cb", "(", "CFUNCTYPE", "(", "None", ",", "c_int32", ",", "c_int32", ",", "POINTER", "(", "c_uint8", ")", ",", "c_uint32", ")", ",", "transform_cb", ")", "c_wallet_handle", "=", "c_int32", "(", "wallet_handle", ")", "c_sender_vk", "=", "c_char_p", "(", "sender_vk", ".", "encode", "(", "'utf-8'", ")", ")", "c_recipient_vk", "=", "c_char_p", "(", "recipient_vk", ".", "encode", "(", "'utf-8'", ")", ")", "c_msg_len", "=", "c_uint32", "(", "len", "(", "msg", ")", ")", "res", "=", "await", "do_call", "(", "'indy_crypto_auth_crypt'", ",", "c_wallet_handle", ",", "c_sender_vk", ",", "c_recipient_vk", ",", "msg", ",", "c_msg_len", ",", "auth_crypt", ".", "cb", ")", "logger", ".", "debug", "(", "\"auth_crypt: <<< res: %r\"", ",", "res", ")", "return", "res" ]
**** THIS FUNCTION WILL BE DEPRECATED USE pack_message INSTEAD **** Encrypt a message by authenticated-encryption scheme. Sender can encrypt a confidential message specifically for Recipient, using Sender's public key. Using Recipient's public key, Sender can compute a shared secret key. Using Sender's public key and his secret key, Recipient can compute the exact same shared secret key. That shared secret key can be used to verify that the encrypted message was not tampered with, before eventually decrypting it. Note to use DID keys with this function you can call indy_key_for_did to get key id (verkey) for specific DID. :param wallet_handle: wallet handler (created by open_wallet). :param sender_vk: id (verkey) of my key. The key must be created by calling indy_create_key or indy_create_and_store_my_did :param recipient_vk: id (verkey) of their key :param msg: a message to be signed :return: encrypted message as an array of bytes
[ "****", "THIS", "FUNCTION", "WILL", "BE", "DEPRECATED", "USE", "pack_message", "INSTEAD", "****" ]
python
train
aio-libs/aioodbc
aioodbc/connection.py
https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/connection.py#L15-L40
def connect(*, dsn, autocommit=False, ansi=False, timeout=0, loop=None,
            executor=None, echo=False, after_created=None, **kwargs):
    """Accepts an ODBC connection string and returns a new Connection
    object.

    The connection string can be passed as the string `str`, as a list of
    keywords, or a combination of the two.  Any keywords except autocommit,
    ansi, and timeout are simply added to the connection string.

    :param autocommit bool: False or zero, the default, if True or non-zero,
        the connection is put into ODBC autocommit mode and statements are
        committed automatically.
    :param ansi bool: By default, pyodbc first attempts to connect using
        the Unicode version of SQLDriverConnectW. If the driver returns IM001
        indicating it does not support the Unicode version, the ANSI version
        is tried.
    :param timeout int: An integer login timeout in seconds, used to set
        the SQL_ATTR_LOGIN_TIMEOUT attribute of the connection. The default
        is 0 which means the database's default timeout, if any, is used.
    :param after_created callable: supports custom configuration after the
        connection is connected. Must be an async unary function, or leave
        it as None.
    """
    return _ContextManager(_connect(dsn=dsn, autocommit=autocommit, ansi=ansi,
                                    timeout=timeout, loop=loop,
                                    executor=executor, echo=echo,
                                    after_created=after_created, **kwargs))
[ "def", "connect", "(", "*", ",", "dsn", ",", "autocommit", "=", "False", ",", "ansi", "=", "False", ",", "timeout", "=", "0", ",", "loop", "=", "None", ",", "executor", "=", "None", ",", "echo", "=", "False", ",", "after_created", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "_ContextManager", "(", "_connect", "(", "dsn", "=", "dsn", ",", "autocommit", "=", "autocommit", ",", "ansi", "=", "ansi", ",", "timeout", "=", "timeout", ",", "loop", "=", "loop", ",", "executor", "=", "executor", ",", "echo", "=", "echo", ",", "after_created", "=", "after_created", ",", "*", "*", "kwargs", ")", ")" ]
Accepts an ODBC connection string and returns a new Connection object. The connection string can be passed as the string `str`, as a list of keywords, or a combination of the two. Any keywords except autocommit, ansi, and timeout are simply added to the connection string. :param autocommit bool: False or zero, the default, if True or non-zero, the connection is put into ODBC autocommit mode and statements are committed automatically. :param ansi bool: By default, pyodbc first attempts to connect using the Unicode version of SQLDriverConnectW. If the driver returns IM001 indicating it does not support the Unicode version, the ANSI version is tried. :param timeout int: An integer login timeout in seconds, used to set the SQL_ATTR_LOGIN_TIMEOUT attribute of the connection. The default is 0 which means the database's default timeout, if any, is used. :param after_created callable: supports custom configuration after the connection is connected. Must be an async unary function, or leave it as None.
[ "Accepts", "an", "ODBC", "connection", "string", "and", "returns", "a", "new", "Connection", "object", "." ]
python
train
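The _ContextManager wrapper lets the result be awaited directly or used as an async context manager; a hedged usage sketch (the DSN is a placeholder and assumes a SQLite ODBC driver is installed):

import asyncio
import aioodbc

async def main():
    dsn = 'Driver=SQLite3;Database=sqlite.db'  # placeholder DSN
    async with aioodbc.connect(dsn=dsn, echo=True) as conn:
        async with conn.cursor() as cur:
            await cur.execute('SELECT 42;')
            print(await cur.fetchall())

asyncio.run(main())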
erocarrera/pefile
peutils.py
https://github.com/erocarrera/pefile/blob/8a78a2e251a3f2336c232bf411133927b479edf2/peutils.py#L381-L387
def load(self, filename=None, data=None):
    """Load a PEiD signature file.

    Invoking this method on different files combines the signatures.
    """
    self.__load(filename=filename, data=data)
[ "def", "load", "(", "self", ",", "filename", "=", "None", ",", "data", "=", "None", ")", ":", "self", ".", "__load", "(", "filename", "=", "filename", ",", "data", "=", "data", ")" ]
Load a PEiD signature file. Invoking this method on different files combines the signatures.
[ "Load", "a", "PEiD", "signature", "file", "." ]
python
train
contains-io/rcli
rcli/autodetect.py
https://github.com/contains-io/rcli/blob/cdd6191a0e0a19bc767f84921650835d099349cf/rcli/autodetect.py#L124-L148
def _append_commands(dct,          # type: typing.Dict[str, typing.Set[str]]
                     module_name,  # type: str
                     commands      # type: typing.Iterable[_EntryPoint]
                     ):
    # type: (...) -> None
    """Append entry point strings representing the given Command objects.

    Args:
        dct: The dictionary to append with entry point strings. Each key will
            be a primary command with a value containing a list of entry point
            strings representing a Command.
        module_name: The name of the module in which the command object
            resides.
        commands: A list of Command objects to convert to entry point strings.
    """
    for command in commands:
        entry_point = '{command}{subcommand} = {module}{callable}'.format(
            command=command.command,
            subcommand=(':{}'.format(command.subcommand)
                        if command.subcommand else ''),
            module=module_name,
            callable=(':{}'.format(command.callable)
                      if command.callable else ''),
        )
        dct.setdefault(command.command, set()).add(entry_point)
[ "def", "_append_commands", "(", "dct", ",", "# type: typing.Dict[str, typing.Set[str]]", "module_name", ",", "# type: str", "commands", "# type:typing.Iterable[_EntryPoint]", ")", ":", "# type: (...) -> None", "for", "command", "in", "commands", ":", "entry_point", "=", "'{command}{subcommand} = {module}{callable}'", ".", "format", "(", "command", "=", "command", ".", "command", ",", "subcommand", "=", "(", "':{}'", ".", "format", "(", "command", ".", "subcommand", ")", "if", "command", ".", "subcommand", "else", "''", ")", ",", "module", "=", "module_name", ",", "callable", "=", "(", "':{}'", ".", "format", "(", "command", ".", "callable", ")", "if", "command", ".", "callable", "else", "''", ")", ",", ")", "dct", ".", "setdefault", "(", "command", ".", "command", ",", "set", "(", ")", ")", ".", "add", "(", "entry_point", ")" ]
Append entry point strings representing the given Command objects. Args: dct: The dictionary to append with entry point strings. Each key will be a primary command with a value containing a list of entry point strings representing a Command. module_name: The name of the module in which the command object resides. commands: A list of Command objects to convert to entry point strings.
[ "Append", "entry", "point", "strings", "representing", "the", "given", "Command", "objects", "." ]
python
train
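With the function above in scope, a namedtuple can stand in for the private _EntryPoint type to show the entry-point strings it builds (module name and commands are made up):

from collections import namedtuple

_EntryPoint = namedtuple('_EntryPoint', 'command subcommand callable')

dct = {}
_append_commands(dct, 'mypkg.cli', [
    _EntryPoint('say', 'hello', 'say_hello'),
    _EntryPoint('say', None, None),
])
print(dct)
# {'say': {'say:hello = mypkg.cli:say_hello', 'say = mypkg.cli'}}  (set order may vary)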
materialsproject/pymatgen
pymatgen/analysis/diffraction/core.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/diffraction/core.py#L181-L213
def get_unique_families(hkls):
    """
    Returns unique families of Miller indices. Families must be permutations
    of each other.

    Args:
        hkls ([h, k, l]): List of Miller indices.

    Returns:
        {hkl: multiplicity}: A dict with unique hkl and multiplicity.
    """
    # TODO: Definitely can be sped up.
    def is_perm(hkl1, hkl2):
        h1 = np.abs(hkl1)
        h2 = np.abs(hkl2)
        return all([i == j for i, j in zip(sorted(h1), sorted(h2))])

    unique = collections.defaultdict(list)
    for hkl1 in hkls:
        found = False
        for hkl2 in unique.keys():
            if is_perm(hkl1, hkl2):
                found = True
                unique[hkl2].append(hkl1)
                break
        if not found:
            unique[hkl1].append(hkl1)

    pretty_unique = {}
    for k, v in unique.items():
        pretty_unique[sorted(v)[-1]] = len(v)

    return pretty_unique
[ "def", "get_unique_families", "(", "hkls", ")", ":", "# TODO: Definitely can be sped up.", "def", "is_perm", "(", "hkl1", ",", "hkl2", ")", ":", "h1", "=", "np", ".", "abs", "(", "hkl1", ")", "h2", "=", "np", ".", "abs", "(", "hkl2", ")", "return", "all", "(", "[", "i", "==", "j", "for", "i", ",", "j", "in", "zip", "(", "sorted", "(", "h1", ")", ",", "sorted", "(", "h2", ")", ")", "]", ")", "unique", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "hkl1", "in", "hkls", ":", "found", "=", "False", "for", "hkl2", "in", "unique", ".", "keys", "(", ")", ":", "if", "is_perm", "(", "hkl1", ",", "hkl2", ")", ":", "found", "=", "True", "unique", "[", "hkl2", "]", ".", "append", "(", "hkl1", ")", "break", "if", "not", "found", ":", "unique", "[", "hkl1", "]", ".", "append", "(", "hkl1", ")", "pretty_unique", "=", "{", "}", "for", "k", ",", "v", "in", "unique", ".", "items", "(", ")", ":", "pretty_unique", "[", "sorted", "(", "v", ")", "[", "-", "1", "]", "]", "=", "len", "(", "v", ")", "return", "pretty_unique" ]
Returns unique families of Miller indices. Families must be permutations of each other. Args: hkls ([h, k, l]): List of Miller indices. Returns: {hkl: multiplicity}: A dict with unique hkl and multiplicity.
[ "Returns", "unique", "families", "of", "Miller", "indices", ".", "Families", "must", "be", "permutations", "of", "each", "other", "." ]
python
train
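With get_unique_families in scope (plus numpy as np and collections imported), the grouping can be checked on a small input: the three permutations of (1, 0, 0) collapse into one family keyed by their "largest" member:

families = get_unique_families([(1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 0)])
print(families)
# {(1, 0, 0): 3, (1, 1, 0): 1}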
RudolfCardinal/pythonlib
cardinal_pythonlib/slurm.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/slurm.py#L66-L211
def launch_slurm(jobname: str,
                 cmd: str,
                 memory_mb: int,
                 project: str,
                 qos: str,
                 email: str,
                 duration: timedelta,
                 tasks_per_node: int,
                 cpus_per_task: int,
                 partition: str = "",
                 modules: List[str] = None,
                 directory: str = os.getcwd(),
                 encoding: str = "ascii") -> None:
    """
    Launch a job into the SLURM environment.

    Args:
        jobname: name of the job
        cmd: command to be executed
        memory_mb: maximum memory requirement per process (Mb)
        project: project name
        qos: quality-of-service name
        email: user's e-mail address
        duration: maximum duration per job
        tasks_per_node: tasks per (cluster) node
        cpus_per_task: CPUs per task
        partition: cluster partition name
        modules: SLURM modules to load
        directory: directory to change to
        encoding: encoding to apply to launch script as sent to ``sbatch``
    """
    if partition:
        partition_cmd = "#SBATCH -p {}".format(partition)
    else:
        partition_cmd = ""
    if modules is None:
        modules = ["default-wbic"]
    log.info("Launching SLURM job: {}", jobname)
    script = """#!/bin/bash

#! Name of the job:
#SBATCH -J {jobname}

#! Which project should jobs run under:
#SBATCH -A {project}

#! What QoS [Quality of Service] should the job run in?
#SBATCH --qos={qos}

#! How much resource should be allocated?
#SBATCH --tasks-per-node={tasks_per_node}
#SBATCH --cpus-per-task={cpus_per_task}

#! Memory requirements
#SBATCH --mem={memory_mb}

#! How much wall-clock time will be required?
#SBATCH --time={duration}

#! What e-mail address to use for notifications?
#SBATCH --mail-user={email}

#! What types of email messages do you wish to receive?
#SBATCH --mail-type=ALL

#! Uncomment this to prevent the job from being requeued (e.g. if
#! interrupted by node failure or system downtime):
#! SBATCH --no-requeue

#! Partition
{partition_cmd}

#! sbatch directives end here (put any additional directives above this line)

#! ############################################################
#! Modify the settings below to specify the application's environment, location
#! and launch method:

#! Optionally modify the environment seen by the application
#! (note that SLURM reproduces the environment at submission irrespective of ~/.bashrc):
. /etc/profile.d/modules.sh # Leave this line (enables the module command)
module purge # Removes all modules still loaded
module load {modules} # Basic one, e.g. default-wbic, is REQUIRED - loads the basic environment

#! Insert additional module load commands after this line if needed:

#! Full path to your application executable:
application="hostname"

#! Run options for the application:
options=""

#! Work directory (i.e. where the job will run):
workdir="$SLURM_SUBMIT_DIR" # The value of SLURM_SUBMIT_DIR sets workdir to the directory
                            # in which sbatch is run.

#! Are you using OpenMP (NB this is **unrelated to OpenMPI**)? If so increase this
#! safe value to no more than 24:
export OMP_NUM_THREADS=24

# Command line to be submitted by SLURM:
CMD="{cmd}"

###############################################################
### You should not have to change anything below this line ####
###############################################################

cd $workdir
echo -e "Changed directory to `pwd`.\n"

JOBID=$SLURM_JOB_ID

echo -e "JobID: $JOBID\n======"
echo "Time: `date`"
echo "Running on master node: `hostname`"
echo "Current directory: `pwd`"

if [ "$SLURM_JOB_NODELIST" ]; then
    #! Create a machine file:
    export NODEFILE=`/usr/bin/generate_pbs_nodefile`
    cat $NODEFILE | uniq > machine.file.$JOBID
    echo -e "\nNodes allocated:\n================"
    echo `cat machine.file.$JOBID | sed -e 's/\..*$//g'`
fi

echo -e "\nExecuting command:\n==================\n$CMD\n"

eval $CMD
    """.format(  # noqa
        cmd=cmd,
        cpus_per_task=cpus_per_task,
        duration=strfdelta(duration, SLURM_TIMEDELTA_FMT),
        email=email,
        jobname=jobname,
        memory_mb=memory_mb,
        modules=" ".join(modules),
        partition_cmd=partition_cmd,
        project=project,
        qos=qos,
        tasks_per_node=tasks_per_node,
    )
    cmdargs = ["sbatch"]
    with pushd(directory):
        p = Popen(cmdargs, stdin=PIPE)
        p.communicate(input=script.encode(encoding))
[ "def", "launch_slurm", "(", "jobname", ":", "str", ",", "cmd", ":", "str", ",", "memory_mb", ":", "int", ",", "project", ":", "str", ",", "qos", ":", "str", ",", "email", ":", "str", ",", "duration", ":", "timedelta", ",", "tasks_per_node", ":", "int", ",", "cpus_per_task", ":", "int", ",", "partition", ":", "str", "=", "\"\"", ",", "modules", ":", "List", "[", "str", "]", "=", "None", ",", "directory", ":", "str", "=", "os", ".", "getcwd", "(", ")", ",", "encoding", ":", "str", "=", "\"ascii\"", ")", "->", "None", ":", "if", "partition", ":", "partition_cmd", "=", "\"#SBATCH -p {}\"", ".", "format", "(", "partition", ")", "else", ":", "partition_cmd", "=", "\"\"", "if", "modules", "is", "None", ":", "modules", "=", "[", "\"default-wbic\"", "]", "log", ".", "info", "(", "\"Launching SLURM job: {}\"", ",", "jobname", ")", "script", "=", "\"\"\"#!/bin/bash\n\n#! Name of the job:\n#SBATCH -J {jobname}\n\n#! Which project should jobs run under:\n#SBATCH -A {project}\n\n#! What QoS [Quality of Service] should the job run in?\n#SBATCH --qos={qos}\n\n#! How much resource should be allocated?\n#SBATCH --tasks-per-node={tasks_per_node}\n#SBATCH --cpus-per-task={cpus_per_task}\n\n#! Memory requirements\n#SBATCH --mem={memory_mb}\n\n#! How much wall-clock time will be required?\n#SBATCH --time={duration}\n\n#! What e-mail address to use for notifications?\n#SBATCH --mail-user={email}\n\n#! What types of email messages do you wish to receive?\n#SBATCH --mail-type=ALL\n\n#! Uncomment this to prevent the job from being requeued (e.g. if\n#! interrupted by node failure or system downtime):\n#! SBATCH --no-requeue\n\n#! Partition\n{partition_cmd}\n\n#! sbatch directives end here (put any additional directives above this line)\n\n#! ############################################################\n#! Modify the settings below to specify the application's environment, location\n#! and launch method:\n\n#! Optionally modify the environment seen by the application\n#! (note that SLURM reproduces the environment at submission irrespective of ~/.bashrc):\n. /etc/profile.d/modules.sh # Leave this line (enables the module command)\nmodule purge # Removes all modules still loaded\nmodule load {modules} # Basic one, e.g. default-wbic, is REQUIRED - loads the basic environment\n\n#! Insert additional module load commands after this line if needed:\n\n#! Full path to your application executable:\napplication=\"hostname\"\n\n#! Run options for the application:\noptions=\"\"\n\n#! Work directory (i.e. where the job will run):\nworkdir=\"$SLURM_SUBMIT_DIR\" # The value of SLURM_SUBMIT_DIR sets workdir to the directory\n # in which sbatch is run.\n\n#! Are you using OpenMP (NB this is **unrelated to OpenMPI**)? If so increase this\n#! safe value to no more than 24:\nexport OMP_NUM_THREADS=24\n\n# Command line to be submited by SLURM:\nCMD=\"{cmd}\"\n\n###############################################################\n### You should not have to change anything below this line ####\n###############################################################\n\ncd $workdir\necho -e \"Changed directory to `pwd`.\\n\"\n\nJOBID=$SLURM_JOB_ID\n\necho -e \"JobID: $JOBID\\n======\"\necho \"Time: `date`\"\necho \"Running on master node: `hostname`\"\necho \"Current directory: `pwd`\"\n\nif [ \"$SLURM_JOB_NODELIST\" ]; then\n #! 
Create a machine file:\n export NODEFILE=`/usr/bin/generate_pbs_nodefile`\n cat $NODEFILE | uniq > machine.file.$JOBID\n echo -e \"\\nNodes allocated:\\n================\"\n echo `cat machine.file.$JOBID | sed -e 's/\\..*$//g'`\nfi\n\necho -e \"\\nExecuting command:\\n==================\\n$CMD\\n\"\n\neval $CMD\n \"\"\"", ".", "format", "(", "# noqa", "cmd", "=", "cmd", ",", "cpus_per_task", "=", "cpus_per_task", ",", "duration", "=", "strfdelta", "(", "duration", ",", "SLURM_TIMEDELTA_FMT", ")", ",", "email", "=", "email", ",", "jobname", "=", "jobname", ",", "memory_mb", "=", "memory_mb", ",", "modules", "=", "\" \"", ".", "join", "(", "modules", ")", ",", "partition_cmd", "=", "partition_cmd", ",", "project", "=", "project", ",", "qos", "=", "qos", ",", "tasks_per_node", "=", "tasks_per_node", ",", ")", "cmdargs", "=", "[", "\"sbatch\"", "]", "with", "pushd", "(", "directory", ")", ":", "p", "=", "Popen", "(", "cmdargs", ",", "stdin", "=", "PIPE", ")", "p", ".", "communicate", "(", "input", "=", "script", ".", "encode", "(", "encoding", ")", ")" ]
Launch a job into the SLURM environment. Args: jobname: name of the job cmd: command to be executed memory_mb: maximum memory requirement per process (Mb) project: project name qos: quality-of-service name email: user's e-mail address duration: maximum duration per job tasks_per_node: tasks per (cluster) node cpus_per_task: CPUs per task partition: cluster partition name modules: SLURM modules to load directory: directory to change to encoding: encoding to apply to launch script as sent to ``sbatch``
[ "Launch", "a", "job", "into", "the", "SLURM", "environment", "." ]
python
train
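A minimal usage sketch for the launch_slurm helper above, assuming it has been imported alongside Python's datetime.timedelta; the job name, project, QoS, and e-mail values are placeholders, and sbatch must be on the PATH of the submitting host:

from datetime import timedelta

# Submit a trivial one-CPU job; all account-specific values are hypothetical.
launch_slurm(
    jobname="hostname-test",
    cmd="hostname",
    memory_mb=1024,
    project="MYPROJECT",
    qos="normal",
    email="user@example.com",
    duration=timedelta(minutes=10),
    tasks_per_node=1,
    cpus_per_task=1,
)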
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L219-L238
def _BuildMessageFromTypeName(type_name, descriptor_pool):
  """Returns a protobuf message instance.

  Args:
    type_name: Fully-qualified protobuf message type name string.
    descriptor_pool: DescriptorPool instance.

  Returns:
    A Message instance of type matching type_name, or None if a
    Descriptor wasn't found matching type_name.
  """
  # pylint: disable=g-import-not-at-top
  from google.protobuf import symbol_database
  database = symbol_database.Default()
  try:
    message_descriptor = descriptor_pool.FindMessageTypeByName(type_name)
  except KeyError:
    return None
  message_type = database.GetPrototype(message_descriptor)
  return message_type()
[ "def", "_BuildMessageFromTypeName", "(", "type_name", ",", "descriptor_pool", ")", ":", "# pylint: disable=g-import-not-at-top", "from", "google", ".", "protobuf", "import", "symbol_database", "database", "=", "symbol_database", ".", "Default", "(", ")", "try", ":", "message_descriptor", "=", "descriptor_pool", ".", "FindMessageTypeByName", "(", "type_name", ")", "except", "KeyError", ":", "return", "None", "message_type", "=", "database", ".", "GetPrototype", "(", "message_descriptor", ")", "return", "message_type", "(", ")" ]
Returns a protobuf message instance. Args: type_name: Fully-qualified protobuf message type name string. descriptor_pool: DescriptorPool instance. Returns: A Message instance of type matching type_name, or None if a Descriptor wasn't found matching type_name.
[ "Returns", "a", "protobuf", "message", "instance", "." ]
python
train
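A small sketch of how this private helper resolves a type name, assuming the well-known Timestamp type has been registered in the default descriptor pool by importing its generated module:

from google.protobuf import descriptor_pool
from google.protobuf import timestamp_pb2  # noqa: F401 -- importing registers Timestamp in the default pool

# Returns a fresh Timestamp message; an unknown name would return None.
msg = _BuildMessageFromTypeName("google.protobuf.Timestamp",
                                descriptor_pool.Default())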
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/zeromq/driver.py
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/zeromq/driver.py#L162-L171
def on_stop(self): """ stop subscriber """ LOGGER.debug("zeromq.Subscriber.on_stop") self.running = False while self.is_started: time.sleep(0.1) self.zmqsocket.close() self.zmqcontext.destroy()
[ "def", "on_stop", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "\"zeromq.Subscriber.on_stop\"", ")", "self", ".", "running", "=", "False", "while", "self", ".", "is_started", ":", "time", ".", "sleep", "(", "0.1", ")", "self", ".", "zmqsocket", ".", "close", "(", ")", "self", ".", "zmqcontext", ".", "destroy", "(", ")" ]
stop subscriber
[ "stop", "subscriber" ]
python
train
kytos/kytos-utils
kytos/utils/users.py
https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/users.py#L81-L116
def ask_question(self, field_name, pattern=NAME_PATTERN, is_required=False, password=False): """Ask a question and get the input values. This method will validate the input values. Args: field_name(string): Field name used to ask for input value. pattern(tuple): Pattern to validate the input value. is_required(bool): Boolean value if the input value is required. password(bool): Boolean value to get input password with mask. Returns: input_value(string): Input value validated. """ input_value = "" question = ("Insert the field using the pattern below:" "\n{}\n{}: ".format(pattern[0], field_name)) while not input_value: input_value = getpass(question) if password else input(question) if not (input_value or is_required): break if password: confirm_password = getpass('Confirm your password: ') if confirm_password != input_value: print("Password does not match") input_value = "" if not self.valid_attribute(input_value, pattern[1]): error_message = "The content must fit the pattern: {}\n" print(error_message.format(pattern[0])) input_value = "" return input_value
[ "def", "ask_question", "(", "self", ",", "field_name", ",", "pattern", "=", "NAME_PATTERN", ",", "is_required", "=", "False", ",", "password", "=", "False", ")", ":", "input_value", "=", "\"\"", "question", "=", "(", "\"Insert the field using the pattern below:\"", "\"\\n{}\\n{}: \"", ".", "format", "(", "pattern", "[", "0", "]", ",", "field_name", ")", ")", "while", "not", "input_value", ":", "input_value", "=", "getpass", "(", "question", ")", "if", "password", "else", "input", "(", "question", ")", "if", "not", "(", "input_value", "or", "is_required", ")", ":", "break", "if", "password", ":", "confirm_password", "=", "getpass", "(", "'Confirm your password: '", ")", "if", "confirm_password", "!=", "input_value", ":", "print", "(", "\"Password does not match\"", ")", "input_value", "=", "\"\"", "if", "not", "self", ".", "valid_attribute", "(", "input_value", ",", "pattern", "[", "1", "]", ")", ":", "error_message", "=", "\"The content must fit the pattern: {}\\n\"", "print", "(", "error_message", ".", "format", "(", "pattern", "[", "0", "]", ")", ")", "input_value", "=", "\"\"", "return", "input_value" ]
Ask a question and get the input values. This method will validate the input values. Args: field_name(string): Field name used to ask for input value. pattern(tuple): Pattern to validate the input value. is_required(bool): Boolean value if the input value is required. password(bool): Boolean value to get input password with mask. Returns: input_value(string): Input value validated.
[ "Ask", "a", "question", "and", "get", "the", "input", "values", "." ]
python
train
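A hypothetical interactive session with this method; `manager` stands in for whatever kytos-utils object exposes ask_question, and NAME_PATTERN is the default pattern from the same module:

# Prompt until a value matching NAME_PATTERN is entered.
username = manager.ask_question('username', is_required=True)

# Password prompts are masked and must be typed twice to match.
password = manager.ask_question('password', password=True)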
loli/medpy
medpy/metric/histogram.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/histogram.py#L865-L909
def noelle_5(h1, h2): # 26 us @array, 52 us @list \w 100 bins r""" Extension of `fidelity_based` proposed by [1]_. .. math:: d_{\sin F}(H, H') = \sqrt{1 -d_{F}^2(H, H')} See `fidelity_based` for the definition of :math:`d_{F}(H, H')`. *Attributes:* - metric *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-normalized histograms:* - not applicable *Attributes for not-equal histograms:* - not applicable Parameters ---------- h1 : sequence The first histogram, normalized. h2 : sequence The second histogram, normalized, same bins as ``h1``. Returns ------- fidelity_based : float Fidelity based distance. References ---------- .. [1] M. Noelle "Distribution Distance Measures Applied to 3-D Object Recognition", 2003 """ return math.sqrt(1 - math.pow(fidelity_based(h1, h2), 2))
[ "def", "noelle_5", "(", "h1", ",", "h2", ")", ":", "# 26 us @array, 52 us @list \\w 100 bins", "return", "math", ".", "sqrt", "(", "1", "-", "math", ".", "pow", "(", "fidelity_based", "(", "h1", ",", "h2", ")", ",", "2", ")", ")" ]
r""" Extension of `fidelity_based` proposed by [1]_. .. math:: d_{\sin F}(H, H') = \sqrt{1 -d_{F}^2(H, H')} See `fidelity_based` for the definition of :math:`d_{F}(H, H')`. *Attributes:* - metric *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-normalized histograms:* - not applicable *Attributes for not-equal histograms:* - not applicable Parameters ---------- h1 : sequence The first histogram, normalized. h2 : sequence The second histogram, normalized, same bins as ``h1``. Returns ------- fidelity_based : float Fidelity based distance. References ---------- .. [1] M. Noelle "Distribution Distance Measures Applied to 3-D Object Recognition", 2003
[ "r", "Extension", "of", "fidelity_based", "proposed", "by", "[", "1", "]", "_", ".", "..", "math", "::", "d_", "{", "\\", "sin", "F", "}", "(", "H", "H", ")", "=", "\\", "sqrt", "{", "1", "-", "d_", "{", "F", "}", "^2", "(", "H", "H", ")", "}", "See", "fidelity_based", "for", "the", "definition", "of", ":", "math", ":", "d_", "{", "F", "}", "(", "H", "H", ")", ".", "*", "Attributes", ":", "*" ]
python
train
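A quick numeric check of the formula above on two normalized two-bin histograms; the fidelity (Bhattacharyya coefficient) comes out near 0.979, so the distance is roughly 0.20:

h1 = [0.5, 0.5]
h2 = [0.7, 0.3]

# sqrt(1 - fidelity_based(h1, h2) ** 2); fidelity_based sums sqrt(h1[i] * h2[i]).
d = noelle_5(h1, h2)  # ~0.204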
pypa/setuptools
setuptools/command/easy_install.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/command/easy_install.py#L2145-L2149
def get_header(cls, script_text="", executable=None): """Create a #! line, getting options (if any) from script_text""" cmd = cls.command_spec_class.best().from_param(executable) cmd.install_options(script_text) return cmd.as_header()
[ "def", "get_header", "(", "cls", ",", "script_text", "=", "\"\"", ",", "executable", "=", "None", ")", ":", "cmd", "=", "cls", ".", "command_spec_class", ".", "best", "(", ")", ".", "from_param", "(", "executable", ")", "cmd", ".", "install_options", "(", "script_text", ")", "return", "cmd", ".", "as_header", "(", ")" ]
Create a #! line, getting options (if any) from script_text
[ "Create", "a", "#!", "line", "getting", "options", "(", "if", "any", ")", "from", "script_text" ]
python
train
annoviko/pyclustering
pyclustering/nnet/som.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/nnet/som.py#L211-L224
def awards(self): """! @brief Return amount of captured objects by each neuron after training. @return (list) Amount of captured objects by each neuron. @see train() """ if self.__ccore_som_pointer is not None: self._award = wrapper.som_get_awards(self.__ccore_som_pointer) return self._award
[ "def", "awards", "(", "self", ")", ":", "if", "self", ".", "__ccore_som_pointer", "is", "not", "None", ":", "self", ".", "_award", "=", "wrapper", ".", "som_get_awards", "(", "self", ".", "__ccore_som_pointer", ")", "return", "self", ".", "_award" ]
! @brief Return amount of captured objects by each neuron after training. @return (list) Amount of captured objects by each neuron. @see train()
[ "!" ]
python
valid
globocom/GloboNetworkAPI-client-python
networkapiclient/rest.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/rest.py#L371-L376
def get_full_url(self, parsed_url): """ Returns url path with querystring """ full_path = parsed_url.path if parsed_url.query: full_path = '%s?%s' % (full_path, parsed_url.query) return full_path
[ "def", "get_full_url", "(", "self", ",", "parsed_url", ")", ":", "full_path", "=", "parsed_url", ".", "path", "if", "parsed_url", ".", "query", ":", "full_path", "=", "'%s?%s'", "%", "(", "full_path", ",", "parsed_url", ".", "query", ")", "return", "full_path" ]
Returns url path with querystring
[ "Returns", "url", "path", "with", "querystring" ]
python
train
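A sketch of the path/query reassembly this method performs, using Python's standard URL parser; `rest_client` is a hypothetical instance of the class that owns get_full_url:

from urllib.parse import urlparse  # the 'urlparse' module on Python 2

parsed = urlparse('http://example.com/networkapi/vlan/?page=2')
rest_client.get_full_url(parsed)  # -> '/networkapi/vlan/?page=2'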
Cue/scales
src/greplin/scales/loop.py
https://github.com/Cue/scales/blob/0aced26eb050ceb98ee9d5d6cdca8db448666986/src/greplin/scales/loop.py#L22-L34
def installStatsLoop(statsFile, statsDelay): """Installs an interval loop that dumps stats to a file.""" def dumpStats(): """Actual stats dump function.""" scales.dumpStatsTo(statsFile) reactor.callLater(statsDelay, dumpStats) def startStats(): """Starts the stats dump in "statsDelay" seconds.""" reactor.callLater(statsDelay, dumpStats) reactor.callWhenRunning(startStats)
[ "def", "installStatsLoop", "(", "statsFile", ",", "statsDelay", ")", ":", "def", "dumpStats", "(", ")", ":", "\"\"\"Actual stats dump function.\"\"\"", "scales", ".", "dumpStatsTo", "(", "statsFile", ")", "reactor", ".", "callLater", "(", "statsDelay", ",", "dumpStats", ")", "def", "startStats", "(", ")", ":", "\"\"\"Starts the stats dump in \"statsDelay\" seconds.\"\"\"", "reactor", ".", "callLater", "(", "statsDelay", ",", "dumpStats", ")", "reactor", ".", "callWhenRunning", "(", "startStats", ")" ]
Installs an interval loop that dumps stats to a file.
[ "Installs", "an", "interval", "loop", "that", "dumps", "stats", "to", "a", "file", "." ]
python
train
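A minimal sketch of wiring this into a Twisted program, assuming greplin scales is installed; the stats path and interval are placeholders:

from twisted.internet import reactor

installStatsLoop('/tmp/scales-stats.json', statsDelay=60)  # dump every 60 s
reactor.run()  # the first dump fires 60 s after startup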
SciTools/biggus
biggus/_init.py
https://github.com/SciTools/biggus/blob/0a76fbe7806dd6295081cd399bcb76135d834d25/biggus/_init.py#L115-L124
def output(self, chunk): """ Dispatch the given Chunk onto all the registered output queues. If the chunk is None, it is silently ignored. """ if chunk is not None: for queue in self.output_queues: queue.put(chunk)
[ "def", "output", "(", "self", ",", "chunk", ")", ":", "if", "chunk", "is", "not", "None", ":", "for", "queue", "in", "self", ".", "output_queues", ":", "queue", ".", "put", "(", "chunk", ")" ]
Dispatch the given Chunk onto all the registered output queues. If the chunk is None, it is silently ignored.
[ "Dispatch", "the", "given", "Chunk", "onto", "all", "the", "registered", "output", "queues", "." ]
python
train
ChrisCummins/labm8
dirhashcache.py
https://github.com/ChrisCummins/labm8/blob/dd10d67a757aefb180cb508f86696f99440c94f5/dirhashcache.py#L65-L105
def dirhash(self, path, **dirhash_opts): """ Compute the hash of a directory. Arguments: path: Directory. **dirhash_opts: Additional options to checksumdir.dirhash(). Returns: str: Checksum of directory. """ path = fs.path(path) last_modified = time.ctime(max( max(os.path.getmtime(os.path.join(root, file)) for file in files) for root,_,files in os.walk(path))) db = sqlite3.connect(self.path) c = db.cursor() c.execute("SELECT date, hash FROM dirhashcache WHERE path=?", (path,)) cached = c.fetchone() if cached: cached_date, cached_hash = cached if cached_date == last_modified: # cache hit dirhash = cached_hash else: # out of date cache dirhash = checksumdir.dirhash(path, self.hash, **dirhash_opts) c.execute("UPDATE dirhashcache SET date=?, hash=? WHERE path=?", (last_modified, dirhash, path)) db.commit() else: # new entry dirhash = checksumdir.dirhash(path, self.hash, **dirhash_opts) c.execute("INSERT INTO dirhashcache VALUES (?,?,?)", (path, last_modified, dirhash)) db.commit() db.close() return dirhash
[ "def", "dirhash", "(", "self", ",", "path", ",", "*", "*", "dirhash_opts", ")", ":", "path", "=", "fs", ".", "path", "(", "path", ")", "last_modified", "=", "time", ".", "ctime", "(", "max", "(", "max", "(", "os", ".", "path", ".", "getmtime", "(", "os", ".", "path", ".", "join", "(", "root", ",", "file", ")", ")", "for", "file", "in", "files", ")", "for", "root", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ")", ")", "db", "=", "sqlite3", ".", "connect", "(", "self", ".", "path", ")", "c", "=", "db", ".", "cursor", "(", ")", "c", ".", "execute", "(", "\"SELECT date, hash FROM dirhashcache WHERE path=?\"", ",", "(", "path", ",", ")", ")", "cached", "=", "c", ".", "fetchone", "(", ")", "if", "cached", ":", "cached_date", ",", "cached_hash", "=", "cached", "if", "cached_date", "==", "last_modified", ":", "# cache hit", "dirhash", "=", "cached_hash", "else", ":", "# out of date cache", "dirhash", "=", "checksumdir", ".", "dirhash", "(", "path", ",", "self", ".", "hash", ",", "*", "*", "dirhash_opts", ")", "c", ".", "execute", "(", "\"UPDATE dirhashcache SET date=?, hash=? WHERE path=?\"", ",", "(", "last_modified", ",", "dirhash", ",", "path", ")", ")", "db", ".", "commit", "(", ")", "else", ":", "# new entry", "dirhash", "=", "checksumdir", ".", "dirhash", "(", "path", ",", "self", ".", "hash", ",", "*", "*", "dirhash_opts", ")", "c", ".", "execute", "(", "\"INSERT INTO dirhashcache VALUES (?,?,?)\"", ",", "(", "path", ",", "last_modified", ",", "dirhash", ")", ")", "db", ".", "commit", "(", ")", "db", ".", "close", "(", ")", "return", "dirhash" ]
Compute the hash of a directory. Arguments: path: Directory. **dirhash_opts: Additional options to checksumdir.dirhash(). Returns: str: Checksum of directory.
[ "Compute", "the", "hash", "of", "a", "directory", "." ]
python
train
phoebe-project/phoebe2
phoebe/backend/universe.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/backend/universe.py#L913-L925
def reset_time(self, time, true_anom, elongan, eincl): """ TODO: add documentation """ self.true_anom = true_anom self.elongan = elongan self.eincl = eincl self.time = time self.populated_at_time = [] self.reset() return
[ "def", "reset_time", "(", "self", ",", "time", ",", "true_anom", ",", "elongan", ",", "eincl", ")", ":", "self", ".", "true_anom", "=", "true_anom", "self", ".", "elongan", "=", "elongan", "self", ".", "eincl", "=", "eincl", "self", ".", "time", "=", "time", "self", ".", "populated_at_time", "=", "[", "]", "self", ".", "reset", "(", ")", "return" ]
TODO: add documentation
[ "TODO", ":", "add", "documentation" ]
python
train
mottosso/be
be/vendor/click/core.py
https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/vendor/click/core.py#L1071-L1078
def add_command(self, cmd, name=None): """Registers another :class:`Command` with this group. If the name is not provided, the name of the command is used. """ name = name or cmd.name if name is None: raise TypeError('Command has no name.') self.commands[name] = cmd
[ "def", "add_command", "(", "self", ",", "cmd", ",", "name", "=", "None", ")", ":", "name", "=", "name", "or", "cmd", ".", "name", "if", "name", "is", "None", ":", "raise", "TypeError", "(", "'Command has no name.'", ")", "self", ".", "commands", "[", "name", "]", "=", "cmd" ]
Registers another :class:`Command` with this group. If the name is not provided, the name of the command is used.
[ "Registers", "another", ":", "class", ":", "Command", "with", "this", "group", ".", "If", "the", "name", "is", "not", "provided", "the", "name", "of", "the", "command", "is", "used", "." ]
python
train
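A short sketch with the public click API showing both the implicit and the explicit name paths of add_command:

import click

@click.command()
def hello():
    """Say hello."""
    click.echo('Hello!')

cli = click.Group()
cli.add_command(hello)             # registered under its own name, 'hello'
cli.add_command(hello, name='hi')  # registered again under an explicit alias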
Aluriak/tergraw
tergraw/graphutils.py
https://github.com/Aluriak/tergraw/blob/7f73cd286a77611e9c73f50b1e43be4f6643ac9f/tergraw/graphutils.py#L21-L34
def process_input_graph(func):
    """Decorator, ensuring first argument is a networkx graph object.
    If the first arg is a dict {node: succs}, a networkx graph equivalent
    to the dict will be sent in place of it."""
    @wraps(func)
    def wrapped_func(*args, **kwargs):
        input_graph = args[0]
        if isinstance(input_graph, nx.DiGraph):
            return func(*args, **kwargs)
        else:
            nx_graph = dict_to_nx(args[0], oriented=True)
            args = [nx_graph] + list(args[1:])
            return func(*args, **kwargs)
    return wrapped_func
[ "def", "process_input_graph", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapped_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "input_graph", "=", "args", "[", "0", "]", "if", "isinstance", "(", "input_graph", ",", "nx", ".", "DiGraph", ")", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "nx_graph", "=", "dict_to_nx", "(", "args", "[", "0", "]", ",", "oriented", "=", "True", ")", "args", "=", "[", "nx_graph", "]", "+", "list", "(", "args", "[", "1", ":", "]", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped_func" ]
Decorator, ensuring first argument is a networkx graph object. If the first arg is a dict {node: succs}, a networkx graph equivalent to the dict will be sent in place of it.
[ "Decorator", "ensuring", "first", "argument", "is", "a", "networkx", "graph", "object", ".", "If", "the", "first", "arg", "is", "a", "dict", "{", "node", ":", "succs", "}", "a", "networkx", "graph", "equivalent", "to", "the", "dict", "will", "be", "send", "in", "place", "of", "it", "." ]
python
train
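A sketch of the decorator in use; `node_count` is a hypothetical function, and both a plain dict and a prebuilt networkx DiGraph work as the first argument:

import networkx as nx

@process_input_graph
def node_count(graph):
    return len(graph)  # number of nodes in the (possibly converted) graph

node_count({'a': ['b', 'c']})         # dict is converted via dict_to_nx first
node_count(nx.DiGraph([('a', 'b')]))  # DiGraph passes through unchanged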
odlgroup/odl
odl/trafos/util/ft_utils.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/util/ft_utils.py#L393-L547
def dft_postprocess_data(arr, real_grid, recip_grid, shift, axes, interp, sign='-', op='multiply', out=None): """Post-process the Fourier-space data after DFT. This function multiplies the given data with the separable function:: q(xi) = exp(+- 1j * dot(x[0], xi)) * s * phi_hat(xi_bar) where ``x[0]`` and ``s`` are the minimum point and the stride of the real-space grid, respectively, and ``phi_hat(xi_bar)`` is the FT of the interpolation kernel. The sign of the exponent depends on the choice of ``sign``. Note that for ``op='divide'`` the multiplication with ``s * phi_hat(xi_bar)`` is replaced by a division with the same array. In discretized form on the reciprocal grid, the exponential part of this function becomes an array:: q[k] = exp(+- 1j * dot(x[0], xi[k])) and the arguments ``xi_bar`` to the interpolation kernel are the normalized frequencies:: for 'shift=True' : xi_bar[k] = -pi + pi * (2*k) / N for 'shift=False' : xi_bar[k] = -pi + pi * (2*k+1) / N See [Pre+2007], Section 13.9 "Computing Fourier Integrals Using the FFT" for a similar approach. Parameters ---------- arr : `array-like` Array to be pre-processed. An array with real data type is converted to its complex counterpart. real_grid : uniform `RectGrid` Real space grid in the transform. recip_grid : uniform `RectGrid` Reciprocal grid in the transform shift : bool or sequence of bools If ``True``, the grid is shifted by half a stride in the negative direction in the corresponding axes. The sequence must have the same length as ``axes``. axes : int or sequence of ints Dimensions along which to take the transform. The sequence must have the same length as ``shifts``. interp : string or sequence of strings Interpolation scheme used in the real-space. sign : {'-', '+'}, optional Sign of the complex exponent. op : {'multiply', 'divide'}, optional Operation to perform with the stride times the interpolation kernel FT out : `numpy.ndarray`, optional Array in which the result is stored. If ``out is arr``, an in-place modification is performed. Returns ------- out : `numpy.ndarray` Result of the post-processing. If ``out`` was given, the returned object is a reference to it. References ---------- [Pre+2007] Press, W H, Teukolsky, S A, Vetterling, W T, and Flannery, B P. *Numerical Recipes in C - The Art of Scientific Computing* (Volume 3). Cambridge University Press, 2007. 
""" arr = np.asarray(arr) if is_real_floating_dtype(arr.dtype): arr = arr.astype(complex_dtype(arr.dtype)) elif not is_complex_floating_dtype(arr.dtype): raise ValueError('array data type {} is not a complex floating point ' 'data type'.format(dtype_repr(arr.dtype))) if out is None: out = arr.copy() elif out is not arr: out[:] = arr if axes is None: axes = list(range(arr.ndim)) else: try: axes = [int(axes)] except TypeError: axes = list(axes) shift_list = normalized_scalar_param_list(shift, length=len(axes), param_conv=bool) if sign == '-': imag = -1j elif sign == '+': imag = 1j else: raise ValueError("`sign` '{}' not understood".format(sign)) op, op_in = str(op).lower(), op if op not in ('multiply', 'divide'): raise ValueError("kernel `op` '{}' not understood".format(op_in)) # Make a list from interp if that's not the case already if is_string(interp): interp = [str(interp).lower()] * arr.ndim onedim_arrs = [] for ax, shift, intp in zip(axes, shift_list, interp): x = real_grid.min_pt[ax] xi = recip_grid.coord_vectors[ax] # First part: exponential array onedim_arr = np.exp(imag * x * xi) # Second part: interpolation kernel len_dft = recip_grid.shape[ax] len_orig = real_grid.shape[ax] halfcomplex = (len_dft < len_orig) odd = len_orig % 2 fmin = -0.5 if shift else -0.5 + 1.0 / (2 * len_orig) if halfcomplex: # maximum lies around 0, possibly half a cell left or right of it if shift and odd: fmax = - 1.0 / (2 * len_orig) elif not shift and not odd: fmax = 1.0 / (2 * len_orig) else: fmax = 0.0 else: # not halfcomplex # maximum lies close to 0.5, half or full cell left of it if shift: # -0.5 + (N-1)/N = 0.5 - 1/N fmax = 0.5 - 1.0 / len_orig else: # -0.5 + 1/(2*N) + (N-1)/N = 0.5 - 1/(2*N) fmax = 0.5 - 1.0 / (2 * len_orig) freqs = np.linspace(fmin, fmax, num=len_dft) stride = real_grid.stride[ax] interp_kernel = _interp_kernel_ft(freqs, intp) interp_kernel *= stride if op == 'multiply': onedim_arr *= interp_kernel else: onedim_arr /= interp_kernel onedim_arrs.append(onedim_arr.astype(out.dtype, copy=False)) fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out) return out
[ "def", "dft_postprocess_data", "(", "arr", ",", "real_grid", ",", "recip_grid", ",", "shift", ",", "axes", ",", "interp", ",", "sign", "=", "'-'", ",", "op", "=", "'multiply'", ",", "out", "=", "None", ")", ":", "arr", "=", "np", ".", "asarray", "(", "arr", ")", "if", "is_real_floating_dtype", "(", "arr", ".", "dtype", ")", ":", "arr", "=", "arr", ".", "astype", "(", "complex_dtype", "(", "arr", ".", "dtype", ")", ")", "elif", "not", "is_complex_floating_dtype", "(", "arr", ".", "dtype", ")", ":", "raise", "ValueError", "(", "'array data type {} is not a complex floating point '", "'data type'", ".", "format", "(", "dtype_repr", "(", "arr", ".", "dtype", ")", ")", ")", "if", "out", "is", "None", ":", "out", "=", "arr", ".", "copy", "(", ")", "elif", "out", "is", "not", "arr", ":", "out", "[", ":", "]", "=", "arr", "if", "axes", "is", "None", ":", "axes", "=", "list", "(", "range", "(", "arr", ".", "ndim", ")", ")", "else", ":", "try", ":", "axes", "=", "[", "int", "(", "axes", ")", "]", "except", "TypeError", ":", "axes", "=", "list", "(", "axes", ")", "shift_list", "=", "normalized_scalar_param_list", "(", "shift", ",", "length", "=", "len", "(", "axes", ")", ",", "param_conv", "=", "bool", ")", "if", "sign", "==", "'-'", ":", "imag", "=", "-", "1j", "elif", "sign", "==", "'+'", ":", "imag", "=", "1j", "else", ":", "raise", "ValueError", "(", "\"`sign` '{}' not understood\"", ".", "format", "(", "sign", ")", ")", "op", ",", "op_in", "=", "str", "(", "op", ")", ".", "lower", "(", ")", ",", "op", "if", "op", "not", "in", "(", "'multiply'", ",", "'divide'", ")", ":", "raise", "ValueError", "(", "\"kernel `op` '{}' not understood\"", ".", "format", "(", "op_in", ")", ")", "# Make a list from interp if that's not the case already", "if", "is_string", "(", "interp", ")", ":", "interp", "=", "[", "str", "(", "interp", ")", ".", "lower", "(", ")", "]", "*", "arr", ".", "ndim", "onedim_arrs", "=", "[", "]", "for", "ax", ",", "shift", ",", "intp", "in", "zip", "(", "axes", ",", "shift_list", ",", "interp", ")", ":", "x", "=", "real_grid", ".", "min_pt", "[", "ax", "]", "xi", "=", "recip_grid", ".", "coord_vectors", "[", "ax", "]", "# First part: exponential array", "onedim_arr", "=", "np", ".", "exp", "(", "imag", "*", "x", "*", "xi", ")", "# Second part: interpolation kernel", "len_dft", "=", "recip_grid", ".", "shape", "[", "ax", "]", "len_orig", "=", "real_grid", ".", "shape", "[", "ax", "]", "halfcomplex", "=", "(", "len_dft", "<", "len_orig", ")", "odd", "=", "len_orig", "%", "2", "fmin", "=", "-", "0.5", "if", "shift", "else", "-", "0.5", "+", "1.0", "/", "(", "2", "*", "len_orig", ")", "if", "halfcomplex", ":", "# maximum lies around 0, possibly half a cell left or right of it", "if", "shift", "and", "odd", ":", "fmax", "=", "-", "1.0", "/", "(", "2", "*", "len_orig", ")", "elif", "not", "shift", "and", "not", "odd", ":", "fmax", "=", "1.0", "/", "(", "2", "*", "len_orig", ")", "else", ":", "fmax", "=", "0.0", "else", ":", "# not halfcomplex", "# maximum lies close to 0.5, half or full cell left of it", "if", "shift", ":", "# -0.5 + (N-1)/N = 0.5 - 1/N", "fmax", "=", "0.5", "-", "1.0", "/", "len_orig", "else", ":", "# -0.5 + 1/(2*N) + (N-1)/N = 0.5 - 1/(2*N)", "fmax", "=", "0.5", "-", "1.0", "/", "(", "2", "*", "len_orig", ")", "freqs", "=", "np", ".", "linspace", "(", "fmin", ",", "fmax", ",", "num", "=", "len_dft", ")", "stride", "=", "real_grid", ".", "stride", "[", "ax", "]", "interp_kernel", "=", "_interp_kernel_ft", "(", "freqs", ",", "intp", ")", "interp_kernel", "*=", "stride", 
"if", "op", "==", "'multiply'", ":", "onedim_arr", "*=", "interp_kernel", "else", ":", "onedim_arr", "/=", "interp_kernel", "onedim_arrs", ".", "append", "(", "onedim_arr", ".", "astype", "(", "out", ".", "dtype", ",", "copy", "=", "False", ")", ")", "fast_1d_tensor_mult", "(", "out", ",", "onedim_arrs", ",", "axes", "=", "axes", ",", "out", "=", "out", ")", "return", "out" ]
Post-process the Fourier-space data after DFT. This function multiplies the given data with the separable function:: q(xi) = exp(+- 1j * dot(x[0], xi)) * s * phi_hat(xi_bar) where ``x[0]`` and ``s`` are the minimum point and the stride of the real-space grid, respectively, and ``phi_hat(xi_bar)`` is the FT of the interpolation kernel. The sign of the exponent depends on the choice of ``sign``. Note that for ``op='divide'`` the multiplication with ``s * phi_hat(xi_bar)`` is replaced by a division with the same array. In discretized form on the reciprocal grid, the exponential part of this function becomes an array:: q[k] = exp(+- 1j * dot(x[0], xi[k])) and the arguments ``xi_bar`` to the interpolation kernel are the normalized frequencies:: for 'shift=True' : xi_bar[k] = -pi + pi * (2*k) / N for 'shift=False' : xi_bar[k] = -pi + pi * (2*k+1) / N See [Pre+2007], Section 13.9 "Computing Fourier Integrals Using the FFT" for a similar approach. Parameters ---------- arr : `array-like` Array to be pre-processed. An array with real data type is converted to its complex counterpart. real_grid : uniform `RectGrid` Real space grid in the transform. recip_grid : uniform `RectGrid` Reciprocal grid in the transform shift : bool or sequence of bools If ``True``, the grid is shifted by half a stride in the negative direction in the corresponding axes. The sequence must have the same length as ``axes``. axes : int or sequence of ints Dimensions along which to take the transform. The sequence must have the same length as ``shifts``. interp : string or sequence of strings Interpolation scheme used in the real-space. sign : {'-', '+'}, optional Sign of the complex exponent. op : {'multiply', 'divide'}, optional Operation to perform with the stride times the interpolation kernel FT out : `numpy.ndarray`, optional Array in which the result is stored. If ``out is arr``, an in-place modification is performed. Returns ------- out : `numpy.ndarray` Result of the post-processing. If ``out`` was given, the returned object is a reference to it. References ---------- [Pre+2007] Press, W H, Teukolsky, S A, Vetterling, W T, and Flannery, B P. *Numerical Recipes in C - The Art of Scientific Computing* (Volume 3). Cambridge University Press, 2007.
[ "Post", "-", "process", "the", "Fourier", "-", "space", "data", "after", "DFT", "." ]
python
train
ptrus/suffix-trees
suffix_trees/STree.py
https://github.com/ptrus/suffix-trees/blob/e7439c93a7b895523fad36c8c65a781710320b57/suffix_trees/STree.py#L124-L132
def _label_generalized(self, node): """Helper method that labels the nodes of GST with indexes of strings found in their descendants. """ if node.is_leaf(): x = {self._get_word_start_index(node.idx)} else: x = {n for ns in node.transition_links for n in ns[0].generalized_idxs} node.generalized_idxs = x
[ "def", "_label_generalized", "(", "self", ",", "node", ")", ":", "if", "node", ".", "is_leaf", "(", ")", ":", "x", "=", "{", "self", ".", "_get_word_start_index", "(", "node", ".", "idx", ")", "}", "else", ":", "x", "=", "{", "n", "for", "ns", "in", "node", ".", "transition_links", "for", "n", "in", "ns", "[", "0", "]", ".", "generalized_idxs", "}", "node", ".", "generalized_idxs", "=", "x" ]
Helper method that labels the nodes of GST with indexes of strings found in their descendants.
[ "Helper", "method", "that", "labels", "the", "nodes", "of", "GST", "with", "indexes", "of", "strings", "found", "in", "their", "descendants", "." ]
python
valid
doakey3/DashTable
dashtable/data2rst/cell/get_merge_direction.py
https://github.com/doakey3/DashTable/blob/744cfb6a717fa75a8092c83ebcd49b2668023681/dashtable/data2rst/cell/get_merge_direction.py#L1-L74
def get_merge_direction(cell1, cell2):
    """
    Determine the side of cell1 that can be merged with cell2.

    This is based on the location of the two cells in the table as well
    as the compatibility of their height and width.

    For example these cells can merge::

        cell1    cell2      merge "RIGHT"

        +-----+  +------+   +-----+------+
        | foo |  | dog  |   | foo | dog  |
        |     |  +------+   |     +------+
        |     |  | cat  |   |     | cat  |
        |     |  +------+   |     +------+
        |     |  | bird |   |     | bird |
        +-----+  +------+   +-----+------+

    But these cells cannot merge::

        +-----+  +------+
        | foo |  | dog  |
        |     |  +------+
        |     |  | cat  |
        |     |  +------+
        |     |
        +-----+

    Parameters
    ----------
    cell1 : dashtable.data2rst.Cell
    cell2 : dashtable.data2rst.Cell

    Returns
    -------
    str
        The side onto which cell2 can be merged. Will be one of
        ["LEFT", "RIGHT", "BOTTOM", "TOP", "NONE"]
    """
    cell1_left = cell1.column
    cell1_right = cell1.column + cell1.column_count
    cell1_top = cell1.row
    cell1_bottom = cell1.row + cell1.row_count

    cell2_left = cell2.column
    cell2_right = cell2.column + cell2.column_count
    cell2_top = cell2.row
    cell2_bottom = cell2.row + cell2.row_count

    if (cell1_right == cell2_left and cell1_top == cell2_top and
            cell1_bottom == cell2_bottom and
            cell1.right_sections >= cell2.left_sections):
        return "RIGHT"

    elif (cell1_left == cell2_left and
            cell1_right == cell2_right and
            cell1_top == cell2_bottom and
            cell1.top_sections >= cell2.bottom_sections):
        return "TOP"

    elif (cell1_left == cell2_left and
            cell1_right == cell2_right and
            cell1_bottom == cell2_top and
            cell1.bottom_sections >= cell2.top_sections):
        return "BOTTOM"

    elif (cell1_left == cell2_right and cell1_top == cell2_top and
            cell1_bottom == cell2_bottom and
            cell1.left_sections >= cell2.right_sections):
        return "LEFT"

    else:
        return "NONE"
[ "def", "get_merge_direction", "(", "cell1", ",", "cell2", ")", ":", "cell1_left", "=", "cell1", ".", "column", "cell1_right", "=", "cell1", ".", "column", "+", "cell1", ".", "column_count", "cell1_top", "=", "cell1", ".", "row", "cell1_bottom", "=", "cell1", ".", "row", "+", "cell1", ".", "row_count", "cell2_left", "=", "cell2", ".", "column", "cell2_right", "=", "cell2", ".", "column", "+", "cell2", ".", "column_count", "cell2_top", "=", "cell2", ".", "row", "cell2_bottom", "=", "cell2", ".", "row", "+", "cell2", ".", "row_count", "if", "(", "cell1_right", "==", "cell2_left", "and", "cell1_top", "==", "cell2_top", "and", "cell1_bottom", "==", "cell2_bottom", "and", "cell1", ".", "right_sections", ">=", "cell2", ".", "left_sections", ")", ":", "return", "\"RIGHT\"", "elif", "(", "cell1_left", "==", "cell2_left", "and", "cell1_right", "==", "cell2_right", "and", "cell1_top", "==", "cell2_bottom", "and", "cell1", ".", "top_sections", ">=", "cell2", ".", "bottom_sections", ")", ":", "return", "\"TOP\"", "elif", "(", "cell1_left", "==", "cell2_left", "and", "cell1_right", "==", "cell2_right", "and", "cell1_bottom", "==", "cell2_top", "and", "cell1", ".", "bottom_sections", ">=", "cell2", ".", "top_sections", ")", ":", "return", "\"BOTTOM\"", "elif", "(", "cell1_left", "==", "cell2_right", "and", "cell1_top", "==", "cell2_top", "and", "cell1_bottom", "==", "cell2_bottom", "and", "cell1", ".", "left_sections", ">=", "cell2", ".", "right_sections", ")", ":", "return", "\"LEFT\"", "else", ":", "return", "\"NONE\"" ]
Determine the side of cell1 that can be merged with cell2.

This is based on the location of the two cells in the table as well
as the compatibility of their height and width.

For example these cells can merge::

    cell1    cell2      merge "RIGHT"

    +-----+  +------+   +-----+------+
    | foo |  | dog  |   | foo | dog  |
    |     |  +------+   |     +------+
    |     |  | cat  |   |     | cat  |
    |     |  +------+   |     +------+
    |     |  | bird |   |     | bird |
    +-----+  +------+   +-----+------+

But these cells cannot merge::

    +-----+  +------+
    | foo |  | dog  |
    |     |  +------+
    |     |  | cat  |
    |     |  +------+
    |     |
    +-----+

Parameters
----------
cell1 : dashtable.data2rst.Cell
cell2 : dashtable.data2rst.Cell

Returns
-------
str
    The side onto which cell2 can be merged. Will be one of
    ["LEFT", "RIGHT", "BOTTOM", "TOP", "NONE"]
[ "Determine", "the", "side", "of", "cell1", "that", "can", "be", "merged", "with", "cell2", "." ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L130-L134
def agent_show(self, agent_id, **kwargs): "https://developer.zendesk.com/rest_api/docs/chat/agents#get-agent-by-id" api_path = "/api/v2/agents/{agent_id}" api_path = api_path.format(agent_id=agent_id) return self.call(api_path, **kwargs)
[ "def", "agent_show", "(", "self", ",", "agent_id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/agents/{agent_id}\"", "api_path", "=", "api_path", ".", "format", "(", "agent_id", "=", "agent_id", ")", "return", "self", ".", "call", "(", "api_path", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/chat/agents#get-agent-by-id
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "chat", "/", "agents#get", "-", "agent", "-", "by", "-", "id" ]
python
train
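A hypothetical call through the public zdesk client; the subdomain, e-mail, and token values are placeholders:

from zdesk import Zendesk

zd = Zendesk(zdesk_url='https://example.zendesk.com',
             zdesk_email='user@example.com',
             zdesk_password='api_token_here',
             zdesk_token=True)

agent = zd.agent_show(12345)  # GET /api/v2/agents/12345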
aio-libs/aiohttp-devtools
aiohttp_devtools/start/template/app/views.py
https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/start/template/app/views.py#L60-L91
async def index(request): """ This is the view handler for the "/" url. **Note: returning html without a template engine like jinja2 is ugly, no way around that.** :param request: the request object see http://aiohttp.readthedocs.io/en/stable/web_reference.html#request :return: aiohttp.web.Response object """ # {% if database.is_none and example.is_message_board %} # app.router allows us to generate urls based on their names, # see http://aiohttp.readthedocs.io/en/stable/web.html#reverse-url-constructing-using-named-resources message_url = request.app.router['messages'].url_for() ctx = dict( title=request.app['name'], styles_css_url=request.app['static_root_url'] + '/styles.css', content="""\ <p>Success! you've setup a basic aiohttp app.</p> <p>To demonstrate a little of the functionality of aiohttp this app implements a very simple message board.</p> <b> <a href="{message_url}">View and add messages</a> </b>""".format(message_url=message_url) ) # {% else %} ctx = dict( title=request.app['name'], styles_css_url=request.app['static_root_url'] + '/styles.css', content="<p>Success! you've setup a basic aiohttp app.</p>", ) # {% endif %} # with the base web.Response type we have to manually set the content type, otherwise text/plain will be used. return web.Response(text=BASE_PAGE.format(**ctx), content_type='text/html')
[ "async", "def", "index", "(", "request", ")", ":", "# {% if database.is_none and example.is_message_board %}", "# app.router allows us to generate urls based on their names,", "# see http://aiohttp.readthedocs.io/en/stable/web.html#reverse-url-constructing-using-named-resources", "message_url", "=", "request", ".", "app", ".", "router", "[", "'messages'", "]", ".", "url_for", "(", ")", "ctx", "=", "dict", "(", "title", "=", "request", ".", "app", "[", "'name'", "]", ",", "styles_css_url", "=", "request", ".", "app", "[", "'static_root_url'", "]", "+", "'/styles.css'", ",", "content", "=", "\"\"\"\\\n <p>Success! you've setup a basic aiohttp app.</p>\n <p>To demonstrate a little of the functionality of aiohttp this app implements a very simple message board.</p>\n <b>\n <a href=\"{message_url}\">View and add messages</a>\n </b>\"\"\"", ".", "format", "(", "message_url", "=", "message_url", ")", ")", "# {% else %}", "ctx", "=", "dict", "(", "title", "=", "request", ".", "app", "[", "'name'", "]", ",", "styles_css_url", "=", "request", ".", "app", "[", "'static_root_url'", "]", "+", "'/styles.css'", ",", "content", "=", "\"<p>Success! you've setup a basic aiohttp app.</p>\"", ",", ")", "# {% endif %}", "# with the base web.Response type we have to manually set the content type, otherwise text/plain will be used.", "return", "web", ".", "Response", "(", "text", "=", "BASE_PAGE", ".", "format", "(", "*", "*", "ctx", ")", ",", "content_type", "=", "'text/html'", ")" ]
This is the view handler for the "/" url. **Note: returning html without a template engine like jinja2 is ugly, no way around that.** :param request: the request object see http://aiohttp.readthedocs.io/en/stable/web_reference.html#request :return: aiohttp.web.Response object
[ "This", "is", "the", "view", "handler", "for", "the", "/", "url", "." ]
python
train
hotdoc/hotdoc
hotdoc/extensions/c/clang/cindex.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/extensions/c/clang/cindex.py#L1727-L1735
def walk_preorder(self): """Depth-first preorder walk over the cursor and its descendants. Yields cursors. """ yield self for child in self.get_children(): for descendant in child.walk_preorder(): yield descendant
[ "def", "walk_preorder", "(", "self", ")", ":", "yield", "self", "for", "child", "in", "self", ".", "get_children", "(", ")", ":", "for", "descendant", "in", "child", ".", "walk_preorder", "(", ")", ":", "yield", "descendant" ]
Depth-first preorder walk over the cursor and its descendants. Yields cursors.
[ "Depth", "-", "first", "preorder", "walk", "over", "the", "cursor", "and", "its", "descendants", "." ]
python
train
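A sketch of the walk using the upstream clang Python bindings that this module vendors; 'example.c' is a placeholder source file, and libclang must be available on the system:

from clang.cindex import Index

tu = Index.create().parse('example.c')
for cursor in tu.cursor.walk_preorder():  # depth-first, parent before children
    print(cursor.kind, cursor.spelling)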
maartenbreddels/ipyvolume
ipyvolume/utils.py
https://github.com/maartenbreddels/ipyvolume/blob/e68b72852b61276f8e6793bc8811f5b2432a155f/ipyvolume/utils.py#L122-L193
def download_to_file(url, filepath, resume=False, overwrite=False, chunk_size=1024 * 1024 * 10, loadbar_length=10): """Download a url. prints a simple loading bar [=*loadbar_length] to show progress (in console and notebook) :type url: str :type filepath: str :param filepath: path to download to :param resume: if True resume download from existing file chunk :param overwrite: if True remove any existing filepath :param chunk_size: None or int in bytes :param loadbar_length: int length of load bar :return: """ resume_header = None loaded_size = 0 write_mode = 'wb' if os.path.exists(filepath): if overwrite: os.remove(filepath) elif resume: # if we want to resume, first try and see if the file is already complete loaded_size = os.path.getsize(filepath) clength = requests.head(url).headers.get('content-length') if clength is not None: if int(clength) == loaded_size: return None # give the point to resume at resume_header = {'Range': 'bytes=%s-' % loaded_size} write_mode = 'ab' else: return None stream = False if chunk_size is None else True # start printing with no return character, so that we can have everything on one line print("Downloading {0:s}: ".format(url), end="") response = requests.get(url, stream=stream, headers=resume_header) # raise error if download was unsuccessful response.raise_for_status() # get the size of the file if available total_length = response.headers.get('content-length') if total_length is not None: total_length = float(total_length) + loaded_size print("{0:.2f}Mb/{1:} ".format(total_length / (1024 * 1024), loadbar_length), end="") print("[", end="") parent = os.path.dirname(filepath) if not os.path.exists(parent) and parent: os.makedirs(parent) with io.open(filepath, write_mode) as f: loaded = 0 for chunk in response.iter_content(chunk_size=chunk_size): if chunk: # filter out keep-alive new chunks # print our progress bar if total_length is not None and chunk_size is not None: while loaded < loadbar_length * loaded_size / total_length: print("=", end='') loaded += 1 loaded_size += chunk_size f.write(chunk) if total_length is None: print("=" * loadbar_length, end='') else: while loaded < loadbar_length: print("=", end='') loaded += 1 print("] Finished")
[ "def", "download_to_file", "(", "url", ",", "filepath", ",", "resume", "=", "False", ",", "overwrite", "=", "False", ",", "chunk_size", "=", "1024", "*", "1024", "*", "10", ",", "loadbar_length", "=", "10", ")", ":", "resume_header", "=", "None", "loaded_size", "=", "0", "write_mode", "=", "'wb'", "if", "os", ".", "path", ".", "exists", "(", "filepath", ")", ":", "if", "overwrite", ":", "os", ".", "remove", "(", "filepath", ")", "elif", "resume", ":", "# if we want to resume, first try and see if the file is already complete", "loaded_size", "=", "os", ".", "path", ".", "getsize", "(", "filepath", ")", "clength", "=", "requests", ".", "head", "(", "url", ")", ".", "headers", ".", "get", "(", "'content-length'", ")", "if", "clength", "is", "not", "None", ":", "if", "int", "(", "clength", ")", "==", "loaded_size", ":", "return", "None", "# give the point to resume at", "resume_header", "=", "{", "'Range'", ":", "'bytes=%s-'", "%", "loaded_size", "}", "write_mode", "=", "'ab'", "else", ":", "return", "None", "stream", "=", "False", "if", "chunk_size", "is", "None", "else", "True", "# start printing with no return character, so that we can have everything on one line", "print", "(", "\"Downloading {0:s}: \"", ".", "format", "(", "url", ")", ",", "end", "=", "\"\"", ")", "response", "=", "requests", ".", "get", "(", "url", ",", "stream", "=", "stream", ",", "headers", "=", "resume_header", ")", "# raise error if download was unsuccessful", "response", ".", "raise_for_status", "(", ")", "# get the size of the file if available", "total_length", "=", "response", ".", "headers", ".", "get", "(", "'content-length'", ")", "if", "total_length", "is", "not", "None", ":", "total_length", "=", "float", "(", "total_length", ")", "+", "loaded_size", "print", "(", "\"{0:.2f}Mb/{1:} \"", ".", "format", "(", "total_length", "/", "(", "1024", "*", "1024", ")", ",", "loadbar_length", ")", ",", "end", "=", "\"\"", ")", "print", "(", "\"[\"", ",", "end", "=", "\"\"", ")", "parent", "=", "os", ".", "path", ".", "dirname", "(", "filepath", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "parent", ")", "and", "parent", ":", "os", ".", "makedirs", "(", "parent", ")", "with", "io", ".", "open", "(", "filepath", ",", "write_mode", ")", "as", "f", ":", "loaded", "=", "0", "for", "chunk", "in", "response", ".", "iter_content", "(", "chunk_size", "=", "chunk_size", ")", ":", "if", "chunk", ":", "# filter out keep-alive new chunks", "# print our progress bar", "if", "total_length", "is", "not", "None", "and", "chunk_size", "is", "not", "None", ":", "while", "loaded", "<", "loadbar_length", "*", "loaded_size", "/", "total_length", ":", "print", "(", "\"=\"", ",", "end", "=", "''", ")", "loaded", "+=", "1", "loaded_size", "+=", "chunk_size", "f", ".", "write", "(", "chunk", ")", "if", "total_length", "is", "None", ":", "print", "(", "\"=\"", "*", "loadbar_length", ",", "end", "=", "''", ")", "else", ":", "while", "loaded", "<", "loadbar_length", ":", "print", "(", "\"=\"", ",", "end", "=", "''", ")", "loaded", "+=", "1", "print", "(", "\"] Finished\"", ")" ]
Download a url. prints a simple loading bar [=*loadbar_length] to show progress (in console and notebook) :type url: str :type filepath: str :param filepath: path to download to :param resume: if True resume download from existing file chunk :param overwrite: if True remove any existing filepath :param chunk_size: None or int in bytes :param loadbar_length: int length of load bar :return:
[ "Download", "a", "url", "." ]
python
train
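A minimal invocation sketch; the URL and target path are placeholders, and resume=True lets an interrupted download pick up from the existing partial file via an HTTP Range header:

download_to_file('https://example.com/data.bin', '/tmp/data.bin',
                 resume=True, overwrite=False)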
welchbj/sublemon
sublemon/runtime.py
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/runtime.py#L80-L98
async def iter_lines(
        self,
        *cmds: str,
        stream: str='both') -> AsyncGenerator[str, None]:
    """Coroutine to spawn commands and yield text lines from their output streams."""
    sps = self.spawn(*cmds)
    if stream == 'both':
        agen = amerge(
            amerge(*[sp.stdout for sp in sps]),
            amerge(*[sp.stderr for sp in sps]))
    elif stream == 'stdout':
        agen = amerge(*[sp.stdout for sp in sps])
    elif stream == 'stderr':
        agen = amerge(*[sp.stderr for sp in sps])
    else:
        raise SublemonRuntimeError(
            'Invalid `stream` kwarg received: `' + str(stream) + '`')

    async for line in agen:
        yield line.decode('utf-8').rstrip()
[ "async", "def", "iter_lines", "(", "self", ",", "*", "cmds", ":", "str", ",", "stream", ":", "str", "=", "'both'", ")", "->", "AsyncGenerator", "[", "str", ",", "None", "]", ":", "sps", "=", "self", ".", "spawn", "(", "*", "cmds", ")", "if", "stream", "==", "'both'", ":", "agen", "=", "amerge", "(", "amerge", "(", "*", "[", "sp", ".", "stdout", "for", "sp", "in", "sps", "]", ")", ",", "amerge", "(", "*", "[", "sp", ".", "stderr", "for", "sp", "in", "sps", "]", ")", ")", "elif", "stream", "==", "'stdout'", ":", "agen", "=", "amerge", "(", "*", "[", "sp", ".", "stdout", "for", "sp", "in", "sps", "]", ")", "elif", "stream", "==", "'stderr'", ":", "agen", "=", "amerge", "(", "*", "[", "sp", ".", "stderr", "for", "sp", "in", "sps", "]", ")", "else", ":", "raise", "SublemonRuntimeError", "(", "'Invalid `stream` kwarg received: `'", "+", "str", "(", "stream", ")", "+", "'`'", ")", "async", "for", "line", "in", "agen", ":", "yield", "line", ".", "decode", "(", "'utf-8'", ")", ".", "rstrip", "(", ")" ]
Coroutine to spawn commands and yield text lines from their output streams.
[ "Coroutine", "to", "spawn", "commands", "and", "yield", "text", "lines", "from", "stdout", "." ]
python
train
saltstack/salt
salt/modules/boto_lambda.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_lambda.py#L737-L758
def alias_exists(FunctionName, Name, region=None, key=None, keyid=None, profile=None): ''' Given a function name and alias name, check to see if the given alias exists. Returns True if the given alias exists and returns False if the given alias does not exist. CLI Example: .. code-block:: bash salt myminion boto_lambda.alias_exists myfunction myalias ''' try: alias = _find_alias(FunctionName, Name, region=region, key=key, keyid=keyid, profile=profile) return {'exists': bool(alias)} except ClientError as e: return {'error': __utils__['boto3.get_error'](e)}
[ "def", "alias_exists", "(", "FunctionName", ",", "Name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "alias", "=", "_find_alias", "(", "FunctionName", ",", "Name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "return", "{", "'exists'", ":", "bool", "(", "alias", ")", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'error'", ":", "__utils__", "[", "'boto3.get_error'", "]", "(", "e", ")", "}" ]
Given a function name and alias name, check to see if the given alias exists. Returns True if the given alias exists and returns False if the given alias does not exist. CLI Example: .. code-block:: bash salt myminion boto_lambda.alias_exists myfunction myalias
[ "Given", "a", "function", "name", "and", "alias", "name", "check", "to", "see", "if", "the", "given", "alias", "exists", "." ]
python
train
sbg/sevenbridges-python
sevenbridges/models/task.py
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/models/task.py#L381-L389
def get_batch_children(self): """ Retrieves batch child tasks for this task if its a batch task. :return: Collection instance. :raises SbError if task is not a batch task. """ if not self.batch: raise SbgError("This task is not a batch task.") return self.query(parent=self.id, api=self._api)
[ "def", "get_batch_children", "(", "self", ")", ":", "if", "not", "self", ".", "batch", ":", "raise", "SbgError", "(", "\"This task is not a batch task.\"", ")", "return", "self", ".", "query", "(", "parent", "=", "self", ".", "id", ",", "api", "=", "self", ".", "_api", ")" ]
Retrieves batch child tasks for this task if its a batch task. :return: Collection instance. :raises SbError if task is not a batch task.
[ "Retrieves", "batch", "child", "tasks", "for", "this", "task", "if", "its", "a", "batch", "task", ".", ":", "return", ":", "Collection", "instance", ".", ":", "raises", "SbError", "if", "task", "is", "not", "a", "batch", "task", "." ]
python
train
bokeh/bokeh
bokeh/core/property/descriptors.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/descriptors.py#L671-L697
def _get(self, obj): ''' Internal implementation of instance attribute access for the ``BasicPropertyDescriptor`` getter. If the value has not been explicitly set by a user, return that value. Otherwise, return the default. Args: obj (HasProps) : the instance to get a value of this property for Returns: object Raises: RuntimeError If the |HasProps| instance has not yet been initialized, or if this descriptor is on a class that is not a |HasProps|. ''' if not hasattr(obj, '_property_values'): raise RuntimeError("Cannot get a property value '%s' from a %s instance before HasProps.__init__" % (self.name, obj.__class__.__name__)) if self.name not in obj._property_values: return self._get_default(obj) else: return obj._property_values[self.name]
[ "def", "_get", "(", "self", ",", "obj", ")", ":", "if", "not", "hasattr", "(", "obj", ",", "'_property_values'", ")", ":", "raise", "RuntimeError", "(", "\"Cannot get a property value '%s' from a %s instance before HasProps.__init__\"", "%", "(", "self", ".", "name", ",", "obj", ".", "__class__", ".", "__name__", ")", ")", "if", "self", ".", "name", "not", "in", "obj", ".", "_property_values", ":", "return", "self", ".", "_get_default", "(", "obj", ")", "else", ":", "return", "obj", ".", "_property_values", "[", "self", ".", "name", "]" ]
Internal implementation of instance attribute access for the ``BasicPropertyDescriptor`` getter. If the value has not been explicitly set by a user, return that value. Otherwise, return the default. Args: obj (HasProps) : the instance to get a value of this property for Returns: object Raises: RuntimeError If the |HasProps| instance has not yet been initialized, or if this descriptor is on a class that is not a |HasProps|.
[ "Internal", "implementation", "of", "instance", "attribute", "access", "for", "the", "BasicPropertyDescriptor", "getter", "." ]
python
train
willkg/everett
everett/sphinxext.py
https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/sphinxext.py#L216-L237
def add_target_and_index(self, name, sig, signode): """Add a target and index for this thing.""" targetname = '%s-%s' % (self.objtype, name) if targetname not in self.state.document.ids: signode['names'].append(targetname) signode['ids'].append(targetname) signode['first'] = (not self.names) self.state.document.note_explicit_target(signode) objects = self.env.domaindata['everett']['objects'] key = (self.objtype, name) if key in objects: self.state_machine.reporter.warning( 'duplicate description of %s %s, ' % (self.objtype, name) + 'other instance in ' + self.env.doc2path(objects[key]), line=self.lineno ) objects[key] = self.env.docname indextext = _('%s (component)') % name self.indexnode['entries'].append(('single', indextext, targetname, '', None))
[ "def", "add_target_and_index", "(", "self", ",", "name", ",", "sig", ",", "signode", ")", ":", "targetname", "=", "'%s-%s'", "%", "(", "self", ".", "objtype", ",", "name", ")", "if", "targetname", "not", "in", "self", ".", "state", ".", "document", ".", "ids", ":", "signode", "[", "'names'", "]", ".", "append", "(", "targetname", ")", "signode", "[", "'ids'", "]", ".", "append", "(", "targetname", ")", "signode", "[", "'first'", "]", "=", "(", "not", "self", ".", "names", ")", "self", ".", "state", ".", "document", ".", "note_explicit_target", "(", "signode", ")", "objects", "=", "self", ".", "env", ".", "domaindata", "[", "'everett'", "]", "[", "'objects'", "]", "key", "=", "(", "self", ".", "objtype", ",", "name", ")", "if", "key", "in", "objects", ":", "self", ".", "state_machine", ".", "reporter", ".", "warning", "(", "'duplicate description of %s %s, '", "%", "(", "self", ".", "objtype", ",", "name", ")", "+", "'other instance in '", "+", "self", ".", "env", ".", "doc2path", "(", "objects", "[", "key", "]", ")", ",", "line", "=", "self", ".", "lineno", ")", "objects", "[", "key", "]", "=", "self", ".", "env", ".", "docname", "indextext", "=", "_", "(", "'%s (component)'", ")", "%", "name", "self", ".", "indexnode", "[", "'entries'", "]", ".", "append", "(", "(", "'single'", ",", "indextext", ",", "targetname", ",", "''", ",", "None", ")", ")" ]
Add a target and index for this thing.
[ "Add", "a", "target", "and", "index", "for", "this", "thing", "." ]
python
train
laplacesdemon/django-social-friends-finder
social_friends_finder/models.py
https://github.com/laplacesdemon/django-social-friends-finder/blob/cad63349b19b3c301626c24420ace13c63f45ad7/social_friends_finder/models.py#L68-L90
def get_or_create_with_social_auth(self, social_auth):
    """
    creates and saves model instance with collection of UserSocialAuth

    Raise:
        NotImplementedError
    """
    # Type check
    self.assert_user_is_social_auth_user(social_auth)

    # Fetch the record
    try:
        social_friend_list = self.filter(user_social_auth=social_auth).get()
    except:
        # if no record found, create a new one
        friend_ids = self.fetch_social_friend_ids(social_auth)

        social_friend_list = SocialFriendList()
        social_friend_list.friend_ids = friend_ids
        social_friend_list.user_social_auth = social_auth
        social_friend_list.save()

    return social_friend_list
[ "def", "get_or_create_with_social_auth", "(", "self", ",", "social_auth", ")", ":", "# Type check", "self", ".", "assert_user_is_social_auth_user", "(", "social_auth", ")", "# Fetch the record", "try", ":", "social_friend_list", "=", "self", ".", "filter", "(", "user_social_auth", "=", "social_auth", ")", ".", "get", "(", ")", "except", ":", "# if no record found, create a new one", "friend_ids", "=", "self", ".", "fetch_social_friend_ids", "(", "social_auth", ")", "social_friend_list", "=", "SocialFriendList", "(", ")", "social_friend_list", ".", "friend_ids", "=", "friend_ids", "social_friend_list", ".", "user_social_auth", "=", "social_auth", "social_friend_list", ".", "save", "(", ")", "return", "social_friend_list" ]
creates and saves model instance with collection of UserSocialAuth

Raise:
    NotImplementedError
[ "creates", "and", "saves", "model", "instance", "with", "collection", "of", "UserSocialAuth" ]
python
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py#L411-L455
def apply_trade(self, trade):
    """
    Apply a trade and compute the cash change it produces.

    Opening a position: delta_cash = -1 * margin = -1 * quantity * contract_multiplier * price * margin_rate
    Closing a position: delta_cash = old_margin - margin + delta_realized_pnl
              = (sum of (cost_price * quantity) of closed trade) * contract_multiplier * margin_rate + delta_realized_pnl

    :param trade: rqalpha.model.trade.Trade
    :return: float
    """
    # close_trade: delta_cash = old_margin - margin + delta_realized_pnl
    trade_quantity = trade.last_quantity
    if trade.side == SIDE.BUY:
        if trade.position_effect == POSITION_EFFECT.OPEN:
            self._buy_avg_open_price = (self._buy_avg_open_price * self.buy_quantity +
                                        trade_quantity * trade.last_price) / (self.buy_quantity + trade_quantity)
            self._buy_transaction_cost += trade.transaction_cost
            self._buy_today_holding_list.insert(0, (trade.last_price, trade_quantity))
            return -1 * self._margin_of(trade_quantity, trade.last_price)
        else:
            old_margin = self.margin
            self._sell_transaction_cost += trade.transaction_cost
            delta_realized_pnl = self._close_holding(trade)
            self._sell_realized_pnl += delta_realized_pnl
            return old_margin - self.margin + delta_realized_pnl
    else:
        if trade.position_effect == POSITION_EFFECT.OPEN:
            self._sell_avg_open_price = (self._sell_avg_open_price * self.sell_quantity +
                                         trade_quantity * trade.last_price) / (self.sell_quantity + trade_quantity)
            self._sell_transaction_cost += trade.transaction_cost
            self._sell_today_holding_list.insert(0, (trade.last_price, trade_quantity))
            return -1 * self._margin_of(trade_quantity, trade.last_price)
        else:
            old_margin = self.margin
            self._buy_transaction_cost += trade.transaction_cost
            delta_realized_pnl = self._close_holding(trade)
            self._buy_realized_pnl += delta_realized_pnl
            return old_margin - self.margin + delta_realized_pnl
[ "def", "apply_trade", "(", "self", ",", "trade", ")", ":", "# close_trade: delta_cash = old_margin - margin + delta_realized_pnl", "trade_quantity", "=", "trade", ".", "last_quantity", "if", "trade", ".", "side", "==", "SIDE", ".", "BUY", ":", "if", "trade", ".", "position_effect", "==", "POSITION_EFFECT", ".", "OPEN", ":", "self", ".", "_buy_avg_open_price", "=", "(", "self", ".", "_buy_avg_open_price", "*", "self", ".", "buy_quantity", "+", "trade_quantity", "*", "trade", ".", "last_price", ")", "/", "(", "self", ".", "buy_quantity", "+", "trade_quantity", ")", "self", ".", "_buy_transaction_cost", "+=", "trade", ".", "transaction_cost", "self", ".", "_buy_today_holding_list", ".", "insert", "(", "0", ",", "(", "trade", ".", "last_price", ",", "trade_quantity", ")", ")", "return", "-", "1", "*", "self", ".", "_margin_of", "(", "trade_quantity", ",", "trade", ".", "last_price", ")", "else", ":", "old_margin", "=", "self", ".", "margin", "self", ".", "_sell_transaction_cost", "+=", "trade", ".", "transaction_cost", "delta_realized_pnl", "=", "self", ".", "_close_holding", "(", "trade", ")", "self", ".", "_sell_realized_pnl", "+=", "delta_realized_pnl", "return", "old_margin", "-", "self", ".", "margin", "+", "delta_realized_pnl", "else", ":", "if", "trade", ".", "position_effect", "==", "POSITION_EFFECT", ".", "OPEN", ":", "self", ".", "_sell_avg_open_price", "=", "(", "self", ".", "_sell_avg_open_price", "*", "self", ".", "sell_quantity", "+", "trade_quantity", "*", "trade", ".", "last_price", ")", "/", "(", "self", ".", "sell_quantity", "+", "trade_quantity", ")", "self", ".", "_sell_transaction_cost", "+=", "trade", ".", "transaction_cost", "self", ".", "_sell_today_holding_list", ".", "insert", "(", "0", ",", "(", "trade", ".", "last_price", ",", "trade_quantity", ")", ")", "return", "-", "1", "*", "self", ".", "_margin_of", "(", "trade_quantity", ",", "trade", ".", "last_price", ")", "else", ":", "old_margin", "=", "self", ".", "margin", "self", ".", "_buy_transaction_cost", "+=", "trade", ".", "transaction_cost", "delta_realized_pnl", "=", "self", ".", "_close_holding", "(", "trade", ")", "self", ".", "_buy_realized_pnl", "+=", "delta_realized_pnl", "return", "old_margin", "-", "self", ".", "margin", "+", "delta_realized_pnl" ]
Apply a trade and compute the cash change it produces.

Opening a position: delta_cash = -1 * margin = -1 * quantity * contract_multiplier * price * margin_rate
Closing a position: delta_cash = old_margin - margin + delta_realized_pnl
          = (sum of (cost_price * quantity) of closed trade) * contract_multiplier * margin_rate + delta_realized_pnl

:param trade: rqalpha.model.trade.Trade
:return: float
[ "应用成交,并计算交易产生的现金变动。" ]
python
train
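A tiny worked number for the opening-position formula in the docstring above (plain-Python sketch; the figures are illustrative, not rqalpha output):

quantity, contract_multiplier, price, margin_rate = 2, 10, 3500.0, 0.08
delta_cash = -1 * quantity * contract_multiplier * price * margin_rate
# buying 2 lots at 3500 with multiplier 10 and an 8% margin rate locks up 5600 of cash
assert round(delta_cash, 6) == -5600.0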
psd-tools/packbits
src/packbits.py
https://github.com/psd-tools/packbits/blob/38909758005abfb891770996f321a630f3c9ece2/src/packbits.py#L29-L101
def encode(data): """ Encodes data using PackBits encoding. """ if len(data) == 0: return data if len(data) == 1: return b'\x00' + data data = bytearray(data) result = bytearray() buf = bytearray() pos = 0 repeat_count = 0 MAX_LENGTH = 127 # we can safely start with RAW as empty RAW sequences # are handled by finish_raw() state = 'RAW' def finish_raw(): if len(buf) == 0: return result.append(len(buf)-1) result.extend(buf) buf[:] = bytearray() def finish_rle(): result.append(256-(repeat_count - 1)) result.append(data[pos]) while pos < len(data)-1: current_byte = data[pos] if data[pos] == data[pos+1]: if state == 'RAW': # end of RAW data finish_raw() state = 'RLE' repeat_count = 1 elif state == 'RLE': if repeat_count == MAX_LENGTH: # restart the encoding finish_rle() repeat_count = 0 # move to next byte repeat_count += 1 else: if state == 'RLE': repeat_count += 1 finish_rle() state = 'RAW' repeat_count = 0 elif state == 'RAW': if len(buf) == MAX_LENGTH: # restart the encoding finish_raw() buf.append(current_byte) pos += 1 if state == 'RAW': buf.append(data[pos]) finish_raw() else: repeat_count += 1 finish_rle() return bytes(result)
[ "def", "encode", "(", "data", ")", ":", "if", "len", "(", "data", ")", "==", "0", ":", "return", "data", "if", "len", "(", "data", ")", "==", "1", ":", "return", "b'\\x00'", "+", "data", "data", "=", "bytearray", "(", "data", ")", "result", "=", "bytearray", "(", ")", "buf", "=", "bytearray", "(", ")", "pos", "=", "0", "repeat_count", "=", "0", "MAX_LENGTH", "=", "127", "# we can safely start with RAW as empty RAW sequences", "# are handled by finish_raw()", "state", "=", "'RAW'", "def", "finish_raw", "(", ")", ":", "if", "len", "(", "buf", ")", "==", "0", ":", "return", "result", ".", "append", "(", "len", "(", "buf", ")", "-", "1", ")", "result", ".", "extend", "(", "buf", ")", "buf", "[", ":", "]", "=", "bytearray", "(", ")", "def", "finish_rle", "(", ")", ":", "result", ".", "append", "(", "256", "-", "(", "repeat_count", "-", "1", ")", ")", "result", ".", "append", "(", "data", "[", "pos", "]", ")", "while", "pos", "<", "len", "(", "data", ")", "-", "1", ":", "current_byte", "=", "data", "[", "pos", "]", "if", "data", "[", "pos", "]", "==", "data", "[", "pos", "+", "1", "]", ":", "if", "state", "==", "'RAW'", ":", "# end of RAW data", "finish_raw", "(", ")", "state", "=", "'RLE'", "repeat_count", "=", "1", "elif", "state", "==", "'RLE'", ":", "if", "repeat_count", "==", "MAX_LENGTH", ":", "# restart the encoding", "finish_rle", "(", ")", "repeat_count", "=", "0", "# move to next byte", "repeat_count", "+=", "1", "else", ":", "if", "state", "==", "'RLE'", ":", "repeat_count", "+=", "1", "finish_rle", "(", ")", "state", "=", "'RAW'", "repeat_count", "=", "0", "elif", "state", "==", "'RAW'", ":", "if", "len", "(", "buf", ")", "==", "MAX_LENGTH", ":", "# restart the encoding", "finish_raw", "(", ")", "buf", ".", "append", "(", "current_byte", ")", "pos", "+=", "1", "if", "state", "==", "'RAW'", ":", "buf", ".", "append", "(", "data", "[", "pos", "]", ")", "finish_raw", "(", ")", "else", ":", "repeat_count", "+=", "1", "finish_rle", "(", ")", "return", "bytes", "(", "result", ")" ]
Encodes data using PackBits encoding.
[ "Encodes", "data", "using", "PackBits", "encoding", "." ]
python
test
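A quick sanity check of the encoder above (usage sketch; the expected bytes follow from the 256 - (count - 1) run header written by finish_rle and the len - 1 literal header written by finish_raw):

assert encode(b'\xaa\xaa\xaa\xaa\xaa') == b'\xfc\xaa'    # run of 5: header 0xfc == 256 - 4
assert encode(b'\x01\x02') == b'\x01\x01\x02'            # two literals: header 0x01 == 2 - 1
assert encode(b'') == b''                                # empty input passes straight through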
odlgroup/odl
odl/operator/pspace_ops.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/pspace_ops.py#L810-L849
def derivative(self, x):
    """Derivative of the broadcast operator.

    Parameters
    ----------
    x : `domain` element
        The point to take the derivative in

    Returns
    -------
    derivative : linear `BroadcastOperator`
        The derivative

    Examples
    --------
    Example with an affine operator:

    >>> I = odl.IdentityOperator(odl.rn(3))
    >>> residual_op = I - I.domain.element([1, 1, 1])
    >>> op = BroadcastOperator(residual_op, 2 * residual_op)

    Calling operator offsets by ``[1, 1, 1]``:

    >>> x = [1, 2, 3]
    >>> op(x)
    ProductSpace(rn(3), 2).element([
        [ 0.,  1.,  2.],
        [ 0.,  2.,  4.]
    ])

    The derivative of this affine operator does not have an offset:

    >>> op.derivative(x)(x)
    ProductSpace(rn(3), 2).element([
        [ 1.,  2.,  3.],
        [ 2.,  4.,  6.]
    ])
    """
    return BroadcastOperator(*[op.derivative(x) for op in self.operators])
[ "def", "derivative", "(", "self", ",", "x", ")", ":", "return", "BroadcastOperator", "(", "*", "[", "op", ".", "derivative", "(", "x", ")", "for", "op", "in", "self", ".", "operators", "]", ")" ]
Derivative of the broadcast operator.

Parameters
----------
x : `domain` element
    The point to take the derivative in

Returns
-------
derivative : linear `BroadcastOperator`
    The derivative

Examples
--------
Example with an affine operator:

>>> I = odl.IdentityOperator(odl.rn(3))
>>> residual_op = I - I.domain.element([1, 1, 1])
>>> op = BroadcastOperator(residual_op, 2 * residual_op)

Calling operator offsets by ``[1, 1, 1]``:

>>> x = [1, 2, 3]
>>> op(x)
ProductSpace(rn(3), 2).element([
    [ 0.,  1.,  2.],
    [ 0.,  2.,  4.]
])

The derivative of this affine operator does not have an offset:

>>> op.derivative(x)(x)
ProductSpace(rn(3), 2).element([
    [ 1.,  2.,  3.],
    [ 2.,  4.,  6.]
])
[ "Derivative", "of", "the", "broadcast", "operator", "." ]
python
train
dmlc/gluon-nlp
src/gluonnlp/data/utils.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/utils.py#L92-L133
def count_tokens(tokens, to_lower=False, counter=None):
    r"""Counts tokens in the specified string.

    For token_delim='(td)' and seq_delim='(sd)', a specified string of two sequences of tokens may
    look like::

        (td)token1(td)token2(td)token3(td)(sd)(td)token4(td)token5(td)(sd)

    Parameters
    ----------
    tokens : list of str
        A source list of tokens.
    to_lower : bool, default False
        Whether to convert the source tokens to lower case.
    counter : Counter or None, default None
        The Counter instance to be updated with the counts of `tokens`. If
        None, return a new Counter instance counting tokens from `tokens`.

    Returns
    -------
    The `counter` Counter instance after being updated with the token
    counts of `tokens`. If `counter` is None, return a new Counter instance
    counting tokens from `tokens`.

    Examples
    --------
    >>> import re
    >>> source_str = ' Life is great ! \n life is good . \n'
    >>> source_str_tokens = filter(None, re.split(' |\n', source_str))
    >>> gluonnlp.data.count_tokens(source_str_tokens)
    Counter({'is': 2, 'Life': 1, 'great': 1, '!': 1, 'life': 1, 'good': 1, '.': 1})

    """
    if to_lower:
        tokens = [t.lower() for t in tokens]

    if counter is None:
        return Counter(tokens)
    else:
        counter.update(tokens)
        return counter
[ "def", "count_tokens", "(", "tokens", ",", "to_lower", "=", "False", ",", "counter", "=", "None", ")", ":", "if", "to_lower", ":", "tokens", "=", "[", "t", ".", "lower", "(", ")", "for", "t", "in", "tokens", "]", "if", "counter", "is", "None", ":", "return", "Counter", "(", "tokens", ")", "else", ":", "counter", ".", "update", "(", "tokens", ")", "return", "counter" ]
r"""Counts tokens in the specified string. For token_delim='(td)' and seq_delim='(sd)', a specified string of two sequences of tokens may look like:: (td)token1(td)token2(td)token3(td)(sd)(td)token4(td)token5(td)(sd) Parameters ---------- tokens : list of str A source list of tokens. to_lower : bool, default False Whether to convert the source source_str to the lower case. counter : Counter or None, default None The Counter instance to be updated with the counts of `tokens`. If None, return a new Counter instance counting tokens from `tokens`. Returns ------- The `counter` Counter instance after being updated with the token counts of `source_str`. If `counter` is None, return a new Counter instance counting tokens from `source_str`. Examples -------- >>> import re >>> source_str = ' Life is great ! \n life is good . \n' >>> source_str_tokens = filter(None, re.split(' |\n', source_str)) >>> gluonnlp.data.count_tokens(source_str_tokens) Counter({'is': 2, 'Life': 1, 'great': 1, '!': 1, 'life': 1, 'good': 1, '.': 1})
[ "r", "Counts", "tokens", "in", "the", "specified", "string", "." ]
python
train
J535D165/recordlinkage
recordlinkage/api.py
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/api.py#L42-L56
def block(self, *args, **kwargs): """Add a block index. Shortcut of :class:`recordlinkage.index.Block`:: from recordlinkage.index import Block indexer = recordlinkage.Index() indexer.add(Block()) """ indexer = Block(*args, **kwargs) self.add(indexer) return self
[ "def", "block", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "indexer", "=", "Block", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "add", "(", "indexer", ")", "return", "self" ]
Add a block index. Shortcut of :class:`recordlinkage.index.Block`:: from recordlinkage.index import Block indexer = recordlinkage.Index() indexer.add(Block())
[ "Add", "a", "block", "index", "." ]
python
train
pydata/xarray
xarray/core/dataarray.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/dataarray.py#L1034-L1080
def interp_like(self, other, method='linear', assume_sorted=False,
                kwargs={}):
    """Interpolate this object onto the coordinates of another object,
    filling out of range values with NaN.

    Parameters
    ----------
    other : Dataset or DataArray
        Object with an 'indexes' attribute giving a mapping from dimension
        names to a 1d array-like, which provides coordinates upon
        which to index the variables in this dataset.
    method: string, optional.
        {'linear', 'nearest'} for multidimensional array,
        {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
        for 1-dimensional array. 'linear' is used by default.
    assume_sorted: boolean, optional
        If False, values of coordinates that are interpolated over can be
        in any order and they are sorted first. If True, interpolated
        coordinates are assumed to be an array of monotonically increasing
        values.
    kwargs: dictionary, optional
        Additional keyword arguments passed to scipy's interpolator.

    Returns
    -------
    interpolated: xr.DataArray
        Another dataarray by interpolating this dataarray's data along the
        coordinates of the other object.

    Notes
    -----
    scipy is required.
    If the dataarray has object-type coordinates, reindex is used for these
    coordinates instead of the interpolation.

    See Also
    --------
    DataArray.interp
    DataArray.reindex_like
    """
    if self.dtype.kind not in 'uifc':
        raise TypeError('interp only works for a numeric type array. '
                        'Given {}.'.format(self.dtype))

    ds = self._to_temp_dataset().interp_like(
        other, method=method, kwargs=kwargs, assume_sorted=assume_sorted)
    return self._from_temp_dataset(ds)
[ "def", "interp_like", "(", "self", ",", "other", ",", "method", "=", "'linear'", ",", "assume_sorted", "=", "False", ",", "kwargs", "=", "{", "}", ")", ":", "if", "self", ".", "dtype", ".", "kind", "not", "in", "'uifc'", ":", "raise", "TypeError", "(", "'interp only works for a numeric type array. '", "'Given {}.'", ".", "format", "(", "self", ".", "dtype", ")", ")", "ds", "=", "self", ".", "_to_temp_dataset", "(", ")", ".", "interp_like", "(", "other", ",", "method", "=", "method", ",", "kwargs", "=", "kwargs", ",", "assume_sorted", "=", "assume_sorted", ")", "return", "self", ".", "_from_temp_dataset", "(", "ds", ")" ]
Interpolate this object onto the coordinates of another object,
filling out of range values with NaN.

Parameters
----------
other : Dataset or DataArray
    Object with an 'indexes' attribute giving a mapping from dimension
    names to a 1d array-like, which provides coordinates upon
    which to index the variables in this dataset.
method: string, optional.
    {'linear', 'nearest'} for multidimensional array,
    {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
    for 1-dimensional array. 'linear' is used by default.
assume_sorted: boolean, optional
    If False, values of coordinates that are interpolated over can be
    in any order and they are sorted first. If True, interpolated
    coordinates are assumed to be an array of monotonically increasing
    values.
kwargs: dictionary, optional
    Additional keyword arguments passed to scipy's interpolator.

Returns
-------
interpolated: xr.DataArray
    Another dataarray by interpolating this dataarray's data along the
    coordinates of the other object.

Notes
-----
scipy is required.
If the dataarray has object-type coordinates, reindex is used for these
coordinates instead of the interpolation.

See Also
--------
DataArray.interp
DataArray.reindex_like
[ "Interpolate", "this", "object", "onto", "the", "coordinates", "of", "another", "object", "filling", "out", "of", "range", "values", "with", "NaN", "." ]
python
train
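A hedged usage sketch for the method above (array sizes and values are illustrative; scipy must be installed, as the Notes section says):

import numpy as np
import xarray as xr

coarse = xr.DataArray(np.sin(np.linspace(0, 2 * np.pi, 10)),
                      dims='x', coords={'x': np.linspace(0, 9, 10)})
fine = xr.DataArray(np.zeros(91),
                    dims='x', coords={'x': np.linspace(0, 9, 91)})
interp = coarse.interp_like(fine)   # linear interpolation onto fine's denser 'x'
assert interp.sizes['x'] == 91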
saltstack/salt
salt/modules/virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L1726-L1739
def _disks_equal(disk1, disk2): ''' Test if two disk elements should be considered like the same device ''' target1 = disk1.find('target') target2 = disk2.find('target') source1 = ElementTree.tostring(disk1.find('source')) if disk1.find('source') is not None else None source2 = ElementTree.tostring(disk2.find('source')) if disk2.find('source') is not None else None return source1 == source2 and \ target1 is not None and target2 is not None and \ target1.get('bus') == target2.get('bus') and \ disk1.get('device', 'disk') == disk2.get('device', 'disk') and \ target1.get('dev') == target2.get('dev')
[ "def", "_disks_equal", "(", "disk1", ",", "disk2", ")", ":", "target1", "=", "disk1", ".", "find", "(", "'target'", ")", "target2", "=", "disk2", ".", "find", "(", "'target'", ")", "source1", "=", "ElementTree", ".", "tostring", "(", "disk1", ".", "find", "(", "'source'", ")", ")", "if", "disk1", ".", "find", "(", "'source'", ")", "is", "not", "None", "else", "None", "source2", "=", "ElementTree", ".", "tostring", "(", "disk2", ".", "find", "(", "'source'", ")", ")", "if", "disk2", ".", "find", "(", "'source'", ")", "is", "not", "None", "else", "None", "return", "source1", "==", "source2", "and", "target1", "is", "not", "None", "and", "target2", "is", "not", "None", "and", "target1", ".", "get", "(", "'bus'", ")", "==", "target2", ".", "get", "(", "'bus'", ")", "and", "disk1", ".", "get", "(", "'device'", ",", "'disk'", ")", "==", "disk2", ".", "get", "(", "'device'", ",", "'disk'", ")", "and", "target1", ".", "get", "(", "'dev'", ")", "==", "target2", ".", "get", "(", "'dev'", ")" ]
Test if two disk elements should be considered like the same device
[ "Test", "if", "two", "disk", "elements", "should", "be", "considered", "like", "the", "same", "device" ]
python
train
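A small illustration of the comparison above, with hand-built libvirt-style disk elements (the XML snippets are illustrative, not taken from this record):

import xml.etree.ElementTree as ElementTree

disk_a = ElementTree.fromstring(
    "<disk type='file' device='disk'>"
    "<source file='/var/lib/libvirt/images/vm.qcow2'/>"
    "<target dev='vda' bus='virtio'/></disk>")
disk_b = ElementTree.fromstring(
    "<disk type='file' device='disk'>"
    "<source file='/var/lib/libvirt/images/vm.qcow2'/>"
    "<target dev='vdb' bus='virtio'/></disk>")

_disks_equal(disk_a, disk_a)   # True: same source, bus, device and dev
_disks_equal(disk_a, disk_b)   # False: target dev differs (vda vs vdb)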
androguard/androguard
androguard/core/bytecodes/dvm.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/dvm.py#L7987-L7998
def get_fields(self): """ Return all field objects :rtype: a list of :class:`EncodedField` objects """ if self.__cache_all_fields is None: self.__cache_all_fields = [] for i in self.get_classes(): for j in i.get_fields(): self.__cache_all_fields.append(j) return self.__cache_all_fields
[ "def", "get_fields", "(", "self", ")", ":", "if", "self", ".", "__cache_all_fields", "is", "None", ":", "self", ".", "__cache_all_fields", "=", "[", "]", "for", "i", "in", "self", ".", "get_classes", "(", ")", ":", "for", "j", "in", "i", ".", "get_fields", "(", ")", ":", "self", ".", "__cache_all_fields", ".", "append", "(", "j", ")", "return", "self", ".", "__cache_all_fields" ]
Return all field objects :rtype: a list of :class:`EncodedField` objects
[ "Return", "all", "field", "objects" ]
python
train
ejeschke/ginga
ginga/web/pgw/ipg.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/web/pgw/ipg.py#L168-L197
def build_gui(self, container): """ This is responsible for building the viewer's UI. It should place the UI in `container`. """ vbox = Widgets.VBox() vbox.set_border_width(2) vbox.set_spacing(1) w = Viewers.GingaViewerWidget(viewer=self) vbox.add_widget(w, stretch=1) # set up to capture cursor movement for reading out coordinates # coordinates reported in base 1 or 0? self.pixel_base = 1.0 self.readout = Widgets.Label("") vbox.add_widget(self.readout, stretch=0) #self.set_callback('none-move', self.motion_cb) self.set_callback('cursor-changed', self.motion_cb) # need to put this in an hbox with an expanding label or the # browser wants to resize the canvas, distorting it hbox = Widgets.HBox() hbox.add_widget(vbox, stretch=0) hbox.add_widget(Widgets.Label(''), stretch=1) container.set_widget(hbox)
[ "def", "build_gui", "(", "self", ",", "container", ")", ":", "vbox", "=", "Widgets", ".", "VBox", "(", ")", "vbox", ".", "set_border_width", "(", "2", ")", "vbox", ".", "set_spacing", "(", "1", ")", "w", "=", "Viewers", ".", "GingaViewerWidget", "(", "viewer", "=", "self", ")", "vbox", ".", "add_widget", "(", "w", ",", "stretch", "=", "1", ")", "# set up to capture cursor movement for reading out coordinates", "# coordinates reported in base 1 or 0?", "self", ".", "pixel_base", "=", "1.0", "self", ".", "readout", "=", "Widgets", ".", "Label", "(", "\"\"", ")", "vbox", ".", "add_widget", "(", "self", ".", "readout", ",", "stretch", "=", "0", ")", "#self.set_callback('none-move', self.motion_cb)", "self", ".", "set_callback", "(", "'cursor-changed'", ",", "self", ".", "motion_cb", ")", "# need to put this in an hbox with an expanding label or the", "# browser wants to resize the canvas, distorting it", "hbox", "=", "Widgets", ".", "HBox", "(", ")", "hbox", ".", "add_widget", "(", "vbox", ",", "stretch", "=", "0", ")", "hbox", ".", "add_widget", "(", "Widgets", ".", "Label", "(", "''", ")", ",", "stretch", "=", "1", ")", "container", ".", "set_widget", "(", "hbox", ")" ]
This is responsible for building the viewer's UI. It should place the UI in `container`.
[ "This", "is", "responsible", "for", "building", "the", "viewer", "s", "UI", ".", "It", "should", "place", "the", "UI", "in", "container", "." ]
python
train
dvdotsenko/jsonrpc.py
jsonrpcparts/serializers.py
https://github.com/dvdotsenko/jsonrpc.py/blob/19673edd77a9518ac5655bd407f6b93ffbb2cafc/jsonrpcparts/serializers.py#L206-L240
def parse_request(cls, jsonrpc_message): """We take apart JSON-RPC-formatted message as a string and decompose it into a dictionary object, emitting errors if parsing detects issues with the format of the message. :Returns: | [method_name, params, id] or [method_name, params] | params is a tuple/list | if id is missing, this is a Notification :Raises: RPCParseError, RPCInvalidRPC, RPCInvalidMethodParams """ try: data = cls.json_loads(jsonrpc_message) except ValueError, err: raise errors.RPCParseError("No valid JSON. (%s)" % str(err)) if not isinstance(data, dict): raise errors.RPCInvalidRPC("No valid RPC-package.") if "method" not in data: raise errors.RPCInvalidRPC("""Invalid Request, "method" is missing.""") if not isinstance(data["method"], (str, unicode)): raise errors.RPCInvalidRPC("""Invalid Request, "method" must be a string.""") if "id" not in data: data["id"] = None #be liberal if "params" not in data: data["params"] = () #be liberal if not isinstance(data["params"], (list, tuple)): raise errors.RPCInvalidRPC("""Invalid Request, "params" must be an array.""") if len(data) != 3: raise errors.RPCInvalidRPC("""Invalid Request, additional fields found.""") # notification / request if data["id"] is None: return data["method"], data["params"] #notification else: return data["method"], data["params"], data["id"]
[ "def", "parse_request", "(", "cls", ",", "jsonrpc_message", ")", ":", "try", ":", "data", "=", "cls", ".", "json_loads", "(", "jsonrpc_message", ")", "except", "ValueError", ",", "err", ":", "raise", "errors", ".", "RPCParseError", "(", "\"No valid JSON. (%s)\"", "%", "str", "(", "err", ")", ")", "if", "not", "isinstance", "(", "data", ",", "dict", ")", ":", "raise", "errors", ".", "RPCInvalidRPC", "(", "\"No valid RPC-package.\"", ")", "if", "\"method\"", "not", "in", "data", ":", "raise", "errors", ".", "RPCInvalidRPC", "(", "\"\"\"Invalid Request, \"method\" is missing.\"\"\"", ")", "if", "not", "isinstance", "(", "data", "[", "\"method\"", "]", ",", "(", "str", ",", "unicode", ")", ")", ":", "raise", "errors", ".", "RPCInvalidRPC", "(", "\"\"\"Invalid Request, \"method\" must be a string.\"\"\"", ")", "if", "\"id\"", "not", "in", "data", ":", "data", "[", "\"id\"", "]", "=", "None", "#be liberal", "if", "\"params\"", "not", "in", "data", ":", "data", "[", "\"params\"", "]", "=", "(", ")", "#be liberal", "if", "not", "isinstance", "(", "data", "[", "\"params\"", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "errors", ".", "RPCInvalidRPC", "(", "\"\"\"Invalid Request, \"params\" must be an array.\"\"\"", ")", "if", "len", "(", "data", ")", "!=", "3", ":", "raise", "errors", ".", "RPCInvalidRPC", "(", "\"\"\"Invalid Request, additional fields found.\"\"\"", ")", "# notification / request", "if", "data", "[", "\"id\"", "]", "is", "None", ":", "return", "data", "[", "\"method\"", "]", ",", "data", "[", "\"params\"", "]", "#notification", "else", ":", "return", "data", "[", "\"method\"", "]", ",", "data", "[", "\"params\"", "]", ",", "data", "[", "\"id\"", "]" ]
We take apart JSON-RPC-formatted message as a string and decompose it into a dictionary object, emitting errors if parsing detects issues with the format of the message. :Returns: | [method_name, params, id] or [method_name, params] | params is a tuple/list | if id is missing, this is a Notification :Raises: RPCParseError, RPCInvalidRPC, RPCInvalidMethodParams
[ "We", "take", "apart", "JSON", "-", "RPC", "-", "formatted", "message", "as", "a", "string", "and", "decompose", "it", "into", "a", "dictionary", "object", "emitting", "errors", "if", "parsing", "detects", "issues", "with", "the", "format", "of", "the", "message", "." ]
python
train
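A worked example of the decomposition above (JSONRPC10Serializer is a hypothetical name for a class exposing this classmethod; note the module itself is Python 2 era code):

msg = '{"method": "subtract", "params": [42, 23], "id": 1}'
JSONRPC10Serializer.parse_request(msg)
# -> ('subtract', [42, 23], 1)

note = '{"method": "notify_progress", "params": [87], "id": null}'
JSONRPC10Serializer.parse_request(note)
# -> ('notify_progress', [87]); a null or missing id marks a notification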
Becksteinlab/GromacsWrapper
gromacs/setup.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/setup.py#L535-L627
def solvate(struct='top/protein.pdb', top='top/system.top',
            distance=0.9, boxtype='dodecahedron',
            concentration=0, cation='NA', anion='CL',
            water='tip4p', solvent_name='SOL', with_membrane=False,
            ndx = 'main.ndx', mainselection = '"Protein"',
            dirname='solvate',
            **kwargs):
    """Put protein into box, add water, add counter-ions.

    Currently this really only supports solutes in water. If you need
    to embed a protein in a membrane then you will require more
    sophisticated approaches.

    However, you *can* supply a protein already inserted in a
    bilayer. In this case you will probably want to set *distance* =
    ``None`` and also enable *with_membrane* = ``True`` (using extra
    big vdw radii for typical lipids).

    .. Note:: The defaults are suitable for solvating a globular
       protein in a fairly tight (increase *distance*!) dodecahedral
       box.

    :Arguments:
      *struct* : filename
          pdb or gro input structure
      *top* : filename
          Gromacs topology
      *distance* : float
          When solvating with water, make the box big enough so that
          at least *distance* nm water are between the solute *struct*
          and the box boundary.
          Set *boxtype* to ``None`` in order to use a box size in the
          input file (gro or pdb).
      *boxtype* or *bt*: string
          Any of the box types supported by :class:`~gromacs.tools.Editconf`
          (triclinic, cubic, dodecahedron, octahedron). Set the box dimensions
          either with *distance* or the *box* and *angles* keywords.

          If set to ``None`` it will ignore *distance* and use the box
          inside the *struct* file.

          *bt* overrides the value of *boxtype*.
      *box*
          List of three box lengths [A,B,C] that are used by
          :class:`~gromacs.tools.Editconf` in combination with *boxtype*
          (``bt`` in :program:`editconf`) and *angles*.
          Setting *box* overrides *distance*.
      *angles*
          List of three angles (only necessary for triclinic boxes).
      *concentration* : float
          Concentration of the free ions in mol/l. Note that counter
          ions are added in excess of this concentration.
      *cation* and *anion* : string
          Molecule names of the ions. This depends on the chosen force field.
      *water* : string
          Name of the water model; one of "spc", "spce", "tip3p",
          "tip4p". This should be appropriate for the chosen force
          field. If an alternative solvent is required, simply supply the
          path to a box with solvent molecules (used by
          :func:`~gromacs.genbox`'s *cs* argument) and also supply the
          molecule name via *solvent_name*.
      *solvent_name*
          Name of the molecules that make up the solvent (as set in the
          itp/top). Typically needs to be changed when using
          non-standard/non-water solvents. ["SOL"]
      *with_membrane* : bool
          ``True``: use special ``vdwradii.dat`` with 0.1 nm-increased radii
          on lipids. Default is ``False``.
      *ndx* : filename
          How to name the index file that is produced by this function.
      *mainselection* : string
          A string that is fed to :class:`~gromacs.tools.Make_ndx` and
          which should select the solute.
      *dirname* : directory name
          Name of the directory in which all files for the solvation stage
          are stored.
      *includes*
          List of additional directories to add to the mdp include path
      *kwargs*
          Additional arguments are passed on to
          :class:`~gromacs.tools.Editconf` or are interpreted as parameters
          to be changed in the mdp file.

    """
    sol = solvate_sol(struct=struct, top=top,
                      distance=distance, boxtype=boxtype,
                      water=water, solvent_name=solvent_name,
                      with_membrane=with_membrane,
                      dirname=dirname, **kwargs)

    ion = solvate_ion(struct=sol['struct'], top=top,
                      concentration=concentration, cation=cation,
                      anion=anion, solvent_name=solvent_name,
                      ndx=ndx, mainselection=mainselection,
                      dirname=dirname, **kwargs)
    return ion
[ "def", "solvate", "(", "struct", "=", "'top/protein.pdb'", ",", "top", "=", "'top/system.top'", ",", "distance", "=", "0.9", ",", "boxtype", "=", "'dodecahedron'", ",", "concentration", "=", "0", ",", "cation", "=", "'NA'", ",", "anion", "=", "'CL'", ",", "water", "=", "'tip4p'", ",", "solvent_name", "=", "'SOL'", ",", "with_membrane", "=", "False", ",", "ndx", "=", "'main.ndx'", ",", "mainselection", "=", "'\"Protein\"'", ",", "dirname", "=", "'solvate'", ",", "*", "*", "kwargs", ")", ":", "sol", "=", "solvate_sol", "(", "struct", "=", "struct", ",", "top", "=", "top", ",", "distance", "=", "distance", ",", "boxtype", "=", "boxtype", ",", "water", "=", "water", ",", "solvent_name", "=", "solvent_name", ",", "with_membrane", "=", "with_membrane", ",", "dirname", "=", "dirname", ",", "*", "*", "kwargs", ")", "ion", "=", "solvate_ion", "(", "struct", "=", "sol", "[", "'struct'", "]", ",", "top", "=", "top", ",", "concentration", "=", "concentration", ",", "cation", "=", "cation", ",", "anion", "=", "anion", ",", "solvent_name", "=", "solvent_name", ",", "ndx", "=", "ndx", ",", "mainselection", "=", "mainselection", ",", "dirname", "=", "dirname", ",", "*", "*", "kwargs", ")", "return", "ion" ]
Put protein into box, add water, add counter-ions.

Currently this really only supports solutes in water. If you need
to embed a protein in a membrane then you will require more
sophisticated approaches.

However, you *can* supply a protein already inserted in a
bilayer. In this case you will probably want to set *distance* =
``None`` and also enable *with_membrane* = ``True`` (using extra
big vdw radii for typical lipids).

.. Note:: The defaults are suitable for solvating a globular
   protein in a fairly tight (increase *distance*!) dodecahedral
   box.

:Arguments:
  *struct* : filename
      pdb or gro input structure
  *top* : filename
      Gromacs topology
  *distance* : float
      When solvating with water, make the box big enough so that
      at least *distance* nm water are between the solute *struct*
      and the box boundary.
      Set *boxtype* to ``None`` in order to use a box size in the
      input file (gro or pdb).
  *boxtype* or *bt*: string
      Any of the box types supported by :class:`~gromacs.tools.Editconf`
      (triclinic, cubic, dodecahedron, octahedron). Set the box dimensions
      either with *distance* or the *box* and *angles* keywords.

      If set to ``None`` it will ignore *distance* and use the box
      inside the *struct* file.

      *bt* overrides the value of *boxtype*.
  *box*
      List of three box lengths [A,B,C] that are used by
      :class:`~gromacs.tools.Editconf` in combination with *boxtype*
      (``bt`` in :program:`editconf`) and *angles*.
      Setting *box* overrides *distance*.
  *angles*
      List of three angles (only necessary for triclinic boxes).
  *concentration* : float
      Concentration of the free ions in mol/l. Note that counter
      ions are added in excess of this concentration.
  *cation* and *anion* : string
      Molecule names of the ions. This depends on the chosen force field.
  *water* : string
      Name of the water model; one of "spc", "spce", "tip3p",
      "tip4p". This should be appropriate for the chosen force
      field. If an alternative solvent is required, simply supply the
      path to a box with solvent molecules (used by
      :func:`~gromacs.genbox`'s *cs* argument) and also supply the
      molecule name via *solvent_name*.
  *solvent_name*
      Name of the molecules that make up the solvent (as set in the
      itp/top). Typically needs to be changed when using
      non-standard/non-water solvents. ["SOL"]
  *with_membrane* : bool
      ``True``: use special ``vdwradii.dat`` with 0.1 nm-increased radii
      on lipids. Default is ``False``.
  *ndx* : filename
      How to name the index file that is produced by this function.
  *mainselection* : string
      A string that is fed to :class:`~gromacs.tools.Make_ndx` and
      which should select the solute.
  *dirname* : directory name
      Name of the directory in which all files for the solvation stage
      are stored.
  *includes*
      List of additional directories to add to the mdp include path
  *kwargs*
      Additional arguments are passed on to
      :class:`~gromacs.tools.Editconf` or are interpreted as parameters
      to be changed in the mdp file.
[ "Put", "protein", "into", "box", "add", "water", "add", "counter", "-", "ions", "." ]
python
valid
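A hedged call sketch for the helper above (file names, box size and ion concentration are illustrative; the return value is whatever solvate_ion produces, per the code):

from gromacs.setup import solvate

solvate(struct='top/protein.pdb', top='top/system.top',
        distance=1.2, boxtype='dodecahedron',
        concentration=0.15, cation='NA', anion='CL',
        water='tip3p', dirname='solvate')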
google/transitfeed
transitfeed/shapelib.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/shapelib.py#L160-L167
def CrossProd(self, other): """ Returns the cross product of self and other. """ return Point( self.y * other.z - self.z * other.y, self.z * other.x - self.x * other.z, self.x * other.y - self.y * other.x)
[ "def", "CrossProd", "(", "self", ",", "other", ")", ":", "return", "Point", "(", "self", ".", "y", "*", "other", ".", "z", "-", "self", ".", "z", "*", "other", ".", "y", ",", "self", ".", "z", "*", "other", ".", "x", "-", "self", ".", "x", "*", "other", ".", "z", ",", "self", ".", "x", "*", "other", ".", "y", "-", "self", ".", "y", "*", "other", ".", "x", ")" ]
Returns the cross product of self and other.
[ "Returns", "the", "cross", "product", "of", "self", "and", "other", "." ]
python
train
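A quick right-hand-rule check of the formula above (assuming Point(x, y, z) simply stores its three components):

x_axis = Point(1, 0, 0)
y_axis = Point(0, 1, 0)
z_axis = x_axis.CrossProd(y_axis)
# z_axis is Point(0, 0, 1): crossing orthogonal unit vectors follows the right-hand rule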
dropbox/stone
stone/backends/obj_c_client.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/obj_c_client.py#L275-L333
def _generate_routes_m(self, namespace): """Generates implementation file for namespace object that has as methods all routes within the namespace.""" with self.block_m( fmt_routes_class(namespace.name, self.args.auth_type)): init_args = fmt_func_args_declaration([( 'client', 'id<{}>'.format(self.args.transport_client_name))]) with self.block_func( func='init', args=init_args, return_type='instancetype'): self.emit('self = [super init];') with self.block_init(): self.emit('_client = client;') self.emit() style_to_request = json.loads(self.args.z__style_to_request) for route in namespace.routes: if (route.attrs.get('auth') != self.args.auth_type and route.attrs.get('auth') != 'noauth'): continue route_type = route.attrs.get('style') client_args = json.loads(self.args.client_args) if route_type in client_args.keys(): for args_data in client_args[route_type]: task_type_key, type_data_dict = tuple(args_data) task_type_name = style_to_request[task_type_key] func_suffix = type_data_dict[0] extra_args = [ tuple(type_data[:-1]) for type_data in type_data_dict[1] ] if (is_struct_type(route.arg_data_type) and self._struct_has_defaults(route.arg_data_type)): route_args, _ = self._get_default_route_args( namespace, route) self._generate_route_m(route, namespace, route_args, extra_args, task_type_name, func_suffix) route_args, _ = self._get_route_args(namespace, route) self._generate_route_m(route, namespace, route_args, extra_args, task_type_name, func_suffix) else: task_type_name = style_to_request[route_type] if (is_struct_type(route.arg_data_type) and self._struct_has_defaults(route.arg_data_type)): route_args, _ = self._get_default_route_args( namespace, route) self._generate_route_m(route, namespace, route_args, [], task_type_name, '') route_args, _ = self._get_route_args(namespace, route) self._generate_route_m(route, namespace, route_args, [], task_type_name, '')
[ "def", "_generate_routes_m", "(", "self", ",", "namespace", ")", ":", "with", "self", ".", "block_m", "(", "fmt_routes_class", "(", "namespace", ".", "name", ",", "self", ".", "args", ".", "auth_type", ")", ")", ":", "init_args", "=", "fmt_func_args_declaration", "(", "[", "(", "'client'", ",", "'id<{}>'", ".", "format", "(", "self", ".", "args", ".", "transport_client_name", ")", ")", "]", ")", "with", "self", ".", "block_func", "(", "func", "=", "'init'", ",", "args", "=", "init_args", ",", "return_type", "=", "'instancetype'", ")", ":", "self", ".", "emit", "(", "'self = [super init];'", ")", "with", "self", ".", "block_init", "(", ")", ":", "self", ".", "emit", "(", "'_client = client;'", ")", "self", ".", "emit", "(", ")", "style_to_request", "=", "json", ".", "loads", "(", "self", ".", "args", ".", "z__style_to_request", ")", "for", "route", "in", "namespace", ".", "routes", ":", "if", "(", "route", ".", "attrs", ".", "get", "(", "'auth'", ")", "!=", "self", ".", "args", ".", "auth_type", "and", "route", ".", "attrs", ".", "get", "(", "'auth'", ")", "!=", "'noauth'", ")", ":", "continue", "route_type", "=", "route", ".", "attrs", ".", "get", "(", "'style'", ")", "client_args", "=", "json", ".", "loads", "(", "self", ".", "args", ".", "client_args", ")", "if", "route_type", "in", "client_args", ".", "keys", "(", ")", ":", "for", "args_data", "in", "client_args", "[", "route_type", "]", ":", "task_type_key", ",", "type_data_dict", "=", "tuple", "(", "args_data", ")", "task_type_name", "=", "style_to_request", "[", "task_type_key", "]", "func_suffix", "=", "type_data_dict", "[", "0", "]", "extra_args", "=", "[", "tuple", "(", "type_data", "[", ":", "-", "1", "]", ")", "for", "type_data", "in", "type_data_dict", "[", "1", "]", "]", "if", "(", "is_struct_type", "(", "route", ".", "arg_data_type", ")", "and", "self", ".", "_struct_has_defaults", "(", "route", ".", "arg_data_type", ")", ")", ":", "route_args", ",", "_", "=", "self", ".", "_get_default_route_args", "(", "namespace", ",", "route", ")", "self", ".", "_generate_route_m", "(", "route", ",", "namespace", ",", "route_args", ",", "extra_args", ",", "task_type_name", ",", "func_suffix", ")", "route_args", ",", "_", "=", "self", ".", "_get_route_args", "(", "namespace", ",", "route", ")", "self", ".", "_generate_route_m", "(", "route", ",", "namespace", ",", "route_args", ",", "extra_args", ",", "task_type_name", ",", "func_suffix", ")", "else", ":", "task_type_name", "=", "style_to_request", "[", "route_type", "]", "if", "(", "is_struct_type", "(", "route", ".", "arg_data_type", ")", "and", "self", ".", "_struct_has_defaults", "(", "route", ".", "arg_data_type", ")", ")", ":", "route_args", ",", "_", "=", "self", ".", "_get_default_route_args", "(", "namespace", ",", "route", ")", "self", ".", "_generate_route_m", "(", "route", ",", "namespace", ",", "route_args", ",", "[", "]", ",", "task_type_name", ",", "''", ")", "route_args", ",", "_", "=", "self", ".", "_get_route_args", "(", "namespace", ",", "route", ")", "self", ".", "_generate_route_m", "(", "route", ",", "namespace", ",", "route_args", ",", "[", "]", ",", "task_type_name", ",", "''", ")" ]
Generates implementation file for namespace object that has as methods all routes within the namespace.
[ "Generates", "implementation", "file", "for", "namespace", "object", "that", "has", "as", "methods", "all", "routes", "within", "the", "namespace", "." ]
python
train
phoebe-project/phoebe2
phoebe/backend/universe.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/backend/universe.py#L1507-L1535
def _fill_loggs(self, mesh=None, ignore_effects=False):
    """
    TODO: add documentation

    Calculate local surface gravity

    GMSunNom = 1.3271244e20 m**3 s**-2
    RSunNom = 6.957e8 m
    """
    logger.debug("{}._fill_loggs".format(self.component))

    if mesh is None:
        mesh = self.mesh

    loggs = np.log10(mesh.normgrads.for_computations * g_rel_to_abs(self.masses[self.ind_self], self.sma))

    if not ignore_effects:
        for feature in self.features:
            if feature.proto_coords:
                loggs = feature.process_loggs(loggs, mesh.roche_coords_for_computations, s=self.polar_direction_xyz, t=self.time)
            else:
                loggs = feature.process_loggs(loggs, mesh.coords_for_computations, s=self.polar_direction_xyz, t=self.time)

    mesh.update_columns(loggs=loggs)

    if not self.needs_recompute_instantaneous:
        logger.debug("{}._fill_loggs: copying loggs to standard mesh".format(self.component))
        theta = 0.0
        self._standard_meshes[theta].update_columns(loggs=loggs)
[ "def", "_fill_loggs", "(", "self", ",", "mesh", "=", "None", ",", "ignore_effects", "=", "False", ")", ":", "logger", ".", "debug", "(", "\"{}._fill_loggs\"", ".", "format", "(", "self", ".", "component", ")", ")", "if", "mesh", "is", "None", ":", "mesh", "=", "self", ".", "mesh", "loggs", "=", "np", ".", "log10", "(", "mesh", ".", "normgrads", ".", "for_computations", "*", "g_rel_to_abs", "(", "self", ".", "masses", "[", "self", ".", "ind_self", "]", ",", "self", ".", "sma", ")", ")", "if", "not", "ignore_effects", ":", "for", "feature", "in", "self", ".", "features", ":", "if", "feature", ".", "proto_coords", ":", "loggs", "=", "feature", ".", "process_loggs", "(", "loggs", ",", "mesh", ".", "roche_coords_for_computations", ",", "s", "=", "self", ".", "polar_direction_xyz", ",", "t", "=", "self", ".", "time", ")", "else", ":", "loggs", "=", "feature", ".", "process_loggs", "(", "loggs", ",", "mesh", ".", "coords_for_computations", ",", "s", "=", "self", ".", "polar_direction_xyz", ",", "t", "=", "self", ".", "time", ")", "mesh", ".", "update_columns", "(", "loggs", "=", "loggs", ")", "if", "not", "self", ".", "needs_recompute_instantaneous", ":", "logger", ".", "debug", "(", "\"{}._fill_loggs: copying loggs to standard mesh\"", ".", "format", "(", "self", ".", "component", ")", ")", "theta", "=", "0.0", "self", ".", "_standard_meshes", "[", "theta", "]", ".", "update_columns", "(", "loggs", "=", "loggs", ")" ]
TODO: add documentation

Calculate local surface gravity

GMSunNom = 1.3271244e20 m**3 s**-2
RSunNom = 6.957e8 m
[ "TODO", ":", "add", "documentation" ]
python
train
sdss/sdss_access
python/sdss_access/path/path.py
https://github.com/sdss/sdss_access/blob/76375bbf37d39d2e4ccbed90bdfa9a4298784470/python/sdss_access/path/path.py#L419-L451
def random(self, filetype, **kwargs):
    ''' Returns a random selection of files of the given type

    Parameters
    ----------
    filetype : str
        File type parameter.

    num : int
        The number of files to return

    as_url: bool
        Boolean to return SAS urls

    refine: str
        Regular expression string to filter the list of files by
        before random selection

    Returns
    -------
    random : list
        Random files selected from the expanded list of full paths on disk.

    '''
    expanded_files = self.expand(filetype, **kwargs)
    isany = self.any(filetype, **kwargs)
    if isany:
        # get the desired number
        num = kwargs.get('num', 1)
        assert num <= len(expanded_files), 'Requested number cannot be larger than the sample. Reduce your number.'
        return sample(expanded_files, num)
    else:
        return None
[ "def", "random", "(", "self", ",", "filetype", ",", "*", "*", "kwargs", ")", ":", "expanded_files", "=", "self", ".", "expand", "(", "filetype", ",", "*", "*", "kwargs", ")", "isany", "=", "self", ".", "any", "(", "filetype", ",", "*", "*", "kwargs", ")", "if", "isany", ":", "# get the desired number", "num", "=", "kwargs", ".", "get", "(", "'num'", ",", "1", ")", "assert", "num", "<=", "len", "(", "expanded_files", ")", ",", "'Requested number must be larger the sample. Reduce your number.'", "return", "sample", "(", "expanded_files", ",", "num", ")", "else", ":", "return", "None" ]
Returns a random selection of files of the given type

Parameters
----------
filetype : str
    File type parameter.

num : int
    The number of files to return

as_url: bool
    Boolean to return SAS urls

refine: str
    Regular expression string to filter the list of files by
    before random selection

Returns
-------
random : list
    Random files selected from the expanded list of full paths on disk.
[ "Returns", "random", "number", "of", "the", "given", "type", "of", "file" ]
python
train
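A heavily hedged call sketch (the 'mangacube' template name and its keywords are assumptions about typical sdss_access usage, not part of this record):

from sdss_access.path import Path

path = Path()
files = path.random('mangacube', num=2, drpver='v2_4_3',
                    plate=8485, ifu='*', wave='LOG')
# a list of two random matches from the expanded paths, or None when nothing is found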
ionelmc/python-tblib
src/tblib/__init__.py
https://github.com/ionelmc/python-tblib/blob/00be69aa97e1eb1c09282b1cdb72539c947d4515/src/tblib/__init__.py#L141-L160
def to_dict(self): """Convert a Traceback into a dictionary representation""" if self.tb_next is None: tb_next = None else: tb_next = self.tb_next.to_dict() code = { 'co_filename': self.tb_frame.f_code.co_filename, 'co_name': self.tb_frame.f_code.co_name, } frame = { 'f_globals': self.tb_frame.f_globals, 'f_code': code, } return { 'tb_frame': frame, 'tb_lineno': self.tb_lineno, 'tb_next': tb_next, }
[ "def", "to_dict", "(", "self", ")", ":", "if", "self", ".", "tb_next", "is", "None", ":", "tb_next", "=", "None", "else", ":", "tb_next", "=", "self", ".", "tb_next", ".", "to_dict", "(", ")", "code", "=", "{", "'co_filename'", ":", "self", ".", "tb_frame", ".", "f_code", ".", "co_filename", ",", "'co_name'", ":", "self", ".", "tb_frame", ".", "f_code", ".", "co_name", ",", "}", "frame", "=", "{", "'f_globals'", ":", "self", ".", "tb_frame", ".", "f_globals", ",", "'f_code'", ":", "code", ",", "}", "return", "{", "'tb_frame'", ":", "frame", ",", "'tb_lineno'", ":", "self", ".", "tb_lineno", ",", "'tb_next'", ":", "tb_next", ",", "}" ]
Convert a Traceback into a dictionary representation
[ "Convert", "a", "Traceback", "into", "a", "dictionary", "representation" ]
python
test
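A round-trip sketch for the serialization above (tblib's from_dict is assumed here for the reverse step; it belongs to the same class but is not shown in this record):

import sys
from tblib import Traceback

try:
    1 / 0
except ZeroDivisionError:
    tb_dict = Traceback(sys.exc_info()[2]).to_dict()

# the dict is plain data (picklable / JSON-able) and can be revived later:
tb = Traceback.from_dict(tb_dict)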
stephan-mclean/KickassTorrentsAPI
kat.py
https://github.com/stephan-mclean/KickassTorrentsAPI/blob/4d867a090c06ce95b9ed996b48092cb5bfe28bbd/kat.py#L98-L110
def print_details(self): """Print torrent details""" print("Title:", self.title) print("Category:", self.category) print("Page: ", self.page) print("Size: ", self.size) print("Files: ", self.files) print("Age: ", self.age) print("Seeds:", self.seeders) print("Leechers: ", self.leechers) print("Magnet: ", self.magnet) print("Download: ", self.download) print("Verified:", self.isVerified)
[ "def", "print_details", "(", "self", ")", ":", "print", "(", "\"Title:\"", ",", "self", ".", "title", ")", "print", "(", "\"Category:\"", ",", "self", ".", "category", ")", "print", "(", "\"Page: \"", ",", "self", ".", "page", ")", "print", "(", "\"Size: \"", ",", "self", ".", "size", ")", "print", "(", "\"Files: \"", ",", "self", ".", "files", ")", "print", "(", "\"Age: \"", ",", "self", ".", "age", ")", "print", "(", "\"Seeds:\"", ",", "self", ".", "seeders", ")", "print", "(", "\"Leechers: \"", ",", "self", ".", "leechers", ")", "print", "(", "\"Magnet: \"", ",", "self", ".", "magnet", ")", "print", "(", "\"Download: \"", ",", "self", ".", "download", ")", "print", "(", "\"Verified:\"", ",", "self", ".", "isVerified", ")" ]
Print torrent details
[ "Print", "torrent", "details" ]
python
train
saltstack/salt
salt/modules/zypperpkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zypperpkg.py#L707-L726
def version_cmp(ver1, ver2, ignore_epoch=False, **kwargs): ''' .. versionadded:: 2015.5.4 Do a cmp-style comparison on two packages. Return -1 if ver1 < ver2, 0 if ver1 == ver2, and 1 if ver1 > ver2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2015.8.10,2016.3.2 CLI Example: .. code-block:: bash salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002' ''' return __salt__['lowpkg.version_cmp'](ver1, ver2, ignore_epoch=ignore_epoch)
[ "def", "version_cmp", "(", "ver1", ",", "ver2", ",", "ignore_epoch", "=", "False", ",", "*", "*", "kwargs", ")", ":", "return", "__salt__", "[", "'lowpkg.version_cmp'", "]", "(", "ver1", ",", "ver2", ",", "ignore_epoch", "=", "ignore_epoch", ")" ]
.. versionadded:: 2015.5.4 Do a cmp-style comparison on two packages. Return -1 if ver1 < ver2, 0 if ver1 == ver2, and 1 if ver1 > ver2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2015.8.10,2016.3.2 CLI Example: .. code-block:: bash salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002'
[ "..", "versionadded", "::", "2015", ".", "5", ".", "4" ]
python
train
softlayer/softlayer-python
SoftLayer/managers/network.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/network.py#L257-L283
def edit_rwhois(self, abuse_email=None, address1=None, address2=None, city=None, company_name=None, country=None, first_name=None, last_name=None, postal_code=None, private_residence=None, state=None): """Edit rwhois record.""" update = {} for key, value in [('abuseEmail', abuse_email), ('address1', address1), ('address2', address2), ('city', city), ('companyName', company_name), ('country', country), ('firstName', first_name), ('lastName', last_name), ('privateResidenceFlag', private_residence), ('state', state), ('postalCode', postal_code)]: if value is not None: update[key] = value # If there's anything to update, update it if update: rwhois = self.get_rwhois() return self.client['Network_Subnet_Rwhois_Data'].editObject( update, id=rwhois['id']) return True
[ "def", "edit_rwhois", "(", "self", ",", "abuse_email", "=", "None", ",", "address1", "=", "None", ",", "address2", "=", "None", ",", "city", "=", "None", ",", "company_name", "=", "None", ",", "country", "=", "None", ",", "first_name", "=", "None", ",", "last_name", "=", "None", ",", "postal_code", "=", "None", ",", "private_residence", "=", "None", ",", "state", "=", "None", ")", ":", "update", "=", "{", "}", "for", "key", ",", "value", "in", "[", "(", "'abuseEmail'", ",", "abuse_email", ")", ",", "(", "'address1'", ",", "address1", ")", ",", "(", "'address2'", ",", "address2", ")", ",", "(", "'city'", ",", "city", ")", ",", "(", "'companyName'", ",", "company_name", ")", ",", "(", "'country'", ",", "country", ")", ",", "(", "'firstName'", ",", "first_name", ")", ",", "(", "'lastName'", ",", "last_name", ")", ",", "(", "'privateResidenceFlag'", ",", "private_residence", ")", ",", "(", "'state'", ",", "state", ")", ",", "(", "'postalCode'", ",", "postal_code", ")", "]", ":", "if", "value", "is", "not", "None", ":", "update", "[", "key", "]", "=", "value", "# If there's anything to update, update it", "if", "update", ":", "rwhois", "=", "self", ".", "get_rwhois", "(", ")", "return", "self", ".", "client", "[", "'Network_Subnet_Rwhois_Data'", "]", ".", "editObject", "(", "update", ",", "id", "=", "rwhois", "[", "'id'", "]", ")", "return", "True" ]
Edit rwhois record.
[ "Edit", "rwhois", "record", "." ]
python
train
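A hedged usage sketch via the manager above (create_client_from_env and the chosen fields follow standard softlayer-python patterns and are assumptions, not shown in this record):

import SoftLayer

client = SoftLayer.create_client_from_env()
mgr = SoftLayer.NetworkManager(client)
# only the keyword arguments you actually pass end up in the update
mgr.edit_rwhois(abuse_email='[email protected]', city='Houston', state='TX')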
O365/python-o365
O365/utils/utils.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/utils/utils.py#L275-L281
def _recipients_from_cloud(self, recipients, field=None): """ Transform a recipient from cloud data to object data """ recipients_data = [] for recipient in recipients: recipients_data.append( self._recipient_from_cloud(recipient, field=field)) return Recipients(recipients_data, parent=self, field=field)
[ "def", "_recipients_from_cloud", "(", "self", ",", "recipients", ",", "field", "=", "None", ")", ":", "recipients_data", "=", "[", "]", "for", "recipient", "in", "recipients", ":", "recipients_data", ".", "append", "(", "self", ".", "_recipient_from_cloud", "(", "recipient", ",", "field", "=", "field", ")", ")", "return", "Recipients", "(", "recipients_data", ",", "parent", "=", "self", ",", "field", "=", "field", ")" ]
Transform a recipient from cloud data to object data
[ "Transform", "a", "recipient", "from", "cloud", "data", "to", "object", "data" ]
python
train
FujiMakoto/IPS-Vagrant
ips_vagrant/downloaders/downloader.py
https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/downloaders/downloader.py#L147-L179
def download(self):
    """
    Download the latest IPS release
    The path to the downloaded archive is stored in self.filepath
    """
    # Submit a download request and test the response
    self.log.debug('Submitting request: %s', self.request)
    response = self.session.request(*self.request, stream=True)
    if response.status_code != 200:
        self.log.error('Download request failed: %d', response.status_code)
        raise HtmlParserError

    # If we're re-downloading this version, delete the old file
    if self.filepath and os.path.isfile(self.filepath):
        self.log.info('Removing old version download')
        os.remove(self.filepath)

    # Make sure our versions data directory exists
    if not os.path.isdir(self.basedir):
        self.log.debug('Creating versions data directory')
        os.makedirs(self.basedir, 0o755)

    # Process our file download
    vslug = self.version.vstring.replace(' ', '-')
    self.filepath = self.filepath or os.path.join(self.basedir, '{v}.zip'.format(v=vslug))
    with open(self.filepath, 'wb') as f:
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
                f.flush()

    self.log.info('Version {v} successfully downloaded to {fn}'.format(v=self.version, fn=self.filepath))
[ "def", "download", "(", "self", ")", ":", "# Submit a download request and test the response", "self", ".", "log", ".", "debug", "(", "'Submitting request: %s'", ",", "self", ".", "request", ")", "response", "=", "self", ".", "session", ".", "request", "(", "*", "self", ".", "request", ",", "stream", "=", "True", ")", "if", "response", ".", "status_code", "!=", "200", ":", "self", ".", "log", ".", "error", "(", "'Download request failed: %d'", ",", "response", ".", "status_code", ")", "raise", "HtmlParserError", "# If we're re-downloading this version, delete the old file", "if", "self", ".", "filepath", "and", "os", ".", "path", ".", "isfile", "(", "self", ".", "filepath", ")", ":", "self", ".", "log", ".", "info", "(", "'Removing old version download'", ")", "os", ".", "remove", "(", "self", ".", "filepath", ")", "# Make sure our versions data directory exists", "if", "not", "os", ".", "path", ".", "isdir", "(", "self", ".", "basedir", ")", ":", "self", ".", "log", ".", "debug", "(", "'Creating versions data directory'", ")", "os", ".", "makedirs", "(", "self", ".", "basedir", ",", "0o755", ")", "# Process our file download", "vslug", "=", "self", ".", "version", ".", "vstring", ".", "replace", "(", "' '", ",", "'-'", ")", "self", ".", "filepath", "=", "self", ".", "filepath", "or", "os", ".", "path", ".", "join", "(", "self", ".", "basedir", ",", "'{v}.zip'", ".", "format", "(", "v", "=", "vslug", ")", ")", "with", "open", "(", "self", ".", "filepath", ",", "'wb'", ")", "as", "f", ":", "for", "chunk", "in", "response", ".", "iter_content", "(", "chunk_size", "=", "1024", ")", ":", "if", "chunk", ":", "# filter out keep-alive new chunks", "f", ".", "write", "(", "chunk", ")", "f", ".", "flush", "(", ")", "self", ".", "log", ".", "info", "(", "'Version {v} successfully downloaded to {fn}'", ".", "format", "(", "v", "=", "self", ".", "version", ",", "fn", "=", "self", ".", "filepath", ")", ")" ]
Download the latest IPS release @return: Download file path @rtype: str
[ "Download", "the", "latest", "IPS", "release" ]
python
train
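The download() method above is a standard requests streaming download. As a point of comparison, here is a minimal standalone sketch of the same pattern; the URL, destination path, and error type are placeholders, not part of the original repo:

import os
import requests

def stream_download(url, dest_path, chunk_size=1024):
    """Stream a URL to disk in fixed-size chunks, mirroring download() above."""
    response = requests.get(url, stream=True)
    if response.status_code != 200:
        raise RuntimeError('download failed: %d' % response.status_code)
    os.makedirs(os.path.dirname(dest_path) or '.', exist_ok=True)
    with open(dest_path, 'wb') as f:
        for chunk in response.iter_content(chunk_size=chunk_size):
            if chunk:  # skip keep-alive chunks
                f.write(chunk)
    return dest_path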
iotile/coretools
transport_plugins/native_ble/iotile_transport_native_ble/device_adapter.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/native_ble/iotile_transport_native_ble/device_adapter.py#L228-L284
def connect_async(self, connection_id, connection_string, callback, retries=4, context=None): """Connect to a device by its connection_string This function asynchronously connects to a device by its BLE address + address type passed in the connection_string parameter and calls callback when finished. Callback is called on either success or failure with the signature: callback(connection_id: int, result: bool, value: None) The optional retries argument specifies how many times we should retry the connection if the connection fails due to an early disconnect. Early disconnects are expected ble failure modes in busy environments where the slave device misses the connection packet and the master therefore fails immediately. Retrying a few times should succeed in this case. Args: connection_string (string): A BLE address information in AA:BB:CC:DD:EE:FF,<address_type> format connection_id (int): A unique integer set by the caller for referring to this connection once created callback (callable): A callback function called when the connection has succeeded or failed retries (int): The number of attempts to connect to this device that can end in early disconnect before we give up and report that we could not connect. A retry count of 0 will mean that we fail as soon as we receive the first early disconnect. context (dict): If we are retrying to connect, passes the context to not considering it as a new connection. """ if context is None: # It is the first attempt to connect: begin a new connection context = { 'connection_id': connection_id, 'retries': retries, 'retry_connect': False, 'connection_string': connection_string, 'connect_time': time.time(), 'callback': callback } self.connections.begin_connection( connection_id, connection_string, callback, context, self.get_config('default_timeout') ) # Don't scan while we attempt to connect to this device if self.scanning: self.stop_scan() address, address_type = connection_string.split(',') # First, cancel any pending connection to prevent errors when starting a new one self.bable.cancel_connection(sync=False) # Send a connect request self.bable.connect( address=address, address_type=address_type, connection_interval=[7.5, 7.5], on_connected=[self._on_connection_finished, context], on_disconnected=[self._on_unexpected_disconnection, context] )
[ "def", "connect_async", "(", "self", ",", "connection_id", ",", "connection_string", ",", "callback", ",", "retries", "=", "4", ",", "context", "=", "None", ")", ":", "if", "context", "is", "None", ":", "# It is the first attempt to connect: begin a new connection", "context", "=", "{", "'connection_id'", ":", "connection_id", ",", "'retries'", ":", "retries", ",", "'retry_connect'", ":", "False", ",", "'connection_string'", ":", "connection_string", ",", "'connect_time'", ":", "time", ".", "time", "(", ")", ",", "'callback'", ":", "callback", "}", "self", ".", "connections", ".", "begin_connection", "(", "connection_id", ",", "connection_string", ",", "callback", ",", "context", ",", "self", ".", "get_config", "(", "'default_timeout'", ")", ")", "# Don't scan while we attempt to connect to this device", "if", "self", ".", "scanning", ":", "self", ".", "stop_scan", "(", ")", "address", ",", "address_type", "=", "connection_string", ".", "split", "(", "','", ")", "# First, cancel any pending connection to prevent errors when starting a new one", "self", ".", "bable", ".", "cancel_connection", "(", "sync", "=", "False", ")", "# Send a connect request", "self", ".", "bable", ".", "connect", "(", "address", "=", "address", ",", "address_type", "=", "address_type", ",", "connection_interval", "=", "[", "7.5", ",", "7.5", "]", ",", "on_connected", "=", "[", "self", ".", "_on_connection_finished", ",", "context", "]", ",", "on_disconnected", "=", "[", "self", ".", "_on_unexpected_disconnection", ",", "context", "]", ")" ]
Connect to a device by its connection_string This function asynchronously connects to a device by its BLE address + address type passed in the connection_string parameter and calls callback when finished. Callback is called on either success or failure with the signature: callback(connection_id: int, result: bool, value: None) The optional retries argument specifies how many times we should retry the connection if the connection fails due to an early disconnect. Early disconnects are expected ble failure modes in busy environments where the slave device misses the connection packet and the master therefore fails immediately. Retrying a few times should succeed in this case. Args: connection_string (string): A BLE address information in AA:BB:CC:DD:EE:FF,<address_type> format connection_id (int): A unique integer set by the caller for referring to this connection once created callback (callable): A callback function called when the connection has succeeded or failed retries (int): The number of attempts to connect to this device that can end in early disconnect before we give up and report that we could not connect. A retry count of 0 will mean that we fail as soon as we receive the first early disconnect. context (dict): If we are retrying to connect, passes the context to not considering it as a new connection.
[ "Connect", "to", "a", "device", "by", "its", "connection_string" ]
python
train
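The docstring above pins down the callback contract for connect_async(). A tiny runnable stub of that signature; the adapter itself is not constructed here, so invoking the stub directly is purely illustrative:

def on_connected(connection_id, result, value):
    """Matches the documented signature callback(connection_id: int, result: bool, value: None)."""
    status = 'connected' if result else 'failed to connect'
    print('connection %d: %s' % (connection_id, status))

# Calling the stub directly, as the adapter would once connect_async() finishes.
on_connected(1, True, None)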
Opentrons/opentrons
api/src/opentrons/deck_calibration/dc_main.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/deck_calibration/dc_main.py#L365-L395
def validate(
            self,
            point: Tuple[float, float, float],
            point_num: int,
            pipette_mount) -> str:
        """
        :param point: Expected values from mechanical drawings
        :param point_num: The current position attempting to be validated
        :param pipette_mount: 'Z' for left mount or 'A' for right mount
        :return:
        """
        _, _, cz = self._driver_to_deck_coords(self._position())
        if self._current_mount != pipette_mount and cz < SAFE_HEIGHT:
            self.move_to_safe_height()

        self._current_mount = pipette_mount
        self._current_point = point_num

        _, _, cz = self._driver_to_deck_coords(self._position())
        if cz < SAFE_HEIGHT:
            self.move_to_safe_height()

        tx, ty, tz = self._deck_to_driver_coords(point)

        if not feature_flags.use_protocol_api_v2():
            self.hardware._driver.move({'X': tx, 'Y': ty})
            self.hardware._driver.move({self._current_mount: tz})
        else:
            pt = types.Point(x=tx, y=ty, z=tz)
            self.hardware.move_to(self._current_mount, pt)

        return 'moved to point {}'.format(point)
[ "def", "validate", "(", "self", ",", "point", ":", "Tuple", "[", "float", ",", "float", ",", "float", "]", ",", "point_num", ":", "int", ",", "pipette_mount", ")", "->", "str", ":", "_", ",", "_", ",", "cz", "=", "self", ".", "_driver_to_deck_coords", "(", "self", ".", "_position", "(", ")", ")", "if", "self", ".", "_current_mount", "!=", "pipette_mount", "and", "cz", "<", "SAFE_HEIGHT", ":", "self", ".", "move_to_safe_height", "(", ")", "self", ".", "_current_mount", "=", "pipette_mount", "self", ".", "_current_point", "=", "point_num", "_", ",", "_", ",", "cz", "=", "self", ".", "_driver_to_deck_coords", "(", "self", ".", "_position", "(", ")", ")", "if", "cz", "<", "SAFE_HEIGHT", ":", "self", ".", "move_to_safe_height", "(", ")", "tx", ",", "ty", ",", "tz", "=", "self", ".", "_deck_to_driver_coords", "(", "point", ")", "if", "not", "feature_flags", ".", "use_protocol_api_v2", "(", ")", ":", "self", ".", "hardware", ".", "_driver", ".", "move", "(", "{", "'X'", ":", "tx", ",", "'Y'", ":", "ty", "}", ")", "self", ".", "hardware", ".", "_driver", ".", "move", "(", "{", "self", ".", "_current_mount", ":", "tz", "}", ")", "else", ":", "pt", "=", "types", ".", "Point", "(", "x", "=", "tx", ",", "y", "=", "ty", ",", "z", "=", "tz", ")", "self", ".", "hardware", ".", "move_to", "(", "self", ".", "_current_mount", ",", "pt", ")", "return", "'moved to point {}'", ".", "format", "(", "point", ")" ]
:param point: Expected values from mechanical drawings :param point_num: The current position attempting to be validated :param pipette_mount: 'Z' for left mount or 'A' for right mount :return:
[ ":", "param", "point", ":", "Expected", "values", "from", "mechanical", "drawings", ":", "param", "point_num", ":", "The", "current", "position", "attempting", "to", "be", "validated", ":", "param", "pipette", ":", "Z", "for", "left", "mount", "or", "A", "for", "right", "mount" ]
python
train
dwavesystems/dwave-system
dwave/embedding/polynomialembedder.py
https://github.com/dwavesystems/dwave-system/blob/86a1698f15ccd8b0ece0ed868ee49292d3f67f5b/dwave/embedding/polynomialembedder.py#L1199-L1226
def random_processor(M, N, L, qubit_yield, num_evil=0):
    """A utility function that generates a random :math:`C_{M,N,L}` missing
    some percentage of its qubits.

    INPUTS:
        M,N,L: the chimera parameters
        qubit_yield: ratio (0 <= qubit_yield <= 1) of #{qubits}/(2*M*N*L)
        num_evil: number of broken in-cell couplers between working qubits

    OUTPUT:
        proc (:class:`processor`): a :class:`processor` instance with a random
            collection of qubits and couplers as specified
    """
    # replacement for lambda in edge filter below that works with both
    def edge_filter(pq):
        # we have to unpack the (p,q) edge
        p, q = pq
        return q in qubits and p < q

    qubits = [(x, y, u, k) for x in range(M) for y in range(N)
              for u in [0, 1] for k in range(L)]
    nqubits = len(qubits)
    qubits = set(sample(qubits, int(nqubits * qubit_yield)))
    edges = ((p, q) for p in qubits for q in _chimera_neighbors(M, N, L, p))
    edges = list(filter(edge_filter, edges))
    possibly_evil_edges = [(p, q) for p, q in edges if p[:2] == q[:2]]
    num_evil = min(num_evil, len(possibly_evil_edges))
    evil_edges = sample(possibly_evil_edges, num_evil)
    return processor(set(edges) - set(evil_edges), M=M, N=N, L=L, linear=False)
[ "def", "random_processor", "(", "M", ",", "N", ",", "L", ",", "qubit_yield", ",", "num_evil", "=", "0", ")", ":", "# replacement for lambda in edge filter below that works with bot h", "def", "edge_filter", "(", "pq", ")", ":", "# we have to unpack the (p,q) edge", "p", ",", "q", "=", "pq", "return", "q", "in", "qubits", "and", "p", "<", "q", "qubits", "=", "[", "(", "x", ",", "y", ",", "u", ",", "k", ")", "for", "x", "in", "range", "(", "M", ")", "for", "y", "in", "range", "(", "N", ")", "for", "u", "in", "[", "0", ",", "1", "]", "for", "k", "in", "range", "(", "L", ")", "]", "nqubits", "=", "len", "(", "qubits", ")", "qubits", "=", "set", "(", "sample", "(", "qubits", ",", "int", "(", "nqubits", "*", "qubit_yield", ")", ")", ")", "edges", "=", "(", "(", "p", ",", "q", ")", "for", "p", "in", "qubits", "for", "q", "in", "_chimera_neighbors", "(", "M", ",", "N", ",", "L", ",", "p", ")", ")", "edges", "=", "list", "(", "filter", "(", "edge_filter", ",", "edges", ")", ")", "possibly_evil_edges", "=", "[", "(", "p", ",", "q", ")", "for", "p", ",", "q", "in", "edges", "if", "p", "[", ":", "2", "]", "==", "q", "[", ":", "2", "]", "]", "num_evil", "=", "min", "(", "num_evil", ",", "len", "(", "possibly_evil_edges", ")", ")", "evil_edges", "=", "sample", "(", "possibly_evil_edges", ",", "num_evil", ")", "return", "processor", "(", "set", "(", "edges", ")", "-", "set", "(", "evil_edges", ")", ",", "M", "=", "M", ",", "N", "=", "N", ",", "L", "=", "L", ",", "linear", "=", "False", ")" ]
A utility function that generates a random :math:`C_{M,N,L}` missing some percentage of its qubits. INPUTS: M,N,L: the chimera parameters qubit_yield: ratio (0 <= qubit_yield <= 1) of #{qubits}/(2*M*N*L) num_evil: number of broken in-cell couplers between working qubits OUTPUT: proc (:class:`processor`): a :class:`processor` instance with a random collection of qubits and couplers as specified
[ "A", "utility", "function", "that", "generates", "a", "random", ":", "math", ":", "C_", "{", "M", "N", "L", "}", "missing", "some", "percentage", "of", "its", "qubits", "." ]
python
train
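A short usage sketch for random_processor, importing it from the module path recorded above; that the symbol is importable at runtime is an assumption:

from dwave.embedding.polynomialembedder import random_processor

# A C_{4,4,4} Chimera graph with 95% qubit yield and two broken in-cell couplers.
proc = random_processor(4, 4, 4, qubit_yield=0.95, num_evil=2)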
ska-sa/hypercube
hypercube/base_cube.py
https://github.com/ska-sa/hypercube/blob/6564a9e65ccd9ed7e7a71bd643f183e1ec645b29/hypercube/base_cube.py#L439-L497
def register_property(self, name, dtype, default, **kwargs):
        """
        Registers a property with this Solver object

        .. code-block:: python

            cube.register_property("reference_frequency", np.float64, 1.4e9)

        Parameters
        ----------
            name : str
                The name of this property.
            dtype : Numpy data type
                Numpy data type
            default :
                Should be convertable to dtype
                Default value for this value

        """
        if name in self._properties:
            raise ValueError(('Property %s is already registered '
                'on this cube object.') % name)

        P = self._properties[name] = AttrDict(name=name,
            dtype=dtype, default=default)

        #if not hasattr(HyperCube, name):
        if name not in HyperCube.__dict__:
            # Create the descriptor for this property on the class instance
            setattr(HyperCube, name, PropertyDescriptor(record_key=name, default=default))

        # Set the descriptor on this object instance
        setattr(self, name, default)

        # Should we create a setter for this property?
        setter = kwargs.get('setter_method', True)
        setter_name = hcu.setter_name(name)

        # Yes, create a default setter
        if isinstance(setter, bool) and setter is True:
            def set(self, value):
                setattr(self,name,value)

            setter_method = types.MethodType(set, self)
            setattr(self, setter_name, setter_method)

            # Set up the docstring, using the supplied one
            # if it is present, otherwise generating a default
            setter_docstring = kwargs.get('setter_docstring', None)
            getattr(setter_method, '__func__').__doc__ = \
                """ Sets property %s to value. """ % (name) \
                if setter_docstring is None else setter_docstring
        elif isinstance(setter, types.MethodType):
            setattr(self, setter_name, setter)
        else:
            raise TypeError('setter keyword argument set'
                ' to an invalid type %s' % type(setter))

        return P
[ "def", "register_property", "(", "self", ",", "name", ",", "dtype", ",", "default", ",", "*", "*", "kwargs", ")", ":", "if", "name", "in", "self", ".", "_properties", ":", "raise", "ValueError", "(", "(", "'Property %s is already registered '", "'on this cube object.'", ")", "%", "name", ")", "P", "=", "self", ".", "_properties", "[", "name", "]", "=", "AttrDict", "(", "name", "=", "name", ",", "dtype", "=", "dtype", ",", "default", "=", "default", ")", "#if not hasattr(HyperCube, name):", "if", "not", "HyperCube", ".", "__dict__", ".", "has_key", "(", "name", ")", ":", "# Create the descriptor for this property on the class instance", "setattr", "(", "HyperCube", ",", "name", ",", "PropertyDescriptor", "(", "record_key", "=", "name", ",", "default", "=", "default", ")", ")", "# Set the descriptor on this object instance", "setattr", "(", "self", ",", "name", ",", "default", ")", "# Should we create a setter for this property?", "setter", "=", "kwargs", ".", "get", "(", "'setter_method'", ",", "True", ")", "setter_name", "=", "hcu", ".", "setter_name", "(", "name", ")", "# Yes, create a default setter", "if", "isinstance", "(", "setter", ",", "types", ".", "BooleanType", ")", "and", "setter", "is", "True", ":", "def", "set", "(", "self", ",", "value", ")", ":", "setattr", "(", "self", ",", "name", ",", "value", ")", "setter_method", "=", "types", ".", "MethodType", "(", "set", ",", "self", ")", "setattr", "(", "self", ",", "setter_name", ",", "setter_method", ")", "# Set up the docstring, using the supplied one", "# if it is present, otherwise generating a default", "setter_docstring", "=", "kwargs", ".", "get", "(", "'setter_docstring'", ",", "None", ")", "getattr", "(", "setter_method", ",", "'__func__'", ")", ".", "__doc__", "=", "\"\"\" Sets property %s to value. \"\"\"", "%", "(", "name", ")", "if", "setter_docstring", "is", "None", "else", "setter_docstring", "elif", "isinstance", "(", "setter", ",", "types", ".", "MethodType", ")", ":", "setattr", "(", "self", ",", "setter_name", ",", "setter", ")", "else", ":", "raise", "TypeError", ",", "(", "'setter keyword argument set'", ",", "' to an invalid type %s'", "%", "(", "type", "(", "setter", ")", ")", ")", "return", "P" ]
Registers a property with this Solver object .. code-block:: python cube.register_property("reference_frequency", np.float64, 1.4e9) Parameters ---------- name : str The name of this property. dtype : Numpy data type Numpy data type default : Should be convertable to dtype Default value for this value
[ "Registers", "a", "property", "with", "this", "Solver", "object" ]
python
train
CityOfZion/neo-python
neo/Core/State/AccountState.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/State/AccountState.py#L83-L90
def Size(self): """ Get the total size in bytes of the object. Returns: int: size. """ return super(AccountState, self).Size() + s.uint160 + s.uint8 + GetVarSize(self.Votes) + GetVarSize(len(self.Balances)) + (len(self.Balances) * (32 + 8))
[ "def", "Size", "(", "self", ")", ":", "return", "super", "(", "AccountState", ",", "self", ")", ".", "Size", "(", ")", "+", "s", ".", "uint160", "+", "s", ".", "uint8", "+", "GetVarSize", "(", "self", ".", "Votes", ")", "+", "GetVarSize", "(", "len", "(", "self", ".", "Balances", ")", ")", "+", "(", "len", "(", "self", ".", "Balances", ")", "*", "(", "32", "+", "8", ")", ")" ]
Get the total size in bytes of the object. Returns: int: size.
[ "Get", "the", "total", "size", "in", "bytes", "of", "the", "object", "." ]
python
train
thombashi/SimpleSQLite
simplesqlite/converter.py
https://github.com/thombashi/SimpleSQLite/blob/b16f212132b9b98773e68bf7395abc2f60f56fe5/simplesqlite/converter.py#L21-L47
def to_record(cls, attr_names, values): """ Convert values to a record to be inserted into a database. :param list attr_names: List of attributes for the converting record. :param values: Values to be converted. :type values: |dict|/|namedtuple|/|list|/|tuple| :raises ValueError: If the ``values`` is invalid. """ try: # from a namedtuple to a dict values = values._asdict() except AttributeError: pass try: # from a dictionary to a list return [cls.__to_sqlite_element(values.get(attr_name)) for attr_name in attr_names] except AttributeError: pass if isinstance(values, (tuple, list)): return [cls.__to_sqlite_element(value) for value in values] raise ValueError("cannot convert from {} to list".format(type(values)))
[ "def", "to_record", "(", "cls", ",", "attr_names", ",", "values", ")", ":", "try", ":", "# from a namedtuple to a dict", "values", "=", "values", ".", "_asdict", "(", ")", "except", "AttributeError", ":", "pass", "try", ":", "# from a dictionary to a list", "return", "[", "cls", ".", "__to_sqlite_element", "(", "values", ".", "get", "(", "attr_name", ")", ")", "for", "attr_name", "in", "attr_names", "]", "except", "AttributeError", ":", "pass", "if", "isinstance", "(", "values", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "[", "cls", ".", "__to_sqlite_element", "(", "value", ")", "for", "value", "in", "values", "]", "raise", "ValueError", "(", "\"cannot convert from {} to list\"", ".", "format", "(", "type", "(", "values", ")", ")", ")" ]
Convert values to a record to be inserted into a database. :param list attr_names: List of attributes for the converting record. :param values: Values to be converted. :type values: |dict|/|namedtuple|/|list|/|tuple| :raises ValueError: If the ``values`` is invalid.
[ "Convert", "values", "to", "a", "record", "to", "be", "inserted", "into", "a", "database", "." ]
python
train
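The namedtuple-to-dict-to-list cascade used by to_record() can be reproduced standalone; this sketch shows the same two conversion steps on stdlib types only:

from collections import namedtuple

Point = namedtuple('Point', ['x', 'y'])
values = Point(1, 2)._asdict()  # namedtuple -> dict, the first fallback in to_record()
record = [values.get(attr_name) for attr_name in ['x', 'y']]  # dict -> ordered list
print(record)  # [1, 2]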
pop/pageup
pageup/pageup.py
https://github.com/pop/pageup/blob/e78471d50517e1779e6e2a5ea961f2a2def7e5e8/pageup/pageup.py#L77-L86
def grab(filename, directory): """ Copy dist files from their installed path to cwd/directory/filename cwd is the current directory, directory is their custom site name dir, filename is the name of the example file being copied over. """ r = requests.get('https://raw.githubusercontent.com/ElijahCaine/pageup/master/pageup/data/'+filename) with open(path.join(directory,filename), 'wb') as f: f.write(r.content)
[ "def", "grab", "(", "filename", ",", "directory", ")", ":", "r", "=", "requests", ".", "get", "(", "'https://raw.githubusercontent.com/ElijahCaine/pageup/master/pageup/data/'", "+", "filename", ")", "with", "open", "(", "path", ".", "join", "(", "directory", ",", "filename", ")", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "r", ".", "content", ")" ]
Copy dist files from their installed path to cwd/directory/filename cwd is the current directory, directory is their custom site name dir, filename is the name of the example file being copied over.
[ "Copy", "dist", "files", "from", "their", "installed", "path", "to", "cwd", "/", "directory", "/", "filename", "cwd", "is", "the", "current", "directory", "directory", "is", "their", "custom", "site", "name", "dir", "filename", "is", "the", "name", "of", "the", "example", "file", "being", "copied", "over", "." ]
python
train
lord63/v2ex_daily_mission
v2ex_daily_mission/v2ex.py
https://github.com/lord63/v2ex_daily_mission/blob/499901dd540dca68c3889d88c21c2594f25f27ec/v2ex_daily_mission/v2ex.py#L88-L93
def get_last(self): """Get to know how long you have kept signing in.""" response = self.session.get(self.mission_url, verify=False) soup = BeautifulSoup(response.text, 'html.parser') last = soup.select('#Main div')[-1].text return last
[ "def", "get_last", "(", "self", ")", ":", "response", "=", "self", ".", "session", ".", "get", "(", "self", ".", "mission_url", ",", "verify", "=", "False", ")", "soup", "=", "BeautifulSoup", "(", "response", ".", "text", ",", "'html.parser'", ")", "last", "=", "soup", ".", "select", "(", "'#Main div'", ")", "[", "-", "1", "]", ".", "text", "return", "last" ]
Get to know how long you have kept signing in.
[ "Get", "to", "know", "how", "long", "you", "have", "kept", "signing", "in", "." ]
python
train
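get_last() relies on a last-match CSS selector; a runnable bs4 snippet of the same idiom on inline HTML (the markup is invented for illustration, not taken from v2ex):

from bs4 import BeautifulSoup

html = '<div id="Main"><div>day 1</div><div>day 42</div></div>'
soup = BeautifulSoup(html, 'html.parser')
print(soup.select('#Main div')[-1].text)  # 'day 42' -- the last matching element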
mongodb/mongo-python-driver
pymongo/pool.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/pool.py#L696-L710
def validate_session(self, client, session): """Validate this session before use with client. Raises error if this session is logged in as a different user or the client is not the one that created the session. """ if session: if session._client is not client: raise InvalidOperation( 'Can only use session with the MongoClient that' ' started it') if session._authset != self.authset: raise InvalidOperation( 'Cannot use session after authenticating with different' ' credentials')
[ "def", "validate_session", "(", "self", ",", "client", ",", "session", ")", ":", "if", "session", ":", "if", "session", ".", "_client", "is", "not", "client", ":", "raise", "InvalidOperation", "(", "'Can only use session with the MongoClient that'", "' started it'", ")", "if", "session", ".", "_authset", "!=", "self", ".", "authset", ":", "raise", "InvalidOperation", "(", "'Cannot use session after authenticating with different'", "' credentials'", ")" ]
Validate this session before use with client. Raises error if this session is logged in as a different user or the client is not the one that created the session.
[ "Validate", "this", "session", "before", "use", "with", "client", "." ]
python
train
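The user-visible effect of validate_session() can be demonstrated with two clients; this assumes a reachable mongod that supports sessions, so treat it as a sketch rather than a self-contained test:

from pymongo import MongoClient
from pymongo.errors import InvalidOperation

client_a = MongoClient()
client_b = MongoClient()
with client_a.start_session() as session:
    try:
        # A session may only be used with the client that started it.
        client_b.test.coll.find_one(session=session)
    except InvalidOperation as exc:
        print(exc)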
brentp/cruzdb
cruzdb/models.py
https://github.com/brentp/cruzdb/blob/9068d46e25952f4a929dde0242beb31fa4c7e89a/cruzdb/models.py#L665-L709
def localize(self, *positions, **kwargs): """ convert global coordinate(s) to local taking introns into account and cds/tx-Start depending on cdna=True kwarg """ cdna = kwargs.get('cdna', False) # TODO: account for strand ?? add kwarg ?? # if it's to the CDNA, then it's based on the cdsStart start, end = (self.cdsStart, self.cdsEnd) if cdna else \ (self.start, self.end) introns = self.introns or None if cdna: if not self.is_coding: return ([None] * len(positions)) if len(positions) > 1 else None introns = self._introns(self.cds) or None if introns is None: local_ps = [p - start if (start <= p < end) else None for p in positions] return local_ps[0] if len(positions) == 1 else local_ps introns = [(s - start, e - start) for s, e in introns] positions = [p - start for p in positions] # now both introns and positions are local starts based on cds/tx-Start local_ps = [] l = end - start for original_p in positions: subtract = 0 p = original_p print(p, l, file=sys.stderr) if p < 0 or p >= l: # outside of transcript local_ps.append(None) continue for s, e in introns: # within intron if s <= p <= e: subtract = None break # otherwise, adjust for intron length. elif p >= e: subtract += (e - s) local_ps.append(p - subtract if subtract is not None else None) assert all(p is None or p >=0 for p in local_ps), (local_ps) return local_ps[0] if len(positions) == 1 else local_ps
[ "def", "localize", "(", "self", ",", "*", "positions", ",", "*", "*", "kwargs", ")", ":", "cdna", "=", "kwargs", ".", "get", "(", "'cdna'", ",", "False", ")", "# TODO: account for strand ?? add kwarg ??", "# if it's to the CDNA, then it's based on the cdsStart", "start", ",", "end", "=", "(", "self", ".", "cdsStart", ",", "self", ".", "cdsEnd", ")", "if", "cdna", "else", "(", "self", ".", "start", ",", "self", ".", "end", ")", "introns", "=", "self", ".", "introns", "or", "None", "if", "cdna", ":", "if", "not", "self", ".", "is_coding", ":", "return", "(", "[", "None", "]", "*", "len", "(", "positions", ")", ")", "if", "len", "(", "positions", ")", ">", "1", "else", "None", "introns", "=", "self", ".", "_introns", "(", "self", ".", "cds", ")", "or", "None", "if", "introns", "is", "None", ":", "local_ps", "=", "[", "p", "-", "start", "if", "(", "start", "<=", "p", "<", "end", ")", "else", "None", "for", "p", "in", "positions", "]", "return", "local_ps", "[", "0", "]", "if", "len", "(", "positions", ")", "==", "1", "else", "local_ps", "introns", "=", "[", "(", "s", "-", "start", ",", "e", "-", "start", ")", "for", "s", ",", "e", "in", "introns", "]", "positions", "=", "[", "p", "-", "start", "for", "p", "in", "positions", "]", "# now both introns and positions are local starts based on cds/tx-Start", "local_ps", "=", "[", "]", "l", "=", "end", "-", "start", "for", "original_p", "in", "positions", ":", "subtract", "=", "0", "p", "=", "original_p", "print", "(", "p", ",", "l", ",", "file", "=", "sys", ".", "stderr", ")", "if", "p", "<", "0", "or", "p", ">=", "l", ":", "# outside of transcript", "local_ps", ".", "append", "(", "None", ")", "continue", "for", "s", ",", "e", "in", "introns", ":", "# within intron", "if", "s", "<=", "p", "<=", "e", ":", "subtract", "=", "None", "break", "# otherwise, adjust for intron length.", "elif", "p", ">=", "e", ":", "subtract", "+=", "(", "e", "-", "s", ")", "local_ps", ".", "append", "(", "p", "-", "subtract", "if", "subtract", "is", "not", "None", "else", "None", ")", "assert", "all", "(", "p", "is", "None", "or", "p", ">=", "0", "for", "p", "in", "local_ps", ")", ",", "(", "local_ps", ")", "return", "local_ps", "[", "0", "]", "if", "len", "(", "positions", ")", "==", "1", "else", "local_ps" ]
convert global coordinate(s) to local taking introns into account and cds/tx-Start depending on cdna=True kwarg
[ "convert", "global", "coordinate", "(", "s", ")", "to", "local", "taking", "introns", "into", "account", "and", "cds", "/", "tx", "-", "Start", "depending", "on", "cdna", "=", "True", "kwarg" ]
python
train
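The intron-skipping arithmetic at the heart of localize() can be restated as a small standalone function; this is a simplified sketch of the same bookkeeping, not the cruzdb API:

def to_local(position, start, introns):
    """Convert a global coordinate to a transcript-local one, skipping introns.

    `introns` are (start, end) pairs already shifted to local coordinates,
    as in the method above.
    """
    p = position - start
    subtract = 0
    for s, e in introns:
        if s <= p <= e:
            return None  # the position falls inside an intron
        if p >= e:
            subtract += e - s  # account for the intron length we skipped
    return p - subtract

print(to_local(150, 100, [(10, 30)]))  # 30: 50 bases in, minus a 20-base intron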
simion/pip-upgrader
pip_upgrader/packages_status_detector.py
https://github.com/simion/pip-upgrader/blob/716adca65d9ed56d4d416f94ede8a8e4fa8d640a/pip_upgrader/packages_status_detector.py#L188-L227
def _parse_pypi_json_package_info(self, package_name, current_version, response): """ :type package_name: str :type current_version: version.Version :type response: requests.models.Response """ data = response.json() all_versions = [version.parse(vers) for vers in data['releases'].keys()] filtered_versions = [vers for vers in all_versions if not vers.is_prerelease and not vers.is_postrelease] if not filtered_versions: # pragma: nocover return False, 'error while parsing version' latest_version = max(filtered_versions) # even if user did not choose prerelease, if the package from requirements is pre/post release, use it if self._prerelease or current_version.is_postrelease or current_version.is_prerelease: prerelease_versions = [vers for vers in all_versions if vers.is_prerelease or vers.is_postrelease] if prerelease_versions: latest_version = max(prerelease_versions) try: try: latest_version_info = data['releases'][str(latest_version)][0] except KeyError: # pragma: nocover # non-RFC versions, get the latest from pypi response latest_version = version.parse(data['info']['version']) latest_version_info = data['releases'][str(latest_version)][0] except Exception: # pragma: nocover return False, 'error while parsing version' upload_time = latest_version_info['upload_time'].replace('T', ' ') return { 'name': package_name, 'current_version': current_version, 'latest_version': latest_version, 'upgrade_available': current_version < latest_version, 'upload_time': upload_time }, 'success'
[ "def", "_parse_pypi_json_package_info", "(", "self", ",", "package_name", ",", "current_version", ",", "response", ")", ":", "data", "=", "response", ".", "json", "(", ")", "all_versions", "=", "[", "version", ".", "parse", "(", "vers", ")", "for", "vers", "in", "data", "[", "'releases'", "]", ".", "keys", "(", ")", "]", "filtered_versions", "=", "[", "vers", "for", "vers", "in", "all_versions", "if", "not", "vers", ".", "is_prerelease", "and", "not", "vers", ".", "is_postrelease", "]", "if", "not", "filtered_versions", ":", "# pragma: nocover", "return", "False", ",", "'error while parsing version'", "latest_version", "=", "max", "(", "filtered_versions", ")", "# even if user did not choose prerelease, if the package from requirements is pre/post release, use it", "if", "self", ".", "_prerelease", "or", "current_version", ".", "is_postrelease", "or", "current_version", ".", "is_prerelease", ":", "prerelease_versions", "=", "[", "vers", "for", "vers", "in", "all_versions", "if", "vers", ".", "is_prerelease", "or", "vers", ".", "is_postrelease", "]", "if", "prerelease_versions", ":", "latest_version", "=", "max", "(", "prerelease_versions", ")", "try", ":", "try", ":", "latest_version_info", "=", "data", "[", "'releases'", "]", "[", "str", "(", "latest_version", ")", "]", "[", "0", "]", "except", "KeyError", ":", "# pragma: nocover", "# non-RFC versions, get the latest from pypi response", "latest_version", "=", "version", ".", "parse", "(", "data", "[", "'info'", "]", "[", "'version'", "]", ")", "latest_version_info", "=", "data", "[", "'releases'", "]", "[", "str", "(", "latest_version", ")", "]", "[", "0", "]", "except", "Exception", ":", "# pragma: nocover", "return", "False", ",", "'error while parsing version'", "upload_time", "=", "latest_version_info", "[", "'upload_time'", "]", ".", "replace", "(", "'T'", ",", "' '", ")", "return", "{", "'name'", ":", "package_name", ",", "'current_version'", ":", "current_version", ",", "'latest_version'", ":", "latest_version", ",", "'upgrade_available'", ":", "current_version", "<", "latest_version", ",", "'upload_time'", ":", "upload_time", "}", ",", "'success'" ]
:type package_name: str :type current_version: version.Version :type response: requests.models.Response
[ ":", "type", "package_name", ":", "str", ":", "type", "current_version", ":", "version", ".", "Version", ":", "type", "response", ":", "requests", ".", "models", ".", "Response" ]
python
test
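The stable-version filtering above reduces to a few lines of packaging.version; that the record's `version` module is packaging.version is an assumption, though it matches the attributes used:

from packaging import version

all_versions = [version.parse(v) for v in ['1.0', '1.1rc1', '1.2', '2.0.dev1']]
stable = [v for v in all_versions if not v.is_prerelease and not v.is_postrelease]
print(max(stable))  # 1.2 -- pre- and dev-releases are excluded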
Microsoft/nni
src/sdk/pynni/nni/networkmorphism_tuner/graph.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/networkmorphism_tuner/graph.py#L438-L448
def _insert_new_layers(self, new_layers, start_node_id, end_node_id): """Insert the new_layers after the node with start_node_id.""" new_node_id = self._add_node(deepcopy(self.node_list[end_node_id])) temp_output_id = new_node_id for layer in new_layers[:-1]: temp_output_id = self.add_layer(layer, temp_output_id) self._add_edge(new_layers[-1], temp_output_id, end_node_id) new_layers[-1].input = self.node_list[temp_output_id] new_layers[-1].output = self.node_list[end_node_id] self._redirect_edge(start_node_id, end_node_id, new_node_id)
[ "def", "_insert_new_layers", "(", "self", ",", "new_layers", ",", "start_node_id", ",", "end_node_id", ")", ":", "new_node_id", "=", "self", ".", "_add_node", "(", "deepcopy", "(", "self", ".", "node_list", "[", "end_node_id", "]", ")", ")", "temp_output_id", "=", "new_node_id", "for", "layer", "in", "new_layers", "[", ":", "-", "1", "]", ":", "temp_output_id", "=", "self", ".", "add_layer", "(", "layer", ",", "temp_output_id", ")", "self", ".", "_add_edge", "(", "new_layers", "[", "-", "1", "]", ",", "temp_output_id", ",", "end_node_id", ")", "new_layers", "[", "-", "1", "]", ".", "input", "=", "self", ".", "node_list", "[", "temp_output_id", "]", "new_layers", "[", "-", "1", "]", ".", "output", "=", "self", ".", "node_list", "[", "end_node_id", "]", "self", ".", "_redirect_edge", "(", "start_node_id", ",", "end_node_id", ",", "new_node_id", ")" ]
Insert the new_layers after the node with start_node_id.
[ "Insert", "the", "new_layers", "after", "the", "node", "with", "start_node_id", "." ]
python
train
timmahrt/ProMo
promo/duration_morph.py
https://github.com/timmahrt/ProMo/blob/99d9f5cc01ff328a62973c5a5da910cc905ae4d5/promo/duration_morph.py#L35-L87
def changeDuration(fromWavFN, durationParameters, stepList, outputName, outputMinPitch, outputMaxPitch, praatEXE): ''' Uses praat to morph duration in one file to duration in another Praat uses the PSOLA algorithm ''' rootPath = os.path.split(fromWavFN)[0] # Prep output directories outputPath = join(rootPath, "duration_resynthesized_wavs") utils.makeDir(outputPath) durationTierPath = join(rootPath, "duration_tiers") utils.makeDir(durationTierPath) fromWavDuration = audio_scripts.getSoundFileDuration(fromWavFN) durationParameters = copy.deepcopy(durationParameters) # Pad any gaps with values of 1 (no change in duration) # No need to stretch out any pauses at the beginning if durationParameters[0][0] != 0: tmpVar = (0, durationParameters[0][0] - PRAAT_TIME_DIFF, 1) durationParameters.insert(0, tmpVar) # Or the end if durationParameters[-1][1] < fromWavDuration: durationParameters.append((durationParameters[-1][1] + PRAAT_TIME_DIFF, fromWavDuration, 1)) # Create the praat script for doing duration manipulation for stepAmount in stepList: durationPointList = [] for start, end, ratio in durationParameters: percentChange = 1 + (ratio - 1) * stepAmount durationPointList.append((start, percentChange)) durationPointList.append((end, percentChange)) outputPrefix = "%s_%0.3g" % (outputName, stepAmount) durationTierFN = join(durationTierPath, "%s.DurationTier" % outputPrefix) outputWavFN = join(outputPath, "%s.wav" % outputPrefix) durationTier = dataio.PointObject2D(durationPointList, dataio.DURATION, 0, fromWavDuration) durationTier.save(durationTierFN) praat_scripts.resynthesizeDuration(praatEXE, fromWavFN, durationTierFN, outputWavFN, outputMinPitch, outputMaxPitch)
[ "def", "changeDuration", "(", "fromWavFN", ",", "durationParameters", ",", "stepList", ",", "outputName", ",", "outputMinPitch", ",", "outputMaxPitch", ",", "praatEXE", ")", ":", "rootPath", "=", "os", ".", "path", ".", "split", "(", "fromWavFN", ")", "[", "0", "]", "# Prep output directories", "outputPath", "=", "join", "(", "rootPath", ",", "\"duration_resynthesized_wavs\"", ")", "utils", ".", "makeDir", "(", "outputPath", ")", "durationTierPath", "=", "join", "(", "rootPath", ",", "\"duration_tiers\"", ")", "utils", ".", "makeDir", "(", "durationTierPath", ")", "fromWavDuration", "=", "audio_scripts", ".", "getSoundFileDuration", "(", "fromWavFN", ")", "durationParameters", "=", "copy", ".", "deepcopy", "(", "durationParameters", ")", "# Pad any gaps with values of 1 (no change in duration)", "# No need to stretch out any pauses at the beginning", "if", "durationParameters", "[", "0", "]", "[", "0", "]", "!=", "0", ":", "tmpVar", "=", "(", "0", ",", "durationParameters", "[", "0", "]", "[", "0", "]", "-", "PRAAT_TIME_DIFF", ",", "1", ")", "durationParameters", ".", "insert", "(", "0", ",", "tmpVar", ")", "# Or the end", "if", "durationParameters", "[", "-", "1", "]", "[", "1", "]", "<", "fromWavDuration", ":", "durationParameters", ".", "append", "(", "(", "durationParameters", "[", "-", "1", "]", "[", "1", "]", "+", "PRAAT_TIME_DIFF", ",", "fromWavDuration", ",", "1", ")", ")", "# Create the praat script for doing duration manipulation", "for", "stepAmount", "in", "stepList", ":", "durationPointList", "=", "[", "]", "for", "start", ",", "end", ",", "ratio", "in", "durationParameters", ":", "percentChange", "=", "1", "+", "(", "ratio", "-", "1", ")", "*", "stepAmount", "durationPointList", ".", "append", "(", "(", "start", ",", "percentChange", ")", ")", "durationPointList", ".", "append", "(", "(", "end", ",", "percentChange", ")", ")", "outputPrefix", "=", "\"%s_%0.3g\"", "%", "(", "outputName", ",", "stepAmount", ")", "durationTierFN", "=", "join", "(", "durationTierPath", ",", "\"%s.DurationTier\"", "%", "outputPrefix", ")", "outputWavFN", "=", "join", "(", "outputPath", ",", "\"%s.wav\"", "%", "outputPrefix", ")", "durationTier", "=", "dataio", ".", "PointObject2D", "(", "durationPointList", ",", "dataio", ".", "DURATION", ",", "0", ",", "fromWavDuration", ")", "durationTier", ".", "save", "(", "durationTierFN", ")", "praat_scripts", ".", "resynthesizeDuration", "(", "praatEXE", ",", "fromWavFN", ",", "durationTierFN", ",", "outputWavFN", ",", "outputMinPitch", ",", "outputMaxPitch", ")" ]
Uses praat to morph duration in one file to duration in another Praat uses the PSOLA algorithm
[ "Uses", "praat", "to", "morph", "duration", "in", "one", "file", "to", "duration", "in", "another" ]
python
train
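The per-step duration scaling in changeDuration() is plain linear interpolation toward a ratio; a runnable restatement of just that arithmetic:

# percentChange = 1 + (ratio - 1) * stepAmount, as computed in the loop above.
ratio = 1.5
for step in (0.0, 0.5, 1.0):
    print(1 + (ratio - 1) * step)  # 1.0, 1.25, 1.5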
HPAC/matchpy
matchpy/matching/syntactic.py
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L40-L42
def is_operation(term: Any) -> bool: """Return True iff the given term is a subclass of :class:`.Operation`.""" return isinstance(term, type) and issubclass(term, Operation)
[ "def", "is_operation", "(", "term", ":", "Any", ")", "->", "bool", ":", "return", "isinstance", "(", "term", ",", "type", ")", "and", "issubclass", "(", "term", ",", "Operation", ")" ]
Return True iff the given term is a subclass of :class:`.Operation`.
[ "Return", "True", "iff", "the", "given", "term", "is", "a", "subclass", "of", ":", "class", ":", ".", "Operation", "." ]
python
train
NetEaseGame/ATX
atx/strutils.py
https://github.com/NetEaseGame/ATX/blob/f4415c57b45cb0730e08899cbc92a2af1c047ffb/atx/strutils.py#L40-L52
def to_string(s, encoding='utf-8'): """ Accept unicode(py2) or bytes(py3) Returns: py2 type: str py3 type: str """ if six.PY2: return s.encode(encoding) if isinstance(s, bytes): return s.decode(encoding) return s
[ "def", "to_string", "(", "s", ",", "encoding", "=", "'utf-8'", ")", ":", "if", "six", ".", "PY2", ":", "return", "s", ".", "encode", "(", "encoding", ")", "if", "isinstance", "(", "s", ",", "bytes", ")", ":", "return", "s", ".", "decode", "(", "encoding", ")", "return", "s" ]
Accept unicode(py2) or bytes(py3) Returns: py2 type: str py3 type: str
[ "Accept", "unicode", "(", "py2", ")", "or", "bytes", "(", "py3", ")" ]
python
train
tjguk/winshell
winshell.py
https://github.com/tjguk/winshell/blob/1509d211ab3403dd1cff6113e4e13462d6dec35b/winshell.py#L87-L89
def dumped(text, level, indent=2): """Put curly brackets round an indented text""" return indented("{\n%s\n}" % indented(text, level + 1, indent) or "None", level, indent) + "\n"
[ "def", "dumped", "(", "text", ",", "level", ",", "indent", "=", "2", ")", ":", "return", "indented", "(", "\"{\\n%s\\n}\"", "%", "indented", "(", "text", ",", "level", "+", "1", ",", "indent", ")", "or", "\"None\"", ",", "level", ",", "indent", ")", "+", "\"\\n\"" ]
Put curly brackets round an indented text
[ "Put", "curly", "brackets", "round", "an", "indented", "text" ]
python
train
Qiskit/qiskit-terra
qiskit/dagcircuit/dagcircuit.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/dagcircuit/dagcircuit.py#L641-L652
def depth(self): """Return the circuit depth. Returns: int: the circuit depth Raises: DAGCircuitError: if not a directed acyclic graph """ if not nx.is_directed_acyclic_graph(self._multi_graph): raise DAGCircuitError("not a DAG") depth = nx.dag_longest_path_length(self._multi_graph) - 1 return depth if depth != -1 else 0
[ "def", "depth", "(", "self", ")", ":", "if", "not", "nx", ".", "is_directed_acyclic_graph", "(", "self", ".", "_multi_graph", ")", ":", "raise", "DAGCircuitError", "(", "\"not a DAG\"", ")", "depth", "=", "nx", ".", "dag_longest_path_length", "(", "self", ".", "_multi_graph", ")", "-", "1", "return", "depth", "if", "depth", "!=", "-", "1", "else", "0" ]
Return the circuit depth. Returns: int: the circuit depth Raises: DAGCircuitError: if not a directed acyclic graph
[ "Return", "the", "circuit", "depth", ".", "Returns", ":", "int", ":", "the", "circuit", "depth", "Raises", ":", "DAGCircuitError", ":", "if", "not", "a", "directed", "acyclic", "graph" ]
python
test
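A usage sketch for depth(), building a DAG from a two-gate circuit with the public converters API (assuming a qiskit-terra install of roughly this vintage):

from qiskit import QuantumRegister, QuantumCircuit
from qiskit.converters import circuit_to_dag

q = QuantumRegister(2)
circ = QuantumCircuit(q)
circ.h(q[0])
circ.cx(q[0], q[1])

dag = circuit_to_dag(circ)
print(dag.depth())  # 2: the H and the CX cannot run in the same layer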
MacHu-GWU/windtalker-project
windtalker/cipher.py
https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/cipher.py#L161-L198
def encrypt_dir(self, path, output_path=None, overwrite=False, stream=True, enable_verbose=True): """ Encrypt everything in a directory. :param path: path of the dir you need to encrypt :param output_path: encrypted dir output path :param overwrite: if True, then silently overwrite output file if exists :param stream: if it is a very big file, stream mode can avoid using too much memory :param enable_verbose: boolean, trigger on/off the help information """ path, output_path = files.process_dst_overwrite_args( src=path, dst=output_path, overwrite=overwrite, src_to_dst_func=files.get_encrpyted_path, ) self._show("--- Encrypt directory '%s' ---" % path, enable_verbose=enable_verbose) st = time.clock() for current_dir, _, file_list in os.walk(path): new_dir = current_dir.replace(path, output_path) if not os.path.exists(new_dir): # pragma: no cover os.mkdir(new_dir) for basename in file_list: old_path = os.path.join(current_dir, basename) new_path = os.path.join(new_dir, basename) self.encrypt_file(old_path, new_path, overwrite=overwrite, stream=stream, enable_verbose=enable_verbose) self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,), enable_verbose=enable_verbose) return output_path
[ "def", "encrypt_dir", "(", "self", ",", "path", ",", "output_path", "=", "None", ",", "overwrite", "=", "False", ",", "stream", "=", "True", ",", "enable_verbose", "=", "True", ")", ":", "path", ",", "output_path", "=", "files", ".", "process_dst_overwrite_args", "(", "src", "=", "path", ",", "dst", "=", "output_path", ",", "overwrite", "=", "overwrite", ",", "src_to_dst_func", "=", "files", ".", "get_encrpyted_path", ",", ")", "self", ".", "_show", "(", "\"--- Encrypt directory '%s' ---\"", "%", "path", ",", "enable_verbose", "=", "enable_verbose", ")", "st", "=", "time", ".", "clock", "(", ")", "for", "current_dir", ",", "_", ",", "file_list", "in", "os", ".", "walk", "(", "path", ")", ":", "new_dir", "=", "current_dir", ".", "replace", "(", "path", ",", "output_path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "new_dir", ")", ":", "# pragma: no cover", "os", ".", "mkdir", "(", "new_dir", ")", "for", "basename", "in", "file_list", ":", "old_path", "=", "os", ".", "path", ".", "join", "(", "current_dir", ",", "basename", ")", "new_path", "=", "os", ".", "path", ".", "join", "(", "new_dir", ",", "basename", ")", "self", ".", "encrypt_file", "(", "old_path", ",", "new_path", ",", "overwrite", "=", "overwrite", ",", "stream", "=", "stream", ",", "enable_verbose", "=", "enable_verbose", ")", "self", ".", "_show", "(", "\"Complete! Elapse %.6f seconds\"", "%", "(", "time", ".", "clock", "(", ")", "-", "st", ",", ")", ",", "enable_verbose", "=", "enable_verbose", ")", "return", "output_path" ]
Encrypt everything in a directory. :param path: path of the dir you need to encrypt :param output_path: encrypted dir output path :param overwrite: if True, then silently overwrite output file if exists :param stream: if it is a very big file, stream mode can avoid using too much memory :param enable_verbose: boolean, trigger on/off the help information
[ "Encrypt", "everything", "in", "a", "directory", "." ]
python
train
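encrypt_dir() is built around an os.walk tree-mirroring traversal; a standalone sketch of that traversal with the cipher call abstracted to a callable (names here are illustrative):

import os

def mirror_tree(src, dst, transform):
    """Recreate src's directory layout under dst, applying transform to each file."""
    for current_dir, _, file_list in os.walk(src):
        new_dir = current_dir.replace(src, dst)
        os.makedirs(new_dir, exist_ok=True)
        for basename in file_list:
            transform(os.path.join(current_dir, basename),
                      os.path.join(new_dir, basename))

# e.g. mirror_tree('plain', 'copy', shutil.copyfile) after `import shutil`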
LionelAuroux/pyrser
pyrser/passes/topython.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/passes/topython.py#L14-L32
def __exit_scope(self) -> ast.stmt: """Create the appropriate scope exiting statement. The documentation only shows one level and always uses 'return False' in examples. 'raise AltFalse()' within a try. 'break' within a loop. 'return False' otherwise. """ if self.in_optional: return ast.Pass() if self.in_try: return ast.Raise( ast.Call(ast.Name('AltFalse', ast.Load()), [], [], None, None), None) if self.in_loop: return ast.Break() return ast.Return(ast.Name('False', ast.Load()))
[ "def", "__exit_scope", "(", "self", ")", "->", "ast", ".", "stmt", ":", "if", "self", ".", "in_optional", ":", "return", "ast", ".", "Pass", "(", ")", "if", "self", ".", "in_try", ":", "return", "ast", ".", "Raise", "(", "ast", ".", "Call", "(", "ast", ".", "Name", "(", "'AltFalse'", ",", "ast", ".", "Load", "(", ")", ")", ",", "[", "]", ",", "[", "]", ",", "None", ",", "None", ")", ",", "None", ")", "if", "self", ".", "in_loop", ":", "return", "ast", ".", "Break", "(", ")", "return", "ast", ".", "Return", "(", "ast", ".", "Name", "(", "'False'", ",", "ast", ".", "Load", "(", ")", ")", ")" ]
Create the appropriate scope exiting statement. The documentation only shows one level and always uses 'return False' in examples. 'raise AltFalse()' within a try. 'break' within a loop. 'return False' otherwise.
[ "Create", "the", "appropriate", "scope", "exiting", "statement", "." ]
python
test
google/grumpy
third_party/stdlib/difflib.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/difflib.py#L295-L319
def set_seq1(self, a): """Set the first sequence to be compared. The second sequence to be compared is not changed. >>> s = SequenceMatcher(None, "abcd", "bcde") >>> s.ratio() 0.75 >>> s.set_seq1("bcde") >>> s.ratio() 1.0 >>> SequenceMatcher computes and caches detailed information about the second sequence, so if you want to compare one sequence S against many sequences, use .set_seq2(S) once and call .set_seq1(x) repeatedly for each of the other sequences. See also set_seqs() and set_seq2(). """ if a is self.a: return self.a = a self.matching_blocks = self.opcodes = None
[ "def", "set_seq1", "(", "self", ",", "a", ")", ":", "if", "a", "is", "self", ".", "a", ":", "return", "self", ".", "a", "=", "a", "self", ".", "matching_blocks", "=", "self", ".", "opcodes", "=", "None" ]
Set the first sequence to be compared. The second sequence to be compared is not changed. >>> s = SequenceMatcher(None, "abcd", "bcde") >>> s.ratio() 0.75 >>> s.set_seq1("bcde") >>> s.ratio() 1.0 >>> SequenceMatcher computes and caches detailed information about the second sequence, so if you want to compare one sequence S against many sequences, use .set_seq2(S) once and call .set_seq1(x) repeatedly for each of the other sequences. See also set_seqs() and set_seq2().
[ "Set", "the", "first", "sequence", "to", "be", "compared", "." ]
python
valid
tensorflow/datasets
tensorflow_datasets/core/tf_compat.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/tf_compat.py#L70-L89
def _patch_tf(tf): """Patch TF to maintain compatibility across versions.""" global TF_PATCH if TF_PATCH: return v_1_12 = distutils.version.LooseVersion("1.12.0") v_1_13 = distutils.version.LooseVersion("1.13.0") v_2 = distutils.version.LooseVersion("2.0.0") tf_version = distutils.version.LooseVersion(tf.__version__) if v_1_12 <= tf_version < v_1_13: # TODO(b/123930850): remove when 1.13 is stable. TF_PATCH = "tf1_12" _patch_for_tf1_12(tf) elif v_1_13 <= tf_version < v_2: TF_PATCH = "tf1_13" _patch_for_tf1_13(tf) else: TF_PATCH = "tf2" _patch_for_tf2(tf)
[ "def", "_patch_tf", "(", "tf", ")", ":", "global", "TF_PATCH", "if", "TF_PATCH", ":", "return", "v_1_12", "=", "distutils", ".", "version", ".", "LooseVersion", "(", "\"1.12.0\"", ")", "v_1_13", "=", "distutils", ".", "version", ".", "LooseVersion", "(", "\"1.13.0\"", ")", "v_2", "=", "distutils", ".", "version", ".", "LooseVersion", "(", "\"2.0.0\"", ")", "tf_version", "=", "distutils", ".", "version", ".", "LooseVersion", "(", "tf", ".", "__version__", ")", "if", "v_1_12", "<=", "tf_version", "<", "v_1_13", ":", "# TODO(b/123930850): remove when 1.13 is stable.", "TF_PATCH", "=", "\"tf1_12\"", "_patch_for_tf1_12", "(", "tf", ")", "elif", "v_1_13", "<=", "tf_version", "<", "v_2", ":", "TF_PATCH", "=", "\"tf1_13\"", "_patch_for_tf1_13", "(", "tf", ")", "else", ":", "TF_PATCH", "=", "\"tf2\"", "_patch_for_tf2", "(", "tf", ")" ]
Patch TF to maintain compatibility across versions.
[ "Patch", "TF", "to", "maintain", "compatibility", "across", "versions", "." ]
python
train
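The version gating in _patch_tf() compares LooseVersion objects; a short runnable illustration of the same comparison:

from distutils.version import LooseVersion

tf_version = LooseVersion('1.13.1')  # stand-in for tf.__version__
print(LooseVersion('1.13.0') <= tf_version < LooseVersion('2.0.0'))  # True -> the tf1_13 patch path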
pywbem/pywbem
pywbem/_subscription_manager.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/_subscription_manager.py#L916-L1021
def add_subscriptions(self, server_id, filter_path, destination_paths=None,
                          owned=True):
        # pylint: disable=line-too-long
        """
        Add subscriptions to a WBEM server for a particular set of indications
        defined by an indication filter and for a particular set of WBEM
        listeners defined by the instance paths of their listener destinations,
        by creating indication subscription instances (of CIM class
        "CIM_IndicationSubscription") in the Interop namespace of that server.

        The specified indication filter may be owned, permanent or static.

        The specified listener destinations may be owned, permanent or static.

        When creating permanent subscriptions, the indication filter and the
        listener destinations must not be owned.

        Owned subscriptions are added or updated conditionally: If the
        subscription instance to be added is already registered with
        this subscription manager and has the same property values, it is not
        created or modified. If it has the same path but different property
        values, it is modified to get the desired property values. If an
        instance with this path does not exist yet (the normal case), it is
        created.

        Permanent subscriptions are created unconditionally, and it is up to
        the user to ensure that such an instance does not exist yet.

        Upon successful return of this method, the added subscriptions are
        active, so that the specified WBEM listeners may immediately receive
        indications.

        Parameters:

          server_id (:term:`string`):
            The server ID of the WBEM server, returned by
            :meth:`~pywbem.WBEMSubscriptionManager.add_server`.

          filter_path (:class:`~pywbem.CIMInstanceName`):
            Instance path of the indication filter instance in the WBEM
            server that specifies the indications to be sent.

          destination_paths (:class:`~pywbem.CIMInstanceName` or list of :class:`~pywbem.CIMInstanceName`):
            Instance paths of the listener destination instances in the WBEM
            server that specify the target WBEM listener.

            If `None`, subscriptions will be created for all owned listener
            destinations registered to this subscription manager.

          owned (:class:`py:bool`):
            Defines the ownership type of the created subscription
            instances: If `True`, they will be owned. Otherwise, they will be
            permanent. See :ref:`WBEMSubscriptionManager` for details about
            these ownership types.

        Returns:

            :class:`py:list` of :class:`~pywbem.CIMInstance`: The indication
            subscription instances created in the WBEM server.

        Raises:

            Exceptions raised by :class:`~pywbem.WBEMConnection`.
            ValueError: Incorrect input parameter values.
        """  # noqa: E501

        # server_id is validated in _create_...() method.

        owned_destination_paths = [inst.path for inst in
                                   self._owned_destinations[server_id]]

        # Apply default
        if destination_paths is None:
            destination_paths = owned_destination_paths

        # If list, recursively call this function with each list item.
        if isinstance(destination_paths, list):
            sub_insts = []
            for dest_path in destination_paths:
                new_sub_insts = self.add_subscriptions(
                    server_id, filter_path, dest_path, owned)
                sub_insts.extend(new_sub_insts)
            return sub_insts

        # Here, the variable will be a single list item.
        dest_path = destination_paths

        owned_filter_paths = [inst.path for inst in
                              self._owned_filters[server_id]]

        # Enforce that a permanent subscription is not created on an owned
        # filter or on an owned destination.
        if not owned:
            if filter_path in owned_filter_paths:
                raise ValueError(
                    _format("Permanent subscription cannot be created on "
                            "owned filter: {0!A}", filter_path))
            if dest_path in owned_destination_paths:
                raise ValueError(
                    _format("Permanent subscription cannot be created on "
                            "owned listener destination: {0!A}", dest_path))

        sub_inst = self._create_subscription(server_id, dest_path, filter_path,
                                             owned)

        return [sub_inst]
[ "def", "add_subscriptions", "(", "self", ",", "server_id", ",", "filter_path", ",", "destination_paths", "=", "None", ",", "owned", "=", "True", ")", ":", "# pylint: disable=line-too-long", "# noqa: E501", "# server_id is validated in _create_...() method.", "owned_destination_paths", "=", "[", "inst", ".", "path", "for", "inst", "in", "self", ".", "_owned_destinations", "[", "server_id", "]", "]", "# Apply default", "if", "destination_paths", "is", "None", ":", "destination_paths", "=", "owned_destination_paths", "# If list, recursively call this function with each list item.", "if", "isinstance", "(", "destination_paths", ",", "list", ")", ":", "sub_insts", "=", "[", "]", "for", "dest_path", "in", "destination_paths", ":", "new_sub_insts", "=", "self", ".", "add_subscriptions", "(", "server_id", ",", "filter_path", ",", "dest_path", ",", "owned", ")", "sub_insts", ".", "extend", "(", "new_sub_insts", ")", "return", "sub_insts", "# Here, the variable will be a single list item.", "dest_path", "=", "destination_paths", "owned_filter_paths", "=", "[", "inst", ".", "path", "for", "inst", "in", "self", ".", "_owned_filters", "[", "server_id", "]", "]", "# Enforce that a permanent subscription is not created on an owned", "# filter or on an owned destination.", "if", "not", "owned", ":", "if", "filter_path", "in", "owned_filter_paths", ":", "raise", "ValueError", "(", "_format", "(", "\"Permanent subscription cannot be created on \"", "\"owned filter: {0!A}\"", ",", "filter_path", ")", ")", "if", "dest_path", "in", "owned_destination_paths", ":", "raise", "ValueError", "(", "_format", "(", "\"Permanent subscription cannot be created on \"", "\"owned listener destination: {0!A}\"", ",", "dest_path", ")", ")", "sub_inst", "=", "self", ".", "_create_subscription", "(", "server_id", ",", "dest_path", ",", "filter_path", ",", "owned", ")", "return", "[", "sub_inst", "]" ]
Add subscriptions to a WBEM server for a particular set of indications defined by an indication filter and for a particular set of WBEM listeners defined by the instance paths of their listener destinations, by creating indication subscription instances (of CIM class "CIM_IndicationSubscription") in the Interop namespace of that server. The specified indication filter may be owned, permanent or static. The specified listener destinations may be owned, permanent or static. When creating permanent subscriptions, the indication filter and the listener destinations must not be owned. Owned subscriptions are added or updated conditionally: If the subscription instance to be added is already registered with this subscription manager and has the same property values, it is not created or modified. If it has the same path but different property values, it is modified to get the desired property values. If an instance with this path does not exist yet (the normal case), it is created. Permanent subscriptions are created unconditionally, and it is up to the user to ensure that such an instance does not exist yet. Upon successful return of this method, the added subscriptions are active, so that the specified WBEM listeners may immediately receive indications. Parameters: server_id (:term:`string`): The server ID of the WBEM server, returned by :meth:`~pywbem.WBEMSubscriptionManager.add_server`. filter_path (:class:`~pywbem.CIMInstanceName`): Instance path of the indication filter instance in the WBEM server that specifies the indications to be sent. destination_paths (:class:`~pywbem.CIMInstanceName` or list of :class:`~pywbem.CIMInstanceName`): Instance paths of the listener destination instances in the WBEM server that specify the target WBEM listener. If `None`, subscriptions will be created for all owned listener destinations registered to this subscription manager. owned (:class:`py:bool`): Defines the ownership type of the created subscription instances: If `True`, they will be owned. Otherwise, they will be permanent. See :ref:`WBEMSubscriptionManager` for details about these ownership types. Returns: :class:`py:list` of :class:`~pywbem.CIMInstance`: The indication subscription instances created in the WBEM server. Raises: Exceptions raised by :class:`~pywbem.WBEMConnection`. ValueError: Incorrect input parameter values.
[ "Add", "subscriptions", "to", "a", "WBEM", "server", "for", "a", "particular", "set", "of", "indications", "defined", "by", "an", "indication", "filter", "and", "for", "a", "particular", "set", "of", "WBEM", "listeners", "defined", "by", "the", "instance", "paths", "of", "their", "listener", "destinations", "by", "creating", "indication", "subscription", "instances", "(", "of", "CIM", "class", "CIM_IndicationSubscription", ")", "in", "the", "Interop", "namespace", "of", "that", "server", "." ]
python
train
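A short usage sketch may clarify how add_subscriptions fits into the wider pywbem subscription workflow. This is a hedged sketch, not a documented pywbem example: the server URL, credentials, listener URL, and filter query are assumptions, and the companion calls (WBEMServer, add_server, add_destinations, add_filter) are shown as they are commonly used alongside add_subscriptions; exact signatures should be checked against the pywbem documentation.

# Hypothetical workflow: register a listener destination and a filter,
# then pair them via add_subscriptions. All URLs and credentials are
# made-up placeholders.
from pywbem import WBEMConnection, WBEMServer, WBEMSubscriptionManager

conn = WBEMConnection('https://wbem-server.example.com',
                      creds=('user', 'password'))
sub_mgr = WBEMSubscriptionManager(subscription_manager_id='myapp')
server_id = sub_mgr.add_server(WBEMServer(conn))

# Owned listener destination(s) and an owned indication filter.
dest_insts = sub_mgr.add_destinations(
    server_id, 'https://listener.example.com:5000', owned=True)
filter_inst = sub_mgr.add_filter(
    server_id, source_namespace='root/cimv2',
    query='SELECT * FROM CIM_AlertIndication', owned=True)

# Owned subscriptions, one per destination; they are active on return.
sub_insts = sub_mgr.add_subscriptions(
    server_id, filter_inst.path,
    destination_paths=[inst.path for inst in dest_insts],
    owned=True)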
pmacosta/pexdoc
pexdoc/exh.py
https://github.com/pmacosta/pexdoc/blob/201ac243e5781347feb75896a4231429fe6da4b1/pexdoc/exh.py#L1065-L1070
def _get_ex_data(self):
    """Return function ID and hierarchical function name."""
    func_id, func_name = self._get_callable_path()
    if self._full_cname:
        func_name = self.encode_call(func_name)
    return func_id, func_name
[ "def", "_get_ex_data", "(", "self", ")", ":", "func_id", ",", "func_name", "=", "self", ".", "_get_callable_path", "(", ")", "if", "self", ".", "_full_cname", ":", "func_name", "=", "self", ".", "encode_call", "(", "func_name", ")", "return", "func_id", ",", "func_name" ]
Return function ID and hierarchical function name.
[ "Return", "hierarchical", "function", "name", "." ]
python
train
raphaelvallat/pingouin
pingouin/bayesian.py
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/bayesian.py#L124-L192
def bayesfactor_pearson(r, n):
    """
    Bayes Factor of a Pearson correlation.

    Parameters
    ----------
    r : float
        Pearson correlation coefficient
    n : int
        Sample size

    Returns
    -------
    bf : str
        Bayes Factor (BF10).
        The Bayes Factor quantifies the evidence in favour of the
        alternative hypothesis.

    Notes
    -----
    Adapted from a Matlab code found at
    https://github.com/anne-urai/Tools/blob/master/stats/BayesFactors/corrbf.m

    If you would like to compute the Bayes Factor directly from the raw data
    instead of from the correlation coefficient, use the
    :py:func:`pingouin.corr` function.

    The JZS Bayes Factor is approximated using the formula described in
    ref [1]_:

    .. math::

        BF_{10} = \\frac{\\sqrt{n/2}}{\\Gamma(1/2)}
        \\int_{0}^{\\infty}\\exp\\Big(\\frac{n-2}{2}\\log(1+g)
        - \\frac{n-1}{2}\\log\\big(1+(1-r^2)g\\big)
        - \\frac{3}{2}\\log(g) - \\frac{n}{2g}\\Big)\\,dg

    where **n** is the sample size and **r** is the Pearson correlation
    coefficient.

    References
    ----------
    .. [1] Wetzels, R., Wagenmakers, E.-J., 2012. A default Bayesian
       hypothesis test for correlations and partial correlations.
       Psychon. Bull. Rev. 19, 1057–1064.
       https://doi.org/10.3758/s13423-012-0295-x

    Examples
    --------
    Bayes Factor of a Pearson correlation

        >>> from pingouin import bayesfactor_pearson
        >>> bf = bayesfactor_pearson(0.6, 20)
        >>> print("Bayes Factor: %s" % bf)
        Bayes Factor: 8.221
    """
    from scipy.special import gamma

    # Function to be integrated
    def fun(g, r, n):
        return np.exp(((n - 2) / 2) * np.log(1 + g) + (-(n - 1) / 2)
                      * np.log(1 + (1 - r**2) * g) + (-3 / 2)
                      * np.log(g) + - n / (2 * g))

    # JZS Bayes factor calculation
    integr = quad(fun, 0, np.inf, args=(r, n))[0]
    bf10 = np.sqrt((n / 2)) / gamma(1 / 2) * integr
    return _format_bf(bf10)
[ "def", "bayesfactor_pearson", "(", "r", ",", "n", ")", ":", "from", "scipy", ".", "special", "import", "gamma", "# Function to be integrated", "def", "fun", "(", "g", ",", "r", ",", "n", ")", ":", "return", "np", ".", "exp", "(", "(", "(", "n", "-", "2", ")", "/", "2", ")", "*", "np", ".", "log", "(", "1", "+", "g", ")", "+", "(", "-", "(", "n", "-", "1", ")", "/", "2", ")", "*", "np", ".", "log", "(", "1", "+", "(", "1", "-", "r", "**", "2", ")", "*", "g", ")", "+", "(", "-", "3", "/", "2", ")", "*", "np", ".", "log", "(", "g", ")", "+", "-", "n", "/", "(", "2", "*", "g", ")", ")", "# JZS Bayes factor calculation", "integr", "=", "quad", "(", "fun", ",", "0", ",", "np", ".", "inf", ",", "args", "=", "(", "r", ",", "n", ")", ")", "[", "0", "]", "bf10", "=", "np", ".", "sqrt", "(", "(", "n", "/", "2", ")", ")", "/", "gamma", "(", "1", "/", "2", ")", "*", "integr", "return", "_format_bf", "(", "bf10", ")" ]
Bayes Factor of a Pearson correlation.

Parameters
----------
r : float
    Pearson correlation coefficient
n : int
    Sample size

Returns
-------
bf : str
    Bayes Factor (BF10).
    The Bayes Factor quantifies the evidence in favour of the
    alternative hypothesis.

Notes
-----
Adapted from a Matlab code found at
https://github.com/anne-urai/Tools/blob/master/stats/BayesFactors/corrbf.m

If you would like to compute the Bayes Factor directly from the raw data
instead of from the correlation coefficient, use the
:py:func:`pingouin.corr` function.

The JZS Bayes Factor is approximated using the formula described in
ref [1]_:

.. math::

    BF_{10} = \\frac{\\sqrt{n/2}}{\\Gamma(1/2)}
    \\int_{0}^{\\infty}\\exp\\Big(\\frac{n-2}{2}\\log(1+g)
    - \\frac{n-1}{2}\\log\\big(1+(1-r^2)g\\big)
    - \\frac{3}{2}\\log(g) - \\frac{n}{2g}\\Big)\\,dg

where **n** is the sample size and **r** is the Pearson correlation
coefficient.

References
----------
.. [1] Wetzels, R., Wagenmakers, E.-J., 2012. A default Bayesian
   hypothesis test for correlations and partial correlations.
   Psychon. Bull. Rev. 19, 1057–1064.
   https://doi.org/10.3758/s13423-012-0295-x

Examples
--------
Bayes Factor of a Pearson correlation

    >>> from pingouin import bayesfactor_pearson
    >>> bf = bayesfactor_pearson(0.6, 20)
    >>> print("Bayes Factor: %s" % bf)
    Bayes Factor: 8.221
[ "Bayes", "Factor", "of", "a", "Pearson", "correlation", "." ]
python
train
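As a quick numerical check of the formula above, the integral can be evaluated directly; the following standalone sketch mirrors the body of bayesfactor_pearson and reproduces the docstring example (r = 0.6, n = 20). Only numpy and scipy are assumed.

# Direct evaluation of the JZS Bayes factor integral for r = 0.6, n = 20.
import numpy as np
from scipy.integrate import quad
from scipy.special import gamma

r, n = 0.6, 20

def integrand(g):
    # Exponent assembled in log space, as in the .. math:: block above.
    return np.exp(((n - 2) / 2) * np.log(1 + g)
                  - ((n - 1) / 2) * np.log(1 + (1 - r**2) * g)
                  - (3 / 2) * np.log(g)
                  - n / (2 * g))

bf10 = np.sqrt(n / 2) / gamma(1 / 2) * quad(integrand, 0, np.inf)[0]
print('%.3f' % bf10)  # ~8.221, matching the docstring example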
sibirrer/lenstronomy
lenstronomy/LensModel/Profiles/gaussian_kappa.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/gaussian_kappa.py#L62-L80
def hessian(self, x, y, amp, sigma, center_x=0, center_y=0):
    """
    returns Hessian matrix of function d^2f/dx^2, d^2f/dy^2, d^2f/dxdy
    """
    x_ = x - center_x
    y_ = y - center_y
    r = np.sqrt(x_**2 + y_**2)
    sigma_x, sigma_y = sigma, sigma
    if isinstance(r, int) or isinstance(r, float):
        r = max(r, self.ds)
    else:
        r[r <= self.ds] = self.ds
    d_alpha_dr = -self.d_alpha_dr(r, amp, sigma_x, sigma_y)
    alpha = self.alpha_abs(r, amp, sigma)
    f_xx = -(d_alpha_dr/r + alpha/r**2) * x_**2/r + alpha/r
    f_yy = -(d_alpha_dr/r + alpha/r**2) * y_**2/r + alpha/r
    f_xy = -(d_alpha_dr/r + alpha/r**2) * x_*y_/r
    return f_xx, f_yy, f_xy
[ "def", "hessian", "(", "self", ",", "x", ",", "y", ",", "amp", ",", "sigma", ",", "center_x", "=", "0", ",", "center_y", "=", "0", ")", ":", "x_", "=", "x", "-", "center_x", "y_", "=", "y", "-", "center_y", "r", "=", "np", ".", "sqrt", "(", "x_", "**", "2", "+", "y_", "**", "2", ")", "sigma_x", ",", "sigma_y", "=", "sigma", ",", "sigma", "if", "isinstance", "(", "r", ",", "int", ")", "or", "isinstance", "(", "r", ",", "float", ")", ":", "r", "=", "max", "(", "r", ",", "self", ".", "ds", ")", "else", ":", "r", "[", "r", "<=", "self", ".", "ds", "]", "=", "self", ".", "ds", "d_alpha_dr", "=", "-", "self", ".", "d_alpha_dr", "(", "r", ",", "amp", ",", "sigma_x", ",", "sigma_y", ")", "alpha", "=", "self", ".", "alpha_abs", "(", "r", ",", "amp", ",", "sigma", ")", "f_xx", "=", "-", "(", "d_alpha_dr", "/", "r", "+", "alpha", "/", "r", "**", "2", ")", "*", "x_", "**", "2", "/", "r", "+", "alpha", "/", "r", "f_yy", "=", "-", "(", "d_alpha_dr", "/", "r", "+", "alpha", "/", "r", "**", "2", ")", "*", "y_", "**", "2", "/", "r", "+", "alpha", "/", "r", "f_xy", "=", "-", "(", "d_alpha_dr", "/", "r", "+", "alpha", "/", "r", "**", "2", ")", "*", "x_", "*", "y_", "/", "r", "return", "f_xx", ",", "f_yy", ",", "f_xy" ]
returns Hessian matrix of function d^2f/dx^2, d^2f/dy^2, d^2f/dxdy
[ "returns", "Hessian", "matrix", "of", "function", "d^2f", "/", "dx^2", "d^f", "/", "dy^2", "d^2", "/", "dxdy" ]
python
train
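A minimal usage sketch for the hessian method above follows. It assumes the containing class is GaussianKappa with a no-argument constructor (suggested by the module path but not confirmed here); the amplitude, width, and grid values are made-up test inputs.

# Hypothetical sketch: evaluate the Hessian of a Gaussian convergence
# profile on a grid. Class name and constructor are assumptions.
import numpy as np
from lenstronomy.LensModel.Profiles.gaussian_kappa import GaussianKappa

profile = GaussianKappa()
x, y = np.meshgrid(np.linspace(-2, 2, 5), np.linspace(-2, 2, 5))

f_xx, f_yy, f_xy = profile.hessian(x.ravel(), y.ravel(),
                                   amp=1.0, sigma=0.5)

# The lensing convergence kappa is half the trace of the Hessian.
kappa = 0.5 * (f_xx + f_yy)
print(kappa.reshape(x.shape))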
saltstack/salt
salt/modules/elasticsearch.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/elasticsearch.py#L917-L937
def search_template_get(id, hosts=None, profile=None): ''' .. versionadded:: 2017.7.0 Obtain existing search template definition. id Template ID CLI example:: salt myminion elasticsearch.search_template_get mytemplate ''' es = _get_instance(hosts, profile) try: return es.get_template(id=id) except elasticsearch.NotFoundError: return None except elasticsearch.TransportError as e: raise CommandExecutionError("Cannot obtain search template {0}, server returned code {1} with message {2}".format(id, e.status_code, e.error))
[ "def", "search_template_get", "(", "id", ",", "hosts", "=", "None", ",", "profile", "=", "None", ")", ":", "es", "=", "_get_instance", "(", "hosts", ",", "profile", ")", "try", ":", "return", "es", ".", "get_template", "(", "id", "=", "id", ")", "except", "elasticsearch", ".", "NotFoundError", ":", "return", "None", "except", "elasticsearch", ".", "TransportError", "as", "e", ":", "raise", "CommandExecutionError", "(", "\"Cannot obtain search template {0}, server returned code {1} with message {2}\"", ".", "format", "(", "id", ",", "e", ".", "status_code", ",", "e", ".", "error", ")", ")" ]
.. versionadded:: 2017.7.0 Obtain existing search template definition. id Template ID CLI example:: salt myminion elasticsearch.search_template_get mytemplate
[ "..", "versionadded", "::", "2017", ".", "7", ".", "0" ]
python
train
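For comparison, the same lookup-and-error-handling pattern can be written against elasticsearch-py directly, outside of Salt. In this sketch the host, the template ID, and the get_template(id=...) signature (present in older elasticsearch-py releases) are assumptions.

# Standalone sketch of the pattern used by search_template_get above.
import elasticsearch

es = elasticsearch.Elasticsearch(['http://localhost:9200'])
try:
    template = es.get_template(id='mytemplate')
except elasticsearch.NotFoundError:
    template = None  # a missing template is reported as None, not an error
except elasticsearch.TransportError as e:
    raise RuntimeError(
        'Cannot obtain search template mytemplate, server returned '
        'code {0} with message {1}'.format(e.status_code, e.error))
print(template)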