Dataset schema (column name, dtype, and observed value range):

    repo              string    7 to 54 chars
    path              string    4 to 192 chars
    url               string    87 to 284 chars
    code              string    78 to 104k chars
    code_tokens       sequence
    docstring         string    1 to 46.9k chars
    docstring_tokens  sequence
    language          string    1 distinct value
    partition         string    3 distinct values
nwilming/ocupy
ocupy/measures.py
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/measures.py#L199-L212
def nss_model(prediction, fm):
    """
    wraps nss functionality for model evaluation

    input:
        prediction: 2D matrix
            the model salience map
        fm : fixmat
            Fixations that define the actuals
    """
    (r_y, r_x) = calc_resize_factor(prediction, fm.image_size)
    fix = ((np.array(fm.y-1)*r_y).astype(int),
           (np.array(fm.x-1)*r_x).astype(int))
    return nss(prediction, fix)
[ "def", "nss_model", "(", "prediction", ",", "fm", ")", ":", "(", "r_y", ",", "r_x", ")", "=", "calc_resize_factor", "(", "prediction", ",", "fm", ".", "image_size", ")", "fix", "=", "(", "(", "np", ".", "array", "(", "fm", ".", "y", "-", "1", ")", "*", "r_y", ")", ".", "astype", "(", "int", ")", ",", "(", "np", ".", "array", "(", "fm", ".", "x", "-", "1", ")", "*", "r_x", ")", ".", "astype", "(", "int", ")", ")", "return", "nss", "(", "prediction", ",", "fix", ")" ]
wraps nss functionality for model evaluation

input:
    prediction: 2D matrix
        the model salience map
    fm : fixmat
        Fixations that define the actuals
[ "wraps", "nss", "functionality", "for", "model", "evaluation" ]
python
train
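The `nss` helper that `nss_model` wraps is not included in this record. Below is a minimal sketch of the standard normalized scanpath saliency computation it refers to, assuming NumPy arrays and the `(y, x)` index pair built above (the body is an assumption for illustration, not ocupy's actual implementation):

    import numpy as np

    def nss(prediction, fix):
        # z-score the salience map, then average it at the fixated (y, x) indices
        normed = (prediction - prediction.mean()) / prediction.std()
        return normed[fix[0], fix[1]].mean()

    # e.g. two fixations at (y, x) = (3, 2) and (10, 4) on a random map
    salience = np.random.rand(64, 64)
    print(nss(salience, (np.array([3, 10]), np.array([2, 4]))))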
iotile/coretools
iotileemulate/iotile/emulate/virtual/peripheral_tile.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/virtual/peripheral_tile.py#L127-L140
def restore_state(self, state):
    """Restore the current state of this emulated object.

    Args:
        state (dict): A previously dumped state produced by dump_state.
    """
    super(EmulatedPeripheralTile, self).restore_state(state)

    self.debug_mode = state.get('debug_mode', False)
    self.run_level = state.get('run_level', None)

    if state.get('app_started', False):
        self._hosted_app_running.set()
[ "def", "restore_state", "(", "self", ",", "state", ")", ":", "super", "(", "EmulatedPeripheralTile", ",", "self", ")", ".", "restore_state", "(", "state", ")", "self", ".", "debug_mode", "=", "state", ".", "get", "(", "'debug_mode'", ",", "False", ")", "self", ".", "run_level", "=", "state", ".", "get", "(", "'run_level'", ",", "None", ")", "if", "state", ".", "get", "(", "'app_started'", ",", "False", ")", ":", "self", ".", "_hosted_app_running", ".", "set", "(", ")" ]
Restore the current state of this emulated object.

Args:
    state (dict): A previously dumped state produced by dump_state.
[ "Restore", "the", "current", "state", "of", "this", "emulated", "object", "." ]
python
train
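`restore_state` reads back exactly three keys, so the matching `dump_state` must have written them. A hypothetical counterpart sketched only from the keys consumed above (the real coretools `dump_state` may differ):

    def dump_state(self):
        # assumed mirror of restore_state: persist the same keys it reads
        state = super(EmulatedPeripheralTile, self).dump_state()
        state['debug_mode'] = self.debug_mode
        state['run_level'] = self.run_level
        state['app_started'] = self._hosted_app_running.is_set()
        return state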
googledatalab/pydatalab
google/datalab/storage/_object.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_object.py#L175-L188
def metadata(self):
    """Retrieves metadata about the object.

    Returns:
        An ObjectMetadata instance with information about this object.

    Raises:
        Exception if there was an error requesting the object's metadata.
    """
    if self._info is None:
        try:
            self._info = self._api.objects_get(self._bucket, self._key)
        except Exception as e:
            raise e
    return ObjectMetadata(self._info) if self._info else None
[ "def", "metadata", "(", "self", ")", ":", "if", "self", ".", "_info", "is", "None", ":", "try", ":", "self", ".", "_info", "=", "self", ".", "_api", ".", "objects_get", "(", "self", ".", "_bucket", ",", "self", ".", "_key", ")", "except", "Exception", "as", "e", ":", "raise", "e", "return", "ObjectMetadata", "(", "self", ".", "_info", ")", "if", "self", ".", "_info", "else", "None" ]
Retrieves metadata about the object.

Returns:
    An ObjectMetadata instance with information about this object.

Raises:
    Exception if there was an error requesting the object's metadata.
[ "Retrieves", "metadata", "about", "the", "object", "." ]
python
train
Rapptz/discord.py
discord/ext/commands/core.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/ext/commands/core.py#L1430-L1453
def bot_has_any_role(*items):
    """Similar to :func:`.has_any_role` except checks if the bot itself has
    any of the roles listed.

    This check raises one of two special exceptions, :exc:`.BotMissingAnyRole`
    if the bot is missing all roles, or :exc:`.NoPrivateMessage` if it is used
    in a private message. Both inherit from :exc:`.CheckFailure`.

    .. versionchanged:: 1.1.0

        Raise :exc:`.BotMissingAnyRole` or :exc:`.NoPrivateMessage`
        instead of generic checkfailure
    """
    def predicate(ctx):
        ch = ctx.channel
        if not isinstance(ch, discord.abc.GuildChannel):
            raise NoPrivateMessage()

        me = ch.guild.me
        getter = functools.partial(discord.utils.get, me.roles)
        if any(getter(id=item) is not None if isinstance(item, int) else getter(name=item) is not None for item in items):
            return True
        raise BotMissingAnyRole(items)
    return check(predicate)
[ "def", "bot_has_any_role", "(", "*", "items", ")", ":", "def", "predicate", "(", "ctx", ")", ":", "ch", "=", "ctx", ".", "channel", "if", "not", "isinstance", "(", "ch", ",", "discord", ".", "abc", ".", "GuildChannel", ")", ":", "raise", "NoPrivateMessage", "(", ")", "me", "=", "ch", ".", "guild", ".", "me", "getter", "=", "functools", ".", "partial", "(", "discord", ".", "utils", ".", "get", ",", "me", ".", "roles", ")", "if", "any", "(", "getter", "(", "id", "=", "item", ")", "is", "not", "None", "if", "isinstance", "(", "item", ",", "int", ")", "else", "getter", "(", "name", "=", "item", ")", "is", "not", "None", "for", "item", "in", "items", ")", ":", "return", "True", "raise", "BotMissingAnyRole", "(", "items", ")", "return", "check", "(", "predicate", ")" ]
Similar to :func:`.has_any_role` except checks if the bot itself has
any of the roles listed.

This check raises one of two special exceptions, :exc:`.BotMissingAnyRole`
if the bot is missing all roles, or :exc:`.NoPrivateMessage` if it is used
in a private message. Both inherit from :exc:`.CheckFailure`.

.. versionchanged:: 1.1.0

    Raise :exc:`.BotMissingAnyRole` or :exc:`.NoPrivateMessage`
    instead of generic checkfailure
[ "Similar", "to", ":", "func", ":", ".", "has_any_role", "except", "checks", "if", "the", "bot", "itself", "has", "any", "of", "the", "roles", "listed", "." ]
python
train
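Like the other checks in `discord.ext.commands`, this one is applied as a decorator above a command; a short usage sketch (the role name and ID are placeholders):

    from discord.ext import commands

    bot = commands.Bot(command_prefix='!')

    @bot.command()
    @commands.bot_has_any_role('Moderators', 492212595072434186)  # placeholder role name/ID
    async def cleanup(ctx):
        # reached only when the bot itself holds at least one listed role
        await ctx.send('Cleaning up...')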
softlayer/softlayer-python
SoftLayer/CLI/ssl/list.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/ssl/list.py#L24-L42
def cli(env, status, sortby):
    """List SSL certificates."""
    manager = SoftLayer.SSLManager(env.client)

    certificates = manager.list_certs(status)

    table = formatting.Table(['id', 'common_name', 'days_until_expire', 'notes'])
    for certificate in certificates:
        table.add_row([
            certificate['id'],
            certificate['commonName'],
            certificate['validityDays'],
            certificate.get('notes', formatting.blank())
        ])
    table.sortby = sortby
    env.fout(table)
[ "def", "cli", "(", "env", ",", "status", ",", "sortby", ")", ":", "manager", "=", "SoftLayer", ".", "SSLManager", "(", "env", ".", "client", ")", "certificates", "=", "manager", ".", "list_certs", "(", "status", ")", "table", "=", "formatting", ".", "Table", "(", "[", "'id'", ",", "'common_name'", ",", "'days_until_expire'", ",", "'notes'", "]", ")", "for", "certificate", "in", "certificates", ":", "table", ".", "add_row", "(", "[", "certificate", "[", "'id'", "]", ",", "certificate", "[", "'commonName'", "]", ",", "certificate", "[", "'validityDays'", "]", ",", "certificate", ".", "get", "(", "'notes'", ",", "formatting", ".", "blank", "(", ")", ")", "]", ")", "table", ".", "sortby", "=", "sortby", "env", ".", "fout", "(", "table", ")" ]
List SSL certificates.
[ "List", "SSL", "certificates", "." ]
python
train
sdispater/pendulum
pendulum/formatting/formatter.py
https://github.com/sdispater/pendulum/blob/94d28b0d3cb524ae02361bd1ed7ea03e2e655e4e/pendulum/formatting/formatter.py#L232-L261
def format(self, dt, fmt, locale=None):
    """
    Formats a DateTime instance with a given format and locale.

    :param dt: The instance to format
    :type dt: pendulum.DateTime

    :param fmt: The format to use
    :type fmt: str

    :param locale: The locale to use
    :type locale: str or Locale or None

    :rtype: str
    """
    if not locale:
        locale = pendulum.get_locale()

    locale = Locale.load(locale)

    result = self._FORMAT_RE.sub(
        lambda m: m.group(1)
        if m.group(1)
        else m.group(2)
        if m.group(2)
        else self._format_token(dt, m.group(3), locale),
        fmt,
    )

    return decode(result)
[ "def", "format", "(", "self", ",", "dt", ",", "fmt", ",", "locale", "=", "None", ")", ":", "if", "not", "locale", ":", "locale", "=", "pendulum", ".", "get_locale", "(", ")", "locale", "=", "Locale", ".", "load", "(", "locale", ")", "result", "=", "self", ".", "_FORMAT_RE", ".", "sub", "(", "lambda", "m", ":", "m", ".", "group", "(", "1", ")", "if", "m", ".", "group", "(", "1", ")", "else", "m", ".", "group", "(", "2", ")", "if", "m", ".", "group", "(", "2", ")", "else", "self", ".", "_format_token", "(", "dt", ",", "m", ".", "group", "(", "3", ")", ",", "locale", ")", ",", "fmt", ",", ")", "return", "decode", "(", "result", ")" ]
Formats a DateTime instance with a given format and locale.

:param dt: The instance to format
:type dt: pendulum.DateTime

:param fmt: The format to use
:type fmt: str

:param locale: The locale to use
:type locale: str or Locale or None

:rtype: str
[ "Formats", "a", "DateTime", "instance", "with", "a", "given", "format", "and", "locale", "." ]
python
train
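In normal use this formatter is reached through `DateTime.format()`; a brief sketch with pendulum's documented tokens:

    import pendulum

    dt = pendulum.datetime(2019, 3, 7, 6, 30)
    print(dt.format('dddd, MMMM D, YYYY h:mm A'))      # Thursday, March 7, 2019 6:30 AM
    print(dt.format('dddd D MMMM YYYY', locale='fr'))  # jeudi 7 mars 2019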
quantmind/pulsar
pulsar/utils/pylib/websocket.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/pylib/websocket.py#L136-L138
def continuation(self, body=None, final=True):
    '''return a `continuation` :class:`Frame`.'''
    return self.encode(body, opcode=0, final=final)
[ "def", "continuation", "(", "self", ",", "body", "=", "None", ",", "final", "=", "True", ")", ":", "return", "self", ".", "encode", "(", "body", ",", "opcode", "=", "0", ",", "final", "=", "final", ")" ]
return a `continuation` :class:`Frame`.
[ "return", "a", "continuation", ":", "class", ":", "Frame", "." ]
python
train
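Per RFC 6455, opcode 0 marks a continuation of a fragmented message: the first fragment carries the real opcode with `final=False`, and later fragments use `continuation`. A heavily hedged sketch of fragmenting a text message, assuming the same object also exposes a symmetric `text()` helper (`text()` and the `parser` name are assumptions; only `continuation` appears above):

    # hypothetical frame factory with text()/continuation() helpers
    frames = [
        parser.text('Hello, ', final=False),         # opcode 1, more fragments follow
        parser.continuation('web', final=False),     # opcode 0, more fragments follow
        parser.continuation('socket!', final=True),  # opcode 0, final fragment
    ]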
LettError/MutatorMath
Lib/mutatorMath/objects/location.py
https://github.com/LettError/MutatorMath/blob/10318fc4e7c9cee9df6130826829baea3054a42b/Lib/mutatorMath/objects/location.py#L204-L238
def asString(self, strict=False):
    """ Return the location as a string.
    ::

        >>> l = Location(pop=1, snap=(-100.0, -200))
        >>> l.asString()
        'pop:1, snap:(-100.000,-200.000)'
    """
    if len(self.keys()) == 0:
        return "origin"
    v = []
    n = []
    try:
        for name, value in self.asTuple():
            s = ''
            if value is None:
                s = "None"
            elif type(value) == tuple or type(value) == list:
                s = "(%.3f,%.3f)" % (value[0], value[1])
            elif int(value) == value:
                s = "%d" % (int(value))
            else:
                s = "%.3f" % (value)
            if s != '':
                n.append("%s:%s" % (name, s))
        return ", ".join(n)
    except TypeError:
        import traceback
        print("Location value error:", name, value)
        for key, value in self.items():
            print("\t\tkey:", key)
            print("\t\tvalue:", value)
        traceback.print_exc()
        return "error"
[ "def", "asString", "(", "self", ",", "strict", "=", "False", ")", ":", "if", "len", "(", "self", ".", "keys", "(", ")", ")", "==", "0", ":", "return", "\"origin\"", "v", "=", "[", "]", "n", "=", "[", "]", "try", ":", "for", "name", ",", "value", "in", "self", ".", "asTuple", "(", ")", ":", "s", "=", "''", "if", "value", "is", "None", ":", "s", "=", "\"None\"", "elif", "type", "(", "value", ")", "==", "tuple", "or", "type", "(", "value", ")", "==", "list", ":", "s", "=", "\"(%.3f,%.3f)\"", "%", "(", "value", "[", "0", "]", ",", "value", "[", "1", "]", ")", "elif", "int", "(", "value", ")", "==", "value", ":", "s", "=", "\"%d\"", "%", "(", "int", "(", "value", ")", ")", "else", ":", "s", "=", "\"%.3f\"", "%", "(", "value", ")", "if", "s", "!=", "''", ":", "n", ".", "append", "(", "\"%s:%s\"", "%", "(", "name", ",", "s", ")", ")", "return", "\", \"", ".", "join", "(", "n", ")", "except", "TypeError", ":", "import", "traceback", "print", "(", "\"Location value error:\"", ",", "name", ",", "value", ")", "for", "key", ",", "value", "in", "self", ".", "items", "(", ")", ":", "print", "(", "\"\\t\\tkey:\"", ",", "key", ")", "print", "(", "\"\\t\\tvalue:\"", ",", "value", ")", "traceback", ".", "print_exc", "(", ")", "return", "\"error\"" ]
Return the location as a string.
::

    >>> l = Location(pop=1, snap=(-100.0, -200))
    >>> l.asString()
    'pop:1, snap:(-100.000,-200.000)'
[ "Return", "the", "location", "as", "a", "string", ".", "::", ">>>", "l", "=", "Location", "(", "pop", "=", "1", "snap", "=", "(", "-", "100", ".", "0", "-", "200", "))", ">>>", "l", ".", "asString", "()", "pop", ":", "1", "snap", ":", "(", "-", "100", ".", "000", "-", "200", ".", "000", ")" ]
python
train
fermiPy/fermipy
fermipy/config.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/config.py#L269-L291
def create(cls, configfile):
    """Create a configuration dictionary from a yaml config file.

    This function will first populate the dictionary with defaults
    taken from pre-defined configuration files.  The configuration
    dictionary is then updated with the user-defined configuration
    file.  Any settings defined by the user will take precedence
    over the default settings."""

    # populate config dictionary with an initial set of values
    # config_logging = ConfigManager.load('logging.yaml')

    config = {}
    if config['fileio']['outdir'] is None:
        config['fileio']['outdir'] = os.path.abspath(
            os.path.dirname(configfile))

    user_config = cls.load(configfile)
    config = utils.merge_dict(config, user_config, True)

    config['fileio']['outdir'] = os.path.abspath(
        config['fileio']['outdir'])

    return config
[ "def", "create", "(", "cls", ",", "configfile", ")", ":", "# populate config dictionary with an initial set of values", "# config_logging = ConfigManager.load('logging.yaml')", "config", "=", "{", "}", "if", "config", "[", "'fileio'", "]", "[", "'outdir'", "]", "is", "None", ":", "config", "[", "'fileio'", "]", "[", "'outdir'", "]", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "configfile", ")", ")", "user_config", "=", "cls", ".", "load", "(", "configfile", ")", "config", "=", "utils", ".", "merge_dict", "(", "config", ",", "user_config", ",", "True", ")", "config", "[", "'fileio'", "]", "[", "'outdir'", "]", "=", "os", ".", "path", ".", "abspath", "(", "config", "[", "'fileio'", "]", "[", "'outdir'", "]", ")", "return", "config" ]
Create a configuration dictionary from a yaml config file. This function will first populate the dictionary with defaults taken from pre-defined configuration files. The configuration dictionary is then updated with the user-defined configuration file. Any settings defined by the user will take precedence over the default settings.
[ "Create", "a", "configuration", "dictionary", "from", "a", "yaml", "config", "file", ".", "This", "function", "will", "first", "populate", "the", "dictionary", "with", "defaults", "taken", "from", "pre", "-", "defined", "configuration", "files", ".", "The", "configuration", "dictionary", "is", "then", "updated", "with", "the", "user", "-", "defined", "configuration", "file", ".", "Any", "settings", "defined", "by", "the", "user", "will", "take", "precedence", "over", "the", "default", "settings", "." ]
python
train
CivicSpleen/ambry
ambry/orm/dataset.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/dataset.py#L649-L680
def rows(self):
    """Return configuration in a form that can be used to reconstitute a
    Metadata object. Returns all of the rows for a dataset.

    This is distinct from get_config_value, which returns the value
    for the library.
    """
    from ambry.orm import Config as SAConfig
    from sqlalchemy import or_

    rows = []
    configs = self.dataset.session\
        .query(SAConfig)\
        .filter(or_(SAConfig.group == 'config',
                    SAConfig.group == 'process'),
                SAConfig.d_vid == self.dataset.vid)\
        .all()

    for r in configs:
        parts = r.key.split('.', 3)

        if r.group == 'process':
            parts = ['process'] + parts

        cr = ((parts[0] if len(parts) > 0 else None,
               parts[1] if len(parts) > 1 else None,
               parts[2] if len(parts) > 2 else None
               ), r.value)

        rows.append(cr)

    return rows
[ "def", "rows", "(", "self", ")", ":", "from", "ambry", ".", "orm", "import", "Config", "as", "SAConfig", "from", "sqlalchemy", "import", "or_", "rows", "=", "[", "]", "configs", "=", "self", ".", "dataset", ".", "session", ".", "query", "(", "SAConfig", ")", ".", "filter", "(", "or_", "(", "SAConfig", ".", "group", "==", "'config'", ",", "SAConfig", ".", "group", "==", "'process'", ")", ",", "SAConfig", ".", "d_vid", "==", "self", ".", "dataset", ".", "vid", ")", ".", "all", "(", ")", "for", "r", "in", "configs", ":", "parts", "=", "r", ".", "key", ".", "split", "(", "'.'", ",", "3", ")", "if", "r", ".", "group", "==", "'process'", ":", "parts", "=", "[", "'process'", "]", "+", "parts", "cr", "=", "(", "(", "parts", "[", "0", "]", "if", "len", "(", "parts", ")", ">", "0", "else", "None", ",", "parts", "[", "1", "]", "if", "len", "(", "parts", ")", ">", "1", "else", "None", ",", "parts", "[", "2", "]", "if", "len", "(", "parts", ")", ">", "2", "else", "None", ")", ",", "r", ".", "value", ")", "rows", ".", "append", "(", "cr", ")", "return", "rows" ]
Return configuration in a form that can be used to reconstitute a Metadata object. Returns all of the rows for a dataset. This is distinct from get_config_value, which returns the value for the library.
[ "Return", "configuration", "in", "a", "form", "that", "can", "be", "used", "to", "reconstitute", "a", "Metadata", "object", ".", "Returns", "all", "of", "the", "rows", "for", "a", "dataset", "." ]
python
train
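Each returned row pairs a truncated key tuple with the config value. A tiny illustration of the key-splitting step on a hypothetical 'process'-group row:

    key, group = 'build.sources.url', 'process'  # hypothetical row
    parts = key.split('.', 3)
    if group == 'process':
        parts = ['process'] + parts
    print(parts[:3])  # ['process', 'build', 'sources']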
Iotic-Labs/py-IoticAgent
src/IoticAgent/Core/Client.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/Core/Client.py#L1324-L1356
def __dispatch_msg(self, message):
    """Verify the signature and update RequestEvents / perform callbacks

    Note messages with an invalid wrapper, invalid hash, invalid sequence
    number or unexpected clientRef will be sent to debug_bad callback.
    """
    msg = self.__validate_decode_msg(message)
    if msg:
        msg, seqnum = msg
    else:
        self.__fire_callback(_CB_DEBUG_BAD, message.body, message.content_type)
        return

    if DEBUG_ENABLED:
        logger.debug(decode_rcvd_msg('decode_rcvd_msg', msg, seqnum))
    self.__fire_callback(_CB_DEBUG_RCVD, msg)

    # no reference, or set by client (not container)
    if msg[M_TYPE] not in _RSP_CONTAINER_REF:
        # solicited
        if msg[M_CLIENTREF]:
            if not self.__handle_known_solicited(msg):
                logger.debug('Ignoring response for unknown request %s of type %s',
                             msg[M_CLIENTREF], msg[M_TYPE])
        # unsolicited
        else:
            self.__perform_unsolicited_callbacks(msg)
    # unsolicited but can have reference set by container
    elif msg[M_TYPE] == E_CONTROLREQ:
        self.__handle_controlreq(msg[M_PAYLOAD], msg[M_CLIENTREF])
    else:
        logger.error('Unhandled unsolicited message of type %s', msg[M_TYPE])
[ "def", "__dispatch_msg", "(", "self", ",", "message", ")", ":", "msg", "=", "self", ".", "__validate_decode_msg", "(", "message", ")", "if", "msg", ":", "msg", ",", "seqnum", "=", "msg", "else", ":", "self", ".", "__fire_callback", "(", "_CB_DEBUG_BAD", ",", "message", ".", "body", ",", "message", ".", "content_type", ")", "return", "if", "DEBUG_ENABLED", ":", "logger", ".", "debug", "(", "decode_rcvd_msg", "(", "'decode_rcvd_msg'", ",", "msg", ",", "seqnum", ")", ")", "self", ".", "__fire_callback", "(", "_CB_DEBUG_RCVD", ",", "msg", ")", "# no reference, or set by client (not container)", "if", "msg", "[", "M_TYPE", "]", "not", "in", "_RSP_CONTAINER_REF", ":", "# solicitied", "if", "msg", "[", "M_CLIENTREF", "]", ":", "if", "not", "self", ".", "__handle_known_solicited", "(", "msg", ")", ":", "logger", ".", "debug", "(", "'Ignoring response for unknown request %s of type %s'", ",", "msg", "[", "M_CLIENTREF", "]", ",", "msg", "[", "M_TYPE", "]", ")", "# unsolicitied", "else", ":", "self", ".", "__perform_unsolicited_callbacks", "(", "msg", ")", "# unsolicited but can have reference set by container", "elif", "msg", "[", "M_TYPE", "]", "==", "E_CONTROLREQ", ":", "self", ".", "__handle_controlreq", "(", "msg", "[", "M_PAYLOAD", "]", ",", "msg", "[", "M_CLIENTREF", "]", ")", "else", ":", "logger", ".", "error", "(", "'Unhandled unsolicited message of type %s'", ",", "msg", "[", "M_TYPE", "]", ")" ]
Verify the signature and update RequestEvents / perform callbacks Note messages with an invalid wrapper, invalid hash, invalid sequence number or unexpected clientRef will be sent to debug_bad callback.
[ "Verify", "the", "signature", "and", "update", "RequestEvents", "/", "perform", "callbacks" ]
python
train
lltk/lltk
lltk/scraping.py
https://github.com/lltk/lltk/blob/d171de55c1b97695fddedf4b02401ae27bf1d634/lltk/scraping.py#L19-L29
def register(scraper):
    ''' Registers a scraper to make it available for the generic scraping interface. '''

    global scrapers
    language = scraper('').language
    if not language:
        raise Exception('No language specified for your scraper.')
    # dict.has_key() was removed in Python 3; the `in` operator is equivalent
    if language in scrapers:
        scrapers[language].append(scraper)
    else:
        scrapers[language] = [scraper]
[ "def", "register", "(", "scraper", ")", ":", "global", "scrapers", "language", "=", "scraper", "(", "''", ")", ".", "language", "if", "not", "language", ":", "raise", "Exception", "(", "'No language specified for your scraper.'", ")", "if", "scrapers", ".", "has_key", "(", "language", ")", ":", "scrapers", "[", "language", "]", ".", "append", "(", "scraper", ")", "else", ":", "scrapers", "[", "language", "]", "=", "[", "scraper", "]" ]
Registers a scraper to make it available for the generic scraping interface.
[ "Registers", "a", "scraper", "to", "make", "it", "available", "for", "the", "generic", "scraping", "interface", "." ]
python
train
django-parler/django-parler
parler/models.py
https://github.com/django-parler/django-parler/blob/11ae4af5e8faddb74c69c848870122df4006a54e/parler/models.py#L445-L557
def _get_translated_model(self, language_code=None, use_fallback=False, auto_create=False, meta=None):
    """
    Fetch the translated fields model.
    """
    if self._parler_meta is None:
        raise ImproperlyConfigured("No translation is assigned to the current model!")
    if self._translations_cache is None:
        raise RuntimeError("Accessing translated fields before super.__init__() is not possible.")

    if not language_code:
        language_code = self._current_language
        if language_code is None:
            raise ValueError(get_null_language_error())

    if meta is None:
        meta = self._parler_meta.root  # work on base model by default

    local_cache = self._translations_cache[meta.model]

    # 1. fetch the object from the local cache
    try:
        object = local_cache[language_code]

        # If cached object indicates the language doesn't exist, need to query the fallback.
        if not is_missing(object):
            return object
    except KeyError:
        # 2. No cache, need to query
        # Check that this object already exists, would be pointless otherwise to check for a translation.
        if not self._state.adding and self.pk is not None:
            prefetch = self._get_prefetched_translations(meta=meta)
            if prefetch is not None:
                # 2.1, use prefetched data
                # If the object is not found in the prefetched data (which contains all translations),
                # it's pointless to check for memcached (2.2) or perform a single query (2.3)
                for object in prefetch:
                    if object.language_code == language_code:
                        local_cache[language_code] = object
                        _cache_translation(object)  # Store in memcached
                        return object
            else:
                # 2.2, fetch from memcached
                object = get_cached_translation(self, language_code, related_name=meta.rel_name, use_fallback=use_fallback)
                if object is not None:
                    # Track in local cache
                    if object.language_code != language_code:
                        local_cache[language_code] = MISSING  # Set fallback marker
                    local_cache[object.language_code] = object
                    return object
                elif is_missing(local_cache.get(language_code, None)):
                    # If get_cached_translation() explicitly set the "does not exist" marker,
                    # there is no need to try a database query.
                    pass
                else:
                    # 2.3, fetch from database
                    try:
                        object = self._get_translated_queryset(meta).get(language_code=language_code)
                    except meta.model.DoesNotExist:
                        pass
                    else:
                        local_cache[language_code] = object
                        _cache_translation(object)  # Store in memcached
                        return object

    # Not in cache, or default.
    # Not fetched from DB

    # 3. Auto create?
    if auto_create:
        # Auto create policy first (e.g. a __set__ call)
        kwargs = {
            'language_code': language_code,
        }
        if self.pk:
            # ID might be None at this point, and Django does not allow that.
            kwargs['master'] = self

        object = meta.model(**kwargs)
        local_cache[language_code] = object
        # Not stored in memcached here yet, first fill + save it.
        return object

    # 4. Fallback?
    fallback_msg = None
    lang_dict = get_language_settings(language_code)

    if language_code not in local_cache:
        # Explicitly set a marker for the fact that this translation uses the fallback instead.
        # Avoid making that query again.
        local_cache[language_code] = MISSING  # None value is the marker.
        if not self._state.adding or self.pk is not None:
            _cache_translation_needs_fallback(self, language_code, related_name=meta.rel_name)

    fallback_choices = [lang_dict['code']] + list(lang_dict['fallbacks'])
    if use_fallback and fallback_choices:
        # Jump to fallback language, return directly.
        # Don't cache under this language_code
        for fallback_lang in fallback_choices:
            if fallback_lang == language_code:
                # Skip the current language, could also be fallback 1 of 2 choices
                continue

            try:
                return self._get_translated_model(fallback_lang, use_fallback=False, auto_create=auto_create, meta=meta)
            except meta.model.DoesNotExist:
                pass

        fallback_msg = " (tried fallbacks {0})".format(', '.join(lang_dict['fallbacks']))

    # None of the above, bail out!
    raise meta.model.DoesNotExist(
        "{0} does not have a translation for the current language!\n"
        "{0} ID #{1}, language={2}{3}".format(self._meta.verbose_name, self.pk, language_code, fallback_msg or ''))
[ "def", "_get_translated_model", "(", "self", ",", "language_code", "=", "None", ",", "use_fallback", "=", "False", ",", "auto_create", "=", "False", ",", "meta", "=", "None", ")", ":", "if", "self", ".", "_parler_meta", "is", "None", ":", "raise", "ImproperlyConfigured", "(", "\"No translation is assigned to the current model!\"", ")", "if", "self", ".", "_translations_cache", "is", "None", ":", "raise", "RuntimeError", "(", "\"Accessing translated fields before super.__init__() is not possible.\"", ")", "if", "not", "language_code", ":", "language_code", "=", "self", ".", "_current_language", "if", "language_code", "is", "None", ":", "raise", "ValueError", "(", "get_null_language_error", "(", ")", ")", "if", "meta", "is", "None", ":", "meta", "=", "self", ".", "_parler_meta", ".", "root", "# work on base model by default", "local_cache", "=", "self", ".", "_translations_cache", "[", "meta", ".", "model", "]", "# 1. fetch the object from the local cache", "try", ":", "object", "=", "local_cache", "[", "language_code", "]", "# If cached object indicates the language doesn't exist, need to query the fallback.", "if", "not", "is_missing", "(", "object", ")", ":", "return", "object", "except", "KeyError", ":", "# 2. No cache, need to query", "# Check that this object already exists, would be pointless otherwise to check for a translation.", "if", "not", "self", ".", "_state", ".", "adding", "and", "self", ".", "pk", "is", "not", "None", ":", "prefetch", "=", "self", ".", "_get_prefetched_translations", "(", "meta", "=", "meta", ")", "if", "prefetch", "is", "not", "None", ":", "# 2.1, use prefetched data", "# If the object is not found in the prefetched data (which contains all translations),", "# it's pointless to check for memcached (2.2) or perform a single query (2.3)", "for", "object", "in", "prefetch", ":", "if", "object", ".", "language_code", "==", "language_code", ":", "local_cache", "[", "language_code", "]", "=", "object", "_cache_translation", "(", "object", ")", "# Store in memcached", "return", "object", "else", ":", "# 2.2, fetch from memcached", "object", "=", "get_cached_translation", "(", "self", ",", "language_code", ",", "related_name", "=", "meta", ".", "rel_name", ",", "use_fallback", "=", "use_fallback", ")", "if", "object", "is", "not", "None", ":", "# Track in local cache", "if", "object", ".", "language_code", "!=", "language_code", ":", "local_cache", "[", "language_code", "]", "=", "MISSING", "# Set fallback marker", "local_cache", "[", "object", ".", "language_code", "]", "=", "object", "return", "object", "elif", "is_missing", "(", "local_cache", ".", "get", "(", "language_code", ",", "None", ")", ")", ":", "# If get_cached_translation() explicitly set the \"does not exist\" marker,", "# there is no need to try a database query.", "pass", "else", ":", "# 2.3, fetch from database", "try", ":", "object", "=", "self", ".", "_get_translated_queryset", "(", "meta", ")", ".", "get", "(", "language_code", "=", "language_code", ")", "except", "meta", ".", "model", ".", "DoesNotExist", ":", "pass", "else", ":", "local_cache", "[", "language_code", "]", "=", "object", "_cache_translation", "(", "object", ")", "# Store in memcached", "return", "object", "# Not in cache, or default.", "# Not fetched from DB", "# 3. Auto create?", "if", "auto_create", ":", "# Auto create policy first (e.g. 
a __set__ call)", "kwargs", "=", "{", "'language_code'", ":", "language_code", ",", "}", "if", "self", ".", "pk", ":", "# ID might be None at this point, and Django does not allow that.", "kwargs", "[", "'master'", "]", "=", "self", "object", "=", "meta", ".", "model", "(", "*", "*", "kwargs", ")", "local_cache", "[", "language_code", "]", "=", "object", "# Not stored in memcached here yet, first fill + save it.", "return", "object", "# 4. Fallback?", "fallback_msg", "=", "None", "lang_dict", "=", "get_language_settings", "(", "language_code", ")", "if", "language_code", "not", "in", "local_cache", ":", "# Explicitly set a marker for the fact that this translation uses the fallback instead.", "# Avoid making that query again.", "local_cache", "[", "language_code", "]", "=", "MISSING", "# None value is the marker.", "if", "not", "self", ".", "_state", ".", "adding", "or", "self", ".", "pk", "is", "not", "None", ":", "_cache_translation_needs_fallback", "(", "self", ",", "language_code", ",", "related_name", "=", "meta", ".", "rel_name", ")", "fallback_choices", "=", "[", "lang_dict", "[", "'code'", "]", "]", "+", "list", "(", "lang_dict", "[", "'fallbacks'", "]", ")", "if", "use_fallback", "and", "fallback_choices", ":", "# Jump to fallback language, return directly.", "# Don't cache under this language_code", "for", "fallback_lang", "in", "fallback_choices", ":", "if", "fallback_lang", "==", "language_code", ":", "# Skip the current language, could also be fallback 1 of 2 choices", "continue", "try", ":", "return", "self", ".", "_get_translated_model", "(", "fallback_lang", ",", "use_fallback", "=", "False", ",", "auto_create", "=", "auto_create", ",", "meta", "=", "meta", ")", "except", "meta", ".", "model", ".", "DoesNotExist", ":", "pass", "fallback_msg", "=", "\" (tried fallbacks {0})\"", ".", "format", "(", "', '", ".", "join", "(", "lang_dict", "[", "'fallbacks'", "]", ")", ")", "# None of the above, bail out!", "raise", "meta", ".", "model", ".", "DoesNotExist", "(", "\"{0} does not have a translation for the current language!\\n\"", "\"{0} ID #{1}, language={2}{3}\"", ".", "format", "(", "self", ".", "_meta", ".", "verbose_name", ",", "self", ".", "pk", ",", "language_code", ",", "fallback_msg", "or", "''", ")", ")" ]
Fetch the translated fields model.
[ "Fetch", "the", "translated", "fields", "model", "." ]
python
train
mamrhein/specification
specification/_extd_ast_expr.py
https://github.com/mamrhein/specification/blob/a4c09a0d286cda7a04e8a189f12e23edd97f64ea/specification/_extd_ast_expr.py#L205-L208
def visit_Tuple(self, node: AST, dfltChaining: bool = True) -> str:
    """Return tuple representation of `node`s elements."""
    elems = (self.visit(elt) for elt in node.elts)
    return f"({', '.join(elems)}{')' if len(node.elts) != 1 else ',)'}"
[ "def", "visit_Tuple", "(", "self", ",", "node", ":", "AST", ",", "dfltChaining", ":", "bool", "=", "True", ")", "->", "str", ":", "elems", "=", "(", "self", ".", "visit", "(", "elt", ")", "for", "elt", "in", "node", ".", "elts", ")", "return", "f\"({', '.join(elems)}{')' if len(node.elts) != 1 else ',)'}\"" ]
Return tuple representation of `node`s elements.
[ "Return", "tuple", "representation", "of", "node", "s", "elements", "." ]
python
train
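The f-string encodes Python's one-element tuple rule: a lone element needs a trailing comma. The same expression over plain strings, as a self-contained illustration:

    def tuple_repr(elems):
        # identical close-paren logic to visit_Tuple above
        return f"({', '.join(elems)}{')' if len(elems) != 1 else ',)'}"

    print(tuple_repr(['1', '2']))  # (1, 2)
    print(tuple_repr(['1']))       # (1,)  <- trailing comma keeps it a tuple
    print(tuple_repr([]))          # ()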
sernst/cauldron
cauldron/environ/systems.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/environ/systems.py#L76-L108
def get_system_data() -> typing.Union[None, dict]: """ Returns information about the system in which Cauldron is running. If the information cannot be found, None is returned instead. :return: Dictionary containing information about the Cauldron system, whic includes: * name * location * version """ site_packages = get_site_packages() path_prefixes = [('[SP]', p) for p in site_packages] path_prefixes.append(('[CORE]', sys.exec_prefix)) packages = [ module_to_package_data(name, entry, path_prefixes) for name, entry in list(sys.modules.items()) ] python_data = dict( version=list(sys.version_info), executable=simplify_path(sys.executable), directory=simplify_path(sys.exec_prefix), site_packages=[simplify_path(sp) for sp in site_packages] ) return dict( python=python_data, packages=[p for p in packages if p is not None] )
[ "def", "get_system_data", "(", ")", "->", "typing", ".", "Union", "[", "None", ",", "dict", "]", ":", "site_packages", "=", "get_site_packages", "(", ")", "path_prefixes", "=", "[", "(", "'[SP]'", ",", "p", ")", "for", "p", "in", "site_packages", "]", "path_prefixes", ".", "append", "(", "(", "'[CORE]'", ",", "sys", ".", "exec_prefix", ")", ")", "packages", "=", "[", "module_to_package_data", "(", "name", ",", "entry", ",", "path_prefixes", ")", "for", "name", ",", "entry", "in", "list", "(", "sys", ".", "modules", ".", "items", "(", ")", ")", "]", "python_data", "=", "dict", "(", "version", "=", "list", "(", "sys", ".", "version_info", ")", ",", "executable", "=", "simplify_path", "(", "sys", ".", "executable", ")", ",", "directory", "=", "simplify_path", "(", "sys", ".", "exec_prefix", ")", ",", "site_packages", "=", "[", "simplify_path", "(", "sp", ")", "for", "sp", "in", "site_packages", "]", ")", "return", "dict", "(", "python", "=", "python_data", ",", "packages", "=", "[", "p", "for", "p", "in", "packages", "if", "p", "is", "not", "None", "]", ")" ]
Returns information about the system in which Cauldron is running.
If the information cannot be found, None is returned instead.

:return:
    Dictionary containing information about the Cauldron system, which
    includes:

     * name
     * location
     * version
[ "Returns", "information", "about", "the", "system", "in", "which", "Cauldron", "is", "running", ".", "If", "the", "information", "cannot", "be", "found", "None", "is", "returned", "instead", "." ]
python
train
TheGhouls/oct
oct/results/report.py
https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/results/report.py#L99-L105
def compile_results(self):
    """Compile all results for the current test
    """
    self._init_dataframes()

    self.total_transactions = len(self.main_results['raw'])
    self._init_dates()
[ "def", "compile_results", "(", "self", ")", ":", "self", ".", "_init_dataframes", "(", ")", "self", ".", "total_transactions", "=", "len", "(", "self", ".", "main_results", "[", "'raw'", "]", ")", "self", ".", "_init_dates", "(", ")" ]
Compile all results for the current test
[ "Compile", "all", "results", "for", "the", "current", "test" ]
python
train
tanghaibao/jcvi
jcvi/formats/fasta.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fasta.py#L1572-L1621
def some(args):
    """
    %prog some fastafile listfile outfastafile

    generate a subset of fastafile, based on a list
    """
    p = OptionParser(some.__doc__)
    p.add_option("--exclude", default=False, action="store_true",
                 help="Output sequences not in the list file [default: %default]")
    p.add_option("--uniprot", default=False, action="store_true",
                 help="Header is from uniprot [default: %default]")

    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(p.print_help())

    fastafile, listfile, outfastafile = args
    outfastahandle = must_open(outfastafile, "w")
    qualfile = get_qual(fastafile)

    names = set(x.strip() for x in open(listfile))
    if qualfile:
        outqualfile = outfastafile + ".qual"
        outqualhandle = open(outqualfile, "w")
        parser = iter_fasta_qual(fastafile, qualfile)
    else:
        parser = SeqIO.parse(fastafile, "fasta")

    num_records = 0
    for rec in parser:
        name = rec.id
        if opts.uniprot:
            name = name.split("|")[-1]

        if opts.exclude:
            if name in names:
                continue
        else:
            if name not in names:
                continue

        SeqIO.write([rec], outfastahandle, "fasta")
        if qualfile:
            SeqIO.write([rec], outqualhandle, "qual")
        num_records += 1

    logging.debug("A total of %d records written to `%s`" % \
                  (num_records, outfastafile))
[ "def", "some", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "some", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--exclude\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Output sequences not in the list file [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--uniprot\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Header is from uniprot [default: %default]\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "3", ":", "sys", ".", "exit", "(", "p", ".", "print_help", "(", ")", ")", "fastafile", ",", "listfile", ",", "outfastafile", "=", "args", "outfastahandle", "=", "must_open", "(", "outfastafile", ",", "\"w\"", ")", "qualfile", "=", "get_qual", "(", "fastafile", ")", "names", "=", "set", "(", "x", ".", "strip", "(", ")", "for", "x", "in", "open", "(", "listfile", ")", ")", "if", "qualfile", ":", "outqualfile", "=", "outfastafile", "+", "\".qual\"", "outqualhandle", "=", "open", "(", "outqualfile", ",", "\"w\"", ")", "parser", "=", "iter_fasta_qual", "(", "fastafile", ",", "qualfile", ")", "else", ":", "parser", "=", "SeqIO", ".", "parse", "(", "fastafile", ",", "\"fasta\"", ")", "num_records", "=", "0", "for", "rec", "in", "parser", ":", "name", "=", "rec", ".", "id", "if", "opts", ".", "uniprot", ":", "name", "=", "name", ".", "split", "(", "\"|\"", ")", "[", "-", "1", "]", "if", "opts", ".", "exclude", ":", "if", "name", "in", "names", ":", "continue", "else", ":", "if", "name", "not", "in", "names", ":", "continue", "SeqIO", ".", "write", "(", "[", "rec", "]", ",", "outfastahandle", ",", "\"fasta\"", ")", "if", "qualfile", ":", "SeqIO", ".", "write", "(", "[", "rec", "]", ",", "outqualhandle", ",", "\"qual\"", ")", "num_records", "+=", "1", "logging", ".", "debug", "(", "\"A total of %d records written to `%s`\"", "%", "(", "num_records", ",", "outfastafile", ")", ")" ]
%prog some fastafile listfile outfastafile

generate a subset of fastafile, based on a list
[ "%prog", "some", "fastafile", "listfile", "outfastafile" ]
python
train
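Following the `%prog` usage line, `args` is the argv tail handed to the action; a hypothetical direct invocation (file names are placeholders):

    # keep only the ids listed in keep_ids.txt
    some(["all.fasta", "keep_ids.txt", "subset.fasta"])
    # or drop them instead
    some(["all.fasta", "drop_ids.txt", "rest.fasta", "--exclude"])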
CalebBell/fluids
fluids/drag.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/drag.py#L1260-L1414
def integrate_drag_sphere(D, rhop, rho, mu, t, V=0, Method=None,
                          distance=False):
    r'''Integrates the velocity and distance traveled by a particle moving
    at a speed which will converge to its terminal velocity.

    Performs an integration of the following expression for acceleration:

    .. math::
        a = \frac{g(\rho_p-\rho_f)}{\rho_p} - \frac{3C_D \rho_f u^2}{4D \rho_p}

    Parameters
    ----------
    D : float
        Diameter of the sphere, [m]
    rhop : float
        Particle density, [kg/m^3]
    rho : float
        Density of the surrounding fluid, [kg/m^3]
    mu : float
        Viscosity of the surrounding fluid [Pa*s]
    t : float
        Time to integrate the particle to, [s]
    V : float
        Initial velocity of the particle, [m/s]
    Method : string, optional
        A string of the function name to use, as in the dictionary
        drag_sphere_correlations
    distance : bool, optional
        Whether or not to calculate the distance traveled and return it as
        well

    Returns
    -------
    v : float
        Velocity of falling sphere after time `t` [m/s]
    x : float, returned only if `distance` == True
        Distance traveled by the falling sphere in time `t`, [m]

    Notes
    -----
    This can be relatively slow as drag correlations can be complex.

    There are analytical solutions available for the Stokes law regime
    (Re < 0.3). They were obtained from Wolfram Alpha. [1]_ was not used in
    the derivation, but also describes the derivation fully.

    .. math::
        V(t) = \frac{\exp(-at) (V_0 a + b(\exp(at) - 1))}{a}

    .. math::
        x(t) = \frac{\exp(-a t)\left[V_0 a(\exp(a t) - 1) + b\exp(a t)(a t-1)
        + b\right]}{a^2}

    .. math::
        a = \frac{18\mu_f}{D^2\rho_p}

    .. math::
        b = \frac{g(\rho_p-\rho_f)}{\rho_p}

    The analytical solution will automatically be used if the initial and
    terminal velocities show the particle's behavior to be laminar. Note
    that this behavior requires that the terminal velocity of the particle
    be solved for - this adds slight (1%) overhead for the cases where
    particles are not laminar.

    Examples
    --------
    >>> integrate_drag_sphere(D=0.001, rhop=2200., rho=1.2, mu=1.78E-5, t=0.5,
    ... V=30, distance=True)
    (9.686465044053476, 7.8294546436299175)

    References
    ----------
    .. [1] Timmerman, Peter, and Jacobus P. van der Weele. "On the Rise and
       Fall of a Ball with Linear or Quadratic Drag." American Journal of
       Physics 67, no. 6 (June 1999): 538-46. https://doi.org/10.1119/1.19320.
    '''
    laminar_initial = Reynolds(V=V, rho=rho, D=D, mu=mu) < 0.01
    v_laminar_end_assumed = v_terminal(D=D, rhop=rhop, rho=rho, mu=mu, Method=Method)
    laminar_end = Reynolds(V=v_laminar_end_assumed, rho=rho, D=D, mu=mu) < 0.01
    if Method == 'Stokes' or (laminar_initial and laminar_end and Method is None):
        try:
            t1 = 18.0*mu/(D*D*rhop)
            t2 = g*(rhop-rho)/rhop
            V_end = exp(-t1*t)*(t1*V + t2*(exp(t1*t) - 1.0))/t1
            x_end = exp(-t1*t)*(V*t1*(exp(t1*t) - 1.0) + t2*exp(t1*t)*(t1*t - 1.0) + t2)/(t1*t1)
            if distance:
                return V_end, x_end
            else:
                return V_end
        except OverflowError:
            # It is only necessary to integrate to terminal velocity
            t_to_terminal = time_v_terminal_Stokes(D, rhop, rho, mu, V0=V, tol=1e-9)
            if t_to_terminal > t:
                raise Exception('Should never happen')
            V_end, x_end = integrate_drag_sphere(D=D, rhop=rhop, rho=rho,
                                                 mu=mu, t=t_to_terminal, V=V,
                                                 Method='Stokes', distance=True)
            # terminal velocity has been reached - V does not change, but x does
            # No reason to believe this isn't working even though it isn't
            # matching the ode solver
            if distance:
                return V_end, x_end + V_end*(t - t_to_terminal)
            else:
                return V_end
            # This is a serious problem for small diameters
            # It would be possible to step slowly, using smaller increments
            # of time to avoid overflows. However, this unfortunately quickly
            # gets much, exponentially, slower than just using odeint because
            # for example solving 10000 seconds might require steps of .0001
            # seconds at a diameter of 1e-7 meters.
            # x = 0.0
            # subdivisions = 10
            # dt = t/subdivisions
            # for i in range(subdivisions):
            #     V, dx = integrate_drag_sphere(D=D, rhop=rhop, rho=rho, mu=mu,
            #                                   t=dt, V=V, distance=True,
            #                                   Method=Method)
            #     x += dx
            # if distance:
            #     return V, x
            # else:
            #     return V

    Re_ish = rho*D/mu
    c1 = g*(rhop-rho)/rhop
    c2 = -0.75*rho/(D*rhop)

    def dv_dt(V, t):
        if V == 0:
            # 64/Re goes to infinity, but gets multiplied by 0 squared.
            t2 = 0.0
        else:
            # t2 = c2*V*V*Stokes(Re_ish*V)
            t2 = c2*V*V*drag_sphere(Re_ish*V, Method=Method)
        return c1 + t2

    # Number of intervals for the solution to be solved for; the integrator
    # doesn't care what we give it, but a large number of intervals are needed
    # For an accurate integration of the particle's distance traveled
    pts = 1000 if distance else 2
    ts = np.linspace(0, t, pts)

    # Delayed import of necessary functions
    from scipy.integrate import odeint, cumtrapz

    # Perform the integration
    Vs = odeint(dv_dt, [V], ts)

    V_end = float(Vs[-1])
    if distance:
        # Calculate the distance traveled
        x = float(cumtrapz(np.ravel(Vs), ts)[-1])
        return V_end, x
    else:
        return V_end
[ "def", "integrate_drag_sphere", "(", "D", ",", "rhop", ",", "rho", ",", "mu", ",", "t", ",", "V", "=", "0", ",", "Method", "=", "None", ",", "distance", "=", "False", ")", ":", "laminar_initial", "=", "Reynolds", "(", "V", "=", "V", ",", "rho", "=", "rho", ",", "D", "=", "D", ",", "mu", "=", "mu", ")", "<", "0.01", "v_laminar_end_assumed", "=", "v_terminal", "(", "D", "=", "D", ",", "rhop", "=", "rhop", ",", "rho", "=", "rho", ",", "mu", "=", "mu", ",", "Method", "=", "Method", ")", "laminar_end", "=", "Reynolds", "(", "V", "=", "v_laminar_end_assumed", ",", "rho", "=", "rho", ",", "D", "=", "D", ",", "mu", "=", "mu", ")", "<", "0.01", "if", "Method", "==", "'Stokes'", "or", "(", "laminar_initial", "and", "laminar_end", "and", "Method", "is", "None", ")", ":", "try", ":", "t1", "=", "18.0", "*", "mu", "/", "(", "D", "*", "D", "*", "rhop", ")", "t2", "=", "g", "*", "(", "rhop", "-", "rho", ")", "/", "rhop", "V_end", "=", "exp", "(", "-", "t1", "*", "t", ")", "*", "(", "t1", "*", "V", "+", "t2", "*", "(", "exp", "(", "t1", "*", "t", ")", "-", "1.0", ")", ")", "/", "t1", "x_end", "=", "exp", "(", "-", "t1", "*", "t", ")", "*", "(", "V", "*", "t1", "*", "(", "exp", "(", "t1", "*", "t", ")", "-", "1.0", ")", "+", "t2", "*", "exp", "(", "t1", "*", "t", ")", "*", "(", "t1", "*", "t", "-", "1.0", ")", "+", "t2", ")", "/", "(", "t1", "*", "t1", ")", "if", "distance", ":", "return", "V_end", ",", "x_end", "else", ":", "return", "V_end", "except", "OverflowError", ":", "# It is only necessary to integrate to terminal velocity", "t_to_terminal", "=", "time_v_terminal_Stokes", "(", "D", ",", "rhop", ",", "rho", ",", "mu", ",", "V0", "=", "V", ",", "tol", "=", "1e-9", ")", "if", "t_to_terminal", ">", "t", ":", "raise", "Exception", "(", "'Should never happen'", ")", "V_end", ",", "x_end", "=", "integrate_drag_sphere", "(", "D", "=", "D", ",", "rhop", "=", "rhop", ",", "rho", "=", "rho", ",", "mu", "=", "mu", ",", "t", "=", "t_to_terminal", ",", "V", "=", "V", ",", "Method", "=", "'Stokes'", ",", "distance", "=", "True", ")", "# terminal velocity has been reached - V does not change, but x does", "# No reason to believe this isn't working even though it isn't", "# matching the ode solver", "if", "distance", ":", "return", "V_end", ",", "x_end", "+", "V_end", "*", "(", "t", "-", "t_to_terminal", ")", "else", ":", "return", "V_end", "# This is a serious problem for small diameters", "# It would be possible to step slowly, using smaller increments", "# of time to avlid overflows. 
However, this unfortunately quickly", "# gets much, exponentially, slower than just using odeint because", "# for example solving 10000 seconds might require steps of .0001", "# seconds at a diameter of 1e-7 meters.", "# x = 0.0", "# subdivisions = 10", "# dt = t/subdivisions", "# for i in range(subdivisions):", "# V, dx = integrate_drag_sphere(D=D, rhop=rhop, rho=rho, mu=mu,", "# t=dt, V=V, distance=True,", "# Method=Method)", "# x += dx", "# if distance:", "# return V, x", "# else:", "# return V", "Re_ish", "=", "rho", "*", "D", "/", "mu", "c1", "=", "g", "*", "(", "rhop", "-", "rho", ")", "/", "rhop", "c2", "=", "-", "0.75", "*", "rho", "/", "(", "D", "*", "rhop", ")", "def", "dv_dt", "(", "V", ",", "t", ")", ":", "if", "V", "==", "0", ":", "# 64/Re goes to infinity, but gets multiplied by 0 squared.", "t2", "=", "0.0", "else", ":", "# t2 = c2*V*V*Stokes(Re_ish*V)", "t2", "=", "c2", "*", "V", "*", "V", "*", "drag_sphere", "(", "Re_ish", "*", "V", ",", "Method", "=", "Method", ")", "return", "c1", "+", "t2", "# Number of intervals for the solution to be solved for; the integrator", "# doesn't care what we give it, but a large number of intervals are needed", "# For an accurate integration of the particle's distance traveled", "pts", "=", "1000", "if", "distance", "else", "2", "ts", "=", "np", ".", "linspace", "(", "0", ",", "t", ",", "pts", ")", "# Delayed import of necessaray functions", "from", "scipy", ".", "integrate", "import", "odeint", ",", "cumtrapz", "# Perform the integration", "Vs", "=", "odeint", "(", "dv_dt", ",", "[", "V", "]", ",", "ts", ")", "#", "V_end", "=", "float", "(", "Vs", "[", "-", "1", "]", ")", "if", "distance", ":", "# Calculate the distance traveled", "x", "=", "float", "(", "cumtrapz", "(", "np", ".", "ravel", "(", "Vs", ")", ",", "ts", ")", "[", "-", "1", "]", ")", "return", "V_end", ",", "x", "else", ":", "return", "V_end" ]
Integrates the velocity and distance traveled by a particle moving
at a speed which will converge to its terminal velocity.

Performs an integration of the following expression for acceleration:

.. math::
    a = \frac{g(\rho_p-\rho_f)}{\rho_p} - \frac{3C_D \rho_f u^2}{4D \rho_p}

Parameters
----------
D : float
    Diameter of the sphere, [m]
rhop : float
    Particle density, [kg/m^3]
rho : float
    Density of the surrounding fluid, [kg/m^3]
mu : float
    Viscosity of the surrounding fluid [Pa*s]
t : float
    Time to integrate the particle to, [s]
V : float
    Initial velocity of the particle, [m/s]
Method : string, optional
    A string of the function name to use, as in the dictionary
    drag_sphere_correlations
distance : bool, optional
    Whether or not to calculate the distance traveled and return it as well

Returns
-------
v : float
    Velocity of falling sphere after time `t` [m/s]
x : float, returned only if `distance` == True
    Distance traveled by the falling sphere in time `t`, [m]

Notes
-----
This can be relatively slow as drag correlations can be complex.

There are analytical solutions available for the Stokes law regime
(Re < 0.3). They were obtained from Wolfram Alpha. [1]_ was not used in
the derivation, but also describes the derivation fully.

.. math::
    V(t) = \frac{\exp(-at) (V_0 a + b(\exp(at) - 1))}{a}

.. math::
    x(t) = \frac{\exp(-a t)\left[V_0 a(\exp(a t) - 1) + b\exp(a t)(a t-1)
    + b\right]}{a^2}

.. math::
    a = \frac{18\mu_f}{D^2\rho_p}

.. math::
    b = \frac{g(\rho_p-\rho_f)}{\rho_p}

The analytical solution will automatically be used if the initial and
terminal velocities show the particle's behavior to be laminar. Note that
this behavior requires that the terminal velocity of the particle be
solved for - this adds slight (1%) overhead for the cases where particles
are not laminar.

Examples
--------
>>> integrate_drag_sphere(D=0.001, rhop=2200., rho=1.2, mu=1.78E-5, t=0.5,
... V=30, distance=True)
(9.686465044053476, 7.8294546436299175)

References
----------
.. [1] Timmerman, Peter, and Jacobus P. van der Weele. "On the Rise and
   Fall of a Ball with Linear or Quadratic Drag." American Journal of
   Physics 67, no. 6 (June 1999): 538-46. https://doi.org/10.1119/1.19320.
[ "r", "Integrates", "the", "velocity", "and", "distance", "traveled", "by", "a", "particle", "moving", "at", "a", "speed", "which", "will", "converge", "to", "its", "terminal", "velocity", "." ]
python
train
shimpe/pyvectortween
vectortween/TimeConversion.py
https://github.com/shimpe/pyvectortween/blob/aff071180474739060ec2d3102c39c8e73510988/vectortween/TimeConversion.py#L19-L27
def hms2frame(hms, fps):
    """
    :param hms: a string, e.g. "01:23:15" for one hour, 23 minutes 15 seconds
    :param fps: framerate
    :return: frame number
    """
    import time
    t = time.strptime(hms, "%H:%M:%S")
    return (t.tm_hour * 60 * 60 + t.tm_min * 60 + t.tm_sec) * fps
[ "def", "hms2frame", "(", "hms", ",", "fps", ")", ":", "import", "time", "t", "=", "time", ".", "strptime", "(", "hms", ",", "\"%H:%M:%S\"", ")", "return", "(", "t", ".", "tm_hour", "*", "60", "*", "60", "+", "t", ".", "tm_min", "*", "60", "+", "t", ".", "tm_sec", ")", "*", "fps" ]
:param hms: a string, e.g. "01:23:15" for one hour, 23 minutes 15 seconds
:param fps: framerate
:return: frame number
[ ":", "param", "hms", ":", "a", "string", "e", ".", "g", ".", "01", ":", "23", ":", "15", "for", "one", "hour", "23", "minutes", "15", "seconds", ":", "param", "fps", ":", "framerate", ":", "return", ":", "frame", "number" ]
python
train
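A quick worked example: at 25 fps, "01:23:15" is 1*3600 + 23*60 + 15 = 4995 seconds, so the result is 4995 * 25 frames:

    assert hms2frame("01:23:15", fps=25) == 124875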
DataBiosphere/toil
src/toil/batchSystems/abstractBatchSystem.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/abstractBatchSystem.py#L283-L297
def workerCleanup(info):
    """
    Cleans up the worker node on batch system shutdown. Also see :meth:`supportsWorkerCleanup`.

    :param WorkerCleanupInfo info: A named tuple consisting of all the relevant information
           for cleaning up the worker.
    """
    assert isinstance(info, WorkerCleanupInfo)
    workflowDir = Toil.getWorkflowDir(info.workflowID, info.workDir)
    workflowDirContents = os.listdir(workflowDir)
    shutdownFileStore(workflowDir, info.workflowID)
    if (info.cleanWorkDir == 'always'
            or info.cleanWorkDir in ('onSuccess', 'onError')
            and workflowDirContents in ([], [cacheDirName(info.workflowID)])):
        shutil.rmtree(workflowDir)
[ "def", "workerCleanup", "(", "info", ")", ":", "assert", "isinstance", "(", "info", ",", "WorkerCleanupInfo", ")", "workflowDir", "=", "Toil", ".", "getWorkflowDir", "(", "info", ".", "workflowID", ",", "info", ".", "workDir", ")", "workflowDirContents", "=", "os", ".", "listdir", "(", "workflowDir", ")", "shutdownFileStore", "(", "workflowDir", ",", "info", ".", "workflowID", ")", "if", "(", "info", ".", "cleanWorkDir", "==", "'always'", "or", "info", ".", "cleanWorkDir", "in", "(", "'onSuccess'", ",", "'onError'", ")", "and", "workflowDirContents", "in", "(", "[", "]", ",", "[", "cacheDirName", "(", "info", ".", "workflowID", ")", "]", ")", ")", ":", "shutil", ".", "rmtree", "(", "workflowDir", ")" ]
Cleans up the worker node on batch system shutdown. Also see :meth:`supportsWorkerCleanup`.

:param WorkerCleanupInfo info: A named tuple consisting of all the relevant information
       for cleaning up the worker.
[ "Cleans", "up", "the", "worker", "node", "on", "batch", "system", "shutdown", ".", "Also", "see", ":", "meth", ":", "supportsWorkerCleanup", "." ]
python
train
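The cleanup condition relies on Python's operator precedence: `and` binds tighter than `or`, so the directory-contents check only gates the 'onSuccess'/'onError' modes, while 'always' short-circuits on its own. A minimal sketch of the grouping with hypothetical values:

    cleanWorkDir = 'onSuccess'    # hypothetical value
    dir_effectively_empty = True  # hypothetical value

    # `A or B and C` parses as `A or (B and C)`
    should_clean = (cleanWorkDir == 'always'
                    or (cleanWorkDir in ('onSuccess', 'onError')
                        and dir_effectively_empty))
    assert should_clean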
disqus/nose-performance
src/noseperf/wrappers/django.py
https://github.com/disqus/nose-performance/blob/916c8bd7fe7f30e4b7cba24a79a4157fd7889ec2/src/noseperf/wrappers/django.py#L55-L77
def execute(self, operation, parameters=()):
    """
    Wraps execute method to record the query, execution duration and stackframe.
    """
    __traceback_hide__ = True  # NOQA

    # Time the execution of the query
    start = time.time()
    try:
        return self.cursor.execute(operation, parameters)
    finally:
        end = time.time()

        # Save the data
        data = {
            'name': operation,
            'args': parameters,
            'start': start,
            'end': end,
        }
        self._record(data)
[ "def", "execute", "(", "self", ",", "operation", ",", "parameters", "=", "(", ")", ")", ":", "__traceback_hide__", "=", "True", "# NOQ", "# Time the exection of the query", "start", "=", "time", ".", "time", "(", ")", "try", ":", "return", "self", ".", "cursor", ".", "execute", "(", "operation", ",", "parameters", ")", "finally", ":", "end", "=", "time", ".", "time", "(", ")", "# Save the data", "data", "=", "{", "'name'", ":", "operation", ",", "'args'", ":", "parameters", ",", "'start'", ":", "start", ",", "'end'", ":", "end", ",", "}", "self", ".", "_record", "(", "data", ")" ]
Wraps execute method to record the query, execution duration and stackframe.
[ "Wraps", "execute", "method", "to", "record", "the", "query", "execution", "duration", "and", "stackframe", "." ]
python
train
RLBot/RLBot
src/main/python/rlbot/gui/qt_root.py
https://github.com/RLBot/RLBot/blob/3f9b6bec8b9baf4dcfff0f6cf3103c8744ac6234/src/main/python/rlbot/gui/qt_root.py#L747-L756
def update_match_settings(self):
    """
    Sets all match setting widgets to the values in the overall config
    :return:
    """
    self.mode_type_combobox.setCurrentText(self.overall_config.get(MATCH_CONFIGURATION_HEADER, GAME_MODE))
    self.map_type_combobox.setCurrentText(self.overall_config.get(MATCH_CONFIGURATION_HEADER, GAME_MAP))
    self.skip_replays_checkbox.setChecked(self.overall_config.getboolean(MATCH_CONFIGURATION_HEADER, SKIP_REPLAYS))
    self.instant_start_checkbox.setChecked(
        self.overall_config.getboolean(MATCH_CONFIGURATION_HEADER, INSTANT_START))
[ "def", "update_match_settings", "(", "self", ")", ":", "self", ".", "mode_type_combobox", ".", "setCurrentText", "(", "self", ".", "overall_config", ".", "get", "(", "MATCH_CONFIGURATION_HEADER", ",", "GAME_MODE", ")", ")", "self", ".", "map_type_combobox", ".", "setCurrentText", "(", "self", ".", "overall_config", ".", "get", "(", "MATCH_CONFIGURATION_HEADER", ",", "GAME_MAP", ")", ")", "self", ".", "skip_replays_checkbox", ".", "setChecked", "(", "self", ".", "overall_config", ".", "getboolean", "(", "MATCH_CONFIGURATION_HEADER", ",", "SKIP_REPLAYS", ")", ")", "self", ".", "instant_start_checkbox", ".", "setChecked", "(", "self", ".", "overall_config", ".", "getboolean", "(", "MATCH_CONFIGURATION_HEADER", ",", "INSTANT_START", ")", ")" ]
Sets all match setting widgets to the values in the overall config
:return:
[ "Sets", "all", "match", "setting", "widgets", "to", "the", "values", "in", "the", "overall", "config", ":", "return", ":" ]
python
train
bwhite/hadoopy
hadoopy/_freeze.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/_freeze.py#L156-L188
def freeze(script_path, target_dir='frozen', **kw): """Wraps pyinstaller and provides an easy to use interface Args: script_path: Absolute path to python script to be frozen. Returns: List of freeze commands run Raises: subprocess.CalledProcessError: Freeze error. OSError: Freeze not found. """ cmds = [] freeze_start_time = time.time() logging.debug('/\\%s%s Output%s/\\' % ('-' * 10, 'Pyinstaller', '-' * 10)) orig_dir = os.path.abspath('.') script_path = os.path.abspath(script_path) try: os.chdir(target_dir) cmds += _freeze_config() pyinst_path = '%s/thirdparty/pyinstaller' % __path__[0] cur_cmd = 'python -O %s/pyinstaller.py %s --skip-configure' % (pyinst_path, script_path) cmds.append(cur_cmd) if _run(cur_cmd): # If there is a problem, try removing the config and re-doing _freeze_config(force=True) cur_cmd = 'python -O %s/pyinstaller.py %s' % (pyinst_path, script_path) _run(cur_cmd) finally: os.chdir(orig_dir) logging.debug('\\/%s%s Output%s\\/' % ('-' * 10, 'Pyinstaller', '-' * 10)) logging.info('Pyinstaller took [%f] seconds' % (time.time() - freeze_start_time)) return cmds
[ "def", "freeze", "(", "script_path", ",", "target_dir", "=", "'frozen'", ",", "*", "*", "kw", ")", ":", "cmds", "=", "[", "]", "freeze_start_time", "=", "time", ".", "time", "(", ")", "logging", ".", "debug", "(", "'/\\\\%s%s Output%s/\\\\'", "%", "(", "'-'", "*", "10", ",", "'Pyinstaller'", ",", "'-'", "*", "10", ")", ")", "orig_dir", "=", "os", ".", "path", ".", "abspath", "(", "'.'", ")", "script_path", "=", "os", ".", "path", ".", "abspath", "(", "script_path", ")", "try", ":", "os", ".", "chdir", "(", "target_dir", ")", "cmds", "+=", "_freeze_config", "(", ")", "pyinst_path", "=", "'%s/thirdparty/pyinstaller'", "%", "__path__", "[", "0", "]", "cur_cmd", "=", "'python -O %s/pyinstaller.py %s --skip-configure'", "%", "(", "pyinst_path", ",", "script_path", ")", "cmds", ".", "append", "(", "cur_cmd", ")", "if", "_run", "(", "cur_cmd", ")", ":", "# If there is a problem, try removing the config and re-doing", "_freeze_config", "(", "force", "=", "True", ")", "cur_cmd", "=", "'python -O %s/pyinstaller.py %s'", "%", "(", "pyinst_path", ",", "script_path", ")", "_run", "(", "cur_cmd", ")", "finally", ":", "os", ".", "chdir", "(", "orig_dir", ")", "logging", ".", "debug", "(", "'\\\\/%s%s Output%s\\\\/'", "%", "(", "'-'", "*", "10", ",", "'Pyinstaller'", ",", "'-'", "*", "10", ")", ")", "logging", ".", "info", "(", "'Pyinstaller took [%f] seconds'", "%", "(", "time", ".", "time", "(", ")", "-", "freeze_start_time", ")", ")", "return", "cmds" ]
Wraps pyinstaller and provides an easy to use interface Args: script_path: Absolute path to python script to be frozen. Returns: List of freeze commands run Raises: subprocess.CalledProcessError: Freeze error. OSError: Freeze not found.
[ "Wraps", "pyinstaller", "and", "provides", "an", "easy", "to", "use", "interface" ]
python
train
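A usage sketch for the freeze record above, assuming hadoopy is installed; the script path is made up, and the import path simply mirrors this record's file path (hadoopy/_freeze.py).

    from hadoopy._freeze import freeze

    # Freezes the script into ./frozen and returns the pyinstaller
    # command lines that were executed.
    cmds = freeze('/abs/path/to/job.py', target_dir='frozen')
    print(cmds)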
blue-yonder/tsfresh
tsfresh/feature_extraction/extraction.py
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/extraction.py#L252-L335
def _do_extraction(df, column_id, column_value, column_kind, default_fc_parameters, kind_to_fc_parameters, n_jobs, chunk_size, disable_progressbar, distributor): """ Wrapper around the _do_extraction_on_chunk, which calls it on all chunks in the data frame. A chunk is a subset of the data, with a given kind and id - so a single time series. The data is separated out into those single time series and the _do_extraction_on_chunk is called on each of them. The results are then combined into a single pandas DataFrame. The call is either happening in parallel or not and is showing a progress bar or not depending on the given flags. :param df: The dataframe in the normalized format which is used for extraction. :type df: pd.DataFrame :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for more information. :type default_fc_parameters: dict :param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for default_fc_parameters. If you put a kind as a key here, the fc_parameters object (which is the value), will be used instead of the default_fc_parameters. :type kind_to_fc_parameters: dict :param column_id: The name of the id column to group by. :type column_id: str :param column_kind: The name of the column keeping record on the kind of the value. :type column_kind: str :param column_value: The name for the column keeping the value itself. :type column_value: str :param chunk_size: The size of one chunk for the parallelization :type chunk_size: None or int :param n_jobs: The number of processes to use for parallelization. If zero, no parallelization is used. :type n_jobs: int :param disable_progressbar: Do not show a progressbar while doing the calculation. :type disable_progressbar: bool :param distributor: Advanced parameter: See the utilities/distribution.py for more information. Leave to None, if you want TSFresh to choose the best distributor. :type distributor: DistributorBaseClass :return: the extracted features :rtype: pd.DataFrame """ data_in_chunks = generate_data_chunk_format(df, column_id, column_kind, column_value) if distributor is None: if n_jobs == 0: distributor = MapDistributor(disable_progressbar=disable_progressbar, progressbar_title="Feature Extraction") else: distributor = MultiprocessingDistributor(n_workers=n_jobs, disable_progressbar=disable_progressbar, progressbar_title="Feature Extraction") if not isinstance(distributor, DistributorBaseClass): raise ValueError("the passed distributor is not an DistributorBaseClass object") kwargs = dict(default_fc_parameters=default_fc_parameters, kind_to_fc_parameters=kind_to_fc_parameters) result = distributor.map_reduce(_do_extraction_on_chunk, data=data_in_chunks, chunk_size=chunk_size, function_kwargs=kwargs) distributor.close() # Return a dataframe in the typical form (id as index and feature names as columns) result = pd.DataFrame(result) if result.columns.contains("value"): result["value"] = result["value"].astype(float) if len(result) != 0: result = result.pivot("id", "variable", "value") result.index = result.index.astype(df[column_id].dtype) return result
[ "def", "_do_extraction", "(", "df", ",", "column_id", ",", "column_value", ",", "column_kind", ",", "default_fc_parameters", ",", "kind_to_fc_parameters", ",", "n_jobs", ",", "chunk_size", ",", "disable_progressbar", ",", "distributor", ")", ":", "data_in_chunks", "=", "generate_data_chunk_format", "(", "df", ",", "column_id", ",", "column_kind", ",", "column_value", ")", "if", "distributor", "is", "None", ":", "if", "n_jobs", "==", "0", ":", "distributor", "=", "MapDistributor", "(", "disable_progressbar", "=", "disable_progressbar", ",", "progressbar_title", "=", "\"Feature Extraction\"", ")", "else", ":", "distributor", "=", "MultiprocessingDistributor", "(", "n_workers", "=", "n_jobs", ",", "disable_progressbar", "=", "disable_progressbar", ",", "progressbar_title", "=", "\"Feature Extraction\"", ")", "if", "not", "isinstance", "(", "distributor", ",", "DistributorBaseClass", ")", ":", "raise", "ValueError", "(", "\"the passed distributor is not an DistributorBaseClass object\"", ")", "kwargs", "=", "dict", "(", "default_fc_parameters", "=", "default_fc_parameters", ",", "kind_to_fc_parameters", "=", "kind_to_fc_parameters", ")", "result", "=", "distributor", ".", "map_reduce", "(", "_do_extraction_on_chunk", ",", "data", "=", "data_in_chunks", ",", "chunk_size", "=", "chunk_size", ",", "function_kwargs", "=", "kwargs", ")", "distributor", ".", "close", "(", ")", "# Return a dataframe in the typical form (id as index and feature names as columns)", "result", "=", "pd", ".", "DataFrame", "(", "result", ")", "if", "result", ".", "columns", ".", "contains", "(", "\"value\"", ")", ":", "result", "[", "\"value\"", "]", "=", "result", "[", "\"value\"", "]", ".", "astype", "(", "float", ")", "if", "len", "(", "result", ")", "!=", "0", ":", "result", "=", "result", ".", "pivot", "(", "\"id\"", ",", "\"variable\"", ",", "\"value\"", ")", "result", ".", "index", "=", "result", ".", "index", ".", "astype", "(", "df", "[", "column_id", "]", ".", "dtype", ")", "return", "result" ]
Wrapper around the _do_extraction_on_chunk, which calls it on all chunks in the data frame. A chunk is a subset of the data, with a given kind and id - so a single time series. The data is separated out into those single time series and the _do_extraction_on_chunk is called on each of them. The results are then combined into a single pandas DataFrame. The call is either happening in parallel or not and is showing a progress bar or not depending on the given flags. :param df: The dataframe in the normalized format which is used for extraction. :type df: pd.DataFrame :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for more information. :type default_fc_parameters: dict :param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for default_fc_parameters. If you put a kind as a key here, the fc_parameters object (which is the value), will be used instead of the default_fc_parameters. :type kind_to_fc_parameters: dict :param column_id: The name of the id column to group by. :type column_id: str :param column_kind: The name of the column keeping record on the kind of the value. :type column_kind: str :param column_value: The name for the column keeping the value itself. :type column_value: str :param chunk_size: The size of one chunk for the parallelization :type chunk_size: None or int :param n_jobs: The number of processes to use for parallelization. If zero, no parallelization is used. :type n_jobs: int :param disable_progressbar: Do not show a progressbar while doing the calculation. :type disable_progressbar: bool :param distributor: Advanced parameter: See the utilities/distribution.py for more information. Leave to None, if you want TSFresh to choose the best distributor. :type distributor: DistributorBaseClass :return: the extracted features :rtype: pd.DataFrame
[ "Wrapper", "around", "the", "_do_extraction_on_chunk", "which", "calls", "it", "on", "all", "chunks", "in", "the", "data", "frame", ".", "A", "chunk", "is", "a", "subset", "of", "the", "data", "with", "a", "given", "kind", "and", "id", "-", "so", "a", "single", "time", "series", "." ]
python
train
tarmstrong/nbdiff
nbdiff/merge.py
https://github.com/tarmstrong/nbdiff/blob/3fdfb89f94fc0f4821bc04999ddf53b34d882ab9/nbdiff/merge.py#L46-L177
def notebook_merge(local, base, remote, check_modified=False): """Unify three notebooks into a single notebook with merge metadata. The result of this function is a valid notebook that can be loaded by the IPython Notebook front-end. This function adds additional cell metadata that the front-end Javascript uses to render the merge. Parameters ---------- local : dict The local branch's version of the notebook. base : dict The last common ancestor of local and remote. remote : dict The remote branch's version of the notebook. Returns ------- nb : A valid notebook containing merge metadata. """ local_cells = get_cells(local) base_cells = get_cells(base) remote_cells = get_cells(remote) rows = [] current_row = [] empty_cell = lambda: { 'cell_type': 'code', 'language': 'python', 'outputs': [], 'prompt_number': 1, 'text': ['Placeholder'], 'metadata': {'state': 'empty'} } diff_of_diffs = merge(local_cells, base_cells, remote_cells) # For each item in the higher-order diff, create a "row" that # corresponds to a row in the NBDiff interface. A row contains: # | LOCAL | BASE | REMOTE | for item in diff_of_diffs: state = item['state'] cell = copy.deepcopy(diff_result_to_cell(item['value'])) if state == 'deleted': # This change is between base and local branches. # It can be an addition or a deletion. if cell['metadata']['state'] == 'unchanged': # This side doesn't have the change; wait # until we encounter the change to create the row. continue cell['metadata']['side'] = 'local' remote_cell = empty_cell() remote_cell['metadata']['side'] = 'remote' if cell['metadata']['state'] == 'deleted' \ or cell['metadata']['state'] == 'unchanged': base_cell = copy.deepcopy(cell) else: base_cell = empty_cell() base_cell['metadata']['side'] = 'base' # This change is on the right. current_row = [ cell, base_cell, remote_cell, ] elif state == 'added': # This change is between base and remote branches. # It can be an addition or a deletion. cell['metadata']['side'] = 'remote' if cell['metadata']['state'] == 'unchanged': # This side doesn't have the change; wait # until we encounter the change to create the row. continue if cell['metadata']['state'] == 'deleted': base_cell = copy.deepcopy(cell) base_cell['metadata']['state'] = 'unchanged' local_cell = copy.deepcopy(cell) local_cell['metadata']['state'] = 'unchanged' else: base_cell = empty_cell() local_cell = empty_cell() base_cell['metadata']['side'] = 'base' local_cell['metadata']['side'] = 'local' current_row = [ local_cell, base_cell, cell, ] elif state == 'unchanged': # The same item occurs between base-local and base-remote. # This happens if both branches made the same change, whether # that is an addition or deletion. If neither branches # changed a given cell, that cell shows up here too. cell1 = copy.deepcopy(cell) cell3 = copy.deepcopy(cell) if cell['metadata']['state'] == 'deleted' \ or cell['metadata']['state'] == 'unchanged': # If the change is a deletion, the cell-to-be-deleted # should show in the base as 'unchanged'. The user will # choose to make it deleted. cell2 = copy.deepcopy(cell) cell2['metadata']['state'] = 'unchanged' else: # If the change is an addition, it should not # show in the base; the user must add it to the merged version. cell2 = empty_cell() cell1['metadata']['side'] = 'local' cell2['metadata']['side'] = 'base' cell3['metadata']['side'] = 'remote' current_row = [ cell1, cell2, cell3, ] rows.append(current_row) # Chain all rows together; create a flat array from the nested array. # Use the base notebook's notebook-level metadata (title, version, etc.) result_notebook = local if len(result_notebook['worksheets']) == 0: result_notebook['worksheets'] = [nbformat.new_worksheet()] new_cell_array = list(it.chain.from_iterable(rows)) result_notebook['worksheets'][0]['cells'] = new_cell_array result_notebook['metadata']['nbdiff-type'] = 'merge' return result_notebook
[ "def", "notebook_merge", "(", "local", ",", "base", ",", "remote", ",", "check_modified", "=", "False", ")", ":", "local_cells", "=", "get_cells", "(", "local", ")", "base_cells", "=", "get_cells", "(", "base", ")", "remote_cells", "=", "get_cells", "(", "remote", ")", "rows", "=", "[", "]", "current_row", "=", "[", "]", "empty_cell", "=", "lambda", ":", "{", "'cell_type'", ":", "'code'", ",", "'language'", ":", "'python'", ",", "'outputs'", ":", "[", "]", ",", "'prompt_number'", ":", "1", ",", "'text'", ":", "[", "'Placeholder'", "]", ",", "'metadata'", ":", "{", "'state'", ":", "'empty'", "}", "}", "diff_of_diffs", "=", "merge", "(", "local_cells", ",", "base_cells", ",", "remote_cells", ")", "# For each item in the higher-order diff, create a \"row\" that", "# corresponds to a row in the NBDiff interface. A row contains:", "# | LOCAL | BASE | REMOTE |", "for", "item", "in", "diff_of_diffs", ":", "state", "=", "item", "[", "'state'", "]", "cell", "=", "copy", ".", "deepcopy", "(", "diff_result_to_cell", "(", "item", "[", "'value'", "]", ")", ")", "if", "state", "==", "'deleted'", ":", "# This change is between base and local branches.", "# It can be an addition or a deletion.", "if", "cell", "[", "'metadata'", "]", "[", "'state'", "]", "==", "'unchanged'", ":", "# This side doesn't have the change; wait", "# until we encounter the change to create the row.", "continue", "cell", "[", "'metadata'", "]", "[", "'side'", "]", "=", "'local'", "remote_cell", "=", "empty_cell", "(", ")", "remote_cell", "[", "'metadata'", "]", "[", "'side'", "]", "=", "'remote'", "if", "cell", "[", "'metadata'", "]", "[", "'state'", "]", "==", "'deleted'", "or", "cell", "[", "'metadata'", "]", "[", "'state'", "]", "==", "'unchanged'", ":", "base_cell", "=", "copy", ".", "deepcopy", "(", "cell", ")", "else", ":", "base_cell", "=", "empty_cell", "(", ")", "base_cell", "[", "'metadata'", "]", "[", "'side'", "]", "=", "'base'", "# This change is on the right.", "current_row", "=", "[", "cell", ",", "base_cell", ",", "remote_cell", ",", "]", "elif", "state", "==", "'added'", ":", "# This change is between base and remote branches.", "# It can be an addition or a deletion.", "cell", "[", "'metadata'", "]", "[", "'side'", "]", "=", "'remote'", "if", "cell", "[", "'metadata'", "]", "[", "'state'", "]", "==", "'unchanged'", ":", "# This side doesn't have the change; wait", "# until we encounter the change to create the row.", "continue", "if", "cell", "[", "'metadata'", "]", "[", "'state'", "]", "==", "'deleted'", ":", "base_cell", "=", "copy", ".", "deepcopy", "(", "cell", ")", "base_cell", "[", "'metadata'", "]", "[", "'state'", "]", "=", "'unchanged'", "local_cell", "=", "copy", ".", "deepcopy", "(", "cell", ")", "local_cell", "[", "'metadata'", "]", "[", "'state'", "]", "=", "'unchanged'", "else", ":", "base_cell", "=", "empty_cell", "(", ")", "local_cell", "=", "empty_cell", "(", ")", "base_cell", "[", "'metadata'", "]", "[", "'side'", "]", "=", "'base'", "local_cell", "[", "'metadata'", "]", "[", "'side'", "]", "=", "'local'", "current_row", "=", "[", "local_cell", ",", "base_cell", ",", "cell", ",", "]", "elif", "state", "==", "'unchanged'", ":", "# The same item occurs between base-local and base-remote.", "# This happens if both branches made the same change, whether", "# that is an addition or deletion. If neither branches", "# changed a given cell, that cell shows up here too.", "cell1", "=", "copy", ".", "deepcopy", "(", "cell", ")", "cell3", "=", "copy", ".", "deepcopy", "(", "cell", ")", "if", "cell", "[", "'metadata'", "]", "[", "'state'", "]", "==", "'deleted'", "or", "cell", "[", "'metadata'", "]", "[", "'state'", "]", "==", "'unchanged'", ":", "# If the change is a deletion, the cell-to-be-deleted", "# should show in the base as 'unchanged'. The user will", "# choose to make it deleted.", "cell2", "=", "copy", ".", "deepcopy", "(", "cell", ")", "cell2", "[", "'metadata'", "]", "[", "'state'", "]", "=", "'unchanged'", "else", ":", "# If the change is an addition, it should not", "# show in the base; the user must add it to the merged version.", "cell2", "=", "empty_cell", "(", ")", "cell1", "[", "'metadata'", "]", "[", "'side'", "]", "=", "'local'", "cell2", "[", "'metadata'", "]", "[", "'side'", "]", "=", "'base'", "cell3", "[", "'metadata'", "]", "[", "'side'", "]", "=", "'remote'", "current_row", "=", "[", "cell1", ",", "cell2", ",", "cell3", ",", "]", "rows", ".", "append", "(", "current_row", ")", "# Chain all rows together; create a flat array from the nested array.", "# Use the base notebook's notebook-level metadata (title, version, etc.)", "result_notebook", "=", "local", "if", "len", "(", "result_notebook", "[", "'worksheets'", "]", ")", "==", "0", ":", "result_notebook", "[", "'worksheets'", "]", "=", "[", "nbformat", ".", "new_worksheet", "(", ")", "]", "new_cell_array", "=", "list", "(", "it", ".", "chain", ".", "from_iterable", "(", "rows", ")", ")", "result_notebook", "[", "'worksheets'", "]", "[", "0", "]", "[", "'cells'", "]", "=", "new_cell_array", "result_notebook", "[", "'metadata'", "]", "[", "'nbdiff-type'", "]", "=", "'merge'", "return", "result_notebook" ]
Unify three notebooks into a single notebook with merge metadata. The result of this function is a valid notebook that can be loaded by the IPython Notebook front-end. This function adds additional cell metadata that the front-end Javascript uses to render the merge. Parameters ---------- local : dict The local branch's version of the notebook. base : dict The last common ancestor of local and remote. remote : dict The remote branch's version of the notebook. Returns ------- nb : A valid notebook containing merge metadata.
[ "Unify", "three", "notebooks", "into", "a", "single", "notebook", "with", "merge", "metadata", "." ]
python
train
walkr/nanoservice
benchmarks/bench_req_rep_raw.py
https://github.com/walkr/nanoservice/blob/e2098986b1baa5f283167ae487d14f3c6c21961a/benchmarks/bench_req_rep_raw.py#L24-L39
def bench(client, n): """ Benchmark n requests """ items = list(range(n)) # Time client request operations # ------------------------------ started = time.time() msg = b'x' for i in items: client.socket.send(msg) res = client.socket.recv() assert msg == res duration = time.time() - started print('Raw REQ client stats:') util.print_stats(n, duration)
[ "def", "bench", "(", "client", ",", "n", ")", ":", "items", "=", "list", "(", "range", "(", "n", ")", ")", "# Time client request operations", "# ------------------------------", "started", "=", "time", ".", "time", "(", ")", "msg", "=", "b'x'", "for", "i", "in", "items", ":", "client", ".", "socket", ".", "send", "(", "msg", ")", "res", "=", "client", ".", "socket", ".", "recv", "(", ")", "assert", "msg", "==", "res", "duration", "=", "time", ".", "time", "(", ")", "-", "started", "print", "(", "'Raw REQ client stats:'", ")", "util", ".", "print_stats", "(", "n", ",", "duration", ")" ]
Benchmark n requests
[ "Benchmark", "n", "requests" ]
python
train
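The bench record above times n send/recv round trips and divides the count by the elapsed wall time. The same arithmetic can be exercised without nanoservice; the no-op echo below is a stand-in for the socket round trip.

    import time

    def echo(msg):  # stand-in for client.socket.send() + client.socket.recv()
        return msg

    n = 100000
    started = time.time()
    for _ in range(n):
        assert echo(b'x') == b'x'
    duration = time.time() - started
    print('%d msgs in %.3fs -> %.1f msgs/sec' % (n, duration, n / duration))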
brentp/cruzdb
cruzdb/__init__.py
https://github.com/brentp/cruzdb/blob/9068d46e25952f4a929dde0242beb31fa4c7e89a/cruzdb/__init__.py#L94-L110
def mirror(self, tables, dest_url): """ mirror a set of `tables` to `dest_url` Returns a new Genome object Parameters ---------- tables : list an iterable of tables dest_url: str a dburl string, e.g. 'sqlite:///local.db' """ from mirror import mirror return mirror(self, tables, dest_url)
[ "def", "mirror", "(", "self", ",", "tables", ",", "dest_url", ")", ":", "from", "mirror", "import", "mirror", "return", "mirror", "(", "self", ",", "tables", ",", "dest_url", ")" ]
mirror a set of `tables` to `dest_url` Returns a new Genome object Parameters ---------- tables : list an iterable of tables dest_url: str a dburl string, e.g. 'sqlite:///local.db'
[ "mirror", "a", "set", "of", "tables", "to", "dest_url" ]
python
train
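A usage sketch for the mirror record above, assuming cruzdb is installed and UCSC is reachable; the table name and local dburl are examples only.

    from cruzdb import Genome

    g = Genome(db='hg19')
    # Copies the refGene table into a local sqlite file and returns a
    # Genome object bound to that file.
    local = g.mirror(['refGene'], 'sqlite:///hg19.refGene.db')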
IndicoDataSolutions/IndicoIo-python
indicoio/pdf/pdf_extraction.py
https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/pdf/pdf_extraction.py#L6-L35
def pdf_extraction(pdf, cloud=None, batch=False, api_key=None, version=None, **kwargs): """ Given a pdf, returns the text and metadata associated with the given pdf. PDFs may be provided as base64 encoded data or as a filepath. Base64 image data and formatted table is optionally returned by setting `images=True` or `tables=True`. Example usage: .. code-block:: python >>> from indicoio import pdf_extraction >>> results = pdf_extraction(pdf_file) >>> results.keys() ['text', 'metadata'] :param pdf: The pdf to be analyzed. :type pdf: str or list of strs :rtype: dict or list of dicts """ pdf = pdf_preprocess(pdf, batch=batch) url_params = {"batch": batch, "api_key": api_key, "version": version} results = api_handler(pdf, cloud=cloud, api="pdfextraction", url_params=url_params, **kwargs) if batch: for result in results: result["images"] = postprocess_images(result.get("images", [])) else: results['images'] = postprocess_images(results.get("images", [])) return results
[ "def", "pdf_extraction", "(", "pdf", ",", "cloud", "=", "None", ",", "batch", "=", "False", ",", "api_key", "=", "None", ",", "version", "=", "None", ",", "*", "*", "kwargs", ")", ":", "pdf", "=", "pdf_preprocess", "(", "pdf", ",", "batch", "=", "batch", ")", "url_params", "=", "{", "\"batch\"", ":", "batch", ",", "\"api_key\"", ":", "api_key", ",", "\"version\"", ":", "version", "}", "results", "=", "api_handler", "(", "pdf", ",", "cloud", "=", "cloud", ",", "api", "=", "\"pdfextraction\"", ",", "url_params", "=", "url_params", ",", "*", "*", "kwargs", ")", "if", "batch", ":", "for", "result", "in", "results", ":", "result", "[", "\"images\"", "]", "=", "postprocess_images", "(", "result", ".", "get", "(", "\"images\"", ",", "[", "]", ")", ")", "else", ":", "results", "[", "'images'", "]", "=", "postprocess_images", "(", "results", ".", "get", "(", "\"images\"", ",", "[", "]", ")", ")", "return", "results" ]
Given a pdf, returns the text and metadata associated with the given pdf. PDFs may be provided as base64 encoded data or as a filepath. Base64 image data and formatted table is optionally returned by setting `images=True` or `tables=True`. Example usage: .. code-block:: python >>> from indicoio import pdf_extraction >>> results = pdf_extraction(pdf_file) >>> results.keys() ['text', 'metadata'] :param pdf: The pdf to be analyzed. :type pdf: str or list of strs :rtype: dict or list of dicts
[ "Given", "a", "pdf", "returns", "the", "text", "and", "metadata", "associated", "with", "the", "given", "pdf", ".", "PDFs", "may", "be", "provided", "as", "base64", "encoded", "data", "or", "as", "a", "filepath", ".", "Base64", "image", "data", "and", "formatted", "table", "is", "optionally", "returned", "by", "setting", "images", "=", "True", "or", "tables", "=", "True", "." ]
python
train
welbornprod/colr
colr/__main__.py
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/__main__.py#L333-L344
def list_known_codes(s, unique=True, rgb_mode=False): """ Find and print all known escape codes in a string, using get_known_codes. """ total = 0 for codedesc in get_known_codes(s, unique=unique, rgb_mode=rgb_mode): total += 1 print(codedesc) plural = 'code' if total == 1 else 'codes' codetype = ' unique' if unique else '' print('\nFound {}{} escape {}.'.format(total, codetype, plural)) return 0 if total > 0 else 1
[ "def", "list_known_codes", "(", "s", ",", "unique", "=", "True", ",", "rgb_mode", "=", "False", ")", ":", "total", "=", "0", "for", "codedesc", "in", "get_known_codes", "(", "s", ",", "unique", "=", "unique", ",", "rgb_mode", "=", "rgb_mode", ")", ":", "total", "+=", "1", "print", "(", "codedesc", ")", "plural", "=", "'code'", "if", "total", "==", "1", "else", "'codes'", "codetype", "=", "' unique'", "if", "unique", "else", "''", "print", "(", "'\\nFound {}{} escape {}.'", ".", "format", "(", "total", ",", "codetype", ",", "plural", ")", ")", "return", "0", "if", "total", ">", "0", "else", "1" ]
Find and print all known escape codes in a string, using get_known_codes.
[ "Find", "and", "print", "all", "known", "escape", "codes", "in", "a", "string", "using", "get_known_codes", "." ]
python
train
mlperf/training
reinforcement/tensorflow/minigo/cluster/eval_server/launch_eval.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/cluster/eval_server/launch_eval.py#L33-L88
def launch_eval_job(tag, m1_path, m2_path, job_name, completions): """Launches an evaluator job. tag: name for this eval job (used as top level folder name) m1_path, m2_path: full gs:// paths to the .pb files to match up job_name: string, appended to the container, used to differentiate the job names (e.g. 'minigo-cc-evaluator-v5-123-v7-456') completions: the number of completions desired (each completion is 2 games) """ print() if not re.match(r'[a-z0-9-]*$', tag, re.I): print("{} is not a valid tag".format(tag)) return # TODO: Change to minigo-pub sgf_bucket_path = "sethtroisi-sandbox/experiments/eval/" + tag assert not sgf_bucket_path.startswith("gs://"), sgf_bucket_path bucket_path = "gs://" + sgf_bucket_path metadata_path = os.path.join(bucket_path, 'metadata') assert not gfile.Exists(metadata_path), "Already exists" TS=str(int(time.time())) metadata = { 'timestamp': TS, 'date': datetime.datetime.now().isoformat(' '), 'model1': os.path.basename(m1_path), 'model2': os.path.basename(m2_path), 'model1_path': m1_path, 'model2_path': m2_path, 'job_name': job_name, 'completions': completions, 'launch_eval_version' : LAUNCH_EVAL_VERSION, } job_conf, resp_bw, resp_wb = launch_eval.launch_eval_job( m1_path, m2_path, job_name, sgf_bucket_path, completions) if not (resp_bw and resp_wb): print("launch_eval.py failed") print(job_conf) print(resp_bw) print(resp_wb) print() assert False # Jobs were launched, record metadata to GCS. with gfile.GFile(metadata_path, "w") as metadata_file: json.dump(metadata, metadata_file) with gfile.GFile(os.path.join(bucket_path, 'commands'), "w") as f: f.write(str(sys.argv) + "\n") with gfile.GFile(os.path.join(bucket_path, 'job_conf'), "w") as f: f.write(str(job_conf) + "\n")
[ "def", "launch_eval_job", "(", "tag", ",", "m1_path", ",", "m2_path", ",", "job_name", ",", "completions", ")", ":", "print", "(", ")", "if", "not", "re", ".", "match", "(", "r'[a-z0-9-]*$'", ",", "tag", ",", "re", ".", "I", ")", ":", "print", "(", "\"{} is not a valid tag\"", ".", "format", "(", "tag", ")", ")", "return", "# TODO: Change to minigo-pub", "sgf_bucket_path", "=", "\"sethtroisi-sandbox/experiments/eval/\"", "+", "tag", "assert", "not", "sgf_bucket_path", ".", "startswith", "(", "\"gs://\"", ")", ",", "sgf_bucket_path", "bucket_path", "=", "\"gs://\"", "+", "sgf_bucket_path", "metadata_path", "=", "os", ".", "path", ".", "join", "(", "bucket_path", ",", "'metadata'", ")", "assert", "not", "gfile", ".", "Exists", "(", "metadata_path", ")", ",", "\"Already exists\"", "TS", "=", "str", "(", "int", "(", "time", ".", "time", "(", ")", ")", ")", "metadata", "=", "{", "'timestamp'", ":", "TS", ",", "'date'", ":", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", "' '", ")", ",", "'model1'", ":", "os", ".", "path", ".", "basename", "(", "m1_path", ")", ",", "'model2'", ":", "os", ".", "path", ".", "basename", "(", "m2_path", ")", ",", "'model1_path'", ":", "m1_path", ",", "'model2_path'", ":", "m2_path", ",", "'job_name'", ":", "job_name", ",", "'completions'", ":", "completions", ",", "'launch_eval_version'", ":", "LAUNCH_EVAL_VERSION", ",", "}", "job_conf", ",", "resp_bw", ",", "resp_wb", "=", "launch_eval", ".", "launch_eval_job", "(", "m1_path", ",", "m2_path", ",", "job_name", ",", "sgf_bucket_path", ",", "completions", ")", "if", "not", "(", "resp_bw", "and", "resp_wb", ")", ":", "print", "(", "\"launch_eval.py failed\"", ")", "print", "(", "job_conf", ")", "print", "(", "resp_bw", ")", "print", "(", "resp_wb", ")", "print", "(", ")", "assert", "False", "# Jobs were launched, record metadata to GCS.", "with", "gfile", ".", "GFile", "(", "metadata_path", ",", "\"w\"", ")", "as", "metadata_file", ":", "json", ".", "dump", "(", "metadata", ",", "metadata_file", ")", "with", "gfile", ".", "GFile", "(", "os", ".", "path", ".", "join", "(", "bucket_path", ",", "'commands'", ")", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "str", "(", "sys", ".", "argv", ")", "+", "\"\\n\"", ")", "with", "gfile", ".", "GFile", "(", "os", ".", "path", ".", "join", "(", "bucket_path", ",", "'job_conf'", ")", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "str", "(", "job_conf", ")", "+", "\"\\n\"", ")" ]
Launches an evaluator job. tag: name for this eval job (used as top level folder name) m1_path, m2_path: full gs:// paths to the .pb files to match up job_name: string, appended to the container, used to differentiate the job names (e.g. 'minigo-cc-evaluator-v5-123-v7-456') completions: the number of completions desired (each completion is 2 games)
[ "Launches", "an", "evaluator", "job", ".", "tag", ":", "name", "for", "this", "eval", "job", "(", "used", "as", "top", "level", "folder", "name", ")", "m1_path", "m2_path", ":", "full", "gs", ":", "//", "paths", "to", "the", ".", "pb", "files", "to", "match", "up", "job_name", ":", "string", "appended", "to", "the", "container", "used", "to", "differentiate", "the", "job", "names", "(", "e", ".", "g", ".", "minigo", "-", "cc", "-", "evaluator", "-", "v5", "-", "123", "-", "v7", "-", "456", ")", "completions", ":", "the", "number", "of", "completions", "desired", "(", "each", "completion", "is", "2", "games", ")" ]
python
train
bcbio/bcbio-nextgen
bcbio/broad/picardrun.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/picardrun.py#L279-L309
def bed2interval(align_file, bed, out_file=None): """Converts a bed file to an interval file for use with some of the Picard tools by grabbing the header from the alignment file, reordering the bed file columns and gluing them together. align_file can be in BAM or SAM format. bed needs to be in bed12 format: http://genome.ucsc.edu/FAQ/FAQformat.html#format1.5 """ import pysam base, ext = os.path.splitext(align_file) if out_file is None: out_file = base + ".interval" with pysam.Samfile(align_file, "r" if ext.endswith(".sam") else "rb") as in_bam: header = in_bam.text def reorder_line(line): splitline = line.strip().split("\t") reordered = "\t".join([splitline[0], str(int(splitline[1]) + 1), splitline[2], splitline[5], splitline[3]]) return reordered + "\n" with file_transaction(out_file) as tx_out_file: with open(bed) as bed_handle: with open(tx_out_file, "w") as out_handle: out_handle.write(header) for line in bed_handle: out_handle.write(reorder_line(line)) return out_file
[ "def", "bed2interval", "(", "align_file", ",", "bed", ",", "out_file", "=", "None", ")", ":", "import", "pysam", "base", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "align_file", ")", "if", "out_file", "is", "None", ":", "out_file", "=", "base", "+", "\".interval\"", "with", "pysam", ".", "Samfile", "(", "align_file", ",", "\"r\"", "if", "ext", ".", "endswith", "(", "\".sam\"", ")", "else", "\"rb\"", ")", "as", "in_bam", ":", "header", "=", "in_bam", ".", "text", "def", "reorder_line", "(", "line", ")", ":", "splitline", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "reordered", "=", "\"\\t\"", ".", "join", "(", "[", "splitline", "[", "0", "]", ",", "str", "(", "int", "(", "splitline", "[", "1", "]", ")", "+", "1", ")", ",", "splitline", "[", "2", "]", ",", "splitline", "[", "5", "]", ",", "splitline", "[", "3", "]", "]", ")", "return", "reordered", "+", "\"\\n\"", "with", "file_transaction", "(", "out_file", ")", "as", "tx_out_file", ":", "with", "open", "(", "bed", ")", "as", "bed_handle", ":", "with", "open", "(", "tx_out_file", ",", "\"w\"", ")", "as", "out_handle", ":", "out_handle", ".", "write", "(", "header", ")", "for", "line", "in", "bed_handle", ":", "out_handle", ".", "write", "(", "reorder_line", "(", "line", ")", ")", "return", "out_file" ]
Converts a bed file to an interval file for use with some of the Picard tools by grabbing the header from the alignment file, reordering the bed file columns and gluing them together. align_file can be in BAM or SAM format. bed needs to be in bed12 format: http://genome.ucsc.edu/FAQ/FAQformat.html#format1.5
[ "Converts", "a", "bed", "file", "to", "an", "interval", "file", "for", "use", "with", "some", "of", "the", "Picard", "tools", "by", "grabbing", "the", "header", "from", "the", "alignment", "file", "reordering", "the", "bed", "file", "columns", "and", "gluing", "them", "together", "." ]
python
train
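The heart of the bed2interval record above is reorder_line: shift the 0-based BED start to 1-based and shuffle the columns into chrom/start/end/strand/name order. A standalone run on a made-up BED line:

    def reorder_line(line):
        s = line.strip().split('\t')
        # interval columns: chrom, 1-based start, end, strand, name
        return '\t'.join([s[0], str(int(s[1]) + 1), s[2], s[5], s[3]]) + '\n'

    bed_line = 'chr1\t999\t2000\tregion1\t0\t+\n'
    print(reorder_line(bed_line))  # chr1  1000  2000  +  region1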
RI-imaging/ODTbrain
odtbrain/_preproc.py
https://github.com/RI-imaging/ODTbrain/blob/abbab8b790f10c0c7aea8d858d7d60f2fdd7161e/odtbrain/_preproc.py#L60-L102
def sinogram_as_radon(uSin, align=True): r"""Compute the phase from a complex wave field sinogram This step is essential when using the ray approximation before computation of the refractive index with the inverse Radon transform. Parameters ---------- uSin: 2d or 3d complex ndarray The background-corrected sinogram of the complex scattered wave :math:`u(\mathbf{r})/u_0(\mathbf{r})`. The first axis iterates through the angles :math:`\phi_0`. align: bool Tries to correct for a phase offset in the phase sinogram. Returns ------- phase: 2d or 3d real ndarray The unwrapped phase array corresponding to `uSin`. See Also -------- skimage.restoration.unwrap_phase: phase unwrapping radontea.backproject_3d: e.g. reconstruction via backprojection """ ndims = len(uSin.shape) if ndims == 2: # unwrapping is very important phiR = np.unwrap(np.angle(uSin), axis=-1) else: # Unwrap gets the dimension of the problem from the input # data. Since we have a sinogram, we need to pass it the # slices one by one. phiR = np.angle(uSin) for ii in range(len(phiR)): phiR[ii] = unwrap_phase(phiR[ii], seed=47) if align: align_unwrapped(phiR) return phiR
[ "def", "sinogram_as_radon", "(", "uSin", ",", "align", "=", "True", ")", ":", "ndims", "=", "len", "(", "uSin", ".", "shape", ")", "if", "ndims", "==", "2", ":", "# unwrapping is very important", "phiR", "=", "np", ".", "unwrap", "(", "np", ".", "angle", "(", "uSin", ")", ",", "axis", "=", "-", "1", ")", "else", ":", "# Unwrap gets the dimension of the problem from the input", "# data. Since we have a sinogram, we need to pass it the", "# slices one by one.", "phiR", "=", "np", ".", "angle", "(", "uSin", ")", "for", "ii", "in", "range", "(", "len", "(", "phiR", ")", ")", ":", "phiR", "[", "ii", "]", "=", "unwrap_phase", "(", "phiR", "[", "ii", "]", ",", "seed", "=", "47", ")", "if", "align", ":", "align_unwrapped", "(", "phiR", ")", "return", "phiR" ]
Compute the phase from a complex wave field sinogram This step is essential when using the ray approximation before computation of the refractive index with the inverse Radon transform. Parameters ---------- uSin: 2d or 3d complex ndarray The background-corrected sinogram of the complex scattered wave :math:`u(\mathbf{r})/u_0(\mathbf{r})`. The first axis iterates through the angles :math:`\phi_0`. align: bool Tries to correct for a phase offset in the phase sinogram. Returns ------- phase: 2d or 3d real ndarray The unwrapped phase array corresponding to `uSin`. See Also -------- skimage.restoration.unwrap_phase: phase unwrapping radontea.backproject_3d: e.g. reconstruction via backprojection
[ "Compute", "the", "phase", "from", "a", "complex", "wave", "field", "sinogram" ]
python
train
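For the 2-D branch of sinogram_as_radon the whole step is np.unwrap applied to the complex argument along the detector axis. A self-contained check on synthetic data (numpy only; the field is made up):

    import numpy as np

    # 5 angles x 64 detector positions with a phase range well above 2*pi.
    x = np.linspace(0, 4 * np.pi, 64)
    uSin = np.exp(1j * np.tile(x, (5, 1)))

    phiR = np.unwrap(np.angle(uSin), axis=-1)  # the ndims == 2 branch
    assert np.allclose(phiR, np.tile(x, (5, 1)))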
bububa/pyTOP
pyTOP/packages/requests/api.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/packages/requests/api.py#L17-L66
def request(method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allow_redirects=False, proxies=None, hooks=None, return_response=True, prefetch=False, config=None): """Constructs and sends a :class:`Request <Request>`. Returns :class:`Response <Response>` object. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of 'filename': file-like-objects for multipart encoding upload. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) Float describing the timeout of the request. :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed. :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param return_response: (optional) If False, an un-sent Request object will be returned. :param config: (optional) A configuration dictionary. """ s = session() return s.request( method=method, url=url, params=params, data=data, headers=headers, cookies=cookies, files=files, auth=auth, timeout=timeout, allow_redirects=allow_redirects, proxies=proxies, hooks=hooks, return_response=return_response, config=config, prefetch=prefetch )
[ "def", "request", "(", "method", ",", "url", ",", "params", "=", "None", ",", "data", "=", "None", ",", "headers", "=", "None", ",", "cookies", "=", "None", ",", "files", "=", "None", ",", "auth", "=", "None", ",", "timeout", "=", "None", ",", "allow_redirects", "=", "False", ",", "proxies", "=", "None", ",", "hooks", "=", "None", ",", "return_response", "=", "True", ",", "prefetch", "=", "False", ",", "config", "=", "None", ")", ":", "s", "=", "session", "(", ")", "return", "s", ".", "request", "(", "method", "=", "method", ",", "url", "=", "url", ",", "params", "=", "params", ",", "data", "=", "data", ",", "headers", "=", "headers", ",", "cookies", "=", "cookies", ",", "files", "=", "files", ",", "auth", "=", "auth", ",", "timeout", "=", "timeout", ",", "allow_redirects", "=", "allow_redirects", ",", "proxies", "=", "proxies", ",", "hooks", "=", "hooks", ",", "return_response", "=", "return_response", ",", "config", "=", "config", ",", "prefetch", "=", "prefetch", ")" ]
Constructs and sends a :class:`Request <Request>`. Returns :class:`Response <Response>` object. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of 'filename': file-like-objects for multipart encoding upload. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) Float describing the timeout of the request. :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed. :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param return_response: (optional) If False, an un-sent Request object will be returned. :param config: (optional) A configuration dictionary.
[ "Constructs", "and", "sends", "a", ":", "class", ":", "Request", "<Request", ">", ".", "Returns", ":", "class", ":", "Response", "<Response", ">", "object", "." ]
python
train
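A usage sketch for the request record above; the import path follows this record's file path and the URL is a placeholder. status_code is assumed from the usual requests Response interface.

    from pyTOP.packages.requests.api import request

    r = request('GET', 'http://example.com/api', params={'q': 'x'}, timeout=5.0)
    print(r.status_code)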
tanghaibao/jcvi
jcvi/formats/fastq.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L721-L749
def trim(args): """ %prog trim fastqfile Wraps `fastx_trimmer` to trim from the beginning or end of reads. """ p = OptionParser(trim.__doc__) p.add_option("-f", dest="first", default=0, type="int", help="First base to keep. Default is 1.") p.add_option("-l", dest="last", default=0, type="int", help="Last base to keep. Default is entire read.") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastqfile, = args obfastqfile = op.basename(fastqfile) fq = obfastqfile.rsplit(".", 1)[0] + ".ntrimmed.fastq" if fastqfile.endswith(".gz"): fq = obfastqfile.rsplit(".", 2)[0] + ".ntrimmed.fastq.gz" cmd = "fastx_trimmer -Q33 " if opts.first: cmd += "-f {0.first} ".format(opts) if opts.last: cmd += "-l {0.last} ".format(opts) sh(cmd, infile=fastqfile, outfile=fq)
[ "def", "trim", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "trim", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"-f\"", ",", "dest", "=", "\"first\"", ",", "default", "=", "0", ",", "type", "=", "\"int\"", ",", "help", "=", "\"First base to keep. Default is 1.\"", ")", "p", ".", "add_option", "(", "\"-l\"", ",", "dest", "=", "\"last\"", ",", "default", "=", "0", ",", "type", "=", "\"int\"", ",", "help", "=", "\"Last base to keep. Default is entire read.\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "fastqfile", ",", "=", "args", "obfastqfile", "=", "op", ".", "basename", "(", "fastqfile", ")", "fq", "=", "obfastqfile", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", "+", "\".ntrimmed.fastq\"", "if", "fastqfile", ".", "endswith", "(", "\".gz\"", ")", ":", "fq", "=", "obfastqfile", ".", "rsplit", "(", "\".\"", ",", "2", ")", "[", "0", "]", "+", "\".ntrimmed.fastq.gz\"", "cmd", "=", "\"fastx_trimmer -Q33 \"", "if", "opts", ".", "first", ":", "cmd", "+=", "\"-f {0.first} \"", ".", "format", "(", "opts", ")", "if", "opts", ".", "last", ":", "cmd", "+=", "\"-l {0.last} \"", ".", "format", "(", "opts", ")", "sh", "(", "cmd", ",", "infile", "=", "fastqfile", ",", "outfile", "=", "fq", ")" ]
%prog trim fastqfile Wraps `fastx_trimmer` to trim from the beginning or end of reads.
[ "%prog", "trim", "fastqfile" ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/color/color_space.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/color/color_space.py#L39-L43
def _rgb_to_hex(rgbs): """Convert rgb to hex triplet""" rgbs, n_dim = _check_color_dim(rgbs) return np.array(['#%02x%02x%02x' % tuple((255*rgb[:3]).astype(np.uint8)) for rgb in rgbs], '|U7')
[ "def", "_rgb_to_hex", "(", "rgbs", ")", ":", "rgbs", ",", "n_dim", "=", "_check_color_dim", "(", "rgbs", ")", "return", "np", ".", "array", "(", "[", "'#%02x%02x%02x'", "%", "tuple", "(", "(", "255", "*", "rgb", "[", ":", "3", "]", ")", ".", "astype", "(", "np", ".", "uint8", ")", ")", "for", "rgb", "in", "rgbs", "]", ",", "'|U7'", ")" ]
Convert rgb to hex triplet
[ "Convert", "rgb", "to", "hex", "triplet" ]
python
train
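The conversion in _rgb_to_hex is just a scale to 0-255 followed by a %02x triplet; one color standalone:

    import numpy as np

    rgb = np.array([1.0, 0.5, 0.0])
    hex_str = '#%02x%02x%02x' % tuple((255 * rgb[:3]).astype(np.uint8))
    print(hex_str)  # '#ff7f00' (127.5 truncates to 127 -> 7f)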
xflr6/graphviz
graphviz/backend.py
https://github.com/xflr6/graphviz/blob/7376095ef1e47abad7e0b0361b6c9720b706e7a0/graphviz/backend.py#L230-L242
def view(filepath): """Open filepath with its default viewing application (platform-specific). Args: filepath: Path to the file to open in viewer. Raises: RuntimeError: If the current platform is not supported. """ try: view_func = getattr(view, PLATFORM) except AttributeError: raise RuntimeError('platform %r not supported' % PLATFORM) view_func(filepath)
[ "def", "view", "(", "filepath", ")", ":", "try", ":", "view_func", "=", "getattr", "(", "view", ",", "PLATFORM", ")", "except", "AttributeError", ":", "raise", "RuntimeError", "(", "'platform %r not supported'", "%", "PLATFORM", ")", "view_func", "(", "filepath", ")" ]
Open filepath with its default viewing application (platform-specific). Args: filepath: Path to the file to open in viewer. Raises: RuntimeError: If the current platform is not supported.
[ "Open", "filepath", "with", "its", "default", "viewing", "application", "(", "platform", "-", "specific", ")", "." ]
python
train
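The view record above dispatches via attributes attached to the function object itself. A standalone sketch of that trick; the handler bodies and the use of sys.platform are stand-ins (graphviz derives its PLATFORM constant its own way).

    import subprocess
    import sys

    PLATFORM = sys.platform  # e.g. 'linux' or 'darwin'

    def view(filepath):
        try:
            view_func = getattr(view, PLATFORM)
        except AttributeError:
            raise RuntimeError('platform %r not supported' % PLATFORM)
        view_func(filepath)

    # Handlers are plain attributes on the function object.
    view.linux = lambda fp: subprocess.Popen(['xdg-open', fp])
    view.darwin = lambda fp: subprocess.Popen(['open', fp])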
Genida/archan
src/archan/analysis.py
https://github.com/Genida/archan/blob/a026d3105c7e86f30e6c9507b93ceb736684bfdc/src/archan/analysis.py#L120-L125
def successful(self): """Property to tell if the run was successful: no failures.""" for result in self.results: if result.code == ResultCode.FAILED: return False return True
[ "def", "successful", "(", "self", ")", ":", "for", "result", "in", "self", ".", "results", ":", "if", "result", ".", "code", "==", "ResultCode", ".", "FAILED", ":", "return", "False", "return", "True" ]
Property to tell if the run was successful: no failures.
[ "Property", "to", "tell", "if", "the", "run", "was", "successful", ":", "no", "failures", "." ]
python
train
cuihantao/andes
andes/models/agc.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/models/agc.py#L169-L178
def switch(self): """Switch if time for eAgc has come""" t = self.system.dae.t for idx in range(0, self.n): if t >= self.tl[idx]: if self.en[idx] == 0: self.en[idx] = 1 logger.info( 'Extended ACE <{}> activated at t = {}.'.format( self.idx[idx], t))
[ "def", "switch", "(", "self", ")", ":", "t", "=", "self", ".", "system", ".", "dae", ".", "t", "for", "idx", "in", "range", "(", "0", ",", "self", ".", "n", ")", ":", "if", "t", ">=", "self", ".", "tl", "[", "idx", "]", ":", "if", "self", ".", "en", "[", "idx", "]", "==", "0", ":", "self", ".", "en", "[", "idx", "]", "=", "1", "logger", ".", "info", "(", "'Extended ACE <{}> activated at t = {}.'", ".", "format", "(", "self", ".", "idx", "[", "idx", "]", ",", "t", ")", ")" ]
Switch if time for eAgc has come
[ "Switch", "if", "time", "for", "eAgc", "has", "come" ]
python
train
square/pylink
pylink/jlink.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L3949-L3980
def strace_read(self, num_instructions): """Reads and returns a number of instructions captured by STRACE. The number of instructions must be a non-negative value of at most ``0x10000`` (``65536``). Args: self (JLink): the ``JLink`` instance. num_instructions (int): number of instructions to fetch. Returns: A list of instruction addresses in order from most recently executed to oldest executed instructions. Note that the number of instructions returned can be less than the number of instructions requested in the case that there are not ``num_instructions`` in the trace buffer. Raises: JLinkException: on error. ValueError: if ``num_instructions < 0`` or ``num_instructions > 0x10000``. """ if num_instructions < 0 or num_instructions > 0x10000: raise ValueError('Invalid instruction count.') buf = (ctypes.c_uint32 * num_instructions)() buf_size = num_instructions res = self._dll.JLINK_STRACE_Read(ctypes.byref(buf), buf_size) if res < 0: raise errors.JLinkException('Failed to read from STRACE buffer.') return list(buf)[:res]
[ "def", "strace_read", "(", "self", ",", "num_instructions", ")", ":", "if", "num_instructions", "<", "0", "or", "num_instructions", ">", "0x10000", ":", "raise", "ValueError", "(", "'Invalid instruction count.'", ")", "buf", "=", "(", "ctypes", ".", "c_uint32", "*", "num_instructions", ")", "(", ")", "buf_size", "=", "num_instructions", "res", "=", "self", ".", "_dll", ".", "JLINK_STRACE_Read", "(", "ctypes", ".", "byref", "(", "buf", ")", ",", "buf_size", ")", "if", "res", "<", "0", ":", "raise", "errors", ".", "JLinkException", "(", "'Failed to read from STRACE buffer.'", ")", "return", "list", "(", "buf", ")", "[", ":", "res", "]" ]
Reads and returns a number of instructions captured by STRACE. The number of instructions must be a non-negative value of at most ``0x10000`` (``65536``). Args: self (JLink): the ``JLink`` instance. num_instructions (int): number of instructions to fetch. Returns: A list of instruction addresses in order from most recently executed to oldest executed instructions. Note that the number of instructions returned can be less than the number of instructions requested in the case that there are not ``num_instructions`` in the trace buffer. Raises: JLinkException: on error. ValueError: if ``num_instructions < 0`` or ``num_instructions > 0x10000``.
[ "Reads", "and", "returns", "a", "number", "of", "instructions", "captured", "by", "STRACE", "." ]
python
train
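The ctypes idiom in strace_read is worth isolating: allocate a fixed c_uint32 array, let the C call fill it, then keep only the first res entries. The DLL fill is faked below so the sketch runs without a J-Link attached; the addresses are made up.

    import ctypes

    num_instructions = 8
    buf = (ctypes.c_uint32 * num_instructions)()  # zero-initialized C array

    # Pretend the DLL wrote 3 addresses and returned 3 (negative means error).
    buf[0], buf[1], buf[2] = 0x08000130, 0x0800012C, 0x08000128
    res = 3

    addresses = list(buf)[:res]
    print([hex(a) for a in addresses])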
seomoz/qless-py
qless/workers/serial.py
https://github.com/seomoz/qless-py/blob/3eda4ffcd4c0016c9a7e44f780d6155e1a354dda/qless/workers/serial.py#L24-L43
def run(self): '''Run jobs, popping one after another''' # Register our signal handlers self.signals() with self.listener(): for job in self.jobs(): # If there was no job to be had, we should sleep a little bit if not job: self.jid = None self.title('Sleeping for %fs' % self.interval) time.sleep(self.interval) else: self.jid = job.jid self.title('Working on %s (%s)' % (job.jid, job.klass_name)) with Worker.sandbox(self.sandbox): job.sandbox = self.sandbox job.process() if self.shutdown: break
[ "def", "run", "(", "self", ")", ":", "# Register our signal handlers", "self", ".", "signals", "(", ")", "with", "self", ".", "listener", "(", ")", ":", "for", "job", "in", "self", ".", "jobs", "(", ")", ":", "# If there was no job to be had, we should sleep a little bit", "if", "not", "job", ":", "self", ".", "jid", "=", "None", "self", ".", "title", "(", "'Sleeping for %fs'", "%", "self", ".", "interval", ")", "time", ".", "sleep", "(", "self", ".", "interval", ")", "else", ":", "self", ".", "jid", "=", "job", ".", "jid", "self", ".", "title", "(", "'Working on %s (%s)'", "%", "(", "job", ".", "jid", ",", "job", ".", "klass_name", ")", ")", "with", "Worker", ".", "sandbox", "(", "self", ".", "sandbox", ")", ":", "job", ".", "sandbox", "=", "self", ".", "sandbox", "job", ".", "process", "(", ")", "if", "self", ".", "shutdown", ":", "break" ]
Run jobs, popping one after another
[ "Run", "jobs", "popping", "one", "after", "another" ]
python
train
quodlibet/mutagen
mutagen/_senf/_stdlib.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/_senf/_stdlib.py#L135-L154
def expandvars(path): """ Args: path (pathlike): A path to expand Returns: `fsnative` Like :func:`python:os.path.expandvars` but supports unicode under Windows + Python 2 and always returns a `fsnative`. """ path = path2fsn(path) def repl_func(match): return environ.get(match.group(1), match.group(0)) path = re.compile(r"\$(\w+)", flags=re.UNICODE).sub(repl_func, path) if os.name == "nt": path = re.sub(r"%([^%]+)%", repl_func, path) return re.sub(r"\$\{([^\}]+)\}", repl_func, path)
[ "def", "expandvars", "(", "path", ")", ":", "path", "=", "path2fsn", "(", "path", ")", "def", "repl_func", "(", "match", ")", ":", "return", "environ", ".", "get", "(", "match", ".", "group", "(", "1", ")", ",", "match", ".", "group", "(", "0", ")", ")", "path", "=", "re", ".", "compile", "(", "r\"\\$(\\w+)\"", ",", "flags", "=", "re", ".", "UNICODE", ")", ".", "sub", "(", "repl_func", ",", "path", ")", "if", "os", ".", "name", "==", "\"nt\"", ":", "path", "=", "re", ".", "sub", "(", "r\"%([^%]+)%\"", ",", "repl_func", ",", "path", ")", "return", "re", ".", "sub", "(", "r\"\\$\\{([^\\}]+)\\}\"", ",", "repl_func", ",", "path", ")" ]
Args: path (pathlike): A path to expand Returns: `fsnative` Like :func:`python:os.path.expandvars` but supports unicode under Windows + Python 2 and always returns a `fsnative`.
[ "Args", ":", "path", "(", "pathlike", ")", ":", "A", "path", "to", "expand", "Returns", ":", "fsnative" ]
python
train
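The two regex passes in expandvars ($NAME, then ${NAME}; the Windows-only %NAME% pass is omitted here) can be exercised with just the stdlib:

    import os
    import re

    os.environ['PROJECT'] = '/tmp/demo'  # set inline so the sketch runs anywhere

    def expandvars_sketch(path):
        repl = lambda m: os.environ.get(m.group(1), m.group(0))
        path = re.sub(r'\$(\w+)', repl, path)
        return re.sub(r'\$\{([^\}]+)\}', repl, path)

    print(expandvars_sketch('$PROJECT/cache'))    # /tmp/demo/cache
    print(expandvars_sketch('${PROJECT}/cache'))  # /tmp/demo/cache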
dropbox/stone
stone/backends/python_rsrc/stone_serializers.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/python_rsrc/stone_serializers.py#L118-L170
def encode_sub(self, validator, value): # type: (bv.Validator, typing.Any) -> typing.Any """ Callback intended to be called by other ``encode`` methods to delegate encoding of sub-values. Arguments have the same semantics as with the ``encode`` method. """ if isinstance(validator, bv.List): # Because Lists are mutable, we always validate them during # serialization validate_f = validator.validate # type: typing.Callable[[typing.Any], None] encode_f = self.encode_list # type: typing.Callable[[typing.Any, typing.Any], typing.Any] # noqa: E501 elif isinstance(validator, bv.Map): # Also validate maps during serialization because they are also mutable validate_f = validator.validate encode_f = self.encode_map elif isinstance(validator, bv.Nullable): validate_f = validator.validate encode_f = self.encode_nullable elif isinstance(validator, bv.Primitive): validate_f = validator.validate encode_f = self.encode_primitive elif isinstance(validator, bv.Struct): if isinstance(validator, bv.StructTree): if self.caller_permissions.permissions: def validate_with_permissions(val): validator.validate_with_permissions(val, self.caller_permissions) validate_f = validate_with_permissions else: validate_f = validator.validate encode_f = self.encode_struct_tree else: # Fields are already validated on assignment if self.caller_permissions.permissions: def validate_with_permissions(val): validator.validate_with_permissions(val, self.caller_permissions) validate_f = validate_with_permissions else: validate_f = validator.validate_type_only encode_f = self.encode_struct elif isinstance(validator, bv.Union): # Fields are already validated on assignment validate_f = validator.validate_type_only encode_f = self.encode_union else: raise bv.ValidationError('Unsupported data type {}'.format(type(validator).__name__)) validate_f(value) return encode_f(validator, value)
[ "def", "encode_sub", "(", "self", ",", "validator", ",", "value", ")", ":", "# type: (bv.Validator, typing.Any) -> typing.Any", "if", "isinstance", "(", "validator", ",", "bv", ".", "List", ")", ":", "# Because Lists are mutable, we always validate them during", "# serialization", "validate_f", "=", "validator", ".", "validate", "# type: typing.Callable[[typing.Any], None]", "encode_f", "=", "self", ".", "encode_list", "# type: typing.Callable[[typing.Any, typing.Any], typing.Any] # noqa: E501", "elif", "isinstance", "(", "validator", ",", "bv", ".", "Map", ")", ":", "# Also validate maps during serialization because they are also mutable", "validate_f", "=", "validator", ".", "validate", "encode_f", "=", "self", ".", "encode_map", "elif", "isinstance", "(", "validator", ",", "bv", ".", "Nullable", ")", ":", "validate_f", "=", "validator", ".", "validate", "encode_f", "=", "self", ".", "encode_nullable", "elif", "isinstance", "(", "validator", ",", "bv", ".", "Primitive", ")", ":", "validate_f", "=", "validator", ".", "validate", "encode_f", "=", "self", ".", "encode_primitive", "elif", "isinstance", "(", "validator", ",", "bv", ".", "Struct", ")", ":", "if", "isinstance", "(", "validator", ",", "bv", ".", "StructTree", ")", ":", "if", "self", ".", "caller_permissions", ".", "permissions", ":", "def", "validate_with_permissions", "(", "val", ")", ":", "validator", ".", "validate_with_permissions", "(", "val", ",", "self", ".", "caller_permissions", ")", "validate_f", "=", "validate_with_permissions", "else", ":", "validate_f", "=", "validator", ".", "validate", "encode_f", "=", "self", ".", "encode_struct_tree", "else", ":", "# Fields are already validated on assignment", "if", "self", ".", "caller_permissions", ".", "permissions", ":", "def", "validate_with_permissions", "(", "val", ")", ":", "validator", ".", "validate_with_permissions", "(", "val", ",", "self", ".", "caller_permissions", ")", "validate_f", "=", "validate_with_permissions", "else", ":", "validate_f", "=", "validator", ".", "validate_type_only", "encode_f", "=", "self", ".", "encode_struct", "elif", "isinstance", "(", "validator", ",", "bv", ".", "Union", ")", ":", "# Fields are already validated on assignment", "validate_f", "=", "validator", ".", "validate_type_only", "encode_f", "=", "self", ".", "encode_union", "else", ":", "raise", "bv", ".", "ValidationError", "(", "'Unsupported data type {}'", ".", "format", "(", "type", "(", "validator", ")", ".", "__name__", ")", ")", "validate_f", "(", "value", ")", "return", "encode_f", "(", "validator", ",", "value", ")" ]
Callback intended to be called by other ``encode`` methods to delegate encoding of sub-values. Arguments have the same semantics as with the ``encode`` method.
[ "Callback", "intended", "to", "be", "called", "by", "other", "encode", "methods", "to", "delegate", "encoding", "of", "sub", "-", "values", ".", "Arguments", "have", "the", "same", "semantics", "as", "with", "the", "encode", "method", "." ]
python
train
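A minimal sketch of the type-dispatch pattern encode_sub uses, with hypothetical stand-in validator classes rather than stone's real bv module (ListValidator and IntValidator below are invented for illustration):

# Hypothetical stand-ins for stone's bv validators, for illustration only.
class ListValidator:
    def __init__(self, item_validator):
        self.item_validator = item_validator

    def validate(self, value):
        if not isinstance(value, list):
            raise ValueError('expected list')

class IntValidator:
    def validate(self, value):
        if not isinstance(value, int):
            raise ValueError('expected int')

def encode_sub(validator, value):
    # Pick a validate/encode pair from the validator's concrete type,
    # mirroring the isinstance chain in the record above.
    if isinstance(validator, ListValidator):
        validate_f = validator.validate
        encode_f = lambda v, val: [encode_sub(v.item_validator, item) for item in val]
    elif isinstance(validator, IntValidator):
        validate_f = validator.validate
        encode_f = lambda v, val: val
    else:
        raise ValueError('Unsupported data type {}'.format(type(validator).__name__))
    validate_f(value)
    return encode_f(validator, value)

print(encode_sub(ListValidator(IntValidator()), [1, 2, 3]))  # [1, 2, 3]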
juju-solutions/charms.reactive
charms/reactive/endpoints.py
https://github.com/juju-solutions/charms.reactive/blob/e37e781432e77c12b63d2c739bd6cd70d3230c3a/charms/reactive/endpoints.py#L613-L620
def load(cls, cache_key, deserializer, key_attr):
    """
    Load the persisted cache and return a new instance of this class.
    """
    items = unitdata.kv().get(cache_key) or []
    return cls(cache_key, [deserializer(item) for item in items], key_attr)
[ "def", "load", "(", "cls", ",", "cache_key", ",", "deserializer", ",", "key_attr", ")", ":", "items", "=", "unitdata", ".", "kv", "(", ")", ".", "get", "(", "cache_key", ")", "or", "[", "]", "return", "cls", "(", "cache_key", ",", "[", "deserializer", "(", "item", ")", "for", "item", "in", "items", "]", ",", "key_attr", ")" ]
Load the persisted cache and return a new instance of this class.
[ "Load", "the", "persisted", "cache", "and", "return", "a", "new", "instance", "of", "this", "class", "." ]
python
train
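A hedged round-trip sketch of the load classmethod, assuming a plain dict can stand in for charmhelpers' unitdata.kv() store (the real store persists to disk):

import json

# A dict stands in for charmhelpers' unitdata.kv() store (an assumption; the
# real store is a key/value database persisted by the charm framework).
_kv = {'units': ['{"name": "db/0"}', '{"name": "db/1"}']}

class KeyedCache:
    def __init__(self, cache_key, items, key_attr):
        self.cache_key = cache_key
        self.items = items
        self.key_attr = key_attr

    @classmethod
    def load(cls, cache_key, deserializer, key_attr):
        # Same shape as the record above: fetch, default to [], deserialize each item.
        items = _kv.get(cache_key) or []
        return cls(cache_key, [deserializer(item) for item in items], key_attr)

cache = KeyedCache.load('units', json.loads, 'name')
print([item['name'] for item in cache.items])  # ['db/0', 'db/1']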
byt3bl33d3r/CrackMapExec
cme/helpers/powershell.py
https://github.com/byt3bl33d3r/CrackMapExec/blob/333f1c4e06884e85b2776459963ef85d182aba8e/cme/helpers/powershell.py#L242-L392
def invoke_obfuscation(scriptString):
    # Add letters a-z with random case to $RandomDelimiters.
    alphabet = ''.join(choice([i.upper(), i]) for i in ascii_lowercase)

    # Create list of random delimiters called randomDelimiters.
    # Avoid using . * ' " [ ] ( ) etc. as delimiters as these will cause problems in the -Split command syntax.
    randomDelimiters = ['_','-',',','{','}','~','!','@','%','&','<','>',';',':']

    for i in alphabet:
        randomDelimiters.append(i)

    # Only use a subset of current delimiters to randomize what you see in every iteration of this script's output.
    randomDelimiters = [choice(randomDelimiters) for _ in range(int(len(randomDelimiters)/4))]

    # Convert $ScriptString to delimited ASCII values in [Char] array separated by random delimiter from defined list $RandomDelimiters.
    delimitedEncodedArray = ''
    for char in scriptString:
        delimitedEncodedArray += str(ord(char)) + choice(randomDelimiters)

    # Remove trailing delimiter from $DelimitedEncodedArray.
    delimitedEncodedArray = delimitedEncodedArray[:-1]

    # Create printable version of $RandomDelimiters in random order to be used by final command.
    test = sample(randomDelimiters, len(randomDelimiters))
    randomDelimitersToPrint = ''.join(i for i in test)

    # Generate random case versions for necessary operations.
    forEachObject = choice(['ForEach','ForEach-Object','%'])
    strJoin = ''.join(choice([i.upper(), i.lower()]) for i in '[String]::Join')
    strStr = ''.join(choice([i.upper(), i.lower()]) for i in '[String]')
    join = ''.join(choice([i.upper(), i.lower()]) for i in '-Join')
    charStr = ''.join(choice([i.upper(), i.lower()]) for i in 'Char')
    integer = ''.join(choice([i.upper(), i.lower()]) for i in 'Int')
    forEachObject = ''.join(choice([i.upper(), i.lower()]) for i in forEachObject)

    # Create printable version of $RandomDelimiters in random order to be used by final command specifically for -Split syntax.
    randomDelimitersToPrintForDashSplit = ''

    for delim in randomDelimiters:
        # Random case 'split' string.
        split = ''.join(choice([i.upper(), i.lower()]) for i in 'Split')

        randomDelimitersToPrintForDashSplit += '-' + split + choice(['', ' ']) + '\'' + delim + '\'' + choice(['', ' '])

    randomDelimitersToPrintForDashSplit = randomDelimitersToPrintForDashSplit.strip('\t\n\r')

    # Randomly select between various conversion syntax options.
    randomConversionSyntax = []
    randomConversionSyntax.append('[' + charStr + ']' + choice(['', ' ']) + '[' + integer + ']' + choice(['', ' ']) + '$_')
    randomConversionSyntax.append('[' + integer + ']' + choice(['', ' ']) + '$_' + choice(['', ' ']) + choice(['-as', '-As', '-aS', '-AS']) + choice(['', ' ']) + '[' + charStr + ']')
    randomConversionSyntax = choice(randomConversionSyntax)

    # Create array syntax for encoded scriptString as alternative to .Split/-Split syntax.
    encodedArray = ''
    for char in scriptString:
        encodedArray += str(ord(char)) + choice(['', ' ']) + ',' + choice(['', ' '])

    # Remove trailing comma from encodedArray
    encodedArray = '(' + choice(['', ' ']) + encodedArray.rstrip().rstrip(',') + ')'

    # Generate random syntax to create/set OFS variable ($OFS is the Output Field Separator automatic variable).
    # Using Set-Item and Set-Variable/SV/SET syntax. Not using New-Item in case OFS variable already exists.
    # If the OFS variable did exist then we could use even more syntax: $varname, Set-Variable/SV, Set-Item/SET, Get-Variable/GV/Variable, Get-ChildItem/GCI/ChildItem/Dir/Ls
    # For more info: https://msdn.microsoft.com/en-us/powershell/reference/5.1/microsoft.powershell.core/about/about_automatic_variables
    setOfsVarSyntax = []
    setOfsVarSyntax.append('Set-Item' + choice([' '*1, ' '*2]) + "'Variable:OFS'" + choice([' '*1, ' '*2]) + "''")
    setOfsVarSyntax.append(choice(['Set-Variable', 'SV', 'SET']) + choice([' '*1, ' '*2]) + "'OFS'" + choice([' '*1, ' '*2]) + "''")
    setOfsVar = choice(setOfsVarSyntax)

    setOfsVarBackSyntax = []
    setOfsVarBackSyntax.append('Set-Item' + choice([' '*1, ' '*2]) + "'Variable:OFS'" + choice([' '*1, ' '*2]) + "' '")
    setOfsVarBackSyntax.append('Set-Item' + choice([' '*1, ' '*2]) + "'Variable:OFS'" + choice([' '*1, ' '*2]) + "' '")
    setOfsVarBack = choice(setOfsVarBackSyntax)

    # Randomize case of $SetOfsVar and $SetOfsVarBack.
    setOfsVar = ''.join(choice([i.upper(), i.lower()]) for i in setOfsVar)
    setOfsVarBack = ''.join(choice([i.upper(), i.lower()]) for i in setOfsVarBack)

    # Generate the code that will decrypt and execute the payload and randomly select one.
    baseScriptArray = []
    baseScriptArray.append('[' + charStr + '[]' + ']' + choice(['', ' ']) + encodedArray)
    baseScriptArray.append('(' + choice(['', ' ']) + "'" + delimitedEncodedArray + "'." + split + "(" + choice(['', ' ']) + "'" + randomDelimitersToPrint + "'" + choice(['', ' ']) + ')' + choice(['', ' ']) + '|' + choice(['', ' ']) + forEachObject + choice(['', ' ']) + '{' + choice(['', ' ']) + '(' + choice(['', ' ']) + randomConversionSyntax + ')' + choice(['', ' ']) + '}' + choice(['', ' ']) + ')')
    baseScriptArray.append('(' + choice(['', ' ']) + "'" + delimitedEncodedArray + "'" + choice(['', ' ']) + randomDelimitersToPrintForDashSplit + choice(['', ' ']) + '|' + choice(['', ' ']) + forEachObject + choice(['', ' ']) + '{' + choice(['', ' ']) + '(' + choice(['', ' ']) + randomConversionSyntax + ')' + choice(['', ' ']) + '}' + choice(['', ' ']) + ')')
    baseScriptArray.append('(' + choice(['', ' ']) + encodedArray + choice(['', ' ']) + '|' + choice(['', ' ']) + forEachObject + choice(['', ' ']) + '{' + choice(['', ' ']) + '(' + choice(['', ' ']) + randomConversionSyntax + ')' + choice(['', ' ']) + '}' + choice(['', ' ']) + ')')

    # Generate random JOIN syntax for all above options
    newScriptArray = []
    newScriptArray.append(choice(baseScriptArray) + choice(['', ' ']) + join + choice(['', ' ']) + "''")
    newScriptArray.append(join + choice(['', ' ']) + choice(baseScriptArray))
    newScriptArray.append(strJoin + '(' + choice(['', ' ']) + "''" + choice(['', ' ']) + ',' + choice(['', ' ']) + choice(baseScriptArray) + choice(['', ' ']) + ')')
    newScriptArray.append('"' + choice(['', ' ']) + '$(' + choice(['', ' ']) + setOfsVar + choice(['', ' ']) + ')' + choice(['', ' ']) + '"' + choice(['', ' ']) + '+' + choice(['', ' ']) + strStr + choice(baseScriptArray) + choice(['', ' ']) + '+' + '"' + choice(['', ' ']) + '$(' + choice(['', ' ']) + setOfsVarBack + choice(['', ' ']) + ')' + choice(['', ' ']) + '"')

    # Randomly select one of the above commands.
    newScript = choice(newScriptArray)

    # Generate random invoke operation syntax.
    # Below code block is a copy from Out-ObfuscatedStringCommand.ps1. It is copied into this encoding function so that this will remain a standalone script without dependencies.
    invokeExpressionSyntax = []
    invokeExpressionSyntax.append(choice(['IEX', 'Invoke-Expression']))

    # Added below slightly-randomized obfuscated ways to form the string 'iex' and then invoke it with . or &.
    # Though far from fully built out, these are included to highlight how IEX/Invoke-Expression is a great indicator but not a silver bullet.
    # These methods draw on common environment variable values and PowerShell Automatic Variable values/methods/members/properties/etc.
    invocationOperator = choice(['.','&']) + choice(['', ' '])
    invokeExpressionSyntax.append(invocationOperator + "( $ShellId[1]+$ShellId[13]+'x')")
    invokeExpressionSyntax.append(invocationOperator + "( $PSHome[" + choice(['4', '21']) + "]+$PSHOME[" + choice(['30', '34']) + "]+'x')")
    invokeExpressionSyntax.append(invocationOperator + "( $env:Public[13]+$env:Public[5]+'x')")
    invokeExpressionSyntax.append(invocationOperator + "( $env:ComSpec[4," + choice(['15', '24', '26']) + ",25]-Join'')")
    invokeExpressionSyntax.append(invocationOperator + "((" + choice(['Get-Variable','GV','Variable']) + " '*mdr*').Name[3,11,2]-Join'')")
    invokeExpressionSyntax.append(invocationOperator + "( " + choice(['$VerbosePreference.ToString()','([String]$VerbosePreference)']) + "[1,3]+'x'-Join'')")

    # Randomly choose from above invoke operation syntaxes.
    invokeExpression = choice(invokeExpressionSyntax)

    # Randomize the case of selected invoke operation.
    invokeExpression = ''.join(choice([i.upper(), i.lower()]) for i in invokeExpression)

    # Choose random Invoke-Expression/IEX syntax and ordering: IEX ($ScriptString) or ($ScriptString | IEX)
    invokeOptions = []
    invokeOptions.append(choice(['', ' ']) + invokeExpression + choice(['', ' ']) + '(' + choice(['', ' ']) + newScript + choice(['', ' ']) + ')' + choice(['', ' ']))
    invokeOptions.append(choice(['', ' ']) + newScript + choice(['', ' ']) + '|' + choice(['', ' ']) + invokeExpression)

    obfuscatedPayload = choice(invokeOptions)

    """
    # Array to store all selected PowerShell execution flags.
    powerShellFlags = []

    noProfile = '-nop'
    nonInteractive = '-noni'
    windowStyle = '-w'

    # Build the PowerShell execution flags by randomly selecting execution flags substrings and randomizing the order.
    # This is to prevent Blue Team from placing false hope in simple signatures for common substrings of these execution flags.
    commandlineOptions = []
    commandlineOptions.append(noProfile[0:randrange(4, len(noProfile) + 1, 1)])
    commandlineOptions.append(nonInteractive[0:randrange(5, len(nonInteractive) + 1, 1)])
    # Randomly decide to write WindowStyle value with flag substring or integer value.
    commandlineOptions.append(''.join(windowStyle[0:randrange(2, len(windowStyle) + 1, 1)] + choice([' '*1, ' '*2, ' '*3]) + choice(['1','h','hi','hid','hidd','hidde'])))

    # Randomize the case of all command-line arguments.
    for count, option in enumerate(commandlineOptions):
        commandlineOptions[count] = ''.join(choice([i.upper(), i.lower()]) for i in option)

    for count, option in enumerate(commandlineOptions):
        commandlineOptions[count] = ''.join(option)

    commandlineOptions = sample(commandlineOptions, len(commandlineOptions))
    commandlineOptions = ''.join(i + choice([' '*1, ' '*2, ' '*3]) for i in commandlineOptions)

    obfuscatedPayload = 'powershell.exe ' + commandlineOptions + newScript
    """

    return obfuscatedPayload
[ "def", "invoke_obfuscation", "(", "scriptString", ")", ":", "# Add letters a-z with random case to $RandomDelimiters.", "alphabet", "=", "''", ".", "join", "(", "choice", "(", "[", "i", ".", "upper", "(", ")", ",", "i", "]", ")", "for", "i", "in", "ascii_lowercase", ")", "# Create list of random dxelimiters called randomDelimiters.", "# Avoid using . * ' \" [ ] ( ) etc. as delimiters as these will cause problems in the -Split command syntax.", "randomDelimiters", "=", "[", "'_'", ",", "'-'", ",", "','", ",", "'{'", ",", "'}'", ",", "'~'", ",", "'!'", ",", "'@'", ",", "'%'", ",", "'&'", ",", "'<'", ",", "'>'", ",", "';'", ",", "':'", "]", "for", "i", "in", "alphabet", ":", "randomDelimiters", ".", "append", "(", "i", ")", "# Only use a subset of current delimiters to randomize what you see in every iteration of this script's output.", "randomDelimiters", "=", "[", "choice", "(", "randomDelimiters", ")", "for", "_", "in", "range", "(", "int", "(", "len", "(", "randomDelimiters", ")", "/", "4", ")", ")", "]", "# Convert $ScriptString to delimited ASCII values in [Char] array separated by random delimiter from defined list $RandomDelimiters.", "delimitedEncodedArray", "=", "''", "for", "char", "in", "scriptString", ":", "delimitedEncodedArray", "+=", "str", "(", "ord", "(", "char", ")", ")", "+", "choice", "(", "randomDelimiters", ")", "# Remove trailing delimiter from $DelimitedEncodedArray.", "delimitedEncodedArray", "=", "delimitedEncodedArray", "[", ":", "-", "1", "]", "# Create printable version of $RandomDelimiters in random order to be used by final command.", "test", "=", "sample", "(", "randomDelimiters", ",", "len", "(", "randomDelimiters", ")", ")", "randomDelimitersToPrint", "=", "''", ".", "join", "(", "i", "for", "i", "in", "test", ")", "# Generate random case versions for necessary operations.", "forEachObject", "=", "choice", "(", "[", "'ForEach'", ",", "'ForEach-Object'", ",", "'%'", "]", ")", "strJoin", "=", "''", ".", "join", "(", "choice", "(", "[", "i", ".", "upper", "(", ")", ",", "i", ".", "lower", "(", ")", "]", ")", "for", "i", "in", "'[String]::Join'", ")", "strStr", "=", "''", ".", "join", "(", "choice", "(", "[", "i", ".", "upper", "(", ")", ",", "i", ".", "lower", "(", ")", "]", ")", "for", "i", "in", "'[String]'", ")", "join", "=", "''", ".", "join", "(", "choice", "(", "[", "i", ".", "upper", "(", ")", ",", "i", ".", "lower", "(", ")", "]", ")", "for", "i", "in", "'-Join'", ")", "charStr", "=", "''", ".", "join", "(", "choice", "(", "[", "i", ".", "upper", "(", ")", ",", "i", ".", "lower", "(", ")", "]", ")", "for", "i", "in", "'Char'", ")", "integer", "=", "''", ".", "join", "(", "choice", "(", "[", "i", ".", "upper", "(", ")", ",", "i", ".", "lower", "(", ")", "]", ")", "for", "i", "in", "'Int'", ")", "forEachObject", "=", "''", ".", "join", "(", "choice", "(", "[", "i", ".", "upper", "(", ")", ",", "i", ".", "lower", "(", ")", "]", ")", "for", "i", "in", "forEachObject", ")", "# Create printable version of $RandomDelimiters in random order to be used by final command specifically for -Split syntax.", "randomDelimitersToPrintForDashSplit", "=", "''", "for", "delim", "in", "randomDelimiters", ":", "# Random case 'split' string.", "split", "=", "''", ".", "join", "(", "choice", "(", "[", "i", ".", "upper", "(", ")", ",", "i", ".", "lower", "(", ")", "]", ")", "for", "i", "in", "'Split'", ")", "randomDelimitersToPrintForDashSplit", "+=", "'-'", "+", "split", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'\\''", "+", "delim", "+", "'\\''", "+", 
"choice", "(", "[", "''", ",", "' '", "]", ")", "randomDelimitersToPrintForDashSplit", "=", "randomDelimitersToPrintForDashSplit", ".", "strip", "(", "'\\t\\n\\r'", ")", "# Randomly select between various conversion syntax options.", "randomConversionSyntax", "=", "[", "]", "randomConversionSyntax", ".", "append", "(", "'['", "+", "charStr", "+", "']'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'['", "+", "integer", "+", "']'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'$_'", ")", "randomConversionSyntax", ".", "append", "(", "'['", "+", "integer", "+", "']'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'$_'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "choice", "(", "[", "'-as'", ",", "'-As'", ",", "'-aS'", ",", "'-AS'", "]", ")", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'['", "+", "charStr", "+", "']'", ")", "randomConversionSyntax", "=", "choice", "(", "randomConversionSyntax", ")", "# Create array syntax for encoded scriptString as alternative to .Split/-Split syntax.", "encodedArray", "=", "''", "for", "char", "in", "scriptString", ":", "encodedArray", "+=", "str", "(", "ord", "(", "char", ")", ")", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "','", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "# Remove trailing comma from encodedArray", "encodedArray", "=", "'('", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "encodedArray", ".", "rstrip", "(", ")", ".", "rstrip", "(", "','", ")", "+", "')'", "# Generate random syntax to create/set OFS variable ($OFS is the Output Field Separator automatic variable).", "# Using Set-Item and Set-Variable/SV/SET syntax. Not using New-Item in case OFS variable already exists.", "# If the OFS variable did exists then we could use even more syntax: $varname, Set-Variable/SV, Set-Item/SET, Get-Variable/GV/Variable, Get-ChildItem/GCI/ChildItem/Dir/Ls", "# For more info: https://msdn.microsoft.com/en-us/powershell/reference/5.1/microsoft.powershell.core/about/about_automatic_variables", "setOfsVarSyntax", "=", "[", "]", "setOfsVarSyntax", ".", "append", "(", "'Set-Item'", "+", "choice", "(", "[", "' '", "*", "1", ",", "' '", "*", "2", "]", ")", "+", "\"'Variable:OFS'\"", "+", "choice", "(", "[", "' '", "*", "1", ",", "' '", "*", "2", "]", ")", "+", "\"''\"", ")", "setOfsVarSyntax", ".", "append", "(", "choice", "(", "[", "'Set-Variable'", ",", "'SV'", ",", "'SET'", "]", ")", "+", "choice", "(", "[", "' '", "*", "1", ",", "' '", "*", "2", "]", ")", "+", "\"'OFS'\"", "+", "choice", "(", "[", "' '", "*", "1", ",", "' '", "*", "2", "]", ")", "+", "\"''\"", ")", "setOfsVar", "=", "choice", "(", "setOfsVarSyntax", ")", "setOfsVarBackSyntax", "=", "[", "]", "setOfsVarBackSyntax", ".", "append", "(", "'Set-Item'", "+", "choice", "(", "[", "' '", "*", "1", ",", "' '", "*", "2", "]", ")", "+", "\"'Variable:OFS'\"", "+", "choice", "(", "[", "' '", "*", "1", ",", "' '", "*", "2", "]", ")", "+", "\"' '\"", ")", "setOfsVarBackSyntax", ".", "append", "(", "'Set-Item'", "+", "choice", "(", "[", "' '", "*", "1", ",", "' '", "*", "2", "]", ")", "+", "\"'Variable:OFS'\"", "+", "choice", "(", "[", "' '", "*", "1", ",", "' '", "*", "2", "]", ")", "+", "\"' '\"", ")", "setOfsVarBack", "=", "choice", "(", "setOfsVarBackSyntax", ")", "# Randomize case of $SetOfsVar and $SetOfsVarBack.", "setOfsVar", "=", "''", ".", "join", "(", "choice", "(", "[", "i", ".", "upper", "(", ")", ",", "i", ".", "lower", "(", ")", "]", ")", "for", "i", "in", "setOfsVar", ")", 
"setOfsVarBack", "=", "''", ".", "join", "(", "choice", "(", "[", "i", ".", "upper", "(", ")", ",", "i", ".", "lower", "(", ")", "]", ")", "for", "i", "in", "setOfsVarBack", ")", "# Generate the code that will decrypt and execute the payload and randomly select one.", "baseScriptArray", "=", "[", "]", "baseScriptArray", ".", "append", "(", "'['", "+", "charStr", "+", "'[]'", "+", "']'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "encodedArray", ")", "baseScriptArray", ".", "append", "(", "'('", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "\"'\"", "+", "delimitedEncodedArray", "+", "\"'.\"", "+", "split", "+", "\"(\"", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "\"'\"", "+", "randomDelimitersToPrint", "+", "\"'\"", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "')'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'|'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "forEachObject", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'{'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'('", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "randomConversionSyntax", "+", "')'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'}'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "')'", ")", "baseScriptArray", ".", "append", "(", "'('", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "\"'\"", "+", "delimitedEncodedArray", "+", "\"'\"", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "randomDelimitersToPrintForDashSplit", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'|'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "forEachObject", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'{'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'('", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "randomConversionSyntax", "+", "')'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'}'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "')'", ")", "baseScriptArray", ".", "append", "(", "'('", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "encodedArray", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'|'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "forEachObject", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'{'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'('", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "randomConversionSyntax", "+", "')'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'}'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "')'", ")", "# Generate random JOIN syntax for all above options", "newScriptArray", "=", "[", "]", "newScriptArray", ".", "append", "(", "choice", "(", "baseScriptArray", ")", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "join", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "\"''\"", ")", "newScriptArray", ".", "append", "(", "join", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "choice", "(", "baseScriptArray", ")", ")", "newScriptArray", ".", "append", "(", "strJoin", "+", "'('", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "\"''\"", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "','", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "choice", "(", "baseScriptArray", ")", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "')'", ")", "newScriptArray", ".", 
"append", "(", "'\"'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'$('", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "setOfsVar", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "')'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'\"'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'+'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "strStr", "+", "choice", "(", "baseScriptArray", ")", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'+'", "+", "'\"'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'$('", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "setOfsVarBack", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "')'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'\"'", ")", "# Randomly select one of the above commands.", "newScript", "=", "choice", "(", "newScriptArray", ")", "# Generate random invoke operation syntax.", "# Below code block is a copy from Out-ObfuscatedStringCommand.ps1. It is copied into this encoding function so that this will remain a standalone script without dependencies.", "invokeExpressionSyntax", "=", "[", "]", "invokeExpressionSyntax", ".", "append", "(", "choice", "(", "[", "'IEX'", ",", "'Invoke-Expression'", "]", ")", ")", "# Added below slightly-randomized obfuscated ways to form the string 'iex' and then invoke it with . or &.", "# Though far from fully built out, these are included to highlight how IEX/Invoke-Expression is a great indicator but not a silver bullet.", "# These methods draw on common environment variable values and PowerShell Automatic Variable values/methods/members/properties/etc.", "invocationOperator", "=", "choice", "(", "[", "'.'", ",", "'&'", "]", ")", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "invokeExpressionSyntax", ".", "append", "(", "invocationOperator", "+", "\"( $ShellId[1]+$ShellId[13]+'x')\"", ")", "invokeExpressionSyntax", ".", "append", "(", "invocationOperator", "+", "\"( $PSHome[\"", "+", "choice", "(", "[", "'4'", ",", "'21'", "]", ")", "+", "\"]+$PSHOME[\"", "+", "choice", "(", "[", "'30'", ",", "'34'", "]", ")", "+", "\"]+'x')\"", ")", "invokeExpressionSyntax", ".", "append", "(", "invocationOperator", "+", "\"( $env:Public[13]+$env:Public[5]+'x')\"", ")", "invokeExpressionSyntax", ".", "append", "(", "invocationOperator", "+", "\"( $env:ComSpec[4,\"", "+", "choice", "(", "[", "'15'", ",", "'24'", ",", "'26'", "]", ")", "+", "\",25]-Join'')\"", ")", "invokeExpressionSyntax", ".", "append", "(", "invocationOperator", "+", "\"((\"", "+", "choice", "(", "[", "'Get-Variable'", ",", "'GV'", ",", "'Variable'", "]", ")", "+", "\" '*mdr*').Name[3,11,2]-Join'')\"", ")", "invokeExpressionSyntax", ".", "append", "(", "invocationOperator", "+", "\"( \"", "+", "choice", "(", "[", "'$VerbosePreference.ToString()'", ",", "'([String]$VerbosePreference)'", "]", ")", "+", "\"[1,3]+'x'-Join'')\"", ")", "# Randomly choose from above invoke operation syntaxes.", "invokeExpression", "=", "choice", "(", "invokeExpressionSyntax", ")", "# Randomize the case of selected invoke operation.", "invokeExpression", "=", "''", ".", "join", "(", "choice", "(", "[", "i", ".", "upper", "(", ")", ",", "i", ".", "lower", "(", ")", "]", ")", "for", "i", "in", "invokeExpression", ")", "# Choose random Invoke-Expression/IEX syntax and ordering: IEX ($ScriptString) or ($ScriptString | IEX)", "invokeOptions", "=", "[", "]", "invokeOptions", ".", "append", "(", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", 
"invokeExpression", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'('", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "newScript", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "')'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", ")", "invokeOptions", ".", "append", "(", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "newScript", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "'|'", "+", "choice", "(", "[", "''", ",", "' '", "]", ")", "+", "invokeExpression", ")", "obfuscatedPayload", "=", "choice", "(", "invokeOptions", ")", "return", "obfuscatedPayload" ]
# Array to store all selected PowerShell execution flags.
powerShellFlags = []

noProfile = '-nop'
nonInteractive = '-noni'
windowStyle = '-w'

# Build the PowerShell execution flags by randomly selecting execution flags substrings and randomizing the order.
# This is to prevent Blue Team from placing false hope in simple signatures for common substrings of these execution flags.
commandlineOptions = []
commandlineOptions.append(noProfile[0:randrange(4, len(noProfile) + 1, 1)])
commandlineOptions.append(nonInteractive[0:randrange(5, len(nonInteractive) + 1, 1)])
# Randomly decide to write WindowStyle value with flag substring or integer value.
commandlineOptions.append(''.join(windowStyle[0:randrange(2, len(windowStyle) + 1, 1)] + choice([' '*1, ' '*2, ' '*3]) + choice(['1','h','hi','hid','hidd','hidde'])))

# Randomize the case of all command-line arguments.
for count, option in enumerate(commandlineOptions):
    commandlineOptions[count] = ''.join(choice([i.upper(), i.lower()]) for i in option)

for count, option in enumerate(commandlineOptions):
    commandlineOptions[count] = ''.join(option)

commandlineOptions = sample(commandlineOptions, len(commandlineOptions))
commandlineOptions = ''.join(i + choice([' '*1, ' '*2, ' '*3]) for i in commandlineOptions)

obfuscatedPayload = 'powershell.exe ' + commandlineOptions + newScript
[ "#", "Array", "to", "store", "all", "selected", "PowerShell", "execution", "flags", ".", "powerShellFlags", "=", "[]" ]
python
train
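Two string idioms the function leans on can be exercised in isolation; this is a minimal sketch, not part of the CrackMapExec API:

from random import choice

# Randomize the case of each character, as done for every PowerShell keyword above.
def random_case(s):
    return ''.join(choice([c.upper(), c.lower()]) for c in s)

# Encode a string as ASCII codes separated by random delimiters, then strip
# the trailing delimiter, mirroring the delimitedEncodedArray construction.
def delimit_encode(s, delimiters):
    out = ''.join(str(ord(c)) + choice(delimiters) for c in s)
    return out[:-1]

print(random_case('ForEach-Object'))     # e.g. 'fOrEACh-objECt'
print(delimit_encode('hi', ['_', '~']))  # e.g. '104~105'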
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/tools/common.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/common.py#L537-L545
def prepend_path_variable_command(variable, paths):
    """
        Returns a command that prepends the given paths to the named path
        variable on the current platform.
    """
    assert isinstance(variable, basestring)
    assert is_iterable_typed(paths, basestring)
    return path_variable_setting_command(
        variable, paths + [expand_variable(variable)])
[ "def", "prepend_path_variable_command", "(", "variable", ",", "paths", ")", ":", "assert", "isinstance", "(", "variable", ",", "basestring", ")", "assert", "is_iterable_typed", "(", "paths", ",", "basestring", ")", "return", "path_variable_setting_command", "(", "variable", ",", "paths", "+", "[", "expand_variable", "(", "variable", ")", "]", ")" ]
Returns a command that prepends the given paths to the named path variable on the current platform.
[ "Returns", "a", "command", "that", "prepends", "the", "given", "paths", "to", "the", "named", "path", "variable", "on", "the", "current", "platform", "." ]
python
train
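A minimal sketch of what this helper composes, assuming a POSIX sh target; expand_variable and path_variable_setting_command below are hypothetical stand-ins for the Boost.Build helpers, which are not shown in this record:

# Hypothetical stand-ins for Boost.Build's helpers, assuming POSIX sh output.
def expand_variable(variable):
    return '$' + variable

def path_variable_setting_command(variable, paths):
    # Emit a sh assignment; the real helper is platform-aware.
    return 'export {}="{}"'.format(variable, ':'.join(paths))

def prepend_path_variable_command(variable, paths):
    # New paths go first; the existing value is kept via its own expansion.
    return path_variable_setting_command(variable, paths + [expand_variable(variable)])

print(prepend_path_variable_command('PATH', ['/opt/tool/bin']))
# export PATH="/opt/tool/bin:$PATH"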
mitsei/dlkit
dlkit/json_/repository/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/objects.py#L290-L316
def get_source_id(self):
    """Gets the ``Resource Id`` of the source of this asset.

    The source is the original owner of the copyright of this asset
    and may differ from the creator of this asset. The source for a
    published book written by Margaret Mitchell would be Macmillan.
    The source for an unpublished painting by Arthur Goodwin would be
    Arthur Goodwin.

    An ``Asset`` is ``Sourceable`` and also contains a provider
    identity. The provider is the entity that makes this digital asset
    available in this repository but may or may not be the publisher
    of the contents depicted in the asset. For example, a map
    published by Ticknor and Fields in 1848 may have a provider of
    Library of Congress and a source of Ticknor and Fields. If copied
    from a repository at Middlebury College, the provider would be
    Middlebury College and a source of Ticknor and Fields.

    return: (osid.id.Id) - the source ``Id``
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for osid.resource.Resource.get_avatar_id_template
    if not bool(self._my_map['sourceId']):
        raise errors.IllegalState('this Asset has no source')
    else:
        return Id(self._my_map['sourceId'])
[ "def", "get_source_id", "(", "self", ")", ":", "# Implemented from template for osid.resource.Resource.get_avatar_id_template", "if", "not", "bool", "(", "self", ".", "_my_map", "[", "'sourceId'", "]", ")", ":", "raise", "errors", ".", "IllegalState", "(", "'this Asset has no source'", ")", "else", ":", "return", "Id", "(", "self", ".", "_my_map", "[", "'sourceId'", "]", ")" ]
Gets the ``Resource Id`` of the source of this asset.

The source is the original owner of the copyright of this asset and
may differ from the creator of this asset. The source for a published
book written by Margaret Mitchell would be Macmillan. The source for
an unpublished painting by Arthur Goodwin would be Arthur Goodwin.

An ``Asset`` is ``Sourceable`` and also contains a provider identity.
The provider is the entity that makes this digital asset available in
this repository but may or may not be the publisher of the contents
depicted in the asset. For example, a map published by Ticknor and
Fields in 1848 may have a provider of Library of Congress and a source
of Ticknor and Fields. If copied from a repository at Middlebury
College, the provider would be Middlebury College and a source of
Ticknor and Fields.

return: (osid.id.Id) - the source ``Id``
*compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "Resource", "Id", "of", "the", "source", "of", "this", "asset", "." ]
python
train
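The empty-value guard in isolation; IllegalState and the plain-string return below are stand-ins for dlkit's errors module and Id wrapper:

# IllegalState is a stand-in here; dlkit raises errors.IllegalState and
# wraps the stored string in an Id object.
class IllegalState(Exception):
    pass

def get_source_id(my_map):
    # Empty string and None both count as "no source", matching bool() above.
    if not bool(my_map.get('sourceId')):
        raise IllegalState('this Asset has no source')
    return my_map['sourceId']

print(get_source_id({'sourceId': 'osid.id.Id%3A123'}))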
spyder-ide/spyder
spyder/plugins/projects/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/projects/plugin.py#L389-L396
def get_project_filenames(self):
    """Get the list of recent filenames of a project"""
    recent_files = []
    if self.current_active_project:
        recent_files = self.current_active_project.get_recent_files()
    elif self.latest_project:
        recent_files = self.latest_project.get_recent_files()
    return recent_files
[ "def", "get_project_filenames", "(", "self", ")", ":", "recent_files", "=", "[", "]", "if", "self", ".", "current_active_project", ":", "recent_files", "=", "self", ".", "current_active_project", ".", "get_recent_files", "(", ")", "elif", "self", ".", "latest_project", ":", "recent_files", "=", "self", ".", "latest_project", ".", "get_recent_files", "(", ")", "return", "recent_files" ]
Get the list of recent filenames of a project
[ "Get", "the", "list", "of", "recent", "filenames", "of", "a", "project" ]
python
train
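The fallback chain in isolation; Project below is a hypothetical stub, not Spyder's project class:

# Project is a hypothetical stub exposing get_recent_files(), as the record assumes.
class Project:
    def __init__(self, files):
        self._files = files

    def get_recent_files(self):
        return self._files

def get_project_filenames(current_active_project, latest_project):
    # Prefer the active project, fall back to the most recent one, else [].
    if current_active_project:
        return current_active_project.get_recent_files()
    if latest_project:
        return latest_project.get_recent_files()
    return []

print(get_project_filenames(None, Project(['a.py', 'b.py'])))  # ['a.py', 'b.py']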
pmacosta/peng
peng/wave_functions.py
https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/peng/wave_functions.py#L1959-L1986
def wint(wave):
    r"""
    Convert a waveform's dependent variable vector to integer.

    :param wave: Waveform
    :type  wave: :py:class:`peng.eng.Waveform`

    :rtype: :py:class:`peng.eng.Waveform`

    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
    .. Auto-generated exceptions documentation for
    .. peng.wave_functions.wint

    :raises:
     * RuntimeError (Argument \`wave\` is not valid)

     * TypeError (Cannot convert complex to integer)

    .. [[[end]]]
    """
    pexdoc.exh.addex(
        TypeError,
        "Cannot convert complex to integer",
        wave._dep_vector.dtype.name.startswith("complex"),
    )
    ret = copy.copy(wave)
    ret._dep_vector = ret._dep_vector.astype(np.int)
    return ret
[ "def", "wint", "(", "wave", ")", ":", "pexdoc", ".", "exh", ".", "addex", "(", "TypeError", ",", "\"Cannot convert complex to integer\"", ",", "wave", ".", "_dep_vector", ".", "dtype", ".", "name", ".", "startswith", "(", "\"complex\"", ")", ",", ")", "ret", "=", "copy", ".", "copy", "(", "wave", ")", "ret", ".", "_dep_vector", "=", "ret", ".", "_dep_vector", ".", "astype", "(", "np", ".", "int", ")", "return", "ret" ]
r"""
Convert a waveform's dependent variable vector to integer.

:param wave: Waveform
:type  wave: :py:class:`peng.eng.Waveform`

:rtype: :py:class:`peng.eng.Waveform`

.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.wint

:raises:
 * RuntimeError (Argument \`wave\` is not valid)

 * TypeError (Cannot convert complex to integer)

.. [[[end]]]
[ "r", "Convert", "a", "waveform", "s", "dependent", "variable", "vector", "to", "integer", "." ]
python
test
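The conversion step in isolation, assuming numpy is installed; note that np.int in the record is a deprecated alias, so this sketch casts with plain int:

import numpy as np

def to_int_vector(dep_vector):
    # Refuse complex input, then cast a copy to integer (truncating toward zero).
    if dep_vector.dtype.name.startswith('complex'):
        raise TypeError('Cannot convert complex to integer')
    return dep_vector.astype(int)

print(to_int_vector(np.array([1.9, 2.1, -3.7])))  # [ 1  2 -3]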
senaite/senaite.core
bika/lims/content/calculation.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/calculation.py#L215-L228
def setFormula(self, Formula=None):
    """Set the Dependent Services from the text of the calculation Formula
    """
    bsc = getToolByName(self, 'bika_setup_catalog')
    if Formula is None:
        self.setDependentServices(None)
        self.getField('Formula').set(self, Formula)
    else:
        keywords = re.compile(r"\[([^.^\]]+)\]").findall(Formula)
        brains = bsc(portal_type='AnalysisService', getKeyword=keywords)
        services = [brain.getObject() for brain in brains]
        self.getField('DependentServices').set(self, services)
        self.getField('Formula').set(self, Formula)
[ "def", "setFormula", "(", "self", ",", "Formula", "=", "None", ")", ":", "bsc", "=", "getToolByName", "(", "self", ",", "'bika_setup_catalog'", ")", "if", "Formula", "is", "None", ":", "self", ".", "setDependentServices", "(", "None", ")", "self", ".", "getField", "(", "'Formula'", ")", ".", "set", "(", "self", ",", "Formula", ")", "else", ":", "keywords", "=", "re", ".", "compile", "(", "r\"\\[([^.^\\]]+)\\]\"", ")", ".", "findall", "(", "Formula", ")", "brains", "=", "bsc", "(", "portal_type", "=", "'AnalysisService'", ",", "getKeyword", "=", "keywords", ")", "services", "=", "[", "brain", ".", "getObject", "(", ")", "for", "brain", "in", "brains", "]", "self", ".", "getField", "(", "'DependentServices'", ")", ".", "set", "(", "self", ",", "services", ")", "self", ".", "getField", "(", "'Formula'", ")", ".", "set", "(", "self", ",", "Formula", ")" ]
Set the Dependent Services from the text of the calculation Formula
[ "Set", "the", "Dependent", "Services", "from", "the", "text", "of", "the", "calculation", "Formula" ]
python
train
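The keyword-extraction step can be checked on its own; the formula text below is made up:

import re

# Bracketed service keywords are pulled from the formula text before the
# catalog lookup.
formula = "[Ca] + [Mg] * 2"
keywords = re.compile(r"\[([^.^\]]+)\]").findall(formula)
print(keywords)  # ['Ca', 'Mg']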
crytic/pyevmasm
pyevmasm/evmasm.py
https://github.com/crytic/pyevmasm/blob/d27daf19a36d630a31499e783b716cf1165798d8/pyevmasm/evmasm.py#L220-L225
def bytes(self):
    """ Encoded instruction """
    b = [bytes([self._opcode])]
    for offset in reversed(range(self.operand_size)):
        b.append(bytes([(self.operand >> offset * 8) & 0xff]))
    return b''.join(b)
[ "def", "bytes", "(", "self", ")", ":", "b", "=", "[", "bytes", "(", "[", "self", ".", "_opcode", "]", ")", "]", "for", "offset", "in", "reversed", "(", "range", "(", "self", ".", "operand_size", ")", ")", ":", "b", ".", "append", "(", "bytes", "(", "[", "(", "self", ".", "operand", ">>", "offset", "*", "8", ")", "&", "0xff", "]", ")", ")", "return", "b''", ".", "join", "(", "b", ")" ]
Encoded instruction
[ "Encoded", "instruction" ]
python
valid
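The big-endian operand encoding in isolation; assuming EVM's PUSH2 opcode 0x61 with operand 0x6080, the expected bytes are 61 60 80:

def encode(opcode, operand, operand_size):
    # Opcode byte first, then the operand bytes from most to least significant.
    b = [bytes([opcode])]
    for offset in reversed(range(operand_size)):
        b.append(bytes([(operand >> offset * 8) & 0xff]))
    return b''.join(b)

print(encode(0x61, 0x6080, 2).hex())  # 616080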
cidrblock/modelsettings
modelsettings/__init__.py
https://github.com/cidrblock/modelsettings/blob/09763c111fb38b3ba7a13cc95ca59e4393fe75ba/modelsettings/__init__.py#L220-L228
def generate_env(self):
    """ Generate sample environment variables """
    for key in sorted(list(self.spec.keys())):
        if self.spec[key]['type'] in (dict, list):
            value = f"\'{json.dumps(self.spec[key].get('example', ''))}\'"
        else:
            value = f"{self.spec[key].get('example', '')}"
        print(f"export {self.env_prefix}_{key.upper()}={value}")
[ "def", "generate_env", "(", "self", ")", ":", "for", "key", "in", "sorted", "(", "list", "(", "self", ".", "spec", ".", "keys", "(", ")", ")", ")", ":", "if", "self", ".", "spec", "[", "key", "]", "[", "'type'", "]", "in", "(", "dict", ",", "list", ")", ":", "value", "=", "f\"\\'{json.dumps(self.spec[key].get('example', ''))}\\'\"", "else", ":", "value", "=", "f\"{self.spec[key].get('example', '')}\"", "print", "(", "f\"export {self.env_prefix}_{key.upper()}={value}\"", ")" ]
Generate sample environment variables
[ "Generate", "sample", "environment", "variables" ]
python
train
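A self-contained sketch of the export-line generation, with a made-up spec and prefix standing in for the instance attributes:

import json

# Made-up spec and prefix standing in for self.spec and self.env_prefix.
spec = {
    'port': {'type': int, 'example': 8080},
    'tags': {'type': list, 'example': ['a', 'b']},
}
env_prefix = 'APP'

for key in sorted(spec):
    # Container examples are JSON-encoded and single-quoted for the shell.
    if spec[key]['type'] in (dict, list):
        value = f"'{json.dumps(spec[key].get('example', ''))}'"
    else:
        value = f"{spec[key].get('example', '')}"
    print(f"export {env_prefix}_{key.upper()}={value}")
# export APP_PORT=8080
# export APP_TAGS='["a", "b"]'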
EntilZha/PyFunctional
functional/pipeline.py
https://github.com/EntilZha/PyFunctional/blob/ac04e4a8552b0c464a7f492f7c9862424867b63e/functional/pipeline.py#L1467-L1495
def to_file(self, path, delimiter=None, mode='wt', buffering=-1, encoding=None, errors=None,
            newline=None, compresslevel=9, format=None, check=-1, preset=None, filters=None,
            compression=None):
    """
    Saves the sequence to a file by executing str(self) which becomes str(self.to_list()). If
    delimiter is defined will instead execute self.make_string(delimiter)

    :param path: path to write file
    :param delimiter: if defined, will call make_string(delimiter) and save that to file.
    :param mode: file open mode
    :param buffering: passed to builtins.open
    :param encoding: passed to builtins.open
    :param errors: passed to builtins.open
    :param newline: passed to builtins.open
    :param compression: compression format
    :param compresslevel: passed to gzip.open
    :param format: passed to lzma.open
    :param check: passed to lzma.open
    :param preset: passed to lzma.open
    :param filters: passed to lzma.open
    """
    with universal_write_open(path, mode=mode, buffering=buffering, encoding=encoding,
                              errors=errors, newline=newline, compression=compression,
                              compresslevel=compresslevel, format=format, check=check,
                              preset=preset, filters=filters) as output:
        if delimiter:
            output.write(six.u(self.make_string(delimiter)))
        else:
            output.write(six.u(str(self)))
[ "def", "to_file", "(", "self", ",", "path", ",", "delimiter", "=", "None", ",", "mode", "=", "'wt'", ",", "buffering", "=", "-", "1", ",", "encoding", "=", "None", ",", "errors", "=", "None", ",", "newline", "=", "None", ",", "compresslevel", "=", "9", ",", "format", "=", "None", ",", "check", "=", "-", "1", ",", "preset", "=", "None", ",", "filters", "=", "None", ",", "compression", "=", "None", ")", ":", "with", "universal_write_open", "(", "path", ",", "mode", "=", "mode", ",", "buffering", "=", "buffering", ",", "encoding", "=", "encoding", ",", "errors", "=", "errors", ",", "newline", "=", "newline", ",", "compression", "=", "compression", ",", "compresslevel", "=", "compresslevel", ",", "format", "=", "format", ",", "check", "=", "check", ",", "preset", "=", "preset", ",", "filters", "=", "filters", ")", "as", "output", ":", "if", "delimiter", ":", "output", ".", "write", "(", "six", ".", "u", "(", "self", ".", "make_string", "(", "delimiter", ")", ")", ")", "else", ":", "output", ".", "write", "(", "six", ".", "u", "(", "str", "(", "self", ")", ")", ")" ]
Saves the sequence to a file by executing str(self) which becomes str(self.to_list()). If
delimiter is defined will instead execute self.make_string(delimiter)

:param path: path to write file
:param delimiter: if defined, will call make_string(delimiter) and save that to file.
:param mode: file open mode
:param buffering: passed to builtins.open
:param encoding: passed to builtins.open
:param errors: passed to builtins.open
:param newline: passed to builtins.open
:param compression: compression format
:param compresslevel: passed to gzip.open
:param format: passed to lzma.open
:param check: passed to lzma.open
:param preset: passed to lzma.open
:param filters: passed to lzma.open
[ "Saves", "the", "sequence", "to", "a", "file", "by", "executing", "str", "(", "self", ")", "which", "becomes", "str", "(", "self", ".", "to_list", "()", ")", ".", "If", "delimiter", "is", "defined", "will", "instead", "execute", "self", ".", "make_string", "(", "delimiter", ")" ]
python
train
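A hedged usage sketch, assuming the pyfunctional package is installed and that make_string(delimiter) joins element strings with the delimiter, as the docstring states:

from functional import seq

# With a delimiter the sequence is rendered via make_string(delimiter)
# rather than str(self), so the file should hold "1,2,3".
seq([1, 2, 3]).to_file('numbers.csv', delimiter=',')
with open('numbers.csv') as f:
    print(f.read())  # 1,2,3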
PmagPy/PmagPy
pmagpy/convert_2_magic.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/convert_2_magic.py#L5098-L5357
def jr6_txt(mag_file, dir_path=".", input_dir_path="",
            meas_file="measurements.txt", spec_file="specimens.txt",
            samp_file="samples.txt", site_file="sites.txt", loc_file="locations.txt",
            user="", specnum=1, samp_con='1', location='unknown', lat='', lon='',
            noave=False, volume=12, timezone="UTC", meth_code="LP-NO"):
    """
    Converts JR6 .txt format files to MagIC measurements format files.

    Parameters
    ----------
    mag_file : str
        input file name
    dir_path : str
        working directory, default "."
    input_dir_path : str
        input file directory IF different from dir_path, default ""
    meas_file : str
        output measurement file name, default "measurements.txt"
    spec_file : str
        output specimen file name, default "specimens.txt"
    samp_file: str
        output sample file name, default "samples.txt"
    site_file : str
        output site file name, default "sites.txt"
    loc_file : str
        output location file name, default "locations.txt"
    user : str
        user name, default ""
    specnum : int
        number of characters to designate a specimen, default 0
    samp_con : str
        sample/site naming convention, default '1', see info below
    location : str
        location name, default "unknown"
    lat : float
        latitude, default ""
    lon : float
        longitude, default ""
    noave : bool
        do not average duplicate measurements, default False
        (so by default, DO average)
    volume : float
        volume in ccs, default 12
    timezone : timezone of date/time string in comment string, default UTC
    meth_code : str
        default "LP-NO"

    Returns
    ---------
    Tuple : (True or False indicating if conversion was successful, meas_file name written)
    """
    version_num = pmag.get_version()
    input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
    mag_file = pmag.resolve_file_name(mag_file, input_dir_path)
    input_dir_path = os.path.split(mag_file)[0]
    specnum = - int(specnum)
    samp_con = str(samp_con)
    volume = float(volume) * 1e-6

    # format variables
    mag_file = pmag.resolve_file_name(mag_file, input_dir_path)
    if samp_con.startswith("4"):
        if "-" not in samp_con:
            print("option [4] must be in form 4-Z where Z is an integer")
            return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
        else:
            Z = samp_con.split("-")[1]
            samp_con = "4"
    elif samp_con.startswith("7"):
        if "-" not in samp_con:
            print("option [7] must be in form 7-Z where Z is an integer")
            return False, "naming convention option [7] must be in form 7-Z where Z is an integer"
        else:
            Z = samp_con.split("-")[1]
            samp_con = "7"
    else:
        Z = 1

    # create data holders
    MeasRecs, SpecRecs, SampRecs, SiteRecs, LocRecs = [], [], [], [], []

    data = pmag.open_file(mag_file)
    # remove garbage/blank lines
    data = [i for i in data if len(i) >= 5]
    if not len(data):
        print('No data')
        return

    n = 0
    end = False
    # loop through records
    while not end:
        first_line = data[n].split()
        sampleName = first_line[0]
        demagLevel = first_line[2]
        date = first_line[3] + ":0:0:0"
        n += 2
        third_line = data[n].split()
        if not third_line[0].startswith('SPEC.ANGLES'):
            print('third line of a block should start with SPEC.ANGLES')
            print(third_line)
            return
        specimenAngleDec = third_line[1]
        specimenAngleInc = third_line[2]
        n += 4
        while not data[n].startswith('MEAN'):
            n += 1
        mean_line = data[n]
        Mx = mean_line[1]
        My = mean_line[2]
        Mz = mean_line[3]
        n += 1
        precision_line = data[n].split()
        if not precision_line[0].startswith('Modulus'):
            print('precision line should start with "Modulus"')
            return
        splitExp = precision_line[2].split('A')
        intensityVolStr = precision_line[1] + splitExp[0]
        intensityVol = float(intensityVolStr)

        # check and see if Prec is too big and messes with the parsing.
        precisionStr = ''
        if len(precision_line) == 6:  # normal line
            precisionStr = precision_line[5][0:-1]
        else:
            precisionStr = precision_line[4][0:-1]

        precisionPer = float(precisionStr)
        precision = intensityVol * precisionPer/100

        while not data[n].startswith('SPEC.'):
            n += 1
        specimen_line = data[n].split()
        specimenDec = specimen_line[2]
        specimenInc = specimen_line[3]
        n += 1
        geographic_line = data[n]
        if not geographic_line.startswith('GEOGR'):
            geographic_dec = ''
            geographic_inc = ''
        else:
            geographic_line = geographic_line.split()
            geographicDec = geographic_line[1]
            geographicInc = geographic_line[2]

        # Add data to various MagIC data tables.
        specimen = sampleName
        if specnum != 0:
            sample = specimen[:specnum]
        else:
            sample = specimen
        site = pmag.parse_site(sample, samp_con, Z)

        MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {}

        if specimen != "" and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else "" for x in SpecRecs]:
            SpecRec['specimen'] = specimen
            SpecRec['sample'] = sample
            SpecRec["citations"] = "This study"
            SpecRec["analysts"] = user
            SpecRec['volume'] = volume
            SpecRecs.append(SpecRec)
        if sample != "" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]:
            SampRec['sample'] = sample
            SampRec['site'] = site
            SampRec["citations"] = "This study"
            SampRec["analysts"] = user
            SampRec['azimuth'] = specimenAngleDec
            # convert to magic orientation
            sample_dip = str(float(specimenAngleInc)-90.0)
            SampRec['dip'] = sample_dip
            SampRec['method_codes'] = meth_code
            SampRecs.append(SampRec)
        if site != "" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]:
            SiteRec['site'] = site
            SiteRec['location'] = location
            SiteRec["citations"] = "This study"
            SiteRec["analysts"] = user
            SiteRec['lat'] = lat
            SiteRec['lon'] = lon
            SiteRecs.append(SiteRec)
        if location != "" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]:
            LocRec['location'] = location
            LocRec["citations"] = "This study"
            LocRec["analysts"] = user
            LocRec['lat_n'] = lat
            LocRec['lon_e'] = lon
            LocRec['lat_s'] = lat
            LocRec['lon_w'] = lon
            LocRecs.append(LocRec)

        local = pytz.timezone(timezone)
        naive = datetime.datetime.strptime(date, "%m-%d-%Y:%H:%M:%S")
        local_dt = local.localize(naive, is_dst=None)
        utc_dt = local_dt.astimezone(pytz.utc)
        timestamp = utc_dt.strftime("%Y-%m-%dT%H:%M:%S")+"Z"

        MeasRec["specimen"] = specimen
        MeasRec["timestamp"] = timestamp
        MeasRec['description'] = ''
        MeasRec["citations"] = "This study"
        MeasRec['software_packages'] = version_num
        MeasRec["treat_temp"] = '%8.3e' % (273)  # room temp in kelvin
        MeasRec["meas_temp"] = '%8.3e' % (273)  # room temp in kelvin
        MeasRec["quality"] = 'g'
        MeasRec["standard"] = 'u'
        MeasRec["treat_step_num"] = 0
        MeasRec["treat_ac_field"] = '0'

        if demagLevel == 'NRM':
            meas_type = "LT-NO"
        elif demagLevel[0] == 'A':
            if demagLevel[:2] == 'AD':
                treat = float(demagLevel[2:])
            else:
                treat = float(demagLevel[1:])
            meas_type = "LT-AF-Z"
            MeasRec["treat_ac_field"] = '%8.3e' % (treat*1e-3)  # convert from mT to tesla
        elif demagLevel[0] == 'T':
            meas_type = "LT-T-Z"
            treat = float(demagLevel[1:])
            MeasRec["treat_temp"] = '%8.3e' % (treat+273.)  # temp in kelvin
        else:
            print("measurement type unknown", demagLevel)
            return False, "measurement type unknown"

        MeasRec["magn_moment"] = str(intensityVol*volume)  # Am^2
        MeasRec["magn_volume"] = intensityVolStr  # A/m
        MeasRec["dir_dec"] = specimenDec
        MeasRec["dir_inc"] = specimenInc
        MeasRec['method_codes'] = meas_type
        MeasRecs.append(MeasRec)

        # ignore all the rest of the special characters. Some data files not consistently formatted.
        n += 1
        while ((len(data[n]) <= 5 and data[n] != '') or data[n].startswith('----')):
            n += 1
            if n >= len(data):
                break
        if n >= len(data):
            # we're done!
            end = True

    # end of data while loop

    con = cb.Contribution(output_dir_path, read_tables=[])
    con.add_magic_table_from_data(dtype='specimens', data=SpecRecs)
    con.add_magic_table_from_data(dtype='samples', data=SampRecs)
    con.add_magic_table_from_data(dtype='sites', data=SiteRecs)
    con.add_magic_table_from_data(dtype='locations', data=LocRecs)
    MeasOuts = pmag.measurements_methods3(MeasRecs, noave)
    con.add_magic_table_from_data(dtype='measurements', data=MeasOuts)

    con.tables['specimens'].write_magic_file(custom_name=spec_file, dir_path=dir_path)
    con.tables['samples'].write_magic_file(custom_name=samp_file, dir_path=dir_path)
    con.tables['sites'].write_magic_file(custom_name=site_file, dir_path=dir_path)
    con.tables['locations'].write_magic_file(custom_name=loc_file, dir_path=dir_path)
    con.tables['measurements'].write_magic_file(custom_name=meas_file, dir_path=dir_path)

    return True, meas_file
[ "def", "jr6_txt", "(", "mag_file", ",", "dir_path", "=", "\".\"", ",", "input_dir_path", "=", "\"\"", ",", "meas_file", "=", "\"measurements.txt\"", ",", "spec_file", "=", "\"specimens.txt\"", ",", "samp_file", "=", "\"samples.txt\"", ",", "site_file", "=", "\"sites.txt\"", ",", "loc_file", "=", "\"locations.txt\"", ",", "user", "=", "\"\"", ",", "specnum", "=", "1", ",", "samp_con", "=", "'1'", ",", "location", "=", "'unknown'", ",", "lat", "=", "''", ",", "lon", "=", "''", ",", "noave", "=", "False", ",", "volume", "=", "12", ",", "timezone", "=", "\"UTC\"", ",", "meth_code", "=", "\"LP-NO\"", ")", ":", "version_num", "=", "pmag", ".", "get_version", "(", ")", "input_dir_path", ",", "output_dir_path", "=", "pmag", ".", "fix_directories", "(", "input_dir_path", ",", "dir_path", ")", "mag_file", "=", "pmag", ".", "resolve_file_name", "(", "mag_file", ",", "input_dir_path", ")", "input_dir_path", "=", "os", ".", "path", ".", "split", "(", "mag_file", ")", "[", "0", "]", "specnum", "=", "-", "int", "(", "specnum", ")", "samp_con", "=", "str", "(", "samp_con", ")", "volume", "=", "float", "(", "volume", ")", "*", "1e-6", "# format variables", "mag_file", "=", "pmag", ".", "resolve_file_name", "(", "mag_file", ",", "input_dir_path", ")", "if", "samp_con", ".", "startswith", "(", "\"4\"", ")", ":", "if", "\"-\"", "not", "in", "samp_con", ":", "print", "(", "\"option [4] must be in form 4-Z where Z is an integer\"", ")", "return", "False", ",", "\"naming convention option [4] must be in form 4-Z where Z is an integer\"", "else", ":", "Z", "=", "samp_con", ".", "split", "(", "\"-\"", ")", "[", "1", "]", "samp_con", "=", "\"4\"", "elif", "samp_con", ".", "startswith", "(", "\"7\"", ")", ":", "if", "\"-\"", "not", "in", "samp_con", ":", "print", "(", "\"option [7] must be in form 7-Z where Z is an integer\"", ")", "return", "False", ",", "\"naming convention option [7] must be in form 7-Z where Z is an integer\"", "else", ":", "Z", "=", "samp_con", ".", "split", "(", "\"-\"", ")", "[", "1", "]", "samp_con", "=", "\"7\"", "else", ":", "Z", "=", "1", "# create data holders", "MeasRecs", ",", "SpecRecs", ",", "SampRecs", ",", "SiteRecs", ",", "LocRecs", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "data", "=", "pmag", ".", "open_file", "(", "mag_file", ")", "# remove garbage/blank lines", "data", "=", "[", "i", "for", "i", "in", "data", "if", "len", "(", "i", ")", ">=", "5", "]", "if", "not", "len", "(", "data", ")", ":", "print", "(", "'No data'", ")", "return", "n", "=", "0", "end", "=", "False", "# loop through records", "while", "not", "end", ":", "first_line", "=", "data", "[", "n", "]", ".", "split", "(", ")", "sampleName", "=", "first_line", "[", "0", "]", "demagLevel", "=", "first_line", "[", "2", "]", "date", "=", "first_line", "[", "3", "]", "+", "\":0:0:0\"", "n", "+=", "2", "third_line", "=", "data", "[", "n", "]", ".", "split", "(", ")", "if", "not", "third_line", "[", "0", "]", ".", "startswith", "(", "'SPEC.ANGLES'", ")", ":", "print", "(", "'third line of a block should start with SPEC.ANGLES'", ")", "print", "(", "third_line", ")", "return", "specimenAngleDec", "=", "third_line", "[", "1", "]", "specimenAngleInc", "=", "third_line", "[", "2", "]", "n", "+=", "4", "while", "not", "data", "[", "n", "]", ".", "startswith", "(", "'MEAN'", ")", ":", "n", "+=", "1", "mean_line", "=", "data", "[", "n", "]", "Mx", "=", "mean_line", "[", "1", "]", "My", "=", "mean_line", "[", "2", "]", "Mz", "=", "mean_line", "[", "3", "]", "n", "+=", "1", "precision_line", "=", 
"data", "[", "n", "]", ".", "split", "(", ")", "if", "not", "precision_line", "[", "0", "]", ".", "startswith", "(", "'Modulus'", ")", ":", "print", "(", "'precision line should start with \"Modulus\"'", ")", "return", "splitExp", "=", "precision_line", "[", "2", "]", ".", "split", "(", "'A'", ")", "intensityVolStr", "=", "precision_line", "[", "1", "]", "+", "splitExp", "[", "0", "]", "intensityVol", "=", "float", "(", "intensityVolStr", ")", "# check and see if Prec is too big and messes with the parcing.", "precisionStr", "=", "''", "if", "len", "(", "precision_line", ")", "==", "6", ":", "# normal line", "precisionStr", "=", "precision_line", "[", "5", "]", "[", "0", ":", "-", "1", "]", "else", ":", "precisionStr", "=", "precision_line", "[", "4", "]", "[", "0", ":", "-", "1", "]", "precisionPer", "=", "float", "(", "precisionStr", ")", "precision", "=", "intensityVol", "*", "precisionPer", "/", "100", "while", "not", "data", "[", "n", "]", ".", "startswith", "(", "'SPEC.'", ")", ":", "n", "+=", "1", "specimen_line", "=", "data", "[", "n", "]", ".", "split", "(", ")", "specimenDec", "=", "specimen_line", "[", "2", "]", "specimenInc", "=", "specimen_line", "[", "3", "]", "n", "+=", "1", "geographic_line", "=", "data", "[", "n", "]", "if", "not", "geographic_line", ".", "startswith", "(", "'GEOGR'", ")", ":", "geographic_dec", "=", "''", "geographic_inc", "=", "''", "else", ":", "geographic_line", "=", "geographic_line", ".", "split", "(", ")", "geographicDec", "=", "geographic_line", "[", "1", "]", "geographicInc", "=", "geographic_line", "[", "2", "]", "# Add data to various MagIC data tables.", "specimen", "=", "sampleName", "if", "specnum", "!=", "0", ":", "sample", "=", "specimen", "[", ":", "specnum", "]", "else", ":", "sample", "=", "specimen", "site", "=", "pmag", ".", "parse_site", "(", "sample", ",", "samp_con", ",", "Z", ")", "MeasRec", ",", "SpecRec", ",", "SampRec", ",", "SiteRec", ",", "LocRec", "=", "{", "}", ",", "{", "}", ",", "{", "}", ",", "{", "}", ",", "{", "}", "if", "specimen", "!=", "\"\"", "and", "specimen", "not", "in", "[", "x", "[", "'specimen'", "]", "if", "'specimen'", "in", "list", "(", "x", ".", "keys", "(", ")", ")", "else", "\"\"", "for", "x", "in", "SpecRecs", "]", ":", "SpecRec", "[", "'specimen'", "]", "=", "specimen", "SpecRec", "[", "'sample'", "]", "=", "sample", "SpecRec", "[", "\"citations\"", "]", "=", "\"This study\"", "SpecRec", "[", "\"analysts\"", "]", "=", "user", "SpecRec", "[", "'volume'", "]", "=", "volume", "SpecRecs", ".", "append", "(", "SpecRec", ")", "if", "sample", "!=", "\"\"", "and", "sample", "not", "in", "[", "x", "[", "'sample'", "]", "if", "'sample'", "in", "list", "(", "x", ".", "keys", "(", ")", ")", "else", "\"\"", "for", "x", "in", "SampRecs", "]", ":", "SampRec", "[", "'sample'", "]", "=", "sample", "SampRec", "[", "'site'", "]", "=", "site", "SampRec", "[", "\"citations\"", "]", "=", "\"This study\"", "SampRec", "[", "\"analysts\"", "]", "=", "user", "SampRec", "[", "'azimuth'", "]", "=", "specimenAngleDec", "# convert to magic orientation", "sample_dip", "=", "str", "(", "float", "(", "specimenAngleInc", ")", "-", "90.0", ")", "SampRec", "[", "'dip'", "]", "=", "sample_dip", "SampRec", "[", "'method_codes'", "]", "=", "meth_code", "SampRecs", ".", "append", "(", "SampRec", ")", "if", "site", "!=", "\"\"", "and", "site", "not", "in", "[", "x", "[", "'site'", "]", "if", "'site'", "in", "list", "(", "x", ".", "keys", "(", ")", ")", "else", "\"\"", "for", "x", "in", "SiteRecs", "]", ":", "SiteRec", "[", "'site'", "]", "=", 
"site", "SiteRec", "[", "'location'", "]", "=", "location", "SiteRec", "[", "\"citations\"", "]", "=", "\"This study\"", "SiteRec", "[", "\"analysts\"", "]", "=", "user", "SiteRec", "[", "'lat'", "]", "=", "lat", "SiteRec", "[", "'lon'", "]", "=", "lon", "SiteRecs", ".", "append", "(", "SiteRec", ")", "if", "location", "!=", "\"\"", "and", "location", "not", "in", "[", "x", "[", "'location'", "]", "if", "'location'", "in", "list", "(", "x", ".", "keys", "(", ")", ")", "else", "\"\"", "for", "x", "in", "LocRecs", "]", ":", "LocRec", "[", "'location'", "]", "=", "location", "LocRec", "[", "\"citations\"", "]", "=", "\"This study\"", "LocRec", "[", "\"analysts\"", "]", "=", "user", "LocRec", "[", "'lat_n'", "]", "=", "lat", "LocRec", "[", "'lon_e'", "]", "=", "lon", "LocRec", "[", "'lat_s'", "]", "=", "lat", "LocRec", "[", "'lon_w'", "]", "=", "lon", "LocRecs", ".", "append", "(", "LocRec", ")", "local", "=", "pytz", ".", "timezone", "(", "timezone", ")", "naive", "=", "datetime", ".", "datetime", ".", "strptime", "(", "date", ",", "\"%m-%d-%Y:%H:%M:%S\"", ")", "local_dt", "=", "local", ".", "localize", "(", "naive", ",", "is_dst", "=", "None", ")", "utc_dt", "=", "local_dt", ".", "astimezone", "(", "pytz", ".", "utc", ")", "timestamp", "=", "utc_dt", ".", "strftime", "(", "\"%Y-%m-%dT%H:%M:%S\"", ")", "+", "\"Z\"", "MeasRec", "[", "\"specimen\"", "]", "=", "specimen", "MeasRec", "[", "\"timestamp\"", "]", "=", "timestamp", "MeasRec", "[", "'description'", "]", "=", "''", "MeasRec", "[", "\"citations\"", "]", "=", "\"This study\"", "MeasRec", "[", "'software_packages'", "]", "=", "version_num", "MeasRec", "[", "\"treat_temp\"", "]", "=", "'%8.3e'", "%", "(", "273", ")", "# room temp in kelvin", "MeasRec", "[", "\"meas_temp\"", "]", "=", "'%8.3e'", "%", "(", "273", ")", "# room temp in kelvin", "MeasRec", "[", "\"quality\"", "]", "=", "'g'", "MeasRec", "[", "\"standard\"", "]", "=", "'u'", "MeasRec", "[", "\"treat_step_num\"", "]", "=", "0", "MeasRec", "[", "\"treat_ac_field\"", "]", "=", "'0'", "if", "demagLevel", "==", "'NRM'", ":", "meas_type", "=", "\"LT-NO\"", "elif", "demagLevel", "[", "0", "]", "==", "'A'", ":", "if", "demagLevel", "[", ":", "2", "]", "==", "'AD'", ":", "treat", "=", "float", "(", "demagLevel", "[", "2", ":", "]", ")", "else", ":", "treat", "=", "float", "(", "demagLevel", "[", "1", ":", "]", ")", "meas_type", "=", "\"LT-AF-Z\"", "MeasRec", "[", "\"treat_ac_field\"", "]", "=", "'%8.3e'", "%", "(", "treat", "*", "1e-3", ")", "# convert from mT to tesla", "elif", "demagLevel", "[", "0", "]", "==", "'T'", ":", "meas_type", "=", "\"LT-T-Z\"", "treat", "=", "float", "(", "demagLevel", "[", "1", ":", "]", ")", "MeasRec", "[", "\"treat_temp\"", "]", "=", "'%8.3e'", "%", "(", "treat", "+", "273.", ")", "# temp in kelvin", "else", ":", "print", "(", "\"measurement type unknown\"", ",", "demagLevel", ")", "return", "False", ",", "\"measurement type unknown\"", "MeasRec", "[", "\"magn_moment\"", "]", "=", "str", "(", "intensityVol", "*", "volume", ")", "# Am^2", "MeasRec", "[", "\"magn_volume\"", "]", "=", "intensityVolStr", "# A/m", "MeasRec", "[", "\"dir_dec\"", "]", "=", "specimenDec", "MeasRec", "[", "\"dir_inc\"", "]", "=", "specimenInc", "MeasRec", "[", "'method_codes'", "]", "=", "meas_type", "MeasRecs", ".", "append", "(", "MeasRec", ")", "# ignore all the rest of the special characters. 
Some data files not consistantly formatted.", "n", "+=", "1", "while", "(", "(", "len", "(", "data", "[", "n", "]", ")", "<=", "5", "and", "data", "[", "n", "]", "!=", "''", ")", "or", "data", "[", "n", "]", ".", "startswith", "(", "'----'", ")", ")", ":", "n", "+=", "1", "if", "n", ">=", "len", "(", "data", ")", ":", "break", "if", "n", ">=", "len", "(", "data", ")", ":", "# we're done!", "end", "=", "True", "# end of data while loop", "con", "=", "cb", ".", "Contribution", "(", "output_dir_path", ",", "read_tables", "=", "[", "]", ")", "con", ".", "add_magic_table_from_data", "(", "dtype", "=", "'specimens'", ",", "data", "=", "SpecRecs", ")", "con", ".", "add_magic_table_from_data", "(", "dtype", "=", "'samples'", ",", "data", "=", "SampRecs", ")", "con", ".", "add_magic_table_from_data", "(", "dtype", "=", "'sites'", ",", "data", "=", "SiteRecs", ")", "con", ".", "add_magic_table_from_data", "(", "dtype", "=", "'locations'", ",", "data", "=", "LocRecs", ")", "MeasOuts", "=", "pmag", ".", "measurements_methods3", "(", "MeasRecs", ",", "noave", ")", "con", ".", "add_magic_table_from_data", "(", "dtype", "=", "'measurements'", ",", "data", "=", "MeasOuts", ")", "con", ".", "tables", "[", "'specimens'", "]", ".", "write_magic_file", "(", "custom_name", "=", "spec_file", ",", "dir_path", "=", "dir_path", ")", "con", ".", "tables", "[", "'samples'", "]", ".", "write_magic_file", "(", "custom_name", "=", "samp_file", ",", "dir_path", "=", "dir_path", ")", "con", ".", "tables", "[", "'sites'", "]", ".", "write_magic_file", "(", "custom_name", "=", "site_file", ",", "dir_path", "=", "dir_path", ")", "con", ".", "tables", "[", "'locations'", "]", ".", "write_magic_file", "(", "custom_name", "=", "loc_file", ",", "dir_path", "=", "dir_path", ")", "con", ".", "tables", "[", "'measurements'", "]", ".", "write_magic_file", "(", "custom_name", "=", "meas_file", ",", "dir_path", "=", "dir_path", ")", "return", "True", ",", "meas_file" ]
Converts JR6 .txt format files to MagIC measurements format files. Parameters ---------- mag_file : str input file name dir_path : str working directory, default "." input_dir_path : str input file directory IF different from dir_path, default "" meas_file : str output measurement file name, default "measurements.txt" spec_file : str output specimen file name, default "specimens.txt" samp_file : str output sample file name, default "samples.txt" site_file : str output site file name, default "sites.txt" loc_file : str output location file name, default "locations.txt" user : str user name, default "" specnum : int number of characters to designate a specimen, default 0 samp_con : str sample/site naming convention, default '1', see info below location : str location name, default "unknown" lat : float latitude, default "" lon : float longitude, default "" noave : bool do not average duplicate measurements, default False (so by default, DO average) volume : float volume in ccs, default 12 timezone : str timezone of date/time string in comment string, default UTC meth_code : str default "LP-NO" Returns ------- Tuple : (True or False indicating if conversion was successful, meas_file name written)
[ "Converts", "JR6", ".", "txt", "format", "files", "to", "MagIC", "measurements", "format", "files", "." ]
python
train
jtwhite79/pyemu
pyemu/pst/pst_utils.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/pst/pst_utils.py#L546-L575
def pst_from_io_files(tpl_files,in_files,ins_files,out_files,pst_filename=None): """ generate a new pyemu.Pst instance from model interface files. This function is emulated in the Pst.from_io_files() class method. Parameters ---------- tpl_files : (list) template file names in_files : (list) model input file names ins_files : (list) instruction file names out_files : (list) model output file names pst_filename : str filename to save new pyemu.Pst. If None, Pst is not written. default is None Returns ------- new_pst : pyemu.Pst """ warnings.warn("pst_from_io_files has moved to pyemu.helpers and is also "+\ "now avaiable as a Pst class method (Pst.from_io_files())",PyemuWarning) from pyemu import helpers return helpers.pst_from_io_files(tpl_files=tpl_files,in_files=in_files, ins_files=ins_files,out_files=out_files, pst_filename=pst_filename)
[ "def", "pst_from_io_files", "(", "tpl_files", ",", "in_files", ",", "ins_files", ",", "out_files", ",", "pst_filename", "=", "None", ")", ":", "warnings", ".", "warn", "(", "\"pst_from_io_files has moved to pyemu.helpers and is also \"", "+", "\"now avaiable as a Pst class method (Pst.from_io_files())\"", ",", "PyemuWarning", ")", "from", "pyemu", "import", "helpers", "return", "helpers", ".", "pst_from_io_files", "(", "tpl_files", "=", "tpl_files", ",", "in_files", "=", "in_files", ",", "ins_files", "=", "ins_files", ",", "out_files", "=", "out_files", ",", "pst_filename", "=", "pst_filename", ")" ]
generate a new pyemu.Pst instance from model interface files. This function is emulated in the Pst.from_io_files() class method. Parameters ---------- tpl_files : (list) template file names in_files : (list) model input file names ins_files : (list) instruction file names out_files : (list) model output file names pst_filename : str filename to save new pyemu.Pst. If None, Pst is not written. default is None Returns ------- new_pst : pyemu.Pst
[ "generate", "a", "new", "pyemu", ".", "Pst", "instance", "from", "model", "interface", "files", ".", "This", "function", "is", "emulated", "in", "the", "Pst", ".", "from_io_files", "()", "class", "method", "." ]
python
train
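
The record above is a thin deprecation shim: it warns and then forwards every argument to pyemu.helpers.pst_from_io_files. A minimal usage sketch — the import path is assumed from the file path in this record, and all file names are placeholders rather than anything from the source:

from pyemu.pst.pst_utils import pst_from_io_files

# Emits a PyemuWarning about the move, then delegates to pyemu.helpers.
pst = pst_from_io_files(tpl_files=["model.tpl"], in_files=["model.in"],
                        ins_files=["model.ins"], out_files=["model.out"],
                        pst_filename="model.pst")  # written out only because a name is given
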
Chilipp/psyplot
psyplot/data.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/data.py#L3241-L3246
def all_dims(self): """The dimensions for each of the arrays in this list""" return [ _get_dims(arr) if not isinstance(arr, ArrayList) else arr.all_dims for arr in self]
[ "def", "all_dims", "(", "self", ")", ":", "return", "[", "_get_dims", "(", "arr", ")", "if", "not", "isinstance", "(", "arr", ",", "ArrayList", ")", "else", "arr", ".", "all_dims", "for", "arr", "in", "self", "]" ]
The dimensions for each of the arrays in this list
[ "The", "dimensions", "for", "each", "of", "the", "arrays", "in", "this", "list" ]
python
train
Aluriak/bubble-tools
bubbletools/converter.py
https://github.com/Aluriak/bubble-tools/blob/f014f4a1986abefc80dc418feaa05ed258c2221a/bubbletools/converter.py#L16-L20
def bubble_to_dot(bblfile:str, dotfile:str=None, render:bool=False, oriented:bool=False): """Write in dotfile a graph equivalent to those depicted in bubble file""" tree = BubbleTree.from_bubble_file(bblfile, oriented=bool(oriented)) return tree_to_dot(tree, dotfile, render=render)
[ "def", "bubble_to_dot", "(", "bblfile", ":", "str", ",", "dotfile", ":", "str", "=", "None", ",", "render", ":", "bool", "=", "False", ",", "oriented", ":", "bool", "=", "False", ")", ":", "tree", "=", "BubbleTree", ".", "from_bubble_file", "(", "bblfile", ",", "oriented", "=", "bool", "(", "oriented", ")", ")", "return", "tree_to_dot", "(", "tree", ",", "dotfile", ",", "render", "=", "render", ")" ]
Write in dotfile a graph equivalent to those depicted in bubble file
[ "Write", "in", "dotfile", "a", "graph", "equivalent", "to", "those", "depicted", "in", "bubble", "file" ]
python
train
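
The converter above parses the bubble file into a BubbleTree and hands it to tree_to_dot. A hedged usage sketch; the import path is assumed from the record's file path and the .bbl file name is a placeholder:

from bubbletools.converter import bubble_to_dot

# Writes graph.dot describing the same (oriented) graph as graph.bbl.
bubble_to_dot("graph.bbl", dotfile="graph.dot", render=False, oriented=True)
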
alexa/alexa-skills-kit-sdk-for-python
ask-sdk-core/ask_sdk_core/utils/viewport.py
https://github.com/alexa/alexa-skills-kit-sdk-for-python/blob/097b6406aa12d5ca0b825b00c936861b530cbf39/ask-sdk-core/ask_sdk_core/utils/viewport.py#L84-L98
def get_orientation(width, height): # type: (int, int) -> Orientation """Get viewport orientation from given width and height. :type width: int :type height: int :return: viewport orientation enum :rtype: Orientation """ if width > height: return Orientation.LANDSCAPE elif width < height: return Orientation.PORTRAIT else: return Orientation.EQUAL
[ "def", "get_orientation", "(", "width", ",", "height", ")", ":", "# type: (int, int) -> Orientation", "if", "width", ">", "height", ":", "return", "Orientation", ".", "LANDSCAPE", "elif", "width", "<", "height", ":", "return", "Orientation", ".", "PORTRAIT", "else", ":", "return", "Orientation", ".", "EQUAL" ]
Get viewport orientation from given width and height. :type width: int :type height: int :return: viewport orientation enum :rtype: Orientation
[ "Get", "viewport", "orientation", "from", "given", "width", "and", "height", "." ]
python
train
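
The branching above is exhaustive over the three orderings of width and height, which a quick check makes concrete (import path assumed from the record's file path):

from ask_sdk_core.utils.viewport import get_orientation, Orientation

assert get_orientation(width=1920, height=1080) == Orientation.LANDSCAPE
assert get_orientation(width=600, height=1024) == Orientation.PORTRAIT
assert get_orientation(width=800, height=800) == Orientation.EQUAL
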
maxalbert/tohu
tohu/v6/custom_generator/utils.py
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/custom_generator/utils.py#L11-L54
def make_tohu_items_class(clsname, attr_names): """ Parameters ---------- clsname: string Name of the class to be created attr_names: list of strings Names of the attributes of the class to be created """ item_cls = attr.make_class(clsname, {name: attr.ib() for name in attr_names}, repr=False, cmp=True, frozen=True) def new_repr(self): all_fields = ', '.join([f'{name}={repr(value)}' for name, value in attr.asdict(self).items()]) return f'{clsname}({all_fields})' orig_eq = item_cls.__eq__ def new_eq(self, other): """ Custom __eq__() method which also allows comparisons with tuples and dictionaries. This is mostly for convenience during testing. """ if isinstance(other, self.__class__): return orig_eq(self, other) else: if isinstance(other, tuple): return attr.astuple(self) == other elif isinstance(other, dict): return attr.asdict(self) == other else: return NotImplemented item_cls.__repr__ = new_repr item_cls.__eq__ = new_eq item_cls.keys = lambda self: attr_names item_cls.__getitem__ = lambda self, key: getattr(self, key) item_cls.as_dict = lambda self: attr.asdict(self) item_cls.to_series = lambda self: pd.Series(attr.asdict(self)) return item_cls
[ "def", "make_tohu_items_class", "(", "clsname", ",", "attr_names", ")", ":", "item_cls", "=", "attr", ".", "make_class", "(", "clsname", ",", "{", "name", ":", "attr", ".", "ib", "(", ")", "for", "name", "in", "attr_names", "}", ",", "repr", "=", "False", ",", "cmp", "=", "True", ",", "frozen", "=", "True", ")", "def", "new_repr", "(", "self", ")", ":", "all_fields", "=", "', '", ".", "join", "(", "[", "f'{name}={repr(value)}'", "for", "name", ",", "value", "in", "attr", ".", "asdict", "(", "self", ")", ".", "items", "(", ")", "]", ")", "return", "f'{clsname}({all_fields})'", "orig_eq", "=", "item_cls", ".", "__eq__", "def", "new_eq", "(", "self", ",", "other", ")", ":", "\"\"\"\n Custom __eq__() method which also allows comparisons with\n tuples and dictionaries. This is mostly for convenience\n during testing.\n \"\"\"", "if", "isinstance", "(", "other", ",", "self", ".", "__class__", ")", ":", "return", "orig_eq", "(", "self", ",", "other", ")", "else", ":", "if", "isinstance", "(", "other", ",", "tuple", ")", ":", "return", "attr", ".", "astuple", "(", "self", ")", "==", "other", "elif", "isinstance", "(", "other", ",", "dict", ")", ":", "return", "attr", ".", "asdict", "(", "self", ")", "==", "other", "else", ":", "return", "NotImplemented", "item_cls", ".", "__repr__", "=", "new_repr", "item_cls", ".", "__eq__", "=", "new_eq", "item_cls", ".", "keys", "=", "lambda", "self", ":", "attr_names", "item_cls", ".", "__getitem__", "=", "lambda", "self", ",", "key", ":", "getattr", "(", "self", ",", "key", ")", "item_cls", ".", "as_dict", "=", "lambda", "self", ":", "attr", ".", "asdict", "(", "self", ")", "item_cls", ".", "to_series", "=", "lambda", "self", ":", "pd", ".", "Series", "(", "attr", ".", "asdict", "(", "self", ")", ")", "return", "item_cls" ]
Parameters ---------- clsname: string Name of the class to be created attr_names: list of strings Names of the attributes of the class to be created
[ "Parameters", "----------", "clsname", ":", "string", "Name", "of", "the", "class", "to", "be", "created" ]
python
train
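
Everything the factory above bolts onto the attrs-generated class (keys, __getitem__, the widened __eq__, as_dict, to_series) is easiest to see on a tiny example. A sketch assuming attrs and pandas are installed; the class and field names are illustrative, and the import path is inferred from the record's file path:

from tohu.v6.custom_generator.utils import make_tohu_items_class

Point = make_tohu_items_class("Point", ["x", "y"])

p = Point(x=1, y=2)
assert p == (1, 2)               # tuple comparison goes through attr.astuple
assert p == {"x": 1, "y": 2}     # dict comparison goes through attr.asdict
assert p["x"] == 1               # __getitem__ delegates to getattr
assert list(p.keys()) == ["x", "y"]
print(p.to_series())             # pandas Series indexed by field name
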
chriskiehl/Gooey
gooey/gui/processor.py
https://github.com/chriskiehl/Gooey/blob/e598573c6519b953e0ccfc1f3663f827f8cd7e22/gooey/gui/processor.py#L60-L72
def _forward_stdout(self, process): ''' Reads the stdout of `process` and forwards lines and progress to any interested subscribers ''' while True: line = process.stdout.readline() if not line: break pub.send_message(events.CONSOLE_UPDATE, msg=line.decode(self.encoding)) pub.send_message(events.PROGRESS_UPDATE, progress=self._extract_progress(line)) pub.send_message(events.EXECUTION_COMPLETE)
[ "def", "_forward_stdout", "(", "self", ",", "process", ")", ":", "while", "True", ":", "line", "=", "process", ".", "stdout", ".", "readline", "(", ")", "if", "not", "line", ":", "break", "pub", ".", "send_message", "(", "events", ".", "CONSOLE_UPDATE", ",", "msg", "=", "line", ".", "decode", "(", "self", ".", "encoding", ")", ")", "pub", ".", "send_message", "(", "events", ".", "PROGRESS_UPDATE", ",", "progress", "=", "self", ".", "_extract_progress", "(", "line", ")", ")", "pub", ".", "send_message", "(", "events", ".", "EXECUTION_COMPLETE", ")" ]
Reads the stdout of `process` and forwards lines and progress to any interested subscribers
[ "Reads", "the", "stdout", "of", "process", "and", "forwards", "lines", "and", "progress", "to", "any", "interested", "subscribers" ]
python
train
bintoro/overloading.py
overloading.py
https://github.com/bintoro/overloading.py/blob/d7b044d6f7e38043f0fc20f44f134baec84a5b32/overloading.py#L755-L769
def is_void(func): """ Determines if a function is a void function, i.e., one whose body contains nothing but a docstring or an ellipsis. A void function can be used to introduce an overloaded function without actually registering an implementation. """ try: source = dedent(inspect.getsource(func)) except (OSError, IOError): return False fdef = next(ast.iter_child_nodes(ast.parse(source))) return ( type(fdef) is ast.FunctionDef and len(fdef.body) == 1 and type(fdef.body[0]) is ast.Expr and type(fdef.body[0].value) in {ast.Str, ast.Ellipsis})
[ "def", "is_void", "(", "func", ")", ":", "try", ":", "source", "=", "dedent", "(", "inspect", ".", "getsource", "(", "func", ")", ")", "except", "(", "OSError", ",", "IOError", ")", ":", "return", "False", "fdef", "=", "next", "(", "ast", ".", "iter_child_nodes", "(", "ast", ".", "parse", "(", "source", ")", ")", ")", "return", "(", "type", "(", "fdef", ")", "is", "ast", ".", "FunctionDef", "and", "len", "(", "fdef", ".", "body", ")", "==", "1", "and", "type", "(", "fdef", ".", "body", "[", "0", "]", ")", "is", "ast", ".", "Expr", "and", "type", "(", "fdef", ".", "body", "[", "0", "]", ".", "value", ")", "in", "{", "ast", ".", "Str", ",", "ast", ".", "Ellipsis", "}", ")" ]
Determines if a function is a void function, i.e., one whose body contains nothing but a docstring or an ellipsis. A void function can be used to introduce an overloaded function without actually registering an implementation.
[ "Determines", "if", "a", "function", "is", "a", "void", "function", "i", ".", "e", ".", "one", "whose", "body", "contains", "nothing", "but", "a", "docstring", "or", "an", "ellipsis", ".", "A", "void", "function", "can", "be", "used", "to", "introduce", "an", "overloaded", "function", "without", "actually", "registering", "an", "implementation", "." ]
python
train
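
Concretely, a function whose body is nothing but a docstring (or a bare ellipsis) is void, while any real statement is not. A sketch, with two caveats: the functions must live in a source file inspect.getsource can read, and the ast.Str check above matches string constants only on the interpreters the library targets (before Python 3.8 replaced ast.Str with ast.Constant):

from overloading import is_void

def declared_only(a, b):
    """Introduces an overloaded signature without an implementation."""

def implemented(a, b):
    return a + b

is_void(declared_only)   # True: the body is a single docstring expression
is_void(implemented)     # False: the body contains a return statement
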
not-na/peng3d
peng3d/model.py
https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/model.py#L888-L898
def ensureModelData(self,obj): """ Ensures that the given ``obj`` has been initialized to be used with this model. If the object is found to not be initialized, it will be initialized. """ if not hasattr(obj,"_modeldata"): self.create(obj,cache=True) if "_modelcache" not in obj._modeldata: # Assume all initialization is missing, simply reinitialize self.create(obj,cache=True)
[ "def", "ensureModelData", "(", "self", ",", "obj", ")", ":", "if", "not", "hasattr", "(", "obj", ",", "\"_modeldata\"", ")", ":", "self", ".", "create", "(", "obj", ",", "cache", "=", "True", ")", "if", "\"_modelcache\"", "not", "in", "obj", ".", "_modeldata", ":", "# Assume all initialization is missing, simply reinitialize", "self", ".", "create", "(", "obj", ",", "cache", "=", "True", ")" ]
Ensures that the given ``obj`` has been initialized to be used with this model. If the object is found to not be initialized, it will be initialized.
[ "Ensures", "that", "the", "given", "obj", "has", "been", "initialized", "to", "be", "used", "with", "this", "model", ".", "If", "the", "object", "is", "found", "to", "not", "be", "initialized", "it", "will", "be", "initialized", "." ]
python
test
Azure/azure-sdk-for-python
azure-servicemanagement-legacy/azure/servicemanagement/servicemanagementservice.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/servicemanagementservice.py#L1685-L1702
def start_roles(self, service_name, deployment_name, role_names): ''' Starts the specified virtual machines. service_name: The name of the service. deployment_name: The name of the deployment. role_names: The names of the roles, as an enumerable of strings. ''' _validate_not_none('service_name', service_name) _validate_not_none('deployment_name', deployment_name) _validate_not_none('role_names', role_names) return self._perform_post( self._get_roles_operations_path(service_name, deployment_name), _XmlSerializer.start_roles_operation_to_xml(role_names), as_async=True)
[ "def", "start_roles", "(", "self", ",", "service_name", ",", "deployment_name", ",", "role_names", ")", ":", "_validate_not_none", "(", "'service_name'", ",", "service_name", ")", "_validate_not_none", "(", "'deployment_name'", ",", "deployment_name", ")", "_validate_not_none", "(", "'role_names'", ",", "role_names", ")", "return", "self", ".", "_perform_post", "(", "self", ".", "_get_roles_operations_path", "(", "service_name", ",", "deployment_name", ")", ",", "_XmlSerializer", ".", "start_roles_operation_to_xml", "(", "role_names", ")", ",", "as_async", "=", "True", ")" ]
Starts the specified virtual machines. service_name: The name of the service. deployment_name: The name of the deployment. role_names: The names of the roles, as an enumerable of strings.
[ "Starts", "the", "specified", "virtual", "machines", "." ]
python
test
foxx/peewee-extras
peewee_extras.py
https://github.com/foxx/peewee-extras/blob/327e7e63465b3f6e1afc0e6a651f4cb5c8c60889/peewee_extras.py#L359-L370
def retrieve(self, cursor): """ Retrieve items from query """ assert isinstance(cursor, dict), "expected cursor type 'dict'" # look for record in query query = self.get_query() assert isinstance(query, peewee.Query) query return query.get(**cursor)
[ "def", "retrieve", "(", "self", ",", "cursor", ")", ":", "assert", "isinstance", "(", "cursor", ",", "dict", ")", ",", "\"expected cursor type 'dict'\"", "# look for record in query", "query", "=", "self", ".", "get_query", "(", ")", "assert", "isinstance", "(", "query", ",", "peewee", ".", "Query", ")", "query", "return", "query", ".", "get", "(", "*", "*", "cursor", ")" ]
Retrieve items from query
[ "Retrieve", "items", "from", "query" ]
python
valid
vladsaveliev/TargQC
targqc/qualimap/runner.py
https://github.com/vladsaveliev/TargQC/blob/e887c36b2194dbd73c6ea32989b6cb84c6c0e58d/targqc/qualimap/runner.py#L97-L143
def run_multisample_qualimap(output_dir, work_dir, samples, targqc_full_report): """ 1. Generates Qualimap2 plots and put into plots_dirpath 2. Adds records to targqc_full_report.plots """ plots_dirpath = join(output_dir, 'plots') individual_report_fpaths = [s.qualimap_html_fpath for s in samples] if isdir(plots_dirpath) and not any( not can_reuse(join(plots_dirpath, f), individual_report_fpaths) for f in listdir(plots_dirpath) if not f.startswith('.')): debug('Qualimap miltisample plots exist - ' + plots_dirpath + ', reusing...') else: # Qualimap2 run for multi-sample plots if len([s.qualimap_html_fpath for s in samples if s.qualimap_html_fpath]) > 0: if find_executable() is not None: # and get_qualimap_type(find_executable()) == 'full': qualimap_output_dir = join(work_dir, 'qualimap_multi_bamqc') _correct_qualimap_genome_results(samples) _correct_qualimap_insert_size_histogram(samples) safe_mkdir(qualimap_output_dir) rows = [] for sample in samples: if sample.qualimap_html_fpath: rows += [[sample.name, sample.qualimap_html_fpath]] data_fpath = write_tsv_rows(([], rows), join(qualimap_output_dir, 'qualimap_results_by_sample.tsv')) qualimap_plots_dirpath = join(qualimap_output_dir, 'images_multisampleBamQcReport') cmdline = find_executable() + ' multi-bamqc --data {data_fpath} -outdir {qualimap_output_dir}'.format(**locals()) run(cmdline, env_vars=dict(DISPLAY=None), checks=[lambda _1, _2: verify_dir(qualimap_output_dir)], reuse=cfg.reuse_intermediate) if not verify_dir(qualimap_plots_dirpath): warn('Warning: Qualimap for multi-sample analysis failed to finish. TargQC will not contain plots.') return None else: if exists(plots_dirpath): shutil.rmtree(plots_dirpath) shutil.move(qualimap_plots_dirpath, plots_dirpath) else: warn('Warning: Qualimap for multi-sample analysis was not found. TargQC will not contain plots.') return None targqc_full_report.plots = [] for plot_fpath in listdir(plots_dirpath): plot_fpath = join(plots_dirpath, plot_fpath) if verify_file(plot_fpath) and plot_fpath.endswith('.png'): targqc_full_report.plots.append(relpath(plot_fpath, output_dir))
[ "def", "run_multisample_qualimap", "(", "output_dir", ",", "work_dir", ",", "samples", ",", "targqc_full_report", ")", ":", "plots_dirpath", "=", "join", "(", "output_dir", ",", "'plots'", ")", "individual_report_fpaths", "=", "[", "s", ".", "qualimap_html_fpath", "for", "s", "in", "samples", "]", "if", "isdir", "(", "plots_dirpath", ")", "and", "not", "any", "(", "not", "can_reuse", "(", "join", "(", "plots_dirpath", ",", "f", ")", ",", "individual_report_fpaths", ")", "for", "f", "in", "listdir", "(", "plots_dirpath", ")", "if", "not", "f", ".", "startswith", "(", "'.'", ")", ")", ":", "debug", "(", "'Qualimap miltisample plots exist - '", "+", "plots_dirpath", "+", "', reusing...'", ")", "else", ":", "# Qualimap2 run for multi-sample plots", "if", "len", "(", "[", "s", ".", "qualimap_html_fpath", "for", "s", "in", "samples", "if", "s", ".", "qualimap_html_fpath", "]", ")", ">", "0", ":", "if", "find_executable", "(", ")", "is", "not", "None", ":", "# and get_qualimap_type(find_executable()) == 'full':", "qualimap_output_dir", "=", "join", "(", "work_dir", ",", "'qualimap_multi_bamqc'", ")", "_correct_qualimap_genome_results", "(", "samples", ")", "_correct_qualimap_insert_size_histogram", "(", "samples", ")", "safe_mkdir", "(", "qualimap_output_dir", ")", "rows", "=", "[", "]", "for", "sample", "in", "samples", ":", "if", "sample", ".", "qualimap_html_fpath", ":", "rows", "+=", "[", "[", "sample", ".", "name", ",", "sample", ".", "qualimap_html_fpath", "]", "]", "data_fpath", "=", "write_tsv_rows", "(", "(", "[", "]", ",", "rows", ")", ",", "join", "(", "qualimap_output_dir", ",", "'qualimap_results_by_sample.tsv'", ")", ")", "qualimap_plots_dirpath", "=", "join", "(", "qualimap_output_dir", ",", "'images_multisampleBamQcReport'", ")", "cmdline", "=", "find_executable", "(", ")", "+", "' multi-bamqc --data {data_fpath} -outdir {qualimap_output_dir}'", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "run", "(", "cmdline", ",", "env_vars", "=", "dict", "(", "DISPLAY", "=", "None", ")", ",", "checks", "=", "[", "lambda", "_1", ",", "_2", ":", "verify_dir", "(", "qualimap_output_dir", ")", "]", ",", "reuse", "=", "cfg", ".", "reuse_intermediate", ")", "if", "not", "verify_dir", "(", "qualimap_plots_dirpath", ")", ":", "warn", "(", "'Warning: Qualimap for multi-sample analysis failed to finish. TargQC will not contain plots.'", ")", "return", "None", "else", ":", "if", "exists", "(", "plots_dirpath", ")", ":", "shutil", ".", "rmtree", "(", "plots_dirpath", ")", "shutil", ".", "move", "(", "qualimap_plots_dirpath", ",", "plots_dirpath", ")", "else", ":", "warn", "(", "'Warning: Qualimap for multi-sample analysis was not found. TargQC will not contain plots.'", ")", "return", "None", "targqc_full_report", ".", "plots", "=", "[", "]", "for", "plot_fpath", "in", "listdir", "(", "plots_dirpath", ")", ":", "plot_fpath", "=", "join", "(", "plots_dirpath", ",", "plot_fpath", ")", "if", "verify_file", "(", "plot_fpath", ")", "and", "plot_fpath", ".", "endswith", "(", "'.png'", ")", ":", "targqc_full_report", ".", "plots", ".", "append", "(", "relpath", "(", "plot_fpath", ",", "output_dir", ")", ")" ]
1. Generates Qualimap2 plots and put into plots_dirpath 2. Adds records to targqc_full_report.plots
[ "1", ".", "Generates", "Qualimap2", "plots", "and", "put", "into", "plots_dirpath", "2", ".", "Adds", "records", "to", "targqc_full_report", ".", "plots" ]
python
train
coursera-dl/coursera-dl
coursera/utils.py
https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/utils.py#L274-L291
def extend_supplement_links(destination, source): """ Extends (merges) destination dictionary with supplement_links from source dictionary. Values are expected to be lists, or any data structure that has `extend` method. @param destination: Destination dictionary that will be extended. @type destination: @see CourseraOnDemand._extract_links_from_text @param source: Source dictionary that will be used to extend destination dictionary. @type source: @see CourseraOnDemand._extract_links_from_text """ for key, value in iteritems(source): if key not in destination: destination[key] = value else: destination[key].extend(value)
[ "def", "extend_supplement_links", "(", "destination", ",", "source", ")", ":", "for", "key", ",", "value", "in", "iteritems", "(", "source", ")", ":", "if", "key", "not", "in", "destination", ":", "destination", "[", "key", "]", "=", "value", "else", ":", "destination", "[", "key", "]", ".", "extend", "(", "value", ")" ]
Extends (merges) destination dictionary with supplement_links from source dictionary. Values are expected to be lists, or any data structure that has `extend` method. @param destination: Destination dictionary that will be extended. @type destination: @see CourseraOnDemand._extract_links_from_text @param source: Source dictionary that will be used to extend destination dictionary. @type source: @see CourseraOnDemand._extract_links_from_text
[ "Extends", "(", "merges", ")", "destination", "dictionary", "with", "supplement_links", "from", "source", "dictionary", ".", "Values", "are", "expected", "to", "be", "lists", "or", "any", "data", "structure", "that", "has", "extend", "method", "." ]
python
train
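
The merge semantics above are easiest to see on a small pair of dictionaries mapping file extensions to link lists (the values are invented for illustration; import path inferred from the record's file path):

from coursera.utils import extend_supplement_links

destination = {'pdf': ['week1.pdf'], 'mp4': ['lecture1.mp4']}
source = {'pdf': ['week2.pdf'], 'srt': ['lecture1.srt']}

extend_supplement_links(destination, source)
# destination is now:
# {'pdf': ['week1.pdf', 'week2.pdf'],
#  'mp4': ['lecture1.mp4'],
#  'srt': ['lecture1.srt']}
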
HPCC-Cloud-Computing/CAL
calplus/v1/network/resources/network.py
https://github.com/HPCC-Cloud-Computing/CAL/blob/7134b3dfe9ee3a383506a592765c7a12fa4ca1e9/calplus/v1/network/resources/network.py#L9-L25
def index(self, req, drivers): """List all network List all of netowrks on some special cloud with: :Param req :Type object Request """ result = [] for driver in drivers: result.append(driver.list_network(req.params)) data = { 'action': "index", 'controller': "network", 'cloud': req.environ['calplus.cloud'], 'result': result } return data
[ "def", "index", "(", "self", ",", "req", ",", "drivers", ")", ":", "result", "=", "[", "]", "for", "driver", "in", "drivers", ":", "result", ".", "append", "(", "driver", ".", "list_network", "(", "req", ".", "params", ")", ")", "data", "=", "{", "'action'", ":", "\"index\"", ",", "'controller'", ":", "\"network\"", ",", "'cloud'", ":", "req", ".", "environ", "[", "'calplus.cloud'", "]", ",", "'result'", ":", "result", "}", "return", "data" ]
List all networks List all of networks on some special cloud with: :Param req :Type object Request
[ "List", "all", "networks", "List", "all", "of", "networks", "on", "some", "special", "cloud", "with", ":", ":", "Param", "req", ":", "Type", "object", "Request" ]
python
train
carpedm20/fbchat
fbchat/_client.py
https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L3490-L3508
def onMarkedSeen( self, threads=None, seen_ts=None, ts=None, metadata=None, msg=None ): """ Called when the client is listening, and the client has successfully marked threads as seen :param threads: The threads that were marked :param author_id: The ID of the person who changed the emoji :param seen_ts: A timestamp of when the threads were seen :param ts: A timestamp of the action :param metadata: Extra metadata about the action :param msg: A full set of the data recieved :type thread_type: models.ThreadType """ log.info( "Marked messages as seen in threads {} at {}s".format( [(x[0], x[1].name) for x in threads], seen_ts / 1000 ) )
[ "def", "onMarkedSeen", "(", "self", ",", "threads", "=", "None", ",", "seen_ts", "=", "None", ",", "ts", "=", "None", ",", "metadata", "=", "None", ",", "msg", "=", "None", ")", ":", "log", ".", "info", "(", "\"Marked messages as seen in threads {} at {}s\"", ".", "format", "(", "[", "(", "x", "[", "0", "]", ",", "x", "[", "1", "]", ".", "name", ")", "for", "x", "in", "threads", "]", ",", "seen_ts", "/", "1000", ")", ")" ]
Called when the client is listening, and the client has successfully marked threads as seen :param threads: The threads that were marked :param seen_ts: A timestamp of when the threads were seen :param ts: A timestamp of the action :param metadata: Extra metadata about the action :param msg: A full set of the data received
[ "Called", "when", "the", "client", "is", "listening", "and", "the", "client", "has", "successfully", "marked", "threads", "as", "seen" ]
python
train
fatiando/pooch
pooch/core.py
https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/core.py#L343-L388
def _download_file(self, fname): """ Download a file from the remote data storage to the local storage. Used by :meth:`~pooch.Pooch.fetch` to do the actual downloading. Parameters ---------- fname : str The file name (relative to the *base_url* of the remote data storage) to fetch from the local storage. Raises ------ ValueError If the hash of the downloaded file doesn't match the hash in the registry. """ destination = self.abspath / fname source = self.get_url(fname) # Stream the file to a temporary so that we can safely check its hash before # overwriting the original fout = tempfile.NamedTemporaryFile(delete=False, dir=str(self.abspath)) try: with fout: response = requests.get(source, stream=True) response.raise_for_status() for chunk in response.iter_content(chunk_size=1024): if chunk: fout.write(chunk) tmphash = file_hash(fout.name) if tmphash != self.registry[fname]: raise ValueError( "Hash of downloaded file '{}' doesn't match the entry in the registry:" " Expected '{}' and got '{}'.".format( fout.name, self.registry[fname], tmphash ) ) # Make sure the parent directory exists in case the file is in a subdirectory. # Otherwise, move will cause an error. if not os.path.exists(str(destination.parent)): os.makedirs(str(destination.parent)) shutil.move(fout.name, str(destination)) except Exception: os.remove(fout.name) raise
[ "def", "_download_file", "(", "self", ",", "fname", ")", ":", "destination", "=", "self", ".", "abspath", "/", "fname", "source", "=", "self", ".", "get_url", "(", "fname", ")", "# Stream the file to a temporary so that we can safely check its hash before", "# overwriting the original", "fout", "=", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ",", "dir", "=", "str", "(", "self", ".", "abspath", ")", ")", "try", ":", "with", "fout", ":", "response", "=", "requests", ".", "get", "(", "source", ",", "stream", "=", "True", ")", "response", ".", "raise_for_status", "(", ")", "for", "chunk", "in", "response", ".", "iter_content", "(", "chunk_size", "=", "1024", ")", ":", "if", "chunk", ":", "fout", ".", "write", "(", "chunk", ")", "tmphash", "=", "file_hash", "(", "fout", ".", "name", ")", "if", "tmphash", "!=", "self", ".", "registry", "[", "fname", "]", ":", "raise", "ValueError", "(", "\"Hash of downloaded file '{}' doesn't match the entry in the registry:\"", "\" Expected '{}' and got '{}'.\"", ".", "format", "(", "fout", ".", "name", ",", "self", ".", "registry", "[", "fname", "]", ",", "tmphash", ")", ")", "# Make sure the parent directory exists in case the file is in a subdirectory.", "# Otherwise, move will cause an error.", "if", "not", "os", ".", "path", ".", "exists", "(", "str", "(", "destination", ".", "parent", ")", ")", ":", "os", ".", "makedirs", "(", "str", "(", "destination", ".", "parent", ")", ")", "shutil", ".", "move", "(", "fout", ".", "name", ",", "str", "(", "destination", ")", ")", "except", "Exception", ":", "os", ".", "remove", "(", "fout", ".", "name", ")", "raise" ]
Download a file from the remote data storage to the local storage. Used by :meth:`~pooch.Pooch.fetch` to do the actual downloading. Parameters ---------- fname : str The file name (relative to the *base_url* of the remote data storage) to fetch from the local storage. Raises ------ ValueError If the hash of the downloaded file doesn't match the hash in the registry.
[ "Download", "a", "file", "from", "the", "remote", "data", "storage", "to", "the", "local", "storage", "." ]
python
train
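
The safety pattern at the heart of _download_file — stream into a temporary file in the destination directory, verify the hash, and only then move the file into place — stands on its own. A minimal sketch of the same pattern, substituting hashlib.sha256 for pooch's file_hash helper; the function name and arguments are illustrative:

import hashlib
import os
import shutil
import tempfile

import requests

def fetch_verified(url, destination, expected_sha256):
    """Download url to destination only if the payload matches the hash."""
    # The temp file lives next to the destination so the final move stays
    # on one filesystem.
    fout = tempfile.NamedTemporaryFile(
        delete=False, dir=os.path.dirname(destination) or ".")
    try:
        with fout:
            response = requests.get(url, stream=True)
            response.raise_for_status()
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    fout.write(chunk)
        with open(fout.name, "rb") as f:
            digest = hashlib.sha256(f.read()).hexdigest()
        if digest != expected_sha256:
            raise ValueError("hash mismatch: expected %s, got %s"
                             % (expected_sha256, digest))
        shutil.move(fout.name, destination)
    except Exception:
        os.remove(fout.name)
        raise
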
yashbathia/freeport
setup.py
https://github.com/yashbathia/freeport/blob/ea9bda56f40cde903e21aab23c1c792d28a6f663/setup.py#L76-L84
def origin(self): """ Return the fetch url for the git origin :return: """ for item in os.popen('git remote -v'): split_item = item.strip().split() if split_item[0] == 'origin' and split_item[-1] == '(push)': return split_item[1]
[ "def", "origin", "(", "self", ")", ":", "for", "item", "in", "os", ".", "popen", "(", "'git remote -v'", ")", ":", "split_item", "=", "item", ".", "strip", "(", ")", ".", "split", "(", ")", "if", "split_item", "[", "0", "]", "==", "'origin'", "and", "split_item", "[", "-", "1", "]", "==", "'(push)'", ":", "return", "split_item", "[", "1", "]" ]
Return the push url for the git origin :return:
[ "Return", "the", "push", "url", "for", "the", "git", "origin", ":", "return", ":" ]
python
train
intelsdi-x/snap-plugin-lib-py
snap_plugin/v1/config_map.py
https://github.com/intelsdi-x/snap-plugin-lib-py/blob/8da5d00ac5f9d2b48a7239563ac7788209891ca4/snap_plugin/v1/config_map.py#L176-L181
def itervalues(self): "Returns an iterator over the values of ConfigMap." return chain(self._pb.StringMap.values(), self._pb.IntMap.values(), self._pb.FloatMap.values(), self._pb.BoolMap.values())
[ "def", "itervalues", "(", "self", ")", ":", "return", "chain", "(", "self", ".", "_pb", ".", "StringMap", ".", "values", "(", ")", ",", "self", ".", "_pb", ".", "IntMap", ".", "values", "(", ")", ",", "self", ".", "_pb", ".", "FloatMap", ".", "values", "(", ")", ",", "self", ".", "_pb", ".", "BoolMap", ".", "values", "(", ")", ")" ]
Returns an iterator over the values of ConfigMap.
[ "Returns", "an", "iterator", "over", "the", "values", "of", "ConfigMap", "." ]
python
train
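
itertools.chain does all the work above: it stitches the value views of the four typed protobuf maps into one lazy iterator without copying. The same idea on plain dicts:

from itertools import chain

string_map = {"endpoint": "localhost"}
int_map = {"port": 8181}
bool_map = {"secure": True}

# One pass over every value; no intermediate list is built.
for value in chain(string_map.values(), int_map.values(), bool_map.values()):
    print(value)
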
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/maps/email/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/maps/email/__init__.py#L92-L113
def _set_email_list(self, v, load=False): """ Setter method for email_list, mapped from YANG variable /rbridge_id/maps/email/email_list (list) If this variable is read-only (config: false) in the source YANG file, then _set_email_list is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_email_list() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("email",email_list.email_list, yang_name="email-list", rest_name="email-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='email', extensions={u'tailf-common': {u'info': u'Configure MAPS emails', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'maps_emails_callpoint'}}), is_container='list', yang_name="email-list", rest_name="email-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure MAPS emails', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'maps_emails_callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-maps', defining_module='brocade-maps', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """email_list must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("email",email_list.email_list, yang_name="email-list", rest_name="email-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='email', extensions={u'tailf-common': {u'info': u'Configure MAPS emails', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'maps_emails_callpoint'}}), is_container='list', yang_name="email-list", rest_name="email-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure MAPS emails', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'maps_emails_callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-maps', defining_module='brocade-maps', yang_type='list', is_config=True)""", }) self.__email_list = t if hasattr(self, '_set'): self._set()
[ "def", "_set_email_list", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"email\"", ",", "email_list", ".", "email_list", ",", "yang_name", "=", "\"email-list\"", ",", "rest_name", "=", "\"email-list\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'email'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Configure MAPS emails'", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'cli-drop-node-name'", ":", "None", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'callpoint'", ":", "u'maps_emails_callpoint'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"email-list\"", ",", "rest_name", "=", "\"email-list\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Configure MAPS emails'", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'cli-drop-node-name'", ":", "None", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'callpoint'", ":", "u'maps_emails_callpoint'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-maps'", ",", "defining_module", "=", "'brocade-maps'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"email_list must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"email\",email_list.email_list, yang_name=\"email-list\", rest_name=\"email-list\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='email', extensions={u'tailf-common': {u'info': u'Configure MAPS emails', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'maps_emails_callpoint'}}), is_container='list', yang_name=\"email-list\", rest_name=\"email-list\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure MAPS emails', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'maps_emails_callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-maps', defining_module='brocade-maps', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__email_list", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for email_list, mapped from YANG variable /rbridge_id/maps/email/email_list (list) If this variable is read-only (config: false) in the source YANG file, then _set_email_list is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_email_list() directly.
[ "Setter", "method", "for", "email_list", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "maps", "/", "email", "/", "email_list", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_email_list", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_email_list", "()", "directly", "." ]
python
train
merll/docker-map
dockermap/map/config/__init__.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/config/__init__.py#L159-L191
def update_from_obj(self, obj, copy=False): """ Updates this configuration object from another. See :meth:`ConfigurationObject.update` for details. :param obj: Values to update the ConfigurationObject with. :type obj: ConfigurationObject :param copy: Copies lists and dictionaries. :type copy: bool """ obj.clean() obj_config = obj._config all_props = self.__class__.CONFIG_PROPERTIES if copy: for key, value in six.iteritems(obj_config): attr_config = all_props.get(key) if attr_config: attr_type = attr_config.attr_type if attr_type: if issubclass(attr_type, list): self._config[key] = value[:] elif attr_type is dict: self._config[key] = value.copy() else: self._config[key] = value self._modified.discard(key) else: filtered_dict = {key: value for key, value in six.iteritems(obj_config) if key in all_props} self._config.update(filtered_dict) self._modified.difference_update(filtered_dict.keys())
[ "def", "update_from_obj", "(", "self", ",", "obj", ",", "copy", "=", "False", ")", ":", "obj", ".", "clean", "(", ")", "obj_config", "=", "obj", ".", "_config", "all_props", "=", "self", ".", "__class__", ".", "CONFIG_PROPERTIES", "if", "copy", ":", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "obj_config", ")", ":", "attr_config", "=", "all_props", ".", "get", "(", "key", ")", "if", "attr_config", ":", "attr_type", "=", "attr_config", ".", "attr_type", "if", "attr_type", ":", "if", "issubclass", "(", "attr_type", ",", "list", ")", ":", "self", ".", "_config", "[", "key", "]", "=", "value", "[", ":", "]", "elif", "attr_type", "is", "dict", ":", "self", ".", "_config", "[", "key", "]", "=", "value", ".", "copy", "(", ")", "else", ":", "self", ".", "_config", "[", "key", "]", "=", "value", "self", ".", "_modified", ".", "discard", "(", "key", ")", "else", ":", "filtered_dict", "=", "{", "key", ":", "value", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "obj_config", ")", "if", "key", "in", "all_props", "}", "self", ".", "_config", ".", "update", "(", "filtered_dict", ")", "self", ".", "_modified", ".", "difference_update", "(", "filtered_dict", ".", "keys", "(", ")", ")" ]
Updates this configuration object from another. See :meth:`ConfigurationObject.update` for details. :param obj: Values to update the ConfigurationObject with. :type obj: ConfigurationObject :param copy: Copies lists and dictionaries. :type copy: bool
[ "Updates", "this", "configuration", "object", "from", "another", "." ]
python
train
rodionovd/machobot
machobot/dylib.py
https://github.com/rodionovd/machobot/blob/60e10b63c2538a73dc8ec3ce636b3ed5bf09f524/machobot/dylib.py#L31-L79
def macho_dependencies_list(target_path, header_magic=None): """ Generates a list of libraries the given Mach-O file depends on. In that list a single library is represented by its "install path": for some libraries it would be a full file path, and for others it would be a relative path (sometimes with dyld templates like @executable_path or @rpath in it). Note: I don't know any reason why would some architectures of a fat Mach-O depend on certain libraries while others don't, but *it's technically possible*. So that's why you may want to specify the `header_magic` value for a particular header. Returns an object with two properties: `weak` and `strong` that hold lists of weak and strong dependencies respectively. """ MachODeprendencies = namedtuple("MachODeprendecies", "weak strong") # Convert the magic value into macholib representation if needed if isinstance(header_magic, basestring): header_magic = _MH_MAGIC_from_string(header_magic) macho = MachO(target_path) # Obtain a list of headers for the required magic value (if any) suggestions = filter(lambda t: t.header.magic == header_magic or # just add all headers if user didn't specifiy the magic header_magic == None, macho.headers) header = None if len(suggestions) <= 0 else suggestions[0] # filter() above *always* returns a list, so we have to check if it's empty if header is None: raise Exception("Unable to find a header for the given MAGIC value in that Mach-O file") return None def decodeLoadCommandData(data): # Also ignore trailing zeros return data[:data.find(b"\x00")].decode(sys.getfilesystemencoding()) def strongReferencesFromHeader(h): # List of LC_LOAD_DYLIB commands list = filter(lambda (lc,cmd,data): lc.cmd == LC_LOAD_DYLIB, h.commands) # Their contents (aka data) as a file path return map(lambda (lc,cmd,data): decodeLoadCommandData(data), list) def weakReferencesFromHeader(h): list = filter(lambda (lc,cmd,data): lc.cmd == LC_LOAD_WEAK_DYLIB, h.commands) return map(lambda (lc,cmd,data): decodeLoadCommandData(data), list) strongRefs = strongReferencesFromHeader(header) weakRefs = weakReferencesFromHeader(header) return MachODeprendencies(weak = weakRefs, strong = strongRefs)
[ "def", "macho_dependencies_list", "(", "target_path", ",", "header_magic", "=", "None", ")", ":", "MachODeprendencies", "=", "namedtuple", "(", "\"MachODeprendecies\"", ",", "\"weak strong\"", ")", "# Convert the magic value into macholib representation if needed", "if", "isinstance", "(", "header_magic", ",", "basestring", ")", ":", "header_magic", "=", "_MH_MAGIC_from_string", "(", "header_magic", ")", "macho", "=", "MachO", "(", "target_path", ")", "# Obtain a list of headers for the required magic value (if any)", "suggestions", "=", "filter", "(", "lambda", "t", ":", "t", ".", "header", ".", "magic", "==", "header_magic", "or", "# just add all headers if user didn't specifiy the magic", "header_magic", "==", "None", ",", "macho", ".", "headers", ")", "header", "=", "None", "if", "len", "(", "suggestions", ")", "<=", "0", "else", "suggestions", "[", "0", "]", "# filter() above *always* returns a list, so we have to check if it's empty", "if", "header", "is", "None", ":", "raise", "Exception", "(", "\"Unable to find a header for the given MAGIC value in that Mach-O file\"", ")", "return", "None", "def", "decodeLoadCommandData", "(", "data", ")", ":", "# Also ignore trailing zeros", "return", "data", "[", ":", "data", ".", "find", "(", "b\"\\x00\"", ")", "]", ".", "decode", "(", "sys", ".", "getfilesystemencoding", "(", ")", ")", "def", "strongReferencesFromHeader", "(", "h", ")", ":", "# List of LC_LOAD_DYLIB commands", "list", "=", "filter", "(", "lambda", "(", "lc", ",", "cmd", ",", "data", ")", ":", "lc", ".", "cmd", "==", "LC_LOAD_DYLIB", ",", "h", ".", "commands", ")", "# Their contents (aka data) as a file path", "return", "map", "(", "lambda", "(", "lc", ",", "cmd", ",", "data", ")", ":", "decodeLoadCommandData", "(", "data", ")", ",", "list", ")", "def", "weakReferencesFromHeader", "(", "h", ")", ":", "list", "=", "filter", "(", "lambda", "(", "lc", ",", "cmd", ",", "data", ")", ":", "lc", ".", "cmd", "==", "LC_LOAD_WEAK_DYLIB", ",", "h", ".", "commands", ")", "return", "map", "(", "lambda", "(", "lc", ",", "cmd", ",", "data", ")", ":", "decodeLoadCommandData", "(", "data", ")", ",", "list", ")", "strongRefs", "=", "strongReferencesFromHeader", "(", "header", ")", "weakRefs", "=", "weakReferencesFromHeader", "(", "header", ")", "return", "MachODeprendencies", "(", "weak", "=", "weakRefs", ",", "strong", "=", "strongRefs", ")" ]
Generates a list of libraries the given Mach-O file depends on. In that list a single library is represented by its "install path": for some libraries it would be a full file path, and for others it would be a relative path (sometimes with dyld templates like @executable_path or @rpath in it). Note: I don't know of any reason why some architectures of a fat Mach-O would depend on certain libraries while others don't, but *it's technically possible*. So that's why you may want to specify the `header_magic` value for a particular header. Returns an object with two properties: `weak` and `strong` that hold lists of weak and strong dependencies respectively.
[ "Generates", "a", "list", "of", "libraries", "the", "given", "Mach", "-", "O", "file", "depends", "on", "." ]
python
train
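
One portability note on the record above: the lambdas use tuple parameter unpacking (lambda (lc, cmd, data): ...), which PEP 3113 removed, so the function only runs under Python 2 as written. A hedged Python 3 rendering of the strong-reference step; the LC_LOAD_DYLIB constant is assumed to come from macholib.mach_o, as in typical macholib usage:

import sys

from macholib.mach_o import LC_LOAD_DYLIB

def strong_references_from_header(header):
    """Python 3 equivalent of strongReferencesFromHeader above."""
    return [
        # Trim at the first NUL byte, then decode the install path.
        data[:data.find(b"\x00")].decode(sys.getfilesystemencoding())
        for lc, cmd, data in header.commands  # unpack inside the loop instead
        if lc.cmd == LC_LOAD_DYLIB
    ]
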
ktbyers/netmiko
netmiko/utilities.py
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/utilities.py#L158-L179
def write_bytes(out_data, encoding="ascii"): """Write Python2 and Python3 compatible byte stream.""" if sys.version_info[0] >= 3: if isinstance(out_data, type("")): if encoding == "utf-8": return out_data.encode("utf-8") else: return out_data.encode("ascii", "ignore") elif isinstance(out_data, type(b"")): return out_data else: if isinstance(out_data, type("")): if encoding == "utf-8": return out_data.encode("utf-8") else: return out_data.encode("ascii", "ignore") elif isinstance(out_data, type(str(""))): return out_data msg = "Invalid value for out_data neither unicode nor byte string: {}".format( out_data ) raise ValueError(msg)
[ "def", "write_bytes", "(", "out_data", ",", "encoding", "=", "\"ascii\"", ")", ":", "if", "sys", ".", "version_info", "[", "0", "]", ">=", "3", ":", "if", "isinstance", "(", "out_data", ",", "type", "(", "\"\"", ")", ")", ":", "if", "encoding", "==", "\"utf-8\"", ":", "return", "out_data", ".", "encode", "(", "\"utf-8\"", ")", "else", ":", "return", "out_data", ".", "encode", "(", "\"ascii\"", ",", "\"ignore\"", ")", "elif", "isinstance", "(", "out_data", ",", "type", "(", "b\"\"", ")", ")", ":", "return", "out_data", "else", ":", "if", "isinstance", "(", "out_data", ",", "type", "(", "\"\"", ")", ")", ":", "if", "encoding", "==", "\"utf-8\"", ":", "return", "out_data", ".", "encode", "(", "\"utf-8\"", ")", "else", ":", "return", "out_data", ".", "encode", "(", "\"ascii\"", ",", "\"ignore\"", ")", "elif", "isinstance", "(", "out_data", ",", "type", "(", "str", "(", "\"\"", ")", ")", ")", ":", "return", "out_data", "msg", "=", "\"Invalid value for out_data neither unicode nor byte string: {}\"", ".", "format", "(", "out_data", ")", "raise", "ValueError", "(", "msg", ")" ]
Write Python2 and Python3 compatible byte stream.
[ "Write", "Python2", "and", "Python3", "compatible", "byte", "stream", "." ]
python
train
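
Both interpreter branches above normalize to the same behaviour: text is encoded (ASCII with silent dropping of non-ASCII characters by default, UTF-8 on request), bytes pass through untouched, and anything else raises. A few illustrative calls under Python 3:

from netmiko.utilities import write_bytes

write_bytes("show version\n")               # b'show version\n'
write_bytes("schön\n", encoding="utf-8")    # b'sch\xc3\xb6n\n'
write_bytes("schön\n")                      # b'schn\n' (ascii, errors ignored)
write_bytes(b"already bytes")               # returned unchanged
write_bytes(3.14)                           # raises ValueError
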
contentful/contentful-management.py
contentful_management/client_proxy.py
https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/client_proxy.py#L56-L78
def create(self, resource_id=None, attributes=None): """ Creates a resource with the given ID (optional) and attributes. """ if attributes is None: attributes = {} result = None if not resource_id: result = self.client._post( self._url(resource_id), self._resource_class.create_attributes(attributes), headers=self._resource_class.create_headers(attributes) ) else: result = self.client._put( self._url(resource_id), self._resource_class.create_attributes(attributes), headers=self._resource_class.create_headers(attributes) ) return result
[ "def", "create", "(", "self", ",", "resource_id", "=", "None", ",", "attributes", "=", "None", ")", ":", "if", "attributes", "is", "None", ":", "attributes", "=", "{", "}", "result", "=", "None", "if", "not", "resource_id", ":", "result", "=", "self", ".", "client", ".", "_post", "(", "self", ".", "_url", "(", "resource_id", ")", ",", "self", ".", "_resource_class", ".", "create_attributes", "(", "attributes", ")", ",", "headers", "=", "self", ".", "_resource_class", ".", "create_headers", "(", "attributes", ")", ")", "else", ":", "result", "=", "self", ".", "client", ".", "_put", "(", "self", ".", "_url", "(", "resource_id", ")", ",", "self", ".", "_resource_class", ".", "create_attributes", "(", "attributes", ")", ",", "headers", "=", "self", ".", "_resource_class", ".", "create_headers", "(", "attributes", ")", ")", "return", "result" ]
Creates a resource with the given ID (optional) and attributes.
[ "Creates", "a", "resource", "with", "the", "given", "ID", "(", "optional", ")", "and", "attributes", "." ]
python
train
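The branch above is a common upsert convention: POST to the collection when the server should assign the ID, PUT to a specific URL when the caller chooses it. A minimal standalone sketch of the same pattern (the endpoint is hypothetical, and a real client would add auth and error handling):

import json
import urllib.request

def upsert(base_url, attributes, resource_id=None):
    """POST for server-assigned IDs, PUT for caller-chosen IDs."""
    data = json.dumps(attributes).encode("utf-8")
    if resource_id is None:
        req = urllib.request.Request(base_url, data=data, method="POST")
    else:
        req = urllib.request.Request("{}/{}".format(base_url, resource_id),
                                     data=data, method="PUT")
    req.add_header("Content-Type", "application/json")
    return urllib.request.urlopen(req)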
mbj4668/pyang
pyang/grammar.py
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/grammar.py#L799-L828
def sort_canonical(keyword, stmts): """Sort all `stmts` in the canonical order defined by `keyword`. Return the sorted list. The `stmt` list is not modified. If `keyword` does not have a canonical order, the list is returned as is. """ try: (_arg_type, subspec) = stmt_map[keyword] except KeyError: return stmts res = [] # keep the order of data definition statements and case keep = [s[0] for s in data_def_stmts] + ['case'] for (kw, _spec) in flatten_spec(subspec): # keep comments before a statement together with that statement comments = [] for s in stmts: if s.keyword == '_comment': comments.append(s) elif s.keyword == kw and kw not in keep: res.extend(comments) comments = [] res.append(s) else: comments = [] # then copy all other statements (extensions) res.extend([stmt for stmt in stmts if stmt not in res]) return res
[ "def", "sort_canonical", "(", "keyword", ",", "stmts", ")", ":", "try", ":", "(", "_arg_type", ",", "subspec", ")", "=", "stmt_map", "[", "keyword", "]", "except", "KeyError", ":", "return", "stmts", "res", "=", "[", "]", "# keep the order of data definition statements and case", "keep", "=", "[", "s", "[", "0", "]", "for", "s", "in", "data_def_stmts", "]", "+", "[", "'case'", "]", "for", "(", "kw", ",", "_spec", ")", "in", "flatten_spec", "(", "subspec", ")", ":", "# keep comments before a statement together with that statement", "comments", "=", "[", "]", "for", "s", "in", "stmts", ":", "if", "s", ".", "keyword", "==", "'_comment'", ":", "comments", ".", "append", "(", "s", ")", "elif", "s", ".", "keyword", "==", "kw", "and", "kw", "not", "in", "keep", ":", "res", ".", "extend", "(", "comments", ")", "comments", "=", "[", "]", "res", ".", "append", "(", "s", ")", "else", ":", "comments", "=", "[", "]", "# then copy all other statements (extensions)", "res", ".", "extend", "(", "[", "stmt", "for", "stmt", "in", "stmts", "if", "stmt", "not", "in", "res", "]", ")", "return", "res" ]
Sort all `stmts` in the canonical order defined by `keyword`. Return the sorted list. The `stmt` list is not modified. If `keyword` does not have a canonical order, the list is returned as is.
[ "Sort", "all", "stmts", "in", "the", "canonical", "order", "defined", "by", "keyword", ".", "Return", "the", "sorted", "list", ".", "The", "stmt", "list", "is", "not", "modified", ".", "If", "keyword", "does", "not", "have", "a", "canonical", "order", "the", "list", "is", "returned", "as", "is", "." ]
python
train
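The comment-handling loop above keeps each run of '_comment' statements glued to the statement that follows it when that statement is pulled into canonical position. A stripped-down standalone version of the same idea, using (keyword, text) tuples in place of pyang statement objects:

def sort_canonical_demo(order, stmts):
    """Reorder stmts by the canonical keyword order, keeping each run of
    comments attached to the statement that immediately follows it."""
    res = []
    for kw in order:
        comments = []
        for s in stmts:
            if s[0] == '_comment':
                comments.append(s)
            elif s[0] == kw:
                res.extend(comments)
                comments = []
                res.append(s)
            else:
                comments = []
    # statements not covered by the canonical order keep their position at the end
    res.extend(s for s in stmts if s not in res)
    return res

stmts = [('_comment', '// latest revision'), ('revision', '2024-01-01'),
         ('namespace', 'urn:x'), ('prefix', 'x')]
print(sort_canonical_demo(['namespace', 'prefix', 'revision'], stmts))
# [('namespace', 'urn:x'), ('prefix', 'x'),
#  ('_comment', '// latest revision'), ('revision', '2024-01-01')]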
raphaelm/python-fints
fints/client.py
https://github.com/raphaelm/python-fints/blob/fee55ae37d3182d0adb40507d4acb98b06057e4a/fints/client.py#L1151-L1167
def send_tan(self, challenge: NeedTANResponse, tan: str): """ Sends a TAN to confirm a pending operation. :param challenge: NeedTANResponse to respond to :param tan: TAN value :return: Currently no response """ with self._get_dialog() as dialog: tan_seg = self._get_tan_segment(challenge.command_seg, '2', challenge.tan_request) self._pending_tan = tan response = dialog.send(tan_seg) resume_func = getattr(self, challenge.resume_method) return resume_func(challenge.command_seg, response)
[ "def", "send_tan", "(", "self", ",", "challenge", ":", "NeedTANResponse", ",", "tan", ":", "str", ")", ":", "with", "self", ".", "_get_dialog", "(", ")", "as", "dialog", ":", "tan_seg", "=", "self", ".", "_get_tan_segment", "(", "challenge", ".", "command_seg", ",", "'2'", ",", "challenge", ".", "tan_request", ")", "self", ".", "_pending_tan", "=", "tan", "response", "=", "dialog", ".", "send", "(", "tan_seg", ")", "resume_func", "=", "getattr", "(", "self", ",", "challenge", ".", "resume_method", ")", "return", "resume_func", "(", "challenge", ".", "command_seg", ",", "response", ")" ]
Sends a TAN to confirm a pending operation. :param challenge: NeedTANResponse to respond to :param tan: TAN value :return: Currently no response
[ "Sends", "a", "TAN", "to", "confirm", "a", "pending", "operation", "." ]
python
train
kevinconway/daemons
daemons/startstop/simple.py
https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/startstop/simple.py#L24-L49
def start(self): """Start the process with daemonization. If the process is already started this call should exit with code ALREADY_RUNNING. Otherwise it must call the 'daemonize' method and then call 'run'. """ if self.pid is not None: LOG.error( "The process is already running with pid {0}.".format(self.pid) ) sys.exit(exit.ALREADY_RUNNING) self.daemonize() LOG.info("Beginning run loop for process.") try: self.run() except Exception: LOG.exception("Uncaught exception in the daemon run() method.") self.stop() sys.exit(exit.RUN_FAILURE)
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "pid", "is", "not", "None", ":", "LOG", ".", "error", "(", "\"The process is already running with pid {0}.\"", ".", "format", "(", "self", ".", "pid", ")", ")", "sys", ".", "exit", "(", "exit", ".", "ALREADY_RUNNING", ")", "self", ".", "daemonize", "(", ")", "LOG", ".", "info", "(", "\"Beginning run loop for process.\"", ")", "try", ":", "self", ".", "run", "(", ")", "except", "Exception", ":", "LOG", ".", "exception", "(", "\"Uncaught exception in the daemon run() method.\"", ")", "self", ".", "stop", "(", ")", "sys", ".", "exit", "(", "exit", ".", "RUN_FAILURE", ")" ]
Start the process with daemonization. If the process is already started this call should exit with code ALREADY_RUNNING. Otherwise it must call the 'daemonize' method and then call 'run'.
[ "Start", "the", "process", "with", "daemonization", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/utils/_process_win32.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/_process_win32.py#L75-L93
def _find_cmd(cmd): """Find the full path to a .bat or .exe using the win32api module.""" try: from win32api import SearchPath except ImportError: raise ImportError('you need to have pywin32 installed for this to work') else: PATH = os.environ['PATH'] extensions = ['.exe', '.com', '.bat', '.py'] path = None for ext in extensions: try: path = SearchPath(PATH, cmd + ext)[0] except: pass if path is None: raise OSError("command %r not found" % cmd) else: return path
[ "def", "_find_cmd", "(", "cmd", ")", ":", "try", ":", "from", "win32api", "import", "SearchPath", "except", "ImportError", ":", "raise", "ImportError", "(", "'you need to have pywin32 installed for this to work'", ")", "else", ":", "PATH", "=", "os", ".", "environ", "[", "'PATH'", "]", "extensions", "=", "[", "'.exe'", ",", "'.com'", ",", "'.bat'", ",", "'.py'", "]", "path", "=", "None", "for", "ext", "in", "extensions", ":", "try", ":", "path", "=", "SearchPath", "(", "PATH", ",", "cmd", "+", "ext", ")", "[", "0", "]", "except", ":", "pass", "if", "path", "is", "None", ":", "raise", "OSError", "(", "\"command %r not found\"", "%", "cmd", ")", "else", ":", "return", "path" ]
Find the full path to a .bat or .exe using the win32api module.
[ "Find", "the", "full", "path", "to", "a", ".", "bat", "or", ".", "exe", "using", "the", "win32api", "module", "." ]
python
test
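On Python 3.3+ the same lookup needs no pywin32: shutil.which searches PATH and, on Windows, also honours PATHEXT. A hedged drop-in sketch:

import shutil

def find_cmd(cmd):
    """Portable replacement for the pywin32-based lookup above."""
    path = shutil.which(cmd)  # consults PATH, and PATHEXT on Windows
    if path is None:
        raise OSError("command %r not found" % cmd)
    return path

print(find_cmd("python"))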
Games-and-Simulations/sc-docker
scbw/docker_utils.py
https://github.com/Games-and-Simulations/sc-docker/blob/1d7adb9b5839783655564afc4bbcd204a0055dcb/scbw/docker_utils.py#L145-L164
def check_dockermachine() -> bool:
    """
    Checks that docker-machine is available on the computer

    :return: True if docker-machine is present, False otherwise
        (failures are logged at debug level; no exception is raised)
    """
    logger.debug("checking docker-machine presence")

    # noinspection PyBroadException
    try:
        out = subprocess \
            .check_output(["docker-machine", "version"]) \
            .decode("utf-8") \
            .replace("docker-machine.exe", "") \
            .replace("docker-machine", "") \
            .strip()
        logger.debug(f"using docker machine version {out}")
        return True
    except Exception:
        logger.debug(f"docker machine not present")
        return False
[ "def", "check_dockermachine", "(", ")", "->", "bool", ":", "logger", ".", "debug", "(", "\"checking docker-machine presence\"", ")", "# noinspection PyBroadException", "try", ":", "out", "=", "subprocess", ".", "check_output", "(", "[", "\"docker-machine\"", ",", "\"version\"", "]", ")", ".", "decode", "(", "\"utf-8\"", ")", ".", "replace", "(", "\"docker-machine.exe\"", ",", "\"\"", ")", ".", "replace", "(", "\"docker-machine\"", ",", "\"\"", ")", ".", "strip", "(", ")", "logger", ".", "debug", "(", "f\"using docker machine version {out}\"", ")", "return", "True", "except", "Exception", ":", "logger", ".", "debug", "(", "f\"docker machine not present\"", ")", "return", "False" ]
Checks that docker-machine is available on the computer

    :return: True if docker-machine is present, False otherwise
    (failures are logged at debug level; no exception is raised)
[ "Checks", "that", "docker", "-", "machine", "is", "available", "on", "the", "computer" ]
python
train
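The probe-by-running-a-version-command pattern above generalises to any CLI dependency; a self-contained sketch (it assumes the tool accepts a `--version` flag, which most do; docker-machine itself uses the bare `version` subcommand as shown above):

import logging
import subprocess

logger = logging.getLogger(__name__)

def check_tool(tool):
    """Return True if `tool` can be executed, False otherwise (never raises)."""
    try:
        out = subprocess.check_output([tool, "--version"]).decode("utf-8").strip()
        logger.debug("found %s: %s", tool, out)
        return True
    except (OSError, subprocess.CalledProcessError):
        logger.debug("%s not present", tool)
        return False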
c0fec0de/anytree
anytree/importer/jsonimporter.py
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/importer/jsonimporter.py#L64-L66
def read(self, filehandle): """Read JSON from `filehandle`.""" return self.__import(json.load(filehandle, **self.kwargs))
[ "def", "read", "(", "self", ",", "filehandle", ")", ":", "return", "self", ".", "__import", "(", "json", ".", "load", "(", "filehandle", ",", "*", "*", "self", ".", "kwargs", ")", ")" ]
Read JSON from `filehandle`.
[ "Read", "JSON", "from", "filehandle", "." ]
python
train
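A usage sketch based on anytree's documented importer API (the 'name'/'children' JSON shape follows the DictImporter convention that JsonImporter wraps):

from io import StringIO
from anytree import RenderTree
from anytree.importer import JsonImporter

importer = JsonImporter()
fh = StringIO('{"name": "root", "children": [{"name": "a"}, {"name": "b"}]}')
root = importer.read(fh)
print(RenderTree(root))   # renders root with children a and b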
Hironsan/anago
anago/layers.py
https://github.com/Hironsan/anago/blob/66a97f91c41f9613b736892e9762dccb9c28f623/anago/layers.py#L363-L376
def get_energy(self, y_true, input_energy, mask): """Energy = a1' y1 + u1' y1 + y1' U y2 + u2' y2 + y2' U y3 + u3' y3 + an' y3 """ input_energy = K.sum(input_energy * y_true, 2) # (B, T) chain_energy = K.sum(K.dot(y_true[:, :-1, :], self.chain_kernel) * y_true[:, 1:, :], 2) # (B, T-1) if mask is not None: mask = K.cast(mask, K.floatx()) chain_mask = mask[:, :-1] * mask[:, 1:] # (B, T-1), mask[:,:-1]*mask[:,1:] makes it work with any padding input_energy = input_energy * mask chain_energy = chain_energy * chain_mask total_energy = K.sum(input_energy, -1) + K.sum(chain_energy, -1) # (B, ) return total_energy
[ "def", "get_energy", "(", "self", ",", "y_true", ",", "input_energy", ",", "mask", ")", ":", "input_energy", "=", "K", ".", "sum", "(", "input_energy", "*", "y_true", ",", "2", ")", "# (B, T)", "chain_energy", "=", "K", ".", "sum", "(", "K", ".", "dot", "(", "y_true", "[", ":", ",", ":", "-", "1", ",", ":", "]", ",", "self", ".", "chain_kernel", ")", "*", "y_true", "[", ":", ",", "1", ":", ",", ":", "]", ",", "2", ")", "# (B, T-1)", "if", "mask", "is", "not", "None", ":", "mask", "=", "K", ".", "cast", "(", "mask", ",", "K", ".", "floatx", "(", ")", ")", "chain_mask", "=", "mask", "[", ":", ",", ":", "-", "1", "]", "*", "mask", "[", ":", ",", "1", ":", "]", "# (B, T-1), mask[:,:-1]*mask[:,1:] makes it work with any padding", "input_energy", "=", "input_energy", "*", "mask", "chain_energy", "=", "chain_energy", "*", "chain_mask", "total_energy", "=", "K", ".", "sum", "(", "input_energy", ",", "-", "1", ")", "+", "K", ".", "sum", "(", "chain_energy", ",", "-", "1", ")", "# (B, )", "return", "total_energy" ]
Energy = a1' y1 + u1' y1 + y1' U y2 + u2' y2 + y2' U y3 + u3' y3 + an' y3
[ "Energy", "=", "a1", "y1", "+", "u1", "y1", "+", "y1", "U", "y2", "+", "u2", "y2", "+", "y2", "U", "y3", "+", "u3", "y3", "+", "an", "y3" ]
python
train
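Spelled out, the docstring's shorthand above is the standard linear-chain CRF energy: unary (emission) terms plus pairwise (transition) terms, with padding handled by the mask. Writing y_t for the one-hot label at step t, u_t for the input-energy row, and U for the chain kernel:

E(y) = \sum_{t=1}^{T} u_t^\top y_t \; + \; \sum_{t=1}^{T-1} y_t^\top U \, y_{t+1}

The code computes the first sum as K.sum(input_energy * y_true, 2) and the second as K.sum(K.dot(y_true[:, :-1, :], chain_kernel) * y_true[:, 1:, :], 2), then reduces over time.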
msmbuilder/msmbuilder
msmbuilder/msm/core.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/msm/core.py#L266-L308
def draw_samples(self, sequences, n_samples, random_state=None):
        """Sample conformations for sequences of states.

        Parameters
        ----------
        sequences : list or list of lists
            A sequence or list of sequences, in which each element corresponds
            to a state label.
        n_samples : int
            How many samples to return for any given state.

        Returns
        -------
        selected_pairs_by_state : np.array, dtype=int, shape=(n_states, n_samples, 2)
            selected_pairs_by_state[state] gives an array of randomly selected (trj, frame)
            pairs from the specified state.

        See Also
        --------
        utils.map_drawn_samples : Extract conformations from MD trajectories by index.

        """
        if not any([isinstance(seq, collections.Iterable) for seq in sequences]):
            sequences = [sequences]

        random = check_random_state(random_state)

        selected_pairs_by_state = []
        for state in range(self.n_states_):
            all_frames = [np.where(a == state)[0] for a in sequences]
            pairs = [(trj, frame) for (trj, frames) in enumerate(all_frames)
                     for frame in frames]
            if pairs:
                selected_pairs_by_state.append(
                    [pairs[random.choice(len(pairs))] for i in range(n_samples)])
            else:
                selected_pairs_by_state.append([])

        return np.array(selected_pairs_by_state)
[ "def", "draw_samples", "(", "self", ",", "sequences", ",", "n_samples", ",", "random_state", "=", "None", ")", ":", "if", "not", "any", "(", "[", "isinstance", "(", "seq", ",", "collections", ".", "Iterable", ")", "for", "seq", "in", "sequences", "]", ")", ":", "sequences", "=", "[", "sequences", "]", "random", "=", "check_random_state", "(", "random_state", ")", "selected_pairs_by_state", "=", "[", "]", "for", "state", "in", "range", "(", "self", ".", "n_states_", ")", ":", "all_frames", "=", "[", "np", ".", "where", "(", "a", "==", "state", ")", "[", "0", "]", "for", "a", "in", "sequences", "]", "pairs", "=", "[", "(", "trj", ",", "frame", ")", "for", "(", "trj", ",", "frames", ")", "in", "enumerate", "(", "all_frames", ")", "for", "frame", "in", "frames", "]", "if", "pairs", ":", "selected_pairs_by_state", ".", "append", "(", "[", "pairs", "[", "random", ".", "choice", "(", "len", "(", "pairs", ")", ")", "]", "for", "i", "in", "range", "(", "n_samples", ")", "]", ")", "else", ":", "selected_pairs_by_state", ".", "append", "(", "[", "]", ")", "return", "np", ".", "array", "(", "selected_pairs_by_state", ")" ]
Sample conformations for sequences of states.

        Parameters
        ----------
        sequences : list or list of lists
            A sequence or list of sequences, in which each element corresponds
            to a state label.
        n_samples : int
            How many samples to return for any given state.

        Returns
        -------
        selected_pairs_by_state : np.array, dtype=int, shape=(n_states, n_samples, 2)
            selected_pairs_by_state[state] gives an array of randomly selected (trj, frame)
            pairs from the specified state.

        See Also
        --------
        utils.map_drawn_samples : Extract conformations from MD trajectories by index.
[ "Sample", "conformations", "for", "a", "sequences", "of", "states", "." ]
python
train
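The indexing at the heart of draw_samples can be checked standalone with toy trajectories; this sketch reproduces the (trajectory, frame) pair construction for a single state:

import numpy as np

# Two toy discrete trajectories over 3 states.
sequences = [np.array([0, 1, 1, 2, 0]), np.array([2, 2, 1, 0, 0])]

# Same logic as the method: all (trajectory, frame) pairs labelled state 1.
pairs = [(trj, frame)
         for trj, seq in enumerate(sequences)
         for frame in np.where(seq == 1)[0]]
print(pairs)      # [(0, 1), (0, 2), (1, 2)]

rng = np.random.RandomState(42)
print([pairs[rng.choice(len(pairs))] for _ in range(4)])  # 4 draws from state 1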
jciskey/pygraph
pygraph/predefined_graphs.py
https://github.com/jciskey/pygraph/blob/037bb2f32503fecb60d62921f9766d54109f15e2/pygraph/predefined_graphs.py#L24-L37
def build_wheel_graph(num_nodes): """Builds a wheel graph with the specified number of nodes. Ref: http://mathworld.wolfram.com/WheelGraph.html""" # The easiest way to build a wheel graph is to build # C_n-1 and then add a hub node and spoke edges graph = build_cycle_graph(num_nodes - 1) cycle_graph_vertices = graph.get_all_node_ids() node_id = graph.new_node() for cycle_node in cycle_graph_vertices: graph.new_edge(node_id, cycle_node) return graph
[ "def", "build_wheel_graph", "(", "num_nodes", ")", ":", "# The easiest way to build a wheel graph is to build", "# C_n-1 and then add a hub node and spoke edges", "graph", "=", "build_cycle_graph", "(", "num_nodes", "-", "1", ")", "cycle_graph_vertices", "=", "graph", ".", "get_all_node_ids", "(", ")", "node_id", "=", "graph", ".", "new_node", "(", ")", "for", "cycle_node", "in", "cycle_graph_vertices", ":", "graph", ".", "new_edge", "(", "node_id", ",", "cycle_node", ")", "return", "graph" ]
Builds a wheel graph with the specified number of nodes. Ref: http://mathworld.wolfram.com/WheelGraph.html
[ "Builds", "a", "wheel", "graph", "with", "the", "specified", "number", "of", "nodes", ".", "Ref", ":", "http", ":", "//", "mathworld", ".", "wolfram", ".", "com", "/", "WheelGraph", ".", "html" ]
python
train
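A quick sanity check on the construction: a wheel graph on n nodes is an (n-1)-cycle plus a hub, so it has n nodes and 2(n-1) edges, (n-1) around the rim and (n-1) spokes:

def wheel_counts(num_nodes):
    """Expected node and edge counts for a wheel graph built as above."""
    rim = num_nodes - 1
    return num_nodes, rim + rim   # rim edges + spoke edges

print(wheel_counts(5))   # (5, 8): a 4-cycle, one hub, four spokes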
RedHatInsights/insights-core
insights/parsers/grub_conf.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/grub_conf.py#L447-L460
def _parse_title(line_iter, cur_line, conf): """ Parse "title" in grub v1 config """ title = [] conf['title'].append(title) title.append(('title_name', cur_line.split('title', 1)[1].strip())) while (True): line = next(line_iter) if line.startswith("title "): return line cmd, opt = _parse_cmd(line) title.append((cmd, opt))
[ "def", "_parse_title", "(", "line_iter", ",", "cur_line", ",", "conf", ")", ":", "title", "=", "[", "]", "conf", "[", "'title'", "]", ".", "append", "(", "title", ")", "title", ".", "append", "(", "(", "'title_name'", ",", "cur_line", ".", "split", "(", "'title'", ",", "1", ")", "[", "1", "]", ".", "strip", "(", ")", ")", ")", "while", "(", "True", ")", ":", "line", "=", "next", "(", "line_iter", ")", "if", "line", ".", "startswith", "(", "\"title \"", ")", ":", "return", "line", "cmd", ",", "opt", "=", "_parse_cmd", "(", "line", ")", "title", ".", "append", "(", "(", "cmd", ",", "opt", ")", ")" ]
Parse "title" in grub v1 config
[ "Parse", "title", "in", "grub", "v1", "config" ]
python
train
edx/xblock-utils
xblockutils/studio_editable.py
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/studio_editable.py#L443-L450
def get_nested_blocks_spec(self):
        """
        Converts allowed_nested_blocks items to NestedXBlockSpec to provide a common interface
        """
        return [
            block_spec if isinstance(block_spec, NestedXBlockSpec) else NestedXBlockSpec(block_spec)
            for block_spec in self.allowed_nested_blocks
        ]
[ "def", "get_nested_blocks_spec", "(", "self", ")", ":", "return", "[", "block_spec", "if", "isinstance", "(", "block_spec", ",", "NestedXBlockSpec", ")", "else", "NestedXBlockSpec", "(", "block_spec", ")", "for", "block_spec", "in", "self", ".", "allowed_nested_blocks", "]" ]
Converts allowed_nested_blocks items to NestedXBlockSpec to provide a common interface
[ "Converts", "allowed_nested_blocks", "items", "to", "NestedXBlockSpec", "to", "provide", "common", "interface" ]
python
train
persephone-tools/persephone
persephone/datasets/na.py
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/na.py#L267-L289
def prepare_labels(label_type, org_xml_dir=ORG_XML_DIR, label_dir=LABEL_DIR): """ Prepare the neural network output targets.""" if not os.path.exists(os.path.join(label_dir, "TEXT")): os.makedirs(os.path.join(label_dir, "TEXT")) if not os.path.exists(os.path.join(label_dir, "WORDLIST")): os.makedirs(os.path.join(label_dir, "WORDLIST")) for path in Path(org_xml_dir).glob("*.xml"): fn = path.name prefix, _ = os.path.splitext(fn) rec_type, sents, _, _ = pangloss.get_sents_times_and_translations(str(path)) # Write the sentence transcriptions to file sents = [preprocess_na(sent, label_type) for sent in sents] for i, sent in enumerate(sents): if sent.strip() == "": # Then there's no transcription, so ignore this. continue out_fn = "%s.%d.%s" % (prefix, i, label_type) sent_path = os.path.join(label_dir, rec_type, out_fn) with open(sent_path, "w") as sent_f: print(sent, file=sent_f)
[ "def", "prepare_labels", "(", "label_type", ",", "org_xml_dir", "=", "ORG_XML_DIR", ",", "label_dir", "=", "LABEL_DIR", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "label_dir", ",", "\"TEXT\"", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "label_dir", ",", "\"TEXT\"", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "label_dir", ",", "\"WORDLIST\"", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "label_dir", ",", "\"WORDLIST\"", ")", ")", "for", "path", "in", "Path", "(", "org_xml_dir", ")", ".", "glob", "(", "\"*.xml\"", ")", ":", "fn", "=", "path", ".", "name", "prefix", ",", "_", "=", "os", ".", "path", ".", "splitext", "(", "fn", ")", "rec_type", ",", "sents", ",", "_", ",", "_", "=", "pangloss", ".", "get_sents_times_and_translations", "(", "str", "(", "path", ")", ")", "# Write the sentence transcriptions to file", "sents", "=", "[", "preprocess_na", "(", "sent", ",", "label_type", ")", "for", "sent", "in", "sents", "]", "for", "i", ",", "sent", "in", "enumerate", "(", "sents", ")", ":", "if", "sent", ".", "strip", "(", ")", "==", "\"\"", ":", "# Then there's no transcription, so ignore this.", "continue", "out_fn", "=", "\"%s.%d.%s\"", "%", "(", "prefix", ",", "i", ",", "label_type", ")", "sent_path", "=", "os", ".", "path", ".", "join", "(", "label_dir", ",", "rec_type", ",", "out_fn", ")", "with", "open", "(", "sent_path", ",", "\"w\"", ")", "as", "sent_f", ":", "print", "(", "sent", ",", "file", "=", "sent_f", ")" ]
Prepare the neural network output targets.
[ "Prepare", "the", "neural", "network", "output", "targets", "." ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_ti_batch.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti_batch.py#L1477-L1534
def submit_files(self, halt_on_error=True):
        """Submit Files for Documents and Reports to ThreatConnect API.

        Critical Errors

        * There is insufficient document storage allocated to this account.

        Args:
            halt_on_error (bool, default:True): If True any exception will raise an error.

        Returns:
            dict: The upload status for each xid.
        """
        # check global setting for override
        if self.halt_on_file_error is not None:
            halt_on_error = self.halt_on_file_error

        upload_status = []
        for xid, content_data in list(self._files.items()):
            del self._files[xid]  # win or lose, remove the entry
            status = True

            # used for debug/testing to prevent upload of previously uploaded file
            if self.debug and xid in self.saved_xids:
                self.tcex.log.debug('skipping previously saved file {}.'.format(xid))
                continue

            # process the file content
            content = content_data.get('fileContent')
            if callable(content):
                content = content_data.get('fileContent')(xid)
            if content is None:
                upload_status.append({'uploaded': False, 'xid': xid})
                self.tcex.log.warning('File content was null for xid {}.'.format(xid))
                continue
            if content_data.get('type') == 'Document':
                api_branch = 'documents'
            elif content_data.get('type') == 'Report':
                api_branch = 'reports'

            # Post File
            url = '/v2/groups/{}/{}/upload'.format(api_branch, xid)
            headers = {'Content-Type': 'application/octet-stream'}
            params = {'owner': self._owner}
            r = self.submit_file_content('POST', url, content, headers, params, halt_on_error)
            if r.status_code == 401:
                # use PUT method if file already exists
                self.tcex.log.info('Received 401 status code using POST. Trying PUT to update.')
                r = self.submit_file_content('PUT', url, content, headers, params, halt_on_error)
            self.tcex.log.debug('{} Upload URL: {}.'.format(content_data.get('type'), r.url))
            if not r.ok:
                status = False
                self.tcex.handle_error(585, [r.status_code, r.text], halt_on_error)
            elif self.debug:
                self.saved_xids.append(xid)
            self.tcex.log.info('Status {} for file upload with xid {}.'.format(r.status_code, xid))
            upload_status.append({'uploaded': status, 'xid': xid})
        return upload_status
[ "def", "submit_files", "(", "self", ",", "halt_on_error", "=", "True", ")", ":", "# check global setting for override", "if", "self", ".", "halt_on_file_error", "is", "not", "None", ":", "halt_on_error", "=", "self", ".", "halt_on_file_error", "upload_status", "=", "[", "]", "for", "xid", ",", "content_data", "in", "self", ".", "_files", ".", "items", "(", ")", ":", "del", "self", ".", "_files", "[", "xid", "]", "# win or loose remove the entry", "status", "=", "True", "# used for debug/testing to prevent upload of previously uploaded file", "if", "self", ".", "debug", "and", "xid", "in", "self", ".", "saved_xids", ":", "self", ".", "tcex", ".", "log", ".", "debug", "(", "'skipping previously saved file {}.'", ".", "format", "(", "xid", ")", ")", "continue", "# process the file content", "content", "=", "content_data", ".", "get", "(", "'fileContent'", ")", "if", "callable", "(", "content", ")", ":", "content", "=", "content_data", ".", "get", "(", "'fileContent'", ")", "(", "xid", ")", "if", "content", "is", "None", ":", "upload_status", ".", "append", "(", "{", "'uploaded'", ":", "False", ",", "'xid'", ":", "xid", "}", ")", "self", ".", "tcex", ".", "log", ".", "warning", "(", "'File content was null for xid {}.'", ".", "format", "(", "xid", ")", ")", "continue", "if", "content_data", ".", "get", "(", "'type'", ")", "==", "'Document'", ":", "api_branch", "=", "'documents'", "elif", "content_data", ".", "get", "(", "'type'", ")", "==", "'Report'", ":", "api_branch", "=", "'reports'", "# Post File", "url", "=", "'/v2/groups/{}/{}/upload'", ".", "format", "(", "api_branch", ",", "xid", ")", "headers", "=", "{", "'Content-Type'", ":", "'application/octet-stream'", "}", "params", "=", "{", "'owner'", ":", "self", ".", "_owner", "}", "r", "=", "self", ".", "submit_file_content", "(", "'POST'", ",", "url", ",", "content", ",", "headers", ",", "params", ",", "halt_on_error", ")", "if", "r", ".", "status_code", "==", "401", ":", "# use PUT method if file already exists", "self", ".", "tcex", ".", "log", ".", "info", "(", "'Received 401 status code using POST. Trying PUT to update.'", ")", "r", "=", "self", ".", "submit_file_content", "(", "'PUT'", ",", "url", ",", "content", ",", "headers", ",", "params", ",", "halt_on_error", ")", "self", ".", "tcex", ".", "log", ".", "debug", "(", "'{} Upload URL: {}.'", ".", "format", "(", "content_data", ".", "get", "(", "'type'", ")", ",", "r", ".", "url", ")", ")", "if", "not", "r", ".", "ok", ":", "status", "=", "False", "self", ".", "tcex", ".", "handle_error", "(", "585", ",", "[", "r", ".", "status_code", ",", "r", ".", "text", "]", ",", "halt_on_error", ")", "elif", "self", ".", "debug", ":", "self", ".", "saved_xids", ".", "append", "(", "xid", ")", "self", ".", "tcex", ".", "log", ".", "info", "(", "'Status {} for file upload with xid {}.'", ".", "format", "(", "r", ".", "status_code", ",", "xid", ")", ")", "upload_status", ".", "append", "(", "{", "'uploaded'", ":", "status", ",", "'xid'", ":", "xid", "}", ")", "return", "upload_status" ]
Submit Files for Documents and Reports to ThreatConnect API. Critical Errors * There is insufficient document storage allocated to this account. Args: halt_on_error (bool, default:True): If True any exception will raise an error. Returns: dict: The upload status for each xid.
[ "Submit", "Files", "for", "Documents", "and", "Reports", "to", "ThreatConnect", "API", "." ]
python
train
nathan-hoad/aiomanhole
aiomanhole/__init__.py
https://github.com/nathan-hoad/aiomanhole/blob/a13394c79e1878cde67aa2637ae5664df468ed04/aiomanhole/__init__.py#L99-L107
def run_command(self, codeobj): """Execute a compiled code object, and write the output back to the client.""" try: value, stdout = yield from self.attempt_exec(codeobj, self.namespace) except Exception: yield from self.send_exception() return else: yield from self.send_output(value, stdout)
[ "def", "run_command", "(", "self", ",", "codeobj", ")", ":", "try", ":", "value", ",", "stdout", "=", "yield", "from", "self", ".", "attempt_exec", "(", "codeobj", ",", "self", ".", "namespace", ")", "except", "Exception", ":", "yield", "from", "self", ".", "send_exception", "(", ")", "return", "else", ":", "yield", "from", "self", ".", "send_output", "(", "value", ",", "stdout", ")" ]
Execute a compiled code object, and write the output back to the client.
[ "Execute", "a", "compiled", "code", "object", "and", "write", "the", "output", "back", "to", "the", "client", "." ]
python
train
mitsei/dlkit
dlkit/handcar/repository/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/managers.py#L2979-L3002
def get_repository_lookup_session(self, proxy, *args, **kwargs): """Gets the repository lookup session. arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.RepositoryLookupSession) - a RepositoryLookupSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_repository_lookup() is false compliance: optional - This method must be implemented if supports_repository_lookup() is true. """ if not self.supports_repository_lookup(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.RepositoryLookupSession(proxy, runtime=self._runtime, **kwargs) except AttributeError: raise # OperationFailed() return session
[ "def", "get_repository_lookup_session", "(", "self", ",", "proxy", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "supports_repository_lookup", "(", ")", ":", "raise", "Unimplemented", "(", ")", "try", ":", "from", ".", "import", "sessions", "except", "ImportError", ":", "raise", "# OperationFailed()", "proxy", "=", "self", ".", "_convert_proxy", "(", "proxy", ")", "try", ":", "session", "=", "sessions", ".", "RepositoryLookupSession", "(", "proxy", ",", "runtime", "=", "self", ".", "_runtime", ",", "*", "*", "kwargs", ")", "except", "AttributeError", ":", "raise", "# OperationFailed()", "return", "session" ]
Gets the repository lookup session. arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.RepositoryLookupSession) - a RepositoryLookupSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_repository_lookup() is false compliance: optional - This method must be implemented if supports_repository_lookup() is true.
[ "Gets", "the", "repository", "lookup", "session", "." ]
python
train
tensorpack/tensorpack
tensorpack/models/regularize.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/models/regularize.py#L103-L141
def regularize_cost_from_collection(name='regularize_cost'):
    """
    Get the cost from the regularizers in ``tf.GraphKeys.REGULARIZATION_LOSSES``.
    If in replicated mode, will only regularize variables created within the current tower.

    Args:
        name (str): the name of the returned tensor

    Returns:
        tf.Tensor: a scalar, the total regularization cost.
    """
    ctx = get_current_tower_context()
    if not ctx.is_training:
        # TODO Currently cannot build the wd_cost correctly at inference,
        # because the vs_name used in inference can be '', therefore the
        # variable filter will fail
        return tf.constant(0, dtype=tf.float32, name='empty_' + name)

    # NOTE: this collection doesn't always grow with towers.
    # It only grows with actual variable creation, but not get_variable call.
    if ctx.has_own_variables:   # be careful of the first tower (name='')
        losses = ctx.get_collection_in_tower(tfv1.GraphKeys.REGULARIZATION_LOSSES)
    else:
        losses = tfv1.get_collection(tfv1.GraphKeys.REGULARIZATION_LOSSES)
    if len(losses) > 0:
        logger.info("regularize_cost_from_collection() found {} regularizers "
                    "in REGULARIZATION_LOSSES collection.".format(len(losses)))

        def maploss(l):
            assert l.dtype.is_floating, l
            if l.dtype != tf.float32:
                l = tf.cast(l, tf.float32)
            return l

        losses = [maploss(l) for l in losses]
        reg_loss = tf.add_n(losses, name=name)
        return reg_loss
    else:
        return tf.constant(0, dtype=tf.float32, name='empty_' + name)
[ "def", "regularize_cost_from_collection", "(", "name", "=", "'regularize_cost'", ")", ":", "ctx", "=", "get_current_tower_context", "(", ")", "if", "not", "ctx", ".", "is_training", ":", "# TODO Currently cannot build the wd_cost correctly at inference,", "# because ths vs_name used in inference can be '', therefore the", "# variable filter will fail", "return", "tf", ".", "constant", "(", "0", ",", "dtype", "=", "tf", ".", "float32", ",", "name", "=", "'empty_'", "+", "name", ")", "# NOTE: this collection doesn't always grow with towers.", "# It only grows with actual variable creation, but not get_variable call.", "if", "ctx", ".", "has_own_variables", ":", "# be careful of the first tower (name='')", "losses", "=", "ctx", ".", "get_collection_in_tower", "(", "tfv1", ".", "GraphKeys", ".", "REGULARIZATION_LOSSES", ")", "else", ":", "losses", "=", "tfv1", ".", "get_collection", "(", "tfv1", ".", "GraphKeys", ".", "REGULARIZATION_LOSSES", ")", "if", "len", "(", "losses", ")", ">", "0", ":", "logger", ".", "info", "(", "\"regularize_cost_from_collection() found {} regularizers \"", "\"in REGULARIZATION_LOSSES collection.\"", ".", "format", "(", "len", "(", "losses", ")", ")", ")", "def", "maploss", "(", "l", ")", ":", "assert", "l", ".", "dtype", ".", "is_floating", ",", "l", "if", "l", ".", "dtype", "!=", "tf", ".", "float32", ":", "l", "=", "tf", ".", "cast", "(", "l", ",", "tf", ".", "float32", ")", "return", "l", "losses", "=", "[", "maploss", "(", "l", ")", "for", "l", "in", "losses", "]", "reg_loss", "=", "tf", ".", "add_n", "(", "losses", ",", "name", "=", "name", ")", "return", "reg_loss", "else", ":", "return", "tf", ".", "constant", "(", "0", ",", "dtype", "=", "tf", ".", "float32", ",", "name", "=", "'empty_'", "+", "name", ")" ]
Get the cost from the regularizers in ``tf.GraphKeys.REGULARIZATION_LOSSES``. If in replicated mode, will only regularize variables created within the current tower. Args: name (str): the name of the returned tensor Returns: tf.Tensor: a scalar, the total regularization cost.
[ "Get", "the", "cost", "from", "the", "regularizers", "in", "tf", ".", "GraphKeys", ".", "REGULARIZATION_LOSSES", ".", "If", "in", "replicated", "mode", "will", "only", "regularize", "variables", "created", "within", "the", "current", "tower", "." ]
python
train
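A minimal TF1-style demonstration of the collection mechanism this helper reads: it places one weight-decay term in REGULARIZATION_LOSSES and sums the collection by hand. It does not exercise tensorpack's tower context, which is what the function above adds on top:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

w = tf.get_variable("w", shape=[3, 3])
# Anything placed in this collection is picked up by the helper above.
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, 1e-4 * tf.nn.l2_loss(w))

losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
reg_cost = tf.add_n(losses, name="regularize_cost")
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(reg_cost))   # small scalar weight-decay cost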
persandstrom/python-verisure
verisure/__main__.py
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/__main__.py#L22-L31
def print_result(overview, *names): """ Print the result of a verisure request """ if names: for name in names: toprint = overview for part in name.split('/'): toprint = toprint[part] print(json.dumps(toprint, indent=4, separators=(',', ': '))) else: print(json.dumps(overview, indent=4, separators=(',', ': ')))
[ "def", "print_result", "(", "overview", ",", "*", "names", ")", ":", "if", "names", ":", "for", "name", "in", "names", ":", "toprint", "=", "overview", "for", "part", "in", "name", ".", "split", "(", "'/'", ")", ":", "toprint", "=", "toprint", "[", "part", "]", "print", "(", "json", ".", "dumps", "(", "toprint", ",", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", ")", "else", ":", "print", "(", "json", ".", "dumps", "(", "overview", ",", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", ")" ]
Print the result of a verisure request
[ "Print", "the", "result", "of", "a", "verisure", "request" ]
python
train
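The '/'-separated path walk above is a handy generic trick for plucking nested keys out of JSON-like dicts; a standalone demonstration with made-up overview data:

import json

overview = {"armState": {"statusType": "DISARMED"},
            "climate": [{"temperature": 21.5}]}

def pluck(data, name):
    for part in name.split('/'):
        data = data[part]
    return data

print(json.dumps(pluck(overview, "armState/statusType"), indent=4))  # "DISARMED"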
sigmaris/python-gssapi
gssapi/creds.py
https://github.com/sigmaris/python-gssapi/blob/a8ca577b3ccf9d9fa48f16f4954a1eddd5896236/gssapi/creds.py#L266-L293
def export(self): """ Serializes this credential into a byte string, which can be passed to :meth:`imprt` in another process in order to deserialize the byte string back into a credential. Exporting a credential does not destroy it. :returns: The serialized token representation of this credential. :rtype: bytes :raises: :exc:`~gssapi.error.GSSException` if there is a problem with exporting the credential. :exc:`NotImplementedError` if the underlying GSSAPI implementation does not support the ``gss_export_cred`` C function. """ if not hasattr(C, 'gss_export_cred'): raise NotImplementedError("The GSSAPI implementation does not support gss_export_cred") minor_status = ffi.new('OM_uint32[1]') output_buffer = ffi.new('gss_buffer_desc[1]') retval = C.gss_export_cred(minor_status, self._cred[0], output_buffer) try: if GSS_ERROR(retval): raise _exception_for_status(retval, minor_status[0]) return _buf_to_str(output_buffer[0]) finally: if output_buffer[0].length != 0: C.gss_release_buffer(minor_status, output_buffer)
[ "def", "export", "(", "self", ")", ":", "if", "not", "hasattr", "(", "C", ",", "'gss_export_cred'", ")", ":", "raise", "NotImplementedError", "(", "\"The GSSAPI implementation does not support gss_export_cred\"", ")", "minor_status", "=", "ffi", ".", "new", "(", "'OM_uint32[1]'", ")", "output_buffer", "=", "ffi", ".", "new", "(", "'gss_buffer_desc[1]'", ")", "retval", "=", "C", ".", "gss_export_cred", "(", "minor_status", ",", "self", ".", "_cred", "[", "0", "]", ",", "output_buffer", ")", "try", ":", "if", "GSS_ERROR", "(", "retval", ")", ":", "raise", "_exception_for_status", "(", "retval", ",", "minor_status", "[", "0", "]", ")", "return", "_buf_to_str", "(", "output_buffer", "[", "0", "]", ")", "finally", ":", "if", "output_buffer", "[", "0", "]", ".", "length", "!=", "0", ":", "C", ".", "gss_release_buffer", "(", "minor_status", ",", "output_buffer", ")" ]
Serializes this credential into a byte string, which can be passed to :meth:`imprt` in another process in order to deserialize the byte string back into a credential. Exporting a credential does not destroy it. :returns: The serialized token representation of this credential. :rtype: bytes :raises: :exc:`~gssapi.error.GSSException` if there is a problem with exporting the credential. :exc:`NotImplementedError` if the underlying GSSAPI implementation does not support the ``gss_export_cred`` C function.
[ "Serializes", "this", "credential", "into", "a", "byte", "string", "which", "can", "be", "passed", "to", ":", "meth", ":", "imprt", "in", "another", "process", "in", "order", "to", "deserialize", "the", "byte", "string", "back", "into", "a", "credential", ".", "Exporting", "a", "credential", "does", "not", "destroy", "it", "." ]
python
test
NerdWalletOSS/savage
src/savage/utils.py
https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/utils.py#L22-L48
def get_bind_processor(column_type, dialect): """ Returns a bind processor for a column type and dialect, with special handling for JSON/JSONB column types to return dictionaries instead of serialized JSON strings. NOTE: This is a workaround for https://github.com/NerdWalletOSS/savage/issues/8 :param column_type: :py:class:`~sqlalchemy.sql.type_api.TypeEngine` :param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect` :return: bind processor for given column type and dialect """ if column_type.compile(dialect) not in {'JSON', 'JSONB'}: # For non-JSON/JSONB column types, return the column type's bind processor return column_type.bind_processor(dialect) if type(column_type) in {JSON, JSONB}: # For bare JSON/JSONB types, we simply skip bind processing altogether return None elif isinstance(column_type, TypeDecorator) and column_type._has_bind_processor: # For decorated JSON/JSONB types, we return the custom bind processor (if any) return partial(column_type.process_bind_param, dialect=dialect) else: # For all other cases, we fall back to deserializing the result of the bind processor def wrapped_bind_processor(value): json_deserializer = dialect._json_deserializer or json.loads return json_deserializer(column_type.bind_processor(dialect)(value)) return wrapped_bind_processor
[ "def", "get_bind_processor", "(", "column_type", ",", "dialect", ")", ":", "if", "column_type", ".", "compile", "(", "dialect", ")", "not", "in", "{", "'JSON'", ",", "'JSONB'", "}", ":", "# For non-JSON/JSONB column types, return the column type's bind processor", "return", "column_type", ".", "bind_processor", "(", "dialect", ")", "if", "type", "(", "column_type", ")", "in", "{", "JSON", ",", "JSONB", "}", ":", "# For bare JSON/JSONB types, we simply skip bind processing altogether", "return", "None", "elif", "isinstance", "(", "column_type", ",", "TypeDecorator", ")", "and", "column_type", ".", "_has_bind_processor", ":", "# For decorated JSON/JSONB types, we return the custom bind processor (if any)", "return", "partial", "(", "column_type", ".", "process_bind_param", ",", "dialect", "=", "dialect", ")", "else", ":", "# For all other cases, we fall back to deserializing the result of the bind processor", "def", "wrapped_bind_processor", "(", "value", ")", ":", "json_deserializer", "=", "dialect", ".", "_json_deserializer", "or", "json", ".", "loads", "return", "json_deserializer", "(", "column_type", ".", "bind_processor", "(", "dialect", ")", "(", "value", ")", ")", "return", "wrapped_bind_processor" ]
Returns a bind processor for a column type and dialect, with special handling for JSON/JSONB column types to return dictionaries instead of serialized JSON strings. NOTE: This is a workaround for https://github.com/NerdWalletOSS/savage/issues/8 :param column_type: :py:class:`~sqlalchemy.sql.type_api.TypeEngine` :param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect` :return: bind processor for given column type and dialect
[ "Returns", "a", "bind", "processor", "for", "a", "column", "type", "and", "dialect", "with", "special", "handling", "for", "JSON", "/", "JSONB", "column", "types", "to", "return", "dictionaries", "instead", "of", "serialized", "JSON", "strings", "." ]
python
train
jaraco/jaraco.util
jaraco/util/dice.py
https://github.com/jaraco/jaraco.util/blob/f21071c64f165a5cf844db15e39356e1a47f4b02/jaraco/util/dice.py#L32-L42
def do_dice_roll():
	"""
	Roll n-sided dice and print each result and the total
	"""
	options = get_options()
	dice = Dice(options.sides)
	rolls = [dice.roll() for n in range(options.number)]
	for roll in rolls:
		print('rolled', roll)
	if options.number > 1:
		print('total', sum(rolls))
[ "def", "do_dice_roll", "(", ")", ":", "options", "=", "get_options", "(", ")", "dice", "=", "Dice", "(", "options", ".", "sides", ")", "rolls", "=", "[", "dice", ".", "roll", "(", ")", "for", "n", "in", "range", "(", "options", ".", "number", ")", "]", "for", "roll", "in", "rolls", ":", "print", "(", "'rolled'", ",", "roll", ")", "if", "options", ".", "number", ">", "1", ":", "print", "(", "'total'", ",", "sum", "(", "rolls", ")", ")" ]
Roll n-sided dice and print each result and the total
[ "Roll", "n", "-", "sided", "dice", "and", "return", "each", "result", "and", "the", "total" ]
python
test
tornadoweb/tornado
tornado/httputil.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/httputil.py#L212-L229
def parse(cls, headers: str) -> "HTTPHeaders": """Returns a dictionary from HTTP header text. >>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n") >>> sorted(h.items()) [('Content-Length', '42'), ('Content-Type', 'text/html')] .. versionchanged:: 5.1 Raises `HTTPInputError` on malformed headers instead of a mix of `KeyError`, and `ValueError`. """ h = cls() for line in _CRLF_RE.split(headers): if line: h.parse_line(line) return h
[ "def", "parse", "(", "cls", ",", "headers", ":", "str", ")", "->", "\"HTTPHeaders\"", ":", "h", "=", "cls", "(", ")", "for", "line", "in", "_CRLF_RE", ".", "split", "(", "headers", ")", ":", "if", "line", ":", "h", ".", "parse_line", "(", "line", ")", "return", "h" ]
Returns a dictionary from HTTP header text. >>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n") >>> sorted(h.items()) [('Content-Length', '42'), ('Content-Type', 'text/html')] .. versionchanged:: 5.1 Raises `HTTPInputError` on malformed headers instead of a mix of `KeyError`, and `ValueError`.
[ "Returns", "a", "dictionary", "from", "HTTP", "header", "text", "." ]
python
train
juju/python-libjuju
juju/client/_client1.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/_client1.py#L598-L611
async def GetChanges(self, yaml): ''' yaml : str Returns -> typing.Union[typing.Sequence[~BundleChange], typing.Sequence[str]] ''' # map input types to rpc msg _params = dict() msg = dict(type='Bundle', request='GetChanges', version=1, params=_params) _params['yaml'] = yaml reply = await self.rpc(msg) return reply
[ "async", "def", "GetChanges", "(", "self", ",", "yaml", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'Bundle'", ",", "request", "=", "'GetChanges'", ",", "version", "=", "1", ",", "params", "=", "_params", ")", "_params", "[", "'yaml'", "]", "=", "yaml", "reply", "=", "await", "self", ".", "rpc", "(", "msg", ")", "return", "reply" ]
yaml : str Returns -> typing.Union[typing.Sequence[~BundleChange], typing.Sequence[str]]
[ "yaml", ":", "str", "Returns", "-", ">", "typing", ".", "Union", "[", "typing", ".", "Sequence", "[", "~BundleChange", "]", "typing", ".", "Sequence", "[", "str", "]]" ]
python
train
Jammy2211/PyAutoLens
autolens/data/array/util/grid_util.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/util/grid_util.py#L83-L126
def regular_grid_1d_from_shape_pixel_scales_and_origin(shape, pixel_scales, origin=(0.0, 0.0)):
    """Compute the (y,x) arc second coordinates at the centre of every pixel of an array of shape (rows, columns).

    Coordinates are defined from the top-left corner, such that the first pixel at location [0, 0] has a negative x \
    value and a positive y value in arc seconds.

    The regular grid is returned on an array of shape (total_pixels**2, 2) where the 2D dimensions of the original 2D \
    array are reduced to one dimension.

    y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index.

    Parameters
    ----------
    shape : (int, int)
        The (y,x) shape of the 2D array the regular grid of coordinates is computed for.
    pixel_scales : (float, float)
        The (y,x) arc-second to pixel scales of the 2D array.
    origin : (float, float)
        The (y,x) origin of the 2D array, which the regular grid is shifted around.

    Returns
    --------
    ndarray
        A regular grid of (y,x) arc-second coordinates at the centre of every pixel on a 2D array. The regular grid
        array has dimensions (total_pixels**2, 2).

    Examples
    --------
    regular_grid_1d = regular_grid_1d_from_shape_pixel_scales_and_origin(shape=(5,5), pixel_scales=(0.5, 0.5), \
                                                                         origin=(0.0, 0.0))
    """

    regular_grid_1d = np.zeros((shape[0]*shape[1], 2))

    centres_arcsec = centres_from_shape_pixel_scales_and_origin(shape=shape, pixel_scales=pixel_scales, origin=origin)

    i=0
    for y in range(shape[0]):
        for x in range(shape[1]):
            regular_grid_1d[i, 0] = -(y - centres_arcsec[0]) * pixel_scales[0]
            regular_grid_1d[i, 1] = (x - centres_arcsec[1]) * pixel_scales[1]
            i += 1

    return regular_grid_1d
[ "def", "regular_grid_1d_from_shape_pixel_scales_and_origin", "(", "shape", ",", "pixel_scales", ",", "origin", "=", "(", "0.0", ",", "0.0", ")", ")", ":", "regular_grid_1d", "=", "np", ".", "zeros", "(", "(", "shape", "[", "0", "]", "*", "shape", "[", "1", "]", ",", "2", ")", ")", "centres_arcsec", "=", "centres_from_shape_pixel_scales_and_origin", "(", "shape", "=", "shape", ",", "pixel_scales", "=", "pixel_scales", ",", "origin", "=", "origin", ")", "i", "=", "0", "for", "y", "in", "range", "(", "shape", "[", "0", "]", ")", ":", "for", "x", "in", "range", "(", "shape", "[", "1", "]", ")", ":", "regular_grid_1d", "[", "i", ",", "0", "]", "=", "-", "(", "y", "-", "centres_arcsec", "[", "0", "]", ")", "*", "pixel_scales", "[", "0", "]", "regular_grid_1d", "[", "i", ",", "1", "]", "=", "(", "x", "-", "centres_arcsec", "[", "1", "]", ")", "*", "pixel_scales", "[", "1", "]", "i", "+=", "1", "return", "regular_grid_1d" ]
Compute the (y,x) arc second coordinates at the centre of every pixel of an array of shape (rows, columns).

    Coordinates are defined from the top-left corner, such that the first pixel at location [0, 0] has a negative x \
    value and a positive y value in arc seconds.

    The regular grid is returned on an array of shape (total_pixels**2, 2) where the 2D dimensions of the original 2D \
    array are reduced to one dimension.

    y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index.

    Parameters
    ----------
    shape : (int, int)
        The (y,x) shape of the 2D array the regular grid of coordinates is computed for.
    pixel_scales : (float, float)
        The (y,x) arc-second to pixel scales of the 2D array.
    origin : (float, float)
        The (y,x) origin of the 2D array, which the regular grid is shifted around.

    Returns
    --------
    ndarray
        A regular grid of (y,x) arc-second coordinates at the centre of every pixel on a 2D array. The regular grid
        array has dimensions (total_pixels**2, 2).

    Examples
    --------
    regular_grid_1d = regular_grid_1d_from_shape_pixel_scales_and_origin(shape=(5,5), pixel_scales=(0.5, 0.5), \
                                                                         origin=(0.0, 0.0))
[ "Compute", "the", "(", "y", "x", ")", "arc", "second", "coordinates", "at", "the", "centre", "of", "every", "pixel", "of", "an", "array", "of", "shape", "(", "rows", "columns", ")", "." ]
python
valid
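A worked numeric check of the loop above, assuming the centres helper returns the geometric centre (shape - 1) / 2 per axis when the origin is (0, 0); note the convention this implies: the top-left pixel gets a positive y and a negative x:

import numpy as np

shape, pixel_scales = (3, 3), (1.0, 1.0)
centres = ((shape[0] - 1) / 2.0, (shape[1] - 1) / 2.0)   # assumed: (1.0, 1.0)

grid = np.zeros((shape[0] * shape[1], 2))
i = 0
for y in range(shape[0]):
    for x in range(shape[1]):
        grid[i, 0] = -(y - centres[0]) * pixel_scales[0]
        grid[i, 1] = (x - centres[1]) * pixel_scales[1]
        i += 1

print(grid[0], grid[4], grid[8])   # [ 1. -1.] [0. 0.] [-1.  1.]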
noahbenson/neuropythy
neuropythy/optimize/core.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/optimize/core.py#L1153-L1165
def sign(f=Ellipsis): ''' sign() yields a potential function equivalent to the sign of the input. sign(f) yields the sign of the potential function f. Note that sign has a derivative of 0 at all points; this is not mathematically correct, but it is useful for the purposes of numerical methods. If you want traditional behavior, it is suggested that one instead employ f/sqrt(f**2). ''' f = to_potential(f) if is_const_potential(f): return const_potential(np.sign(f.c)) elif is_identity_potential(f): return SignPotential() else: return compose(SignPotential(), f)
[ "def", "sign", "(", "f", "=", "Ellipsis", ")", ":", "f", "=", "to_potential", "(", "f", ")", "if", "is_const_potential", "(", "f", ")", ":", "return", "const_potential", "(", "np", ".", "sign", "(", "f", ".", "c", ")", ")", "elif", "is_identity_potential", "(", "f", ")", ":", "return", "SignPotential", "(", ")", "else", ":", "return", "compose", "(", "SignPotential", "(", ")", ",", "f", ")" ]
sign() yields a potential function equivalent to the sign of the input. sign(f) yields the sign of the potential function f. Note that sign has a derivative of 0 at all points; this is not mathematically correct, but it is useful for the purposes of numerical methods. If you want traditional behavior, it is suggested that one instead employ f/sqrt(f**2).
[ "sign", "()", "yields", "a", "potential", "function", "equivalent", "to", "the", "sign", "of", "the", "input", ".", "sign", "(", "f", ")", "yields", "the", "sign", "of", "the", "potential", "function", "f", "." ]
python
train
cmcqueen/simplerandom
python/python2/simplerandom/random/_random_py.py
https://github.com/cmcqueen/simplerandom/blob/3f19ffdfeaa8256986adf7173f08c1c719164d01/python/python2/simplerandom/random/_random_py.py#L44-L59
def seed(self, x=None, *args): """For consistent cross-platform seeding, provide an integer seed. """ if x is None: # Use same random seed code copied from Python's random.Random try: x = long(_hexlify(_urandom(16)), 16) except NotImplementedError: import time x = long(time.time() * 256) # use fractional seconds elif not isinstance(x, _Integral): # Use the hash of the input seed object. Note this does not give # consistent results cross-platform--between Python versions or # between 32-bit and 64-bit systems. x = hash(x) self.rng_iterator.seed(x, *args, mix_extras=True)
[ "def", "seed", "(", "self", ",", "x", "=", "None", ",", "*", "args", ")", ":", "if", "x", "is", "None", ":", "# Use same random seed code copied from Python's random.Random", "try", ":", "x", "=", "long", "(", "_hexlify", "(", "_urandom", "(", "16", ")", ")", ",", "16", ")", "except", "NotImplementedError", ":", "import", "time", "x", "=", "long", "(", "time", ".", "time", "(", ")", "*", "256", ")", "# use fractional seconds", "elif", "not", "isinstance", "(", "x", ",", "_Integral", ")", ":", "# Use the hash of the input seed object. Note this does not give", "# consistent results cross-platform--between Python versions or", "# between 32-bit and 64-bit systems.", "x", "=", "hash", "(", "x", ")", "self", ".", "rng_iterator", ".", "seed", "(", "x", ",", "*", "args", ",", "mix_extras", "=", "True", ")" ]
For consistent cross-platform seeding, provide an integer seed.
[ "For", "consistent", "cross", "-", "platform", "seeding", "provide", "an", "integer", "seed", "." ]
python
train