Dataset schema (one record per group of fields below):

repo              string, 7 to 55 chars
path              string, 4 to 223 chars
url               string, 87 to 315 chars
code              string, 75 to 104k chars
code_tokens       list
docstring         string, 1 to 46.9k chars
docstring_tokens  list
language          categorical, 1 distinct value
partition         categorical, 3 distinct values
avg_line_len      float64, 7.91 to 980
limpyd/redis-limpyd-jobs
limpyd_jobs/models.py
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L51-L84
def get_queue(cls, name, priority=0, **fields_if_new):
    """
    Get, or create, and return the wanted queue.
    If the queue is created, fields in fields_if_new will be set for the
    new queue.
    """
    queue_kwargs = {'name': name, 'priority': priority}
    retries = 0
    while retries < 10:
        retries += 1
        try:
            queue, created = cls.get_or_connect(**queue_kwargs)
        except IndexError:
            # Failure during the retrieval https://friendpaste.com/5U63a8aFuV44SEgQckgMP
            # => retry
            continue
        except ValueError:
            # more than one (race condition https://github.com/yohanboniface/redis-limpyd/issues/82 ?)
            try:
                queue = cls.collection(**queue_kwargs).instances()[0]
            except IndexError:
                # but no more now ?!
                # => retry
                continue
            else:
                created = False
        # ok we have our queue, stop now
        break
    if created and fields_if_new:
        queue.set_fields(**fields_if_new)
    return queue
[ "def", "get_queue", "(", "cls", ",", "name", ",", "priority", "=", "0", ",", "*", "*", "fields_if_new", ")", ":", "queue_kwargs", "=", "{", "'name'", ":", "name", ",", "'priority'", ":", "priority", "}", "retries", "=", "0", "while", "retries", "<", "10", ":", "retries", "+=", "1", "try", ":", "queue", ",", "created", "=", "cls", ".", "get_or_connect", "(", "*", "*", "queue_kwargs", ")", "except", "IndexError", ":", "# Failure during the retrieval https://friendpaste.com/5U63a8aFuV44SEgQckgMP", "# => retry", "continue", "except", "ValueError", ":", "# more than one (race condition https://github.com/yohanboniface/redis-limpyd/issues/82 ?)", "try", ":", "queue", "=", "cls", ".", "collection", "(", "*", "*", "queue_kwargs", ")", ".", "instances", "(", ")", "[", "0", "]", "except", "IndexError", ":", "# but no more now ?!", "# => retry", "continue", "else", ":", "created", "=", "False", "# ok we have our queue, stop now", "break", "if", "created", "and", "fields_if_new", ":", "queue", ".", "set_fields", "(", "*", "*", "fields_if_new", ")", "return", "queue" ]
Get, or create, and return the wanted queue. If the queue is created, fields in fields_if_new will be set for the new queue.
[ "Get", "or", "create", "and", "return", "the", "wanted", "queue", ".", "If", "the", "queue", "is", "created", "fields", "in", "fields_if_new", "will", "be", "set", "for", "the", "new", "queue", "." ]
python
train
34.882353
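A minimal usage sketch for the get_queue record above; the Queue model name and import path are assumptions based on the file shown, not taken from the dataset row:

# Assumed import: limpyd_jobs/models.py defines the queue model.
from limpyd_jobs.models import Queue

# Get or create the 'emails' queue at priority 2; any **fields_if_new
# keyword arguments would only be applied if the queue had to be created.
queue = Queue.get_queue(name='emails', priority=2)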
dwavesystems/dwave-system
dwave/embedding/polynomialembedder.py
https://github.com/dwavesystems/dwave-system/blob/86a1698f15ccd8b0ece0ed868ee49292d3f67f5b/dwave/embedding/polynomialembedder.py#L1177-L1179
def _to_chimera(M, N, L, q):
    "Converts a qubit's linear index to chimera coordinates."
    return (q // N // L // 2, (q // L // 2) % N, (q // L) % 2, q % L)
[ "def", "_to_chimera", "(", "M", ",", "N", ",", "L", ",", "q", ")", ":", "return", "(", "q", "//", "N", "//", "L", "//", "2", ",", "(", "q", "//", "L", "//", "2", ")", "%", "N", ",", "(", "q", "//", "L", ")", "%", "2", ",", "q", "%", "L", ")" ]
Converts a qubit's linear index to chimera coordinates.
[ "Converts", "a", "qubit", "s", "linear", "index", "to", "chimera", "coordinates", "." ]
python
train
52.666667
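To make the index arithmetic in _to_chimera concrete, here is a round-trip sketch; _to_linear is a hypothetical inverse written for illustration and is not taken from the dataset row:

def _to_linear(M, N, L, chimera_coords):
    # Hypothetical inverse of _to_chimera: pack (row, col, u, k) back into
    # a linear index for an M x N Chimera graph with shore size L.
    row, col, u, k = chimera_coords
    return ((row * N + col) * 2 + u) * L + k

# Round trip on a 16x16, L=4 Chimera graph: 1337 -> (10, 7, 0, 1) -> 1337.
assert _to_chimera(16, 16, 4, 1337) == (10, 7, 0, 1)
assert _to_linear(16, 16, 4, _to_chimera(16, 16, 4, 1337)) == 1337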
sdispater/poetry
poetry/version/__init__.py
https://github.com/sdispater/poetry/blob/2d27acd76c165dd49f11934520a7973de7a3762a/poetry/version/__init__.py#L28-L44
def parse(
    version, strict=False  # type: str  # type: bool
):  # type: (...) -> Union[Version, LegacyVersion]
    """
    Parse the given version string and return either a :class:`Version` object
    or a LegacyVersion object depending on if the given version is
    a valid PEP 440 version or a legacy version.

    If strict=True only PEP 440 versions will be accepted.
    """
    try:
        return Version(version)
    except InvalidVersion:
        if strict:
            raise

        return LegacyVersion(version)
[ "def", "parse", "(", "version", ",", "strict", "=", "False", "# type: str # type: bool", ")", ":", "# type:(...) -> Union[Version, LegacyVersion]", "try", ":", "return", "Version", "(", "version", ")", "except", "InvalidVersion", ":", "if", "strict", ":", "raise", "return", "LegacyVersion", "(", "version", ")" ]
Parse the given version string and return either a :class:`Version` object or a LegacyVersion object depending on if the given version is a valid PEP 440 version or a legacy version. If strict=True only PEP 440 versions will be accepted.
[ "Parse", "the", "given", "version", "string", "and", "return", "either", "a", ":", "class", ":", "Version", "object", "or", "a", "LegacyVersion", "object", "depending", "on", "if", "the", "given", "version", "is", "a", "valid", "PEP", "440", "version", "or", "a", "legacy", "version", "." ]
python
train
30.117647
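A short usage sketch for parse above; it mirrors packaging.version semantics, and the results shown are assumptions based on that behavior:

print(parse('1.0.4'))    # valid PEP 440 -> a Version object
print(parse('2013b'))    # not PEP 440  -> falls back to a LegacyVersion

try:
    parse('2013b', strict=True)   # strict mode re-raises InvalidVersion
except InvalidVersion:
    print('rejected: not a PEP 440 version')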
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/settings.py
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/settings.py#L149-L164
def get_all_constants():
    """
    Get list of all uppercase, non-private globals (doesn't start with ``_``).

    Returns:
        list: Uppercase names defined in `globals()` (variables from this \
              module).
    """
    return filter(
        lambda key: key.upper() == key and type(globals()[key]) in _ALLOWED,

        filter(  # filter _PRIVATE variables
            lambda x: not x.startswith("_"),
            globals()
        )
    )
[ "def", "get_all_constants", "(", ")", ":", "return", "filter", "(", "lambda", "key", ":", "key", ".", "upper", "(", ")", "==", "key", "and", "type", "(", "globals", "(", ")", "[", "key", "]", ")", "in", "_ALLOWED", ",", "filter", "(", "# filter _PRIVATE variables", "lambda", "x", ":", "not", "x", ".", "startswith", "(", "\"_\"", ")", ",", "globals", "(", ")", ")", ")" ]
Get list of all uppercase, non-private globals (doesn't start with ``_``).

Returns:
    list: Uppercase names defined in `globals()` (variables from this \
          module).
[ "Get", "list", "of", "all", "uppercase", "non", "-", "private", "globals", "(", "doesn", "t", "start", "with", "_", ")", "." ]
python
train
29.4375
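One caveat for the get_all_constants record above: on Python 3, filter returns a lazy iterator, not the list the docstring promises. A minimal list-returning sketch, assuming the same _ALLOWED whitelist of types is in scope:

def get_all_constants_list():
    # Hypothetical Python 3 variant that materializes the result as a list.
    return [
        key for key in globals()
        if not key.startswith("_")             # skip _PRIVATE variables
        and key.upper() == key                 # uppercase names only
        and type(globals()[key]) in _ALLOWED   # whitelisted types only
    ]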
rflamary/POT
ot/utils.py
https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/utils.py#L388-L413
def _get_param_names(cls):
    """Get parameter names for the estimator"""
    # fetch the constructor or the original constructor before
    # deprecation wrapping if any
    init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
    if init is object.__init__:
        # No explicit constructor to introspect
        return []

    # introspect the constructor arguments to find the model parameters
    # to represent
    init_signature = signature(init)
    # Consider the constructor parameters excluding 'self'
    parameters = [p for p in init_signature.parameters.values()
                  if p.name != 'self' and p.kind != p.VAR_KEYWORD]
    for p in parameters:
        if p.kind == p.VAR_POSITIONAL:
            raise RuntimeError("POT estimators should always "
                               "specify their parameters in the signature"
                               " of their __init__ (no varargs)."
                               " %s with constructor %s doesn't "
                               " follow this convention."
                               % (cls, init_signature))
    # Extract and sort argument names excluding 'self'
    return sorted([p.name for p in parameters])
[ "def", "_get_param_names", "(", "cls", ")", ":", "# fetch the constructor or the original constructor before", "# deprecation wrapping if any", "init", "=", "getattr", "(", "cls", ".", "__init__", ",", "'deprecated_original'", ",", "cls", ".", "__init__", ")", "if", "init", "is", "object", ".", "__init__", ":", "# No explicit constructor to introspect", "return", "[", "]", "# introspect the constructor arguments to find the model parameters", "# to represent", "init_signature", "=", "signature", "(", "init", ")", "# Consider the constructor parameters excluding 'self'", "parameters", "=", "[", "p", "for", "p", "in", "init_signature", ".", "parameters", ".", "values", "(", ")", "if", "p", ".", "name", "!=", "'self'", "and", "p", ".", "kind", "!=", "p", ".", "VAR_KEYWORD", "]", "for", "p", "in", "parameters", ":", "if", "p", ".", "kind", "==", "p", ".", "VAR_POSITIONAL", ":", "raise", "RuntimeError", "(", "\"POT estimators should always \"", "\"specify their parameters in the signature\"", "\" of their __init__ (no varargs).\"", "\" %s with constructor %s doesn't \"", "\" follow this convention.\"", "%", "(", "cls", ",", "init_signature", ")", ")", "# Extract and sort argument names excluding 'self'", "return", "sorted", "(", "[", "p", ".", "name", "for", "p", "in", "parameters", "]", ")" ]
Get parameter names for the estimator
[ "Get", "parameter", "names", "for", "the", "estimator" ]
python
train
49.115385
jreese/tasky
tasky/loop.py
https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L158-L173
def run_for_time(self, duration: float=10.0) -> None:
    '''Execute the tasky/asyncio event loop for `duration` seconds.'''

    Log.debug('running event loop for %.1f seconds', duration)

    try:
        asyncio.ensure_future(self.init())
        self.loop.run_until_complete(asyncio.sleep(duration))
        self.terminate()
        self.loop.run_forever()

    except RuntimeError as e:
        if not e.args[0].startswith('Event loop stopped'):
            raise

    finally:
        self.loop.close()
[ "def", "run_for_time", "(", "self", ",", "duration", ":", "float", "=", "10.0", ")", "->", "None", ":", "Log", ".", "debug", "(", "'running event loop for %.1f seconds'", ",", "duration", ")", "try", ":", "asyncio", ".", "ensure_future", "(", "self", ".", "init", "(", ")", ")", "self", ".", "loop", ".", "run_until_complete", "(", "asyncio", ".", "sleep", "(", "duration", ")", ")", "self", ".", "terminate", "(", ")", "self", ".", "loop", ".", "run_forever", "(", ")", "except", "RuntimeError", "as", "e", ":", "if", "not", "e", ".", "args", "[", "0", "]", ".", "startswith", "(", "'Event loop stopped'", ")", ":", "raise", "finally", ":", "self", ".", "loop", ".", "close", "(", ")" ]
Execute the tasky/asyncio event loop for `duration` seconds.
[ "Execute", "the", "tasky", "/", "asyncio", "event", "loop", "for", "duration", "seconds", "." ]
python
train
33.75
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_openflow.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_openflow.py#L12-L21
def openflow_controller_controller_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    openflow_controller = ET.SubElement(config, "openflow-controller",
                                        xmlns="urn:brocade.com:mgmt:brocade-openflow")
    controller_name = ET.SubElement(openflow_controller, "controller-name")
    controller_name.text = kwargs.pop('controller_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "openflow_controller_controller_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "openflow_controller", "=", "ET", ".", "SubElement", "(", "config", ",", "\"openflow-controller\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-openflow\"", ")", "controller_name", "=", "ET", ".", "SubElement", "(", "openflow_controller", ",", "\"controller-name\"", ")", "controller_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'controller_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
48.2
ocaballeror/LyricFetch
lyricfetch/run.py
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/run.py#L213-L247
def run_mp(songs):
    """
    Concurrently calls get_lyrics to fetch the lyrics of a large list of
    songs.
    """
    stats = Stats()
    if CONFIG['debug']:
        good = open('found', 'w')
        bad = open('notfound', 'w')

    logger.debug('Launching a pool of %d processes\n', CONFIG['jobcount'])
    chunksize = math.ceil(len(songs) / os.cpu_count())
    try:
        with Pool(CONFIG['jobcount']) as pool:
            for result in pool.imap_unordered(get_lyrics, songs, chunksize):
                if result is None:
                    continue

                for source, runtime in result.runtimes.items():
                    stats.add_result(source, result.source == source, runtime)

                found = process_result(result)
                if CONFIG['debug']:
                    if found:
                        good.write(f'{id_source(source)}: {result.song}\n')
                        good.flush()
                    else:
                        bad.write(str(result.song) + '\n')
                        bad.flush()
    finally:
        if CONFIG['debug']:
            good.close()
            bad.close()

    return stats
[ "def", "run_mp", "(", "songs", ")", ":", "stats", "=", "Stats", "(", ")", "if", "CONFIG", "[", "'debug'", "]", ":", "good", "=", "open", "(", "'found'", ",", "'w'", ")", "bad", "=", "open", "(", "'notfound'", ",", "'w'", ")", "logger", ".", "debug", "(", "'Launching a pool of %d processes\\n'", ",", "CONFIG", "[", "'jobcount'", "]", ")", "chunksize", "=", "math", ".", "ceil", "(", "len", "(", "songs", ")", "/", "os", ".", "cpu_count", "(", ")", ")", "try", ":", "with", "Pool", "(", "CONFIG", "[", "'jobcount'", "]", ")", "as", "pool", ":", "for", "result", "in", "pool", ".", "imap_unordered", "(", "get_lyrics", ",", "songs", ",", "chunksize", ")", ":", "if", "result", "is", "None", ":", "continue", "for", "source", ",", "runtime", "in", "result", ".", "runtimes", ".", "items", "(", ")", ":", "stats", ".", "add_result", "(", "source", ",", "result", ".", "source", "==", "source", ",", "runtime", ")", "found", "=", "process_result", "(", "result", ")", "if", "CONFIG", "[", "'debug'", "]", ":", "if", "found", ":", "good", ".", "write", "(", "f'{id_source(source)}: {result.song}\\n'", ")", "good", ".", "flush", "(", ")", "else", ":", "bad", ".", "write", "(", "str", "(", "result", ".", "song", ")", "+", "'\\n'", ")", "bad", ".", "flush", "(", ")", "finally", ":", "if", "CONFIG", "[", "'debug'", "]", ":", "good", ".", "close", "(", ")", "bad", ".", "close", "(", ")", "return", "stats" ]
Concurrently calls get_lyrics to fetch the lyrics of a large list of songs.
[ "Concurrently", "calls", "get_lyrics", "to", "fetch", "the", "lyrics", "of", "a", "large", "list", "of", "songs", "." ]
python
train
32.085714
miguelgrinberg/python-socketio
socketio/packet.py
https://github.com/miguelgrinberg/python-socketio/blob/c0c1bf8d21e3597389b18938550a0724dd9676b7/socketio/packet.py#L165-L179
def _data_is_binary(self, data):
    """Check if the data contains binary components."""
    if isinstance(data, six.binary_type):
        return True
    elif isinstance(data, list):
        return functools.reduce(
            lambda a, b: a or b,
            [self._data_is_binary(item) for item in data], False)
    elif isinstance(data, dict):
        return functools.reduce(
            lambda a, b: a or b,
            [self._data_is_binary(item) for item in six.itervalues(data)],
            False)
    else:
        return False
[ "def", "_data_is_binary", "(", "self", ",", "data", ")", ":", "if", "isinstance", "(", "data", ",", "six", ".", "binary_type", ")", ":", "return", "True", "elif", "isinstance", "(", "data", ",", "list", ")", ":", "return", "functools", ".", "reduce", "(", "lambda", "a", ",", "b", ":", "a", "or", "b", ",", "[", "self", ".", "_data_is_binary", "(", "item", ")", "for", "item", "in", "data", "]", ",", "False", ")", "elif", "isinstance", "(", "data", ",", "dict", ")", ":", "return", "functools", ".", "reduce", "(", "lambda", "a", ",", "b", ":", "a", "or", "b", ",", "[", "self", ".", "_data_is_binary", "(", "item", ")", "for", "item", "in", "six", ".", "itervalues", "(", "data", ")", "]", ",", "False", ")", "else", ":", "return", "False" ]
Check if the data contains binary components.
[ "Check", "if", "the", "data", "contains", "binary", "components", "." ]
python
train
41.666667
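The reduce-over-or pattern in _data_is_binary builds each candidate list eagerly before reducing it; the built-in any() over a generator checks the same condition and stops at the first binary item. A behavior-preserving sketch:

def _data_is_binary(self, data):
    """Check if the data contains binary components (any()-based sketch)."""
    if isinstance(data, six.binary_type):
        return True
    if isinstance(data, list):
        # Generator short-circuits on the first binary element.
        return any(self._data_is_binary(item) for item in data)
    if isinstance(data, dict):
        return any(self._data_is_binary(item)
                   for item in six.itervalues(data))
    return False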
a1ezzz/wasp-general
wasp_general/config.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/config.py#L184-L192
def select_options(self, options_prefix):
    """ Select options from this selection, that are started with the specified prefix

    :param options_prefix: name prefix of options that should be selected
    :return: WConfigSelection
    """
    return WConfigSelection(
        self.config(), self.section(), self.option_prefix() + options_prefix
    )
[ "def", "select_options", "(", "self", ",", "options_prefix", ")", ":", "return", "WConfigSelection", "(", "self", ".", "config", "(", ")", ",", "self", ".", "section", "(", ")", ",", "self", ".", "option_prefix", "(", ")", "+", "options_prefix", ")" ]
Select options from this selection, that are started with the specified prefix

:param options_prefix: name prefix of options that should be selected
:return: WConfigSelection
[ "Select", "options", "from", "this", "selection", "that", "are", "started", "with", "the", "specified", "prefix" ]
python
train
36.444444
saltstack/salt
salt/modules/zpool.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zpool.py#L1108-L1148
def create_file_vdev(size, *vdevs):
    '''
    Creates file based virtual devices for a zpool

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.create_file_vdev 7G /path/to/vdev1 [/path/to/vdev2] [...]

    .. note::

        Depending on file size, the above command may take a while to return.
    '''
    ret = OrderedDict()
    err = OrderedDict()

    _mkfile_cmd = salt.utils.path.which('mkfile')
    for vdev in vdevs:
        if os.path.isfile(vdev):
            ret[vdev] = 'existed'
        else:
            res = __salt__['cmd.run_all'](
                '{mkfile} {size} {vdev}'.format(
                    mkfile=_mkfile_cmd,
                    size=size,
                    vdev=vdev,
                ),
                python_shell=False,
            )
            if res['retcode'] != 0:
                if 'stderr' in res and ':' in res['stderr']:
                    ret[vdev] = 'failed'
                    err[vdev] = ":".join(res['stderr'].strip().split(':')[1:])
            else:
                ret[vdev] = 'created'
    if err:
        ret['error'] = err

    return ret
[ "def", "create_file_vdev", "(", "size", ",", "*", "vdevs", ")", ":", "ret", "=", "OrderedDict", "(", ")", "err", "=", "OrderedDict", "(", ")", "_mkfile_cmd", "=", "salt", ".", "utils", ".", "path", ".", "which", "(", "'mkfile'", ")", "for", "vdev", "in", "vdevs", ":", "if", "os", ".", "path", ".", "isfile", "(", "vdev", ")", ":", "ret", "[", "vdev", "]", "=", "'existed'", "else", ":", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "'{mkfile} {size} {vdev}'", ".", "format", "(", "mkfile", "=", "_mkfile_cmd", ",", "size", "=", "size", ",", "vdev", "=", "vdev", ",", ")", ",", "python_shell", "=", "False", ",", ")", "if", "res", "[", "'retcode'", "]", "!=", "0", ":", "if", "'stderr'", "in", "res", "and", "':'", "in", "res", "[", "'stderr'", "]", ":", "ret", "[", "vdev", "]", "=", "'failed'", "err", "[", "vdev", "]", "=", "\":\"", ".", "join", "(", "res", "[", "'stderr'", "]", ".", "strip", "(", ")", ".", "split", "(", "':'", ")", "[", "1", ":", "]", ")", "else", ":", "ret", "[", "vdev", "]", "=", "'created'", "if", "err", ":", "ret", "[", "'error'", "]", "=", "err", "return", "ret" ]
Creates file based virtual devices for a zpool

CLI Example:

.. code-block:: bash

    salt '*' zpool.create_file_vdev 7G /path/to/vdev1 [/path/to/vdev2] [...]

.. note::

    Depending on file size, the above command may take a while to return.
[ "Creates", "file", "based", "virtual", "devices", "for", "a", "zpool" ]
python
train
26.268293
wdecoster/NanoPlot
nanoplot/NanoPlot.py
https://github.com/wdecoster/NanoPlot/blob/d1601076731df2a07020316bd159b544f497a606/nanoplot/NanoPlot.py#L441-L488
def make_report(plots, settings):
    '''
    Creates a fat html report based on the previously created files
    plots is a list of Plot objects defined by a path and title
    statsfile is the file to which the stats have been saved,
    which is parsed to a table (rather dodgy)
    '''
    logging.info("Writing html report.")
    html_content = ['<body>']

    # Hyperlink Table of Contents panel
    html_content.append('<div class="panel panelC">')
    if settings["filtered"]:
        html_content.append(
            '<p><strong><a href="#stats0">Summary Statistics prior to filtering</a></strong></p>')
        html_content.append(
            '<p><strong><a href="#stats1">Summary Statistics after filtering</a></strong></p>')
    else:
        html_content.append(
            '<p><strong><a href="#stats0">Summary Statistics</a></strong></p>')
    html_content.append('<p><strong><a href="#plots">Plots</a></strong></p>')
    html_content.extend(['<p style="margin-left:20px"><a href="#'
                         + p.title.replace(' ', '_') + '">' + p.title + '</a></p>'
                         for p in plots])
    html_content.append('</div>')

    # The report itself: stats
    html_content.append('<div class="panel panelM"> <h1>NanoPlot report</h1>')
    if settings["filtered"]:
        html_content.append('<h2 id="stats0">Summary statistics prior to filtering</h2>')
        html_content.append(utils.stats2html(settings["statsfile"][0]))
        html_content.append('<h2 id="stats1">Summary statistics after filtering</h2>')
        html_content.append(utils.stats2html(settings["statsfile"][1]))
    else:
        html_content.append('<h2 id="stats0">Summary statistics</h2>')
        html_content.append(utils.stats2html(settings["statsfile"][0]))

    # The report itself: plots
    html_content.append('<h2 id="plots">Plots</h2>')
    for plot in plots:
        html_content.append('\n<h3 id="' + plot.title.replace(' ', '_') + '">'
                            + plot.title + '</h3>\n' + plot.encode())
        html_content.append('\n<br>\n<br>\n<br>\n<br>')
    html_body = '\n'.join(html_content) + '</div></body></html>'
    html_str = utils.html_head + html_body
    htmlreport = settings["path"] + "NanoPlot-report.html"
    with open(htmlreport, "w") as html_file:
        html_file.write(html_str)
    return htmlreport
[ "def", "make_report", "(", "plots", ",", "settings", ")", ":", "logging", ".", "info", "(", "\"Writing html report.\"", ")", "html_content", "=", "[", "'<body>'", "]", "# Hyperlink Table of Contents panel", "html_content", ".", "append", "(", "'<div class=\"panel panelC\">'", ")", "if", "settings", "[", "\"filtered\"", "]", ":", "html_content", ".", "append", "(", "'<p><strong><a href=\"#stats0\">Summary Statistics prior to filtering</a></strong></p>'", ")", "html_content", ".", "append", "(", "'<p><strong><a href=\"#stats1\">Summary Statistics after filtering</a></strong></p>'", ")", "else", ":", "html_content", ".", "append", "(", "'<p><strong><a href=\"#stats0\">Summary Statistics</a></strong></p>'", ")", "html_content", ".", "append", "(", "'<p><strong><a href=\"#plots\">Plots</a></strong></p>'", ")", "html_content", ".", "extend", "(", "[", "'<p style=\"margin-left:20px\"><a href=\"#'", "+", "p", ".", "title", ".", "replace", "(", "' '", ",", "'_'", ")", "+", "'\">'", "+", "p", ".", "title", "+", "'</a></p>'", "for", "p", "in", "plots", "]", ")", "html_content", ".", "append", "(", "'</div>'", ")", "# The report itself: stats", "html_content", ".", "append", "(", "'<div class=\"panel panelM\"> <h1>NanoPlot report</h1>'", ")", "if", "settings", "[", "\"filtered\"", "]", ":", "html_content", ".", "append", "(", "'<h2 id=\"stats0\">Summary statistics prior to filtering</h2>'", ")", "html_content", ".", "append", "(", "utils", ".", "stats2html", "(", "settings", "[", "\"statsfile\"", "]", "[", "0", "]", ")", ")", "html_content", ".", "append", "(", "'<h2 id=\"stats1\">Summary statistics after filtering</h2>'", ")", "html_content", ".", "append", "(", "utils", ".", "stats2html", "(", "settings", "[", "\"statsfile\"", "]", "[", "1", "]", ")", ")", "else", ":", "html_content", ".", "append", "(", "'<h2 id=\"stats0\">Summary statistics</h2>'", ")", "html_content", ".", "append", "(", "utils", ".", "stats2html", "(", "settings", "[", "\"statsfile\"", "]", "[", "0", "]", ")", ")", "# The report itself: plots", "html_content", ".", "append", "(", "'<h2 id=\"plots\">Plots</h2>'", ")", "for", "plot", "in", "plots", ":", "html_content", ".", "append", "(", "'\\n<h3 id=\"'", "+", "plot", ".", "title", ".", "replace", "(", "' '", ",", "'_'", ")", "+", "'\">'", "+", "plot", ".", "title", "+", "'</h3>\\n'", "+", "plot", ".", "encode", "(", ")", ")", "html_content", ".", "append", "(", "'\\n<br>\\n<br>\\n<br>\\n<br>'", ")", "html_body", "=", "'\\n'", ".", "join", "(", "html_content", ")", "+", "'</div></body></html>'", "html_str", "=", "utils", ".", "html_head", "+", "html_body", "htmlreport", "=", "settings", "[", "\"path\"", "]", "+", "\"NanoPlot-report.html\"", "with", "open", "(", "htmlreport", ",", "\"w\"", ")", "as", "html_file", ":", "html_file", ".", "write", "(", "html_str", ")", "return", "htmlreport" ]
Creates a fat html report based on the previously created files plots is a list of Plot objects defined by a path and title statsfile is the file to which the stats have been saved, which is parsed to a table (rather dodgy)
[ "Creates", "a", "fat", "html", "report", "based", "on", "the", "previously", "created", "files", "plots", "is", "a", "list", "of", "Plot", "objects", "defined", "by", "a", "path", "and", "title", "statsfile", "is", "the", "file", "to", "which", "the", "stats", "have", "been", "saved", "which", "is", "parsed", "to", "a", "table", "(", "rather", "dodgy", ")" ]
python
train
47.541667
relwell/corenlp-xml-lib
corenlp_xml/coreference.py
https://github.com/relwell/corenlp-xml-lib/blob/9b0f8c912ba3ecedd34473f74a9f2d033a75baf9/corenlp_xml/coreference.py#L86-L98
def sentence(self):
    """
    The sentence related to this mention

    :getter: returns the sentence this mention relates to
    :type: corenlp_xml.document.Sentence
    """
    if self._sentence is None:
        sentences = self._element.xpath('sentence/text()')
        if len(sentences) > 0:
            self._sentence = self._coref.document.get_sentence_by_id(int(sentences[0]))
    return self._sentence
[ "def", "sentence", "(", "self", ")", ":", "if", "self", ".", "_sentence", "is", "None", ":", "sentences", "=", "self", ".", "_element", ".", "xpath", "(", "'sentence/text()'", ")", "if", "len", "(", "sentences", ")", ">", "0", ":", "self", ".", "_sentence", "=", "self", ".", "_coref", ".", "document", ".", "get_sentence_by_id", "(", "int", "(", "sentences", "[", "0", "]", ")", ")", "return", "self", ".", "_sentence" ]
The sentence related to this mention

:getter: returns the sentence this mention relates to
:type: corenlp_xml.document.Sentence
[ "The", "sentence", "related", "to", "this", "mention" ]
python
train
33.846154
FlaskGuys/Flask-Imagine
flask_imagine/filters/watermark.py
https://github.com/FlaskGuys/Flask-Imagine/blob/f79c6517ecb5480b63a2b3b8554edb6e2ac8be8c/flask_imagine/filters/watermark.py#L59-L79
def apply(self, resource):
    """
    Apply filter to resource
    :param resource: Image.Image
    :return: Image.Image
    """
    if not isinstance(resource, Image.Image):
        raise ValueError('Unknown resource format')

    resource_format = resource.format
    if resource.mode != 'RGBA':  # pragma: no cover
        resource = resource.convert('RGBA')

    layer = Image.new('RGBA', resource.size, (0, 0, 0, 0))
    image, left, upper = getattr(self, '_' + self.position + '_position')(resource)
    layer.paste(image, (left, upper))
    image = Image.composite(layer, resource, layer)
    image.format = resource_format

    return image
[ "def", "apply", "(", "self", ",", "resource", ")", ":", "if", "not", "isinstance", "(", "resource", ",", "Image", ".", "Image", ")", ":", "raise", "ValueError", "(", "'Unknown resource format'", ")", "resource_format", "=", "resource", ".", "format", "if", "resource", ".", "mode", "!=", "'RGBA'", ":", "# pragma: no cover", "resource", "=", "resource", ".", "convert", "(", "'RGBA'", ")", "layer", "=", "Image", ".", "new", "(", "'RGBA'", ",", "resource", ".", "size", ",", "(", "0", ",", "0", ",", "0", ",", "0", ")", ")", "image", ",", "left", ",", "upper", "=", "getattr", "(", "self", ",", "'_'", "+", "self", ".", "position", "+", "'_position'", ")", "(", "resource", ")", "layer", ".", "paste", "(", "image", ",", "(", "left", ",", "upper", ")", ")", "image", "=", "Image", ".", "composite", "(", "layer", ",", "resource", ",", "layer", ")", "image", ".", "format", "=", "resource_format", "return", "image" ]
Apply filter to resource

:param resource: Image.Image
:return: Image.Image
[ "Apply", "filter", "to", "resource", ":", "param", "resource", ":", "Image", ".", "Image", ":", "return", ":", "Image", ".", "Image" ]
python
train
33.047619
honzajavorek/redis-collections
redis_collections/sortedsets.py
https://github.com/honzajavorek/redis-collections/blob/07ca8efe88fb128f7dc7319dfa6a26cd39b3776b/redis_collections/sortedsets.py#L178-L186
def count_between(self, min_score=None, max_score=None):
    """
    Returns the number of members whose score is between *min_score* and
    *max_score* (inclusive).
    """
    min_score = float('-inf') if min_score is None else float(min_score)
    max_score = float('inf') if max_score is None else float(max_score)

    return self.redis.zcount(self.key, min_score, max_score)
[ "def", "count_between", "(", "self", ",", "min_score", "=", "None", ",", "max_score", "=", "None", ")", ":", "min_score", "=", "float", "(", "'-inf'", ")", "if", "min_score", "is", "None", "else", "float", "(", "min_score", ")", "max_score", "=", "float", "(", "'inf'", ")", "if", "max_score", "is", "None", "else", "float", "(", "max_score", ")", "return", "self", ".", "redis", ".", "zcount", "(", "self", ".", "key", ",", "min_score", ",", "max_score", ")" ]
Returns the number of members whose score is between *min_score* and *max_score* (inclusive).
[ "Returns", "the", "number", "of", "members", "whose", "score", "is", "between", "*", "min_score", "*", "and", "*", "max_score", "*", "(", "inclusive", ")", "." ]
python
train
44.555556
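A small usage sketch for count_between; 'scores' is assumed to be an instance of the sorted-set class defining this method, backed by a Redis sorted set with the members shown:

# Assumed contents: {'alice': 3.0, 'bob': 7.5, 'carol': 9.0}
scores.count_between(3, 7.5)         # -> 2, bounds are inclusive
scores.count_between()               # -> 3, no bounds counts every member
scores.count_between(max_score=5)    # -> 1, only 'alice' scores <= 5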
collectiveacuity/labPack
labpack/banking/capitalone.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/banking/capitalone.py#L398-L465
def account_product(self, product_id):
    ''' a method to retrieve details about a particular account product

    {
        "error": "",
        "code": 200,
        "method": "GET",
        "url": "https://...",
        "headers": { },
        "json": {
            "productId": "3300",
            "productName": "Capital One 360 Money Market Account",
            "cdTerms": [ "12M" ],
            "annualPercentageYieldDetails": {
                "annualPercentageYieldType": "simple",
                "annualPercentageYield": 1.4,
                "tieredAnnualPercentageYield": [
                    {
                        "tierDescription": "$0 - $9,999.99",
                        "annualPercentageYield": 1.4
                    }
                ],
                "termBasedAnnualPercentageYield": [
                    {
                        "term": "6M",
                        "annualPercentageYield": 1.2
                    }
                ]
            },
            "disclosures": {
                "productDisclosureUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#360savingsdisclosure",
                "termsAndConditionsUrl": "https://www.capitalone.com/online-money-market-account/disclosures/#360moneymarketagreement",
                "electronicFundTransferDisclosureUrl": "https://www.capitalone.com/cds/online-cds/disclosures/#electronicfundtransferdisclosurestatement",
                "privacyPolicyUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#privacypolicy",
                "wireTransferAgreementUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#wirefundstransferdisclosurestatement",
                "paperlessAgreementUrl": "https://www.capitalone.com/terms_eddn",
                "fraudProtectionAgreementUrl": "https://www.capitalone.com/terms-personal-data",
                "tcpaDisclosureContent": "If number(s) provided above is(are) mobile phone number(s), it is (they are) my mobile phone number(s), by clicking on the button below, I consent to receive autodialed and prerecorded/artificial calls , including texts, relating to my relationship with Capital One (which may include handling, servicing, and billing for any of my accounts). Message and Data rates may apply. You can stop these types of messages by replying STOP in response to a text message, or by following any other instructions contained in the time-sensitive call.\n[Radio button] You can call or text me through automated means\n[Radio button] You can only contact me through non-automated mean"
            }
        }
    }
    '''
    title = '%s.account_product' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'product_id': product_id
    }
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)

    # construct url
    url = self.deposits_endpoint + 'account-products/%s' % product_id

    # construct method specific errors
    error_map = {
        404: 'Not Found. No products found for the provided productId.'
    }

    # send request
    details = self._requests(url, errors=error_map)

    return details
[ "def", "account_product", "(", "self", ",", "product_id", ")", ":", "title", "=", "'%s.account_product'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs", "input_fields", "=", "{", "'product_id'", ":", "product_id", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# construct url", "url", "=", "self", ".", "deposits_endpoint", "+", "'account-products/%s'", "%", "product_id", "# construct method specific errors", "error_map", "=", "{", "404", ":", "'Not Found. No products found for the provided productId.'", "}", "# send request", "details", "=", "self", ".", "_requests", "(", "url", ",", "errors", "=", "error_map", ")", "return", "details" ]
a method to retrieve details about a particular account product

{
    "error": "",
    "code": 200,
    "method": "GET",
    "url": "https://...",
    "headers": { },
    "json": {
        "productId": "3300",
        "productName": "Capital One 360 Money Market Account",
        "cdTerms": [ "12M" ],
        "annualPercentageYieldDetails": {
            "annualPercentageYieldType": "simple",
            "annualPercentageYield": 1.4,
            "tieredAnnualPercentageYield": [
                {
                    "tierDescription": "$0 - $9,999.99",
                    "annualPercentageYield": 1.4
                }
            ],
            "termBasedAnnualPercentageYield": [
                {
                    "term": "6M",
                    "annualPercentageYield": 1.2
                }
            ]
        },
        "disclosures": {
            "productDisclosureUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#360savingsdisclosure",
            "termsAndConditionsUrl": "https://www.capitalone.com/online-money-market-account/disclosures/#360moneymarketagreement",
            "electronicFundTransferDisclosureUrl": "https://www.capitalone.com/cds/online-cds/disclosures/#electronicfundtransferdisclosurestatement",
            "privacyPolicyUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#privacypolicy",
            "wireTransferAgreementUrl": "https://www.capitalone.com/savings-accounts/online-savings-account/disclosures/#wirefundstransferdisclosurestatement",
            "paperlessAgreementUrl": "https://www.capitalone.com/terms_eddn",
            "fraudProtectionAgreementUrl": "https://www.capitalone.com/terms-personal-data",
            "tcpaDisclosureContent": "If number(s) provided above is(are) mobile phone number(s), it is (they are) my mobile phone number(s), by clicking on the button below, I consent to receive autodialed and prerecorded/artificial calls , including texts, relating to my relationship with Capital One (which may include handling, servicing, and billing for any of my accounts). Message and Data rates may apply. You can stop these types of messages by replying STOP in response to a text message, or by following any other instructions contained in the time-sensitive call.\n[Radio button] You can call or text me through automated means\n[Radio button] You can only contact me through non-automated mean"
        }
    }
}
[ "a", "method", "to", "retrieve", "details", "about", "a", "particular", "account", "product", "{", "error", ":", "code", ":", "200", "method", ":", "GET", "url", ":", "https", ":", "//", "...", "headers", ":", "{", "}", "json", ":", "{", "productId", ":", "3300", "productName", ":", "Capital", "One", "360", "Money", "Market", "Account", "cdTerms", ":", "[", "12M", "]", "annualPercentageYieldDetails", ":", "{", "annualPercentageYieldType", ":", "simple", "annualPercentageYield", ":", "1", ".", "4", "tieredAnnualPercentageYield", ":", "[", "{", "tierDescription", ":", "$0", "-", "$9", "999", ".", "99", "annualPercentageYield", ":", "1", ".", "4", "}", "]", "termBasedAnnualPercentageYield", ":", "[", "{", "term", ":", "6M", "annualPercentageYield", ":", "1", ".", "2", "}", "]", "}", "disclosures", ":", "{", "productDisclosureUrl", ":", "https", ":", "//", "www", ".", "capitalone", ".", "com", "/", "savings", "-", "accounts", "/", "online", "-", "savings", "-", "account", "/", "disclosures", "/", "#360savingsdisclosure", "termsAndConditionsUrl", ":", "https", ":", "//", "www", ".", "capitalone", ".", "com", "/", "online", "-", "money", "-", "market", "-", "account", "/", "disclosures", "/", "#360moneymarketagreement", "electronicFundTransferDisclosureUrl", ":", "https", ":", "//", "www", ".", "capitalone", ".", "com", "/", "cds", "/", "online", "-", "cds", "/", "disclosures", "/", "#electronicfundtransferdisclosurestatement", "privacyPolicyUrl", ":", "https", ":", "//", "www", ".", "capitalone", ".", "com", "/", "savings", "-", "accounts", "/", "online", "-", "savings", "-", "account", "/", "disclosures", "/", "#privacypolicy", "wireTransferAgreementUrl", ":", "https", ":", "//", "www", ".", "capitalone", ".", "com", "/", "savings", "-", "accounts", "/", "online", "-", "savings", "-", "account", "/", "disclosures", "/", "#wirefundstransferdisclosurestatement", "paperlessAgreementUrl", ":", "https", ":", "//", "www", ".", "capitalone", ".", "com", "/", "terms_eddn", "fraudProtectionAgreementUrl", ":", "https", ":", "//", "www", ".", "capitalone", ".", "com", "/", "terms", "-", "personal", "-", "data", "tcpaDisclosureContent", ":", "If", "number", "(", "s", ")", "provided", "above", "is", "(", "are", ")", "mobile", "phone", "number", "(", "s", ")", "it", "is", "(", "they", "are", ")", "my", "mobile", "phone", "number", "(", "s", ")", "by", "clicking", "on", "the", "button", "below", "I", "consent", "to", "receive", "autodialed", "and", "prerecorded", "/", "artificial", "calls", "including", "texts", "relating", "to", "my", "relationship", "with", "Capital", "One", "(", "which", "may", "include", "handling", "servicing", "and", "billing", "for", "any", "of", "my", "accounts", ")", ".", "Message", "and", "Data", "rates", "may", "apply", ".", "You", "can", "stop", "these", "types", "of", "messages", "by", "replying", "STOP", "in", "response", "to", "a", "text", "message", "or", "by", "following", "any", "other", "instructions", "contained", "in", "the", "time", "-", "sensitive", "call", ".", "\\", "n", "[", "Radio", "button", "]", "You", "can", "call", "or", "text", "me", "through", "automated", "means", "\\", "n", "[", "Radio", "button", "]", "You", "can", "only", "contact", "me", "through", "non", "-", "automated", "mean", "}", "}", "}" ]
python
train
49.794118
limpyd/redis-limpyd
limpyd/model.py
https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/model.py#L319-L329
def _set_defaults(self):
    """
    Set default values to fields. We assume that they are not yet populated
    as this method is called just after creation of a new pk.
    """
    for field_name in self._fields:
        if field_name in self._init_fields:
            continue
        field = self.get_field(field_name)
        if hasattr(field, "default"):
            field.proxy_set(field.default)
[ "def", "_set_defaults", "(", "self", ")", ":", "for", "field_name", "in", "self", ".", "_fields", ":", "if", "field_name", "in", "self", ".", "_init_fields", ":", "continue", "field", "=", "self", ".", "get_field", "(", "field_name", ")", "if", "hasattr", "(", "field", ",", "\"default\"", ")", ":", "field", ".", "proxy_set", "(", "field", ".", "default", ")" ]
Set default values to fields. We assume that they are not yet populated as this method is called just after creation of a new pk.
[ "Set", "default", "values", "to", "fields", ".", "We", "assume", "that", "they", "are", "not", "yet", "populated", "as", "this", "method", "is", "called", "just", "after", "creation", "of", "a", "new", "pk", "." ]
python
train
39.363636
yougov/vr.common
vr/common/models.py
https://github.com/yougov/vr.common/blob/ca8ed0c50ba873fc51fdfeeaa25d3b8ec1b54eb4/vr/common/models.py#L738-L749
def dispatch(self, **changes):
    """
    Patch the swarm with changes and then trigger the swarm.
    """
    self.patch(**changes)
    trigger_url = self._vr._build_url(self.resource_uri, 'swarm/')
    resp = self._vr.session.post(trigger_url)
    resp.raise_for_status()
    try:
        return resp.json()
    except ValueError:
        return None
[ "def", "dispatch", "(", "self", ",", "*", "*", "changes", ")", ":", "self", ".", "patch", "(", "*", "*", "changes", ")", "trigger_url", "=", "self", ".", "_vr", ".", "_build_url", "(", "self", ".", "resource_uri", ",", "'swarm/'", ")", "resp", "=", "self", ".", "_vr", ".", "session", ".", "post", "(", "trigger_url", ")", "resp", ".", "raise_for_status", "(", ")", "try", ":", "return", "resp", ".", "json", "(", ")", "except", "ValueError", ":", "return", "None" ]
Patch the swarm with changes and then trigger the swarm.
[ "Patch", "the", "swarm", "with", "changes", "and", "then", "trigger", "the", "swarm", "." ]
python
train
32.166667
maximtrp/scikit-posthocs
scikit_posthocs/_posthocs.py
https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L2388-L2490
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
    '''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
    one-factorial layout with non-normally distributed residuals. As opposed
    to the all-pairs comparison procedures that depend on Kruskal ranks,
    the DSCF test is basically an extension of the U-test as re-ranking is
    conducted for each pairwise test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
        comparisons in the one-way analysis of variance, Communications in
        Statistics - Theory and Methods, 20, 127-139.

    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions
        to Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
        University Press.

    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')

    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)

    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]
    n = x_grouped.count()
    k = groups.size

    def get_ties(x):
        t = x.value_counts().values
        c = np.sum((t ** 3 - t) / 12.)
        return c

    def compare(i, j):
        ni = n.loc[i]
        nj = n.loc[j]
        x_raw = x.loc[(x[_group_col] == i) | (x[_group_col] == j)].copy()
        x_raw['ranks'] = x_raw.loc[:, _val_col].rank()
        r = x_raw.groupby(_group_col)['ranks'].sum().loc[[i, j]]

        u = np.array([nj * ni + (nj * (nj + 1) / 2),
                      nj * ni + (ni * (ni + 1) / 2)]) - r
        u_min = np.min(u)

        s = ni + nj
        var = (nj*ni/(s*(s - 1.))) * ((s**3 - s)/12. - get_ties(x_raw['ranks']))
        p = np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)
        return p

    vs = np.zeros((k, k))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    combs = it.combinations(range(k), 2)

    for i, j in combs:
        vs[i, j] = compare(groups[i], groups[j])

    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), k, np.inf)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)

    return DataFrame(vs, index=groups, columns=groups)
[ "def", "posthoc_dscf", "(", "a", ",", "val_col", "=", "None", ",", "group_col", "=", "None", ",", "sort", "=", "False", ")", ":", "x", ",", "_val_col", ",", "_group_col", "=", "__convert_to_df", "(", "a", ",", "val_col", ",", "group_col", ")", "if", "not", "sort", ":", "x", "[", "_group_col", "]", "=", "Categorical", "(", "x", "[", "_group_col", "]", ",", "categories", "=", "x", "[", "_group_col", "]", ".", "unique", "(", ")", ",", "ordered", "=", "True", ")", "x", ".", "sort_values", "(", "by", "=", "[", "_group_col", "]", ",", "ascending", "=", "True", ",", "inplace", "=", "True", ")", "groups", "=", "np", ".", "unique", "(", "x", "[", "_group_col", "]", ")", "x_grouped", "=", "x", ".", "groupby", "(", "_group_col", ")", "[", "_val_col", "]", "n", "=", "x_grouped", ".", "count", "(", ")", "k", "=", "groups", ".", "size", "def", "get_ties", "(", "x", ")", ":", "t", "=", "x", ".", "value_counts", "(", ")", ".", "values", "c", "=", "np", ".", "sum", "(", "(", "t", "**", "3", "-", "t", ")", "/", "12.", ")", "return", "c", "def", "compare", "(", "i", ",", "j", ")", ":", "ni", "=", "n", ".", "loc", "[", "i", "]", "nj", "=", "n", ".", "loc", "[", "j", "]", "x_raw", "=", "x", ".", "loc", "[", "(", "x", "[", "_group_col", "]", "==", "i", ")", "|", "(", "x", "[", "_group_col", "]", "==", "j", ")", "]", ".", "copy", "(", ")", "x_raw", "[", "'ranks'", "]", "=", "x_raw", ".", "loc", "[", ":", ",", "_val_col", "]", ".", "rank", "(", ")", "r", "=", "x_raw", ".", "groupby", "(", "_group_col", ")", "[", "'ranks'", "]", ".", "sum", "(", ")", ".", "loc", "[", "[", "i", ",", "j", "]", "]", "u", "=", "np", ".", "array", "(", "[", "nj", "*", "ni", "+", "(", "nj", "*", "(", "nj", "+", "1", ")", "/", "2", ")", ",", "nj", "*", "ni", "+", "(", "ni", "*", "(", "ni", "+", "1", ")", "/", "2", ")", "]", ")", "-", "r", "u_min", "=", "np", ".", "min", "(", "u", ")", "s", "=", "ni", "+", "nj", "var", "=", "(", "nj", "*", "ni", "/", "(", "s", "*", "(", "s", "-", "1.", ")", ")", ")", "*", "(", "(", "s", "**", "3", "-", "s", ")", "/", "12.", "-", "get_ties", "(", "x_raw", "[", "'ranks'", "]", ")", ")", "p", "=", "np", ".", "sqrt", "(", "2.", ")", "*", "(", "u_min", "-", "nj", "*", "ni", "/", "2.", ")", "/", "np", ".", "sqrt", "(", "var", ")", "return", "p", "vs", "=", "np", ".", "zeros", "(", "(", "k", ",", "k", ")", ")", "tri_upper", "=", "np", ".", "triu_indices", "(", "vs", ".", "shape", "[", "0", "]", ",", "1", ")", "tri_lower", "=", "np", ".", "tril_indices", "(", "vs", ".", "shape", "[", "0", "]", ",", "-", "1", ")", "vs", "[", ":", ",", ":", "]", "=", "0", "combs", "=", "it", ".", "combinations", "(", "range", "(", "k", ")", ",", "2", ")", "for", "i", ",", "j", "in", "combs", ":", "vs", "[", "i", ",", "j", "]", "=", "compare", "(", "groups", "[", "i", "]", ",", "groups", "[", "j", "]", ")", "vs", "[", "tri_upper", "]", "=", "psturng", "(", "np", ".", "abs", "(", "vs", "[", "tri_upper", "]", ")", ",", "k", ",", "np", ".", "inf", ")", "vs", "[", "tri_lower", "]", "=", "vs", ".", "T", "[", "tri_lower", "]", "np", ".", "fill_diagonal", "(", "vs", ",", "-", "1", ")", "return", "DataFrame", "(", "vs", ",", "index", "=", "groups", ",", "columns", "=", "groups", ")" ]
Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
one-factorial layout with non-normally distributed residuals. As opposed
to the all-pairs comparison procedures that depend on Kruskal ranks,
the DSCF test is basically an extension of the U-test as re-ranking is
conducted for each pairwise test [1]_, [2]_, [3]_.

Parameters
----------
a : array_like or pandas DataFrame object
    An array, any object exposing the array interface or a pandas DataFrame.

val_col : str, optional
    Name of a DataFrame column that contains dependent variable values
    (test or response variable). Values should have a non-nominal scale.
    Must be specified if `a` is a pandas DataFrame object.

group_col : str, optional
    Name of a DataFrame column that contains independent variable values
    (grouping or predictor variable). Values should have a nominal scale
    (categorical). Must be specified if `a` is a pandas DataFrame object.

sort : bool, optional
    If True, sort data by block and group columns.

Returns
-------
Pandas DataFrame containing p values.

Notes
-----
The p values are computed from the Tukey-distribution.

References
----------
.. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
    comparisons in the one-way analysis of variance, Communications in
    Statistics - Theory and Methods, 20, 127-139.

.. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
    Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
    University Press.

.. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
    treatments, Technometrics, 2, 197-207.

Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
[ "Dwass", "Steel", "Critchlow", "and", "Fligner", "all", "-", "pairs", "comparison", "test", "for", "a", "one", "-", "factorial", "layout", "with", "non", "-", "normally", "distributed", "residuals", ".", "As", "opposed", "to", "the", "all", "-", "pairs", "comparison", "procedures", "that", "depend", "on", "Kruskal", "ranks", "the", "DSCF", "test", "is", "basically", "an", "extension", "of", "the", "U", "-", "test", "as", "re", "-", "ranking", "is", "conducted", "for", "each", "pairwise", "test", "[", "1", "]", "_", "[", "2", "]", "_", "[", "3", "]", "_", "." ]
python
train
34.951456
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L169-L184
def ReadTag(buffer, pos):
    """Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.

    We return the raw bytes of the tag rather than decoding them.  The raw
    bytes can then be used to look up the proper decoder.  This effectively
    allows us to trade some work that would be done in pure-python (decoding a
    varint) for work that is done in C (searching for a byte string in a hash
    table).  In a low-level language it would be much cheaper to decode the
    varint and use that, but not in Python.
    """
    start = pos
    while six.indexbytes(buffer, pos) & 0x80:
        pos += 1
    pos += 1
    return (buffer[start:pos], pos)
[ "def", "ReadTag", "(", "buffer", ",", "pos", ")", ":", "start", "=", "pos", "while", "six", ".", "indexbytes", "(", "buffer", ",", "pos", ")", "&", "0x80", ":", "pos", "+=", "1", "pos", "+=", "1", "return", "(", "buffer", "[", "start", ":", "pos", "]", ",", "pos", ")" ]
Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple. We return the raw bytes of the tag rather than decoding them. The raw bytes can then be used to look up the proper decoder. This effectively allows us to trade some work that would be done in pure-python (decoding a varint) for work that is done in C (searching for a byte string in a hash table). In a low-level language it would be much cheaper to decode the varint and use that, but not in Python.
[ "Read", "a", "tag", "from", "the", "buffer", "and", "return", "a", "(", "tag_bytes", "new_pos", ")", "tuple", "." ]
python
train
38.9375
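A worked example for ReadTag above, using the protobuf wire format: each varint byte signals continuation via its high bit (0x80). The tag for field number 16 with wire type 0 is 16 << 3 | 0 = 128, which encodes as the two-byte varint b'\x80\x01':

buf = b'\x80\x01\x2a'              # two tag bytes, then a payload byte
tag_bytes, new_pos = ReadTag(buf, 0)
assert tag_bytes == b'\x80\x01'    # raw tag bytes, not decoded
assert new_pos == 2                # scan stopped after the 0x01 byte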
log2timeline/plaso
plaso/preprocessors/linux.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/preprocessors/linux.py#L183-L221
def _ParseFileEntry(self, knowledge_base, file_entry):
  """Parses artifact file system data for a preprocessing attribute.

  Args:
    knowledge_base (KnowledgeBase): to fill with preprocessing information.
    file_entry (dfvfs.FileEntry): file entry that contains the artifact
        value data.

  Raises:
    errors.PreProcessFail: if the preprocessing fails.
  """
  if file_entry.link:
    # Determine the timezone based on the file path.
    _, _, time_zone = file_entry.link.partition('zoneinfo/')

  else:
    # Determine the timezone based on the timezone information file.
    file_object = file_entry.GetFileObject()

    time_zone = None
    try:
      time_zone_file = tz.tzfile(file_object)
      date_time = datetime.datetime(2017, 1, 1)
      time_zone = time_zone_file.tzname(date_time)

    except ValueError:
      # TODO: add and store preprocessing errors.
      logger.error('Unable to read time zone information file.')

    finally:
      file_object.close()

  # TODO: check if time zone is set in knowledge base.
  if time_zone:
    try:
      knowledge_base.SetTimeZone(time_zone)
    except ValueError:
      # TODO: add and store preprocessing errors.
      logger.error('Unable to set time zone in knowledge base.')
[ "def", "_ParseFileEntry", "(", "self", ",", "knowledge_base", ",", "file_entry", ")", ":", "if", "file_entry", ".", "link", ":", "# Determine the timezone based on the file path.", "_", ",", "_", ",", "time_zone", "=", "file_entry", ".", "link", ".", "partition", "(", "'zoneinfo/'", ")", "else", ":", "# Determine the timezone based on the timezone information file.", "file_object", "=", "file_entry", ".", "GetFileObject", "(", ")", "time_zone", "=", "None", "try", ":", "time_zone_file", "=", "tz", ".", "tzfile", "(", "file_object", ")", "date_time", "=", "datetime", ".", "datetime", "(", "2017", ",", "1", ",", "1", ")", "time_zone", "=", "time_zone_file", ".", "tzname", "(", "date_time", ")", "except", "ValueError", ":", "# TODO: add and store preprocessing errors.", "logger", ".", "error", "(", "'Unable to read time zone information file.'", ")", "finally", ":", "file_object", ".", "close", "(", ")", "# TODO: check if time zone is set in knowledge base.", "if", "time_zone", ":", "try", ":", "knowledge_base", ".", "SetTimeZone", "(", "time_zone", ")", "except", "ValueError", ":", "# TODO: add and store preprocessing errors.", "logger", ".", "error", "(", "'Unable to set time zone in knowledge base.'", ")" ]
Parses artifact file system data for a preprocessing attribute.

Args:
  knowledge_base (KnowledgeBase): to fill with preprocessing information.
  file_entry (dfvfs.FileEntry): file entry that contains the artifact
      value data.

Raises:
  errors.PreProcessFail: if the preprocessing fails.
[ "Parses", "artifact", "file", "system", "data", "for", "a", "preprocessing", "attribute", "." ]
python
train
32.641026
pymoca/pymoca
src/pymoca/backends/xml/model.py
https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/model.py#L70-L76
def create_function_f_c(self):
    """condition function"""
    return ca.Function(
        'f_c',
        [self.t, self.x, self.y, self.m, self.p, self.ng, self.nu],
        [self.f_c],
        ['t', 'x', 'y', 'm', 'p', 'ng', 'nu'], ['c'], self.func_opt)
[ "def", "create_function_f_c", "(", "self", ")", ":", "return", "ca", ".", "Function", "(", "'f_c'", ",", "[", "self", ".", "t", ",", "self", ".", "x", ",", "self", ".", "y", ",", "self", ".", "m", ",", "self", ".", "p", ",", "self", ".", "ng", ",", "self", ".", "nu", "]", ",", "[", "self", ".", "f_c", "]", ",", "[", "'t'", ",", "'x'", ",", "'y'", ",", "'m'", ",", "'p'", ",", "'ng'", ",", "'nu'", "]", ",", "[", "'c'", "]", ",", "self", ".", "func_opt", ")" ]
condition function
[ "condition", "function" ]
python
train
39
pyviz/imagen
imagen/colorspaces.py
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L413-L416
def rgb_to_hsv(self, RGB):
    "linear rgb to hsv"
    gammaRGB = self._gamma_rgb(RGB)
    return self._ABC_to_DEF_by_fn(gammaRGB, rgb_to_hsv)
[ "def", "rgb_to_hsv", "(", "self", ",", "RGB", ")", ":", "gammaRGB", "=", "self", ".", "_gamma_rgb", "(", "RGB", ")", "return", "self", ".", "_ABC_to_DEF_by_fn", "(", "gammaRGB", ",", "rgb_to_hsv", ")" ]
linear rgb to hsv
[ "linear", "rgb", "to", "hsv" ]
python
train
37.25
tchellomello/raincloudy
raincloudy/faucet.py
https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/faucet.py#L291-L306
def _set_auto_watering(self, zoneid, value):
    """Private method to set auto_watering program."""
    if not isinstance(value, bool):
        return None

    ddata = self.preupdate()
    attr = 'zone{}_program_toggle'.format(zoneid)
    try:
        if not value:
            ddata.pop(attr)
        else:
            ddata[attr] = 'on'
    except KeyError:
        pass
    self.submit_action(ddata)
    return True
[ "def", "_set_auto_watering", "(", "self", ",", "zoneid", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "bool", ")", ":", "return", "None", "ddata", "=", "self", ".", "preupdate", "(", ")", "attr", "=", "'zone{}_program_toggle'", ".", "format", "(", "zoneid", ")", "try", ":", "if", "not", "value", ":", "ddata", ".", "pop", "(", "attr", ")", "else", ":", "ddata", "[", "attr", "]", "=", "'on'", "except", "KeyError", ":", "pass", "self", ".", "submit_action", "(", "ddata", ")", "return", "True" ]
Private method to set auto_watering program.
[ "Private", "method", "to", "set", "auto_watering", "program", "." ]
python
train
28.75
ponty/confduino
confduino/examples/remove_libraries.py
https://github.com/ponty/confduino/blob/f4c261e5e84997f145a8bdd001f471db74c9054b/confduino/examples/remove_libraries.py#L8-L20
def gui(): """remove libraries by GUI.""" sel = psidialogs.multi_choice(libraries(), 'select libraries to remove from %s!' % libraries_dir(), title='remove boards') print('%s selected' % sel) if sel: if psidialogs.ask_yes_no('Do you really want to remove selected libraries?\n' + '\n'.join(sel)): for x in sel: remove_lib(x) print('%s was removed' % x)
[ "def", "gui", "(", ")", ":", "sel", "=", "psidialogs", ".", "multi_choice", "(", "libraries", "(", ")", ",", "'select libraries to remove from %s!'", "%", "libraries_dir", "(", ")", ",", "title", "=", "'remove boards'", ")", "print", "(", "'%s selected'", "%", "sel", ")", "if", "sel", ":", "if", "psidialogs", ".", "ask_yes_no", "(", "'Do you really want to remove selected libraries?\\n'", "+", "'\\n'", ".", "join", "(", "sel", ")", ")", ":", "for", "x", "in", "sel", ":", "remove_lib", "(", "x", ")", "print", "(", "'%s was removed'", "%", "x", ")" ]
remove libraries by GUI.
[ "remove", "libraries", "by", "GUI", "." ]
python
train
36.769231
MozillaSecurity/fuzzfetch
src/fuzzfetch/fetch.py
https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L171-L186
def auto_name_prefix(self): """ Generate platform prefix for cross-platform downloads. """ # if the platform is not native, auto_name would clobber native downloads. # make a prefix to avoid this native_system = std_platform.system() native_machine = self.CPU_ALIASES.get(std_platform.machine(), std_platform.machine()) if native_system == self.system and native_machine == self.machine: return '' platform = { 'linux': 'linux32', 'android-api-16': 'android-arm', 'android-aarch64': 'android-arm64', }.get(self.gecko_platform, self.gecko_platform) return platform + '-'
[ "def", "auto_name_prefix", "(", "self", ")", ":", "# if the platform is not native, auto_name would clobber native downloads.", "# make a prefix to avoid this", "native_system", "=", "std_platform", ".", "system", "(", ")", "native_machine", "=", "self", ".", "CPU_ALIASES", ".", "get", "(", "std_platform", ".", "machine", "(", ")", ",", "std_platform", ".", "machine", "(", ")", ")", "if", "native_system", "==", "self", ".", "system", "and", "native_machine", "==", "self", ".", "machine", ":", "return", "''", "platform", "=", "{", "'linux'", ":", "'linux32'", ",", "'android-api-16'", ":", "'android-arm'", ",", "'android-aarch64'", ":", "'android-arm64'", ",", "}", ".", "get", "(", "self", ".", "gecko_platform", ",", "self", ".", "gecko_platform", ")", "return", "platform", "+", "'-'" ]
Generate platform prefix for cross-platform downloads.
[ "Generate", "platform", "prefix", "for", "cross", "-", "platform", "downloads", "." ]
python
train
43.125
tritemio/PyBroMo
pybromo/storage.py
https://github.com/tritemio/PyBroMo/blob/b75f82a4551ff37e7c7a7e6954c536451f3e6d06/pybromo/storage.py#L89-L108
def set_sim_params(self, nparams, attr_params): """Store parameters in `params` in `h5file.root.parameters`. `nparams` (dict) A dict as returned by `get_params()` in `ParticlesSimulation()` The format is: keys: used as parameter name values: (2-element tuple) first element is the parameter value second element is a string used as "title" (description) `attr_params` (dict) A dict whose items are stored as attributes in '/parameters' """ for name, value in nparams.items(): val = value[0] if value[0] is not None else 'none' self.h5file.create_array('/parameters', name, obj=val, title=value[1]) for name, value in attr_params.items(): self.h5file.set_node_attr('/parameters', name, value)
[ "def", "set_sim_params", "(", "self", ",", "nparams", ",", "attr_params", ")", ":", "for", "name", ",", "value", "in", "nparams", ".", "items", "(", ")", ":", "val", "=", "value", "[", "0", "]", "if", "value", "[", "0", "]", "is", "not", "None", "else", "'none'", "self", ".", "h5file", ".", "create_array", "(", "'/parameters'", ",", "name", ",", "obj", "=", "val", ",", "title", "=", "value", "[", "1", "]", ")", "for", "name", ",", "value", "in", "attr_params", ".", "items", "(", ")", ":", "self", ".", "h5file", ".", "set_node_attr", "(", "'/parameters'", ",", "name", ",", "value", ")" ]
Store parameters in `params` in `h5file.root.parameters`. `nparams` (dict) A dict as returned by `get_params()` in `ParticlesSimulation()` The format is: keys: used as parameter name values: (2-element tuple) first element is the parameter value second element is a string used as "title" (description) `attr_params` (dict) A dict whose items are stored as attributes in '/parameters'
[ "Store", "parameters", "in", "params", "in", "h5file", ".", "root", ".", "parameters", "." ]
python
valid
45.15
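A self-contained sketch of the PyTables calls used above; the file name, group, and parameter values are hypothetical:

import tables

h5file = tables.open_file('sim.h5', 'w')
h5file.create_group('/', 'parameters')
h5file.create_array('/parameters', 'D', obj=1.2e-11, title='diffusion coefficient')
h5file.set_node_attr('/parameters', 'seed', 42)
h5file.close()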
pyrapt/rapt
rapt/treebrd/grammars/core_grammar.py
https://github.com/pyrapt/rapt/blob/0193a07aafff83a887fdc9e5e0f25eafa5b1b205/rapt/treebrd/grammars/core_grammar.py#L132-L140
def assignment(self): """ assignment ::= relation_name assign expression | relation_name param_start attribute_list param_stop assign expression """ lhs = Group(self.relation_name + Optional(self.parenthesize(self.attribute_list))) return Group(lhs + Keyword(self.syntax.assign_op) + self.expression)
[ "def", "assignment", "(", "self", ")", ":", "lhs", "=", "Group", "(", "self", ".", "relation_name", "+", "Optional", "(", "self", ".", "parenthesize", "(", "self", ".", "attribute_list", ")", ")", ")", "return", "Group", "(", "lhs", "+", "Keyword", "(", "self", ".", "syntax", ".", "assign_op", ")", "+", "self", ".", "expression", ")" ]
assignment ::= relation_name assign expression | relation_name param_start attribute_list param_stop assign expression
[ "assignment", "::", "=", "relation_name", "assign", "expression", "|", "relation_name", "param_start", "attribute_list", "param_stop", "assign", "expression" ]
python
train
41.777778
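The Group/Keyword combinators come from pyparsing; a stripped-down assignment rule with a hard-coded ':=' (standing in for the grammar's syntax object) might look like:

from pyparsing import Word, alphas, Group, Literal, Optional, Suppress, delimitedList

name = Word(alphas + '_')
attrs = Optional(Suppress('(') + delimitedList(name) + Suppress(')'))
lhs = Group(name + attrs)
assignment = Group(lhs + Literal(':=') + name)
print(assignment.parseString('r(a, b) := s'))  # [[['r', 'a', 'b'], ':=', 's']]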
yolothreat/utilitybelt
utilitybelt/utilitybelt.py
https://github.com/yolothreat/utilitybelt/blob/55ac6c31f87963d5e97be0402a4343c84846d118/utilitybelt/utilitybelt.py#L84-L95
def is_rfc1918(ip): """Checks to see if an IP address is used for local communications within a private network as specified by RFC 1918 """ if ip_between(ip, "10.0.0.0", "10.255.255.255"): return True elif ip_between(ip, "172.16.0.0", "172.31.255.255"): return True elif ip_between(ip, "192.168.0.0", "192.168.255.255"): return True else: return False
[ "def", "is_rfc1918", "(", "ip", ")", ":", "if", "ip_between", "(", "ip", ",", "\"10.0.0.0\"", ",", "\"10.255.255.255\"", ")", ":", "return", "True", "elif", "ip_between", "(", "ip", ",", "\"172.16.0.0\"", ",", "\"172.31.255.255\"", ")", ":", "return", "True", "elif", "ip_between", "(", "ip", ",", "\"192.168.0.0\"", ",", "\"192.168.255.255\"", ")", ":", "return", "True", "else", ":", "return", "False" ]
Checks to see if an IP address is used for local communications within a private network as specified by RFC 1918
[ "Checks", "to", "see", "if", "an", "IP", "address", "is", "used", "for", "local", "communications", "within", "a", "private", "network", "as", "specified", "by", "RFC", "1918" ]
python
train
33.416667
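Expected behaviour of the helper above: is_rfc1918('192.168.1.10') is True and is_rfc1918('8.8.8.8') is False. The stdlib ipaddress module offers a broadly similar (slightly wider) check:

import ipaddress

print(ipaddress.ip_address('192.168.1.10').is_private)  # True
print(ipaddress.ip_address('8.8.8.8').is_private)       # False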
juju/python-libjuju
juju/model.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/model.py#L1449-L1461
async def destroy_unit(self, *unit_names): """Destroy units by name. """ connection = self.connection() app_facade = client.ApplicationFacade.from_connection(connection) log.debug( 'Destroying unit%s %s', 's' if len(unit_names) > 1 else '', ' '.join(unit_names)) return await app_facade.DestroyUnits(list(unit_names))
[ "async", "def", "destroy_unit", "(", "self", ",", "*", "unit_names", ")", ":", "connection", "=", "self", ".", "connection", "(", ")", "app_facade", "=", "client", ".", "ApplicationFacade", ".", "from_connection", "(", "connection", ")", "log", ".", "debug", "(", "'Destroying unit%s %s'", ",", "'s'", "if", "len", "(", "unit_names", ")", ">", "1", "else", "''", ",", "' '", ".", "join", "(", "unit_names", ")", ")", "return", "await", "app_facade", ".", "DestroyUnits", "(", "list", "(", "unit_names", ")", ")" ]
Destroy units by name.
[ "Destroy", "units", "by", "name", "." ]
python
train
30.230769
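A hedged sketch of driving the coroutine above; it assumes a bootstrapped Juju controller and a deployed 'mysql/0' unit:

import asyncio
from juju.model import Model

async def main():
    model = Model()
    await model.connect()              # connect to the current model
    await model.destroy_unit('mysql/0')
    await model.disconnect()

asyncio.run(main())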
genialis/resolwe
resolwe/flow/executors/run.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/executors/run.py#L129-L132
def _create_file(self, filename): """Ensure a new file is created and opened for writing.""" file_descriptor = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_EXCL) return os.fdopen(file_descriptor, 'w')
[ "def", "_create_file", "(", "self", ",", "filename", ")", ":", "file_descriptor", "=", "os", ".", "open", "(", "filename", ",", "os", ".", "O_WRONLY", "|", "os", ".", "O_CREAT", "|", "os", ".", "O_EXCL", ")", "return", "os", ".", "fdopen", "(", "file_descriptor", ",", "'w'", ")" ]
Ensure a new file is created and opened for writing.
[ "Ensure", "a", "new", "file", "is", "created", "and", "opened", "for", "writing", "." ]
python
train
56.5
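The O_CREAT | O_EXCL combination is what makes the creation atomic; a standalone sketch with a hypothetical file name:

import os

# raises FileExistsError if the file already exists
fd = os.open('result.txt', os.O_WRONLY | os.O_CREAT | os.O_EXCL)
with os.fdopen(fd, 'w') as stream:
    stream.write('written exactly once\n')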
diffeo/rejester
rejester/workers.py
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/workers.py#L950-L965
def stop_all_children(self): '''Kill all workers.''' # There's an unfortunate race condition if we try to log this # case: we can't depend on the logging child actually receiving # the log message before we kill it off. C'est la vie... self.stop_log_child() for pid in self.children: try: os.kill(pid, signal.SIGTERM) os.waitpid(pid, 0) except OSError, e: if e.errno == errno.ESRCH or e.errno == errno.ECHILD: # No such process pass else: raise
[ "def", "stop_all_children", "(", "self", ")", ":", "# There's an unfortunate race condition if we try to log this", "# case: we can't depend on the logging child actually receiving", "# the log message before we kill it off. C'est la vie...", "self", ".", "stop_log_child", "(", ")", "for", "pid", "in", "self", ".", "children", ":", "try", ":", "os", ".", "kill", "(", "pid", ",", "signal", ".", "SIGTERM", ")", "os", ".", "waitpid", "(", "pid", ",", "0", ")", "except", "OSError", ",", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "ESRCH", "or", "e", ".", "errno", "==", "errno", ".", "ECHILD", ":", "# No such process", "pass", "else", ":", "raise" ]
Kill all workers.
[ "Kill", "all", "workers", "." ]
python
train
39.125
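The kill-then-reap pattern above, rewritten in Python 3 syntax (the record itself is Python 2, hence `except OSError, e`):

import errno
import os
import signal

def terminate(pid):
    try:
        os.kill(pid, signal.SIGTERM)
        os.waitpid(pid, 0)            # reap the child to avoid a zombie
    except OSError as exc:
        if exc.errno not in (errno.ESRCH, errno.ECHILD):
            raise                     # not "no such process/child": re-raise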
PmagPy/PmagPy
pmagpy/pmagplotlib.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmagplotlib.py#L2268-L2301
def plot_hpars(HDD, hpars, sym): """ function to plot hysteresis parameters deprecated (used in hysteresis_magic) """ plt.figure(num=HDD['hyst']) X, Y = [], [] X.append(0) Y.append(old_div(float(hpars['hysteresis_mr_moment']), float( hpars['hysteresis_ms_moment']))) X.append(float(hpars['hysteresis_bc'])) Y.append(0) plt.plot(X, Y, sym) bounds = plt.axis() n4 = 'Ms: ' + '%8.2e' % (float(hpars['hysteresis_ms_moment'])) + ' Am^2' plt.text(bounds[1] - .9 * bounds[1], -.9, n4) n1 = 'Mr: ' + '%8.2e' % (float(hpars['hysteresis_mr_moment'])) + ' Am^2' plt.text(bounds[1] - .9 * bounds[1], -.7, n1) n2 = 'Bc: ' + '%8.2e' % (float(hpars['hysteresis_bc'])) + ' T' plt.text(bounds[1] - .9 * bounds[1], -.5, n2) if 'hysteresis_xhf' in list(hpars.keys()): n3 = r'Xhf: ' + '%8.2e' % (float(hpars['hysteresis_xhf'])) + ' m^3' plt.text(bounds[1] - .9 * bounds[1], -.3, n3) plt.figure(num=HDD['deltaM']) X, Y, Bcr = [], [], "" if 'hysteresis_bcr' in list(hpars.keys()): X.append(float(hpars['hysteresis_bcr'])) Y.append(0) Bcr = float(hpars['hysteresis_bcr']) plt.plot(X, Y, sym) bounds = plt.axis() if Bcr != "": n1 = 'Bcr: ' + '%8.2e' % (Bcr) + ' T' plt.text(bounds[1] - .5 * bounds[1], .9 * bounds[3], n1)
[ "def", "plot_hpars", "(", "HDD", ",", "hpars", ",", "sym", ")", ":", "plt", ".", "figure", "(", "num", "=", "HDD", "[", "'hyst'", "]", ")", "X", ",", "Y", "=", "[", "]", ",", "[", "]", "X", ".", "append", "(", "0", ")", "Y", ".", "append", "(", "old_div", "(", "float", "(", "hpars", "[", "'hysteresis_mr_moment'", "]", ")", ",", "float", "(", "hpars", "[", "'hysteresis_ms_moment'", "]", ")", ")", ")", "X", ".", "append", "(", "float", "(", "hpars", "[", "'hysteresis_bc'", "]", ")", ")", "Y", ".", "append", "(", "0", ")", "plt", ".", "plot", "(", "X", ",", "Y", ",", "sym", ")", "bounds", "=", "plt", ".", "axis", "(", ")", "n4", "=", "'Ms: '", "+", "'%8.2e'", "%", "(", "float", "(", "hpars", "[", "'hysteresis_ms_moment'", "]", ")", ")", "+", "' Am^2'", "plt", ".", "text", "(", "bounds", "[", "1", "]", "-", ".9", "*", "bounds", "[", "1", "]", ",", "-", ".9", ",", "n4", ")", "n1", "=", "'Mr: '", "+", "'%8.2e'", "%", "(", "float", "(", "hpars", "[", "'hysteresis_mr_moment'", "]", ")", ")", "+", "' Am^2'", "plt", ".", "text", "(", "bounds", "[", "1", "]", "-", ".9", "*", "bounds", "[", "1", "]", ",", "-", ".7", ",", "n1", ")", "n2", "=", "'Bc: '", "+", "'%8.2e'", "%", "(", "float", "(", "hpars", "[", "'hysteresis_bc'", "]", ")", ")", "+", "' T'", "plt", ".", "text", "(", "bounds", "[", "1", "]", "-", ".9", "*", "bounds", "[", "1", "]", ",", "-", ".5", ",", "n2", ")", "if", "'hysteresis_xhf'", "in", "list", "(", "hpars", ".", "keys", "(", ")", ")", ":", "n3", "=", "r'Xhf: '", "+", "'%8.2e'", "%", "(", "float", "(", "hpars", "[", "'hysteresis_xhf'", "]", ")", ")", "+", "' m^3'", "plt", ".", "text", "(", "bounds", "[", "1", "]", "-", ".9", "*", "bounds", "[", "1", "]", ",", "-", ".3", ",", "n3", ")", "plt", ".", "figure", "(", "num", "=", "HDD", "[", "'deltaM'", "]", ")", "X", ",", "Y", ",", "Bcr", "=", "[", "]", ",", "[", "]", ",", "\"\"", "if", "'hysteresis_bcr'", "in", "list", "(", "hpars", ".", "keys", "(", ")", ")", ":", "X", ".", "append", "(", "float", "(", "hpars", "[", "'hysteresis_bcr'", "]", ")", ")", "Y", ".", "append", "(", "0", ")", "Bcr", "=", "float", "(", "hpars", "[", "'hysteresis_bcr'", "]", ")", "plt", ".", "plot", "(", "X", ",", "Y", ",", "sym", ")", "bounds", "=", "plt", ".", "axis", "(", ")", "if", "Bcr", "!=", "\"\"", ":", "n1", "=", "'Bcr: '", "+", "'%8.2e'", "%", "(", "Bcr", ")", "+", "' T'", "plt", ".", "text", "(", "bounds", "[", "1", "]", "-", ".5", "*", "bounds", "[", "1", "]", ",", ".9", "*", "bounds", "[", "3", "]", ",", "n1", ")" ]
function to plot hysteresis parameters deprecated (used in hysteresis_magic)
[ "function", "to", "plot", "hysteresis", "parameters", "deprecated", "(", "used", "in", "hysteresis_magic", ")" ]
python
train
39.058824
grundprinzip/pyxplorer
pyxplorer/manager.py
https://github.com/grundprinzip/pyxplorer/blob/34c1d166cfef4a94aeb6d5fcb3cbb726d48146e2/pyxplorer/manager.py#L22-L29
def tables(self): """ :return: all tables stored in this database """ cursor = self.connection.cursor() cursor.execute("show tables in %s" % self.db) self._tables = [t.Table(r[0], con=self.connection, db=self.db) for r in cursor.fetchall()] return self._tables
[ "def", "tables", "(", "self", ")", ":", "cursor", "=", "self", ".", "connection", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "\"show tables in %s\"", "%", "self", ".", "db", ")", "self", ".", "_tables", "=", "[", "t", ".", "Table", "(", "r", "[", "0", "]", ",", "con", "=", "self", ".", "connection", ",", "db", "=", "self", ".", "db", ")", "for", "r", "in", "cursor", ".", "fetchall", "(", ")", "]", "return", "self", ".", "_tables" ]
:return: all tables stored in this database
[ ":", "return", ":", "all", "tables", "stored", "in", "this", "database" ]
python
train
38.625
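The same cursor pattern against a database available everywhere; sqlite's catalog query stands in for the Impala/Hive-style 'show tables in <db>' above:

import sqlite3

connection = sqlite3.connect(':memory:')
connection.execute('CREATE TABLE t1 (x)')
cursor = connection.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type = 'table'")
print([row[0] for row in cursor.fetchall()])  # ['t1']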
spyder-ide/spyder
spyder/plugins/editor/widgets/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/editor.py#L293-L297
def remove_and_append(self, index): """Remove previous entries of a tab, and add it as the latest.""" while index in self: self.remove(index) self.append(index)
[ "def", "remove_and_append", "(", "self", ",", "index", ")", ":", "while", "index", "in", "self", ":", "self", ".", "remove", "(", "index", ")", "self", ".", "append", "(", "index", ")" ]
Remove previous entries of a tab, and add it as the latest.
[ "Remove", "previous", "entries", "of", "a", "tab", "and", "add", "it", "as", "the", "latest", "." ]
python
train
39.6
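The while-loop makes the move idempotent even if the index was somehow recorded twice; the same dance on a plain list:

history = [3, 1, 3, 2]
index = 3
while index in history:
    history.remove(index)
history.append(index)
print(history)  # [1, 2, 3]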
kalaspuff/tomodachi
tomodachi/transport/http.py
https://github.com/kalaspuff/tomodachi/blob/0f21e87cb43821dd6e726a8d4871fb8e17c8fe3d/tomodachi/transport/http.py#L88-L143
def handle_error(self, request: Any, status: int = 500, exc: Any = None, message: Optional[str] = None) -> web.Response: """Handle errors. Returns HTTP response with specific status code. Logs additional information. It always closes current connection.""" if self.transport is None: # client has been disconnected during writing. if self._access_log: request_ip = RequestHandler.get_request_ip(request, None) version_string = None if isinstance(request.version, HttpVersion): version_string = 'HTTP/{}.{}'.format(request.version.major, request.version.minor) logging.getLogger('transport.http').info('[{}] [{}] {} {} "{} {}{}{}" - {} "{}" -'.format( RequestHandler.colorize_status('http', 499), RequestHandler.colorize_status(499), request_ip or '', '"{}"'.format(request._cache['auth'].login.replace('"', '')) if request._cache.get('auth') and getattr(request._cache.get('auth'), 'login', None) else '-', request.method, request.path, '?{}'.format(request.query_string) if request.query_string else '', ' {}'.format(version_string) if version_string else '', request.content_length if request.content_length is not None else '-', request.headers.get('User-Agent', '').replace('"', '') )) headers = {} headers[hdrs.CONTENT_TYPE] = 'text/plain; charset=utf-8' msg = '' if status == 500 or not message else message headers[hdrs.CONTENT_LENGTH] = str(len(msg)) headers[hdrs.SERVER] = self._server_header or '' resp = web.Response(status=status, # type: ignore text=msg, headers=headers) # type: web.Response resp.force_close() # type: ignore # some data already got sent, connection is broken if request.writer.output_size > 0 or self.transport is None: self.force_close() # type: ignore elif self.transport is not None: request_ip = RequestHandler.get_request_ip(request, None) if not request_ip: peername = request.transport.get_extra_info('peername') if peername: request_ip, _ = peername if self._access_log: logging.getLogger('transport.http').info('[{}] [{}] {} {} "INVALID" {} - "" -'.format( RequestHandler.colorize_status('http', status), RequestHandler.colorize_status(status), request_ip or '', '"{}"'.format(request._cache['auth'].login.replace('"', '')) if request._cache.get('auth') and getattr(request._cache.get('auth'), 'login', None) else '-', len(msg) )) return resp
[ "def", "handle_error", "(", "self", ",", "request", ":", "Any", ",", "status", ":", "int", "=", "500", ",", "exc", ":", "Any", "=", "None", ",", "message", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "web", ".", "Response", ":", "if", "self", ".", "transport", "is", "None", ":", "# client has been disconnected during writing.", "if", "self", ".", "_access_log", ":", "request_ip", "=", "RequestHandler", ".", "get_request_ip", "(", "request", ",", "None", ")", "version_string", "=", "None", "if", "isinstance", "(", "request", ".", "version", ",", "HttpVersion", ")", ":", "version_string", "=", "'HTTP/{}.{}'", ".", "format", "(", "request", ".", "version", ".", "major", ",", "request", ".", "version", ".", "minor", ")", "logging", ".", "getLogger", "(", "'transport.http'", ")", ".", "info", "(", "'[{}] [{}] {} {} \"{} {}{}{}\" - {} \"{}\" -'", ".", "format", "(", "RequestHandler", ".", "colorize_status", "(", "'http'", ",", "499", ")", ",", "RequestHandler", ".", "colorize_status", "(", "499", ")", ",", "request_ip", "or", "''", ",", "'\"{}\"'", ".", "format", "(", "request", ".", "_cache", "[", "'auth'", "]", ".", "login", ".", "replace", "(", "'\"'", ",", "''", ")", ")", "if", "request", ".", "_cache", ".", "get", "(", "'auth'", ")", "and", "getattr", "(", "request", ".", "_cache", ".", "get", "(", "'auth'", ")", ",", "'login'", ",", "None", ")", "else", "'-'", ",", "request", ".", "method", ",", "request", ".", "path", ",", "'?{}'", ".", "format", "(", "request", ".", "query_string", ")", "if", "request", ".", "query_string", "else", "''", ",", "' {}'", ".", "format", "(", "version_string", ")", "if", "version_string", "else", "''", ",", "request", ".", "content_length", "if", "request", ".", "content_length", "is", "not", "None", "else", "'-'", ",", "request", ".", "headers", ".", "get", "(", "'User-Agent'", ",", "''", ")", ".", "replace", "(", "'\"'", ",", "''", ")", ")", ")", "headers", "=", "{", "}", "headers", "[", "hdrs", ".", "CONTENT_TYPE", "]", "=", "'text/plain; charset=utf-8'", "msg", "=", "''", "if", "status", "==", "500", "or", "not", "message", "else", "message", "headers", "[", "hdrs", ".", "CONTENT_LENGTH", "]", "=", "str", "(", "len", "(", "msg", ")", ")", "headers", "[", "hdrs", ".", "SERVER", "]", "=", "self", ".", "_server_header", "or", "''", "resp", "=", "web", ".", "Response", "(", "status", "=", "status", ",", "# type: ignore", "text", "=", "msg", ",", "headers", "=", "headers", ")", "# type: web.Response", "resp", ".", "force_close", "(", ")", "# type: ignore", "# some data already got sent, connection is broken", "if", "request", ".", "writer", ".", "output_size", ">", "0", "or", "self", ".", "transport", "is", "None", ":", "self", ".", "force_close", "(", ")", "# type: ignore", "elif", "self", ".", "transport", "is", "not", "None", ":", "request_ip", "=", "RequestHandler", ".", "get_request_ip", "(", "request", ",", "None", ")", "if", "not", "request_ip", ":", "peername", "=", "request", ".", "transport", ".", "get_extra_info", "(", "'peername'", ")", "if", "peername", ":", "request_ip", ",", "_", "=", "peername", "if", "self", ".", "_access_log", ":", "logging", ".", "getLogger", "(", "'transport.http'", ")", ".", "info", "(", "'[{}] [{}] {} {} \"INVALID\" {} - \"\" -'", ".", "format", "(", "RequestHandler", ".", "colorize_status", "(", "'http'", ",", "status", ")", ",", "RequestHandler", ".", "colorize_status", "(", "status", ")", ",", "request_ip", "or", "''", ",", "'\"{}\"'", ".", "format", "(", "request", ".", "_cache", "[", "'auth'", "]", 
".", "login", ".", "replace", "(", "'\"'", ",", "''", ")", ")", "if", "request", ".", "_cache", ".", "get", "(", "'auth'", ")", "and", "getattr", "(", "request", ".", "_cache", ".", "get", "(", "'auth'", ")", ",", "'login'", ",", "None", ")", "else", "'-'", ",", "len", "(", "msg", ")", ")", ")", "return", "resp" ]
Handle errors. Returns HTTP response with specific status code. Logs additional information. It always closes current connection.
[ "Handle", "errors", "." ]
python
train
53.035714
treycucco/bidon
bidon/db/model/model_base.py
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/model/model_base.py#L42-L56
def has_attr(cls, attr_name): """Check to see if an attribute is defined for the model.""" if attr_name in cls.attrs: return True if isinstance(cls.primary_key_name, str) and cls.primary_key_name == attr_name: return True if isinstance(cls.primary_key_name, tuple) and attr_name in cls.primary_key_name: return True if cls.timestamps is not None and attr_name in cls.timestamps: return True return False
[ "def", "has_attr", "(", "cls", ",", "attr_name", ")", ":", "if", "attr_name", "in", "cls", ".", "attrs", ":", "return", "True", "if", "isinstance", "(", "cls", ".", "primary_key_name", ",", "str", ")", "and", "cls", ".", "primary_key_name", "==", "attr_name", ":", "return", "True", "if", "isinstance", "(", "cls", ".", "primary_key_name", ",", "tuple", ")", "and", "attr_name", "in", "cls", ".", "primary_key_name", ":", "return", "True", "if", "cls", ".", "timestamps", "is", "not", "None", "and", "attr_name", "in", "cls", ".", "timestamps", ":", "return", "True", "return", "False" ]
Check to see if an attribute is defined for the model.
[ "Check", "to", "see", "if", "an", "attribute", "is", "defined", "for", "the", "model", "." ]
python
train
29.4
FreekKalter/geoselect
geoselect.py
https://github.com/FreekKalter/geoselect/blob/2c6ab869d50215eba21dce3e1770bcf8ecdb41f3/geoselect.py#L93-L110
def build_dict(img_iterator): """ Build a dict from files from iterator. {'absolute_filename': {'EXIF field': 'exif tag value'}} Parse DateTime from filename in the same loop, added as 'TIME'. """ files_with_tags = dict() for f in img_iterator: with open(str(f.abspath()), 'rb') as jpg: tags = exifread.process_file(jpg) # Dont waste space on thumbnails try: del tags['JPEGThumbnail'] except KeyError: pass tags['TIME'] = get_time(str(f.abspath()), tags) files_with_tags[str(f.abspath())] = tags return files_with_tags
[ "def", "build_dict", "(", "img_iterator", ")", ":", "files_with_tags", "=", "dict", "(", ")", "for", "f", "in", "img_iterator", ":", "with", "open", "(", "str", "(", "f", ".", "abspath", "(", ")", ")", ",", "'rb'", ")", "as", "jpg", ":", "tags", "=", "exifread", ".", "process_file", "(", "jpg", ")", "# Dont waste space on thumbnails\r", "try", ":", "del", "tags", "[", "'JPEGThumbnail'", "]", "except", "KeyError", ":", "pass", "tags", "[", "'TIME'", "]", "=", "get_time", "(", "str", "(", "f", ".", "abspath", "(", ")", ")", ",", "tags", ")", "files_with_tags", "[", "str", "(", "f", ".", "abspath", "(", ")", ")", "]", "=", "tags", "return", "files_with_tags" ]
Build a dict from files from iterator. {'absolute_filename': {'EXIF field': 'exif tag value'}} Parse DateTime from filename in the same loop, added as 'TIME'.
[ "Build", "a", "dict", "from", "files", "from", "iterator", ".", "{", "absolute_filename", ":", "{", "EXIF", "field", ":", "exif", "tag", "value", "}}", "Parse", "DateTime", "from", "filename", "in", "the", "same", "loop", "added", "as", "TIME", "." ]
python
train
36.777778
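A hedged sketch of the exifread calls above; the file name is hypothetical and the exifread package must be installed:

import exifread

with open('photo.jpg', 'rb') as jpg:
    tags = exifread.process_file(jpg)
tags.pop('JPEGThumbnail', None)   # drop the bulky thumbnail, as the source does
print(tags.get('EXIF DateTimeOriginal'))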
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/inputsplitter.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/inputsplitter.py#L206-L227
def has_comment(src): """Indicate whether an input line has (i.e. ends in, or is) a comment. This uses tokenize, so it can distinguish comments from # inside strings. Parameters ---------- src : string A single line input string. Returns ------- Boolean: True if source has a comment. """ readline = StringIO(src).readline toktypes = set() try: for t in tokenize.generate_tokens(readline): toktypes.add(t[0]) except tokenize.TokenError: pass return(tokenize.COMMENT in toktypes)
[ "def", "has_comment", "(", "src", ")", ":", "readline", "=", "StringIO", "(", "src", ")", ".", "readline", "toktypes", "=", "set", "(", ")", "try", ":", "for", "t", "in", "tokenize", ".", "generate_tokens", "(", "readline", ")", ":", "toktypes", ".", "add", "(", "t", "[", "0", "]", ")", "except", "tokenize", ".", "TokenError", ":", "pass", "return", "(", "tokenize", ".", "COMMENT", "in", "toktypes", ")" ]
Indicate whether an input line has (i.e. ends in, or is) a comment. This uses tokenize, so it can distinguish comments from # inside strings. Parameters ---------- src : string A single line input string. Returns ------- Boolean: True if source has a comment.
[ "Indicate", "whether", "an", "input", "line", "has", "(", "i", ".", "e", ".", "ends", "in", "or", "is", ")", "a", "comment", ".", "This", "uses", "tokenize", "so", "it", "can", "distinguish", "comments", "from", "#", "inside", "strings", ".", "Parameters", "----------", "src", ":", "string", "A", "single", "line", "input", "string", ".", "Returns", "-------", "Boolean", ":", "True", "if", "source", "has", "a", "comment", "." ]
python
test
25.545455
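Why tokenize beats a naive '#' search: the hash inside the string below is not a comment. A Python 3 rewrite of the same check:

import tokenize
from io import StringIO

def has_comment(src):
    toktypes = set()
    try:
        for tok in tokenize.generate_tokens(StringIO(src).readline):
            toktypes.add(tok[0])
    except tokenize.TokenError:
        pass
    return tokenize.COMMENT in toktypes

print(has_comment("a = '# not a comment'"))   # False
print(has_comment("a = 1  # real comment"))   # True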
bigchaindb/bigchaindb
bigchaindb/web/views/blocks.py
https://github.com/bigchaindb/bigchaindb/blob/835fdfcf598918f76139e3b88ee33dd157acaaa7/bigchaindb/web/views/blocks.py#L38-L57
def get(self): """API endpoint to get the related blocks for a transaction. Return: A ``list`` of ``block_id``s that contain the given transaction. The list may be filtered when provided a status query parameter: "valid", "invalid", "undecided". """ parser = reqparse.RequestParser() parser.add_argument('transaction_id', type=str, required=True) args = parser.parse_args(strict=True) tx_id = args['transaction_id'] pool = current_app.config['bigchain_pool'] with pool() as bigchain: blocks = bigchain.get_block_containing_tx(tx_id) return blocks
[ "def", "get", "(", "self", ")", ":", "parser", "=", "reqparse", ".", "RequestParser", "(", ")", "parser", ".", "add_argument", "(", "'transaction_id'", ",", "type", "=", "str", ",", "required", "=", "True", ")", "args", "=", "parser", ".", "parse_args", "(", "strict", "=", "True", ")", "tx_id", "=", "args", "[", "'transaction_id'", "]", "pool", "=", "current_app", ".", "config", "[", "'bigchain_pool'", "]", "with", "pool", "(", ")", "as", "bigchain", ":", "blocks", "=", "bigchain", ".", "get_block_containing_tx", "(", "tx_id", ")", "return", "blocks" ]
API endpoint to get the related blocks for a transaction. Return: A ``list`` of ``block_id``s that contain the given transaction. The list may be filtered when provided a status query parameter: "valid", "invalid", "undecided".
[ "API", "endpoint", "to", "get", "the", "related", "blocks", "for", "a", "transaction", "." ]
python
train
33
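The reqparse handshake in isolation; the argument name matches the record, everything else is test scaffolding:

from flask import Flask
from flask_restful import reqparse

app = Flask(__name__)
parser = reqparse.RequestParser()
parser.add_argument('transaction_id', type=str, required=True)

with app.test_request_context('/?transaction_id=abc123'):
    args = parser.parse_args(strict=True)
    print(args['transaction_id'])  # 'abc123'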
neherlab/treetime
treetime/gtr.py
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/gtr.py#L399-L491
def infer(cls, nij, Ti, root_state, fixed_pi=None, pc=5.0, gap_limit=0.01, **kwargs): """ Infer a GTR model by specifying the number of transitions and time spent in each character. The basic equation that is being solved is :math:`n_{ij} = pi_i W_{ij} T_j` where :math:`n_{ij}` are the transitions, :math:`pi_i` are the equilibrium state frequencies, :math:`W_{ij}` is the "substitution attempt matrix", while :math:`T_i` is the time on the tree spent in character state :math:`i`. To regularize the process, we add pseudocounts and also need to account for the fact that the root of the tree is in a particular state. the modified equation is :math:`n_{ij} + pc = pi_i W_{ij} (T_j+pc+root\_state)` Parameters ---------- nij : nxn matrix The number of times a change in character state is observed between state j and i Ti :n vector The time spent in each character state root_state : n vector The number of characters in state i in the sequence of the root node. pc : float Pseudocounts, this determines the lower cutoff on the rate when no substitutions are observed **kwargs: Key word arguments to be passed Keyword Args ------------ alphabet : str Specify alphabet when applicable. If the alphabet specification is required, but no alphabet is specified, the nucleotide alphabet will be used as default. """ from scipy import linalg as LA gtr = cls(**kwargs) gtr.logger("GTR: model inference ",1) dp = 1e-5 Nit = 40 pc_mat = pc*np.ones_like(nij) np.fill_diagonal(pc_mat, 0.0) count = 0 pi_old = np.zeros_like(Ti) if fixed_pi is None: pi = np.ones_like(Ti) else: pi = np.copy(fixed_pi) pi/=pi.sum() W_ij = np.ones_like(nij) mu = nij.sum()/Ti.sum() # if pi is fixed, this will immediately converge while LA.norm(pi_old-pi) > dp and count < Nit: gtr.logger(' '.join(map(str, ['GTR inference iteration',count,'change:',LA.norm(pi_old-pi)])), 3) count += 1 pi_old = np.copy(pi) W_ij = (nij+nij.T+2*pc_mat)/mu/(np.outer(pi,Ti) + np.outer(Ti,pi) + ttconf.TINY_NUMBER + 2*pc_mat) np.fill_diagonal(W_ij, 0) scale_factor = np.einsum('i,ij,j',pi,W_ij,pi) W_ij = W_ij/scale_factor if fixed_pi is None: pi = (np.sum(nij+pc_mat,axis=1)+root_state)/(ttconf.TINY_NUMBER + mu*np.dot(W_ij,Ti)+root_state.sum()+np.sum(pc_mat, axis=1)) pi /= pi.sum() mu = nij.sum()/(ttconf.TINY_NUMBER + np.sum(pi * (W_ij.dot(Ti)))) if count >= Nit: gtr.logger('WARNING: maximum number of iterations has been reached in GTR inference',3, warn=True) if LA.norm(pi_old-pi) > dp: gtr.logger('the iterative scheme has not converged',3,warn=True) elif np.abs(1-np.max(pi.sum(axis=0))) > dp: gtr.logger('the iterative scheme has converged, but proper normalization was not reached',3,warn=True) if gtr.gap_index is not None: if pi[gtr.gap_index]<gap_limit: gtr.logger('The model allows for gaps which are estimated to occur at a low fraction of %1.3e'%pi[gtr.gap_index]+ '\n\t\tthis can potentially result in artificats.'+ '\n\t\tgap fraction will be set to %1.4f'%gap_limit,2,warn=True) pi[gtr.gap_index] = gap_limit pi /= pi.sum() gtr.assign_rates(mu=mu, W=W_ij, pi=pi) return gtr
[ "def", "infer", "(", "cls", ",", "nij", ",", "Ti", ",", "root_state", ",", "fixed_pi", "=", "None", ",", "pc", "=", "5.0", ",", "gap_limit", "=", "0.01", ",", "*", "*", "kwargs", ")", ":", "from", "scipy", "import", "linalg", "as", "LA", "gtr", "=", "cls", "(", "*", "*", "kwargs", ")", "gtr", ".", "logger", "(", "\"GTR: model inference \"", ",", "1", ")", "dp", "=", "1e-5", "Nit", "=", "40", "pc_mat", "=", "pc", "*", "np", ".", "ones_like", "(", "nij", ")", "np", ".", "fill_diagonal", "(", "pc_mat", ",", "0.0", ")", "count", "=", "0", "pi_old", "=", "np", ".", "zeros_like", "(", "Ti", ")", "if", "fixed_pi", "is", "None", ":", "pi", "=", "np", ".", "ones_like", "(", "Ti", ")", "else", ":", "pi", "=", "np", ".", "copy", "(", "fixed_pi", ")", "pi", "/=", "pi", ".", "sum", "(", ")", "W_ij", "=", "np", ".", "ones_like", "(", "nij", ")", "mu", "=", "nij", ".", "sum", "(", ")", "/", "Ti", ".", "sum", "(", ")", "# if pi is fixed, this will immediately converge", "while", "LA", ".", "norm", "(", "pi_old", "-", "pi", ")", ">", "dp", "and", "count", "<", "Nit", ":", "gtr", ".", "logger", "(", "' '", ".", "join", "(", "map", "(", "str", ",", "[", "'GTR inference iteration'", ",", "count", ",", "'change:'", ",", "LA", ".", "norm", "(", "pi_old", "-", "pi", ")", "]", ")", ")", ",", "3", ")", "count", "+=", "1", "pi_old", "=", "np", ".", "copy", "(", "pi", ")", "W_ij", "=", "(", "nij", "+", "nij", ".", "T", "+", "2", "*", "pc_mat", ")", "/", "mu", "/", "(", "np", ".", "outer", "(", "pi", ",", "Ti", ")", "+", "np", ".", "outer", "(", "Ti", ",", "pi", ")", "+", "ttconf", ".", "TINY_NUMBER", "+", "2", "*", "pc_mat", ")", "np", ".", "fill_diagonal", "(", "W_ij", ",", "0", ")", "scale_factor", "=", "np", ".", "einsum", "(", "'i,ij,j'", ",", "pi", ",", "W_ij", ",", "pi", ")", "W_ij", "=", "W_ij", "/", "scale_factor", "if", "fixed_pi", "is", "None", ":", "pi", "=", "(", "np", ".", "sum", "(", "nij", "+", "pc_mat", ",", "axis", "=", "1", ")", "+", "root_state", ")", "/", "(", "ttconf", ".", "TINY_NUMBER", "+", "mu", "*", "np", ".", "dot", "(", "W_ij", ",", "Ti", ")", "+", "root_state", ".", "sum", "(", ")", "+", "np", ".", "sum", "(", "pc_mat", ",", "axis", "=", "1", ")", ")", "pi", "/=", "pi", ".", "sum", "(", ")", "mu", "=", "nij", ".", "sum", "(", ")", "/", "(", "ttconf", ".", "TINY_NUMBER", "+", "np", ".", "sum", "(", "pi", "*", "(", "W_ij", ".", "dot", "(", "Ti", ")", ")", ")", ")", "if", "count", ">=", "Nit", ":", "gtr", ".", "logger", "(", "'WARNING: maximum number of iterations has been reached in GTR inference'", ",", "3", ",", "warn", "=", "True", ")", "if", "LA", ".", "norm", "(", "pi_old", "-", "pi", ")", ">", "dp", ":", "gtr", ".", "logger", "(", "'the iterative scheme has not converged'", ",", "3", ",", "warn", "=", "True", ")", "elif", "np", ".", "abs", "(", "1", "-", "np", ".", "max", "(", "pi", ".", "sum", "(", "axis", "=", "0", ")", ")", ")", ">", "dp", ":", "gtr", ".", "logger", "(", "'the iterative scheme has converged, but proper normalization was not reached'", ",", "3", ",", "warn", "=", "True", ")", "if", "gtr", ".", "gap_index", "is", "not", "None", ":", "if", "pi", "[", "gtr", ".", "gap_index", "]", "<", "gap_limit", ":", "gtr", ".", "logger", "(", "'The model allows for gaps which are estimated to occur at a low fraction of %1.3e'", "%", "pi", "[", "gtr", ".", "gap_index", "]", "+", "'\\n\\t\\tthis can potentially result in artificats.'", "+", "'\\n\\t\\tgap fraction will be set to %1.4f'", "%", "gap_limit", ",", "2", ",", "warn", "=", "True", ")", "pi", "[", "gtr", ".", "gap_index", 
"]", "=", "gap_limit", "pi", "/=", "pi", ".", "sum", "(", ")", "gtr", ".", "assign_rates", "(", "mu", "=", "mu", ",", "W", "=", "W_ij", ",", "pi", "=", "pi", ")", "return", "gtr" ]
Infer a GTR model by specifying the number of transitions and time spent in each character. The basic equation that is being solved is :math:`n_{ij} = pi_i W_{ij} T_j` where :math:`n_{ij}` are the transitions, :math:`pi_i` are the equilibrium state frequencies, :math:`W_{ij}` is the "substitution attempt matrix", while :math:`T_i` is the time on the tree spent in character state :math:`i`. To regularize the process, we add pseudocounts and also need to account for the fact that the root of the tree is in a particular state. the modified equation is :math:`n_{ij} + pc = pi_i W_{ij} (T_j+pc+root\_state)` Parameters ---------- nij : nxn matrix The number of times a change in character state is observed between state j and i Ti :n vector The time spent in each character state root_state : n vector The number of characters in state i in the sequence of the root node. pc : float Pseudocounts, this determines the lower cutoff on the rate when no substitutions are observed **kwargs: Key word arguments to be passed Keyword Args ------------ alphabet : str Specify alphabet when applicable. If the alphabet specification is required, but no alphabet is specified, the nucleotide alphabet will be used as default.
[ "Infer", "a", "GTR", "model", "by", "specifying", "the", "number", "of", "transitions", "and", "time", "spent", "in", "each", "character", ".", "The", "basic", "equation", "that", "is", "being", "solved", "is" ]
python
test
40.989247
project-rig/rig
rig/machine_control/regions.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/regions.py#L20-L52
def get_region_for_chip(x, y, level=3): """Get the region word for the given chip co-ordinates. Parameters ---------- x : int x co-ordinate y : int y co-ordinate level : int Level of region to build. 0 is the most coarse and 3 is the finest. When 3 is used the specified region will ONLY select the given chip, for other regions surrounding chips will also be selected. Returns ------- int A 32-bit value representing the co-ordinates of the chunk of SpiNNaker chips that should be selected and the blocks within this chunk that are selected. As long as bits (31:16) are the same these values may be OR-ed together to increase the number of sub-blocks selected. """ shift = 6 - 2*level bit = ((x >> shift) & 3) + 4*((y >> shift) & 3) # bit in bits 15:0 to set mask = 0xffff ^ ((4 << shift) - 1) # in {0xfffc, 0xfff0, 0xffc0, 0xff00} nx = x & mask # The mask guarantees that bits 1:0 will be cleared ny = y & mask # The mask guarantees that bits 1:0 will be cleared # sig bits x | sig bits y | 2-bit level | region select bits region = (nx << 24) | (ny << 16) | (level << 16) | (1 << bit) return region
[ "def", "get_region_for_chip", "(", "x", ",", "y", ",", "level", "=", "3", ")", ":", "shift", "=", "6", "-", "2", "*", "level", "bit", "=", "(", "(", "x", ">>", "shift", ")", "&", "3", ")", "+", "4", "*", "(", "(", "y", ">>", "shift", ")", "&", "3", ")", "# bit in bits 15:0 to set", "mask", "=", "0xffff", "^", "(", "(", "4", "<<", "shift", ")", "-", "1", ")", "# in {0xfffc, 0xfff0, 0xffc0, 0xff00}", "nx", "=", "x", "&", "mask", "# The mask guarantees that bits 1:0 will be cleared", "ny", "=", "y", "&", "mask", "# The mask guarantees that bits 1:0 will be cleared", "# sig bits x | sig bits y | 2-bit level | region select bits", "region", "=", "(", "nx", "<<", "24", ")", "|", "(", "ny", "<<", "16", ")", "|", "(", "level", "<<", "16", ")", "|", "(", "1", "<<", "bit", ")", "return", "region" ]
Get the region word for the given chip co-ordinates. Parameters ---------- x : int x co-ordinate y : int y co-ordinate level : int Level of region to build. 0 is the most coarse and 3 is the finest. When 3 is used the specified region will ONLY select the given chip, for other regions surrounding chips will also be selected. Returns ------- int A 32-bit value representing the co-ordinates of the chunk of SpiNNaker chips that should be selected and the blocks within this chunk that are selected. As long as bits (31:16) are the same these values may be OR-ed together to increase the number of sub-blocks selected.
[ "Get", "the", "region", "word", "for", "the", "given", "chip", "co", "-", "ordinates", "." ]
python
train
37.333333
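A worked example of the bit layout, with the function above in scope; the values were computed by hand from the code and should be treated as illustrative:

# level 3, chip (4, 5): chunk origin (4, 4), select bit 4
print(hex(get_region_for_chip(4, 5)))                               # 0x4070010
# chips in the same chunk OR together, as the docstring promises
print(hex(get_region_for_chip(4, 5) | get_region_for_chip(5, 5)))   # 0x4070030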
jedie/PyHardLinkBackup
PyHardLinkBackup/phlb/filesystem_walk.py
https://github.com/jedie/PyHardLinkBackup/blob/be28666834d2d9e3d8aac1b661cb2d5bd4056c29/PyHardLinkBackup/phlb/filesystem_walk.py#L85-L118
def iter_filtered_dir_entry(dir_entries, match_patterns, on_skip): """ Filter a list of DirEntryPath instances with the given pattern :param dir_entries: list of DirEntryPath instances :param match_patterns: used with Path.match() e.g.: "__pycache__/*", "*.tmp", "*.cache" :param on_skip: function that will be called if 'match_patterns' hits. e.g.: def on_skip(entry, pattern): log.error("Skip pattern %r hit: %s" % (pattern, entry.path)) :return: yields None or DirEntryPath instances """ def match(dir_entry_path, match_patterns, on_skip): for match_pattern in match_patterns: if dir_entry_path.path_instance.match(match_pattern): on_skip(dir_entry_path, match_pattern) return True return False for entry in dir_entries: try: dir_entry_path = DirEntryPath(entry) except FileNotFoundError as err: # e.g.: A file was deleted after the first filesystem scan # Will be obsolete if we use shadow-copy / snapshot function from filesystem # see: https://github.com/jedie/PyHardLinkBackup/issues/6 log.error("Can't make DirEntryPath() instance: %s" % err) continue if match(dir_entry_path, match_patterns, on_skip): yield None else: yield dir_entry_path
[ "def", "iter_filtered_dir_entry", "(", "dir_entries", ",", "match_patterns", ",", "on_skip", ")", ":", "def", "match", "(", "dir_entry_path", ",", "match_patterns", ",", "on_skip", ")", ":", "for", "match_pattern", "in", "match_patterns", ":", "if", "dir_entry_path", ".", "path_instance", ".", "match", "(", "match_pattern", ")", ":", "on_skip", "(", "dir_entry_path", ",", "match_pattern", ")", "return", "True", "return", "False", "for", "entry", "in", "dir_entries", ":", "try", ":", "dir_entry_path", "=", "DirEntryPath", "(", "entry", ")", "except", "FileNotFoundError", "as", "err", ":", "# e.g.: A file was deleted after the first filesystem scan", "# Will be obsolete if we use shadow-copy / snapshot function from filesystem", "# see: https://github.com/jedie/PyHardLinkBackup/issues/6", "log", ".", "error", "(", "\"Can't make DirEntryPath() instance: %s\"", "%", "err", ")", "continue", "if", "match", "(", "dir_entry_path", ",", "match_patterns", ",", "on_skip", ")", ":", "yield", "None", "else", ":", "yield", "dir_entry_path" ]
Filter a list of DirEntryPath instances with the given pattern :param dir_entries: list of DirEntryPath instances :param match_patterns: used with Path.match() e.g.: "__pycache__/*", "*.tmp", "*.cache" :param on_skip: function that will be called if 'match_patterns' hits. e.g.: def on_skip(entry, pattern): log.error("Skip pattern %r hit: %s" % (pattern, entry.path)) :return: yields None or DirEntryPath instances
[ "Filter", "a", "list", "of", "DirEntryPath", "instances", "with", "the", "given", "pattern" ]
python
train
40.529412
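The pattern test at the heart of the filter, on a bare pathlib.Path; right-anchored matching is why '__pycache__/*' hits nested files:

from pathlib import Path

patterns = ('__pycache__/*', '*.tmp', '*.cache')
p = Path('build/__pycache__/mod.cpython-38.pyc')
print(any(p.match(pat) for pat in patterns))  # True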
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L1941-L1946
def newTextReaderFilename(URI): """Create an xmlTextReader structure fed with the resource at @URI """ ret = libxml2mod.xmlNewTextReaderFilename(URI) if ret is None:raise treeError('xmlNewTextReaderFilename() failed') return xmlTextReader(_obj=ret)
[ "def", "newTextReaderFilename", "(", "URI", ")", ":", "ret", "=", "libxml2mod", ".", "xmlNewTextReaderFilename", "(", "URI", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'xmlNewTextReaderFilename() failed'", ")", "return", "xmlTextReader", "(", "_obj", "=", "ret", ")" ]
Create an xmlTextReader structure fed with the resource at @URI
[ "Create", "an", "xmlTextReader", "structure", "fed", "with", "the", "resource", "at" ]
python
train
44.333333
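A hedged usage sketch; the file name is hypothetical, and the capitalized Read/Name methods follow the libxml2 Python binding conventions:

import libxml2

reader = libxml2.newTextReaderFilename('doc.xml')
while reader.Read() == 1:
    print(reader.NodeType(), reader.Name())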
willemarcel/osmcha
osmcha/changeset.py
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L311-L331
def verify_words(self): """Verify the fields source, imagery_used and comment of the changeset for some suspect words. """ if self.comment: if find_words(self.comment, self.suspect_words, self.excluded_words): self.label_suspicious('suspect_word') if self.source: for word in self.illegal_sources: if word in self.source.lower(): self.label_suspicious('suspect_word') break if self.imagery_used: for word in self.illegal_sources: if word in self.imagery_used.lower(): self.label_suspicious('suspect_word') break self.suspicion_reasons = list(set(self.suspicion_reasons))
[ "def", "verify_words", "(", "self", ")", ":", "if", "self", ".", "comment", ":", "if", "find_words", "(", "self", ".", "comment", ",", "self", ".", "suspect_words", ",", "self", ".", "excluded_words", ")", ":", "self", ".", "label_suspicious", "(", "'suspect_word'", ")", "if", "self", ".", "source", ":", "for", "word", "in", "self", ".", "illegal_sources", ":", "if", "word", "in", "self", ".", "source", ".", "lower", "(", ")", ":", "self", ".", "label_suspicious", "(", "'suspect_word'", ")", "break", "if", "self", ".", "imagery_used", ":", "for", "word", "in", "self", ".", "illegal_sources", ":", "if", "word", "in", "self", ".", "imagery_used", ".", "lower", "(", ")", ":", "self", ".", "label_suspicious", "(", "'suspect_word'", ")", "break", "self", ".", "suspicion_reasons", "=", "list", "(", "set", "(", "self", ".", "suspicion_reasons", ")", ")" ]
Verify the fields source, imagery_used and comment of the changeset for some suspect words.
[ "Verify", "the", "fields", "source", "imagery_used", "and", "comment", "of", "the", "changeset", "for", "some", "suspect", "words", "." ]
python
valid
36.809524
amsehili/auditok
auditok/util.py
https://github.com/amsehili/auditok/blob/df6eb1d80f8cd9034be47b24869ce59b74f5f4db/auditok/util.py#L106-L119
def set_data(self, data): """ Set a new data buffer. :Parameters: `data` : a basestring object New data buffer. """ if not isinstance(data, basestring): raise ValueError("data must be an instance of basestring") self._data = data self._current = 0
[ "def", "set_data", "(", "self", ",", "data", ")", ":", "if", "not", "isinstance", "(", "data", ",", "basestring", ")", ":", "raise", "ValueError", "(", "\"data must be an instance of basestring\"", ")", "self", ".", "_data", "=", "data", "self", ".", "_current", "=", "0" ]
Set a new data buffer. :Parameters: `data` : a basestring object New data buffer.
[ "Set", "a", "new", "data", "buffer", "." ]
python
train
23.642857
neurosynth/neurosynth
neurosynth/base/mask.py
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/base/mask.py#L39-L68
def add(self, layers, above=None, below=None): """ Add one or more layers to the stack of masking layers. Args: layers: A string, NiBabel image, list, or dict. If anything other than a dict is passed, assigns sequential layer names based on the current position in stack; if a dict, uses key as the name and value as the mask image. """ def add_named_layer(name, image): image = self.get_image(image, output='vector') if above is not None: image[image < above] = 0. if below is not None: image[image > below] = 0. self.layers[name] = image self.stack.append(name) if isinstance(layers, dict): for (name, image) in layers.items(): add_named_layer(name, image) else: if not isinstance(layers, list): layers = [layers] for image in layers: name = 'layer_%d' % len(self.stack) add_named_layer(name, image) self.set_mask()
[ "def", "add", "(", "self", ",", "layers", ",", "above", "=", "None", ",", "below", "=", "None", ")", ":", "def", "add_named_layer", "(", "name", ",", "image", ")", ":", "image", "=", "self", ".", "get_image", "(", "image", ",", "output", "=", "'vector'", ")", "if", "above", "is", "not", "None", ":", "image", "[", "image", "<", "above", "]", "=", "0.", "if", "below", "is", "not", "None", ":", "image", "[", "image", ">", "below", "]", "=", "0.", "self", ".", "layers", "[", "name", "]", "=", "image", "self", ".", "stack", ".", "append", "(", "name", ")", "if", "isinstance", "(", "layers", ",", "dict", ")", ":", "for", "(", "name", ",", "image", ")", "in", "layers", ".", "items", "(", ")", ":", "add_named_layer", "(", "name", ",", "image", ")", "else", ":", "if", "not", "isinstance", "(", "layers", ",", "list", ")", ":", "layers", "=", "[", "layers", "]", "for", "image", "in", "layers", ":", "name", "=", "'layer_%d'", "%", "len", "(", "self", ".", "stack", ")", "add_named_layer", "(", "name", ",", "image", ")", "self", ".", "set_mask", "(", ")" ]
Add one or more layers to the stack of masking layers. Args: layers: A string, NiBabel image, list, or dict. If anything other than a dict is passed, assigns sequential layer names based on the current position in stack; if a dict, uses key as the name and value as the mask image.
[ "Add", "one", "or", "more", "layers", "to", "the", "stack", "of", "masking", "layers", ".", "Args", ":", "layers", ":", "A", "string", "NiBabel", "image", "list", "or", "dict", ".", "If", "anything", "other", "than", "a", "dict", "is", "passed", "assigns", "sequential", "layer", "names", "based", "on", "the", "current", "position", "in", "stack", ";", "if", "a", "dict", "uses", "key", "as", "the", "name", "and", "value", "as", "the", "mask", "image", "." ]
python
test
36.666667
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L693-L705
def expires(self): """Union[datetime.datetime, None]: Datetime at which the table will be deleted. Raises: ValueError: For invalid value types. """ expiration_time = self._properties.get("expirationTime") if expiration_time is not None: # expiration_time will be in milliseconds. return google.cloud._helpers._datetime_from_microseconds( 1000.0 * float(expiration_time) )
[ "def", "expires", "(", "self", ")", ":", "expiration_time", "=", "self", ".", "_properties", ".", "get", "(", "\"expirationTime\"", ")", "if", "expiration_time", "is", "not", "None", ":", "# expiration_time will be in milliseconds.", "return", "google", ".", "cloud", ".", "_helpers", ".", "_datetime_from_microseconds", "(", "1000.0", "*", "float", "(", "expiration_time", ")", ")" ]
Union[datetime.datetime, None]: Datetime at which the table will be deleted. Raises: ValueError: For invalid value types.
[ "Union", "[", "datetime", ".", "datetime", "None", "]", ":", "Datetime", "at", "which", "the", "table", "will", "be", "deleted", "." ]
python
train
36.307692
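The millisecond-to-datetime conversion in isolation; the API value is a made-up epoch-milliseconds string:

import datetime

expiration_time = '1735689600000'  # 2025-01-01T00:00:00Z in milliseconds
expires = datetime.datetime.utcfromtimestamp(float(expiration_time) / 1000.0)
print(expires)  # 2025-01-01 00:00:00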
Alignak-monitoring/alignak
alignak/scheduler.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L250-L263
def reset(self): # pylint: disable=not-context-manager """Reset scheduler:: * Remove waiting results * Clear checks and actions lists :return: None """ logger.info("Scheduling loop reset") with self.waiting_results.mutex: self.waiting_results.queue.clear() self.checks.clear() self.actions.clear()
[ "def", "reset", "(", "self", ")", ":", "# pylint: disable=not-context-manager", "logger", ".", "info", "(", "\"Scheduling loop reset\"", ")", "with", "self", ".", "waiting_results", ".", "mutex", ":", "self", ".", "waiting_results", ".", "queue", ".", "clear", "(", ")", "self", ".", "checks", ".", "clear", "(", ")", "self", ".", "actions", ".", "clear", "(", ")" ]
Reset scheduler:: * Remove waiting results * Clear checks and actions lists :return: None
[ "Reset", "scheduler", "::" ]
python
train
27
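The with-mutex clear is a common idiom for emptying a queue.Queue in place; standalone:

import queue

q = queue.Queue()
for item in range(3):
    q.put(item)
with q.mutex:          # hold the lock while touching the internal deque
    q.queue.clear()
print(q.qsize())       # 0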
bitesofcode/projex
projex/xmlutil.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/xmlutil.py#L86-L105
def fromXml(cls, xml): """ Restores an object from XML. :param xml | <xml.etree.ElementTree.Element> :return subclass of <XmlObject> """ clsname = xml.get('class') if clsname: subcls = XmlObject.byName(clsname) if subcls is None: inst = MissingXmlObject(clsname) else: inst = subcls() else: inst = cls() inst.loadXml(xml) return inst
[ "def", "fromXml", "(", "cls", ",", "xml", ")", ":", "clsname", "=", "xml", ".", "get", "(", "'class'", ")", "if", "clsname", ":", "subcls", "=", "XmlObject", ".", "byName", "(", "clsname", ")", "if", "subcls", "is", "None", ":", "inst", "=", "MissingXmlObject", "(", "clsname", ")", "else", ":", "inst", "=", "subcls", "(", ")", "else", ":", "inst", "=", "cls", "(", ")", "inst", ".", "loadXml", "(", "xml", ")", "return", "inst" ]
Restores an object from XML. :param xml | <xml.etree.ElementTree.Element> :return subclass of <XmlObject>
[ "Restores", "an", "object", "from", "XML", "." ]
python
train
24.3
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/html5parser.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/html5parser.py#L56-L64
def document_fromstring(html, guess_charset=True, parser=None): """Parse a whole document from a string.""" if not isinstance(html, _strings): raise TypeError('string required') if parser is None: parser = html_parser return parser.parse(html, useChardet=guess_charset).getroot()
[ "def", "document_fromstring", "(", "html", ",", "guess_charset", "=", "True", ",", "parser", "=", "None", ")", ":", "if", "not", "isinstance", "(", "html", ",", "_strings", ")", ":", "raise", "TypeError", "(", "'string required'", ")", "if", "parser", "is", "None", ":", "parser", "=", "html_parser", "return", "parser", ".", "parse", "(", "html", ",", "useChardet", "=", "guess_charset", ")", ".", "getroot", "(", ")" ]
Parse a whole document from a string.
[ "Parse", "a", "whole", "document", "from", "a", "string", "." ]
python
test
33.888889
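Usage sketch; requires the html5lib package, and note that the parser yields XHTML-namespaced elements:

from lxml.html import html5parser

doc = html5parser.document_fromstring('<p>hi</p>')
print(doc.tag)  # '{http://www.w3.org/1999/xhtml}html'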
gwastro/pycbc-glue
pycbc_glue/segmentdb/segmentdb_utils.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/segmentdb/segmentdb_utils.py#L467-L515
def build_segment_list_one(engine, gps_start_time, gps_end_time, ifo, segment_name, version = None, start_pad = 0, end_pad = 0): """Builds a list of segments satisfying the given criteria """ seg_result = segmentlist([]) sum_result = segmentlist([]) # Is there any way to get segment and segement summary in one query? # Maybe some sort of outer join where we keep track of which segment # summaries we've already seen. sql = "SELECT segment_summary.start_time, segment_summary.end_time " sql += "FROM segment_definer, segment_summary " sql += "WHERE segment_summary.segment_def_id = segment_definer.segment_def_id " sql += "AND segment_definer.ifos = '%s' " % ifo if engine.__class__ == query_engine.LdbdQueryEngine: sql += "AND segment_summary.segment_def_cdb = segment_definer.creator_db " sql += "AND segment_definer.name = '%s' " % segment_name sql += "AND segment_definer.version = %s " % version sql += "AND NOT (%s > segment_summary.end_time OR segment_summary.start_time > %s)" % (gps_start_time, gps_end_time) rows = engine.query(sql) for sum_start_time, sum_end_time in rows: sum_start_time = (sum_start_time < gps_start_time) and gps_start_time or sum_start_time sum_end_time = (sum_end_time > gps_end_time) and gps_end_time or sum_end_time sum_result |= segmentlist([segment(sum_start_time, sum_end_time)]) # We can't use queries paramaterized with ? since the ldbd protocol doesn't support it... sql = "SELECT segment.start_time + %d, segment.end_time + %d " % (start_pad, end_pad) sql += "FROM segment, segment_definer " sql += "WHERE segment.segment_def_id = segment_definer.segment_def_id " if engine.__class__ == query_engine.LdbdQueryEngine: sql += "AND segment.segment_def_cdb = segment_definer.creator_db " sql += "AND segment_definer.ifos = '%s' " % ifo sql += "AND segment_definer.name = '%s' " % segment_name sql += "AND segment_definer.version = %s " % version sql += "AND NOT (%s > segment.end_time OR segment.start_time > %s)" % (gps_start_time, gps_end_time) rows = engine.query(sql) for seg_start_time, seg_end_time in rows: seg_start_time = (seg_start_time < gps_start_time) and gps_start_time or seg_start_time seg_end_time = (seg_end_time > gps_end_time) and gps_end_time or seg_end_time seg_result |= segmentlist([segment(seg_start_time, seg_end_time)]) engine.close() return sum_result, seg_result
[ "def", "build_segment_list_one", "(", "engine", ",", "gps_start_time", ",", "gps_end_time", ",", "ifo", ",", "segment_name", ",", "version", "=", "None", ",", "start_pad", "=", "0", ",", "end_pad", "=", "0", ")", ":", "seg_result", "=", "segmentlist", "(", "[", "]", ")", "sum_result", "=", "segmentlist", "(", "[", "]", ")", "# Is there any way to get segment and segement summary in one query?", "# Maybe some sort of outer join where we keep track of which segment", "# summaries we've already seen.", "sql", "=", "\"SELECT segment_summary.start_time, segment_summary.end_time \"", "sql", "+=", "\"FROM segment_definer, segment_summary \"", "sql", "+=", "\"WHERE segment_summary.segment_def_id = segment_definer.segment_def_id \"", "sql", "+=", "\"AND segment_definer.ifos = '%s' \"", "%", "ifo", "if", "engine", ".", "__class__", "==", "query_engine", ".", "LdbdQueryEngine", ":", "sql", "+=", "\"AND segment_summary.segment_def_cdb = segment_definer.creator_db \"", "sql", "+=", "\"AND segment_definer.name = '%s' \"", "%", "segment_name", "sql", "+=", "\"AND segment_definer.version = %s \"", "%", "version", "sql", "+=", "\"AND NOT (%s > segment_summary.end_time OR segment_summary.start_time > %s)\"", "%", "(", "gps_start_time", ",", "gps_end_time", ")", "rows", "=", "engine", ".", "query", "(", "sql", ")", "for", "sum_start_time", ",", "sum_end_time", "in", "rows", ":", "sum_start_time", "=", "(", "sum_start_time", "<", "gps_start_time", ")", "and", "gps_start_time", "or", "sum_start_time", "sum_end_time", "=", "(", "sum_end_time", ">", "gps_end_time", ")", "and", "gps_end_time", "or", "sum_end_time", "sum_result", "|=", "segmentlist", "(", "[", "segment", "(", "sum_start_time", ",", "sum_end_time", ")", "]", ")", "# We can't use queries paramaterized with ? since the ldbd protocol doesn't support it...", "sql", "=", "\"SELECT segment.start_time + %d, segment.end_time + %d \"", "%", "(", "start_pad", ",", "end_pad", ")", "sql", "+=", "\"FROM segment, segment_definer \"", "sql", "+=", "\"WHERE segment.segment_def_id = segment_definer.segment_def_id \"", "if", "engine", ".", "__class__", "==", "query_engine", ".", "LdbdQueryEngine", ":", "sql", "+=", "\"AND segment.segment_def_cdb = segment_definer.creator_db \"", "sql", "+=", "\"AND segment_definer.ifos = '%s' \"", "%", "ifo", "sql", "+=", "\"AND segment_definer.name = '%s' \"", "%", "segment_name", "sql", "+=", "\"AND segment_definer.version = %s \"", "%", "version", "sql", "+=", "\"AND NOT (%s > segment.end_time OR segment.start_time > %s)\"", "%", "(", "gps_start_time", ",", "gps_end_time", ")", "rows", "=", "engine", ".", "query", "(", "sql", ")", "for", "seg_start_time", ",", "seg_end_time", "in", "rows", ":", "seg_start_time", "=", "(", "seg_start_time", "<", "gps_start_time", ")", "and", "gps_start_time", "or", "seg_start_time", "seg_end_time", "=", "(", "seg_end_time", ">", "gps_end_time", ")", "and", "gps_end_time", "or", "seg_end_time", "seg_result", "|=", "segmentlist", "(", "[", "segment", "(", "seg_start_time", ",", "seg_end_time", ")", "]", ")", "engine", ".", "close", "(", ")", "return", "sum_result", ",", "seg_result" ]
Builds a list of segments satisfying the given criteria
[ "Builds", "a", "list", "of", "segments", "satisfying", "the", "given", "criteria" ]
python
train
50.77551
buildbot/buildbot
pkg/buildbot_pkg.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/pkg/buildbot_pkg.py#L89-L114
def getVersionFromArchiveId(git_archive_id='$Format:%ct %d$'): """ Extract the tag if a source is from git archive. When source is exported via `git archive`, the git_archive_id init value is modified and placeholders are expanded to the "archived" revision: %ct: committer date, UNIX timestamp %d: ref names, like the --decorate option of git-log See man gitattributes(5) and git-log(1) (PRETTY FORMATS) for more details. """ # mangle the magic string to make sure it is not replaced by git archive if not git_archive_id.startswith('$For''mat:'): # source was modified by git archive, try to parse the version from # the value of git_archive_id match = re.search(r'tag:\s*v([^,)]+)', git_archive_id) if match: # archived revision is tagged, use the tag return gitDescribeToPep440(match.group(1)) # archived revision is not tagged, use the commit date tstamp = git_archive_id.strip().split()[0] d = datetime.datetime.utcfromtimestamp(int(tstamp)) return d.strftime('%Y.%m.%d') return None
[ "def", "getVersionFromArchiveId", "(", "git_archive_id", "=", "'$Format:%ct %d$'", ")", ":", "# mangle the magic string to make sure it is not replaced by git archive", "if", "not", "git_archive_id", ".", "startswith", "(", "'$For'", "'mat:'", ")", ":", "# source was modified by git archive, try to parse the version from", "# the value of git_archive_id", "match", "=", "re", ".", "search", "(", "r'tag:\\s*v([^,)]+)'", ",", "git_archive_id", ")", "if", "match", ":", "# archived revision is tagged, use the tag", "return", "gitDescribeToPep440", "(", "match", ".", "group", "(", "1", ")", ")", "# archived revision is not tagged, use the commit date", "tstamp", "=", "git_archive_id", ".", "strip", "(", ")", ".", "split", "(", ")", "[", "0", "]", "d", "=", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "int", "(", "tstamp", ")", ")", "return", "d", ".", "strftime", "(", "'%Y.%m.%d'", ")", "return", "None" ]
Extract the tag if a source is from git archive. When source is exported via `git archive`, the git_archive_id init value is modified and placeholders are expanded to the "archived" revision: %ct: committer date, UNIX timestamp %d: ref names, like the --decorate option of git-log See man gitattributes(5) and git-log(1) (PRETTY FORMATS) for more details.
[ "Extract", "the", "tag", "if", "a", "source", "is", "from", "git", "archive", "." ]
python
train
43.230769
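
A minimal sketch of the two states `git_archive_id` can be in; the expanded strings below are illustrative assumptions, not taken from any real archive, and assume `getVersionFromArchiveId` and `gitDescribeToPep440` are in scope:

# Inside a normal git checkout the placeholder is never expanded, so the
# function returns None and callers fall back to other version sources:
assert getVersionFromArchiveId('$Format:%ct %d$') is None

# After `git archive` on a tagged commit the placeholder becomes something
# like this (hypothetical values); the tag wins and is normalized via
# gitDescribeToPep440:
print(getVersionFromArchiveId('1514764800  (HEAD -> master, tag: v1.0.0)'))

# On an untagged commit only the timestamp is usable, yielding a date-based
# version string ('2018.01.01' for this particular UNIX timestamp):
print(getVersionFromArchiveId('1514764800  (HEAD -> master)'))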
mjirik/io3d
io3d/dcmtools.py
https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/dcmtools.py#L27-L59
def get_sitk_image_from_ndarray(data3d):
    """
    Prepare SimpleITK Image object and rescale data to unsigned types.

    SimpleITK with a version higher than 1.0.0 cannot write signed int16. This function
    checks the SimpleITK version and uses a workaround with Rescale Intercept and
    Rescale Slope

    :param data3d:
    :return:
    """
    import SimpleITK as sitk

    rescale_intercept = None
    if sitk.Version.MajorVersion() > 0:
        if data3d.dtype == np.int8:
            rescale_intercept = -2**7
            data3d = (data3d - rescale_intercept).astype(np.uint8)
        elif data3d.dtype == np.int16:
            # simpleitk is not able to store this. It uses only 11 bits
            # rescale_intercept = -2**15
            rescale_intercept = -2**10
            data3d = (data3d - rescale_intercept).astype(np.uint16)
        elif data3d.dtype == np.int32:
            rescale_intercept = -2**31
            data3d = (data3d - rescale_intercept).astype(np.uint16)

    dim = sitk.GetImageFromArray(data3d)
    if sitk.Version.MajorVersion() > 0:
        if rescale_intercept is not None:
            # rescale slope (0028|1053), rescale intercept (0028|1052)
            dim.SetMetaData("0028|1052", str(rescale_intercept))
            dim.SetMetaData("0028|1053", "1")

    return dim
[ "def", "get_sitk_image_from_ndarray", "(", "data3d", ")", ":", "import", "SimpleITK", "as", "sitk", "rescale_intercept", "=", "None", "if", "sitk", ".", "Version", ".", "MajorVersion", "(", ")", ">", "0", ":", "if", "data3d", ".", "dtype", "==", "np", ".", "int8", ":", "rescale_intercept", "=", "-", "2", "**", "7", "data3d", "=", "(", "data3d", "-", "rescale_intercept", ")", ".", "astype", "(", "np", ".", "uint8", ")", "elif", "data3d", ".", "dtype", "==", "np", ".", "int16", ":", "# simpleitk is not able to store this. It uses only 11 bites", "# rescale_intercept = -2**15", "rescale_intercept", "=", "-", "2", "**", "10", "data3d", "=", "(", "data3d", "-", "rescale_intercept", ")", ".", "astype", "(", "np", ".", "uint16", ")", "elif", "data3d", ".", "dtype", "==", "np", ".", "int32", ":", "rescale_intercept", "=", "-", "2", "**", "31", "data3d", "=", "(", "data3d", "-", "rescale_intercept", ")", ".", "astype", "(", "np", ".", "uint16", ")", "dim", "=", "sitk", ".", "GetImageFromArray", "(", "data3d", ")", "if", "sitk", ".", "Version", ".", "MajorVersion", "(", ")", ">", "0", ":", "if", "rescale_intercept", "is", "not", "None", ":", "# rescale slope (0028|1053), rescale intercept (0028|1052)", "dim", ".", "SetMetaData", "(", "\"0028|1052\"", ",", "str", "(", "rescale_intercept", ")", ")", "dim", ".", "SetMetaData", "(", "\"0028|1053\"", ",", "\"1\"", ")", "return", "dim" ]
Prepare SimpleITK Image object and rescale data to unsigned types.

    SimpleITK with a version higher than 1.0.0 cannot write signed int16. This function checks the SimpleITK version and uses a workaround with Rescale Intercept and Rescale Slope

    :param data3d:
    :return:
[ "Prepare", "SimpleItk", "Image", "object", "and", "rescale", "data", "to", "unsigned", "types", "." ]
python
train
38.69697
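
A round-trip sketch of the int16 workaround; the toy array is an assumption, and the metadata read-back uses only the tags the function sets:

import numpy as np

# int16 data is shifted by +2**10 into uint16, and the shift is recorded as
# the DICOM Rescale Intercept tag (0028|1052) with slope 1 (0028|1053)
data3d = np.array([[[-1000, 0, 1000]]], dtype=np.int16)
img = get_sitk_image_from_ndarray(data3d)
print(img.GetMetaData("0028|1052"))  # -> '-1024'
print(img.GetMetaData("0028|1053"))  # -> '1'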
stephantul/somber
somber/base.py
https://github.com/stephantul/somber/blob/b7a13e646239500cc393668c01a7169c3e50b7b5/somber/base.py#L593-L625
def load(cls, path): """ Load a SOM from a JSON file saved with this package. Parameters ---------- path : str The path to the JSON file. Returns ------- s : cls A som of the specified class. """ data = json.load(open(path)) weights = data['weights'] weights = np.asarray(weights, dtype=np.float64) s = cls(data['num_neurons'], data['data_dimensionality'], data['params']['lr']['orig'], neighborhood=data['params']['infl']['orig'], valfunc=data['valfunc'], argfunc=data['argfunc'], lr_lambda=data['params']['lr']['factor'], nb_lambda=data['params']['nb']['factor']) s.weights = weights s.trained = True return s
[ "def", "load", "(", "cls", ",", "path", ")", ":", "data", "=", "json", ".", "load", "(", "open", "(", "path", ")", ")", "weights", "=", "data", "[", "'weights'", "]", "weights", "=", "np", ".", "asarray", "(", "weights", ",", "dtype", "=", "np", ".", "float64", ")", "s", "=", "cls", "(", "data", "[", "'num_neurons'", "]", ",", "data", "[", "'data_dimensionality'", "]", ",", "data", "[", "'params'", "]", "[", "'lr'", "]", "[", "'orig'", "]", ",", "neighborhood", "=", "data", "[", "'params'", "]", "[", "'infl'", "]", "[", "'orig'", "]", ",", "valfunc", "=", "data", "[", "'valfunc'", "]", ",", "argfunc", "=", "data", "[", "'argfunc'", "]", ",", "lr_lambda", "=", "data", "[", "'params'", "]", "[", "'lr'", "]", "[", "'factor'", "]", ",", "nb_lambda", "=", "data", "[", "'params'", "]", "[", "'nb'", "]", "[", "'factor'", "]", ")", "s", ".", "weights", "=", "weights", "s", ".", "trained", "=", "True", "return", "s" ]
Load a SOM from a JSON file saved with this package. Parameters ---------- path : str The path to the JSON file. Returns ------- s : cls A som of the specified class.
[ "Load", "a", "SOM", "from", "a", "JSON", "file", "saved", "with", "this", "package", "." ]
python
train
25.757576
saltstack/salt
salt/cloud/clouds/ec2.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/ec2.py#L4597-L4662
def create_snapshot(kwargs=None, call=None, wait_to_finish=False):
    '''
    Create a snapshot.

    volume_id
        The ID of the Volume from which to create a snapshot.

    description
        The optional description of the snapshot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f create_snapshot my-ec2-config volume_id=vol-351d8826
        salt-cloud -f create_snapshot my-ec2-config volume_id=vol-351d8826 \\
                description="My Snapshot Description"
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The create_snapshot function must be called with -f '
            'or --function.'
        )

    if kwargs is None:
        kwargs = {}

    volume_id = kwargs.get('volume_id', None)
    description = kwargs.get('description', '')

    if volume_id is None:
        raise SaltCloudSystemExit(
            'A volume_id must be specified to create a snapshot.'
        )

    params = {'Action': 'CreateSnapshot',
              'VolumeId': volume_id,
              'Description': description}

    log.debug(params)

    data = aws.query(params,
                     return_url=True,
                     return_root=True,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')[0]

    r_data = {}
    for d in data:
        for k, v in six.iteritems(d):
            r_data[k] = v

    if 'snapshotId' in r_data:
        snapshot_id = r_data['snapshotId']

        # Waits till volume is available
        if wait_to_finish:
            salt.utils.cloud.run_func_until_ret_arg(fun=describe_snapshots,
                                                    kwargs={'snapshot_id': snapshot_id},
                                                    fun_call=call,
                                                    argument_being_watched='status',
                                                    required_argument_response='completed')

    return r_data
[ "def", "create_snapshot", "(", "kwargs", "=", "None", ",", "call", "=", "None", ",", "wait_to_finish", "=", "False", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The create_snapshot function must be called with -f '", "'or --function.'", ")", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "volume_id", "=", "kwargs", ".", "get", "(", "'volume_id'", ",", "None", ")", "description", "=", "kwargs", ".", "get", "(", "'description'", ",", "''", ")", "if", "volume_id", "is", "None", ":", "raise", "SaltCloudSystemExit", "(", "'A volume_id must be specified to create a snapshot.'", ")", "params", "=", "{", "'Action'", ":", "'CreateSnapshot'", ",", "'VolumeId'", ":", "volume_id", ",", "'Description'", ":", "description", "}", "log", ".", "debug", "(", "params", ")", "data", "=", "aws", ".", "query", "(", "params", ",", "return_url", "=", "True", ",", "return_root", "=", "True", ",", "location", "=", "get_location", "(", ")", ",", "provider", "=", "get_provider", "(", ")", ",", "opts", "=", "__opts__", ",", "sigver", "=", "'4'", ")", "[", "0", "]", "r_data", "=", "{", "}", "for", "d", "in", "data", ":", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "d", ")", ":", "r_data", "[", "k", "]", "=", "v", "if", "'snapshotId'", "in", "r_data", ":", "snapshot_id", "=", "r_data", "[", "'snapshotId'", "]", "# Waits till volume is available", "if", "wait_to_finish", ":", "salt", ".", "utils", ".", "cloud", ".", "run_func_until_ret_arg", "(", "fun", "=", "describe_snapshots", ",", "kwargs", "=", "{", "'snapshot_id'", ":", "snapshot_id", "}", ",", "fun_call", "=", "call", ",", "argument_being_watched", "=", "'status'", ",", "required_argument_response", "=", "'completed'", ")", "return", "r_data" ]
Create a snapshot.

    volume_id
        The ID of the Volume from which to create a snapshot.

    description
        The optional description of the snapshot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f create_snapshot my-ec2-config volume_id=vol-351d8826
        salt-cloud -f create_snapshot my-ec2-config volume_id=vol-351d8826 \\
                description="My Snapshot Description"
[ "Create", "a", "snapshot", "." ]
python
train
29.757576
AtsushiSakai/SimpleTkGUIKit
SimpleTkGUIKit/SimpleTkGUIKit.py
https://github.com/AtsushiSakai/SimpleTkGUIKit/blob/e7cbb06ff32afb165cdaa4fe396ca2f172c66ff0/SimpleTkGUIKit/SimpleTkGUIKit.py#L12-L34
def GetFilePathsWithDialog(fileTypes=[]):
    """
    Multiple File Select with dialog

    fileTypes: you can choose the file extension
    ex) fileTypes=[('Excel Files', '.xlsx')]
    """
    root = tkinter.Tk()
    root.withdraw()
    filepath = filedialog.askopenfilenames(
        filetypes=fileTypes, parent=root)

    if isinstance(filepath, str):
        fileList = filepath.split(" ")
    elif isinstance(filepath, tuple):
        fileList = list(filepath)
    elif isinstance(filepath, list):
        fileList = filepath

    root.destroy()
    print(str(len(fileList)) + " files are selected")

    return fileList
[ "def", "GetFilePathsWithDialog", "(", "fileTypes", "=", "[", "]", ")", ":", "root", "=", "tkinter", ".", "Tk", "(", ")", "root", ".", "withdraw", "(", ")", "filepath", "=", "filedialog", ".", "askopenfilenames", "(", "filetypes", "=", "fileTypes", ",", "parent", "=", "root", ")", "if", "isinstance", "(", "filepath", ",", "str", ")", ":", "fileList", "=", "filepath", ".", "split", "(", "\" \"", ")", "elif", "isinstance", "(", "filepath", ",", "tuple", ")", ":", "fileList", "=", "list", "(", "filepath", ")", "elif", "isinstance", "(", "filepath", ",", "list", ")", ":", "fileList", "=", "filepath", "root", ".", "destroy", "(", ")", "print", "(", "str", "(", "len", "(", "fileList", ")", ")", "+", "\" files are selected\"", ")", "return", "fileList" ]
Multiple File Select with dialog

    fileTypes: you can choose the file extension
    ex) fileTypes=[('Excel Files', '.xlsx')]
[ "Multipul", "File", "Select", "with", "dialog" ]
python
train
26.73913
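
Usage is a single call; the filter below mirrors the docstring's own example:

# opens a Tk file dialog restricted to .xlsx files and returns a list of paths
files = GetFilePathsWithDialog(fileTypes=[('Excel Files', '.xlsx')])
for path in files:
    print(path)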
Pytwitcher/pytwitcherapi
src/pytwitcherapi/oauth.py
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/oauth.py#L160-L170
def _add_bearer_token(self, *args, **kwargs): """Add a bearer token to the request uri, body or authorization header. This is overwritten to change the headers slightly. """ s = super(TwitchOAuthClient, self) uri, headers, body = s._add_bearer_token(*args, **kwargs) authheader = headers.get('Authorization') if authheader: headers['Authorization'] = authheader.replace('Bearer', 'OAuth') return uri, headers, body
[ "def", "_add_bearer_token", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "s", "=", "super", "(", "TwitchOAuthClient", ",", "self", ")", "uri", ",", "headers", ",", "body", "=", "s", ".", "_add_bearer_token", "(", "*", "args", ",", "*", "*", "kwargs", ")", "authheader", "=", "headers", ".", "get", "(", "'Authorization'", ")", "if", "authheader", ":", "headers", "[", "'Authorization'", "]", "=", "authheader", ".", "replace", "(", "'Bearer'", ",", "'OAuth'", ")", "return", "uri", ",", "headers", ",", "body" ]
Add a bearer token to the request uri, body or authorization header. This is overwritten to change the headers slightly.
[ "Add", "a", "bearer", "token", "to", "the", "request", "uri", "body", "or", "authorization", "header", "." ]
python
train
43.727273
Antidote1911/cryptoshop
cryptoshop/_nonce_engine.py
https://github.com/Antidote1911/cryptoshop/blob/0b7ff4a6848f2733f4737606957e8042a4d6ca0b/cryptoshop/_nonce_engine.py#L45-L53
def generate_nonce_timestamp(): """ Generate unique nonce with counter, uuid and rng.""" global count rng = botan.rng().get(30) uuid4 = uuid.uuid4().bytes # 16 byte tmpnonce = (bytes(str(count).encode('utf-8'))) + uuid4 + rng nonce = tmpnonce[:41] # 41 byte (328 bit) count += 1 return nonce
[ "def", "generate_nonce_timestamp", "(", ")", ":", "global", "count", "rng", "=", "botan", ".", "rng", "(", ")", ".", "get", "(", "30", ")", "uuid4", "=", "uuid", ".", "uuid4", "(", ")", ".", "bytes", "# 16 byte", "tmpnonce", "=", "(", "bytes", "(", "str", "(", "count", ")", ".", "encode", "(", "'utf-8'", ")", ")", ")", "+", "uuid4", "+", "rng", "nonce", "=", "tmpnonce", "[", ":", "41", "]", "# 41 byte (328 bit)", "count", "+=", "1", "return", "nonce" ]
Generate unique nonce with counter, uuid and rng.
[ "Generate", "unique", "nonce", "with", "counter", "uuid", "and", "rng", "." ]
python
train
35.222222
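
A sketch of the layout implied by the code: decimal counter digits, then the 16-byte uuid4, then up to 30 random bytes, truncated to 41 bytes total.

n1 = generate_nonce_timestamp()
n2 = generate_nonce_timestamp()
assert len(n1) == len(n2) == 41   # always truncated to 41 bytes (328 bits)
assert n1 != n2                   # counter + uuid4 + rng make repeats implausible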
thebigmunch/google-music
src/google_music/clients/mobileclient.py
https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L294-L302
def devices(self): """Get a listing of devices registered to the Google Music account.""" response = self._call( mc_calls.DeviceManagementInfo ) registered_devices = response.body.get('data', {}).get('items', []) return registered_devices
[ "def", "devices", "(", "self", ")", ":", "response", "=", "self", ".", "_call", "(", "mc_calls", ".", "DeviceManagementInfo", ")", "registered_devices", "=", "response", ".", "body", ".", "get", "(", "'data'", ",", "{", "}", ")", ".", "get", "(", "'items'", ",", "[", "]", ")", "return", "registered_devices" ]
Get a listing of devices registered to the Google Music account.
[ "Get", "a", "listing", "of", "devices", "registered", "to", "the", "Google", "Music", "account", "." ]
python
train
27.222222
vrtsystems/hszinc
hszinc/grid.py
https://github.com/vrtsystems/hszinc/blob/d52a7c6b5bc466f3c1a77b71814c8c0776aba995/hszinc/grid.py#L164-L175
def _assert_version(self, version): ''' Assert that the grid version is equal to or above the given value. If no version is set, set the version. ''' if self.nearest_version < version: if self._version_given: raise ValueError( 'Data type requires version %s' \ % version) else: self._version = version
[ "def", "_assert_version", "(", "self", ",", "version", ")", ":", "if", "self", ".", "nearest_version", "<", "version", ":", "if", "self", ".", "_version_given", ":", "raise", "ValueError", "(", "'Data type requires version %s'", "%", "version", ")", "else", ":", "self", ".", "_version", "=", "version" ]
Assert that the grid version is equal to or above the given value. If no version is set, set the version.
[ "Assert", "that", "the", "grid", "version", "is", "equal", "to", "or", "above", "the", "given", "value", ".", "If", "no", "version", "is", "set", "set", "the", "version", "." ]
python
valid
36.166667
jobovy/galpy
galpy/potential/SpiralArmsPotential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/SpiralArmsPotential.py#L570-L574
def _B(self, R): """Return numpy array from B1 up to and including Bn. (eqn. 6)""" HNn_R = self._HNn / R return HNn_R / self._sin_alpha * (0.4 * HNn_R / self._sin_alpha + 1)
[ "def", "_B", "(", "self", ",", "R", ")", ":", "HNn_R", "=", "self", ".", "_HNn", "/", "R", "return", "HNn_R", "/", "self", ".", "_sin_alpha", "*", "(", "0.4", "*", "HNn_R", "/", "self", ".", "_sin_alpha", "+", "1", ")" ]
Return numpy array from B1 up to and including Bn. (eqn. 6)
[ "Return", "numpy", "array", "from", "B1", "up", "to", "and", "including", "Bn", ".", "(", "eqn", ".", "6", ")" ]
python
train
38.8
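
Read back from the code, the formula being evaluated (eqn. 6 of the underlying paper, per the docstring) appears to be the following; this is a reconstruction from the source, with HNn a precomputed product and alpha the pitch angle:

B_n(R) = \frac{\mathrm{HNn}}{R\,\sin\alpha}\left(\frac{0.4\,\mathrm{HNn}}{R\,\sin\alpha} + 1\right)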
wooster/biplist
biplist/__init__.py
https://github.com/wooster/biplist/blob/4c3d0c94132621188ca4c1fa9bda3b2d4d52ab82/biplist/__init__.py#L797-L811
def writeObjectReference(self, obj, output): """Tries to write an object reference, adding it to the references table. Does not write the actual object bytes or set the reference position. Returns a tuple of whether the object was a new reference (True if it was, False if it already was in the reference table) and the new output. """ position = self.positionOfObjectReference(obj) if position is None: self.writtenReferences[obj] = len(self.writtenReferences) output += self.binaryInt(len(self.writtenReferences) - 1, byteSize=self.trailer.objectRefSize) return (True, output) else: output += self.binaryInt(position, byteSize=self.trailer.objectRefSize) return (False, output)
[ "def", "writeObjectReference", "(", "self", ",", "obj", ",", "output", ")", ":", "position", "=", "self", ".", "positionOfObjectReference", "(", "obj", ")", "if", "position", "is", "None", ":", "self", ".", "writtenReferences", "[", "obj", "]", "=", "len", "(", "self", ".", "writtenReferences", ")", "output", "+=", "self", ".", "binaryInt", "(", "len", "(", "self", ".", "writtenReferences", ")", "-", "1", ",", "byteSize", "=", "self", ".", "trailer", ".", "objectRefSize", ")", "return", "(", "True", ",", "output", ")", "else", ":", "output", "+=", "self", ".", "binaryInt", "(", "position", ",", "byteSize", "=", "self", ".", "trailer", ".", "objectRefSize", ")", "return", "(", "False", ",", "output", ")" ]
Tries to write an object reference, adding it to the references table. Does not write the actual object bytes or set the reference position. Returns a tuple of whether the object was a new reference (True if it was, False if it already was in the reference table) and the new output.
[ "Tries", "to", "write", "an", "object", "reference", "adding", "it", "to", "the", "references", "table", ".", "Does", "not", "write", "the", "actual", "object", "bytes", "or", "set", "the", "reference", "position", ".", "Returns", "a", "tuple", "of", "whether", "the", "object", "was", "a", "new", "reference", "(", "True", "if", "it", "was", "False", "if", "it", "already", "was", "in", "the", "reference", "table", ")", "and", "the", "new", "output", "." ]
python
train
53.933333
QualiSystems/cloudshell-networking-devices
cloudshell/devices/runners/configuration_runner.py
https://github.com/QualiSystems/cloudshell-networking-devices/blob/009aab33edb30035b52fe10dbb91db61c95ba4d9/cloudshell/devices/runners/configuration_runner.py#L123-L149
def orchestration_save(self, mode="shallow", custom_params=None):
        """Orchestration Save command

        :param mode:
        :param custom_params: json with all required actions to configure or remove vlans from a certain port
        :return Serialized OrchestrationSavedArtifact to json
        :rtype json
        """

        save_params = {'folder_path': '', 'configuration_type': 'running', 'return_artifact': True}
        params = dict()
        if custom_params:
            params = jsonpickle.decode(custom_params)

        save_params.update(params.get('custom_params', {}))
        save_params['folder_path'] = self.get_path(save_params['folder_path'])

        saved_artifact = self.save(**save_params)

        saved_artifact_info = OrchestrationSavedArtifactInfo(resource_name=self.resource_config.name,
                                                             created_date=datetime.datetime.now(),
                                                             restore_rules=self.get_restore_rules(),
                                                             saved_artifact=saved_artifact)
        save_response = OrchestrationSaveResult(saved_artifacts_info=saved_artifact_info)
        self._validate_artifact_info(saved_artifact_info)

        return serialize_to_json(save_response)
[ "def", "orchestration_save", "(", "self", ",", "mode", "=", "\"shallow\"", ",", "custom_params", "=", "None", ")", ":", "save_params", "=", "{", "'folder_path'", ":", "''", ",", "'configuration_type'", ":", "'running'", ",", "'return_artifact'", ":", "True", "}", "params", "=", "dict", "(", ")", "if", "custom_params", ":", "params", "=", "jsonpickle", ".", "decode", "(", "custom_params", ")", "save_params", ".", "update", "(", "params", ".", "get", "(", "'custom_params'", ",", "{", "}", ")", ")", "save_params", "[", "'folder_path'", "]", "=", "self", ".", "get_path", "(", "save_params", "[", "'folder_path'", "]", ")", "saved_artifact", "=", "self", ".", "save", "(", "*", "*", "save_params", ")", "saved_artifact_info", "=", "OrchestrationSavedArtifactInfo", "(", "resource_name", "=", "self", ".", "resource_config", ".", "name", ",", "created_date", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ",", "restore_rules", "=", "self", ".", "get_restore_rules", "(", ")", ",", "saved_artifact", "=", "saved_artifact", ")", "save_response", "=", "OrchestrationSaveResult", "(", "saved_artifacts_info", "=", "saved_artifact_info", ")", "self", ".", "_validate_artifact_info", "(", "saved_artifact_info", ")", "return", "serialize_to_json", "(", "save_response", ")" ]
Orchestration Save command

        :param mode:
        :param custom_params: json with all required actions to configure or remove vlans from a certain port
        :return Serialized OrchestrationSavedArtifact to json
        :rtype json
[ "Orchestration", "Save", "command" ]
python
train
47.703704
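
The shape of `custom_params` implied by the decode path above: keys under the top-level "custom_params" feed straight into `save_params`. The folder path and configuration type below are purely illustrative, and `runner` stands in for an assumed instance of this class:

import json

custom_params = json.dumps({
    "custom_params": {
        "folder_path": "ftp://user:pass@host/configs",   # hypothetical target
        "configuration_type": "startup"                  # overrides 'running'
    }
})
# returns the OrchestrationSaveResult serialized to json
result_json = runner.orchestration_save(mode="shallow", custom_params=custom_params)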
limodou/uliweb
uliweb/contrib/datadict/datadict_subcommands.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/datadict/datadict_subcommands.py#L82-L100
def _parse_prop(self, dd, row):
        """
        :param dd: datadict
        :param row: (tablename, row)
        :return:
        """
        key = row['name']
        if key.startswith('#'):
            deprecated = True
        else:
            deprecated = False
        v = dd.get(key)
        _value = self._get_value(row)
        if not v:
            v = dd.setdefault(key, {})
            v[_value] = deprecated
        else:
            if not _value in v:
                v[_value] = deprecated
[ "def", "_parse_prop", "(", "self", ",", "dd", ",", "row", ")", ":", "key", "=", "row", "[", "'name'", "]", "if", "key", ".", "startswith", "(", "'#'", ")", ":", "deprecated", "=", "True", "else", ":", "deprecated", "=", "False", "v", "=", "dd", ".", "get", "(", "key", ")", "_value", "=", "self", ".", "_get_value", "(", "row", ")", "if", "not", "v", ":", "v", "=", "dd", ".", "setdefault", "(", "key", ",", "{", "}", ")", "v", "[", "_value", "]", "=", "deprecated", "else", ":", "if", "not", "_value", "in", "v", ":", "v", "[", "_value", "]", "=", "deprecated" ]
:param dd: datadict
        :param row: (tablename, row)
        :return:
[ ":", "param", "dd", ":", "datadict", ":", "param", "_row", ":", "(", "tablename", "row", ")", ":", "return", ":" ]
python
train
25.894737
SBRG/ssbio
ssbio/databases/swissmodel.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/swissmodel.py#L141-L166
def organize_models(self, outdir, force_rerun=False): """Organize and rename SWISS-MODEL models to a single folder with a name containing template information. Args: outdir (str): New directory to copy renamed models to force_rerun (bool): If models should be copied again even if they already exist Returns: dict: Dictionary of lists, UniProt IDs as the keys and new file paths as the values """ uniprot_to_swissmodel = defaultdict(list) for u, models in self.all_models.items(): for m in models: original_filename = '{}_{}_{}_{}'.format(m['from'], m['to'], m['template'], m['coordinate_id']) file_path = op.join(self.metadata_dir, u[:2], u[2:4], u[4:], 'swissmodel', '{}.pdb'.format(original_filename)) if op.exists(file_path): new_filename = '{}_{}_{}_{}.pdb'.format(u, m['from'], m['to'], m['template'][:4]) shutil.copy(file_path, op.join(outdir, new_filename)) uniprot_to_swissmodel[u].append(new_filename) else: log.warning('{}: no file {} found for model'.format(u, file_path)) return uniprot_to_swissmodel
[ "def", "organize_models", "(", "self", ",", "outdir", ",", "force_rerun", "=", "False", ")", ":", "uniprot_to_swissmodel", "=", "defaultdict", "(", "list", ")", "for", "u", ",", "models", "in", "self", ".", "all_models", ".", "items", "(", ")", ":", "for", "m", "in", "models", ":", "original_filename", "=", "'{}_{}_{}_{}'", ".", "format", "(", "m", "[", "'from'", "]", ",", "m", "[", "'to'", "]", ",", "m", "[", "'template'", "]", ",", "m", "[", "'coordinate_id'", "]", ")", "file_path", "=", "op", ".", "join", "(", "self", ".", "metadata_dir", ",", "u", "[", ":", "2", "]", ",", "u", "[", "2", ":", "4", "]", ",", "u", "[", "4", ":", "]", ",", "'swissmodel'", ",", "'{}.pdb'", ".", "format", "(", "original_filename", ")", ")", "if", "op", ".", "exists", "(", "file_path", ")", ":", "new_filename", "=", "'{}_{}_{}_{}.pdb'", ".", "format", "(", "u", ",", "m", "[", "'from'", "]", ",", "m", "[", "'to'", "]", ",", "m", "[", "'template'", "]", "[", ":", "4", "]", ")", "shutil", ".", "copy", "(", "file_path", ",", "op", ".", "join", "(", "outdir", ",", "new_filename", ")", ")", "uniprot_to_swissmodel", "[", "u", "]", ".", "append", "(", "new_filename", ")", "else", ":", "log", ".", "warning", "(", "'{}: no file {} found for model'", ".", "format", "(", "u", ",", "file_path", ")", ")", "return", "uniprot_to_swissmodel" ]
Organize and rename SWISS-MODEL models to a single folder with a name containing template information. Args: outdir (str): New directory to copy renamed models to force_rerun (bool): If models should be copied again even if they already exist Returns: dict: Dictionary of lists, UniProt IDs as the keys and new file paths as the values
[ "Organize", "and", "rename", "SWISS", "-", "MODEL", "models", "to", "a", "single", "folder", "with", "a", "name", "containing", "template", "information", "." ]
python
train
50.461538
eqcorrscan/EQcorrscan
eqcorrscan/utils/catalog_to_dd.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/catalog_to_dd.py#L130-L191
def readSTATION0(path, stations): """ Read a Seisan STATION0.HYP file on the path given. Outputs the information, and writes to station.dat file. :type path: str :param path: Path to the STATION0.HYP file :type stations: list :param stations: Stations to look for :returns: List of tuples of station, lat, long, elevation :rtype: list >>> # Get the path to the test data >>> import eqcorrscan >>> import os >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data' >>> readSTATION0(TEST_PATH, ['WHFS', 'WHAT2', 'BOB']) [('WHFS', -43.261, 170.359, 60.0), ('WHAT2', -43.2793, \ 170.36038333333335, 95.0), ('BOB', 41.408166666666666, \ -174.87116666666665, 101.0)] """ stalist = [] f = open(path + '/STATION0.HYP', 'r') for line in f: if line[1:6].strip() in stations: station = line[1:6].strip() lat = line[6:14] # Format is either ddmm.mmS/N or ddmm(.)mmmS/N if lat[-1] == 'S': NS = -1 else: NS = 1 if lat[4] == '.': lat = (int(lat[0:2]) + float(lat[2:-1]) / 60) * NS else: lat = (int(lat[0:2]) + float(lat[2:4] + '.' + lat[4:-1]) / 60) * NS lon = line[14:23] if lon[-1] == 'W': EW = -1 else: EW = 1 if lon[5] == '.': lon = (int(lon[0:3]) + float(lon[3:-1]) / 60) * EW else: lon = (int(lon[0:3]) + float(lon[3:5] + '.' + lon[5:-1]) / 60) * EW elev = float(line[23:-1].strip()) # Note, negative altitude can be indicated in 1st column if line[0] == '-': elev *= -1 stalist.append((station, lat, lon, elev)) f.close() f = open('station.dat', 'w') for sta in stalist: line = ''.join([sta[0].ljust(5), _cc_round(sta[1], 4).ljust(10), _cc_round(sta[2], 4).ljust(10), _cc_round(sta[3] / 1000, 4).rjust(7), '\n']) f.write(line) f.close() return stalist
[ "def", "readSTATION0", "(", "path", ",", "stations", ")", ":", "stalist", "=", "[", "]", "f", "=", "open", "(", "path", "+", "'/STATION0.HYP'", ",", "'r'", ")", "for", "line", "in", "f", ":", "if", "line", "[", "1", ":", "6", "]", ".", "strip", "(", ")", "in", "stations", ":", "station", "=", "line", "[", "1", ":", "6", "]", ".", "strip", "(", ")", "lat", "=", "line", "[", "6", ":", "14", "]", "# Format is either ddmm.mmS/N or ddmm(.)mmmS/N", "if", "lat", "[", "-", "1", "]", "==", "'S'", ":", "NS", "=", "-", "1", "else", ":", "NS", "=", "1", "if", "lat", "[", "4", "]", "==", "'.'", ":", "lat", "=", "(", "int", "(", "lat", "[", "0", ":", "2", "]", ")", "+", "float", "(", "lat", "[", "2", ":", "-", "1", "]", ")", "/", "60", ")", "*", "NS", "else", ":", "lat", "=", "(", "int", "(", "lat", "[", "0", ":", "2", "]", ")", "+", "float", "(", "lat", "[", "2", ":", "4", "]", "+", "'.'", "+", "lat", "[", "4", ":", "-", "1", "]", ")", "/", "60", ")", "*", "NS", "lon", "=", "line", "[", "14", ":", "23", "]", "if", "lon", "[", "-", "1", "]", "==", "'W'", ":", "EW", "=", "-", "1", "else", ":", "EW", "=", "1", "if", "lon", "[", "5", "]", "==", "'.'", ":", "lon", "=", "(", "int", "(", "lon", "[", "0", ":", "3", "]", ")", "+", "float", "(", "lon", "[", "3", ":", "-", "1", "]", ")", "/", "60", ")", "*", "EW", "else", ":", "lon", "=", "(", "int", "(", "lon", "[", "0", ":", "3", "]", ")", "+", "float", "(", "lon", "[", "3", ":", "5", "]", "+", "'.'", "+", "lon", "[", "5", ":", "-", "1", "]", ")", "/", "60", ")", "*", "EW", "elev", "=", "float", "(", "line", "[", "23", ":", "-", "1", "]", ".", "strip", "(", ")", ")", "# Note, negative altitude can be indicated in 1st column", "if", "line", "[", "0", "]", "==", "'-'", ":", "elev", "*=", "-", "1", "stalist", ".", "append", "(", "(", "station", ",", "lat", ",", "lon", ",", "elev", ")", ")", "f", ".", "close", "(", ")", "f", "=", "open", "(", "'station.dat'", ",", "'w'", ")", "for", "sta", "in", "stalist", ":", "line", "=", "''", ".", "join", "(", "[", "sta", "[", "0", "]", ".", "ljust", "(", "5", ")", ",", "_cc_round", "(", "sta", "[", "1", "]", ",", "4", ")", ".", "ljust", "(", "10", ")", ",", "_cc_round", "(", "sta", "[", "2", "]", ",", "4", ")", ".", "ljust", "(", "10", ")", ",", "_cc_round", "(", "sta", "[", "3", "]", "/", "1000", ",", "4", ")", ".", "rjust", "(", "7", ")", ",", "'\\n'", "]", ")", "f", ".", "write", "(", "line", ")", "f", ".", "close", "(", ")", "return", "stalist" ]
Read a Seisan STATION0.HYP file on the path given. Outputs the information, and writes to station.dat file. :type path: str :param path: Path to the STATION0.HYP file :type stations: list :param stations: Stations to look for :returns: List of tuples of station, lat, long, elevation :rtype: list >>> # Get the path to the test data >>> import eqcorrscan >>> import os >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data' >>> readSTATION0(TEST_PATH, ['WHFS', 'WHAT2', 'BOB']) [('WHFS', -43.261, 170.359, 60.0), ('WHAT2', -43.2793, \ 170.36038333333335, 95.0), ('BOB', 41.408166666666666, \ -174.87116666666665, 101.0)]
[ "Read", "a", "Seisan", "STATION0", ".", "HYP", "file", "on", "the", "path", "given", "." ]
python
train
34.548387
Koed00/django-q
django_q/tasks.py
https://github.com/Koed00/django-q/blob/c84fd11a67c9a47d821786dfcdc189bb258c6f54/django_q/tasks.py#L198-L219
def fetch(task_id, wait=0, cached=Conf.CACHED): """ Return the processed task. :param task_id: the task name or uuid :type task_id: str or uuid :param wait: the number of milliseconds to wait for a result :type wait: int :param bool cached: run this against the cache backend :return: the full task object :rtype: Task """ if cached: return fetch_cached(task_id, wait) start = time() while True: t = Task.get_task(task_id) if t: return t if (time() - start) * 1000 >= wait >= 0: break sleep(0.01)
[ "def", "fetch", "(", "task_id", ",", "wait", "=", "0", ",", "cached", "=", "Conf", ".", "CACHED", ")", ":", "if", "cached", ":", "return", "fetch_cached", "(", "task_id", ",", "wait", ")", "start", "=", "time", "(", ")", "while", "True", ":", "t", "=", "Task", ".", "get_task", "(", "task_id", ")", "if", "t", ":", "return", "t", "if", "(", "time", "(", ")", "-", "start", ")", "*", "1000", ">=", "wait", ">=", "0", ":", "break", "sleep", "(", "0.01", ")" ]
Return the processed task. :param task_id: the task name or uuid :type task_id: str or uuid :param wait: the number of milliseconds to wait for a result :type wait: int :param bool cached: run this against the cache backend :return: the full task object :rtype: Task
[ "Return", "the", "processed", "task", "." ]
python
train
26.909091
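
A hedged usage sketch; the task id is assumed to come from a prior enqueue call (whose name varies across django-q versions):

from django_q.tasks import fetch

task_id = 'hypothetical-task-uuid'  # from a prior enqueue call
task = fetch(task_id, wait=500)     # poll the backend for up to 500 ms
if task is not None:
    print(task.success, task.result)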
pytroll/pyspectral
pyspectral/radiance_tb_conversion.py
https://github.com/pytroll/pyspectral/blob/fd296c0e0bdf5364fa180134a1292665d6bc50a3/pyspectral/radiance_tb_conversion.py#L160-L170
def _getsatname(self): """ Get the satellite name used in the rsr-reader, from the platform and number """ if self.platform_name.startswith("Meteosat"): return self.platform_name else: raise NotImplementedError( 'Platform {0} not yet supported...'.format(self.platform_name))
[ "def", "_getsatname", "(", "self", ")", ":", "if", "self", ".", "platform_name", ".", "startswith", "(", "\"Meteosat\"", ")", ":", "return", "self", ".", "platform_name", "else", ":", "raise", "NotImplementedError", "(", "'Platform {0} not yet supported...'", ".", "format", "(", "self", ".", "platform_name", ")", ")" ]
Get the satellite name used in the rsr-reader, from the platform and number
[ "Get", "the", "satellite", "name", "used", "in", "the", "rsr", "-", "reader", "from", "the", "platform", "and", "number" ]
python
train
32.181818
gwastro/pycbc
pycbc/tmpltbank/em_progenitors.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/em_progenitors.py#L232-L269
def bh_effective_spin(chi,incl):
    """
    Determines the effective [as defined in Stone, Loeb, Berger, PRD 87,
    084053 (2013)] aligned dimensionless spin parameter of a NS-BH binary
    with tilted BH spin.  This means finding the root chi_eff of
    ISCO_eq_chi_first(chi_eff, PG_ISSO_solver(chi,incl))
    with the correct sign.

    Parameters
    -----------
    chi: float
        the BH dimensionless spin parameter
    incl: float
        the inclination angle between the BH spin and the orbital angular
        momentum in radians

    Returns
    ----------
    chi_eff: float
        the effective dimensionless spin parameter solution
    """
    if incl == 0:
        chi_eff = chi
    else:
        # ISSO radius for the given BH spin magnitude and inclination
        rISSO = PG_ISSO_solver(chi,incl)
        # Angle at which the branch of positive solutions has its minimum
        incl_flip = scipy.optimize.fmin(pos_branch, math.pi/4, args=tuple([chi]), full_output=False, disp=False)[-1]
        # Use incl_flip to determine the initial guess: the sign difference
        # in the initial_guess ensures that chi_eff has the correct sign
        if incl>incl_flip:
            initial_guess = -1.1
        else:
            initial_guess = 1.0
        chi_eff = scipy.optimize.fsolve(ISCO_eq_chi_first, initial_guess, args=(rISSO))

    return chi_eff
[ "def", "bh_effective_spin", "(", "chi", ",", "incl", ")", ":", "if", "incl", "==", "0", ":", "chi_eff", "=", "chi", "else", ":", "# ISSO radius for the given BH spin magnitude and inclination", "rISSO", "=", "PG_ISSO_solver", "(", "chi", ",", "incl", ")", "# Angle at which the branch of positive solutions has its minumum", "incl_flip", "=", "scipy", ".", "optimize", ".", "fmin", "(", "pos_branch", ",", "math", ".", "pi", "/", "4", ",", "args", "=", "tuple", "(", "[", "chi", "]", ")", ",", "full_output", "=", "False", ",", "disp", "=", "False", ")", "[", "-", "1", "]", "# Use incl_flip to determine the initial guess: the sign difference", "# in the initial_guess ensures that chi_eff has the correct sign", "if", "incl", ">", "incl_flip", ":", "initial_guess", "=", "-", "1.1", "else", ":", "initial_guess", "=", "1.0", "chi_eff", "=", "scipy", ".", "optimize", ".", "fsolve", "(", "ISCO_eq_chi_first", ",", "initial_guess", ",", "args", "=", "(", "rISSO", ")", ")", "return", "chi_eff" ]
Determines the effective [as defined in Stone, Loeb, Berger, PRD 87, 084053 (2013)] aligned dimensionless spin parameter of a NS-BH binary with tilted BH spin. This means finding the root chi_eff of ISCO_eq_chi_first(chi_eff, PG_ISSO_solver(chi,incl)) with the correct sign. Parameters ----------- chi: float the BH dimensionless spin parameter incl: float the inclination angle between the BH spin and the orbital angular momentum in radians Returns ---------- chi_eff: float the effective dimensionless spin parameter solution
[ "Determines", "the", "effective", "[", "as", "defined", "in", "Stone", "Loeb", "Berger", "PRD", "87", "084053", "(", "2013", ")", "]", "aligned", "dimensionless", "spin", "parameter", "of", "a", "NS", "-", "BH", "binary", "with", "tilted", "BH", "spin", ".", "This", "means", "finding", "the", "root", "chi_eff", "of", "ISCO_eq_chi_first", "(", "chi_eff", "PG_ISSO_solver", "(", "chi", "incl", "))", "with", "the", "correct", "sign", "." ]
python
train
35.289474
EconForge/dolo
dolo/compiler/function_compiler_sympy.py
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/compiler/function_compiler_sympy.py#L5-L11
def ast_to_sympy(expr): '''Converts an AST expression to a sympy expression (STUPID)''' from dolang import to_source s = to_source(expr) not_to_be_treated_as_functions = ['alpha','beta', 'gamma','zeta', 'Chi'] d = {v: sympy.Symbol(v) for v in not_to_be_treated_as_functions} return sympy.sympify(s, locals=d)
[ "def", "ast_to_sympy", "(", "expr", ")", ":", "from", "dolang", "import", "to_source", "s", "=", "to_source", "(", "expr", ")", "not_to_be_treated_as_functions", "=", "[", "'alpha'", ",", "'beta'", ",", "'gamma'", ",", "'zeta'", ",", "'Chi'", "]", "d", "=", "{", "v", ":", "sympy", ".", "Symbol", "(", "v", ")", "for", "v", "in", "not_to_be_treated_as_functions", "}", "return", "sympy", ".", "sympify", "(", "s", ",", "locals", "=", "d", ")" ]
Converts an AST expression to a sympy expression (STUPID)
[ "Converts", "an", "AST", "expression", "to", "a", "sympy", "expression", "(", "STUPID", ")" ]
python
train
46.571429
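
A sketch of feeding the converter a stdlib AST node; whether `dolang.to_source` accepts exactly this node shape is an assumption:

import ast

# 'alpha' and 'beta' stay plain Symbols thanks to the locals override above
node = ast.parse("alpha*x + beta", mode="eval").body
expr = ast_to_sympy(node)
print(sorted(map(str, expr.free_symbols)))  # ['alpha', 'beta', 'x']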
yyuu/botornado
boto/mturk/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/mturk/connection.py#L499-L505
def block_worker(self, worker_id, reason): """ Block a worker from working on my tasks. """ params = {'WorkerId': worker_id, 'Reason': reason} return self._process_request('BlockWorker', params)
[ "def", "block_worker", "(", "self", ",", "worker_id", ",", "reason", ")", ":", "params", "=", "{", "'WorkerId'", ":", "worker_id", ",", "'Reason'", ":", "reason", "}", "return", "self", ".", "_process_request", "(", "'BlockWorker'", ",", "params", ")" ]
Block a worker from working on my tasks.
[ "Block", "a", "worker", "from", "working", "on", "my", "tasks", "." ]
python
train
32.714286
deepmind/sonnet
sonnet/python/modules/basic.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/basic.py#L197-L265
def _build(self, inputs): """Connects the Linear module into the graph, with input Tensor `inputs`. If this is not the first time the module has been connected to the graph, the Tensor provided here must have the same final dimension, in order for the existing variables to be the correct size for the multiplication. The batch size may differ for each connection. Args: inputs: A 2D Tensor of size [batch_size, input_size]. Returns: A 2D Tensor of size [batch_size, output_size]. Raises: base.IncompatibleShapeError: If the input is not a 2-D `Tensor` with the size of the second dimension specified. base.IncompatibleShapeError: If reconnecting an already connected module into the graph, and the shape of the input is not compatible with previous inputs. """ input_shape = tuple(inputs.get_shape().as_list()) if len(input_shape) != 2: raise base.IncompatibleShapeError( "{}: rank of shape must be 2 not: {}".format( self.scope_name, len(input_shape))) if input_shape[1] is None: raise base.IncompatibleShapeError( "{}: Input size must be specified at module build time".format( self.scope_name)) if self._input_shape is not None and input_shape[1] != self._input_shape[1]: raise base.IncompatibleShapeError( "{}: Input shape must be [batch_size, {}] not: [batch_size, {}]" .format(self.scope_name, self._input_shape[1], input_shape[1])) self._input_shape = input_shape dtype = inputs.dtype if "w" not in self._initializers: self._initializers["w"] = create_linear_initializer(self._input_shape[1], dtype) if "b" not in self._initializers and self._use_bias: self._initializers["b"] = create_bias_initializer(self._input_shape[1], dtype) weight_shape = (self._input_shape[1], self.output_size) self._w = tf.get_variable("w", shape=weight_shape, dtype=dtype, initializer=self._initializers["w"], partitioner=self._partitioners.get("w", None), regularizer=self._regularizers.get("w", None)) outputs = tf.matmul(inputs, self._w) if self._use_bias: bias_shape = (self.output_size,) self._b = tf.get_variable("b", shape=bias_shape, dtype=dtype, initializer=self._initializers["b"], partitioner=self._partitioners.get("b", None), regularizer=self._regularizers.get("b", None)) outputs += self._b return outputs
[ "def", "_build", "(", "self", ",", "inputs", ")", ":", "input_shape", "=", "tuple", "(", "inputs", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", ")", "if", "len", "(", "input_shape", ")", "!=", "2", ":", "raise", "base", ".", "IncompatibleShapeError", "(", "\"{}: rank of shape must be 2 not: {}\"", ".", "format", "(", "self", ".", "scope_name", ",", "len", "(", "input_shape", ")", ")", ")", "if", "input_shape", "[", "1", "]", "is", "None", ":", "raise", "base", ".", "IncompatibleShapeError", "(", "\"{}: Input size must be specified at module build time\"", ".", "format", "(", "self", ".", "scope_name", ")", ")", "if", "self", ".", "_input_shape", "is", "not", "None", "and", "input_shape", "[", "1", "]", "!=", "self", ".", "_input_shape", "[", "1", "]", ":", "raise", "base", ".", "IncompatibleShapeError", "(", "\"{}: Input shape must be [batch_size, {}] not: [batch_size, {}]\"", ".", "format", "(", "self", ".", "scope_name", ",", "self", ".", "_input_shape", "[", "1", "]", ",", "input_shape", "[", "1", "]", ")", ")", "self", ".", "_input_shape", "=", "input_shape", "dtype", "=", "inputs", ".", "dtype", "if", "\"w\"", "not", "in", "self", ".", "_initializers", ":", "self", ".", "_initializers", "[", "\"w\"", "]", "=", "create_linear_initializer", "(", "self", ".", "_input_shape", "[", "1", "]", ",", "dtype", ")", "if", "\"b\"", "not", "in", "self", ".", "_initializers", "and", "self", ".", "_use_bias", ":", "self", ".", "_initializers", "[", "\"b\"", "]", "=", "create_bias_initializer", "(", "self", ".", "_input_shape", "[", "1", "]", ",", "dtype", ")", "weight_shape", "=", "(", "self", ".", "_input_shape", "[", "1", "]", ",", "self", ".", "output_size", ")", "self", ".", "_w", "=", "tf", ".", "get_variable", "(", "\"w\"", ",", "shape", "=", "weight_shape", ",", "dtype", "=", "dtype", ",", "initializer", "=", "self", ".", "_initializers", "[", "\"w\"", "]", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "\"w\"", ",", "None", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "\"w\"", ",", "None", ")", ")", "outputs", "=", "tf", ".", "matmul", "(", "inputs", ",", "self", ".", "_w", ")", "if", "self", ".", "_use_bias", ":", "bias_shape", "=", "(", "self", ".", "output_size", ",", ")", "self", ".", "_b", "=", "tf", ".", "get_variable", "(", "\"b\"", ",", "shape", "=", "bias_shape", ",", "dtype", "=", "dtype", ",", "initializer", "=", "self", ".", "_initializers", "[", "\"b\"", "]", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "\"b\"", ",", "None", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "\"b\"", ",", "None", ")", ")", "outputs", "+=", "self", ".", "_b", "return", "outputs" ]
Connects the Linear module into the graph, with input Tensor `inputs`. If this is not the first time the module has been connected to the graph, the Tensor provided here must have the same final dimension, in order for the existing variables to be the correct size for the multiplication. The batch size may differ for each connection. Args: inputs: A 2D Tensor of size [batch_size, input_size]. Returns: A 2D Tensor of size [batch_size, output_size]. Raises: base.IncompatibleShapeError: If the input is not a 2-D `Tensor` with the size of the second dimension specified. base.IncompatibleShapeError: If reconnecting an already connected module into the graph, and the shape of the input is not compatible with previous inputs.
[ "Connects", "the", "Linear", "module", "into", "the", "graph", "with", "input", "Tensor", "inputs", "." ]
python
train
41.057971
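
A minimal TF1/Sonnet-v1 style sketch of connecting the module; shapes are illustrative:

import tensorflow as tf
import sonnet as snt

inputs = tf.placeholder(tf.float32, shape=[None, 64])  # [batch_size, input_size]
linear = snt.Linear(output_size=10)
outputs = linear(inputs)   # invokes _build; later connections must keep size 64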
hotdoc/hotdoc
hotdoc/utils/loggable.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/utils/loggable.py#L201-L204
def register_error_code(code, exception_type, domain='core'): """Register a new error code""" Logger._error_code_to_exception[code] = (exception_type, domain) Logger._domain_codes[domain].add(code)
[ "def", "register_error_code", "(", "code", ",", "exception_type", ",", "domain", "=", "'core'", ")", ":", "Logger", ".", "_error_code_to_exception", "[", "code", "]", "=", "(", "exception_type", ",", "domain", ")", "Logger", ".", "_domain_codes", "[", "domain", "]", ".", "add", "(", "code", ")" ]
Register a new error code
[ "Register", "a", "new", "error", "code" ]
python
train
54.5
hyperledger/indy-plenum
plenum/common/did_method.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/did_method.py#L34-L43
def get(self, didMethodName, required=True) -> DidMethod: """ :param didMethodName: name of DID Method :param required: if not found and True, throws an exception, else None :return: DID Method """ dm = self.d.get(didMethodName) if didMethodName else self.default if not dm and required: raise DidMethodNotFound return dm
[ "def", "get", "(", "self", ",", "didMethodName", ",", "required", "=", "True", ")", "->", "DidMethod", ":", "dm", "=", "self", ".", "d", ".", "get", "(", "didMethodName", ")", "if", "didMethodName", "else", "self", ".", "default", "if", "not", "dm", "and", "required", ":", "raise", "DidMethodNotFound", "return", "dm" ]
:param didMethodName: name of DID Method :param required: if not found and True, throws an exception, else None :return: DID Method
[ ":", "param", "didMethodName", ":", "name", "of", "DID", "Method", ":", "param", "required", ":", "if", "not", "found", "and", "True", "throws", "an", "exception", "else", "None", ":", "return", ":", "DID", "Method" ]
python
train
38.8
bugra/angel-list
angel/angel.py
https://github.com/bugra/angel-list/blob/75ac453e873727675ba18e1f45b5bc0cfda26fd7/angel/angel.py#L455-L462
def get_reviews(self, user_id): """ Get reviews for a particular user """ url = _REVIEWS_USER.format(c_api=_C_API_BEGINNING, api=_API_VERSION, user_id=user_id, at=self.access_token) return _get_request(url)
[ "def", "get_reviews", "(", "self", ",", "user_id", ")", ":", "url", "=", "_REVIEWS_USER", ".", "format", "(", "c_api", "=", "_C_API_BEGINNING", ",", "api", "=", "_API_VERSION", ",", "user_id", "=", "user_id", ",", "at", "=", "self", ".", "access_token", ")", "return", "_get_request", "(", "url", ")" ]
Get reviews for a particular user
[ "Get", "reviews", "for", "a", "particular", "user" ]
python
train
44.875
SheffieldML/GPy
GPy/util/pca.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/pca.py#L41-L54
def center(self, X): """ Center `X` in PCA space. """ X = X.copy() inan = numpy.isnan(X) if self.mu is None: X_ = numpy.ma.masked_array(X, inan) self.mu = X_.mean(0).base self.sigma = X_.std(0).base reduce(lambda y,x: setitem(x[0], x[1], x[2]), zip(X.T, inan.T, self.mu), None) X = X - self.mu X = X / numpy.where(self.sigma == 0, 1e-30, self.sigma) return X
[ "def", "center", "(", "self", ",", "X", ")", ":", "X", "=", "X", ".", "copy", "(", ")", "inan", "=", "numpy", ".", "isnan", "(", "X", ")", "if", "self", ".", "mu", "is", "None", ":", "X_", "=", "numpy", ".", "ma", ".", "masked_array", "(", "X", ",", "inan", ")", "self", ".", "mu", "=", "X_", ".", "mean", "(", "0", ")", ".", "base", "self", ".", "sigma", "=", "X_", ".", "std", "(", "0", ")", ".", "base", "reduce", "(", "lambda", "y", ",", "x", ":", "setitem", "(", "x", "[", "0", "]", ",", "x", "[", "1", "]", ",", "x", "[", "2", "]", ")", ",", "zip", "(", "X", ".", "T", ",", "inan", ".", "T", ",", "self", ".", "mu", ")", ",", "None", ")", "X", "=", "X", "-", "self", ".", "mu", "X", "=", "X", "/", "numpy", ".", "where", "(", "self", ".", "sigma", "==", "0", ",", "1e-30", ",", "self", ".", "sigma", ")", "return", "X" ]
Center `X` in PCA space.
[ "Center", "X", "in", "PCA", "space", "." ]
python
train
32.928571
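
In math, the transformation reconstructed from the code (the first call fixes mu and sigma from NaN-masked column statistics, and NaN entries are replaced by the column mean before centering):

\tilde{X}_{ij} = \frac{X_{ij} - \mu_j}{\sigma_j'}, \qquad
\sigma_j' = \begin{cases} \sigma_j & \sigma_j \neq 0 \\ 10^{-30} & \sigma_j = 0 \end{cases}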
twilio/twilio-python
twilio/rest/insights/v1/summary.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/insights/v1/summary.py#L130-L145
def fetch(self): """ Fetch a CallSummaryInstance :returns: Fetched CallSummaryInstance :rtype: twilio.rest.insights.v1.summary.CallSummaryInstance """ params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return CallSummaryInstance(self._version, payload, call_sid=self._solution['call_sid'], )
[ "def", "fetch", "(", "self", ")", ":", "params", "=", "values", ".", "of", "(", "{", "}", ")", "payload", "=", "self", ".", "_version", ".", "fetch", "(", "'GET'", ",", "self", ".", "_uri", ",", "params", "=", "params", ",", ")", "return", "CallSummaryInstance", "(", "self", ".", "_version", ",", "payload", ",", "call_sid", "=", "self", ".", "_solution", "[", "'call_sid'", "]", ",", ")" ]
Fetch a CallSummaryInstance :returns: Fetched CallSummaryInstance :rtype: twilio.rest.insights.v1.summary.CallSummaryInstance
[ "Fetch", "a", "CallSummaryInstance" ]
python
train
26.5625
gem/oq-engine
openquake/hmtk/seismicity/gcmt_catalogue.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/seismicity/gcmt_catalogue.py#L447-L503
def gcmt_to_simple_array(self, centroid_location=True): """ Converts the GCMT catalogue to a simple array of [ID, year, month, day, hour, minute, second, long., lat., depth, Mw, strike1, dip1, rake1, strike2, dip2, rake2, b-plunge, b-azimuth, b-eigenvalue, p-plunge, p-azimuth, p-eigenvalue, t-plunge, t-azimuth, t-eigenvalue, moment, f_clvd, erel] """ catalogue = np.zeros([self.get_number_tensors(), 29], dtype=float) for iloc, tensor in enumerate(self.gcmts): catalogue[iloc, 0] = iloc if centroid_location: catalogue[iloc, 1] = float(tensor.centroid.date.year) catalogue[iloc, 2] = float(tensor.centroid.date.month) catalogue[iloc, 3] = float(tensor.centroid.date.day) catalogue[iloc, 4] = float(tensor.centroid.time.hour) catalogue[iloc, 5] = float(tensor.centroid.time.minute) catalogue[iloc, 6] = np.round( np.float(tensor.centroid.time.second) + np.float(tensor.centroid.time.microsecond) / 1000000., 2) catalogue[iloc, 7] = tensor.centroid.longitude catalogue[iloc, 8] = tensor.centroid.latitude catalogue[iloc, 9] = tensor.centroid.depth else: catalogue[iloc, 1] = float(tensor.hypocentre.date.year) catalogue[iloc, 2] = float(tensor.hypocentre.date.month) catalogue[iloc, 3] = float(tensor.hypocentre.date.day) catalogue[iloc, 4] = float(tensor.hypocentre.time.hour) catalogue[iloc, 5] = float(tensor.hypocentre.time.minute) catalogue[iloc, 6] = np.round( np.float(tensor.centroid.time.second) + np.float(tensor.centroid.time.microsecond) / 1000000., 2) catalogue[iloc, 7] = tensor.hypocentre.longitude catalogue[iloc, 8] = tensor.hypocentre.latitude catalogue[iloc, 9] = tensor.hypocentre.depth catalogue[iloc, 10] = tensor.magnitude catalogue[iloc, 11] = tensor.moment catalogue[iloc, 12] = tensor.f_clvd catalogue[iloc, 13] = tensor.e_rel # Nodal planes catalogue[iloc, 14] = tensor.nodal_planes.nodal_plane_1['strike'] catalogue[iloc, 15] = tensor.nodal_planes.nodal_plane_1['dip'] catalogue[iloc, 16] = tensor.nodal_planes.nodal_plane_1['rake'] catalogue[iloc, 17] = tensor.nodal_planes.nodal_plane_2['strike'] catalogue[iloc, 18] = tensor.nodal_planes.nodal_plane_2['dip'] catalogue[iloc, 19] = tensor.nodal_planes.nodal_plane_2['rake'] # Principal axes catalogue[iloc, 20] = tensor.principal_axes.b_axis['eigenvalue'] catalogue[iloc, 21] = tensor.principal_axes.b_axis['azimuth'] catalogue[iloc, 22] = tensor.principal_axes.b_axis['plunge'] catalogue[iloc, 23] = tensor.principal_axes.p_axis['eigenvalue'] catalogue[iloc, 24] = tensor.principal_axes.p_axis['azimuth'] catalogue[iloc, 25] = tensor.principal_axes.p_axis['plunge'] catalogue[iloc, 26] = tensor.principal_axes.t_axis['eigenvalue'] catalogue[iloc, 27] = tensor.principal_axes.t_axis['azimuth'] catalogue[iloc, 28] = tensor.principal_axes.t_axis['plunge'] return catalogue
[ "def", "gcmt_to_simple_array", "(", "self", ",", "centroid_location", "=", "True", ")", ":", "catalogue", "=", "np", ".", "zeros", "(", "[", "self", ".", "get_number_tensors", "(", ")", ",", "29", "]", ",", "dtype", "=", "float", ")", "for", "iloc", ",", "tensor", "in", "enumerate", "(", "self", ".", "gcmts", ")", ":", "catalogue", "[", "iloc", ",", "0", "]", "=", "iloc", "if", "centroid_location", ":", "catalogue", "[", "iloc", ",", "1", "]", "=", "float", "(", "tensor", ".", "centroid", ".", "date", ".", "year", ")", "catalogue", "[", "iloc", ",", "2", "]", "=", "float", "(", "tensor", ".", "centroid", ".", "date", ".", "month", ")", "catalogue", "[", "iloc", ",", "3", "]", "=", "float", "(", "tensor", ".", "centroid", ".", "date", ".", "day", ")", "catalogue", "[", "iloc", ",", "4", "]", "=", "float", "(", "tensor", ".", "centroid", ".", "time", ".", "hour", ")", "catalogue", "[", "iloc", ",", "5", "]", "=", "float", "(", "tensor", ".", "centroid", ".", "time", ".", "minute", ")", "catalogue", "[", "iloc", ",", "6", "]", "=", "np", ".", "round", "(", "np", ".", "float", "(", "tensor", ".", "centroid", ".", "time", ".", "second", ")", "+", "np", ".", "float", "(", "tensor", ".", "centroid", ".", "time", ".", "microsecond", ")", "/", "1000000.", ",", "2", ")", "catalogue", "[", "iloc", ",", "7", "]", "=", "tensor", ".", "centroid", ".", "longitude", "catalogue", "[", "iloc", ",", "8", "]", "=", "tensor", ".", "centroid", ".", "latitude", "catalogue", "[", "iloc", ",", "9", "]", "=", "tensor", ".", "centroid", ".", "depth", "else", ":", "catalogue", "[", "iloc", ",", "1", "]", "=", "float", "(", "tensor", ".", "hypocentre", ".", "date", ".", "year", ")", "catalogue", "[", "iloc", ",", "2", "]", "=", "float", "(", "tensor", ".", "hypocentre", ".", "date", ".", "month", ")", "catalogue", "[", "iloc", ",", "3", "]", "=", "float", "(", "tensor", ".", "hypocentre", ".", "date", ".", "day", ")", "catalogue", "[", "iloc", ",", "4", "]", "=", "float", "(", "tensor", ".", "hypocentre", ".", "time", ".", "hour", ")", "catalogue", "[", "iloc", ",", "5", "]", "=", "float", "(", "tensor", ".", "hypocentre", ".", "time", ".", "minute", ")", "catalogue", "[", "iloc", ",", "6", "]", "=", "np", ".", "round", "(", "np", ".", "float", "(", "tensor", ".", "centroid", ".", "time", ".", "second", ")", "+", "np", ".", "float", "(", "tensor", ".", "centroid", ".", "time", ".", "microsecond", ")", "/", "1000000.", ",", "2", ")", "catalogue", "[", "iloc", ",", "7", "]", "=", "tensor", ".", "hypocentre", ".", "longitude", "catalogue", "[", "iloc", ",", "8", "]", "=", "tensor", ".", "hypocentre", ".", "latitude", "catalogue", "[", "iloc", ",", "9", "]", "=", "tensor", ".", "hypocentre", ".", "depth", "catalogue", "[", "iloc", ",", "10", "]", "=", "tensor", ".", "magnitude", "catalogue", "[", "iloc", ",", "11", "]", "=", "tensor", ".", "moment", "catalogue", "[", "iloc", ",", "12", "]", "=", "tensor", ".", "f_clvd", "catalogue", "[", "iloc", ",", "13", "]", "=", "tensor", ".", "e_rel", "# Nodal planes", "catalogue", "[", "iloc", ",", "14", "]", "=", "tensor", ".", "nodal_planes", ".", "nodal_plane_1", "[", "'strike'", "]", "catalogue", "[", "iloc", ",", "15", "]", "=", "tensor", ".", "nodal_planes", ".", "nodal_plane_1", "[", "'dip'", "]", "catalogue", "[", "iloc", ",", "16", "]", "=", "tensor", ".", "nodal_planes", ".", "nodal_plane_1", "[", "'rake'", "]", "catalogue", "[", "iloc", ",", "17", "]", "=", "tensor", ".", "nodal_planes", ".", "nodal_plane_2", "[", "'strike'", "]", "catalogue", "[", "iloc", ",", "18", 
"]", "=", "tensor", ".", "nodal_planes", ".", "nodal_plane_2", "[", "'dip'", "]", "catalogue", "[", "iloc", ",", "19", "]", "=", "tensor", ".", "nodal_planes", ".", "nodal_plane_2", "[", "'rake'", "]", "# Principal axes", "catalogue", "[", "iloc", ",", "20", "]", "=", "tensor", ".", "principal_axes", ".", "b_axis", "[", "'eigenvalue'", "]", "catalogue", "[", "iloc", ",", "21", "]", "=", "tensor", ".", "principal_axes", ".", "b_axis", "[", "'azimuth'", "]", "catalogue", "[", "iloc", ",", "22", "]", "=", "tensor", ".", "principal_axes", ".", "b_axis", "[", "'plunge'", "]", "catalogue", "[", "iloc", ",", "23", "]", "=", "tensor", ".", "principal_axes", ".", "p_axis", "[", "'eigenvalue'", "]", "catalogue", "[", "iloc", ",", "24", "]", "=", "tensor", ".", "principal_axes", ".", "p_axis", "[", "'azimuth'", "]", "catalogue", "[", "iloc", ",", "25", "]", "=", "tensor", ".", "principal_axes", ".", "p_axis", "[", "'plunge'", "]", "catalogue", "[", "iloc", ",", "26", "]", "=", "tensor", ".", "principal_axes", ".", "t_axis", "[", "'eigenvalue'", "]", "catalogue", "[", "iloc", ",", "27", "]", "=", "tensor", ".", "principal_axes", ".", "t_axis", "[", "'azimuth'", "]", "catalogue", "[", "iloc", ",", "28", "]", "=", "tensor", ".", "principal_axes", ".", "t_axis", "[", "'plunge'", "]", "return", "catalogue" ]
Converts the GCMT catalogue to a simple array of [ID, year, month, day, hour, minute, second, long., lat., depth, Mw, strike1, dip1, rake1, strike2, dip2, rake2, b-plunge, b-azimuth, b-eigenvalue, p-plunge, p-azimuth, p-eigenvalue, t-plunge, t-azimuth, t-eigenvalue, moment, f_clvd, erel]
[ "Converts", "the", "GCMT", "catalogue", "to", "a", "simple", "array", "of", "[", "ID", "year", "month", "day", "hour", "minute", "second", "long", ".", "lat", ".", "depth", "Mw", "strike1", "dip1", "rake1", "strike2", "dip2", "rake2", "b", "-", "plunge", "b", "-", "azimuth", "b", "-", "eigenvalue", "p", "-", "plunge", "p", "-", "azimuth", "p", "-", "eigenvalue", "t", "-", "plunge", "t", "-", "azimuth", "t", "-", "eigenvalue", "moment", "f_clvd", "erel", "]" ]
python
train
60.192982
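A minimal sketch of consuming the 29-column array described above; the populated catalogue object `cat` is an assumption, not part of the source.

.. code-block:: python

    import numpy as np

    # `cat` is assumed to be an already-populated GCMT catalogue exposing
    # the gcmt_to_simple_array() method shown above.
    arr = cat.gcmt_to_simple_array(centroid_location=True)
    assert arr.shape[1] == 29            # fixed 29-column layout
    years = arr[:, 1].astype(int)        # column 1 holds the event year
    mags = arr[:, 10]                    # column 10 holds Mw
    print('largest event: Mw %.1f (%d)' % (mags.max(), years[np.argmax(mags)]))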
berkeley-cocosci/Wallace
examples/rogers/experiment.py
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/examples/rogers/experiment.py#L168-L194
def data_check(self, participant): """Check a participants data.""" participant_id = participant.uniqueid nodes = Node.query.filter_by(participant_id=participant_id).all() if len(nodes) != self.experiment_repeats + self.practice_repeats: print("Error: Participant has {} nodes. Data check failed" .format(len(nodes))) return False nets = [n.network_id for n in nodes] if len(nets) != len(set(nets)): print "Error: Participant participated in the same network \ multiple times. Data check failed" return False if None in [n.fitness for n in nodes]: print "Error: some of participants nodes are missing a fitness. \ Data check failed." return False if None in [n.score for n in nodes]: print "Error: some of participants nodes are missing a score. \ Data check failed" return False return True
[ "def", "data_check", "(", "self", ",", "participant", ")", ":", "participant_id", "=", "participant", ".", "uniqueid", "nodes", "=", "Node", ".", "query", ".", "filter_by", "(", "participant_id", "=", "participant_id", ")", ".", "all", "(", ")", "if", "len", "(", "nodes", ")", "!=", "self", ".", "experiment_repeats", "+", "self", ".", "practice_repeats", ":", "print", "(", "\"Error: Participant has {} nodes. Data check failed\"", ".", "format", "(", "len", "(", "nodes", ")", ")", ")", "return", "False", "nets", "=", "[", "n", ".", "network_id", "for", "n", "in", "nodes", "]", "if", "len", "(", "nets", ")", "!=", "len", "(", "set", "(", "nets", ")", ")", ":", "print", "\"Error: Participant participated in the same network \\\n multiple times. Data check failed\"", "return", "False", "if", "None", "in", "[", "n", ".", "fitness", "for", "n", "in", "nodes", "]", ":", "print", "\"Error: some of participants nodes are missing a fitness. \\\n Data check failed.\"", "return", "False", "if", "None", "in", "[", "n", ".", "score", "for", "n", "in", "nodes", "]", ":", "print", "\"Error: some of participants nodes are missing a score. \\\n Data check failed\"", "return", "False", "return", "True" ]
Check a participants data.
[ "Check", "a", "participants", "data", "." ]
python
train
37.518519
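As a rough usage sketch (the experiment instance `exp` and the `participants` iterable are assumptions), the boolean return lends itself to a simple screening pass:

.. code-block:: python

    # Hypothetical objects: `exp` is an instance of the experiment class
    # above; each participant record exposes a .uniqueid attribute.
    failed = [p.uniqueid for p in participants if not exp.data_check(p)]
    if failed:
        print("participants failing the data check: %s" % failed)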
pkgw/pwkit
pwkit/lsqmdl.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/lsqmdl.py#L656-L660
def debug_derivative(self, guess): """returns (explicit, auto)""" from .lmmin import check_derivative return check_derivative(self.component.npar, self.data.size, self.lm_model, self.lm_deriv, guess)
[ "def", "debug_derivative", "(", "self", ",", "guess", ")", ":", "from", ".", "lmmin", "import", "check_derivative", "return", "check_derivative", "(", "self", ".", "component", ".", "npar", ",", "self", ".", "data", ".", "size", ",", "self", ".", "lm_model", ",", "self", ".", "lm_deriv", ",", "guess", ")" ]
returns (explicit, auto)
[ "returns", "(", "explicit", "auto", ")" ]
python
train
50.2
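A hedged sketch of comparing the two returned derivative estimates, assuming `mdl` is a configured Model and that the returned pair is array-like:

.. code-block:: python

    import numpy as np

    # `mdl` is assumed to be a configured Model with component.npar set;
    # the all-zero starting guess is illustrative only.
    guess = np.zeros(mdl.component.npar)
    explicit, auto = mdl.debug_derivative(guess)
    print(np.max(np.abs(np.asarray(explicit) - np.asarray(auto))))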
pantsbuild/pants
src/python/pants/java/nailgun_io.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/java/nailgun_io.py#L148-L156
def open(cls, sock, chunk_type, isatty, chunk_eof_type=None, buf_size=None, select_timeout=None): """Yields the write side of a pipe that will copy appropriately chunked values to a socket.""" with cls.open_multi(sock, (chunk_type,), (isatty,), chunk_eof_type, buf_size, select_timeout) as ctx: yield ctx
[ "def", "open", "(", "cls", ",", "sock", ",", "chunk_type", ",", "isatty", ",", "chunk_eof_type", "=", "None", ",", "buf_size", "=", "None", ",", "select_timeout", "=", "None", ")", ":", "with", "cls", ".", "open_multi", "(", "sock", ",", "(", "chunk_type", ",", ")", ",", "(", "isatty", ",", ")", ",", "chunk_eof_type", ",", "buf_size", ",", "select_timeout", ")", "as", "ctx", ":", "yield", "ctx" ]
Yields the write side of a pipe that will copy appropriately chunked values to a socket.
[ "Yields", "the", "write", "side", "of", "a", "pipe", "that", "will", "copy", "appropriately", "chunked", "values", "to", "a", "socket", "." ]
python
train
47.777778
FujiMakoto/IPS-Vagrant
ips_vagrant/installer/__init__.py
https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/installer/__init__.py#L17-L42
def installer(cv, ctx, site, force=False): """ Installer factory @param cv: Current version (The version of IPS we are installing) @type cv: ips_vagrant.common.version.Version @type ctx: ips_vagrant.cli.Context @param site: The IPS Site we are installing @type site: ips_vagrant.models.sites.Site @param force: Overwrite existing files / databases @type force: bool @return: Installer instance @rtype: ips_vagrant.installer.latest.Installer """ log = logging.getLogger('ipsv.installer') log.info('Loading installer for IPS %s', cv) iv = None for v in versions: vstring = '.'.join(map(str, v)) if v else 'latest' # cvstring = '.'.join(map(str, cv)) if cv else 'latest' log.debug('Checking if version %s >= %s', vstring, cv.vstring) if (v is None) or (v >= cv.vtuple): log.debug('Changing installer version to %s', vstring) iv = v log.info('Returning installer version %s', '.'.join(map(str, iv)) if iv else 'latest') return versions[iv](ctx, site, force)
[ "def", "installer", "(", "cv", ",", "ctx", ",", "site", ",", "force", "=", "False", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "'ipsv.installer'", ")", "log", ".", "info", "(", "'Loading installer for IPS %s'", ",", "cv", ")", "iv", "=", "None", "for", "v", "in", "versions", ":", "vstring", "=", "'.'", ".", "join", "(", "map", "(", "str", ",", "v", ")", ")", "if", "v", "else", "'latest'", "# cvstring = '.'.join(map(str, cv)) if cv else 'latest'", "log", ".", "debug", "(", "'Checking if version %s >= %s'", ",", "vstring", ",", "cv", ".", "vstring", ")", "if", "(", "v", "is", "None", ")", "or", "(", "v", ">=", "cv", ".", "vtuple", ")", ":", "log", ".", "debug", "(", "'Changing installer version to %s'", ",", "vstring", ")", "iv", "=", "v", "log", ".", "info", "(", "'Returning installer version %s'", ",", "'.'", ".", "join", "(", "map", "(", "str", ",", "iv", ")", ")", "if", "iv", "else", "'latest'", ")", "return", "versions", "[", "iv", "]", "(", "ctx", ",", "site", ",", "force", ")" ]
Installer factory @param cv: Current version (The version of IPS we are installing) @type cv: ips_vagrant.common.version.Version @type ctx: ips_vagrant.cli.Context @param site: The IPS Site we are installing @type site: ips_vagrant.models.sites.Site @param force: Overwrite existing files / databases @type force: bool @return: Installer instance @rtype: ips_vagrant.installer.latest.Installer
[ "Installer", "factory" ]
python
train
42
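A sketch of the factory in use; `cv`, `ctx` and `site` are assumed to exist with the types given in the docstring:

.. code-block:: python

    # Returns an instance of the Installer class registered for the
    # closest version entry >= cv (or the 'latest' entry).
    iv = installer(cv, ctx, site, force=False)
    print(type(iv).__module__)   # e.g. ips_vagrant.installer.latest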
gem/oq-engine
openquake/calculators/views.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/views.py#L382-L393
def view_portfolio_losses(token, dstore): """ The losses for the full portfolio, for each realization and loss type, extracted from the event loss table. """ oq = dstore['oqparam'] loss_dt = oq.loss_dt() data = portfolio_loss(dstore).view(loss_dt)[:, 0] rlzids = [str(r) for r in range(len(data))] array = util.compose_arrays(numpy.array(rlzids), data, 'rlz') # this is very sensitive to rounding errors, so I am using a low precision return rst_table(array, fmt='%.5E')
[ "def", "view_portfolio_losses", "(", "token", ",", "dstore", ")", ":", "oq", "=", "dstore", "[", "'oqparam'", "]", "loss_dt", "=", "oq", ".", "loss_dt", "(", ")", "data", "=", "portfolio_loss", "(", "dstore", ")", ".", "view", "(", "loss_dt", ")", "[", ":", ",", "0", "]", "rlzids", "=", "[", "str", "(", "r", ")", "for", "r", "in", "range", "(", "len", "(", "data", ")", ")", "]", "array", "=", "util", ".", "compose_arrays", "(", "numpy", ".", "array", "(", "rlzids", ")", ",", "data", ",", "'rlz'", ")", "# this is very sensitive to rounding errors, so I am using a low precision", "return", "rst_table", "(", "array", ",", "fmt", "=", "'%.5E'", ")" ]
The losses for the full portfolio, for each realization and loss type, extracted from the event loss table.
[ "The", "losses", "for", "the", "full", "portfolio", "for", "each", "realization", "and", "loss", "type", "extracted", "from", "the", "event", "loss", "table", "." ]
python
train
41.916667
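A hypothetical call; `dstore` is an open datastore and the token string is illustrative:

.. code-block:: python

    # Renders the per-realization, per-loss-type table as
    # reStructuredText (values printed at %.5E precision, as noted above).
    print(view_portfolio_losses('portfolio_losses', dstore))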
IBMStreams/pypi.streamsx
streamsx/spl/op.py
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/spl/op.py#L240-L254
def output(self, stream, value): """SPL output port assignment expression. Arguments: stream(Stream): Output stream the assignment is for. value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`. Returns: Expression: Output assignment expression that is valid as a the context of this operator. """ if stream not in self.outputs: raise ValueError("Stream is not an output of this operator.") e = self.expression(value) e._stream = stream return e
[ "def", "output", "(", "self", ",", "stream", ",", "value", ")", ":", "if", "stream", "not", "in", "self", ".", "outputs", ":", "raise", "ValueError", "(", "\"Stream is not an output of this operator.\"", ")", "e", "=", "self", ".", "expression", "(", "value", ")", "e", ".", "_stream", "=", "stream", "return", "e" ]
SPL output port assignment expression. Arguments: stream(Stream): Output stream the assignment is for. value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`. Returns: Expression: Output assignment expression that is valid in the context of this operator.
[ "SPL", "output", "port", "assignment", "expression", "." ]
python
train
40.733333
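A sketch of an output assignment; the operator and stream names here are assumptions:

.. code-block:: python

    # `op` is an operator instance; `out_stream` must be in op.outputs,
    # otherwise the ValueError shown above is raised.
    expr = op.output(out_stream, 'sequence + 1')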
koordinates/python-client
koordinates/base.py
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/base.py#L279-L296
def filter(self, **filters): """ Add a filter to this query. Appends to any previous filters set. :rtype: Query """ q = self._clone() for key, value in filters.items(): filter_key = re.split('__', key) filter_attr = filter_key[0] if filter_attr not in self._valid_filter_attrs: raise ClientValidationError("Invalid filter attribute: %s" % key) # we use __ as a separator in the Python library, the APIs use '.' q._filters['.'.join(filter_key)].append(value) return q
[ "def", "filter", "(", "self", ",", "*", "*", "filters", ")", ":", "q", "=", "self", ".", "_clone", "(", ")", "for", "key", ",", "value", "in", "filters", ".", "items", "(", ")", ":", "filter_key", "=", "re", ".", "split", "(", "'__'", ",", "key", ")", "filter_attr", "=", "filter_key", "[", "0", "]", "if", "filter_attr", "not", "in", "self", ".", "_valid_filter_attrs", ":", "raise", "ClientValidationError", "(", "\"Invalid filter attribute: %s\"", "%", "key", ")", "# we use __ as a separator in the Python library, the APIs use '.'", "q", ".", "_filters", "[", "'.'", ".", "join", "(", "filter_key", ")", "]", ".", "append", "(", "value", ")", "return", "q" ]
Add a filter to this query. Appends to any previous filters set. :rtype: Query
[ "Add", "a", "filter", "to", "this", "query", ".", "Appends", "to", "any", "previous", "filters", "set", "." ]
python
train
32.888889
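The double-underscore convention is worth a concrete sketch: `__` in the keyword argument becomes `.` in the stored filter key (the attribute name here is hypothetical):

.. code-block:: python

    q = base_query.filter(created_at__gte='2019-01-01')
    # recorded internally as q._filters['created_at.gte'] == ['2019-01-01'];
    # 'created_at' must appear in _valid_filter_attrs, otherwise
    # ClientValidationError is raised.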
Erotemic/ubelt
ubelt/util_list.py
https://github.com/Erotemic/ubelt/blob/db802f3ad8abba025db74b54f86e6892b8927325/ubelt/util_list.py#L324-L353
def unique_flags(items, key=None): """ Returns a list of booleans corresponding to the first instance of each unique item. Args: items (Sequence): indexable collection of items key (Callable, optional): custom normalization function. If specified returns items where `key(item)` is unique. Returns: List[bool] : flags the items that are unique Example: >>> import ubelt as ub >>> items = [0, 2, 1, 1, 0, 9, 2] >>> flags = unique_flags(items) >>> assert flags == [True, True, True, False, False, True, False] >>> flags = unique_flags(items, key=lambda x: x % 2 == 0) >>> assert flags == [True, False, True, False, False, False, False] """ len_ = len(items) if key is None: item_to_index = dict(zip(reversed(items), reversed(range(len_)))) indices = item_to_index.values() else: indices = argunique(items, key=key) flags = boolmask(indices, len_) return flags
[ "def", "unique_flags", "(", "items", ",", "key", "=", "None", ")", ":", "len_", "=", "len", "(", "items", ")", "if", "key", "is", "None", ":", "item_to_index", "=", "dict", "(", "zip", "(", "reversed", "(", "items", ")", ",", "reversed", "(", "range", "(", "len_", ")", ")", ")", ")", "indices", "=", "item_to_index", ".", "values", "(", ")", "else", ":", "indices", "=", "argunique", "(", "items", ",", "key", "=", "key", ")", "flags", "=", "boolmask", "(", "indices", ",", "len_", ")", "return", "flags" ]
Returns a list of booleans corresponding to the first instance of each unique item. Args: items (Sequence): indexable collection of items key (Callable, optional): custom normalization function. If specified returns items where `key(item)` is unique. Returns: List[bool] : flags the items that are unique Example: >>> import ubelt as ub >>> items = [0, 2, 1, 1, 0, 9, 2] >>> flags = unique_flags(items) >>> assert flags == [True, True, True, False, False, True, False] >>> flags = unique_flags(items, key=lambda x: x % 2 == 0) >>> assert flags == [True, False, True, False, False, False, False]
[ "Returns", "a", "list", "of", "booleans", "corresponding", "to", "the", "first", "instance", "of", "each", "unique", "item", "." ]
python
valid
32.933333
b3j0f/schema
b3j0f/schema/registry.py
https://github.com/b3j0f/schema/blob/1c88c23337f5fef50254e65bd407112c43396dd9/b3j0f/schema/registry.py#L122-L132
def unregister(self, uuid): """Unregister a schema registered with input uuid. :raises: KeyError if uuid is not already registered. """ schema = self._schbyuuid.pop(uuid) # clean schemas by name self._schbyname[schema.name].remove(schema) if not self._schbyname[schema.name]: del self._schbyname[schema.name]
[ "def", "unregister", "(", "self", ",", "uuid", ")", ":", "schema", "=", "self", ".", "_schbyuuid", ".", "pop", "(", "uuid", ")", "# clean schemas by name", "self", ".", "_schbyname", "[", "schema", ".", "name", "]", ".", "remove", "(", "schema", ")", "if", "not", "self", ".", "_schbyname", "[", "schema", ".", "name", "]", ":", "del", "self", ".", "_schbyname", "[", "schema", ".", "name", "]" ]
Unregister a schema registered with input uuid. :raises: KeyError if uuid is not already registered.
[ "Unregister", "a", "schema", "registered", "with", "input", "uuid", "." ]
python
train
33.454545
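A short sketch of the documented KeyError on a second removal (`reg` and `schema` are assumptions):

.. code-block:: python

    reg.unregister(schema.uuid)
    try:
        reg.unregister(schema.uuid)   # same uuid again
    except KeyError:
        print('uuid was not registered')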
IdentityPython/oidcendpoint
src/oidcendpoint/user_info/__init__.py
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/user_info/__init__.py#L41-L66
def filter(self, userinfo, user_info_claims=None): """ Return only those claims that are asked for. It's a best effort task; if essential claims are not present no error is flagged. :param userinfo: A dictionary containing the available info for one user :param user_info_claims: A dictionary specifying the asked for claims :return: A dictionary of filtered claims. """ if user_info_claims is None: return copy.copy(userinfo) else: result = {} missing = [] optional = [] for key, restr in user_info_claims.items(): try: result[key] = userinfo[key] except KeyError: if restr == {"essential": True}: missing.append(key) else: optional.append(key) return result
[ "def", "filter", "(", "self", ",", "userinfo", ",", "user_info_claims", "=", "None", ")", ":", "if", "user_info_claims", "is", "None", ":", "return", "copy", ".", "copy", "(", "userinfo", ")", "else", ":", "result", "=", "{", "}", "missing", "=", "[", "]", "optional", "=", "[", "]", "for", "key", ",", "restr", "in", "user_info_claims", ".", "items", "(", ")", ":", "try", ":", "result", "[", "key", "]", "=", "userinfo", "[", "key", "]", "except", "KeyError", ":", "if", "restr", "==", "{", "\"essential\"", ":", "True", "}", ":", "missing", ".", "append", "(", "key", ")", "else", ":", "optional", ".", "append", "(", "key", ")", "return", "result" ]
Return only those claims that are asked for. It's a best effort task; if essential claims are not present no error is flagged. :param userinfo: A dictionary containing the available info for one user :param user_info_claims: A dictionary specifying the asked for claims :return: A dictionary of filtered claims.
[ "Return", "only", "those", "claims", "that", "are", "asked", "for", ".", "It", "s", "a", "best", "effort", "task", ";", "if", "essential", "claims", "are", "not", "present", "no", "error", "is", "flagged", "." ]
python
train
35.692308
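A sketch of the best-effort behaviour: a missing essential claim is simply omitted from the result rather than raising (the handler instance is assumed):

.. code-block:: python

    userinfo = {'sub': 'alice', 'email': '[email protected]'}
    claims = {'email': {'essential': True}, 'nickname': None}
    result = handler.filter(userinfo, claims)
    # -> {'email': '[email protected]'}; the absent 'nickname' is silently dropped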
pyBookshelf/bookshelf
bookshelf/api_v1.py
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v1.py#L1498-L1506
def load_state_from_disk(): """ loads the state from a local data.json file """ if is_there_state(): with open('data.json', 'r') as f: data = json.load(f) return data else: return False
[ "def", "load_state_from_disk", "(", ")", ":", "if", "is_there_state", "(", ")", ":", "with", "open", "(", "'data.json'", ",", "'r'", ")", "as", "f", ":", "data", "=", "json", ".", "load", "(", "f", ")", "return", "data", "else", ":", "return", "False" ]
loads the state from a local data.json file
[ "loads", "the", "state", "from", "a", "local", "data", ".", "json", "file" ]
python
train
25.444444
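Minimal sketch: the helper returns a dict when ./data.json exists and False otherwise (the key looked up here is hypothetical):

.. code-block:: python

    state = load_state_from_disk()
    if state:
        print(state.get('ip_address'))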
saltstack/salt
salt/modules/win_pkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_pkg.py#L1757-L2044
def remove(name=None, pkgs=None, **kwargs): ''' Remove the passed package(s) from the system using winrepo .. versionadded:: 0.16.0 Args: name (str): The name(s) of the package(s) to be uninstalled. Can be a single package or a comma delimited list of packages, no spaces. pkgs (list): A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Kwargs: version (str): The version of the package to be uninstalled. If this option is used to to uninstall multiple packages, then this version will be applied to all targeted packages. Recommended using only when uninstalling a single package. If this parameter is omitted, the latest version will be uninstalled. saltenv (str): Salt environment. Default ``base`` refresh (bool): Refresh package metadata. Default ``False`` Returns: dict: Returns a dict containing the changes. If the package is removed by ``pkg.remove``: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} If the package is already uninstalled: {'<package>': {'current': 'not installed'}} CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ''' saltenv = kwargs.get('saltenv', 'base') refresh = salt.utils.data.is_true(kwargs.get('refresh', False)) # no need to call _refresh_db_conditional as list_pkgs will do it ret = {} # Make sure name or pkgs is passed if not name and not pkgs: return 'Must pass a single package or a list of packages' # Get package parameters pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0] # Get a list of currently installed software for comparison at the end old = list_pkgs(saltenv=saltenv, refresh=refresh, versions_as_list=True) # Loop through each package changed = [] # list of changed package names for pkgname, version_num in six.iteritems(pkg_params): # Load package information for the package pkginfo = _get_package_info(pkgname, saltenv=saltenv) # Make sure pkginfo was found if not pkginfo: msg = 'Unable to locate package {0}'.format(pkgname) log.error(msg) ret[pkgname] = msg continue # Check to see if package is installed on the system if pkgname not in old: log.debug('%s %s not installed', pkgname, version_num if version_num else '') ret[pkgname] = {'current': 'not installed'} continue removal_targets = [] # Only support a single version number if version_num is not None: # Using the salt cmdline with version=5.3 might be interpreted # as a float it must be converted to a string in order for # string matching to work. version_num = six.text_type(version_num) # At least one version of the software is installed. 
if version_num is None: for ver_install in old[pkgname]: if ver_install not in pkginfo and 'latest' in pkginfo: log.debug('%s %s using package latest entry to to remove', pkgname, version_num) removal_targets.append('latest') else: removal_targets.append(ver_install) else: if version_num in pkginfo: # we known how to remove this version if version_num in old[pkgname]: removal_targets.append(version_num) else: log.debug('%s %s not installed', pkgname, version_num) ret[pkgname] = {'current': '{0} not installed'.format(version_num)} continue elif 'latest' in pkginfo: # we do not have version entry, assume software can self upgrade and use latest log.debug('%s %s using package latest entry to to remove', pkgname, version_num) removal_targets.append('latest') if not removal_targets: log.error('%s %s no definition to remove this version', pkgname, version_num) ret[pkgname] = { 'current': '{0} no definition, cannot removed'.format(version_num) } continue for target in removal_targets: # Get the uninstaller uninstaller = pkginfo[target].get('uninstaller', '') cache_dir = pkginfo[target].get('cache_dir', False) uninstall_flags = pkginfo[target].get('uninstall_flags', '') # If no uninstaller found, use the installer with uninstall flags if not uninstaller and uninstall_flags: uninstaller = pkginfo[target].get('installer', '') # If still no uninstaller found, fail if not uninstaller: log.error( 'No installer or uninstaller configured for package %s', pkgname, ) ret[pkgname] = {'no uninstaller defined': target} continue # Where is the uninstaller if uninstaller.startswith(('salt:', 'http:', 'https:', 'ftp:')): # Check for the 'cache_dir' parameter in the .sls file # If true, the entire directory will be cached instead of the # individual file. This is useful for installations that are not # single files if cache_dir and uninstaller.startswith('salt:'): path, _ = os.path.split(uninstaller) __salt__['cp.cache_dir'](path, saltenv, False, None, '[email protected]$') # Check to see if the uninstaller is cached cached_pkg = __salt__['cp.is_cached'](uninstaller, saltenv) if not cached_pkg: # It's not cached. Cache it, mate. cached_pkg = __salt__['cp.cache_file'](uninstaller, saltenv) # Check if the uninstaller was cached successfully if not cached_pkg: log.error('Unable to cache %s', uninstaller) ret[pkgname] = {'unable to cache': uninstaller} continue # Compare the hash of the cached installer to the source only if # the file is hosted on salt: # TODO cp.cache_file does cache and hash checking? So why do it again? if uninstaller.startswith('salt:'): if __salt__['cp.hash_file'](uninstaller, saltenv) != \ __salt__['cp.hash_file'](cached_pkg): try: cached_pkg = __salt__['cp.cache_file']( uninstaller, saltenv) except MinionError as exc: return '{0}: {1}'.format(exc, uninstaller) # Check if the installer was cached successfully if not cached_pkg: log.error('Unable to cache %s', uninstaller) ret[pkgname] = {'unable to cache': uninstaller} continue else: # Run the uninstaller directly # (not hosted on salt:, https:, etc.) 
cached_pkg = os.path.expandvars(uninstaller) # Fix non-windows slashes cached_pkg = cached_pkg.replace('/', '\\') cache_path, _ = os.path.split(cached_pkg) # os.path.expandvars is not required as we run everything through cmd.exe /s /c if kwargs.get('extra_uninstall_flags'): uninstall_flags = '{0} {1}'.format( uninstall_flags, kwargs.get('extra_uninstall_flags', '')) # Compute msiexec string use_msiexec, msiexec = _get_msiexec(pkginfo[target].get('msiexec', False)) cmd_shell = os.getenv('ComSpec', '{0}\\system32\\cmd.exe'.format(os.getenv('WINDIR'))) # Build cmd and arguments # cmd and arguments must be separated for use with the task scheduler if use_msiexec: # Check if uninstaller is set to {guid}, if not we assume its a remote msi file. # which has already been downloaded. arguments = '"{0}" /X "{1}"'.format(msiexec, cached_pkg) else: arguments = '"{0}"'.format(cached_pkg) if uninstall_flags: arguments = '{0} {1}'.format(arguments, uninstall_flags) # Uninstall the software changed.append(pkgname) # Check Use Scheduler Option if pkginfo[target].get('use_scheduler', False): # Create Scheduled Task __salt__['task.create_task'](name='update-salt-software', user_name='System', force=True, action_type='Execute', cmd=cmd_shell, arguments='/s /c "{0}"'.format(arguments), start_in=cache_path, trigger_type='Once', start_date='1975-01-01', start_time='01:00', ac_only=False, stop_if_on_batteries=False) # Run Scheduled Task if not __salt__['task.run_wait'](name='update-salt-software'): log.error('Failed to remove %s', pkgname) log.error('Scheduled Task failed to run') ret[pkgname] = {'uninstall status': 'failed'} else: # Launch the command result = __salt__['cmd.run_all']( '"{0}" /s /c "{1}"'.format(cmd_shell, arguments), output_loglevel='trace', python_shell=False, redirect_stderr=True) if not result['retcode']: ret[pkgname] = {'uninstall status': 'success'} changed.append(pkgname) elif result['retcode'] == 3010: # 3010 is ERROR_SUCCESS_REBOOT_REQUIRED report_reboot_exit_codes = kwargs.pop( 'report_reboot_exit_codes', True) if report_reboot_exit_codes: __salt__['system.set_reboot_required_witnessed']() ret[pkgname] = {'uninstall status': 'success, reboot required'} changed.append(pkgname) elif result['retcode'] == 1641: # 1641 is ERROR_SUCCESS_REBOOT_INITIATED ret[pkgname] = {'uninstall status': 'success, reboot initiated'} changed.append(pkgname) else: log.error('Failed to remove %s', pkgname) log.error('retcode %s', result['retcode']) log.error('uninstaller output: %s', result['stdout']) ret[pkgname] = {'uninstall status': 'failed'} # Get a new list of installed software new = list_pkgs(saltenv=saltenv, refresh=False) # Take the "old" package list and convert the values to strings in # preparation for the comparison below. __salt__['pkg_resource.stringify'](old) # Check for changes in the registry difference = salt.utils.data.compare_dicts(old, new) found_chgs = all(name in difference for name in changed) end_t = time.time() + 3 # give it 3 seconds to catch up. while not found_chgs and time.time() < end_t: time.sleep(0.5) new = list_pkgs(saltenv=saltenv, refresh=False) difference = salt.utils.data.compare_dicts(old, new) found_chgs = all(name in difference for name in changed) if not found_chgs: log.warning('Expected changes for package removal may not have occured') # Compare the software list before and after # Add the difference to ret ret.update(difference) return ret
[ "def", "remove", "(", "name", "=", "None", ",", "pkgs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "saltenv", "=", "kwargs", ".", "get", "(", "'saltenv'", ",", "'base'", ")", "refresh", "=", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "kwargs", ".", "get", "(", "'refresh'", ",", "False", ")", ")", "# no need to call _refresh_db_conditional as list_pkgs will do it", "ret", "=", "{", "}", "# Make sure name or pkgs is passed", "if", "not", "name", "and", "not", "pkgs", ":", "return", "'Must pass a single package or a list of packages'", "# Get package parameters", "pkg_params", "=", "__salt__", "[", "'pkg_resource.parse_targets'", "]", "(", "name", ",", "pkgs", ",", "*", "*", "kwargs", ")", "[", "0", "]", "# Get a list of currently installed software for comparison at the end", "old", "=", "list_pkgs", "(", "saltenv", "=", "saltenv", ",", "refresh", "=", "refresh", ",", "versions_as_list", "=", "True", ")", "# Loop through each package", "changed", "=", "[", "]", "# list of changed package names", "for", "pkgname", ",", "version_num", "in", "six", ".", "iteritems", "(", "pkg_params", ")", ":", "# Load package information for the package", "pkginfo", "=", "_get_package_info", "(", "pkgname", ",", "saltenv", "=", "saltenv", ")", "# Make sure pkginfo was found", "if", "not", "pkginfo", ":", "msg", "=", "'Unable to locate package {0}'", ".", "format", "(", "pkgname", ")", "log", ".", "error", "(", "msg", ")", "ret", "[", "pkgname", "]", "=", "msg", "continue", "# Check to see if package is installed on the system", "if", "pkgname", "not", "in", "old", ":", "log", ".", "debug", "(", "'%s %s not installed'", ",", "pkgname", ",", "version_num", "if", "version_num", "else", "''", ")", "ret", "[", "pkgname", "]", "=", "{", "'current'", ":", "'not installed'", "}", "continue", "removal_targets", "=", "[", "]", "# Only support a single version number", "if", "version_num", "is", "not", "None", ":", "# Using the salt cmdline with version=5.3 might be interpreted", "# as a float it must be converted to a string in order for", "# string matching to work.", "version_num", "=", "six", ".", "text_type", "(", "version_num", ")", "# At least one version of the software is installed.", "if", "version_num", "is", "None", ":", "for", "ver_install", "in", "old", "[", "pkgname", "]", ":", "if", "ver_install", "not", "in", "pkginfo", "and", "'latest'", "in", "pkginfo", ":", "log", ".", "debug", "(", "'%s %s using package latest entry to to remove'", ",", "pkgname", ",", "version_num", ")", "removal_targets", ".", "append", "(", "'latest'", ")", "else", ":", "removal_targets", ".", "append", "(", "ver_install", ")", "else", ":", "if", "version_num", "in", "pkginfo", ":", "# we known how to remove this version", "if", "version_num", "in", "old", "[", "pkgname", "]", ":", "removal_targets", ".", "append", "(", "version_num", ")", "else", ":", "log", ".", "debug", "(", "'%s %s not installed'", ",", "pkgname", ",", "version_num", ")", "ret", "[", "pkgname", "]", "=", "{", "'current'", ":", "'{0} not installed'", ".", "format", "(", "version_num", ")", "}", "continue", "elif", "'latest'", "in", "pkginfo", ":", "# we do not have version entry, assume software can self upgrade and use latest", "log", ".", "debug", "(", "'%s %s using package latest entry to to remove'", ",", "pkgname", ",", "version_num", ")", "removal_targets", ".", "append", "(", "'latest'", ")", "if", "not", "removal_targets", ":", "log", ".", "error", "(", "'%s %s no definition to remove this version'", ",", "pkgname", ",", 
"version_num", ")", "ret", "[", "pkgname", "]", "=", "{", "'current'", ":", "'{0} no definition, cannot removed'", ".", "format", "(", "version_num", ")", "}", "continue", "for", "target", "in", "removal_targets", ":", "# Get the uninstaller", "uninstaller", "=", "pkginfo", "[", "target", "]", ".", "get", "(", "'uninstaller'", ",", "''", ")", "cache_dir", "=", "pkginfo", "[", "target", "]", ".", "get", "(", "'cache_dir'", ",", "False", ")", "uninstall_flags", "=", "pkginfo", "[", "target", "]", ".", "get", "(", "'uninstall_flags'", ",", "''", ")", "# If no uninstaller found, use the installer with uninstall flags", "if", "not", "uninstaller", "and", "uninstall_flags", ":", "uninstaller", "=", "pkginfo", "[", "target", "]", ".", "get", "(", "'installer'", ",", "''", ")", "# If still no uninstaller found, fail", "if", "not", "uninstaller", ":", "log", ".", "error", "(", "'No installer or uninstaller configured for package %s'", ",", "pkgname", ",", ")", "ret", "[", "pkgname", "]", "=", "{", "'no uninstaller defined'", ":", "target", "}", "continue", "# Where is the uninstaller", "if", "uninstaller", ".", "startswith", "(", "(", "'salt:'", ",", "'http:'", ",", "'https:'", ",", "'ftp:'", ")", ")", ":", "# Check for the 'cache_dir' parameter in the .sls file", "# If true, the entire directory will be cached instead of the", "# individual file. This is useful for installations that are not", "# single files", "if", "cache_dir", "and", "uninstaller", ".", "startswith", "(", "'salt:'", ")", ":", "path", ",", "_", "=", "os", ".", "path", ".", "split", "(", "uninstaller", ")", "__salt__", "[", "'cp.cache_dir'", "]", "(", "path", ",", "saltenv", ",", "False", ",", "None", ",", "'[email protected]$'", ")", "# Check to see if the uninstaller is cached", "cached_pkg", "=", "__salt__", "[", "'cp.is_cached'", "]", "(", "uninstaller", ",", "saltenv", ")", "if", "not", "cached_pkg", ":", "# It's not cached. Cache it, mate.", "cached_pkg", "=", "__salt__", "[", "'cp.cache_file'", "]", "(", "uninstaller", ",", "saltenv", ")", "# Check if the uninstaller was cached successfully", "if", "not", "cached_pkg", ":", "log", ".", "error", "(", "'Unable to cache %s'", ",", "uninstaller", ")", "ret", "[", "pkgname", "]", "=", "{", "'unable to cache'", ":", "uninstaller", "}", "continue", "# Compare the hash of the cached installer to the source only if", "# the file is hosted on salt:", "# TODO cp.cache_file does cache and hash checking? 
So why do it again?", "if", "uninstaller", ".", "startswith", "(", "'salt:'", ")", ":", "if", "__salt__", "[", "'cp.hash_file'", "]", "(", "uninstaller", ",", "saltenv", ")", "!=", "__salt__", "[", "'cp.hash_file'", "]", "(", "cached_pkg", ")", ":", "try", ":", "cached_pkg", "=", "__salt__", "[", "'cp.cache_file'", "]", "(", "uninstaller", ",", "saltenv", ")", "except", "MinionError", "as", "exc", ":", "return", "'{0}: {1}'", ".", "format", "(", "exc", ",", "uninstaller", ")", "# Check if the installer was cached successfully", "if", "not", "cached_pkg", ":", "log", ".", "error", "(", "'Unable to cache %s'", ",", "uninstaller", ")", "ret", "[", "pkgname", "]", "=", "{", "'unable to cache'", ":", "uninstaller", "}", "continue", "else", ":", "# Run the uninstaller directly", "# (not hosted on salt:, https:, etc.)", "cached_pkg", "=", "os", ".", "path", ".", "expandvars", "(", "uninstaller", ")", "# Fix non-windows slashes", "cached_pkg", "=", "cached_pkg", ".", "replace", "(", "'/'", ",", "'\\\\'", ")", "cache_path", ",", "_", "=", "os", ".", "path", ".", "split", "(", "cached_pkg", ")", "# os.path.expandvars is not required as we run everything through cmd.exe /s /c", "if", "kwargs", ".", "get", "(", "'extra_uninstall_flags'", ")", ":", "uninstall_flags", "=", "'{0} {1}'", ".", "format", "(", "uninstall_flags", ",", "kwargs", ".", "get", "(", "'extra_uninstall_flags'", ",", "''", ")", ")", "# Compute msiexec string", "use_msiexec", ",", "msiexec", "=", "_get_msiexec", "(", "pkginfo", "[", "target", "]", ".", "get", "(", "'msiexec'", ",", "False", ")", ")", "cmd_shell", "=", "os", ".", "getenv", "(", "'ComSpec'", ",", "'{0}\\\\system32\\\\cmd.exe'", ".", "format", "(", "os", ".", "getenv", "(", "'WINDIR'", ")", ")", ")", "# Build cmd and arguments", "# cmd and arguments must be separated for use with the task scheduler", "if", "use_msiexec", ":", "# Check if uninstaller is set to {guid}, if not we assume its a remote msi file.", "# which has already been downloaded.", "arguments", "=", "'\"{0}\" /X \"{1}\"'", ".", "format", "(", "msiexec", ",", "cached_pkg", ")", "else", ":", "arguments", "=", "'\"{0}\"'", ".", "format", "(", "cached_pkg", ")", "if", "uninstall_flags", ":", "arguments", "=", "'{0} {1}'", ".", "format", "(", "arguments", ",", "uninstall_flags", ")", "# Uninstall the software", "changed", ".", "append", "(", "pkgname", ")", "# Check Use Scheduler Option", "if", "pkginfo", "[", "target", "]", ".", "get", "(", "'use_scheduler'", ",", "False", ")", ":", "# Create Scheduled Task", "__salt__", "[", "'task.create_task'", "]", "(", "name", "=", "'update-salt-software'", ",", "user_name", "=", "'System'", ",", "force", "=", "True", ",", "action_type", "=", "'Execute'", ",", "cmd", "=", "cmd_shell", ",", "arguments", "=", "'/s /c \"{0}\"'", ".", "format", "(", "arguments", ")", ",", "start_in", "=", "cache_path", ",", "trigger_type", "=", "'Once'", ",", "start_date", "=", "'1975-01-01'", ",", "start_time", "=", "'01:00'", ",", "ac_only", "=", "False", ",", "stop_if_on_batteries", "=", "False", ")", "# Run Scheduled Task", "if", "not", "__salt__", "[", "'task.run_wait'", "]", "(", "name", "=", "'update-salt-software'", ")", ":", "log", ".", "error", "(", "'Failed to remove %s'", ",", "pkgname", ")", "log", ".", "error", "(", "'Scheduled Task failed to run'", ")", "ret", "[", "pkgname", "]", "=", "{", "'uninstall status'", ":", "'failed'", "}", "else", ":", "# Launch the command", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "'\"{0}\" /s /c \"{1}\"'", ".", "format", "(", 
"cmd_shell", ",", "arguments", ")", ",", "output_loglevel", "=", "'trace'", ",", "python_shell", "=", "False", ",", "redirect_stderr", "=", "True", ")", "if", "not", "result", "[", "'retcode'", "]", ":", "ret", "[", "pkgname", "]", "=", "{", "'uninstall status'", ":", "'success'", "}", "changed", ".", "append", "(", "pkgname", ")", "elif", "result", "[", "'retcode'", "]", "==", "3010", ":", "# 3010 is ERROR_SUCCESS_REBOOT_REQUIRED", "report_reboot_exit_codes", "=", "kwargs", ".", "pop", "(", "'report_reboot_exit_codes'", ",", "True", ")", "if", "report_reboot_exit_codes", ":", "__salt__", "[", "'system.set_reboot_required_witnessed'", "]", "(", ")", "ret", "[", "pkgname", "]", "=", "{", "'uninstall status'", ":", "'success, reboot required'", "}", "changed", ".", "append", "(", "pkgname", ")", "elif", "result", "[", "'retcode'", "]", "==", "1641", ":", "# 1641 is ERROR_SUCCESS_REBOOT_INITIATED", "ret", "[", "pkgname", "]", "=", "{", "'uninstall status'", ":", "'success, reboot initiated'", "}", "changed", ".", "append", "(", "pkgname", ")", "else", ":", "log", ".", "error", "(", "'Failed to remove %s'", ",", "pkgname", ")", "log", ".", "error", "(", "'retcode %s'", ",", "result", "[", "'retcode'", "]", ")", "log", ".", "error", "(", "'uninstaller output: %s'", ",", "result", "[", "'stdout'", "]", ")", "ret", "[", "pkgname", "]", "=", "{", "'uninstall status'", ":", "'failed'", "}", "# Get a new list of installed software", "new", "=", "list_pkgs", "(", "saltenv", "=", "saltenv", ",", "refresh", "=", "False", ")", "# Take the \"old\" package list and convert the values to strings in", "# preparation for the comparison below.", "__salt__", "[", "'pkg_resource.stringify'", "]", "(", "old", ")", "# Check for changes in the registry", "difference", "=", "salt", ".", "utils", ".", "data", ".", "compare_dicts", "(", "old", ",", "new", ")", "found_chgs", "=", "all", "(", "name", "in", "difference", "for", "name", "in", "changed", ")", "end_t", "=", "time", ".", "time", "(", ")", "+", "3", "# give it 3 seconds to catch up.", "while", "not", "found_chgs", "and", "time", ".", "time", "(", ")", "<", "end_t", ":", "time", ".", "sleep", "(", "0.5", ")", "new", "=", "list_pkgs", "(", "saltenv", "=", "saltenv", ",", "refresh", "=", "False", ")", "difference", "=", "salt", ".", "utils", ".", "data", ".", "compare_dicts", "(", "old", ",", "new", ")", "found_chgs", "=", "all", "(", "name", "in", "difference", "for", "name", "in", "changed", ")", "if", "not", "found_chgs", ":", "log", ".", "warning", "(", "'Expected changes for package removal may not have occured'", ")", "# Compare the software list before and after", "# Add the difference to ret", "ret", ".", "update", "(", "difference", ")", "return", "ret" ]
Remove the passed package(s) from the system using winrepo .. versionadded:: 0.16.0 Args: name (str): The name(s) of the package(s) to be uninstalled. Can be a single package or a comma delimited list of packages, no spaces. pkgs (list): A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Kwargs: version (str): The version of the package to be uninstalled. If this option is used to uninstall multiple packages, then this version will be applied to all targeted packages. Recommended using only when uninstalling a single package. If this parameter is omitted, the latest version will be uninstalled. saltenv (str): Salt environment. Default ``base`` refresh (bool): Refresh package metadata. Default ``False`` Returns: dict: Returns a dict containing the changes. If the package is removed by ``pkg.remove``: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} If the package is already uninstalled: {'<package>': {'current': 'not installed'}} CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]'
[ "Remove", "the", "passed", "package", "(", "s", ")", "from", "the", "system", "using", "winrepo" ]
python
train
43.9375
ibis-project/ibis
ibis/mapd/client.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/mapd/client.py#L485-L508
def drop_database(self, name, force=False): """ Drop an MapD database Parameters ---------- name : string Database name force : boolean, default False If False and there are any tables in this database, raises an IntegrityError """ tables = [] if not force or self.database(name): tables = self.list_tables(database=name) if not force and len(tables): raise com.IntegrityError( 'Database {0} must be empty before being dropped, or set ' 'force=True'.format(name) ) statement = ddl.DropDatabase(name) self._execute(statement)
[ "def", "drop_database", "(", "self", ",", "name", ",", "force", "=", "False", ")", ":", "tables", "=", "[", "]", "if", "not", "force", "or", "self", ".", "database", "(", "name", ")", ":", "tables", "=", "self", ".", "list_tables", "(", "database", "=", "name", ")", "if", "not", "force", "and", "len", "(", "tables", ")", ":", "raise", "com", ".", "IntegrityError", "(", "'Database {0} must be empty before being dropped, or set '", "'force=True'", ".", "format", "(", "name", ")", ")", "statement", "=", "ddl", ".", "DropDatabase", "(", "name", ")", "self", ".", "_execute", "(", "statement", ")" ]
Drop a MapD database Parameters ---------- name : string Database name force : boolean, default False If False and there are any tables in this database, raises an IntegrityError
[ "Drop", "an", "MapD", "database" ]
python
train
29.083333
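A hedged usage sketch against an assumed MapD client `con`:

.. code-block:: python

    # With force=False (the default) a non-empty database raises
    # IntegrityError; force=True skips the emptiness check.
    con.drop_database('scratch_db', force=True)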
yyuu/botornado
boto/s3/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/s3/connection.py#L193-L290
def build_post_form_args(self, bucket_name, key, expires_in = 6000, acl = None, success_action_redirect = None, max_content_length = None, http_method = "http", fields=None, conditions=None): """ Taken from the AWS book Python examples and modified for use with boto This only returns the arguments required for the post form, not the actual form. This does not return the file input field which also needs to be added :type bucket_name: string :param bucket_name: Bucket to submit to :type key: string :param key: Key name, optionally add ${filename} to the end to attach the submitted filename :type expires_in: integer :param expires_in: Time (in seconds) before this expires, defaults to 6000 :type acl: :class:`boto.s3.acl.ACL` :param acl: ACL rule to use, if any :type success_action_redirect: string :param success_action_redirect: URL to redirect to on success :type max_content_length: integer :param max_content_length: Maximum size for this file :type http_method: string :param http_method: HTTP Method to use, "http" or "https" :rtype: dict :return: A dictionary containing field names/values as well as a url to POST to .. code-block:: python { "action": action_url_to_post_to, "fields": [ { "name": field_name, "value": field_value }, { "name": field_name2, "value": field_value2 } ] } """ if fields == None: fields = [] if conditions == None: conditions = [] expiration = time.gmtime(int(time.time() + expires_in)) # Generate policy document conditions.append('{"bucket": "%s"}' % bucket_name) if key.endswith("${filename}"): conditions.append('["starts-with", "$key", "%s"]' % key[:-len("${filename}")]) else: conditions.append('{"key": "%s"}' % key) if acl: conditions.append('{"acl": "%s"}' % acl) fields.append({ "name": "acl", "value": acl}) if success_action_redirect: conditions.append('{"success_action_redirect": "%s"}' % success_action_redirect) fields.append({ "name": "success_action_redirect", "value": success_action_redirect}) if max_content_length: conditions.append('["content-length-range", 0, %i]' % max_content_length) fields.append({"name":'content-length-range', "value": "0,%i" % max_content_length}) policy = self.build_post_policy(expiration, conditions) # Add the base64-encoded policy document as the 'policy' field policy_b64 = base64.b64encode(policy) fields.append({"name": "policy", "value": policy_b64}) # Add the AWS access key as the 'AWSAccessKeyId' field fields.append({"name": "AWSAccessKeyId", "value": self.aws_access_key_id}) # Add signature for encoded policy document as the 'AWSAccessKeyId' field signature = self._auth_handler.sign_string(policy_b64) fields.append({"name": "signature", "value": signature}) fields.append({"name": "key", "value": key}) # HTTPS protocol will be used if the secure HTTP option is enabled. url = '%s://%s/' % (http_method, self.calling_format.build_host(self.server_name(), bucket_name)) return {"action": url, "fields": fields}
[ "def", "build_post_form_args", "(", "self", ",", "bucket_name", ",", "key", ",", "expires_in", "=", "6000", ",", "acl", "=", "None", ",", "success_action_redirect", "=", "None", ",", "max_content_length", "=", "None", ",", "http_method", "=", "\"http\"", ",", "fields", "=", "None", ",", "conditions", "=", "None", ")", ":", "if", "fields", "==", "None", ":", "fields", "=", "[", "]", "if", "conditions", "==", "None", ":", "conditions", "=", "[", "]", "expiration", "=", "time", ".", "gmtime", "(", "int", "(", "time", ".", "time", "(", ")", "+", "expires_in", ")", ")", "# Generate policy document", "conditions", ".", "append", "(", "'{\"bucket\": \"%s\"}'", "%", "bucket_name", ")", "if", "key", ".", "endswith", "(", "\"${filename}\"", ")", ":", "conditions", ".", "append", "(", "'[\"starts-with\", \"$key\", \"%s\"]'", "%", "key", "[", ":", "-", "len", "(", "\"${filename}\"", ")", "]", ")", "else", ":", "conditions", ".", "append", "(", "'{\"key\": \"%s\"}'", "%", "key", ")", "if", "acl", ":", "conditions", ".", "append", "(", "'{\"acl\": \"%s\"}'", "%", "acl", ")", "fields", ".", "append", "(", "{", "\"name\"", ":", "\"acl\"", ",", "\"value\"", ":", "acl", "}", ")", "if", "success_action_redirect", ":", "conditions", ".", "append", "(", "'{\"success_action_redirect\": \"%s\"}'", "%", "success_action_redirect", ")", "fields", ".", "append", "(", "{", "\"name\"", ":", "\"success_action_redirect\"", ",", "\"value\"", ":", "success_action_redirect", "}", ")", "if", "max_content_length", ":", "conditions", ".", "append", "(", "'[\"content-length-range\", 0, %i]'", "%", "max_content_length", ")", "fields", ".", "append", "(", "{", "\"name\"", ":", "'content-length-range'", ",", "\"value\"", ":", "\"0,%i\"", "%", "max_content_length", "}", ")", "policy", "=", "self", ".", "build_post_policy", "(", "expiration", ",", "conditions", ")", "# Add the base64-encoded policy document as the 'policy' field", "policy_b64", "=", "base64", ".", "b64encode", "(", "policy", ")", "fields", ".", "append", "(", "{", "\"name\"", ":", "\"policy\"", ",", "\"value\"", ":", "policy_b64", "}", ")", "# Add the AWS access key as the 'AWSAccessKeyId' field", "fields", ".", "append", "(", "{", "\"name\"", ":", "\"AWSAccessKeyId\"", ",", "\"value\"", ":", "self", ".", "aws_access_key_id", "}", ")", "# Add signature for encoded policy document as the 'AWSAccessKeyId' field", "signature", "=", "self", ".", "_auth_handler", ".", "sign_string", "(", "policy_b64", ")", "fields", ".", "append", "(", "{", "\"name\"", ":", "\"signature\"", ",", "\"value\"", ":", "signature", "}", ")", "fields", ".", "append", "(", "{", "\"name\"", ":", "\"key\"", ",", "\"value\"", ":", "key", "}", ")", "# HTTPS protocol will be used if the secure HTTP option is enabled.", "url", "=", "'%s://%s/'", "%", "(", "http_method", ",", "self", ".", "calling_format", ".", "build_host", "(", "self", ".", "server_name", "(", ")", ",", "bucket_name", ")", ")", "return", "{", "\"action\"", ":", "url", ",", "\"fields\"", ":", "fields", "}" ]
Taken from the AWS book Python examples and modified for use with boto This only returns the arguments required for the post form, not the actual form. This does not return the file input field which also needs to be added :type bucket_name: string :param bucket_name: Bucket to submit to :type key: string :param key: Key name, optionally add ${filename} to the end to attach the submitted filename :type expires_in: integer :param expires_in: Time (in seconds) before this expires, defaults to 6000 :type acl: :class:`boto.s3.acl.ACL` :param acl: ACL rule to use, if any :type success_action_redirect: string :param success_action_redirect: URL to redirect to on success :type max_content_length: integer :param max_content_length: Maximum size for this file :type http_method: string :param http_method: HTTP Method to use, "http" or "https" :rtype: dict :return: A dictionary containing field names/values as well as a url to POST to .. code-block:: python { "action": action_url_to_post_to, "fields": [ { "name": field_name, "value": field_value }, { "name": field_name2, "value": field_value2 } ] }
[ "Taken", "from", "the", "AWS", "book", "Python", "examples", "and", "modified", "for", "use", "with", "boto", "This", "only", "returns", "the", "arguments", "required", "for", "the", "post", "form", "not", "the", "actual", "form", ".", "This", "does", "not", "return", "the", "file", "input", "field", "which", "also", "needs", "to", "be", "added", ":", "type", "bucket_name", ":", "string", ":", "param", "bucket_name", ":", "Bucket", "to", "submit", "to", ":", "type", "key", ":", "string", ":", "param", "key", ":", "Key", "name", "optionally", "add", "$", "{", "filename", "}", "to", "the", "end", "to", "attach", "the", "submitted", "filename", ":", "type", "expires_in", ":", "integer", ":", "param", "expires_in", ":", "Time", "(", "in", "seconds", ")", "before", "this", "expires", "defaults", "to", "6000", ":", "type", "acl", ":", ":", "class", ":", "boto", ".", "s3", ".", "acl", ".", "ACL", ":", "param", "acl", ":", "ACL", "rule", "to", "use", "if", "any", ":", "type", "success_action_redirect", ":", "string", ":", "param", "success_action_redirect", ":", "URL", "to", "redirect", "to", "on", "success", ":", "type", "max_content_length", ":", "integer", ":", "param", "max_content_length", ":", "Maximum", "size", "for", "this", "file", ":", "type", "http_method", ":", "string", ":", "param", "http_method", ":", "HTTP", "Method", "to", "use", "http", "or", "https", ":", "rtype", ":", "dict", ":", "return", ":", "A", "dictionary", "containing", "field", "names", "/", "values", "as", "well", "as", "a", "url", "to", "POST", "to", "..", "code", "-", "block", "::", "python", "{", "action", ":", "action_url_to_post_to", "fields", ":", "[", "{", "name", ":", "field_name", "value", ":", "field_value", "}", "{", "name", ":", "field_name2", "value", ":", "field_value2", "}", "]", "}" ]
python
train
40.857143
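Since the method returns only the action URL and field list, the caller still has to render the form and append the file input, as the docstring notes. A minimal sketch (the connection and bucket name are assumptions):

.. code-block:: python

    args = conn.build_post_form_args('my-bucket', 'uploads/${filename}',
                                     expires_in=3600, acl='public-read')
    hidden = ''.join('<input type="hidden" name="%s" value="%s"/>'
                     % (f['name'], f['value']) for f in args['fields'])
    # The file input is appended manually; it is not in args['fields'].
    form = ('<form action="%s" method="post" enctype="multipart/form-data">'
            '%s<input type="file" name="file"/><input type="submit"/></form>'
            % (args['action'], hidden))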
ponty/eagexp
eagexp/partlist.py
https://github.com/ponty/eagexp/blob/1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2/eagexp/partlist.py#L90-L100
def structured_partlist(input, timeout=20, showgui=False): '''export partlist by eagle, then parse it :param input: .sch or .brd file name :param timeout: int :param showgui: Bool, True -> do not hide eagle GUI :rtype: tuple of header list and dict list: (['part','value',..], [{'part':'C1', 'value':'1n'}, ..]) ''' s = raw_partlist(input=input, timeout=timeout, showgui=showgui) return parse_partlist(s)
[ "def", "structured_partlist", "(", "input", ",", "timeout", "=", "20", ",", "showgui", "=", "False", ")", ":", "s", "=", "raw_partlist", "(", "input", "=", "input", ",", "timeout", "=", "timeout", ",", "showgui", "=", "showgui", ")", "return", "parse_partlist", "(", "s", ")" ]
export partlist by eagle, then parse it :param input: .sch or .brd file name :param timeout: int :param showgui: Bool, True -> do not hide eagle GUI :rtype: tuple of header list and dict list: (['part','value',..], [{'part':'C1', 'value':'1n'}, ..])
[ "export", "partlist", "by", "eagle", "then", "parse", "it" ]
python
train
38.909091
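A sketch of unpacking the documented return value (the board file name is hypothetical):

.. code-block:: python

    header, parts = structured_partlist('demo.brd', timeout=30)
    for row in parts:                       # each row is a dict, per the rtype
        print(row.get('part'), row.get('value'))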
eandersson/amqpstorm
amqpstorm/management/exchange.py
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/exchange.py#L14-L31
def get(self, exchange, virtual_host='/'): """Get Exchange details. :param str exchange: Exchange name :param str virtual_host: Virtual host name :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict """ virtual_host = quote(virtual_host, '') return self.http_client.get( API_EXCHANGE % ( virtual_host, exchange) )
[ "def", "get", "(", "self", ",", "exchange", ",", "virtual_host", "=", "'/'", ")", ":", "virtual_host", "=", "quote", "(", "virtual_host", ",", "''", ")", "return", "self", ".", "http_client", ".", "get", "(", "API_EXCHANGE", "%", "(", "virtual_host", ",", "exchange", ")", ")" ]
Get Exchange details. :param str exchange: Exchange name :param str virtual_host: Virtual host name :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict
[ "Get", "Exchange", "details", "." ]
python
train
29.611111
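A hedged sketch against an assumed management client:

.. code-block:: python

    # `client` is assumed to be an amqpstorm ManagementApi instance
    # exposing this Exchange handler as client.exchange.
    details = client.exchange.get('amq.direct', virtual_host='/')
    print(details.get('type'))   # e.g. 'direct'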
HiPERCAM/hcam_widgets
hcam_widgets/widgets.py
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/widgets.py#L1202-L1210
def set_min(self, fmin): """ Updates minimum value """ if round(100000*fmin) != 100000*fmin: raise DriverError('utils.widgets.Expose.set_min: ' + 'fmin must be a multiple of 0.00001') self.fmin = fmin self.set(self.fmin)
[ "def", "set_min", "(", "self", ",", "fmin", ")", ":", "if", "round", "(", "100000", "*", "fmin", ")", "!=", "100000", "*", "fmin", ":", "raise", "DriverError", "(", "'utils.widgets.Expose.set_min: '", "+", "'fmin must be a multiple of 0.00001'", ")", "self", ".", "fmin", "=", "fmin", "self", ".", "set", "(", "self", ".", "fmin", ")" ]
Updates minimum value
[ "Updates", "minimum", "value" ]
python
train
33.555556
cloud-custodian/cloud-custodian
tools/c7n_salactus/c7n_salactus/cli.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_salactus/c7n_salactus/cli.py#L587-L641
def inspect_partitions(bucket): """Discover the partitions on a bucket via introspection. For large buckets which lack s3 inventories, salactus will attempt to process objects in parallel on the bucket by breaking the bucket into a separate keyspace partitions. It does this with a heurestic that attempts to sample the keyspace and determine appropriate subparts. This command provides additional visibility into the partitioning of a bucket by showing how salactus would partition a given bucket. """ logging.basicConfig( level=logging.INFO, format="%(asctime)s: %(name)s:%(levelname)s %(message)s") logging.getLogger('botocore').setLevel(level=logging.WARNING) state = db.db() # add db.bucket accessor found = None for b in state.buckets(): if b.name == bucket: found = b break if not found: click.echo("no bucket named: %s" % bucket) return keyset = [] partitions = [] def process_keyset(bid, page): keyset.append(len(page)) def process_bucket_iterator(bid, prefix, delimiter="", **continuation): partitions.append(prefix) # synchronous execution def invoke(f, *args, **kw): return f(*args, **kw) # unleash the monkies ;-) worker.connection.hincrby = lambda x, y, z: True worker.invoke = invoke worker.process_keyset = process_keyset worker.process_bucket_iterator = process_bucket_iterator # kick it off worker.process_bucket_partitions(b.bucket_id) keys_scanned = sum(keyset) click.echo( "Found %d partitions %s keys scanned during partitioning" % ( len(partitions), keys_scanned)) click.echo("\n".join(partitions))
[ "def", "inspect_partitions", "(", "bucket", ")", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ",", "format", "=", "\"%(asctime)s: %(name)s:%(levelname)s %(message)s\"", ")", "logging", ".", "getLogger", "(", "'botocore'", ")", ".", "setLevel", "(", "level", "=", "logging", ".", "WARNING", ")", "state", "=", "db", ".", "db", "(", ")", "# add db.bucket accessor", "found", "=", "None", "for", "b", "in", "state", ".", "buckets", "(", ")", ":", "if", "b", ".", "name", "==", "bucket", ":", "found", "=", "b", "break", "if", "not", "found", ":", "click", ".", "echo", "(", "\"no bucket named: %s\"", "%", "bucket", ")", "return", "keyset", "=", "[", "]", "partitions", "=", "[", "]", "def", "process_keyset", "(", "bid", ",", "page", ")", ":", "keyset", ".", "append", "(", "len", "(", "page", ")", ")", "def", "process_bucket_iterator", "(", "bid", ",", "prefix", ",", "delimiter", "=", "\"\"", ",", "*", "*", "continuation", ")", ":", "partitions", ".", "append", "(", "prefix", ")", "# synchronous execution", "def", "invoke", "(", "f", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kw", ")", "# unleash the monkies ;-)", "worker", ".", "connection", ".", "hincrby", "=", "lambda", "x", ",", "y", ",", "z", ":", "True", "worker", ".", "invoke", "=", "invoke", "worker", ".", "process_keyset", "=", "process_keyset", "worker", ".", "process_bucket_iterator", "=", "process_bucket_iterator", "# kick it off", "worker", ".", "process_bucket_partitions", "(", "b", ".", "bucket_id", ")", "keys_scanned", "=", "sum", "(", "keyset", ")", "click", ".", "echo", "(", "\"Found %d partitions %s keys scanned during partitioning\"", "%", "(", "len", "(", "partitions", ")", ",", "keys_scanned", ")", ")", "click", ".", "echo", "(", "\"\\n\"", ".", "join", "(", "partitions", ")", ")" ]
Discover the partitions on a bucket via introspection.

    For large buckets which lack s3 inventories, salactus will attempt
    to process objects in parallel on the bucket by breaking the bucket
    into separate keyspace partitions. It does this with a heuristic
    that attempts to sample the keyspace and determine appropriate
    subparts.

    This command provides additional visibility into the partitioning of
    a bucket by showing how salactus would partition a given bucket.
[ "Discover", "the", "partitions", "on", "a", "bucket", "via", "introspection", "." ]
python
train
31.090909
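The notable trick in inspect_partitions is running the worker machinery synchronously by monkeypatching its dispatch function. A generic sketch of that pattern, with illustrative names rather than salactus APIs:

collected = []

def process_page(bucket_id, page):
    # Stand-in for worker.process_keyset: record page sizes instead of queueing work.
    collected.append(len(page))

def invoke(func, *args, **kwargs):
    # Stand-in for worker.invoke: run the task inline rather than dispatching it.
    return func(*args, **kwargs)

# In the real command these stubs are assigned onto the worker module,
# e.g. worker.invoke = invoke, before kicking off partitioning.
invoke(process_page, 'bucket-1', ['key-a', 'key-b', 'key-c'])
print(collected)  # [3]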
thombashi/pytablereader
pytablereader/factory/_url.py
https://github.com/thombashi/pytablereader/blob/bc3c057a2cc775bcce690e0e9019c2907b638101/pytablereader/factory/_url.py#L225-L242
def _get_format_name_loader_mapping(self): """ :return: Mappings of format-name and loader class. :rtype: dict """ loader_table = self._get_common_loader_mapping() loader_table.update( { "excel": ExcelTableFileLoader, "json_lines": JsonLinesTableTextLoader, "markdown": MarkdownTableTextLoader, "mediawiki": MediaWikiTableTextLoader, "ssv": CsvTableFileLoader, } ) return loader_table
[ "def", "_get_format_name_loader_mapping", "(", "self", ")", ":", "loader_table", "=", "self", ".", "_get_common_loader_mapping", "(", ")", "loader_table", ".", "update", "(", "{", "\"excel\"", ":", "ExcelTableFileLoader", ",", "\"json_lines\"", ":", "JsonLinesTableTextLoader", ",", "\"markdown\"", ":", "MarkdownTableTextLoader", ",", "\"mediawiki\"", ":", "MediaWikiTableTextLoader", ",", "\"ssv\"", ":", "CsvTableFileLoader", ",", "}", ")", "return", "loader_table" ]
:return: Mappings of format-name and loader class. :rtype: dict
[ ":", "return", ":", "Mappings", "of", "format", "-", "name", "and", "loader", "class", ".", ":", "rtype", ":", "dict" ]
python
train
29.833333
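A hedged example of how this mapping surfaces through the package's URL loader; TableUrlLoader and the format_name hint come from pytablereader's public API, but the URL below is a placeholder:

import pytablereader as ptr

# format_name='markdown' resolves to MarkdownTableTextLoader via the mapping above.
loader = ptr.TableUrlLoader(
    'https://example.com/tables.md', format_name='markdown')

for table_data in loader.load():
    print(table_data)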
Anaconda-Platform/anaconda-client
binstar_client/__init__.py
https://github.com/Anaconda-Platform/anaconda-client/blob/b276f0572744c73c184a8b43a897cfa7fc1dc523/binstar_client/__init__.py#L249-L291
def user_packages(
            self,
            login=None,
            platform=None,
            package_type=None,
            type_=None,
            access=None):
        '''
        Returns a list of packages for a given user and optionally filters
        by `platform`, `package_type` and `type_`.

        :param login: (optional) the login name of the user or None. If login
                      is None this method will return the packages for the
                      authenticated user.
        :param platform: only find packages that include files for this
                         platform. (e.g. 'linux-64', 'osx-64', 'win-32')
        :param package_type: only find packages that have this kind of file
                             (e.g. 'env', 'conda', 'pypi')
        :param type_: only find packages that have this conda `type`
                      (i.e. 'app')
        :param access: only find packages that have this access level
                       (e.g. 'private', 'authenticated', 'public')
        '''
        if login:
            url = '{0}/packages/{1}'.format(self.domain, login)
        else:
            url = '{0}/packages'.format(self.domain)

        arguments = collections.OrderedDict()

        if platform:
            arguments['platform'] = platform
        if package_type:
            arguments['package_type'] = package_type
        if type_:
            arguments['type'] = type_
        if access:
            arguments['access'] = access

        res = self.session.get(url, params=arguments)
        self._check_response(res)
        return res.json()
[ "def", "user_packages", "(", "self", ",", "login", "=", "None", ",", "platform", "=", "None", ",", "package_type", "=", "None", ",", "type_", "=", "None", ",", "access", "=", "None", ")", ":", "if", "login", ":", "url", "=", "'{0}/packages/{1}'", ".", "format", "(", "self", ".", "domain", ",", "login", ")", "else", ":", "url", "=", "'{0}/packages'", ".", "format", "(", "self", ".", "domain", ")", "arguments", "=", "collections", ".", "OrderedDict", "(", ")", "if", "platform", ":", "arguments", "[", "'platform'", "]", "=", "platform", "if", "package_type", ":", "arguments", "[", "'package_type'", "]", "=", "package_type", "if", "type_", ":", "arguments", "[", "'type'", "]", "=", "type_", "if", "access", ":", "arguments", "[", "'access'", "]", "=", "access", "res", "=", "self", ".", "session", ".", "get", "(", "url", ",", "params", "=", "arguments", ")", "self", ".", "_check_response", "(", "res", ")", "return", "res", ".", "json", "(", ")" ]
Returns a list of packages for a given user and optionally filters
        by `platform`, `package_type` and `type_`.

        :param login: (optional) the login name of the user or None. If login
                      is None this method will return the packages for the
                      authenticated user.
        :param platform: only find packages that include files for this
                         platform. (e.g. 'linux-64', 'osx-64', 'win-32')
        :param package_type: only find packages that have this kind of file
                             (e.g. 'env', 'conda', 'pypi')
        :param type_: only find packages that have this conda `type`
                      (i.e. 'app')
        :param access: only find packages that have this access level
                       (e.g. 'private', 'authenticated', 'public')
[ "Returns", "a", "list", "of", "packages", "for", "a", "given", "user", "and", "optionally", "filter", "by", "platform", "package_type", "and", "type_", "." ]
python
train
35.116279
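A usage sketch, assuming an authenticated client obtained via binstar_client.utils.get_server_api; the login and filter values below are illustrative:

from binstar_client.utils import get_server_api

api = get_server_api()  # picks up the locally stored anaconda.org token

# List another user's conda packages built for linux-64.
packages = api.user_packages(
    login='conda-forge', platform='linux-64', package_type='conda')
for package in packages:
    print(package['name'])  # assumes each entry is a dict with a 'name' key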
willkg/markus
markus/main.py
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/main.py#L320-L359
def timer(self, stat, tags=None):
        """Contextmanager for easily computing timings.

        :arg string stat: A period delimited alphanumeric key.

        :arg list-of-strings tags: Each string in the tag consists of a key and
            a value separated by a colon. Tags can make it easier to break
            down metrics for analysis.

            For example ``['env:stage', 'compressed:yes']``.

        For example:

        >>> mymetrics = get_metrics(__name__)

        >>> def long_function():
        ...     with mymetrics.timer('long_function'):
        ...         # perform something we want to keep metrics on
        ...         pass

        .. Note::

           All timings generated with this are in milliseconds.

        """
        if six.PY3:
            start_time = time.perf_counter()
        else:
            start_time = time.time()

        yield

        if six.PY3:
            end_time = time.perf_counter()
        else:
            end_time = time.time()

        delta = end_time - start_time
        self.timing(stat, value=delta * 1000.0, tags=tags)
[ "def", "timer", "(", "self", ",", "stat", ",", "tags", "=", "None", ")", ":", "if", "six", ".", "PY3", ":", "start_time", "=", "time", ".", "perf_counter", "(", ")", "else", ":", "start_time", "=", "time", ".", "time", "(", ")", "yield", "if", "six", ".", "PY3", ":", "end_time", "=", "time", ".", "perf_counter", "(", ")", "else", ":", "end_time", "=", "time", ".", "time", "(", ")", "delta", "=", "end_time", "-", "start_time", "self", ".", "timing", "(", "stat", ",", "value", "=", "delta", "*", "1000.0", ",", "tags", "=", "tags", ")" ]
Contextmanager for easily computing timings.

        :arg string stat: A period delimited alphanumeric key.

        :arg list-of-strings tags: Each string in the tag consists of a key and
            a value separated by a colon. Tags can make it easier to break
            down metrics for analysis.

            For example ``['env:stage', 'compressed:yes']``.

        For example:

        >>> mymetrics = get_metrics(__name__)

        >>> def long_function():
        ...     with mymetrics.timer('long_function'):
        ...         # perform something we want to keep metrics on
        ...         pass

        .. Note::

           All timings generated with this are in milliseconds.
[ "Contextmanager", "for", "easily", "computing", "timings", "." ]
python
test
26.55
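A runnable sketch built on the docstring's own example; markus.configure would normally register a metrics backend first, which is omitted here:

import time
import markus

metrics = markus.get_metrics(__name__)

def long_function():
    # Emits a timing metric, in milliseconds, when the block exits.
    with metrics.timer('long_function'):
        time.sleep(0.05)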