column | type | details
:------|:-----|:-------
repo | string | lengths 7 to 54
path | string | lengths 4 to 192
url | string | lengths 87 to 284
code | string | lengths 78 to 104k
code_tokens | sequence | token list for `code`
docstring | string | lengths 1 to 46.9k
docstring_tokens | sequence | token list for `docstring`
language | string | 1 distinct value (`python`)
partition | string | 3 distinct values (`train`, `valid`, `test`)
ToucanToco/toucan-data-sdk
toucan_data_sdk/utils/postprocess/converter.py
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/converter.py#L42-L96
def change_date_format( df, *, column: str, output_format: str, input_format: str = None, new_column: str = None, new_time_zone=None ): """ Convert the format of a date --- ### Parameters *mandatory :* - `column` (*str*): name of the column to change the format - `output_format` (*str*): format of the output values (see [available formats]( https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior)) *optional :* - `input_format` (*str*): format of the input values (by default let the parser detect it) - `new_column` (*str*): name of the output column (by default overwrite `column`) - `new_time_zone` (*str*): name of new time zone (by default no time zone conversion is done) --- ### Example **Input** label | date :------:|:----: France | 2017-03-22 Europe | 2016-03-22 ```cson change_date_format: column: 'date' input_format: '%Y-%m-%d' output_format: '%Y-%m' ``` Output : label | date :------:|:----: France | 2017-03 Europe | 2016-03 """ new_column = new_column or column df[new_column] = (pd.to_datetime(df[column], format=input_format, utc=True) .dt.tz_convert(new_time_zone) .dt.strftime(output_format)) return df
[ "def", "change_date_format", "(", "df", ",", "*", ",", "column", ":", "str", ",", "output_format", ":", "str", ",", "input_format", ":", "str", "=", "None", ",", "new_column", ":", "str", "=", "None", ",", "new_time_zone", "=", "None", ")", ":", "new_column", "=", "new_column", "or", "column", "df", "[", "new_column", "]", "=", "(", "pd", ".", "to_datetime", "(", "df", "[", "column", "]", ",", "format", "=", "input_format", ",", "utc", "=", "True", ")", ".", "dt", ".", "tz_convert", "(", "new_time_zone", ")", ".", "dt", ".", "strftime", "(", "output_format", ")", ")", "return", "df" ]
Convert the format of a date --- ### Parameters *mandatory :* - `column` (*str*): name of the column to change the format - `output_format` (*str*): format of the output values (see [available formats]( https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior)) *optional :* - `input_format` (*str*): format of the input values (by default let the parser detect it) - `new_column` (*str*): name of the output column (by default overwrite `column`) - `new_time_zone` (*str*): name of new time zone (by default no time zone conversion is done) --- ### Example **Input** label | date :------:|:----: France | 2017-03-22 Europe | 2016-03-22 ```cson change_date_format: column: 'date' input_format: '%Y-%m-%d' output_format: '%Y-%m' ``` Output : label | date :------:|:----: France | 2017-03 Europe | 2016-03
[ "Convert", "the", "format", "of", "a", "date" ]
python
test
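The `change_date_format` row above already carries a `cson` example in its docstring; the sketch below is a hypothetical standalone run of the same call, reproducing the function body from the row so it works outside toucan-data-sdk (only pandas is assumed).

```python
# Standalone sketch of the change_date_format call shown above.
# The function body is copied from the row; only pandas is assumed.
import pandas as pd


def change_date_format(df, *, column, output_format,
                       input_format=None, new_column=None, new_time_zone=None):
    new_column = new_column or column
    df[new_column] = (pd.to_datetime(df[column], format=input_format, utc=True)
                      .dt.tz_convert(new_time_zone)
                      .dt.strftime(output_format))
    return df


df = pd.DataFrame({'label': ['France', 'Europe'],
                   'date': ['2017-03-22', '2016-03-22']})
print(change_date_format(df, column='date',
                         input_format='%Y-%m-%d', output_format='%Y-%m'))
#     label     date
# 0  France  2017-03
# 1  Europe  2016-03
```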
johntruckenbrodt/spatialist
spatialist/vector.py
https://github.com/johntruckenbrodt/spatialist/blob/007f49296a156de8d7168ad235b5a5b8e8d3633d/spatialist/vector.py#L356-L373
def getFeatureByIndex(self, index): """ get features by numerical (positional) index Parameters ---------- index: int the queried index Returns ------- :osgeo:class:`ogr.Feature` the requested feature """ feature = self.layer[index] if feature is None: feature = self.getfeatures()[index] return feature
[ "def", "getFeatureByIndex", "(", "self", ",", "index", ")", ":", "feature", "=", "self", ".", "layer", "[", "index", "]", "if", "feature", "is", "None", ":", "feature", "=", "self", ".", "getfeatures", "(", ")", "[", "index", "]", "return", "feature" ]
get features by numerical (positional) index Parameters ---------- index: int the queried index Returns ------- :osgeo:class:`ogr.Feature` the requested feature
[ "get", "features", "by", "numerical", "(", "positional", ")", "index" ]
python
train
numenta/nupic
src/nupic/support/__init__.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/support/__init__.py#L246-L386
def initLogging(verbose=False, console='stdout', consoleLevel='DEBUG'): """ Initilize NuPic logging by reading in from the logging configuration file. The logging configuration file is named ``nupic-logging.conf`` and is expected to be in the format defined by the python logging module. If the environment variable ``NTA_CONF_PATH`` is defined, then the logging configuration file is expected to be in the ``NTA_CONF_PATH`` directory. If ``NTA_CONF_PATH`` is not defined, then it is found in the 'conf/default' subdirectory of the NuPic installation directory (typically ~/nupic/current/conf/default) The logging configuration file can use the environment variable ``NTA_LOG_DIR`` to set the locations of log files. If this variable is not defined, logging to files will be disabled. :param console: Defines console output for the default "root" logging configuration; this may be one of 'stdout', 'stderr', or None; Use None to suppress console logging output :param consoleLevel: Logging-level filter string for console output corresponding to logging levels in the logging module; may be one of: 'DEBUG', 'INFO', 'WARNING', 'ERROR', or 'CRITICAL'. E.g., a value of'WARNING' suppresses DEBUG and INFO level output to console, but allows WARNING, ERROR, and CRITICAL """ # NOTE: If you call this twice from the same process there seems to be a # bug - logged messages don't show up for loggers that you do another # logging.getLogger() on. global gLoggingInitialized if gLoggingInitialized: if verbose: print >> sys.stderr, "Logging already initialized, doing nothing." return consoleStreamMappings = { 'stdout' : 'stdoutConsoleHandler', 'stderr' : 'stderrConsoleHandler', } consoleLogLevels = ['DEBUG', 'INFO', 'WARNING', 'WARN', 'ERROR', 'CRITICAL', 'FATAL'] assert console is None or console in consoleStreamMappings.keys(), ( 'Unexpected console arg value: %r') % (console,) assert consoleLevel in consoleLogLevels, ( 'Unexpected consoleLevel arg value: %r') % (consoleLevel) # ----------------------------------------------------------------------- # Setup logging. 
Look for the nupic-logging.conf file, first in the # NTA_CONFIG_DIR path (if defined), then in a subdirectory of the nupic # module configFilename = 'nupic-logging.conf' configFilePath = resource_filename("nupic.support", configFilename) configLogDir = os.environ.get('NTA_LOG_DIR', None) # Load in the logging configuration file if verbose: print >> sys.stderr, ( "Using logging configuration file: %s") % (configFilePath) # This dict will hold our replacement strings for logging configuration replacements = dict() def makeKey(name): """ Makes replacement key """ return "$$%s$$" % (name) platform = sys.platform.lower() if platform.startswith('java'): # Jython import java.lang platform = java.lang.System.getProperty("os.name").lower() if platform.startswith('mac os x'): platform = 'darwin' if platform.startswith('darwin'): replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"/var/run/syslog"' elif platform.startswith('linux'): replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"/dev/log"' elif platform.startswith('win'): replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"log"' else: raise RuntimeError("This platform is neither darwin, win32, nor linux: %s" % ( sys.platform,)) # Nupic logs go to file replacements[makeKey('PERSISTENT_LOG_HANDLER')] = 'fileHandler' if platform.startswith('win'): replacements[makeKey('FILE_HANDLER_LOG_FILENAME')] = '"NUL"' else: replacements[makeKey('FILE_HANDLER_LOG_FILENAME')] = '"/dev/null"' # Set up log file path for the default file handler and configure handlers handlers = list() if configLogDir is not None: logFilePath = _genLoggingFilePath() makeDirectoryFromAbsolutePath(os.path.dirname(logFilePath)) replacements[makeKey('FILE_HANDLER_LOG_FILENAME')] = repr(logFilePath) handlers.append(replacements[makeKey('PERSISTENT_LOG_HANDLER')]) if console is not None: handlers.append(consoleStreamMappings[console]) replacements[makeKey('ROOT_LOGGER_HANDLERS')] = ", ".join(handlers) # Set up log level for console handlers replacements[makeKey('CONSOLE_LOG_LEVEL')] = consoleLevel customConfig = StringIO() # Using pkg_resources to get the logging file, which should be packaged and # associated with this source file name. loggingFileContents = resource_string(__name__, configFilename) for lineNum, line in enumerate(loggingFileContents.splitlines()): if "$$" in line: for (key, value) in replacements.items(): line = line.replace(key, value) # If there is still a replacement string in the line, we're missing it # from our replacements dict if "$$" in line and "$$<key>$$" not in line: raise RuntimeError(("The text %r, found at line #%d of file %r, " "contains a string not found in our replacement " "dict.") % (line, lineNum, configFilePath)) customConfig.write("%s\n" % line) customConfig.seek(0) if python_version()[:3] >= '2.6': logging.config.fileConfig(customConfig, disable_existing_loggers=False) else: logging.config.fileConfig(customConfig) gLoggingInitialized = True
[ "def", "initLogging", "(", "verbose", "=", "False", ",", "console", "=", "'stdout'", ",", "consoleLevel", "=", "'DEBUG'", ")", ":", "# NOTE: If you call this twice from the same process there seems to be a", "# bug - logged messages don't show up for loggers that you do another", "# logging.getLogger() on.", "global", "gLoggingInitialized", "if", "gLoggingInitialized", ":", "if", "verbose", ":", "print", ">>", "sys", ".", "stderr", ",", "\"Logging already initialized, doing nothing.\"", "return", "consoleStreamMappings", "=", "{", "'stdout'", ":", "'stdoutConsoleHandler'", ",", "'stderr'", ":", "'stderrConsoleHandler'", ",", "}", "consoleLogLevels", "=", "[", "'DEBUG'", ",", "'INFO'", ",", "'WARNING'", ",", "'WARN'", ",", "'ERROR'", ",", "'CRITICAL'", ",", "'FATAL'", "]", "assert", "console", "is", "None", "or", "console", "in", "consoleStreamMappings", ".", "keys", "(", ")", ",", "(", "'Unexpected console arg value: %r'", ")", "%", "(", "console", ",", ")", "assert", "consoleLevel", "in", "consoleLogLevels", ",", "(", "'Unexpected consoleLevel arg value: %r'", ")", "%", "(", "consoleLevel", ")", "# -----------------------------------------------------------------------", "# Setup logging. Look for the nupic-logging.conf file, first in the", "# NTA_CONFIG_DIR path (if defined), then in a subdirectory of the nupic", "# module", "configFilename", "=", "'nupic-logging.conf'", "configFilePath", "=", "resource_filename", "(", "\"nupic.support\"", ",", "configFilename", ")", "configLogDir", "=", "os", ".", "environ", ".", "get", "(", "'NTA_LOG_DIR'", ",", "None", ")", "# Load in the logging configuration file", "if", "verbose", ":", "print", ">>", "sys", ".", "stderr", ",", "(", "\"Using logging configuration file: %s\"", ")", "%", "(", "configFilePath", ")", "# This dict will hold our replacement strings for logging configuration", "replacements", "=", "dict", "(", ")", "def", "makeKey", "(", "name", ")", ":", "\"\"\" Makes replacement key \"\"\"", "return", "\"$$%s$$\"", "%", "(", "name", ")", "platform", "=", "sys", ".", "platform", ".", "lower", "(", ")", "if", "platform", ".", "startswith", "(", "'java'", ")", ":", "# Jython", "import", "java", ".", "lang", "platform", "=", "java", ".", "lang", ".", "System", ".", "getProperty", "(", "\"os.name\"", ")", ".", "lower", "(", ")", "if", "platform", ".", "startswith", "(", "'mac os x'", ")", ":", "platform", "=", "'darwin'", "if", "platform", ".", "startswith", "(", "'darwin'", ")", ":", "replacements", "[", "makeKey", "(", "'SYSLOG_HANDLER_ADDRESS'", ")", "]", "=", "'\"/var/run/syslog\"'", "elif", "platform", ".", "startswith", "(", "'linux'", ")", ":", "replacements", "[", "makeKey", "(", "'SYSLOG_HANDLER_ADDRESS'", ")", "]", "=", "'\"/dev/log\"'", "elif", "platform", ".", "startswith", "(", "'win'", ")", ":", "replacements", "[", "makeKey", "(", "'SYSLOG_HANDLER_ADDRESS'", ")", "]", "=", "'\"log\"'", "else", ":", "raise", "RuntimeError", "(", "\"This platform is neither darwin, win32, nor linux: %s\"", "%", "(", "sys", ".", "platform", ",", ")", ")", "# Nupic logs go to file", "replacements", "[", "makeKey", "(", "'PERSISTENT_LOG_HANDLER'", ")", "]", "=", "'fileHandler'", "if", "platform", ".", "startswith", "(", "'win'", ")", ":", "replacements", "[", "makeKey", "(", "'FILE_HANDLER_LOG_FILENAME'", ")", "]", "=", "'\"NUL\"'", "else", ":", "replacements", "[", "makeKey", "(", "'FILE_HANDLER_LOG_FILENAME'", ")", "]", "=", "'\"/dev/null\"'", "# Set up log file path for the default file handler and configure handlers", "handlers", "=", "list", 
"(", ")", "if", "configLogDir", "is", "not", "None", ":", "logFilePath", "=", "_genLoggingFilePath", "(", ")", "makeDirectoryFromAbsolutePath", "(", "os", ".", "path", ".", "dirname", "(", "logFilePath", ")", ")", "replacements", "[", "makeKey", "(", "'FILE_HANDLER_LOG_FILENAME'", ")", "]", "=", "repr", "(", "logFilePath", ")", "handlers", ".", "append", "(", "replacements", "[", "makeKey", "(", "'PERSISTENT_LOG_HANDLER'", ")", "]", ")", "if", "console", "is", "not", "None", ":", "handlers", ".", "append", "(", "consoleStreamMappings", "[", "console", "]", ")", "replacements", "[", "makeKey", "(", "'ROOT_LOGGER_HANDLERS'", ")", "]", "=", "\", \"", ".", "join", "(", "handlers", ")", "# Set up log level for console handlers", "replacements", "[", "makeKey", "(", "'CONSOLE_LOG_LEVEL'", ")", "]", "=", "consoleLevel", "customConfig", "=", "StringIO", "(", ")", "# Using pkg_resources to get the logging file, which should be packaged and", "# associated with this source file name.", "loggingFileContents", "=", "resource_string", "(", "__name__", ",", "configFilename", ")", "for", "lineNum", ",", "line", "in", "enumerate", "(", "loggingFileContents", ".", "splitlines", "(", ")", ")", ":", "if", "\"$$\"", "in", "line", ":", "for", "(", "key", ",", "value", ")", "in", "replacements", ".", "items", "(", ")", ":", "line", "=", "line", ".", "replace", "(", "key", ",", "value", ")", "# If there is still a replacement string in the line, we're missing it", "# from our replacements dict", "if", "\"$$\"", "in", "line", "and", "\"$$<key>$$\"", "not", "in", "line", ":", "raise", "RuntimeError", "(", "(", "\"The text %r, found at line #%d of file %r, \"", "\"contains a string not found in our replacement \"", "\"dict.\"", ")", "%", "(", "line", ",", "lineNum", ",", "configFilePath", ")", ")", "customConfig", ".", "write", "(", "\"%s\\n\"", "%", "line", ")", "customConfig", ".", "seek", "(", "0", ")", "if", "python_version", "(", ")", "[", ":", "3", "]", ">=", "'2.6'", ":", "logging", ".", "config", ".", "fileConfig", "(", "customConfig", ",", "disable_existing_loggers", "=", "False", ")", "else", ":", "logging", ".", "config", ".", "fileConfig", "(", "customConfig", ")", "gLoggingInitialized", "=", "True" ]
Initilize NuPic logging by reading in from the logging configuration file. The logging configuration file is named ``nupic-logging.conf`` and is expected to be in the format defined by the python logging module. If the environment variable ``NTA_CONF_PATH`` is defined, then the logging configuration file is expected to be in the ``NTA_CONF_PATH`` directory. If ``NTA_CONF_PATH`` is not defined, then it is found in the 'conf/default' subdirectory of the NuPic installation directory (typically ~/nupic/current/conf/default) The logging configuration file can use the environment variable ``NTA_LOG_DIR`` to set the locations of log files. If this variable is not defined, logging to files will be disabled. :param console: Defines console output for the default "root" logging configuration; this may be one of 'stdout', 'stderr', or None; Use None to suppress console logging output :param consoleLevel: Logging-level filter string for console output corresponding to logging levels in the logging module; may be one of: 'DEBUG', 'INFO', 'WARNING', 'ERROR', or 'CRITICAL'. E.g., a value of'WARNING' suppresses DEBUG and INFO level output to console, but allows WARNING, ERROR, and CRITICAL
[ "Initilize", "NuPic", "logging", "by", "reading", "in", "from", "the", "logging", "configuration", "file", ".", "The", "logging", "configuration", "file", "is", "named", "nupic", "-", "logging", ".", "conf", "and", "is", "expected", "to", "be", "in", "the", "format", "defined", "by", "the", "python", "logging", "module", "." ]
python
valid
balloob/aiohue
aiohue/discovery.py
https://github.com/balloob/aiohue/blob/c0270637a8a6ce3f5684c8559decac79fb0f0192/aiohue/discovery.py#L6-L10
async def discover_nupnp(websession): """Discover bridges via NUPNP.""" async with websession.get(URL_NUPNP) as res: return [Bridge(item['internalipaddress'], websession=websession) for item in (await res.json())]
[ "async", "def", "discover_nupnp", "(", "websession", ")", ":", "async", "with", "websession", ".", "get", "(", "URL_NUPNP", ")", "as", "res", ":", "return", "[", "Bridge", "(", "item", "[", "'internalipaddress'", "]", ",", "websession", "=", "websession", ")", "for", "item", "in", "(", "await", "res", ".", "json", "(", ")", ")", "]" ]
Discover bridges via NUPNP.
[ "Discover", "bridges", "via", "NUPNP", "." ]
python
train
dhermes/bezier
src/bezier/surface.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/surface.py#L1100-L1128
def _make_intersection(edge_info, all_edge_nodes): """Convert a description of edges into a curved polygon. .. note:: This is a helper used only by :meth:`.Surface.intersect`. Args: edge_info (Tuple[Tuple[int, float, float], ...]): Information describing each edge in the curved polygon by indicating which surface / edge on the surface and then start and end parameters along that edge. (See :func:`.ends_to_curve`.) all_edge_nodes (Tuple[numpy.ndarray, ...]): The nodes of three edges of the first surface being intersected followed by the nodes of the three edges of the second. Returns: .CurvedPolygon: The intersection corresponding to ``edge_info``. """ edges = [] for index, start, end in edge_info: nodes = all_edge_nodes[index] new_nodes = _curve_helpers.specialize_curve(nodes, start, end) degree = new_nodes.shape[1] - 1 edge = _curve_mod.Curve(new_nodes, degree, _copy=False) edges.append(edge) return curved_polygon.CurvedPolygon( *edges, metadata=edge_info, _verify=False )
[ "def", "_make_intersection", "(", "edge_info", ",", "all_edge_nodes", ")", ":", "edges", "=", "[", "]", "for", "index", ",", "start", ",", "end", "in", "edge_info", ":", "nodes", "=", "all_edge_nodes", "[", "index", "]", "new_nodes", "=", "_curve_helpers", ".", "specialize_curve", "(", "nodes", ",", "start", ",", "end", ")", "degree", "=", "new_nodes", ".", "shape", "[", "1", "]", "-", "1", "edge", "=", "_curve_mod", ".", "Curve", "(", "new_nodes", ",", "degree", ",", "_copy", "=", "False", ")", "edges", ".", "append", "(", "edge", ")", "return", "curved_polygon", ".", "CurvedPolygon", "(", "*", "edges", ",", "metadata", "=", "edge_info", ",", "_verify", "=", "False", ")" ]
Convert a description of edges into a curved polygon. .. note:: This is a helper used only by :meth:`.Surface.intersect`. Args: edge_info (Tuple[Tuple[int, float, float], ...]): Information describing each edge in the curved polygon by indicating which surface / edge on the surface and then start and end parameters along that edge. (See :func:`.ends_to_curve`.) all_edge_nodes (Tuple[numpy.ndarray, ...]): The nodes of three edges of the first surface being intersected followed by the nodes of the three edges of the second. Returns: .CurvedPolygon: The intersection corresponding to ``edge_info``.
[ "Convert", "a", "description", "of", "edges", "into", "a", "curved", "polygon", "." ]
python
train
kejbaly2/metrique
metrique/reporting.py
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/reporting.py#L101-L109
def write_report(self, force=False): ''' Writes the report to a file. ''' path = self.title + '.html' value = self._template.format( title=self.title, body=self.body, sidebar=self.sidebar) write_file(path, value, force=force) plt.ion()
[ "def", "write_report", "(", "self", ",", "force", "=", "False", ")", ":", "path", "=", "self", ".", "title", "+", "'.html'", "value", "=", "self", ".", "_template", ".", "format", "(", "title", "=", "self", ".", "title", ",", "body", "=", "self", ".", "body", ",", "sidebar", "=", "self", ".", "sidebar", ")", "write_file", "(", "path", ",", "value", ",", "force", "=", "force", ")", "plt", ".", "ion", "(", ")" ]
Writes the report to a file.
[ "Writes", "the", "report", "to", "a", "file", "." ]
python
train
wolfhong/formic
formic/treewalk.py
https://github.com/wolfhong/formic/blob/0d81eb88dcbb6fa705194fc6ccf2993f4abbaa76/formic/treewalk.py#L34-L45
def tree_walk(cls, directory, tree): """Walks a tree returned by `cls.list_to_tree` returning a list of 3-tuples as if from os.walk().""" results = [] dirs = [d for d in tree if d != FILE_MARKER] files = tree[FILE_MARKER] results.append((directory, dirs, files)) for d in dirs: subdir = os.path.join(directory, d) subtree = tree[d] results.extend(cls.tree_walk(subdir, subtree)) return results
[ "def", "tree_walk", "(", "cls", ",", "directory", ",", "tree", ")", ":", "results", "=", "[", "]", "dirs", "=", "[", "d", "for", "d", "in", "tree", "if", "d", "!=", "FILE_MARKER", "]", "files", "=", "tree", "[", "FILE_MARKER", "]", "results", ".", "append", "(", "(", "directory", ",", "dirs", ",", "files", ")", ")", "for", "d", "in", "dirs", ":", "subdir", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "d", ")", "subtree", "=", "tree", "[", "d", "]", "results", ".", "extend", "(", "cls", ".", "tree_walk", "(", "subdir", ",", "subtree", ")", ")", "return", "results" ]
Walks a tree returned by `cls.list_to_tree` returning a list of 3-tuples as if from os.walk().
[ "Walks", "a", "tree", "returned", "by", "cls", ".", "list_to_tree", "returning", "a", "list", "of", "3", "-", "tuples", "as", "if", "from", "os", ".", "walk", "()", "." ]
python
train
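A small, self-contained sketch of the `tree_walk` traversal above. The nested-dict layout and the `FILE_MARKER` sentinel are assumptions inferred from the code in the row; formic's real `list_to_tree` builder is not shown here, so the tree is built by hand.

```python
# Sketch of the os.walk()-style traversal performed by tree_walk above.
# FILE_MARKER is a stand-in sentinel and the tree is hand-built, since the
# real formic list_to_tree helper is not part of this row.
import os

FILE_MARKER = '<files>'  # hypothetical sentinel key


def tree_walk(directory, tree):
    """Return (dirpath, dirnames, filenames) tuples, as if from os.walk()."""
    results = []
    dirs = [d for d in tree if d != FILE_MARKER]
    files = tree[FILE_MARKER]
    results.append((directory, dirs, files))
    for d in dirs:
        results.extend(tree_walk(os.path.join(directory, d), tree[d]))
    return results


tree = {
    FILE_MARKER: ['setup.py'],
    'src': {FILE_MARKER: ['main.py', 'util.py']},
}
for dirpath, dirnames, filenames in tree_walk('project', tree):
    print(dirpath, dirnames, filenames)
# project ['src'] ['setup.py']
# project/src [] ['main.py', 'util.py']   (POSIX path separator)
```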
manns/pyspread
pyspread/src/gui/_dialogs.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L99-L108
def Validate(self, win): """Returns True if Value in digits, False otherwise""" val = self.GetWindow().GetValue() for x in val: if x not in string.digits: return False return True
[ "def", "Validate", "(", "self", ",", "win", ")", ":", "val", "=", "self", ".", "GetWindow", "(", ")", ".", "GetValue", "(", ")", "for", "x", "in", "val", ":", "if", "x", "not", "in", "string", ".", "digits", ":", "return", "False", "return", "True" ]
Returns True if Value in digits, False otherwise
[ "Returns", "True", "if", "Value", "in", "digits", "False", "otherwise" ]
python
train
saschpe/rapport
rapport/plugin.py
https://github.com/saschpe/rapport/blob/ccceb8f84bd7e8add88ab5e137cdab6424aa4683/rapport/plugin.py#L104-L116
def _path_to_module(path): """Translates paths to *.py? files into module paths. >>> _path_to_module("rapport/bar.py") 'rapport.bar' >>> _path_to_module("/usr/lib/rapport/bar.py") 'rapport.bar' """ # Split of preceeding path elements: path = "rapport" + path.split("rapport")[1] # Split of ending and replace os.sep with dots: path = path.replace(os.sep, ".").rsplit(".", 1)[0] return path
[ "def", "_path_to_module", "(", "path", ")", ":", "# Split of preceeding path elements:", "path", "=", "\"rapport\"", "+", "path", ".", "split", "(", "\"rapport\"", ")", "[", "1", "]", "# Split of ending and replace os.sep with dots:", "path", "=", "path", ".", "replace", "(", "os", ".", "sep", ",", "\".\"", ")", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", "return", "path" ]
Translates paths to *.py? files into module paths. >>> _path_to_module("rapport/bar.py") 'rapport.bar' >>> _path_to_module("/usr/lib/rapport/bar.py") 'rapport.bar'
[ "Translates", "paths", "to", "*", ".", "py?", "files", "into", "module", "paths", "." ]
python
train
portfors-lab/sparkle
sparkle/tools/spikestats.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/tools/spikestats.py#L22-L74
def spike_times(signal, threshold, fs, absval=True): """Detect spikes from a given signal :param signal: Spike trace recording (vector) :type signal: numpy array :param threshold: Threshold value to determine spikes :type threshold: float :param absval: Whether to apply absolute value to signal before thresholding :type absval: bool :returns: list(float) of spike times in seconds For every continuous set of points over given threshold, returns the time of the maximum""" times = [] if absval: signal = np.abs(signal) over, = np.where(signal>threshold) segments, = np.where(np.diff(over) > 1) if len(over) > 1: if len(segments) == 0: segments = [0, len(over)-1] else: # add end points to sections for looping if segments[0] != 0: segments = np.insert(segments, [0], [0]) else: #first point in singleton times.append(float(over[0])/fs) if 1 not in segments: # make sure that first point is in there segments[0] = 1 if segments[-1] != len(over)-1: segments = np.insert(segments, [len(segments)], [len(over)-1]) else: times.append(float(over[-1])/fs) for iseg in range(1,len(segments)): if segments[iseg] - segments[iseg-1] == 1: # only single point over threshold idx = over[segments[iseg]] else: segments[0] = segments[0]-1 # find maximum of continuous set over max idx = over[segments[iseg-1]+1] + np.argmax(signal[over[segments[iseg-1]+1]:over[segments[iseg]]]) times.append(float(idx)/fs) elif len(over) == 1: times.append(float(over[0])/fs) if len(times)>0: return refractory(times) else: return times
[ "def", "spike_times", "(", "signal", ",", "threshold", ",", "fs", ",", "absval", "=", "True", ")", ":", "times", "=", "[", "]", "if", "absval", ":", "signal", "=", "np", ".", "abs", "(", "signal", ")", "over", ",", "=", "np", ".", "where", "(", "signal", ">", "threshold", ")", "segments", ",", "=", "np", ".", "where", "(", "np", ".", "diff", "(", "over", ")", ">", "1", ")", "if", "len", "(", "over", ")", ">", "1", ":", "if", "len", "(", "segments", ")", "==", "0", ":", "segments", "=", "[", "0", ",", "len", "(", "over", ")", "-", "1", "]", "else", ":", "# add end points to sections for looping", "if", "segments", "[", "0", "]", "!=", "0", ":", "segments", "=", "np", ".", "insert", "(", "segments", ",", "[", "0", "]", ",", "[", "0", "]", ")", "else", ":", "#first point in singleton", "times", ".", "append", "(", "float", "(", "over", "[", "0", "]", ")", "/", "fs", ")", "if", "1", "not", "in", "segments", ":", "# make sure that first point is in there", "segments", "[", "0", "]", "=", "1", "if", "segments", "[", "-", "1", "]", "!=", "len", "(", "over", ")", "-", "1", ":", "segments", "=", "np", ".", "insert", "(", "segments", ",", "[", "len", "(", "segments", ")", "]", ",", "[", "len", "(", "over", ")", "-", "1", "]", ")", "else", ":", "times", ".", "append", "(", "float", "(", "over", "[", "-", "1", "]", ")", "/", "fs", ")", "for", "iseg", "in", "range", "(", "1", ",", "len", "(", "segments", ")", ")", ":", "if", "segments", "[", "iseg", "]", "-", "segments", "[", "iseg", "-", "1", "]", "==", "1", ":", "# only single point over threshold", "idx", "=", "over", "[", "segments", "[", "iseg", "]", "]", "else", ":", "segments", "[", "0", "]", "=", "segments", "[", "0", "]", "-", "1", "# find maximum of continuous set over max", "idx", "=", "over", "[", "segments", "[", "iseg", "-", "1", "]", "+", "1", "]", "+", "np", ".", "argmax", "(", "signal", "[", "over", "[", "segments", "[", "iseg", "-", "1", "]", "+", "1", "]", ":", "over", "[", "segments", "[", "iseg", "]", "]", "]", ")", "times", ".", "append", "(", "float", "(", "idx", ")", "/", "fs", ")", "elif", "len", "(", "over", ")", "==", "1", ":", "times", ".", "append", "(", "float", "(", "over", "[", "0", "]", ")", "/", "fs", ")", "if", "len", "(", "times", ")", ">", "0", ":", "return", "refractory", "(", "times", ")", "else", ":", "return", "times" ]
Detect spikes from a given signal :param signal: Spike trace recording (vector) :type signal: numpy array :param threshold: Threshold value to determine spikes :type threshold: float :param absval: Whether to apply absolute value to signal before thresholding :type absval: bool :returns: list(float) of spike times in seconds For every continuous set of points over given threshold, returns the time of the maximum
[ "Detect", "spikes", "from", "a", "given", "signal" ]
python
train
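The sketch below is a simplified, standalone illustration of the core idea in `spike_times` above: collect contiguous runs of over-threshold samples and report the time of each run's maximum. It is not the dataset function itself; in particular it omits the `refractory()` post-filter and the edge-case bookkeeping in the original.

```python
# Simplified illustration of the thresholding idea in spike_times above.
# This is not the original function: the refractory() filter and the
# single-point edge cases handled there are deliberately left out.
import numpy as np


def simple_spike_times(signal, threshold, fs):
    signal = np.abs(signal)
    over, = np.where(signal > threshold)
    if over.size == 0:
        return []
    # break the over-threshold indices wherever they stop being consecutive
    runs = np.split(over, np.where(np.diff(over) > 1)[0] + 1)
    # report the time (in seconds) of the maximum within each run
    return [float(run[np.argmax(signal[run])]) / fs for run in runs]


fs = 1000.0  # sampling rate in Hz
signal = np.zeros(50)
signal[10:13] = [0.6, 1.2, 0.7]   # one spike peaking at sample 11
signal[30] = 0.9                  # a single-sample spike at sample 30
print(simple_spike_times(signal, threshold=0.5, fs=fs))  # [0.011, 0.03]
```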
timofurrer/tag-expressions
tagexpressions/parser.py
https://github.com/timofurrer/tag-expressions/blob/9b58c34296b31530f31517ffefc8715516c73da3/tagexpressions/parser.py#L101-L118
def create_and_push_expression(token, expressions): """Creates an expression from the given token and adds it to the stack of the given expression. In the case of "and" and "or" expressions the last expression is poped from the expression stack to link it to the new created one. """ if token == 'and': right_expr = expressions.pop() expressions.append(And(expressions.pop(), right_expr)) elif token == 'or': right_expr = expressions.pop() expressions.append(Or(expressions.pop(), right_expr)) elif token == 'not': expressions.append(Not(expressions.pop())) else: expressions.append(Literal(token))
[ "def", "create_and_push_expression", "(", "token", ",", "expressions", ")", ":", "if", "token", "==", "'and'", ":", "right_expr", "=", "expressions", ".", "pop", "(", ")", "expressions", ".", "append", "(", "And", "(", "expressions", ".", "pop", "(", ")", ",", "right_expr", ")", ")", "elif", "token", "==", "'or'", ":", "right_expr", "=", "expressions", ".", "pop", "(", ")", "expressions", ".", "append", "(", "Or", "(", "expressions", ".", "pop", "(", ")", ",", "right_expr", ")", ")", "elif", "token", "==", "'not'", ":", "expressions", ".", "append", "(", "Not", "(", "expressions", ".", "pop", "(", ")", ")", ")", "else", ":", "expressions", ".", "append", "(", "Literal", "(", "token", ")", ")" ]
Creates an expression from the given token and adds it to the stack of the given expression. In the case of "and" and "or" expressions the last expression is poped from the expression stack to link it to the new created one.
[ "Creates", "an", "expression", "from", "the", "given", "token", "and", "adds", "it", "to", "the", "stack", "of", "the", "given", "expression", "." ]
python
train
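A compact sketch of how `create_and_push_expression` above folds a postfix token stream into an expression tree. The `Literal`, `Not`, `And`, and `Or` classes here are minimal hypothetical stand-ins for the real tagexpressions classes; the folding function itself mirrors the row.

```python
# Sketch: folding postfix tokens into an expression tree with a stack, as
# create_and_push_expression above does. Literal/Not/And/Or are minimal
# stand-ins; the real tagexpressions classes are not shown in this row.
class Literal:
    def __init__(self, name):
        self.name = name

    def evaluate(self, tags):
        return self.name in tags


class Not:
    def __init__(self, expr):
        self.expr = expr

    def evaluate(self, tags):
        return not self.expr.evaluate(tags)


class And:
    def __init__(self, left, right):
        self.left, self.right = left, right

    def evaluate(self, tags):
        return self.left.evaluate(tags) and self.right.evaluate(tags)


class Or:
    def __init__(self, left, right):
        self.left, self.right = left, right

    def evaluate(self, tags):
        return self.left.evaluate(tags) or self.right.evaluate(tags)


def create_and_push_expression(token, expressions):
    if token == 'and':
        right_expr = expressions.pop()
        expressions.append(And(expressions.pop(), right_expr))
    elif token == 'or':
        right_expr = expressions.pop()
        expressions.append(Or(expressions.pop(), right_expr))
    elif token == 'not':
        expressions.append(Not(expressions.pop()))
    else:
        expressions.append(Literal(token))


# "wip and not slow", already in postfix order:
stack = []
for token in ['wip', 'slow', 'not', 'and']:
    create_and_push_expression(token, stack)
expression = stack.pop()
print(expression.evaluate({'wip'}))          # True
print(expression.evaluate({'wip', 'slow'}))  # False
```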
saltstack/salt
salt/modules/dig.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dig.py#L267-L293
def TXT(host, nameserver=None): ''' Return the TXT record for ``host``. Always returns a list. CLI Example: .. code-block:: bash salt ns1 dig.TXT google.com ''' dig = ['dig', '+short', six.text_type(host), 'TXT'] if nameserver is not None: dig.append('@{0}'.format(nameserver)) cmd = __salt__['cmd.run_all'](dig, python_shell=False) if cmd['retcode'] != 0: log.warning( 'dig returned exit code \'%s\'. Returning empty list as fallback.', cmd['retcode'] ) return [] return [i for i in cmd['stdout'].split('\n')]
[ "def", "TXT", "(", "host", ",", "nameserver", "=", "None", ")", ":", "dig", "=", "[", "'dig'", ",", "'+short'", ",", "six", ".", "text_type", "(", "host", ")", ",", "'TXT'", "]", "if", "nameserver", "is", "not", "None", ":", "dig", ".", "append", "(", "'@{0}'", ".", "format", "(", "nameserver", ")", ")", "cmd", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "dig", ",", "python_shell", "=", "False", ")", "if", "cmd", "[", "'retcode'", "]", "!=", "0", ":", "log", ".", "warning", "(", "'dig returned exit code \\'%s\\'. Returning empty list as fallback.'", ",", "cmd", "[", "'retcode'", "]", ")", "return", "[", "]", "return", "[", "i", "for", "i", "in", "cmd", "[", "'stdout'", "]", ".", "split", "(", "'\\n'", ")", "]" ]
Return the TXT record for ``host``. Always returns a list. CLI Example: .. code-block:: bash salt ns1 dig.TXT google.com
[ "Return", "the", "TXT", "record", "for", "host", "." ]
python
train
instaloader/instaloader
instaloader/structures.py
https://github.com/instaloader/instaloader/blob/87d877e650cd8020b04b8b51be120599a441fd5b/instaloader/structures.py#L256-L262
def tagged_users(self) -> List[str]: """List of all lowercased users that are tagged in the Post.""" try: return [edge['node']['user']['username'].lower() for edge in self._field('edge_media_to_tagged_user', 'edges')] except KeyError: return []
[ "def", "tagged_users", "(", "self", ")", "->", "List", "[", "str", "]", ":", "try", ":", "return", "[", "edge", "[", "'node'", "]", "[", "'user'", "]", "[", "'username'", "]", ".", "lower", "(", ")", "for", "edge", "in", "self", ".", "_field", "(", "'edge_media_to_tagged_user'", ",", "'edges'", ")", "]", "except", "KeyError", ":", "return", "[", "]" ]
List of all lowercased users that are tagged in the Post.
[ "List", "of", "all", "lowercased", "users", "that", "are", "tagged", "in", "the", "Post", "." ]
python
train
kapot65/python-df-parser
dfparser/rsh_parser.py
https://github.com/kapot65/python-df-parser/blob/bb3eec0fb7ca85d72cb1d9ed7415efe074594f26/dfparser/rsh_parser.py#L381-L402
def update_event_data(self, num, data): """Update event data in dataset.""" if num < 0 or num >= self.params["events_num"]: raise IndexError("Index out of range [0:%s]" % (self.params["events_num"])) if isinstance(data, np.ndarray): raise TypeError("data should be np.ndarray") if data.dtype != np.short: raise TypeError("data array dtype should be dtype('int16')") ch_num = self.params['channel_number'] ev_size = self.params['b_size'] if data.shape != (ch_num * ev_size,): raise Exception("data should contain same number of elements " "(%s)" % (ch_num * ev_size)) self.file.seek(7168 + num * (96 + 2 * ch_num * ev_size) + 96) self.file.write(data.tostring()) self.file.flush()
[ "def", "update_event_data", "(", "self", ",", "num", ",", "data", ")", ":", "if", "num", "<", "0", "or", "num", ">=", "self", ".", "params", "[", "\"events_num\"", "]", ":", "raise", "IndexError", "(", "\"Index out of range [0:%s]\"", "%", "(", "self", ".", "params", "[", "\"events_num\"", "]", ")", ")", "if", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "raise", "TypeError", "(", "\"data should be np.ndarray\"", ")", "if", "data", ".", "dtype", "!=", "np", ".", "short", ":", "raise", "TypeError", "(", "\"data array dtype should be dtype('int16')\"", ")", "ch_num", "=", "self", ".", "params", "[", "'channel_number'", "]", "ev_size", "=", "self", ".", "params", "[", "'b_size'", "]", "if", "data", ".", "shape", "!=", "(", "ch_num", "*", "ev_size", ",", ")", ":", "raise", "Exception", "(", "\"data should contain same number of elements \"", "\"(%s)\"", "%", "(", "ch_num", "*", "ev_size", ")", ")", "self", ".", "file", ".", "seek", "(", "7168", "+", "num", "*", "(", "96", "+", "2", "*", "ch_num", "*", "ev_size", ")", "+", "96", ")", "self", ".", "file", ".", "write", "(", "data", ".", "tostring", "(", ")", ")", "self", ".", "file", ".", "flush", "(", ")" ]
Update event data in dataset.
[ "Update", "event", "data", "in", "dataset", "." ]
python
train
jonhadfield/python-hosts
python_hosts/utils.py
https://github.com/jonhadfield/python-hosts/blob/9ccaa8edc63418a91f10bf732b26070f21dd2ad0/python_hosts/utils.py#L37-L49
def valid_hostnames(hostname_list): """ Check if the supplied list of strings are valid hostnames :param hostname_list: A list of strings :return: True if the strings are valid hostnames, False if not """ for entry in hostname_list: if len(entry) > 255: return False allowed = re.compile('(?!-)[A-Z\d-]{1,63}(?<!-)$', re.IGNORECASE) if not all(allowed.match(x) for x in entry.split(".")): return False return True
[ "def", "valid_hostnames", "(", "hostname_list", ")", ":", "for", "entry", "in", "hostname_list", ":", "if", "len", "(", "entry", ")", ">", "255", ":", "return", "False", "allowed", "=", "re", ".", "compile", "(", "'(?!-)[A-Z\\d-]{1,63}(?<!-)$'", ",", "re", ".", "IGNORECASE", ")", "if", "not", "all", "(", "allowed", ".", "match", "(", "x", ")", "for", "x", "in", "entry", ".", "split", "(", "\".\"", ")", ")", ":", "return", "False", "return", "True" ]
Check if the supplied list of strings are valid hostnames :param hostname_list: A list of strings :return: True if the strings are valid hostnames, False if not
[ "Check", "if", "the", "supplied", "list", "of", "strings", "are", "valid", "hostnames", ":", "param", "hostname_list", ":", "A", "list", "of", "strings", ":", "return", ":", "True", "if", "the", "strings", "are", "valid", "hostnames", "False", "if", "not" ]
python
train
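A standalone run of `valid_hostnames` above; the body is reproduced from the row (with the pattern written as a raw string) so the example needs nothing beyond the standard library.

```python
# Standalone run of valid_hostnames above; the body is copied from the row,
# with the regex written as a raw string.
import re


def valid_hostnames(hostname_list):
    for entry in hostname_list:
        if len(entry) > 255:
            return False
        allowed = re.compile(r'(?!-)[A-Z\d-]{1,63}(?<!-)$', re.IGNORECASE)
        if not all(allowed.match(x) for x in entry.split(".")):
            return False
    return True


print(valid_hostnames(['example.com', 'host-1.internal']))  # True
print(valid_hostnames(['-bad-.example.com']))                # False: label starts with '-'
print(valid_hostnames(['under_score.example.com']))          # False: '_' is not allowed
```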
saltstack/salt
salt/modules/mac_softwareupdate.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_softwareupdate.py#L319-L349
def list_downloads(): ''' Return a list of all updates that have been downloaded locally. :return: A list of updates that have been downloaded :rtype: list CLI Example: .. code-block:: bash salt '*' softwareupdate.list_downloads ''' outfiles = [] for root, subFolder, files in salt.utils.path.os_walk('/Library/Updates'): for f in files: outfiles.append(os.path.join(root, f)) dist_files = [] for f in outfiles: if f.endswith('.dist'): dist_files.append(f) ret = [] for update in _get_available(): for f in dist_files: with salt.utils.files.fopen(f) as fhr: if update.rsplit('-', 1)[0] in salt.utils.stringutils.to_unicode(fhr.read()): ret.append(update) return ret
[ "def", "list_downloads", "(", ")", ":", "outfiles", "=", "[", "]", "for", "root", ",", "subFolder", ",", "files", "in", "salt", ".", "utils", ".", "path", ".", "os_walk", "(", "'/Library/Updates'", ")", ":", "for", "f", "in", "files", ":", "outfiles", ".", "append", "(", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", ")", "dist_files", "=", "[", "]", "for", "f", "in", "outfiles", ":", "if", "f", ".", "endswith", "(", "'.dist'", ")", ":", "dist_files", ".", "append", "(", "f", ")", "ret", "=", "[", "]", "for", "update", "in", "_get_available", "(", ")", ":", "for", "f", "in", "dist_files", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "f", ")", "as", "fhr", ":", "if", "update", ".", "rsplit", "(", "'-'", ",", "1", ")", "[", "0", "]", "in", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "fhr", ".", "read", "(", ")", ")", ":", "ret", ".", "append", "(", "update", ")", "return", "ret" ]
Return a list of all updates that have been downloaded locally. :return: A list of updates that have been downloaded :rtype: list CLI Example: .. code-block:: bash salt '*' softwareupdate.list_downloads
[ "Return", "a", "list", "of", "all", "updates", "that", "have", "been", "downloaded", "locally", "." ]
python
train
chaoss/grimoirelab-sortinghat
sortinghat/cmd/autogender.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/cmd/autogender.py#L80-L89
def run(self, *args): """Autocomplete gender information.""" params = self.parser.parse_args(args) api_token = params.api_token genderize_all = params.genderize_all code = self.autogender(api_token=api_token, genderize_all=genderize_all) return code
[ "def", "run", "(", "self", ",", "*", "args", ")", ":", "params", "=", "self", ".", "parser", ".", "parse_args", "(", "args", ")", "api_token", "=", "params", ".", "api_token", "genderize_all", "=", "params", ".", "genderize_all", "code", "=", "self", ".", "autogender", "(", "api_token", "=", "api_token", ",", "genderize_all", "=", "genderize_all", ")", "return", "code" ]
Autocomplete gender information.
[ "Autocomplete", "gender", "information", "." ]
python
train
corydodt/Crosscap
crosscap/tree.py
https://github.com/corydodt/Crosscap/blob/388a2ec36b8aa85e8f1ed692bb6e43474ba76c8e/crosscap/tree.py#L53-L69
def openAPIDoc(**kwargs): """ Update a function's docstring to include the OpenAPI Yaml generated by running the openAPIGraph object """ s = yaml.dump(kwargs, default_flow_style=False) def deco(routeHandler): # Wrap routeHandler, retaining name and __doc__, then edit __doc__. # The only reason we need to do this is so we can be certain # that __doc__ will be modifiable. partial() objects have # a modifiable __doc__, but native function objects do not. ret = functools.wraps(routeHandler)(routeHandler) if not ret.__doc__: ret.__doc__ = '' ret.__doc__ = cleandoc(ret.__doc__) + '\n---\n' + s return ret return deco
[ "def", "openAPIDoc", "(", "*", "*", "kwargs", ")", ":", "s", "=", "yaml", ".", "dump", "(", "kwargs", ",", "default_flow_style", "=", "False", ")", "def", "deco", "(", "routeHandler", ")", ":", "# Wrap routeHandler, retaining name and __doc__, then edit __doc__.", "# The only reason we need to do this is so we can be certain", "# that __doc__ will be modifiable. partial() objects have", "# a modifiable __doc__, but native function objects do not.", "ret", "=", "functools", ".", "wraps", "(", "routeHandler", ")", "(", "routeHandler", ")", "if", "not", "ret", ".", "__doc__", ":", "ret", ".", "__doc__", "=", "''", "ret", ".", "__doc__", "=", "cleandoc", "(", "ret", ".", "__doc__", ")", "+", "'\\n---\\n'", "+", "s", "return", "ret", "return", "deco" ]
Update a function's docstring to include the OpenAPI Yaml generated by running the openAPIGraph object
[ "Update", "a", "function", "s", "docstring", "to", "include", "the", "OpenAPI", "Yaml", "generated", "by", "running", "the", "openAPIGraph", "object" ]
python
train
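A hypothetical usage sketch for the `openAPIDoc` decorator above: the decorator body is reproduced from the row so the example runs on its own, assuming PyYAML is installed. The route handler and the `responses` keyword are made up for illustration.

```python
# Usage sketch for openAPIDoc above. The decorator body is copied from the
# row; the route handler and its keyword arguments are made-up examples.
# Assumes PyYAML is installed.
import functools
from inspect import cleandoc

import yaml


def openAPIDoc(**kwargs):
    s = yaml.dump(kwargs, default_flow_style=False)

    def deco(routeHandler):
        ret = functools.wraps(routeHandler)(routeHandler)
        if not ret.__doc__:
            ret.__doc__ = ''
        ret.__doc__ = cleandoc(ret.__doc__) + '\n---\n' + s
        return ret
    return deco


@openAPIDoc(responses={200: {'description': 'OK'}})
def getUser():
    """Fetch a user."""


print(getUser.__doc__)
# Fetch a user.
# ---
# responses:
#   200:
#     description: OK
```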
KenjiTakahashi/td
td/main.py
https://github.com/KenjiTakahashi/td/blob/7311eabc63efe6fe6600687c3026f0837454c2e4/td/main.py#L555-L563
def add(self, **args): """Handles the 'a' command. :args: Arguments supplied to the 'a' command. """ kwargs = self.getKwargs(args) if kwargs: self.model.add(**kwargs)
[ "def", "add", "(", "self", ",", "*", "*", "args", ")", ":", "kwargs", "=", "self", ".", "getKwargs", "(", "args", ")", "if", "kwargs", ":", "self", ".", "model", ".", "add", "(", "*", "*", "kwargs", ")" ]
Handles the 'a' command. :args: Arguments supplied to the 'a' command.
[ "Handles", "the", "a", "command", "." ]
python
train
portfoliome/foil
foil/formatters.py
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/formatters.py#L6-L11
def format_repr(obj, attributes) -> str: """Format an object's repr method with specific attributes.""" attribute_repr = ', '.join(('{}={}'.format(attr, repr(getattr(obj, attr))) for attr in attributes)) return "{0}({1})".format(obj.__class__.__qualname__, attribute_repr)
[ "def", "format_repr", "(", "obj", ",", "attributes", ")", "->", "str", ":", "attribute_repr", "=", "', '", ".", "join", "(", "(", "'{}={}'", ".", "format", "(", "attr", ",", "repr", "(", "getattr", "(", "obj", ",", "attr", ")", ")", ")", "for", "attr", "in", "attributes", ")", ")", "return", "\"{0}({1})\"", ".", "format", "(", "obj", ".", "__class__", ".", "__qualname__", ",", "attribute_repr", ")" ]
Format an object's repr method with specific attributes.
[ "Format", "an", "object", "s", "repr", "method", "with", "specific", "attributes", "." ]
python
train
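A tiny usage sketch for `format_repr` above: a class delegates its `__repr__` to the helper and names the attributes to include. The helper body is reproduced from the row; the `Ticker` class is a made-up example.

```python
# Usage sketch for format_repr above. The helper body is copied from the row;
# the Ticker class is a made-up example.
def format_repr(obj, attributes) -> str:
    """Format an object's repr method with specific attributes."""
    attribute_repr = ', '.join(('{}={}'.format(attr, repr(getattr(obj, attr)))
                                for attr in attributes))
    return "{0}({1})".format(obj.__class__.__qualname__, attribute_repr)


class Ticker:
    def __init__(self, symbol, price):
        self.symbol = symbol
        self.price = price

    def __repr__(self):
        return format_repr(self, ['symbol', 'price'])


print(Ticker('ACME', 12.5))  # Ticker(symbol='ACME', price=12.5)
```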
saltstack/salt
salt/modules/virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L445-L490
def _get_disks(dom): ''' Get domain disks from a libvirt domain object. ''' disks = {} doc = ElementTree.fromstring(dom.XMLDesc(0)) for elem in doc.findall('devices/disk'): source = elem.find('source') if source is None: continue target = elem.find('target') if target is None: continue if 'dev' in target.attrib: qemu_target = source.get('file', '') if not qemu_target: qemu_target = source.get('dev', '') elif qemu_target.startswith('/dev/zvol/'): disks[target.get('dev')] = { 'file': qemu_target, 'zfs': True} continue if not qemu_target and 'protocol' in source.attrib and 'name' in source.attrib: # for rbd network qemu_target = '{0}:{1}'.format( source.get('protocol'), source.get('name')) if not qemu_target: continue disk = {'file': qemu_target, 'type': elem.get('device')} driver = elem.find('driver') if driver is not None and driver.get('type') == 'qcow2': try: stdout = subprocess.Popen( ['qemu-img', 'info', '--output', 'json', '--backing-chain', disk['file']], shell=False, stdout=subprocess.PIPE).communicate()[0] qemu_output = salt.utils.stringutils.to_str(stdout) output = _parse_qemu_img_info(qemu_output) disk.update(output) except TypeError: disk.update({'file': 'Does not exist'}) disks[target.get('dev')] = disk return disks
[ "def", "_get_disks", "(", "dom", ")", ":", "disks", "=", "{", "}", "doc", "=", "ElementTree", ".", "fromstring", "(", "dom", ".", "XMLDesc", "(", "0", ")", ")", "for", "elem", "in", "doc", ".", "findall", "(", "'devices/disk'", ")", ":", "source", "=", "elem", ".", "find", "(", "'source'", ")", "if", "source", "is", "None", ":", "continue", "target", "=", "elem", ".", "find", "(", "'target'", ")", "if", "target", "is", "None", ":", "continue", "if", "'dev'", "in", "target", ".", "attrib", ":", "qemu_target", "=", "source", ".", "get", "(", "'file'", ",", "''", ")", "if", "not", "qemu_target", ":", "qemu_target", "=", "source", ".", "get", "(", "'dev'", ",", "''", ")", "elif", "qemu_target", ".", "startswith", "(", "'/dev/zvol/'", ")", ":", "disks", "[", "target", ".", "get", "(", "'dev'", ")", "]", "=", "{", "'file'", ":", "qemu_target", ",", "'zfs'", ":", "True", "}", "continue", "if", "not", "qemu_target", "and", "'protocol'", "in", "source", ".", "attrib", "and", "'name'", "in", "source", ".", "attrib", ":", "# for rbd network", "qemu_target", "=", "'{0}:{1}'", ".", "format", "(", "source", ".", "get", "(", "'protocol'", ")", ",", "source", ".", "get", "(", "'name'", ")", ")", "if", "not", "qemu_target", ":", "continue", "disk", "=", "{", "'file'", ":", "qemu_target", ",", "'type'", ":", "elem", ".", "get", "(", "'device'", ")", "}", "driver", "=", "elem", ".", "find", "(", "'driver'", ")", "if", "driver", "is", "not", "None", "and", "driver", ".", "get", "(", "'type'", ")", "==", "'qcow2'", ":", "try", ":", "stdout", "=", "subprocess", ".", "Popen", "(", "[", "'qemu-img'", ",", "'info'", ",", "'--output'", ",", "'json'", ",", "'--backing-chain'", ",", "disk", "[", "'file'", "]", "]", ",", "shell", "=", "False", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ".", "communicate", "(", ")", "[", "0", "]", "qemu_output", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_str", "(", "stdout", ")", "output", "=", "_parse_qemu_img_info", "(", "qemu_output", ")", "disk", ".", "update", "(", "output", ")", "except", "TypeError", ":", "disk", ".", "update", "(", "{", "'file'", ":", "'Does not exist'", "}", ")", "disks", "[", "target", ".", "get", "(", "'dev'", ")", "]", "=", "disk", "return", "disks" ]
Get domain disks from a libvirt domain object.
[ "Get", "domain", "disks", "from", "a", "libvirt", "domain", "object", "." ]
python
train
gmr/rejected
rejected/process.py
https://github.com/gmr/rejected/blob/610a3e1401122ecb98d891b6795cca0255e5b044/rejected/process.py#L572-L592
def _run(self): """Run method that can be profiled""" self.set_state(self.STATE_INITIALIZING) self.ioloop = ioloop.IOLoop.current() self.consumer_lock = locks.Lock() self.sentry_client = self.setup_sentry( self._kwargs['config'], self.consumer_name) try: self.setup() except (AttributeError, ImportError): return self.on_startup_error( 'Failed to import the Python module for {}'.format( self.consumer_name)) if not self.is_stopped: try: self.ioloop.start() except KeyboardInterrupt: LOGGER.warning('CTRL-C while waiting for clean shutdown')
[ "def", "_run", "(", "self", ")", ":", "self", ".", "set_state", "(", "self", ".", "STATE_INITIALIZING", ")", "self", ".", "ioloop", "=", "ioloop", ".", "IOLoop", ".", "current", "(", ")", "self", ".", "consumer_lock", "=", "locks", ".", "Lock", "(", ")", "self", ".", "sentry_client", "=", "self", ".", "setup_sentry", "(", "self", ".", "_kwargs", "[", "'config'", "]", ",", "self", ".", "consumer_name", ")", "try", ":", "self", ".", "setup", "(", ")", "except", "(", "AttributeError", ",", "ImportError", ")", ":", "return", "self", ".", "on_startup_error", "(", "'Failed to import the Python module for {}'", ".", "format", "(", "self", ".", "consumer_name", ")", ")", "if", "not", "self", ".", "is_stopped", ":", "try", ":", "self", ".", "ioloop", ".", "start", "(", ")", "except", "KeyboardInterrupt", ":", "LOGGER", ".", "warning", "(", "'CTRL-C while waiting for clean shutdown'", ")" ]
Run method that can be profiled
[ "Run", "method", "that", "can", "be", "profiled" ]
python
train
SavinaRoja/OpenAccess_EPUB
src/openaccess_epub/utils/images.py
https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/images.py#L58-L73
def input_relative_images(input_path, image_destination, rootname, config): """ The method used to handle Input-Relative image inclusion. """ log.debug('Looking for input relative images') input_dirname = os.path.dirname(input_path) for path in config.input_relative_images: if '*' in path: path = path.replace('*', rootname) log.debug('Wildcard expansion for image directory: {0}'.format(path)) images = os.path.normpath(os.path.join(input_dirname, path)) if os.path.isdir(images): log.info('Input-Relative image directory found: {0}'.format(images)) shutil.copytree(images, image_destination) return True return False
[ "def", "input_relative_images", "(", "input_path", ",", "image_destination", ",", "rootname", ",", "config", ")", ":", "log", ".", "debug", "(", "'Looking for input relative images'", ")", "input_dirname", "=", "os", ".", "path", ".", "dirname", "(", "input_path", ")", "for", "path", "in", "config", ".", "input_relative_images", ":", "if", "'*'", "in", "path", ":", "path", "=", "path", ".", "replace", "(", "'*'", ",", "rootname", ")", "log", ".", "debug", "(", "'Wildcard expansion for image directory: {0}'", ".", "format", "(", "path", ")", ")", "images", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "input_dirname", ",", "path", ")", ")", "if", "os", ".", "path", ".", "isdir", "(", "images", ")", ":", "log", ".", "info", "(", "'Input-Relative image directory found: {0}'", ".", "format", "(", "images", ")", ")", "shutil", ".", "copytree", "(", "images", ",", "image_destination", ")", "return", "True", "return", "False" ]
The method used to handle Input-Relative image inclusion.
[ "The", "method", "used", "to", "handle", "Input", "-", "Relative", "image", "inclusion", "." ]
python
train
lvh/txampext
txampext/multiplexing.py
https://github.com/lvh/txampext/blob/a7d6cb9f1e9200dba597378cd40eb6a2096d4fd9/txampext/multiplexing.py#L99-L109
def addFactory(self, identifier, factory): """Adds a factory. After calling this method, remote clients will be able to connect to it. This will call ``factory.doStart``. """ factory.doStart() self._factories[identifier] = factory
[ "def", "addFactory", "(", "self", ",", "identifier", ",", "factory", ")", ":", "factory", ".", "doStart", "(", ")", "self", ".", "_factories", "[", "identifier", "]", "=", "factory" ]
Adds a factory. After calling this method, remote clients will be able to connect to it. This will call ``factory.doStart``.
[ "Adds", "a", "factory", "." ]
python
test
pantsbuild/pants
src/python/pants/reporting/html_reporter.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/reporting/html_reporter.py#L437-L453
def _overwrite(self, filename, func, force=False): """Overwrite a file with the specified contents. Write times are tracked, too-frequent overwrites are skipped, for performance reasons. :param filename: The path under the html dir to write to. :param func: A no-arg function that returns the contents to write. :param force: Whether to force a write now, regardless of the last overwrite time. """ now = int(time.time() * 1000) last_overwrite_time = self._last_overwrite_time.get(filename) or now # Overwrite only once per second. if (now - last_overwrite_time >= 1000) or force: if os.path.exists(self._html_dir): # Make sure we're not immediately after a clean-all. with open(os.path.join(self._html_dir, filename), 'w') as f: f.write(func()) self._last_overwrite_time[filename] = now
[ "def", "_overwrite", "(", "self", ",", "filename", ",", "func", ",", "force", "=", "False", ")", ":", "now", "=", "int", "(", "time", ".", "time", "(", ")", "*", "1000", ")", "last_overwrite_time", "=", "self", ".", "_last_overwrite_time", ".", "get", "(", "filename", ")", "or", "now", "# Overwrite only once per second.", "if", "(", "now", "-", "last_overwrite_time", ">=", "1000", ")", "or", "force", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "_html_dir", ")", ":", "# Make sure we're not immediately after a clean-all.", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_html_dir", ",", "filename", ")", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "func", "(", ")", ")", "self", ".", "_last_overwrite_time", "[", "filename", "]", "=", "now" ]
Overwrite a file with the specified contents. Write times are tracked, too-frequent overwrites are skipped, for performance reasons. :param filename: The path under the html dir to write to. :param func: A no-arg function that returns the contents to write. :param force: Whether to force a write now, regardless of the last overwrite time.
[ "Overwrite", "a", "file", "with", "the", "specified", "contents", "." ]
python
train
bitshares/uptick
uptick/cli.py
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/cli.py#L40-L45
def set(ctx, key, value): """ Set configuration parameters """ if key == "default_account" and value[0] == "@": value = value[1:] ctx.bitshares.config[key] = value
[ "def", "set", "(", "ctx", ",", "key", ",", "value", ")", ":", "if", "key", "==", "\"default_account\"", "and", "value", "[", "0", "]", "==", "\"@\"", ":", "value", "=", "value", "[", "1", ":", "]", "ctx", ".", "bitshares", ".", "config", "[", "key", "]", "=", "value" ]
Set configuration parameters
[ "Set", "configuration", "parameters" ]
python
train
sbg/sevenbridges-python
sevenbridges/transfer/upload.py
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/transfer/upload.py#L16-L31
def _get_part_url(api, url, upload, part): """ Used by the worker to fetch url for the part that is to be uploaded. :param api: Api instance. :param url: Part url. :param upload: Upload identifier. :param part: Part number. :return: Storage generated URL for the part. """ try: response = api.get(url.format(upload_id=upload, part_number=part)) return response.json()['url'] except Exception: raise SbgError( 'Unable to get upload url for part number {}'.format(part) )
[ "def", "_get_part_url", "(", "api", ",", "url", ",", "upload", ",", "part", ")", ":", "try", ":", "response", "=", "api", ".", "get", "(", "url", ".", "format", "(", "upload_id", "=", "upload", ",", "part_number", "=", "part", ")", ")", "return", "response", ".", "json", "(", ")", "[", "'url'", "]", "except", "Exception", ":", "raise", "SbgError", "(", "'Unable to get upload url for part number {}'", ".", "format", "(", "part", ")", ")" ]
Used by the worker to fetch url for the part that is to be uploaded. :param api: Api instance. :param url: Part url. :param upload: Upload identifier. :param part: Part number. :return: Storage generated URL for the part.
[ "Used", "by", "the", "worker", "to", "fetch", "url", "for", "the", "part", "that", "is", "to", "be", "uploaded", ".", ":", "param", "api", ":", "Api", "instance", ".", ":", "param", "url", ":", "Part", "url", ".", ":", "param", "upload", ":", "Upload", "identifier", ".", ":", "param", "part", ":", "Part", "number", ".", ":", "return", ":", "Storage", "generated", "URL", "for", "the", "part", "." ]
python
train
abseil/abseil-py
absl/logging/__init__.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/logging/__init__.py#L1021-L1038
def log(self, level, msg, *args, **kwargs): """Logs a message at a cetain level substituting in the supplied arguments. This method behaves differently in python and c++ modes. Args: level: int, the standard logging level at which to log the message. msg: str, the text of the message to log. *args: The arguments to substitute in the message. **kwargs: The keyword arguments to substitute in the message. """ if level >= logging.FATAL: # Add property to the LogRecord created by this logger. # This will be used by the ABSLHandler to determine whether it should # treat CRITICAL/FATAL logs as really FATAL. extra = kwargs.setdefault('extra', {}) extra[_ABSL_LOG_FATAL] = True super(ABSLLogger, self).log(level, msg, *args, **kwargs)
[ "def", "log", "(", "self", ",", "level", ",", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "level", ">=", "logging", ".", "FATAL", ":", "# Add property to the LogRecord created by this logger.", "# This will be used by the ABSLHandler to determine whether it should", "# treat CRITICAL/FATAL logs as really FATAL.", "extra", "=", "kwargs", ".", "setdefault", "(", "'extra'", ",", "{", "}", ")", "extra", "[", "_ABSL_LOG_FATAL", "]", "=", "True", "super", "(", "ABSLLogger", ",", "self", ")", ".", "log", "(", "level", ",", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Logs a message at a certain level substituting in the supplied arguments. This method behaves differently in python and c++ modes. Args: level: int, the standard logging level at which to log the message. msg: str, the text of the message to log. *args: The arguments to substitute in the message. **kwargs: The keyword arguments to substitute in the message.
[ "Logs", "a", "message", "at", "a", "certain", "level", "substituting", "in", "the", "supplied", "arguments", "." ]
python
train
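The override above only tags the log record; a handler decides what to do with the tag. Below is a minimal stand-alone sketch of the same pattern built on the standard `logging` module; the `FlaggingLogger`, `FlagAwareHandler`, and `FATAL_FLAG` names are illustrative inventions, not absl's actual classes or its internal marker key.

```python
# Sketch only: mark FATAL-level records with an extra attribute so a handler
# can treat them specially, mirroring the pattern used by ABSLLogger.log.
import logging

FATAL_FLAG = "_demo_log_fatal"  # hypothetical key, stands in for absl's internal marker


class FlaggingLogger(logging.Logger):
    def log(self, level, msg, *args, **kwargs):
        if level >= logging.FATAL:
            # setdefault keeps any caller-supplied extra dict intact
            extra = kwargs.setdefault("extra", {})
            extra[FATAL_FLAG] = True
        super().log(level, msg, *args, **kwargs)


class FlagAwareHandler(logging.StreamHandler):
    def emit(self, record):
        if getattr(record, FATAL_FLAG, False):
            record.msg = "[really fatal] " + str(record.msg)
        super().emit(record)


logging.setLoggerClass(FlaggingLogger)
log = logging.getLogger("flag_demo")
log.addHandler(FlagAwareHandler())
log.propagate = False
log.log(logging.FATAL, "disk on fire")      # handler sees the flag
log.log(logging.WARNING, "just a warning")  # flag absent, message unchanged
```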
FactoryBoy/factory_boy
factory/builder.py
https://github.com/FactoryBoy/factory_boy/blob/edaa7c7f5a14065b229927903bd7989cc93cd069/factory/builder.py#L58-L65
def join(cls, root, subkey): """Rebuild a full declaration name from its components. for every string x, we have `join(split(x)) == x`. """ if subkey is None: return root return enums.SPLITTER.join((root, subkey))
[ "def", "join", "(", "cls", ",", "root", ",", "subkey", ")", ":", "if", "subkey", "is", "None", ":", "return", "root", "return", "enums", ".", "SPLITTER", ".", "join", "(", "(", "root", ",", "subkey", ")", ")" ]
Rebuild a full declaration name from its components. for every string x, we have `join(split(x)) == x`.
[ "Rebuild", "a", "full", "declaration", "name", "from", "its", "components", "." ]
python
train
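A quick way to see the invariant the docstring states (`join(*split(x)) == x`) is to pair `join` with its `split` counterpart. The sketch below is self-contained and assumes the double-underscore splitter that factory_boy uses for nested declarations; the real constant lives in `factory.enums.SPLITTER`.

```python
# Standalone illustration of the join/split round-trip.
SPLITTER = "__"  # assumed value, mirroring factory_boy's nested-declaration convention


def split(name):
    """Break 'user__profile__city' into ('user', 'profile__city')."""
    if SPLITTER in name:
        root, subkey = name.split(SPLITTER, 1)
        return root, subkey
    return name, None


def join(root, subkey):
    """Rebuild the full declaration name; join(*split(x)) == x."""
    if subkey is None:
        return root
    return SPLITTER.join((root, subkey))


for name in ("city", "profile__city", "user__profile__city"):
    assert join(*split(name)) == name
print("round-trip holds")
```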
phoebe-project/phoebe2
phoebe/parameters/compute.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/compute.py#L162-L190
def photodynam(**kwargs): """ Compute options for using Josh Carter's 'photodynam' code as a backend (must be installed). Generally, this will be used as an input to the kind argument in :meth:`phoebe.frontend.bundle.Bundle.add_compute` Please see :func:`phoebe.backend.backends.photodynam` for a list of sources to cite when using this backend. :parameter **kwargs: defaults for the values of any of the parameters :return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly created :class:`phoebe.parameters.parameters.Parameter`s """ if not conf.devel: raise NotImplementedError("'photodynam' backend not officially supported for this release. Enable developer mode to test.") params = [] params += [BoolParameter(qualifier='enabled', copy_for={'context': 'dataset', 'kind': ['lc', 'rv', 'orb'], 'dataset': '*'}, dataset='_default', value=kwargs.get('enabled', True), description='Whether to create synthetics in compute/fitting run')] params += [FloatParameter(qualifier='stepsize', value=kwargs.get('stepsize', 0.01), default_unit=None, description='blah')] params += [FloatParameter(qualifier='orbiterror', value=kwargs.get('orbiterror', 1e-20), default_unit=None, description='blah')] # TODO: remove this option and instead use time0@system #params += [FloatParameter(qualifier='time0', value=kwargs.get('time0', 0.0), default_unit=u.d, description='Time to start the integration')] return ParameterSet(params)
[ "def", "photodynam", "(", "*", "*", "kwargs", ")", ":", "if", "not", "conf", ".", "devel", ":", "raise", "NotImplementedError", "(", "\"'photodynam' backend not officially supported for this release. Enable developer mode to test.\"", ")", "params", "=", "[", "]", "params", "+=", "[", "BoolParameter", "(", "qualifier", "=", "'enabled'", ",", "copy_for", "=", "{", "'context'", ":", "'dataset'", ",", "'kind'", ":", "[", "'lc'", ",", "'rv'", ",", "'orb'", "]", ",", "'dataset'", ":", "'*'", "}", ",", "dataset", "=", "'_default'", ",", "value", "=", "kwargs", ".", "get", "(", "'enabled'", ",", "True", ")", ",", "description", "=", "'Whether to create synthetics in compute/fitting run'", ")", "]", "params", "+=", "[", "FloatParameter", "(", "qualifier", "=", "'stepsize'", ",", "value", "=", "kwargs", ".", "get", "(", "'stepsize'", ",", "0.01", ")", ",", "default_unit", "=", "None", ",", "description", "=", "'blah'", ")", "]", "params", "+=", "[", "FloatParameter", "(", "qualifier", "=", "'orbiterror'", ",", "value", "=", "kwargs", ".", "get", "(", "'orbiterror'", ",", "1e-20", ")", ",", "default_unit", "=", "None", ",", "description", "=", "'blah'", ")", "]", "# TODO: remove this option and instead use time0@system", "#params += [FloatParameter(qualifier='time0', value=kwargs.get('time0', 0.0), default_unit=u.d, description='Time to start the integration')]", "return", "ParameterSet", "(", "params", ")" ]
Compute options for using Josh Carter's 'photodynam' code as a backend (must be installed). Generally, this will be used as an input to the kind argument in :meth:`phoebe.frontend.bundle.Bundle.add_compute` Please see :func:`phoebe.backend.backends.photodynam` for a list of sources to cite when using this backend. :parameter **kwargs: defaults for the values of any of the parameters :return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly created :class:`phoebe.parameters.parameters.Parameter`s
[ "Compute", "options", "for", "using", "Josh", "Carter", "s", "photodynam", "code", "as", "a", "backend", "(", "must", "be", "installed", ")", "." ]
python
train
wkentaro/pytorch-fcn
torchfcn/models/fcn32s.py
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/models/fcn32s.py#L10-L23
def get_upsampling_weight(in_channels, out_channels, kernel_size): """Make a 2D bilinear kernel suitable for upsampling""" factor = (kernel_size + 1) // 2 if kernel_size % 2 == 1: center = factor - 1 else: center = factor - 0.5 og = np.ogrid[:kernel_size, :kernel_size] filt = (1 - abs(og[0] - center) / factor) * \ (1 - abs(og[1] - center) / factor) weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64) weight[range(in_channels), range(out_channels), :, :] = filt return torch.from_numpy(weight).float()
[ "def", "get_upsampling_weight", "(", "in_channels", ",", "out_channels", ",", "kernel_size", ")", ":", "factor", "=", "(", "kernel_size", "+", "1", ")", "//", "2", "if", "kernel_size", "%", "2", "==", "1", ":", "center", "=", "factor", "-", "1", "else", ":", "center", "=", "factor", "-", "0.5", "og", "=", "np", ".", "ogrid", "[", ":", "kernel_size", ",", ":", "kernel_size", "]", "filt", "=", "(", "1", "-", "abs", "(", "og", "[", "0", "]", "-", "center", ")", "/", "factor", ")", "*", "(", "1", "-", "abs", "(", "og", "[", "1", "]", "-", "center", ")", "/", "factor", ")", "weight", "=", "np", ".", "zeros", "(", "(", "in_channels", ",", "out_channels", ",", "kernel_size", ",", "kernel_size", ")", ",", "dtype", "=", "np", ".", "float64", ")", "weight", "[", "range", "(", "in_channels", ")", ",", "range", "(", "out_channels", ")", ",", ":", ",", ":", "]", "=", "filt", "return", "torch", ".", "from_numpy", "(", "weight", ")", ".", "float", "(", ")" ]
Make a 2D bilinear kernel suitable for upsampling
[ "Make", "a", "2D", "bilinear", "kernel", "suitable", "for", "upsampling" ]
python
train
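To see what the kernel is for: it is typically copied into a fixed `ConvTranspose2d` so the layer performs bilinear upsampling. The sketch below assumes `get_upsampling_weight` from the snippet above is already in scope, and the `padding=factor // 2` choice is the usual companion setting rather than something stated in this snippet.

```python
# Seed a ConvTranspose2d with the bilinear kernel for fixed 2x upsampling.
import torch
import torch.nn as nn

factor = 2
channels = 3
up = nn.ConvTranspose2d(channels, channels, kernel_size=2 * factor,
                        stride=factor, padding=factor // 2, bias=False)
with torch.no_grad():
    up.weight.copy_(get_upsampling_weight(channels, channels, 2 * factor))

x = torch.ones(1, channels, 8, 8)           # a constant test image
y = up(x)
print(y.shape)                              # torch.Size([1, 3, 16, 16])
print(float(y[0, 0, 4:12, 4:12].mean()))    # ~1.0 away from the borders
```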
ktdreyer/txkoji
txkoji/task.py
https://github.com/ktdreyer/txkoji/blob/a7de380f29f745bf11730b27217208f6d4da7733/txkoji/task.py#L390-L398
def url(self): """ Return a kojiweb URL for this resource. :returns: ``str``, kojiweb URL like "http://cbs.centos.org/koji/taskinfo?taskID=381617" """ endpoint = 'taskinfo?taskID=%d' % self.id return posixpath.join(self.connection.weburl, endpoint)
[ "def", "url", "(", "self", ")", ":", "endpoint", "=", "'taskinfo?taskID=%d'", "%", "self", ".", "id", "return", "posixpath", ".", "join", "(", "self", ".", "connection", ".", "weburl", ",", "endpoint", ")" ]
Return a kojiweb URL for this resource. :returns: ``str``, kojiweb URL like "http://cbs.centos.org/koji/taskinfo?taskID=381617"
[ "Return", "a", "kojiweb", "URL", "for", "this", "resource", "." ]
python
train
xray7224/PyPump
pypump/models/person.py
https://github.com/xray7224/PyPump/blob/f921f691c39fe021f4fd124b6bc91718c9e49b4a/pypump/models/person.py#L123-L138
def lists(self): """ :class:`Lists feed <pypump.models.feed.Lists>` with all lists owned by the person. Example: >>> for list in pump.me.lists: ... print(list) ... Acquaintances Family Coworkers Friends """ if self._lists is None: self._lists = Lists(self.links['lists'], pypump=self._pump) return self._lists
[ "def", "lists", "(", "self", ")", ":", "if", "self", ".", "_lists", "is", "None", ":", "self", ".", "_lists", "=", "Lists", "(", "self", ".", "links", "[", "'lists'", "]", ",", "pypump", "=", "self", ".", "_pump", ")", "return", "self", ".", "_lists" ]
:class:`Lists feed <pypump.models.feed.Lists>` with all lists owned by the person. Example: >>> for list in pump.me.lists: ... print(list) ... Acquaintances Family Coworkers Friends
[ ":", "class", ":", "Lists", "feed", "<pypump", ".", "models", ".", "feed", ".", "Lists", ">", "with", "all", "lists", "owned", "by", "the", "person", "." ]
python
train
mlperf/training
compliance/verify_submission/mlperf_submission_helper/checks.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/compliance/verify_submission/mlperf_submission_helper/checks.py#L274-L309
def verify_and_extract_time(self, log_file, division, result_name): """Verifies and result and returns timing. Uses submodule mlp_compliance (https://github.com/bitfort/mlp_compliance) Args: log_file: Absolute path to result file. division: open, closed result_name: name of the benchmark, ncf, ssd, etc Returns: Time for the result or `INFINITE_TIME` if not a success Raises: Exception: If expected compliance level is not hit or cannot figure out expected compliance level. """ expected_level = constants.DIVISION_COMPLIANCE_CHECK_LEVEL.get( division, None) print(result_name) if expected_level is None: raise Exception('Unknown division: {}'.format(division)) start_time, level, dt, _, success = self.get_compliance(log_file) print(float(start_time)) if int(level) != expected_level: raise Exception('Error Level {} does not match needed level {}:{}'.format( level, expected_level, log_file)) # Sets failure to converge to "infinite time" per the rules if success and dt: return dt, start_time else: print('Result was not a success set to INFINITE_TIME({})'.format( INFINITE_TIME)) return INFINITE_TIME, start_time
[ "def", "verify_and_extract_time", "(", "self", ",", "log_file", ",", "division", ",", "result_name", ")", ":", "expected_level", "=", "constants", ".", "DIVISION_COMPLIANCE_CHECK_LEVEL", ".", "get", "(", "division", ",", "None", ")", "print", "(", "result_name", ")", "if", "expected_level", "is", "None", ":", "raise", "Exception", "(", "'Unknown division: {}'", ".", "format", "(", "division", ")", ")", "start_time", ",", "level", ",", "dt", ",", "_", ",", "success", "=", "self", ".", "get_compliance", "(", "log_file", ")", "print", "(", "float", "(", "start_time", ")", ")", "if", "int", "(", "level", ")", "!=", "expected_level", ":", "raise", "Exception", "(", "'Error Level {} does not match needed level {}:{}'", ".", "format", "(", "level", ",", "expected_level", ",", "log_file", ")", ")", "# Sets failure to converge to \"infinite time\" per the rules", "if", "success", "and", "dt", ":", "return", "dt", ",", "start_time", "else", ":", "print", "(", "'Result was not a success set to INFINITE_TIME({})'", ".", "format", "(", "INFINITE_TIME", ")", ")", "return", "INFINITE_TIME", ",", "start_time" ]
Verifies a result and returns timing. Uses submodule mlp_compliance (https://github.com/bitfort/mlp_compliance) Args: log_file: Absolute path to result file. division: open, closed result_name: name of the benchmark, ncf, ssd, etc Returns: Time for the result or `INFINITE_TIME` if not a success Raises: Exception: If expected compliance level is not hit or cannot figure out expected compliance level.
[ "Verifies", "a", "result", "and", "returns", "timing", "." ]
python
train
campaignmonitor/createsend-python
lib/createsend/transactional.py
https://github.com/campaignmonitor/createsend-python/blob/4bfe2fd5cb2fc9d8f12280b23569eea0a6c66426/lib/createsend/transactional.py#L16-L24
def smart_email_list(self, status="all", client_id=None): """Gets the smart email list.""" if client_id is None: response = self._get( "/transactional/smartEmail?status=%s" % status) else: response = self._get( "/transactional/smartEmail?status=%s&clientID=%s" % (status, client_id)) return json_to_py(response)
[ "def", "smart_email_list", "(", "self", ",", "status", "=", "\"all\"", ",", "client_id", "=", "None", ")", ":", "if", "client_id", "is", "None", ":", "response", "=", "self", ".", "_get", "(", "\"/transactional/smartEmail?status=%s\"", "%", "status", ")", "else", ":", "response", "=", "self", ".", "_get", "(", "\"/transactional/smartEmail?status=%s&clientID=%s\"", "%", "(", "status", ",", "client_id", ")", ")", "return", "json_to_py", "(", "response", ")" ]
Gets the smart email list.
[ "Gets", "the", "smart", "email", "list", "." ]
python
train
eqcorrscan/EQcorrscan
eqcorrscan/core/match_filter.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L548-L634
def decluster(self, trig_int, timing='detect', metric='avg_cor'): """ De-cluster a Party of detections by enforcing a detection separation. De-clustering occurs between events detected by different (or the same) templates. If multiple detections occur within trig_int then the preferred detection will be determined by the metric argument. This can be either the average single-station correlation coefficient which is calculated as Detection.detect_val / Detection.no_chans, or the raw cross channel correlation sum which is simply Detection.detect_val. :type trig_int: float :param trig_int: Minimum detection separation in seconds. :type metric: str :param metric: What metric to sort peaks by. Either 'avg_cor' which takes the single station average correlation or 'cor_sum' which takes the total correlation sum across all channels. :type timing: str :param timing: Either 'detect' or 'origin' to decluster based on either the detection time or the origin time. .. Warning:: Works in place on object, if you need to keep the original safe then run this on a copy of the object! .. rubric:: Example >>> party = Party().read() >>> len(party) 4 >>> declustered = party.decluster(20) >>> len(party) 3 """ all_detections = [] for fam in self.families: all_detections.extend(fam.detections) if timing == 'detect': if metric == 'avg_cor': detect_info = [(d.detect_time, d.detect_val / d.no_chans) for d in all_detections] elif metric == 'cor_sum': detect_info = [(d.detect_time, d.detect_val) for d in all_detections] else: raise MatchFilterError('metric is not cor_sum or avg_cor') elif timing == 'origin': if metric == 'avg_cor': detect_info = [(_get_origin(d.event).time, d.detect_val / d.no_chans) for d in all_detections] elif metric == 'cor_sum': detect_info = [(_get_origin(d.event).time, d.detect_val) for d in all_detections] else: raise MatchFilterError('metric is not cor_sum or avg_cor') else: raise MatchFilterError('timing is not detect or origin') min_det = sorted([d[0] for d in detect_info])[0] detect_vals = np.array([d[1] for d in detect_info]) detect_times = np.array([ _total_microsec(d[0].datetime, min_det.datetime) for d in detect_info]) # Trig_int must be converted from seconds to micro-seconds peaks_out = decluster( peaks=detect_vals, index=detect_times, trig_int=trig_int * 10 ** 6) # Need to match both the time and the detection value declustered_detections = [] for ind in peaks_out: matching_time_indeces = np.where(detect_times == ind[-1])[0] matches = matching_time_indeces[ np.where(detect_vals[matching_time_indeces] == ind[0])[0][0]] declustered_detections.append(all_detections[matches]) # Convert this list into families template_names = list(set([d.template_name for d in declustered_detections])) new_families = [] for template_name in template_names: template = [fam.template for fam in self.families if fam.template.name == template_name][0] new_families.append(Family( template=template, detections=[d for d in declustered_detections if d.template_name == template_name])) self.families = new_families return self
[ "def", "decluster", "(", "self", ",", "trig_int", ",", "timing", "=", "'detect'", ",", "metric", "=", "'avg_cor'", ")", ":", "all_detections", "=", "[", "]", "for", "fam", "in", "self", ".", "families", ":", "all_detections", ".", "extend", "(", "fam", ".", "detections", ")", "if", "timing", "==", "'detect'", ":", "if", "metric", "==", "'avg_cor'", ":", "detect_info", "=", "[", "(", "d", ".", "detect_time", ",", "d", ".", "detect_val", "/", "d", ".", "no_chans", ")", "for", "d", "in", "all_detections", "]", "elif", "metric", "==", "'cor_sum'", ":", "detect_info", "=", "[", "(", "d", ".", "detect_time", ",", "d", ".", "detect_val", ")", "for", "d", "in", "all_detections", "]", "else", ":", "raise", "MatchFilterError", "(", "'metric is not cor_sum or avg_cor'", ")", "elif", "timing", "==", "'origin'", ":", "if", "metric", "==", "'avg_cor'", ":", "detect_info", "=", "[", "(", "_get_origin", "(", "d", ".", "event", ")", ".", "time", ",", "d", ".", "detect_val", "/", "d", ".", "no_chans", ")", "for", "d", "in", "all_detections", "]", "elif", "metric", "==", "'cor_sum'", ":", "detect_info", "=", "[", "(", "_get_origin", "(", "d", ".", "event", ")", ".", "time", ",", "d", ".", "detect_val", ")", "for", "d", "in", "all_detections", "]", "else", ":", "raise", "MatchFilterError", "(", "'metric is not cor_sum or avg_cor'", ")", "else", ":", "raise", "MatchFilterError", "(", "'timing is not detect or origin'", ")", "min_det", "=", "sorted", "(", "[", "d", "[", "0", "]", "for", "d", "in", "detect_info", "]", ")", "[", "0", "]", "detect_vals", "=", "np", ".", "array", "(", "[", "d", "[", "1", "]", "for", "d", "in", "detect_info", "]", ")", "detect_times", "=", "np", ".", "array", "(", "[", "_total_microsec", "(", "d", "[", "0", "]", ".", "datetime", ",", "min_det", ".", "datetime", ")", "for", "d", "in", "detect_info", "]", ")", "# Trig_int must be converted from seconds to micro-seconds", "peaks_out", "=", "decluster", "(", "peaks", "=", "detect_vals", ",", "index", "=", "detect_times", ",", "trig_int", "=", "trig_int", "*", "10", "**", "6", ")", "# Need to match both the time and the detection value", "declustered_detections", "=", "[", "]", "for", "ind", "in", "peaks_out", ":", "matching_time_indeces", "=", "np", ".", "where", "(", "detect_times", "==", "ind", "[", "-", "1", "]", ")", "[", "0", "]", "matches", "=", "matching_time_indeces", "[", "np", ".", "where", "(", "detect_vals", "[", "matching_time_indeces", "]", "==", "ind", "[", "0", "]", ")", "[", "0", "]", "[", "0", "]", "]", "declustered_detections", ".", "append", "(", "all_detections", "[", "matches", "]", ")", "# Convert this list into families", "template_names", "=", "list", "(", "set", "(", "[", "d", ".", "template_name", "for", "d", "in", "declustered_detections", "]", ")", ")", "new_families", "=", "[", "]", "for", "template_name", "in", "template_names", ":", "template", "=", "[", "fam", ".", "template", "for", "fam", "in", "self", ".", "families", "if", "fam", ".", "template", ".", "name", "==", "template_name", "]", "[", "0", "]", "new_families", ".", "append", "(", "Family", "(", "template", "=", "template", ",", "detections", "=", "[", "d", "for", "d", "in", "declustered_detections", "if", "d", ".", "template_name", "==", "template_name", "]", ")", ")", "self", ".", "families", "=", "new_families", "return", "self" ]
De-cluster a Party of detections by enforcing a detection separation. De-clustering occurs between events detected by different (or the same) templates. If multiple detections occur within trig_int then the preferred detection will be determined by the metric argument. This can be either the average single-station correlation coefficient which is calculated as Detection.detect_val / Detection.no_chans, or the raw cross channel correlation sum which is simply Detection.detect_val. :type trig_int: float :param trig_int: Minimum detection separation in seconds. :type metric: str :param metric: What metric to sort peaks by. Either 'avg_cor' which takes the single station average correlation or 'cor_sum' which takes the total correlation sum across all channels. :type timing: str :param timing: Either 'detect' or 'origin' to decluster based on either the detection time or the origin time. .. Warning:: Works in place on object, if you need to keep the original safe then run this on a copy of the object! .. rubric:: Example >>> party = Party().read() >>> len(party) 4 >>> declustered = party.decluster(20) >>> len(party) 3
[ "De", "-", "cluster", "a", "Party", "of", "detections", "by", "enforcing", "a", "detection", "separation", "." ]
python
train
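The essence of declustering is easy to show in a few lines: rank detections by the chosen metric, keep the strongest, and discard anything closer than `trig_int` to a detection already kept. The sketch below is a conceptual illustration only, not EQcorrscan's optimized `decluster` routine.

```python
# Conceptual declustering: greedy selection by metric with a minimum
# time separation between kept detections.

def decluster_simple(detections, trig_int):
    """detections: list of (time_in_seconds, metric_value) tuples."""
    kept = []
    for t, v in sorted(detections, key=lambda d: d[1], reverse=True):
        if all(abs(t - kt) >= trig_int for kt, _ in kept):
            kept.append((t, v))
    return sorted(kept)


dets = [(0.0, 0.61), (8.0, 0.93), (12.0, 0.55), (40.0, 0.70)]
print(decluster_simple(dets, trig_int=20))
# -> [(8.0, 0.93), (40.0, 0.7)]; the 0.0 s and 12.0 s hits fall within
#    20 s of the stronger 8.0 s detection and are removed.
```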
ceph/ceph-deploy
ceph_deploy/util/system.py
https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/util/system.py#L133-L148
def start_service(conn, service='ceph'): """ Stop a service on a remote host depending on the type of init system. Obviously, this should be done for RHEL/Fedora/CentOS systems. This function does not do any kind of detection. """ if is_systemd(conn): remoto.process.run( conn, [ 'systemctl', 'start', '{service}'.format(service=service), ] )
[ "def", "start_service", "(", "conn", ",", "service", "=", "'ceph'", ")", ":", "if", "is_systemd", "(", "conn", ")", ":", "remoto", ".", "process", ".", "run", "(", "conn", ",", "[", "'systemctl'", ",", "'start'", ",", "'{service}'", ".", "format", "(", "service", "=", "service", ")", ",", "]", ")" ]
Start a service on a remote host depending on the type of init system. Obviously, this should be done for RHEL/Fedora/CentOS systems. This function does not do any kind of detection.
[ "Start", "a", "service", "on", "a", "remote", "host", "depending", "on", "the", "type", "of", "init", "system", ".", "Obviously", "this", "should", "be", "done", "for", "RHEL", "/", "Fedora", "/", "CentOS", "systems", "." ]
python
train
PyCQA/pylint
pylint/checkers/classes.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/classes.py#L742-L754
def visit_classdef(self, node): """init visit variable _accessed """ self._check_bases_classes(node) # if not an exception or a metaclass if node.type == "class" and has_known_bases(node): try: node.local_attr("__init__") except astroid.NotFoundError: self.add_message("no-init", args=node, node=node) self._check_slots(node) self._check_proper_bases(node) self._check_consistent_mro(node)
[ "def", "visit_classdef", "(", "self", ",", "node", ")", ":", "self", ".", "_check_bases_classes", "(", "node", ")", "# if not an exception or a metaclass", "if", "node", ".", "type", "==", "\"class\"", "and", "has_known_bases", "(", "node", ")", ":", "try", ":", "node", ".", "local_attr", "(", "\"__init__\"", ")", "except", "astroid", ".", "NotFoundError", ":", "self", ".", "add_message", "(", "\"no-init\"", ",", "args", "=", "node", ",", "node", "=", "node", ")", "self", ".", "_check_slots", "(", "node", ")", "self", ".", "_check_proper_bases", "(", "node", ")", "self", ".", "_check_consistent_mro", "(", "node", ")" ]
init visit variable _accessed
[ "init", "visit", "variable", "_accessed" ]
python
test
3ll3d00d/vibe
backend/src/core/BaseConfig.py
https://github.com/3ll3d00d/vibe/blob/124b029f13ac746723e92cb47e9cb56edd2e54b5/backend/src/core/BaseConfig.py#L87-L93
def _getConfigPath(self): """ Gets the currently configured config path. :return: the path, raises ValueError if it doesn't exist. """ confHome = environ.get('VIBE_CONFIG_HOME') return confHome if confHome is not None else path.join(path.expanduser("~"), '.vibe')
[ "def", "_getConfigPath", "(", "self", ")", ":", "confHome", "=", "environ", ".", "get", "(", "'VIBE_CONFIG_HOME'", ")", "return", "confHome", "if", "confHome", "is", "not", "None", "else", "path", ".", "join", "(", "path", ".", "expanduser", "(", "\"~\"", ")", ",", "'.vibe'", ")" ]
Gets the currently configured config path. :return: the path, raises ValueError if it doesn't exist.
[ "Gets", "the", "currently", "configured", "config", "path", ".", ":", "return", ":", "the", "path", "raises", "ValueError", "if", "it", "doesn", "t", "exist", "." ]
python
train
estnltk/estnltk
estnltk/vabamorf/morf.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L101-L111
def instance(): """Return an PyVabamorf instance. It returns the previously initialized instance or creates a new one if nothing exists. Also creates new instance in case the process has been forked. """ if not hasattr(Vabamorf, 'pid') or Vabamorf.pid != os.getpid(): Vabamorf.pid = os.getpid() Vabamorf.morf = Vabamorf() return Vabamorf.morf
[ "def", "instance", "(", ")", ":", "if", "not", "hasattr", "(", "Vabamorf", ",", "'pid'", ")", "or", "Vabamorf", ".", "pid", "!=", "os", ".", "getpid", "(", ")", ":", "Vabamorf", ".", "pid", "=", "os", ".", "getpid", "(", ")", "Vabamorf", ".", "morf", "=", "Vabamorf", "(", ")", "return", "Vabamorf", ".", "morf" ]
Return a PyVabamorf instance. It returns the previously initialized instance or creates a new one if nothing exists. Also creates new instance in case the process has been forked.
[ "Return", "a", "PyVabamorf", "instance", "." ]
python
train
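The `instance()` method is a fork-aware singleton: the cached object is reused within a process but rebuilt whenever the PID changes. A generic, runnable sketch of the same pattern, with a made-up `HeavyResource` class standing in for the Vabamorf wrapper:

```python
# Fork-aware singleton: cache one instance per process, rebuild after fork.
import os


class HeavyResource:
    def __init__(self):
        self.pid = os.getpid()   # pretend this wraps a costly native handle

    @staticmethod
    def instance():
        cached = getattr(HeavyResource, "_cached", None)
        if cached is None or cached.pid != os.getpid():
            HeavyResource._cached = HeavyResource()
        return HeavyResource._cached


a = HeavyResource.instance()
b = HeavyResource.instance()
print(a is b)   # True: the same process reuses the cached object
# After os.fork(), the child's PID differs, so instance() builds a fresh
# object instead of sharing native state with the parent.
```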
IBMStreams/pypi.streamsx
streamsx/topology/schema.py
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/topology/schema.py#L659-L675
def _fnop_style(schema, op, name): """Set an operator's parameter representing the style of this schema.""" if is_common(schema): if name in op.params: del op.params[name] return if _is_pending(schema): ntp = 'pending' elif schema.style is tuple: ntp = 'tuple' elif schema.style is _spl_dict: ntp = 'dict' elif _is_namedtuple(schema.style) and hasattr(schema.style, '_splpy_namedtuple'): ntp = 'namedtuple:' + schema.style._splpy_namedtuple else: return op.params[name] = ntp
[ "def", "_fnop_style", "(", "schema", ",", "op", ",", "name", ")", ":", "if", "is_common", "(", "schema", ")", ":", "if", "name", "in", "op", ".", "params", ":", "del", "op", ".", "params", "[", "name", "]", "return", "if", "_is_pending", "(", "schema", ")", ":", "ntp", "=", "'pending'", "elif", "schema", ".", "style", "is", "tuple", ":", "ntp", "=", "'tuple'", "elif", "schema", ".", "style", "is", "_spl_dict", ":", "ntp", "=", "'dict'", "elif", "_is_namedtuple", "(", "schema", ".", "style", ")", "and", "hasattr", "(", "schema", ".", "style", ",", "'_splpy_namedtuple'", ")", ":", "ntp", "=", "'namedtuple:'", "+", "schema", ".", "style", ".", "_splpy_namedtuple", "else", ":", "return", "op", ".", "params", "[", "name", "]", "=", "ntp" ]
Set an operator's parameter representing the style of this schema.
[ "Set", "an", "operator", "s", "parameter", "representing", "the", "style", "of", "this", "schema", "." ]
python
train
rohithpr/py-timed-dialog
ptd/dialog.py
https://github.com/rohithpr/py-timed-dialog/blob/6ca2d8d3ea4da5bac016d4b2f56c3c3a822ecf56/ptd/dialog.py#L25-L59
def button_input(self, title, message, buttons, default, timeout=None, dimensions=None): ''' Function to accept input in the form of a button click. ''' # Create the dialog box self.response = default self.top = tkinter.Tk() self.top.title(title) # Use dimensions if passes if dimensions is not None: self.top.minsize(width=dimensions[0], height=dimensions[1]) self.top.maxsize(width=dimensions[0], height=dimensions[1]) # Display a message labelString = tkinter.StringVar() labelString.set(message) label = tkinter.Label(self.top, textvariable=labelString, relief=tkinter.RAISED) label.pack(ipadx=100, ipady=10) # Populate dialog box with buttons for key in buttons.keys(): button = tkinter.Button(self.top, text=buttons[key], command=lambda key=key: self.selected(key)) button.pack(fill='both', pady=5, padx=10) # Destroy the dialog box if there has been no button click within the timeout period if timeout != None: try: self.top.after(timeout, lambda: self.top.destroy()) except: pass self.top.mainloop() return self.response
[ "def", "button_input", "(", "self", ",", "title", ",", "message", ",", "buttons", ",", "default", ",", "timeout", "=", "None", ",", "dimensions", "=", "None", ")", ":", "# Create the dialog box", "self", ".", "response", "=", "default", "self", ".", "top", "=", "tkinter", ".", "Tk", "(", ")", "self", ".", "top", ".", "title", "(", "title", ")", "# Use dimensions if passes", "if", "dimensions", "is", "not", "None", ":", "self", ".", "top", ".", "minsize", "(", "width", "=", "dimensions", "[", "0", "]", ",", "height", "=", "dimensions", "[", "1", "]", ")", "self", ".", "top", ".", "maxsize", "(", "width", "=", "dimensions", "[", "0", "]", ",", "height", "=", "dimensions", "[", "1", "]", ")", "# Display a message", "labelString", "=", "tkinter", ".", "StringVar", "(", ")", "labelString", ".", "set", "(", "message", ")", "label", "=", "tkinter", ".", "Label", "(", "self", ".", "top", ",", "textvariable", "=", "labelString", ",", "relief", "=", "tkinter", ".", "RAISED", ")", "label", ".", "pack", "(", "ipadx", "=", "100", ",", "ipady", "=", "10", ")", "# Populate dialog box with buttons", "for", "key", "in", "buttons", ".", "keys", "(", ")", ":", "button", "=", "tkinter", ".", "Button", "(", "self", ".", "top", ",", "text", "=", "buttons", "[", "key", "]", ",", "command", "=", "lambda", "key", "=", "key", ":", "self", ".", "selected", "(", "key", ")", ")", "button", ".", "pack", "(", "fill", "=", "'both'", ",", "pady", "=", "5", ",", "padx", "=", "10", ")", "# Destroy the dialog box if there has been no button click within the timeout period", "if", "timeout", "!=", "None", ":", "try", ":", "self", ".", "top", ".", "after", "(", "timeout", ",", "lambda", ":", "self", ".", "top", ".", "destroy", "(", ")", ")", "except", ":", "pass", "self", ".", "top", ".", "mainloop", "(", ")", "return", "self", ".", "response" ]
Function to accept input in the form of a button click.
[ "Function", "to", "accept", "input", "in", "the", "form", "of", "a", "button", "click", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/completion_html.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/completion_html.py#L297-L300
def select_right(self): """move cursor right""" r, c = self._index self._select_index(r, c+1)
[ "def", "select_right", "(", "self", ")", ":", "r", ",", "c", "=", "self", ".", "_index", "self", ".", "_select_index", "(", "r", ",", "c", "+", "1", ")" ]
move cursor right
[ "move", "cursor", "right" ]
python
test
RJT1990/pyflux
pyflux/inference/metropolis_hastings.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/inference/metropolis_hastings.py#L102-L171
def sample(self): """ Sample from M-H algorithm Returns ---------- chain : np.array Chains for each parameter mean_est : np.array Mean values for each parameter median_est : np.array Median values for each parameter upper_95_est : np.array Upper 95% credibility interval for each parameter lower_95_est : np.array Lower 95% credibility interval for each parameter """ acceptance = 1 finish = 0 while (acceptance < 0.234 or acceptance > 0.4) or finish == 0: # If acceptance is in range, proceed to sample, else continue tuning if not (acceptance < 0.234 or acceptance > 0.4): finish = 1 if not self.quiet_progress: print("") print("Tuning complete! Now sampling.") sims_to_do = self.nsims else: sims_to_do = int(self.nsims/2) # For acceptance rate tuning # Holds data on acceptance rates and uniform random numbers a_rate = np.zeros([sims_to_do,1]) crit = np.random.rand(sims_to_do,1) post = multivariate_normal(np.zeros(self.param_no), self.cov_matrix) rnums = post.rvs()*self.scale for k in range(1,sims_to_do): rnums = np.vstack((rnums,post.rvs()*self.scale)) self.phi, a_rate = metropolis_sampler(sims_to_do, self.phi, self.posterior, a_rate, rnums, crit) acceptance = a_rate.sum()/a_rate.shape[0] self.scale = self.tune_scale(acceptance,self.scale) if not self.quiet_progress: print("Acceptance rate of Metropolis-Hastings is " + str(acceptance)) # Remove warm-up and thin self.phi = self.phi[int(self.nsims/2):,:][::self.thinning,:] chain = np.array([self.phi[i][0] for i in range(0, self.phi.shape[0])]) for m in range(1, self.param_no): chain = np.vstack((chain, [self.phi[i][m] for i in range(0,self.phi.shape[0])])) if self.param_no == 1: chain = np.array([chain]) mean_est = np.array([np.mean(np.array([self.phi[i][j] for i in range(0,self.phi.shape[0])])) for j in range(self.param_no)]) median_est = np.array([np.median(np.array([self.phi[i][j] for i in range(0,self.phi.shape[0])])) for j in range(self.param_no)]) upper_95_est = np.array([np.percentile(np.array([self.phi[i][j] for i in range(0,self.phi.shape[0])]), 95) for j in range(self.param_no)]) lower_95_est = np.array([np.percentile(np.array([self.phi[i][j] for i in range(0,self.phi.shape[0])]), 5) for j in range(self.param_no)]) return chain, mean_est, median_est, upper_95_est, lower_95_est
[ "def", "sample", "(", "self", ")", ":", "acceptance", "=", "1", "finish", "=", "0", "while", "(", "acceptance", "<", "0.234", "or", "acceptance", ">", "0.4", ")", "or", "finish", "==", "0", ":", "# If acceptance is in range, proceed to sample, else continue tuning", "if", "not", "(", "acceptance", "<", "0.234", "or", "acceptance", ">", "0.4", ")", ":", "finish", "=", "1", "if", "not", "self", ".", "quiet_progress", ":", "print", "(", "\"\"", ")", "print", "(", "\"Tuning complete! Now sampling.\"", ")", "sims_to_do", "=", "self", ".", "nsims", "else", ":", "sims_to_do", "=", "int", "(", "self", ".", "nsims", "/", "2", ")", "# For acceptance rate tuning", "# Holds data on acceptance rates and uniform random numbers", "a_rate", "=", "np", ".", "zeros", "(", "[", "sims_to_do", ",", "1", "]", ")", "crit", "=", "np", ".", "random", ".", "rand", "(", "sims_to_do", ",", "1", ")", "post", "=", "multivariate_normal", "(", "np", ".", "zeros", "(", "self", ".", "param_no", ")", ",", "self", ".", "cov_matrix", ")", "rnums", "=", "post", ".", "rvs", "(", ")", "*", "self", ".", "scale", "for", "k", "in", "range", "(", "1", ",", "sims_to_do", ")", ":", "rnums", "=", "np", ".", "vstack", "(", "(", "rnums", ",", "post", ".", "rvs", "(", ")", "*", "self", ".", "scale", ")", ")", "self", ".", "phi", ",", "a_rate", "=", "metropolis_sampler", "(", "sims_to_do", ",", "self", ".", "phi", ",", "self", ".", "posterior", ",", "a_rate", ",", "rnums", ",", "crit", ")", "acceptance", "=", "a_rate", ".", "sum", "(", ")", "/", "a_rate", ".", "shape", "[", "0", "]", "self", ".", "scale", "=", "self", ".", "tune_scale", "(", "acceptance", ",", "self", ".", "scale", ")", "if", "not", "self", ".", "quiet_progress", ":", "print", "(", "\"Acceptance rate of Metropolis-Hastings is \"", "+", "str", "(", "acceptance", ")", ")", "# Remove warm-up and thin", "self", ".", "phi", "=", "self", ".", "phi", "[", "int", "(", "self", ".", "nsims", "/", "2", ")", ":", ",", ":", "]", "[", ":", ":", "self", ".", "thinning", ",", ":", "]", "chain", "=", "np", ".", "array", "(", "[", "self", ".", "phi", "[", "i", "]", "[", "0", "]", "for", "i", "in", "range", "(", "0", ",", "self", ".", "phi", ".", "shape", "[", "0", "]", ")", "]", ")", "for", "m", "in", "range", "(", "1", ",", "self", ".", "param_no", ")", ":", "chain", "=", "np", ".", "vstack", "(", "(", "chain", ",", "[", "self", ".", "phi", "[", "i", "]", "[", "m", "]", "for", "i", "in", "range", "(", "0", ",", "self", ".", "phi", ".", "shape", "[", "0", "]", ")", "]", ")", ")", "if", "self", ".", "param_no", "==", "1", ":", "chain", "=", "np", ".", "array", "(", "[", "chain", "]", ")", "mean_est", "=", "np", ".", "array", "(", "[", "np", ".", "mean", "(", "np", ".", "array", "(", "[", "self", ".", "phi", "[", "i", "]", "[", "j", "]", "for", "i", "in", "range", "(", "0", ",", "self", ".", "phi", ".", "shape", "[", "0", "]", ")", "]", ")", ")", "for", "j", "in", "range", "(", "self", ".", "param_no", ")", "]", ")", "median_est", "=", "np", ".", "array", "(", "[", "np", ".", "median", "(", "np", ".", "array", "(", "[", "self", ".", "phi", "[", "i", "]", "[", "j", "]", "for", "i", "in", "range", "(", "0", ",", "self", ".", "phi", ".", "shape", "[", "0", "]", ")", "]", ")", ")", "for", "j", "in", "range", "(", "self", ".", "param_no", ")", "]", ")", "upper_95_est", "=", "np", ".", "array", "(", "[", "np", ".", "percentile", "(", "np", ".", "array", "(", "[", "self", ".", "phi", "[", "i", "]", "[", "j", "]", "for", "i", "in", "range", "(", "0", ",", "self", ".", "phi", ".", "shape", "[", "0", 
"]", ")", "]", ")", ",", "95", ")", "for", "j", "in", "range", "(", "self", ".", "param_no", ")", "]", ")", "lower_95_est", "=", "np", ".", "array", "(", "[", "np", ".", "percentile", "(", "np", ".", "array", "(", "[", "self", ".", "phi", "[", "i", "]", "[", "j", "]", "for", "i", "in", "range", "(", "0", ",", "self", ".", "phi", ".", "shape", "[", "0", "]", ")", "]", ")", ",", "5", ")", "for", "j", "in", "range", "(", "self", ".", "param_no", ")", "]", ")", "return", "chain", ",", "mean_est", ",", "median_est", ",", "upper_95_est", ",", "lower_95_est" ]
Sample from M-H algorithm Returns ---------- chain : np.array Chains for each parameter mean_est : np.array Mean values for each parameter median_est : np.array Median values for each parameter upper_95_est : np.array Upper 95% credibility interval for each parameter lower_95_est : np.array Lower 95% credibility interval for each parameter
[ "Sample", "from", "M", "-", "H", "algorithm" ]
python
train
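The tuning loop above adjusts the proposal scale until the Metropolis-Hastings acceptance rate lands in roughly the 0.234 to 0.4 window, then draws the real chain and discards warm-up. A minimal 1-D sketch of that idea, with a simple rescaling rule standing in for PyFlux's `tune_scale` (the 0.8/1.25 multipliers and the standard-normal target are illustrative, not the library's values):

```python
# Random-walk Metropolis with acceptance-rate-based scale tuning (sketch).
import numpy as np

rng = np.random.default_rng(0)

def log_post(x):
    return -0.5 * x * x                      # unnormalized standard normal

def rw_metropolis(n, scale, x0=0.0):
    x, chain, accepted = x0, np.empty(n), 0
    for i in range(n):
        prop = x + scale * rng.normal()
        if np.log(rng.uniform()) < log_post(prop) - log_post(x):
            x, accepted = prop, accepted + 1
        chain[i] = x
    return chain, accepted / n

scale, acc = 10.0, 0.0
while not 0.234 <= acc <= 0.4:               # tuning phase on short runs
    chain, acc = rw_metropolis(2000, scale)
    if acc < 0.234:
        scale *= 0.8                          # too many rejections: smaller steps
    elif acc > 0.4:
        scale *= 1.25                         # accepting too easily: bigger steps
print("tuned scale:", round(scale, 2), "acceptance:", round(acc, 3))

chain, _ = rw_metropolis(20000, scale)
chain = chain[10000:][::2]                    # drop warm-up, thin by 2
print("posterior mean ~", round(float(chain.mean()), 3))
```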
neuropsychology/NeuroKit.py
neurokit/eeg/eeg_data.py
https://github.com/neuropsychology/NeuroKit.py/blob/c9589348fbbde0fa7e986048c48f38e6b488adfe/neurokit/eeg/eeg_data.py#L439-L467
def eeg_to_df(eeg, index=None, include="all", exclude=None, hemisphere="both", central=True): """ Convert mne Raw or Epochs object to dataframe or dict of dataframes. DOCS INCOMPLETE :( """ if isinstance(eeg, mne.Epochs): data = {} if index is None: index = range(len(eeg)) for epoch_index, epoch in zip(index, eeg.get_data()): epoch = pd.DataFrame(epoch.T) epoch.columns = eeg.ch_names epoch.index = eeg.times selection = eeg_select_electrodes(eeg, include=include, exclude=exclude, hemisphere=hemisphere, central=central) data[epoch_index] = epoch[selection] else: # it might be a Raw object data = eeg.get_data().T data = pd.DataFrame(data) data.columns = eeg.ch_names data.index = eeg.times return(data)
[ "def", "eeg_to_df", "(", "eeg", ",", "index", "=", "None", ",", "include", "=", "\"all\"", ",", "exclude", "=", "None", ",", "hemisphere", "=", "\"both\"", ",", "central", "=", "True", ")", ":", "if", "isinstance", "(", "eeg", ",", "mne", ".", "Epochs", ")", ":", "data", "=", "{", "}", "if", "index", "is", "None", ":", "index", "=", "range", "(", "len", "(", "eeg", ")", ")", "for", "epoch_index", ",", "epoch", "in", "zip", "(", "index", ",", "eeg", ".", "get_data", "(", ")", ")", ":", "epoch", "=", "pd", ".", "DataFrame", "(", "epoch", ".", "T", ")", "epoch", ".", "columns", "=", "eeg", ".", "ch_names", "epoch", ".", "index", "=", "eeg", ".", "times", "selection", "=", "eeg_select_electrodes", "(", "eeg", ",", "include", "=", "include", ",", "exclude", "=", "exclude", ",", "hemisphere", "=", "hemisphere", ",", "central", "=", "central", ")", "data", "[", "epoch_index", "]", "=", "epoch", "[", "selection", "]", "else", ":", "# it might be a Raw object", "data", "=", "eeg", ".", "get_data", "(", ")", ".", "T", "data", "=", "pd", ".", "DataFrame", "(", "data", ")", "data", ".", "columns", "=", "eeg", ".", "ch_names", "data", ".", "index", "=", "eeg", ".", "times", "return", "(", "data", ")" ]
Convert mne Raw or Epochs object to dataframe or dict of dataframes. DOCS INCOMPLETE :(
[ "Convert", "mne", "Raw", "or", "Epochs", "object", "to", "dataframe", "or", "dict", "of", "dataframes", "." ]
python
train
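For a Raw object the conversion reduces to transposing a `(channels, times)` array into a DataFrame with channel names as columns and sample times as the index. A self-contained stand-in that needs no mne object:

```python
# The core of the Raw branch: (channels, times) array -> time-indexed DataFrame.
import numpy as np
import pandas as pd

ch_names = ["Fz", "Cz", "Pz"]
times = np.arange(0, 1.0, 0.25)              # 4 samples at 4 Hz
signal = np.random.default_rng(1).normal(size=(len(ch_names), len(times)))

df = pd.DataFrame(signal.T, columns=ch_names, index=times)
print(df)    # one row per time point, one column per electrode
```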
crunchyroll/ef-open
efopen/ef_aws_resolver.py
https://github.com/crunchyroll/ef-open/blob/59fff3761af07a59f8f1c1682f2be004bdac15f7/efopen/ef_aws_resolver.py#L358-L371
def elbv2_load_balancer_arn_suffix(self, lookup, default=None): """ Args: lookup: the friendly name of the v2 elb to look up default: value to return in case of no match Returns: The shorthand fragment of the ALB's ARN, of the form `app/*/*` """ try: elb = self._elbv2_load_balancer(lookup) m = re.search(r'.+?(app\/[^\/]+\/[^\/]+)$', elb['LoadBalancerArn']) return m.group(1) except ClientError: return default
[ "def", "elbv2_load_balancer_arn_suffix", "(", "self", ",", "lookup", ",", "default", "=", "None", ")", ":", "try", ":", "elb", "=", "self", ".", "_elbv2_load_balancer", "(", "lookup", ")", "m", "=", "re", ".", "search", "(", "r'.+?(app\\/[^\\/]+\\/[^\\/]+)$'", ",", "elb", "[", "'LoadBalancerArn'", "]", ")", "return", "m", ".", "group", "(", "1", ")", "except", "ClientError", ":", "return", "default" ]
Args: lookup: the friendly name of the v2 elb to look up default: value to return in case of no match Returns: The shorthand fragment of the ALB's ARN, of the form `app/*/*`
[ "Args", ":", "lookup", ":", "the", "friendly", "name", "of", "the", "v2", "elb", "to", "look", "up", "default", ":", "value", "to", "return", "in", "case", "of", "no", "match", "Returns", ":", "The", "shorthand", "fragment", "of", "the", "ALB", "s", "ARN", "of", "the", "form", "app", "/", "*", "/", "*" ]
python
train
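The lookup hinges on a regex that keeps only the trailing `app/<name>/<id>` fragment of an ALB ARN. Applied to a made-up ARN:

```python
# Extract the shorthand "app/<name>/<id>" suffix from an illustrative ALB ARN.
import re

arn = ("arn:aws:elasticloadbalancing:us-east-1:123456789012:"
       "loadbalancer/app/my-alb/50dc6c495c0c9188")
m = re.search(r'.+?(app\/[^\/]+\/[^\/]+)$', arn)
print(m.group(1))   # app/my-alb/50dc6c495c0c9188
```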
scanny/python-pptx
pptx/oxml/table.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/table.py#L497-L501
def iter_except_top_row_tcs(self): """Generate each `a:tc` element in non-first rows of range.""" for tr in self._tbl.tr_lst[self._top + 1:self._bottom]: for tc in tr.tc_lst[self._left:self._right]: yield tc
[ "def", "iter_except_top_row_tcs", "(", "self", ")", ":", "for", "tr", "in", "self", ".", "_tbl", ".", "tr_lst", "[", "self", ".", "_top", "+", "1", ":", "self", ".", "_bottom", "]", ":", "for", "tc", "in", "tr", ".", "tc_lst", "[", "self", ".", "_left", ":", "self", ".", "_right", "]", ":", "yield", "tc" ]
Generate each `a:tc` element in non-first rows of range.
[ "Generate", "each", "a", ":", "tc", "element", "in", "non", "-", "first", "rows", "of", "range", "." ]
python
train
sander76/aio-powerview-api
aiopvapi/resources/shade.py
https://github.com/sander76/aio-powerview-api/blob/08b6ac747aba9de19842359a981a7ff1292f5a6c/aiopvapi/resources/shade.py#L133-L142
async def get_current_position(self, refresh=True) -> dict: """Return the current shade position. :param refresh: If True it queries the hub for the latest info. :return: Dictionary with position data. """ if refresh: await self.refresh() position = self._raw_data.get(ATTR_POSITION_DATA) return position
[ "async", "def", "get_current_position", "(", "self", ",", "refresh", "=", "True", ")", "->", "dict", ":", "if", "refresh", ":", "await", "self", ".", "refresh", "(", ")", "position", "=", "self", ".", "_raw_data", ".", "get", "(", "ATTR_POSITION_DATA", ")", "return", "position" ]
Return the current shade position. :param refresh: If True it queries the hub for the latest info. :return: Dictionary with position data.
[ "Return", "the", "current", "shade", "position", "." ]
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py#L124-L138
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_interface_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail") config = get_lldp_neighbor_detail output = ET.SubElement(get_lldp_neighbor_detail, "output") lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail") local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name") local_interface_name_key.text = kwargs.pop('local_interface_name') remote_interface_name = ET.SubElement(lldp_neighbor_detail, "remote-interface-name") remote_interface_name.text = kwargs.pop('remote_interface_name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_interface_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_lldp_neighbor_detail", "=", "ET", ".", "Element", "(", "\"get_lldp_neighbor_detail\"", ")", "config", "=", "get_lldp_neighbor_detail", "output", "=", "ET", ".", "SubElement", "(", "get_lldp_neighbor_detail", ",", "\"output\"", ")", "lldp_neighbor_detail", "=", "ET", ".", "SubElement", "(", "output", ",", "\"lldp-neighbor-detail\"", ")", "local_interface_name_key", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"local-interface-name\"", ")", "local_interface_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'local_interface_name'", ")", "remote_interface_name", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"remote-interface-name\"", ")", "remote_interface_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'remote_interface_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
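What this auto-generated builder actually assembles is a small NETCONF-style element tree. The interface names below are placeholders; the sketch only shows the XML shape handed to the callback:

```python
# Build and print the element tree the generated method constructs (sketch).
import xml.etree.ElementTree as ET

get_detail = ET.Element("get_lldp_neighbor_detail")
output = ET.SubElement(get_detail, "output")
detail = ET.SubElement(output, "lldp-neighbor-detail")
ET.SubElement(detail, "local-interface-name").text = "TenGigabitEthernet 1/0/1"
ET.SubElement(detail, "remote-interface-name").text = "TenGigabitEthernet 2/0/1"

print(ET.tostring(get_detail, encoding="unicode"))
# -> <get_lldp_neighbor_detail><output><lldp-neighbor-detail>...</lldp-neighbor-detail></output></get_lldp_neighbor_detail>
```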
aws/aws-encryption-sdk-python
src/aws_encryption_sdk/__init__.py
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/__init__.py#L128-L184
def stream(**kwargs): """Provides an :py:func:`open`-like interface to the streaming encryptor/decryptor classes. .. warning:: Take care when decrypting framed messages with large frame length and large non-framed messages. In order to protect the authenticity of the encrypted data, no plaintext is returned until it has been authenticated. Because of this, potentially large amounts of data may be read into memory. In the case of framed messages, the entire contents of each frame are read into memory and authenticated before returning any plaintext. In the case of non-framed messages, the entire message is read into memory and authenticated before returning any plaintext. The authenticated plaintext is held in memory until it is requested. .. note:: Consequently, keep the above decrypting consideration in mind when encrypting messages to ensure that issues are not encountered when decrypting those messages. .. code:: python >>> import aws_encryption_sdk >>> kms_key_provider = aws_encryption_sdk.KMSMasterKeyProvider(key_ids=[ ... 'arn:aws:kms:us-east-1:2222222222222:key/22222222-2222-2222-2222-222222222222', ... 'arn:aws:kms:us-east-1:3333333333333:key/33333333-3333-3333-3333-333333333333' ... ]) >>> plaintext_filename = 'my-secret-data.dat' >>> ciphertext_filename = 'my-encrypted-data.ct' >>> with open(plaintext_filename, 'rb') as pt_file, open(ciphertext_filename, 'wb') as ct_file: ... with aws_encryption_sdk.stream( ... mode='e', ... source=pt_file, ... key_provider=kms_key_provider ... ) as encryptor: ... for chunk in encryptor: ... ct_file.write(chunk) >>> new_plaintext_filename = 'my-decrypted-data.dat' >>> with open(ciphertext_filename, 'rb') as ct_file, open(new_plaintext_filename, 'wb') as pt_file: ... with aws_encryption_sdk.stream( ... mode='d', ... source=ct_file, ... key_provider=kms_key_provider ... ) as decryptor: ... for chunk in decryptor: ... pt_file.write(chunk) :param str mode: Type of streaming client to return (e/encrypt: encryptor, d/decrypt: decryptor) :param **kwargs: All other parameters provided are passed to the appropriate Streaming client :returns: Streaming Encryptor or Decryptor, as requested :rtype: :class:`aws_encryption_sdk.streaming_client.StreamEncryptor` or :class:`aws_encryption_sdk.streaming_client.StreamDecryptor` :raises ValueError: if supplied with an unsupported mode value """ mode = kwargs.pop("mode") _stream_map = {"e": StreamEncryptor, "encrypt": StreamEncryptor, "d": StreamDecryptor, "decrypt": StreamDecryptor} try: return _stream_map[mode.lower()](**kwargs) except KeyError: raise ValueError("Unsupported mode: {}".format(mode))
[ "def", "stream", "(", "*", "*", "kwargs", ")", ":", "mode", "=", "kwargs", ".", "pop", "(", "\"mode\"", ")", "_stream_map", "=", "{", "\"e\"", ":", "StreamEncryptor", ",", "\"encrypt\"", ":", "StreamEncryptor", ",", "\"d\"", ":", "StreamDecryptor", ",", "\"decrypt\"", ":", "StreamDecryptor", "}", "try", ":", "return", "_stream_map", "[", "mode", ".", "lower", "(", ")", "]", "(", "*", "*", "kwargs", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Unsupported mode: {}\"", ".", "format", "(", "mode", ")", ")" ]
Provides an :py:func:`open`-like interface to the streaming encryptor/decryptor classes. .. warning:: Take care when decrypting framed messages with large frame length and large non-framed messages. In order to protect the authenticity of the encrypted data, no plaintext is returned until it has been authenticated. Because of this, potentially large amounts of data may be read into memory. In the case of framed messages, the entire contents of each frame are read into memory and authenticated before returning any plaintext. In the case of non-framed messages, the entire message is read into memory and authenticated before returning any plaintext. The authenticated plaintext is held in memory until it is requested. .. note:: Consequently, keep the above decrypting consideration in mind when encrypting messages to ensure that issues are not encountered when decrypting those messages. .. code:: python >>> import aws_encryption_sdk >>> kms_key_provider = aws_encryption_sdk.KMSMasterKeyProvider(key_ids=[ ... 'arn:aws:kms:us-east-1:2222222222222:key/22222222-2222-2222-2222-222222222222', ... 'arn:aws:kms:us-east-1:3333333333333:key/33333333-3333-3333-3333-333333333333' ... ]) >>> plaintext_filename = 'my-secret-data.dat' >>> ciphertext_filename = 'my-encrypted-data.ct' >>> with open(plaintext_filename, 'rb') as pt_file, open(ciphertext_filename, 'wb') as ct_file: ... with aws_encryption_sdk.stream( ... mode='e', ... source=pt_file, ... key_provider=kms_key_provider ... ) as encryptor: ... for chunk in encryptor: ... ct_file.write(chunk) >>> new_plaintext_filename = 'my-decrypted-data.dat' >>> with open(ciphertext_filename, 'rb') as ct_file, open(new_plaintext_filename, 'wb') as pt_file: ... with aws_encryption_sdk.stream( ... mode='d', ... source=ct_file, ... key_provider=kms_key_provider ... ) as decryptor: ... for chunk in decryptor: ... pt_file.write(chunk) :param str mode: Type of streaming client to return (e/encrypt: encryptor, d/decrypt: decryptor) :param **kwargs: All other parameters provided are passed to the appropriate Streaming client :returns: Streaming Encryptor or Decryptor, as requested :rtype: :class:`aws_encryption_sdk.streaming_client.StreamEncryptor` or :class:`aws_encryption_sdk.streaming_client.StreamDecryptor` :raises ValueError: if supplied with an unsupported mode value
[ "Provides", "an", ":", "py", ":", "func", ":", "open", "-", "like", "interface", "to", "the", "streaming", "encryptor", "/", "decryptor", "classes", "." ]
python
train
hugapi/hug
hug/interface.py
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/interface.py#L817-L827
def urls(self, version=None): """Returns all URLS that are mapped to this interface""" urls = [] for base_url, routes in self.api.http.routes.items(): for url, methods in routes.items(): for method, versions in methods.items(): for interface_version, interface in versions.items(): if interface_version == version and interface == self: if not url in urls: urls.append(('/v{0}'.format(version) if version else '') + url) return urls
[ "def", "urls", "(", "self", ",", "version", "=", "None", ")", ":", "urls", "=", "[", "]", "for", "base_url", ",", "routes", "in", "self", ".", "api", ".", "http", ".", "routes", ".", "items", "(", ")", ":", "for", "url", ",", "methods", "in", "routes", ".", "items", "(", ")", ":", "for", "method", ",", "versions", "in", "methods", ".", "items", "(", ")", ":", "for", "interface_version", ",", "interface", "in", "versions", ".", "items", "(", ")", ":", "if", "interface_version", "==", "version", "and", "interface", "==", "self", ":", "if", "not", "url", "in", "urls", ":", "urls", ".", "append", "(", "(", "'/v{0}'", ".", "format", "(", "version", ")", "if", "version", "else", "''", ")", "+", "url", ")", "return", "urls" ]
Returns all URLs that are mapped to this interface
[ "Returns", "all", "URLs", "that", "are", "mapped", "to", "this", "interface" ]
python
train
christophertbrown/bioscripts
ctbBio/rRNA_insertions_gff.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions_gff.py#L42-L76
def parse_orf(insertion, gff): """ parse ORF to gff format """ offset = insertion['offset'] if type(insertion['orf']) is not str: return gff for orf in parse_fasta(insertion['orf'].split('|')): ID = orf[0].split('>')[1].split()[0] Start, End, strand = [int(i) for i in orf[0].split(' # ')[1:4]] if strand == 1: strand = '+' else: strand = '-' GeneStrand = insertion['strand'] if strand != GeneStrand: if strand == '+': strand = '-' else: strand = '+' Start, End = End - 2, Start - 2 Start, End = abs(Start + offset) - 1, abs(End + offset) - 1 annot = orf[0].split()[1] if annot == 'n/a': annot = 'unknown' gff['#seqname'].append(insertion['ID']) gff['source'].append('Prodigal and Pfam') gff['feature'].append('CDS') gff['start'].append(Start) gff['end'].append(End) gff['score'].append('.') gff['strand'].append(strand) gff['frame'].append('.') gff['attribute'].append('ID=%s; Name=%s' % (ID, annot)) return gff
[ "def", "parse_orf", "(", "insertion", ",", "gff", ")", ":", "offset", "=", "insertion", "[", "'offset'", "]", "if", "type", "(", "insertion", "[", "'orf'", "]", ")", "is", "not", "str", ":", "return", "gff", "for", "orf", "in", "parse_fasta", "(", "insertion", "[", "'orf'", "]", ".", "split", "(", "'|'", ")", ")", ":", "ID", "=", "orf", "[", "0", "]", ".", "split", "(", "'>'", ")", "[", "1", "]", ".", "split", "(", ")", "[", "0", "]", "Start", ",", "End", ",", "strand", "=", "[", "int", "(", "i", ")", "for", "i", "in", "orf", "[", "0", "]", ".", "split", "(", "' # '", ")", "[", "1", ":", "4", "]", "]", "if", "strand", "==", "1", ":", "strand", "=", "'+'", "else", ":", "strand", "=", "'-'", "GeneStrand", "=", "insertion", "[", "'strand'", "]", "if", "strand", "!=", "GeneStrand", ":", "if", "strand", "==", "'+'", ":", "strand", "=", "'-'", "else", ":", "strand", "=", "'+'", "Start", ",", "End", "=", "End", "-", "2", ",", "Start", "-", "2", "Start", ",", "End", "=", "abs", "(", "Start", "+", "offset", ")", "-", "1", ",", "abs", "(", "End", "+", "offset", ")", "-", "1", "annot", "=", "orf", "[", "0", "]", ".", "split", "(", ")", "[", "1", "]", "if", "annot", "==", "'n/a'", ":", "annot", "=", "'unknown'", "gff", "[", "'#seqname'", "]", ".", "append", "(", "insertion", "[", "'ID'", "]", ")", "gff", "[", "'source'", "]", ".", "append", "(", "'Prodigal and Pfam'", ")", "gff", "[", "'feature'", "]", ".", "append", "(", "'CDS'", ")", "gff", "[", "'start'", "]", ".", "append", "(", "Start", ")", "gff", "[", "'end'", "]", ".", "append", "(", "End", ")", "gff", "[", "'score'", "]", ".", "append", "(", "'.'", ")", "gff", "[", "'strand'", "]", ".", "append", "(", "strand", ")", "gff", "[", "'frame'", "]", ".", "append", "(", "'.'", ")", "gff", "[", "'attribute'", "]", ".", "append", "(", "'ID=%s; Name=%s'", "%", "(", "ID", ",", "annot", ")", ")", "return", "gff" ]
parse ORF to gff format
[ "parse", "ORF", "to", "gff", "format" ]
python
train
fastai/fastai
fastai/callbacks/tensorboard.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callbacks/tensorboard.py#L132-L137
def _write_model_stats(self, iteration:int)->None: "Writes gradient statistics to Tensorboard." # We don't want to write stats when model is not iterated on and hence has zeroed out gradients gen_mode = self.learn.gan_trainer.gen_mode if gen_mode and not self.gen_stats_updated: self._write_gen_model_stats(iteration=iteration) if not gen_mode and not self.crit_stats_updated: self._write_critic_model_stats(iteration=iteration)
[ "def", "_write_model_stats", "(", "self", ",", "iteration", ":", "int", ")", "->", "None", ":", "# We don't want to write stats when model is not iterated on and hence has zeroed out gradients", "gen_mode", "=", "self", ".", "learn", ".", "gan_trainer", ".", "gen_mode", "if", "gen_mode", "and", "not", "self", ".", "gen_stats_updated", ":", "self", ".", "_write_gen_model_stats", "(", "iteration", "=", "iteration", ")", "if", "not", "gen_mode", "and", "not", "self", ".", "crit_stats_updated", ":", "self", ".", "_write_critic_model_stats", "(", "iteration", "=", "iteration", ")" ]
Writes gradient statistics to Tensorboard.
[ "Writes", "gradient", "statistics", "to", "Tensorboard", "." ]
python
train
bububa/pyTOP
pyTOP/logistics.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/logistics.py#L445-L458
def detail_get(self, session, fields=[], **kwargs):
    '''taobao.logistics.orders.detail.get 批量查询物流订单,返回详细信息
    查询物流订单的详细信息,涉及用户隐私字段。(注:该API主要是提供给卖家查询物流订单使用,买家查询物流订单,建议使用taobao.logistics.trace.search)'''
    request = TOPRequest('taobao.logistics.orders.detail.get')
    if not fields:
        shipping = Shipping()
        fields = shipping.fields
    request['fields'] = fields
    for k, v in kwargs.iteritems():
        if k not in ('tid', 'buyer_nick', 'status', 'seller_confirm', 'receiver_name', 'start_created', 'end_created', 'freight_payer', 'type', 'page_no', 'page_size') and v==None:
            continue
        request[k] = v
    self.create(self.execute(request, session))
    return self.shippings
[ "def", "detail_get", "(", "self", ",", "session", ",", "fields", "=", "[", "]", ",", "*", "*", "kwargs", ")", ":", "request", "=", "TOPRequest", "(", "'taobao.logistics.orders.detail.get'", ")", "if", "not", "fields", ":", "shipping", "=", "Shipping", "(", ")", "fields", "=", "shipping", ".", "fields", "request", "[", "'fields'", "]", "=", "fields", "for", "k", ",", "v", "in", "kwargs", ".", "iteritems", "(", ")", ":", "if", "k", "not", "in", "(", "'tid'", ",", "'buyer_nick'", ",", "'status'", ",", "'seller_confirm'", ",", "'receiver_name'", ",", "'start_created'", ",", "'end_created'", ",", "'freight_payer'", ",", "'type'", ",", "'page_no'", ",", "'page_size'", ")", "and", "v", "==", "None", ":", "continue", "request", "[", "k", "]", "=", "v", "self", ".", "create", "(", "self", ".", "execute", "(", "request", ",", "session", ")", ")", "return", "self", ".", "shippings" ]
taobao.logistics.orders.detail.get 批量查询物流订单,返回详细信息 查询物流订单的详细信息,涉及用户隐私字段。(注:该API主要是提供给卖家查询物流订单使用,买家查询物流订单,建议使用taobao.logistics.trace.search)
[ "taobao", ".", "logistics", ".", "orders", ".", "detail", ".", "get", "批量查询物流订单", "返回详细信息", "查询物流订单的详细信息,涉及用户隐私字段。(注:该API主要是提供给卖家查询物流订单使用,买家查询物流订单,建议使用taobao", ".", "logistics", ".", "trace", ".", "search)" ]
python
train
PiotrDabkowski/Js2Py
js2py/legecy_translators/jsparser.py
https://github.com/PiotrDabkowski/Js2Py/blob/c0fa43f5679cf91ca8986c5747fcb07a433dc584/js2py/legecy_translators/jsparser.py#L308-L326
def split_at_single(text, sep, not_before=[], not_after=[]):
    """Works like text.split(sep) but separated fragments cant end with not_before or start with not_after"""
    n = 0
    lt, s = len(text), len(sep)
    last = 0
    while n < lt:
        if not s + n > lt:
            if sep == text[n:n + s]:
                if any(text[last:n].endswith(e) for e in not_before):
                    pass
                elif any(text[n + s:].startswith(e) for e in not_after):
                    pass
                else:
                    yield text[last:n]
                    last = n + s
                    n += s - 1
        n += 1
    yield text[last:]
[ "def", "split_at_single", "(", "text", ",", "sep", ",", "not_before", "=", "[", "]", ",", "not_after", "=", "[", "]", ")", ":", "n", "=", "0", "lt", ",", "s", "=", "len", "(", "text", ")", ",", "len", "(", "sep", ")", "last", "=", "0", "while", "n", "<", "lt", ":", "if", "not", "s", "+", "n", ">", "lt", ":", "if", "sep", "==", "text", "[", "n", ":", "n", "+", "s", "]", ":", "if", "any", "(", "text", "[", "last", ":", "n", "]", ".", "endswith", "(", "e", ")", "for", "e", "in", "not_before", ")", ":", "pass", "elif", "any", "(", "text", "[", "n", "+", "s", ":", "]", ".", "startswith", "(", "e", ")", "for", "e", "in", "not_after", ")", ":", "pass", "else", ":", "yield", "text", "[", "last", ":", "n", "]", "last", "=", "n", "+", "s", "n", "+=", "s", "-", "1", "n", "+=", "1", "yield", "text", "[", "last", ":", "]" ]
Works like text.split(sep) but separated fragments cant end with not_before or start with not_after
[ "Works", "like", "text", ".", "split", "(", "sep", ")", "but", "separated", "fragments", "cant", "end", "with", "not_before", "or", "start", "with", "not_after" ]
python
valid
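A minimal usage sketch for the `split_at_single` generator above; the input string and guard lists are illustrative, not taken from Js2Py itself.

# The comma inside '(c,d)' is protected because the fragment before it ends with '(c'.
pieces = list(split_at_single('a,b,(c,d),e', ',', not_before=['(c'], not_after=['d)']))
# pieces == ['a', 'b', '(c,d)', 'e']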
wilson-eft/wilson
wilson/classes.py
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/classes.py#L59-L63
def get_option(self, key):
    """Return the current value of the option `key` (string).
    Instance method, only refers to current instance."""
    return self._options.get(key, self._default_options[key])
[ "def", "get_option", "(", "self", ",", "key", ")", ":", "return", "self", ".", "_options", ".", "get", "(", "key", ",", "self", ".", "_default_options", "[", "key", "]", ")" ]
Return the current value of the option `key` (string). Instance method, only refers to current instance.
[ "Return", "the", "current", "value", "of", "the", "option", "key", "(", "string", ")", "." ]
python
train
ludbb/secp256k1-py
secp256k1/__init__.py
https://github.com/ludbb/secp256k1-py/blob/f5e455227bf1e833128adf80de8ee0ebcebf218c/secp256k1/__init__.py#L247-L261
def combine(self, pubkeys):
    """Add a number of public keys together."""
    assert len(pubkeys) > 0
    outpub = ffi.new('secp256k1_pubkey *')
    for item in pubkeys:
        assert ffi.typeof(item) is ffi.typeof('secp256k1_pubkey *')

    res = lib.secp256k1_ec_pubkey_combine(
        self.ctx, outpub, pubkeys, len(pubkeys))
    if not res:
        raise Exception('failed to combine public keys')

    self.public_key = outpub
    return outpub
[ "def", "combine", "(", "self", ",", "pubkeys", ")", ":", "assert", "len", "(", "pubkeys", ")", ">", "0", "outpub", "=", "ffi", ".", "new", "(", "'secp256k1_pubkey *'", ")", "for", "item", "in", "pubkeys", ":", "assert", "ffi", ".", "typeof", "(", "item", ")", "is", "ffi", ".", "typeof", "(", "'secp256k1_pubkey *'", ")", "res", "=", "lib", ".", "secp256k1_ec_pubkey_combine", "(", "self", ".", "ctx", ",", "outpub", ",", "pubkeys", ",", "len", "(", "pubkeys", ")", ")", "if", "not", "res", ":", "raise", "Exception", "(", "'failed to combine public keys'", ")", "self", ".", "public_key", "=", "outpub", "return", "outpub" ]
Add a number of public keys together.
[ "Add", "a", "number", "of", "public", "keys", "together", "." ]
python
train
pappasam/latexbuild
latexbuild/__init__.py
https://github.com/pappasam/latexbuild/blob/596a2a0a4c42eaa5eb9503d64f9073ad5d0640d5/latexbuild/__init__.py#L22-L38
def build_pdf(path_jinja2, template_name, path_outfile, template_kwargs=None):
    '''Helper function for building a pdf from a latex jinja2 template

    :param path_jinja2: the root directory for latex jinja2 templates
    :param template_name: the relative path, to path_jinja2, to the desired
        jinja2 Latex template
    :param path_outfile: the full path to the desired final output file
        Must contain the same file extension as files generated by
        cmd_wo_infile, otherwise the process will fail
    :param template_kwargs: a dictionary of key/values for jinja2 variables
    '''
    latex_template_object = LatexBuild(
        path_jinja2,
        template_name,
        template_kwargs,
    )
    return latex_template_object.build_pdf(path_outfile)
[ "def", "build_pdf", "(", "path_jinja2", ",", "template_name", ",", "path_outfile", ",", "template_kwargs", "=", "None", ")", ":", "latex_template_object", "=", "LatexBuild", "(", "path_jinja2", ",", "template_name", ",", "template_kwargs", ",", ")", "return", "latex_template_object", ".", "build_pdf", "(", "path_outfile", ")" ]
Helper function for building a pdf from a latex jinja2 template

:param path_jinja2: the root directory for latex jinja2 templates
:param template_name: the relative path, to path_jinja2, to the desired
    jinja2 Latex template
:param path_outfile: the full path to the desired final output file
    Must contain the same file extension as files generated by
    cmd_wo_infile, otherwise the process will fail
:param template_kwargs: a dictionary of key/values for jinja2 variables
[ "Helper", "function", "for", "building", "a", "pdf", "from", "a", "latex", "jinja2", "template" ]
python
train
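A hedged usage sketch for `build_pdf` above; the directory layout, template name, and context values are hypothetical, only the signature comes from the record.

# Renders the jinja2 Latex template and compiles the result to a PDF.
build_pdf(
    path_jinja2='/home/user/latex_templates',   # root of the jinja2 templates
    template_name='report/main.tex',            # path relative to path_jinja2
    path_outfile='/tmp/report.pdf',             # extension must match the generated file
    template_kwargs={'title': 'Monthly report', 'author': 'Jane'},
)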
six8/polydatum
src/polydatum/dal.py
https://github.com/six8/polydatum/blob/c98a498f8e7972218903ec027f6de78089726c1d/src/polydatum/dal.py#L98-L106
def register_context_middleware(self, *middleware):
    """
    :param middleware: Middleware in order of execution
    """
    for m in middleware:
        if not is_generator(m):
            raise Exception('Middleware {} must be a Python generator callable.'.format(m))

    self._middleware.extend(middleware)
[ "def", "register_context_middleware", "(", "self", ",", "*", "middleware", ")", ":", "for", "m", "in", "middleware", ":", "if", "not", "is_generator", "(", "m", ")", ":", "raise", "Exception", "(", "'Middleware {} must be a Python generator callable.'", ".", "format", "(", "m", ")", ")", "self", ".", "_middleware", ".", "extend", "(", "middleware", ")" ]
:param middleware: Middleware in order of execution
[ ":", "param", "middleware", ":", "Middleware", "in", "order", "of", "execution" ]
python
test
jobovy/galpy
galpy/actionAngle/actionAngleStaeckel.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/actionAngle/actionAngleStaeckel.py#L941-L958
def FZStaeckel(u,v,pot,delta): #pragma: no cover because unused
    """
    NAME:
       FZStaeckel
    PURPOSE:
       return the vertical force
    INPUT:
       u - confocal u
       v - confocal v
       pot - potential
       delta - focus
    OUTPUT:
       FZ(u,v)
    HISTORY:
       2012-11-30 - Written - Bovy (IAS)
    """
    R,z= bovy_coords.uv_to_Rz(u,v,delta=delta)
    return _evaluatezforces(pot,R,z)
[ "def", "FZStaeckel", "(", "u", ",", "v", ",", "pot", ",", "delta", ")", ":", "#pragma: no cover because unused", "R", ",", "z", "=", "bovy_coords", ".", "uv_to_Rz", "(", "u", ",", "v", ",", "delta", "=", "delta", ")", "return", "_evaluatezforces", "(", "pot", ",", "R", ",", "z", ")" ]
NAME:
   FZStaeckel
PURPOSE:
   return the vertical force
INPUT:
   u - confocal u
   v - confocal v
   pot - potential
   delta - focus
OUTPUT:
   FZ(u,v)
HISTORY:
   2012-11-30 - Written - Bovy (IAS)
[ "NAME", ":", "FZStaeckel", "PURPOSE", ":", "return", "the", "vertical", "force", "INPUT", ":", "u", "-", "confocal", "u", "v", "-", "confocal", "v", "pot", "-", "potential", "delta", "-", "focus", "OUTPUT", ":", "FZ", "(", "u", "v", ")", "HISTORY", ":", "2012", "-", "11", "-", "30", "-", "Written", "-", "Bovy", "(", "IAS", ")" ]
python
train
bububa/pyTOP
pyTOP/packages/requests/packages/urllib3/connectionpool.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/packages/requests/packages/urllib3/connectionpool.py#L424-L473
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, basic_auth=None):
    """
    Shortcuts for generating request headers.

    :param keep_alive:
        If ``True``, adds 'connection: keep-alive' header.

    :param accept_encoding:
        Can be a boolean, list, or string.
        ``True`` translates to 'gzip,deflate'.
        List will get joined by comma.
        String will be used as provided.

    :param user_agent:
        String representing the user-agent you want, such as
        "python-urllib3/0.6"

    :param basic_auth:
        Colon-separated username:password string for
        'authorization: basic ...' auth header.

    Example: ::

        >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
        {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
        >>> make_headers(accept_encoding=True)
        {'accept-encoding': 'gzip,deflate'}
    """
    headers = {}
    if accept_encoding:
        if isinstance(accept_encoding, str):
            pass
        elif isinstance(accept_encoding, list):
            accept_encoding = ','.join(accept_encoding)
        else:
            accept_encoding = 'gzip,deflate'
        headers['accept-encoding'] = accept_encoding

    if user_agent:
        headers['user-agent'] = user_agent

    if keep_alive:
        headers['connection'] = 'keep-alive'

    if basic_auth:
        headers['authorization'] = 'Basic ' + \
            basic_auth.encode('base64').strip()

    return headers
[ "def", "make_headers", "(", "keep_alive", "=", "None", ",", "accept_encoding", "=", "None", ",", "user_agent", "=", "None", ",", "basic_auth", "=", "None", ")", ":", "headers", "=", "{", "}", "if", "accept_encoding", ":", "if", "isinstance", "(", "accept_encoding", ",", "str", ")", ":", "pass", "elif", "isinstance", "(", "accept_encoding", ",", "list", ")", ":", "accept_encoding", "=", "','", ".", "join", "(", "accept_encoding", ")", "else", ":", "accept_encoding", "=", "'gzip,deflate'", "headers", "[", "'accept-encoding'", "]", "=", "accept_encoding", "if", "user_agent", ":", "headers", "[", "'user-agent'", "]", "=", "user_agent", "if", "keep_alive", ":", "headers", "[", "'connection'", "]", "=", "'keep-alive'", "if", "basic_auth", ":", "headers", "[", "'authorization'", "]", "=", "'Basic '", "+", "basic_auth", ".", "encode", "(", "'base64'", ")", ".", "strip", "(", ")", "return", "headers" ]
Shortcuts for generating request headers.

:param keep_alive:
    If ``True``, adds 'connection: keep-alive' header.

:param accept_encoding:
    Can be a boolean, list, or string.
    ``True`` translates to 'gzip,deflate'.
    List will get joined by comma.
    String will be used as provided.

:param user_agent:
    String representing the user-agent you want, such as
    "python-urllib3/0.6"

:param basic_auth:
    Colon-separated username:password string for
    'authorization: basic ...' auth header.

Example: ::

    >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
    {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
    >>> make_headers(accept_encoding=True)
    {'accept-encoding': 'gzip,deflate'}
[ "Shortcuts", "for", "generating", "request", "headers", "." ]
python
train
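Two calls that follow directly from the behaviour of `make_headers` above; the values in the comments are what the function returns for these inputs.

headers = make_headers(keep_alive=True, user_agent='python-urllib3/0.6')
# {'connection': 'keep-alive', 'user-agent': 'python-urllib3/0.6'}
headers = make_headers(accept_encoding=['gzip', 'deflate'])
# {'accept-encoding': 'gzip,deflate'}   # list values are joined with a comma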
cloud-custodian/cloud-custodian
c7n/utils.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/utils.py#L325-L341
def generate_arn(
        service, resource, partition='aws',
        region=None, account_id=None,
        resource_type=None, separator='/'):
    """Generate an Amazon Resource Name.
    See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html.
    """
    if region and region in REGION_PARTITION_MAP:
        partition = REGION_PARTITION_MAP[region]
    if service == 's3':
        region = ''
    arn = 'arn:%s:%s:%s:%s:' % (
        partition, service,
        region if region else '',
        account_id if account_id else '')
    if resource_type:
        arn = arn + '%s%s%s' % (resource_type, separator, resource)
    else:
        arn = arn + resource
    return arn
[ "def", "generate_arn", "(", "service", ",", "resource", ",", "partition", "=", "'aws'", ",", "region", "=", "None", ",", "account_id", "=", "None", ",", "resource_type", "=", "None", ",", "separator", "=", "'/'", ")", ":", "if", "region", "and", "region", "in", "REGION_PARTITION_MAP", ":", "partition", "=", "REGION_PARTITION_MAP", "[", "region", "]", "if", "service", "==", "'s3'", ":", "region", "=", "''", "arn", "=", "'arn:%s:%s:%s:%s:'", "%", "(", "partition", ",", "service", ",", "region", "if", "region", "else", "''", ",", "account_id", "if", "account_id", "else", "''", ")", "if", "resource_type", ":", "arn", "=", "arn", "+", "'%s%s%s'", "%", "(", "resource_type", ",", "separator", ",", "resource", ")", "else", ":", "arn", "=", "arn", "+", "resource", "return", "arn" ]
Generate an Amazon Resource Name. See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html.
[ "Generate", "an", "Amazon", "Resource", "Name", ".", "See", "http", ":", "//", "docs", ".", "aws", ".", "amazon", ".", "com", "/", "general", "/", "latest", "/", "gr", "/", "aws", "-", "arns", "-", "and", "-", "namespaces", ".", "html", "." ]
python
train
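A short sketch of the ARN strings `generate_arn` produces, assuming `REGION_PARTITION_MAP` (module-level state not shown in this record) does not remap the region used here.

generate_arn('sqs', 'my-queue', region='us-east-1', account_id='123456789012')
# 'arn:aws:sqs:us-east-1:123456789012:my-queue'
generate_arn('s3', 'my-bucket')
# 'arn:aws:s3:::my-bucket'   # the region field is always blanked for s3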
pypa/pipenv
pipenv/patched/notpip/_vendor/retrying.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/retrying.py#L237-L249
def get(self, wrap_exception=False):
    """
    Return the return value of this Attempt instance or raise an Exception.
    If wrap_exception is true, this Attempt is wrapped inside of a
    RetryError before being raised.
    """
    if self.has_exception:
        if wrap_exception:
            raise RetryError(self)
        else:
            six.reraise(self.value[0], self.value[1], self.value[2])
    else:
        return self.value
[ "def", "get", "(", "self", ",", "wrap_exception", "=", "False", ")", ":", "if", "self", ".", "has_exception", ":", "if", "wrap_exception", ":", "raise", "RetryError", "(", "self", ")", "else", ":", "six", ".", "reraise", "(", "self", ".", "value", "[", "0", "]", ",", "self", ".", "value", "[", "1", "]", ",", "self", ".", "value", "[", "2", "]", ")", "else", ":", "return", "self", ".", "value" ]
Return the return value of this Attempt instance or raise an Exception. If wrap_exception is true, this Attempt is wrapped inside of a RetryError before being raised.
[ "Return", "the", "return", "value", "of", "this", "Attempt", "instance", "or", "raise", "an", "Exception", ".", "If", "wrap_exception", "is", "true", "this", "Attempt", "is", "wrapped", "inside", "of", "a", "RetryError", "before", "being", "raised", "." ]
python
train
Erotemic/utool
utool/util_list.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L3366-L3395
def aslist(sequence):
    r"""
    Ensures that the sequence object is a Python list.
    Handles, numpy arrays, and python sequences (e.g. tuples, and iterables).

    Args:
        sequence (sequence): a list-like object

    Returns:
        list: list_ - `sequence` as a Python list

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_list import *  # NOQA
        >>> s1 = [1, 2, 3]
        >>> s2 = (1, 2, 3)
        >>> assert aslist(s1) is s1
        >>> assert aslist(s2) is not s2
        >>> aslist(np.array([[1, 2], [3, 4], [5, 6]]))
        [[1, 2], [3, 4], [5, 6]]
        >>> aslist(range(3))
        [0, 1, 2]
    """
    if isinstance(sequence, list):
        return sequence
    elif util_type.HAVE_NUMPY and isinstance(sequence, np.ndarray):
        list_ = sequence.tolist()
    else:
        list_ = list(sequence)
    return list_
[ "def", "aslist", "(", "sequence", ")", ":", "if", "isinstance", "(", "sequence", ",", "list", ")", ":", "return", "sequence", "elif", "util_type", ".", "HAVE_NUMPY", "and", "isinstance", "(", "sequence", ",", "np", ".", "ndarray", ")", ":", "list_", "=", "sequence", ".", "tolist", "(", ")", "else", ":", "list_", "=", "list", "(", "sequence", ")", "return", "list_" ]
r""" Ensures that the sequence object is a Python list. Handles, numpy arrays, and python sequences (e.g. tuples, and iterables). Args: sequence (sequence): a list-like object Returns: list: list_ - `sequence` as a Python list Example: >>> # DISABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> s1 = [1, 2, 3] >>> s2 = (1, 2, 3) >>> assert aslist(s1) is s1 >>> assert aslist(s2) is not s2 >>> aslist(np.array([[1, 2], [3, 4], [5, 6]])) [[1, 2], [3, 4], [5, 6]] >>> aslist(range(3)) [0, 1, 2]
[ "r", "Ensures", "that", "the", "sequence", "object", "is", "a", "Python", "list", ".", "Handles", "numpy", "arrays", "and", "python", "sequences", "(", "e", ".", "g", ".", "tuples", "and", "iterables", ")", "." ]
python
train
benley/butcher
butcher/main.py
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/main.py#L75-L89
def setup_function(self):
    """Runs prior to the global main function."""
    log.options.LogOptions.set_stderr_log_level('google:INFO')
    if app.get_options().debug:
        log.options.LogOptions.set_stderr_log_level('google:DEBUG')
    if not app.get_options().build_root:
        app.set_option('build_root', os.path.join(
            app.get_options().butcher_basedir, 'build'))
    self.buildroot = app.get_options().build_root
    if not os.path.exists(self.buildroot):
        os.makedirs(self.buildroot)
    if app.get_options().disable_cache_fetch:
        self.options['cache_fetch'] = False
    if app.get_options().disable_hardlinks:
        base.BaseBuilder.linkfiles = False
[ "def", "setup_function", "(", "self", ")", ":", "log", ".", "options", ".", "LogOptions", ".", "set_stderr_log_level", "(", "'google:INFO'", ")", "if", "app", ".", "get_options", "(", ")", ".", "debug", ":", "log", ".", "options", ".", "LogOptions", ".", "set_stderr_log_level", "(", "'google:DEBUG'", ")", "if", "not", "app", ".", "get_options", "(", ")", ".", "build_root", ":", "app", ".", "set_option", "(", "'build_root'", ",", "os", ".", "path", ".", "join", "(", "app", ".", "get_options", "(", ")", ".", "butcher_basedir", ",", "'build'", ")", ")", "self", ".", "buildroot", "=", "app", ".", "get_options", "(", ")", ".", "build_root", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "buildroot", ")", ":", "os", ".", "makedirs", "(", "self", ".", "buildroot", ")", "if", "app", ".", "get_options", "(", ")", ".", "disable_cache_fetch", ":", "self", ".", "options", "[", "'cache_fetch'", "]", "=", "False", "if", "app", ".", "get_options", "(", ")", ".", "disable_hardlinks", ":", "base", ".", "BaseBuilder", ".", "linkfiles", "=", "False" ]
Runs prior to the global main function.
[ "Runs", "prior", "to", "the", "global", "main", "function", "." ]
python
train
ui/django-post_office
post_office/mail.py
https://github.com/ui/django-post_office/blob/03e1ffb69829b475402f0f3ecd9f8a90af7da4bd/post_office/mail.py#L157-L166
def send_many(kwargs_list):
    """
    Similar to mail.send(), but this function accepts a list of kwargs.
    Internally, it uses Django's bulk_create command for efficiency reasons.
    Currently send_many() can't be used to send emails with priority = 'now'.
    """
    emails = []
    for kwargs in kwargs_list:
        emails.append(send(commit=False, **kwargs))
    Email.objects.bulk_create(emails)
[ "def", "send_many", "(", "kwargs_list", ")", ":", "emails", "=", "[", "]", "for", "kwargs", "in", "kwargs_list", ":", "emails", ".", "append", "(", "send", "(", "commit", "=", "False", ",", "*", "*", "kwargs", ")", ")", "Email", ".", "objects", ".", "bulk_create", "(", "emails", ")" ]
Similar to mail.send(), but this function accepts a list of kwargs. Internally, it uses Django's bulk_create command for efficiency reasons. Currently send_many() can't be used to send emails with priority = 'now'.
[ "Similar", "to", "mail", ".", "send", "()", "but", "this", "function", "accepts", "a", "list", "of", "kwargs", ".", "Internally", "it", "uses", "Django", "s", "bulk_create", "command", "for", "efficiency", "reasons", ".", "Currently", "send_many", "()", "can", "t", "be", "used", "to", "send", "emails", "with", "priority", "=", "now", "." ]
python
train
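A hedged sketch of calling `send_many` above; the keyword names (sender, recipients, subject, message) are assumptions about what `mail.send()` accepts and are not shown in this record.

send_many([
    {'sender': 'noreply@example.com', 'recipients': ['a@example.com'],
     'subject': 'Welcome', 'message': 'Hello A'},
    {'sender': 'noreply@example.com', 'recipients': ['b@example.com'],
     'subject': 'Welcome', 'message': 'Hello B'},
])
# All Email rows are written in a single bulk_create() call.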
improbable-research/keanu
keanu-python/keanu/algorithm/sampling.py
https://github.com/improbable-research/keanu/blob/73189a8f569078e156168e795f82c7366c59574b/keanu-python/keanu/algorithm/sampling.py#L130-L185
def sample(net: BayesNet, sample_from: Iterable[Vertex],
           sampling_algorithm: PosteriorSamplingAlgorithm = None,
           draws: int = 500, drop: int = 0, down_sample_interval: int = 1,
           plot: bool = False, ax: Any = None) -> sample_types:
    """
    :param net: Bayesian Network containing latent variables.
    :param sample_from: Vertices to include in the returned samples.
    :param sampling_algorithm: The posterior sampling algorithm to use.
        Options are :class:`keanu.algorithm.MetropolisHastingsSampler`, :class:`keanu.algorithm.NUTSSampler` and :class:`keanu.algorithm.ForwardSampler`
        If not set, :class:`keanu.algorithm.MetropolisHastingsSampler` is chosen with 'prior' as its proposal distribution.
    :param draws: The number of samples to take.
    :param drop: The number of samples to drop before collecting anything.
        If this is zero then no samples will be dropped before collecting.
    :param down_sample_interval: Collect 1 sample for every `down_sample_interval`.
        If this is 1 then there will be no down-sampling.
        If this is 2 then every other sample will be taken.
        If this is 3 then 2 samples will be dropped before one is taken.
        And so on.
    :param plot: Flag for plotting the trace after sampling.
        Call `matplotlib.pyplot.show <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.show.html>`_ to display the plot.
    :param Axes ax: `matplotlib.axes.Axes <https://matplotlib.org/api/axes_api.html>`_.
        If not set, a new one is created.

    :raises ValueError: If `sample_from` contains vertices without labels.

    :return: Dictionary of samples at an index (tuple) for each vertex label (str).
        If all the vertices in `sample_from` are scalar, the dictionary is only keyed by label.
    """
    sample_from = list(sample_from)
    id_to_label = __check_if_vertices_are_labelled(sample_from)
    if sampling_algorithm is None:
        sampling_algorithm = MetropolisHastingsSampler(proposal_distribution="prior", latents=sample_from)

    vertices_unwrapped: JavaList = k.to_java_object_list(sample_from)

    probabilistic_model = ProbabilisticModel(net) if (
        isinstance(sampling_algorithm, MetropolisHastingsSampler) or
        isinstance(sampling_algorithm, ForwardSampler)) else ProbabilisticModelWithGradient(net)

    network_samples: JavaObject = sampling_algorithm.get_sampler().getPosteriorSamples(
        probabilistic_model.unwrap(), vertices_unwrapped, draws).drop(drop).downSample(down_sample_interval)

    if __all_scalar(sample_from):
        vertex_samples = __create_single_indexed_samples(network_samples, vertices_unwrapped, id_to_label)
    else:
        vertex_samples = __create_multi_indexed_samples(vertices_unwrapped, network_samples, id_to_label)

    if plot:
        traceplot(vertex_samples, ax=ax)

    return vertex_samples
[ "def", "sample", "(", "net", ":", "BayesNet", ",", "sample_from", ":", "Iterable", "[", "Vertex", "]", ",", "sampling_algorithm", ":", "PosteriorSamplingAlgorithm", "=", "None", ",", "draws", ":", "int", "=", "500", ",", "drop", ":", "int", "=", "0", ",", "down_sample_interval", ":", "int", "=", "1", ",", "plot", ":", "bool", "=", "False", ",", "ax", ":", "Any", "=", "None", ")", "->", "sample_types", ":", "sample_from", "=", "list", "(", "sample_from", ")", "id_to_label", "=", "__check_if_vertices_are_labelled", "(", "sample_from", ")", "if", "sampling_algorithm", "is", "None", ":", "sampling_algorithm", "=", "MetropolisHastingsSampler", "(", "proposal_distribution", "=", "\"prior\"", ",", "latents", "=", "sample_from", ")", "vertices_unwrapped", ":", "JavaList", "=", "k", ".", "to_java_object_list", "(", "sample_from", ")", "probabilistic_model", "=", "ProbabilisticModel", "(", "net", ")", "if", "(", "isinstance", "(", "sampling_algorithm", ",", "MetropolisHastingsSampler", ")", "or", "isinstance", "(", "sampling_algorithm", ",", "ForwardSampler", ")", ")", "else", "ProbabilisticModelWithGradient", "(", "net", ")", "network_samples", ":", "JavaObject", "=", "sampling_algorithm", ".", "get_sampler", "(", ")", ".", "getPosteriorSamples", "(", "probabilistic_model", ".", "unwrap", "(", ")", ",", "vertices_unwrapped", ",", "draws", ")", ".", "drop", "(", "drop", ")", ".", "downSample", "(", "down_sample_interval", ")", "if", "__all_scalar", "(", "sample_from", ")", ":", "vertex_samples", "=", "__create_single_indexed_samples", "(", "network_samples", ",", "vertices_unwrapped", ",", "id_to_label", ")", "else", ":", "vertex_samples", "=", "__create_multi_indexed_samples", "(", "vertices_unwrapped", ",", "network_samples", ",", "id_to_label", ")", "if", "plot", ":", "traceplot", "(", "vertex_samples", ",", "ax", "=", "ax", ")", "return", "vertex_samples" ]
:param net: Bayesian Network containing latent variables.
:param sample_from: Vertices to include in the returned samples.
:param sampling_algorithm: The posterior sampling algorithm to use.
    Options are :class:`keanu.algorithm.MetropolisHastingsSampler`, :class:`keanu.algorithm.NUTSSampler` and :class:`keanu.algorithm.ForwardSampler`
    If not set, :class:`keanu.algorithm.MetropolisHastingsSampler` is chosen with 'prior' as its proposal distribution.
:param draws: The number of samples to take.
:param drop: The number of samples to drop before collecting anything.
    If this is zero then no samples will be dropped before collecting.
:param down_sample_interval: Collect 1 sample for every `down_sample_interval`.
    If this is 1 then there will be no down-sampling.
    If this is 2 then every other sample will be taken.
    If this is 3 then 2 samples will be dropped before one is taken.
    And so on.
:param plot: Flag for plotting the trace after sampling.
    Call `matplotlib.pyplot.show <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.show.html>`_ to display the plot.
:param Axes ax: `matplotlib.axes.Axes <https://matplotlib.org/api/axes_api.html>`_.
    If not set, a new one is created.

:raises ValueError: If `sample_from` contains vertices without labels.

:return: Dictionary of samples at an index (tuple) for each vertex label (str).
    If all the vertices in `sample_from` are scalar, the dictionary is only keyed by label.
[ ":", "param", "net", ":", "Bayesian", "Network", "containing", "latent", "variables", ".", ":", "param", "sample_from", ":", "Vertices", "to", "include", "in", "the", "returned", "samples", ".", ":", "param", "sampling_algorithm", ":", "The", "posterior", "sampling", "algorithm", "to", "use", ".", "Options", "are", ":", "class", ":", "keanu", ".", "algorithm", ".", "MetropolisHastingsSampler", ":", "class", ":", "keanu", ".", "algorithm", ".", "NUTSSampler", "and", ":", "class", ":", "keanu", ".", "algorithm", ".", "ForwardSampler", "If", "not", "set", ":", "class", ":", "keanu", ".", "algorithm", ".", "MetropolisHastingsSampler", "is", "chosen", "with", "prior", "as", "its", "proposal", "distribution", ".", ":", "param", "draws", ":", "The", "number", "of", "samples", "to", "take", ".", ":", "param", "drop", ":", "The", "number", "of", "samples", "to", "drop", "before", "collecting", "anything", ".", "If", "this", "is", "zero", "then", "no", "samples", "will", "be", "dropped", "before", "collecting", ".", ":", "param", "down_sample_interval", ":", "Collect", "1", "sample", "for", "every", "down_sample_interval", ".", "If", "this", "is", "1", "then", "there", "will", "be", "no", "down", "-", "sampling", ".", "If", "this", "is", "2", "then", "every", "other", "sample", "will", "be", "taken", ".", "If", "this", "is", "3", "then", "2", "samples", "will", "be", "dropped", "before", "one", "is", "taken", ".", "And", "so", "on", ".", ":", "param", "plot", ":", "Flag", "for", "plotting", "the", "trace", "after", "sampling", ".", "Call", "matplotlib", ".", "pyplot", ".", "show", "<https", ":", "//", "matplotlib", ".", "org", "/", "api", "/", "_as_gen", "/", "matplotlib", ".", "pyplot", ".", "show", ".", "html", ">", "_", "to", "display", "the", "plot", ".", ":", "param", "Axes", "ax", ":", "matplotlib", ".", "axes", ".", "Axes", "<https", ":", "//", "matplotlib", ".", "org", "/", "api", "/", "axes_api", ".", "html", ">", "_", ".", "If", "not", "set", "a", "new", "one", "is", "created", "." ]
python
train
proteanhq/protean
src/protean/core/queryset.py
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L102-L109
def offset(self, offset):
    """Fetch results after `offset` value"""
    clone = self._clone()
    if isinstance(offset, int):
        clone._offset = offset

    return clone
[ "def", "offset", "(", "self", ",", "offset", ")", ":", "clone", "=", "self", ".", "_clone", "(", ")", "if", "isinstance", "(", "offset", ",", "int", ")", ":", "clone", ".", "_offset", "=", "offset", "return", "clone" ]
Fetch results after `offset` value
[ "Fetch", "results", "after", "offset", "value" ]
python
train
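An illustrative chaining call for the `offset` method above; `base_queryset` is a stand-in for however the queryset object is obtained in this framework.

page_two = base_queryset.offset(20)     # cloned queryset starting after the 20th result
unchanged = base_queryset.offset('20')  # non-int values are silently ignored by the isinstance guard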
donovan-duplessis/pwnurl
manage.py
https://github.com/donovan-duplessis/pwnurl/blob/a13e27694f738228d186ea437b4d15ef5a925a87/manage.py#L63-L68
def profile(length=25):
    """ Start the application under the code profiler """
    from werkzeug.contrib.profiler import ProfilerMiddleware
    app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length])
    app.run()
[ "def", "profile", "(", "length", "=", "25", ")", ":", "from", "werkzeug", ".", "contrib", ".", "profiler", "import", "ProfilerMiddleware", "app", ".", "wsgi_app", "=", "ProfilerMiddleware", "(", "app", ".", "wsgi_app", ",", "restrictions", "=", "[", "length", "]", ")", "app", ".", "run", "(", ")" ]
Start the application under the code profiler
[ "Start", "the", "application", "under", "the", "code", "profiler" ]
python
train
onnx/onnxmltools
onnxmltools/utils/utils_backend.py
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/utils/utils_backend.py#L151-L176
def extract_options(name):
    """
    Extracts comparison option from filename.
    As example, ``Binarizer-SkipDim1`` means options *SkipDim1*
    is enabled. ``(1, 2)`` and ``(2,)`` are considered equal.
    Available options:

    * `'SkipDim1'`: reshape arrays by skipping 1-dimension: ``(1, 2)`` --> ``(2,)``
    * `'OneOff'`: inputs comes in a list for the predictions are computed with a call for each of them,
      not with one call
    * ...

    See function *dump_data_and_model* to get the full list.
    """
    opts = name.replace("\\", "/").split("/")[-1].split('.')[0].split('-')
    if len(opts) == 1:
        return {}
    else:
        res = {}
        for opt in opts[1:]:
            if opt in ("SkipDim1", "OneOff", "NoProb", "Dec4", "Dec3", 'Out0',
                       'Dec2', 'Reshape', 'Opp'):
                res[opt] = True
            else:
                raise NameError("Unable to parse option '{}'".format(opts[1:]))
        return res
[ "def", "extract_options", "(", "name", ")", ":", "opts", "=", "name", ".", "replace", "(", "\"\\\\\"", ",", "\"/\"", ")", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", ".", "split", "(", "'.'", ")", "[", "0", "]", ".", "split", "(", "'-'", ")", "if", "len", "(", "opts", ")", "==", "1", ":", "return", "{", "}", "else", ":", "res", "=", "{", "}", "for", "opt", "in", "opts", "[", "1", ":", "]", ":", "if", "opt", "in", "(", "\"SkipDim1\"", ",", "\"OneOff\"", ",", "\"NoProb\"", ",", "\"Dec4\"", ",", "\"Dec3\"", ",", "'Out0'", ",", "'Dec2'", ",", "'Reshape'", ",", "'Opp'", ")", ":", "res", "[", "opt", "]", "=", "True", "else", ":", "raise", "NameError", "(", "\"Unable to parse option '{}'\"", ".", "format", "(", "opts", "[", "1", ":", "]", ")", ")", "return", "res" ]
Extracts comparison option from filename.
As example, ``Binarizer-SkipDim1`` means options *SkipDim1*
is enabled. ``(1, 2)`` and ``(2,)`` are considered equal.
Available options:

* `'SkipDim1'`: reshape arrays by skipping 1-dimension: ``(1, 2)`` --> ``(2,)``
* `'OneOff'`: inputs comes in a list for the predictions are computed with a call for each of them, not with one call
* ...

See function *dump_data_and_model* to get the full list.
[ "Extracts", "comparison", "option", "from", "filename", ".", "As", "example", "Binarizer", "-", "SkipDim1", "means", "options", "*", "SkipDim1", "*", "is", "enabled", ".", "(", "1", "2", ")", "and", "(", "2", ")", "are", "considered", "equal", ".", "Available", "options", ":" ]
python
train
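Two worked inputs for `extract_options` above, following its parsing of the basename.

extract_options('Binarizer-SkipDim1.model.onnx')
# {'SkipDim1': True}
extract_options('data/LogisticRegression.onnx')
# {}   # no '-'-separated options in the basename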
iskandr/fancyimpute
fancyimpute/dictionary_helpers.py
https://github.com/iskandr/fancyimpute/blob/9f0837d387c7303d5c8c925a9989ca77a1a96e3e/fancyimpute/dictionary_helpers.py#L310-L330
def reverse_lookup_from_nested_dict(values_dict):
    """
    Create reverse-lookup dictionary mapping each row key to a list of triplets:
    [(column key, value), ...]

    Parameters
    ----------
    nested_values_dict : dict
        column_key -> row_key -> value

    weights_dict : dict
        column_key -> row_key -> sample weight

    Returns dictionary mapping row_key -> [(column key, value)]
    """
    reverse_lookup = defaultdict(list)
    for column_key, column_dict in values_dict.items():
        for row_key, value in column_dict.items():
            entry = (column_key, value)
            reverse_lookup[row_key].append(entry)
    return reverse_lookup
[ "def", "reverse_lookup_from_nested_dict", "(", "values_dict", ")", ":", "reverse_lookup", "=", "defaultdict", "(", "list", ")", "for", "column_key", ",", "column_dict", "in", "values_dict", ".", "items", "(", ")", ":", "for", "row_key", ",", "value", "in", "column_dict", ".", "items", "(", ")", ":", "entry", "=", "(", "column_key", ",", "value", ")", "reverse_lookup", "[", "row_key", "]", ".", "append", "(", "entry", ")", "return", "reverse_lookup" ]
Create reverse-lookup dictionary mapping each row key to a list of triplets:
[(column key, value), ...]

Parameters
----------
nested_values_dict : dict
    column_key -> row_key -> value

weights_dict : dict
    column_key -> row_key -> sample weight

Returns dictionary mapping row_key -> [(column key, value)]
[ "Create", "reverse", "-", "lookup", "dictionary", "mapping", "each", "row", "key", "to", "a", "list", "of", "triplets", ":", "[", "(", "column", "key", "value", ")", "...", "]" ]
python
train
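A worked example for `reverse_lookup_from_nested_dict` above; the return value is a defaultdict keyed by row.

values = {
    'colA': {'row1': 1.0, 'row2': 2.0},
    'colB': {'row1': 3.0},
}
reverse_lookup_from_nested_dict(values)
# {'row1': [('colA', 1.0), ('colB', 3.0)], 'row2': [('colA', 2.0)]}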
fossasia/knittingpattern
knittingpattern/convert/SVGBuilder.py
https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/convert/SVGBuilder.py#L121-L127
def place_svg_use(self, symbol_id, layer_id, group=None):
    """Same as :meth:`place_svg_use_coords`.

    With implicit `x` and `y` which are set to `0` in this method and then
    :meth:`place_svg_use_coords` is called.
    """
    self.place_svg_use_coords(0, 0, symbol_id, layer_id, group)
[ "def", "place_svg_use", "(", "self", ",", "symbol_id", ",", "layer_id", ",", "group", "=", "None", ")", ":", "self", ".", "place_svg_use_coords", "(", "0", ",", "0", ",", "symbol_id", ",", "layer_id", ",", "group", ")" ]
Same as :meth:`place_svg_use_coords`. With implicit `x` and `y` which are set to `0` in this method and then :meth:`place_svg_use_coords` is called.
[ "Same", "as", ":", "meth", ":", "place_svg_use_coords", "." ]
python
valid
saltstack/salt
salt/client/ssh/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/__init__.py#L979-L1003
def run(self, deploy_attempted=False):
    '''
    Execute the routine, the routine can be either:
    1. Execute a raw shell command
    2. Execute a wrapper func
    3. Execute a remote Salt command

    If a (re)deploy is needed, then retry the operation after a deploy
    attempt

    Returns tuple of (stdout, stderr, retcode)
    '''
    stdout = stderr = retcode = None

    if self.opts.get('raw_shell', False):
        cmd_str = ' '.join([self._escape_arg(arg) for arg in self.argv])
        stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)

    elif self.fun in self.wfuncs or self.mine:
        stdout, retcode = self.run_wfunc()

    else:
        stdout, stderr, retcode = self.cmd_block()

    return stdout, stderr, retcode
[ "def", "run", "(", "self", ",", "deploy_attempted", "=", "False", ")", ":", "stdout", "=", "stderr", "=", "retcode", "=", "None", "if", "self", ".", "opts", ".", "get", "(", "'raw_shell'", ",", "False", ")", ":", "cmd_str", "=", "' '", ".", "join", "(", "[", "self", ".", "_escape_arg", "(", "arg", ")", "for", "arg", "in", "self", ".", "argv", "]", ")", "stdout", ",", "stderr", ",", "retcode", "=", "self", ".", "shell", ".", "exec_cmd", "(", "cmd_str", ")", "elif", "self", ".", "fun", "in", "self", ".", "wfuncs", "or", "self", ".", "mine", ":", "stdout", ",", "retcode", "=", "self", ".", "run_wfunc", "(", ")", "else", ":", "stdout", ",", "stderr", ",", "retcode", "=", "self", ".", "cmd_block", "(", ")", "return", "stdout", ",", "stderr", ",", "retcode" ]
Execute the routine, the routine can be either:
1. Execute a raw shell command
2. Execute a wrapper func
3. Execute a remote Salt command

If a (re)deploy is needed, then retry the operation after a deploy
attempt

Returns tuple of (stdout, stderr, retcode)
[ "Execute", "the", "routine", "the", "routine", "can", "be", "either", ":", "1", ".", "Execute", "a", "raw", "shell", "command", "2", ".", "Execute", "a", "wrapper", "func", "3", ".", "Execute", "a", "remote", "Salt", "command" ]
python
train
saltstack/salt
salt/utils/cache.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/cache.py#L300-L315
def get(self, pattern):
    '''
    Get a compiled regular expression object based on pattern and
    cache it when it is not in the cache already
    '''
    try:
        self.cache[pattern][0] += 1
        return self.cache[pattern][1]
    except KeyError:
        pass

    if len(self.cache) > self.size:
        self.sweep()
    regex = re.compile('{0}{1}{2}'.format(
        self.prepend, pattern, self.append))
    self.cache[pattern] = [1, regex, pattern, time.time()]
    return regex
[ "def", "get", "(", "self", ",", "pattern", ")", ":", "try", ":", "self", ".", "cache", "[", "pattern", "]", "[", "0", "]", "+=", "1", "return", "self", ".", "cache", "[", "pattern", "]", "[", "1", "]", "except", "KeyError", ":", "pass", "if", "len", "(", "self", ".", "cache", ")", ">", "self", ".", "size", ":", "self", ".", "sweep", "(", ")", "regex", "=", "re", ".", "compile", "(", "'{0}{1}{2}'", ".", "format", "(", "self", ".", "prepend", ",", "pattern", ",", "self", ".", "append", ")", ")", "self", ".", "cache", "[", "pattern", "]", "=", "[", "1", ",", "regex", ",", "pattern", ",", "time", ".", "time", "(", ")", "]", "return", "regex" ]
Get a compiled regular expression object based on pattern and cache it when it is not in the cache already
[ "Get", "a", "compiled", "regular", "expression", "object", "based", "on", "pattern", "and", "cache", "it", "when", "it", "is", "not", "in", "the", "cache", "already" ]
python
train
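A standalone sketch of the caching idea used by the `get` method above, not the salt class itself; `prepend`/`append` here are module-level stand-ins for the instance attributes, and the sweeping/size logic is omitted.

import re
import time

cache, prepend, append = {}, r'\A', r'\Z'

def get(pattern):
    try:
        cache[pattern][0] += 1          # bump the hit counter on a cache hit
        return cache[pattern][1]
    except KeyError:
        pass
    # compile prepend + pattern + append and remember it with a hit count and timestamp
    regex = re.compile('{0}{1}{2}'.format(prepend, pattern, append))
    cache[pattern] = [1, regex, pattern, time.time()]
    return regex

assert get(r'minion\d+').match('minion42')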
quantifiedcode/checkmate
checkmate/lib/code/environment.py
https://github.com/quantifiedcode/checkmate/blob/1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2/checkmate/lib/code/environment.py#L691-L774
def save_file_revisions(self,snapshot,file_revisions):
    """
    We convert various items in the file revision to documents,
    so that we can easily search and retrieve them...
    """
    annotations = defaultdict(list)

    for file_revision in file_revisions:

        issues_results = {}

        for analyzer_name,results in file_revision.results.items():
            if 'issues' in results:
                issues_results[analyzer_name] = results['issues']
                del results['issues']
                if len(issues_results) > 1000:
                    issues_results[analyzer_name] = [{
                        'code' : 'TooManyIssues',
                        'analyzer' : analyzer_name,
                    }]

        with self.project.backend.transaction():
            self.project.backend.save(file_revision)

        def location_sorter(issue):
            if issue['location'] and issue['location'][0] and issue['location'][0][0]:
                return issue['location'][0][0][0]
            return 0

        with self.project.backend.transaction():
            for analyzer_name,issues in issues_results.items():
                grouped_issues = group_issues_by_fingerprint(issues)
                for issue_dict in grouped_issues:
                    hasher = Hasher()
                    hasher.add(analyzer_name)
                    hasher.add(issue_dict['code'])
                    hasher.add(issue_dict['fingerprint'])
                    issue_dict['hash'] = hasher.digest.hexdigest()
                    try:
                        #we check if the issue already exists
                        issue = self.project.backend.get(Issue,{'hash' : issue_dict['hash'], 'project' : self.project })
                    except Issue.DoesNotExist:
                        #if not, we create it
                        d = issue_dict.copy()
                        d['analyzer'] = analyzer_name
                        if 'location' in d:
                            del d['location']
                        if 'occurrences' in d:
                            del d['occurrences']
                        issue = Issue(d)
                        issue.project = self.project
                        self.project.backend.save(issue)
                    for occurrence in issue_dict['occurrences']:
                        hasher = Hasher()
                        hasher.add(file_revision.hash)
                        hasher.add(issue.hash)
                        hasher.add(occurrence.get('from_row'))
                        hasher.add(occurrence.get('from_column'))
                        hasher.add(occurrence.get('to_row'))
                        hasher.add(occurrence.get('to_column'))
                        hasher.add(occurrence.get('sequence'))
                        occurrence['hash'] = hasher.digest.hexdigest()
                        try:
                            #we check if the occurrence already exists
                            occurrence = self.project.backend.get(IssueOccurrence,{'hash' : occurrence['hash'], 'issue' : issue })
                        except IssueOccurrence.DoesNotExist:
                            #if not, we create it
                            occurrence = IssueOccurrence(occurrence)
                            occurrence.issue = issue
                            occurrence.file_revision = file_revision
                            self.project.backend.save(occurrence)
                        annotations['occurrences'].append(occurrence)
                    annotations['issues'].append(issue)

    return annotations
[ "def", "save_file_revisions", "(", "self", ",", "snapshot", ",", "file_revisions", ")", ":", "annotations", "=", "defaultdict", "(", "list", ")", "for", "file_revision", "in", "file_revisions", ":", "issues_results", "=", "{", "}", "for", "analyzer_name", ",", "results", "in", "file_revision", ".", "results", ".", "items", "(", ")", ":", "if", "'issues'", "in", "results", ":", "issues_results", "[", "analyzer_name", "]", "=", "results", "[", "'issues'", "]", "del", "results", "[", "'issues'", "]", "if", "len", "(", "issues_results", ")", ">", "1000", ":", "issues_results", "[", "analyzer_name", "]", "=", "[", "{", "'code'", ":", "'TooManyIssues'", ",", "'analyzer'", ":", "analyzer_name", ",", "}", "]", "with", "self", ".", "project", ".", "backend", ".", "transaction", "(", ")", ":", "self", ".", "project", ".", "backend", ".", "save", "(", "file_revision", ")", "def", "location_sorter", "(", "issue", ")", ":", "if", "issue", "[", "'location'", "]", "and", "issue", "[", "'location'", "]", "[", "0", "]", "and", "issue", "[", "'location'", "]", "[", "0", "]", "[", "0", "]", ":", "return", "issue", "[", "'location'", "]", "[", "0", "]", "[", "0", "]", "[", "0", "]", "return", "0", "with", "self", ".", "project", ".", "backend", ".", "transaction", "(", ")", ":", "for", "analyzer_name", ",", "issues", "in", "issues_results", ".", "items", "(", ")", ":", "grouped_issues", "=", "group_issues_by_fingerprint", "(", "issues", ")", "for", "issue_dict", "in", "grouped_issues", ":", "hasher", "=", "Hasher", "(", ")", "hasher", ".", "add", "(", "analyzer_name", ")", "hasher", ".", "add", "(", "issue_dict", "[", "'code'", "]", ")", "hasher", ".", "add", "(", "issue_dict", "[", "'fingerprint'", "]", ")", "issue_dict", "[", "'hash'", "]", "=", "hasher", ".", "digest", ".", "hexdigest", "(", ")", "try", ":", "#we check if the issue already exists", "issue", "=", "self", ".", "project", ".", "backend", ".", "get", "(", "Issue", ",", "{", "'hash'", ":", "issue_dict", "[", "'hash'", "]", ",", "'project'", ":", "self", ".", "project", "}", ")", "except", "Issue", ".", "DoesNotExist", ":", "#if not, we create it", "d", "=", "issue_dict", ".", "copy", "(", ")", "d", "[", "'analyzer'", "]", "=", "analyzer_name", "if", "'location'", "in", "d", ":", "del", "d", "[", "'location'", "]", "if", "'occurrences'", "in", "d", ":", "del", "d", "[", "'occurrences'", "]", "issue", "=", "Issue", "(", "d", ")", "issue", ".", "project", "=", "self", ".", "project", "self", ".", "project", ".", "backend", ".", "save", "(", "issue", ")", "for", "occurrence", "in", "issue_dict", "[", "'occurrences'", "]", ":", "hasher", "=", "Hasher", "(", ")", "hasher", ".", "add", "(", "file_revision", ".", "hash", ")", "hasher", ".", "add", "(", "issue", ".", "hash", ")", "hasher", ".", "add", "(", "occurrence", ".", "get", "(", "'from_row'", ")", ")", "hasher", ".", "add", "(", "occurrence", ".", "get", "(", "'from_column'", ")", ")", "hasher", ".", "add", "(", "occurrence", ".", "get", "(", "'to_row'", ")", ")", "hasher", ".", "add", "(", "occurrence", ".", "get", "(", "'to_column'", ")", ")", "hasher", ".", "add", "(", "occurrence", ".", "get", "(", "'sequence'", ")", ")", "occurrence", "[", "'hash'", "]", "=", "hasher", ".", "digest", ".", "hexdigest", "(", ")", "try", ":", "#we check if the occurrence already exists", "occurrence", "=", "self", ".", "project", ".", "backend", ".", "get", "(", "IssueOccurrence", ",", "{", "'hash'", ":", "occurrence", "[", "'hash'", "]", ",", "'issue'", ":", "issue", "}", ")", "except", "IssueOccurrence", 
".", "DoesNotExist", ":", "#if not, we create it", "occurrence", "=", "IssueOccurrence", "(", "occurrence", ")", "occurrence", ".", "issue", "=", "issue", "occurrence", ".", "file_revision", "=", "file_revision", "self", ".", "project", ".", "backend", ".", "save", "(", "occurrence", ")", "annotations", "[", "'occurrences'", "]", ".", "append", "(", "occurrence", ")", "annotations", "[", "'issues'", "]", ".", "append", "(", "issue", ")", "return", "annotations" ]
We convert various items in the file revision to documents, so that we can easily search and retrieve them...
[ "We", "convert", "various", "items", "in", "the", "file", "revision", "to", "documents", "so", "that", "we", "can", "easily", "search", "and", "retrieve", "them", "..." ]
python
train
genepattern/genepattern-python
gp/modules.py
https://github.com/genepattern/genepattern-python/blob/9478ea65362b91c72a94f7300c3de8d710bebb71/gp/modules.py#L304-L312
def _all_params(arr):
    """
    Ensures that the argument is a list that either is empty or contains only GPParamSpec's
    :param arr: list
    :return:
    """
    if not isinstance([], list):
        raise TypeError("non-list value found for parameters")
    return all(isinstance(x, GPParamSpec) for x in arr)
[ "def", "_all_params", "(", "arr", ")", ":", "if", "not", "isinstance", "(", "[", "]", ",", "list", ")", ":", "raise", "TypeError", "(", "\"non-list value found for parameters\"", ")", "return", "all", "(", "isinstance", "(", "x", ",", "GPParamSpec", ")", "for", "x", "in", "arr", ")" ]
Ensures that the argument is a list that either is empty or contains only GPParamSpec's :param arr: list :return:
[ "Ensures", "that", "the", "argument", "is", "a", "list", "that", "either", "is", "empty", "or", "contains", "only", "GPParamSpec", "s", ":", "param", "arr", ":", "list", ":", "return", ":" ]
python
train
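Note that `isinstance([], list)` in the record above is always True, so the TypeError guard can never fire; a corrected sketch (not the GenePattern module's actual code) would test the argument itself.

def _all_params_checked(arr):
    # Validate the argument, then check that every element is a GPParamSpec.
    if not isinstance(arr, list):
        raise TypeError('non-list value found for parameters')
    return all(isinstance(x, GPParamSpec) for x in arr)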
buriburisuri/sugartensor
sugartensor/sg_train.py
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_train.py#L83-L121
def sg_print(tensor_list):
    r"""Simple tensor printing function for debugging.
    Prints the value, shape, and data type of each tensor in the list.

    Args:
      tensor_list: A list/tuple of tensors or a single tensor.

    Returns:
      The value of the tensors.

    For example,

    ```python
    import sugartensor as tf
    a = tf.constant([1.])
    b = tf.constant([2.])
    out = tf.sg_print([a, b])
    # Should print [ 1.] (1,) float32
    #              [ 2.] (1,) float32
    print(out)
    # Should print [array([ 1.], dtype=float32), array([ 2.], dtype=float32)]
    ```
    """
    # to list
    if type(tensor_list) is not list and type(tensor_list) is not tuple:
        tensor_list = [tensor_list]

    # evaluate tensor list with queue runner
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        sg_init(sess)
        with tf.sg_queue_context():
            res = sess.run(tensor_list)
            for r in res:
                print(r, r.shape, r.dtype)
    if len(res) == 1:
        return res[0]
    else:
        return res
[ "def", "sg_print", "(", "tensor_list", ")", ":", "# to list", "if", "type", "(", "tensor_list", ")", "is", "not", "list", "and", "type", "(", "tensor_list", ")", "is", "not", "tuple", ":", "tensor_list", "=", "[", "tensor_list", "]", "# evaluate tensor list with queue runner", "with", "tf", ".", "Session", "(", "config", "=", "tf", ".", "ConfigProto", "(", "allow_soft_placement", "=", "True", ")", ")", "as", "sess", ":", "sg_init", "(", "sess", ")", "with", "tf", ".", "sg_queue_context", "(", ")", ":", "res", "=", "sess", ".", "run", "(", "tensor_list", ")", "for", "r", "in", "res", ":", "print", "(", "r", ",", "r", ".", "shape", ",", "r", ".", "dtype", ")", "if", "len", "(", "res", ")", "==", "1", ":", "return", "res", "[", "0", "]", "else", ":", "return", "res" ]
r"""Simple tensor printing function for debugging. Prints the value, shape, and data type of each tensor in the list. Args: tensor_list: A list/tuple of tensors or a single tensor. Returns: The value of the tensors. For example, ```python import sugartensor as tf a = tf.constant([1.]) b = tf.constant([2.]) out = tf.sg_print([a, b]) # Should print [ 1.] (1,) float32 # [ 2.] (1,) float32 print(out) # Should print [array([ 1.], dtype=float32), array([ 2.], dtype=float32)] ```
[ "r", "Simple", "tensor", "printing", "function", "for", "debugging", ".", "Prints", "the", "value", "shape", "and", "data", "type", "of", "each", "tensor", "in", "the", "list", ".", "Args", ":", "tensor_list", ":", "A", "list", "/", "tuple", "of", "tensors", "or", "a", "single", "tensor", ".", "Returns", ":", "The", "value", "of", "the", "tensors", ".", "For", "example", "python", "import", "sugartensor", "as", "tf", "a", "=", "tf", ".", "constant", "(", "[", "1", ".", "]", ")", "b", "=", "tf", ".", "constant", "(", "[", "2", ".", "]", ")", "out", "=", "tf", ".", "sg_print", "(", "[", "a", "b", "]", ")", "#", "Should", "print", "[", "1", ".", "]", "(", "1", ")", "float32", "#", "[", "2", ".", "]", "(", "1", ")", "float32", "print", "(", "out", ")", "#", "Should", "print", "[", "array", "(", "[", "1", ".", "]", "dtype", "=", "float32", ")", "array", "(", "[", "2", ".", "]", "dtype", "=", "float32", ")", "]" ]
python
train
pjuren/pyokit
src/pyokit/scripts/conservationProfile.py
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/conservationProfile.py#L84-L93
def center_start(r, window_size):
    """
    Center a region on its start and expand it to window_size bases.

    :return: the new region.
    """
    res = copy.copy(r)
    res.end = res.start + window_size / 2
    res.start = res.end - window_size
    return res
[ "def", "center_start", "(", "r", ",", "window_size", ")", ":", "res", "=", "copy", ".", "copy", "(", "r", ")", "res", ".", "end", "=", "res", ".", "start", "+", "window_size", "/", "2", "res", ".", "start", "=", "res", ".", "end", "-", "window_size", "return", "res" ]
Center a region on its start and expand it to window_size bases. :return: the new region.
[ "Center", "a", "region", "on", "its", "start", "and", "expand", "it", "to", "window_size", "bases", "." ]
python
train
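A worked call for `center_start` above; the `Region` class is a stand-in for pyokit's own region type and is assumed only to expose `start` and `end`.

class Region(object):
    def __init__(self, start, end):
        self.start, self.end = start, end

r = Region(100, 250)
res = center_start(r, 50)
# res.start == 75.0 and res.end == 125.0 under Python 3, where 50 / 2 is a float;
# the original region r is left untouched because the function copies it first.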
Atomistica/atomistica
src/python/atomistica/atomic_strain.py
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/atomic_strain.py#L89-L101
def get_delta_plus_epsilon(nat, i_now, dr_now, dr_old):
    """
    Calculate delta_ij+epsilon_ij, i.e. the deformation gradient matrix
    """
    XIJ = get_XIJ(nat, i_now, dr_now, dr_old)
    YIJ = get_YIJ(nat, i_now, dr_old)

    YIJ_invert = array_inverse(YIJ)

    # Perform sum_k X_ik Y_jk^-1
    epsilon = np.sum(XIJ.reshape(-1,3,1,3)*YIJ_invert.reshape(-1,1,3,3), axis=3)

    return epsilon
[ "def", "get_delta_plus_epsilon", "(", "nat", ",", "i_now", ",", "dr_now", ",", "dr_old", ")", ":", "XIJ", "=", "get_XIJ", "(", "nat", ",", "i_now", ",", "dr_now", ",", "dr_old", ")", "YIJ", "=", "get_YIJ", "(", "nat", ",", "i_now", ",", "dr_old", ")", "YIJ_invert", "=", "array_inverse", "(", "YIJ", ")", "# Perform sum_k X_ik Y_jk^-1", "epsilon", "=", "np", ".", "sum", "(", "XIJ", ".", "reshape", "(", "-", "1", ",", "3", ",", "1", ",", "3", ")", "*", "YIJ_invert", ".", "reshape", "(", "-", "1", ",", "1", ",", "3", ",", "3", ")", ",", "axis", "=", "3", ")", "return", "epsilon" ]
Calculate delta_ij+epsilon_ij, i.e. the deformation gradient matrix
[ "Calculate", "delta_ij", "+", "epsilon_ij", "i", ".", "e", ".", "the", "deformation", "gradient", "matrix" ]
python
train
apache/incubator-superset
superset/views/core.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2866-L2881
def profile(self, username):
    """User profile page"""
    if not username and g.user:
        username = g.user.username

    payload = {
        'user': bootstrap_user_data(username, include_perms=True),
        'common': self.common_bootsrap_payload(),
    }

    return self.render_template(
        'superset/basic.html',
        title=_("%(user)s's profile", user=username),
        entry='profile',
        bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),
    )
[ "def", "profile", "(", "self", ",", "username", ")", ":", "if", "not", "username", "and", "g", ".", "user", ":", "username", "=", "g", ".", "user", ".", "username", "payload", "=", "{", "'user'", ":", "bootstrap_user_data", "(", "username", ",", "include_perms", "=", "True", ")", ",", "'common'", ":", "self", ".", "common_bootsrap_payload", "(", ")", ",", "}", "return", "self", ".", "render_template", "(", "'superset/basic.html'", ",", "title", "=", "_", "(", "\"%(user)s's profile\"", ",", "user", "=", "username", ")", ",", "entry", "=", "'profile'", ",", "bootstrap_data", "=", "json", ".", "dumps", "(", "payload", ",", "default", "=", "utils", ".", "json_iso_dttm_ser", ")", ",", ")" ]
User profile page
[ "User", "profile", "page" ]
python
train
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L1215-L1221
def commands(self):
    """
    Returns a list of commands supported by the motor controller.
    """
    self._commands, value = self.get_attr_set(self._commands, 'commands')
    return value
[ "def", "commands", "(", "self", ")", ":", "self", ".", "_commands", ",", "value", "=", "self", ".", "get_attr_set", "(", "self", ".", "_commands", ",", "'commands'", ")", "return", "value" ]
Returns a list of commands supported by the motor controller.
[ "Returns", "a", "list", "of", "commands", "supported", "by", "the", "motor", "controller", "." ]
python
train
Microsoft/nni
src/sdk/pynni/nni/networkmorphism_tuner/networkmorphism_tuner.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/networkmorphism_tuner/networkmorphism_tuner.py#L230-L253
def add_model(self, metric_value, model_id):
    """ Add model to the history, x_queue and y_queue

    Parameters
    ----------
    metric_value : float
    graph : dict
    model_id : int

    Returns
    -------
    model : dict
    """
    if self.verbose:
        logger.info("Saving model.")

    # Update best_model text file
    ret = {"model_id": model_id, "metric_value": metric_value}
    self.history.append(ret)
    if model_id == self.get_best_model_id():
        file = open(os.path.join(self.path, "best_model.txt"), "w")
        file.write("best model: " + str(model_id))
        file.close()
    return ret
[ "def", "add_model", "(", "self", ",", "metric_value", ",", "model_id", ")", ":", "if", "self", ".", "verbose", ":", "logger", ".", "info", "(", "\"Saving model.\"", ")", "# Update best_model text file", "ret", "=", "{", "\"model_id\"", ":", "model_id", ",", "\"metric_value\"", ":", "metric_value", "}", "self", ".", "history", ".", "append", "(", "ret", ")", "if", "model_id", "==", "self", ".", "get_best_model_id", "(", ")", ":", "file", "=", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "\"best_model.txt\"", ")", ",", "\"w\"", ")", "file", ".", "write", "(", "\"best model: \"", "+", "str", "(", "model_id", ")", ")", "file", ".", "close", "(", ")", "return", "ret" ]
Add model to the history, x_queue and y_queue

Parameters
----------
metric_value : float
graph : dict
model_id : int

Returns
-------
model : dict
[ "Add", "model", "to", "the", "history", "x_queue", "and", "y_queue" ]
python
train
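The add_model pattern above — append a result record to an in-memory history and rewrite a marker file whenever the best model changes — is easy to reproduce outside NNI. A minimal standalone sketch, using only the standard library and hypothetical names (ModelHistory, the /tmp path), not the tuner's actual API:

```python
import os
from typing import Dict, List


class ModelHistory:
    """Minimal sketch of history tracking with a best-model marker file."""

    def __init__(self, path: str) -> None:
        self.path = path
        self.history: List[Dict] = []
        os.makedirs(path, exist_ok=True)

    def best_model_id(self) -> int:
        # Highest metric wins; assumes at least one record exists.
        return max(self.history, key=lambda rec: rec["metric_value"])["model_id"]

    def add_model(self, metric_value: float, model_id: int) -> Dict:
        ret = {"model_id": model_id, "metric_value": metric_value}
        self.history.append(ret)
        if model_id == self.best_model_id():
            # Persist which model is currently best, as the tuner does.
            with open(os.path.join(self.path, "best_model.txt"), "w") as fh:
                fh.write("best model: " + str(model_id))
        return ret


if __name__ == "__main__":
    hist = ModelHistory("/tmp/nm_history")
    hist.add_model(0.71, 0)
    hist.add_model(0.83, 1)  # becomes the new best, rewrites best_model.txt
```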
zero-os/0-core
client/py-client/zeroos/core0/client/client.py
https://github.com/zero-os/0-core/blob/69f6ce845ab8b8ad805a79a415227e7ac566c218/client/py-client/zeroos/core0/client/client.py#L610-L624
def upload(self, remote, reader):
    """
    Uploads a file

    :param remote: remote file name
    :param reader: an object that implements the read(size) method (typically a file descriptor)
    :return:
    """
    fd = self.open(remote, 'w')
    while True:
        chunk = reader.read(512 * 1024)
        if chunk == b'':
            break
        self.write(fd, chunk)
    self.close(fd)
[ "def", "upload", "(", "self", ",", "remote", ",", "reader", ")", ":", "fd", "=", "self", ".", "open", "(", "remote", ",", "'w'", ")", "while", "True", ":", "chunk", "=", "reader", ".", "read", "(", "512", "*", "1024", ")", "if", "chunk", "==", "b''", ":", "break", "self", ".", "write", "(", "fd", ",", "chunk", ")", "self", ".", "close", "(", "fd", ")" ]
Uploads a file

:param remote: remote file name
:param reader: an object that implements the read(size) method (typically a file descriptor)
:return:
[ "Uploads", "a", "file", ":", "param", "remote", ":", "remote", "file", "name", ":", "param", "reader", ":", "an", "object", "that", "implements", "the", "read", "(", "size", ")", "method", "(", "typically", "a", "file", "descriptor", ")", ":", "return", ":" ]
python
train
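The upload method above streams any object exposing read(size) in 512 KiB chunks. The same chunked-copy loop works against any byte sink; a standalone sketch with a local file as the destination (the 0-core client itself needs a live core0 instance, so it is not used here):

```python
def chunked_copy(reader, writer, chunk_size=512 * 1024):
    """Copy from a read(size)-style source to a write(bytes)-style sink in chunks."""
    while True:
        chunk = reader.read(chunk_size)
        if chunk == b'':  # empty bytes signals end of stream
            break
        writer.write(chunk)


if __name__ == "__main__":
    import io
    source = io.BytesIO(b"example payload " * 100_000)  # stand-in for an open file
    with open("/tmp/uploaded.bin", "wb") as dest:
        chunked_copy(source, dest)
```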
djordon/queueing-tool
queueing_tool/queues/agents.py
https://github.com/djordon/queueing-tool/blob/ccd418cf647ac03a54f78ba5e3725903f541b808/queueing_tool/queues/agents.py#L67-L109
def desired_destination(self, network, edge):
    """Returns the agents next destination given their current location
    on the network.

    An ``Agent`` chooses one of the out edges at random. The
    probability that the ``Agent`` will travel along a specific edge
    is specified in the :class:`QueueNetwork's<.QueueNetwork>`
    transition matrix.

    Parameters
    ----------
    network : :class:`.QueueNetwork`
        The :class:`.QueueNetwork` where the Agent resides.
    edge : tuple
        A 4-tuple indicating which edge this agent is located at.
        The first two slots indicate the current edge's source and
        target vertices, while the third slot indicates this edges
        ``edge_index``. The last slot indicates the edge type of
        that edge

    Returns
    -------
    out : int
        Returns an the edge index corresponding to the agents next
        edge to visit in the network.

    See Also
    --------
    :meth:`.transitions` : :class:`QueueNetwork's<.QueueNetwork>`
        method that returns the transition probabilities for each edge
        in the graph.
    """
    n = len(network.out_edges[edge[1]])
    if n <= 1:
        return network.out_edges[edge[1]][0]

    u = uniform()
    pr = network._route_probs[edge[1]]
    k = _choice(pr, u, n)

    # _choice returns an integer between 0 and n-1 where the
    # probability of k being selected is equal to pr[k].
    return network.out_edges[edge[1]][k]
[ "def", "desired_destination", "(", "self", ",", "network", ",", "edge", ")", ":", "n", "=", "len", "(", "network", ".", "out_edges", "[", "edge", "[", "1", "]", "]", ")", "if", "n", "<=", "1", ":", "return", "network", ".", "out_edges", "[", "edge", "[", "1", "]", "]", "[", "0", "]", "u", "=", "uniform", "(", ")", "pr", "=", "network", ".", "_route_probs", "[", "edge", "[", "1", "]", "]", "k", "=", "_choice", "(", "pr", ",", "u", ",", "n", ")", "# _choice returns an integer between 0 and n-1 where the", "# probability of k being selected is equal to pr[k].", "return", "network", ".", "out_edges", "[", "edge", "[", "1", "]", "]", "[", "k", "]" ]
Returns the agents next destination given their current location
on the network.

An ``Agent`` chooses one of the out edges at random. The
probability that the ``Agent`` will travel along a specific edge
is specified in the :class:`QueueNetwork's<.QueueNetwork>`
transition matrix.

Parameters
----------
network : :class:`.QueueNetwork`
    The :class:`.QueueNetwork` where the Agent resides.
edge : tuple
    A 4-tuple indicating which edge this agent is located at.
    The first two slots indicate the current edge's source and
    target vertices, while the third slot indicates this edge's
    ``edge_index``. The last slot indicates the edge type of
    that edge.

Returns
-------
out : int
    Returns the edge index corresponding to the agent's next
    edge to visit in the network.

See Also
--------
:meth:`.transitions` : :class:`QueueNetwork's<.QueueNetwork>`
    method that returns the transition probabilities for each edge
    in the graph.
[ "Returns", "the", "agents", "next", "destination", "given", "their", "current", "location", "on", "the", "network", "." ]
python
valid
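Sampling the next edge from a row of the transition matrix is just a cumulative-probability draw over the vertex's out edges. A standalone sketch of the weighted choice the agent performs (the edge indices and probabilities below are made up, and this is not queueing_tool's `_choice` helper):

```python
import random


def weighted_choice(probs, u=None):
    """Return index k with probability probs[k]; probs must sum to 1."""
    u = random.random() if u is None else u
    cumulative = 0.0
    for k, p in enumerate(probs):
        cumulative += p
        if u < cumulative:
            return k
    return len(probs) - 1  # guard against floating-point round-off


out_edges = [7, 12, 15]           # candidate edge indices out of the current vertex
routing_probs = [0.2, 0.5, 0.3]   # made-up transition-matrix row

next_edge = out_edges[weighted_choice(routing_probs)]
print(next_edge)
```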
spacetelescope/pysynphot
pysynphot/spectrum.py
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/spectrum.py#L2117-L2134
def convert(self, targetunits):
    """Set new user unit, for wavelength only.

    This effectively converts the spectrum wavelength to given unit.
    Note that actual data are always kept in internal unit
    (Angstrom), and only converted to user unit by
    :meth:`GetWaveSet` during actual computation.
    User unit is stored in ``self.waveunits``.

    Throughput is unitless and cannot be converted.

    Parameters
    ----------
    targetunits : str
        New unit name, as accepted by `~pysynphot.units.Units`.

    """
    nunits = units.Units(targetunits)
    self.waveunits = nunits
[ "def", "convert", "(", "self", ",", "targetunits", ")", ":", "nunits", "=", "units", ".", "Units", "(", "targetunits", ")", "self", ".", "waveunits", "=", "nunits" ]
Set new user unit, for wavelength only.

This effectively converts the spectrum wavelength to given unit.
Note that actual data are always kept in internal unit
(Angstrom), and only converted to user unit by
:meth:`GetWaveSet` during actual computation.
User unit is stored in ``self.waveunits``.

Throughput is unitless and cannot be converted.

Parameters
----------
targetunits : str
    New unit name, as accepted by `~pysynphot.units.Units`.
[ "Set", "new", "user", "unit", "for", "wavelength", "only", "." ]
python
train
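convert only swaps the user-facing waveunits label; the stored wavelengths stay in Angstrom and are translated lazily on access. That deferred-conversion pattern can be sketched without pysynphot at all (the unit table and class below are illustrative, not the library's Units machinery):

```python
ANGSTROM_PER_UNIT = {"angstrom": 1.0, "nm": 10.0, "micron": 1.0e4}  # illustrative factors


class SpectrumSketch:
    def __init__(self, wave_angstrom):
        self._wave = list(wave_angstrom)   # internal data always in Angstrom
        self.waveunits = "angstrom"        # user-facing unit label only

    def convert(self, targetunits):
        if targetunits not in ANGSTROM_PER_UNIT:
            raise ValueError("unknown unit: " + targetunits)
        self.waveunits = targetunits       # no data conversion happens here

    def get_waveset(self):
        factor = ANGSTROM_PER_UNIT[self.waveunits]
        return [w / factor for w in self._wave]  # convert only on access


sp = SpectrumSketch([4000.0, 5000.0, 6000.0])
sp.convert("nm")
print(sp.get_waveset())  # [400.0, 500.0, 600.0]
```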
adamrehn/ue4cli
ue4cli/UnrealManagerBase.py
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/UnrealManagerBase.py#L355-L398
def packageProject(self, dir=os.getcwd(), configuration='Shipping', extraArgs=[]):
    """
    Packages a build of the Unreal project in the specified directory, using common packaging options
    """

    # Verify that the specified build configuration is valid
    if configuration not in self.validBuildConfigurations():
        raise UnrealManagerException('invalid build configuration "' + configuration + '"')

    # Strip out the `-NoCompileEditor` flag if the user has specified it, since the Development version
    # of the Editor modules for the project are needed in order to run the commandlet that cooks content
    extraArgs = Utility.stripArgs(extraArgs, ['-nocompileeditor'])

    # Prevent the user from specifying multiple `-platform=` or `-targetplatform=` arguments,
    # and use the current host platform if no platform argument was explicitly specified
    platformArgs = Utility.findArgs(extraArgs, ['-platform=', '-targetplatform='])
    platform = Utility.getArgValue(platformArgs[0]) if len(platformArgs) > 0 else self.getPlatformIdentifier()
    extraArgs = Utility.stripArgs(extraArgs, platformArgs) + ['-platform={}'.format(platform)]

    # If we are packaging a Shipping build, do not include debug symbols
    if configuration == 'Shipping':
        extraArgs.append('-nodebuginfo')

    # Do not create a .pak file when packaging for HTML5
    pakArg = '-package' if platform.upper() == 'HTML5' else '-pak'

    # Invoke UAT to package the build
    distDir = os.path.join(os.path.abspath(dir), 'dist')
    self.runUAT([
        'BuildCookRun',
        '-utf8output',
        '-clientconfig=' + configuration,
        '-serverconfig=' + configuration,
        '-project=' + self.getProjectDescriptor(dir),
        '-noP4',
        '-cook',
        '-allmaps',
        '-build',
        '-stage',
        '-prereqs',
        pakArg,
        '-archive',
        '-archivedirectory=' + distDir
    ] + extraArgs)
[ "def", "packageProject", "(", "self", ",", "dir", "=", "os", ".", "getcwd", "(", ")", ",", "configuration", "=", "'Shipping'", ",", "extraArgs", "=", "[", "]", ")", ":", "# Verify that the specified build configuration is valid", "if", "configuration", "not", "in", "self", ".", "validBuildConfigurations", "(", ")", ":", "raise", "UnrealManagerException", "(", "'invalid build configuration \"'", "+", "configuration", "+", "'\"'", ")", "# Strip out the `-NoCompileEditor` flag if the user has specified it, since the Development version", "# of the Editor modules for the project are needed in order to run the commandlet that cooks content", "extraArgs", "=", "Utility", ".", "stripArgs", "(", "extraArgs", ",", "[", "'-nocompileeditor'", "]", ")", "# Prevent the user from specifying multiple `-platform=` or `-targetplatform=` arguments,", "# and use the current host platform if no platform argument was explicitly specified", "platformArgs", "=", "Utility", ".", "findArgs", "(", "extraArgs", ",", "[", "'-platform='", ",", "'-targetplatform='", "]", ")", "platform", "=", "Utility", ".", "getArgValue", "(", "platformArgs", "[", "0", "]", ")", "if", "len", "(", "platformArgs", ")", ">", "0", "else", "self", ".", "getPlatformIdentifier", "(", ")", "extraArgs", "=", "Utility", ".", "stripArgs", "(", "extraArgs", ",", "platformArgs", ")", "+", "[", "'-platform={}'", ".", "format", "(", "platform", ")", "]", "# If we are packaging a Shipping build, do not include debug symbols", "if", "configuration", "==", "'Shipping'", ":", "extraArgs", ".", "append", "(", "'-nodebuginfo'", ")", "# Do not create a .pak file when packaging for HTML5", "pakArg", "=", "'-package'", "if", "platform", ".", "upper", "(", ")", "==", "'HTML5'", "else", "'-pak'", "# Invoke UAT to package the build", "distDir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "abspath", "(", "dir", ")", ",", "'dist'", ")", "self", ".", "runUAT", "(", "[", "'BuildCookRun'", ",", "'-utf8output'", ",", "'-clientconfig='", "+", "configuration", ",", "'-serverconfig='", "+", "configuration", ",", "'-project='", "+", "self", ".", "getProjectDescriptor", "(", "dir", ")", ",", "'-noP4'", ",", "'-cook'", ",", "'-allmaps'", ",", "'-build'", ",", "'-stage'", ",", "'-prereqs'", ",", "pakArg", ",", "'-archive'", ",", "'-archivedirectory='", "+", "distDir", "]", "+", "extraArgs", ")" ]
Packages a build of the Unreal project in the specified directory, using common packaging options
[ "Packages", "a", "build", "of", "the", "Unreal", "project", "in", "the", "specified", "directory", "using", "common", "packaging", "options" ]
python
train
dls-controls/pymalcolm
malcolm/core/controller.py
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/core/controller.py#L185-L210
def _handle_put(self, request):
    # type: (Put) -> CallbackResponses
    """Called with the lock taken"""
    attribute_name = request.path[1]

    attribute = self._block[attribute_name]
    assert isinstance(attribute, AttributeModel), \
        "Cannot Put to %s which is a %s" % (attribute.path, type(attribute))
    self.check_field_writeable(attribute)

    put_function = self.get_put_function(attribute_name)
    value = attribute.meta.validate(request.value)

    with self.lock_released:
        result = put_function(value)

    if request.get and result is None:
        # We asked for a Get, and didn't get given a return, so do return
        # the current value. Don't serialize here as value is immutable
        # (as long as we don't try too hard to break the rules)
        result = self._block[attribute_name].value
    elif not request.get:
        # We didn't ask for a Get, so throw result away
        result = None
    ret = [request.return_response(result)]
    return ret
[ "def", "_handle_put", "(", "self", ",", "request", ")", ":", "# type: (Put) -> CallbackResponses", "attribute_name", "=", "request", ".", "path", "[", "1", "]", "attribute", "=", "self", ".", "_block", "[", "attribute_name", "]", "assert", "isinstance", "(", "attribute", ",", "AttributeModel", ")", ",", "\"Cannot Put to %s which is a %s\"", "%", "(", "attribute", ".", "path", ",", "type", "(", "attribute", ")", ")", "self", ".", "check_field_writeable", "(", "attribute", ")", "put_function", "=", "self", ".", "get_put_function", "(", "attribute_name", ")", "value", "=", "attribute", ".", "meta", ".", "validate", "(", "request", ".", "value", ")", "with", "self", ".", "lock_released", ":", "result", "=", "put_function", "(", "value", ")", "if", "request", ".", "get", "and", "result", "is", "None", ":", "# We asked for a Get, and didn't get given a return, so do return", "# the current value. Don't serialize here as value is immutable", "# (as long as we don't try too hard to break the rules)", "result", "=", "self", ".", "_block", "[", "attribute_name", "]", ".", "value", "elif", "not", "request", ".", "get", ":", "# We didn't ask for a Get, so throw result away", "result", "=", "None", "ret", "=", "[", "request", ".", "return_response", "(", "result", ")", "]", "return", "ret" ]
Called with the lock taken
[ "Called", "with", "the", "lock", "taken" ]
python
train
Parquery/icontract
icontract/_checkers.py
https://github.com/Parquery/icontract/blob/846e3187869a9ba790e9b893c98e5055e1cce274/icontract/_checkers.py#L360-L407
def _decorate_with_invariants(func: CallableT, is_init: bool) -> CallableT:
    """
    Decorate the function ``func`` of the class ``cls`` with invariant checks.

    If the function has been already decorated with invariant checks, the function returns immediately.

    :param func: function to be wrapped
    :param is_init: True if the ``func`` is __init__
    :return: function wrapped with invariant checks
    """
    if _already_decorated_with_invariants(func=func):
        return func

    sign = inspect.signature(func)
    param_names = list(sign.parameters.keys())

    if is_init:
        def wrapper(*args, **kwargs):
            """Wrap __init__ method of a class by checking the invariants *after* the invocation."""
            result = func(*args, **kwargs)
            instance = _find_self(param_names=param_names, args=args, kwargs=kwargs)

            for contract in instance.__class__.__invariants__:
                _assert_invariant(contract=contract, instance=instance)

            return result
    else:
        def wrapper(*args, **kwargs):
            """Wrap a function of a class by checking the invariants *before* and *after* the invocation."""
            instance = _find_self(param_names=param_names, args=args, kwargs=kwargs)

            for contract in instance.__class__.__invariants__:
                _assert_invariant(contract=contract, instance=instance)

            result = func(*args, **kwargs)

            for contract in instance.__class__.__invariants__:
                _assert_invariant(contract=contract, instance=instance)

            return result

    functools.update_wrapper(wrapper=wrapper, wrapped=func)

    setattr(wrapper, "__is_invariant_check__", True)

    return wrapper
[ "def", "_decorate_with_invariants", "(", "func", ":", "CallableT", ",", "is_init", ":", "bool", ")", "->", "CallableT", ":", "if", "_already_decorated_with_invariants", "(", "func", "=", "func", ")", ":", "return", "func", "sign", "=", "inspect", ".", "signature", "(", "func", ")", "param_names", "=", "list", "(", "sign", ".", "parameters", ".", "keys", "(", ")", ")", "if", "is_init", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Wrap __init__ method of a class by checking the invariants *after* the invocation.\"\"\"", "result", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "instance", "=", "_find_self", "(", "param_names", "=", "param_names", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ")", "for", "contract", "in", "instance", ".", "__class__", ".", "__invariants__", ":", "_assert_invariant", "(", "contract", "=", "contract", ",", "instance", "=", "instance", ")", "return", "result", "else", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Wrap a function of a class by checking the invariants *before* and *after* the invocation.\"\"\"", "instance", "=", "_find_self", "(", "param_names", "=", "param_names", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ")", "for", "contract", "in", "instance", ".", "__class__", ".", "__invariants__", ":", "_assert_invariant", "(", "contract", "=", "contract", ",", "instance", "=", "instance", ")", "result", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "for", "contract", "in", "instance", ".", "__class__", ".", "__invariants__", ":", "_assert_invariant", "(", "contract", "=", "contract", ",", "instance", "=", "instance", ")", "return", "result", "functools", ".", "update_wrapper", "(", "wrapper", "=", "wrapper", ",", "wrapped", "=", "func", ")", "setattr", "(", "wrapper", ",", "\"__is_invariant_check__\"", ",", "True", ")", "return", "wrapper" ]
Decorate the function ``func`` of the class ``cls`` with invariant checks.

If the function has been already decorated with invariant checks, the function returns immediately.

:param func: function to be wrapped
:param is_init: True if the ``func`` is __init__
:return: function wrapped with invariant checks
[ "Decorate", "the", "function", "func", "of", "the", "class", "cls", "with", "invariant", "checks", "." ]
python
train
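The decorator above checks class invariants after __init__ and around every other method. A stripped-down, dependency-free sketch of the same wrapping pattern — plain callables as invariants instead of icontract's contract objects, and a class decorator instead of the library's metaclass plumbing:

```python
import functools


def with_invariants(cls):
    """Class decorator: check cls.INVARIANTS around every public method (simplified sketch)."""

    def check(instance):
        for inv in getattr(cls, "INVARIANTS", []):
            assert inv(instance), "invariant violated"

    def wrap(func, is_init):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            if not is_init:
                check(self)            # check before the call (except for __init__)
            result = func(self, *args, **kwargs)
            check(self)                # always check after the call
            return result
        return wrapper

    for name, attr in list(vars(cls).items()):
        if callable(attr) and (name == "__init__" or not name.startswith("_")):
            setattr(cls, name, wrap(attr, is_init=(name == "__init__")))
    return cls


@with_invariants
class Account:
    INVARIANTS = [lambda self: self.balance >= 0]

    def __init__(self, balance):
        self.balance = balance

    def withdraw(self, amount):
        self.balance -= amount


acct = Account(10)
acct.withdraw(4)        # fine
# acct.withdraw(100)    # would raise AssertionError: invariant violated
```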
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/s3.py#L282-L301
def put_multipart(self, local_path, destination_s3_path, part_size=DEFAULT_PART_SIZE, **kwargs):
    """
    Put an object stored locally to an S3 path
    using S3 multi-part upload (for files > 8Mb).
    :param local_path: Path to source local file
    :param destination_s3_path: URL for target S3 location
    :param part_size: Part size in bytes. Default: 8388608 (8MB)
    :param kwargs: Keyword arguments are passed to the boto function `upload_fileobj` as ExtraArgs
    """
    self._check_deprecated_argument(**kwargs)

    from boto3.s3.transfer import TransferConfig
    # default part size for boto3 is 8Mb, changing it to fit part_size
    # provided as a parameter
    transfer_config = TransferConfig(multipart_chunksize=part_size)

    (bucket, key) = self._path_to_bucket_and_key(destination_s3_path)

    self.s3.meta.client.upload_fileobj(
        Fileobj=open(local_path, 'rb'), Bucket=bucket, Key=key,
        Config=transfer_config, ExtraArgs=kwargs)
[ "def", "put_multipart", "(", "self", ",", "local_path", ",", "destination_s3_path", ",", "part_size", "=", "DEFAULT_PART_SIZE", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_check_deprecated_argument", "(", "*", "*", "kwargs", ")", "from", "boto3", ".", "s3", ".", "transfer", "import", "TransferConfig", "# default part size for boto3 is 8Mb, changing it to fit part_size", "# provided as a parameter", "transfer_config", "=", "TransferConfig", "(", "multipart_chunksize", "=", "part_size", ")", "(", "bucket", ",", "key", ")", "=", "self", ".", "_path_to_bucket_and_key", "(", "destination_s3_path", ")", "self", ".", "s3", ".", "meta", ".", "client", ".", "upload_fileobj", "(", "Fileobj", "=", "open", "(", "local_path", ",", "'rb'", ")", ",", "Bucket", "=", "bucket", ",", "Key", "=", "key", ",", "Config", "=", "transfer_config", ",", "ExtraArgs", "=", "kwargs", ")" ]
Put an object stored locally to an S3 path
using S3 multi-part upload (for files > 8Mb).

:param local_path: Path to source local file
:param destination_s3_path: URL for target S3 location
:param part_size: Part size in bytes. Default: 8388608 (8MB)
:param kwargs: Keyword arguments are passed to the boto function `upload_fileobj` as ExtraArgs
[ "Put", "an", "object", "stored", "locally", "to", "an", "S3", "path", "using", "S3", "multi", "-", "part", "upload", "(", "for", "files", ">", "8Mb", ")", ".", ":", "param", "local_path", ":", "Path", "to", "source", "local", "file", ":", "param", "destination_s3_path", ":", "URL", "for", "target", "S3", "location", ":", "param", "part_size", ":", "Part", "size", "in", "bytes", ".", "Default", ":", "8388608", "(", "8MB", ")", ":", "param", "kwargs", ":", "Keyword", "arguments", "are", "passed", "to", "the", "boto", "function", "upload_fileobj", "as", "ExtraArgs" ]
python
train
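put_multipart delegates chunking to boto3's transfer manager via TransferConfig. Calling boto3 directly looks like the sketch below; the bucket, key, and 16 MB part size are placeholders, and valid AWS credentials are assumed:

```python
import boto3
from boto3.s3.transfer import TransferConfig

s3 = boto3.client("s3")

# 16 MB parts instead of boto3's 8 MB default; upload_fileobj splits the
# stream into multipart chunks of this size automatically.
config = TransferConfig(multipart_chunksize=16 * 1024 * 1024)

with open("/tmp/big-file.bin", "rb") as fileobj:
    s3.upload_fileobj(
        Fileobj=fileobj,
        Bucket="example-bucket",        # placeholder bucket
        Key="backups/big-file.bin",     # placeholder key
        Config=config,
        ExtraArgs={"ACL": "bucket-owner-full-control"},
    )
```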
inveniosoftware-contrib/invenio-groups
invenio_groups/models.py
https://github.com/inveniosoftware-contrib/invenio-groups/blob/109481d6b02701db00b72223dd4a65e167c589a6/invenio_groups/models.py#L789-L804
def query_admins_by_group_ids(cls, groups_ids=None):
    """Get count of admins per group."""
    assert groups_ids is None or isinstance(groups_ids, list)

    query = db.session.query(
        Group.id, func.count(GroupAdmin.id)
    ).join(
        GroupAdmin
    ).group_by(
        Group.id
    )

    if groups_ids:
        query = query.filter(Group.id.in_(groups_ids))

    return query
[ "def", "query_admins_by_group_ids", "(", "cls", ",", "groups_ids", "=", "None", ")", ":", "assert", "groups_ids", "is", "None", "or", "isinstance", "(", "groups_ids", ",", "list", ")", "query", "=", "db", ".", "session", ".", "query", "(", "Group", ".", "id", ",", "func", ".", "count", "(", "GroupAdmin", ".", "id", ")", ")", ".", "join", "(", "GroupAdmin", ")", ".", "group_by", "(", "Group", ".", "id", ")", "if", "groups_ids", ":", "query", "=", "query", ".", "filter", "(", "Group", ".", "id", ".", "in_", "(", "groups_ids", ")", ")", "return", "query" ]
Get count of admins per group.
[ "Get", "count", "of", "admins", "per", "group", "." ]
python
valid
pgjones/quart
quart/app.py
https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/app.py#L1263-L1284
async def do_teardown_websocket(
        self,
        exc: Optional[BaseException],
        websocket_context: Optional[WebsocketContext]=None,
) -> None:
    """Teardown the websocket, calling the teardown functions.

    Arguments:
        exc: Any exception not handled that has caused the websocket
            to teardown.
        websocket_context: The websocket context, optional as Flask
            omits this argument.
    """
    websocket_ = (websocket_context or _websocket_ctx_stack.top).websocket
    functions = self.teardown_websocket_funcs[None]
    blueprint = websocket_.blueprint
    if blueprint is not None:
        functions = chain(functions, self.teardown_websocket_funcs[blueprint])  # type: ignore

    for function in functions:
        await function(exc=exc)
    await websocket_tearing_down.send(self, exc=exc)
[ "async", "def", "do_teardown_websocket", "(", "self", ",", "exc", ":", "Optional", "[", "BaseException", "]", ",", "websocket_context", ":", "Optional", "[", "WebsocketContext", "]", "=", "None", ",", ")", "->", "None", ":", "websocket_", "=", "(", "websocket_context", "or", "_websocket_ctx_stack", ".", "top", ")", ".", "websocket", "functions", "=", "self", ".", "teardown_websocket_funcs", "[", "None", "]", "blueprint", "=", "websocket_", ".", "blueprint", "if", "blueprint", "is", "not", "None", ":", "functions", "=", "chain", "(", "functions", ",", "self", ".", "teardown_websocket_funcs", "[", "blueprint", "]", ")", "# type: ignore", "for", "function", "in", "functions", ":", "await", "function", "(", "exc", "=", "exc", ")", "await", "websocket_tearing_down", ".", "send", "(", "self", ",", "exc", "=", "exc", ")" ]
Teardown the websocket, calling the teardown functions.

Arguments:
    exc: Any exception not handled that has caused the websocket
        to teardown.
    websocket_context: The websocket context, optional as Flask
        omits this argument.
[ "Teardown", "the", "websocket", "calling", "the", "teardown", "functions", "." ]
python
train
aparo/pyes
pyes/connection.py
https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/connection.py#L173-L185
def connect(self):
    """Create new connection unless we already have one."""
    if not getattr(self._local, 'conn', None):
        try:
            server = self._servers.get()
            logger.debug('Connecting to %s', server)
            self._local.conn = ClientTransport(server, self._framed_transport,
                                               self._timeout, self._recycle)
        except (Thrift.TException, socket.timeout, socket.error):
            logger.warning('Connection to %s failed.', server)
            self._servers.mark_dead(server)
            return self.connect()
    return self._local.conn
[ "def", "connect", "(", "self", ")", ":", "if", "not", "getattr", "(", "self", ".", "_local", ",", "'conn'", ",", "None", ")", ":", "try", ":", "server", "=", "self", ".", "_servers", ".", "get", "(", ")", "logger", ".", "debug", "(", "'Connecting to %s'", ",", "server", ")", "self", ".", "_local", ".", "conn", "=", "ClientTransport", "(", "server", ",", "self", ".", "_framed_transport", ",", "self", ".", "_timeout", ",", "self", ".", "_recycle", ")", "except", "(", "Thrift", ".", "TException", ",", "socket", ".", "timeout", ",", "socket", ".", "error", ")", ":", "logger", ".", "warning", "(", "'Connection to %s failed.'", ",", "server", ")", "self", ".", "_servers", ".", "mark_dead", "(", "server", ")", "return", "self", ".", "connect", "(", ")", "return", "self", ".", "_local", ".", "conn" ]
Create new connection unless we already have one.
[ "Create", "new", "connection", "unless", "we", "already", "have", "one", "." ]
python
train
rgs1/zk_shell
zk_shell/xclient.py
https://github.com/rgs1/zk_shell/blob/bbf34fdfcf1f81100e2a5816fad8af6afc782a54/zk_shell/xclient.py#L82-L85
def create(self, path, value=b"", acl=None, ephemeral=False, sequence=False): """ wrapper that handles encoding (yay Py3k) """ super(XTransactionRequest, self).create(path, to_bytes(value), acl, ephemeral, sequence)
[ "def", "create", "(", "self", ",", "path", ",", "value", "=", "b\"\"", ",", "acl", "=", "None", ",", "ephemeral", "=", "False", ",", "sequence", "=", "False", ")", ":", "super", "(", "XTransactionRequest", ",", "self", ")", ".", "create", "(", "path", ",", "to_bytes", "(", "value", ")", ",", "acl", ",", "ephemeral", ",", "sequence", ")" ]
wrapper that handles encoding (yay Py3k)
[ "wrapper", "that", "handles", "encoding", "(", "yay", "Py3k", ")" ]
python
train
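The wrapper above exists only to coerce the value to bytes before handing it to the underlying transaction request. A small hedged sketch of such a helper (not zk_shell's actual to_bytes implementation):

```python
def to_bytes(value, encoding="utf-8"):
    """Return value as bytes, encoding str on Python 3 (a hypothetical helper)."""
    if isinstance(value, bytes):
        return value
    if isinstance(value, str):
        return value.encode(encoding)
    raise TypeError("expected str or bytes, got %r" % type(value))


assert to_bytes("znode data") == b"znode data"
assert to_bytes(b"already bytes") == b"already bytes"
```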
klmitch/tendril
tendril/manager.py
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/manager.py#L291-L312
def get_local_addr(self, timeout=None):
    """
    Retrieve the current local address.

    :param timeout: If not given or given as ``None``, waits until
                    the local address is available.  Otherwise,
                    waits for as long as specified.  If the local
                    address is not set by the time the timeout
                    expires, returns ``None``.
    """

    # If we're not running, just return None
    if not self.running:
        return None

    # OK, we're running; wait on the _local_addr_event
    if not self._local_addr_event.wait(timeout):
        # Still not set after timeout
        return None

    # We have a local address!
    return self._local_addr
[ "def", "get_local_addr", "(", "self", ",", "timeout", "=", "None", ")", ":", "# If we're not running, just return None", "if", "not", "self", ".", "running", ":", "return", "None", "# OK, we're running; wait on the _local_addr_event", "if", "not", "self", ".", "_local_addr_event", ".", "wait", "(", "timeout", ")", ":", "# Still not set after timeout", "return", "None", "# We have a local address!", "return", "self", ".", "_local_addr" ]
Retrieve the current local address.

:param timeout: If not given or given as ``None``, waits until
                the local address is available.  Otherwise,
                waits for as long as specified.  If the local
                address is not set by the time the timeout
                expires, returns ``None``.
[ "Retrieve", "the", "current", "local", "address", "." ]
python
train
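The timeout handling above is the standard threading.Event pattern: wait() returns False when the timeout expires before the event is set. A self-contained sketch of the same idea with a background timer publishing an address (the class and addresses below are made up):

```python
import threading


class AddrHolder:
    def __init__(self):
        self._event = threading.Event()
        self._addr = None

    def set_addr(self, addr):
        self._addr = addr
        self._event.set()

    def get_local_addr(self, timeout=None):
        # Event.wait returns False if the timeout expires before set() is called.
        if not self._event.wait(timeout):
            return None
        return self._addr


holder = AddrHolder()
threading.Timer(0.5, holder.set_addr, args=[("127.0.0.1", 4001)]).start()

print(holder.get_local_addr(timeout=0.1))  # None: not yet available
print(holder.get_local_addr(timeout=2.0))  # ('127.0.0.1', 4001)
```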
tanghaibao/goatools
goatools/wr_tbl.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/wr_tbl.py#L133-L155
def prt_tsv_sections(prt, tsv_data, **kws):
    """Write tsv file containing section names followed by lines of namedtuple data."""
    prt_hdr_min = 10  # Print hdr on the 1st section and for any following 'large' sections
    num_items = 0
    if tsv_data:
        # Basic data checks
        assert len(tsv_data[0]) == 2, "wr_tsv_sections EXPECTED: [(section, nts), ..."
        assert tsv_data[0][1], \
            "wr_tsv_sections EXPECTED SECTION({S}) LIST TO HAVE DATA".format(S=tsv_data[0][0])
        hdrs_wrote = False
        sep = "\t" if 'sep' not in kws else kws['sep']
        prt_flds = kws['prt_flds'] if 'prt_flds' in kws else tsv_data[0]._fields
        fill = sep*(len(prt_flds) - 1)
        # Write data
        for section_text, data_nts in tsv_data:
            prt.write("{SEC}{FILL}\n".format(SEC=section_text, FILL=fill))
            if hdrs_wrote is False or len(data_nts) > prt_hdr_min:
                prt_tsv_hdr(prt, data_nts, **kws)
                hdrs_wrote = True
            num_items += prt_tsv_dat(prt, data_nts, **kws)
        return num_items
    else:
        return 0
[ "def", "prt_tsv_sections", "(", "prt", ",", "tsv_data", ",", "*", "*", "kws", ")", ":", "prt_hdr_min", "=", "10", "# Print hdr on the 1st section and for any following 'large' sections", "num_items", "=", "0", "if", "tsv_data", ":", "# Basic data checks", "assert", "len", "(", "tsv_data", "[", "0", "]", ")", "==", "2", ",", "\"wr_tsv_sections EXPECTED: [(section, nts), ...\"", "assert", "tsv_data", "[", "0", "]", "[", "1", "]", ",", "\"wr_tsv_sections EXPECTED SECTION({S}) LIST TO HAVE DATA\"", ".", "format", "(", "S", "=", "tsv_data", "[", "0", "]", "[", "0", "]", ")", "hdrs_wrote", "=", "False", "sep", "=", "\"\\t\"", "if", "'sep'", "not", "in", "kws", "else", "kws", "[", "'sep'", "]", "prt_flds", "=", "kws", "[", "'prt_flds'", "]", "if", "'prt_flds'", "in", "kws", "else", "tsv_data", "[", "0", "]", ".", "_fields", "fill", "=", "sep", "*", "(", "len", "(", "prt_flds", ")", "-", "1", ")", "# Write data", "for", "section_text", ",", "data_nts", "in", "tsv_data", ":", "prt", ".", "write", "(", "\"{SEC}{FILL}\\n\"", ".", "format", "(", "SEC", "=", "section_text", ",", "FILL", "=", "fill", ")", ")", "if", "hdrs_wrote", "is", "False", "or", "len", "(", "data_nts", ")", ">", "prt_hdr_min", ":", "prt_tsv_hdr", "(", "prt", ",", "data_nts", ",", "*", "*", "kws", ")", "hdrs_wrote", "=", "True", "num_items", "+=", "prt_tsv_dat", "(", "prt", ",", "data_nts", ",", "*", "*", "kws", ")", "return", "num_items", "else", ":", "return", "0" ]
Write tsv file containing section names followed by lines of namedtuple data.
[ "Write", "tsv", "file", "containing", "section", "names", "followed", "by", "lines", "of", "namedtuple", "data", "." ]
python
train
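prt_tsv_sections expects a list of (section, namedtuples) pairs and emits a header row per section when the section is large enough. The shape of that input, and a minimal stand-in writer, can be sketched without goatools (prt_tsv_hdr/prt_tsv_dat are not re-implemented; the GO records below are illustrative):

```python
import sys
from collections import namedtuple

GoRec = namedtuple("GoRec", "go_id name p_value")

tsv_data = [
    ("SECTION: immune response", [GoRec("GO:0006955", "immune response", 1.2e-5)]),
    ("SECTION: signaling",       [GoRec("GO:0023052", "signaling", 3.4e-3)]),
]


def write_sections(prt, tsv_data, sep="\t"):
    """Minimal stand-in for prt_tsv_sections: section row, header row, then data rows."""
    flds = tsv_data[0][1][0]._fields
    for section_text, records in tsv_data:
        prt.write(section_text + sep * (len(flds) - 1) + "\n")
        prt.write(sep.join(flds) + "\n")
        for rec in records:
            prt.write(sep.join(str(v) for v in rec) + "\n")


write_sections(sys.stdout, tsv_data)
```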
saltstack/salt
salt/modules/pagerduty_util.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pagerduty_util.py#L210-L252
def get_resource(resource_name, key, identifier_fields, profile='pagerduty', subdomain=None, api_key=None):
    '''
    Get any single pagerduty resource by key.

    We allow flexible lookup by any of a list of identifier_fields.
    So, for example, you can look up users by email address or name by calling:

        get_resource('users', key, ['name', 'email'], ...)

    This method is mainly used to translate state sls into pagerduty id's for dependent objects.
    For example, a pagerduty escalation policy contains one or more schedules, which must be passed
    by their pagerduty id.  We look up the schedules by name (using this method), and then translate
    the names into id's.

    This method is implemented by getting all objects of the resource type (cached into __context__),
    then brute force searching through the list and trying to match any of the identifier_fields.
    The __context__ cache is purged after any create, update or delete to the resource.
    '''
    # cache the expensive 'get all resources' calls into __context__ so that we do them once per salt run
    if 'pagerduty_util.resource_cache' not in __context__:
        __context__['pagerduty_util.resource_cache'] = {}
    if resource_name not in __context__['pagerduty_util.resource_cache']:
        if resource_name == 'services':
            action = resource_name + '?include[]=escalation_policy'
        else:
            action = resource_name
        __context__['pagerduty_util.resource_cache'][resource_name] = _query(action=action,
                                                                             profile=profile,
                                                                             subdomain=subdomain,
                                                                             api_key=api_key)[resource_name]
    for resource in __context__['pagerduty_util.resource_cache'][resource_name]:
        for field in identifier_fields:
            if resource[field] == key:
                # PagerDuty's /schedules endpoint returns less data than /schedules/:id.
                # so, now that we found the schedule, we need to get all the data for it.
                if resource_name == 'schedules':
                    full_resource_info = _query(action='{0}/{1}'.format(resource_name, resource['id']),
                                                profile=profile,
                                                subdomain=subdomain,
                                                api_key=api_key)
                    return full_resource_info
                return resource
    return None
[ "def", "get_resource", "(", "resource_name", ",", "key", ",", "identifier_fields", ",", "profile", "=", "'pagerduty'", ",", "subdomain", "=", "None", ",", "api_key", "=", "None", ")", ":", "# cache the expensive 'get all resources' calls into __context__ so that we do them once per salt run", "if", "'pagerduty_util.resource_cache'", "not", "in", "__context__", ":", "__context__", "[", "'pagerduty_util.resource_cache'", "]", "=", "{", "}", "if", "resource_name", "not", "in", "__context__", "[", "'pagerduty_util.resource_cache'", "]", ":", "if", "resource_name", "==", "'services'", ":", "action", "=", "resource_name", "+", "'?include[]=escalation_policy'", "else", ":", "action", "=", "resource_name", "__context__", "[", "'pagerduty_util.resource_cache'", "]", "[", "resource_name", "]", "=", "_query", "(", "action", "=", "action", ",", "profile", "=", "profile", ",", "subdomain", "=", "subdomain", ",", "api_key", "=", "api_key", ")", "[", "resource_name", "]", "for", "resource", "in", "__context__", "[", "'pagerduty_util.resource_cache'", "]", "[", "resource_name", "]", ":", "for", "field", "in", "identifier_fields", ":", "if", "resource", "[", "field", "]", "==", "key", ":", "# PagerDuty's /schedules endpoint returns less data than /schedules/:id.", "# so, now that we found the schedule, we need to get all the data for it.", "if", "resource_name", "==", "'schedules'", ":", "full_resource_info", "=", "_query", "(", "action", "=", "'{0}/{1}'", ".", "format", "(", "resource_name", ",", "resource", "[", "'id'", "]", ")", ",", "profile", "=", "profile", ",", "subdomain", "=", "subdomain", ",", "api_key", "=", "api_key", ")", "return", "full_resource_info", "return", "resource", "return", "None" ]
Get any single pagerduty resource by key.

We allow flexible lookup by any of a list of identifier_fields.
So, for example, you can look up users by email address or name by calling:

    get_resource('users', key, ['name', 'email'], ...)

This method is mainly used to translate state sls into pagerduty id's for dependent objects.
For example, a pagerduty escalation policy contains one or more schedules, which must be passed
by their pagerduty id.  We look up the schedules by name (using this method), and then translate
the names into id's.

This method is implemented by getting all objects of the resource type (cached into __context__),
then brute force searching through the list and trying to match any of the identifier_fields.
The __context__ cache is purged after any create, update or delete to the resource.
[ "Get", "any", "single", "pagerduty", "resource", "by", "key", "." ]
python
train
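Stripped of Salt's __context__ and the PagerDuty HTTP calls, the lookup boils down to caching one list per resource type and matching the key against several candidate fields. A standalone sketch of that pattern, where fetch_all stands in for the real _query call:

```python
_resource_cache = {}


def get_resource(resource_name, key, identifier_fields, fetch_all):
    """Find one resource by matching `key` against any of `identifier_fields`.

    `fetch_all(resource_name)` is a stand-in for the real API call and must
    return a list of dicts; results are cached per resource type.
    """
    if resource_name not in _resource_cache:
        _resource_cache[resource_name] = fetch_all(resource_name)
    for resource in _resource_cache[resource_name]:
        for field in identifier_fields:
            if resource.get(field) == key:
                return resource
    return None


def fake_fetch_all(resource_name):
    # Illustrative data only; a real implementation would hit the PagerDuty API.
    return [{"id": "PUSER1", "name": "Alice", "email": "alice@example.com"}]


print(get_resource("users", "alice@example.com", ["name", "email"], fake_fetch_all))
```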
nugget/python-anthemav
anthemav/protocol.py
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L208-L216
def _populate_inputs(self, total):
    """Request the names for all active, configured inputs on the device.

    Once we learn how many inputs are configured, this function is called
    which will ask for the name of each active input.
    """
    total = total + 1
    for input_number in range(1, total):
        self.query('ISN'+str(input_number).zfill(2))
[ "def", "_populate_inputs", "(", "self", ",", "total", ")", ":", "total", "=", "total", "+", "1", "for", "input_number", "in", "range", "(", "1", ",", "total", ")", ":", "self", ".", "query", "(", "'ISN'", "+", "str", "(", "input_number", ")", ".", "zfill", "(", "2", ")", ")" ]
Request the names for all active, configured inputs on the device.

Once we learn how many inputs are configured, this function is called
which will ask for the name of each active input.
[ "Request", "the", "names", "for", "all", "active", "configured", "inputs", "on", "the", "device", "." ]
python
train
jeongyoonlee/Kaggler
kaggler/preprocessing/data.py
https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/preprocessing/data.py#L212-L237
def _transform_col(self, x, i):
    """Encode one categorical column into sparse matrix with one-hot-encoding.

    Args:
        x (pandas.Series): a categorical column to encode
        i (int): column index

    Returns:
        X (scipy.sparse.coo_matrix): sparse matrix encoding a categorical
                                     variable into dummy variables
    """
    labels = self.label_encoder._transform_col(x, i)
    label_max = self.label_encoder.label_maxes[i]

    # build row and column index for non-zero values of a sparse matrix
    index = np.array(range(len(labels)))
    i = index[labels > 0]
    j = labels[labels > 0] - 1  # column index starts from 0

    if len(i) > 0:
        return sparse.coo_matrix((np.ones_like(i), (i, j)),
                                 shape=(x.shape[0], label_max))
    else:
        # if there is no non-zero value, return no matrix
        return None
[ "def", "_transform_col", "(", "self", ",", "x", ",", "i", ")", ":", "labels", "=", "self", ".", "label_encoder", ".", "_transform_col", "(", "x", ",", "i", ")", "label_max", "=", "self", ".", "label_encoder", ".", "label_maxes", "[", "i", "]", "# build row and column index for non-zero values of a sparse matrix", "index", "=", "np", ".", "array", "(", "range", "(", "len", "(", "labels", ")", ")", ")", "i", "=", "index", "[", "labels", ">", "0", "]", "j", "=", "labels", "[", "labels", ">", "0", "]", "-", "1", "# column index starts from 0", "if", "len", "(", "i", ")", ">", "0", ":", "return", "sparse", ".", "coo_matrix", "(", "(", "np", ".", "ones_like", "(", "i", ")", ",", "(", "i", ",", "j", ")", ")", ",", "shape", "=", "(", "x", ".", "shape", "[", "0", "]", ",", "label_max", ")", ")", "else", ":", "# if there is no non-zero value, return no matrix", "return", "None" ]
Encode one categorical column into sparse matrix with one-hot-encoding.

Args:
    x (pandas.Series): a categorical column to encode
    i (int): column index

Returns:
    X (scipy.sparse.coo_matrix): sparse matrix encoding a categorical
                                 variable into dummy variables
[ "Encode", "one", "categorical", "column", "into", "sparse", "matrix", "with", "one", "-", "hot", "-", "encoding", "." ]
python
train
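The encoder builds the one-hot matrix directly as a COO triplet: row indices for non-zero labels, label minus one as the column index, and ones as the data. The same construction with plain numpy/scipy, where a label of 0 marks values the encoder has not seen (as in the label encoder above):

```python
import numpy as np
from scipy import sparse

# Integer labels for one column; 0 marks values the encoder has not seen.
labels = np.array([1, 3, 0, 2, 1])
label_max = 3  # number of known categories

index = np.arange(len(labels))
i = index[labels > 0]          # rows that carry a known category
j = labels[labels > 0] - 1     # shift so column indices start at 0

onehot = sparse.coo_matrix(
    (np.ones_like(i), (i, j)), shape=(len(labels), label_max)
)
print(onehot.toarray())
# [[1 0 0]
#  [0 0 1]
#  [0 0 0]   <- unseen value stays an all-zero row
#  [0 1 0]
#  [1 0 0]]
```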
cpburnz/python-path-specification
pathspec/util.py
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L57-L126
def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links):
    """
    Scan the directory for all descendant files.

    *root_full* (:class:`str`) the absolute path to the root directory.

    *dir_rel* (:class:`str`) the path to the directory to scan relative to
    *root_full*.

    *memo* (:class:`dict`) keeps track of ancestor directories encountered.
    Maps each ancestor real path (:class:`str``) to relative path
    (:class:`str`).

    *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
    optionally is the error handler for file-system exceptions.

    *follow_links* (:class:`bool`) is whether to walk symbolik links that
    resolve to directories.
    """
    dir_full = os.path.join(root_full, dir_rel)
    dir_real = os.path.realpath(dir_full)

    # Remember each encountered ancestor directory and its canonical
    # (real) path. If a canonical path is encountered more than once,
    # recursion has occurred.
    if dir_real not in memo:
        memo[dir_real] = dir_rel
    else:
        raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)

    for node in os.listdir(dir_full):
        node_rel = os.path.join(dir_rel, node)
        node_full = os.path.join(root_full, node_rel)

        # Inspect child node.
        try:
            node_stat = os.lstat(node_full)
        except OSError as e:
            if on_error is not None:
                on_error(e)
            continue

        if stat.S_ISLNK(node_stat.st_mode):
            # Child node is a link, inspect the target node.
            is_link = True
            try:
                node_stat = os.stat(node_full)
            except OSError as e:
                if on_error is not None:
                    on_error(e)
                continue
        else:
            is_link = False

        if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
            # Child node is a directory, recurse into it and yield its
            # decendant files.
            for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links):
                yield file_rel

        elif stat.S_ISREG(node_stat.st_mode):
            # Child node is a file, yield it.
            yield node_rel

    # NOTE: Make sure to remove the canonical (real) path of the directory
    # from the ancestors memo once we are done with it. This allows the
    # same directory to appear multiple times. If this is not done, the
    # second occurance of the directory will be incorrectly interpreted as
    # a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.
    del memo[dir_real]
[ "def", "_iter_tree_next", "(", "root_full", ",", "dir_rel", ",", "memo", ",", "on_error", ",", "follow_links", ")", ":", "dir_full", "=", "os", ".", "path", ".", "join", "(", "root_full", ",", "dir_rel", ")", "dir_real", "=", "os", ".", "path", ".", "realpath", "(", "dir_full", ")", "# Remember each encountered ancestor directory and its canonical", "# (real) path. If a canonical path is encountered more than once,", "# recursion has occurred.", "if", "dir_real", "not", "in", "memo", ":", "memo", "[", "dir_real", "]", "=", "dir_rel", "else", ":", "raise", "RecursionError", "(", "real_path", "=", "dir_real", ",", "first_path", "=", "memo", "[", "dir_real", "]", ",", "second_path", "=", "dir_rel", ")", "for", "node", "in", "os", ".", "listdir", "(", "dir_full", ")", ":", "node_rel", "=", "os", ".", "path", ".", "join", "(", "dir_rel", ",", "node", ")", "node_full", "=", "os", ".", "path", ".", "join", "(", "root_full", ",", "node_rel", ")", "# Inspect child node.", "try", ":", "node_stat", "=", "os", ".", "lstat", "(", "node_full", ")", "except", "OSError", "as", "e", ":", "if", "on_error", "is", "not", "None", ":", "on_error", "(", "e", ")", "continue", "if", "stat", ".", "S_ISLNK", "(", "node_stat", ".", "st_mode", ")", ":", "# Child node is a link, inspect the target node.", "is_link", "=", "True", "try", ":", "node_stat", "=", "os", ".", "stat", "(", "node_full", ")", "except", "OSError", "as", "e", ":", "if", "on_error", "is", "not", "None", ":", "on_error", "(", "e", ")", "continue", "else", ":", "is_link", "=", "False", "if", "stat", ".", "S_ISDIR", "(", "node_stat", ".", "st_mode", ")", "and", "(", "follow_links", "or", "not", "is_link", ")", ":", "# Child node is a directory, recurse into it and yield its", "# decendant files.", "for", "file_rel", "in", "_iter_tree_next", "(", "root_full", ",", "node_rel", ",", "memo", ",", "on_error", ",", "follow_links", ")", ":", "yield", "file_rel", "elif", "stat", ".", "S_ISREG", "(", "node_stat", ".", "st_mode", ")", ":", "# Child node is a file, yield it.", "yield", "node_rel", "# NOTE: Make sure to remove the canonical (real) path of the directory", "# from the ancestors memo once we are done with it. This allows the", "# same directory to appear multiple times. If this is not done, the", "# second occurance of the directory will be incorrectly interpreted as", "# a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.", "del", "memo", "[", "dir_real", "]" ]
Scan the directory for all descendant files.

*root_full* (:class:`str`) the absolute path to the root directory.

*dir_rel* (:class:`str`) the path to the directory to scan relative to
*root_full*.

*memo* (:class:`dict`) keeps track of ancestor directories encountered.
Maps each ancestor real path (:class:`str`) to relative path (:class:`str`).

*on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally
is the error handler for file-system exceptions.

*follow_links* (:class:`bool`) is whether to walk symbolic links that
resolve to directories.
[ "Scan", "the", "directory", "for", "all", "descendant", "files", "." ]
python
train
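The generator above detects symlink cycles by memoising each directory's canonical path before descending and removing it afterwards. A compact standalone walker using the same realpath-memo trick (recursion is reported with a plain ValueError here rather than pathspec's RecursionError class, and per-entry errors are simply skipped instead of routed through on_error):

```python
import os
import stat


def iter_files(root, follow_links=True):
    """Yield file paths relative to `root`, guarding against symlink loops."""

    def walk(dir_rel, memo):
        dir_full = os.path.join(root, dir_rel)
        dir_real = os.path.realpath(dir_full)
        if dir_real in memo:
            raise ValueError("symlink recursion: %s and %s" % (memo[dir_real], dir_rel))
        memo[dir_real] = dir_rel
        for node in os.listdir(dir_full):
            node_rel = os.path.join(dir_rel, node)
            try:
                st = os.lstat(os.path.join(root, node_rel))
                is_link = stat.S_ISLNK(st.st_mode)
                if is_link:
                    st = os.stat(os.path.join(root, node_rel))
            except OSError:
                continue  # unreadable entry or broken link: skip it
            if stat.S_ISDIR(st.st_mode) and (follow_links or not is_link):
                yield from walk(node_rel, memo)
            elif stat.S_ISREG(st.st_mode):
                yield node_rel
        del memo[dir_real]  # allow the same directory on sibling branches

    yield from walk("", {})


if __name__ == "__main__":
    for rel_path in iter_files("."):
        print(rel_path)
```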
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L1044-L1050
def project_set_properties(object_id, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /project-xxxx/setProperties API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2FsetProperties
    """
    return DXHTTPRequest('/%s/setProperties' % object_id, input_params, always_retry=always_retry, **kwargs)
[ "def", "project_set_properties", "(", "object_id", ",", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/%s/setProperties'", "%", "object_id", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", "*", "*", "kwargs", ")" ]
Invokes the /project-xxxx/setProperties API method.

For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2FsetProperties
[ "Invokes", "the", "/", "project", "-", "xxxx", "/", "setProperties", "API", "method", "." ]
python
train