Dataset columns (name, dtype, observed range):

    repo              stringlengths   7 .. 55
    path              stringlengths   4 .. 223
    url               stringlengths   87 .. 315
    code              stringlengths   75 .. 104k
    code_tokens       list
    docstring         stringlengths   1 .. 46.9k
    docstring_tokens  list
    language          stringclasses   1 value
    partition         stringclasses   3 values
    avg_line_len      float64         7.91 .. 980
ssato/python-anyconfig
src/anyconfig/processors.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/processors.py#L286-L304
def list_by_x(self, item=None):
    """
    :param item: Grouping key, one of "cid", "type" and "extensions"
    :return: A list of :class:`Processor` or its children classes
        grouped by given 'item', [(cid, [:class:`Processor`)]] by default
    """
    prs = self._processors
    if item is None or item == "cid":  # Default.
        res = [(cid, [prs[cid]]) for cid in sorted(prs.keys())]
    elif item in ("type", "extensions"):
        res = list_by_x(prs.values(), item)
    else:
        raise ValueError("keyword argument 'item' must be one of "
                         "None, 'cid', 'type' and 'extensions' "
                         "but it was '%s'" % item)
    return res
[ "def", "list_by_x", "(", "self", ",", "item", "=", "None", ")", ":", "prs", "=", "self", ".", "_processors", "if", "item", "is", "None", "or", "item", "==", "\"cid\"", ":", "# Default.", "res", "=", "[", "(", "cid", ",", "[", "prs", "[", "cid", "]", "]", ")", "for", "cid", "in", "sorted", "(", "prs", ".", "keys", "(", ")", ")", "]", "elif", "item", "in", "(", "\"type\"", ",", "\"extensions\"", ")", ":", "res", "=", "list_by_x", "(", "prs", ".", "values", "(", ")", ",", "item", ")", "else", ":", "raise", "ValueError", "(", "\"keyword argument 'item' must be one of \"", "\"None, 'cid', 'type' and 'extensions' \"", "\"but it was '%s'\"", "%", "item", ")", "return", "res" ]
:param item: Grouping key, one of "cid", "type" and "extensions"
:return: A list of :class:`Processor` or its children classes grouped by
    given 'item', [(cid, [:class:`Processor`)]] by default
[ ":", "param", "item", ":", "Grouping", "key", "one", "of", "cid", "type", "and", "extensions", ":", "return", ":", "A", "list", "of", ":", "class", ":", "Processor", "or", "its", "children", "classes", "grouped", "by", "given", "item", "[", "(", "cid", "[", ":", "class", ":", "Processor", ")", "]]", "by", "default" ]
python
train
39.263158
psss/fmf
fmf/base.py
https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/base.py#L217-L276
def grow(self, path):
    """
    Grow the metadata tree for the given directory path

    Note: For each path, grow() should be run only once. Growing the
    tree from the same path multiple times with attribute adding
    using the "+" sign leads to adding the value more than once!
    """
    if path is None:
        return
    path = path.rstrip("/")
    log.info("Walking through directory {0}".format(
        os.path.abspath(path)))
    dirpath, dirnames, filenames = next(os.walk(path))
    # Investigate main.fmf as the first file (for correct inheritance)
    filenames = sorted(
        [filename for filename in filenames if filename.endswith(SUFFIX)])
    try:
        filenames.insert(0, filenames.pop(filenames.index(MAIN)))
    except ValueError:
        pass
    # Check every metadata file and load data (ignore hidden)
    for filename in filenames:
        if filename.startswith("."):
            continue
        fullpath = os.path.abspath(os.path.join(dirpath, filename))
        log.info("Checking file {0}".format(fullpath))
        try:
            with open(fullpath) as datafile:
                data = yaml.load(datafile, Loader=FullLoader)
        except yaml.scanner.ScannerError as error:
            raise(utils.FileError("Failed to parse '{0}'\n{1}".format(
                fullpath, error)))
        log.data(pretty(data))
        # Handle main.fmf as data for self
        if filename == MAIN:
            self.sources.append(fullpath)
            self.update(data)
        # Handle other *.fmf files as children
        else:
            self.child(os.path.splitext(filename)[0], data, fullpath)
    # Explore every child directory (ignore hidden dirs and subtrees)
    for dirname in sorted(dirnames):
        if dirname.startswith("."):
            continue
        # Ignore metadata subtrees
        if os.path.isdir(os.path.join(path, dirname, SUFFIX)):
            log.debug("Ignoring metadata tree '{0}'.".format(dirname))
            continue
        self.child(dirname, os.path.join(path, dirname))
    # Remove empty children (ignore directories without metadata)
    for name in list(self.children.keys()):
        child = self.children[name]
        if not child.data and not child.children:
            del(self.children[name])
            log.debug("Empty tree '{0}' removed.".format(child.name))
    # Apply inheritance when all scattered data are gathered.
    # This is done only once, from the top parent object.
    if self.parent is None:
        self.inherit()
[ "def", "grow", "(", "self", ",", "path", ")", ":", "if", "path", "is", "None", ":", "return", "path", "=", "path", ".", "rstrip", "(", "\"/\"", ")", "log", ".", "info", "(", "\"Walking through directory {0}\"", ".", "format", "(", "os", ".", "path", ".", "abspath", "(", "path", ")", ")", ")", "dirpath", ",", "dirnames", ",", "filenames", "=", "next", "(", "os", ".", "walk", "(", "path", ")", ")", "# Investigate main.fmf as the first file (for correct inheritance)", "filenames", "=", "sorted", "(", "[", "filename", "for", "filename", "in", "filenames", "if", "filename", ".", "endswith", "(", "SUFFIX", ")", "]", ")", "try", ":", "filenames", ".", "insert", "(", "0", ",", "filenames", ".", "pop", "(", "filenames", ".", "index", "(", "MAIN", ")", ")", ")", "except", "ValueError", ":", "pass", "# Check every metadata file and load data (ignore hidden)", "for", "filename", "in", "filenames", ":", "if", "filename", ".", "startswith", "(", "\".\"", ")", ":", "continue", "fullpath", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "dirpath", ",", "filename", ")", ")", "log", ".", "info", "(", "\"Checking file {0}\"", ".", "format", "(", "fullpath", ")", ")", "try", ":", "with", "open", "(", "fullpath", ")", "as", "datafile", ":", "data", "=", "yaml", ".", "load", "(", "datafile", ",", "Loader", "=", "FullLoader", ")", "except", "yaml", ".", "scanner", ".", "ScannerError", "as", "error", ":", "raise", "(", "utils", ".", "FileError", "(", "\"Failed to parse '{0}'\\n{1}\"", ".", "format", "(", "fullpath", ",", "error", ")", ")", ")", "log", ".", "data", "(", "pretty", "(", "data", ")", ")", "# Handle main.fmf as data for self", "if", "filename", "==", "MAIN", ":", "self", ".", "sources", ".", "append", "(", "fullpath", ")", "self", ".", "update", "(", "data", ")", "# Handle other *.fmf files as children", "else", ":", "self", ".", "child", "(", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "0", "]", ",", "data", ",", "fullpath", ")", "# Explore every child directory (ignore hidden dirs and subtrees)", "for", "dirname", "in", "sorted", "(", "dirnames", ")", ":", "if", "dirname", ".", "startswith", "(", "\".\"", ")", ":", "continue", "# Ignore metadata subtrees", "if", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "path", ",", "dirname", ",", "SUFFIX", ")", ")", ":", "log", ".", "debug", "(", "\"Ignoring metadata tree '{0}'.\"", ".", "format", "(", "dirname", ")", ")", "continue", "self", ".", "child", "(", "dirname", ",", "os", ".", "path", ".", "join", "(", "path", ",", "dirname", ")", ")", "# Remove empty children (ignore directories without metadata)", "for", "name", "in", "list", "(", "self", ".", "children", ".", "keys", "(", ")", ")", ":", "child", "=", "self", ".", "children", "[", "name", "]", "if", "not", "child", ".", "data", "and", "not", "child", ".", "children", ":", "del", "(", "self", ".", "children", "[", "name", "]", ")", "log", ".", "debug", "(", "\"Empty tree '{0}' removed.\"", ".", "format", "(", "child", ".", "name", ")", ")", "# Apply inheritance when all scattered data are gathered.", "# This is done only once, from the top parent object.", "if", "self", ".", "parent", "is", "None", ":", "self", ".", "inherit", "(", ")" ]
Grow the metadata tree for the given directory path

Note: For each path, grow() should be run only once. Growing the
tree from the same path multiple times with attribute adding
using the "+" sign leads to adding the value more than once!
[ "Grow", "the", "metadata", "tree", "for", "the", "given", "directory", "path" ]
python
train
45.083333
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10539-L10559
def adjust_to_360(val, key):
    """
    Take in a value and a key.  If the key is of the type:
    declination/longitude/azimuth/direction, adjust it to be within
    the range 0-360 as required by the MagIC data model
    """
    CheckDec = ['_dec', '_lon', '_azimuth', 'dip_direction']
    adjust = False
    for dec_key in CheckDec:
        if dec_key in key:
            if key.endswith(dec_key) or key.endswith('_'):
                adjust = True
    if not val:
        return ''
    elif not adjust:
        return val
    elif adjust:
        new_val = float(val) % 360
        if new_val != float(val):
            print('-I- adjusted {} {} to 0=>360.: {}'.format(key, val, new_val))
        return new_val
[ "def", "adjust_to_360", "(", "val", ",", "key", ")", ":", "CheckDec", "=", "[", "'_dec'", ",", "'_lon'", ",", "'_azimuth'", ",", "'dip_direction'", "]", "adjust", "=", "False", "for", "dec_key", "in", "CheckDec", ":", "if", "dec_key", "in", "key", ":", "if", "key", ".", "endswith", "(", "dec_key", ")", "or", "key", ".", "endswith", "(", "'_'", ")", ":", "adjust", "=", "True", "if", "not", "val", ":", "return", "''", "elif", "not", "adjust", ":", "return", "val", "elif", "adjust", ":", "new_val", "=", "float", "(", "val", ")", "%", "360", "if", "new_val", "!=", "float", "(", "val", ")", ":", "print", "(", "'-I- adjusted {} {} to 0=>360.: {}'", ".", "format", "(", "key", ",", "val", ",", "new_val", ")", ")", "return", "new_val" ]
Take in a value and a key. If the key is of the type: declination/longitude/azimuth/direction, adjust it to be within the range 0-360 as required by the MagIC data model
[ "Take", "in", "a", "value", "and", "a", "key", ".", "If", "the", "key", "is", "of", "the", "type", ":", "declination", "/", "longitude", "/", "azimuth", "/", "direction", "adjust", "it", "to", "be", "within", "the", "range", "0", "-", "360", "as", "required", "by", "the", "MagIC", "data", "model" ]
python
train
33.142857
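The wrap-around in adjust_to_360 above is a plain modulo; a standalone restatement for illustration (the helper name here is invented, not part of pmagpy):

    # Standalone sketch of the 0-360 wrap-around described above.
    def wrap_to_360(val):
        return float(val) % 360

    print(wrap_to_360(370.5))   # 10.5
    print(wrap_to_360(-15))     # 345.0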
codeinn/vcs
vcs/utils/termcolors.py
https://github.com/codeinn/vcs/blob/e6cd94188e9c36d273411bf3adc0584ac6ab92a0/vcs/utils/termcolors.py#L14-L56
def colorize(text='', opts=(), **kwargs):
    """
    Returns your text, enclosed in ANSI graphics codes. Depends on the
    keyword arguments 'fg' and 'bg', and the contents of the opts tuple/list.

    Returns the RESET code if no parameters are given.

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold'
        'underscore'
        'blink'
        'reverse'
        'conceal'
        'noreset' - string will not be auto-terminated with the RESET code

    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
        print colorize('first line', fg='red', opts=('noreset',))
        print 'this should be red too'
        print colorize('and so should this')
        print 'this should not be red'
    """
    code_list = []
    if text == '' and len(opts) == 1 and opts[0] == 'reset':
        return '\x1b[%sm' % RESET
    for k, v in kwargs.iteritems():
        if k == 'fg':
            code_list.append(foreground[v])
        elif k == 'bg':
            code_list.append(background[v])
    for o in opts:
        if o in opt_dict:
            code_list.append(opt_dict[o])
    if 'noreset' not in opts:
        text = text + '\x1b[%sm' % RESET
    return ('\x1b[%sm' % ';'.join(code_list)) + text
[ "def", "colorize", "(", "text", "=", "''", ",", "opts", "=", "(", ")", ",", "*", "*", "kwargs", ")", ":", "code_list", "=", "[", "]", "if", "text", "==", "''", "and", "len", "(", "opts", ")", "==", "1", "and", "opts", "[", "0", "]", "==", "'reset'", ":", "return", "'\\x1b[%sm'", "%", "RESET", "for", "k", ",", "v", "in", "kwargs", ".", "iteritems", "(", ")", ":", "if", "k", "==", "'fg'", ":", "code_list", ".", "append", "(", "foreground", "[", "v", "]", ")", "elif", "k", "==", "'bg'", ":", "code_list", ".", "append", "(", "background", "[", "v", "]", ")", "for", "o", "in", "opts", ":", "if", "o", "in", "opt_dict", ":", "code_list", ".", "append", "(", "opt_dict", "[", "o", "]", ")", "if", "'noreset'", "not", "in", "opts", ":", "text", "=", "text", "+", "'\\x1b[%sm'", "%", "RESET", "return", "(", "'\\x1b[%sm'", "%", "';'", ".", "join", "(", "code_list", ")", ")", "+", "text" ]
Returns your text, enclosed in ANSI graphics codes. Depends on the
keyword arguments 'fg' and 'bg', and the contents of the opts tuple/list.

Returns the RESET code if no parameters are given.

Valid colors:
    'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

Valid options:
    'bold'
    'underscore'
    'blink'
    'reverse'
    'conceal'
    'noreset' - string will not be auto-terminated with the RESET code

Examples:
    colorize('hello', fg='red', bg='blue', opts=('blink',))
    colorize()
    colorize('goodbye', opts=('underscore',))
    print colorize('first line', fg='red', opts=('noreset',))
    print 'this should be red too'
    print colorize('and so should this')
    print 'this should not be red'
[ "Returns", "your", "text", "enclosed", "in", "ANSI", "graphics", "codes", "." ]
python
train
31.093023
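The colorize record above relies on module-level lookup tables (RESET, foreground, background, opt_dict) that the snippet does not include; a minimal self-contained sketch of the same ANSI escape idea, with the standard codes hard-wired as assumptions:

    # Minimal ANSI sketch with standard escape codes hard-wired
    # (assumed values; not the vcs module's lookup tables).
    RESET = '0'
    foreground = {'red': '31', 'blue': '34'}
    background = {'blue': '44'}
    colored = ('\x1b[%sm' % ';'.join([foreground['red'], background['blue']])
               + 'hello' + '\x1b[%sm' % RESET)
    print(colored)  # prints "hello" in red on blue on ANSI terminals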
mperlet/PyDect200
PyDect200/PyDect200.py
https://github.com/mperlet/PyDect200/blob/4758d80c663324a612c2772e6442db1472016913/PyDect200/PyDect200.py#L130-L139
def switch_toggle(self, device):
    """Toggles the current state of the given device"""
    state = self.get_state(device)
    if(state == '1'):
        return self.switch_off(device)
    elif(state == '0'):
        return self.switch_on(device)
    else:
        return state
[ "def", "switch_toggle", "(", "self", ",", "device", ")", ":", "state", "=", "self", ".", "get_state", "(", "device", ")", "if", "(", "state", "==", "'1'", ")", ":", "return", "self", ".", "switch_off", "(", "device", ")", "elif", "(", "state", "==", "'0'", ")", ":", "return", "self", ".", "switch_on", "(", "device", ")", "else", ":", "return", "state" ]
Toggles the current state of the given device
[ "Toggles", "the", "current", "state", "of", "the", "given", "device" ]
python
train
30.1
inasafe/inasafe
safe/utilities/metadata.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/metadata.py#L183-L247
def read_iso19115_metadata(layer_uri, keyword=None, version_35=False):
    """Retrieve keywords from a metadata object

    :param layer_uri: Uri to layer.
    :type layer_uri: basestring

    :param keyword: The key of the keyword that is to be read. If None,
        return all keywords in dictionary.
    :type keyword: basestring

    :returns: Dictionary of keywords or value of key as string.
    :rtype: dict, basestring
    """
    xml_uri = os.path.splitext(layer_uri)[0] + '.xml'
    # Remove the prefix for local file. For example csv.
    file_prefix = 'file:'
    if xml_uri.startswith(file_prefix):
        xml_uri = xml_uri[len(file_prefix):]
    if not os.path.exists(xml_uri):
        xml_uri = None
    if not xml_uri and os.path.exists(layer_uri):
        message = 'Layer based file but no xml file.\n'
        message += 'Layer path: %s.' % layer_uri
        raise NoKeywordsFoundError(message)

    if version_35:
        metadata = GenericLayerMetadata35(layer_uri, xml_uri)
    else:
        metadata = GenericLayerMetadata(layer_uri, xml_uri)

    active_metadata_classes = METADATA_CLASSES
    if version_35:
        active_metadata_classes = METADATA_CLASSES35

    if metadata.layer_purpose in active_metadata_classes:
        metadata = active_metadata_classes[
            metadata.layer_purpose](layer_uri, xml_uri)

    # dictionary comprehension
    keywords = {
        x[0]: x[1]['value'] for x in list(metadata.dict['properties'].items())
        if x[1]['value'] is not None}
    if 'keyword_version' not in list(keywords.keys()) and xml_uri:
        message = 'No keyword version found. Metadata xml file is invalid.\n'
        message += 'Layer uri: %s\n' % layer_uri
        message += 'Keywords file: %s\n' % os.path.exists(
            os.path.splitext(layer_uri)[0] + '.xml')
        message += 'keywords:\n'
        for k, v in list(keywords.items()):
            message += '%s: %s\n' % (k, v)
        raise MetadataReadError(message)

    # Get dictionary keywords that has value != None
    keywords = {
        x[0]: x[1]['value'] for x in list(metadata.dict['properties'].items())
        if x[1]['value'] is not None}

    if keyword:
        try:
            return keywords[keyword]
        except KeyError:
            message = 'Keyword with key %s is not found. ' % keyword
            message += 'Layer path: %s' % layer_uri
            raise KeywordNotFoundError(message)

    return keywords
[ "def", "read_iso19115_metadata", "(", "layer_uri", ",", "keyword", "=", "None", ",", "version_35", "=", "False", ")", ":", "xml_uri", "=", "os", ".", "path", ".", "splitext", "(", "layer_uri", ")", "[", "0", "]", "+", "'.xml'", "# Remove the prefix for local file. For example csv.", "file_prefix", "=", "'file:'", "if", "xml_uri", ".", "startswith", "(", "file_prefix", ")", ":", "xml_uri", "=", "xml_uri", "[", "len", "(", "file_prefix", ")", ":", "]", "if", "not", "os", ".", "path", ".", "exists", "(", "xml_uri", ")", ":", "xml_uri", "=", "None", "if", "not", "xml_uri", "and", "os", ".", "path", ".", "exists", "(", "layer_uri", ")", ":", "message", "=", "'Layer based file but no xml file.\\n'", "message", "+=", "'Layer path: %s.'", "%", "layer_uri", "raise", "NoKeywordsFoundError", "(", "message", ")", "if", "version_35", ":", "metadata", "=", "GenericLayerMetadata35", "(", "layer_uri", ",", "xml_uri", ")", "else", ":", "metadata", "=", "GenericLayerMetadata", "(", "layer_uri", ",", "xml_uri", ")", "active_metadata_classes", "=", "METADATA_CLASSES", "if", "version_35", ":", "active_metadata_classes", "=", "METADATA_CLASSES35", "if", "metadata", ".", "layer_purpose", "in", "active_metadata_classes", ":", "metadata", "=", "active_metadata_classes", "[", "metadata", ".", "layer_purpose", "]", "(", "layer_uri", ",", "xml_uri", ")", "# dictionary comprehension", "keywords", "=", "{", "x", "[", "0", "]", ":", "x", "[", "1", "]", "[", "'value'", "]", "for", "x", "in", "list", "(", "metadata", ".", "dict", "[", "'properties'", "]", ".", "items", "(", ")", ")", "if", "x", "[", "1", "]", "[", "'value'", "]", "is", "not", "None", "}", "if", "'keyword_version'", "not", "in", "list", "(", "keywords", ".", "keys", "(", ")", ")", "and", "xml_uri", ":", "message", "=", "'No keyword version found. Metadata xml file is invalid.\\n'", "message", "+=", "'Layer uri: %s\\n'", "%", "layer_uri", "message", "+=", "'Keywords file: %s\\n'", "%", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "splitext", "(", "layer_uri", ")", "[", "0", "]", "+", "'.xml'", ")", "message", "+=", "'keywords:\\n'", "for", "k", ",", "v", "in", "list", "(", "keywords", ".", "items", "(", ")", ")", ":", "message", "+=", "'%s: %s\\n'", "%", "(", "k", ",", "v", ")", "raise", "MetadataReadError", "(", "message", ")", "# Get dictionary keywords that has value != None", "keywords", "=", "{", "x", "[", "0", "]", ":", "x", "[", "1", "]", "[", "'value'", "]", "for", "x", "in", "list", "(", "metadata", ".", "dict", "[", "'properties'", "]", ".", "items", "(", ")", ")", "if", "x", "[", "1", "]", "[", "'value'", "]", "is", "not", "None", "}", "if", "keyword", ":", "try", ":", "return", "keywords", "[", "keyword", "]", "except", "KeyError", ":", "message", "=", "'Keyword with key %s is not found. '", "%", "keyword", "message", "+=", "'Layer path: %s'", "%", "layer_uri", "raise", "KeywordNotFoundError", "(", "message", ")", "return", "keywords" ]
Retrieve keywords from a metadata object

:param layer_uri: Uri to layer.
:type layer_uri: basestring

:param keyword: The key of the keyword that is to be read. If None, return
    all keywords in dictionary.
:type keyword: basestring

:returns: Dictionary of keywords or value of key as string.
:rtype: dict, basestring
[ "Retrieve", "keywords", "from", "a", "metadata", "object" ]
python
train
36.646154
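A hypothetical call against the function above, assuming a layer file with a sibling .xml metadata file (paths invented for illustration):

    # keywords = read_iso19115_metadata('/data/flood.shp')                # full dict
    # title = read_iso19115_metadata('/data/flood.shp', keyword='title')  # one key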
MonsieurV/PiPocketGeiger
PiPocketGeiger/__init__.py
https://github.com/MonsieurV/PiPocketGeiger/blob/b0e7c303df46deeea3715fb8da3ebbefaf660f91/PiPocketGeiger/__init__.py#L60-L75
def status(self):
    """Return current readings, as a dictionary with:
    duration -- the duration of the measurements, in seconds;
    cpm -- the radiation count by minute;
    uSvh -- the radiation dose, expressed in Sievert per hour (uSv/h);
    uSvhError -- the uncertainty for the radiation dose."""
    minutes = min(self.duration, MAX_CPM_TIME) / 1000 / 60.0
    cpm = self.count / minutes if minutes > 0 else 0
    return dict(
        duration=round(self.duration / 1000.0, 2),
        cpm=round(cpm, 2),
        uSvh=round(cpm / K_ALPHA, 3),
        uSvhError=round(math.sqrt(self.count) / minutes / K_ALPHA, 3)
        if minutes > 0
        else 0,
    )
[ "def", "status", "(", "self", ")", ":", "minutes", "=", "min", "(", "self", ".", "duration", ",", "MAX_CPM_TIME", ")", "/", "1000", "/", "60.0", "cpm", "=", "self", ".", "count", "/", "minutes", "if", "minutes", ">", "0", "else", "0", "return", "dict", "(", "duration", "=", "round", "(", "self", ".", "duration", "/", "1000.0", ",", "2", ")", ",", "cpm", "=", "round", "(", "cpm", ",", "2", ")", ",", "uSvh", "=", "round", "(", "cpm", "/", "K_ALPHA", ",", "3", ")", ",", "uSvhError", "=", "round", "(", "math", ".", "sqrt", "(", "self", ".", "count", ")", "/", "minutes", "/", "K_ALPHA", ",", "3", ")", "if", "minutes", ">", "0", "else", "0", ",", ")" ]
Return current readings, as a dictionary with:
duration -- the duration of the measurements, in seconds;
cpm -- the radiation count by minute;
uSvh -- the radiation dose, expressed in Sievert per hour (uSv/h);
uSvhError -- the uncertainty for the radiation dose.
[ "Return", "current", "readings", "as", "a", "dictionary", "with", ":", "duration", "--", "the", "duration", "of", "the", "measurements", "in", "seconds", ";", "cpm", "--", "the", "radiation", "count", "by", "minute", ";", "uSvh", "--", "the", "radiation", "dose", "exprimed", "in", "Sievert", "per", "house", "(", "uSv", "/", "h", ")", ";", "uSvhError", "--", "the", "incertitude", "for", "the", "radiation", "dose", "." ]
python
train
45.5625
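The dose arithmetic above stands alone; a sketch with a placeholder calibration constant (the K_ALPHA value here is an assumption, not taken from the record):

    import math

    K_ALPHA = 53.032          # assumed tube calibration constant (cpm per uSv/h)
    count, duration_ms = 120, 5 * 60 * 1000
    minutes = duration_ms / 1000 / 60.0
    cpm = count / minutes                                   # 24.0 counts/minute
    print(round(cpm / K_ALPHA, 3))                          # dose rate in uSv/h
    print(round(math.sqrt(count) / minutes / K_ALPHA, 3))   # Poisson error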
liampauling/betfair
betfairlightweight/baseclient.py
https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/baseclient.py#L117-L123
def session_expired(self):
    """
    Returns True if login_time not set or seconds since
    login time is greater than 200 mins.
    """
    if not self._login_time or (datetime.datetime.now() - self._login_time).total_seconds() > 12000:
        return True
[ "def", "session_expired", "(", "self", ")", ":", "if", "not", "self", ".", "_login_time", "or", "(", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "self", ".", "_login_time", ")", ".", "total_seconds", "(", ")", ">", "12000", ":", "return", "True" ]
Returns True if login_time not set or seconds since login time is greater than 200 mins.
[ "Returns", "True", "if", "login_time", "not", "set", "or", "seconds", "since", "login", "time", "is", "greater", "than", "200", "mins", "." ]
python
train
39.428571
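The 12000-second threshold above equals 200 minutes; a standalone check of the same arithmetic:

    import datetime

    login_time = datetime.datetime.now() - datetime.timedelta(minutes=201)
    expired = (datetime.datetime.now() - login_time).total_seconds() > 12000
    print(expired)  # True: 201 minutes exceeds the 200-minute (12000 s) limit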
radical-cybertools/radical.entk
src/radical/entk/appman/appmanager.py
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/appman/appmanager.py#L527-L589
def _setup_mqs(self):
    """
    **Purpose**: Setup RabbitMQ system on the client side. We instantiate
    queue(s) 'pendingq-*' for communication between the enqueuer thread
    and the task manager process. We instantiate queue(s) 'completedq-*'
    for communication between the task manager and dequeuer thread. We
    instantiate queue 'sync-to-master' for communication from
    enqueuer/dequeuer/task_manager to the synchronizer thread. We
    instantiate queue 'sync-ack' for communication from synchronizer
    thread to enqueuer/dequeuer/task_manager.

    Details: All queues are durable: Even if the RabbitMQ server goes down,
    the queues are saved to disk and can be retrieved. This also means that
    after an erroneous run the queues might still have unacknowledged
    messages and will contain messages from that run. Hence, in every new
    run, we first delete the queue and create a new one.
    """
    try:
        self._prof.prof('init mqs setup', uid=self._uid)
        self._logger.debug('Setting up mq connection and channel')
        mq_connection = pika.BlockingConnection(
            pika.ConnectionParameters(host=self._mq_hostname, port=self._port))
        mq_channel = mq_connection.channel()
        self._logger.debug('Connection and channel setup successful')
        self._logger.debug('Setting up all exchanges and queues')
        qs = [
            '%s-tmgr-to-sync' % self._sid,
            '%s-cb-to-sync' % self._sid,
            '%s-enq-to-sync' % self._sid,
            '%s-deq-to-sync' % self._sid,
            '%s-sync-to-tmgr' % self._sid,
            '%s-sync-to-cb' % self._sid,
            '%s-sync-to-enq' % self._sid,
            '%s-sync-to-deq' % self._sid
        ]
        for i in range(1, self._num_pending_qs + 1):
            queue_name = '%s-pendingq-%s' % (self._sid, i)
            self._pending_queue.append(queue_name)
            qs.append(queue_name)
        for i in range(1, self._num_completed_qs + 1):
            queue_name = '%s-completedq-%s' % (self._sid, i)
            self._completed_queue.append(queue_name)
            qs.append(queue_name)
        f = open('.%s.txt' % self._sid, 'w')
        for q in qs:
            # Durable Qs will not be lost if rabbitmq server crashes
            mq_channel.queue_declare(queue=q)
            f.write(q + '\n')
        f.close()
        self._logger.debug('All exchanges and queues are setup')
        self._prof.prof('mqs setup done', uid=self._uid)
        return True
    except Exception, ex:
        self._logger.exception('Error setting RabbitMQ system: %s' % ex)
        raise
[ "def", "_setup_mqs", "(", "self", ")", ":", "try", ":", "self", ".", "_prof", ".", "prof", "(", "'init mqs setup'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_logger", ".", "debug", "(", "'Setting up mq connection and channel'", ")", "mq_connection", "=", "pika", ".", "BlockingConnection", "(", "pika", ".", "ConnectionParameters", "(", "host", "=", "self", ".", "_mq_hostname", ",", "port", "=", "self", ".", "_port", ")", ")", "mq_channel", "=", "mq_connection", ".", "channel", "(", ")", "self", ".", "_logger", ".", "debug", "(", "'Connection and channel setup successful'", ")", "self", ".", "_logger", ".", "debug", "(", "'Setting up all exchanges and queues'", ")", "qs", "=", "[", "'%s-tmgr-to-sync'", "%", "self", ".", "_sid", ",", "'%s-cb-to-sync'", "%", "self", ".", "_sid", ",", "'%s-enq-to-sync'", "%", "self", ".", "_sid", ",", "'%s-deq-to-sync'", "%", "self", ".", "_sid", ",", "'%s-sync-to-tmgr'", "%", "self", ".", "_sid", ",", "'%s-sync-to-cb'", "%", "self", ".", "_sid", ",", "'%s-sync-to-enq'", "%", "self", ".", "_sid", ",", "'%s-sync-to-deq'", "%", "self", ".", "_sid", "]", "for", "i", "in", "range", "(", "1", ",", "self", ".", "_num_pending_qs", "+", "1", ")", ":", "queue_name", "=", "'%s-pendingq-%s'", "%", "(", "self", ".", "_sid", ",", "i", ")", "self", ".", "_pending_queue", ".", "append", "(", "queue_name", ")", "qs", ".", "append", "(", "queue_name", ")", "for", "i", "in", "range", "(", "1", ",", "self", ".", "_num_completed_qs", "+", "1", ")", ":", "queue_name", "=", "'%s-completedq-%s'", "%", "(", "self", ".", "_sid", ",", "i", ")", "self", ".", "_completed_queue", ".", "append", "(", "queue_name", ")", "qs", ".", "append", "(", "queue_name", ")", "f", "=", "open", "(", "'.%s.txt'", "%", "self", ".", "_sid", ",", "'w'", ")", "for", "q", "in", "qs", ":", "# Durable Qs will not be lost if rabbitmq server crashes", "mq_channel", ".", "queue_declare", "(", "queue", "=", "q", ")", "f", ".", "write", "(", "q", "+", "'\\n'", ")", "f", ".", "close", "(", ")", "self", ".", "_logger", ".", "debug", "(", "'All exchanges and queues are setup'", ")", "self", ".", "_prof", ".", "prof", "(", "'mqs setup done'", ",", "uid", "=", "self", ".", "_uid", ")", "return", "True", "except", "Exception", ",", "ex", ":", "self", ".", "_logger", ".", "exception", "(", "'Error setting RabbitMQ system: %s'", "%", "ex", ")", "raise" ]
**Purpose**: Setup RabbitMQ system on the client side. We instantiate
queue(s) 'pendingq-*' for communication between the enqueuer thread and the
task manager process. We instantiate queue(s) 'completedq-*' for
communication between the task manager and dequeuer thread. We instantiate
queue 'sync-to-master' for communication from enqueuer/dequeuer/task_manager
to the synchronizer thread. We instantiate queue 'sync-ack' for
communication from synchronizer thread to enqueuer/dequeuer/task_manager.

Details: All queues are durable: Even if the RabbitMQ server goes down, the
queues are saved to disk and can be retrieved. This also means that after an
erroneous run the queues might still have unacknowledged messages and will
contain messages from that run. Hence, in every new run, we first delete the
queue and create a new one.
[ "**", "Purpose", "**", ":", "Setup", "RabbitMQ", "system", "on", "the", "client", "side", ".", "We", "instantiate", "queue", "(", "s", ")", "pendingq", "-", "*", "for", "communication", "between", "the", "enqueuer", "thread", "and", "the", "task", "manager", "process", ".", "We", "instantiate", "queue", "(", "s", ")", "completedq", "-", "*", "for", "communication", "between", "the", "task", "manager", "and", "dequeuer", "thread", ".", "We", "instantiate", "queue", "sync", "-", "to", "-", "master", "for", "communication", "from", "enqueuer", "/", "dequeuer", "/", "task_manager", "to", "the", "synchronizer", "thread", ".", "We", "instantiate", "queue", "sync", "-", "ack", "for", "communication", "from", "synchronizer", "thread", "to", "enqueuer", "/", "dequeuer", "/", "task_manager", "." ]
python
train
43.253968
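Each queue declaration in the loop above is a plain queue_declare on a pika channel; a minimal sketch assuming a RabbitMQ broker on localhost:

    import pika

    # Declare one demo queue, as the loop above does once per queue name.
    connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel = connection.channel()
    channel.queue_declare(queue='demo-pendingq-1')
    connection.close()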
vtkiorg/vtki
vtki/pointset.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/pointset.py#L488-L536
def save(self, filename, binary=True):
    """
    Writes a surface mesh to disk.

    Written file may be an ASCII or binary ply, stl, or vtk mesh file.

    Parameters
    ----------
    filename : str
        Filename of mesh to be written.  File type is inferred from
        the extension of the filename unless overridden with ftype.
        Can be one of the following types (.ply, .stl, .vtk)

    binary : bool, optional
        Writes the file as binary when True and ASCII when False.

    Notes
    -----
    Binary files write much faster than ASCII and have a smaller
    file size.
    """
    filename = os.path.abspath(os.path.expanduser(filename))
    file_mode = True
    # Check filetype
    ftype = filename[-3:]
    if ftype == 'ply':
        writer = vtk.vtkPLYWriter()
    elif ftype == 'vtp':
        writer = vtk.vtkXMLPolyDataWriter()
        file_mode = False
        if binary:
            writer.SetDataModeToBinary()
        else:
            writer.SetDataModeToAscii()
    elif ftype == 'stl':
        writer = vtk.vtkSTLWriter()
    elif ftype == 'vtk':
        writer = vtk.vtkPolyDataWriter()
    else:
        raise Exception('Filetype must be either "ply", "stl", or "vtk"')
    writer.SetFileName(filename)
    writer.SetInputData(self)
    if binary and file_mode:
        writer.SetFileTypeToBinary()
    elif file_mode:
        writer.SetFileTypeToASCII()
    writer.Write()
[ "def", "save", "(", "self", ",", "filename", ",", "binary", "=", "True", ")", ":", "filename", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "filename", ")", ")", "file_mode", "=", "True", "# Check filetype", "ftype", "=", "filename", "[", "-", "3", ":", "]", "if", "ftype", "==", "'ply'", ":", "writer", "=", "vtk", ".", "vtkPLYWriter", "(", ")", "elif", "ftype", "==", "'vtp'", ":", "writer", "=", "vtk", ".", "vtkXMLPolyDataWriter", "(", ")", "file_mode", "=", "False", "if", "binary", ":", "writer", ".", "SetDataModeToBinary", "(", ")", "else", ":", "writer", ".", "SetDataModeToAscii", "(", ")", "elif", "ftype", "==", "'stl'", ":", "writer", "=", "vtk", ".", "vtkSTLWriter", "(", ")", "elif", "ftype", "==", "'vtk'", ":", "writer", "=", "vtk", ".", "vtkPolyDataWriter", "(", ")", "else", ":", "raise", "Exception", "(", "'Filetype must be either \"ply\", \"stl\", or \"vtk\"'", ")", "writer", ".", "SetFileName", "(", "filename", ")", "writer", ".", "SetInputData", "(", "self", ")", "if", "binary", "and", "file_mode", ":", "writer", ".", "SetFileTypeToBinary", "(", ")", "elif", "file_mode", ":", "writer", ".", "SetFileTypeToASCII", "(", ")", "writer", ".", "Write", "(", ")" ]
Writes a surface mesh to disk.

Written file may be an ASCII or binary ply, stl, or vtk mesh file.

Parameters
----------
filename : str
    Filename of mesh to be written.  File type is inferred from
    the extension of the filename unless overridden with ftype.
    Can be one of the following types (.ply, .stl, .vtk)

binary : bool, optional
    Writes the file as binary when True and ASCII when False.

Notes
-----
Binary files write much faster than ASCII and have a smaller file size.
[ "Writes", "a", "surface", "mesh", "to", "disk", "." ]
python
train
31.632653
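Hypothetical usage of the save() method above, assuming mesh is a vtki surface mesh (filenames invented):

    # mesh.save('surface.ply')                 # binary PLY (the default)
    # mesh.save('surface.stl', binary=False)   # ASCII STL
    # mesh.save('surface.vtp')                 # XML PolyData; honors `binary`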
genepattern/genepattern-python
gp/data.py
https://github.com/genepattern/genepattern-python/blob/9478ea65362b91c72a94f7300c3de8d710bebb71/gp/data.py#L273-L294
def _extract_header_value(line):
    """
    Extracts a key / value pair from a header line in an ODF file
    """
    # Skip blank lines, returning None
    if not line:
        return None

    # Attempt to split by equals sign
    halves = line.split('=')
    if len(halves) > 1:
        key = halves[0].strip()
        value = halves[1].strip()
        return {key: value}
    # Otherwise, attempt to split by colon
    else:
        halves = line.split(':')
        key = halves[0].strip()
        value = halves[1].strip()
        return {key: value}
[ "def", "_extract_header_value", "(", "line", ")", ":", "# Skip blank lines, returning None", "if", "not", "line", ":", "return", "None", "# Attempt to split by equals sign", "halves", "=", "line", ".", "split", "(", "'='", ")", "if", "len", "(", "halves", ")", ">", "1", ":", "key", "=", "halves", "[", "0", "]", ".", "strip", "(", ")", "value", "=", "halves", "[", "1", "]", ".", "strip", "(", ")", "return", "{", "key", ":", "value", "}", "# Otherwise, attempt to split by colon", "else", ":", "halves", "=", "line", ".", "split", "(", "':'", ")", "key", "=", "halves", "[", "0", "]", ".", "strip", "(", ")", "value", "=", "halves", "[", "1", "]", ".", "strip", "(", ")", "return", "{", "key", ":", "value", "}" ]
Extracts a key / value pair from a header line in an ODF file
[ "Extracts", "a", "key", "/", "value", "pair", "from", "a", "header", "line", "in", "an", "ODF", "file" ]
python
train
24.409091
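The equals-sign branch above boils down to a split-and-strip; a standalone restatement on a sample header line (sample text invented):

    line = 'Model= Prediction Results'
    key, value = (half.strip() for half in line.split('=', 1))
    print({key: value})   # {'Model': 'Prediction Results'}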
guyzmo/git-repo
git_repo/services/service.py
https://github.com/guyzmo/git-repo/blob/2974c3f52bc64fa8a467ac2b0e9a485ba7ed333b/git_repo/services/service.py#L57-L66
def register_target(repo_cmd, repo_service):
    """Decorator to register a class with a repo_service"""
    def decorate(klass):
        log.debug('Loading service module class: {}'.format(klass.__name__))
        klass.command = repo_cmd
        klass.name = repo_service
        RepositoryService.service_map[repo_service] = klass
        RepositoryService.command_map[repo_cmd] = repo_service
        return klass
    return decorate
[ "def", "register_target", "(", "repo_cmd", ",", "repo_service", ")", ":", "def", "decorate", "(", "klass", ")", ":", "log", ".", "debug", "(", "'Loading service module class: {}'", ".", "format", "(", "klass", ".", "__name__", ")", ")", "klass", ".", "command", "=", "repo_cmd", "klass", ".", "name", "=", "repo_service", "RepositoryService", ".", "service_map", "[", "repo_service", "]", "=", "klass", "RepositoryService", ".", "command_map", "[", "repo_cmd", "]", "=", "repo_service", "return", "klass", "return", "decorate" ]
Decorator to register a class with a repo_service
[ "Decorator", "to", "register", "a", "class", "with", "an", "repo_service" ]
python
train
43
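Hypothetical usage of the register_target decorator above (service class and names invented):

    @register_target('hub', 'github')
    class GithubService(RepositoryService):
        pass

    # RepositoryService.service_map['github'] now maps to GithubService,
    # and RepositoryService.command_map['hub'] maps to 'github'.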
saltstack/salt
salt/modules/nginx.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nginx.py#L139-L175
def status(url="http://127.0.0.1/status"):
    """
    Return the data from an Nginx status page as a dictionary.
    http://wiki.nginx.org/HttpStubStatusModule

    url
        The URL of the status page. Defaults to 'http://127.0.0.1/status'

    CLI Example:

    .. code-block:: bash

        salt '*' nginx.status
    """
    resp = _urlopen(url)
    status_data = resp.read()
    resp.close()

    lines = status_data.splitlines()
    if not len(lines) == 4:
        return
    # "Active connections: 1 "
    active_connections = lines[0].split()[2]
    # "server accepts handled requests"
    # " 12 12 9 "
    accepted, handled, requests = lines[2].split()
    # "Reading: 0 Writing: 1 Waiting: 0 "
    _, reading, _, writing, _, waiting = lines[3].split()
    return {
        'active connections': int(active_connections),
        'accepted': int(accepted),
        'handled': int(handled),
        'requests': int(requests),
        'reading': int(reading),
        'writing': int(writing),
        'waiting': int(waiting),
    }
[ "def", "status", "(", "url", "=", "\"http://127.0.0.1/status\"", ")", ":", "resp", "=", "_urlopen", "(", "url", ")", "status_data", "=", "resp", ".", "read", "(", ")", "resp", ".", "close", "(", ")", "lines", "=", "status_data", ".", "splitlines", "(", ")", "if", "not", "len", "(", "lines", ")", "==", "4", ":", "return", "# \"Active connections: 1 \"", "active_connections", "=", "lines", "[", "0", "]", ".", "split", "(", ")", "[", "2", "]", "# \"server accepts handled requests\"", "# \" 12 12 9 \"", "accepted", ",", "handled", ",", "requests", "=", "lines", "[", "2", "]", ".", "split", "(", ")", "# \"Reading: 0 Writing: 1 Waiting: 0 \"", "_", ",", "reading", ",", "_", ",", "writing", ",", "_", ",", "waiting", "=", "lines", "[", "3", "]", ".", "split", "(", ")", "return", "{", "'active connections'", ":", "int", "(", "active_connections", ")", ",", "'accepted'", ":", "int", "(", "accepted", ")", ",", "'handled'", ":", "int", "(", "handled", ")", ",", "'requests'", ":", "int", "(", "requests", ")", ",", "'reading'", ":", "int", "(", "reading", ")", ",", "'writing'", ":", "int", "(", "writing", ")", ",", "'waiting'", ":", "int", "(", "waiting", ")", ",", "}" ]
Return the data from an Nginx status page as a dictionary.
http://wiki.nginx.org/HttpStubStatusModule

url
    The URL of the status page. Defaults to 'http://127.0.0.1/status'

CLI Example:

.. code-block:: bash

    salt '*' nginx.status
[ "Return", "the", "data", "from", "an", "Nginx", "status", "page", "as", "a", "dictionary", ".", "http", ":", "//", "wiki", ".", "nginx", ".", "org", "/", "HttpStubStatusModule" ]
python
train
27.162162
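The parser above assumes the four-line stub_status format; a standalone demo of the counters split on sample output:

    status_data = ("Active connections: 1 \n"
                   "server accepts handled requests\n"
                   " 12 12 9 \n"
                   "Reading: 0 Writing: 1 Waiting: 0 \n")
    lines = status_data.splitlines()
    accepted, handled, requests = lines[2].split()
    print(accepted, handled, requests)   # 12 12 9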
jssimporter/python-jss
jss/jssobject.py
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/jssobject.py#L301-L308
def delete(self, data=None):
    """Delete this object from the JSS."""
    if not self.can_delete:
        raise JSSMethodNotAllowedError(self.__class__.__name__)
    if data:
        self.jss.delete(self.url, data)
    else:
        self.jss.delete(self.url)
[ "def", "delete", "(", "self", ",", "data", "=", "None", ")", ":", "if", "not", "self", ".", "can_delete", ":", "raise", "JSSMethodNotAllowedError", "(", "self", ".", "__class__", ".", "__name__", ")", "if", "data", ":", "self", ".", "jss", ".", "delete", "(", "self", ".", "url", ",", "data", ")", "else", ":", "self", ".", "jss", ".", "delete", "(", "self", ".", "url", ")" ]
Delete this object from the JSS.
[ "Delete", "this", "object", "from", "the", "JSS", "." ]
python
train
35.125
cloud9ers/gurumate
environment/lib/python2.7/site-packages/psutil/__init__.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/psutil/__init__.py#L402-L468
def get_children(self, recursive=False):
    """Return the children of this process as a list of Process
    objects.
    If recursive is True return all the parent descendants.

    Example (A == this process):

     A ─┐
        │
        ├─ B (child) ─┐
        │             └─ X (grandchild) ─┐
        │                                └─ Y (great grandchild)
        ├─ C (child)
        └─ D (child)

    >>> p.get_children()
    B, C, D
    >>> p.get_children(recursive=True)
    B, X, Y, C, D

    Note that in the example above if process X disappears
    process Y won't be returned either as the reference to
    process A is lost.
    """
    if not self.is_running():
        name = self._platform_impl._process_name
        raise NoSuchProcess(self.pid, name)

    ret = []
    if not recursive:
        for p in process_iter():
            try:
                if p.ppid == self.pid:
                    # if child happens to be older than its parent
                    # (self) it means child's PID has been reused
                    if self.create_time <= p.create_time:
                        ret.append(p)
            except NoSuchProcess:
                pass
    else:
        # construct a dict where 'values' are all the processes
        # having 'key' as their parent
        table = defaultdict(list)
        for p in process_iter():
            try:
                table[p.ppid].append(p)
            except NoSuchProcess:
                pass
        # At this point we have a mapping table where table[self.pid]
        # are the current process's children.
        # Below, we look for all descendants recursively, similarly
        # to a recursive function call.
        checkpids = [self.pid]
        for pid in checkpids:
            for child in table[pid]:
                try:
                    # if child happens to be older than its parent
                    # (self) it means child's PID has been reused
                    intime = self.create_time <= child.create_time
                except NoSuchProcess:
                    pass
                else:
                    if intime:
                        ret.append(child)
                        if child.pid not in checkpids:
                            checkpids.append(child.pid)
    return ret
[ "def", "get_children", "(", "self", ",", "recursive", "=", "False", ")", ":", "if", "not", "self", ".", "is_running", "(", ")", ":", "name", "=", "self", ".", "_platform_impl", ".", "_process_name", "raise", "NoSuchProcess", "(", "self", ".", "pid", ",", "name", ")", "ret", "=", "[", "]", "if", "not", "recursive", ":", "for", "p", "in", "process_iter", "(", ")", ":", "try", ":", "if", "p", ".", "ppid", "==", "self", ".", "pid", ":", "# if child happens to be older than its parent", "# (self) it means child's PID has been reused", "if", "self", ".", "create_time", "<=", "p", ".", "create_time", ":", "ret", ".", "append", "(", "p", ")", "except", "NoSuchProcess", ":", "pass", "else", ":", "# construct a dict where 'values' are all the processes", "# having 'key' as their parent", "table", "=", "defaultdict", "(", "list", ")", "for", "p", "in", "process_iter", "(", ")", ":", "try", ":", "table", "[", "p", ".", "ppid", "]", ".", "append", "(", "p", ")", "except", "NoSuchProcess", ":", "pass", "# At this point we have a mapping table where table[self.pid]", "# are the current process's children.", "# Below, we look for all descendants recursively, similarly", "# to a recursive function call.", "checkpids", "=", "[", "self", ".", "pid", "]", "for", "pid", "in", "checkpids", ":", "for", "child", "in", "table", "[", "pid", "]", ":", "try", ":", "# if child happens to be older than its parent", "# (self) it means child's PID has been reused", "intime", "=", "self", ".", "create_time", "<=", "child", ".", "create_time", "except", "NoSuchProcess", ":", "pass", "else", ":", "if", "intime", ":", "ret", ".", "append", "(", "child", ")", "if", "child", ".", "pid", "not", "in", "checkpids", ":", "checkpids", ".", "append", "(", "child", ".", "pid", ")", "return", "ret" ]
Return the children of this process as a list of Process
objects.
If recursive is True return all the parent descendants.

Example (A == this process):

 A ─┐
    │
    ├─ B (child) ─┐
    │             └─ X (grandchild) ─┐
    │                                └─ Y (great grandchild)
    ├─ C (child)
    └─ D (child)

>>> p.get_children()
B, C, D
>>> p.get_children(recursive=True)
B, X, Y, C, D

Note that in the example above if process X disappears
process Y won't be returned either as the reference to
process A is lost.
[ "Return", "the", "children", "of", "this", "process", "as", "a", "list", "of", "Process", "objects", ".", "If", "recursive", "is", "True", "return", "all", "the", "parent", "descendants", "." ]
python
test
37.014925
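Current psutil exposes this traversal directly; a one-liner using the modern public API (distinct from the vendored copy above):

    import psutil

    # All descendants of the current process.
    children = psutil.Process().children(recursive=True)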
materialsproject/pymatgen
pymatgen/analysis/gb/grain.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/gb/grain.py#L2208-L2241
def vec_to_surface(vec):
    """
    Transform a float vector to a surface miller index with integers.

    Args:
        vec (1 by 3 array float vector): input float vector

    Return:
        the surface miller index of the input vector.
    """
    miller = [None] * 3
    index = []
    for i, value in enumerate(vec):
        if abs(value) < 1.e-8:
            miller[i] = 0
        else:
            index.append(i)
    if len(index) == 1:
        miller[index[0]] = 1
    else:
        min_index = np.argmin([i for i in vec if i != 0])
        true_index = index[min_index]
        index.pop(min_index)
        frac = []
        for i, value in enumerate(index):
            frac.append(Fraction(vec[value] / vec[true_index]).limit_denominator(100))
        if len(index) == 1:
            miller[true_index] = frac[0].denominator
            miller[index[0]] = frac[0].numerator
        else:
            com_lcm = lcm(frac[0].denominator, frac[1].denominator)
            miller[true_index] = com_lcm
            miller[index[0]] = frac[0].numerator * int(round((com_lcm / frac[0].denominator)))
            miller[index[1]] = frac[1].numerator * int(round((com_lcm / frac[1].denominator)))
    return miller
[ "def", "vec_to_surface", "(", "vec", ")", ":", "miller", "=", "[", "None", "]", "*", "3", "index", "=", "[", "]", "for", "i", ",", "value", "in", "enumerate", "(", "vec", ")", ":", "if", "abs", "(", "value", ")", "<", "1.e-8", ":", "miller", "[", "i", "]", "=", "0", "else", ":", "index", ".", "append", "(", "i", ")", "if", "len", "(", "index", ")", "==", "1", ":", "miller", "[", "index", "[", "0", "]", "]", "=", "1", "else", ":", "min_index", "=", "np", ".", "argmin", "(", "[", "i", "for", "i", "in", "vec", "if", "i", "!=", "0", "]", ")", "true_index", "=", "index", "[", "min_index", "]", "index", ".", "pop", "(", "min_index", ")", "frac", "=", "[", "]", "for", "i", ",", "value", "in", "enumerate", "(", "index", ")", ":", "frac", ".", "append", "(", "Fraction", "(", "vec", "[", "value", "]", "/", "vec", "[", "true_index", "]", ")", ".", "limit_denominator", "(", "100", ")", ")", "if", "len", "(", "index", ")", "==", "1", ":", "miller", "[", "true_index", "]", "=", "frac", "[", "0", "]", ".", "denominator", "miller", "[", "index", "[", "0", "]", "]", "=", "frac", "[", "0", "]", ".", "numerator", "else", ":", "com_lcm", "=", "lcm", "(", "frac", "[", "0", "]", ".", "denominator", ",", "frac", "[", "1", "]", ".", "denominator", ")", "miller", "[", "true_index", "]", "=", "com_lcm", "miller", "[", "index", "[", "0", "]", "]", "=", "frac", "[", "0", "]", ".", "numerator", "*", "int", "(", "round", "(", "(", "com_lcm", "/", "frac", "[", "0", "]", ".", "denominator", ")", ")", ")", "miller", "[", "index", "[", "1", "]", "]", "=", "frac", "[", "1", "]", ".", "numerator", "*", "int", "(", "round", "(", "(", "com_lcm", "/", "frac", "[", "1", "]", ".", "denominator", ")", ")", ")", "return", "miller" ]
Transform a float vector to a surface miller index with integers.

Args:
    vec (1 by 3 array float vector): input float vector

Return:
    the surface miller index of the input vector.
[ "Transform", "a", "float", "vector", "to", "a", "surface", "miller", "index", "with", "integers", "." ]
python
train
38.588235
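The heart of vec_to_surface is rational reconstruction of the component ratios; a standalone illustration with the same Fraction trick:

    from fractions import Fraction

    # (0.5, 0.25, 0) reduces to the integer ratio 2 : 1 : 0,
    # i.e. vec_to_surface([0.5, 0.25, 0.0]) gives [2, 1, 0].
    ratio = Fraction(0.5 / 0.25).limit_denominator(100)
    print([ratio.numerator, ratio.denominator, 0])   # [2, 1, 0]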
tanghaibao/jcvi
jcvi/projects/synfind.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/synfind.py#L125-L174
def yeasttruth(args):
    """
    %prog yeasttruth Pillars.tab *.gff

    Prepare pairs data for 14 yeasts.
    """
    p = OptionParser(yeasttruth.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) < 2:
        sys.exit(not p.print_help())

    pillars = args[0]
    gffiles = args[1:]
    aliases = {}
    pivot = {}
    for gffile in gffiles:
        is_pivot = op.basename(gffile).startswith("Saccharomyces_cerevisiae")
        gff = Gff(gffile)
        for g in gff:
            if g.type != "gene":
                continue
            for a in g.attributes["Alias"]:
                aliases[a] = g.accn
                if is_pivot:
                    pivot[a] = g.accn
    logging.debug("Aliases imported: {0}".format(len(aliases)))
    logging.debug("Pivot imported: {0}".format(len(pivot)))

    fw = open("yeast.aliases", "w")
    for k, v in sorted(aliases.items()):
        print("\t".join((k, v)), file=fw)
    fw.close()

    fp = open(pillars)
    pairs = set()
    fw = must_open(opts.outfile, "w")
    for row in fp:
        atoms = [x for x in row.split() if x != "---"]
        pps = [pivot[x] for x in atoms if x in pivot]
        atoms = [aliases[x] for x in atoms if x in aliases]
        for p in pps:
            for a in atoms:
                if p == a:
                    continue
                pairs.add(tuple(sorted((p, a))))
    for a, b in sorted(pairs):
        print("\t".join((a, b)), file=fw)
    fw.close()
[ "def", "yeasttruth", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "yeasttruth", ".", "__doc__", ")", "p", ".", "set_outfile", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "<", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "pillars", "=", "args", "[", "0", "]", "gffiles", "=", "args", "[", "1", ":", "]", "aliases", "=", "{", "}", "pivot", "=", "{", "}", "for", "gffile", "in", "gffiles", ":", "is_pivot", "=", "op", ".", "basename", "(", "gffile", ")", ".", "startswith", "(", "\"Saccharomyces_cerevisiae\"", ")", "gff", "=", "Gff", "(", "gffile", ")", "for", "g", "in", "gff", ":", "if", "g", ".", "type", "!=", "\"gene\"", ":", "continue", "for", "a", "in", "g", ".", "attributes", "[", "\"Alias\"", "]", ":", "aliases", "[", "a", "]", "=", "g", ".", "accn", "if", "is_pivot", ":", "pivot", "[", "a", "]", "=", "g", ".", "accn", "logging", ".", "debug", "(", "\"Aliases imported: {0}\"", ".", "format", "(", "len", "(", "aliases", ")", ")", ")", "logging", ".", "debug", "(", "\"Pivot imported: {0}\"", ".", "format", "(", "len", "(", "pivot", ")", ")", ")", "fw", "=", "open", "(", "\"yeast.aliases\"", ",", "\"w\"", ")", "for", "k", ",", "v", "in", "sorted", "(", "aliases", ".", "items", "(", ")", ")", ":", "print", "(", "\"\\t\"", ".", "join", "(", "(", "k", ",", "v", ")", ")", ",", "file", "=", "fw", ")", "fw", ".", "close", "(", ")", "fp", "=", "open", "(", "pillars", ")", "pairs", "=", "set", "(", ")", "fw", "=", "must_open", "(", "opts", ".", "outfile", ",", "\"w\"", ")", "for", "row", "in", "fp", ":", "atoms", "=", "[", "x", "for", "x", "in", "row", ".", "split", "(", ")", "if", "x", "!=", "\"---\"", "]", "pps", "=", "[", "pivot", "[", "x", "]", "for", "x", "in", "atoms", "if", "x", "in", "pivot", "]", "atoms", "=", "[", "aliases", "[", "x", "]", "for", "x", "in", "atoms", "if", "x", "in", "aliases", "]", "for", "p", "in", "pps", ":", "for", "a", "in", "atoms", ":", "if", "p", "==", "a", ":", "continue", "pairs", ".", "add", "(", "tuple", "(", "sorted", "(", "(", "p", ",", "a", ")", ")", ")", ")", "for", "a", ",", "b", "in", "sorted", "(", "pairs", ")", ":", "print", "(", "\"\\t\"", ".", "join", "(", "(", "a", ",", "b", ")", ")", ",", "file", "=", "fw", ")", "fw", ".", "close", "(", ")" ]
%prog yeasttruth Pillars.tab *.gff

Prepare pairs data for 14 yeasts.
[ "%prog", "yeasttruth", "Pillars", ".", "tab", "*", ".", "gff" ]
python
train
28.58
pantsbuild/pants
src/python/pants/backend/jvm/tasks/jvm_compile/jvm_compile.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/tasks/jvm_compile/jvm_compile.py#L579-L584
def _find_logs(self, compile_workunit):
    """Finds all logs under the given workunit."""
    for idx, workunit in enumerate(compile_workunit.children):
        for output_name, outpath in workunit.output_paths().items():
            if output_name in ('stdout', 'stderr'):
                yield idx, workunit.name, output_name, outpath
[ "def", "_find_logs", "(", "self", ",", "compile_workunit", ")", ":", "for", "idx", ",", "workunit", "in", "enumerate", "(", "compile_workunit", ".", "children", ")", ":", "for", "output_name", ",", "outpath", "in", "workunit", ".", "output_paths", "(", ")", ".", "items", "(", ")", ":", "if", "output_name", "in", "(", "'stdout'", ",", "'stderr'", ")", ":", "yield", "idx", ",", "workunit", ".", "name", ",", "output_name", ",", "outpath" ]
Finds all logs under the given workunit.
[ "Finds", "all", "logs", "under", "the", "given", "workunit", "." ]
python
train
53.333333
mete0r/hypua2jamo
setup.py
https://github.com/mete0r/hypua2jamo/blob/caceb33a26c27645703d659a82bb1152deef1469/setup.py#L33-L42
def setup_dir(f):
    '''
    Decorate f to run inside the directory where setup.py resides.
    '''
    setup_dir = os.path.dirname(os.path.abspath(__file__))

    def wrapped(*args, **kwargs):
        with chdir(setup_dir):
            return f(*args, **kwargs)
    return wrapped
[ "def", "setup_dir", "(", "f", ")", ":", "setup_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "chdir", "(", "setup_dir", ")", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped" ]
Decorate f to run inside the directory where setup.py resides.
[ "Decorate", "f", "to", "run", "inside", "the", "directory", "where", "setup", ".", "py", "resides", "." ]
python
train
27
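Hypothetical usage of the setup_dir decorator above (function invented for illustration):

    @setup_dir
    def read_readme():
        # Always resolved relative to the directory containing setup.py.
        with open('README.md') as f:
            return f.read()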
tensorflow/probability
tensorflow_probability/python/distributions/batch_reshape.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/batch_reshape.py#L380-L409
def calculate_reshape(original_shape, new_shape, validate=False, name=None):
    """Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
    batch_shape_static = tensorshape_util.constant_value_as_shape(new_shape)
    if tensorshape_util.is_fully_defined(batch_shape_static):
        return np.int32(batch_shape_static), batch_shape_static, []
    with tf.name_scope(name or "calculate_reshape"):
        original_size = tf.reduce_prod(input_tensor=original_shape)
        implicit_dim = tf.equal(new_shape, -1)
        size_implicit_dim = (
            original_size // tf.maximum(1, -tf.reduce_prod(input_tensor=new_shape)))
        new_ndims = tf.shape(input=new_shape)
        expanded_new_shape = tf.where(  # Assumes exactly one `-1`.
            implicit_dim, tf.fill(new_ndims, size_implicit_dim), new_shape)
        validations = [] if not validate else [  # pylint: disable=g-long-ternary
            assert_util.assert_rank(
                original_shape, 1, message="Original shape must be a vector."),
            assert_util.assert_rank(
                new_shape, 1, message="New shape must be a vector."),
            assert_util.assert_less_equal(
                tf.math.count_nonzero(implicit_dim, dtype=tf.int32),
                1,
                message="At most one dimension can be unknown."),
            assert_util.assert_positive(
                expanded_new_shape, message="Shape elements must be >=-1."),
            assert_util.assert_equal(
                tf.reduce_prod(input_tensor=expanded_new_shape),
                original_size,
                message="Shape sizes do not match."),
        ]
        return expanded_new_shape, batch_shape_static, validations
[ "def", "calculate_reshape", "(", "original_shape", ",", "new_shape", ",", "validate", "=", "False", ",", "name", "=", "None", ")", ":", "batch_shape_static", "=", "tensorshape_util", ".", "constant_value_as_shape", "(", "new_shape", ")", "if", "tensorshape_util", ".", "is_fully_defined", "(", "batch_shape_static", ")", ":", "return", "np", ".", "int32", "(", "batch_shape_static", ")", ",", "batch_shape_static", ",", "[", "]", "with", "tf", ".", "name_scope", "(", "name", "or", "\"calculate_reshape\"", ")", ":", "original_size", "=", "tf", ".", "reduce_prod", "(", "input_tensor", "=", "original_shape", ")", "implicit_dim", "=", "tf", ".", "equal", "(", "new_shape", ",", "-", "1", ")", "size_implicit_dim", "=", "(", "original_size", "//", "tf", ".", "maximum", "(", "1", ",", "-", "tf", ".", "reduce_prod", "(", "input_tensor", "=", "new_shape", ")", ")", ")", "new_ndims", "=", "tf", ".", "shape", "(", "input", "=", "new_shape", ")", "expanded_new_shape", "=", "tf", ".", "where", "(", "# Assumes exactly one `-1`.", "implicit_dim", ",", "tf", ".", "fill", "(", "new_ndims", ",", "size_implicit_dim", ")", ",", "new_shape", ")", "validations", "=", "[", "]", "if", "not", "validate", "else", "[", "# pylint: disable=g-long-ternary", "assert_util", ".", "assert_rank", "(", "original_shape", ",", "1", ",", "message", "=", "\"Original shape must be a vector.\"", ")", ",", "assert_util", ".", "assert_rank", "(", "new_shape", ",", "1", ",", "message", "=", "\"New shape must be a vector.\"", ")", ",", "assert_util", ".", "assert_less_equal", "(", "tf", ".", "math", ".", "count_nonzero", "(", "implicit_dim", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "1", ",", "message", "=", "\"At most one dimension can be unknown.\"", ")", ",", "assert_util", ".", "assert_positive", "(", "expanded_new_shape", ",", "message", "=", "\"Shape elements must be >=-1.\"", ")", ",", "assert_util", ".", "assert_equal", "(", "tf", ".", "reduce_prod", "(", "input_tensor", "=", "expanded_new_shape", ")", ",", "original_size", ",", "message", "=", "\"Shape sizes do not match.\"", ")", ",", "]", "return", "expanded_new_shape", ",", "batch_shape_static", ",", "validations" ]
Calculates the reshaped dimensions (replacing up to one -1 in reshape).
[ "Calculates", "the", "reshaped", "dimensions", "(", "replacing", "up", "to", "one", "-", "1", "in", "reshape", ")", "." ]
python
test
52.9
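A NumPy analogue of the -1 expansion performed above, as a sketch of the idea rather than the TFP helper:

    import numpy as np

    original_size = 24                      # product of the original shape
    new_shape = np.array([2, -1, 3])
    implicit = new_shape == -1              # assumes exactly one -1
    size_implicit = original_size // max(1, -new_shape.prod())
    print(np.where(implicit, size_implicit, new_shape))   # [2 4 3]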
urain39/KngetPy
knget/base.py
https://github.com/urain39/KngetPy/blob/00986bc16a497cee08aceb1c072f6187f152ee5d/knget/base.py#L174-L183
def _debug_info(self):
    """Show a list of recent variables info.
    """
    self._msg('DEBUG')
    self._msg2('WorkDir: {0}'.format(self._curdir))
    self._msg2('Cookies: {0}'.format(self._session.cookies))
    self._msg2('Headers: {0}'.format(self._session.headers))
    self._msg2('Configs: {0}'.format(self._config))
    self._msg2('Customs: {0}'.format(self._custom))
    self._msg2('Account: {0}'.format(self._account))
[ "def", "_debug_info", "(", "self", ")", ":", "self", ".", "_msg", "(", "'DEBUG'", ")", "self", ".", "_msg2", "(", "'WorkDir: {0}'", ".", "format", "(", "self", ".", "_curdir", ")", ")", "self", ".", "_msg2", "(", "'Cookies: {0}'", ".", "format", "(", "self", ".", "_session", ".", "cookies", ")", ")", "self", ".", "_msg2", "(", "'Headers: {0}'", ".", "format", "(", "self", ".", "_session", ".", "headers", ")", ")", "self", ".", "_msg2", "(", "'Configs: {0}'", ".", "format", "(", "self", ".", "_config", ")", ")", "self", ".", "_msg2", "(", "'Customs: {0}'", ".", "format", "(", "self", ".", "_custom", ")", ")", "self", ".", "_msg2", "(", "'Account: {0}'", ".", "format", "(", "self", ".", "_account", ")", ")" ]
Show a list of recent variables info.
[ "Show", "a", "list", "of", "recently", "variables", "info", "." ]
python
train
45.8
tuomas2/automate
src/automate/extensions/rpc/rpc.py
https://github.com/tuomas2/automate/blob/d8a8cd03cd0da047e033a2d305f3f260f8c4e017/src/automate/extensions/rpc/rpc.py#L73-L77
def get_actuators(self): """ Get actuators as a dictionary of format ``{name: status}`` """ return {i.name: i.status for i in self.system.actuators}
[ "def", "get_actuators", "(", "self", ")", ":", "return", "{", "i", ".", "name", ":", "i", ".", "status", "for", "i", "in", "self", ".", "system", ".", "actuators", "}" ]
Get actuators as a dictionary of format ``{name: status}``
[ "Get", "actuators", "as", "a", "dictionary", "of", "format", "{", "name", ":", "status", "}" ]
python
train
36
OCHA-DAP/hdx-python-api
src/hdx/data/resource_view.py
https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/data/resource_view.py#L179-L197
def copy(self, resource_view): # type: (Union[ResourceView,Dict,str]) -> None """Copies all fields except id, resource_id and package_id from another resource view. Args: resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary Returns: None """ if isinstance(resource_view, str): if is_valid_uuid(resource_view) is False: raise HDXError('%s is not a valid resource view id!' % resource_view) resource_view = ResourceView.read_from_hdx(resource_view) if not isinstance(resource_view, dict) and not isinstance(resource_view, ResourceView): raise HDXError('%s is not a valid resource view!' % resource_view) for key in resource_view: if key not in ('id', 'resource_id', 'package_id'): self.data[key] = resource_view[key]
[ "def", "copy", "(", "self", ",", "resource_view", ")", ":", "# type: (Union[ResourceView,Dict,str]) -> None", "if", "isinstance", "(", "resource_view", ",", "str", ")", ":", "if", "is_valid_uuid", "(", "resource_view", ")", "is", "False", ":", "raise", "HDXError", "(", "'%s is not a valid resource view id!'", "%", "resource_view", ")", "resource_view", "=", "ResourceView", ".", "read_from_hdx", "(", "resource_view", ")", "if", "not", "isinstance", "(", "resource_view", ",", "dict", ")", "and", "not", "isinstance", "(", "resource_view", ",", "ResourceView", ")", ":", "raise", "HDXError", "(", "'%s is not a valid resource view!'", "%", "resource_view", ")", "for", "key", "in", "resource_view", ":", "if", "key", "not", "in", "(", "'id'", ",", "'resource_id'", ",", "'package_id'", ")", ":", "self", ".", "data", "[", "key", "]", "=", "resource_view", "[", "key", "]" ]
Copies all fields except id, resource_id and package_id from another resource view. Args: resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary Returns: None
[ "Copies", "all", "fields", "except", "id", "resource_id", "and", "package_id", "from", "another", "resource", "view", "." ]
python
train
50.578947
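Stripped of the HDX lookups and validation, the copy above reduces to a filtered dict update: every field except the identity keys is taken from the source, so the target keeps its own id. A self-contained illustration with made-up dictionaries:

source = {'id': 'abc', 'resource_id': 'r1', 'title': 'Chart', 'view_type': 'hdx_chart'}
target = {'id': 'xyz', 'resource_id': 'r2'}
for key, value in source.items():
    if key not in ('id', 'resource_id', 'package_id'):
        target[key] = value   # identity fields are deliberately skipped
print(target)  # {'id': 'xyz', 'resource_id': 'r2', 'title': 'Chart', 'view_type': 'hdx_chart'}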
mcocdawc/chemcoord
src/chemcoord/cartesian_coordinates/xyz_functions.py
https://github.com/mcocdawc/chemcoord/blob/95561ce387c142227c38fb14a1d182179aef8f5f/src/chemcoord/cartesian_coordinates/xyz_functions.py#L341-L359
def get_rotation_matrix(axis, angle): """Returns the rotation matrix. This function returns a matrix for the counterclockwise rotation around the given axis. The input angle is in radians. Args: axis (vector): angle (float): Returns: Rotation matrix (np.array): """ axis = normalize(np.array(axis)) if not axis.shape == (3, ): raise ValueError('axis.shape has to be 3') angle = float(angle) return _jit_get_rotation_matrix(axis, angle)
[ "def", "get_rotation_matrix", "(", "axis", ",", "angle", ")", ":", "axis", "=", "normalize", "(", "np", ".", "array", "(", "axis", ")", ")", "if", "not", "(", "np", ".", "array", "(", "[", "1", ",", "1", ",", "1", "]", ")", ".", "shape", ")", "==", "(", "3", ",", ")", ":", "raise", "ValueError", "(", "'axis.shape has to be 3'", ")", "angle", "=", "float", "(", "angle", ")", "return", "_jit_get_rotation_matrix", "(", "axis", ",", "angle", ")" ]
Returns the rotation matrix. This function returns a matrix for the counterclockwise rotation around the given axis. The input angle is in radians. Args: axis (vector): angle (float): Returns: Rotation matrix (np.array):
[ "Returns", "the", "rotation", "matrix", "." ]
python
train
27
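The record above validates its inputs and delegates the math to a jitted helper. For reference, here is a plain-NumPy sketch of a counterclockwise rotation matrix via the Rodrigues formula, which is presumably what _jit_get_rotation_matrix computes (the function below is illustrative, not chemcoord's implementation):

import numpy as np

def rotation_matrix(axis, angle):
    # Rodrigues formula: R = I + sin(t) K + (1 - cos(t)) K^2,
    # where K is the cross-product matrix of the unit axis.
    x, y, z = np.asarray(axis, dtype=float) / np.linalg.norm(axis)
    K = np.array([[0, -z, y],
                  [z, 0, -x],
                  [-y, x, 0]])
    return np.eye(3) + np.sin(angle) * K + (1 - np.cos(angle)) * (K @ K)

# Rotating the x axis by 90 degrees about z yields the y axis.
print(np.round(rotation_matrix([0, 0, 1], np.pi / 2) @ [1, 0, 0], 6))  # [0. 1. 0.]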
MarcoFavorito/flloat
flloat/flloat.py
https://github.com/MarcoFavorito/flloat/blob/5e6de1bea444b68d46d288834031860a8b2f8c2d/flloat/flloat.py#L18-L31
def find_atomics(formula: Formula) -> Set[PLAtomic]: """Finds all the atomic formulas""" f = formula res = set() if isinstance(formula, PLFormula): res = formula.find_atomics() # elif isinstance(f, PLNot): # res = res.union(find_atomics(f.f)) # elif isinstance(f, PLBinaryOperator): # for subf in f.formulas: # res = res.union(find_atomics(subf)) else: res.add(f) return res
[ "def", "find_atomics", "(", "formula", ":", "Formula", ")", "->", "Set", "[", "PLAtomic", "]", ":", "f", "=", "formula", "res", "=", "set", "(", ")", "if", "isinstance", "(", "formula", ",", "PLFormula", ")", ":", "res", "=", "formula", ".", "find_atomics", "(", ")", "# elif isinstance(f, PLNot):", "# res = res.union(find_atomics(f.f))", "# elif isinstance(f, PLBinaryOperator):", "# for subf in f.formulas:", "# res = res.union(find_atomics(subf))", "else", ":", "res", ".", "add", "(", "f", ")", "return", "res" ]
Finds all the atomic formulas
[ "Finds", "all", "the", "atomic", "formulas" ]
python
train
31.214286
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/directory.py
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/directory.py#L2849-L2862
def ostype_2_json(self): """ transform ariane_clip3 OS Type object to Ariane server JSON obj :return: Ariane JSON obj """ LOGGER.debug("OSType.ostype_2_json") json_obj = { 'osTypeID': self.id, 'osTypeName': self.name, 'osTypeArchitecture': self.architecture, 'osTypeCompanyID': self.company_id, 'osTypeOSInstancesID': self.osi_ids } return json.dumps(json_obj)
[ "def", "ostype_2_json", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "\"OSType.ostype_2_json\"", ")", "json_obj", "=", "{", "'osTypeID'", ":", "self", ".", "id", ",", "'osTypeName'", ":", "self", ".", "name", ",", "'osTypeArchitecture'", ":", "self", ".", "architecture", ",", "'osTypeCompanyID'", ":", "self", ".", "company_id", ",", "'osTypeOSInstancesID'", ":", "self", ".", "osi_ids", "}", "return", "json", ".", "dumps", "(", "json_obj", ")" ]
transform ariane_clip3 OS Type object to Ariane server JSON obj :return: Ariane JSON obj
[ "transform", "ariane_clip3", "OS", "Type", "object", "to", "Ariane", "server", "JSON", "obj", ":", "return", ":", "Ariane", "JSON", "obj" ]
python
train
33.642857
msmbuilder/osprey
osprey/plugins/plugin_pylearn2.py
https://github.com/msmbuilder/osprey/blob/ea09da24e45820e1300e24a52fefa6c849f7a986/osprey/plugins/plugin_pylearn2.py#L278-L298
def load(self): """ Load the dataset using pylearn2.config.yaml_parse. """ from pylearn2.config import yaml_parse from pylearn2.datasets import Dataset dataset = yaml_parse.load(self.yaml_string) assert isinstance(dataset, Dataset) data = dataset.iterator(mode='sequential', num_batches=1, data_specs=dataset.data_specs, return_tuple=True).next() if len(data) == 2: X, y = data y = np.squeeze(y) if self.one_hot: y = np.argmax(y, axis=1) else: X = data y = None return X, y
[ "def", "load", "(", "self", ")", ":", "from", "pylearn2", ".", "config", "import", "yaml_parse", "from", "pylearn2", ".", "datasets", "import", "Dataset", "dataset", "=", "yaml_parse", ".", "load", "(", "self", ".", "yaml_string", ")", "assert", "isinstance", "(", "dataset", ",", "Dataset", ")", "data", "=", "dataset", ".", "iterator", "(", "mode", "=", "'sequential'", ",", "num_batches", "=", "1", ",", "data_specs", "=", "dataset", ".", "data_specs", ",", "return_tuple", "=", "True", ")", ".", "next", "(", ")", "if", "len", "(", "data", ")", "==", "2", ":", "X", ",", "y", "=", "data", "y", "=", "np", ".", "squeeze", "(", "y", ")", "if", "self", ".", "one_hot", ":", "y", "=", "np", ".", "argmax", "(", "y", ",", "axis", "=", "1", ")", "else", ":", "X", "=", "data", "y", "=", "None", "return", "X", ",", "y" ]
Load the dataset using pylearn2.config.yaml_parse.
[ "Load", "the", "dataset", "using", "pylearn2", ".", "config", ".", "yaml_parse", "." ]
python
valid
32.47619
Microsoft/LightGBM
python-package/lightgbm/sklearn.py
https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/sklearn.py#L756-L765
def predict(self, X, raw_score=False, num_iteration=None, pred_leaf=False, pred_contrib=False, **kwargs): """Docstring is inherited from the LGBMModel.""" result = self.predict_proba(X, raw_score, num_iteration, pred_leaf, pred_contrib, **kwargs) if raw_score or pred_leaf or pred_contrib: return result else: class_index = np.argmax(result, axis=1) return self._le.inverse_transform(class_index)
[ "def", "predict", "(", "self", ",", "X", ",", "raw_score", "=", "False", ",", "num_iteration", "=", "None", ",", "pred_leaf", "=", "False", ",", "pred_contrib", "=", "False", ",", "*", "*", "kwargs", ")", ":", "result", "=", "self", ".", "predict_proba", "(", "X", ",", "raw_score", ",", "num_iteration", ",", "pred_leaf", ",", "pred_contrib", ",", "*", "*", "kwargs", ")", "if", "raw_score", "or", "pred_leaf", "or", "pred_contrib", ":", "return", "result", "else", ":", "class_index", "=", "np", ".", "argmax", "(", "result", ",", "axis", "=", "1", ")", "return", "self", ".", "_le", ".", "inverse_transform", "(", "class_index", ")" ]
Docstring is inherited from the LGBMModel.
[ "Docstring", "is", "inherited", "from", "the", "LGBMModel", "." ]
python
train
50.7
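The decode step inside the classifier's predict above, sketched with scikit-learn's LabelEncoder standing in for the fitted self._le; the probability array is made up and plays the role of the predict_proba output:

import numpy as np
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder().fit(["cat", "dog", "fish"])
proba = np.array([[0.1, 0.7, 0.2],
                  [0.8, 0.1, 0.1]])       # stand-in for predict_proba output
class_index = np.argmax(proba, axis=1)    # most probable class per row
print(le.inverse_transform(class_index))  # ['dog' 'cat']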
Dallinger/Dallinger
dallinger/recruiters.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/recruiters.py#L128-L141
def open_recruitment(self, n=1): """Return initial experiment URL list, plus instructions for finding subsequent recruitment events in experiment logs. """ logger.info("Opening CLI recruitment for {} participants".format(n)) recruitments = self.recruit(n) message = ( 'Search for "{}" in the logs for subsequent recruitment URLs.\n' "Open the logs for this experiment with " '"dallinger logs --app {}"'.format( NEW_RECRUIT_LOG_PREFIX, self.config.get("id") ) ) return {"items": recruitments, "message": message}
[ "def", "open_recruitment", "(", "self", ",", "n", "=", "1", ")", ":", "logger", ".", "info", "(", "\"Opening CLI recruitment for {} participants\"", ".", "format", "(", "n", ")", ")", "recruitments", "=", "self", ".", "recruit", "(", "n", ")", "message", "=", "(", "'Search for \"{}\" in the logs for subsequent recruitment URLs.\\n'", "\"Open the logs for this experiment with \"", "'\"dallinger logs --app {}\"'", ".", "format", "(", "NEW_RECRUIT_LOG_PREFIX", ",", "self", ".", "config", ".", "get", "(", "\"id\"", ")", ")", ")", "return", "{", "\"items\"", ":", "recruitments", ",", "\"message\"", ":", "message", "}" ]
Return initial experiment URL list, plus instructions for finding subsequent recruitment events in experiment logs.
[ "Return", "initial", "experiment", "URL", "list", "plus", "instructions", "for", "finding", "subsequent", "recruitment", "events", "in", "experiemnt", "logs", "." ]
python
train
44.714286
google/grumpy
third_party/stdlib/copy.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/copy.py#L148-L197
def deepcopy(x, memo=None, _nil=[]): """Deep copy operation on arbitrary Python objects. See the module's __doc__ string for more info. """ if memo is None: memo = {} d = id(x) y = memo.get(d, _nil) if y is not _nil: return y cls = type(x) copier = _deepcopy_dispatch.get(cls) if copier: y = copier(x, memo) else: try: issc = issubclass(cls, type) except TypeError: # cls is not a class (old Boost; see SF #502085) issc = 0 if issc: y = _deepcopy_atomic(x, memo) else: copier = getattr(x, "__deepcopy__", None) if copier: y = copier(memo) else: reductor = dispatch_table.get(cls) if reductor: rv = reductor(x) else: reductor = getattr(x, "__reduce_ex__", None) if reductor: rv = reductor(2) else: reductor = getattr(x, "__reduce__", None) if reductor: rv = reductor() else: raise Error( "un(deep)copyable object of type %s" % cls) y = _reconstruct(x, rv, 1, memo) memo[d] = y _keep_alive(x, memo) # Make sure x lives at least as long as d return y
[ "def", "deepcopy", "(", "x", ",", "memo", "=", "None", ",", "_nil", "=", "[", "]", ")", ":", "if", "memo", "is", "None", ":", "memo", "=", "{", "}", "d", "=", "id", "(", "x", ")", "y", "=", "memo", ".", "get", "(", "d", ",", "_nil", ")", "if", "y", "is", "not", "_nil", ":", "return", "y", "cls", "=", "type", "(", "x", ")", "copier", "=", "_deepcopy_dispatch", ".", "get", "(", "cls", ")", "if", "copier", ":", "y", "=", "copier", "(", "x", ",", "memo", ")", "else", ":", "try", ":", "issc", "=", "issubclass", "(", "cls", ",", "type", ")", "except", "TypeError", ":", "# cls is not a class (old Boost; see SF #502085)", "issc", "=", "0", "if", "issc", ":", "y", "=", "_deepcopy_atomic", "(", "x", ",", "memo", ")", "else", ":", "copier", "=", "getattr", "(", "x", ",", "\"__deepcopy__\"", ",", "None", ")", "if", "copier", ":", "y", "=", "copier", "(", "memo", ")", "else", ":", "reductor", "=", "dispatch_table", ".", "get", "(", "cls", ")", "if", "reductor", ":", "rv", "=", "reductor", "(", "x", ")", "else", ":", "reductor", "=", "getattr", "(", "x", ",", "\"__reduce_ex__\"", ",", "None", ")", "if", "reductor", ":", "rv", "=", "reductor", "(", "2", ")", "else", ":", "reductor", "=", "getattr", "(", "x", ",", "\"__reduce__\"", ",", "None", ")", "if", "reductor", ":", "rv", "=", "reductor", "(", ")", "else", ":", "raise", "Error", "(", "\"un(deep)copyable object of type %s\"", "%", "cls", ")", "y", "=", "_reconstruct", "(", "x", ",", "rv", ",", "1", ",", "memo", ")", "memo", "[", "d", "]", "=", "y", "_keep_alive", "(", "x", ",", "memo", ")", "# Make sure x lives at least as long as d", "return", "y" ]
Deep copy operation on arbitrary Python objects. See the module's __doc__ string for more info.
[ "Deep", "copy", "operation", "on", "arbitrary", "Python", "objects", "." ]
python
valid
28.64
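The memo dictionary threaded through deepcopy above is what makes shared and self-referential objects safe to copy: each id(x) is copied exactly once and then reused. A quick demonstration with a cyclic list:

import copy

a = [1, 2]
a.append(a)            # a now contains itself
b = copy.deepcopy(a)
print(b[2] is b)       # True  -> the cycle is preserved in the copy
print(b[2] is a)       # False -> and it no longer points at the original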
cole/aiosmtplib
src/aiosmtplib/connection.py
https://github.com/cole/aiosmtplib/blob/0cd00e5059005371cbdfca995feff9183a16a51f/src/aiosmtplib/connection.py#L355-L373
def get_transport_info(self, key: str) -> Any: """ Get extra info from the transport. Supported keys: - ``peername`` - ``socket`` - ``sockname`` - ``compression`` - ``cipher`` - ``peercert`` - ``sslcontext`` - ``sslobject`` :raises SMTPServerDisconnected: connection lost """ self._raise_error_if_disconnected() return self.transport.get_extra_info(key)
[ "def", "get_transport_info", "(", "self", ",", "key", ":", "str", ")", "->", "Any", ":", "self", ".", "_raise_error_if_disconnected", "(", ")", "return", "self", ".", "transport", ".", "get_extra_info", "(", "key", ")" ]
Get extra info from the transport. Supported keys: - ``peername`` - ``socket`` - ``sockname`` - ``compression`` - ``cipher`` - ``peercert`` - ``sslcontext`` - ``sslobject`` :raises SMTPServerDisconnected: connection lost
[ "Get", "extra", "info", "from", "the", "transport", ".", "Supported", "keys", ":" ]
python
train
25.789474
PMEAL/porespy
porespy/networks/__funcs__.py
https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/networks/__funcs__.py#L444-L489
def label_boundary_cells(network=None, boundary_faces=None): r""" Takes 2D or 3D network and assigns labels to boundary pores Parameters ---------- network : dictionary A dictionary as produced by the SNOW network extraction algorithms containing edge/vertex, site/bond, node/link information. boundary_faces : list of strings The user can choose ‘left’, ‘right’, ‘top’, ‘bottom’, ‘front’ and ‘back’ face labels to assign boundary nodes. If no label is assigned then all six faces will be selected as boundary nodes automatically which can be trimmed later on based on user requirements. Returns ------- The same dictionary as passed in, but containing boundary node labels. For example network['pore.left'], network['pore.right'], network['pore.top'], network['pore.bottom'] etc. Notes ----- The dictionary names use the OpenPNM convention so it may be converted directly to an OpenPNM network object using the ``update`` command. """ f = boundary_faces if f is not None: coords = network['pore.coords'] condition = coords[~network['pore.boundary']] dic = {'left': 0, 'right': 0, 'front': 1, 'back': 1, 'top': 2, 'bottom': 2} if all(coords[:, 2] == 0): dic['top'] = 1 dic['bottom'] = 1 for i in f: if i in ['left', 'front', 'bottom']: network['pore.{}'.format(i)] = (coords[:, dic[i]] < min(condition[:, dic[i]])) elif i in ['right', 'back', 'top']: network['pore.{}'.format(i)] = (coords[:, dic[i]] > max(condition[:, dic[i]])) return network
[ "def", "label_boundary_cells", "(", "network", "=", "None", ",", "boundary_faces", "=", "None", ")", ":", "f", "=", "boundary_faces", "if", "f", "is", "not", "None", ":", "coords", "=", "network", "[", "'pore.coords'", "]", "condition", "=", "coords", "[", "~", "network", "[", "'pore.boundary'", "]", "]", "dic", "=", "{", "'left'", ":", "0", ",", "'right'", ":", "0", ",", "'front'", ":", "1", ",", "'back'", ":", "1", ",", "'top'", ":", "2", ",", "'bottom'", ":", "2", "}", "if", "all", "(", "coords", "[", ":", ",", "2", "]", "==", "0", ")", ":", "dic", "[", "'top'", "]", "=", "1", "dic", "[", "'bottom'", "]", "=", "1", "for", "i", "in", "f", ":", "if", "i", "in", "[", "'left'", ",", "'front'", ",", "'bottom'", "]", ":", "network", "[", "'pore.{}'", ".", "format", "(", "i", ")", "]", "=", "(", "coords", "[", ":", ",", "dic", "[", "i", "]", "]", "<", "min", "(", "condition", "[", ":", ",", "dic", "[", "i", "]", "]", ")", ")", "elif", "i", "in", "[", "'right'", ",", "'back'", ",", "'top'", "]", ":", "network", "[", "'pore.{}'", ".", "format", "(", "i", ")", "]", "=", "(", "coords", "[", ":", ",", "dic", "[", "i", "]", "]", ">", "max", "(", "condition", "[", ":", ",", "dic", "[", "i", "]", "]", ")", ")", "return", "network" ]
r""" Takes 2D or 3D network and assign labels to boundary pores Parameters ---------- network : dictionary A dictionary as produced by the SNOW network extraction algorithms containing edge/vertex, site/bond, node/link information. boundary_faces : list of strings The user can choose ‘left’, ‘right’, ‘top’, ‘bottom’, ‘front’ and ‘back’ face labels to assign boundary nodes. If no label is assigned then all six faces will be selected as boundary nodes automatically which can be trimmed later on based on user requirements. Returns ------- The same dictionar s pass ing, but containing boundary nodes labels. For example network['pore.left'], network['pore.right'], network['pore.top'], network['pore.bottom'] etc. Notes ----- The dictionary names use the OpenPNM convention so it may be converted directly to an OpenPNM network object using the ``update`` command.
[ "r", "Takes", "2D", "or", "3D", "network", "and", "assign", "labels", "to", "boundary", "pores" ]
python
train
38.217391
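The face test at the heart of label_boundary_cells reduces to comparing each pore coordinate against the min/max of the internal (non-boundary) pores along the axis for that face. A minimal sketch with made-up coordinates:

import numpy as np

coords = np.array([[0., 5.], [1., 2.], [4., 3.], [9., 5.]])
boundary = np.array([True, False, False, True])  # stand-in for pore.boundary
internal = coords[~boundary]
left = coords[:, 0] < internal[:, 0].min()   # below the innermost minimum x
right = coords[:, 0] > internal[:, 0].max()  # above the innermost maximum x
print(left)   # [ True False False False]
print(right)  # [False False False  True]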
zhanglab/psamm
psamm/importers/sbml.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/importers/sbml.py#L59-L66
def import_model(self, source): """Import and return model instance.""" source = self._resolve_source(source) self._context = FilePathContext(source) with self._context.open() as f: self._reader = self._open_reader(f) return self._reader.create_model()
[ "def", "import_model", "(", "self", ",", "source", ")", ":", "source", "=", "self", ".", "_resolve_source", "(", "source", ")", "self", ".", "_context", "=", "FilePathContext", "(", "source", ")", "with", "self", ".", "_context", ".", "open", "(", ")", "as", "f", ":", "self", ".", "_reader", "=", "self", ".", "_open_reader", "(", "f", ")", "return", "self", ".", "_reader", ".", "create_model", "(", ")" ]
Import and return model instance.
[ "Import", "and", "return", "model", "instance", "." ]
python
train
37.25
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/natsd/driver.py
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/natsd/driver.py#L876-L889
def make_requester(self, my_args=None): """ make a new requester instance and handle it from driver :param my_args: dict like {request_q}. Default : None :return: created requester proxy """ LOGGER.debug("natsd.Driver.make_requester") if my_args is None: raise exceptions.ArianeConfError('requester factory arguments') if not self.configuration_OK or self.connection_args is None: raise exceptions.ArianeConfError('NATS connection arguments') requester = Requester.start(my_args, self.connection_args).proxy() self.requester_registry.append(requester) return requester
[ "def", "make_requester", "(", "self", ",", "my_args", "=", "None", ")", ":", "LOGGER", ".", "debug", "(", "\"natsd.Driver.make_requester\"", ")", "if", "my_args", "is", "None", ":", "raise", "exceptions", ".", "ArianeConfError", "(", "'requester factory arguments'", ")", "if", "not", "self", ".", "configuration_OK", "or", "self", ".", "connection_args", "is", "None", ":", "raise", "exceptions", ".", "ArianeConfError", "(", "'NATS connection arguments'", ")", "requester", "=", "Requester", ".", "start", "(", "my_args", ",", "self", ".", "connection_args", ")", ".", "proxy", "(", ")", "self", ".", "requester_registry", ".", "append", "(", "requester", ")", "return", "requester" ]
make a new requester instance and handle it from driver :param my_args: dict like {request_q}. Default : None :return: created requester proxy
[ "make", "a", "new", "requester", "instance", "and", "handle", "it", "from", "driver", ":", "param", "my_args", ":", "dict", "like", "{", "request_q", "}", ".", "Default", ":", "None", ":", "return", ":", "created", "requester", "proxy" ]
python
train
47.642857
KE-works/pykechain
pykechain/models/property_attachment.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/property_attachment.py#L32-L51
def value(self): """Retrieve the data value of this attachment. Will show the filename of the attachment if there is an attachment available otherwise None Use save_as in order to download as a file. Example ------- >>> file_attachment_property = project.part('Bike').property('file_attachment') >>> if file_attachment_property.value: ... file_attachment_property.save_as('file.ext') ... else: ... print('file attachment not set, its value is None') """ if 'value' in self._json_data and self._json_data['value']: return "[Attachment: {}]".format(self._json_data['value'].split('/')[-1]) else: return None
[ "def", "value", "(", "self", ")", ":", "if", "'value'", "in", "self", ".", "_json_data", "and", "self", ".", "_json_data", "[", "'value'", "]", ":", "return", "\"[Attachment: {}]\"", ".", "format", "(", "self", ".", "_json_data", "[", "'value'", "]", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ")", "else", ":", "return", "None" ]
Retrieve the data value of this attachment. Will show the filename of the attachment if there is an attachment available otherwise None Use save_as in order to download as a file. Example ------- >>> file_attachment_property = project.part('Bike').property('file_attachment') >>> if file_attachment_property.value: ... file_attachment_property.save_as('file.ext') ... else: ... print('file attachment not set, its value is None')
[ "Retrieve", "the", "data", "value", "of", "this", "attachment", "." ]
python
train
36.3
SmokinCaterpillar/pypet
examples/example_20_using_deap_manual_runs.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/examples/example_20_using_deap_manual_runs.py#L39-L45
def eval_one_max(traj, individual): """The fitness function""" traj.f_add_result('$set.$.individual', list(individual)) fitness = sum(individual) traj.f_add_result('$set.$.fitness', fitness) traj.f_store() return (fitness,)
[ "def", "eval_one_max", "(", "traj", ",", "individual", ")", ":", "traj", ".", "f_add_result", "(", "'$set.$.individual'", ",", "list", "(", "individual", ")", ")", "fitness", "=", "sum", "(", "individual", ")", "traj", ".", "f_add_result", "(", "'$set.$.fitness'", ",", "fitness", ")", "traj", ".", "f_store", "(", ")", "return", "(", "fitness", ",", ")" ]
The fitness function
[ "The", "fitness", "function" ]
python
test
34.428571
josuebrunel/myql
myql/myql.py
https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/myql.py#L124-L155
def _clause_formatter(self, cond): '''Formats conditions args is a list of ['field', 'operator', 'value'] ''' if len(cond) == 2 : cond = ' '.join(cond) return cond if 'in' in cond[1].lower() : if not isinstance(cond[2], (tuple, list)): raise TypeError('("{0}") must be of type <type tuple> or <type list>'.format(cond[2])) if 'select' not in cond[2][0].lower() : cond[2] = "({0})".format(','.join(map(str,["'{0}'".format(e) for e in cond[2]]))) else: cond[2] = "({0})".format(','.join(map(str,["{0}".format(e) for e in cond[2]]))) cond = " ".join(cond) else: #if isinstance(cond[2], str): # var = re.match('^@(\w+)$', cond[2]) #else: # var = None #if var : if isinstance(cond[2], str) and cond[2].startswith('@'): cond[2] = "{0}".format(cond[2]) else : cond[2] = "'{0}'".format(cond[2]) cond = ' '.join(cond) return cond
[ "def", "_clause_formatter", "(", "self", ",", "cond", ")", ":", "if", "len", "(", "cond", ")", "==", "2", ":", "cond", "=", "' '", ".", "join", "(", "cond", ")", "return", "cond", "if", "'in'", "in", "cond", "[", "1", "]", ".", "lower", "(", ")", ":", "if", "not", "isinstance", "(", "cond", "[", "2", "]", ",", "(", "tuple", ",", "list", ")", ")", ":", "raise", "TypeError", "(", "'(\"{0}\") must be of type <type tuple> or <type list>'", ".", "format", "(", "cond", "[", "2", "]", ")", ")", "if", "'select'", "not", "in", "cond", "[", "2", "]", "[", "0", "]", ".", "lower", "(", ")", ":", "cond", "[", "2", "]", "=", "\"({0})\"", ".", "format", "(", "','", ".", "join", "(", "map", "(", "str", ",", "[", "\"'{0}'\"", ".", "format", "(", "e", ")", "for", "e", "in", "cond", "[", "2", "]", "]", ")", ")", ")", "else", ":", "cond", "[", "2", "]", "=", "\"({0})\"", ".", "format", "(", "','", ".", "join", "(", "map", "(", "str", ",", "[", "\"{0}\"", ".", "format", "(", "e", ")", "for", "e", "in", "cond", "[", "2", "]", "]", ")", ")", ")", "cond", "=", "\" \"", ".", "join", "(", "cond", ")", "else", ":", "#if isinstance(cond[2], str):", "# var = re.match('^@(\\w+)$', cond[2])", "#else:", "# var = None", "#if var :", "if", "isinstance", "(", "cond", "[", "2", "]", ",", "str", ")", "and", "cond", "[", "2", "]", ".", "startswith", "(", "'@'", ")", ":", "cond", "[", "2", "]", "=", "\"{0}\"", ".", "format", "(", "cond", "[", "2", "]", ")", "else", ":", "cond", "[", "2", "]", "=", "\"'{0}'\"", ".", "format", "(", "cond", "[", "2", "]", ")", "cond", "=", "' '", ".", "join", "(", "cond", ")", "return", "cond" ]
Formats conditions args is a list of ['field', 'operator', 'value']
[ "Formats", "conditions", "args", "is", "a", "list", "of", "[", "field", "operator", "value", "]" ]
python
train
34.65625
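The quoting rules in _clause_formatter above are easier to see in a simplified standalone version (no subselect handling; format_clause is a hypothetical name for illustration):

def format_clause(cond):
    if len(cond) == 2:
        return ' '.join(cond)
    field, op, value = cond
    if 'in' in op.lower():
        value = "({0})".format(','.join("'{0}'".format(v) for v in value))
    elif isinstance(value, str) and value.startswith('@'):
        pass                               # @variable placeholders stay unquoted
    else:
        value = "'{0}'".format(value)
    return ' '.join((field, op, value))

print(format_clause(['name', '=', 'Dublin']))  # name = 'Dublin'
print(format_clause(['id', 'in', [1, 2, 3]]))  # id in ('1','2','3')
print(format_clause(['place', '=', '@city']))  # place = @city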
intake/intake
intake/gui/base.py
https://github.com/intake/intake/blob/277b96bfdee39d8a3048ea5408c6d6716d568336/intake/gui/base.py#L202-L214
def add(self, items): """Add items to options""" options = self._create_options(items) for k, v in options.items(): if k in self.labels and v not in self.items: options.pop(k) count = 0 while f'{k}_{count}' in self.labels: count += 1 options[f'{k}_{count}'] = v self.widget.options.update(options) self.widget.param.trigger('options') self.widget.value = list(options.values())[:1]
[ "def", "add", "(", "self", ",", "items", ")", ":", "options", "=", "self", ".", "_create_options", "(", "items", ")", "for", "k", ",", "v", "in", "options", ".", "items", "(", ")", ":", "if", "k", "in", "self", ".", "labels", "and", "v", "not", "in", "self", ".", "items", ":", "options", ".", "pop", "(", "k", ")", "count", "=", "0", "while", "f'{k}_{count}'", "in", "self", ".", "labels", ":", "count", "+=", "1", "options", "[", "f'{k}_{count}'", "]", "=", "v", "self", ".", "widget", ".", "options", ".", "update", "(", "options", ")", "self", ".", "widget", ".", "param", ".", "trigger", "(", "'options'", ")", "self", ".", "widget", ".", "value", "=", "list", "(", "options", ".", "values", "(", ")", ")", "[", ":", "1", "]" ]
Add items to options
[ "Add", "items", "to", "options" ]
python
train
39.461538
liftoff/pyminifier
pyminifier/analyze.py
https://github.com/liftoff/pyminifier/blob/087ea7b0c8c964f1f907c3f350f5ce281798db86/pyminifier/analyze.py#L345-L363
def enumerate_builtins(tokens): """ Returns a list of all the builtins being used in *tokens*. """ out = [] for index, tok in enumerate(tokens): token_type = tok[0] token_string = tok[1] if token_string in builtins: # Note: I need to test if print can be replaced in Python 3 special_special = ['print'] # Print is special in Python 2 if py3: special_special = [] if token_string not in special_special: if not token_string.startswith('__'): # Don't count magic funcs if tokens[index-1][1] != '.' and tokens[index+1][1] != '=': if token_string not in out: out.append(token_string) return out
[ "def", "enumerate_builtins", "(", "tokens", ")", ":", "out", "=", "[", "]", "for", "index", ",", "tok", "in", "enumerate", "(", "tokens", ")", ":", "token_type", "=", "tok", "[", "0", "]", "token_string", "=", "tok", "[", "1", "]", "if", "token_string", "in", "builtins", ":", "# Note: I need to test if print can be replaced in Python 3", "special_special", "=", "[", "'print'", "]", "# Print is special in Python 2", "if", "py3", ":", "special_special", "=", "[", "]", "if", "token_string", "not", "in", "special_special", ":", "if", "not", "token_string", ".", "startswith", "(", "'__'", ")", ":", "# Don't count magic funcs", "if", "tokens", "[", "index", "-", "1", "]", "[", "1", "]", "!=", "'.'", "and", "tokens", "[", "index", "+", "1", "]", "[", "1", "]", "!=", "'='", ":", "if", "token_string", "not", "in", "out", ":", "out", ".", "append", "(", "token_string", ")", "return", "out" ]
Returns a list of all the builtins being used in *tokens*.
[ "Returns", "a", "list", "of", "all", "the", "builtins", "being", "used", "in", "*", "tokens", "*", "." ]
python
train
40.684211
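enumerate_builtins above walks a token stream of (type, string, ...) tuples. A minimal sketch of producing such a stream with the stdlib tokenizer and picking out builtin names, without the attribute/assignment checks the record applies:

import builtins
import io
import tokenize

src = "print(len(open('f').read()))\n"
tokens = list(tokenize.generate_tokens(io.StringIO(src).readline))
names = [tok.string for tok in tokens if tok.type == tokenize.NAME]
print([n for n in names if hasattr(builtins, n)])  # ['print', 'len', 'open']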
quantopian/zipline
zipline/pipeline/factors/factor.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L721-L781
def spearmanr(self, target, correlation_length, mask=NotSpecified): """ Construct a new Factor that computes rolling spearman rank correlation coefficients between `target` and the columns of `self`. This method can only be called on factors which are deemed safe for use as inputs to other factors. This includes `Returns` and any factors created from `Factor.rank` or `Factor.zscore`. Parameters ---------- target : zipline.pipeline.Term with a numeric dtype The term used to compute correlations against each column of data produced by `self`. This may be a Factor, a BoundColumn or a Slice. If `target` is two-dimensional, correlations are computed asset-wise. correlation_length : int Length of the lookback window over which to compute each correlation coefficient. mask : zipline.pipeline.Filter, optional A Filter describing which assets should have their correlation with the target slice computed each day. Returns ------- correlations : zipline.pipeline.factors.RollingSpearman A new Factor that will compute correlations between `target` and the columns of `self`. Examples -------- Suppose we want to create a factor that computes the correlation between AAPL's 10-day returns and the 10-day returns of all other assets, computing each correlation over 30 days. This can be achieved by doing the following:: returns = Returns(window_length=10) returns_slice = returns[sid(24)] aapl_correlations = returns.spearmanr( target=returns_slice, correlation_length=30, ) This is equivalent to doing:: aapl_correlations = RollingSpearmanOfReturns( target=sid(24), returns_length=10, correlation_length=30, ) See Also -------- :func:`scipy.stats.spearmanr` :class:`zipline.pipeline.factors.RollingSpearmanOfReturns` :meth:`Factor.pearsonr` """ from .statistical import RollingSpearman return RollingSpearman( base_factor=self, target=target, correlation_length=correlation_length, mask=mask, )
[ "def", "spearmanr", "(", "self", ",", "target", ",", "correlation_length", ",", "mask", "=", "NotSpecified", ")", ":", "from", ".", "statistical", "import", "RollingSpearman", "return", "RollingSpearman", "(", "base_factor", "=", "self", ",", "target", "=", "target", ",", "correlation_length", "=", "correlation_length", ",", "mask", "=", "mask", ",", ")" ]
Construct a new Factor that computes rolling spearman rank correlation coefficients between `target` and the columns of `self`. This method can only be called on factors which are deemed safe for use as inputs to other factors. This includes `Returns` and any factors created from `Factor.rank` or `Factor.zscore`. Parameters ---------- target : zipline.pipeline.Term with a numeric dtype The term used to compute correlations against each column of data produced by `self`. This may be a Factor, a BoundColumn or a Slice. If `target` is two-dimensional, correlations are computed asset-wise. correlation_length : int Length of the lookback window over which to compute each correlation coefficient. mask : zipline.pipeline.Filter, optional A Filter describing which assets should have their correlation with the target slice computed each day. Returns ------- correlations : zipline.pipeline.factors.RollingSpearman A new Factor that will compute correlations between `target` and the columns of `self`. Examples -------- Suppose we want to create a factor that computes the correlation between AAPL's 10-day returns and the 10-day returns of all other assets, computing each correlation over 30 days. This can be achieved by doing the following:: returns = Returns(window_length=10) returns_slice = returns[sid(24)] aapl_correlations = returns.spearmanr( target=returns_slice, correlation_length=30, ) This is equivalent to doing:: aapl_correlations = RollingSpearmanOfReturns( target=sid(24), returns_length=10, correlation_length=30, ) See Also -------- :func:`scipy.stats.spearmanr` :class:`zipline.pipeline.factors.RollingSpearmanOfReturns` :meth:`Factor.pearsonr`
[ "Construct", "a", "new", "Factor", "that", "computes", "rolling", "spearman", "rank", "correlation", "coefficients", "between", "target", "and", "the", "columns", "of", "self", "." ]
python
train
38.622951
numenta/nupic
src/nupic/encoders/base.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/base.py#L478-L491
def pprint(self, output, prefix=""): """ Pretty-print the encoded output using ascii art. :param output: to print :param prefix: printed before the header if specified """ print prefix, description = self.getDescription() + [("end", self.getWidth())] for i in xrange(len(description) - 1): offset = description[i][1] nextoffset = description[i+1][1] print "%s |" % bitsToString(output[offset:nextoffset]), print
[ "def", "pprint", "(", "self", ",", "output", ",", "prefix", "=", "\"\"", ")", ":", "print", "prefix", ",", "description", "=", "self", ".", "getDescription", "(", ")", "+", "[", "(", "\"end\"", ",", "self", ".", "getWidth", "(", ")", ")", "]", "for", "i", "in", "xrange", "(", "len", "(", "description", ")", "-", "1", ")", ":", "offset", "=", "description", "[", "i", "]", "[", "1", "]", "nextoffset", "=", "description", "[", "i", "+", "1", "]", "[", "1", "]", "print", "\"%s |\"", "%", "bitsToString", "(", "output", "[", "offset", ":", "nextoffset", "]", ")", ",", "print" ]
Pretty-print the encoded output using ascii art. :param output: to print :param prefix: printed before the header if specified
[ "Pretty", "-", "print", "the", "encoded", "output", "using", "ascii", "art", "." ]
python
valid
32.357143
networks-lab/metaknowledge
metaknowledge/recordCollection.py
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/recordCollection.py#L192-L214
def dropNonJournals(self, ptVal = 'J', dropBad = True, invert = False): """Drops the non journal type `Records` from the collection, this is done by checking _ptVal_ against the PT tag # Parameters _ptVal_ : `optional [str]` > Default `'J'`, The value of the PT tag to be kept, default is `'J'` the journal tag, other tags can be substituted. _dropBad_ : `optional [bool]` > Default `True`, if `True` bad `Records` will be dropped as well those that are not journal entries _invert_ : `optional [bool]` > Default `False`, Set `True` to drop journals (or the PT tag given by _ptVal_) instead of keeping them. **Note**, it still drops bad Records if _dropBad_ is `True` """ if dropBad: self.dropBadEntries() if invert: self._collection = {r for r in self._collection if r['pubType'] != ptVal.upper()} else: self._collection = {r for r in self._collection if r['pubType'] == ptVal.upper()}
[ "def", "dropNonJournals", "(", "self", ",", "ptVal", "=", "'J'", ",", "dropBad", "=", "True", ",", "invert", "=", "False", ")", ":", "if", "dropBad", ":", "self", ".", "dropBadEntries", "(", ")", "if", "invert", ":", "self", ".", "_collection", "=", "{", "r", "for", "r", "in", "self", ".", "_collection", "if", "r", "[", "'pubType'", "]", "!=", "ptVal", ".", "upper", "(", ")", "}", "else", ":", "self", ".", "_collection", "=", "{", "r", "for", "r", "in", "self", ".", "_collection", "if", "r", "[", "'pubType'", "]", "==", "ptVal", ".", "upper", "(", ")", "}" ]
Drops the non journal type `Records` from the collection, this is done by checking _ptVal_ against the PT tag # Parameters _ptVal_ : `optional [str]` > Default `'J'`, The value of the PT tag to be kept, default is `'J'` the journal tag, other tags can be substituted. _dropBad_ : `optional [bool]` > Default `True`, if `True` bad `Records` will be dropped as well those that are not journal entries _invert_ : `optional [bool]` > Default `False`, Set `True` to drop journals (or the PT tag given by _ptVal_) instead of keeping them. **Note**, it still drops bad Records if _dropBad_ is `True`
[ "Drops", "the", "non", "journal", "type", "Records", "from", "the", "collection", "this", "is", "done", "by", "checking", "_ptVal_", "against", "the", "PT", "tag" ]
python
train
43.608696
saltstack/salt
salt/modules/dnsmasq.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dnsmasq.py#L70-L131
def set_config(config_file='/etc/dnsmasq.conf', follow=True, **kwargs): ''' Sets a value or a set of values in the specified file. By default, if conf-dir is configured in this file, salt will attempt to set the option in any file inside the conf-dir where it has already been enabled. If it does not find it inside any files, it will append it to the main config file. Setting follow to False will turn off this behavior. If a config option currently appears multiple times (such as dhcp-host, which is specified at least once per host), the new option will be added to the end of the main config file (and not to any includes). If you need an option added to a specific include file, specify it as the config_file. :param string config_file: config file where settings should be updated / added. :param bool follow: attempt to set the config option inside any file within the ``conf-dir`` where it has already been enabled. :param kwargs: key value pairs that contain the configuration settings that you want set. CLI Examples: .. code-block:: bash salt '*' dnsmasq.set_config domain=mydomain.com salt '*' dnsmasq.set_config follow=False domain=mydomain.com salt '*' dnsmasq.set_config config_file=/etc/dnsmasq.conf domain=mydomain.com ''' dnsopts = get_config(config_file) includes = [config_file] if follow is True and 'conf-dir' in dnsopts: for filename in os.listdir(dnsopts['conf-dir']): if filename.startswith('.'): continue if filename.endswith('~'): continue if filename.endswith('bak'): continue if filename.startswith('#') and filename.endswith('#'): continue includes.append('{0}/{1}'.format(dnsopts['conf-dir'], filename)) ret_kwargs = {} for key in kwargs: # Filter out __pub keys as they should not be added to the config file # See Issue #34263 for more information if key.startswith('__'): continue ret_kwargs[key] = kwargs[key] if key in dnsopts: if isinstance(dnsopts[key], six.string_types): for config in includes: __salt__['file.sed'](path=config, before='^{0}=.*'.format(key), after='{0}={1}'.format(key, kwargs[key])) else: __salt__['file.append'](config_file, '{0}={1}'.format(key, kwargs[key])) else: __salt__['file.append'](config_file, '{0}={1}'.format(key, kwargs[key])) return ret_kwargs
[ "def", "set_config", "(", "config_file", "=", "'/etc/dnsmasq.conf'", ",", "follow", "=", "True", ",", "*", "*", "kwargs", ")", ":", "dnsopts", "=", "get_config", "(", "config_file", ")", "includes", "=", "[", "config_file", "]", "if", "follow", "is", "True", "and", "'conf-dir'", "in", "dnsopts", ":", "for", "filename", "in", "os", ".", "listdir", "(", "dnsopts", "[", "'conf-dir'", "]", ")", ":", "if", "filename", ".", "startswith", "(", "'.'", ")", ":", "continue", "if", "filename", ".", "endswith", "(", "'~'", ")", ":", "continue", "if", "filename", ".", "endswith", "(", "'bak'", ")", ":", "continue", "if", "filename", ".", "endswith", "(", "'#'", ")", "and", "filename", ".", "endswith", "(", "'#'", ")", ":", "continue", "includes", ".", "append", "(", "'{0}/{1}'", ".", "format", "(", "dnsopts", "[", "'conf-dir'", "]", ",", "filename", ")", ")", "ret_kwargs", "=", "{", "}", "for", "key", "in", "kwargs", ":", "# Filter out __pub keys as they should not be added to the config file", "# See Issue #34263 for more information", "if", "key", ".", "startswith", "(", "'__'", ")", ":", "continue", "ret_kwargs", "[", "key", "]", "=", "kwargs", "[", "key", "]", "if", "key", "in", "dnsopts", ":", "if", "isinstance", "(", "dnsopts", "[", "key", "]", ",", "six", ".", "string_types", ")", ":", "for", "config", "in", "includes", ":", "__salt__", "[", "'file.sed'", "]", "(", "path", "=", "config", ",", "before", "=", "'^{0}=.*'", ".", "format", "(", "key", ")", ",", "after", "=", "'{0}={1}'", ".", "format", "(", "key", ",", "kwargs", "[", "key", "]", ")", ")", "else", ":", "__salt__", "[", "'file.append'", "]", "(", "config_file", ",", "'{0}={1}'", ".", "format", "(", "key", ",", "kwargs", "[", "key", "]", ")", ")", "else", ":", "__salt__", "[", "'file.append'", "]", "(", "config_file", ",", "'{0}={1}'", ".", "format", "(", "key", ",", "kwargs", "[", "key", "]", ")", ")", "return", "ret_kwargs" ]
Sets a value or a set of values in the specified file. By default, if conf-dir is configured in this file, salt will attempt to set the option in any file inside the conf-dir where it has already been enabled. If it does not find it inside any files, it will append it to the main config file. Setting follow to False will turn off this behavior. If a config option currently appears multiple times (such as dhcp-host, which is specified at least once per host), the new option will be added to the end of the main config file (and not to any includes). If you need an option added to a specific include file, specify it as the config_file. :param string config_file: config file where settings should be updated / added. :param bool follow: attempt to set the config option inside any file within the ``conf-dir`` where it has already been enabled. :param kwargs: key value pairs that contain the configuration settings that you want set. CLI Examples: .. code-block:: bash salt '*' dnsmasq.set_config domain=mydomain.com salt '*' dnsmasq.set_config follow=False domain=mydomain.com salt '*' dnsmasq.set_config config_file=/etc/dnsmasq.conf domain=mydomain.com
[ "Sets", "a", "value", "or", "a", "set", "of", "values", "in", "the", "specified", "file", ".", "By", "default", "if", "conf", "-", "dir", "is", "configured", "in", "this", "file", "salt", "will", "attempt", "to", "set", "the", "option", "in", "any", "file", "inside", "the", "conf", "-", "dir", "where", "it", "has", "already", "been", "enabled", ".", "If", "it", "does", "not", "find", "it", "inside", "any", "files", "it", "will", "append", "it", "to", "the", "main", "config", "file", ".", "Setting", "follow", "to", "False", "will", "turn", "off", "this", "behavior", "." ]
python
train
44
wooyek/django-powerbank
src/django_powerbank/db/models/fields/__init__.py
https://github.com/wooyek/django-powerbank/blob/df91189f2ac18bacc545ccf3c81c4465fb993949/src/django_powerbank/db/models/fields/__init__.py#L189-L194
def get_prep_value(self, value): """Convert value to JSON string before save""" try: return json.dumps(value) except Exception as err: raise ValidationError(str(err))
[ "def", "get_prep_value", "(", "self", ",", "value", ")", ":", "try", ":", "return", "json", ".", "dumps", "(", "value", ")", "except", "Exception", "as", "err", ":", "raise", "ValidationError", "(", "str", "(", "err", ")", ")" ]
Convert value to JSON string before save
[ "Convert", "value", "to", "JSON", "string", "before", "save" ]
python
train
34.833333
expfactory/expfactory
expfactory/utils.py
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/utils.py#L236-L254
def getenv(variable_key, default=None, required=False, silent=True): '''getenv will attempt to get an environment variable. If the variable is not found, None is returned. :param variable_key: the variable name :param required: exit with error if not found :param silent: Do not print debugging information for variable ''' variable = os.environ.get(variable_key, default) if variable is None and required: bot.error("Cannot find environment variable %s, exiting." %variable_key) sys.exit(1) if not silent: if variable is not None: bot.verbose2("%s found as %s" %(variable_key,variable)) else: bot.verbose2("%s not defined (None)" %variable_key) return variable
[ "def", "getenv", "(", "variable_key", ",", "default", "=", "None", ",", "required", "=", "False", ",", "silent", "=", "True", ")", ":", "variable", "=", "os", ".", "environ", ".", "get", "(", "variable_key", ",", "default", ")", "if", "variable", "is", "None", "and", "required", ":", "bot", ".", "error", "(", "\"Cannot find environment variable %s, exiting.\"", "%", "variable_key", ")", "sys", ".", "exit", "(", "1", ")", "if", "not", "silent", ":", "if", "variable", "is", "not", "None", ":", "bot", ".", "verbose2", "(", "\"%s found as %s\"", "%", "(", "variable_key", ",", "variable", ")", ")", "else", ":", "bot", ".", "verbose2", "(", "\"%s not defined (None)\"", "%", "variable_key", ")", "return", "variable" ]
getenv will attempt to get an environment variable. If the variable is not found, None is returned. :param variable_key: the variable name :param required: exit with error if not found :param silent: Do not print debugging information for variable
[ "getenv", "will", "attempt", "to", "get", "an", "environment", "variable", ".", "If", "the", "variable", "is", "not", "found", "None", "is", "returned", ".", ":", "param", "variable_key", ":", "the", "variable", "name", ":", "param", "required", ":", "exit", "with", "error", "if", "not", "found", ":", "param", "silent", ":", "Do", "not", "print", "debugging", "information", "for", "variable" ]
python
train
39
72squared/redpipe
redpipe/keyspaces.py
https://github.com/72squared/redpipe/blob/e6ee518bc9f3e2fee323c8c53d08997799bd9b1b/redpipe/keyspaces.py#L265-L273
def ttl(self, name): """ get the number of seconds until the key's expiration :param name: str the name of the redis key :return: Future() """ with self.pipe as pipe: return pipe.ttl(self.redis_key(name))
[ "def", "ttl", "(", "self", ",", "name", ")", ":", "with", "self", ".", "pipe", "as", "pipe", ":", "return", "pipe", ".", "ttl", "(", "self", ".", "redis_key", "(", "name", ")", ")" ]
get the number of seconds until the key's expiration :param name: str the name of the redis key :return: Future()
[ "get", "the", "number", "of", "seconds", "until", "the", "key", "s", "expiration" ]
python
train
29
zhmcclient/python-zhmcclient
zhmcclient/_storage_group.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_storage_group.py#L304-L312
def storage_volumes(self): """ :class:`~zhmcclient.StorageVolumeManager`: Access to the :term:`storage volumes <storage volume>` in this storage group. """ # We do here some lazy loading. if not self._storage_volumes: self._storage_volumes = StorageVolumeManager(self) return self._storage_volumes
[ "def", "storage_volumes", "(", "self", ")", ":", "# We do here some lazy loading.", "if", "not", "self", ".", "_storage_volumes", ":", "self", ".", "_storage_volumes", "=", "StorageVolumeManager", "(", "self", ")", "return", "self", ".", "_storage_volumes" ]
:class:`~zhmcclient.StorageVolumeManager`: Access to the :term:`storage volumes <storage volume>` in this storage group.
[ ":", "class", ":", "~zhmcclient", ".", "StorageVolumeManager", ":", "Access", "to", "the", ":", "term", ":", "storage", "volumes", "<storage", "volume", ">", "in", "this", "storage", "group", "." ]
python
train
39.666667
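The storage_volumes property above is the standard lazy-initialisation idiom: build the child manager on first access and return the cached instance afterwards. A generic sketch with hypothetical class names:

class ExpensiveManager:
    def __init__(self, parent):
        self.parent = parent  # imagine costly setup happening here

class Parent:
    def __init__(self):
        self._volumes = None

    @property
    def volumes(self):
        if self._volumes is None:          # only the first access pays the cost
            self._volumes = ExpensiveManager(self)
        return self._volumes

p = Parent()
print(p.volumes is p.volumes)  # True -- created once, reused afterwards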
LogicalDash/LiSE
LiSE/LiSE/engine.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/LiSE/LiSE/engine.py#L448-L461
def percent_chance(self, pct): """Given a ``pct``% chance of something happening right now, decide at random whether it actually happens, and return ``True`` or ``False`` as appropriate. Values not between 0 and 100 are treated as though they were 0 or 100, whichever is nearer. """ if pct <= 0: return False if pct >= 100: return True return self.random() < pct / 100
[ "def", "percent_chance", "(", "self", ",", "pct", ")", ":", "if", "pct", "<=", "0", ":", "return", "False", "if", "pct", ">=", "100", ":", "return", "True", "return", "pct", "/", "100", "<", "self", ".", "random", "(", ")" ]
Given a ``pct``% chance of something happening right now, decide at random whether it actually happens, and return ``True`` or ``False`` as appropriate. Values not between 0 and 100 are treated as though they were 0 or 100, whichever is nearer.
[ "Given", "a", "pct", "%", "chance", "of", "something", "happening", "right", "now", "decide", "at", "random", "whether", "it", "actually", "happens", "and", "return", "True", "or", "False", "as", "appropriate", "." ]
python
train
32.357143
kyper-data/python-highcharts
highcharts/highstock/highstock_helper.py
https://github.com/kyper-data/python-highcharts/blob/a4c488ae5c2e125616efad5a722f3dfd8a9bc450/highcharts/highstock/highstock_helper.py#L99-L110
def is_js_date_utc(json): """Check if the string contains Date.UTC function and return match group(s) if there is """ JS_date_utc_pattern = r'Date\.UTC\(([0-9]+,[0-9]+,[0-9]+)(,[0-9]+,[0-9]+,[0-9]+)?(,[0-9]+)?\)' re_date = re.compile(JS_date_utc_pattern, re.M) if re_date.search(json): return re_date.search(json).group(0) else: return False
[ "def", "is_js_date_utc", "(", "json", ")", ":", "JS_date_utc_pattern", "=", "r'Date\\.UTC\\(([0-9]+,[0-9]+,[0-9]+)(,[0-9]+,[0-9]+,[0-9]+)?(,[0-9]+)?\\)'", "re_date", "=", "re", ".", "compile", "(", "JS_date_utc_pattern", ",", "re", ".", "M", ")", "if", "re_date", ".", "search", "(", "json", ")", ":", "return", "re_date", ".", "search", "(", "json", ")", ".", "group", "(", "0", ")", "else", ":", "return", "False" ]
Check if the string contains Date.UTC function and return match group(s) if there is
[ "Check", "if", "the", "string", "contains", "Date", ".", "UTC", "function", "and", "return", "match", "group", "(", "s", ")", "if", "there", "is" ]
python
train
35.083333
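The same pattern as in is_js_date_utc above, exercised against a few inputs: the optional groups admit date-only and date-plus-time forms (plus a trailing milliseconds group), while plain timestamps fail to match.

import re

pattern = r'Date\.UTC\(([0-9]+,[0-9]+,[0-9]+)(,[0-9]+,[0-9]+,[0-9]+)?(,[0-9]+)?\)'
for s in ('{"x": Date.UTC(2015,0,1)}',
          '{"x": Date.UTC(2015,0,1,12,30,0)}',
          '{"x": 1420070400000}'):
    m = re.search(pattern, s)
    print(m.group(0) if m else False)
# Date.UTC(2015,0,1)
# Date.UTC(2015,0,1,12,30,0)
# False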
ethereum/py-trie
trie/branches.py
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/branches.py#L155-L165
def get_witness_for_key_prefix(db, node_hash, key): """ Get all witness given a keypath prefix. Include 1. witness along the keypath and 2. witness in the subtrie of the last node in keypath """ validate_is_bytes(key) return tuple(_get_witness_for_key_prefix(db, node_hash, encode_to_bin(key)))
[ "def", "get_witness_for_key_prefix", "(", "db", ",", "node_hash", ",", "key", ")", ":", "validate_is_bytes", "(", "key", ")", "return", "tuple", "(", "_get_witness_for_key_prefix", "(", "db", ",", "node_hash", ",", "encode_to_bin", "(", "key", ")", ")", ")" ]
Get all witness given a keypath prefix. Include 1. witness along the keypath and 2. witness in the subtrie of the last node in keypath
[ "Get", "all", "witness", "given", "a", "keypath", "prefix", ".", "Include" ]
python
train
28.909091
uw-it-aca/uw-restclients-sws
uw_sws/section.py
https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/section.py#L101-L111
def get_sections_by_building_and_term(building, term): """ Returns a list of uw_sws.models.SectionReference objects for the passed building and term. """ url = "{}?{}".format( section_res_url_prefix, urlencode([("quarter", term.quarter.lower(),), ("facility_code", building,), ("year", term.year,), ])) return _json_to_sectionref(get_resource(url))
[ "def", "get_sections_by_building_and_term", "(", "building", ",", "term", ")", ":", "url", "=", "\"{}?{}\"", ".", "format", "(", "section_res_url_prefix", ",", "urlencode", "(", "[", "(", "\"quarter\"", ",", "term", ".", "quarter", ".", "lower", "(", ")", ",", ")", ",", "(", "\"facility_code\"", ",", "building", ",", ")", ",", "(", "\"year\"", ",", "term", ".", "year", ",", ")", ",", "]", ")", ")", "return", "_json_to_sectionref", "(", "get_resource", "(", "url", ")", ")" ]
Returns a list of uw_sws.models.SectionReference objects for the passed building and term.
[ "Returns", "a", "list", "of", "uw_sws", ".", "models", ".", "SectionReference", "objects", "for", "the", "passed", "building", "and", "term", "." ]
python
train
37.818182
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L2818-L3154
def parse_version_information(self, version_struct):
    """Parse version information structure.

    The date will be made available in three attributes of the PE
    object.

    VS_VERSIONINFO will contain the first three fields of the main
    structure: 'Length', 'ValueLength', and 'Type'

    VS_FIXEDFILEINFO will hold the rest of the fields, accessible as
    sub-attributes: 'Signature', 'StrucVersion', 'FileVersionMS',
    'FileVersionLS', 'ProductVersionMS', 'ProductVersionLS',
    'FileFlagsMask', 'FileFlags', 'FileOS', 'FileType', 'FileSubtype',
    'FileDateMS', 'FileDateLS'

    FileInfo is a list of all StringFileInfo and VarFileInfo structures.

    StringFileInfo structures will have a list as an attribute named
    'StringTable' containing all the StringTable structures. Each of
    those structures contains a dictionary 'entries' with all the
    key/value version information string pairs.

    VarFileInfo structures will have a list as an attribute named 'Var'
    containing all Var structures. Each Var structure will have a
    dictionary as an attribute named 'entry' which will contain the
    name and value of the Var.
    """

    # Retrieve the data for the version info resource
    #
    start_offset = self.get_offset_from_rva( version_struct.OffsetToData )
    raw_data = self.__data__[ start_offset : start_offset+version_struct.Size ]

    # Map the main structure and the subsequent string
    #
    versioninfo_struct = self.__unpack_data__(
        self.__VS_VERSIONINFO_format__, raw_data,
        file_offset = start_offset )

    if versioninfo_struct is None:
        return

    ustr_offset = version_struct.OffsetToData + versioninfo_struct.sizeof()
    try:
        versioninfo_string = self.get_string_u_at_rva( ustr_offset )
    except PEFormatError, excp:
        self.__warnings.append(
            'Error parsing the version information, ' +
            'attempting to read VS_VERSION_INFO string. Can\'t ' +
            'read unicode string at offset 0x%x' % ( ustr_offset ) )
        versioninfo_string = None

    # If the structure does not contain the expected name, it's assumed to be invalid
    #
    if versioninfo_string != u'VS_VERSION_INFO':
        self.__warnings.append('Invalid VS_VERSION_INFO block')
        return

    # Set the PE object's VS_VERSIONINFO to this one
    #
    self.VS_VERSIONINFO = versioninfo_struct

    # The the Key attribute to point to the unicode string identifying the structure
    #
    self.VS_VERSIONINFO.Key = versioninfo_string

    # Process the fixed version information, get the offset and structure
    #
    fixedfileinfo_offset = self.dword_align(
        versioninfo_struct.sizeof() + 2 * (len(versioninfo_string) + 1),
        version_struct.OffsetToData)
    fixedfileinfo_struct = self.__unpack_data__(
        self.__VS_FIXEDFILEINFO_format__,
        raw_data[fixedfileinfo_offset:],
        file_offset = start_offset+fixedfileinfo_offset )

    if not fixedfileinfo_struct:
        return

    # Set the PE object's VS_FIXEDFILEINFO to this one
    #
    self.VS_FIXEDFILEINFO = fixedfileinfo_struct

    # Start parsing all the StringFileInfo and VarFileInfo structures
    #
    # Get the first one
    #
    stringfileinfo_offset = self.dword_align(
        fixedfileinfo_offset + fixedfileinfo_struct.sizeof(),
        version_struct.OffsetToData)
    original_stringfileinfo_offset = stringfileinfo_offset

    # Set the PE object's attribute that will contain them all.
    #
    self.FileInfo = list()

    while True:

        # Process the StringFileInfo/VarFileInfo struct
        #
        stringfileinfo_struct = self.__unpack_data__(
            self.__StringFileInfo_format__,
            raw_data[stringfileinfo_offset:],
            file_offset = start_offset+stringfileinfo_offset )

        if stringfileinfo_struct is None:
            self.__warnings.append(
                'Error parsing StringFileInfo/VarFileInfo struct' )
            return None

        # Get the subsequent string defining the structure.
        #
        ustr_offset = ( version_struct.OffsetToData +
            stringfileinfo_offset + versioninfo_struct.sizeof() )
        try:
            stringfileinfo_string = self.get_string_u_at_rva( ustr_offset )
        except PEFormatError, excp:
            self.__warnings.append(
                'Error parsing the version information, ' +
                'attempting to read StringFileInfo string. Can\'t ' +
                'read unicode string at offset 0x%x' % ( ustr_offset ) )
            break

        # Set such string as the Key attribute
        #
        stringfileinfo_struct.Key = stringfileinfo_string

        # Append the structure to the PE object's list
        #
        self.FileInfo.append(stringfileinfo_struct)

        # Parse a StringFileInfo entry
        #
        if stringfileinfo_string and stringfileinfo_string.startswith(u'StringFileInfo'):

            if stringfileinfo_struct.Type == 1 and stringfileinfo_struct.ValueLength == 0:

                stringtable_offset = self.dword_align(
                    stringfileinfo_offset + stringfileinfo_struct.sizeof() +
                    2*(len(stringfileinfo_string)+1),
                    version_struct.OffsetToData)

                stringfileinfo_struct.StringTable = list()

                # Process the String Table entries
                #
                while True:

                    stringtable_struct = self.__unpack_data__(
                        self.__StringTable_format__,
                        raw_data[stringtable_offset:],
                        file_offset = start_offset+stringtable_offset )

                    if not stringtable_struct:
                        break

                    ustr_offset = ( version_struct.OffsetToData +
                        stringtable_offset + stringtable_struct.sizeof() )
                    try:
                        stringtable_string = self.get_string_u_at_rva( ustr_offset )
                    except PEFormatError, excp:
                        self.__warnings.append(
                            'Error parsing the version information, ' +
                            'attempting to read StringTable string. Can\'t ' +
                            'read unicode string at offset 0x%x' % ( ustr_offset ) )
                        break

                    stringtable_struct.LangID = stringtable_string
                    stringtable_struct.entries = dict()
                    stringtable_struct.entries_offsets = dict()
                    stringtable_struct.entries_lengths = dict()
                    stringfileinfo_struct.StringTable.append(stringtable_struct)

                    entry_offset = self.dword_align(
                        stringtable_offset + stringtable_struct.sizeof() +
                        2*(len(stringtable_string)+1),
                        version_struct.OffsetToData)

                    # Process all entries in the string table
                    #
                    while entry_offset < stringtable_offset + stringtable_struct.Length:

                        string_struct = self.__unpack_data__(
                            self.__String_format__, raw_data[entry_offset:],
                            file_offset = start_offset+entry_offset )

                        if not string_struct:
                            break

                        ustr_offset = ( version_struct.OffsetToData +
                            entry_offset + string_struct.sizeof() )
                        try:
                            key = self.get_string_u_at_rva( ustr_offset )
                            key_offset = self.get_offset_from_rva( ustr_offset )
                        except PEFormatError, excp:
                            self.__warnings.append(
                                'Error parsing the version information, ' +
                                'attempting to read StringTable Key string. Can\'t ' +
                                'read unicode string at offset 0x%x' % ( ustr_offset ) )
                            break

                        value_offset = self.dword_align(
                            2*(len(key)+1) + entry_offset + string_struct.sizeof(),
                            version_struct.OffsetToData)

                        ustr_offset = version_struct.OffsetToData + value_offset
                        try:
                            value = self.get_string_u_at_rva( ustr_offset,
                                max_length = string_struct.ValueLength )
                            value_offset = self.get_offset_from_rva( ustr_offset )
                        except PEFormatError, excp:
                            self.__warnings.append(
                                'Error parsing the version information, ' +
                                'attempting to read StringTable Value string. ' +
                                'Can\'t read unicode string at offset 0x%x' % ( ustr_offset ) )
                            break

                        if string_struct.Length == 0:
                            entry_offset = stringtable_offset + stringtable_struct.Length
                        else:
                            entry_offset = self.dword_align(
                                string_struct.Length+entry_offset,
                                version_struct.OffsetToData)

                        key_as_char = []
                        for c in key:
                            if ord(c)>128:
                                key_as_char.append('\\x%02x' %ord(c))
                            else:
                                key_as_char.append(c)

                        key_as_char = ''.join(key_as_char)

                        setattr(stringtable_struct, key_as_char, value)
                        stringtable_struct.entries[key] = value
                        stringtable_struct.entries_offsets[key] = (key_offset, value_offset)
                        stringtable_struct.entries_lengths[key] = (len(key), len(value))

                    new_stringtable_offset = self.dword_align(
                        stringtable_struct.Length + stringtable_offset,
                        version_struct.OffsetToData)

                    # check if the entry is crafted in a way that would lead to an infinite
                    # loop and break if so
                    #
                    if new_stringtable_offset == stringtable_offset:
                        break
                    stringtable_offset = new_stringtable_offset

                    if stringtable_offset >= stringfileinfo_struct.Length:
                        break

        # Parse a VarFileInfo entry
        #
        elif stringfileinfo_string and stringfileinfo_string.startswith( u'VarFileInfo' ):

            varfileinfo_struct = stringfileinfo_struct
            varfileinfo_struct.name = 'VarFileInfo'

            if varfileinfo_struct.Type == 1 and varfileinfo_struct.ValueLength == 0:

                var_offset = self.dword_align(
                    stringfileinfo_offset + varfileinfo_struct.sizeof() +
                    2*(len(stringfileinfo_string)+1),
                    version_struct.OffsetToData)

                varfileinfo_struct.Var = list()

                # Process all entries
                #
                while True:

                    var_struct = self.__unpack_data__(
                        self.__Var_format__,
                        raw_data[var_offset:],
                        file_offset = start_offset+var_offset )

                    if not var_struct:
                        break

                    ustr_offset = ( version_struct.OffsetToData +
                        var_offset + var_struct.sizeof() )
                    try:
                        var_string = self.get_string_u_at_rva( ustr_offset )
                    except PEFormatError, excp:
                        self.__warnings.append(
                            'Error parsing the version information, ' +
                            'attempting to read VarFileInfo Var string. ' +
                            'Can\'t read unicode string at offset 0x%x' % (ustr_offset))
                        break

                    varfileinfo_struct.Var.append(var_struct)

                    varword_offset = self.dword_align(
                        2*(len(var_string)+1) + var_offset + var_struct.sizeof(),
                        version_struct.OffsetToData)
                    orig_varword_offset = varword_offset

                    while varword_offset < orig_varword_offset + var_struct.ValueLength:
                        word1 = self.get_word_from_data(
                            raw_data[varword_offset:varword_offset+2], 0)
                        word2 = self.get_word_from_data(
                            raw_data[varword_offset+2:varword_offset+4], 0)
                        varword_offset += 4

                        if isinstance(word1, (int, long)) and isinstance(word1, (int, long)):
                            var_struct.entry = {var_string: '0x%04x 0x%04x' % (word1, word2)}

                    var_offset = self.dword_align(
                        var_offset+var_struct.Length, version_struct.OffsetToData)

                    if var_offset <= var_offset+var_struct.Length:
                        break

        # Increment and align the offset
        #
        stringfileinfo_offset = self.dword_align(
            stringfileinfo_struct.Length+stringfileinfo_offset,
            version_struct.OffsetToData)

        # Check if all the StringFileInfo and VarFileInfo items have been processed
        #
        if stringfileinfo_struct.Length == 0 or stringfileinfo_offset >= versioninfo_struct.Length:
            break
[ "def", "parse_version_information", "(", "self", ",", "version_struct", ")", ":", "# Retrieve the data for the version info resource", "#", "start_offset", "=", "self", ".", "get_offset_from_rva", "(", "version_struct", ".", "OffsetToData", ")", "raw_data", "=", "self", ".", "__data__", "[", "start_offset", ":", "start_offset", "+", "version_struct", ".", "Size", "]", "# Map the main structure and the subsequent string", "#", "versioninfo_struct", "=", "self", ".", "__unpack_data__", "(", "self", ".", "__VS_VERSIONINFO_format__", ",", "raw_data", ",", "file_offset", "=", "start_offset", ")", "if", "versioninfo_struct", "is", "None", ":", "return", "ustr_offset", "=", "version_struct", ".", "OffsetToData", "+", "versioninfo_struct", ".", "sizeof", "(", ")", "try", ":", "versioninfo_string", "=", "self", ".", "get_string_u_at_rva", "(", "ustr_offset", ")", "except", "PEFormatError", ",", "excp", ":", "self", ".", "__warnings", ".", "append", "(", "'Error parsing the version information, '", "+", "'attempting to read VS_VERSION_INFO string. Can\\'t '", "+", "'read unicode string at offset 0x%x'", "%", "(", "ustr_offset", ")", ")", "versioninfo_string", "=", "None", "# If the structure does not contain the expected name, it's assumed to be invalid", "#", "if", "versioninfo_string", "!=", "u'VS_VERSION_INFO'", ":", "self", ".", "__warnings", ".", "append", "(", "'Invalid VS_VERSION_INFO block'", ")", "return", "# Set the PE object's VS_VERSIONINFO to this one", "#", "self", ".", "VS_VERSIONINFO", "=", "versioninfo_struct", "# The the Key attribute to point to the unicode string identifying the structure", "#", "self", ".", "VS_VERSIONINFO", ".", "Key", "=", "versioninfo_string", "# Process the fixed version information, get the offset and structure", "#", "fixedfileinfo_offset", "=", "self", ".", "dword_align", "(", "versioninfo_struct", ".", "sizeof", "(", ")", "+", "2", "*", "(", "len", "(", "versioninfo_string", ")", "+", "1", ")", ",", "version_struct", ".", "OffsetToData", ")", "fixedfileinfo_struct", "=", "self", ".", "__unpack_data__", "(", "self", ".", "__VS_FIXEDFILEINFO_format__", ",", "raw_data", "[", "fixedfileinfo_offset", ":", "]", ",", "file_offset", "=", "start_offset", "+", "fixedfileinfo_offset", ")", "if", "not", "fixedfileinfo_struct", ":", "return", "# Set the PE object's VS_FIXEDFILEINFO to this one", "#", "self", ".", "VS_FIXEDFILEINFO", "=", "fixedfileinfo_struct", "# Start parsing all the StringFileInfo and VarFileInfo structures", "#", "# Get the first one", "#", "stringfileinfo_offset", "=", "self", ".", "dword_align", "(", "fixedfileinfo_offset", "+", "fixedfileinfo_struct", ".", "sizeof", "(", ")", ",", "version_struct", ".", "OffsetToData", ")", "original_stringfileinfo_offset", "=", "stringfileinfo_offset", "# Set the PE object's attribute that will contain them all.", "#", "self", ".", "FileInfo", "=", "list", "(", ")", "while", "True", ":", "# Process the StringFileInfo/VarFileInfo struct", "#", "stringfileinfo_struct", "=", "self", ".", "__unpack_data__", "(", "self", ".", "__StringFileInfo_format__", ",", "raw_data", "[", "stringfileinfo_offset", ":", "]", ",", "file_offset", "=", "start_offset", "+", "stringfileinfo_offset", ")", "if", "stringfileinfo_struct", "is", "None", ":", "self", ".", "__warnings", ".", "append", "(", "'Error parsing StringFileInfo/VarFileInfo struct'", ")", "return", "None", "# Get the subsequent string defining the structure.", "#", "ustr_offset", "=", "(", "version_struct", ".", "OffsetToData", "+", "stringfileinfo_offset", "+", 
"versioninfo_struct", ".", "sizeof", "(", ")", ")", "try", ":", "stringfileinfo_string", "=", "self", ".", "get_string_u_at_rva", "(", "ustr_offset", ")", "except", "PEFormatError", ",", "excp", ":", "self", ".", "__warnings", ".", "append", "(", "'Error parsing the version information, '", "+", "'attempting to read StringFileInfo string. Can\\'t '", "+", "'read unicode string at offset 0x%x'", "%", "(", "ustr_offset", ")", ")", "break", "# Set such string as the Key attribute", "#", "stringfileinfo_struct", ".", "Key", "=", "stringfileinfo_string", "# Append the structure to the PE object's list", "#", "self", ".", "FileInfo", ".", "append", "(", "stringfileinfo_struct", ")", "# Parse a StringFileInfo entry", "#", "if", "stringfileinfo_string", "and", "stringfileinfo_string", ".", "startswith", "(", "u'StringFileInfo'", ")", ":", "if", "stringfileinfo_struct", ".", "Type", "==", "1", "and", "stringfileinfo_struct", ".", "ValueLength", "==", "0", ":", "stringtable_offset", "=", "self", ".", "dword_align", "(", "stringfileinfo_offset", "+", "stringfileinfo_struct", ".", "sizeof", "(", ")", "+", "2", "*", "(", "len", "(", "stringfileinfo_string", ")", "+", "1", ")", ",", "version_struct", ".", "OffsetToData", ")", "stringfileinfo_struct", ".", "StringTable", "=", "list", "(", ")", "# Process the String Table entries", "#", "while", "True", ":", "stringtable_struct", "=", "self", ".", "__unpack_data__", "(", "self", ".", "__StringTable_format__", ",", "raw_data", "[", "stringtable_offset", ":", "]", ",", "file_offset", "=", "start_offset", "+", "stringtable_offset", ")", "if", "not", "stringtable_struct", ":", "break", "ustr_offset", "=", "(", "version_struct", ".", "OffsetToData", "+", "stringtable_offset", "+", "stringtable_struct", ".", "sizeof", "(", ")", ")", "try", ":", "stringtable_string", "=", "self", ".", "get_string_u_at_rva", "(", "ustr_offset", ")", "except", "PEFormatError", ",", "excp", ":", "self", ".", "__warnings", ".", "append", "(", "'Error parsing the version information, '", "+", "'attempting to read StringTable string. 
Can\\'t '", "+", "'read unicode string at offset 0x%x'", "%", "(", "ustr_offset", ")", ")", "break", "stringtable_struct", ".", "LangID", "=", "stringtable_string", "stringtable_struct", ".", "entries", "=", "dict", "(", ")", "stringtable_struct", ".", "entries_offsets", "=", "dict", "(", ")", "stringtable_struct", ".", "entries_lengths", "=", "dict", "(", ")", "stringfileinfo_struct", ".", "StringTable", ".", "append", "(", "stringtable_struct", ")", "entry_offset", "=", "self", ".", "dword_align", "(", "stringtable_offset", "+", "stringtable_struct", ".", "sizeof", "(", ")", "+", "2", "*", "(", "len", "(", "stringtable_string", ")", "+", "1", ")", ",", "version_struct", ".", "OffsetToData", ")", "# Process all entries in the string table", "#", "while", "entry_offset", "<", "stringtable_offset", "+", "stringtable_struct", ".", "Length", ":", "string_struct", "=", "self", ".", "__unpack_data__", "(", "self", ".", "__String_format__", ",", "raw_data", "[", "entry_offset", ":", "]", ",", "file_offset", "=", "start_offset", "+", "entry_offset", ")", "if", "not", "string_struct", ":", "break", "ustr_offset", "=", "(", "version_struct", ".", "OffsetToData", "+", "entry_offset", "+", "string_struct", ".", "sizeof", "(", ")", ")", "try", ":", "key", "=", "self", ".", "get_string_u_at_rva", "(", "ustr_offset", ")", "key_offset", "=", "self", ".", "get_offset_from_rva", "(", "ustr_offset", ")", "except", "PEFormatError", ",", "excp", ":", "self", ".", "__warnings", ".", "append", "(", "'Error parsing the version information, '", "+", "'attempting to read StringTable Key string. Can\\'t '", "+", "'read unicode string at offset 0x%x'", "%", "(", "ustr_offset", ")", ")", "break", "value_offset", "=", "self", ".", "dword_align", "(", "2", "*", "(", "len", "(", "key", ")", "+", "1", ")", "+", "entry_offset", "+", "string_struct", ".", "sizeof", "(", ")", ",", "version_struct", ".", "OffsetToData", ")", "ustr_offset", "=", "version_struct", ".", "OffsetToData", "+", "value_offset", "try", ":", "value", "=", "self", ".", "get_string_u_at_rva", "(", "ustr_offset", ",", "max_length", "=", "string_struct", ".", "ValueLength", ")", "value_offset", "=", "self", ".", "get_offset_from_rva", "(", "ustr_offset", ")", "except", "PEFormatError", ",", "excp", ":", "self", ".", "__warnings", ".", "append", "(", "'Error parsing the version information, '", "+", "'attempting to read StringTable Value string. 
'", "+", "'Can\\'t read unicode string at offset 0x%x'", "%", "(", "ustr_offset", ")", ")", "break", "if", "string_struct", ".", "Length", "==", "0", ":", "entry_offset", "=", "stringtable_offset", "+", "stringtable_struct", ".", "Length", "else", ":", "entry_offset", "=", "self", ".", "dword_align", "(", "string_struct", ".", "Length", "+", "entry_offset", ",", "version_struct", ".", "OffsetToData", ")", "key_as_char", "=", "[", "]", "for", "c", "in", "key", ":", "if", "ord", "(", "c", ")", ">", "128", ":", "key_as_char", ".", "append", "(", "'\\\\x%02x'", "%", "ord", "(", "c", ")", ")", "else", ":", "key_as_char", ".", "append", "(", "c", ")", "key_as_char", "=", "''", ".", "join", "(", "key_as_char", ")", "setattr", "(", "stringtable_struct", ",", "key_as_char", ",", "value", ")", "stringtable_struct", ".", "entries", "[", "key", "]", "=", "value", "stringtable_struct", ".", "entries_offsets", "[", "key", "]", "=", "(", "key_offset", ",", "value_offset", ")", "stringtable_struct", ".", "entries_lengths", "[", "key", "]", "=", "(", "len", "(", "key", ")", ",", "len", "(", "value", ")", ")", "new_stringtable_offset", "=", "self", ".", "dword_align", "(", "stringtable_struct", ".", "Length", "+", "stringtable_offset", ",", "version_struct", ".", "OffsetToData", ")", "# check if the entry is crafted in a way that would lead to an infinite", "# loop and break if so", "#", "if", "new_stringtable_offset", "==", "stringtable_offset", ":", "break", "stringtable_offset", "=", "new_stringtable_offset", "if", "stringtable_offset", ">=", "stringfileinfo_struct", ".", "Length", ":", "break", "# Parse a VarFileInfo entry", "#", "elif", "stringfileinfo_string", "and", "stringfileinfo_string", ".", "startswith", "(", "u'VarFileInfo'", ")", ":", "varfileinfo_struct", "=", "stringfileinfo_struct", "varfileinfo_struct", ".", "name", "=", "'VarFileInfo'", "if", "varfileinfo_struct", ".", "Type", "==", "1", "and", "varfileinfo_struct", ".", "ValueLength", "==", "0", ":", "var_offset", "=", "self", ".", "dword_align", "(", "stringfileinfo_offset", "+", "varfileinfo_struct", ".", "sizeof", "(", ")", "+", "2", "*", "(", "len", "(", "stringfileinfo_string", ")", "+", "1", ")", ",", "version_struct", ".", "OffsetToData", ")", "varfileinfo_struct", ".", "Var", "=", "list", "(", ")", "# Process all entries", "#", "while", "True", ":", "var_struct", "=", "self", ".", "__unpack_data__", "(", "self", ".", "__Var_format__", ",", "raw_data", "[", "var_offset", ":", "]", ",", "file_offset", "=", "start_offset", "+", "var_offset", ")", "if", "not", "var_struct", ":", "break", "ustr_offset", "=", "(", "version_struct", ".", "OffsetToData", "+", "var_offset", "+", "var_struct", ".", "sizeof", "(", ")", ")", "try", ":", "var_string", "=", "self", ".", "get_string_u_at_rva", "(", "ustr_offset", ")", "except", "PEFormatError", ",", "excp", ":", "self", ".", "__warnings", ".", "append", "(", "'Error parsing the version information, '", "+", "'attempting to read VarFileInfo Var string. 
'", "+", "'Can\\'t read unicode string at offset 0x%x'", "%", "(", "ustr_offset", ")", ")", "break", "varfileinfo_struct", ".", "Var", ".", "append", "(", "var_struct", ")", "varword_offset", "=", "self", ".", "dword_align", "(", "2", "*", "(", "len", "(", "var_string", ")", "+", "1", ")", "+", "var_offset", "+", "var_struct", ".", "sizeof", "(", ")", ",", "version_struct", ".", "OffsetToData", ")", "orig_varword_offset", "=", "varword_offset", "while", "varword_offset", "<", "orig_varword_offset", "+", "var_struct", ".", "ValueLength", ":", "word1", "=", "self", ".", "get_word_from_data", "(", "raw_data", "[", "varword_offset", ":", "varword_offset", "+", "2", "]", ",", "0", ")", "word2", "=", "self", ".", "get_word_from_data", "(", "raw_data", "[", "varword_offset", "+", "2", ":", "varword_offset", "+", "4", "]", ",", "0", ")", "varword_offset", "+=", "4", "if", "isinstance", "(", "word1", ",", "(", "int", ",", "long", ")", ")", "and", "isinstance", "(", "word1", ",", "(", "int", ",", "long", ")", ")", ":", "var_struct", ".", "entry", "=", "{", "var_string", ":", "'0x%04x 0x%04x'", "%", "(", "word1", ",", "word2", ")", "}", "var_offset", "=", "self", ".", "dword_align", "(", "var_offset", "+", "var_struct", ".", "Length", ",", "version_struct", ".", "OffsetToData", ")", "if", "var_offset", "<=", "var_offset", "+", "var_struct", ".", "Length", ":", "break", "# Increment and align the offset", "#", "stringfileinfo_offset", "=", "self", ".", "dword_align", "(", "stringfileinfo_struct", ".", "Length", "+", "stringfileinfo_offset", ",", "version_struct", ".", "OffsetToData", ")", "# Check if all the StringFileInfo and VarFileInfo items have been processed", "#", "if", "stringfileinfo_struct", ".", "Length", "==", "0", "or", "stringfileinfo_offset", ">=", "versioninfo_struct", ".", "Length", ":", "break" ]
Parse version information structure.

The date will be made available in three attributes of the PE
object.

VS_VERSIONINFO will contain the first three fields of the main
structure: 'Length', 'ValueLength', and 'Type'

VS_FIXEDFILEINFO will hold the rest of the fields, accessible as
sub-attributes: 'Signature', 'StrucVersion', 'FileVersionMS',
'FileVersionLS', 'ProductVersionMS', 'ProductVersionLS',
'FileFlagsMask', 'FileFlags', 'FileOS', 'FileType', 'FileSubtype',
'FileDateMS', 'FileDateLS'

FileInfo is a list of all StringFileInfo and VarFileInfo structures.

StringFileInfo structures will have a list as an attribute named
'StringTable' containing all the StringTable structures. Each of
those structures contains a dictionary 'entries' with all the
key/value version information string pairs.

VarFileInfo structures will have a list as an attribute named 'Var'
containing all Var structures. Each Var structure will have a
dictionary as an attribute named 'entry' which will contain the
name and value of the Var.
[ "Parse", "version", "information", "structure", ".", "The", "date", "will", "be", "made", "available", "in", "three", "attributes", "of", "the", "PE", "object", ".", "VS_VERSIONINFO", "will", "contain", "the", "first", "three", "fields", "of", "the", "main", "structure", ":", "Length", "ValueLength", "and", "Type", "VS_FIXEDFILEINFO", "will", "hold", "the", "rest", "of", "the", "fields", "accessible", "as", "sub", "-", "attributes", ":", "Signature", "StrucVersion", "FileVersionMS", "FileVersionLS", "ProductVersionMS", "ProductVersionLS", "FileFlagsMask", "FileFlags", "FileOS", "FileType", "FileSubtype", "FileDateMS", "FileDateLS", "FileInfo", "is", "a", "list", "of", "all", "StringFileInfo", "and", "VarFileInfo", "structures", ".", "StringFileInfo", "structures", "will", "have", "a", "list", "as", "an", "attribute", "named", "StringTable", "containing", "all", "the", "StringTable", "structures", ".", "Each", "of", "those", "structures", "contains", "a", "dictionary", "entries", "with", "all", "the", "key", "/", "value", "version", "information", "string", "pairs", ".", "VarFileInfo", "structures", "will", "have", "a", "list", "as", "an", "attribute", "named", "Var", "containing", "all", "Var", "structures", ".", "Each", "Var", "structure", "will", "have", "a", "dictionary", "as", "an", "attribute", "named", "entry", "which", "will", "contain", "the", "name", "and", "value", "of", "the", "Var", "." ]
python
train
47.068249
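A read-side sketch for the attributes this method populates, following the docstring above; the executable path is a placeholder, and the layout assumed is this pefile version's, where FileInfo is a flat list of keyed structures.

import pefile

pe = pefile.PE('example.exe')  # placeholder path; parsing fills the attributes

if hasattr(pe, 'VS_FIXEDFILEINFO'):
    ffi = pe.VS_FIXEDFILEINFO
    # FileVersionMS/LS pack the four version components into two DWORDs.
    print('%d.%d.%d.%d' % (ffi.FileVersionMS >> 16, ffi.FileVersionMS & 0xffff,
                           ffi.FileVersionLS >> 16, ffi.FileVersionLS & 0xffff))

for fileinfo in getattr(pe, 'FileInfo', []):
    if fileinfo.Key.startswith(u'StringFileInfo'):
        for table in fileinfo.StringTable:
            for key, value in table.entries.items():
                print('%s = %s' % (key, value))
    elif fileinfo.Key.startswith(u'VarFileInfo'):
        for var in fileinfo.Var:
            print(getattr(var, 'entry', None))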
dogoncouch/logdissect
logdissect/core.py
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/core.py#L82-L95
def run_parse(self):
    """Parse one or more log files"""
    # Data set already has source file names from load_inputs
    parsedset = {}
    parsedset['data_set'] = []
    for log in self.input_files:
        parsemodule = self.parse_modules[self.args.parser]
        try:
            if self.args.tzone:
                parsemodule.tzone = self.args.tzone
        except NameError:
            pass
        parsedset['data_set'].append(parsemodule.parse_file(log))
    self.data_set = parsedset
    del(parsedset)
[ "def", "run_parse", "(", "self", ")", ":", "# Data set already has source file names from load_inputs", "parsedset", "=", "{", "}", "parsedset", "[", "'data_set'", "]", "=", "[", "]", "for", "log", "in", "self", ".", "input_files", ":", "parsemodule", "=", "self", ".", "parse_modules", "[", "self", ".", "args", ".", "parser", "]", "try", ":", "if", "self", ".", "args", ".", "tzone", ":", "parsemodule", ".", "tzone", "=", "self", ".", "args", ".", "tzone", "except", "NameError", ":", "pass", "parsedset", "[", "'data_set'", "]", ".", "append", "(", "parsemodule", ".", "parse_file", "(", "log", ")", ")", "self", ".", "data_set", "=", "parsedset", "del", "(", "parsedset", ")" ]
Parse one or more log files
[ "Parse", "one", "or", "more", "log", "files" ]
python
train
38.857143
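An illustrative sketch of the data_set shape this method builds; DummyParser is a stand-in for the real logdissect parse modules.

class DummyParser(object):
    tzone = None

    def parse_file(self, path):
        # A real parse module returns a per-file dict of parsed entries.
        return {'source_path': path, 'entries': []}

parser = DummyParser()
data_set = {'data_set': [parser.parse_file(f) for f in ('a.log', 'b.log')]}
print(len(data_set['data_set']))  # -> 2, one parsed result per input file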
kennethreitz/maya
maya/core.py
https://github.com/kennethreitz/maya/blob/774b141d91a83a5d77cb5351db3d02bf50564b21/maya/core.py#L22-L43
def validate_class_type_arguments(operator):
    """
    Decorator to validate all the arguments to function
    are of the type of calling class for passed operator
    """

    def inner(function):
        def wrapper(self, *args, **kwargs):
            for arg in args + tuple(kwargs.values()):
                if not isinstance(arg, self.__class__):
                    raise TypeError(
                        'unorderable types: {}() {} {}()'.format(
                            type(self).__name__, operator, type(arg).__name__
                        )
                    )
            return function(self, *args, **kwargs)

        return wrapper

    return inner
[ "def", "validate_class_type_arguments", "(", "operator", ")", ":", "def", "inner", "(", "function", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "arg", "in", "args", "+", "tuple", "(", "kwargs", ".", "values", "(", ")", ")", ":", "if", "not", "isinstance", "(", "arg", ",", "self", ".", "__class__", ")", ":", "raise", "TypeError", "(", "'unorderable types: {}() {} {}()'", ".", "format", "(", "type", "(", "self", ")", ".", "__name__", ",", "operator", ",", "type", "(", "arg", ")", ".", "__name__", ")", ")", "return", "function", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "inner" ]
Decorator to validate all the arguments to function are of the type of calling class for passed operator
[ "Decorator", "to", "validate", "all", "the", "arguments", "to", "function", "are", "of", "the", "type", "of", "calling", "class", "for", "passed", "operator" ]
python
train
29.818182
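A usage sketch for the decorator above; Interval is a made-up class, and the decorator is assumed to be in scope (it lives in maya's core module).

class Interval(object):
    def __init__(self, length):
        self.length = length

    # Reject comparisons against anything that is not an Interval.
    @validate_class_type_arguments('<')
    def __lt__(self, other):
        return self.length < other.length

Interval(1) < Interval(2)  # -> True
Interval(1) < 2            # -> TypeError: unorderable types: Interval() < int()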
jonathf/chaospy
chaospy/quad/collection/genz_keister/gk24.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/quad/collection/genz_keister/gk24.py#L7-L35
def quad_genz_keister_24 ( order ):
    """
    Hermite Genz-Keister 24 rule.

    Args:
        order (int):
            The quadrature order. Must be in the interval (0, 8).

    Returns:
        (:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):
            Abscissas and weights

    Examples:
        >>> abscissas, weights = quad_genz_keister_24(1)
        >>> print(numpy.around(abscissas, 4))
        [-1.7321 0. 1.7321]
        >>> print(numpy.around(weights, 4))
        [0.1667 0.6667 0.1667]
    """
    order = sorted(GENZ_KEISTER_24.keys())[order]
    abscissas, weights = GENZ_KEISTER_24[order]
    abscissas = numpy.array(abscissas)
    weights = numpy.array(weights)
    weights /= numpy.sum(weights)
    abscissas *= numpy.sqrt(2)
    return abscissas, weights
[ "def", "quad_genz_keister_24", "(", "order", ")", ":", "order", "=", "sorted", "(", "GENZ_KEISTER_24", ".", "keys", "(", ")", ")", "[", "order", "]", "abscissas", ",", "weights", "=", "GENZ_KEISTER_24", "[", "order", "]", "abscissas", "=", "numpy", ".", "array", "(", "abscissas", ")", "weights", "=", "numpy", ".", "array", "(", "weights", ")", "weights", "/=", "numpy", ".", "sum", "(", "weights", ")", "abscissas", "*=", "numpy", ".", "sqrt", "(", "2", ")", "return", "abscissas", ",", "weights" ]
Hermite Genz-Keister 24 rule.

Args:
    order (int):
        The quadrature order. Must be in the interval (0, 8).

Returns:
    (:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):
        Abscissas and weights

Examples:
    >>> abscissas, weights = quad_genz_keister_24(1)
    >>> print(numpy.around(abscissas, 4))
    [-1.7321 0. 1.7321]
    >>> print(numpy.around(weights, 4))
    [0.1667 0.6667 0.1667]
[ "Hermite", "Genz", "-", "Keister", "24", "rule", "." ]
python
train
26.310345
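A quick sanity check built on the docstring's example; the import path follows this record's module path. Because the nodes are rescaled by sqrt(2) and the weights normalized, the rule should reproduce the second moment of a standard normal.

import numpy
from chaospy.quad.collection.genz_keister.gk24 import quad_genz_keister_24

abscissas, weights = quad_genz_keister_24(1)
# E[x**2] under the standard normal is 1; with nodes (-1.7321, 0, 1.7321)
# and weights (1/6, 2/3, 1/6) the discrete sum is approximately 1.0.
print(numpy.sum(weights * abscissas ** 2))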
sthysel/knobs
src/environment.py
https://github.com/sthysel/knobs/blob/1d01f50f643068076e38118a93fed9375ea3ac81/src/environment.py#L120-L127
def dotenv_values(dotenv_path):
    """
    :param dotenv_path: env file
    :return: ordered dict
    """
    values = OrderedDict(parse_dotenv(dotenv_path))
    values = resolve_nested_variables(values)
    return values
[ "def", "dotenv_values", "(", "dotenv_path", ")", ":", "values", "=", "OrderedDict", "(", "parse_dotenv", "(", "dotenv_path", ")", ")", "values", "=", "resolve_nested_variables", "(", "values", ")", "return", "values" ]
:param dotenv_path: env file
:return: ordered dict
[ ":", "param", "dotenv_path", ":", "env", "file", ":", "return", ":", "ordered", "dict" ]
python
train
26.875
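A minimal sketch, assuming dotenv_values is importable from this module and that resolve_nested_variables expands POSIX-style ${VAR} references (an assumption about the dotenv dialect in use).

from environment import dotenv_values  # hypothetical import path

with open('.env', 'w') as f:
    f.write('HOST=localhost\nURL=http://${HOST}:8000\n')

values = dotenv_values('.env')  # OrderedDict, file order preserved
print(values['URL'])            # -> http://localhost:8000 if ${HOST} resolves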
JohnVinyard/zounds
zounds/learn/wgan.py
https://github.com/JohnVinyard/zounds/blob/337b3f98753d09eaab1c72dcd37bb852a3fa5ac6/zounds/learn/wgan.py#L61-L99
def _gradient_penalty(self, real_samples, fake_samples, kwargs):
    """
    Compute the norm of the gradients for each sample in a batch, and
    penalize anything on either side of unit norm
    """
    import torch
    from torch.autograd import Variable, grad

    real_samples = real_samples.view(fake_samples.shape)

    subset_size = real_samples.shape[0]

    real_samples = real_samples[:subset_size]
    fake_samples = fake_samples[:subset_size]

    alpha = torch.rand(subset_size)
    if self.use_cuda:
        alpha = alpha.cuda()

    alpha = alpha.view((-1,) + ((1,) * (real_samples.dim() - 1)))
    interpolates = alpha * real_samples + ((1 - alpha) * fake_samples)
    interpolates = Variable(interpolates, requires_grad=True)
    if self.use_cuda:
        interpolates = interpolates.cuda()

    d_output = self.critic(interpolates, **kwargs)

    grad_ouputs = torch.ones(d_output.size())
    if self.use_cuda:
        grad_ouputs = grad_ouputs.cuda()

    gradients = grad(
        outputs=d_output,
        inputs=interpolates,
        grad_outputs=grad_ouputs,
        create_graph=True,
        retain_graph=True,
        only_inputs=True)[0]

    return ((gradients.norm(2, dim=1) - 1) ** 2).mean() * 10
[ "def", "_gradient_penalty", "(", "self", ",", "real_samples", ",", "fake_samples", ",", "kwargs", ")", ":", "import", "torch", "from", "torch", ".", "autograd", "import", "Variable", ",", "grad", "real_samples", "=", "real_samples", ".", "view", "(", "fake_samples", ".", "shape", ")", "subset_size", "=", "real_samples", ".", "shape", "[", "0", "]", "real_samples", "=", "real_samples", "[", ":", "subset_size", "]", "fake_samples", "=", "fake_samples", "[", ":", "subset_size", "]", "alpha", "=", "torch", ".", "rand", "(", "subset_size", ")", "if", "self", ".", "use_cuda", ":", "alpha", "=", "alpha", ".", "cuda", "(", ")", "alpha", "=", "alpha", ".", "view", "(", "(", "-", "1", ",", ")", "+", "(", "(", "1", ",", ")", "*", "(", "real_samples", ".", "dim", "(", ")", "-", "1", ")", ")", ")", "interpolates", "=", "alpha", "*", "real_samples", "+", "(", "(", "1", "-", "alpha", ")", "*", "fake_samples", ")", "interpolates", "=", "Variable", "(", "interpolates", ",", "requires_grad", "=", "True", ")", "if", "self", ".", "use_cuda", ":", "interpolates", "=", "interpolates", ".", "cuda", "(", ")", "d_output", "=", "self", ".", "critic", "(", "interpolates", ",", "*", "*", "kwargs", ")", "grad_ouputs", "=", "torch", ".", "ones", "(", "d_output", ".", "size", "(", ")", ")", "if", "self", ".", "use_cuda", ":", "grad_ouputs", "=", "grad_ouputs", ".", "cuda", "(", ")", "gradients", "=", "grad", "(", "outputs", "=", "d_output", ",", "inputs", "=", "interpolates", ",", "grad_outputs", "=", "grad_ouputs", ",", "create_graph", "=", "True", ",", "retain_graph", "=", "True", ",", "only_inputs", "=", "True", ")", "[", "0", "]", "return", "(", "(", "gradients", ".", "norm", "(", "2", ",", "dim", "=", "1", ")", "-", "1", ")", "**", "2", ")", ".", "mean", "(", ")", "*", "10" ]
Compute the norm of the gradients for each sample in a batch, and penalize anything on either side of unit norm
[ "Compute", "the", "norm", "of", "the", "gradients", "for", "each", "sample", "in", "a", "batch", "and", "penalize", "anything", "on", "either", "side", "of", "unit", "norm" ]
python
train
33.538462
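The same WGAN-GP penalty can be written as a standalone function; this sketch uses current PyTorch idioms (requires_grad_ instead of the legacy Variable API above) and flattens the gradients before taking per-sample norms.

import torch

def gradient_penalty(critic, real, fake, gp_weight=10.0):
    # Random per-sample mixing coefficients, broadcast over feature dims.
    alpha = torch.rand(real.size(0), *([1] * (real.dim() - 1)))
    interpolates = (alpha * real + (1 - alpha) * fake).requires_grad_(True)

    d_output = critic(interpolates)
    gradients = torch.autograd.grad(
        outputs=d_output, inputs=interpolates,
        grad_outputs=torch.ones_like(d_output),
        create_graph=True, retain_graph=True, only_inputs=True)[0]

    # Penalize deviation of each sample's gradient norm from 1.
    gradients = gradients.view(gradients.size(0), -1)
    return ((gradients.norm(2, dim=1) - 1) ** 2).mean() * gp_weight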
pyviz/holoviews
holoviews/core/boundingregion.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/boundingregion.py#L147-L154
def contains(self, x, y):
    """
    Returns true if the given point is contained within the
    bounding box, where all boundaries of the box are
    considered to be inclusive.
    """
    left, bottom, right, top = self.aarect().lbrt()
    return (left <= x <= right) and (bottom <= y <= top)
[ "def", "contains", "(", "self", ",", "x", ",", "y", ")", ":", "left", ",", "bottom", ",", "right", ",", "top", "=", "self", ".", "aarect", "(", ")", ".", "lbrt", "(", ")", "return", "(", "left", "<=", "x", "<=", "right", ")", "and", "(", "bottom", "<=", "y", "<=", "top", ")" ]
Returns true if the given point is contained within the bounding box, where all boundaries of the box are considered to be inclusive.
[ "Returns", "true", "if", "the", "given", "point", "is", "contained", "within", "the", "bounding", "box", "where", "all", "boundaries", "of", "the", "box", "are", "considered", "to", "be", "inclusive", "." ]
python
train
39.625
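A usage sketch; the constructor form BoundingBox(points=((left, bottom), (right, top))) is an assumption about this boundingregion module.

from holoviews.core.boundingregion import BoundingBox

bb = BoundingBox(points=((0, 0), (2, 1)))
print(bb.contains(2, 1))     # True: boundaries are inclusive
print(bb.contains(2.01, 1))  # False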
bitesofcode/projexui
projexui/xcommands.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/xcommands.py#L472-L553
def setup(applicationName,
          applicationType=None,
          style='plastique',
          splash='',
          splashType=None,
          splashTextColor='white',
          splashTextAlign=None,
          theme=''):
    """
    Wrapper system for the QApplication creation process to handle all
    proper pre-application setup. This method will verify that there is
    no application running, creating one if necessary. If no application
    is created, a None value is returned - signaling that there is
    already an app running. If you need to specify your own QApplication
    subclass, you can do so through the applicationType parameter.

    :note   This method should always be used with the exec_ method to
            handle the post setup process.

    :param  applicationName | <str>
            applicationType | <subclass of QApplication> || None
            style           | <str> || <QStyle> | style to use for the new app
            splash          | <str> | filepath to use for a splash screen
            splashType      | <subclass of QSplashScreen> || None
            splashTextColor | <str> || <QColor>
            splashTextAlign | <Qt.Alignment>

    :usage  |import projexui
            |
            |def main(argv):
            |    # initialize the application
            |    data = projexui.setup()
            |
            |    # do some initialization code
            |    window = MyWindow()
            |    window.show()
            |
            |    # execute the application
            |    projexui.exec_(window, data)

    :return { <str> key: <variant> value, .. }
    """
    import_qt(globals())

    output = {}

    # check to see if there is a qapplication running
    if not QtGui.QApplication.instance():
        # make sure we have a valid QApplication type
        if applicationType is None:
            applicationType = QtGui.QApplication

        app = applicationType([applicationName])
        app.setApplicationName(applicationName)
        app.setQuitOnLastWindowClosed(True)

        stylize(app, style=style, theme=theme)

        # utilized with the projexui.config.xschemeconfig
        app.setProperty('useScheme', wrapVariant(True))
        output['app'] = app

    # create a new splash screen if desired
    if splash:
        if not splashType:
            splashType = XLoggerSplashScreen

        pixmap = QtGui.QPixmap(splash)
        screen = splashType(pixmap)

        if splashTextAlign is None:
            splashTextAlign = QtCore.Qt.AlignLeft | QtCore.Qt.AlignBottom

        screen.setTextColor(QtGui.QColor(splashTextColor))
        screen.setTextAlignment(splashTextAlign)
        screen.show()

        QtGui.QApplication.instance().processEvents()

        output['splash'] = screen

    return output
[ "def", "setup", "(", "applicationName", ",", "applicationType", "=", "None", ",", "style", "=", "'plastique'", ",", "splash", "=", "''", ",", "splashType", "=", "None", ",", "splashTextColor", "=", "'white'", ",", "splashTextAlign", "=", "None", ",", "theme", "=", "''", ")", ":", "import_qt", "(", "globals", "(", ")", ")", "output", "=", "{", "}", "# check to see if there is a qapplication running\r", "if", "not", "QtGui", ".", "QApplication", ".", "instance", "(", ")", ":", "# make sure we have a valid QApplication type\r", "if", "applicationType", "is", "None", ":", "applicationType", "=", "QtGui", ".", "QApplication", "app", "=", "applicationType", "(", "[", "applicationName", "]", ")", "app", ".", "setApplicationName", "(", "applicationName", ")", "app", ".", "setQuitOnLastWindowClosed", "(", "True", ")", "stylize", "(", "app", ",", "style", "=", "style", ",", "theme", "=", "theme", ")", "# utilized with the projexui.config.xschemeconfig\r", "app", ".", "setProperty", "(", "'useScheme'", ",", "wrapVariant", "(", "True", ")", ")", "output", "[", "'app'", "]", "=", "app", "# create a new splash screen if desired\r", "if", "splash", ":", "if", "not", "splashType", ":", "splashType", "=", "XLoggerSplashScreen", "pixmap", "=", "QtGui", ".", "QPixmap", "(", "splash", ")", "screen", "=", "splashType", "(", "pixmap", ")", "if", "splashTextAlign", "is", "None", ":", "splashTextAlign", "=", "QtCore", ".", "Qt", ".", "AlignLeft", "|", "QtCore", ".", "Qt", ".", "AlignBottom", "screen", ".", "setTextColor", "(", "QtGui", ".", "QColor", "(", "splashTextColor", ")", ")", "screen", ".", "setTextAlignment", "(", "splashTextAlign", ")", "screen", ".", "show", "(", ")", "QtGui", ".", "QApplication", ".", "instance", "(", ")", ".", "processEvents", "(", ")", "output", "[", "'splash'", "]", "=", "screen", "return", "output" ]
Wrapper system for the QApplication creation process to handle all proper
pre-application setup. This method will verify that there is no application
running, creating one if necessary. If no application is created, a None
value is returned - signaling that there is already an app running. If you
need to specify your own QApplication subclass, you can do so through the
applicationType parameter.

:note   This method should always be used with the exec_ method to
        handle the post setup process.

:param  applicationName | <str>
        applicationType | <subclass of QApplication> || None
        style           | <str> || <QStyle> | style to use for the new app
        splash          | <str> | filepath to use for a splash screen
        splashType      | <subclass of QSplashScreen> || None
        splashTextColor | <str> || <QColor>
        splashTextAlign | <Qt.Alignment>

:usage  |import projexui
        |
        |def main(argv):
        |    # initialize the application
        |    data = projexui.setup()
        |
        |    # do some initialization code
        |    window = MyWindow()
        |    window.show()
        |
        |    # execute the application
        |    projexui.exec_(window, data)

:return { <str> key: <variant> value, .. }
[ "Wrapper", "system", "for", "the", "QApplication", "creation", "process", "to", "handle", "all", "proper", "pre", "-", "application", "setup", ".", "This", "method", "will", "verify", "that", "there", "is", "no", "application", "running", "creating", "one", "if", "necessary", ".", "If", "no", "application", "is", "created", "a", "None", "value", "is", "returned", "-", "signaling", "that", "there", "is", "already", "an", "app", "running", ".", "If", "you", "need", "to", "specify", "your", "own", "QApplication", "subclass", "you", "can", "do", "so", "through", "the", "applicationType", "parameter", ".", ":", "note", "This", "method", "should", "always", "be", "used", "with", "the", "exec_", "method", "to", "handle", "the", "post", "setup", "process", ".", ":", "param", "applicationName", "|", "<str", ">", "applicationType", "|", "<subclass", "of", "QApplication", ">", "||", "None", "style", "|", "<str", ">", "||", "<QStyle", ">", "|", "style", "to", "use", "for", "the", "new", "app", "splash", "|", "<str", ">", "|", "filepath", "to", "use", "for", "a", "splash", "screen", "splashType", "|", "<subclass", "of", "QSplashScreen", ">", "||", "None", "splashTextColor", "|", "<str", ">", "||", "<QColor", ">", "splashTextAlign", "|", "<Qt", ".", "Alignment", ">", ":", "usage", "|import", "projexui", "|", "|def", "main", "(", "argv", ")", ":", "|", "#", "initialize", "the", "application", "|", "data", "=", "projexui", ".", "setup", "()", "|", "|", "#", "do", "some", "initialization", "code", "|", "window", "=", "MyWindow", "()", "|", "window", ".", "show", "()", "|", "|", "#", "execute", "the", "application", "|", "projexui", ".", "exec_", "(", "window", "data", ")", ":", "return", "{", "<str", ">", "key", ":", "<variant", ">", "value", "..", "}" ]
python
train
36.292683
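A concrete version of the docstring's usage block; MyWindow is a placeholder widget class and the splash path is illustrative.

import projexui

def main(argv):
    # setup() creates the QApplication (if needed) and optional splash screen
    data = projexui.setup('MyApp', splash='./splash.png')

    window = MyWindow()  # placeholder QWidget subclass
    window.show()

    # exec_() runs the event loop and finishes the splash screen from `data`
    projexui.exec_(window, data)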
kubernetes-client/python
kubernetes/client/apis/node_v1beta1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/node_v1beta1_api.py#L38-L60
def create_runtime_class(self, body, **kwargs):
    """
    create a RuntimeClass
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_runtime_class(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param V1beta1RuntimeClass body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be
        persisted. An invalid or unrecognized dryRun directive will result in an
        error response and no further processing of the request. Valid values
        are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor
        or entity that is making these changes. The value must be less than or
        128 characters long, and only contain printable characters, as defined
        by https://golang.org/pkg/unicode/#IsPrint.
    :return: V1beta1RuntimeClass
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.create_runtime_class_with_http_info(body, **kwargs)
    else:
        (data) = self.create_runtime_class_with_http_info(body, **kwargs)
        return data
[ "def", "create_runtime_class", "(", "self", ",", "body", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "create_runtime_class_with_http_info", "(", "body", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "create_runtime_class_with_http_info", "(", "body", ",", "*", "*", "kwargs", ")", "return", "data" ]
create a RuntimeClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_runtime_class(body, async_req=True)
>>> result = thread.get()

:param async_req bool
:param V1beta1RuntimeClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be
    persisted. An invalid or unrecognized dryRun directive will result in an
    error response and no further processing of the request. Valid values
    are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor
    or entity that is making these changes. The value must be less than or
    128 characters long, and only contain printable characters, as defined
    by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1RuntimeClass
         If the method is called asynchronously,
         returns the request thread.
[ "create", "a", "RuntimeClass", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "create_runtime_class", "(", "body", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
62.434783
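A call sketch against a live cluster; the RuntimeClass name and handler are placeholders.

from kubernetes import client, config

config.load_kube_config()  # credentials from the local kubeconfig
api = client.NodeV1beta1Api()

body = client.V1beta1RuntimeClass(
    metadata=client.V1ObjectMeta(name='gvisor'),  # placeholder name
    handler='runsc',                              # node-side runtime handler
)
created = api.create_runtime_class(body)
print(created.metadata.name)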
lsbardel/python-stdnet
stdnet/utils/skiplist.py
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/skiplist.py#L74-L87
def rank(self, score):
    '''Return the 0-based index (rank) of ``score``.

    If the score is not available it returns a negative integer which
    absolute score is the left most closest index with score less than
    *score*.'''
    node = self.__head
    rank = 0
    for i in range(self.__level-1, -1, -1):
        while node.next[i] and node.next[i].score <= score:
            rank += node.width[i]
            node = node.next[i]
    if node.score == score:
        return rank - 1
    else:
        return -1 - rank
[ "def", "rank", "(", "self", ",", "score", ")", ":", "node", "=", "self", ".", "__head", "rank", "=", "0", "for", "i", "in", "range", "(", "self", ".", "__level", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "while", "node", ".", "next", "[", "i", "]", "and", "node", ".", "next", "[", "i", "]", ".", "score", "<=", "score", ":", "rank", "+=", "node", ".", "width", "[", "i", "]", "node", "=", "node", ".", "next", "[", "i", "]", "if", "node", ".", "score", "==", "score", ":", "return", "rank", "-", "1", "else", ":", "return", "-", "1", "-", "rank" ]
Return the 0-based index (rank) of ``score``. If the score is not available it returns a negative integer which absolute score is the left most closest index with score less than *score*.
[ "Return", "the", "0", "-", "based", "index", "(", "rank", ")", "of", "score", ".", "If", "the", "score", "is", "not", "available", "it", "returns", "a", "negative", "integer", "which", "absolute", "score", "is", "the", "left", "most", "closest", "index", "with", "score", "less", "than", "*", "score", "*", "." ]
python
train
38.857143
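A sketch of the rank() semantics; the skiplist construction and the insert(score, value) signature are assumptions about this module's API.

from stdnet.utils.skiplist import skiplist

sl = skiplist()
for score in (10, 20, 30):
    sl.insert(score, 'value-%d' % score)  # assumed signature

print(sl.rank(20))  # -> 1: 0-based rank of an existing score
print(sl.rank(25))  # -> -3: no match; -1 - (number of scores <= 25)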
zlobspb/txtarantool
txtarantool.py
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L196-L209
def pack_str(cls, value):
    """
    Pack string field
    <field> ::= <int32_varint><data>

    :param value: string to be packed
    :type value: bytes or str
    :return: packed value
    :rtype: bytes
    """
    assert isinstance(value, str)
    value_len_packed = cls.pack_int_base128(len(value))
    return struct.pack("<%ds%ds" % (len(value_len_packed), len(value)),
                       value_len_packed, value)
[ "def", "pack_str", "(", "cls", ",", "value", ")", ":", "assert", "isinstance", "(", "value", ",", "str", ")", "value_len_packed", "=", "cls", ".", "pack_int_base128", "(", "len", "(", "value", ")", ")", "return", "struct", ".", "pack", "(", "\"<%ds%ds\"", "%", "(", "len", "(", "value_len_packed", ")", ",", "len", "(", "value", ")", ")", ",", "value_len_packed", ",", "value", ")" ]
Pack string field
<field> ::= <int32_varint><data>

:param value: string to be packed
:type value: bytes or str
:return: packed value
:rtype: bytes
[ "Pack", "string", "field", "<field", ">", "::", "=", "<int32_varint", ">", "<data", ">" ]
python
train
30.928571
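The wire format is easy to reproduce by hand for short strings, since the base-128 varint prefix collapses to a single byte for lengths below 128; this standalone illustration mirrors the struct.pack() call above.

import struct

value = b'hello'
# One varint length byte, then the raw data.
packed = struct.pack('<B%ds' % len(value), len(value), value)
print(packed)  # -> b'\x05hello'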
Opentrons/opentrons
api/src/opentrons/legacy_api/instruments/pipette.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/legacy_api/instruments/pipette.py#L1195-L1224
def distribute(self, volume, source, dest, *args, **kwargs):
    """
    Distribute will move a volume of liquid from a single of source
    to a list of target locations. See :any:`Transfer` for details
    and a full list of optional arguments.

    Returns
    -------
    This instance of :class:`Pipette`.

    Examples
    --------
    ..
    >>> from opentrons import instruments, labware, robot # doctest: +SKIP
    >>> robot.reset() # doctest: +SKIP
    >>> plate = labware.load('96-flat', '3') # doctest: +SKIP
    >>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
    >>> p300.distribute(50, plate[1], plate.cols[0]) # doctest: +SKIP
    """
    # Note: currently it varies whether the pipette should have a tip on
    # or not depending on the parameters for this call, so we cannot
    # create a very reliable assertion on tip status
    args = [volume, source, dest, *args]
    kwargs['mode'] = 'distribute'
    kwargs['mix_after'] = (0, 0)
    if 'disposal_vol' not in kwargs:
        kwargs['disposal_vol'] = self.min_volume
    return self.transfer(*args, **kwargs)
[ "def", "distribute", "(", "self", ",", "volume", ",", "source", ",", "dest", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Note: currently it varies whether the pipette should have a tip on", "# or not depending on the parameters for this call, so we cannot", "# create a very reliable assertion on tip status", "args", "=", "[", "volume", ",", "source", ",", "dest", ",", "*", "args", "]", "kwargs", "[", "'mode'", "]", "=", "'distribute'", "kwargs", "[", "'mix_after'", "]", "=", "(", "0", ",", "0", ")", "if", "'disposal_vol'", "not", "in", "kwargs", ":", "kwargs", "[", "'disposal_vol'", "]", "=", "self", ".", "min_volume", "return", "self", ".", "transfer", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Distribute will move a volume of liquid from a single of source
to a list of target locations. See :any:`Transfer` for details
and a full list of optional arguments.

Returns
-------
This instance of :class:`Pipette`.

Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '3') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
>>> p300.distribute(50, plate[1], plate.cols[0]) # doctest: +SKIP
[ "Distribute", "will", "move", "a", "volume", "of", "liquid", "from", "a", "single", "of", "source", "to", "a", "list", "of", "target", "locations", ".", "See", ":", "any", ":", "Transfer", "for", "details", "and", "a", "full", "list", "of", "optional", "arguments", "." ]
python
train
39.066667
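The docstring example, spelled out; it needs an Opentrons robot or simulator environment to actually run.

from opentrons import instruments, labware, robot

robot.reset()
plate = labware.load('96-flat', '3')
p300 = instruments.P300_Single(mount='left')

# One source well fanned out across a column; the pipette's min_volume is
# held back as the default disposal volume.
p300.distribute(50, plate[1], plate.cols[0])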
desbma/sacad
sacad/rate_watcher.py
https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/rate_watcher.py#L62-L66
def __access(self, ts):
    """ Record an API access. """
    with self.connection:
        self.connection.execute("INSERT OR REPLACE INTO access_timestamp (timestamp, domain) VALUES (?, ?)",
                                (ts, self.domain))
[ "def", "__access", "(", "self", ",", "ts", ")", ":", "with", "self", ".", "connection", ":", "self", ".", "connection", ".", "execute", "(", "\"INSERT OR REPLACE INTO access_timestamp (timestamp, domain) VALUES (?, ?)\"", ",", "(", "ts", ",", "self", ".", "domain", ")", ")" ]
Record an API access.
[ "Record", "an", "API", "access", "." ]
python
train
47
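The underlying SQL pattern, reproduced standalone with sqlite3; the PRIMARY KEY on domain is an assumption about the table schema, and is what makes INSERT OR REPLACE keep a single row per domain.

import sqlite3
import time

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE access_timestamp (timestamp FLOAT, domain TEXT PRIMARY KEY)')

with conn:  # same transactional pattern as the method above
    conn.execute('INSERT OR REPLACE INTO access_timestamp (timestamp, domain) VALUES (?, ?)',
                 (time.time(), 'example.com'))

print(conn.execute('SELECT domain, timestamp FROM access_timestamp').fetchall())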
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/ext/_bundled/mplexporter.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/ext/_bundled/mplexporter.py#L638-L673
def draw_path(self, data, coordinates, pathcodes, style,
              offset=None, offset_coordinates="data", mplobj=None):
    """
    Draw a path.

    In matplotlib, paths are created by filled regions, histograms,
    contour plots, patches, etc.

    Parameters
    ----------
    data : array_like
        A shape (N, 2) array of datapoints.
    coordinates : string
        A string code, which should be either 'data' for data coordinates,
        'figure' for figure (pixel) coordinates, or "points" for raw
        point coordinates (useful in conjunction with offsets, below).
    pathcodes : list
        A list of single-character SVG pathcodes associated with the data.
        Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
        'S', 's', 'C', 'c', 'Z', 'z']
        See the SVG specification for details. Note that some path codes
        consume more than one datapoint (while 'Z' consumes none), so
        in general, the length of the pathcodes list will not be the same
        as that of the data array.
    style : dictionary
        a dictionary specifying the appearance of the line.
    offset : list (optional)
        the (x, y) offset of the path. If not given, no offset will
        be used.
    offset_coordinates : string (optional)
        A string code, which should be either 'data' for data coordinates,
        or 'figure' for figure (pixel) coordinates.
    mplobj : matplotlib object
        the matplotlib plot element which generated this path
    """
    raise NotImplementedError()
[ "def", "draw_path", "(", "self", ",", "data", ",", "coordinates", ",", "pathcodes", ",", "style", ",", "offset", "=", "None", ",", "offset_coordinates", "=", "\"data\"", ",", "mplobj", "=", "None", ")", ":", "raise", "NotImplementedError", "(", ")" ]
Draw a path.

In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.

Parameters
----------
data : array_like
    A shape (N, 2) array of datapoints.
coordinates : string
    A string code, which should be either 'data' for data coordinates,
    'figure' for figure (pixel) coordinates, or "points" for raw
    point coordinates (useful in conjunction with offsets, below).
pathcodes : list
    A list of single-character SVG pathcodes associated with the data.
    Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
    'S', 's', 'C', 'c', 'Z', 'z']
    See the SVG specification for details. Note that some path codes
    consume more than one datapoint (while 'Z' consumes none), so
    in general, the length of the pathcodes list will not be the same
    as that of the data array.
style : dictionary
    a dictionary specifying the appearance of the line.
offset : list (optional)
    the (x, y) offset of the path. If not given, no offset will
    be used.
offset_coordinates : string (optional)
    A string code, which should be either 'data' for data coordinates,
    or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
    the matplotlib plot element which generated this path
[ "Draw", "a", "path", "." ]
python
train
46.277778
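Since the base class only raises NotImplementedError, concrete renderers override this hook; a minimal sketch (the Renderer base-class name is taken from this module).

class DebugRenderer(Renderer):
    def draw_path(self, data, coordinates, pathcodes, style,
                  offset=None, offset_coordinates="data", mplobj=None):
        # Codes and vertices are not one-to-one, so report them separately.
        print('path: %d vertices, codes %s, in %r coords'
              % (len(data), ''.join(pathcodes), coordinates))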
pybel/pybel-tools
src/pybel_tools/analysis/neurommsig/algorithm.py
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/analysis/neurommsig/algorithm.py#L127-L163
def get_neurommsig_score(graph: BELGraph,
                         genes: List[Gene],
                         ora_weight: Optional[float] = None,
                         hub_weight: Optional[float] = None,
                         top_percent: Optional[float] = None,
                         topology_weight: Optional[float] = None) -> float:
    """Calculate the composite NeuroMMSig Score for a given list of genes.

    :param graph: A BEL graph
    :param genes: A list of gene nodes
    :param ora_weight: The relative weight of the over-enrichment analysis score from
     :py:func:`neurommsig_gene_ora`. Defaults to 1.0.
    :param hub_weight: The relative weight of the hub analysis score from
     :py:func:`neurommsig_hubs`. Defaults to 1.0.
    :param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05).
    :param topology_weight: The relative weight of the topolgical analysis core from
     :py:func:`neurommsig_topology`. Defaults to 1.0.
    :return: The NeuroMMSig composite score
    """
    ora_weight = ora_weight or 1.0
    hub_weight = hub_weight or 1.0
    topology_weight = topology_weight or 1.0
    total_weight = ora_weight + hub_weight + topology_weight

    genes = list(genes)

    ora_score = neurommsig_gene_ora(graph, genes)
    hub_score = neurommsig_hubs(graph, genes, top_percent=top_percent)
    topology_score = neurommsig_topology(graph, genes)

    weighted_sum = (
        ora_weight * ora_score +
        hub_weight * hub_score +
        topology_weight * topology_score
    )

    return weighted_sum / total_weight
[ "def", "get_neurommsig_score", "(", "graph", ":", "BELGraph", ",", "genes", ":", "List", "[", "Gene", "]", ",", "ora_weight", ":", "Optional", "[", "float", "]", "=", "None", ",", "hub_weight", ":", "Optional", "[", "float", "]", "=", "None", ",", "top_percent", ":", "Optional", "[", "float", "]", "=", "None", ",", "topology_weight", ":", "Optional", "[", "float", "]", "=", "None", ")", "->", "float", ":", "ora_weight", "=", "ora_weight", "or", "1.0", "hub_weight", "=", "hub_weight", "or", "1.0", "topology_weight", "=", "topology_weight", "or", "1.0", "total_weight", "=", "ora_weight", "+", "hub_weight", "+", "topology_weight", "genes", "=", "list", "(", "genes", ")", "ora_score", "=", "neurommsig_gene_ora", "(", "graph", ",", "genes", ")", "hub_score", "=", "neurommsig_hubs", "(", "graph", ",", "genes", ",", "top_percent", "=", "top_percent", ")", "topology_score", "=", "neurommsig_topology", "(", "graph", ",", "genes", ")", "weighted_sum", "=", "(", "ora_weight", "*", "ora_score", "+", "hub_weight", "*", "hub_score", "+", "topology_weight", "*", "topology_score", ")", "return", "weighted_sum", "/", "total_weight" ]
Calculate the composite NeuroMMSig Score for a given list of genes.

:param graph: A BEL graph
:param genes: A list of gene nodes
:param ora_weight: The relative weight of the over-enrichment analysis score from
 :py:func:`neurommsig_gene_ora`. Defaults to 1.0.
:param hub_weight: The relative weight of the hub analysis score from
 :py:func:`neurommsig_hubs`. Defaults to 1.0.
:param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05).
:param topology_weight: The relative weight of the topolgical analysis core from
 :py:func:`neurommsig_topology`. Defaults to 1.0.
:return: The NeuroMMSig composite score
[ "Calculate", "the", "composite", "NeuroMMSig", "Score", "for", "a", "given", "list", "of", "genes", "." ]
python
valid
42.297297
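The composite is just a weighted mean of the three sub-scores; a standalone check of the arithmetic with made-up scores and the default weights of 1.0.

ora_score, hub_score, topology_score = 0.6, 0.3, 0.9
ora_w = hub_w = top_w = 1.0

composite = (ora_w * ora_score + hub_w * hub_score + top_w * topology_score) \
    / (ora_w + hub_w + top_w)
print(composite)  # -> 0.6, the plain average when all weights are equal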
aetros/aetros-cli
aetros/backend.py
https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/backend.py#L65-L101
def Popen(*args, **kwargs):
    """
    Executes a command using subprocess.Popen and redirects output to AETROS and stdout.
    Parses stdout as well for stdout API calls.

    Use read_line argument to read stdout of command's stdout line by line.
    Use returned process stdin to communicate with the command.

    :return: subprocess.Popen
    """
    read_line = None
    if 'read_line' in kwargs:
        read_line = kwargs['read_line']
        del kwargs['read_line']

    p = subprocess.Popen(*args, **kwargs)
    wait_stdout = None
    wait_stderr = None

    if p.stdout:
        wait_stdout = sys.stdout.attach(p.stdout, read_line=read_line)
    if p.stderr:
        wait_stderr = sys.stderr.attach(p.stderr)

    original_wait = p.wait

    def wait():
        original_wait()

        if wait_stdout:
            wait_stdout()
        if wait_stderr:
            wait_stderr()

    p.wait = wait

    return p
[ "def", "Popen", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "read_line", "=", "None", "if", "'read_line'", "in", "kwargs", ":", "read_line", "=", "kwargs", "[", "'read_line'", "]", "del", "kwargs", "[", "'read_line'", "]", "p", "=", "subprocess", ".", "Popen", "(", "*", "args", ",", "*", "*", "kwargs", ")", "wait_stdout", "=", "None", "wait_stderr", "=", "None", "if", "p", ".", "stdout", ":", "wait_stdout", "=", "sys", ".", "stdout", ".", "attach", "(", "p", ".", "stdout", ",", "read_line", "=", "read_line", ")", "if", "p", ".", "stderr", ":", "wait_stderr", "=", "sys", ".", "stderr", ".", "attach", "(", "p", ".", "stderr", ")", "original_wait", "=", "p", ".", "wait", "def", "wait", "(", ")", ":", "original_wait", "(", ")", "if", "wait_stdout", ":", "wait_stdout", "(", ")", "if", "wait_stderr", ":", "wait_stderr", "(", ")", "p", ".", "wait", "=", "wait", "return", "p" ]
Executes a command using subprocess.Popen and redirects output to AETROS and stdout.
Parses stdout as well for stdout API calls.

Use read_line argument to read stdout of command's stdout line by line.
Use returned process stdin to communicate with the command.

:return: subprocess.Popen
[ "Executes", "a", "command", "using", "subprocess", ".", "Popen", "and", "redirects", "output", "to", "AETROS", "and", "stdout", ".", "Parses", "stdout", "as", "well", "for", "stdout", "API", "calls", "." ]
python
train
23.972973
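A hedged usage sketch; it assumes an active AETROS job context has already replaced sys.stdout/sys.stderr with the attachable proxies this wrapper relies on, and the command and callback are illustrative only:

import subprocess
from aetros.backend import Popen

def on_line(line):
    # Invoked for every line the child process writes to stdout.
    print('child: %s' % line)

p = Popen(['echo', 'hello'], stdout=subprocess.PIPE, read_line=on_line)
p.wait()  # also flushes the attached stream readers before returning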
clalancette/pycdlib
pycdlib/eltorito.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/eltorito.py#L422-L434
def set_inode(self, ino): # type: (inode.Inode) -> None ''' A method to set the Inode associated with this El Torito Entry. Parameters: ino - The Inode object corresponding to this entry. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInternalError('El Torito Entry not yet initialized') self.inode = ino
[ "def", "set_inode", "(", "self", ",", "ino", ")", ":", "# type: (inode.Inode) -> None", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'El Torito Entry not yet initialized'", ")", "self", ".", "inode", "=", "ino" ]
A method to set the Inode associated with this El Torito Entry. Parameters: ino - The Inode object corresponding to this entry. Returns: Nothing.
[ "A", "method", "to", "set", "the", "Inode", "associated", "with", "this", "El", "Torito", "Entry", "." ]
python
train
32.153846
osrg/ryu
ryu/lib/ovs/vsctl.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/ovs/vsctl.py#L623-L685
def add_port(self, br_name, port_name, may_exist, fake_iface, iface_names, settings=None): """ :type settings: list of (column, value_json) where column is str, value_json is json that is represented by Datum.to_json() """ settings = settings or [] self.populate_cache() if may_exist: vsctl_port = self.find_port(port_name, False) if vsctl_port: want_names = set(iface_names) have_names = set(ovsrec_iface.name for ovsrec_iface in vsctl_port.port_cfg.interfaces) if vsctl_port.bridge().name != br_name: vsctl_fatal('"%s" but %s is actually attached to ' 'vsctl_bridge %s' % (br_name, port_name, vsctl_port.bridge().name)) if want_names != have_names: want_names_string = ','.join(want_names) have_names_string = ','.join(have_names) vsctl_fatal('"%s" but %s actually has interface(s) %s' % (want_names_string, port_name, have_names_string)) return self.check_conflicts(port_name, 'cannot create a port named %s' % port_name) for iface_name in iface_names: self.check_conflicts( iface_name, 'cannot create an interface named %s' % iface_name) vsctl_bridge = self.find_bridge(br_name, True) ifaces = [] for iface_name in iface_names: ovsrec_iface = self.txn.insert( self.idl.tables[vswitch_idl.OVSREC_TABLE_INTERFACE]) ovsrec_iface.name = iface_name ifaces.append(ovsrec_iface) ovsrec_port = self.txn.insert( self.idl.tables[vswitch_idl.OVSREC_TABLE_PORT]) ovsrec_port.name = port_name ovsrec_port.interfaces = ifaces ovsrec_port.bond_fake_iface = fake_iface if vsctl_bridge.parent: tag = vsctl_bridge.vlan ovsrec_port.tag = tag for column, value in settings: # TODO:XXX self.symtab: self.set_column(ovsrec_port, column, value) if vsctl_bridge.parent: ovsrec_bridge = vsctl_bridge.parent.br_cfg else: ovsrec_bridge = vsctl_bridge.br_cfg self.bridge_insert_port(ovsrec_bridge, ovsrec_port) vsctl_port = self.add_port_to_cache(vsctl_bridge, ovsrec_port) for ovsrec_iface in ifaces: self.add_iface_to_cache(vsctl_port, ovsrec_iface)
[ "def", "add_port", "(", "self", ",", "br_name", ",", "port_name", ",", "may_exist", ",", "fake_iface", ",", "iface_names", ",", "settings", "=", "None", ")", ":", "settings", "=", "settings", "or", "[", "]", "self", ".", "populate_cache", "(", ")", "if", "may_exist", ":", "vsctl_port", "=", "self", ".", "find_port", "(", "port_name", ",", "False", ")", "if", "vsctl_port", ":", "want_names", "=", "set", "(", "iface_names", ")", "have_names", "=", "set", "(", "ovsrec_iface", ".", "name", "for", "ovsrec_iface", "in", "vsctl_port", ".", "port_cfg", ".", "interfaces", ")", "if", "vsctl_port", ".", "bridge", "(", ")", ".", "name", "!=", "br_name", ":", "vsctl_fatal", "(", "'\"%s\" but %s is actually attached to '", "'vsctl_bridge %s'", "%", "(", "br_name", ",", "port_name", ",", "vsctl_port", ".", "bridge", "(", ")", ".", "name", ")", ")", "if", "want_names", "!=", "have_names", ":", "want_names_string", "=", "','", ".", "join", "(", "want_names", ")", "have_names_string", "=", "','", ".", "join", "(", "have_names", ")", "vsctl_fatal", "(", "'\"%s\" but %s actually has interface(s) %s'", "%", "(", "want_names_string", ",", "port_name", ",", "have_names_string", ")", ")", "return", "self", ".", "check_conflicts", "(", "port_name", ",", "'cannot create a port named %s'", "%", "port_name", ")", "for", "iface_name", "in", "iface_names", ":", "self", ".", "check_conflicts", "(", "iface_name", ",", "'cannot create an interface named %s'", "%", "iface_name", ")", "vsctl_bridge", "=", "self", ".", "find_bridge", "(", "br_name", ",", "True", ")", "ifaces", "=", "[", "]", "for", "iface_name", "in", "iface_names", ":", "ovsrec_iface", "=", "self", ".", "txn", ".", "insert", "(", "self", ".", "idl", ".", "tables", "[", "vswitch_idl", ".", "OVSREC_TABLE_INTERFACE", "]", ")", "ovsrec_iface", ".", "name", "=", "iface_name", "ifaces", ".", "append", "(", "ovsrec_iface", ")", "ovsrec_port", "=", "self", ".", "txn", ".", "insert", "(", "self", ".", "idl", ".", "tables", "[", "vswitch_idl", ".", "OVSREC_TABLE_PORT", "]", ")", "ovsrec_port", ".", "name", "=", "port_name", "ovsrec_port", ".", "interfaces", "=", "ifaces", "ovsrec_port", ".", "bond_fake_iface", "=", "fake_iface", "if", "vsctl_bridge", ".", "parent", ":", "tag", "=", "vsctl_bridge", ".", "vlan", "ovsrec_port", ".", "tag", "=", "tag", "for", "column", ",", "value", "in", "settings", ":", "# TODO:XXX self.symtab:", "self", ".", "set_column", "(", "ovsrec_port", ",", "column", ",", "value", ")", "if", "vsctl_bridge", ".", "parent", ":", "ovsrec_bridge", "=", "vsctl_bridge", ".", "parent", ".", "br_cfg", "else", ":", "ovsrec_bridge", "=", "vsctl_bridge", ".", "br_cfg", "self", ".", "bridge_insert_port", "(", "ovsrec_bridge", ",", "ovsrec_port", ")", "vsctl_port", "=", "self", ".", "add_port_to_cache", "(", "vsctl_bridge", ",", "ovsrec_port", ")", "for", "ovsrec_iface", "in", "ifaces", ":", "self", ".", "add_iface_to_cache", "(", "vsctl_port", ",", "ovsrec_iface", ")" ]
:type settings: list of (column, value_json) where column is str, value_json is json that is represented by Datum.to_json()
[ ":", "type", "settings", ":", "list", "of", "(", "column", "value_json", ")", "where", "column", "is", "str", "value_json", "is", "json", "that", "is", "represented", "by", "Datum", ".", "to_json", "()" ]
python
train
43.206349
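A hedged call sketch; vsctl stands for an instance of this class, the bridge, port, and interface names are placeholders, and the settings pair follows the documented (column, value_json) shape:

vsctl.add_port('br0', 'port0', may_exist=True, fake_iface=False,
               iface_names=['eth0'],
               settings=[('tag', 100)])  # set the Port table's 'tag' column to 100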
csparpa/pyowm
pyowm/weatherapi25/owm25.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/owm25.py#L641-L680
def daily_forecast(self, name, limit=None): """ Queries the OWM Weather API for daily weather forecast for the specified location (eg: "London,uk"). A *Forecaster* object is returned, containing a *Forecast* instance covering a global streak of fourteen days by default: this instance encapsulates *Weather* objects, with a time interval of one day one from each other :param name: the location's toponym :type name: str or unicode :param limit: the maximum number of daily *Weather* items to be retrieved (default is ``None``, which stands for any number of items) :type limit: int or ``None`` :returns: a *Forecaster* instance or ``None`` if forecast data is not available for the specified location :raises: *ParseResponseException* when OWM Weather API responses' data cannot be parsed, *APICallException* when OWM Weather API can not be reached, *ValueError* if negative values are supplied for limit """ assert isinstance(name, str), "Value must be a string" encoded_name = name if limit is not None: assert isinstance(limit, int), "'limit' must be an int or None" if limit < 1: raise ValueError("'limit' must be None or greater than zero") params = {'q': encoded_name, 'lang': self._language} if limit is not None: params['cnt'] = limit uri = http_client.HttpClient.to_url(DAILY_FORECAST_URL, self._API_key, self._subscription_type, self._use_ssl) _, json_data = self._wapi.cacheable_get_json(uri, params=params) forecast = self._parsers['forecast'].parse_JSON(json_data) if forecast is not None: forecast.set_interval("daily") return forecaster.Forecaster(forecast) else: return None
[ "def", "daily_forecast", "(", "self", ",", "name", ",", "limit", "=", "None", ")", ":", "assert", "isinstance", "(", "name", ",", "str", ")", ",", "\"Value must be a string\"", "encoded_name", "=", "name", "if", "limit", "is", "not", "None", ":", "assert", "isinstance", "(", "limit", ",", "int", ")", ",", "\"'limit' must be an int or None\"", "if", "limit", "<", "1", ":", "raise", "ValueError", "(", "\"'limit' must be None or greater than zero\"", ")", "params", "=", "{", "'q'", ":", "encoded_name", ",", "'lang'", ":", "self", ".", "_language", "}", "if", "limit", "is", "not", "None", ":", "params", "[", "'cnt'", "]", "=", "limit", "uri", "=", "http_client", ".", "HttpClient", ".", "to_url", "(", "DAILY_FORECAST_URL", ",", "self", ".", "_API_key", ",", "self", ".", "_subscription_type", ",", "self", ".", "_use_ssl", ")", "_", ",", "json_data", "=", "self", ".", "_wapi", ".", "cacheable_get_json", "(", "uri", ",", "params", "=", "params", ")", "forecast", "=", "self", ".", "_parsers", "[", "'forecast'", "]", ".", "parse_JSON", "(", "json_data", ")", "if", "forecast", "is", "not", "None", ":", "forecast", ".", "set_interval", "(", "\"daily\"", ")", "return", "forecaster", ".", "Forecaster", "(", "forecast", ")", "else", ":", "return", "None" ]
Queries the OWM Weather API for daily weather forecast for the
specified location (eg: "London,uk"). A *Forecaster* object is
returned, containing a *Forecast* instance covering a global streak
of fourteen days by default: this instance encapsulates *Weather*
objects, with a time interval of one day from each other

:param name: the location's toponym
:type name: str or unicode
:param limit: the maximum number of daily *Weather* items to be
    retrieved (default is ``None``, which stands for any number of
    items)
:type limit: int or ``None``
:returns: a *Forecaster* instance or ``None`` if forecast data is not
    available for the specified location
:raises: *ParseResponseException* when OWM Weather API responses' data
    cannot be parsed, *APICallException* when OWM Weather API can not
    be reached, *ValueError* if zero or negative values are supplied for limit
[ "Queries", "the", "OWM", "Weather", "API", "for", "daily", "weather", "forecast", "for", "the", "specified", "location", "(", "eg", ":", "London", "uk", ")", ".", "A", "*", "Forecaster", "*", "object", "is", "returned", "containing", "a", "*", "Forecast", "*", "instance", "covering", "a", "global", "streak", "of", "fourteen", "days", "by", "default", ":", "this", "instance", "encapsulates", "*", "Weather", "*", "objects", "with", "a", "time", "interval", "of", "one", "day", "one", "from", "each", "other" ]
python
train
50.425
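A hedged usage sketch; the API key and location are placeholders, and only the behaviour documented above (a Forecaster or None) is relied on:

import pyowm

owm = pyowm.OWM('your-api-key')  # assumed entry point for this pyowm generation
forecaster = owm.daily_forecast('London,uk', limit=5)
if forecaster is None:
    print('no daily forecast available for this location')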
alphagov/performanceplatform-collector
setup.py
https://github.com/alphagov/performanceplatform-collector/blob/de68ab4aa500c31e436e050fa1268fa928c522a5/setup.py#L41-L49
def _get_requirements(fname): """ Create a list of requirements from the output of the pip freeze command saved in a text file. """ packages = _read(fname).split('\n') packages = (p.strip() for p in packages) packages = (p for p in packages if p and not p.startswith('#')) return list(packages)
[ "def", "_get_requirements", "(", "fname", ")", ":", "packages", "=", "_read", "(", "fname", ")", ".", "split", "(", "'\\n'", ")", "packages", "=", "(", "p", ".", "strip", "(", ")", "for", "p", "in", "packages", ")", "packages", "=", "(", "p", "for", "p", "in", "packages", "if", "p", "and", "not", "p", ".", "startswith", "(", "'#'", ")", ")", "return", "list", "(", "packages", ")" ]
Create a list of requirements from the output of the pip freeze command saved in a text file.
[ "Create", "a", "list", "of", "requirements", "from", "the", "output", "of", "the", "pip", "freeze", "command", "saved", "in", "a", "text", "file", "." ]
python
train
35.333333
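A self-contained equivalent for context, since the original delegates file reading to a _read helper defined elsewhere in setup.py:

def read_requirements(path):
    # One requirement per non-empty, non-comment line of a
    # `pip freeze`-style file.
    with open(path) as f:
        lines = (line.strip() for line in f)
        return [line for line in lines if line and not line.startswith('#')]

# Typical wiring: setup(..., install_requires=read_requirements('requirements.txt'))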
pkgw/pwkit
pwkit/environments/heasoft.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/environments/heasoft.py#L42-L89
def modify_environment (self, env): """The headas-init.sh script generates its variables in a bit of a funky way -- it runs a script that generates a list of settings. These are their transcriptions. """ plat = self._platform def path (*args): return os.path.join (self._installdir, *args) env['CALDB'] = b'http://heasarc.gsfc.nasa.gov/FTP/caldb' env['CALDBCONFIG'] = path ('caldb.config') env['CALDBALIAS'] = path ('alias_config.fits') env['HEADAS'] = path (plat) env['LHEASOFT'] = env['HEADAS'] env['FTOOLS'] = env['HEADAS'] prepend_environ_path (env, 'PATH', path (plat, 'bin')) prepend_environ_path (env, 'LD_LIBRARY_PATH', path (plat, 'lib')) prepend_environ_path (env, 'PERLLIB', path (plat, 'lib', 'perl')) prepend_environ_path (env, 'PERL5LIB', path (plat, 'lib', 'perl')) prepend_environ_path (env, 'PYTHONPATH', path (plat, 'lib')) prepend_environ_path (env, 'PYTHONPATH', path (plat, 'lib', 'python')) userpfiles = user_data_path ('hea-pfiles') io.ensure_dir (userpfiles, parents=True) env['PFILES'] = ';'.join ([userpfiles, path (plat, 'syspfiles')]) env['LHEA_DATA'] = path (plat, 'refdata') env['LHEA_HELP'] = path (plat, 'help') env['PGPLOT_DIR'] = path (plat, 'lib') env['PGPLOT_FONT'] = path (plat, 'lib', 'grfont.dat') env['PGPLOT_RGB'] = path (plat, 'lib', 'rgb.txt') env['POW_LIBRARY'] = path (plat, 'lib', 'pow') env['TCLRL_LIBDIR'] = path (plat, 'lib') env['XANADU'] = path () env['XANBIN'] = path (plat) env['XRDEFAULTS'] = path (plat, 'xrdefaults') env['EXT'] = b'lnx' # XXX portability probably ... env['LHEAPERL'] = b'/usr/bin/perl' # what could go wrong? env['PFCLOBBER'] = b'1' env['FTOOLSINPUT'] = b'stdin' env['FTOOLSOUTPUT'] = b'stdout' return env
[ "def", "modify_environment", "(", "self", ",", "env", ")", ":", "plat", "=", "self", ".", "_platform", "def", "path", "(", "*", "args", ")", ":", "return", "os", ".", "path", ".", "join", "(", "self", ".", "_installdir", ",", "*", "args", ")", "env", "[", "'CALDB'", "]", "=", "b'http://heasarc.gsfc.nasa.gov/FTP/caldb'", "env", "[", "'CALDBCONFIG'", "]", "=", "path", "(", "'caldb.config'", ")", "env", "[", "'CALDBALIAS'", "]", "=", "path", "(", "'alias_config.fits'", ")", "env", "[", "'HEADAS'", "]", "=", "path", "(", "plat", ")", "env", "[", "'LHEASOFT'", "]", "=", "env", "[", "'HEADAS'", "]", "env", "[", "'FTOOLS'", "]", "=", "env", "[", "'HEADAS'", "]", "prepend_environ_path", "(", "env", ",", "'PATH'", ",", "path", "(", "plat", ",", "'bin'", ")", ")", "prepend_environ_path", "(", "env", ",", "'LD_LIBRARY_PATH'", ",", "path", "(", "plat", ",", "'lib'", ")", ")", "prepend_environ_path", "(", "env", ",", "'PERLLIB'", ",", "path", "(", "plat", ",", "'lib'", ",", "'perl'", ")", ")", "prepend_environ_path", "(", "env", ",", "'PERL5LIB'", ",", "path", "(", "plat", ",", "'lib'", ",", "'perl'", ")", ")", "prepend_environ_path", "(", "env", ",", "'PYTHONPATH'", ",", "path", "(", "plat", ",", "'lib'", ")", ")", "prepend_environ_path", "(", "env", ",", "'PYTHONPATH'", ",", "path", "(", "plat", ",", "'lib'", ",", "'python'", ")", ")", "userpfiles", "=", "user_data_path", "(", "'hea-pfiles'", ")", "io", ".", "ensure_dir", "(", "userpfiles", ",", "parents", "=", "True", ")", "env", "[", "'PFILES'", "]", "=", "';'", ".", "join", "(", "[", "userpfiles", ",", "path", "(", "plat", ",", "'syspfiles'", ")", "]", ")", "env", "[", "'LHEA_DATA'", "]", "=", "path", "(", "plat", ",", "'refdata'", ")", "env", "[", "'LHEA_HELP'", "]", "=", "path", "(", "plat", ",", "'help'", ")", "env", "[", "'PGPLOT_DIR'", "]", "=", "path", "(", "plat", ",", "'lib'", ")", "env", "[", "'PGPLOT_FONT'", "]", "=", "path", "(", "plat", ",", "'lib'", ",", "'grfont.dat'", ")", "env", "[", "'PGPLOT_RGB'", "]", "=", "path", "(", "plat", ",", "'lib'", ",", "'rgb.txt'", ")", "env", "[", "'POW_LIBRARY'", "]", "=", "path", "(", "plat", ",", "'lib'", ",", "'pow'", ")", "env", "[", "'TCLRL_LIBDIR'", "]", "=", "path", "(", "plat", ",", "'lib'", ")", "env", "[", "'XANADU'", "]", "=", "path", "(", ")", "env", "[", "'XANBIN'", "]", "=", "path", "(", "plat", ")", "env", "[", "'XRDEFAULTS'", "]", "=", "path", "(", "plat", ",", "'xrdefaults'", ")", "env", "[", "'EXT'", "]", "=", "b'lnx'", "# XXX portability probably ...", "env", "[", "'LHEAPERL'", "]", "=", "b'/usr/bin/perl'", "# what could go wrong?", "env", "[", "'PFCLOBBER'", "]", "=", "b'1'", "env", "[", "'FTOOLSINPUT'", "]", "=", "b'stdin'", "env", "[", "'FTOOLSOUTPUT'", "]", "=", "b'stdout'", "return", "env" ]
The headas-init.sh script generates its variables in a bit of a funky way -- it runs a script that generates a list of settings. These are their transcriptions.
[ "The", "headas", "-", "init", ".", "sh", "script", "generates", "its", "variables", "in", "a", "bit", "of", "a", "funky", "way", "--", "it", "runs", "a", "script", "that", "generates", "a", "list", "of", "settings", ".", "These", "are", "their", "transcriptions", "." ]
python
train
41.291667
gbiggs/rtctree
rtctree/component.py
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/component.py#L1007-L1038
def add_logger(self, cb, level='NORMAL', filters='ALL'): '''Add a callback to receive log events from this component. @param cb The callback function to receive log events. It must have the signature cb(name, time, source, level, message), where name is the name of the component the log record came from, time is a floating-point time stamp, source is the name of the logger that provided the log record, level is the log level of the record and message is a text string. @param level The maximum level of log records to receive. @param filters Filter the objects from which to receive log messages. @return An ID for this logger. Use this ID in future operations such as removing this logger. @raises AddLoggerError ''' with self._mutex: obs = sdo.RTCLogger(self, cb) uuid_val = uuid.uuid4() intf_type = obs._this()._NP_RepositoryId props = {'logger.log_level': level, 'logger.filter': filters} props = utils.dict_to_nvlist(props) sprof = SDOPackage.ServiceProfile(id=uuid_val.get_bytes(), interface_type=intf_type, service=obs._this(), properties=props) conf = self.object.get_configuration() res = conf.add_service_profile(sprof) if res: self._loggers[uuid_val] = obs return uuid_val raise exceptions.AddLoggerError(self.name)
[ "def", "add_logger", "(", "self", ",", "cb", ",", "level", "=", "'NORMAL'", ",", "filters", "=", "'ALL'", ")", ":", "with", "self", ".", "_mutex", ":", "obs", "=", "sdo", ".", "RTCLogger", "(", "self", ",", "cb", ")", "uuid_val", "=", "uuid", ".", "uuid4", "(", ")", "intf_type", "=", "obs", ".", "_this", "(", ")", ".", "_NP_RepositoryId", "props", "=", "{", "'logger.log_level'", ":", "level", ",", "'logger.filter'", ":", "filters", "}", "props", "=", "utils", ".", "dict_to_nvlist", "(", "props", ")", "sprof", "=", "SDOPackage", ".", "ServiceProfile", "(", "id", "=", "uuid_val", ".", "get_bytes", "(", ")", ",", "interface_type", "=", "intf_type", ",", "service", "=", "obs", ".", "_this", "(", ")", ",", "properties", "=", "props", ")", "conf", "=", "self", ".", "object", ".", "get_configuration", "(", ")", "res", "=", "conf", ".", "add_service_profile", "(", "sprof", ")", "if", "res", ":", "self", ".", "_loggers", "[", "uuid_val", "]", "=", "obs", "return", "uuid_val", "raise", "exceptions", ".", "AddLoggerError", "(", "self", ".", "name", ")" ]
Add a callback to receive log events from this component. @param cb The callback function to receive log events. It must have the signature cb(name, time, source, level, message), where name is the name of the component the log record came from, time is a floating-point time stamp, source is the name of the logger that provided the log record, level is the log level of the record and message is a text string. @param level The maximum level of log records to receive. @param filters Filter the objects from which to receive log messages. @return An ID for this logger. Use this ID in future operations such as removing this logger. @raises AddLoggerError
[ "Add", "a", "callback", "to", "receive", "log", "events", "from", "this", "component", "." ]
python
train
48.53125
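A sketch of a callback matching the documented signature; comp stands for an already-acquired rtctree Component, and remove_logger is the assumed counterpart operation for the returned ID:

def log_cb(name, time, source, level, message):
    # name: source component, time: float timestamp, source: logger name,
    # level: record level, message: text payload
    print('[{0}] {1}/{2} {3}: {4}'.format(time, name, source, level, message))

logger_id = comp.add_logger(log_cb, level='NORMAL', filters='ALL')
# ... later, using the returned ID:
# comp.remove_logger(logger_id)  # assumed counterpart API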
senaite/senaite.jsonapi
src/senaite/jsonapi/dataproviders.py
https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/dataproviders.py#L83-L114
def extract_fields(self): """Extract the given fieldnames from the object :returns: Schema name/value mapping :rtype: dict """ # get the proper data manager for the object dm = IDataManager(self.context) # filter out ignored fields fieldnames = filter(lambda name: name not in self.ignore, self.keys) # schema mapping out = dict() for fieldname in fieldnames: try: # get the field value with the data manager fieldvalue = dm.json_data(fieldname) # https://github.com/collective/plone.jsonapi.routes/issues/52 # -> skip restricted fields except Unauthorized: logger.debug("Skipping restricted field '%s'" % fieldname) continue except ValueError: logger.debug("Skipping invalid field '%s'" % fieldname) continue out[fieldname] = api.to_json_value(self.context, fieldname, fieldvalue) return out
[ "def", "extract_fields", "(", "self", ")", ":", "# get the proper data manager for the object", "dm", "=", "IDataManager", "(", "self", ".", "context", ")", "# filter out ignored fields", "fieldnames", "=", "filter", "(", "lambda", "name", ":", "name", "not", "in", "self", ".", "ignore", ",", "self", ".", "keys", ")", "# schema mapping", "out", "=", "dict", "(", ")", "for", "fieldname", "in", "fieldnames", ":", "try", ":", "# get the field value with the data manager", "fieldvalue", "=", "dm", ".", "json_data", "(", "fieldname", ")", "# https://github.com/collective/plone.jsonapi.routes/issues/52", "# -> skip restricted fields", "except", "Unauthorized", ":", "logger", ".", "debug", "(", "\"Skipping restricted field '%s'\"", "%", "fieldname", ")", "continue", "except", "ValueError", ":", "logger", ".", "debug", "(", "\"Skipping invalid field '%s'\"", "%", "fieldname", ")", "continue", "out", "[", "fieldname", "]", "=", "api", ".", "to_json_value", "(", "self", ".", "context", ",", "fieldname", ",", "fieldvalue", ")", "return", "out" ]
Extract the given fieldnames from the object :returns: Schema name/value mapping :rtype: dict
[ "Extract", "the", "given", "fieldnames", "from", "the", "object" ]
python
train
32.25
addisonlynch/iexfinance
iexfinance/stocks/base.py
https://github.com/addisonlynch/iexfinance/blob/40f0bdcc51b329031d06178020fd774494250456/iexfinance/stocks/base.py#L418-L441
def get_fund_ownership(self, **kwargs): """Fund Ownership Returns the top 10 fund holders, meaning any firm not defined as buy-side or sell-side such as mutual funds, pension funds, endowments, investment firms, and other large entities that manage funds on behalf of others. Reference: https://iexcloud.io/docs/api/#fund-ownership Data Weighting: ``10000`` per symbol per period Returns ------- list or pandas.DataFrame Stocks Fund Ownership endpoint data """ def fmt_p(out): out = {(symbol, owner["entityProperName"]): owner for symbol in out for owner in out[symbol]} return pd.DataFrame(out) return self._get_endpoint("fund-ownership", fmt_p=fmt_p, params=kwargs)
[ "def", "get_fund_ownership", "(", "self", ",", "*", "*", "kwargs", ")", ":", "def", "fmt_p", "(", "out", ")", ":", "out", "=", "{", "(", "symbol", ",", "owner", "[", "\"entityProperName\"", "]", ")", ":", "owner", "for", "symbol", "in", "out", "for", "owner", "in", "out", "[", "symbol", "]", "}", "return", "pd", ".", "DataFrame", "(", "out", ")", "return", "self", ".", "_get_endpoint", "(", "\"fund-ownership\"", ",", "fmt_p", "=", "fmt_p", ",", "params", "=", "kwargs", ")" ]
Fund Ownership Returns the top 10 fund holders, meaning any firm not defined as buy-side or sell-side such as mutual funds, pension funds, endowments, investment firms, and other large entities that manage funds on behalf of others. Reference: https://iexcloud.io/docs/api/#fund-ownership Data Weighting: ``10000`` per symbol per period Returns ------- list or pandas.DataFrame Stocks Fund Ownership endpoint data
[ "Fund", "Ownership", "Returns", "the", "top", "10", "fund", "holders", "meaning", "any", "firm", "not", "defined", "as", "buy", "-", "side", "or", "sell", "-", "side", "such", "as", "mutual", "funds", "pension", "funds", "endowments", "investment", "firms", "and", "other", "large", "entities", "that", "manage", "funds", "on", "behalf", "of", "others", ".", "Reference", ":", "https", ":", "//", "iexcloud", ".", "io", "/", "docs", "/", "api", "/", "#fund", "-", "ownership", "Data", "Weighting", ":", "10000", "per", "symbol", "per", "period", "Returns", "-------", "list", "or", "pandas", ".", "DataFrame", "Stocks", "Fund", "Ownership", "endpoint", "data" ]
python
train
35.416667
pyhys/minimalmodbus
minimalmodbus.py
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L2127-L2150
def _checkResponseRegisterAddress(payload, registeraddress):
    """Check that the start address as given in the response is correct.

    The first two bytes in the payload hold the address value.

    Args:
        * payload (string): The payload
        * registeraddress (int): The register address (use decimal numbers, not hex).

    Raises:
        TypeError, ValueError

    """
    _checkString(payload, minlength=2, description='payload')
    _checkRegisteraddress(registeraddress)

    BYTERANGE_FOR_STARTADDRESS = slice(0, 2)

    bytesForStartAddress = payload[BYTERANGE_FOR_STARTADDRESS]
    receivedStartAddress = _twoByteStringToNum(bytesForStartAddress)

    if receivedStartAddress != registeraddress:
        raise ValueError('Wrong given write start address: {0}, but commanded is {1}. The data payload is: {2!r}'.format( \
                receivedStartAddress, registeraddress, payload))
[ "def", "_checkResponseRegisterAddress", "(", "payload", ",", "registeraddress", ")", ":", "_checkString", "(", "payload", ",", "minlength", "=", "2", ",", "description", "=", "'payload'", ")", "_checkRegisteraddress", "(", "registeraddress", ")", "BYTERANGE_FOR_STARTADDRESS", "=", "slice", "(", "0", ",", "2", ")", "bytesForStartAddress", "=", "payload", "[", "BYTERANGE_FOR_STARTADDRESS", "]", "receivedStartAddress", "=", "_twoByteStringToNum", "(", "bytesForStartAddress", ")", "if", "receivedStartAddress", "!=", "registeraddress", ":", "raise", "ValueError", "(", "'Wrong given write start adress: {0}, but commanded is {1}. The data payload is: {2!r}'", ".", "format", "(", "receivedStartAddress", ",", "registeraddress", ",", "payload", ")", ")" ]
Check that the start address as given in the response is correct.

The first two bytes in the payload hold the address value.

Args:
    * payload (string): The payload
    * registeraddress (int): The register address (use decimal numbers, not hex).

Raises:
    TypeError, ValueError
[ "Check", "that", "the", "start", "adress", "as", "given", "in", "the", "response", "is", "correct", "." ]
python
train
36.666667
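A worked example of the address check, using the Python 2 byte-string conventions this library targets; the payload bytes are illustrative, and the manual arithmetic mirrors what _twoByteStringToNum computes (big-endian, per Modbus):

payload = '\x00\x10\x00\x02'                        # write response for register 16
received = ord(payload[0]) * 256 + ord(payload[1])  # first two bytes hold the address
assert received == 16                               # matches the commanded register address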
Aluriak/ACCC
accc/langspec/langspec.py
https://github.com/Aluriak/ACCC/blob/9092f985bef7ed784264c86bc19c980f4ce2309f/accc/langspec/langspec.py#L63-L138
def translated(structure, values, lang_spec): """Return code associated to given structure and values, translate with given language specification.""" # LANGUAGE SPECS indentation = '\t' endline = '\n' object_code = "" stack = [] # define shortcuts to behavior push = lambda x: stack.append(x) pop = lambda : stack.pop() last = lambda : stack[-1] if len(stack) > 0 else ' ' def indented_code(s, level, end): return lang_spec[INDENTATION]*level + s + end # recreate python structure, and replace type by value level = 0 CONDITIONS = [LEXEM_TYPE_PREDICAT, LEXEM_TYPE_CONDITION] ACTION = LEXEM_TYPE_ACTION DOWNLEVEL = LEXEM_TYPE_DOWNLEVEL for lexem_type in structure: if lexem_type is ACTION: # place previous conditions if necessary if last() in CONDITIONS: # construct conditions lines value, values = values[0:len(stack)], values[len(stack):] object_code += (indented_code(lang_spec[BEG_CONDITION] + lang_spec[LOGICAL_AND].join(value) + lang_spec[END_CONDITION], level, lang_spec[END_LINE] )) # if provided, print the begin block token on a new line if len(lang_spec[BEG_BLOCK]) > 0: object_code += indented_code( lang_spec[BEG_BLOCK], level, lang_spec[END_LINE] ) stack = [] level += 1 # and place the action object_code += indented_code( lang_spec[BEG_ACTION] + values[0], level, lang_spec[END_ACTION]+lang_spec[END_LINE] ) values = values[1:] elif lexem_type in CONDITIONS: push(lexem_type) elif lexem_type is DOWNLEVEL: if last() not in CONDITIONS: # down level, and add a END_BLOCK only if needed level -= 1 if level >= 0: object_code += indented_code( lang_spec[END_BLOCK], level, lang_spec[END_LINE] ) else: level = 0 # add END_BLOCK while needed for reach level 0 while level > 0: level -= 1 if level >= 0: object_code += indented_code( lang_spec[END_BLOCK], level, lang_spec[END_LINE] ) else: level = 0 # Finished ! return object_code
[ "def", "translated", "(", "structure", ",", "values", ",", "lang_spec", ")", ":", "# LANGUAGE SPECS", "indentation", "=", "'\\t'", "endline", "=", "'\\n'", "object_code", "=", "\"\"", "stack", "=", "[", "]", "# define shortcuts to behavior", "push", "=", "lambda", "x", ":", "stack", ".", "append", "(", "x", ")", "pop", "=", "lambda", ":", "stack", ".", "pop", "(", ")", "last", "=", "lambda", ":", "stack", "[", "-", "1", "]", "if", "len", "(", "stack", ")", ">", "0", "else", "' '", "def", "indented_code", "(", "s", ",", "level", ",", "end", ")", ":", "return", "lang_spec", "[", "INDENTATION", "]", "*", "level", "+", "s", "+", "end", "# recreate python structure, and replace type by value", "level", "=", "0", "CONDITIONS", "=", "[", "LEXEM_TYPE_PREDICAT", ",", "LEXEM_TYPE_CONDITION", "]", "ACTION", "=", "LEXEM_TYPE_ACTION", "DOWNLEVEL", "=", "LEXEM_TYPE_DOWNLEVEL", "for", "lexem_type", "in", "structure", ":", "if", "lexem_type", "is", "ACTION", ":", "# place previous conditions if necessary", "if", "last", "(", ")", "in", "CONDITIONS", ":", "# construct conditions lines", "value", ",", "values", "=", "values", "[", "0", ":", "len", "(", "stack", ")", "]", ",", "values", "[", "len", "(", "stack", ")", ":", "]", "object_code", "+=", "(", "indented_code", "(", "lang_spec", "[", "BEG_CONDITION", "]", "+", "lang_spec", "[", "LOGICAL_AND", "]", ".", "join", "(", "value", ")", "+", "lang_spec", "[", "END_CONDITION", "]", ",", "level", ",", "lang_spec", "[", "END_LINE", "]", ")", ")", "# if provided, print the begin block token on a new line", "if", "len", "(", "lang_spec", "[", "BEG_BLOCK", "]", ")", ">", "0", ":", "object_code", "+=", "indented_code", "(", "lang_spec", "[", "BEG_BLOCK", "]", ",", "level", ",", "lang_spec", "[", "END_LINE", "]", ")", "stack", "=", "[", "]", "level", "+=", "1", "# and place the action", "object_code", "+=", "indented_code", "(", "lang_spec", "[", "BEG_ACTION", "]", "+", "values", "[", "0", "]", ",", "level", ",", "lang_spec", "[", "END_ACTION", "]", "+", "lang_spec", "[", "END_LINE", "]", ")", "values", "=", "values", "[", "1", ":", "]", "elif", "lexem_type", "in", "CONDITIONS", ":", "push", "(", "lexem_type", ")", "elif", "lexem_type", "is", "DOWNLEVEL", ":", "if", "last", "(", ")", "not", "in", "CONDITIONS", ":", "# down level, and add a END_BLOCK only if needed", "level", "-=", "1", "if", "level", ">=", "0", ":", "object_code", "+=", "indented_code", "(", "lang_spec", "[", "END_BLOCK", "]", ",", "level", ",", "lang_spec", "[", "END_LINE", "]", ")", "else", ":", "level", "=", "0", "# add END_BLOCK while needed for reach level 0", "while", "level", ">", "0", ":", "level", "-=", "1", "if", "level", ">=", "0", ":", "object_code", "+=", "indented_code", "(", "lang_spec", "[", "END_BLOCK", "]", ",", "level", ",", "lang_spec", "[", "END_LINE", "]", ")", "else", ":", "level", "=", "0", "# Finished !", "return", "object_code" ]
Return code associated with the given structure and values, translated with the given language specification.
[ "Return", "code", "associated", "to", "given", "structure", "and", "values", "translate", "with", "given", "language", "specification", "." ]
python
train
34.631579
buildinspace/peru
peru/cache.py
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/cache.py#L527-L536
def _format_file_lines(files): '''Given a list of filenames that we're about to print, limit it to a reasonable number of lines.''' LINES_TO_SHOW = 10 if len(files) <= LINES_TO_SHOW: lines = '\n'.join(files) else: lines = ('\n'.join(files[:LINES_TO_SHOW - 1]) + '\n...{} total'.format( len(files))) return lines
[ "def", "_format_file_lines", "(", "files", ")", ":", "LINES_TO_SHOW", "=", "10", "if", "len", "(", "files", ")", "<=", "LINES_TO_SHOW", ":", "lines", "=", "'\\n'", ".", "join", "(", "files", ")", "else", ":", "lines", "=", "(", "'\\n'", ".", "join", "(", "files", "[", ":", "LINES_TO_SHOW", "-", "1", "]", ")", "+", "'\\n...{} total'", ".", "format", "(", "len", "(", "files", ")", ")", ")", "return", "lines" ]
Given a list of filenames that we're about to print, limit it to a reasonable number of lines.
[ "Given", "a", "list", "of", "filenames", "that", "we", "re", "about", "to", "print", "limit", "it", "to", "a", "reasonable", "number", "of", "lines", "." ]
python
train
35.4
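A quick illustration of the truncation behavior, runnable against the function above:

files = ['file%d' % i for i in range(12)]
print(_format_file_lines(files))
# prints file0..file8 on their own lines, then '...12 total'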
rwl/pylon
pylon/main.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/main.py#L48-L74
def read_case(input, format=None): """ Returns a case object from the given input file object. The data format may be optionally specified. """ # Map of data file types to readers. format_map = {"matpower": MATPOWERReader, "psse": PSSEReader, "pickle": PickleReader} # Read case data. if format_map.has_key(format): reader_klass = format_map[format] reader = reader_klass() case = reader.read(input) else: # Try each of the readers at random. for reader_klass in format_map.values(): reader = reader_klass() try: case = reader.read(input) if case is not None: break except: pass else: case = None return case
[ "def", "read_case", "(", "input", ",", "format", "=", "None", ")", ":", "# Map of data file types to readers.", "format_map", "=", "{", "\"matpower\"", ":", "MATPOWERReader", ",", "\"psse\"", ":", "PSSEReader", ",", "\"pickle\"", ":", "PickleReader", "}", "# Read case data.", "if", "format_map", ".", "has_key", "(", "format", ")", ":", "reader_klass", "=", "format_map", "[", "format", "]", "reader", "=", "reader_klass", "(", ")", "case", "=", "reader", ".", "read", "(", "input", ")", "else", ":", "# Try each of the readers at random.", "for", "reader_klass", "in", "format_map", ".", "values", "(", ")", ":", "reader", "=", "reader_klass", "(", ")", "try", ":", "case", "=", "reader", ".", "read", "(", "input", ")", "if", "case", "is", "not", "None", ":", "break", "except", ":", "pass", "else", ":", "case", "=", "None", "return", "case" ]
Returns a case object from the given input file object. The data format may be optionally specified.
[ "Returns", "a", "case", "object", "from", "the", "given", "input", "file", "object", ".", "The", "data", "format", "may", "be", "optionally", "specified", "." ]
python
train
29.333333
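A hedged usage sketch; the file name is a placeholder and the call assumes the MATPOWER reader is importable in this environment:

with open('case30.m') as f:          # a MATPOWER case file, for example
    case = read_case(f, format='matpower')
if case is None:
    print('could not parse the case file')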
manahl/arctic
arctic/hosts.py
https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/hosts.py#L21-L40
def get_arctic_lib(connection_string, **kwargs): """ Returns a mongo library for the given connection string Parameters --------- connection_string: `str` Format must be one of the following: library@trading for known mongo servers library@hostname:port Returns: -------- Arctic library """ m = CONNECTION_STR.match(connection_string) if not m: raise ValueError("connection string incorrectly formed: %s" % connection_string) library, host = m.group(1), m.group(2) return _get_arctic(host, **kwargs)[library]
[ "def", "get_arctic_lib", "(", "connection_string", ",", "*", "*", "kwargs", ")", ":", "m", "=", "CONNECTION_STR", ".", "match", "(", "connection_string", ")", "if", "not", "m", ":", "raise", "ValueError", "(", "\"connection string incorrectly formed: %s\"", "%", "connection_string", ")", "library", ",", "host", "=", "m", ".", "group", "(", "1", ")", ",", "m", ".", "group", "(", "2", ")", "return", "_get_arctic", "(", "host", ",", "*", "*", "kwargs", ")", "[", "library", "]" ]
Returns a mongo library for the given connection string Parameters --------- connection_string: `str` Format must be one of the following: library@trading for known mongo servers library@hostname:port Returns: -------- Arctic library
[ "Returns", "a", "mongo", "library", "for", "the", "given", "connection", "string" ]
python
train
29.15
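Usage following the documented connection-string grammar; the host and library names are illustrative:

from arctic.hosts import get_arctic_lib

library = get_arctic_lib('mylib@localhost:27017')
# roughly equivalent to _get_arctic('localhost:27017')['mylib']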
hhatto/pgmagick
pgmagick/api.py
https://github.com/hhatto/pgmagick/blob/5dce5fa4681400b4c059431ad69233e6a3e5799a/pgmagick/api.py#L939-L946
def stroke_antialias(self, flag=True): """stroke antialias :param flag: True or False. (default is True) :type flag: bool """ antialias = pgmagick.DrawableStrokeAntialias(flag) self.drawer.append(antialias)
[ "def", "stroke_antialias", "(", "self", ",", "flag", "=", "True", ")", ":", "antialias", "=", "pgmagick", ".", "DrawableStrokeAntialias", "(", "flag", ")", "self", ".", "drawer", ".", "append", "(", "antialias", ")" ]
stroke antialias :param flag: True or False. (default is True) :type flag: bool
[ "stroke", "antialias" ]
python
valid
31
bitesofcode/projex
projex/urls.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/urls.py#L101-L112
def register(scheme):
    """
    Registers a new scheme to the urlparser.

    :param scheme | <str>
    """
    scheme = nstr(scheme)

    urlparse.uses_fragment.append(scheme)
    urlparse.uses_netloc.append(scheme)
    urlparse.uses_params.append(scheme)
    urlparse.uses_query.append(scheme)
    urlparse.uses_relative.append(scheme)
[ "def", "register", "(", "scheme", ")", ":", "scheme", "=", "nstr", "(", "scheme", ")", "urlparse", ".", "uses_fragment", ".", "append", "(", "scheme", ")", "urlparse", ".", "uses_netloc", ".", "append", "(", "scheme", ")", "urlparse", ".", "uses_params", ".", "append", "(", "scheme", ")", "urlparse", ".", "uses_query", ".", "append", "(", "scheme", ")", "urlparse", ".", "uses_relative", ".", "append", "(", "scheme", ")" ]
Registers a new scheme to the urlparser.

:param scheme | <str>
[ "Registers", "a", "new", "scheme", "to", "the", "urlparser", ".", ":", "param", "schema", "|", "<str", ">" ]
python
train
28
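A usage sketch; the scheme name is illustrative:

register('myscheme')
# urlparse now treats 'myscheme' URLs as supporting netloc, params,
# query strings, fragments, and relative resolution.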
tanghaibao/jcvi
jcvi/annotation/stats.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/stats.py#L69-L118
def summary(args): """ %prog summary gffile fastafile Print summary stats, including: - Gene/Exon/Intron - Number - Average size (bp) - Median size (bp) - Total length (Mb) - % of genome - % GC """ p = OptionParser(summary.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gff_file, ref = args s = Fasta(ref) g = make_index(gff_file) geneseqs, exonseqs, intronseqs = [], [], [] # Calc % GC for f in g.features_of_type("gene"): fid = f.id fseq = s.sequence({'chr': f.chrom, 'start': f.start, 'stop': f.stop}) geneseqs.append(fseq) exons = set((c.chrom, c.start, c.stop) for c in g.children(fid, 2) \ if c.featuretype == "exon") exons = list(exons) for chrom, start, stop in exons: fseq = s.sequence({'chr': chrom, 'start': start, 'stop': stop}) exonseqs.append(fseq) introns = range_interleave(exons) for chrom, start, stop in introns: fseq = s.sequence({'chr': chrom, 'start': start, 'stop': stop}) intronseqs.append(fseq) r = {} # Report for t, tseqs in zip(("Gene", "Exon", "Intron"), (geneseqs, exonseqs, intronseqs)): tsizes = [len(x) for x in tseqs] tsummary = SummaryStats(tsizes, dtype="int") r[t, "Number"] = tsummary.size r[t, "Average size (bp)"] = tsummary.mean r[t, "Median size (bp)"] = tsummary.median r[t, "Total length (Mb)"] = human_size(tsummary.sum, precision=0, target="Mb") r[t, "% of genome"] = percentage(tsummary.sum, s.totalsize, precision=0, mode=-1) r[t, "% GC"] = gc(tseqs) print(tabulate(r), file=sys.stderr)
[ "def", "summary", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "summary", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "gff_file", ",", "ref", "=", "args", "s", "=", "Fasta", "(", "ref", ")", "g", "=", "make_index", "(", "gff_file", ")", "geneseqs", ",", "exonseqs", ",", "intronseqs", "=", "[", "]", ",", "[", "]", ",", "[", "]", "# Calc % GC", "for", "f", "in", "g", ".", "features_of_type", "(", "\"gene\"", ")", ":", "fid", "=", "f", ".", "id", "fseq", "=", "s", ".", "sequence", "(", "{", "'chr'", ":", "f", ".", "chrom", ",", "'start'", ":", "f", ".", "start", ",", "'stop'", ":", "f", ".", "stop", "}", ")", "geneseqs", ".", "append", "(", "fseq", ")", "exons", "=", "set", "(", "(", "c", ".", "chrom", ",", "c", ".", "start", ",", "c", ".", "stop", ")", "for", "c", "in", "g", ".", "children", "(", "fid", ",", "2", ")", "if", "c", ".", "featuretype", "==", "\"exon\"", ")", "exons", "=", "list", "(", "exons", ")", "for", "chrom", ",", "start", ",", "stop", "in", "exons", ":", "fseq", "=", "s", ".", "sequence", "(", "{", "'chr'", ":", "chrom", ",", "'start'", ":", "start", ",", "'stop'", ":", "stop", "}", ")", "exonseqs", ".", "append", "(", "fseq", ")", "introns", "=", "range_interleave", "(", "exons", ")", "for", "chrom", ",", "start", ",", "stop", "in", "introns", ":", "fseq", "=", "s", ".", "sequence", "(", "{", "'chr'", ":", "chrom", ",", "'start'", ":", "start", ",", "'stop'", ":", "stop", "}", ")", "intronseqs", ".", "append", "(", "fseq", ")", "r", "=", "{", "}", "# Report", "for", "t", ",", "tseqs", "in", "zip", "(", "(", "\"Gene\"", ",", "\"Exon\"", ",", "\"Intron\"", ")", ",", "(", "geneseqs", ",", "exonseqs", ",", "intronseqs", ")", ")", ":", "tsizes", "=", "[", "len", "(", "x", ")", "for", "x", "in", "tseqs", "]", "tsummary", "=", "SummaryStats", "(", "tsizes", ",", "dtype", "=", "\"int\"", ")", "r", "[", "t", ",", "\"Number\"", "]", "=", "tsummary", ".", "size", "r", "[", "t", ",", "\"Average size (bp)\"", "]", "=", "tsummary", ".", "mean", "r", "[", "t", ",", "\"Median size (bp)\"", "]", "=", "tsummary", ".", "median", "r", "[", "t", ",", "\"Total length (Mb)\"", "]", "=", "human_size", "(", "tsummary", ".", "sum", ",", "precision", "=", "0", ",", "target", "=", "\"Mb\"", ")", "r", "[", "t", ",", "\"% of genome\"", "]", "=", "percentage", "(", "tsummary", ".", "sum", ",", "s", ".", "totalsize", ",", "precision", "=", "0", ",", "mode", "=", "-", "1", ")", "r", "[", "t", ",", "\"% GC\"", "]", "=", "gc", "(", "tseqs", ")", "print", "(", "tabulate", "(", "r", ")", ",", "file", "=", "sys", ".", "stderr", ")" ]
%prog summary gffile fastafile Print summary stats, including: - Gene/Exon/Intron - Number - Average size (bp) - Median size (bp) - Total length (Mb) - % of genome - % GC
[ "%prog", "summary", "gffile", "fastafile" ]
python
train
34.52
chaoss/grimoirelab-sortinghat
sortinghat/cmd/affiliate.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/cmd/affiliate.py#L62-L69
def run(self, *args): """Affiliate unique identities to organizations.""" self.parser.parse_args(args) code = self.affiliate() return code
[ "def", "run", "(", "self", ",", "*", "args", ")", ":", "self", ".", "parser", ".", "parse_args", "(", "args", ")", "code", "=", "self", ".", "affiliate", "(", ")", "return", "code" ]
Affiliate unique identities to organizations.
[ "Affiliate", "unique", "identities", "to", "organizations", "." ]
python
train
20.75
SuperCowPowers/workbench
workbench/server/workbench_server.py
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/server/workbench_server.py#L756-L758
def _store_work_results(self, results, collection, md5): """ Internal: Stores the work results of a worker.""" self.data_store.store_work_results(results, collection, md5)
[ "def", "_store_work_results", "(", "self", ",", "results", ",", "collection", ",", "md5", ")", ":", "self", ".", "data_store", ".", "store_work_results", "(", "results", ",", "collection", ",", "md5", ")" ]
Internal: Stores the work results of a worker.
[ "Internal", ":", "Stores", "the", "work", "results", "of", "a", "worker", "." ]
python
train
61.666667
sentinel-hub/sentinelhub-py
sentinelhub/os_utils.py
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/os_utils.py#L21-L30
def get_folder_list(folder='.'): """ Get list of sub-folders contained in input folder :param folder: input folder to list sub-folders. Default is ``'.'`` :type folder: str :return: list of sub-folders :rtype: list(str) """ dir_list = get_content_list(folder) return [f for f in dir_list if not os.path.isfile(os.path.join(folder, f))]
[ "def", "get_folder_list", "(", "folder", "=", "'.'", ")", ":", "dir_list", "=", "get_content_list", "(", "folder", ")", "return", "[", "f", "for", "f", "in", "dir_list", "if", "not", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "folder", ",", "f", ")", ")", "]" ]
Get list of sub-folders contained in input folder :param folder: input folder to list sub-folders. Default is ``'.'`` :type folder: str :return: list of sub-folders :rtype: list(str)
[ "Get", "list", "of", "sub", "-", "folders", "contained", "in", "input", "folder" ]
python
train
35.9
saltstack/salt
salt/states/rbenv.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/rbenv.py#L109-L147
def installed(name, default=False, user=None): ''' Verify that the specified ruby is installed with rbenv. Rbenv is installed if necessary. name The version of ruby to install default : False Whether to make this ruby the default. user: None The user to run rbenv as. .. versionadded:: 0.17.0 .. versionadded:: 0.16.0 ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} rbenv_installed_ret = copy.deepcopy(ret) if name.startswith('ruby-'): name = re.sub(r'^ruby-', '', name) if __opts__['test']: ret = _ruby_installed(ret, name, user=user) if not ret['result']: ret['comment'] = 'Ruby {0} is set to be installed'.format(name) else: ret['comment'] = 'Ruby {0} is already installed'.format(name) return ret rbenv_installed_ret = _check_and_install_rbenv(rbenv_installed_ret, user) if rbenv_installed_ret['result'] is False: ret['result'] = False ret['comment'] = 'Rbenv failed to install' return ret else: return _check_and_install_ruby(ret, name, default, user=user)
[ "def", "installed", "(", "name", ",", "default", "=", "False", ",", "user", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "rbenv_installed_ret", "=", "copy", ".", "deepcopy", "(", "ret", ")", "if", "name", ".", "startswith", "(", "'ruby-'", ")", ":", "name", "=", "re", ".", "sub", "(", "r'^ruby-'", ",", "''", ",", "name", ")", "if", "__opts__", "[", "'test'", "]", ":", "ret", "=", "_ruby_installed", "(", "ret", ",", "name", ",", "user", "=", "user", ")", "if", "not", "ret", "[", "'result'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Ruby {0} is set to be installed'", ".", "format", "(", "name", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "'Ruby {0} is already installed'", ".", "format", "(", "name", ")", "return", "ret", "rbenv_installed_ret", "=", "_check_and_install_rbenv", "(", "rbenv_installed_ret", ",", "user", ")", "if", "rbenv_installed_ret", "[", "'result'", "]", "is", "False", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Rbenv failed to install'", "return", "ret", "else", ":", "return", "_check_and_install_ruby", "(", "ret", ",", "name", ",", "default", ",", "user", "=", "user", ")" ]
Verify that the specified ruby is installed with rbenv. Rbenv is installed if necessary. name The version of ruby to install default : False Whether to make this ruby the default. user: None The user to run rbenv as. .. versionadded:: 0.17.0 .. versionadded:: 0.16.0
[ "Verify", "that", "the", "specified", "ruby", "is", "installed", "with", "rbenv", ".", "Rbenv", "is", "installed", "if", "necessary", "." ]
python
train
29.282051
ga4gh/ga4gh-server
ga4gh/server/frontend.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/frontend.py#L179-L189
def getVariantAnnotationSets(self, datasetId): """ Returns the list of ReferenceSets for this server. """ # TODO this should be displayed per-variant set, not per dataset. variantAnnotationSets = [] dataset = app.backend.getDataRepository().getDataset(datasetId) for variantSet in dataset.getVariantSets(): variantAnnotationSets.extend( variantSet.getVariantAnnotationSets()) return variantAnnotationSets
[ "def", "getVariantAnnotationSets", "(", "self", ",", "datasetId", ")", ":", "# TODO this should be displayed per-variant set, not per dataset.", "variantAnnotationSets", "=", "[", "]", "dataset", "=", "app", ".", "backend", ".", "getDataRepository", "(", ")", ".", "getDataset", "(", "datasetId", ")", "for", "variantSet", "in", "dataset", ".", "getVariantSets", "(", ")", ":", "variantAnnotationSets", ".", "extend", "(", "variantSet", ".", "getVariantAnnotationSets", "(", ")", ")", "return", "variantAnnotationSets" ]
Returns the list of ReferenceSets for this server.
[ "Returns", "the", "list", "of", "ReferenceSets", "for", "this", "server", "." ]
python
train
44.181818
infoxchange/supervisor-logging
supervisor_logging/__init__.py
https://github.com/infoxchange/supervisor-logging/blob/2d4411378fb52799bc506a68f1a914cbe671b13b/supervisor_logging/__init__.py#L49-L56
def formatTime(self, record, datefmt=None): """ Format time, including milliseconds. """ formatted = super(PalletFormatter, self).formatTime( record, datefmt=datefmt) return formatted + '.%03dZ' % record.msecs
[ "def", "formatTime", "(", "self", ",", "record", ",", "datefmt", "=", "None", ")", ":", "formatted", "=", "super", "(", "PalletFormatter", ",", "self", ")", ".", "formatTime", "(", "record", ",", "datefmt", "=", "datefmt", ")", "return", "formatted", "+", "'.%03dZ'", "%", "record", ".", "msecs" ]
Format time, including milliseconds.
[ "Format", "time", "including", "milliseconds", "." ]
python
train
31.875
materialsproject/pymatgen
pymatgen/util/string.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/util/string.py#L109-L121
def latexify_spacegroup(spacegroup_symbol): """ Generates a latex formatted spacegroup. E.g., P2_1/c is converted to P2$_{1}$/c and P-1 is converted to P$\\overline{1}$. Args: spacegroup_symbol (str): A spacegroup symbol Returns: A latex formatted spacegroup with proper subscripts and overlines. """ sym = re.sub(r"_(\d+)", r"$_{\1}$", spacegroup_symbol) return re.sub(r"-(\d)", r"$\\overline{\1}$", sym)
[ "def", "latexify_spacegroup", "(", "spacegroup_symbol", ")", ":", "sym", "=", "re", ".", "sub", "(", "r\"_(\\d+)\"", ",", "r\"$_{\\1}$\"", ",", "spacegroup_symbol", ")", "return", "re", ".", "sub", "(", "r\"-(\\d)\"", ",", "r\"$\\\\overline{\\1}$\"", ",", "sym", ")" ]
Generates a latex formatted spacegroup. E.g., P2_1/c is converted to P2$_{1}$/c and P-1 is converted to P$\\overline{1}$. Args: spacegroup_symbol (str): A spacegroup symbol Returns: A latex formatted spacegroup with proper subscripts and overlines.
[ "Generates", "a", "latex", "formatted", "spacegroup", ".", "E", ".", "g", ".", "P2_1", "/", "c", "is", "converted", "to", "P2$_", "{", "1", "}", "$", "/", "c", "and", "P", "-", "1", "is", "converted", "to", "P$", "\\\\", "overline", "{", "1", "}", "$", "." ]
python
train
34.076923
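Two checks matching the documented examples, runnable against the function above:

assert latexify_spacegroup('P2_1/c') == 'P2$_{1}$/c'
assert latexify_spacegroup('P-1') == 'P$\\overline{1}$'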
pyQode/pyqode.core
pyqode/core/widgets/filesystem_treeview.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/filesystem_treeview.py#L269-L277
def filePath(self, index): """ Gets the file path of the item at the specified ``index``. :param index: item index - QModelIndex :return: str """ return self._fs_model_source.filePath( self._fs_model_proxy.mapToSource(index))
[ "def", "filePath", "(", "self", ",", "index", ")", ":", "return", "self", ".", "_fs_model_source", ".", "filePath", "(", "self", ".", "_fs_model_proxy", ".", "mapToSource", "(", "index", ")", ")" ]
Gets the file path of the item at the specified ``index``. :param index: item index - QModelIndex :return: str
[ "Gets", "the", "file", "path", "of", "the", "item", "at", "the", "specified", "index", "." ]
python
train
30.888889
edx/edx-django-utils
edx_django_utils/monitoring/middleware.py
https://github.com/edx/edx-django-utils/blob/16cb4ac617e53c572bf68ccd19d24afeff1ca769/edx_django_utils/monitoring/middleware.py#L177-L202
def _log_diff_memory_data(self, prefix, new_memory_data, old_memory_data): """ Computes and logs the difference in memory utilization between the given old and new memory data. """ def _vmem_used(memory_data): return memory_data['machine_data'].used def _process_mem_percent(memory_data): return memory_data['process_data']['memory_percent'] def _process_rss(memory_data): return memory_data['process_data']['memory_info'].rss def _process_vms(memory_data): return memory_data['process_data']['memory_info'].vms if new_memory_data and old_memory_data: log.info( u"%s Diff Vmem used: %s, Diff percent memory: %s, Diff rss: %s, Diff vms: %s", prefix, _vmem_used(new_memory_data) - _vmem_used(old_memory_data), _process_mem_percent(new_memory_data) - _process_mem_percent(old_memory_data), _process_rss(new_memory_data) - _process_rss(old_memory_data), _process_vms(new_memory_data) - _process_vms(old_memory_data), )
[ "def", "_log_diff_memory_data", "(", "self", ",", "prefix", ",", "new_memory_data", ",", "old_memory_data", ")", ":", "def", "_vmem_used", "(", "memory_data", ")", ":", "return", "memory_data", "[", "'machine_data'", "]", ".", "used", "def", "_process_mem_percent", "(", "memory_data", ")", ":", "return", "memory_data", "[", "'process_data'", "]", "[", "'memory_percent'", "]", "def", "_process_rss", "(", "memory_data", ")", ":", "return", "memory_data", "[", "'process_data'", "]", "[", "'memory_info'", "]", ".", "rss", "def", "_process_vms", "(", "memory_data", ")", ":", "return", "memory_data", "[", "'process_data'", "]", "[", "'memory_info'", "]", ".", "vms", "if", "new_memory_data", "and", "old_memory_data", ":", "log", ".", "info", "(", "u\"%s Diff Vmem used: %s, Diff percent memory: %s, Diff rss: %s, Diff vms: %s\"", ",", "prefix", ",", "_vmem_used", "(", "new_memory_data", ")", "-", "_vmem_used", "(", "old_memory_data", ")", ",", "_process_mem_percent", "(", "new_memory_data", ")", "-", "_process_mem_percent", "(", "old_memory_data", ")", ",", "_process_rss", "(", "new_memory_data", ")", "-", "_process_rss", "(", "old_memory_data", ")", ",", "_process_vms", "(", "new_memory_data", ")", "-", "_process_vms", "(", "old_memory_data", ")", ",", ")" ]
Computes and logs the difference in memory utilization between the given old and new memory data.
[ "Computes", "and", "logs", "the", "difference", "in", "memory", "utilization", "between", "the", "given", "old", "and", "new", "memory", "data", "." ]
python
train
43.576923
bhmm/bhmm
bhmm/hmm/generic_hmm.py
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/hmm/generic_hmm.py#L433-L479
def generate_synthetic_state_trajectory(self, nsteps, initial_Pi=None, start=None, stop=None, dtype=np.int32): """Generate a synthetic state trajectory. Parameters ---------- nsteps : int Number of steps in the synthetic state trajectory to be generated. initial_Pi : np.array of shape (nstates,), optional, default=None The initial probability distribution, if samples are not to be taken from the intrinsic initial distribution. start : int starting state. Exclusive with initial_Pi stop : int stopping state. Trajectory will terminate when reaching the stopping state before length number of steps. dtype : numpy.dtype, optional, default=numpy.int32 The numpy dtype to use to store the synthetic trajectory. Returns ------- states : np.array of shape (nstates,) of dtype=np.int32 The trajectory of hidden states, with each element in range(0,nstates). Examples -------- Generate a synthetic state trajectory of a specified length. >>> from bhmm import testsystems >>> model = testsystems.dalton_model() >>> states = model.generate_synthetic_state_trajectory(nsteps=100) """ # consistency check if initial_Pi is not None and start is not None: raise ValueError('Arguments initial_Pi and start are exclusive. Only set one of them.') # Generate first state sample. if start is None: if initial_Pi is not None: start = np.random.choice(range(self._nstates), size=1, p=initial_Pi) else: start = np.random.choice(range(self._nstates), size=1, p=self._Pi) # Generate and return trajectory from msmtools import generation as msmgen traj = msmgen.generate_traj(self.transition_matrix, nsteps, start=start, stop=stop, dt=1) return traj.astype(dtype)
[ "def", "generate_synthetic_state_trajectory", "(", "self", ",", "nsteps", ",", "initial_Pi", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "dtype", "=", "np", ".", "int32", ")", ":", "# consistency check", "if", "initial_Pi", "is", "not", "None", "and", "start", "is", "not", "None", ":", "raise", "ValueError", "(", "'Arguments initial_Pi and start are exclusive. Only set one of them.'", ")", "# Generate first state sample.", "if", "start", "is", "None", ":", "if", "initial_Pi", "is", "not", "None", ":", "start", "=", "np", ".", "random", ".", "choice", "(", "range", "(", "self", ".", "_nstates", ")", ",", "size", "=", "1", ",", "p", "=", "initial_Pi", ")", "else", ":", "start", "=", "np", ".", "random", ".", "choice", "(", "range", "(", "self", ".", "_nstates", ")", ",", "size", "=", "1", ",", "p", "=", "self", ".", "_Pi", ")", "# Generate and return trajectory", "from", "msmtools", "import", "generation", "as", "msmgen", "traj", "=", "msmgen", ".", "generate_traj", "(", "self", ".", "transition_matrix", ",", "nsteps", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "dt", "=", "1", ")", "return", "traj", ".", "astype", "(", "dtype", ")" ]
Generate a synthetic state trajectory.

Parameters
----------
nsteps : int
    Number of steps in the synthetic state trajectory to be generated.
initial_Pi : np.array of shape (nstates,), optional, default=None
    The initial probability distribution, if samples are not to be taken from the intrinsic
    initial distribution.
start : int
    starting state. Exclusive with initial_Pi
stop : int
    stopping state. The trajectory will terminate when the stopping state is reached before
    `nsteps` steps have been taken.
dtype : numpy.dtype, optional, default=numpy.int32
    The numpy dtype to use to store the synthetic trajectory.

Returns
-------
states : np.array of shape (nsteps,) of dtype=np.int32
    The trajectory of hidden states, with each element in range(0,nstates).

Examples
--------

Generate a synthetic state trajectory of a specified length.

>>> from bhmm import testsystems
>>> model = testsystems.dalton_model()
>>> states = model.generate_synthetic_state_trajectory(nsteps=100)
[ "Generate", "a", "synthetic", "state", "trajectory", "." ]
python
train
41.787234
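The docstring's own example covers the default case; here is a sketch of the `start`/`stop` arguments as well. The values are illustrative, and the assumption that the dalton model has at least three hidden states is mine, not the source's.

```python
from bhmm import testsystems

model = testsystems.dalton_model()
# Start deterministically in state 0 and stop early if state 2 is reached
# (assumes the model has at least three hidden states).
states = model.generate_synthetic_state_trajectory(nsteps=1000, start=0, stop=2)
print(states.shape, states.dtype)
```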
CivicSpleen/ambry
ambry/cli/__init__.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/cli/__init__.py#L109-L123
def get_extra_commands(): """Use the configuration to discover additional CLI packages to load""" from ambry.run import find_config_file from ambry.dbexceptions import ConfigurationError from ambry.util import yaml try: plugins_dir = find_config_file('cli.yaml') except ConfigurationError: return [] with open(plugins_dir) as f: cli_modules = yaml.load(f) return cli_modules
[ "def", "get_extra_commands", "(", ")", ":", "from", "ambry", ".", "run", "import", "find_config_file", "from", "ambry", ".", "dbexceptions", "import", "ConfigurationError", "from", "ambry", ".", "util", "import", "yaml", "try", ":", "plugins_dir", "=", "find_config_file", "(", "'cli.yaml'", ")", "except", "ConfigurationError", ":", "return", "[", "]", "with", "open", "(", "plugins_dir", ")", "as", "f", ":", "cli_modules", "=", "yaml", ".", "load", "(", "f", ")", "return", "cli_modules" ]
Use the configuration to discover additional CLI packages to load
[ "Use", "the", "configuration", "to", "discover", "additional", "CLI", "packages", "to", "load" ]
python
train
27.933333
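A sketch of how this loader might be fed; the YAML schema (a plain list of importable module names) and the file's contents are assumptions, since only the filename `cli.yaml` appears in the source.

```python
# cli.yaml (assumed contents -- a YAML list of CLI module names):
#   - mypackage.ambry_cli
#   - otherpackage.commands

from ambry.cli import get_extra_commands

for module_name in get_extra_commands() or []:  # `or []` guards an empty YAML file
    print('extra CLI module:', module_name)
```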
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/gloo/wrappers.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/gloo/wrappers.py#L253-L271
def set_blend_func(self, srgb='one', drgb='zero', salpha=None, dalpha=None): """Specify pixel arithmetic for RGB and alpha Parameters ---------- srgb : str Source RGB factor. drgb : str Destination RGB factor. salpha : str | None Source alpha factor. If None, ``srgb`` is used. dalpha : str Destination alpha factor. If None, ``drgb`` is used. """ salpha = srgb if salpha is None else salpha dalpha = drgb if dalpha is None else dalpha self.glir.command('FUNC', 'glBlendFuncSeparate', srgb, drgb, salpha, dalpha)
[ "def", "set_blend_func", "(", "self", ",", "srgb", "=", "'one'", ",", "drgb", "=", "'zero'", ",", "salpha", "=", "None", ",", "dalpha", "=", "None", ")", ":", "salpha", "=", "srgb", "if", "salpha", "is", "None", "else", "salpha", "dalpha", "=", "drgb", "if", "dalpha", "is", "None", "else", "dalpha", "self", ".", "glir", ".", "command", "(", "'FUNC'", ",", "'glBlendFuncSeparate'", ",", "srgb", ",", "drgb", ",", "salpha", ",", "dalpha", ")" ]
Specify pixel arithmetic for RGB and alpha Parameters ---------- srgb : str Source RGB factor. drgb : str Destination RGB factor. salpha : str | None Source alpha factor. If None, ``srgb`` is used. dalpha : str Destination alpha factor. If None, ``drgb`` is used.
[ "Specify", "pixel", "arithmetic", "for", "RGB", "and", "alpha", "Parameters", "----------", "srgb", ":", "str", "Source", "RGB", "factor", ".", "drgb", ":", "str", "Destination", "RGB", "factor", ".", "salpha", ":", "str", "|", "None", "Source", "alpha", "factor", ".", "If", "None", "srgb", "is", "used", ".", "dalpha", ":", "str", "Destination", "alpha", "factor", ".", "If", "None", "drgb", "is", "used", "." ]
python
train
36.052632
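A minimal sketch of the conventional transparency setup this wrapper enables, using vispy's module-level gloo functions; the upstream vispy API is assumed to match this vendored copy.

```python
from vispy import gloo

gloo.set_state(blend=True)                               # blending must be enabled first
gloo.set_blend_func('src_alpha', 'one_minus_src_alpha')  # salpha/dalpha default to srgb/drgb
```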
MillionIntegrals/vel
vel/rl/algo/policy_gradient/trpo.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/algo/policy_gradient/trpo.py#L263-L272
def create(max_kl, cg_iters, line_search_iters, cg_damping, entropy_coef, vf_iters, discount_factor, gae_lambda=1.0, improvement_acceptance_ratio=0.1, max_grad_norm=0.5): """ Vel factory function """ return TrpoPolicyGradient( max_kl, int(cg_iters), int(line_search_iters), cg_damping, entropy_coef, vf_iters, discount_factor=discount_factor, gae_lambda=gae_lambda, improvement_acceptance_ratio=improvement_acceptance_ratio, max_grad_norm=max_grad_norm )
[ "def", "create", "(", "max_kl", ",", "cg_iters", ",", "line_search_iters", ",", "cg_damping", ",", "entropy_coef", ",", "vf_iters", ",", "discount_factor", ",", "gae_lambda", "=", "1.0", ",", "improvement_acceptance_ratio", "=", "0.1", ",", "max_grad_norm", "=", "0.5", ")", ":", "return", "TrpoPolicyGradient", "(", "max_kl", ",", "int", "(", "cg_iters", ")", ",", "int", "(", "line_search_iters", ")", ",", "cg_damping", ",", "entropy_coef", ",", "vf_iters", ",", "discount_factor", "=", "discount_factor", ",", "gae_lambda", "=", "gae_lambda", ",", "improvement_acceptance_ratio", "=", "improvement_acceptance_ratio", ",", "max_grad_norm", "=", "max_grad_norm", ")" ]
Vel factory function
[ "Vel", "factory", "function" ]
python
train
50.8
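Calling the factory directly, as a sketch; in Vel these arguments would normally come from a YAML config file, and the hyperparameter values below are illustrative only.

```python
from vel.rl.algo.policy_gradient.trpo import create

algo = create(
    max_kl=0.01,          # trust-region size
    cg_iters=10,          # conjugate-gradient iterations
    line_search_iters=10,
    cg_damping=0.1,
    entropy_coef=0.0,
    vf_iters=3,           # value-function updates per batch
    discount_factor=0.99,
    gae_lambda=0.95,
)
```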
GetmeUK/MongoFrames
mongoframes/frames.py
https://github.com/GetmeUK/MongoFrames/blob/7d2bd792235dfa77a9deecab5366f5f73480823d/mongoframes/frames.py#L712-L722
def timestamp_update(sender, frames): """ Timestamp the modified field for all documents. This method should be bound to a frame class like so: ``` MyFrameClass.listen('update', MyFrameClass.timestamp_update) ``` """ for frame in frames: frame.modified = datetime.now(timezone.utc)
[ "def", "timestamp_update", "(", "sender", ",", "frames", ")", ":", "for", "frame", "in", "frames", ":", "frame", ".", "modified", "=", "datetime", ".", "now", "(", "timezone", ".", "utc", ")" ]
Timestamp the modified field for all documents. This method should be bound to a frame class like so: ``` MyFrameClass.listen('update', MyFrameClass.timestamp_update) ```
[ "Timestamp", "the", "modified", "field", "for", "all", "documents", ".", "This", "method", "should", "be", "bound", "to", "a", "frame", "class", "like", "so", ":" ]
python
train
31.636364
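Following the binding convention the docstring itself shows; the `User` frame, its collection name, and its fields are made up for illustration.

```python
from mongoframes import Frame

class User(Frame):
    _collection = 'users'                      # hypothetical collection
    _fields = {'name', 'created', 'modified'}  # hypothetical schema

# Stamp `modified` with a timezone-aware "now" on every update.
User.listen('update', User.timestamp_update)
```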
aalireza/SimpleAudioIndexer
SimpleAudioIndexer/__init__.py
https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L1232-L1318
def _partial_search_validator(self, sub, sup, anagram=False,
                                  subsequence=False, supersequence=False):
        """
        It's responsible for validating the partial results of the `search`
        method. If it returns True, the search would return its result.
        Else, the search method would discard what it found and look for
        others.

        First, checks to see if all elements of `sub` are in `sup` with at
        least the same frequency and then checks to see if every element of
        `sub` appears in `sup` with the same order (index-wise).
        If advanced control structures are specified, the containment
        condition won't be checked.
        The code for index checking is from [1]_.

        Parameters
        ----------
        sub : list
        sup : list
        anagram : bool, optional
            Default is `False`
        subsequence : bool, optional
            Default is `False`
        supersequence : bool, optional
            Default is `False`

        Returns
        -------
        bool

        References
        ----------
        .. [1] `https://stackoverflow.com/questions/35964155/checking-if-list-is-a-sublist`
        """
        def get_all_in(one, another):
            for element in one:
                if element in another:
                    yield element

        def containment_check(sub, sup):
            return (set(Counter(sub).keys()).issubset(
                set(Counter(sup).keys())))

        def containment_freq_check(sub, sup):
            return (all([Counter(sub)[element] <= Counter(sup)[element]
                         for element in Counter(sub)]))

        def extra_freq_check(sub, sup, list_of_tups):
            # Would be used for matching anagrams, subsequences etc.
            return (len(list_of_tups) > 0 and
                    all([Counter(sub)[tup[0]] <= Counter(sup)[tup[1]]
                         for tup in list_of_tups]))

        # Regarding containment checking while having extra conditions,
        # there's no good way to map each anagram or subseuqnece etc. that was
        # found to the query word, without making it more complicated than
        # it already is, because a query word can be anagram/subsequence etc.
        # to multiple words of the timestamps yet finding the one with the
        # right index would be the problem.
        # Therefore we just approximate the solution by just counting
        # the elements.
        if len(sub) > len(sup):
            return False

        for pred, func in set([(anagram, self._is_anagram_of),
                               (subsequence, self._is_subsequence_of),
                               (supersequence, self._is_supersequence_of)]):
            if pred:
                pred_seive = [(sub_key, sup_key)
                              for sub_key in set(Counter(sub).keys())
                              for sup_key in set(Counter(sup).keys())
                              if func(sub_key, sup_key)]
                if not extra_freq_check(sub, sup, pred_seive):
                    return False

        if (
                not any([anagram, subsequence, supersequence]) and
                (not containment_check(sub, sup) or
                 not containment_freq_check(sub, sup))
        ):
            return False

        for x1, x2 in zip(get_all_in(sup, sub), get_all_in(sub, sup)):
            if x1 != x2:
                return False

        return True
[ "def", "_partial_search_validator", "(", "self", ",", "sub", ",", "sup", ",", "anagram", "=", "False", ",", "subsequence", "=", "False", ",", "supersequence", "=", "False", ")", ":", "def", "get_all_in", "(", "one", ",", "another", ")", ":", "for", "element", "in", "one", ":", "if", "element", "in", "another", ":", "yield", "element", "def", "containment_check", "(", "sub", ",", "sup", ")", ":", "return", "(", "set", "(", "Counter", "(", "sub", ")", ".", "keys", "(", ")", ")", ".", "issubset", "(", "set", "(", "Counter", "(", "sup", ")", ".", "keys", "(", ")", ")", ")", ")", "def", "containment_freq_check", "(", "sub", ",", "sup", ")", ":", "return", "(", "all", "(", "[", "Counter", "(", "sub", ")", "[", "element", "]", "<=", "Counter", "(", "sup", ")", "[", "element", "]", "for", "element", "in", "Counter", "(", "sub", ")", "]", ")", ")", "def", "extra_freq_check", "(", "sub", ",", "sup", ",", "list_of_tups", ")", ":", "# Would be used for matching anagrams, subsequences etc.", "return", "(", "len", "(", "list_of_tups", ")", ">", "0", "and", "all", "(", "[", "Counter", "(", "sub", ")", "[", "tup", "[", "0", "]", "]", "<=", "Counter", "(", "sup", ")", "[", "tup", "[", "1", "]", "]", "for", "tup", "in", "list_of_tups", "]", ")", ")", "# Regarding containment checking while having extra conditions,", "# there's no good way to map each anagram or subseuqnece etc. that was", "# found to the query word, without making it more complicated than", "# it already is, because a query word can be anagram/subsequence etc.", "# to multiple words of the timestamps yet finding the one with the", "# right index would be the problem.", "# Therefore we just approximate the solution by just counting", "# the elements.", "if", "len", "(", "sub", ")", ">", "len", "(", "sup", ")", ":", "return", "False", "for", "pred", ",", "func", "in", "set", "(", "[", "(", "anagram", ",", "self", ".", "_is_anagram_of", ")", ",", "(", "subsequence", ",", "self", ".", "_is_subsequence_of", ")", ",", "(", "supersequence", ",", "self", ".", "_is_supersequence_of", ")", "]", ")", ":", "if", "pred", ":", "pred_seive", "=", "[", "(", "sub_key", ",", "sup_key", ")", "for", "sub_key", "in", "set", "(", "Counter", "(", "sub", ")", ".", "keys", "(", ")", ")", "for", "sup_key", "in", "set", "(", "Counter", "(", "sup", ")", ".", "keys", "(", ")", ")", "if", "func", "(", "sub_key", ",", "sup_key", ")", "]", "if", "not", "extra_freq_check", "(", "sub", ",", "sup", ",", "pred_seive", ")", ":", "return", "False", "if", "(", "not", "any", "(", "[", "anagram", ",", "subsequence", ",", "supersequence", "]", ")", "and", "(", "not", "containment_check", "(", "sub", ",", "sup", ")", "or", "not", "containment_freq_check", "(", "sub", ",", "sup", ")", ")", ")", ":", "return", "False", "for", "x1", ",", "x2", "in", "zip", "(", "get_all_in", "(", "sup", ",", "sub", ")", ",", "get_all_in", "(", "sub", ",", "sup", ")", ")", ":", "if", "x1", "!=", "x2", ":", "return", "False", "return", "True" ]
It's responsible for validating the partial results of the `search`
method. If it returns True, the search would return its result.
Else, the search method would discard what it found and look for
others.

First, checks to see if all elements of `sub` are in `sup` with at
least the same frequency and then checks to see if every element of
`sub` appears in `sup` with the same order (index-wise).
If advanced control structures are specified, the containment
condition won't be checked.
The code for index checking is from [1]_.

Parameters
----------
sub : list
sup : list
anagram : bool, optional
    Default is `False`
subsequence : bool, optional
    Default is `False`
supersequence : bool, optional
    Default is `False`

Returns
-------
bool

References
----------
.. [1] `https://stackoverflow.com/questions/35964155/checking-if-list-is-a-sublist`
[ "It", "s", "responsible", "for", "validating", "the", "partial", "results", "of", "search", "method", ".", "If", "it", "returns", "True", "the", "search", "would", "return", "its", "result", ".", "Else", "search", "method", "would", "discard", "what", "it", "found", "and", "look", "for", "others", "." ]
python
train
39.08046
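The validator's default path (no anagram/subsequence/supersequence flags) reduces to a frequency-containment check plus an order check; a self-contained re-statement of just that logic, for illustration only:

```python
from collections import Counter

def contains_with_freq_and_order(sub, sup):
    """Illustrative re-statement of the validator's default path."""
    if len(sub) > len(sup):
        return False
    # Every word of `sub` must occur in `sup` at least as often.
    if not all(Counter(sub)[w] <= Counter(sup)[w] for w in Counter(sub)):
        return False
    # Shared words must appear in the same relative order.
    in_sup_order = [w for w in sup if w in sub]
    in_sub_order = [w for w in sub if w in sup]
    return all(x1 == x2 for x1, x2 in zip(in_sup_order, in_sub_order))

assert contains_with_freq_and_order(['a', 'b'], ['a', 'b', 'c'])
assert not contains_with_freq_and_order(['b', 'a'], ['a', 'b', 'c'])
```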
thomasw/djproxy
djproxy/views.py
https://github.com/thomasw/djproxy/blob/c8b3a44e330683f0625b67dfe3d6d995684b6e4a/djproxy/views.py#L63-L90
def proxy(self): """Retrieve the upstream content and build an HttpResponse.""" headers = self.request.headers.filter(self.ignored_request_headers) qs = self.request.query_string if self.pass_query_string else '' # Fix for django 1.10.0 bug https://code.djangoproject.com/ticket/27005 if (self.request.META.get('CONTENT_LENGTH', None) == '' and get_django_version() == '1.10'): del self.request.META['CONTENT_LENGTH'] request_kwargs = self.middleware.process_request( self, self.request, method=self.request.method, url=self.proxy_url, headers=headers, data=self.request.body, params=qs, allow_redirects=False, verify=self.verify_ssl, cert=self.cert, timeout=self.timeout) result = request(**request_kwargs) response = HttpResponse(result.content, status=result.status_code) # Attach forwardable headers to response forwardable_headers = HeaderDict(result.headers).filter( self.ignored_upstream_headers) for header, value in iteritems(forwardable_headers): response[header] = value return self.middleware.process_response( self, self.request, result, response)
[ "def", "proxy", "(", "self", ")", ":", "headers", "=", "self", ".", "request", ".", "headers", ".", "filter", "(", "self", ".", "ignored_request_headers", ")", "qs", "=", "self", ".", "request", ".", "query_string", "if", "self", ".", "pass_query_string", "else", "''", "# Fix for django 1.10.0 bug https://code.djangoproject.com/ticket/27005", "if", "(", "self", ".", "request", ".", "META", ".", "get", "(", "'CONTENT_LENGTH'", ",", "None", ")", "==", "''", "and", "get_django_version", "(", ")", "==", "'1.10'", ")", ":", "del", "self", ".", "request", ".", "META", "[", "'CONTENT_LENGTH'", "]", "request_kwargs", "=", "self", ".", "middleware", ".", "process_request", "(", "self", ",", "self", ".", "request", ",", "method", "=", "self", ".", "request", ".", "method", ",", "url", "=", "self", ".", "proxy_url", ",", "headers", "=", "headers", ",", "data", "=", "self", ".", "request", ".", "body", ",", "params", "=", "qs", ",", "allow_redirects", "=", "False", ",", "verify", "=", "self", ".", "verify_ssl", ",", "cert", "=", "self", ".", "cert", ",", "timeout", "=", "self", ".", "timeout", ")", "result", "=", "request", "(", "*", "*", "request_kwargs", ")", "response", "=", "HttpResponse", "(", "result", ".", "content", ",", "status", "=", "result", ".", "status_code", ")", "# Attach forwardable headers to response", "forwardable_headers", "=", "HeaderDict", "(", "result", ".", "headers", ")", ".", "filter", "(", "self", ".", "ignored_upstream_headers", ")", "for", "header", ",", "value", "in", "iteritems", "(", "forwardable_headers", ")", ":", "response", "[", "header", "]", "=", "value", "return", "self", ".", "middleware", ".", "process_response", "(", "self", ",", "self", ".", "request", ",", "result", ",", "response", ")" ]
Retrieve the upstream content and build an HttpResponse.
[ "Retrieve", "the", "upstream", "content", "and", "build", "an", "HttpResponse", "." ]
python
train
44.535714
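How this `proxy` method is typically reached, as a hedged sketch: djproxy views are configured by subclassing `HttpProxy` with a `base_url`; the upstream URL and the URL pattern here are illustrative, not from the source.

```python
from djproxy.views import HttpProxy

class UpstreamProxy(HttpProxy):
    base_url = 'https://example.com/api/'  # illustrative upstream

# urls.py (assumed wiring):
# re_path(r'^api/(?P<url>.*)$', UpstreamProxy.as_view(), name='upstream-proxy')
```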
fmd/lazyconf
lazyconf/lazyconf.py
https://github.com/fmd/lazyconf/blob/78e94320c7ff2c08988df04b4e43968f0a7ae06e/lazyconf/lazyconf.py#L160-L234
def configure(self): """ The main configure function. Uses a schema file and an optional data file, and combines them with user prompts to write a new data file. """ # Make the lazy folder if it doesn't already exist. path = os.getcwd() + '/' + self.lazy_folder if not os.path.exists(path): os.makedirs(path) schema_file = self.schema_file data_file = self.data_file # Initialise the schema and data objects. schema, data = Schema(), Schema() # Load the schema from a file. try: schema.load(schema_file) except IOError as e: # If we can't load the schema, choose from templates. self.prompt.error("Could not find schema in " + schema_file + " - Choosing from default templates...") schema = self.choose_schema(schema_file) except (Exception, ValueError) as e: self.prompt.error("Error: " + str(e) + " - Aborting...") return False else: sp, sf = os.path.split(schema_file) self.prompt.success('Loaded schema from ' + self.lazy_folder + sf) # Load the data from a file. try: data.load(data_file) except (Exception, IOError, ValueError) as e: self.prompt.error('Could not find data file. Copying from schema...') else: sp, sf = os.path.split(data_file) self.prompt.success('Loaded data from ' + self.lazy_folder + sf) # Store the internals of the schema (labels, selects, etc.) in data. data.internal = schema.internal # If we have data from a data file, merge the schema file into it. if data.data: # Create a new Merge instance using the data from the schema and data files. m = Merge(schema.data, data.data) mods = m.merge() for a in mods['added']: self.prompt.success('Added ' + a + ' to data.') for r in mods['removed']: self.prompt.error('Removed ' + r + ' from data.') for k,m in mods['modified']: self.prompt.notice('Modified ' + k + ': ' + m[0] + ' became ' + m[1] + '.' ) # Otherwise, reference the data from the schema file verbatim. else: data.data = schema.data # Store the data. self.data = data # Configure the data. self.configure_data(data.data) # Save the data to the out file. self.data.save(self.data_file) self.add_ignore() sp, sf = os.path.split(self.data_file) self.prompt.success('Saved to ' + self.lazy_folder + sf + '.')
[ "def", "configure", "(", "self", ")", ":", "# Make the lazy folder if it doesn't already exist.", "path", "=", "os", ".", "getcwd", "(", ")", "+", "'/'", "+", "self", ".", "lazy_folder", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")", "schema_file", "=", "self", ".", "schema_file", "data_file", "=", "self", ".", "data_file", "# Initialise the schema and data objects.", "schema", ",", "data", "=", "Schema", "(", ")", ",", "Schema", "(", ")", "# Load the schema from a file.", "try", ":", "schema", ".", "load", "(", "schema_file", ")", "except", "IOError", "as", "e", ":", "# If we can't load the schema, choose from templates.", "self", ".", "prompt", ".", "error", "(", "\"Could not find schema in \"", "+", "schema_file", "+", "\" - Choosing from default templates...\"", ")", "schema", "=", "self", ".", "choose_schema", "(", "schema_file", ")", "except", "(", "Exception", ",", "ValueError", ")", "as", "e", ":", "self", ".", "prompt", ".", "error", "(", "\"Error: \"", "+", "str", "(", "e", ")", "+", "\" - Aborting...\"", ")", "return", "False", "else", ":", "sp", ",", "sf", "=", "os", ".", "path", ".", "split", "(", "schema_file", ")", "self", ".", "prompt", ".", "success", "(", "'Loaded schema from '", "+", "self", ".", "lazy_folder", "+", "sf", ")", "# Load the data from a file.", "try", ":", "data", ".", "load", "(", "data_file", ")", "except", "(", "Exception", ",", "IOError", ",", "ValueError", ")", "as", "e", ":", "self", ".", "prompt", ".", "error", "(", "'Could not find data file. Copying from schema...'", ")", "else", ":", "sp", ",", "sf", "=", "os", ".", "path", ".", "split", "(", "data_file", ")", "self", ".", "prompt", ".", "success", "(", "'Loaded data from '", "+", "self", ".", "lazy_folder", "+", "sf", ")", "# Store the internals of the schema (labels, selects, etc.) in data.", "data", ".", "internal", "=", "schema", ".", "internal", "# If we have data from a data file, merge the schema file into it.", "if", "data", ".", "data", ":", "# Create a new Merge instance using the data from the schema and data files.", "m", "=", "Merge", "(", "schema", ".", "data", ",", "data", ".", "data", ")", "mods", "=", "m", ".", "merge", "(", ")", "for", "a", "in", "mods", "[", "'added'", "]", ":", "self", ".", "prompt", ".", "success", "(", "'Added '", "+", "a", "+", "' to data.'", ")", "for", "r", "in", "mods", "[", "'removed'", "]", ":", "self", ".", "prompt", ".", "error", "(", "'Removed '", "+", "r", "+", "' from data.'", ")", "for", "k", ",", "m", "in", "mods", "[", "'modified'", "]", ":", "self", ".", "prompt", ".", "notice", "(", "'Modified '", "+", "k", "+", "': '", "+", "m", "[", "0", "]", "+", "' became '", "+", "m", "[", "1", "]", "+", "'.'", ")", "# Otherwise, reference the data from the schema file verbatim.", "else", ":", "data", ".", "data", "=", "schema", ".", "data", "# Store the data.", "self", ".", "data", "=", "data", "# Configure the data.", "self", ".", "configure_data", "(", "data", ".", "data", ")", "# Save the data to the out file.", "self", ".", "data", ".", "save", "(", "self", ".", "data_file", ")", "self", ".", "add_ignore", "(", ")", "sp", ",", "sf", "=", "os", ".", "path", ".", "split", "(", "self", ".", "data_file", ")", "self", ".", "prompt", ".", "success", "(", "'Saved to '", "+", "self", ".", "lazy_folder", "+", "sf", "+", "'.'", ")" ]
The main configure function. Uses a schema file and an optional data file, and combines them with user prompts to write a new data file.
[ "The", "main", "configure", "function", ".", "Uses", "a", "schema", "file", "and", "an", "optional", "data", "file", "and", "combines", "them", "with", "user", "prompts", "to", "write", "a", "new", "data", "file", "." ]
python
train
35.346667
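Driving the flow above, as a sketch; the `Lazyconf` entry-point class and its no-argument constructor are assumptions based on this module's path, not confirmed by the source.

```python
from lazyconf import Lazyconf  # assumed public entry point

lazy = Lazyconf()
lazy.configure()  # prompts interactively, then saves the data file into the lazy folder
```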