Dataset schema (column: type, value range):
- repo: string (lengths 7 to 55)
- path: string (lengths 4 to 223)
- url: string (lengths 87 to 315)
- code: string (lengths 75 to 104k)
- code_tokens: list
- docstring: string (lengths 1 to 46.9k)
- docstring_tokens: list
- language: string (1 distinct value)
- partition: string (3 distinct values)
- avg_line_len: float64 (7.91 to 980)
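The rows that follow conform to this schema, with one field value per line and multi-line code and docstrings flattened onto single lines. As a minimal sketch of how a table with this schema could be loaded and inspected using the Hugging Face datasets library — the dataset path "user/code-docstring-corpus" below is a hypothetical placeholder, not the actual name of this dataset:

# Minimal sketch: load and inspect a code/docstring corpus with the schema above.
# NOTE: "user/code-docstring-corpus" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("user/code-docstring-corpus", split="train")

row = ds[0]
print(row["repo"], row["path"])   # e.g. "tkem/uritools", "uritools/split.py"
print(row["docstring"])           # natural-language summary of row["code"]

# Example: keep only rows whose functions have a short average line length.
short = ds.filter(lambda r: r["avg_line_len"] < 40)
print(len(short))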
tkem/uritools
uritools/split.py
https://github.com/tkem/uritools/blob/e77ba4acd937b68da9850138563debd4c925ef9f/uritools/split.py#L189-L199
def getquerydict(self, sep='&', encoding='utf-8', errors='strict'): """Split the query component into individual `name=value` pairs separated by `sep` and return a dictionary of query variables. The dictionary keys are the unique query variable names and the values are lists of values for each name. """ dict = collections.defaultdict(list) for name, value in self.getquerylist(sep, encoding, errors): dict[name].append(value) return dict
[ "def", "getquerydict", "(", "self", ",", "sep", "=", "'&'", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'strict'", ")", ":", "dict", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "name", ",", "value", "in", "self", ".", "getquerylist", "(", "sep", ",", "encoding", ",", "errors", ")", ":", "dict", "[", "name", "]", ".", "append", "(", "value", ")", "return", "dict" ]
Split the query component into individual `name=value` pairs separated by `sep` and return a dictionary of query variables. The dictionary keys are the unique query variable names and the values are lists of values for each name.
[ "Split", "the", "query", "component", "into", "individual", "name", "=", "value", "pairs", "separated", "by", "sep", "and", "return", "a", "dictionary", "of", "query", "variables", ".", "The", "dictionary", "keys", "are", "the", "unique", "query", "variable", "names", "and", "the", "values", "are", "lists", "of", "values", "for", "each", "name", "." ]
python
train
46
BlueHack-Core/blueforge
blueforge/util/file.py
https://github.com/BlueHack-Core/blueforge/blob/ac40a888ee9c388638a8f312c51f7500b8891b6c/blueforge/util/file.py#L69-L81
def move_file(originals, destination): """ Move file from original path to destination path. :type originals: Array of str :param originals: The original path :type destination: str :param destination: The destination path """ for original in originals: if os.path.exists(original): shutil.move(original, destination)
[ "def", "move_file", "(", "originals", ",", "destination", ")", ":", "for", "original", "in", "originals", ":", "if", "os", ".", "path", ".", "exists", "(", "original", ")", ":", "shutil", ".", "move", "(", "original", ",", "destination", ")" ]
Move file from original path to destination path. :type originals: Array of str :param originals: The original path :type destination: str :param destination: The destination path
[ "Move", "file", "from", "original", "path", "to", "destination", "path", "." ]
python
train
29.153846
Calysto/calysto
calysto/ai/conx.py
https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L2128-L2136
def activationFunctionASIG(self, x): """ Determine the activation of a node based on that nodes net input. """ def act(v): if v < -15.0: return 0.0 elif v > 15.0: return 1.0 else: return 1.0 / (1.0 + Numeric.exp(-v)) return Numeric.array(list(map(act, x)), 'f')
[ "def", "activationFunctionASIG", "(", "self", ",", "x", ")", ":", "def", "act", "(", "v", ")", ":", "if", "v", "<", "-", "15.0", ":", "return", "0.0", "elif", "v", ">", "15.0", ":", "return", "1.0", "else", ":", "return", "1.0", "/", "(", "1.0", "+", "Numeric", ".", "exp", "(", "-", "v", ")", ")", "return", "Numeric", ".", "array", "(", "list", "(", "map", "(", "act", ",", "x", ")", ")", ",", "'f'", ")" ]
Determine the activation of a node based on that nodes net input.
[ "Determine", "the", "activation", "of", "a", "node", "based", "on", "that", "nodes", "net", "input", "." ]
python
train
36.888889
Jajcus/pyxmpp2
pyxmpp2/ext/vcard.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/vcard.py#L1172-L1186
def as_xml(self,parent): """Create vcard-tmp XML representation of the field. :Parameters: - `parent`: parent node for the element :Types: - `parent`: `libxml2.xmlNode` :return: xml node with the field data. :returntype: `libxml2.xmlNode`""" if self.value in ("public","private","confidental"): n=parent.newChild(None,self.name.upper(),None) n.newChild(None,self.value.upper(),None) return n return None
[ "def", "as_xml", "(", "self", ",", "parent", ")", ":", "if", "self", ".", "value", "in", "(", "\"public\"", ",", "\"private\"", ",", "\"confidental\"", ")", ":", "n", "=", "parent", ".", "newChild", "(", "None", ",", "self", ".", "name", ".", "upper", "(", ")", ",", "None", ")", "n", ".", "newChild", "(", "None", ",", "self", ".", "value", ".", "upper", "(", ")", ",", "None", ")", "return", "n", "return", "None" ]
Create vcard-tmp XML representation of the field. :Parameters: - `parent`: parent node for the element :Types: - `parent`: `libxml2.xmlNode` :return: xml node with the field data. :returntype: `libxml2.xmlNode`
[ "Create", "vcard", "-", "tmp", "XML", "representation", "of", "the", "field", "." ]
python
valid
33.8
koordinates/python-client
koordinates/base.py
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/base.py#L610-L621
def refresh(self): """ Refresh this model from the server. Updates attributes with the server-defined values. This is useful where the Model instance came from a partial response (eg. a list query) and additional details are required. Existing attribute values will be overwritten. """ r = self._client.request('GET', self.url) return self._deserialize(r.json(), self._manager)
[ "def", "refresh", "(", "self", ")", ":", "r", "=", "self", ".", "_client", ".", "request", "(", "'GET'", ",", "self", ".", "url", ")", "return", "self", ".", "_deserialize", "(", "r", ".", "json", "(", ")", ",", "self", ".", "_manager", ")" ]
Refresh this model from the server. Updates attributes with the server-defined values. This is useful where the Model instance came from a partial response (eg. a list query) and additional details are required. Existing attribute values will be overwritten.
[ "Refresh", "this", "model", "from", "the", "server", "." ]
python
train
36.666667
iotile/coretools
iotilesensorgraph/iotile/sg/processors.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/processors.py#L13-L31
def copy_all_a(input_a, *other_inputs, **kwargs): """Copy all readings in input a into the output. All other inputs are skipped so that after this function runs there are no readings left in any of the input walkers when the function finishes, even if it generated no output readings. Returns: list(IOTileReading) """ output = [] while input_a.count() > 0: output.append(input_a.pop()) for input_x in other_inputs: input_x.skip_all() return output
[ "def", "copy_all_a", "(", "input_a", ",", "*", "other_inputs", ",", "*", "*", "kwargs", ")", ":", "output", "=", "[", "]", "while", "input_a", ".", "count", "(", ")", ">", "0", ":", "output", ".", "append", "(", "input_a", ".", "pop", "(", ")", ")", "for", "input_x", "in", "other_inputs", ":", "input_x", ".", "skip_all", "(", ")", "return", "output" ]
Copy all readings in input a into the output. All other inputs are skipped so that after this function runs there are no readings left in any of the input walkers when the function finishes, even if it generated no output readings. Returns: list(IOTileReading)
[ "Copy", "all", "readings", "in", "input", "a", "into", "the", "output", "." ]
python
train
26.210526
KelSolaar/Umbra
umbra/ui/widgets/active_QLabel.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/widgets/active_QLabel.py#L203-L214
def active_pixmap(self, value): """ Setter for **self.__active_pixmap** attribute. :param value: Attribute value. :type value: QPixmap """ if value is not None: assert type(value) is QPixmap, "'{0}' attribute: '{1}' type is not 'QPixmap'!".format( "active_pixmap", value) self.__active_pixmap = value
[ "def", "active_pixmap", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "QPixmap", ",", "\"'{0}' attribute: '{1}' type is not 'QPixmap'!\"", ".", "format", "(", "\"active_pixmap\"", ",", "value", ")", "self", ".", "__active_pixmap", "=", "value" ]
Setter for **self.__active_pixmap** attribute. :param value: Attribute value. :type value: QPixmap
[ "Setter", "for", "**", "self", ".", "__active_pixmap", "**", "attribute", "." ]
python
train
31.25
vertexproject/synapse
synapse/cortex.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/cortex.py#L225-L231
async def updateTrigger(self, iden, query): ''' Change an existing trigger's query ''' trig = self.cell.triggers.get(iden) self._trig_auth_check(trig.get('useriden')) self.cell.triggers.mod(iden, query)
[ "async", "def", "updateTrigger", "(", "self", ",", "iden", ",", "query", ")", ":", "trig", "=", "self", ".", "cell", ".", "triggers", ".", "get", "(", "iden", ")", "self", ".", "_trig_auth_check", "(", "trig", ".", "get", "(", "'useriden'", ")", ")", "self", ".", "cell", ".", "triggers", ".", "mod", "(", "iden", ",", "query", ")" ]
Change an existing trigger's query
[ "Change", "an", "existing", "trigger", "s", "query" ]
python
train
34.857143
vatlab/SoS
src/sos/dag.py
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/dag.py#L184-L222
def find_executable(self): '''Find an executable node, which means nodes that has not been completed and has no input dependency.''' if 'DAG' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']: env.log_to_file('DAG', 'find_executable') for node in self.nodes(): # if it has not been executed if node._status is None: with_dependency = False for edge in self.in_edges(node): if edge[0]._status != 'completed': with_dependency = True break if not with_dependency: return node # if no node could be found, let use try pending ones pending_jobs = [ x for x in self.nodes() if x._status == 'signature_pending' ] if pending_jobs: try: notifier = ActivityNotifier( f'Waiting for {len(pending_jobs)} pending job{"s: e.g." if len(pending_jobs) > 1 else ":"} output {short_repr(pending_jobs[0]._signature[0])} with signature file {pending_jobs[0]._signature[1] + "_"}. You can manually remove this lock file if you are certain that no other process is working on the output.' ) while True: for node in pending_jobs: # if it has not been executed lock = fasteners.InterProcessLock(node._signature[1] + '_') if lock.acquire(blocking=False): lock.release() node._status = None return node time.sleep(0.1) except Exception as e: env.logger.error(e) finally: notifier.stop() return None
[ "def", "find_executable", "(", "self", ")", ":", "if", "'DAG'", "in", "env", ".", "config", "[", "'SOS_DEBUG'", "]", "or", "'ALL'", "in", "env", ".", "config", "[", "'SOS_DEBUG'", "]", ":", "env", ".", "log_to_file", "(", "'DAG'", ",", "'find_executable'", ")", "for", "node", "in", "self", ".", "nodes", "(", ")", ":", "# if it has not been executed", "if", "node", ".", "_status", "is", "None", ":", "with_dependency", "=", "False", "for", "edge", "in", "self", ".", "in_edges", "(", "node", ")", ":", "if", "edge", "[", "0", "]", ".", "_status", "!=", "'completed'", ":", "with_dependency", "=", "True", "break", "if", "not", "with_dependency", ":", "return", "node", "# if no node could be found, let use try pending ones", "pending_jobs", "=", "[", "x", "for", "x", "in", "self", ".", "nodes", "(", ")", "if", "x", ".", "_status", "==", "'signature_pending'", "]", "if", "pending_jobs", ":", "try", ":", "notifier", "=", "ActivityNotifier", "(", "f'Waiting for {len(pending_jobs)} pending job{\"s: e.g.\" if len(pending_jobs) > 1 else \":\"} output {short_repr(pending_jobs[0]._signature[0])} with signature file {pending_jobs[0]._signature[1] + \"_\"}. You can manually remove this lock file if you are certain that no other process is working on the output.'", ")", "while", "True", ":", "for", "node", "in", "pending_jobs", ":", "# if it has not been executed", "lock", "=", "fasteners", ".", "InterProcessLock", "(", "node", ".", "_signature", "[", "1", "]", "+", "'_'", ")", "if", "lock", ".", "acquire", "(", "blocking", "=", "False", ")", ":", "lock", ".", "release", "(", ")", "node", ".", "_status", "=", "None", "return", "node", "time", ".", "sleep", "(", "0.1", ")", "except", "Exception", "as", "e", ":", "env", ".", "logger", ".", "error", "(", "e", ")", "finally", ":", "notifier", ".", "stop", "(", ")", "return", "None" ]
Find an executable node, which means nodes that has not been completed and has no input dependency.
[ "Find", "an", "executable", "node", "which", "means", "nodes", "that", "has", "not", "been", "completed", "and", "has", "no", "input", "dependency", "." ]
python
train
48.487179
datosgobar/pydatajson
pydatajson/core.py
https://github.com/datosgobar/pydatajson/blob/3141082ffbaa295e2deaf6ffbbc5a59f5859960e/pydatajson/core.py#L153-L189
def _build_index(self): """Itera todos los datasets, distribucioens y fields indexandolos.""" datasets_index = {} distributions_index = {} fields_index = {} # recorre todos los datasets for dataset_index, dataset in enumerate(self.datasets): if "identifier" in dataset: datasets_index[dataset["identifier"]] = { "dataset_index": dataset_index } # recorre las distribuciones del dataset for distribution_index, distribution in enumerate( dataset.get("distribution", [])): if "identifier" in distribution: distributions_index[distribution["identifier"]] = { "distribution_index": distribution_index, "dataset_identifier": dataset["identifier"] } # recorre los fields de la distribucion for field_index, field in enumerate( distribution.get("field", [])): if "id" in field: fields_index[field["id"]] = { "field_index": field_index, "dataset_identifier": dataset["identifier"], "distribution_identifier": distribution["identifier"] } setattr(self, "_distributions_index", distributions_index) setattr(self, "_datasets_index", datasets_index) setattr(self, "_fields_index", fields_index)
[ "def", "_build_index", "(", "self", ")", ":", "datasets_index", "=", "{", "}", "distributions_index", "=", "{", "}", "fields_index", "=", "{", "}", "# recorre todos los datasets", "for", "dataset_index", ",", "dataset", "in", "enumerate", "(", "self", ".", "datasets", ")", ":", "if", "\"identifier\"", "in", "dataset", ":", "datasets_index", "[", "dataset", "[", "\"identifier\"", "]", "]", "=", "{", "\"dataset_index\"", ":", "dataset_index", "}", "# recorre las distribuciones del dataset", "for", "distribution_index", ",", "distribution", "in", "enumerate", "(", "dataset", ".", "get", "(", "\"distribution\"", ",", "[", "]", ")", ")", ":", "if", "\"identifier\"", "in", "distribution", ":", "distributions_index", "[", "distribution", "[", "\"identifier\"", "]", "]", "=", "{", "\"distribution_index\"", ":", "distribution_index", ",", "\"dataset_identifier\"", ":", "dataset", "[", "\"identifier\"", "]", "}", "# recorre los fields de la distribucion", "for", "field_index", ",", "field", "in", "enumerate", "(", "distribution", ".", "get", "(", "\"field\"", ",", "[", "]", ")", ")", ":", "if", "\"id\"", "in", "field", ":", "fields_index", "[", "field", "[", "\"id\"", "]", "]", "=", "{", "\"field_index\"", ":", "field_index", ",", "\"dataset_identifier\"", ":", "dataset", "[", "\"identifier\"", "]", ",", "\"distribution_identifier\"", ":", "distribution", "[", "\"identifier\"", "]", "}", "setattr", "(", "self", ",", "\"_distributions_index\"", ",", "distributions_index", ")", "setattr", "(", "self", ",", "\"_datasets_index\"", ",", "datasets_index", ")", "setattr", "(", "self", ",", "\"_fields_index\"", ",", "fields_index", ")" ]
Itera todos los datasets, distribucioens y fields indexandolos.
[ "Itera", "todos", "los", "datasets", "distribucioens", "y", "fields", "indexandolos", "." ]
python
train
47.702703
timknip/pyswf
swf/export.py
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/export.py#L827-L876
def export(self, swf, shape, **export_opts): """ Exports the specified shape of the SWF to SVG. @param swf The SWF. @param shape Which shape to export, either by characterId(int) or as a Tag object. """ # If `shape` is given as int, find corresponding shape tag. if isinstance(shape, Tag): shape_tag = shape else: shapes = [x for x in swf.all_tags_of_type((TagDefineShape, TagDefineSprite)) if x.characterId == shape] if len(shapes): shape_tag = shapes[0] else: raise Exception("Shape %s not found" % shape) from swf.movie import SWF # find a typical use of this shape example_place_objects = [x for x in swf.all_tags_of_type(TagPlaceObject) if x.hasCharacter and x.characterId == shape_tag.characterId] if len(example_place_objects): place_object = example_place_objects[0] characters = swf.build_dictionary() ids_to_export = place_object.get_dependencies() ids_exported = set() tags_to_export = [] # this had better form a dag! while len(ids_to_export): id = ids_to_export.pop() if id in ids_exported or id not in characters: continue tag = characters[id] ids_to_export.update(tag.get_dependencies()) tags_to_export.append(tag) ids_exported.add(id) tags_to_export.reverse() tags_to_export.append(place_object) else: place_object = TagPlaceObject() place_object.hasCharacter = True place_object.characterId = shape_tag.characterId tags_to_export = [ shape_tag, place_object ] stunt_swf = SWF() stunt_swf.tags = tags_to_export return super(SingleShapeSVGExporterMixin, self).export(stunt_swf, **export_opts)
[ "def", "export", "(", "self", ",", "swf", ",", "shape", ",", "*", "*", "export_opts", ")", ":", "# If `shape` is given as int, find corresponding shape tag.", "if", "isinstance", "(", "shape", ",", "Tag", ")", ":", "shape_tag", "=", "shape", "else", ":", "shapes", "=", "[", "x", "for", "x", "in", "swf", ".", "all_tags_of_type", "(", "(", "TagDefineShape", ",", "TagDefineSprite", ")", ")", "if", "x", ".", "characterId", "==", "shape", "]", "if", "len", "(", "shapes", ")", ":", "shape_tag", "=", "shapes", "[", "0", "]", "else", ":", "raise", "Exception", "(", "\"Shape %s not found\"", "%", "shape", ")", "from", "swf", ".", "movie", "import", "SWF", "# find a typical use of this shape", "example_place_objects", "=", "[", "x", "for", "x", "in", "swf", ".", "all_tags_of_type", "(", "TagPlaceObject", ")", "if", "x", ".", "hasCharacter", "and", "x", ".", "characterId", "==", "shape_tag", ".", "characterId", "]", "if", "len", "(", "example_place_objects", ")", ":", "place_object", "=", "example_place_objects", "[", "0", "]", "characters", "=", "swf", ".", "build_dictionary", "(", ")", "ids_to_export", "=", "place_object", ".", "get_dependencies", "(", ")", "ids_exported", "=", "set", "(", ")", "tags_to_export", "=", "[", "]", "# this had better form a dag!", "while", "len", "(", "ids_to_export", ")", ":", "id", "=", "ids_to_export", ".", "pop", "(", ")", "if", "id", "in", "ids_exported", "or", "id", "not", "in", "characters", ":", "continue", "tag", "=", "characters", "[", "id", "]", "ids_to_export", ".", "update", "(", "tag", ".", "get_dependencies", "(", ")", ")", "tags_to_export", ".", "append", "(", "tag", ")", "ids_exported", ".", "add", "(", "id", ")", "tags_to_export", ".", "reverse", "(", ")", "tags_to_export", ".", "append", "(", "place_object", ")", "else", ":", "place_object", "=", "TagPlaceObject", "(", ")", "place_object", ".", "hasCharacter", "=", "True", "place_object", ".", "characterId", "=", "shape_tag", ".", "characterId", "tags_to_export", "=", "[", "shape_tag", ",", "place_object", "]", "stunt_swf", "=", "SWF", "(", ")", "stunt_swf", ".", "tags", "=", "tags_to_export", "return", "super", "(", "SingleShapeSVGExporterMixin", ",", "self", ")", ".", "export", "(", "stunt_swf", ",", "*", "*", "export_opts", ")" ]
Exports the specified shape of the SWF to SVG. @param swf The SWF. @param shape Which shape to export, either by characterId(int) or as a Tag object.
[ "Exports", "the", "specified", "shape", "of", "the", "SWF", "to", "SVG", "." ]
python
train
38.8
Robpol86/libnl
libnl/nl80211/iw_scan.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/nl80211/iw_scan.py#L341-L367
def get_ht_op(_, data): """http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n766. Positional arguments: data -- bytearray data to read. Returns: Dict. """ protection = ('no', 'nonmember', 20, 'non-HT mixed') sta_chan_width = (20, 'any') answers = { 'primary channel': data[0], 'secondary channel offset': ht_secondary_offset[data[1] & 0x3], 'STA channel width': sta_chan_width[(data[1] & 0x4) >> 2], 'RIFS': (data[1] & 0x8) >> 3, 'HT protection': protection[data[2] & 0x3], 'non-GF present': (data[2] & 0x4) >> 2, 'OBSS non-GF present': (data[2] & 0x10) >> 4, 'dual beacon': (data[4] & 0x40) >> 6, 'dual CTS protection': (data[4] & 0x80) >> 7, 'STBC beacon': data[5] & 0x1, 'L-SIG TXOP Prot': (data[5] & 0x2) >> 1, 'PCO active': (data[5] & 0x4) >> 2, 'PCO phase': (data[5] & 0x8) >> 3, } return answers
[ "def", "get_ht_op", "(", "_", ",", "data", ")", ":", "protection", "=", "(", "'no'", ",", "'nonmember'", ",", "20", ",", "'non-HT mixed'", ")", "sta_chan_width", "=", "(", "20", ",", "'any'", ")", "answers", "=", "{", "'primary channel'", ":", "data", "[", "0", "]", ",", "'secondary channel offset'", ":", "ht_secondary_offset", "[", "data", "[", "1", "]", "&", "0x3", "]", ",", "'STA channel width'", ":", "sta_chan_width", "[", "(", "data", "[", "1", "]", "&", "0x4", ")", ">>", "2", "]", ",", "'RIFS'", ":", "(", "data", "[", "1", "]", "&", "0x8", ")", ">>", "3", ",", "'HT protection'", ":", "protection", "[", "data", "[", "2", "]", "&", "0x3", "]", ",", "'non-GF present'", ":", "(", "data", "[", "2", "]", "&", "0x4", ")", ">>", "2", ",", "'OBSS non-GF present'", ":", "(", "data", "[", "2", "]", "&", "0x10", ")", ">>", "4", ",", "'dual beacon'", ":", "(", "data", "[", "4", "]", "&", "0x40", ")", ">>", "6", ",", "'dual CTS protection'", ":", "(", "data", "[", "4", "]", "&", "0x80", ")", ">>", "7", ",", "'STBC beacon'", ":", "data", "[", "5", "]", "&", "0x1", ",", "'L-SIG TXOP Prot'", ":", "(", "data", "[", "5", "]", "&", "0x2", ")", ">>", "1", ",", "'PCO active'", ":", "(", "data", "[", "5", "]", "&", "0x4", ")", ">>", "2", ",", "'PCO phase'", ":", "(", "data", "[", "5", "]", "&", "0x8", ")", ">>", "3", ",", "}", "return", "answers" ]
http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n766. Positional arguments: data -- bytearray data to read. Returns: Dict.
[ "http", ":", "//", "git", ".", "kernel", ".", "org", "/", "cgit", "/", "linux", "/", "kernel", "/", "git", "/", "jberg", "/", "iw", ".", "git", "/", "tree", "/", "scan", ".", "c?id", "=", "v3", ".", "17#n766", "." ]
python
train
35.37037
HazyResearch/fonduer
src/fonduer/parser/spacy_parser.py
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/spacy_parser.py#L87-L104
def model_installed(name): """Check if spaCy language model is installed. From https://github.com/explosion/spaCy/blob/master/spacy/util.py :param name: :return: """ data_path = util.get_data_path() if not data_path or not data_path.exists(): raise IOError(f"Can't find spaCy data path: {data_path}") if name in {d.name for d in data_path.iterdir()}: return True if Spacy.is_package(name): # installed as package return True if Path(name).exists(): # path to model data directory return True return False
[ "def", "model_installed", "(", "name", ")", ":", "data_path", "=", "util", ".", "get_data_path", "(", ")", "if", "not", "data_path", "or", "not", "data_path", ".", "exists", "(", ")", ":", "raise", "IOError", "(", "f\"Can't find spaCy data path: {data_path}\"", ")", "if", "name", "in", "{", "d", ".", "name", "for", "d", "in", "data_path", ".", "iterdir", "(", ")", "}", ":", "return", "True", "if", "Spacy", ".", "is_package", "(", "name", ")", ":", "# installed as package", "return", "True", "if", "Path", "(", "name", ")", ".", "exists", "(", ")", ":", "# path to model data directory", "return", "True", "return", "False" ]
Check if spaCy language model is installed. From https://github.com/explosion/spaCy/blob/master/spacy/util.py :param name: :return:
[ "Check", "if", "spaCy", "language", "model", "is", "installed", "." ]
python
train
34.833333
diffeo/rejester
rejester/_task_master.py
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_task_master.py#L745-L771
def idle_all_workers(self): '''Set the global mode to :attr:`IDLE` and wait for workers to stop. This can wait arbitrarily long before returning. The worst case in "normal" usage involves waiting five minutes for a "lost" job to expire; a well-behaved but very-long-running job can extend its own lease further, and this function will not return until that job finishes (if ever). .. deprecated:: 0.4.5 There isn't an obvious use case for this function, and its "maybe wait forever for something out of my control" nature makes it hard to use in real code. Polling all of the work specs and their :meth:`num_pending` in application code if you really needed this operation would have the same semantics and database load. ''' self.set_mode(self.IDLE) while 1: num_pending = dict() for work_spec_name in self.registry.pull(NICE_LEVELS).keys(): num_pending[work_spec_name] = self.num_pending(work_spec_name) if sum(num_pending.values()) == 0: break logger.warn('waiting for pending work_units: %r', num_pending) time.sleep(1)
[ "def", "idle_all_workers", "(", "self", ")", ":", "self", ".", "set_mode", "(", "self", ".", "IDLE", ")", "while", "1", ":", "num_pending", "=", "dict", "(", ")", "for", "work_spec_name", "in", "self", ".", "registry", ".", "pull", "(", "NICE_LEVELS", ")", ".", "keys", "(", ")", ":", "num_pending", "[", "work_spec_name", "]", "=", "self", ".", "num_pending", "(", "work_spec_name", ")", "if", "sum", "(", "num_pending", ".", "values", "(", ")", ")", "==", "0", ":", "break", "logger", ".", "warn", "(", "'waiting for pending work_units: %r'", ",", "num_pending", ")", "time", ".", "sleep", "(", "1", ")" ]
Set the global mode to :attr:`IDLE` and wait for workers to stop. This can wait arbitrarily long before returning. The worst case in "normal" usage involves waiting five minutes for a "lost" job to expire; a well-behaved but very-long-running job can extend its own lease further, and this function will not return until that job finishes (if ever). .. deprecated:: 0.4.5 There isn't an obvious use case for this function, and its "maybe wait forever for something out of my control" nature makes it hard to use in real code. Polling all of the work specs and their :meth:`num_pending` in application code if you really needed this operation would have the same semantics and database load.
[ "Set", "the", "global", "mode", "to", ":", "attr", ":", "IDLE", "and", "wait", "for", "workers", "to", "stop", "." ]
python
train
46.148148
SatelliteQE/nailgun
nailgun/config.py
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/config.py#L261-L285
def get_client_kwargs(self): """Get kwargs for use with the methods in :mod:`nailgun.client`. This method returns a dict of attributes that can be unpacked and used as kwargs via the ``**`` operator. For example:: cfg = ServerConfig.get() client.get(cfg.url + '/api/v2', **cfg.get_client_kwargs()) This method is useful because client code may not know which attributes should be passed from a ``ServerConfig`` object to one of the ``nailgun.client`` functions. Consider that the example above could also be written like this:: cfg = ServerConfig.get() client.get(cfg.url + '/api/v2', auth=cfg.auth, verify=cfg.verify) But this latter approach is more fragile. It will break if ``cfg`` does not have an ``auth`` or ``verify`` attribute. """ config = vars(self).copy() config.pop('url') config.pop('version', None) return config
[ "def", "get_client_kwargs", "(", "self", ")", ":", "config", "=", "vars", "(", "self", ")", ".", "copy", "(", ")", "config", ".", "pop", "(", "'url'", ")", "config", ".", "pop", "(", "'version'", ",", "None", ")", "return", "config" ]
Get kwargs for use with the methods in :mod:`nailgun.client`. This method returns a dict of attributes that can be unpacked and used as kwargs via the ``**`` operator. For example:: cfg = ServerConfig.get() client.get(cfg.url + '/api/v2', **cfg.get_client_kwargs()) This method is useful because client code may not know which attributes should be passed from a ``ServerConfig`` object to one of the ``nailgun.client`` functions. Consider that the example above could also be written like this:: cfg = ServerConfig.get() client.get(cfg.url + '/api/v2', auth=cfg.auth, verify=cfg.verify) But this latter approach is more fragile. It will break if ``cfg`` does not have an ``auth`` or ``verify`` attribute.
[ "Get", "kwargs", "for", "use", "with", "the", "methods", "in", ":", "mod", ":", "nailgun", ".", "client", "." ]
python
train
38.76
mnboos/airtiler
airtiler/__init__.py
https://github.com/mnboos/airtiler/blob/6e79cdf22c5cd8a9cfffe07b58824fc1d1027104/airtiler/__init__.py#L44-L63
def _tiles_from_bbox(bbox, zoom_level): """ * Returns all tiles for the specified bounding box """ if isinstance(bbox, dict): point_min = Point.from_latitude_longitude(latitude=bbox['tl'], longitude=bbox['tr']) point_max = Point.from_latitude_longitude(latitude=bbox['bl'], longitude=bbox['br']) elif isinstance(bbox, list): point_min = Point.from_latitude_longitude(latitude=bbox[1], longitude=bbox[0]) point_max = Point.from_latitude_longitude(latitude=bbox[3], longitude=bbox[2]) else: raise RuntimeError("bbox must bei either a dict or a list") tile_min = Tile.for_point(point_min, zoom_level) tile_max = Tile.for_point(point_max, zoom_level) tiles = [] for x in range(tile_min.tms_x, tile_max.tms_x + 1): for y in range(tile_min.tms_y, tile_max.tms_y + 1): tiles.append(Tile.from_tms(tms_x=x, tms_y=y, zoom=zoom_level)) return tiles
[ "def", "_tiles_from_bbox", "(", "bbox", ",", "zoom_level", ")", ":", "if", "isinstance", "(", "bbox", ",", "dict", ")", ":", "point_min", "=", "Point", ".", "from_latitude_longitude", "(", "latitude", "=", "bbox", "[", "'tl'", "]", ",", "longitude", "=", "bbox", "[", "'tr'", "]", ")", "point_max", "=", "Point", ".", "from_latitude_longitude", "(", "latitude", "=", "bbox", "[", "'bl'", "]", ",", "longitude", "=", "bbox", "[", "'br'", "]", ")", "elif", "isinstance", "(", "bbox", ",", "list", ")", ":", "point_min", "=", "Point", ".", "from_latitude_longitude", "(", "latitude", "=", "bbox", "[", "1", "]", ",", "longitude", "=", "bbox", "[", "0", "]", ")", "point_max", "=", "Point", ".", "from_latitude_longitude", "(", "latitude", "=", "bbox", "[", "3", "]", ",", "longitude", "=", "bbox", "[", "2", "]", ")", "else", ":", "raise", "RuntimeError", "(", "\"bbox must bei either a dict or a list\"", ")", "tile_min", "=", "Tile", ".", "for_point", "(", "point_min", ",", "zoom_level", ")", "tile_max", "=", "Tile", ".", "for_point", "(", "point_max", ",", "zoom_level", ")", "tiles", "=", "[", "]", "for", "x", "in", "range", "(", "tile_min", ".", "tms_x", ",", "tile_max", ".", "tms_x", "+", "1", ")", ":", "for", "y", "in", "range", "(", "tile_min", ".", "tms_y", ",", "tile_max", ".", "tms_y", "+", "1", ")", ":", "tiles", ".", "append", "(", "Tile", ".", "from_tms", "(", "tms_x", "=", "x", ",", "tms_y", "=", "y", ",", "zoom", "=", "zoom_level", ")", ")", "return", "tiles" ]
* Returns all tiles for the specified bounding box
[ "*", "Returns", "all", "tiles", "for", "the", "specified", "bounding", "box" ]
python
train
49.8
genepattern/nbtools
nbtools/jsobject/jsobject.py
https://github.com/genepattern/nbtools/blob/2f74703f59926d8565f9714b1458dc87da8f8574/nbtools/jsobject/jsobject.py#L89-L104
def serialize(self, obj): """Serialize an object for sending to the front-end.""" if hasattr(obj, '_jsid'): return {'immutable': False, 'value': obj._jsid} else: obj_json = {'immutable': True} try: json.dumps(obj) obj_json['value'] = obj except: pass if callable(obj): guid = str(uuid.uuid4()) callback_registry[guid] = obj obj_json['callback'] = guid return obj_json
[ "def", "serialize", "(", "self", ",", "obj", ")", ":", "if", "hasattr", "(", "obj", ",", "'_jsid'", ")", ":", "return", "{", "'immutable'", ":", "False", ",", "'value'", ":", "obj", ".", "_jsid", "}", "else", ":", "obj_json", "=", "{", "'immutable'", ":", "True", "}", "try", ":", "json", ".", "dumps", "(", "obj", ")", "obj_json", "[", "'value'", "]", "=", "obj", "except", ":", "pass", "if", "callable", "(", "obj", ")", ":", "guid", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "callback_registry", "[", "guid", "]", "=", "obj", "obj_json", "[", "'callback'", "]", "=", "guid", "return", "obj_json" ]
Serialize an object for sending to the front-end.
[ "Serialize", "an", "object", "for", "sending", "to", "the", "front", "-", "end", "." ]
python
train
34
chaoss/grimoirelab-sortinghat
sortinghat/api.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/api.py#L339-L363
def delete_unique_identity(db, uuid): """Remove a unique identity from the registry. Function that removes from the registry, the unique identity that matches with uuid. Data related to this identity will be also removed. It checks first whether the unique identity is already on the registry. When it is found, the unique identity is removed. Otherwise, it will raise a 'NotFoundError' exception. :param db: database manager :param uuid: unique identifier assigned to the unique identity set for being removed :raises NotFoundError: raised when the unique identity does not exist in the registry. """ with db.connect() as session: uidentity = find_unique_identity(session, uuid) if not uidentity: raise NotFoundError(entity=uuid) delete_unique_identity_db(session, uidentity)
[ "def", "delete_unique_identity", "(", "db", ",", "uuid", ")", ":", "with", "db", ".", "connect", "(", ")", "as", "session", ":", "uidentity", "=", "find_unique_identity", "(", "session", ",", "uuid", ")", "if", "not", "uidentity", ":", "raise", "NotFoundError", "(", "entity", "=", "uuid", ")", "delete_unique_identity_db", "(", "session", ",", "uidentity", ")" ]
Remove a unique identity from the registry. Function that removes from the registry, the unique identity that matches with uuid. Data related to this identity will be also removed. It checks first whether the unique identity is already on the registry. When it is found, the unique identity is removed. Otherwise, it will raise a 'NotFoundError' exception. :param db: database manager :param uuid: unique identifier assigned to the unique identity set for being removed :raises NotFoundError: raised when the unique identity does not exist in the registry.
[ "Remove", "a", "unique", "identity", "from", "the", "registry", "." ]
python
train
34.32
miyakogi/wdom
wdom/element.py
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/element.py#L600-L604
def hasAttribute(self, attr: str) -> bool: """Return True if this node has ``attr``.""" if attr == 'class': return bool(self.classList) return attr in self.attributes
[ "def", "hasAttribute", "(", "self", ",", "attr", ":", "str", ")", "->", "bool", ":", "if", "attr", "==", "'class'", ":", "return", "bool", "(", "self", ".", "classList", ")", "return", "attr", "in", "self", ".", "attributes" ]
Return True if this node has ``attr``.
[ "Return", "True", "if", "this", "node", "has", "attr", "." ]
python
train
39.6
duniter/duniter-python-api
duniterpy/documents/document.py
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/document.py#L106-L112
def sha_hash(self) -> str: """ Return uppercase hex sha256 hash from signed raw document :return: """ return hashlib.sha256(self.signed_raw().encode("ascii")).hexdigest().upper()
[ "def", "sha_hash", "(", "self", ")", "->", "str", ":", "return", "hashlib", ".", "sha256", "(", "self", ".", "signed_raw", "(", ")", ".", "encode", "(", "\"ascii\"", ")", ")", ".", "hexdigest", "(", ")", ".", "upper", "(", ")" ]
Return uppercase hex sha256 hash from signed raw document :return:
[ "Return", "uppercase", "hex", "sha256", "hash", "from", "signed", "raw", "document" ]
python
train
30.428571
harlowja/failure
failure/finders.py
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/finders.py#L44-L73
def match_modules(allowed_modules): """Creates a matcher that matches a list/set/tuple of allowed modules.""" cleaned_allowed_modules = [ utils.mod_to_mod_name(tmp_mod) for tmp_mod in allowed_modules ] cleaned_split_allowed_modules = [ tmp_mod.split(".") for tmp_mod in cleaned_allowed_modules ] cleaned_allowed_modules = [] del cleaned_allowed_modules def matcher(cause): cause_cls = None cause_type_name = cause.exception_type_names[0] # Rip off the class name (usually at the end). cause_type_name_pieces = cause_type_name.split(".") cause_type_name_mod_pieces = cause_type_name_pieces[0:-1] # Do any modules provided match the provided causes module? mod_match = any( utils.array_prefix_matches(mod_pieces, cause_type_name_mod_pieces) for mod_pieces in cleaned_split_allowed_modules) if mod_match: cause_cls = importutils.import_class(cause_type_name) cause_cls = ensure_base_exception(cause_type_name, cause_cls) return cause_cls return matcher
[ "def", "match_modules", "(", "allowed_modules", ")", ":", "cleaned_allowed_modules", "=", "[", "utils", ".", "mod_to_mod_name", "(", "tmp_mod", ")", "for", "tmp_mod", "in", "allowed_modules", "]", "cleaned_split_allowed_modules", "=", "[", "tmp_mod", ".", "split", "(", "\".\"", ")", "for", "tmp_mod", "in", "cleaned_allowed_modules", "]", "cleaned_allowed_modules", "=", "[", "]", "del", "cleaned_allowed_modules", "def", "matcher", "(", "cause", ")", ":", "cause_cls", "=", "None", "cause_type_name", "=", "cause", ".", "exception_type_names", "[", "0", "]", "# Rip off the class name (usually at the end).", "cause_type_name_pieces", "=", "cause_type_name", ".", "split", "(", "\".\"", ")", "cause_type_name_mod_pieces", "=", "cause_type_name_pieces", "[", "0", ":", "-", "1", "]", "# Do any modules provided match the provided causes module?", "mod_match", "=", "any", "(", "utils", ".", "array_prefix_matches", "(", "mod_pieces", ",", "cause_type_name_mod_pieces", ")", "for", "mod_pieces", "in", "cleaned_split_allowed_modules", ")", "if", "mod_match", ":", "cause_cls", "=", "importutils", ".", "import_class", "(", "cause_type_name", ")", "cause_cls", "=", "ensure_base_exception", "(", "cause_type_name", ",", "cause_cls", ")", "return", "cause_cls", "return", "matcher" ]
Creates a matcher that matches a list/set/tuple of allowed modules.
[ "Creates", "a", "matcher", "that", "matches", "a", "list", "/", "set", "/", "tuple", "of", "allowed", "modules", "." ]
python
train
38.3
singularityhub/singularity-cli
spython/oci/cmd/states.py
https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/oci/cmd/states.py#L13-L52
def state(self, container_id=None, sudo=None, sync_socket=None): ''' get the state of an OciImage, if it exists. The optional states that can be returned are created, running, stopped or (not existing). Equivalent command line example: singularity oci state <container_ID> Parameters ========== container_id: the id to get the state of. sudo: Add sudo to the command. If the container was created by root, you need sudo to interact and get its state. sync_socket: the path to the unix socket for state synchronization Returns ======= state: a parsed json of the container state, if exists. If the container is not found, None is returned. ''' sudo = self._get_sudo(sudo) container_id = self.get_container_id(container_id) # singularity oci state cmd = self._init_command('state') if sync_socket != None: cmd = cmd + ['--sync-socket', sync_socket] # Finally, add the container_id cmd.append(container_id) # Get the instance state result = self._run_command(cmd, sudo=sudo, quiet=True) if result != None: # If successful, a string is returned to parse if isinstance(result, str): return json.loads(result)
[ "def", "state", "(", "self", ",", "container_id", "=", "None", ",", "sudo", "=", "None", ",", "sync_socket", "=", "None", ")", ":", "sudo", "=", "self", ".", "_get_sudo", "(", "sudo", ")", "container_id", "=", "self", ".", "get_container_id", "(", "container_id", ")", "# singularity oci state", "cmd", "=", "self", ".", "_init_command", "(", "'state'", ")", "if", "sync_socket", "!=", "None", ":", "cmd", "=", "cmd", "+", "[", "'--sync-socket'", ",", "sync_socket", "]", "# Finally, add the container_id", "cmd", ".", "append", "(", "container_id", ")", "# Get the instance state", "result", "=", "self", ".", "_run_command", "(", "cmd", ",", "sudo", "=", "sudo", ",", "quiet", "=", "True", ")", "if", "result", "!=", "None", ":", "# If successful, a string is returned to parse", "if", "isinstance", "(", "result", ",", "str", ")", ":", "return", "json", ".", "loads", "(", "result", ")" ]
get the state of an OciImage, if it exists. The optional states that can be returned are created, running, stopped or (not existing). Equivalent command line example: singularity oci state <container_ID> Parameters ========== container_id: the id to get the state of. sudo: Add sudo to the command. If the container was created by root, you need sudo to interact and get its state. sync_socket: the path to the unix socket for state synchronization Returns ======= state: a parsed json of the container state, if exists. If the container is not found, None is returned.
[ "get", "the", "state", "of", "an", "OciImage", "if", "it", "exists", ".", "The", "optional", "states", "that", "can", "be", "returned", "are", "created", "running", "stopped", "or", "(", "not", "existing", ")", "." ]
python
train
32.35
sorgerlab/indra
indra/literature/elsevier_client.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/elsevier_client.py#L89-L106
def check_entitlement(doi): """Check whether IP and credentials enable access to content for a doi. This function uses the entitlement endpoint of the Elsevier API to check whether an article is available to a given institution. Note that this feature of the API is itself not available for all institution keys. """ if doi.lower().startswith('doi:'): doi = doi[4:] url = '%s/%s' % (elsevier_entitlement_url, doi) params = {'httpAccept': 'text/xml'} res = requests.get(url, params, headers=ELSEVIER_KEYS) if not res.status_code == 200: logger.error('Could not check entitlements for article %s: ' 'status code %d' % (doi, res.status_code)) logger.error('Response content: %s' % res.text) return False return True
[ "def", "check_entitlement", "(", "doi", ")", ":", "if", "doi", ".", "lower", "(", ")", ".", "startswith", "(", "'doi:'", ")", ":", "doi", "=", "doi", "[", "4", ":", "]", "url", "=", "'%s/%s'", "%", "(", "elsevier_entitlement_url", ",", "doi", ")", "params", "=", "{", "'httpAccept'", ":", "'text/xml'", "}", "res", "=", "requests", ".", "get", "(", "url", ",", "params", ",", "headers", "=", "ELSEVIER_KEYS", ")", "if", "not", "res", ".", "status_code", "==", "200", ":", "logger", ".", "error", "(", "'Could not check entitlements for article %s: '", "'status code %d'", "%", "(", "doi", ",", "res", ".", "status_code", ")", ")", "logger", ".", "error", "(", "'Response content: %s'", "%", "res", ".", "text", ")", "return", "False", "return", "True" ]
Check whether IP and credentials enable access to content for a doi. This function uses the entitlement endpoint of the Elsevier API to check whether an article is available to a given institution. Note that this feature of the API is itself not available for all institution keys.
[ "Check", "whether", "IP", "and", "credentials", "enable", "access", "to", "content", "for", "a", "doi", "." ]
python
train
44.055556
wummel/linkchecker
linkcheck/checker/urlbase.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/urlbase.py#L537-L546
def allows_simple_recursion(self): """Check recursion level and extern status.""" rec_level = self.aggregate.config["recursionlevel"] if rec_level >= 0 and self.recursion_level >= rec_level: log.debug(LOG_CHECK, "... no, maximum recursion level reached.") return False if self.extern[0]: log.debug(LOG_CHECK, "... no, extern.") return False return True
[ "def", "allows_simple_recursion", "(", "self", ")", ":", "rec_level", "=", "self", ".", "aggregate", ".", "config", "[", "\"recursionlevel\"", "]", "if", "rec_level", ">=", "0", "and", "self", ".", "recursion_level", ">=", "rec_level", ":", "log", ".", "debug", "(", "LOG_CHECK", ",", "\"... no, maximum recursion level reached.\"", ")", "return", "False", "if", "self", ".", "extern", "[", "0", "]", ":", "log", ".", "debug", "(", "LOG_CHECK", ",", "\"... no, extern.\"", ")", "return", "False", "return", "True" ]
Check recursion level and extern status.
[ "Check", "recursion", "level", "and", "extern", "status", "." ]
python
train
43.1
jgillick/LendingClub
lendingclub/session.py
https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/session.py#L292-L296
def post(self, path, query=None, data=None, redirects=True): """ POST request wrapper for :func:`request()` """ return self.request('POST', path, query, data, redirects)
[ "def", "post", "(", "self", ",", "path", ",", "query", "=", "None", ",", "data", "=", "None", ",", "redirects", "=", "True", ")", ":", "return", "self", ".", "request", "(", "'POST'", ",", "path", ",", "query", ",", "data", ",", "redirects", ")" ]
POST request wrapper for :func:`request()`
[ "POST", "request", "wrapper", "for", ":", "func", ":", "request", "()" ]
python
train
39.4
h2oai/h2o-3
h2o-py/h2o/h2o.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L1297-L1316
def as_list(data, use_pandas=True, header=True): """ Convert an H2O data object into a python-specific object. WARNING! This will pull all data local! If Pandas is available (and use_pandas is True), then pandas will be used to parse the data frame. Otherwise, a list-of-lists populated by character data will be returned (so the types of data will all be str). :param data: an H2O data object. :param use_pandas: If True, try to use pandas for reading in the data. :param header: If True, return column names as first element in list :returns: List of lists (Rows x Columns). """ assert_is_type(data, H2OFrame) assert_is_type(use_pandas, bool) assert_is_type(header, bool) return H2OFrame.as_data_frame(data, use_pandas=use_pandas, header=header)
[ "def", "as_list", "(", "data", ",", "use_pandas", "=", "True", ",", "header", "=", "True", ")", ":", "assert_is_type", "(", "data", ",", "H2OFrame", ")", "assert_is_type", "(", "use_pandas", ",", "bool", ")", "assert_is_type", "(", "header", ",", "bool", ")", "return", "H2OFrame", ".", "as_data_frame", "(", "data", ",", "use_pandas", "=", "use_pandas", ",", "header", "=", "header", ")" ]
Convert an H2O data object into a python-specific object. WARNING! This will pull all data local! If Pandas is available (and use_pandas is True), then pandas will be used to parse the data frame. Otherwise, a list-of-lists populated by character data will be returned (so the types of data will all be str). :param data: an H2O data object. :param use_pandas: If True, try to use pandas for reading in the data. :param header: If True, return column names as first element in list :returns: List of lists (Rows x Columns).
[ "Convert", "an", "H2O", "data", "object", "into", "a", "python", "-", "specific", "object", "." ]
python
test
39.6
captin411/ofxclient
ofxclient/account.py
https://github.com/captin411/ofxclient/blob/4da2719f0ecbbf5eee62fb82c1b3b34ec955ee5e/ofxclient/account.py#L198-L227
def from_ofxparse(data, institution): """Instantiate :py:class:`ofxclient.Account` subclass from ofxparse module :param data: an ofxparse account :type data: An :py:class:`ofxparse.Account` object :param institution: The parent institution of the account :type institution: :py:class:`ofxclient.Institution` object """ description = data.desc if hasattr(data, 'desc') else None if data.type == AccountType.Bank: return BankAccount( institution=institution, number=data.account_id, routing_number=data.routing_number, account_type=data.account_type, description=description) elif data.type == AccountType.CreditCard: return CreditCardAccount( institution=institution, number=data.account_id, description=description) elif data.type == AccountType.Investment: return BrokerageAccount( institution=institution, number=data.account_id, broker_id=data.brokerid, description=description) raise ValueError("unknown account type: %s" % data.type)
[ "def", "from_ofxparse", "(", "data", ",", "institution", ")", ":", "description", "=", "data", ".", "desc", "if", "hasattr", "(", "data", ",", "'desc'", ")", "else", "None", "if", "data", ".", "type", "==", "AccountType", ".", "Bank", ":", "return", "BankAccount", "(", "institution", "=", "institution", ",", "number", "=", "data", ".", "account_id", ",", "routing_number", "=", "data", ".", "routing_number", ",", "account_type", "=", "data", ".", "account_type", ",", "description", "=", "description", ")", "elif", "data", ".", "type", "==", "AccountType", ".", "CreditCard", ":", "return", "CreditCardAccount", "(", "institution", "=", "institution", ",", "number", "=", "data", ".", "account_id", ",", "description", "=", "description", ")", "elif", "data", ".", "type", "==", "AccountType", ".", "Investment", ":", "return", "BrokerageAccount", "(", "institution", "=", "institution", ",", "number", "=", "data", ".", "account_id", ",", "broker_id", "=", "data", ".", "brokerid", ",", "description", "=", "description", ")", "raise", "ValueError", "(", "\"unknown account type: %s\"", "%", "data", ".", "type", ")" ]
Instantiate :py:class:`ofxclient.Account` subclass from ofxparse module :param data: an ofxparse account :type data: An :py:class:`ofxparse.Account` object :param institution: The parent institution of the account :type institution: :py:class:`ofxclient.Institution` object
[ "Instantiate", ":", "py", ":", "class", ":", "ofxclient", ".", "Account", "subclass", "from", "ofxparse", "module" ]
python
train
41.166667
vivangkumar/uberpy
uberpy/api.py
https://github.com/vivangkumar/uberpy/blob/abc62ccb5399424eb5690f12c392ab2dbd9d96e0/uberpy/api.py#L105-L138
def get_json(self, uri_path, http_method='GET', query_parameters=None, body=None, headers=None): """ Fetches the JSON returned, after making the call and checking for errors. :param uri_path: Endpoint to be used to make a request. :param http_method: HTTP method to be used. :param query_parameters: Parameters to be added to the request. :param body: Optional body, if required. :param headers: Optional headers, if required. :return: JSON """ query_parameters = query_parameters or {} headers = headers or {} # Add credentials to the request query_parameters = self.add_credentials(query_parameters) # Build the request uri with parameters uri = self.build_request(uri_path, query_parameters) if http_method in ('POST', 'PUT', 'DELETE') and 'Content-Type' not in headers: headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' response, content = self.client.request( uri=uri, method=http_method, body=body, headers=headers ) # Check for known errors that could be returned self.check_status(content, response) return json.loads(content.decode('utf-8'))
[ "def", "get_json", "(", "self", ",", "uri_path", ",", "http_method", "=", "'GET'", ",", "query_parameters", "=", "None", ",", "body", "=", "None", ",", "headers", "=", "None", ")", ":", "query_parameters", "=", "query_parameters", "or", "{", "}", "headers", "=", "headers", "or", "{", "}", "# Add credentials to the request", "query_parameters", "=", "self", ".", "add_credentials", "(", "query_parameters", ")", "# Build the request uri with parameters", "uri", "=", "self", ".", "build_request", "(", "uri_path", ",", "query_parameters", ")", "if", "http_method", "in", "(", "'POST'", ",", "'PUT'", ",", "'DELETE'", ")", "and", "'Content-Type'", "not", "in", "headers", ":", "headers", "[", "'Content-Type'", "]", "=", "'application/json'", "headers", "[", "'Accept'", "]", "=", "'application/json'", "response", ",", "content", "=", "self", ".", "client", ".", "request", "(", "uri", "=", "uri", ",", "method", "=", "http_method", ",", "body", "=", "body", ",", "headers", "=", "headers", ")", "# Check for known errors that could be returned", "self", ".", "check_status", "(", "content", ",", "response", ")", "return", "json", ".", "loads", "(", "content", ".", "decode", "(", "'utf-8'", ")", ")" ]
Fetches the JSON returned, after making the call and checking for errors. :param uri_path: Endpoint to be used to make a request. :param http_method: HTTP method to be used. :param query_parameters: Parameters to be added to the request. :param body: Optional body, if required. :param headers: Optional headers, if required. :return: JSON
[ "Fetches", "the", "JSON", "returned", "after", "making", "the", "call", "and", "checking", "for", "errors", ".", ":", "param", "uri_path", ":", "Endpoint", "to", "be", "used", "to", "make", "a", "request", ".", ":", "param", "http_method", ":", "HTTP", "method", "to", "be", "used", ".", ":", "param", "query_parameters", ":", "Parameters", "to", "be", "added", "to", "the", "request", ".", ":", "param", "body", ":", "Optional", "body", "if", "required", ".", ":", "param", "headers", ":", "Optional", "headers", "if", "required", ".", ":", "return", ":", "JSON" ]
python
valid
38.029412
hyperledger/indy-plenum
stp_core/ratchet.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_core/ratchet.py#L30-L40
def _sumSeries(a: float, b: float, steps: int) -> float: """ Return value of the the following polynomial. .. math:: (a * e^(b*steps) - 1) / (e^b - 1) :param a: multiplier :param b: exponent multiplier :param steps: the number of steps """ return a * (exp(b * steps) - 1) / (exp(b) - 1)
[ "def", "_sumSeries", "(", "a", ":", "float", ",", "b", ":", "float", ",", "steps", ":", "int", ")", "->", "float", ":", "return", "a", "*", "(", "exp", "(", "b", "*", "steps", ")", "-", "1", ")", "/", "(", "exp", "(", "b", ")", "-", "1", ")" ]
Return value of the the following polynomial. .. math:: (a * e^(b*steps) - 1) / (e^b - 1) :param a: multiplier :param b: exponent multiplier :param steps: the number of steps
[ "Return", "value", "of", "the", "the", "following", "polynomial", ".", "..", "math", "::", "(", "a", "*", "e^", "(", "b", "*", "steps", ")", "-", "1", ")", "/", "(", "e^b", "-", "1", ")" ]
python
train
32
saltstack/salt
salt/modules/systemd_service.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/systemd_service.py#L1189-L1237
def disable(name, no_block=False, root=None, **kwargs): # pylint: disable=unused-argument ''' .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands run by this function from the ``salt-minion`` daemon's control group. This is done to avoid a race condition in cases where the ``salt-minion`` service is restarted while a service is being modified. If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html Disable the named service to not start when the system boots no_block : False Set to ``True`` to start the service using ``--no-block``. .. versionadded:: 2017.7.0 root Enable/disable/mask unit files in the specified root directory CLI Example: .. code-block:: bash salt '*' service.disable <service name> ''' _check_for_unit_changes(name) if name in _get_sysv_services(root): cmd = [] if salt.utils.systemd.has_scope(__context__) \ and __salt__['config.get']('systemd.scope', True): cmd.extend(['systemd-run', '--scope']) service_exec = _get_service_exec() if service_exec.endswith('/update-rc.d'): cmd.extend([service_exec, '-f', name, 'remove']) elif service_exec.endswith('/chkconfig'): cmd.extend([service_exec, name, 'off']) return __salt__['cmd.retcode'](cmd, python_shell=False, ignore_retcode=True) == 0 # Using cmd.run_all instead of cmd.retcode here to make unit tests easier return __salt__['cmd.run_all']( _systemctl_cmd('disable', name, systemd_scope=True, no_block=no_block, root=root), python_shell=False, ignore_retcode=True)['retcode'] == 0
[ "def", "disable", "(", "name", ",", "no_block", "=", "False", ",", "root", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=unused-argument", "_check_for_unit_changes", "(", "name", ")", "if", "name", "in", "_get_sysv_services", "(", "root", ")", ":", "cmd", "=", "[", "]", "if", "salt", ".", "utils", ".", "systemd", ".", "has_scope", "(", "__context__", ")", "and", "__salt__", "[", "'config.get'", "]", "(", "'systemd.scope'", ",", "True", ")", ":", "cmd", ".", "extend", "(", "[", "'systemd-run'", ",", "'--scope'", "]", ")", "service_exec", "=", "_get_service_exec", "(", ")", "if", "service_exec", ".", "endswith", "(", "'/update-rc.d'", ")", ":", "cmd", ".", "extend", "(", "[", "service_exec", ",", "'-f'", ",", "name", ",", "'remove'", "]", ")", "elif", "service_exec", ".", "endswith", "(", "'/chkconfig'", ")", ":", "cmd", ".", "extend", "(", "[", "service_exec", ",", "name", ",", "'off'", "]", ")", "return", "__salt__", "[", "'cmd.retcode'", "]", "(", "cmd", ",", "python_shell", "=", "False", ",", "ignore_retcode", "=", "True", ")", "==", "0", "# Using cmd.run_all instead of cmd.retcode here to make unit tests easier", "return", "__salt__", "[", "'cmd.run_all'", "]", "(", "_systemctl_cmd", "(", "'disable'", ",", "name", ",", "systemd_scope", "=", "True", ",", "no_block", "=", "no_block", ",", "root", "=", "root", ")", ",", "python_shell", "=", "False", ",", "ignore_retcode", "=", "True", ")", "[", "'retcode'", "]", "==", "0" ]
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands run by this function from the ``salt-minion`` daemon's control group. This is done to avoid a race condition in cases where the ``salt-minion`` service is restarted while a service is being modified. If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html Disable the named service to not start when the system boots no_block : False Set to ``True`` to start the service using ``--no-block``. .. versionadded:: 2017.7.0 root Enable/disable/mask unit files in the specified root directory CLI Example: .. code-block:: bash salt '*' service.disable <service name>
[ "..", "versionchanged", "::", "2015", ".", "8", ".", "12", "2016", ".", "3", ".", "3", "2016", ".", "11", ".", "0", "On", "minions", "running", "systemd", ">", "=", "205", "systemd", "-", "run", "(", "1", ")", "_", "is", "now", "used", "to", "isolate", "commands", "run", "by", "this", "function", "from", "the", "salt", "-", "minion", "daemon", "s", "control", "group", ".", "This", "is", "done", "to", "avoid", "a", "race", "condition", "in", "cases", "where", "the", "salt", "-", "minion", "service", "is", "restarted", "while", "a", "service", "is", "being", "modified", ".", "If", "desired", "usage", "of", "systemd", "-", "run", "(", "1", ")", "_", "can", "be", "suppressed", "by", "setting", "a", ":", "mod", ":", "config", "option", "<salt", ".", "modules", ".", "config", ".", "get", ">", "called", "systemd", ".", "scope", "with", "a", "value", "of", "False", "(", "no", "quotes", ")", "." ]
python
train
42.44898
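For illustration, a standalone sketch of the command list the SysV branch of `disable` builds; the helper path and service name below are hypothetical examples, and on a real minion the list is executed via `cmd.retcode` rather than printed:

```python
# Hypothetical inputs: the path and service name are examples, not read from a system.
name = "apache2"
service_exec = "/usr/sbin/update-rc.d"

cmd = ["systemd-run", "--scope"]            # prepended when scope isolation is available
if service_exec.endswith("/update-rc.d"):
    cmd.extend([service_exec, "-f", name, "remove"])
elif service_exec.endswith("/chkconfig"):
    cmd.extend([service_exec, name, "off"])
print(" ".join(cmd))  # systemd-run --scope /usr/sbin/update-rc.d -f apache2 remove
```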
mdickinson/bigfloat
bigfloat/core.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L537-L556
def hex(self): """Return a hexadecimal representation of a BigFloat.""" sign = '-' if self._sign() else '' e = self._exponent() if isinstance(e, six.string_types): return sign + e m = self._significand() _, digits, _ = _mpfr_get_str2( 16, 0, m, ROUND_TIES_TO_EVEN, ) # only print the number of digits that are actually necessary n = 1 + (self.precision - 1) // 4 assert all(c == '0' for c in digits[n:]) result = '%s0x0.%sp%+d' % (sign, digits[:n], e) return result
[ "def", "hex", "(", "self", ")", ":", "sign", "=", "'-'", "if", "self", ".", "_sign", "(", ")", "else", "''", "e", "=", "self", ".", "_exponent", "(", ")", "if", "isinstance", "(", "e", ",", "six", ".", "string_types", ")", ":", "return", "sign", "+", "e", "m", "=", "self", ".", "_significand", "(", ")", "_", ",", "digits", ",", "_", "=", "_mpfr_get_str2", "(", "16", ",", "0", ",", "m", ",", "ROUND_TIES_TO_EVEN", ",", ")", "# only print the number of digits that are actually necessary", "n", "=", "1", "+", "(", "self", ".", "precision", "-", "1", ")", "//", "4", "assert", "all", "(", "c", "==", "'0'", "for", "c", "in", "digits", "[", "n", ":", "]", ")", "result", "=", "'%s0x0.%sp%+d'", "%", "(", "sign", ",", "digits", "[", ":", "n", "]", ",", "e", ")", "return", "result" ]
Return a hexadecimal representation of a BigFloat.
[ "Return", "a", "hexadecimal", "representation", "of", "a", "BigFloat", "." ]
python
train
30.2
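A hedged usage sketch for `BigFloat.hex`, assuming the bigfloat package is installed; the number of hex digits printed depends on the context precision (53 bits by default):

```python
from bigfloat import BigFloat  # assumes the bigfloat package is available

x = BigFloat(2.5)
print(x.hex())  # e.g. '0x0.a0000000000000p+2' at the default 53-bit precision
```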
keiichishima/pcalg
pcalg.py
https://github.com/keiichishima/pcalg/blob/f270e2bdb76b88c8f80a1ea07317ff4be88e2359/pcalg.py#L37-L123
def estimate_skeleton(indep_test_func, data_matrix, alpha, **kwargs): """Estimate a skeleton graph from the statistis information. Args: indep_test_func: the function name for a conditional independency test. data_matrix: data (as a numpy array). alpha: the significance level. kwargs: 'max_reach': maximum value of l (see the code). The value depends on the underlying distribution. 'method': if 'stable' given, use stable-PC algorithm (see [Colombo2014]). 'init_graph': initial structure of skeleton graph (as a networkx.Graph). If not specified, a complete graph is used. other parameters may be passed depending on the indep_test_func()s. Returns: g: a skeleton graph (as a networkx.Graph). sep_set: a separation set (as an 2D-array of set()). [Colombo2014] Diego Colombo and Marloes H Maathuis. Order-independent constraint-based causal structure learning. In The Journal of Machine Learning Research, Vol. 15, pp. 3741-3782, 2014. """ def method_stable(kwargs): return ('method' in kwargs) and kwargs['method'] == "stable" node_ids = range(data_matrix.shape[1]) node_size = data_matrix.shape[1] sep_set = [[set() for i in range(node_size)] for j in range(node_size)] if 'init_graph' in kwargs: g = kwargs['init_graph'] if not isinstance(g, nx.Graph): raise ValueError elif not g.number_of_nodes() == len(node_ids): raise ValueError('init_graph not matching data_matrix shape') for (i, j) in combinations(node_ids, 2): if not g.has_edge(i, j): sep_set[i][j] = None sep_set[j][i] = None else: g = _create_complete_graph(node_ids) l = 0 while True: cont = False remove_edges = [] for (i, j) in permutations(node_ids, 2): adj_i = list(g.neighbors(i)) if j not in adj_i: continue else: adj_i.remove(j) if len(adj_i) >= l: _logger.debug('testing %s and %s' % (i,j)) _logger.debug('neighbors of %s are %s' % (i, str(adj_i))) if len(adj_i) < l: continue for k in combinations(adj_i, l): _logger.debug('indep prob of %s and %s with subset %s' % (i, j, str(k))) p_val = indep_test_func(data_matrix, i, j, set(k), **kwargs) _logger.debug('p_val is %s' % str(p_val)) if p_val > alpha: if g.has_edge(i, j): _logger.debug('p: remove edge (%s, %s)' % (i, j)) if method_stable(kwargs): remove_edges.append((i, j)) else: g.remove_edge(i, j) sep_set[i][j] |= set(k) sep_set[j][i] |= set(k) break cont = True l += 1 if method_stable(kwargs): g.remove_edges_from(remove_edges) if cont is False: break if ('max_reach' in kwargs) and (l > kwargs['max_reach']): break return (g, sep_set)
[ "def", "estimate_skeleton", "(", "indep_test_func", ",", "data_matrix", ",", "alpha", ",", "*", "*", "kwargs", ")", ":", "def", "method_stable", "(", "kwargs", ")", ":", "return", "(", "'method'", "in", "kwargs", ")", "and", "kwargs", "[", "'method'", "]", "==", "\"stable\"", "node_ids", "=", "range", "(", "data_matrix", ".", "shape", "[", "1", "]", ")", "node_size", "=", "data_matrix", ".", "shape", "[", "1", "]", "sep_set", "=", "[", "[", "set", "(", ")", "for", "i", "in", "range", "(", "node_size", ")", "]", "for", "j", "in", "range", "(", "node_size", ")", "]", "if", "'init_graph'", "in", "kwargs", ":", "g", "=", "kwargs", "[", "'init_graph'", "]", "if", "not", "isinstance", "(", "g", ",", "nx", ".", "Graph", ")", ":", "raise", "ValueError", "elif", "not", "g", ".", "number_of_nodes", "(", ")", "==", "len", "(", "node_ids", ")", ":", "raise", "ValueError", "(", "'init_graph not matching data_matrix shape'", ")", "for", "(", "i", ",", "j", ")", "in", "combinations", "(", "node_ids", ",", "2", ")", ":", "if", "not", "g", ".", "has_edge", "(", "i", ",", "j", ")", ":", "sep_set", "[", "i", "]", "[", "j", "]", "=", "None", "sep_set", "[", "j", "]", "[", "i", "]", "=", "None", "else", ":", "g", "=", "_create_complete_graph", "(", "node_ids", ")", "l", "=", "0", "while", "True", ":", "cont", "=", "False", "remove_edges", "=", "[", "]", "for", "(", "i", ",", "j", ")", "in", "permutations", "(", "node_ids", ",", "2", ")", ":", "adj_i", "=", "list", "(", "g", ".", "neighbors", "(", "i", ")", ")", "if", "j", "not", "in", "adj_i", ":", "continue", "else", ":", "adj_i", ".", "remove", "(", "j", ")", "if", "len", "(", "adj_i", ")", ">=", "l", ":", "_logger", ".", "debug", "(", "'testing %s and %s'", "%", "(", "i", ",", "j", ")", ")", "_logger", ".", "debug", "(", "'neighbors of %s are %s'", "%", "(", "i", ",", "str", "(", "adj_i", ")", ")", ")", "if", "len", "(", "adj_i", ")", "<", "l", ":", "continue", "for", "k", "in", "combinations", "(", "adj_i", ",", "l", ")", ":", "_logger", ".", "debug", "(", "'indep prob of %s and %s with subset %s'", "%", "(", "i", ",", "j", ",", "str", "(", "k", ")", ")", ")", "p_val", "=", "indep_test_func", "(", "data_matrix", ",", "i", ",", "j", ",", "set", "(", "k", ")", ",", "*", "*", "kwargs", ")", "_logger", ".", "debug", "(", "'p_val is %s'", "%", "str", "(", "p_val", ")", ")", "if", "p_val", ">", "alpha", ":", "if", "g", ".", "has_edge", "(", "i", ",", "j", ")", ":", "_logger", ".", "debug", "(", "'p: remove edge (%s, %s)'", "%", "(", "i", ",", "j", ")", ")", "if", "method_stable", "(", "kwargs", ")", ":", "remove_edges", ".", "append", "(", "(", "i", ",", "j", ")", ")", "else", ":", "g", ".", "remove_edge", "(", "i", ",", "j", ")", "sep_set", "[", "i", "]", "[", "j", "]", "|=", "set", "(", "k", ")", "sep_set", "[", "j", "]", "[", "i", "]", "|=", "set", "(", "k", ")", "break", "cont", "=", "True", "l", "+=", "1", "if", "method_stable", "(", "kwargs", ")", ":", "g", ".", "remove_edges_from", "(", "remove_edges", ")", "if", "cont", "is", "False", ":", "break", "if", "(", "'max_reach'", "in", "kwargs", ")", "and", "(", "l", ">", "kwargs", "[", "'max_reach'", "]", ")", ":", "break", "return", "(", "g", ",", "sep_set", ")" ]
Estimate a skeleton graph from the statistics information. Args: indep_test_func: the function name for a conditional independency test. data_matrix: data (as a numpy array). alpha: the significance level. kwargs: 'max_reach': maximum value of l (see the code). The value depends on the underlying distribution. 'method': if 'stable' given, use stable-PC algorithm (see [Colombo2014]). 'init_graph': initial structure of skeleton graph (as a networkx.Graph). If not specified, a complete graph is used. other parameters may be passed depending on the indep_test_func()s. Returns: g: a skeleton graph (as a networkx.Graph). sep_set: a separation set (as a 2D-array of set()). [Colombo2014] Diego Colombo and Marloes H Maathuis. Order-independent constraint-based causal structure learning. In The Journal of Machine Learning Research, Vol. 15, pp. 3741-3782, 2014.
[ "Estimate", "a", "skeleton", "graph", "from", "the", "statistis", "information", "." ]
python
train
39.45977
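A sketch of calling `estimate_skeleton` on synthetic data. The Fisher-z partial-correlation test below is a hypothetical stand-in for whatever conditional-independence test the caller supplies; it is not part of pcalg:

```python
import numpy as np
from scipy import stats

def fisher_z_test(data, i, j, ks, **kwargs):
    """Hypothetical independence test: Fisher-z on the partial correlation of i, j given ks."""
    idx = [i, j] + list(ks)
    corr = np.corrcoef(data[:, idx], rowvar=False)
    prec = np.linalg.pinv(corr)                               # precision matrix
    r = -prec[0, 1] / np.sqrt(prec[0, 0] * prec[1, 1])        # partial correlation
    r = float(np.clip(r, -0.999999, 0.999999))
    z_stat = 0.5 * np.log((1 + r) / (1 - r)) * np.sqrt(data.shape[0] - len(ks) - 3)
    return 2.0 * (1.0 - stats.norm.cdf(abs(z_stat)))          # two-sided p-value

rng = np.random.default_rng(0)
x = rng.normal(size=1000)
y = x + rng.normal(size=1000)
z = y + rng.normal(size=1000)
data = np.column_stack([x, y, z])

g, sep_set = estimate_skeleton(fisher_z_test, data, alpha=0.01)
print(sorted(g.edges()))   # typically [(0, 1), (1, 2)]: the 0-2 edge is dropped given {1}
```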
pvlib/pvlib-python
pvlib/forecast.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/forecast.py#L327-L375
def _netcdf2pandas(self, netcdf_data, query_variables, start, end): """ Transforms data from netcdf to pandas DataFrame. Parameters ---------- data: netcdf Data returned from UNIDATA NCSS query. query_variables: list The variables requested. start: Timestamp The start time end: Timestamp The end time Returns ------- pd.DataFrame """ # set self.time try: time_var = 'time' self.set_time(netcdf_data.variables[time_var]) except KeyError: # which model does this dumb thing? time_var = 'time1' self.set_time(netcdf_data.variables[time_var]) data_dict = {} for key, data in netcdf_data.variables.items(): # if accounts for possibility of extra variable returned if key not in query_variables: continue squeezed = data[:].squeeze() if squeezed.ndim == 1: data_dict[key] = squeezed elif squeezed.ndim == 2: for num, data_level in enumerate(squeezed.T): data_dict[key + '_' + str(num)] = data_level else: raise ValueError('cannot parse ndim > 2') data = pd.DataFrame(data_dict, index=self.time) # sometimes data is returned as hours since T0 # where T0 is before start. Then the hours between # T0 and start are added *after* end. So sort and slice # to remove the garbage data = data.sort_index().loc[start:end] return data
[ "def", "_netcdf2pandas", "(", "self", ",", "netcdf_data", ",", "query_variables", ",", "start", ",", "end", ")", ":", "# set self.time", "try", ":", "time_var", "=", "'time'", "self", ".", "set_time", "(", "netcdf_data", ".", "variables", "[", "time_var", "]", ")", "except", "KeyError", ":", "# which model does this dumb thing?", "time_var", "=", "'time1'", "self", ".", "set_time", "(", "netcdf_data", ".", "variables", "[", "time_var", "]", ")", "data_dict", "=", "{", "}", "for", "key", ",", "data", "in", "netcdf_data", ".", "variables", ".", "items", "(", ")", ":", "# if accounts for possibility of extra variable returned", "if", "key", "not", "in", "query_variables", ":", "continue", "squeezed", "=", "data", "[", ":", "]", ".", "squeeze", "(", ")", "if", "squeezed", ".", "ndim", "==", "1", ":", "data_dict", "[", "key", "]", "=", "squeezed", "elif", "squeezed", ".", "ndim", "==", "2", ":", "for", "num", ",", "data_level", "in", "enumerate", "(", "squeezed", ".", "T", ")", ":", "data_dict", "[", "key", "+", "'_'", "+", "str", "(", "num", ")", "]", "=", "data_level", "else", ":", "raise", "ValueError", "(", "'cannot parse ndim > 2'", ")", "data", "=", "pd", ".", "DataFrame", "(", "data_dict", ",", "index", "=", "self", ".", "time", ")", "# sometimes data is returned as hours since T0", "# where T0 is before start. Then the hours between", "# T0 and start are added *after* end. So sort and slice", "# to remove the garbage", "data", "=", "data", ".", "sort_index", "(", ")", ".", "loc", "[", "start", ":", "end", "]", "return", "data" ]
Transforms data from netcdf to pandas DataFrame. Parameters ---------- netcdf_data: netcdf Data returned from UNIDATA NCSS query. query_variables: list The variables requested. start: Timestamp The start time end: Timestamp The end time Returns ------- pd.DataFrame
[ "Transforms", "data", "from", "netcdf", "to", "pandas", "DataFrame", "." ]
python
train
33.285714
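The interesting part of `_netcdf2pandas` is how squeezed 2-D variables are split into one column per level; a self-contained sketch with synthetic arrays (the variable names here are made up):

```python
import numpy as np
import pandas as pd

# Synthetic stand-ins for squeezed netCDF variables: 'temp' is 1-D over time,
# 'wind' has an extra vertical-level dimension and is split into wind_0, wind_1.
data_dict = {}
temp = np.array([280.0, 281.5, 283.0])
wind = np.arange(6.0).reshape(3, 2)            # 3 time steps x 2 levels
data_dict["temp"] = temp
for num, data_level in enumerate(wind.T):
    data_dict["wind_" + str(num)] = data_level

df = pd.DataFrame(data_dict)
print(list(df.columns))  # ['temp', 'wind_0', 'wind_1']
```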
okeuday/erlang_py
erlang.py
https://github.com/okeuday/erlang_py/blob/81b7c2ace66b6bdee23602a6802efff541223fa3/erlang.py#L276-L292
def binary(self): """ return encoded representation """ creation_size = len(self.creation) if creation_size == 1: return ( b_chr(_TAG_PID_EXT) + self.node.binary() + self.id + self.serial + self.creation ) elif creation_size == 4: return ( b_chr(_TAG_NEW_PID_EXT) + self.node.binary() + self.id + self.serial + self.creation ) else: raise OutputException('unknown pid type')
[ "def", "binary", "(", "self", ")", ":", "creation_size", "=", "len", "(", "self", ".", "creation", ")", "if", "creation_size", "==", "1", ":", "return", "(", "b_chr", "(", "_TAG_PID_EXT", ")", "+", "self", ".", "node", ".", "binary", "(", ")", "+", "self", ".", "id", "+", "self", ".", "serial", "+", "self", ".", "creation", ")", "elif", "creation_size", "==", "4", ":", "return", "(", "b_chr", "(", "_TAG_NEW_PID_EXT", ")", "+", "self", ".", "node", ".", "binary", "(", ")", "+", "self", ".", "id", "+", "self", ".", "serial", "+", "self", ".", "creation", ")", "else", ":", "raise", "OutputException", "(", "'unknown pid type'", ")" ]
return encoded representation
[ "return", "encoded", "representation" ]
python
train
31.647059
mwgielen/jackal
jackal/core.py
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L129-L140
def get_pipe(self, object_type): """ Returns a generator that maps the input of the pipe to an elasticsearch object. Will call id_to_object if it cannot serialize the data from json. """ for line in sys.stdin: try: data = json.loads(line.strip()) obj = object_type(**data) yield obj except ValueError: yield self.id_to_object(line.strip())
[ "def", "get_pipe", "(", "self", ",", "object_type", ")", ":", "for", "line", "in", "sys", ".", "stdin", ":", "try", ":", "data", "=", "json", ".", "loads", "(", "line", ".", "strip", "(", ")", ")", "obj", "=", "object_type", "(", "*", "*", "data", ")", "yield", "obj", "except", "ValueError", ":", "yield", "self", ".", "id_to_object", "(", "line", ".", "strip", "(", ")", ")" ]
Returns a generator that maps the input of the pipe to an elasticsearch object. Will call id_to_object if it cannot serialize the data from json.
[ "Returns", "a", "generator", "that", "maps", "the", "input", "of", "the", "pipe", "to", "an", "elasticsearch", "object", ".", "Will", "call", "id_to_object", "if", "it", "cannot", "serialize", "the", "data", "from", "json", "." ]
python
valid
38.666667
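The same JSON-or-id fallback used by `get_pipe` can be exercised without a real pipe; a minimal sketch with in-memory lines and a hypothetical object type:

```python
import json

class Host:  # hypothetical stand-in for an elasticsearch document type
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

def parse_line(line, object_type):
    # Mirror of the pattern above: try JSON first, otherwise treat the line as a bare id.
    try:
        return object_type(**json.loads(line.strip()))
    except ValueError:
        return line.strip()  # in jackal this would go through id_to_object()

print(parse_line('{"address": "10.0.0.1"}', Host).address)  # 10.0.0.1
print(parse_line('some-document-id\n', Host))               # some-document-id
```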
pettarin/ipapy
ipapy/compatibility.py
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/compatibility.py#L25-L39
def is_unicode_string(string): """ Return ``True`` if the given string is a Unicode string, that is, of type ``unicode`` in Python 2 or ``str`` in Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to be checked :rtype: bool """ if string is None: return None if PY2: return isinstance(string, unicode) return isinstance(string, str)
[ "def", "is_unicode_string", "(", "string", ")", ":", "if", "string", "is", "None", ":", "return", "None", "if", "PY2", ":", "return", "isinstance", "(", "string", ",", "unicode", ")", "return", "isinstance", "(", "string", ",", "str", ")" ]
Return ``True`` if the given string is a Unicode string, that is, of type ``unicode`` in Python 2 or ``str`` in Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to be checked :rtype: bool
[ "Return", "True", "if", "the", "given", "string", "is", "a", "Unicode", "string", "that", "is", "of", "type", "unicode", "in", "Python", "2", "or", "str", "in", "Python", "3", "." ]
python
train
27.266667
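Expected behaviour of `is_unicode_string` on Python 3, where `str` is the Unicode type (a plain usage sketch):

```python
print(is_unicode_string("café"))    # True  (str is the Unicode type on Python 3)
print(is_unicode_string(b"cafe"))   # False (bytes are not a Unicode string)
print(is_unicode_string(None))      # None
```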
tcalmant/ipopo
pelix/ipopo/handlers/requiresmap.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/handlers/requiresmap.py#L354-L367
def stop(self): """ Stops the dependency manager (must be called before clear()) :return: The removed bindings (list) or None """ self._context.remove_service_listener(self) if self.services: return [ (service, reference) for reference, service in self.services.items() ] return None
[ "def", "stop", "(", "self", ")", ":", "self", ".", "_context", ".", "remove_service_listener", "(", "self", ")", "if", "self", ".", "services", ":", "return", "[", "(", "service", ",", "reference", ")", "for", "reference", ",", "service", "in", "self", ".", "services", ".", "items", "(", ")", "]", "return", "None" ]
Stops the dependency manager (must be called before clear()) :return: The removed bindings (list) or None
[ "Stops", "the", "dependency", "manager", "(", "must", "be", "called", "before", "clear", "()", ")" ]
python
train
27.428571
ubyssey/dispatch
dispatch/modules/content/models.py
https://github.com/ubyssey/dispatch/blob/8da6084fe61726f20e9cf675190480cfc45ee764/dispatch/modules/content/models.py#L509-L513
def get_medium_url(self): """Returns the medium size image URL.""" if self.is_gif(): return self.get_absolute_url() return '%s%s-%s.jpg' % (settings.MEDIA_URL, self.get_name(), 'medium')
[ "def", "get_medium_url", "(", "self", ")", ":", "if", "self", ".", "is_gif", "(", ")", ":", "return", "self", ".", "get_absolute_url", "(", ")", "return", "'%s%s-%s.jpg'", "%", "(", "settings", ".", "MEDIA_URL", ",", "self", ".", "get_name", "(", ")", ",", "'medium'", ")" ]
Returns the medium size image URL.
[ "Returns", "the", "medium", "size", "image", "URL", "." ]
python
test
43.6
7sDream/zhihu-py3
zhihu/author.py
https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/author.py#L538-L562
def columns(self): """获取用户专栏. :return: 用户专栏,返回生成器 :rtype: Column.Iterable """ from .column import Column if self.url is None or self.post_num == 0: return soup = BeautifulSoup(self._session.get(self.url + 'posts').text) column_list = soup.find('div', class_='column-list') column_tags = column_list.find_all('div', class_='item') for column_tag in column_tags: name = column_tag['title'] url = column_tag['data-href'] numbers = column_tag.find('span', class_='des').text.split('•') follower_num = int(re_get_number.match(numbers[0]).group(1)) if len(numbers) == 1: post_num = 0 else: post_num = int( re_get_number.match(numbers[1]).group(1)) yield Column(url, name, follower_num, post_num, session=self._session)
[ "def", "columns", "(", "self", ")", ":", "from", ".", "column", "import", "Column", "if", "self", ".", "url", "is", "None", "or", "self", ".", "post_num", "==", "0", ":", "return", "soup", "=", "BeautifulSoup", "(", "self", ".", "_session", ".", "get", "(", "self", ".", "url", "+", "'posts'", ")", ".", "text", ")", "column_list", "=", "soup", ".", "find", "(", "'div'", ",", "class_", "=", "'column-list'", ")", "column_tags", "=", "column_list", ".", "find_all", "(", "'div'", ",", "class_", "=", "'item'", ")", "for", "column_tag", "in", "column_tags", ":", "name", "=", "column_tag", "[", "'title'", "]", "url", "=", "column_tag", "[", "'data-href'", "]", "numbers", "=", "column_tag", ".", "find", "(", "'span'", ",", "class_", "=", "'des'", ")", ".", "text", ".", "split", "(", "'•')", "", "follower_num", "=", "int", "(", "re_get_number", ".", "match", "(", "numbers", "[", "0", "]", ")", ".", "group", "(", "1", ")", ")", "if", "len", "(", "numbers", ")", "==", "1", ":", "post_num", "=", "0", "else", ":", "post_num", "=", "int", "(", "re_get_number", ".", "match", "(", "numbers", "[", "1", "]", ")", ".", "group", "(", "1", ")", ")", "yield", "Column", "(", "url", ",", "name", ",", "follower_num", ",", "post_num", ",", "session", "=", "self", ".", "_session", ")" ]
Get the user's columns. :return: the user's columns, returned as a generator :rtype: Column.Iterable
[ "获取用户专栏", "." ]
python
train
37.72
saltstack/salt
salt/modules/aptly.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aptly.py#L70-L103
def _format_repo_args(comment=None, component=None, distribution=None, uploaders_file=None, saltenv='base'): ''' Format the common arguments for creating or editing a repository. :param str comment: The description of the repository. :param str component: The default component to use when publishing. :param str distribution: The default distribution to use when publishing. :param str uploaders_file: The repository upload restrictions config. :param str saltenv: The environment the file resides in. :return: A list of the arguments formatted as aptly arguments. :rtype: list ''' ret = list() cached_uploaders_path = None settings = {'comment': comment, 'component': component, 'distribution': distribution} if uploaders_file: cached_uploaders_path = __salt__['cp.cache_file'](uploaders_file, saltenv) if not cached_uploaders_path: log.error('Unable to get cached copy of file: %s', uploaders_file) return False for setting in settings: if settings[setting] is not None: ret.append('-{}={}'.format(setting, settings[setting])) if cached_uploaders_path: ret.append('-uploaders-file={}'.format(cached_uploaders_path)) return ret
[ "def", "_format_repo_args", "(", "comment", "=", "None", ",", "component", "=", "None", ",", "distribution", "=", "None", ",", "uploaders_file", "=", "None", ",", "saltenv", "=", "'base'", ")", ":", "ret", "=", "list", "(", ")", "cached_uploaders_path", "=", "None", "settings", "=", "{", "'comment'", ":", "comment", ",", "'component'", ":", "component", ",", "'distribution'", ":", "distribution", "}", "if", "uploaders_file", ":", "cached_uploaders_path", "=", "__salt__", "[", "'cp.cache_file'", "]", "(", "uploaders_file", ",", "saltenv", ")", "if", "not", "cached_uploaders_path", ":", "log", ".", "error", "(", "'Unable to get cached copy of file: %s'", ",", "uploaders_file", ")", "return", "False", "for", "setting", "in", "settings", ":", "if", "settings", "[", "setting", "]", "is", "not", "None", ":", "ret", ".", "append", "(", "'-{}={}'", ".", "format", "(", "setting", ",", "settings", "[", "setting", "]", ")", ")", "if", "cached_uploaders_path", ":", "ret", ".", "append", "(", "'-uploaders-file={}'", ".", "format", "(", "cached_uploaders_path", ")", ")", "return", "ret" ]
Format the common arguments for creating or editing a repository. :param str comment: The description of the repository. :param str component: The default component to use when publishing. :param str distribution: The default distribution to use when publishing. :param str uploaders_file: The repository upload restrictions config. :param str saltenv: The environment the file resides in. :return: A list of the arguments formatted as aptly arguments. :rtype: list
[ "Format", "the", "common", "arguments", "for", "creating", "or", "editing", "a", "repository", "." ]
python
train
37.558824
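A standalone sketch of the flag formatting `_format_repo_args` performs (the Salt fileserver and uploaders-file handling are omitted); the values are hypothetical:

```python
settings = {"comment": "Main repo", "component": "main", "distribution": None}

# None values are skipped, everything else becomes an aptly-style '-key=value' flag.
args = ["-{}={}".format(key, value) for key, value in settings.items() if value is not None]
print(args)  # ['-comment=Main repo', '-component=main']
```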
fastavro/fastavro
fastavro/_write_py.py
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L57-L67
def prepare_timestamp_micros(data, schema): """Converts datetime.datetime to int timestamp with microseconds""" if isinstance(data, datetime.datetime): if data.tzinfo is not None: delta = (data - epoch) return int(delta.total_seconds() * MCS_PER_SECOND) t = int(time.mktime(data.timetuple())) * MCS_PER_SECOND + \ data.microsecond return t else: return data
[ "def", "prepare_timestamp_micros", "(", "data", ",", "schema", ")", ":", "if", "isinstance", "(", "data", ",", "datetime", ".", "datetime", ")", ":", "if", "data", ".", "tzinfo", "is", "not", "None", ":", "delta", "=", "(", "data", "-", "epoch", ")", "return", "int", "(", "delta", ".", "total_seconds", "(", ")", "*", "MCS_PER_SECOND", ")", "t", "=", "int", "(", "time", ".", "mktime", "(", "data", ".", "timetuple", "(", ")", ")", ")", "*", "MCS_PER_SECOND", "+", "data", ".", "microsecond", "return", "t", "else", ":", "return", "data" ]
Converts datetime.datetime to int timestamp with microseconds
[ "Converts", "datetime", ".", "datetime", "to", "int", "timestamp", "with", "microseconds" ]
python
train
38.818182
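A quick check of the timezone-aware branch of `prepare_timestamp_micros`; `epoch` and `MCS_PER_SECOND` below are assumed to match the module-level constants the function relies on (the Unix epoch and 1,000,000):

```python
from datetime import datetime, timezone

epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)   # assumed definition
MCS_PER_SECOND = 1_000_000                          # assumed definition

aware = datetime(2020, 1, 2, 3, 4, 5, 123456, tzinfo=timezone.utc)
micros = int((aware - epoch).total_seconds() * MCS_PER_SECOND)
print(micros)  # microseconds since the Unix epoch for 2020-01-02 03:04:05.123456 UTC
```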
MSchnei/pyprf_feature
pyprf_feature/analysis/utils_general.py
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_general.py#L26-L102
def load_nii(strPathIn, varSzeThr=5000.0): """ Load nii file. Parameters ---------- strPathIn : str Path to nii file to load. varSzeThr : float If the nii file is larger than this threshold (in MB), the file is loaded volume-by-volume in order to prevent memory overflow. Default threshold is 1000 MB. Returns ------- aryNii : np.array Array containing nii data. 32 bit floating point precision. objHdr : header object Header of nii file. aryAff : np.array Array containing 'affine', i.e. information about spatial positioning of nii data. Notes ----- If the nii file is larger than the specified threshold (`varSzeThr`), the file is loaded volume-by-volume in order to prevent memory overflow. The reason for this is that nibabel imports data at float64 precision, which can lead to a memory overflow even for relatively small files. """ # Load nii file (this does not load the data into memory yet): objNii = nb.load(strPathIn) # Get size of nii file: varNiiSze = os.path.getsize(strPathIn) # Convert to MB: varNiiSze = np.divide(float(varNiiSze), 1000000.0) # Load volume-by-volume or all at once, depending on file size: if np.greater(varNiiSze, float(varSzeThr)): # Load large nii file print(('---------Large file size (' + str(np.around(varNiiSze)) + ' MB), reading volume-by-volume')) # Get image dimensions: tplSze = objNii.shape # Create empty array for nii data: aryNii = np.zeros(tplSze, dtype=np.float32) # Loop through volumes: for idxVol in range(tplSze[3]): aryNii[..., idxVol] = np.asarray( objNii.dataobj[..., idxVol]).astype(np.float32) else: # Load small nii file # Load nii file (this doesn't load the data into memory yet): objNii = nb.load(strPathIn) # Load data into array: aryNii = np.asarray(objNii.dataobj).astype(np.float32) # Get headers: objHdr = objNii.header # Get 'affine': aryAff = objNii.affine # Output nii data (as numpy array), header, and 'affine': return aryNii, objHdr, aryAff
[ "def", "load_nii", "(", "strPathIn", ",", "varSzeThr", "=", "5000.0", ")", ":", "# Load nii file (this does not load the data into memory yet):", "objNii", "=", "nb", ".", "load", "(", "strPathIn", ")", "# Get size of nii file:", "varNiiSze", "=", "os", ".", "path", ".", "getsize", "(", "strPathIn", ")", "# Convert to MB:", "varNiiSze", "=", "np", ".", "divide", "(", "float", "(", "varNiiSze", ")", ",", "1000000.0", ")", "# Load volume-by-volume or all at once, depending on file size:", "if", "np", ".", "greater", "(", "varNiiSze", ",", "float", "(", "varSzeThr", ")", ")", ":", "# Load large nii file", "print", "(", "(", "'---------Large file size ('", "+", "str", "(", "np", ".", "around", "(", "varNiiSze", ")", ")", "+", "' MB), reading volume-by-volume'", ")", ")", "# Get image dimensions:", "tplSze", "=", "objNii", ".", "shape", "# Create empty array for nii data:", "aryNii", "=", "np", ".", "zeros", "(", "tplSze", ",", "dtype", "=", "np", ".", "float32", ")", "# Loop through volumes:", "for", "idxVol", "in", "range", "(", "tplSze", "[", "3", "]", ")", ":", "aryNii", "[", "...", ",", "idxVol", "]", "=", "np", ".", "asarray", "(", "objNii", ".", "dataobj", "[", "...", ",", "idxVol", "]", ")", ".", "astype", "(", "np", ".", "float32", ")", "else", ":", "# Load small nii file", "# Load nii file (this doesn't load the data into memory yet):", "objNii", "=", "nb", ".", "load", "(", "strPathIn", ")", "# Load data into array:", "aryNii", "=", "np", ".", "asarray", "(", "objNii", ".", "dataobj", ")", ".", "astype", "(", "np", ".", "float32", ")", "# Get headers:", "objHdr", "=", "objNii", ".", "header", "# Get 'affine':", "aryAff", "=", "objNii", ".", "affine", "# Output nii data (as numpy array), header, and 'affine':", "return", "aryNii", ",", "objHdr", ",", "aryAff" ]
Load nii file. Parameters ---------- strPathIn : str Path to nii file to load. varSzeThr : float If the nii file is larger than this threshold (in MB), the file is loaded volume-by-volume in order to prevent memory overflow. Default threshold is 1000 MB. Returns ------- aryNii : np.array Array containing nii data. 32 bit floating point precision. objHdr : header object Header of nii file. aryAff : np.array Array containing 'affine', i.e. information about spatial positioning of nii data. Notes ----- If the nii file is larger than the specified threshold (`varSzeThr`), the file is loaded volume-by-volume in order to prevent memory overflow. The reason for this is that nibabel imports data at float64 precision, which can lead to a memory overflow even for relatively small files.
[ "Load", "nii", "file", "." ]
python
train
28.779221
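A hedged usage sketch for `load_nii`; the file path is hypothetical and nibabel must be installed. Files above the size threshold automatically take the volume-by-volume branch:

```python
# Hypothetical path; any NIfTI file works.
ary_nii, hdr, aff = load_nii("/data/sub-01_bold.nii.gz", varSzeThr=5000.0)
print(ary_nii.shape, ary_nii.dtype)   # e.g. (64, 64, 30, 200) float32
```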
MacHu-GWU/constant2-project
constant2/_constant2.py
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L447-L469
def load(cls, data): """Construct a Constant class from it's dict data. .. versionadded:: 0.0.2 """ if len(data) == 1: for key, value in data.items(): if "__classname__" not in value: # pragma: no cover raise ValueError name = key bases = (Constant,) attrs = dict() for k, v in value.items(): if isinstance(v, dict): if "__classname__" in v: attrs[k] = cls.load({k: v}) else: attrs[k] = v else: attrs[k] = v return type(name, bases, attrs) else: # pragma: no cover raise ValueError
[ "def", "load", "(", "cls", ",", "data", ")", ":", "if", "len", "(", "data", ")", "==", "1", ":", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "\"__classname__\"", "not", "in", "value", ":", "# pragma: no cover", "raise", "ValueError", "name", "=", "key", "bases", "=", "(", "Constant", ",", ")", "attrs", "=", "dict", "(", ")", "for", "k", ",", "v", "in", "value", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "if", "\"__classname__\"", "in", "v", ":", "attrs", "[", "k", "]", "=", "cls", ".", "load", "(", "{", "k", ":", "v", "}", ")", "else", ":", "attrs", "[", "k", "]", "=", "v", "else", ":", "attrs", "[", "k", "]", "=", "v", "return", "type", "(", "name", ",", "bases", ",", "attrs", ")", "else", ":", "# pragma: no cover", "raise", "ValueError" ]
Construct a Constant class from its dict data. .. versionadded:: 0.0.2
[ "Construct", "a", "Constant", "class", "from", "it", "s", "dict", "data", "." ]
python
train
35.043478
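A usage sketch for `Constant.load` under the assumption that every nested dict carries a "__classname__" marker, as the loader requires; the data and import path below are illustrative:

```python
from constant2 import Constant  # assumed public import path for the package

data = {
    "Config": {
        "__classname__": "Config",
        "timeout": 30,
        "Database": {"__classname__": "Database", "port": 5432},
    }
}

Config = Constant.load(data)      # builds a Constant subclass dynamically via type()
print(Config.timeout)             # 30
print(Config.Database.port)       # 5432 (nested dicts become nested classes)
```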
edx/edx-celeryutils
celery_utils/chordable_django_backend.py
https://github.com/edx/edx-celeryutils/blob/d8745f5f0929ad154fad779a19fbefe7f51e9498/celery_utils/chordable_django_backend.py#L89-L96
def chord_task(*args, **kwargs): u""" Override of the default task decorator to specify use of this backend. """ given_backend = kwargs.get(u'backend', None) if not isinstance(given_backend, ChordableDjangoBackend): kwargs[u'backend'] = ChordableDjangoBackend(kwargs.get('app', current_app)) return task(*args, **kwargs)
[ "def", "chord_task", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "given_backend", "=", "kwargs", ".", "get", "(", "u'backend'", ",", "None", ")", "if", "not", "isinstance", "(", "given_backend", ",", "ChordableDjangoBackend", ")", ":", "kwargs", "[", "u'backend'", "]", "=", "ChordableDjangoBackend", "(", "kwargs", ".", "get", "(", "'app'", ",", "current_app", ")", ")", "return", "task", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
u""" Override of the default task decorator to specify use of this backend.
[ "u", "Override", "of", "the", "default", "task", "decorator", "to", "specify", "use", "of", "this", "backend", "." ]
python
train
43.125
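A hedged usage sketch for `chord_task`: it accepts the same keyword arguments as celery's task decorator and silently injects the chord-capable backend. This assumes a configured Django/Celery project; the task body and name are arbitrary examples:

```python
from celery_utils.chordable_django_backend import chord_task  # assumed import path

@chord_task(bind=True, name="example.add")   # any usual celery task options still apply
def add(self, x, y):
    return x + y

# The resulting task should store its results through a ChordableDjangoBackend
# instead of whatever default backend the app is configured with.
```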
BreakingBytes/simkit
simkit/core/models.py
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L166-L219
def _initialize(self): """ Initialize model and layers. """ meta = getattr(self, ModelBase._meta_attr) # read modelfile, convert JSON and load/update model if self.param_file is not None: self._load() LOGGER.debug('model:\n%r', self.model) # initialize layers # FIXME: move import inside loop for custom layers in different modules mod = importlib.import_module(meta.layers_mod, meta.layers_pkg) src_model = {} for layer, value in self.model.iteritems(): # from layers module get the layer's class definition layer_cls = getattr(mod, meta.layer_cls_names[layer]) # class def self.layers[layer] = layer_cls # add layer class def to model # check if model layers are classes src_value = {} # layer value generated from source classes for src in value['sources']: # check if source has keyword arguments try: src, kwargs = src except (TypeError, ValueError): kwargs = {} # no key work arguments # skip if not a source class if isinstance(src, basestring): continue # generate layer value from source class src_value[src.__name__] = {'module': src.__module__, 'package': None} # update layer keyword arguments src_value[src.__name__].update(kwargs) # use layer values generated from source class if src_value: value = src_model[layer] = src_value else: srcmod, srcpkg = value.get('module'), value.get('package') try: value = dict(value['sources']) except ValueError: value = dict.fromkeys(value['sources'], {}) for src in value.viewkeys(): if srcmod is not None: value[src]['module'] = srcmod if srcpkg is not None: value[src]['package'] = srcpkg # set layer attribute with model data setattr(self, layer, layer_cls(value)) # update model with layer values generated from source classes if src_model: self.model.update(src_model) self._update() self._state = 'initialized'
[ "def", "_initialize", "(", "self", ")", ":", "meta", "=", "getattr", "(", "self", ",", "ModelBase", ".", "_meta_attr", ")", "# read modelfile, convert JSON and load/update model", "if", "self", ".", "param_file", "is", "not", "None", ":", "self", ".", "_load", "(", ")", "LOGGER", ".", "debug", "(", "'model:\\n%r'", ",", "self", ".", "model", ")", "# initialize layers", "# FIXME: move import inside loop for custom layers in different modules", "mod", "=", "importlib", ".", "import_module", "(", "meta", ".", "layers_mod", ",", "meta", ".", "layers_pkg", ")", "src_model", "=", "{", "}", "for", "layer", ",", "value", "in", "self", ".", "model", ".", "iteritems", "(", ")", ":", "# from layers module get the layer's class definition", "layer_cls", "=", "getattr", "(", "mod", ",", "meta", ".", "layer_cls_names", "[", "layer", "]", ")", "# class def", "self", ".", "layers", "[", "layer", "]", "=", "layer_cls", "# add layer class def to model", "# check if model layers are classes", "src_value", "=", "{", "}", "# layer value generated from source classes", "for", "src", "in", "value", "[", "'sources'", "]", ":", "# check if source has keyword arguments", "try", ":", "src", ",", "kwargs", "=", "src", "except", "(", "TypeError", ",", "ValueError", ")", ":", "kwargs", "=", "{", "}", "# no key work arguments", "# skip if not a source class", "if", "isinstance", "(", "src", ",", "basestring", ")", ":", "continue", "# generate layer value from source class", "src_value", "[", "src", ".", "__name__", "]", "=", "{", "'module'", ":", "src", ".", "__module__", ",", "'package'", ":", "None", "}", "# update layer keyword arguments", "src_value", "[", "src", ".", "__name__", "]", ".", "update", "(", "kwargs", ")", "# use layer values generated from source class", "if", "src_value", ":", "value", "=", "src_model", "[", "layer", "]", "=", "src_value", "else", ":", "srcmod", ",", "srcpkg", "=", "value", ".", "get", "(", "'module'", ")", ",", "value", ".", "get", "(", "'package'", ")", "try", ":", "value", "=", "dict", "(", "value", "[", "'sources'", "]", ")", "except", "ValueError", ":", "value", "=", "dict", ".", "fromkeys", "(", "value", "[", "'sources'", "]", ",", "{", "}", ")", "for", "src", "in", "value", ".", "viewkeys", "(", ")", ":", "if", "srcmod", "is", "not", "None", ":", "value", "[", "src", "]", "[", "'module'", "]", "=", "srcmod", "if", "srcpkg", "is", "not", "None", ":", "value", "[", "src", "]", "[", "'package'", "]", "=", "srcpkg", "# set layer attribute with model data", "setattr", "(", "self", ",", "layer", ",", "layer_cls", "(", "value", ")", ")", "# update model with layer values generated from source classes", "if", "src_model", ":", "self", ".", "model", ".", "update", "(", "src_model", ")", "self", ".", "_update", "(", ")", "self", ".", "_state", "=", "'initialized'" ]
Initialize model and layers.
[ "Initialize", "model", "and", "layers", "." ]
python
train
45.518519
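`_initialize` resolves each layer class dynamically with importlib and getattr; a tiny runnable illustration of that lookup pattern, using a stdlib module purely so the snippet executes (the real code looks the class name up in its own layers module):

```python
import importlib

# Resolve a class from (module name, class name) strings, as _initialize does per layer;
# 'collections' / 'OrderedDict' are stand-ins so the snippet runs anywhere.
mod = importlib.import_module("collections")
layer_cls = getattr(mod, "OrderedDict")
print(layer_cls)           # <class 'collections.OrderedDict'>
layer = layer_cls(a=1)     # instantiate the dynamically resolved class
print(dict(layer))         # {'a': 1}
```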
belbio/bel
bel/edge/edges.py
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/edge/edges.py#L33-L196
def nanopub_to_edges(nanopub: dict = {}, rules: List[str] = [], orthologize_targets: list = []): """Process nanopub into edges and load into EdgeStore Args: nanopub: BEL Nanopub rules: list of compute rules to process orthologize_targets: list of species in TAX:<int> format Returns: list: of edges Edge object: { "edge": { "subject": { "name": subj_canon, "name_lc": subj_canon.lower(), "label": subj_lbl, "label_lc": subj_lbl.lower(), "components": subj_components, }, "relation": { # relation _key is based on a hash "relation": edge_ast.bel_relation, "edge_hash": edge_hash, "edge_dt": edge_dt, "nanopub_url": nanopub_url, "nanopub_id": nanopub_id, "citation": citation, "subject_canon": subj_canon, "subject": subj_lbl, "object_canon": obj_canon, "object": obj_lbl, "annotations": nanopub['annotations'], "metadata": nanopub['metadata'], "public_flag": True, # will be added when groups/permissions feature is finished, "edge_types": edge_types, }, 'object': { "name": obj_canon, "name_lc": obj_canon.lower(), "label": obj_lbl, "label_lc": obj_lbl.lower(), "components": obj_components, } } } """ # Collect input values #################################################### nanopub_url = nanopub.get("source_url", "") edge_dt = utils.dt_utc_formatted() # don't want this in relation_id # Extract BEL Version and make sure we can process this if nanopub["nanopub"]["type"]["name"].upper() == "BEL": bel_version = nanopub["nanopub"]["type"]["version"] versions = bel.lang.bel_specification.get_bel_versions() if bel_version not in versions: log.error( f"Do not know this BEL Version: {bel_version}, these are the ones I can process: {versions.keys()}" ) return [] else: log.error( f"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}" ) return [] # Required for BEL parsing/canonicalization/orthologization api_url = config["bel_api"]["servers"]["api_url"] try: citation_string = normalize_nanopub_citation(nanopub) except Exception as e: log.error(f"Could not create citation string for {nanopub_url}") citation_string = "" if orthologize_targets == []: if config["bel_api"].get("edges", None): orthologize_targets = config["bel_api"]["edges"].get("orthologize_targets", []) # orig_species_id = [anno['id'] for anno in nanopub['nanopub']['annotations'] if anno['type'] == 'Species'] # if orig_species_id: # orig_species_id = orig_species_id[0] master_annotations = copy.deepcopy(nanopub["nanopub"]["annotations"]) master_metadata = copy.deepcopy(nanopub["nanopub"]["metadata"]) master_metadata.pop("gd_abstract", None) nanopub_type = nanopub["nanopub"]["metadata"].get("nanopub_type") # Create Edge Assertion Info ############################################## # r = generate_assertion_edge_info(nanopub['nanopub']['assertions'], orig_species_id, orthologize_targets, bel_version, api_url, nanopub_type) r = generate_assertion_edge_info( nanopub["nanopub"]["assertions"], orthologize_targets, bel_version, api_url, nanopub_type ) edge_info_list = r["edge_info_list"] # Build Edges ############################################################# edges = [] errors = [] for edge_info in edge_info_list: annotations = copy.deepcopy(master_annotations) metadata = copy.deepcopy(master_metadata) errors.extend(edge_info["errors"]) if not edge_info.get("canonical"): continue # TODO - remove this # if edge_info.get('species_id', False): # annotations = orthologize_context(edge_info['species_id'], annotations) edge_hash = utils._create_hash( f'{edge_info["canonical"]["subject"]} {edge_info["canonical"]["relation"]} 
{edge_info["canonical"]["object"]}' ) edge = { "edge": { "subject": { "name": edge_info["canonical"]["subject"], "name_lc": edge_info["canonical"]["subject"].lower(), "label": edge_info["decanonical"]["subject"], "label_lc": edge_info["decanonical"]["subject"].lower(), "components": edge_info["subject_comp"], }, "relation": { "relation": edge_info["canonical"]["relation"], "edge_hash": edge_hash, "edge_dt": edge_dt, "nanopub_url": nanopub_url, "nanopub_id": nanopub["nanopub"]["id"], "citation": citation_string, "subject_canon": edge_info["canonical"]["subject"], "subject": edge_info["decanonical"]["subject"], "object_canon": edge_info["canonical"]["object"], "object": edge_info["decanonical"]["object"], "annotations": copy.deepcopy(annotations), "metadata": copy.deepcopy(metadata), "public_flag": True, "edge_types": edge_info["edge_types"], "species_id": edge_info["species_id"], "species_label": edge_info["species_label"], }, "object": { "name": edge_info["canonical"]["object"], "name_lc": edge_info["canonical"]["object"].lower(), "label": edge_info["decanonical"]["object"], "label_lc": edge_info["decanonical"]["object"].lower(), "components": edge_info["object_comp"], }, } } edges.append(copy.deepcopy(edge)) return { "edges": edges, "nanopub_id": nanopub["nanopub"]["id"], "nanopub_url": nanopub_url, "success": True, "errors": errors, }
[ "def", "nanopub_to_edges", "(", "nanopub", ":", "dict", "=", "{", "}", ",", "rules", ":", "List", "[", "str", "]", "=", "[", "]", ",", "orthologize_targets", ":", "list", "=", "[", "]", ")", ":", "# Collect input values ####################################################", "nanopub_url", "=", "nanopub", ".", "get", "(", "\"source_url\"", ",", "\"\"", ")", "edge_dt", "=", "utils", ".", "dt_utc_formatted", "(", ")", "# don't want this in relation_id", "# Extract BEL Version and make sure we can process this", "if", "nanopub", "[", "\"nanopub\"", "]", "[", "\"type\"", "]", "[", "\"name\"", "]", ".", "upper", "(", ")", "==", "\"BEL\"", ":", "bel_version", "=", "nanopub", "[", "\"nanopub\"", "]", "[", "\"type\"", "]", "[", "\"version\"", "]", "versions", "=", "bel", ".", "lang", ".", "bel_specification", ".", "get_bel_versions", "(", ")", "if", "bel_version", "not", "in", "versions", ":", "log", ".", "error", "(", "f\"Do not know this BEL Version: {bel_version}, these are the ones I can process: {versions.keys()}\"", ")", "return", "[", "]", "else", ":", "log", ".", "error", "(", "f\"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}\"", ")", "return", "[", "]", "# Required for BEL parsing/canonicalization/orthologization", "api_url", "=", "config", "[", "\"bel_api\"", "]", "[", "\"servers\"", "]", "[", "\"api_url\"", "]", "try", ":", "citation_string", "=", "normalize_nanopub_citation", "(", "nanopub", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "f\"Could not create citation string for {nanopub_url}\"", ")", "citation_string", "=", "\"\"", "if", "orthologize_targets", "==", "[", "]", ":", "if", "config", "[", "\"bel_api\"", "]", ".", "get", "(", "\"edges\"", ",", "None", ")", ":", "orthologize_targets", "=", "config", "[", "\"bel_api\"", "]", "[", "\"edges\"", "]", ".", "get", "(", "\"orthologize_targets\"", ",", "[", "]", ")", "# orig_species_id = [anno['id'] for anno in nanopub['nanopub']['annotations'] if anno['type'] == 'Species']", "# if orig_species_id:", "# orig_species_id = orig_species_id[0]", "master_annotations", "=", "copy", ".", "deepcopy", "(", "nanopub", "[", "\"nanopub\"", "]", "[", "\"annotations\"", "]", ")", "master_metadata", "=", "copy", ".", "deepcopy", "(", "nanopub", "[", "\"nanopub\"", "]", "[", "\"metadata\"", "]", ")", "master_metadata", ".", "pop", "(", "\"gd_abstract\"", ",", "None", ")", "nanopub_type", "=", "nanopub", "[", "\"nanopub\"", "]", "[", "\"metadata\"", "]", ".", "get", "(", "\"nanopub_type\"", ")", "# Create Edge Assertion Info ##############################################", "# r = generate_assertion_edge_info(nanopub['nanopub']['assertions'], orig_species_id, orthologize_targets, bel_version, api_url, nanopub_type)", "r", "=", "generate_assertion_edge_info", "(", "nanopub", "[", "\"nanopub\"", "]", "[", "\"assertions\"", "]", ",", "orthologize_targets", ",", "bel_version", ",", "api_url", ",", "nanopub_type", ")", "edge_info_list", "=", "r", "[", "\"edge_info_list\"", "]", "# Build Edges #############################################################", "edges", "=", "[", "]", "errors", "=", "[", "]", "for", "edge_info", "in", "edge_info_list", ":", "annotations", "=", "copy", ".", "deepcopy", "(", "master_annotations", ")", "metadata", "=", "copy", ".", "deepcopy", "(", "master_metadata", ")", "errors", ".", "extend", "(", "edge_info", "[", "\"errors\"", "]", ")", "if", "not", "edge_info", ".", "get", "(", "\"canonical\"", ")", ":", "continue", "# TODO - remove this", "# if 
edge_info.get('species_id', False):", "# annotations = orthologize_context(edge_info['species_id'], annotations)", "edge_hash", "=", "utils", ".", "_create_hash", "(", "f'{edge_info[\"canonical\"][\"subject\"]} {edge_info[\"canonical\"][\"relation\"]} {edge_info[\"canonical\"][\"object\"]}'", ")", "edge", "=", "{", "\"edge\"", ":", "{", "\"subject\"", ":", "{", "\"name\"", ":", "edge_info", "[", "\"canonical\"", "]", "[", "\"subject\"", "]", ",", "\"name_lc\"", ":", "edge_info", "[", "\"canonical\"", "]", "[", "\"subject\"", "]", ".", "lower", "(", ")", ",", "\"label\"", ":", "edge_info", "[", "\"decanonical\"", "]", "[", "\"subject\"", "]", ",", "\"label_lc\"", ":", "edge_info", "[", "\"decanonical\"", "]", "[", "\"subject\"", "]", ".", "lower", "(", ")", ",", "\"components\"", ":", "edge_info", "[", "\"subject_comp\"", "]", ",", "}", ",", "\"relation\"", ":", "{", "\"relation\"", ":", "edge_info", "[", "\"canonical\"", "]", "[", "\"relation\"", "]", ",", "\"edge_hash\"", ":", "edge_hash", ",", "\"edge_dt\"", ":", "edge_dt", ",", "\"nanopub_url\"", ":", "nanopub_url", ",", "\"nanopub_id\"", ":", "nanopub", "[", "\"nanopub\"", "]", "[", "\"id\"", "]", ",", "\"citation\"", ":", "citation_string", ",", "\"subject_canon\"", ":", "edge_info", "[", "\"canonical\"", "]", "[", "\"subject\"", "]", ",", "\"subject\"", ":", "edge_info", "[", "\"decanonical\"", "]", "[", "\"subject\"", "]", ",", "\"object_canon\"", ":", "edge_info", "[", "\"canonical\"", "]", "[", "\"object\"", "]", ",", "\"object\"", ":", "edge_info", "[", "\"decanonical\"", "]", "[", "\"object\"", "]", ",", "\"annotations\"", ":", "copy", ".", "deepcopy", "(", "annotations", ")", ",", "\"metadata\"", ":", "copy", ".", "deepcopy", "(", "metadata", ")", ",", "\"public_flag\"", ":", "True", ",", "\"edge_types\"", ":", "edge_info", "[", "\"edge_types\"", "]", ",", "\"species_id\"", ":", "edge_info", "[", "\"species_id\"", "]", ",", "\"species_label\"", ":", "edge_info", "[", "\"species_label\"", "]", ",", "}", ",", "\"object\"", ":", "{", "\"name\"", ":", "edge_info", "[", "\"canonical\"", "]", "[", "\"object\"", "]", ",", "\"name_lc\"", ":", "edge_info", "[", "\"canonical\"", "]", "[", "\"object\"", "]", ".", "lower", "(", ")", ",", "\"label\"", ":", "edge_info", "[", "\"decanonical\"", "]", "[", "\"object\"", "]", ",", "\"label_lc\"", ":", "edge_info", "[", "\"decanonical\"", "]", "[", "\"object\"", "]", ".", "lower", "(", ")", ",", "\"components\"", ":", "edge_info", "[", "\"object_comp\"", "]", ",", "}", ",", "}", "}", "edges", ".", "append", "(", "copy", ".", "deepcopy", "(", "edge", ")", ")", "return", "{", "\"edges\"", ":", "edges", ",", "\"nanopub_id\"", ":", "nanopub", "[", "\"nanopub\"", "]", "[", "\"id\"", "]", ",", "\"nanopub_url\"", ":", "nanopub_url", ",", "\"success\"", ":", "True", ",", "\"errors\"", ":", "errors", ",", "}" ]
Process nanopub into edges and load into EdgeStore Args: nanopub: BEL Nanopub rules: list of compute rules to process orthologize_targets: list of species in TAX:<int> format Returns: list: of edges Edge object: { "edge": { "subject": { "name": subj_canon, "name_lc": subj_canon.lower(), "label": subj_lbl, "label_lc": subj_lbl.lower(), "components": subj_components, }, "relation": { # relation _key is based on a hash "relation": edge_ast.bel_relation, "edge_hash": edge_hash, "edge_dt": edge_dt, "nanopub_url": nanopub_url, "nanopub_id": nanopub_id, "citation": citation, "subject_canon": subj_canon, "subject": subj_lbl, "object_canon": obj_canon, "object": obj_lbl, "annotations": nanopub['annotations'], "metadata": nanopub['metadata'], "public_flag": True, # will be added when groups/permissions feature is finished, "edge_types": edge_types, }, 'object': { "name": obj_canon, "name_lc": obj_canon.lower(), "label": obj_lbl, "label_lc": obj_lbl.lower(), "components": obj_components, } } }
[ "Process", "nanopub", "into", "edges", "and", "load", "into", "EdgeStore" ]
python
train
39.786585
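In `nanopub_to_edges`, the edge key comes from hashing the canonical subject-relation-object string. The record does not show `utils._create_hash`, so the md5-based helper below is an assumption used only to illustrate the idea; the BEL triple is an example value:

```python
import hashlib

def create_hash(text):
    # Hypothetical stand-in for utils._create_hash: any stable digest works here.
    return hashlib.md5(text.encode("utf-8")).hexdigest()

subject, relation, obj = "p(HGNC:AKT1)", "increases", "act(p(HGNC:EGFR))"
edge_hash = create_hash(f"{subject} {relation} {obj}")
print(edge_hash)  # the same canonical triple always yields the same key
```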
cbclab/MOT
mot/random.py
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/random.py#L76-L112
def normal(nmr_distributions, nmr_samples, mean=0, std=1, ctype='float', seed=None): """Draw random samples from the Gaussian distribution. Args: nmr_distributions (int): the number of unique continuous_distributions to create nmr_samples (int): The number of samples to draw mean (float or ndarray): The mean of the distribution std (float or ndarray): The standard deviation or the distribution ctype (str): the C type of the output samples seed (float): the seed for the RNG Returns: ndarray: A two dimensional numpy array as (nmr_distributions, nmr_samples). """ if is_scalar(mean): mean = np.ones((nmr_distributions, 1)) * mean if is_scalar(std): std = np.ones((nmr_distributions, 1)) * std kernel_data = {'mean': Array(mean, as_scalar=True), 'std': Array(std, as_scalar=True)} kernel = SimpleCLFunction.from_string(''' void compute(double mean, double std, global uint* rng_state, global ''' + ctype + '''* samples){ rand123_data rand123_rng_data = rand123_initialize_data((uint[]){ rng_state[0], rng_state[1], rng_state[2], rng_state[3], rng_state[4], rng_state[5], 0}); void* rng_data = (void*)&rand123_rng_data; for(uint i = 0; i < ''' + str(nmr_samples) + '''; i++){ double4 randomnr = randn4(rng_data); samples[i] = (''' + ctype + ''')(mean + randomnr.x * std); } } ''', dependencies=[Rand123()]) return _generate_samples(kernel, nmr_distributions, nmr_samples, ctype, kernel_data, seed=seed)
[ "def", "normal", "(", "nmr_distributions", ",", "nmr_samples", ",", "mean", "=", "0", ",", "std", "=", "1", ",", "ctype", "=", "'float'", ",", "seed", "=", "None", ")", ":", "if", "is_scalar", "(", "mean", ")", ":", "mean", "=", "np", ".", "ones", "(", "(", "nmr_distributions", ",", "1", ")", ")", "*", "mean", "if", "is_scalar", "(", "std", ")", ":", "std", "=", "np", ".", "ones", "(", "(", "nmr_distributions", ",", "1", ")", ")", "*", "std", "kernel_data", "=", "{", "'mean'", ":", "Array", "(", "mean", ",", "as_scalar", "=", "True", ")", ",", "'std'", ":", "Array", "(", "std", ",", "as_scalar", "=", "True", ")", "}", "kernel", "=", "SimpleCLFunction", ".", "from_string", "(", "'''\n void compute(double mean, double std, global uint* rng_state, global '''", "+", "ctype", "+", "'''* samples){\n rand123_data rand123_rng_data = rand123_initialize_data((uint[]){\n rng_state[0], rng_state[1], rng_state[2], rng_state[3], \n rng_state[4], rng_state[5], 0});\n void* rng_data = (void*)&rand123_rng_data;\n\n for(uint i = 0; i < '''", "+", "str", "(", "nmr_samples", ")", "+", "'''; i++){\n double4 randomnr = randn4(rng_data);\n samples[i] = ('''", "+", "ctype", "+", "''')(mean + randomnr.x * std);\n }\n }\n '''", ",", "dependencies", "=", "[", "Rand123", "(", ")", "]", ")", "return", "_generate_samples", "(", "kernel", ",", "nmr_distributions", ",", "nmr_samples", ",", "ctype", ",", "kernel_data", ",", "seed", "=", "seed", ")" ]
Draw random samples from the Gaussian distribution. Args: nmr_distributions (int): the number of unique continuous_distributions to create nmr_samples (int): The number of samples to draw mean (float or ndarray): The mean of the distribution std (float or ndarray): The standard deviation or the distribution ctype (str): the C type of the output samples seed (float): the seed for the RNG Returns: ndarray: A two dimensional numpy array as (nmr_distributions, nmr_samples).
[ "Draw", "random", "samples", "from", "the", "Gaussian", "distribution", "." ]
python
train
44.135135
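For comparison with the OpenCL sampler above, the same (nmr_distributions, nmr_samples) layout produced on the CPU with NumPy; per-distribution means and standard deviations broadcast across the samples axis:

```python
import numpy as np

nmr_distributions, nmr_samples = 4, 1000
mean = np.arange(nmr_distributions, dtype=float).reshape(-1, 1)  # one mean per row
std = np.full((nmr_distributions, 1), 2.0)                        # one std per row

samples = np.random.normal(mean, std, size=(nmr_distributions, nmr_samples)).astype(np.float32)
print(samples.shape)           # (4, 1000)
print(samples.mean(axis=1))    # roughly [0, 1, 2, 3]
```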
ask/carrot
carrot/messaging.py
https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/messaging.py#L507-L544
def qos(self, prefetch_size=0, prefetch_count=0, apply_global=False): """Request specific Quality of Service. This method requests a specific quality of service. The QoS can be specified for the current channel or for all channels on the connection. The particular properties and semantics of a qos method always depend on the content class semantics. Though the qos method could in principle apply to both peers, it is currently meaningful only for the server. :param prefetch_size: Prefetch window in octets. The client can request that messages be sent in advance so that when the client finishes processing a message, the following message is already held locally, rather than needing to be sent down the channel. Prefetching gives a performance improvement. This field specifies the prefetch window size in octets. The server will send a message in advance if it is equal to or smaller in size than the available prefetch size (and also falls into other prefetch limits). May be set to zero, meaning "no specific limit", although other prefetch limits may still apply. The ``prefetch_size`` is ignored if the :attr:`no_ack` option is set. :param prefetch_count: Specifies a prefetch window in terms of whole messages. This field may be used in combination with ``prefetch_size``; A message will only be sent in advance if both prefetch windows (and those at the channel and connection level) allow it. The prefetch- count is ignored if the :attr:`no_ack` option is set. :keyword apply_global: By default the QoS settings apply to the current channel only. If this is set, they are applied to the entire connection. """ return self.backend.qos(prefetch_size, prefetch_count, apply_global)
[ "def", "qos", "(", "self", ",", "prefetch_size", "=", "0", ",", "prefetch_count", "=", "0", ",", "apply_global", "=", "False", ")", ":", "return", "self", ".", "backend", ".", "qos", "(", "prefetch_size", ",", "prefetch_count", ",", "apply_global", ")" ]
Request specific Quality of Service. This method requests a specific quality of service. The QoS can be specified for the current channel or for all channels on the connection. The particular properties and semantics of a qos method always depend on the content class semantics. Though the qos method could in principle apply to both peers, it is currently meaningful only for the server. :param prefetch_size: Prefetch window in octets. The client can request that messages be sent in advance so that when the client finishes processing a message, the following message is already held locally, rather than needing to be sent down the channel. Prefetching gives a performance improvement. This field specifies the prefetch window size in octets. The server will send a message in advance if it is equal to or smaller in size than the available prefetch size (and also falls into other prefetch limits). May be set to zero, meaning "no specific limit", although other prefetch limits may still apply. The ``prefetch_size`` is ignored if the :attr:`no_ack` option is set. :param prefetch_count: Specifies a prefetch window in terms of whole messages. This field may be used in combination with ``prefetch_size``; A message will only be sent in advance if both prefetch windows (and those at the channel and connection level) allow it. The prefetch- count is ignored if the :attr:`no_ack` option is set. :keyword apply_global: By default the QoS settings apply to the current channel only. If this is set, they are applied to the entire connection.
[ "Request", "specific", "Quality", "of", "Service", "." ]
python
train
52.631579
openpermissions/perch
perch/views.py
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/views.py#L310-L320
def active_service_location(doc): """View for getting active service by location""" if doc.get('state') != 'deactivated': for service_id, service in doc.get('services', {}).items(): if service.get('state') != 'deactivated': service['id'] = service_id service['organisation_id'] = doc['_id'] location = service.get('location', None) if location: yield location, service
[ "def", "active_service_location", "(", "doc", ")", ":", "if", "doc", ".", "get", "(", "'state'", ")", "!=", "'deactivated'", ":", "for", "service_id", ",", "service", "in", "doc", ".", "get", "(", "'services'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "if", "service", ".", "get", "(", "'state'", ")", "!=", "'deactivated'", ":", "service", "[", "'id'", "]", "=", "service_id", "service", "[", "'organisation_id'", "]", "=", "doc", "[", "'_id'", "]", "location", "=", "service", ".", "get", "(", "'location'", ",", "None", ")", "if", "location", ":", "yield", "location", ",", "service" ]
View for getting active service by location
[ "View", "for", "getting", "active", "service", "by", "location" ]
python
train
42.818182
dereneaton/ipyrad
ipyrad/assemble/write_outfiles.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/write_outfiles.py#L973-L1024
def get_edges(data, superints, splits): """ Gets edge trimming based on the overlap of sequences at the edges of alignments and the tuple arg passed in for edge_trimming. Trims as (R1 left, R1 right, R2 left, R2 right). We also trim off the restriction site if it present. This modifies superints, and so should be run on an engine so it doesn't affect local copy. If this is changed to run locally for some reason make sure we copy the superints instead. """ ## the filtering arg and parse it into minsamp numbers if "trim_overhang" in data.paramsdict: edgetrims = np.array(data.paramsdict["trim_overhang"]).astype(np.int16) else: edgetrims = np.array(data.paramsdict["trim_loci"]).astype(np.int16) ## Cuts 3 and 4 are only for 3rad/radcap ## TODO: This is moderately hackish, it's not using cut3/4 ## correctly, just assuming the length is the same as cut1/2 try: cut1, cut2, _, _ = data.paramsdict["restriction_overhang"] LOGGER.debug("Found 3Rad cut sites") except ValueError: cut1, cut2 = data.paramsdict["restriction_overhang"] cuts = np.array([len(cut1), len(cut2)], dtype=np.int16) ## a local array for storing edge trims edges = np.zeros((superints.shape[0], 5), dtype=np.int16) ## a local array for storing edge filtered loci, these are stored ## eventually as minsamp excludes. edgefilter = np.zeros((superints.shape[0],), dtype=np.bool) ## TRIM GUIDE. The cut site lengths are always trimmed. In addition, ## edge overhangs are trimmed to min(4, minsamp), and then additional ## number of columns is trimmed based on edgetrims values. ## A special case, -1 value means no trim at all. if data.paramsdict["min_samples_locus"] <= 4: minedge = np.int16(data.paramsdict["min_samples_locus"]) else: minedge = np.int16(max(4, data.paramsdict["min_samples_locus"])) ## convert all - to N to make this easier nodashints = copy.deepcopy(superints)#.copy() nodashints[nodashints == 45] = 78 ## trim overhanging edges ## get the number not Ns in each site, #ccx = np.sum(superseqs != "N", axis=1) ccx = np.sum(nodashints != 78, axis=1, dtype=np.uint16) efi, edg = edgetrim_numba(splits, ccx, edges, edgefilter, edgetrims, cuts, minedge) return efi, edg
[ "def", "get_edges", "(", "data", ",", "superints", ",", "splits", ")", ":", "## the filtering arg and parse it into minsamp numbers", "if", "\"trim_overhang\"", "in", "data", ".", "paramsdict", ":", "edgetrims", "=", "np", ".", "array", "(", "data", ".", "paramsdict", "[", "\"trim_overhang\"", "]", ")", ".", "astype", "(", "np", ".", "int16", ")", "else", ":", "edgetrims", "=", "np", ".", "array", "(", "data", ".", "paramsdict", "[", "\"trim_loci\"", "]", ")", ".", "astype", "(", "np", ".", "int16", ")", "## Cuts 3 and 4 are only for 3rad/radcap", "## TODO: This is moderately hackish, it's not using cut3/4", "## correctly, just assuming the length is the same as cut1/2", "try", ":", "cut1", ",", "cut2", ",", "_", ",", "_", "=", "data", ".", "paramsdict", "[", "\"restriction_overhang\"", "]", "LOGGER", ".", "debug", "(", "\"Found 3Rad cut sites\"", ")", "except", "ValueError", ":", "cut1", ",", "cut2", "=", "data", ".", "paramsdict", "[", "\"restriction_overhang\"", "]", "cuts", "=", "np", ".", "array", "(", "[", "len", "(", "cut1", ")", ",", "len", "(", "cut2", ")", "]", ",", "dtype", "=", "np", ".", "int16", ")", "## a local array for storing edge trims", "edges", "=", "np", ".", "zeros", "(", "(", "superints", ".", "shape", "[", "0", "]", ",", "5", ")", ",", "dtype", "=", "np", ".", "int16", ")", "## a local array for storing edge filtered loci, these are stored", "## eventually as minsamp excludes.", "edgefilter", "=", "np", ".", "zeros", "(", "(", "superints", ".", "shape", "[", "0", "]", ",", ")", ",", "dtype", "=", "np", ".", "bool", ")", "## TRIM GUIDE. The cut site lengths are always trimmed. In addition,", "## edge overhangs are trimmed to min(4, minsamp), and then additional", "## number of columns is trimmed based on edgetrims values.", "## A special case, -1 value means no trim at all.", "if", "data", ".", "paramsdict", "[", "\"min_samples_locus\"", "]", "<=", "4", ":", "minedge", "=", "np", ".", "int16", "(", "data", ".", "paramsdict", "[", "\"min_samples_locus\"", "]", ")", "else", ":", "minedge", "=", "np", ".", "int16", "(", "max", "(", "4", ",", "data", ".", "paramsdict", "[", "\"min_samples_locus\"", "]", ")", ")", "## convert all - to N to make this easier", "nodashints", "=", "copy", ".", "deepcopy", "(", "superints", ")", "#.copy()", "nodashints", "[", "nodashints", "==", "45", "]", "=", "78", "## trim overhanging edges", "## get the number not Ns in each site,", "#ccx = np.sum(superseqs != \"N\", axis=1)", "ccx", "=", "np", ".", "sum", "(", "nodashints", "!=", "78", ",", "axis", "=", "1", ",", "dtype", "=", "np", ".", "uint16", ")", "efi", ",", "edg", "=", "edgetrim_numba", "(", "splits", ",", "ccx", ",", "edges", ",", "edgefilter", ",", "edgetrims", ",", "cuts", ",", "minedge", ")", "return", "efi", ",", "edg" ]
Gets edge trimming based on the overlap of sequences at the edges of alignments and the tuple arg passed in for edge_trimming. Trims as (R1 left, R1 right, R2 left, R2 right). We also trim off the restriction site if it is present. This modifies superints, and so should be run on an engine so it doesn't affect the local copy. If this is changed to run locally for some reason, make sure we copy the superints instead.
[ "Gets", "edge", "trimming", "based", "on", "the", "overlap", "of", "sequences", "at", "the", "edges", "of", "alignments", "and", "the", "tuple", "arg", "passed", "in", "for", "edge_trimming", ".", "Trims", "as", "(", "R1", "left", "R1", "right", "R2", "left", "R2", "right", ")", ".", "We", "also", "trim", "off", "the", "restriction", "site", "if", "it", "present", ".", "This", "modifies", "superints", "and", "so", "should", "be", "run", "on", "an", "engine", "so", "it", "doesn", "t", "affect", "local", "copy", ".", "If", "this", "is", "changed", "to", "run", "locally", "for", "some", "reason", "make", "sure", "we", "copy", "the", "superints", "instead", "." ]
python
valid
44.480769
polyaxon/polyaxon
polyaxon/streams/consumers/consumers.py
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/streams/consumers/consumers.py#L280-L286
def open_channel(self): """Open a new channel with RabbitMQ by issuing the Channel.Open RPC command. When RabbitMQ responds that the channel is open, the on_channel_open callback will be invoked by pika. """ _logger.info('Creating a new channel') self._connection.channel(on_open_callback=self.on_channel_open)
[ "def", "open_channel", "(", "self", ")", ":", "_logger", ".", "info", "(", "'Creating a new channel'", ")", "self", ".", "_connection", ".", "channel", "(", "on_open_callback", "=", "self", ".", "on_channel_open", ")" ]
Open a new channel with RabbitMQ by issuing the Channel.Open RPC command. When RabbitMQ responds that the channel is open, the on_channel_open callback will be invoked by pika.
[ "Open", "a", "new", "channel", "with", "RabbitMQ", "by", "issuing", "the", "Channel", ".", "Open", "RPC", "command", ".", "When", "RabbitMQ", "responds", "that", "the", "channel", "is", "open", "the", "on_channel_open", "callback", "will", "be", "invoked", "by", "pika", "." ]
python
train
50.285714
fastai/fastai
fastai/gen_doc/gen_notebooks.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/gen_doc/gen_notebooks.py#L204-L212
def update_nb_metadata(nb_path=None, title=None, summary=None, keywords='fastai', overwrite=True, **kwargs): "Creates jekyll metadata for given notebook path." nb = read_nb(nb_path) data = {'title': title, 'summary': summary, 'keywords': keywords, **kwargs} data = {k:v for (k,v) in data.items() if v is not None} # remove none values if not data: return nb['metadata']['jekyll'] = data write_nb(nb, nb_path) NotebookNotary().sign(nb)
[ "def", "update_nb_metadata", "(", "nb_path", "=", "None", ",", "title", "=", "None", ",", "summary", "=", "None", ",", "keywords", "=", "'fastai'", ",", "overwrite", "=", "True", ",", "*", "*", "kwargs", ")", ":", "nb", "=", "read_nb", "(", "nb_path", ")", "data", "=", "{", "'title'", ":", "title", ",", "'summary'", ":", "summary", ",", "'keywords'", ":", "keywords", ",", "*", "*", "kwargs", "}", "data", "=", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "data", ".", "items", "(", ")", "if", "v", "is", "not", "None", "}", "# remove none values", "if", "not", "data", ":", "return", "nb", "[", "'metadata'", "]", "[", "'jekyll'", "]", "=", "data", "write_nb", "(", "nb", ",", "nb_path", ")", "NotebookNotary", "(", ")", ".", "sign", "(", "nb", ")" ]
Creates jekyll metadata for given notebook path.
[ "Creates", "jekyll", "metadata", "for", "given", "notebook", "path", "." ]
python
train
50.888889
edx/edx-search
search/api.py
https://github.com/edx/edx-search/blob/476cf02b71ceba34ae7d8b798f36d60692317c55/search/api.py#L80-L112
def course_discovery_search(search_term=None, size=20, from_=0, field_dictionary=None): """ Course Discovery activities against the search engine index of course details """ # We'll ignore the course-enrollemnt informaiton in field and filter # dictionary, and use our own logic upon enrollment dates for these use_search_fields = ["org"] (search_fields, _, exclude_dictionary) = SearchFilterGenerator.generate_field_filters() use_field_dictionary = {} use_field_dictionary.update({field: search_fields[field] for field in search_fields if field in use_search_fields}) if field_dictionary: use_field_dictionary.update(field_dictionary) if not getattr(settings, "SEARCH_SKIP_ENROLLMENT_START_DATE_FILTERING", False): use_field_dictionary["enrollment_start"] = DateRange(None, datetime.utcnow()) searcher = SearchEngine.get_search_engine(getattr(settings, "COURSEWARE_INDEX_NAME", "courseware_index")) if not searcher: raise NoSearchEngineError("No search engine specified in settings.SEARCH_ENGINE") results = searcher.search( query_string=search_term, doc_type="course_info", size=size, from_=from_, # only show when enrollment start IS provided and is before now field_dictionary=use_field_dictionary, # show if no enrollment end is provided and has not yet been reached filter_dictionary={"enrollment_end": DateRange(datetime.utcnow(), None)}, exclude_dictionary=exclude_dictionary, facet_terms=course_discovery_facets(), ) return results
[ "def", "course_discovery_search", "(", "search_term", "=", "None", ",", "size", "=", "20", ",", "from_", "=", "0", ",", "field_dictionary", "=", "None", ")", ":", "# We'll ignore the course-enrollemnt informaiton in field and filter", "# dictionary, and use our own logic upon enrollment dates for these", "use_search_fields", "=", "[", "\"org\"", "]", "(", "search_fields", ",", "_", ",", "exclude_dictionary", ")", "=", "SearchFilterGenerator", ".", "generate_field_filters", "(", ")", "use_field_dictionary", "=", "{", "}", "use_field_dictionary", ".", "update", "(", "{", "field", ":", "search_fields", "[", "field", "]", "for", "field", "in", "search_fields", "if", "field", "in", "use_search_fields", "}", ")", "if", "field_dictionary", ":", "use_field_dictionary", ".", "update", "(", "field_dictionary", ")", "if", "not", "getattr", "(", "settings", ",", "\"SEARCH_SKIP_ENROLLMENT_START_DATE_FILTERING\"", ",", "False", ")", ":", "use_field_dictionary", "[", "\"enrollment_start\"", "]", "=", "DateRange", "(", "None", ",", "datetime", ".", "utcnow", "(", ")", ")", "searcher", "=", "SearchEngine", ".", "get_search_engine", "(", "getattr", "(", "settings", ",", "\"COURSEWARE_INDEX_NAME\"", ",", "\"courseware_index\"", ")", ")", "if", "not", "searcher", ":", "raise", "NoSearchEngineError", "(", "\"No search engine specified in settings.SEARCH_ENGINE\"", ")", "results", "=", "searcher", ".", "search", "(", "query_string", "=", "search_term", ",", "doc_type", "=", "\"course_info\"", ",", "size", "=", "size", ",", "from_", "=", "from_", ",", "# only show when enrollment start IS provided and is before now", "field_dictionary", "=", "use_field_dictionary", ",", "# show if no enrollment end is provided and has not yet been reached", "filter_dictionary", "=", "{", "\"enrollment_end\"", ":", "DateRange", "(", "datetime", ".", "utcnow", "(", ")", ",", "None", ")", "}", ",", "exclude_dictionary", "=", "exclude_dictionary", ",", "facet_terms", "=", "course_discovery_facets", "(", ")", ",", ")", "return", "results" ]
Course Discovery activities against the search engine index of course details
[ "Course", "Discovery", "activities", "against", "the", "search", "engine", "index", "of", "course", "details" ]
python
valid
47.848485
Samreay/ChainConsumer
chainconsumer/chainconsumer.py
https://github.com/Samreay/ChainConsumer/blob/902288e4d85c2677a9051a2172e03128a6169ad7/chainconsumer/chainconsumer.py#L49-L234
def add_chain(self, chain, parameters=None, name=None, weights=None, posterior=None, walkers=None, grid=False, num_eff_data_points=None, num_free_params=None, color=None, linewidth=None, linestyle=None, kde=None, shade=None, shade_alpha=None, power=None, marker_style=None, marker_size=None, marker_alpha=None, plot_contour=None, plot_point=None, statistics=None, cloud=None, shade_gradient=None, bar_shade=None, bins=None, smooth=None, color_params=None, plot_color_params=None, cmap=None, num_cloud=None): """ Add a chain to the consumer. Parameters ---------- chain : str|ndarray|dict The chain to load. Normally a ``numpy.ndarray``. If a string is found, it interprets the string as a filename and attempts to load it in. If a ``dict`` is passed in, it assumes the dict has keys of parameter names and values of an array of samples. Notice that using a dictionary puts the order of parameters in the output under the control of the python ``dict.keys()`` function. If you passed ``grid`` is set, you can pass in the parameter ranges in list form. parameters : list[str], optional A list of parameter names, one for each column (dimension) in the chain. This parameter should remain ``None`` if a dictionary is given as ``chain``, as the parameter names are taken from the dictionary keys. name : str, optional The name of the chain. Used when plotting multiple chains at once. weights : ndarray, optional If given, uses this array to weight the samples in chain posterior : ndarray, optional If given, records the log posterior for each sample in the chain walkers : int, optional How many walkers went into creating the chain. Each walker should contribute the same number of steps, and should appear in contiguous blocks in the final chain. grid : boolean, optional Whether the input is a flattened chain from a grid search instead of a Monte-Carlo chains. Note that when this is set, `walkers` should not be set, and `weights` should be set to the posterior evaluation for the grid point. **Be careful** when using a coarse grid of setting a high smoothing value, as this may oversmooth the posterior surface and give unreasonably large parameter bounds. num_eff_data_points : int|float, optional The number of effective (independent) data points used in the model fitting. Not required for plotting, but required if loading in multiple chains to perform model comparison. num_free_params : int, optional The number of degrees of freedom in your model. Not required for plotting, but required if loading in multiple chains to perform model comparison. color : str(hex), optional Provide a colour for the chain. Can be used instead of calling `configure` for convenience. linewidth : float, optional Provide a line width to plot the contours. Can be used instead of calling `configure` for convenience. linestyle : str, optional Provide a line style to plot the contour. Can be used instead of calling `configure` for convenience. kde : bool|float, optional Set the `kde` value for this specific chain. Can be used instead of calling `configure` for convenience. shade : booloptional If set, overrides the default behaviour and plots filled contours or not. If a list of bools is passed, you can turn shading on or off for specific chains. shade_alpha : float, optional Filled contour alpha value. Can be used instead of calling `configure` for convenience. power : float, optional The power to raise the posterior surface to. Useful for inflating or deflating uncertainty for debugging. 
marker_style : str|, optional The marker style to use when plotting points. Defaults to `'.'` marker_size : numeric|, optional Size of markers, if plotted. Defaults to `4`. marker_alpha : numeric, optional The alpha values when plotting markers. plot_contour : bool, optional Whether to plot the whole contour (as opposed to a point). Defaults to true for less than 25 concurrent chains. plot_point : bool, optional Whether to plot a maximum likelihood point. Defaults to true for more then 24 chains. statistics : string, optional Which sort of statistics to use. Defaults to `"max"` for maximum likelihood statistics. Other available options are `"mean"`, `"cumulative"`, `"max_symmetric"`, `"max_closest"` and `"max_central"`. In the very, very rare case you want to enable different statistics for different chains, you can pass in a list of strings. cloud : bool, optional If set, overrides the default behaviour and plots the cloud or not shade_gradient : bar_shade : bool, optional If set to true, shades in confidence regions in under histogram. By default this happens if you less than 3 chains, but is disabled if you are comparing more chains. You can pass a list if you wish to shade some chains but not others. bins : int|float, optional The number of bins to use. By default uses :math:`\frac{\sqrt{n}}{10}`, where :math:`n` are the number of data points. Giving an integer will set the number of bins to the given value. Giving a float will scale the number of bins, such that giving ``bins=1.5`` will result in using :math:`\frac{1.5\sqrt{n}}{10}` bins. Note this parameter is most useful if `kde=False` is also passed, so you can actually see the bins and not a KDE. smooth : color_params : str, optional The name of the parameter to use for the colour scatter. Defaults to none, for no colour. If set to 'weights', 'log_weights', or 'posterior' (without the quotes), and that is not a parameter in the chain, it will respectively use the weights, log weights, or posterior, to colour the points. plot_color_params : bool, optional Whether or not the colour parameter should also be plotted as a posterior surface. cmaps : str, optional The matplotlib colourmap to use in the `colour_param`. If you have multiple `color_param`s, you can specific a different cmap for each variable. By default ChainConsumer will cycle between several cmaps. num_cloud : int, optional The number of scatter points to show when enabling `cloud` or setting one of the parameters to colour scatter. Defaults to 15k per chain. Returns ------- ChainConsumer Itself, to allow chaining calls. """ is_dict = False assert chain is not None, "You cannot have a chain of None" if isinstance(chain, str): if chain.endswith("txt"): chain = np.loadtxt(chain) else: chain = np.load(chain) elif isinstance(chain, dict): assert parameters is None, \ "You cannot pass a dictionary and specify parameter names" is_dict = True parameters = list(chain.keys()) chain = np.array([chain[p] for p in parameters]).T elif isinstance(chain, list): chain = np.array(chain).T if grid: assert walkers is None, "If grid is set, walkers should not be" assert weights is not None, "If grid is set, you need to supply weights" if len(weights.shape) > 1: assert not is_dict, "We cannot construct a meshgrid from a dictionary, as the parameters" \ "are no longer ordered. Please pass in a flattened array instead." 
self._logger.info("Constructing meshgrid for grid results") meshes = np.meshgrid(*[u for u in chain.T], indexing="ij") chain = np.vstack([m.flatten() for m in meshes]).T weights = weights.flatten() assert weights.size == chain[:, 0].size, "Error, given weight array size disagrees with parameter sampling" if len(chain.shape) == 1: chain = chain[None].T if name is None: name = "Chain %d" % len(self.chains) if power is not None: assert isinstance(power, int) or isinstance(power, float), "Power should be numeric, but is %s" % type( power) if self._default_parameters is None and parameters is not None: self._default_parameters = parameters if parameters is None: if self._default_parameters is not None: assert chain.shape[1] == len(self._default_parameters), \ "Chain has %d dimensions, but default parameters have %d dimensions" \ % (chain.shape[1], len(self._default_parameters)) parameters = self._default_parameters self._logger.debug("Adding chain using default parameters") else: self._logger.debug("Adding chain with no parameter names") parameters = ["%d" % x for x in range(chain.shape[1])] else: self._logger.debug("Adding chain with defined parameters") assert len(parameters) <= chain.shape[1], \ "Have only %d columns in chain, but have been given %d parameters names! " \ "Please double check this." % (chain.shape[1], len(parameters)) for p in parameters: if p not in self._all_parameters: self._all_parameters.append(p) # Sorry, no KDE for you on a grid. if grid: kde = None if color is not None: color = self.color_finder.get_formatted([color])[0] c = Chain(chain, parameters, name, weights=weights, posterior=posterior, walkers=walkers, grid=grid, num_free_params=num_free_params, num_eff_data_points=num_eff_data_points, color=color, linewidth=linewidth, linestyle=linestyle, kde=kde, shade_alpha=shade_alpha, power=power, marker_style=marker_style, marker_size=marker_size, marker_alpha=marker_alpha, plot_contour=plot_contour, plot_point=plot_point, statistics=statistics, cloud=cloud, shade=shade, shade_gradient=shade_gradient, bar_shade=bar_shade, bins=bins, smooth=smooth, color_params=color_params, plot_color_params=plot_color_params, cmap=cmap, num_cloud=num_cloud) self.chains.append(c) self._init_params() return self
[ "def", "add_chain", "(", "self", ",", "chain", ",", "parameters", "=", "None", ",", "name", "=", "None", ",", "weights", "=", "None", ",", "posterior", "=", "None", ",", "walkers", "=", "None", ",", "grid", "=", "False", ",", "num_eff_data_points", "=", "None", ",", "num_free_params", "=", "None", ",", "color", "=", "None", ",", "linewidth", "=", "None", ",", "linestyle", "=", "None", ",", "kde", "=", "None", ",", "shade", "=", "None", ",", "shade_alpha", "=", "None", ",", "power", "=", "None", ",", "marker_style", "=", "None", ",", "marker_size", "=", "None", ",", "marker_alpha", "=", "None", ",", "plot_contour", "=", "None", ",", "plot_point", "=", "None", ",", "statistics", "=", "None", ",", "cloud", "=", "None", ",", "shade_gradient", "=", "None", ",", "bar_shade", "=", "None", ",", "bins", "=", "None", ",", "smooth", "=", "None", ",", "color_params", "=", "None", ",", "plot_color_params", "=", "None", ",", "cmap", "=", "None", ",", "num_cloud", "=", "None", ")", ":", "is_dict", "=", "False", "assert", "chain", "is", "not", "None", ",", "\"You cannot have a chain of None\"", "if", "isinstance", "(", "chain", ",", "str", ")", ":", "if", "chain", ".", "endswith", "(", "\"txt\"", ")", ":", "chain", "=", "np", ".", "loadtxt", "(", "chain", ")", "else", ":", "chain", "=", "np", ".", "load", "(", "chain", ")", "elif", "isinstance", "(", "chain", ",", "dict", ")", ":", "assert", "parameters", "is", "None", ",", "\"You cannot pass a dictionary and specify parameter names\"", "is_dict", "=", "True", "parameters", "=", "list", "(", "chain", ".", "keys", "(", ")", ")", "chain", "=", "np", ".", "array", "(", "[", "chain", "[", "p", "]", "for", "p", "in", "parameters", "]", ")", ".", "T", "elif", "isinstance", "(", "chain", ",", "list", ")", ":", "chain", "=", "np", ".", "array", "(", "chain", ")", ".", "T", "if", "grid", ":", "assert", "walkers", "is", "None", ",", "\"If grid is set, walkers should not be\"", "assert", "weights", "is", "not", "None", ",", "\"If grid is set, you need to supply weights\"", "if", "len", "(", "weights", ".", "shape", ")", ">", "1", ":", "assert", "not", "is_dict", ",", "\"We cannot construct a meshgrid from a dictionary, as the parameters\"", "\"are no longer ordered. 
Please pass in a flattened array instead.\"", "self", ".", "_logger", ".", "info", "(", "\"Constructing meshgrid for grid results\"", ")", "meshes", "=", "np", ".", "meshgrid", "(", "*", "[", "u", "for", "u", "in", "chain", ".", "T", "]", ",", "indexing", "=", "\"ij\"", ")", "chain", "=", "np", ".", "vstack", "(", "[", "m", ".", "flatten", "(", ")", "for", "m", "in", "meshes", "]", ")", ".", "T", "weights", "=", "weights", ".", "flatten", "(", ")", "assert", "weights", ".", "size", "==", "chain", "[", ":", ",", "0", "]", ".", "size", ",", "\"Error, given weight array size disagrees with parameter sampling\"", "if", "len", "(", "chain", ".", "shape", ")", "==", "1", ":", "chain", "=", "chain", "[", "None", "]", ".", "T", "if", "name", "is", "None", ":", "name", "=", "\"Chain %d\"", "%", "len", "(", "self", ".", "chains", ")", "if", "power", "is", "not", "None", ":", "assert", "isinstance", "(", "power", ",", "int", ")", "or", "isinstance", "(", "power", ",", "float", ")", ",", "\"Power should be numeric, but is %s\"", "%", "type", "(", "power", ")", "if", "self", ".", "_default_parameters", "is", "None", "and", "parameters", "is", "not", "None", ":", "self", ".", "_default_parameters", "=", "parameters", "if", "parameters", "is", "None", ":", "if", "self", ".", "_default_parameters", "is", "not", "None", ":", "assert", "chain", ".", "shape", "[", "1", "]", "==", "len", "(", "self", ".", "_default_parameters", ")", ",", "\"Chain has %d dimensions, but default parameters have %d dimensions\"", "%", "(", "chain", ".", "shape", "[", "1", "]", ",", "len", "(", "self", ".", "_default_parameters", ")", ")", "parameters", "=", "self", ".", "_default_parameters", "self", ".", "_logger", ".", "debug", "(", "\"Adding chain using default parameters\"", ")", "else", ":", "self", ".", "_logger", ".", "debug", "(", "\"Adding chain with no parameter names\"", ")", "parameters", "=", "[", "\"%d\"", "%", "x", "for", "x", "in", "range", "(", "chain", ".", "shape", "[", "1", "]", ")", "]", "else", ":", "self", ".", "_logger", ".", "debug", "(", "\"Adding chain with defined parameters\"", ")", "assert", "len", "(", "parameters", ")", "<=", "chain", ".", "shape", "[", "1", "]", ",", "\"Have only %d columns in chain, but have been given %d parameters names! 
\"", "\"Please double check this.\"", "%", "(", "chain", ".", "shape", "[", "1", "]", ",", "len", "(", "parameters", ")", ")", "for", "p", "in", "parameters", ":", "if", "p", "not", "in", "self", ".", "_all_parameters", ":", "self", ".", "_all_parameters", ".", "append", "(", "p", ")", "# Sorry, no KDE for you on a grid.", "if", "grid", ":", "kde", "=", "None", "if", "color", "is", "not", "None", ":", "color", "=", "self", ".", "color_finder", ".", "get_formatted", "(", "[", "color", "]", ")", "[", "0", "]", "c", "=", "Chain", "(", "chain", ",", "parameters", ",", "name", ",", "weights", "=", "weights", ",", "posterior", "=", "posterior", ",", "walkers", "=", "walkers", ",", "grid", "=", "grid", ",", "num_free_params", "=", "num_free_params", ",", "num_eff_data_points", "=", "num_eff_data_points", ",", "color", "=", "color", ",", "linewidth", "=", "linewidth", ",", "linestyle", "=", "linestyle", ",", "kde", "=", "kde", ",", "shade_alpha", "=", "shade_alpha", ",", "power", "=", "power", ",", "marker_style", "=", "marker_style", ",", "marker_size", "=", "marker_size", ",", "marker_alpha", "=", "marker_alpha", ",", "plot_contour", "=", "plot_contour", ",", "plot_point", "=", "plot_point", ",", "statistics", "=", "statistics", ",", "cloud", "=", "cloud", ",", "shade", "=", "shade", ",", "shade_gradient", "=", "shade_gradient", ",", "bar_shade", "=", "bar_shade", ",", "bins", "=", "bins", ",", "smooth", "=", "smooth", ",", "color_params", "=", "color_params", ",", "plot_color_params", "=", "plot_color_params", ",", "cmap", "=", "cmap", ",", "num_cloud", "=", "num_cloud", ")", "self", ".", "chains", ".", "append", "(", "c", ")", "self", ".", "_init_params", "(", ")", "return", "self" ]
Add a chain to the consumer. Parameters ---------- chain : str|ndarray|dict The chain to load. Normally a ``numpy.ndarray``. If a string is found, it interprets the string as a filename and attempts to load it in. If a ``dict`` is passed in, it assumes the dict has keys of parameter names and values of an array of samples. Notice that using a dictionary puts the order of parameters in the output under the control of the python ``dict.keys()`` function. If you passed ``grid`` is set, you can pass in the parameter ranges in list form. parameters : list[str], optional A list of parameter names, one for each column (dimension) in the chain. This parameter should remain ``None`` if a dictionary is given as ``chain``, as the parameter names are taken from the dictionary keys. name : str, optional The name of the chain. Used when plotting multiple chains at once. weights : ndarray, optional If given, uses this array to weight the samples in chain posterior : ndarray, optional If given, records the log posterior for each sample in the chain walkers : int, optional How many walkers went into creating the chain. Each walker should contribute the same number of steps, and should appear in contiguous blocks in the final chain. grid : boolean, optional Whether the input is a flattened chain from a grid search instead of a Monte-Carlo chains. Note that when this is set, `walkers` should not be set, and `weights` should be set to the posterior evaluation for the grid point. **Be careful** when using a coarse grid of setting a high smoothing value, as this may oversmooth the posterior surface and give unreasonably large parameter bounds. num_eff_data_points : int|float, optional The number of effective (independent) data points used in the model fitting. Not required for plotting, but required if loading in multiple chains to perform model comparison. num_free_params : int, optional The number of degrees of freedom in your model. Not required for plotting, but required if loading in multiple chains to perform model comparison. color : str(hex), optional Provide a colour for the chain. Can be used instead of calling `configure` for convenience. linewidth : float, optional Provide a line width to plot the contours. Can be used instead of calling `configure` for convenience. linestyle : str, optional Provide a line style to plot the contour. Can be used instead of calling `configure` for convenience. kde : bool|float, optional Set the `kde` value for this specific chain. Can be used instead of calling `configure` for convenience. shade : booloptional If set, overrides the default behaviour and plots filled contours or not. If a list of bools is passed, you can turn shading on or off for specific chains. shade_alpha : float, optional Filled contour alpha value. Can be used instead of calling `configure` for convenience. power : float, optional The power to raise the posterior surface to. Useful for inflating or deflating uncertainty for debugging. marker_style : str|, optional The marker style to use when plotting points. Defaults to `'.'` marker_size : numeric|, optional Size of markers, if plotted. Defaults to `4`. marker_alpha : numeric, optional The alpha values when plotting markers. plot_contour : bool, optional Whether to plot the whole contour (as opposed to a point). Defaults to true for less than 25 concurrent chains. plot_point : bool, optional Whether to plot a maximum likelihood point. Defaults to true for more then 24 chains. statistics : string, optional Which sort of statistics to use. 
Defaults to `"max"` for maximum likelihood statistics. Other available options are `"mean"`, `"cumulative"`, `"max_symmetric"`, `"max_closest"` and `"max_central"`. In the very, very rare case you want to enable different statistics for different chains, you can pass in a list of strings. cloud : bool, optional If set, overrides the default behaviour and plots the cloud or not shade_gradient : bar_shade : bool, optional If set to true, shades in confidence regions in under histogram. By default this happens if you less than 3 chains, but is disabled if you are comparing more chains. You can pass a list if you wish to shade some chains but not others. bins : int|float, optional The number of bins to use. By default uses :math:`\frac{\sqrt{n}}{10}`, where :math:`n` are the number of data points. Giving an integer will set the number of bins to the given value. Giving a float will scale the number of bins, such that giving ``bins=1.5`` will result in using :math:`\frac{1.5\sqrt{n}}{10}` bins. Note this parameter is most useful if `kde=False` is also passed, so you can actually see the bins and not a KDE. smooth : color_params : str, optional The name of the parameter to use for the colour scatter. Defaults to none, for no colour. If set to 'weights', 'log_weights', or 'posterior' (without the quotes), and that is not a parameter in the chain, it will respectively use the weights, log weights, or posterior, to colour the points. plot_color_params : bool, optional Whether or not the colour parameter should also be plotted as a posterior surface. cmaps : str, optional The matplotlib colourmap to use in the `colour_param`. If you have multiple `color_param`s, you can specific a different cmap for each variable. By default ChainConsumer will cycle between several cmaps. num_cloud : int, optional The number of scatter points to show when enabling `cloud` or setting one of the parameters to colour scatter. Defaults to 15k per chain. Returns ------- ChainConsumer Itself, to allow chaining calls.
[ "Add", "a", "chain", "to", "the", "consumer", "." ]
python
train
59.478495
xeroc/python-graphenelib
graphenebase/operations.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenebase/operations.py#L88-L109
def detail(self, *args, **kwargs): prefix = kwargs.pop("prefix", default_prefix) # remove dublicates kwargs["votes"] = list(set(kwargs["votes"])) """ This is an example how to sort votes prior to using them in the Object """ # # Sort votes # kwargs["votes"] = sorted( # kwargs["votes"], # key=lambda x: float(x.split(":")[1]), # ) return OrderedDict( [ ("memo_key", PublicKey(kwargs["memo_key"], prefix=prefix)), ("voting_account", ObjectId(kwargs["voting_account"], "account")), ("num_witness", Uint16(kwargs["num_witness"])), ("num_committee", Uint16(kwargs["num_committee"])), ("votes", Array([VoteId(o) for o in kwargs["votes"]])), ("extensions", Set([])), ] )
[ "def", "detail", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "prefix", "=", "kwargs", ".", "pop", "(", "\"prefix\"", ",", "default_prefix", ")", "# remove dublicates", "kwargs", "[", "\"votes\"", "]", "=", "list", "(", "set", "(", "kwargs", "[", "\"votes\"", "]", ")", ")", "# # Sort votes", "# kwargs[\"votes\"] = sorted(", "# kwargs[\"votes\"],", "# key=lambda x: float(x.split(\":\")[1]),", "# )", "return", "OrderedDict", "(", "[", "(", "\"memo_key\"", ",", "PublicKey", "(", "kwargs", "[", "\"memo_key\"", "]", ",", "prefix", "=", "prefix", ")", ")", ",", "(", "\"voting_account\"", ",", "ObjectId", "(", "kwargs", "[", "\"voting_account\"", "]", ",", "\"account\"", ")", ")", ",", "(", "\"num_witness\"", ",", "Uint16", "(", "kwargs", "[", "\"num_witness\"", "]", ")", ")", ",", "(", "\"num_committee\"", ",", "Uint16", "(", "kwargs", "[", "\"num_committee\"", "]", ")", ")", ",", "(", "\"votes\"", ",", "Array", "(", "[", "VoteId", "(", "o", ")", "for", "o", "in", "kwargs", "[", "\"votes\"", "]", "]", ")", ")", ",", "(", "\"extensions\"", ",", "Set", "(", "[", "]", ")", ")", ",", "]", ")" ]
This is an example of how to sort votes prior to using them in the Object
[ "This", "is", "an", "example", "how", "to", "sort", "votes", "prior", "to", "using", "them", "in", "the", "Object" ]
python
valid
39.954545
mozilla-services/pyramid_multiauth
pyramid_multiauth/__init__.py
https://github.com/mozilla-services/pyramid_multiauth/blob/9548aa55f726920a666791d7c89ac2b9779d2bc1/pyramid_multiauth/__init__.py#L293-L336
def policy_factory_from_module(config, module): """Create a policy factory that works by config.include()'ing a module. This function does some trickery with the Pyramid config system. Loosely, it does config.include(module), and then sucks out information about the authn policy that was registered. It's complicated by pyramid's delayed- commit system, which means we have to do the work via callbacks. """ # Remember the policy that's active before including the module, if any. orig_policy = config.registry.queryUtility(IAuthenticationPolicy) # Include the module, so we get any default views etc. config.include(module) # That might have registered and commited a new policy object. policy = config.registry.queryUtility(IAuthenticationPolicy) if policy is not None and policy is not orig_policy: return lambda: policy # Or it might have set up a pending action to register one later. # Find the most recent IAuthenticationPolicy action, and grab # out the registering function so we can call it ourselves. for action in reversed(config.action_state.actions): # Extract the discriminator and callable. This is complicated by # Pyramid 1.3 changing action from a tuple to a dict. try: discriminator = action["discriminator"] callable = action["callable"] except TypeError: # pragma: nocover discriminator = action[0] # pragma: nocover callable = action[1] # pragma: nocover # If it's not setting the authn policy, keep looking. if discriminator is not IAuthenticationPolicy: continue # Otherwise, wrap it up so we can extract the registered object. def grab_policy(register=callable): old_policy = config.registry.queryUtility(IAuthenticationPolicy) register() new_policy = config.registry.queryUtility(IAuthenticationPolicy) config.registry.registerUtility(old_policy, IAuthenticationPolicy) return new_policy return grab_policy # Or it might not have done *anything*. # So return a null policy factory. return lambda: None
[ "def", "policy_factory_from_module", "(", "config", ",", "module", ")", ":", "# Remember the policy that's active before including the module, if any.", "orig_policy", "=", "config", ".", "registry", ".", "queryUtility", "(", "IAuthenticationPolicy", ")", "# Include the module, so we get any default views etc.", "config", ".", "include", "(", "module", ")", "# That might have registered and commited a new policy object.", "policy", "=", "config", ".", "registry", ".", "queryUtility", "(", "IAuthenticationPolicy", ")", "if", "policy", "is", "not", "None", "and", "policy", "is", "not", "orig_policy", ":", "return", "lambda", ":", "policy", "# Or it might have set up a pending action to register one later.", "# Find the most recent IAuthenticationPolicy action, and grab", "# out the registering function so we can call it ourselves.", "for", "action", "in", "reversed", "(", "config", ".", "action_state", ".", "actions", ")", ":", "# Extract the discriminator and callable. This is complicated by", "# Pyramid 1.3 changing action from a tuple to a dict.", "try", ":", "discriminator", "=", "action", "[", "\"discriminator\"", "]", "callable", "=", "action", "[", "\"callable\"", "]", "except", "TypeError", ":", "# pragma: nocover", "discriminator", "=", "action", "[", "0", "]", "# pragma: nocover", "callable", "=", "action", "[", "1", "]", "# pragma: nocover", "# If it's not setting the authn policy, keep looking.", "if", "discriminator", "is", "not", "IAuthenticationPolicy", ":", "continue", "# Otherwise, wrap it up so we can extract the registered object.", "def", "grab_policy", "(", "register", "=", "callable", ")", ":", "old_policy", "=", "config", ".", "registry", ".", "queryUtility", "(", "IAuthenticationPolicy", ")", "register", "(", ")", "new_policy", "=", "config", ".", "registry", ".", "queryUtility", "(", "IAuthenticationPolicy", ")", "config", ".", "registry", ".", "registerUtility", "(", "old_policy", ",", "IAuthenticationPolicy", ")", "return", "new_policy", "return", "grab_policy", "# Or it might not have done *anything*.", "# So return a null policy factory.", "return", "lambda", ":", "None" ]
Create a policy factory that works by config.include()'ing a module. This function does some trickery with the Pyramid config system. Loosely, it does config.include(module), and then sucks out information about the authn policy that was registered. It's complicated by pyramid's delayed-commit system, which means we have to do the work via callbacks.
[ "Create", "a", "policy", "factory", "that", "works", "by", "config", ".", "include", "()", "ing", "a", "module", "." ]
python
train
49.795455
cnobile2012/pololu-motors
pololu/motors/crc7.py
https://github.com/cnobile2012/pololu-motors/blob/453d2283a63cfe15cda96cad6dffa73372d52a7c/pololu/motors/crc7.py#L26-L35
def crc7(data): """ Compute CRC of a whole message. """ crc = 0 for c in data: crc = CRC7_TABLE[crc ^ c] return crc
[ "def", "crc7", "(", "data", ")", ":", "crc", "=", "0", "for", "c", "in", "data", ":", "crc", "=", "CRC7_TABLE", "[", "crc", "^", "c", "]", "return", "crc" ]
Compute CRC of a whole message.
[ "Compute", "CRC", "of", "a", "whole", "message", "." ]
python
train
14
acutesoftware/AIKIF
aikif/agents/agent.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/agent.py#L104-L112
def set_coords(self, x=0, y=0, z=0, t=0): """ set coords of agent in an arbitrary world """ self.coords = {} self.coords['x'] = x self.coords['y'] = y self.coords['z'] = z self.coords['t'] = t
[ "def", "set_coords", "(", "self", ",", "x", "=", "0", ",", "y", "=", "0", ",", "z", "=", "0", ",", "t", "=", "0", ")", ":", "self", ".", "coords", "=", "{", "}", "self", ".", "coords", "[", "'x'", "]", "=", "x", "self", ".", "coords", "[", "'y'", "]", "=", "y", "self", ".", "coords", "[", "'z'", "]", "=", "z", "self", ".", "coords", "[", "'t'", "]", "=", "t" ]
set coords of agent in an arbitrary world
[ "set", "coords", "of", "agent", "in", "an", "arbitrary", "world" ]
python
train
27.555556
KrishnaswamyLab/graphtools
graphtools/graphs.py
https://github.com/KrishnaswamyLab/graphtools/blob/44685352be7df2005d44722903092207967457f2/graphtools/graphs.py#L673-L700
def interpolate(self, transform, transitions=None, Y=None): """Interpolate new data onto a transformation of the graph data One of either transitions or Y should be provided Parameters ---------- transform : array-like, shape=[n_samples, n_transform_features] transitions : array-like, optional, shape=[n_samples_y, n_samples] Transition matrix from `Y` (not provided) to `self.data` Y: array-like, optional, shape=[n_samples_y, n_features] new data for which an affinity matrix is calculated to the existing data. `n_features` must match either the ambient or PCA dimensions Returns ------- Y_transform : array-like, [n_samples_y, n_features or n_pca] Transition matrix from `Y` to `self.data` """ if transitions is None and Y is None: # assume Y is self.data and use standard landmark transitions transitions = self.transitions return super().interpolate(transform, transitions=transitions, Y=Y)
[ "def", "interpolate", "(", "self", ",", "transform", ",", "transitions", "=", "None", ",", "Y", "=", "None", ")", ":", "if", "transitions", "is", "None", "and", "Y", "is", "None", ":", "# assume Y is self.data and use standard landmark transitions", "transitions", "=", "self", ".", "transitions", "return", "super", "(", ")", ".", "interpolate", "(", "transform", ",", "transitions", "=", "transitions", ",", "Y", "=", "Y", ")" ]
Interpolate new data onto a transformation of the graph data One of either transitions or Y should be provided Parameters ---------- transform : array-like, shape=[n_samples, n_transform_features] transitions : array-like, optional, shape=[n_samples_y, n_samples] Transition matrix from `Y` (not provided) to `self.data` Y: array-like, optional, shape=[n_samples_y, n_features] new data for which an affinity matrix is calculated to the existing data. `n_features` must match either the ambient or PCA dimensions Returns ------- Y_transform : array-like, [n_samples_y, n_features or n_pca] Transition matrix from `Y` to `self.data`
[ "Interpolate", "new", "data", "onto", "a", "transformation", "of", "the", "graph", "data" ]
python
train
38.035714
fy0/slim
slim/base/sqlfuncs.py
https://github.com/fy0/slim/blob/9951a910750888dbe7dd3e98acae9c40efae0689/slim/base/sqlfuncs.py#L24-L33
async def select_page(self, info: SQLQueryInfo, size=1, page=1) -> Tuple[Tuple[DataRecord, ...], int]: """ Select from database :param info: :param size: -1 means infinite :param page: :param need_count: if True, get count as second return value, otherwise -1 :return: records. count """ raise NotImplementedError()
[ "async", "def", "select_page", "(", "self", ",", "info", ":", "SQLQueryInfo", ",", "size", "=", "1", ",", "page", "=", "1", ")", "->", "Tuple", "[", "Tuple", "[", "DataRecord", ",", "...", "]", ",", "int", "]", ":", "raise", "NotImplementedError", "(", ")" ]
Select from database :param info: :param size: -1 means infinite :param page: :param need_count: if True, get count as second return value, otherwise -1 :return: records, count
[ "Select", "from", "database", ":", "param", "info", ":", ":", "param", "size", ":", "-", "1", "means", "infinite", ":", "param", "page", ":", ":", "param", "need_count", ":", "if", "True", "get", "count", "as", "second", "return", "value", "otherwise", "-", "1", ":", "return", ":", "records", ".", "count" ]
python
valid
37.8
pantsbuild/pants
src/python/pants/goal/workspace.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/goal/workspace.py#L46-L55
def touched_files(self, parent): """ :API: public """ try: return self._scm.changed_files(from_commit=parent, include_untracked=True, relative_to=get_buildroot()) except Scm.ScmException as e: raise self.WorkspaceError("Problem detecting changed files.", e)
[ "def", "touched_files", "(", "self", ",", "parent", ")", ":", "try", ":", "return", "self", ".", "_scm", ".", "changed_files", "(", "from_commit", "=", "parent", ",", "include_untracked", "=", "True", ",", "relative_to", "=", "get_buildroot", "(", ")", ")", "except", "Scm", ".", "ScmException", "as", "e", ":", "raise", "self", ".", "WorkspaceError", "(", "\"Problem detecting changed files.\"", ",", "e", ")" ]
:API: public
[ ":", "API", ":", "public" ]
python
train
35.4
angr/angr
angr/calling_conventions.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/calling_conventions.py#L485-L520
def get_args(self, state, is_fp=None, sizes=None, stack_base=None): """ `is_fp` should be a list of booleans specifying whether each corresponding argument is floating-point - True for fp and False for int. For a shorthand to assume that all the parameters are int, pass the number of parameters as an int. If you've customized this CC, you may omit this parameter entirely. If it is provided, it is used for sanity-checking. `sizes` is an optional list of argument sizes, in bytes. Be careful about using this if you've made explicit the arg locations, since it might decide to combine two locations into one if an arg is too big. `stack_base` is an optional pointer to the top of the stack at the function start. If it is not specified, use the current stack pointer. Returns a list of bitvector expressions representing the arguments of a function. """ if sizes is None and self.func_ty is not None: sizes = [arg.size for arg in self.func_ty.args] if is_fp is None: if self.args is None: if self.func_ty is None: raise ValueError("You must either customize this CC or pass a value to is_fp!") else: arg_locs = self.arg_locs([False]*len(self.func_ty.args)) else: arg_locs = self.args elif type(is_fp) is int: if self.args is not None and len(self.args) != is_fp: raise ValueError("Bad number of args requested: got %d, expected %d" % (is_fp, len(self.args))) arg_locs = self.arg_locs([False]*is_fp, sizes) else: arg_locs = self.arg_locs(is_fp, sizes) return [loc.get_value(state, stack_base=stack_base) for loc in arg_locs]
[ "def", "get_args", "(", "self", ",", "state", ",", "is_fp", "=", "None", ",", "sizes", "=", "None", ",", "stack_base", "=", "None", ")", ":", "if", "sizes", "is", "None", "and", "self", ".", "func_ty", "is", "not", "None", ":", "sizes", "=", "[", "arg", ".", "size", "for", "arg", "in", "self", ".", "func_ty", ".", "args", "]", "if", "is_fp", "is", "None", ":", "if", "self", ".", "args", "is", "None", ":", "if", "self", ".", "func_ty", "is", "None", ":", "raise", "ValueError", "(", "\"You must either customize this CC or pass a value to is_fp!\"", ")", "else", ":", "arg_locs", "=", "self", ".", "arg_locs", "(", "[", "False", "]", "*", "len", "(", "self", ".", "func_ty", ".", "args", ")", ")", "else", ":", "arg_locs", "=", "self", ".", "args", "elif", "type", "(", "is_fp", ")", "is", "int", ":", "if", "self", ".", "args", "is", "not", "None", "and", "len", "(", "self", ".", "args", ")", "!=", "is_fp", ":", "raise", "ValueError", "(", "\"Bad number of args requested: got %d, expected %d\"", "%", "(", "is_fp", ",", "len", "(", "self", ".", "args", ")", ")", ")", "arg_locs", "=", "self", ".", "arg_locs", "(", "[", "False", "]", "*", "is_fp", ",", "sizes", ")", "else", ":", "arg_locs", "=", "self", ".", "arg_locs", "(", "is_fp", ",", "sizes", ")", "return", "[", "loc", ".", "get_value", "(", "state", ",", "stack_base", "=", "stack_base", ")", "for", "loc", "in", "arg_locs", "]" ]
`is_fp` should be a list of booleans specifying whether each corresponding argument is floating-point - True for fp and False for int. For a shorthand to assume that all the parameters are int, pass the number of parameters as an int. If you've customized this CC, you may omit this parameter entirely. If it is provided, it is used for sanity-checking. `sizes` is an optional list of argument sizes, in bytes. Be careful about using this if you've made explicit the arg locations, since it might decide to combine two locations into one if an arg is too big. `stack_base` is an optional pointer to the top of the stack at the function start. If it is not specified, use the current stack pointer. Returns a list of bitvector expressions representing the arguments of a function.
[ "is_fp", "should", "be", "a", "list", "of", "booleans", "specifying", "whether", "each", "corresponding", "argument", "is", "floating", "-", "point", "-", "True", "for", "fp", "and", "False", "for", "int", ".", "For", "a", "shorthand", "to", "assume", "that", "all", "the", "parameters", "are", "int", "pass", "the", "number", "of", "parameters", "as", "an", "int", "." ]
python
train
50.222222
ljcooke/see
see/output.py
https://github.com/ljcooke/see/blob/4cbc67a31c92367977ecb4bbb1f0736fa688a6ba/see/output.py#L151-L172
def display_name(name, obj, local): """ Get the display name of an object. Keyword arguments (all required): * ``name`` -- the name of the object as a string. * ``obj`` -- the object itself. * ``local`` -- a boolean value indicating whether the object is in local scope or owned by an object. """ prefix = '' if local else '.' if isinstance(obj, SeeError): suffix = '?' elif hasattr(obj, '__call__'): suffix = '()' else: suffix = '' return ''.join((prefix, name, suffix))
[ "def", "display_name", "(", "name", ",", "obj", ",", "local", ")", ":", "prefix", "=", "''", "if", "local", "else", "'.'", "if", "isinstance", "(", "obj", ",", "SeeError", ")", ":", "suffix", "=", "'?'", "elif", "hasattr", "(", "obj", ",", "'__call__'", ")", ":", "suffix", "=", "'()'", "else", ":", "suffix", "=", "''", "return", "''", ".", "join", "(", "(", "prefix", ",", "name", ",", "suffix", ")", ")" ]
Get the display name of an object. Keyword arguments (all required): * ``name`` -- the name of the object as a string. * ``obj`` -- the object itself. * ``local`` -- a boolean value indicating whether the object is in local scope or owned by an object.
[ "Get", "the", "display", "name", "of", "an", "object", "." ]
python
train
24.227273
mojaie/chorus
chorus/v2000reader.py
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L192-L253
def mol_supplier(lines, no_halt, assign_descriptors): """Yields molecules generated from CTAB text Args: lines (iterable): CTAB text lines no_halt (boolean): True: shows warning messages for invalid format and go on. False: throws an exception for it and stop parsing. assign_descriptors (boolean): if True, default descriptors are automatically assigned. """ def sdf_block(lns): mol = [] opt = [] is_mol = True for line in lns: if line.startswith("$$$$"): yield mol[:], opt[:] is_mol = True mol.clear() opt.clear() elif line.startswith("M END"): is_mol = False elif is_mol: mol.append(line.rstrip()) else: opt.append(line.rstrip()) if mol: yield mol, opt for i, (mol, opt) in enumerate(sdf_block(lines)): try: c = molecule(mol) if assign_descriptors: molutil.assign_descriptors(c) except ValueError as err: if no_halt: print("Unsupported symbol: {} (#{} in v2000reader)".format( err, i + 1)) c = molutil.null_molecule(assign_descriptors) else: raise ValueError("Unsupported symbol: {}".format(err)) except RuntimeError as err: if no_halt: print( "Failed to minimize ring: {} (#{} in v2000reader)".format( err, i + 1) ) else: raise RuntimeError("Failed to minimize ring: {}".format(err)) except: if no_halt: print("Unexpected error (#{} in v2000reader)".format(i + 1)) c = molutil.null_molecule(assign_descriptors) c.data = optional_data(opt) yield c continue else: print(traceback.format_exc()) raise Exception("Unsupported Error") c.data = optional_data(opt) yield c
[ "def", "mol_supplier", "(", "lines", ",", "no_halt", ",", "assign_descriptors", ")", ":", "def", "sdf_block", "(", "lns", ")", ":", "mol", "=", "[", "]", "opt", "=", "[", "]", "is_mol", "=", "True", "for", "line", "in", "lns", ":", "if", "line", ".", "startswith", "(", "\"$$$$\"", ")", ":", "yield", "mol", "[", ":", "]", ",", "opt", "[", ":", "]", "is_mol", "=", "True", "mol", ".", "clear", "(", ")", "opt", ".", "clear", "(", ")", "elif", "line", ".", "startswith", "(", "\"M END\"", ")", ":", "is_mol", "=", "False", "elif", "is_mol", ":", "mol", ".", "append", "(", "line", ".", "rstrip", "(", ")", ")", "else", ":", "opt", ".", "append", "(", "line", ".", "rstrip", "(", ")", ")", "if", "mol", ":", "yield", "mol", ",", "opt", "for", "i", ",", "(", "mol", ",", "opt", ")", "in", "enumerate", "(", "sdf_block", "(", "lines", ")", ")", ":", "try", ":", "c", "=", "molecule", "(", "mol", ")", "if", "assign_descriptors", ":", "molutil", ".", "assign_descriptors", "(", "c", ")", "except", "ValueError", "as", "err", ":", "if", "no_halt", ":", "print", "(", "\"Unsupported symbol: {} (#{} in v2000reader)\"", ".", "format", "(", "err", ",", "i", "+", "1", ")", ")", "c", "=", "molutil", ".", "null_molecule", "(", "assign_descriptors", ")", "else", ":", "raise", "ValueError", "(", "\"Unsupported symbol: {}\"", ".", "format", "(", "err", ")", ")", "except", "RuntimeError", "as", "err", ":", "if", "no_halt", ":", "print", "(", "\"Failed to minimize ring: {} (#{} in v2000reader)\"", ".", "format", "(", "err", ",", "i", "+", "1", ")", ")", "else", ":", "raise", "RuntimeError", "(", "\"Failed to minimize ring: {}\"", ".", "format", "(", "err", ")", ")", "except", ":", "if", "no_halt", ":", "print", "(", "\"Unexpected error (#{} in v2000reader)\"", ".", "format", "(", "i", "+", "1", ")", ")", "c", "=", "molutil", ".", "null_molecule", "(", "assign_descriptors", ")", "c", ".", "data", "=", "optional_data", "(", "opt", ")", "yield", "c", "continue", "else", ":", "print", "(", "traceback", ".", "format_exc", "(", ")", ")", "raise", "Exception", "(", "\"Unsupported Error\"", ")", "c", ".", "data", "=", "optional_data", "(", "opt", ")", "yield", "c" ]
Yields molecules generated from CTAB text Args: lines (iterable): CTAB text lines no_halt (boolean): True: shows warning messages for invalid format and go on. False: throws an exception for it and stop parsing. assign_descriptors (boolean): if True, default descriptors are automatically assigned.
[ "Yields", "molecules", "generated", "from", "CTAB", "text" ]
python
train
34.451613
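A hedged usage sketch for mol_supplier (the import path follows chorus/v2000reader.py above; the filename is illustrative): with no_halt=True, records that fail to parse are reported and yielded as null molecules instead of aborting the whole file.

from chorus import v2000reader

with open('compounds.sdf') as sdf:                      # hypothetical SDF file
    for mol in v2000reader.mol_supplier(sdf, no_halt=True, assign_descriptors=True):
        print(mol.data)                                 # optional data fields of each record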
CalebBell/fluids
fluids/geometry.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/geometry.py#L224-L269
def V_horiz_ellipsoidal(D, L, a, h, headonly=False): r'''Calculates volume of a tank with ellipsoidal ends, according to [1]_. .. math:: V_f = A_fL + \pi a h^2\left(1 - \frac{h}{3R}\right) .. math:: Af = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2} Parameters ---------- D : float Diameter of the main cylindrical section, [m] L : float Length of the main cylindrical section, [m] a : float Distance the ellipsoidal head extends on one side, [m] h : float Height, as measured up to where the fluid ends, [m] headonly : bool, optional Function returns only the volume of a single head side if True Returns ------- V : float Volume [m^3] Examples -------- Matching example from [1]_, with inputs in inches and volume in gallons. >>> V_horiz_ellipsoidal(D=108, L=156, a=42, h=36)/231. 2380.9565415578145 References ---------- .. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015. http://www.webcalc.com.br/blog/Tank_Volume.PDF''' R = 0.5*D Af = R*R*acos((R-h)/R) - (R-h)*(2*R*h - h*h)**0.5 Vf = pi*a*h*h*(1 - h/(3.*R)) if headonly: Vf = 0.5*Vf else: Vf += Af*L return Vf
[ "def", "V_horiz_ellipsoidal", "(", "D", ",", "L", ",", "a", ",", "h", ",", "headonly", "=", "False", ")", ":", "R", "=", "0.5", "*", "D", "Af", "=", "R", "*", "R", "*", "acos", "(", "(", "R", "-", "h", ")", "/", "R", ")", "-", "(", "R", "-", "h", ")", "*", "(", "2", "*", "R", "*", "h", "-", "h", "*", "h", ")", "**", "0.5", "Vf", "=", "pi", "*", "a", "*", "h", "*", "h", "*", "(", "1", "-", "h", "/", "(", "3.", "*", "R", ")", ")", "if", "headonly", ":", "Vf", "=", "0.5", "*", "Vf", "else", ":", "Vf", "+=", "Af", "*", "L", "return", "Vf" ]
r'''Calculates volume of a tank with ellipsoidal ends, according to [1]_. .. math:: V_f = A_fL + \pi a h^2\left(1 - \frac{h}{3R}\right) .. math:: Af = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2} Parameters ---------- D : float Diameter of the main cylindrical section, [m] L : float Length of the main cylindrical section, [m] a : float Distance the ellipsoidal head extends on one side, [m] h : float Height, as measured up to where the fluid ends, [m] headonly : bool, optional Function returns only the volume of a single head side if True Returns ------- V : float Volume [m^3] Examples -------- Matching example from [1]_, with inputs in inches and volume in gallons. >>> V_horiz_ellipsoidal(D=108, L=156, a=42, h=36)/231. 2380.9565415578145 References ---------- .. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015. http://www.webcalc.com.br/blog/Tank_Volume.PDF
[ "r", "Calculates", "volume", "of", "a", "tank", "with", "ellipsoidal", "ends", "according", "to", "[", "1", "]", "_", "." ]
python
train
27.23913
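The volume formula above is easy to sanity-check without installing fluids; this standalone sketch re-implements the same two equations and reproduces the docstring example (inches in, US gallons out via the 231 cubic-inch-per-gallon factor).

from math import acos, pi

def v_horiz_ellipsoidal(D, L, a, h):
    # Af = R^2*acos((R-h)/R) - (R-h)*sqrt(2Rh - h^2);  Vf = Af*L + pi*a*h^2*(1 - h/(3R))
    R = 0.5 * D
    Af = R * R * acos((R - h) / R) - (R - h) * (2.0 * R * h - h * h) ** 0.5
    return Af * L + pi * a * h * h * (1.0 - h / (3.0 * R))

print(v_horiz_ellipsoidal(D=108, L=156, a=42, h=36) / 231.0)  # ~2380.96, matching the docstring example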
quantopian/zipline
zipline/utils/pandas_utils.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/pandas_utils.py#L247-L287
def categorical_df_concat(df_list, inplace=False): """ Prepare list of pandas DataFrames to be used as input to pd.concat. Ensure any columns of type 'category' have the same categories across each dataframe. Parameters ---------- df_list : list List of dataframes with same columns. inplace : bool True if input list can be modified. Default is False. Returns ------- concatenated : df Dataframe of concatenated list. """ if not inplace: df_list = deepcopy(df_list) # Assert each dataframe has the same columns/dtypes df = df_list[0] if not all([(df.dtypes.equals(df_i.dtypes)) for df_i in df_list[1:]]): raise ValueError("Input DataFrames must have the same columns/dtypes.") categorical_columns = df.columns[df.dtypes == 'category'] for col in categorical_columns: new_categories = sorted( set().union( *(frame[col].cat.categories for frame in df_list) ) ) with ignore_pandas_nan_categorical_warning(): for df in df_list: df[col].cat.set_categories(new_categories, inplace=True) return pd.concat(df_list)
[ "def", "categorical_df_concat", "(", "df_list", ",", "inplace", "=", "False", ")", ":", "if", "not", "inplace", ":", "df_list", "=", "deepcopy", "(", "df_list", ")", "# Assert each dataframe has the same columns/dtypes", "df", "=", "df_list", "[", "0", "]", "if", "not", "all", "(", "[", "(", "df", ".", "dtypes", ".", "equals", "(", "df_i", ".", "dtypes", ")", ")", "for", "df_i", "in", "df_list", "[", "1", ":", "]", "]", ")", ":", "raise", "ValueError", "(", "\"Input DataFrames must have the same columns/dtypes.\"", ")", "categorical_columns", "=", "df", ".", "columns", "[", "df", ".", "dtypes", "==", "'category'", "]", "for", "col", "in", "categorical_columns", ":", "new_categories", "=", "sorted", "(", "set", "(", ")", ".", "union", "(", "*", "(", "frame", "[", "col", "]", ".", "cat", ".", "categories", "for", "frame", "in", "df_list", ")", ")", ")", "with", "ignore_pandas_nan_categorical_warning", "(", ")", ":", "for", "df", "in", "df_list", ":", "df", "[", "col", "]", ".", "cat", ".", "set_categories", "(", "new_categories", ",", "inplace", "=", "True", ")", "return", "pd", ".", "concat", "(", "df_list", ")" ]
Prepare list of pandas DataFrames to be used as input to pd.concat. Ensure any columns of type 'category' have the same categories across each dataframe. Parameters ---------- df_list : list List of dataframes with same columns. inplace : bool True if input list can be modified. Default is False. Returns ------- concatenated : df Dataframe of concatenated list.
[ "Prepare", "list", "of", "pandas", "DataFrames", "to", "be", "used", "as", "input", "to", "pd", ".", "concat", ".", "Ensure", "any", "columns", "of", "type", "category", "have", "the", "same", "categories", "across", "each", "dataframe", "." ]
python
train
28.853659
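To see why the category alignment matters, here is a standalone pandas sketch of the same idea (plain pandas, not a call into zipline itself): unifying the category sets before concatenation keeps the column categorical instead of letting it decay to object dtype.

import pandas as pd

df1 = pd.DataFrame({'label': pd.Categorical(['a', 'b'])})
df2 = pd.DataFrame({'label': pd.Categorical(['b', 'c'])})

# Union of categories across both frames, as categorical_df_concat computes per column
categories = sorted(set(df1['label'].cat.categories) | set(df2['label'].cat.categories))
df1['label'] = df1['label'].cat.set_categories(categories)
df2['label'] = df2['label'].cat.set_categories(categories)

combined = pd.concat([df1, df2], ignore_index=True)
print(combined['label'].dtype)  # category, with categories ['a', 'b', 'c']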
DLR-RM/RAFCON
source/rafcon/gui/controllers/state_machine_tree.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_machine_tree.py#L300-L336
def update(self, changed_state_model=None, with_expand=False): """Checks if all states are in tree and if tree has states which were deleted :param changed_state_model: Model that row has to be updated :param with_expand: The expand flag for the tree """ if not self.view_is_registered: return # define initial state-model for update if changed_state_model is None: # reset all parent_row_iter = None self.state_row_iter_dict_by_state_path.clear() self.tree_store.clear() if self._selected_sm_model: changed_state_model = self._selected_sm_model.root_state else: return else: # pick if changed_state_model.state.is_root_state: parent_row_iter = self.state_row_iter_dict_by_state_path[changed_state_model.state.get_path()] else: if changed_state_model.state.is_root_state_of_library: # because either lib-state or lib-state-root is in tree the next higher hierarchy state is updated changed_upper_state_m = changed_state_model.parent.parent else: changed_upper_state_m = changed_state_model.parent # TODO check the work around of the next 2 lines while refactoring -> it is a check to be more robust while changed_upper_state_m.state.get_path() not in self.state_row_iter_dict_by_state_path: # show Warning because because avoided method states_update logger.warning("Take a parent state because this is not in.") changed_upper_state_m = changed_upper_state_m.parent parent_row_iter = self.state_row_iter_dict_by_state_path[changed_upper_state_m.state.get_path()] # do recursive update self.insert_and_update_recursively(parent_row_iter, changed_state_model, with_expand)
[ "def", "update", "(", "self", ",", "changed_state_model", "=", "None", ",", "with_expand", "=", "False", ")", ":", "if", "not", "self", ".", "view_is_registered", ":", "return", "# define initial state-model for update", "if", "changed_state_model", "is", "None", ":", "# reset all", "parent_row_iter", "=", "None", "self", ".", "state_row_iter_dict_by_state_path", ".", "clear", "(", ")", "self", ".", "tree_store", ".", "clear", "(", ")", "if", "self", ".", "_selected_sm_model", ":", "changed_state_model", "=", "self", ".", "_selected_sm_model", ".", "root_state", "else", ":", "return", "else", ":", "# pick", "if", "changed_state_model", ".", "state", ".", "is_root_state", ":", "parent_row_iter", "=", "self", ".", "state_row_iter_dict_by_state_path", "[", "changed_state_model", ".", "state", ".", "get_path", "(", ")", "]", "else", ":", "if", "changed_state_model", ".", "state", ".", "is_root_state_of_library", ":", "# because either lib-state or lib-state-root is in tree the next higher hierarchy state is updated", "changed_upper_state_m", "=", "changed_state_model", ".", "parent", ".", "parent", "else", ":", "changed_upper_state_m", "=", "changed_state_model", ".", "parent", "# TODO check the work around of the next 2 lines while refactoring -> it is a check to be more robust", "while", "changed_upper_state_m", ".", "state", ".", "get_path", "(", ")", "not", "in", "self", ".", "state_row_iter_dict_by_state_path", ":", "# show Warning because because avoided method states_update", "logger", ".", "warning", "(", "\"Take a parent state because this is not in.\"", ")", "changed_upper_state_m", "=", "changed_upper_state_m", ".", "parent", "parent_row_iter", "=", "self", ".", "state_row_iter_dict_by_state_path", "[", "changed_upper_state_m", ".", "state", ".", "get_path", "(", ")", "]", "# do recursive update", "self", ".", "insert_and_update_recursively", "(", "parent_row_iter", ",", "changed_state_model", ",", "with_expand", ")" ]
Checks if all states are in tree and if tree has states which were deleted :param changed_state_model: Model that row has to be updated :param with_expand: The expand flag for the tree
[ "Checks", "if", "all", "states", "are", "in", "tree", "and", "if", "tree", "has", "states", "which", "were", "deleted" ]
python
train
53.351351
devassistant/devassistant
devassistant/cache.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/cache.py#L94-L139
def _refresh_hierarchy_recursive(self, cached_hierarchy, file_hierarchy): """Recursively goes through given corresponding hierarchies from cache and filesystem and adds/refreshes/removes added/changed/removed assistants. Args: cached_hierarchy: the respective hierarchy part from current cache (for format see Cache class docstring) file_hierarchy: the respective hierarchy part from filesystem (for format see what refresh_role accepts) Returns: True if self.cache has been changed, False otherwise (doesn't write anything to cache file) """ was_change = False cached_ass = set(cached_hierarchy.keys()) new_ass = set(file_hierarchy.keys()) to_add = new_ass - cached_ass to_remove = cached_ass - new_ass to_check = cached_ass - to_remove if to_add or to_remove: was_change = True for ass in to_add: cached_hierarchy[ass] = self._new_ass_hierarchy(file_hierarchy[ass]) for ass in to_remove: del cached_hierarchy[ass] for ass in to_check: needs_refresh = False try: needs_refresh = self._ass_needs_refresh(cached_hierarchy[ass], file_hierarchy[ass]) except: needs_refresh = True if needs_refresh: self._ass_refresh_attrs(cached_hierarchy[ass], file_hierarchy[ass]) was_change = True was_change |= self._refresh_hierarchy_recursive( cached_hierarchy[ass]['subhierarchy'], file_hierarchy[ass]['subhierarchy']) return was_change
[ "def", "_refresh_hierarchy_recursive", "(", "self", ",", "cached_hierarchy", ",", "file_hierarchy", ")", ":", "was_change", "=", "False", "cached_ass", "=", "set", "(", "cached_hierarchy", ".", "keys", "(", ")", ")", "new_ass", "=", "set", "(", "file_hierarchy", ".", "keys", "(", ")", ")", "to_add", "=", "new_ass", "-", "cached_ass", "to_remove", "=", "cached_ass", "-", "new_ass", "to_check", "=", "cached_ass", "-", "to_remove", "if", "to_add", "or", "to_remove", ":", "was_change", "=", "True", "for", "ass", "in", "to_add", ":", "cached_hierarchy", "[", "ass", "]", "=", "self", ".", "_new_ass_hierarchy", "(", "file_hierarchy", "[", "ass", "]", ")", "for", "ass", "in", "to_remove", ":", "del", "cached_hierarchy", "[", "ass", "]", "for", "ass", "in", "to_check", ":", "needs_refresh", "=", "False", "try", ":", "needs_refresh", "=", "self", ".", "_ass_needs_refresh", "(", "cached_hierarchy", "[", "ass", "]", ",", "file_hierarchy", "[", "ass", "]", ")", "except", ":", "needs_refresh", "=", "True", "if", "needs_refresh", ":", "self", ".", "_ass_refresh_attrs", "(", "cached_hierarchy", "[", "ass", "]", ",", "file_hierarchy", "[", "ass", "]", ")", "was_change", "=", "True", "was_change", "|=", "self", ".", "_refresh_hierarchy_recursive", "(", "cached_hierarchy", "[", "ass", "]", "[", "'subhierarchy'", "]", ",", "file_hierarchy", "[", "ass", "]", "[", "'subhierarchy'", "]", ")", "return", "was_change" ]
Recursively goes through given corresponding hierarchies from cache and filesystem and adds/refreshes/removes added/changed/removed assistants. Args: cached_hierarchy: the respective hierarchy part from current cache (for format see Cache class docstring) file_hierarchy: the respective hierarchy part from filesystem (for format see what refresh_role accepts) Returns: True if self.cache has been changed, False otherwise (doesn't write anything to cache file)
[ "Recursively", "goes", "through", "given", "corresponding", "hierarchies", "from", "cache", "and", "filesystem", "and", "adds", "/", "refreshes", "/", "removes", "added", "/", "changed", "/", "removed", "assistants", "." ]
python
train
37.23913
diamondman/proteusisc
proteusisc/drivers/digilentdriver.py
https://github.com/diamondman/proteusisc/blob/7622b7b04e63f9dc0f5a04429ff78d9a490c9c5c/proteusisc/drivers/digilentdriver.py#L170-L188
def _get_adv_trans_stats(self, cmd, return_tdo=False): """Utility function to fetch the transfer statistics for the last advanced transfer. Checking the stats appears to sync the controller. For details on the advanced transfer please refer to the documentation at http://diamondman.github.io/Adapt/cable_digilent_adept.html#bulk-requests """ t = time() code, res = self.bulkCommand(b'\x03\x02%c\x00'%(0x80|cmd), 10) if self._scanchain and self._scanchain._print_statistics: print("GET STATS TIME", time()-t)#pragma: no cover if len(res) == 4: count = struct.unpack('<I', res)[0] return count elif len(res) == 8: written, read = struct.unpack('<II', res) return written, read return res
[ "def", "_get_adv_trans_stats", "(", "self", ",", "cmd", ",", "return_tdo", "=", "False", ")", ":", "t", "=", "time", "(", ")", "code", ",", "res", "=", "self", ".", "bulkCommand", "(", "b'\\x03\\x02%c\\x00'", "%", "(", "0x80", "|", "cmd", ")", ",", "10", ")", "if", "self", ".", "_scanchain", "and", "self", ".", "_scanchain", ".", "_print_statistics", ":", "print", "(", "\"GET STATS TIME\"", ",", "time", "(", ")", "-", "t", ")", "#pragma: no cover", "if", "len", "(", "res", ")", "==", "4", ":", "count", "=", "struct", ".", "unpack", "(", "'<I'", ",", "res", ")", "[", "0", "]", "return", "count", "elif", "len", "(", "res", ")", "==", "8", ":", "written", ",", "read", "=", "struct", ".", "unpack", "(", "'<II'", ",", "res", ")", "return", "written", ",", "read", "return", "res" ]
Utility function to fetch the transfer statistics for the last advanced transfer. Checking the stats appears to sync the controller. For details on the advanced transfer please refer to the documentation at http://diamondman.github.io/Adapt/cable_digilent_adept.html#bulk-requests
[ "Utility", "function", "to", "fetch", "the", "transfer", "statistics", "for", "the", "last", "advanced", "transfer", ".", "Checking", "the", "stats", "appears", "to", "sync", "the", "controller", ".", "For", "details", "on", "the", "advanced", "transfer", "please", "refer", "to", "the", "documentation", "at", "http", ":", "//", "diamondman", ".", "github", ".", "io", "/", "Adapt", "/", "cable_digilent_adept", ".", "html#bulk", "-", "requests" ]
python
train
43.473684
openstack/proliantutils
proliantutils/redfish/resources/system/bios.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/bios.py#L52-L61
def pending_settings(self): """Property to provide reference to bios_pending_settings instance It is calculated once when the first time it is queried. On refresh, this property gets reset. """ return BIOSPendingSettings( self._conn, utils.get_subresource_path_by( self, ["@Redfish.Settings", "SettingsObject"]), redfish_version=self.redfish_version)
[ "def", "pending_settings", "(", "self", ")", ":", "return", "BIOSPendingSettings", "(", "self", ".", "_conn", ",", "utils", ".", "get_subresource_path_by", "(", "self", ",", "[", "\"@Redfish.Settings\"", ",", "\"SettingsObject\"", "]", ")", ",", "redfish_version", "=", "self", ".", "redfish_version", ")" ]
Property to provide reference to bios_pending_settings instance It is calculated once when the first time it is queried. On refresh, this property gets reset.
[ "Property", "to", "provide", "reference", "to", "bios_pending_settings", "instance" ]
python
train
42.2
reingart/gui2py
gui/controls/listview.py
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/controls/listview.py#L397-L402
def clear(self): "Remove all items and reset internal structures" dict.clear(self) self._key = 0 if hasattr(self._list_view, "wx_obj"): self._list_view.wx_obj.DeleteAllItems()
[ "def", "clear", "(", "self", ")", ":", "dict", ".", "clear", "(", "self", ")", "self", ".", "_key", "=", "0", "if", "hasattr", "(", "self", ".", "_list_view", ",", "\"wx_obj\"", ")", ":", "self", ".", "_list_view", ".", "wx_obj", ".", "DeleteAllItems", "(", ")" ]
Remove all items and reset internal structures
[ "Remove", "all", "items", "and", "reset", "internal", "structures" ]
python
test
36.5
dagwieers/vmguestlib
vmguestlib.py
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L325-L330
def GetMemBalloonMaxMB(self): '''Undocumented.''' counter = c_uint() ret = vmGuestLib.VMGuestLib_GetMemBalloonMaxMB(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value
[ "def", "GetMemBalloonMaxMB", "(", "self", ")", ":", "counter", "=", "c_uint", "(", ")", "ret", "=", "vmGuestLib", ".", "VMGuestLib_GetMemBalloonMaxMB", "(", "self", ".", "handle", ".", "value", ",", "byref", "(", "counter", ")", ")", "if", "ret", "!=", "VMGUESTLIB_ERROR_SUCCESS", ":", "raise", "VMGuestLibException", "(", "ret", ")", "return", "counter", ".", "value" ]
Undocumented.
[ "Undocumented", "." ]
python
train
45.5
mrstephenneal/dirutility
dirutility/compare.py
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/compare.py#L6-L13
def unique(list1, list2): """ Get unique items in list1 that are not in list2 :return: Unique items only in list 1 """ set2 = set(list2) list1_unique = [x for x in tqdm(list1, desc='Unique', total=len(list1)) if x not in set2] return list1_unique
[ "def", "unique", "(", "list1", ",", "list2", ")", ":", "set2", "=", "set", "(", "list2", ")", "list1_unique", "=", "[", "x", "for", "x", "in", "tqdm", "(", "list1", ",", "desc", "=", "'Unique'", ",", "total", "=", "len", "(", "list1", ")", ")", "if", "x", "not", "in", "set2", "]", "return", "list1_unique" ]
Get unique items in list1 that are not in list2 :return: Unique items only in list 1
[ "Get", "unique", "items", "in", "list1", "that", "are", "not", "in", "list2", ":", "return", ":", "Unique", "items", "only", "in", "list", "1" ]
python
train
33.375
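A quick usage sketch (module path assumed from dirutility/compare.py above): the order of list1 is preserved, and tqdm only draws a progress bar during the filter.

from dirutility.compare import unique   # import path assumed from the record above

print(unique([1, 2, 3, 4], [2, 4]))      # [1, 3]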
jhermann/rituals
src/rituals/config.py
https://github.com/jhermann/rituals/blob/1534f50d81e19bbbe799e2eba0acdefbce047c06/src/rituals/config.py#L42-L49
def get_project_root(): """ Determine location of `tasks.py`.""" try: tasks_py = sys.modules['tasks'] except KeyError: return None else: return os.path.abspath(os.path.dirname(tasks_py.__file__))
[ "def", "get_project_root", "(", ")", ":", "try", ":", "tasks_py", "=", "sys", ".", "modules", "[", "'tasks'", "]", "except", "KeyError", ":", "return", "None", "else", ":", "return", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "tasks_py", ".", "__file__", ")", ")" ]
Determine location of `tasks.py`.
[ "Determine", "location", "of", "tasks", ".", "py", "." ]
python
valid
28.5
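Usage is trivial but worth spelling out (import path assumed from src/rituals/config.py): the helper returns None unless a module named 'tasks' has already been imported, otherwise the directory containing that tasks.py.

from rituals.config import get_project_root

print(get_project_root())   # None here, since no 'tasks' module was imported first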
aouyar/PyMunin
pysysinfo/diskio.py
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L367-L380
def getDevStats(self, dev, devtype = None): """Returns I/O stats for block device. @param dev: Device name @param devtype: Device type. (Ignored if None.) @return: Dict of stats. """ if devtype is not None: if self._devClassTree is None: self._initDevClasses() if devtype <> self._mapDevType.get(dev): return None return self._diskStats.get(dev)
[ "def", "getDevStats", "(", "self", ",", "dev", ",", "devtype", "=", "None", ")", ":", "if", "devtype", "is", "not", "None", ":", "if", "self", ".", "_devClassTree", "is", "None", ":", "self", ".", "_initDevClasses", "(", ")", "if", "devtype", "<>", "self", ".", "_mapDevType", ".", "get", "(", "dev", ")", ":", "return", "None", "return", "self", ".", "_diskStats", ".", "get", "(", "dev", ")" ]
Returns I/O stats for block device. @param dev: Device name @param devtype: Device type. (Ignored if None.) @return: Dict of stats.
[ "Returns", "I", "/", "O", "stats", "for", "block", "device", "." ]
python
train
33.785714
the01/python-paps
paps/si/app/sensorServer.py
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensorServer.py#L392-L405
def stop(self): """ Stop the sensor server (soft stop - signal packet loop to stop) Warning: Is non blocking (server might still do something after this!) :rtype: None """ self.debug("()") super(SensorServer, self).stop() # No new clients if self._multicast_socket is not None: self._shutdown_multicast_socket() # Signal packet loop to shutdown self._is_stopped.set()
[ "def", "stop", "(", "self", ")", ":", "self", ".", "debug", "(", "\"()\"", ")", "super", "(", "SensorServer", ",", "self", ")", ".", "stop", "(", ")", "# No new clients", "if", "self", ".", "_multicast_socket", "is", "not", "None", ":", "self", ".", "_shutdown_multicast_socket", "(", ")", "# Signal packet loop to shutdown", "self", ".", "_is_stopped", ".", "set", "(", ")" ]
Stop the sensor server (soft stop - signal packet loop to stop) Warning: Is non blocking (server might still do something after this!) :rtype: None
[ "Stop", "the", "sensor", "server", "(", "soft", "stop", "-", "signal", "packet", "loop", "to", "stop", ")", "Warning", ":", "Is", "non", "blocking", "(", "server", "might", "still", "do", "something", "after", "this!", ")" ]
python
train
32.5
wtolson/gnsq
gnsq/nsqd.py
https://github.com/wtolson/gnsq/blob/0fd02578b2c9c5fa30626d78579db2a46c10edac/gnsq/nsqd.py#L453-L462
def requeue(self, message_id, timeout=0, backoff=True): """Re-queue a message (indicate failure to process).""" self.send(nsq.requeue(message_id, timeout)) self.finish_inflight() self.on_requeue.send( self, message_id=message_id, timeout=timeout, backoff=backoff )
[ "def", "requeue", "(", "self", ",", "message_id", ",", "timeout", "=", "0", ",", "backoff", "=", "True", ")", ":", "self", ".", "send", "(", "nsq", ".", "requeue", "(", "message_id", ",", "timeout", ")", ")", "self", ".", "finish_inflight", "(", ")", "self", ".", "on_requeue", ".", "send", "(", "self", ",", "message_id", "=", "message_id", ",", "timeout", "=", "timeout", ",", "backoff", "=", "backoff", ")" ]
Re-queue a message (indicate failure to process).
[ "Re", "-", "queue", "a", "message", "(", "indicate", "failure", "to", "process", ")", "." ]
python
train
34.3
iotaledger/iota.lib.py
iota/codecs.py
https://github.com/iotaledger/iota.lib.py/blob/97cdd1e241498446b46157b79b2a1ea2ec6d387a/iota/codecs.py#L61-L78
def get_codec_info(cls): """ Returns information used by the codecs library to configure the codec for use. """ codec = cls() codec_info = { 'encode': codec.encode, 'decode': codec.decode, } # In Python 2, all codecs are made equal. # In Python 3, some codecs are more equal than others. if PY3: codec_info['_is_text_encoding'] = False return CodecInfo(**codec_info)
[ "def", "get_codec_info", "(", "cls", ")", ":", "codec", "=", "cls", "(", ")", "codec_info", "=", "{", "'encode'", ":", "codec", ".", "encode", ",", "'decode'", ":", "codec", ".", "decode", ",", "}", "# In Python 2, all codecs are made equal.", "# In Python 3, some codecs are more equal than others.", "if", "PY3", ":", "codec_info", "[", "'_is_text_encoding'", "]", "=", "False", "return", "CodecInfo", "(", "*", "*", "codec_info", ")" ]
Returns information used by the codecs library to configure the codec for use.
[ "Returns", "information", "used", "by", "the", "codecs", "library", "to", "configure", "the", "codec", "for", "use", "." ]
python
test
26.444444
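The record above shows the usual pattern of building a codecs.CodecInfo from an instance's bound encode/decode methods. Below is a self-contained toy sketch of that pattern (a ROT13 passthrough codec, not iota's actual trytes codec), registered through a search function.

import codecs

class ToyCodec(object):
    """Illustrative stand-in for a custom codec class."""

    def encode(self, input, errors='strict'):
        return codecs.encode(input, 'rot_13'), len(input)

    def decode(self, input, errors='strict'):
        return codecs.decode(input, 'rot_13'), len(input)

    @classmethod
    def get_codec_info(cls):
        codec = cls()
        return codecs.CodecInfo(encode=codec.encode, decode=codec.decode, name='toy')

codecs.register(lambda name: ToyCodec.get_codec_info() if name == 'toy' else None)
print(codecs.encode('hello', 'toy'))   # 'uryyb'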
dcaune/perseus-lib-python-common
majormode/perseus/utils/rdbms.py
https://github.com/dcaune/perseus-lib-python-common/blob/ba48fe0fd9bb4a75b53e7d10c41ada36a72d4496/majormode/perseus/utils/rdbms.py#L596-L629
def _expand_placeholder_value(value): """ Return the SQL string representation of the specified placeholder's value. @param value: the value of a placeholder such as a simple element, a list, or a tuple of one string. @note: by convention, a tuple of one string indicates that this string MUST not be quoted as it represents, for instance, a called to a stored procedure, and not a textual content to modify into a table. @return: a SQL string representation. """ if isinstance(value, (list, set)) or (isinstance(value, tuple) and len(value) != 1): sql_value = ','.join( [ RdbmsConnection._to_sql_value( element if not isinstance(element, tuple) else element[0], noquote=isinstance(element, tuple)) for element in value ]) elif isinstance(value, tuple): assert len(value) == 1 value = value[0] assert value is None or isinstance(value, basestring), 'basestring expected instead of %s' % type(value) sql_value = RdbmsConnection._to_sql_value(value, True) else: sql_value = RdbmsConnection._to_sql_value(value) return sql_value
[ "def", "_expand_placeholder_value", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "list", ",", "set", ")", ")", "or", "(", "isinstance", "(", "value", ",", "tuple", ")", "and", "len", "(", "value", ")", "!=", "1", ")", ":", "sql_value", "=", "','", ".", "join", "(", "[", "RdbmsConnection", ".", "_to_sql_value", "(", "element", "if", "not", "isinstance", "(", "element", ",", "tuple", ")", "else", "element", "[", "0", "]", ",", "noquote", "=", "isinstance", "(", "element", ",", "tuple", ")", ")", "for", "element", "in", "value", "]", ")", "elif", "isinstance", "(", "value", ",", "tuple", ")", ":", "assert", "len", "(", "value", ")", "==", "1", "value", "=", "value", "[", "0", "]", "assert", "value", "is", "None", "or", "isinstance", "(", "value", ",", "basestring", ")", ",", "'basestring expected instead of %s'", "%", "type", "(", "value", ")", "sql_value", "=", "RdbmsConnection", ".", "_to_sql_value", "(", "value", ",", "True", ")", "else", ":", "sql_value", "=", "RdbmsConnection", ".", "_to_sql_value", "(", "value", ")", "return", "sql_value" ]
Return the SQL string representation of the specified placeholder's value. @param value: the value of a placeholder such as a simple element, a list, or a tuple of one string. @note: by convention, a tuple of one string indicates that this string MUST not be quoted as it represents, for instance, a called to a stored procedure, and not a textual content to modify into a table. @return: a SQL string representation.
[ "Return", "the", "SQL", "string", "representation", "of", "the", "specified", "placeholder", "s", "value", "." ]
python
train
37.558824
radjkarl/imgProcessor
imgProcessor/array/subCell2D.py
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/array/subCell2D.py#L74-L84
def subCell2DCoords(*args, **kwargs): '''Same as subCell2DSlices but returning coordinates Example: g = subCell2DCoords(arr, shape) for x, y in g: plt.plot(x, y) ''' for _, _, s0, s1 in subCell2DSlices(*args, **kwargs): yield ((s1.start, s1.start, s1.stop), (s0.start, s0.stop, s0.stop))
[ "def", "subCell2DCoords", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "_", ",", "_", ",", "s0", ",", "s1", "in", "subCell2DSlices", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "yield", "(", "(", "s1", ".", "start", ",", "s1", ".", "start", ",", "s1", ".", "stop", ")", ",", "(", "s0", ".", "start", ",", "s0", ".", "stop", ",", "s0", ".", "stop", ")", ")" ]
Same as subCell2DSlices but returning coordinates Example: g = subCell2DCoords(arr, shape) for x, y in g: plt.plot(x, y)
[ "Same", "as", "subCell2DSlices", "but", "returning", "coordinates", "Example", ":", "g", "=", "subCell2DCoords", "(", "arr", "shape", ")", "for", "x", "y", "in", "g", ":", "plt", ".", "plot", "(", "x", "y", ")" ]
python
train
32.727273
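A hedged, runnable version of the docstring's own example (import path assumed from imgProcessor/array/subCell2D.py; the 4x5 grid shape is illustrative): each yielded (x, y) pair traces two edges of one grid cell.

import numpy as np
import matplotlib.pyplot as plt
from imgProcessor.array.subCell2D import subCell2DCoords   # import path assumed from the record

arr = np.zeros((100, 200))
plt.imshow(arr)
for x, y in subCell2DCoords(arr, (4, 5)):   # split the array into a 4x5 grid of cells
    plt.plot(x, y)                          # each pair traces two edges of one cell
plt.show()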
benhoff/pluginmanager
pluginmanager/file_manager.py
https://github.com/benhoff/pluginmanager/blob/a8a184f9ebfbb521703492cb88c1dbda4cd04c06/pluginmanager/file_manager.py#L164-L177
def add_blacklisted_filepaths(self, filepaths, remove_from_stored=True): """ Add `filepaths` to blacklisted filepaths. If `remove_from_stored` is `True`, any `filepaths` in `plugin_filepaths` will be automatically removed. Recommend passing in absolute filepaths but method will attempt to convert to absolute filepaths based on current working directory. """ filepaths = util.to_absolute_paths(filepaths) self.blacklisted_filepaths.update(filepaths) if remove_from_stored: self.plugin_filepaths = util.remove_from_set(self.plugin_filepaths, filepaths)
[ "def", "add_blacklisted_filepaths", "(", "self", ",", "filepaths", ",", "remove_from_stored", "=", "True", ")", ":", "filepaths", "=", "util", ".", "to_absolute_paths", "(", "filepaths", ")", "self", ".", "blacklisted_filepaths", ".", "update", "(", "filepaths", ")", "if", "remove_from_stored", ":", "self", ".", "plugin_filepaths", "=", "util", ".", "remove_from_set", "(", "self", ".", "plugin_filepaths", ",", "filepaths", ")" ]
Add `filepaths` to blacklisted filepaths. If `remove_from_stored` is `True`, any `filepaths` in `plugin_filepaths` will be automatically removed. Recommend passing in absolute filepaths but method will attempt to convert to absolute filepaths based on current working directory.
[ "Add", "filepaths", "to", "blacklisted", "filepaths", ".", "If", "remove_from_stored", "is", "True", "any", "filepaths", "in", "plugin_filepaths", "will", "be", "automatically", "removed", "." ]
python
train
49.214286
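A usage sketch, with the caveat that the containing class and its sibling methods are assumed from the pluginmanager project layout rather than shown in this record: blacklisting a path converts it to an absolute path and, by default, also drops it from the stored plugin filepaths.

from pluginmanager.file_manager import FileManager   # class/module names assumed

manager = FileManager()
manager.add_plugin_filepaths(['plugins/alpha.py', 'plugins/beta.py'])   # hypothetical paths
manager.add_blacklisted_filepaths(['plugins/beta.py'])                  # also removed from plugin filepaths
print(manager.get_plugin_filepaths())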
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Taskmaster.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Taskmaster.py#L760-L950
def _find_next_ready_node(self): """ Finds the next node that is ready to be built. This is *the* main guts of the DAG walk. We loop through the list of candidates, looking for something that has no un-built children (i.e., that is a leaf Node or has dependencies that are all leaf Nodes or up-to-date). Candidate Nodes are re-scanned (both the target Node itself and its sources, which are always scanned in the context of a given target) to discover implicit dependencies. A Node that must wait for some children to be built will be put back on the candidates list after the children have finished building. A Node that has been put back on the candidates list in this way may have itself (or its sources) re-scanned, in order to handle generated header files (e.g.) and the implicit dependencies therein. Note that this method does not do any signature calculation or up-to-date check itself. All of that is handled by the Task class. This is purely concerned with the dependency graph walk. """ self.ready_exc = None T = self.trace if T: T.write(SCons.Util.UnicodeType('\n') + self.trace_message('Looking for a node to evaluate')) while True: node = self.next_candidate() if node is None: if T: T.write(self.trace_message('No candidate anymore.') + u'\n') return None node = node.disambiguate() state = node.get_state() # For debugging only: # # try: # self._validate_pending_children() # except: # self.ready_exc = sys.exc_info() # return node if CollectStats: if not hasattr(node.attributes, 'stats'): node.attributes.stats = Stats() StatsNodes.append(node) S = node.attributes.stats S.considered = S.considered + 1 else: S = None if T: T.write(self.trace_message(u' Considering node %s and its children:' % self.trace_node(node))) if state == NODE_NO_STATE: # Mark this node as being on the execution stack: node.set_state(NODE_PENDING) elif state > NODE_PENDING: # Skip this node if it has already been evaluated: if S: S.already_handled = S.already_handled + 1 if T: T.write(self.trace_message(u' already handled (executed)')) continue executor = node.get_executor() try: children = executor.get_all_children() except SystemExit: exc_value = sys.exc_info()[1] e = SCons.Errors.ExplicitExit(node, exc_value.code) self.ready_exc = (SCons.Errors.ExplicitExit, e) if T: T.write(self.trace_message(' SystemExit')) return node except Exception as e: # We had a problem just trying to figure out the # children (like a child couldn't be linked in to a # VariantDir, or a Scanner threw something). Arrange to # raise the exception when the Task is "executed." self.ready_exc = sys.exc_info() if S: S.problem = S.problem + 1 if T: T.write(self.trace_message(' exception %s while scanning children.\n' % e)) return node children_not_visited = [] children_pending = set() children_not_ready = [] children_failed = False for child in chain(executor.get_all_prerequisites(), children): childstate = child.get_state() if T: T.write(self.trace_message(u' ' + self.trace_node(child))) if childstate == NODE_NO_STATE: children_not_visited.append(child) elif childstate == NODE_PENDING: children_pending.add(child) elif childstate == NODE_FAILED: children_failed = True if childstate <= NODE_EXECUTING: children_not_ready.append(child) # These nodes have not even been visited yet. Add # them to the list so that on some next pass we can # take a stab at evaluating them (or their children). 
children_not_visited.reverse() self.candidates.extend(self.order(children_not_visited)) # if T and children_not_visited: # T.write(self.trace_message(' adding to candidates: %s' % map(str, children_not_visited))) # T.write(self.trace_message(' candidates now: %s\n' % map(str, self.candidates))) # Skip this node if any of its children have failed. # # This catches the case where we're descending a top-level # target and one of our children failed while trying to be # built by a *previous* descent of an earlier top-level # target. # # It can also occur if a node is reused in multiple # targets. One first descends though the one of the # target, the next time occurs through the other target. # # Note that we can only have failed_children if the # --keep-going flag was used, because without it the build # will stop before diving in the other branch. # # Note that even if one of the children fails, we still # added the other children to the list of candidate nodes # to keep on building (--keep-going). if children_failed: for n in executor.get_action_targets(): n.set_state(NODE_FAILED) if S: S.child_failed = S.child_failed + 1 if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node))) continue if children_not_ready: for child in children_not_ready: # We're waiting on one or more derived targets # that have not yet finished building. if S: S.not_built = S.not_built + 1 # Add this node to the waiting parents lists of # anything we're waiting on, with a reference # count so we can be put back on the list for # re-evaluation when they've all finished. node.ref_count = node.ref_count + child.add_to_waiting_parents(node) if T: T.write(self.trace_message(u' adjusted ref count: %s, child %s' % (self.trace_node(node), repr(str(child))))) if T: for pc in children_pending: T.write(self.trace_message(' adding %s to the pending children set\n' % self.trace_node(pc))) self.pending_children = self.pending_children | children_pending continue # Skip this node if it has side-effects that are # currently being built: wait_side_effects = False for se in executor.get_action_side_effects(): if se.get_state() == NODE_EXECUTING: se.add_to_waiting_s_e(node) wait_side_effects = True if wait_side_effects: if S: S.side_effects = S.side_effects + 1 continue # The default when we've gotten through all of the checks above: # this node is ready to be built. if S: S.build = S.build + 1 if T: T.write(self.trace_message(u'Evaluating %s\n' % self.trace_node(node))) # For debugging only: # # try: # self._validate_pending_children() # except: # self.ready_exc = sys.exc_info() # return node return node return None
[ "def", "_find_next_ready_node", "(", "self", ")", ":", "self", ".", "ready_exc", "=", "None", "T", "=", "self", ".", "trace", "if", "T", ":", "T", ".", "write", "(", "SCons", ".", "Util", ".", "UnicodeType", "(", "'\\n'", ")", "+", "self", ".", "trace_message", "(", "'Looking for a node to evaluate'", ")", ")", "while", "True", ":", "node", "=", "self", ".", "next_candidate", "(", ")", "if", "node", "is", "None", ":", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "'No candidate anymore.'", ")", "+", "u'\\n'", ")", "return", "None", "node", "=", "node", ".", "disambiguate", "(", ")", "state", "=", "node", ".", "get_state", "(", ")", "# For debugging only:", "#", "# try:", "# self._validate_pending_children()", "# except:", "# self.ready_exc = sys.exc_info()", "# return node", "if", "CollectStats", ":", "if", "not", "hasattr", "(", "node", ".", "attributes", ",", "'stats'", ")", ":", "node", ".", "attributes", ".", "stats", "=", "Stats", "(", ")", "StatsNodes", ".", "append", "(", "node", ")", "S", "=", "node", ".", "attributes", ".", "stats", "S", ".", "considered", "=", "S", ".", "considered", "+", "1", "else", ":", "S", "=", "None", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "u' Considering node %s and its children:'", "%", "self", ".", "trace_node", "(", "node", ")", ")", ")", "if", "state", "==", "NODE_NO_STATE", ":", "# Mark this node as being on the execution stack:", "node", ".", "set_state", "(", "NODE_PENDING", ")", "elif", "state", ">", "NODE_PENDING", ":", "# Skip this node if it has already been evaluated:", "if", "S", ":", "S", ".", "already_handled", "=", "S", ".", "already_handled", "+", "1", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "u' already handled (executed)'", ")", ")", "continue", "executor", "=", "node", ".", "get_executor", "(", ")", "try", ":", "children", "=", "executor", ".", "get_all_children", "(", ")", "except", "SystemExit", ":", "exc_value", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "e", "=", "SCons", ".", "Errors", ".", "ExplicitExit", "(", "node", ",", "exc_value", ".", "code", ")", "self", ".", "ready_exc", "=", "(", "SCons", ".", "Errors", ".", "ExplicitExit", ",", "e", ")", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "' SystemExit'", ")", ")", "return", "node", "except", "Exception", "as", "e", ":", "# We had a problem just trying to figure out the", "# children (like a child couldn't be linked in to a", "# VariantDir, or a Scanner threw something). 
Arrange to", "# raise the exception when the Task is \"executed.\"", "self", ".", "ready_exc", "=", "sys", ".", "exc_info", "(", ")", "if", "S", ":", "S", ".", "problem", "=", "S", ".", "problem", "+", "1", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "' exception %s while scanning children.\\n'", "%", "e", ")", ")", "return", "node", "children_not_visited", "=", "[", "]", "children_pending", "=", "set", "(", ")", "children_not_ready", "=", "[", "]", "children_failed", "=", "False", "for", "child", "in", "chain", "(", "executor", ".", "get_all_prerequisites", "(", ")", ",", "children", ")", ":", "childstate", "=", "child", ".", "get_state", "(", ")", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "u' '", "+", "self", ".", "trace_node", "(", "child", ")", ")", ")", "if", "childstate", "==", "NODE_NO_STATE", ":", "children_not_visited", ".", "append", "(", "child", ")", "elif", "childstate", "==", "NODE_PENDING", ":", "children_pending", ".", "add", "(", "child", ")", "elif", "childstate", "==", "NODE_FAILED", ":", "children_failed", "=", "True", "if", "childstate", "<=", "NODE_EXECUTING", ":", "children_not_ready", ".", "append", "(", "child", ")", "# These nodes have not even been visited yet. Add", "# them to the list so that on some next pass we can", "# take a stab at evaluating them (or their children).", "children_not_visited", ".", "reverse", "(", ")", "self", ".", "candidates", ".", "extend", "(", "self", ".", "order", "(", "children_not_visited", ")", ")", "# if T and children_not_visited:", "# T.write(self.trace_message(' adding to candidates: %s' % map(str, children_not_visited)))", "# T.write(self.trace_message(' candidates now: %s\\n' % map(str, self.candidates)))", "# Skip this node if any of its children have failed.", "#", "# This catches the case where we're descending a top-level", "# target and one of our children failed while trying to be", "# built by a *previous* descent of an earlier top-level", "# target.", "#", "# It can also occur if a node is reused in multiple", "# targets. 
One first descends though the one of the", "# target, the next time occurs through the other target.", "#", "# Note that we can only have failed_children if the", "# --keep-going flag was used, because without it the build", "# will stop before diving in the other branch.", "#", "# Note that even if one of the children fails, we still", "# added the other children to the list of candidate nodes", "# to keep on building (--keep-going).", "if", "children_failed", ":", "for", "n", "in", "executor", ".", "get_action_targets", "(", ")", ":", "n", ".", "set_state", "(", "NODE_FAILED", ")", "if", "S", ":", "S", ".", "child_failed", "=", "S", ".", "child_failed", "+", "1", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "'****** %s\\n'", "%", "self", ".", "trace_node", "(", "node", ")", ")", ")", "continue", "if", "children_not_ready", ":", "for", "child", "in", "children_not_ready", ":", "# We're waiting on one or more derived targets", "# that have not yet finished building.", "if", "S", ":", "S", ".", "not_built", "=", "S", ".", "not_built", "+", "1", "# Add this node to the waiting parents lists of", "# anything we're waiting on, with a reference", "# count so we can be put back on the list for", "# re-evaluation when they've all finished.", "node", ".", "ref_count", "=", "node", ".", "ref_count", "+", "child", ".", "add_to_waiting_parents", "(", "node", ")", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "u' adjusted ref count: %s, child %s'", "%", "(", "self", ".", "trace_node", "(", "node", ")", ",", "repr", "(", "str", "(", "child", ")", ")", ")", ")", ")", "if", "T", ":", "for", "pc", "in", "children_pending", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "' adding %s to the pending children set\\n'", "%", "self", ".", "trace_node", "(", "pc", ")", ")", ")", "self", ".", "pending_children", "=", "self", ".", "pending_children", "|", "children_pending", "continue", "# Skip this node if it has side-effects that are", "# currently being built:", "wait_side_effects", "=", "False", "for", "se", "in", "executor", ".", "get_action_side_effects", "(", ")", ":", "if", "se", ".", "get_state", "(", ")", "==", "NODE_EXECUTING", ":", "se", ".", "add_to_waiting_s_e", "(", "node", ")", "wait_side_effects", "=", "True", "if", "wait_side_effects", ":", "if", "S", ":", "S", ".", "side_effects", "=", "S", ".", "side_effects", "+", "1", "continue", "# The default when we've gotten through all of the checks above:", "# this node is ready to be built.", "if", "S", ":", "S", ".", "build", "=", "S", ".", "build", "+", "1", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "u'Evaluating %s\\n'", "%", "self", ".", "trace_node", "(", "node", ")", ")", ")", "# For debugging only:", "#", "# try:", "# self._validate_pending_children()", "# except:", "# self.ready_exc = sys.exc_info()", "# return node", "return", "node", "return", "None" ]
Finds the next node that is ready to be built. This is *the* main guts of the DAG walk. We loop through the list of candidates, looking for something that has no un-built children (i.e., that is a leaf Node or has dependencies that are all leaf Nodes or up-to-date). Candidate Nodes are re-scanned (both the target Node itself and its sources, which are always scanned in the context of a given target) to discover implicit dependencies. A Node that must wait for some children to be built will be put back on the candidates list after the children have finished building. A Node that has been put back on the candidates list in this way may have itself (or its sources) re-scanned, in order to handle generated header files (e.g.) and the implicit dependencies therein. Note that this method does not do any signature calculation or up-to-date check itself. All of that is handled by the Task class. This is purely concerned with the dependency graph walk.
[ "Finds", "the", "next", "node", "that", "is", "ready", "to", "be", "built", "." ]
python
train
42.753927
insightindustry/validator-collection
validator_collection/validators.py
https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/validators.py#L716-L892
def datetime(value, allow_empty = False, minimum = None, maximum = None, coerce_value = True, **kwargs): """Validate that ``value`` is a valid datetime. .. caution:: If supplying a string, the string needs to be in an ISO 8601-format to pass validation. If it is not in an ISO 8601-format, validation will fail. :param value: The value to validate. :type value: :class:`str <python:str>` / :class:`datetime <python:datetime.datetime>` / :class:`date <python:datetime.date>` / :obj:`None <python:None>` :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :param minimum: If supplied, will make sure that ``value`` is on or after this value. :type minimum: :class:`datetime <python:datetime.datetime>` / :class:`date <python:datetime.date>` / compliant :class:`str <python:str>` / :obj:`None <python:None>` :param maximum: If supplied, will make sure that ``value`` is on or before this value. :type maximum: :class:`datetime <python:datetime.datetime>` / :class:`date <python:datetime.date>` / compliant :class:`str <python:str>` / :obj:`None <python:None>` :param coerce_value: If ``True``, will coerce dates to :class:`datetime <python:datetime.datetime>` objects with times of 00:00:00. If ``False``, will error if ``value`` is not an unambiguous timestamp. Defaults to ``True``. :type coerce_value: :class:`bool <python:bool>` :returns: ``value`` / :obj:`None <python:None>` :rtype: :class:`datetime <python:datetime.datetime>` / :obj:`None <python:None>` :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False`` :raises CannotCoerceError: if ``value`` cannot be coerced to a :class:`datetime <python:datetime.datetime>` value and is not :obj:`None <python:None>` :raises MinimumValueError: if ``minimum`` is supplied but ``value`` occurs before ``minimum`` :raises MaximumValueError: if ``maximum`` is supplied but ``value`` occurs after ``minimum`` """ # pylint: disable=too-many-branches if not value and not allow_empty: raise errors.EmptyValueError('value (%s) was empty' % value) elif not value: return None minimum = datetime(minimum, allow_empty = True, force_run = True) # pylint: disable=E1123 maximum = datetime(maximum, allow_empty = True, force_run = True) # pylint: disable=E1123 if not isinstance(value, datetime_types): raise errors.CannotCoerceError( 'value (%s) must be a date object, datetime object, ' 'ISO 8601-formatted string, ' 'or POSIX timestamp, but was %s' % (value, type(value)) ) elif isinstance(value, timestamp_types) and coerce_value: try: value = datetime_.datetime.fromtimestamp(value) except ValueError: raise errors.CannotCoerceError( 'value (%s) must be a date object, datetime object, ' 'ISO 8601-formatted string, ' 'or POSIX timestamp, but was %s' % (value, type(value)) ) elif isinstance(value, str): # pylint: disable=line-too-long try: if 'T' in value: value = datetime_.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f%z') else: value = datetime_.datetime.strptime(value, '%Y-%m-%d %H:%M:%S.%f%z') except ValueError: try: if 'T' in value: value = datetime_.datetime.strptime(value, '%Y/%m/%dT%H:%M:%S%z') else: value = datetime_.datetime.strptime(value, '%Y/%m/%d %H:%M:%S%z') except ValueError: try: if 'T' in value: value = datetime_.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S%z') else: value = datetime_.datetime.strptime(value, '%Y-%m-%d %H:%M:%S%z') 
except ValueError: try: if 'T' in value: value = datetime_.datetime.strptime(value, '%Y/%m/%dT%H:%M:%S%z') else: value = datetime_.datetime.strptime(value, '%Y/%m/%d %H:%M:%S%z') except ValueError: try: if 'T' in value: value = datetime_.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f') else: value = datetime_.datetime.strptime(value, '%Y-%m-%d %H:%M:%S.%f') except ValueError: try: if 'T' in value: value = datetime_.datetime.strptime(value, '%Y/%m/%dT%H:%M:%S') else: value = datetime_.datetime.strptime(value, '%Y/%m/%d %H:%M:%S') except ValueError: try: if 'T' in value: value = datetime_.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S') else: value = datetime_.datetime.strptime(value, '%Y-%m-%d %H:%M:%S') except ValueError: try: if 'T' in value: value = datetime_.datetime.strptime(value, '%Y/%m/%dT%H:%M:%S') else: value = datetime_.datetime.strptime(value, '%Y/%m/%d %H:%M:%S') except ValueError: if coerce_value: value = date(value) else: raise errors.CannotCoerceError( 'value (%s) must be a datetime object, ' 'ISO 8601-formatted string, ' 'or POSIX timestamp' % value ) # pylint: enable=line-too-long elif isinstance(value, numeric_types) and not coerce_value: raise errors.CannotCoerceError( 'value (%s) must be a datetime object, ' 'ISO 8601-formatted string, ' 'or POSIX timestamp' % value ) if isinstance(value, datetime_.date) and not isinstance(value, datetime_.datetime): if coerce_value: value = datetime_.datetime(value.year, # pylint: disable=R0204 value.month, value.day, 0, 0, 0, 0) else: raise errors.CannotCoerceError( 'value (%s) must be a datetime object, ' 'ISO 8601-formatted string, ' 'or POSIX timestamp' % value ) if minimum and value and value < minimum: raise errors.MinimumValueError( 'value (%s) is before the minimum given (%s)' % (value.isoformat(), minimum.isoformat()) ) if maximum and value and value > maximum: raise errors.MaximumValueError( 'value (%s) is after the maximum given (%s)' % (value.isoformat(), maximum.isoformat()) ) return value
[ "def", "datetime", "(", "value", ",", "allow_empty", "=", "False", ",", "minimum", "=", "None", ",", "maximum", "=", "None", ",", "coerce_value", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=too-many-branches", "if", "not", "value", "and", "not", "allow_empty", ":", "raise", "errors", ".", "EmptyValueError", "(", "'value (%s) was empty'", "%", "value", ")", "elif", "not", "value", ":", "return", "None", "minimum", "=", "datetime", "(", "minimum", ",", "allow_empty", "=", "True", ",", "force_run", "=", "True", ")", "# pylint: disable=E1123", "maximum", "=", "datetime", "(", "maximum", ",", "allow_empty", "=", "True", ",", "force_run", "=", "True", ")", "# pylint: disable=E1123", "if", "not", "isinstance", "(", "value", ",", "datetime_types", ")", ":", "raise", "errors", ".", "CannotCoerceError", "(", "'value (%s) must be a date object, datetime object, '", "'ISO 8601-formatted string, '", "'or POSIX timestamp, but was %s'", "%", "(", "value", ",", "type", "(", "value", ")", ")", ")", "elif", "isinstance", "(", "value", ",", "timestamp_types", ")", "and", "coerce_value", ":", "try", ":", "value", "=", "datetime_", ".", "datetime", ".", "fromtimestamp", "(", "value", ")", "except", "ValueError", ":", "raise", "errors", ".", "CannotCoerceError", "(", "'value (%s) must be a date object, datetime object, '", "'ISO 8601-formatted string, '", "'or POSIX timestamp, but was %s'", "%", "(", "value", ",", "type", "(", "value", ")", ")", ")", "elif", "isinstance", "(", "value", ",", "str", ")", ":", "# pylint: disable=line-too-long", "try", ":", "if", "'T'", "in", "value", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y-%m-%dT%H:%M:%S.%f%z'", ")", "else", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y-%m-%d %H:%M:%S.%f%z'", ")", "except", "ValueError", ":", "try", ":", "if", "'T'", "in", "value", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y/%m/%dT%H:%M:%S%z'", ")", "else", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y/%m/%d %H:%M:%S%z'", ")", "except", "ValueError", ":", "try", ":", "if", "'T'", "in", "value", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y-%m-%dT%H:%M:%S%z'", ")", "else", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y-%m-%d %H:%M:%S%z'", ")", "except", "ValueError", ":", "try", ":", "if", "'T'", "in", "value", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y/%m/%dT%H:%M:%S%z'", ")", "else", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y/%m/%d %H:%M:%S%z'", ")", "except", "ValueError", ":", "try", ":", "if", "'T'", "in", "value", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y-%m-%dT%H:%M:%S.%f'", ")", "else", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y-%m-%d %H:%M:%S.%f'", ")", "except", "ValueError", ":", "try", ":", "if", "'T'", "in", "value", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y/%m/%dT%H:%M:%S'", ")", "else", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y/%m/%d %H:%M:%S'", ")", "except", "ValueError", ":", "try", ":", "if", "'T'", "in", "value", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", 
"'%Y-%m-%dT%H:%M:%S'", ")", "else", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y-%m-%d %H:%M:%S'", ")", "except", "ValueError", ":", "try", ":", "if", "'T'", "in", "value", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y/%m/%dT%H:%M:%S'", ")", "else", ":", "value", "=", "datetime_", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y/%m/%d %H:%M:%S'", ")", "except", "ValueError", ":", "if", "coerce_value", ":", "value", "=", "date", "(", "value", ")", "else", ":", "raise", "errors", ".", "CannotCoerceError", "(", "'value (%s) must be a datetime object, '", "'ISO 8601-formatted string, '", "'or POSIX timestamp'", "%", "value", ")", "# pylint: enable=line-too-long", "elif", "isinstance", "(", "value", ",", "numeric_types", ")", "and", "not", "coerce_value", ":", "raise", "errors", ".", "CannotCoerceError", "(", "'value (%s) must be a datetime object, '", "'ISO 8601-formatted string, '", "'or POSIX timestamp'", "%", "value", ")", "if", "isinstance", "(", "value", ",", "datetime_", ".", "date", ")", "and", "not", "isinstance", "(", "value", ",", "datetime_", ".", "datetime", ")", ":", "if", "coerce_value", ":", "value", "=", "datetime_", ".", "datetime", "(", "value", ".", "year", ",", "# pylint: disable=R0204", "value", ".", "month", ",", "value", ".", "day", ",", "0", ",", "0", ",", "0", ",", "0", ")", "else", ":", "raise", "errors", ".", "CannotCoerceError", "(", "'value (%s) must be a datetime object, '", "'ISO 8601-formatted string, '", "'or POSIX timestamp'", "%", "value", ")", "if", "minimum", "and", "value", "and", "value", "<", "minimum", ":", "raise", "errors", ".", "MinimumValueError", "(", "'value (%s) is before the minimum given (%s)'", "%", "(", "value", ".", "isoformat", "(", ")", ",", "minimum", ".", "isoformat", "(", ")", ")", ")", "if", "maximum", "and", "value", "and", "value", ">", "maximum", ":", "raise", "errors", ".", "MaximumValueError", "(", "'value (%s) is after the maximum given (%s)'", "%", "(", "value", ".", "isoformat", "(", ")", ",", "maximum", ".", "isoformat", "(", ")", ")", ")", "return", "value" ]
Validate that ``value`` is a valid datetime. .. caution:: If supplying a string, the string needs to be in an ISO 8601-format to pass validation. If it is not in an ISO 8601-format, validation will fail. :param value: The value to validate. :type value: :class:`str <python:str>` / :class:`datetime <python:datetime.datetime>` / :class:`date <python:datetime.date>` / :obj:`None <python:None>` :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises an :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :param minimum: If supplied, will make sure that ``value`` is on or after this value. :type minimum: :class:`datetime <python:datetime.datetime>` / :class:`date <python:datetime.date>` / compliant :class:`str <python:str>` / :obj:`None <python:None>` :param maximum: If supplied, will make sure that ``value`` is on or before this value. :type maximum: :class:`datetime <python:datetime.datetime>` / :class:`date <python:datetime.date>` / compliant :class:`str <python:str>` / :obj:`None <python:None>` :param coerce_value: If ``True``, will coerce dates to :class:`datetime <python:datetime.datetime>` objects with times of 00:00:00. If ``False``, will error if ``value`` is not an unambiguous timestamp. Defaults to ``True``. :type coerce_value: :class:`bool <python:bool>` :returns: ``value`` / :obj:`None <python:None>` :rtype: :class:`datetime <python:datetime.datetime>` / :obj:`None <python:None>` :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False`` :raises CannotCoerceError: if ``value`` cannot be coerced to a :class:`datetime <python:datetime.datetime>` value and is not :obj:`None <python:None>` :raises MinimumValueError: if ``minimum`` is supplied but ``value`` occurs before ``minimum`` :raises MaximumValueError: if ``maximum`` is supplied but ``value`` occurs after ``maximum``
[ "Validate", "that", "value", "is", "a", "valid", "datetime", "." ]
python
train
47.79661
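A hedged usage sketch for the datetime validator recorded above (not part of the original dataset entry). It assumes the function is exposed as validator_collection.validators.datetime; that import path is inferred from the validator_collection.errors references in the code and may differ.

import datetime as dt

from validator_collection import validators, errors  # assumed import path

# ISO 8601 strings are parsed into datetime objects.
value = validators.datetime('2018-01-01T12:00:00')
assert value == dt.datetime(2018, 1, 1, 12, 0, 0)

# Bare dates are coerced to midnight datetimes while coerce_value is True (the default).
coerced = validators.datetime(dt.date(2018, 1, 1))
assert coerced == dt.datetime(2018, 1, 1, 0, 0, 0)

# minimum / maximum bounds raise the documented errors.
try:
    validators.datetime('2017-12-31', minimum='2018-01-01')
except errors.MinimumValueError:
    pass  # value falls before the minimum

# With coerce_value=False a bare date is rejected instead of being coerced.
try:
    validators.datetime(dt.date(2018, 1, 1), coerce_value=False)
except errors.CannotCoerceError:
    pass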
michael-lazar/rtv
rtv/packages/praw/__init__.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L1008-L1024
def get_random_subreddit(self, nsfw=False): """Return a random Subreddit object. :param nsfw: When true, return a random NSFW Subreddit object. Calling in this manner will set the 'over18' cookie for the duration of the PRAW session. """ path = 'random' if nsfw: self.http.cookies.set('over18', '1') path = 'randnsfw' url = self.config['subreddit'].format(subreddit=path) response = self._request(url, params={'unique': self._unique_count}, raw_response=True) self._unique_count += 1 return self.get_subreddit(response.url.rsplit('/', 2)[-2])
[ "def", "get_random_subreddit", "(", "self", ",", "nsfw", "=", "False", ")", ":", "path", "=", "'random'", "if", "nsfw", ":", "self", ".", "http", ".", "cookies", ".", "set", "(", "'over18'", ",", "'1'", ")", "path", "=", "'randnsfw'", "url", "=", "self", ".", "config", "[", "'subreddit'", "]", ".", "format", "(", "subreddit", "=", "path", ")", "response", "=", "self", ".", "_request", "(", "url", ",", "params", "=", "{", "'unique'", ":", "self", ".", "_unique_count", "}", ",", "raw_response", "=", "True", ")", "self", ".", "_unique_count", "+=", "1", "return", "self", ".", "get_subreddit", "(", "response", ".", "url", ".", "rsplit", "(", "'/'", ",", "2", ")", "[", "-", "2", "]", ")" ]
Return a random Subreddit object. :param nsfw: When true, return a random NSFW Subreddit object. Calling in this manner will set the 'over18' cookie for the duration of the PRAW session.
[ "Return", "a", "random", "Subreddit", "object", "." ]
python
train
40.058824
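An illustrative call pattern for get_random_subreddit (not from the original record). It assumes a praw 3.x style Reddit session object, as bundled with rtv; the user agent string is a placeholder.

import praw  # rtv ships its own copy under rtv.packages.praw

r = praw.Reddit(user_agent='example-agent/0.1')

random_sub = r.get_random_subreddit()           # random SFW subreddit
nsfw_sub = r.get_random_subreddit(nsfw=True)    # also sets the 'over18' cookie
print(random_sub.display_name, nsfw_sub.display_name)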
log2timeline/plaso
plaso/parsers/bsm.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/bsm.py#L728-L744
def _ParseTokenType(self, file_object, file_offset): """Parses a token type. Args: file_object (dfvfs.FileIO): file-like object. file_offset (int): offset of the token relative to the start of the file-like object. Returns: int: token type """ token_type_map = self._GetDataTypeMap('uint8') token_type, _ = self._ReadStructureFromFileObject( file_object, file_offset, token_type_map) return token_type
[ "def", "_ParseTokenType", "(", "self", ",", "file_object", ",", "file_offset", ")", ":", "token_type_map", "=", "self", ".", "_GetDataTypeMap", "(", "'uint8'", ")", "token_type", ",", "_", "=", "self", ".", "_ReadStructureFromFileObject", "(", "file_object", ",", "file_offset", ",", "token_type_map", ")", "return", "token_type" ]
Parses a token type. Args: file_object (dfvfs.FileIO): file-like object. file_offset (int): offset of the token relative to the start of the file-like object. Returns: int: token type
[ "Parses", "a", "token", "type", "." ]
python
train
26.647059
smarie/python-parsyfiles
parsyfiles/parsing_core_api.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_core_api.py#L275-L291
def create_for_wrong_result_type(parser: _BaseParserDeclarationForRegistries, desired_type: Type[T], obj: PersistedObject, result: T, options: Dict[str, Dict[str, Any]]): """ Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parser: :param desired_type: :param obj: :param result: :param options: :return: """ msg = "Error while parsing {obj} as a {typ} with parser {p} using options=({opts}) - parser returned an object " \ "of wrong type {tret}: {ret}".format(obj=obj, typ=get_pretty_type_str(desired_type), p=parser, opts=options, tret=type(result), ret=result) return WrongTypeCreatedError(msg)
[ "def", "create_for_wrong_result_type", "(", "parser", ":", "_BaseParserDeclarationForRegistries", ",", "desired_type", ":", "Type", "[", "T", "]", ",", "obj", ":", "PersistedObject", ",", "result", ":", "T", ",", "options", ":", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "Any", "]", "]", ")", ":", "msg", "=", "\"Error while parsing {obj} as a {typ} with parser {p} using options=({opts}) - parser returned an object \"", "\"of wrong type {tret}: {ret}\"", ".", "format", "(", "obj", "=", "obj", ",", "typ", "=", "get_pretty_type_str", "(", "desired_type", ")", ",", "p", "=", "parser", ",", "opts", "=", "options", ",", "tret", "=", "type", "(", "result", ")", ",", "ret", "=", "result", ")", "return", "WrongTypeCreatedError", "(", "msg", ")" ]
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parser: :param desired_type: :param obj: :param result: :param options: :return:
[ "Helper", "method", "provided", "because", "we", "actually", "can", "t", "put", "that", "in", "the", "constructor", "it", "creates", "a", "bug", "in", "Nose", "tests", "https", ":", "//", "github", ".", "com", "/", "nose", "-", "devs", "/", "nose", "/", "issues", "/", "725" ]
python
train
52.470588
ruipgil/TrackToTrip
tracktotrip/track.py
https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/track.py#L82-L97
def segment(self, eps, min_time): """In-place segmentation of segments Spatio-temporal segmentation of each segment The number of segments may increse after this step Returns: This track """ new_segments = [] for segment in self.segments: segmented = segment.segment(eps, min_time) for seg in segmented: new_segments.append(Segment(seg)) self.segments = new_segments return self
[ "def", "segment", "(", "self", ",", "eps", ",", "min_time", ")", ":", "new_segments", "=", "[", "]", "for", "segment", "in", "self", ".", "segments", ":", "segmented", "=", "segment", ".", "segment", "(", "eps", ",", "min_time", ")", "for", "seg", "in", "segmented", ":", "new_segments", ".", "append", "(", "Segment", "(", "seg", ")", ")", "self", ".", "segments", "=", "new_segments", "return", "self" ]
In-place segmentation of segments Spatio-temporal segmentation of each segment The number of segments may increase after this step Returns: This track
[ "In", "-", "place", "segmentation", "of", "segments" ]
python
train
30.5625
Kaggle/kaggle-api
kaggle/api/kaggle_api.py
https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api.py#L973-L992
def datasets_create_new(self, dataset_new_request, **kwargs): # noqa: E501 """Create a new dataset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.datasets_create_new(dataset_new_request, async_req=True) >>> result = thread.get() :param async_req bool :param DatasetNewRequest dataset_new_request: Information for creating a new dataset (required) :return: Result If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.datasets_create_new_with_http_info(dataset_new_request, **kwargs) # noqa: E501 else: (data) = self.datasets_create_new_with_http_info(dataset_new_request, **kwargs) # noqa: E501 return data
[ "def", "datasets_create_new", "(", "self", ",", "dataset_new_request", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "datasets_create_new_with_http_info", "(", "dataset_new_request", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "datasets_create_new_with_http_info", "(", "dataset_new_request", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Create a new dataset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.datasets_create_new(dataset_new_request, async_req=True) >>> result = thread.get() :param async_req bool :param DatasetNewRequest dataset_new_request: Information for creating a new dataset (required) :return: Result If the method is called asynchronously, returns the request thread.
[ "Create", "a", "new", "dataset", "#", "noqa", ":", "E501" ]
python
train
48.1
scrapinghub/kafka-scanner
kafka_scanner/msg_processor_handlers.py
https://github.com/scrapinghub/kafka-scanner/blob/8a71901012e8c948180f70a485b57f8d2e7e3ec1/kafka_scanner/msg_processor_handlers.py#L85-L91
def decompress_messages(self, partitions_offmsgs): """ Decompress pre-defined compressed fields for each message. """ for pomsg in partitions_offmsgs: if pomsg['message']: pomsg['message'] = self.decompress_fun(pomsg['message']) yield pomsg
[ "def", "decompress_messages", "(", "self", ",", "partitions_offmsgs", ")", ":", "for", "pomsg", "in", "partitions_offmsgs", ":", "if", "pomsg", "[", "'message'", "]", ":", "pomsg", "[", "'message'", "]", "=", "self", ".", "decompress_fun", "(", "pomsg", "[", "'message'", "]", ")", "yield", "pomsg" ]
Decompress pre-defined compressed fields for each message.
[ "Decompress", "pre", "-", "defined", "compressed", "fields", "for", "each", "message", "." ]
python
train
41.571429
honzajavorek/redis-collections
redis_collections/lists.py
https://github.com/honzajavorek/redis-collections/blob/07ca8efe88fb128f7dc7319dfa6a26cd39b3776b/redis_collections/lists.py#L482-L498
def reverse(self): """ Reverses the items of this collection "in place" (only two values are retrieved from Redis at a time). """ def reverse_trans(pipe): if self.writeback: self._sync_helper(pipe) n = self.__len__(pipe) for i in range(n // 2): left = pipe.lindex(self.key, i) right = pipe.lindex(self.key, n - i - 1) pipe.lset(self.key, i, right) pipe.lset(self.key, n - i - 1, left) self._transaction(reverse_trans)
[ "def", "reverse", "(", "self", ")", ":", "def", "reverse_trans", "(", "pipe", ")", ":", "if", "self", ".", "writeback", ":", "self", ".", "_sync_helper", "(", "pipe", ")", "n", "=", "self", ".", "__len__", "(", "pipe", ")", "for", "i", "in", "range", "(", "n", "//", "2", ")", ":", "left", "=", "pipe", ".", "lindex", "(", "self", ".", "key", ",", "i", ")", "right", "=", "pipe", ".", "lindex", "(", "self", ".", "key", ",", "n", "-", "i", "-", "1", ")", "pipe", ".", "lset", "(", "self", ".", "key", ",", "i", ",", "right", ")", "pipe", ".", "lset", "(", "self", ".", "key", ",", "n", "-", "i", "-", "1", ",", "left", ")", "self", ".", "_transaction", "(", "reverse_trans", ")" ]
Reverses the items of this collection "in place" (only two values are retrieved from Redis at a time).
[ "Reverses", "the", "items", "of", "this", "collection", "in", "place", "(", "only", "two", "values", "are", "retrieved", "from", "Redis", "at", "a", "time", ")", "." ]
python
train
33.352941
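A hedged usage sketch for the reverse method above (not part of the original record). It assumes a Redis server reachable with the client defaults, and that the List constructor accepts an initial iterable and a key, as in the project's examples.

from redis_collections import List

items = List([1, 2, 3, 4, 5], key='example-reverse-demo')
items.reverse()        # reversed inside Redis, two values at a time
print(list(items))     # [5, 4, 3, 2, 1]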
ArchiveTeam/wpull
wpull/application/tasks/download.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/application/tasks/download.py#L259-L271
def _build_processor(cls, session: AppSession): '''Create the Processor Returns: Processor: An instance of :class:`.processor.BaseProcessor`. ''' web_processor = cls._build_web_processor(session) ftp_processor = cls._build_ftp_processor(session) delegate_processor = session.factory.new('Processor') delegate_processor.register('http', web_processor) delegate_processor.register('https', web_processor) delegate_processor.register('ftp', ftp_processor)
[ "def", "_build_processor", "(", "cls", ",", "session", ":", "AppSession", ")", ":", "web_processor", "=", "cls", ".", "_build_web_processor", "(", "session", ")", "ftp_processor", "=", "cls", ".", "_build_ftp_processor", "(", "session", ")", "delegate_processor", "=", "session", ".", "factory", ".", "new", "(", "'Processor'", ")", "delegate_processor", ".", "register", "(", "'http'", ",", "web_processor", ")", "delegate_processor", ".", "register", "(", "'https'", ",", "web_processor", ")", "delegate_processor", ".", "register", "(", "'ftp'", ",", "ftp_processor", ")" ]
Create the Processor Returns: Processor: An instance of :class:`.processor.BaseProcessor`.
[ "Create", "the", "Processor" ]
python
train
40.461538
JukeboxPipeline/jukebox-core
src/jukeboxcore/addons/guerilla/guerillamgmt.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/guerilla/guerillamgmt.py#L1173-L1185
def create_seq(self, project): """Create and return a new sequence :param project: the project for the sequence :type deps: :class:`jukeboxcore.djadapter.models.Project` :returns: The created sequence or None :rtype: None | :class:`jukeboxcore.djadapter.models.Sequence` :raises: None """ dialog = SequenceCreatorDialog(project=project, parent=self) dialog.exec_() seq = dialog.sequence return seq
[ "def", "create_seq", "(", "self", ",", "project", ")", ":", "dialog", "=", "SequenceCreatorDialog", "(", "project", "=", "project", ",", "parent", "=", "self", ")", "dialog", ".", "exec_", "(", ")", "seq", "=", "dialog", ".", "sequence", "return", "seq" ]
Create and return a new sequence :param project: the project for the sequence :type project: :class:`jukeboxcore.djadapter.models.Project` :returns: The created sequence or None :rtype: None | :class:`jukeboxcore.djadapter.models.Sequence` :raises: None
[ "Create", "and", "return", "a", "new", "sequence" ]
python
train
36.461538
google/grr
grr/server/grr_response_server/databases/mysql_flows.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_flows.py#L1230-L1237
def UnregisterFlowProcessingHandler(self, timeout=None): """Unregisters any registered flow processing handler.""" if self.flow_processing_request_handler_thread: self.flow_processing_request_handler_stop = True self.flow_processing_request_handler_thread.join(timeout) if self.flow_processing_request_handler_thread.isAlive(): raise RuntimeError("Flow processing handler did not join in time.") self.flow_processing_request_handler_thread = None
[ "def", "UnregisterFlowProcessingHandler", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "self", ".", "flow_processing_request_handler_thread", ":", "self", ".", "flow_processing_request_handler_stop", "=", "True", "self", ".", "flow_processing_request_handler_thread", ".", "join", "(", "timeout", ")", "if", "self", ".", "flow_processing_request_handler_thread", ".", "isAlive", "(", ")", ":", "raise", "RuntimeError", "(", "\"Flow processing handler did not join in time.\"", ")", "self", ".", "flow_processing_request_handler_thread", "=", "None" ]
Unregisters any registered flow processing handler.
[ "Unregisters", "any", "registered", "flow", "processing", "handler", "." ]
python
train
59.875
kwikteam/phy
phy/plot/plot.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/plot.py#L153-L158
def scatter(self, *args, **kwargs): """Add a scatter plot.""" cls = _make_class(ScatterVisual, _default_marker=kwargs.pop('marker', None), ) return self._add_item(cls, *args, **kwargs)
[ "def", "scatter", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cls", "=", "_make_class", "(", "ScatterVisual", ",", "_default_marker", "=", "kwargs", ".", "pop", "(", "'marker'", ",", "None", ")", ",", ")", "return", "self", ".", "_add_item", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Add a scatter plot.
[ "Add", "a", "scatter", "plot", "." ]
python
train
42.5
saltstack/salt
salt/modules/systemd_service.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/systemd_service.py#L73-L80
def _root(path, root): ''' Relocate an absolute path to a new root directory. ''' if root: return os.path.join(root, os.path.relpath(path, os.path.sep)) else: return path
[ "def", "_root", "(", "path", ",", "root", ")", ":", "if", "root", ":", "return", "os", ".", "path", ".", "join", "(", "root", ",", "os", ".", "path", ".", "relpath", "(", "path", ",", "os", ".", "path", ".", "sep", ")", ")", "else", ":", "return", "path" ]
Relocate an absolute path to a new root directory.
[ "Relocate", "an", "absolute", "path", "to", "a", "new", "root", "directory", "." ]
python
train
24.875
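A standalone worked example of the relocation logic (illustrative only; _root is a private helper of salt.modules.systemd_service, so the same expression is reproduced here with os.path directly).

import os

path, root = '/etc/systemd/system', '/mnt/chroot'
relocated = os.path.join(root, os.path.relpath(path, os.path.sep))
print(relocated)  # /mnt/chroot/etc/systemd/system
# With an empty root the original path would be returned unchanged, matching the else branch.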
Chilipp/funcargparse
funcargparse/__init__.py
https://github.com/Chilipp/funcargparse/blob/398ce8e7fa5aa35c465215446bda151cf1ecf7ad/funcargparse/__init__.py#L1006-L1022
def get_subparser(self, name): """ Convenience method to get a certain subparser Parameters ---------- name: str The name of the subparser Returns ------- FuncArgParser The subparsers corresponding to `name` """ if self._subparsers_action is None: raise ValueError("%s has no subparsers defined!" % self) return self._subparsers_action.choices[name]
[ "def", "get_subparser", "(", "self", ",", "name", ")", ":", "if", "self", ".", "_subparsers_action", "is", "None", ":", "raise", "ValueError", "(", "\"%s has no subparsers defined!\"", "%", "self", ")", "return", "self", ".", "_subparsers_action", ".", "choices", "[", "name", "]" ]
Convenience method to get a certain subparser Parameters ---------- name: str The name of the subparser Returns ------- FuncArgParser The subparser corresponding to `name`
[ "Convenience", "method", "to", "get", "a", "certain", "subparser" ]
python
train
27
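A hedged sketch of looking up a subparser by name (not from the original record). It assumes FuncArgParser's add_subparsers records the subparsers action internally so that get_subparser can find it; if no subparsers were created, the ValueError branch above fires.

from funcargparse import FuncArgParser

parser = FuncArgParser()
subparsers = parser.add_subparsers(dest='command')   # standard argparse-style call
subparsers.add_parser('run', help='run something')

run_parser = parser.get_subparser('run')
print(run_parser)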
pyblish/pyblish-houdini
pyblish_houdini/lib.py
https://github.com/pyblish/pyblish-houdini/blob/661b08696f04b4c5d8b03aa0c75cba3ca72f1e8d/pyblish_houdini/lib.py#L20-L38
def setup(console=False, port=None): """Setup integration Register plug-ins and integrate into the host Arguments: console (bool): DEPRECATED port (int, optional): DEPRECATED """ if self._has_been_setup: teardown() register_plugins() register_host() self._has_been_setup = True print("pyblish: Pyblish loaded successfully.")
[ "def", "setup", "(", "console", "=", "False", ",", "port", "=", "None", ")", ":", "if", "self", ".", "_has_been_setup", ":", "teardown", "(", ")", "register_plugins", "(", ")", "register_host", "(", ")", "self", ".", "_has_been_setup", "=", "True", "print", "(", "\"pyblish: Pyblish loaded successfully.\"", ")" ]
Setup integration Register plug-ins and integrate into the host Arguments: console (bool): DEPRECATED port (int, optional): DEPRECATED
[ "Setup", "integration" ]
python
train
19.578947
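A hedged usage sketch for the integration above (not part of the original record); the import path follows the file path shown (pyblish_houdini/lib.py), and the calls are meant to run inside a Houdini Python session.

from pyblish_houdini import lib

lib.setup()      # register plug-ins and integrate Pyblish with Houdini
# ... run publishes through pyblish.util or the Pyblish GUI ...
lib.teardown()   # remove the integration when finished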
DataBiosphere/toil
src/toil/common.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L1001-L1027
def getWorkflowDir(workflowID, configWorkDir=None): """ Returns a path to the directory where worker directories and the cache will be located for this workflow. :param str workflowID: Unique identifier for the workflow :param str configWorkDir: Value passed to the program using the --workDir flag :return: Path to the workflow directory :rtype: str """ workDir = configWorkDir or os.getenv('TOIL_WORKDIR') or tempfile.gettempdir() if not os.path.exists(workDir): raise RuntimeError("The directory specified by --workDir or TOIL_WORKDIR (%s) does not " "exist." % workDir) # Create the workflow dir, make it unique to each host in case workDir is on a shared FS. # This prevents workers on different nodes from erasing each other's directories. workflowDir = os.path.join(workDir, 'toil-%s-%s' % (workflowID, getNodeID())) try: # Directory creation is atomic os.mkdir(workflowDir) except OSError as err: if err.errno != 17: # The directory exists if a previous worker set it up. raise else: logger.debug('Created the workflow directory at %s' % workflowDir) return workflowDir
[ "def", "getWorkflowDir", "(", "workflowID", ",", "configWorkDir", "=", "None", ")", ":", "workDir", "=", "configWorkDir", "or", "os", ".", "getenv", "(", "'TOIL_WORKDIR'", ")", "or", "tempfile", ".", "gettempdir", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "workDir", ")", ":", "raise", "RuntimeError", "(", "\"The directory specified by --workDir or TOIL_WORKDIR (%s) does not \"", "\"exist.\"", "%", "workDir", ")", "# Create the workflow dir, make it unique to each host in case workDir is on a shared FS.", "# This prevents workers on different nodes from erasing each other's directories.", "workflowDir", "=", "os", ".", "path", ".", "join", "(", "workDir", ",", "'toil-%s-%s'", "%", "(", "workflowID", ",", "getNodeID", "(", ")", ")", ")", "try", ":", "# Directory creation is atomic", "os", ".", "mkdir", "(", "workflowDir", ")", "except", "OSError", "as", "err", ":", "if", "err", ".", "errno", "!=", "17", ":", "# The directory exists if a previous worker set it up.", "raise", "else", ":", "logger", ".", "debug", "(", "'Created the workflow directory at %s'", "%", "workflowDir", ")", "return", "workflowDir" ]
Returns a path to the directory where worker directories and the cache will be located for this workflow. :param str workflowID: Unique identifier for the workflow :param str configWorkDir: Value passed to the program using the --workDir flag :return: Path to the workflow directory :rtype: str
[ "Returns", "a", "path", "to", "the", "directory", "where", "worker", "directories", "and", "the", "cache", "will", "be", "located", "for", "this", "workflow", "." ]
python
train
48.555556
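A hedged sketch of calling getWorkflowDir directly (not from the original record). It assumes the function is exposed as a static method on the Toil class in toil.common; the workflow ID below is a made-up example.

import os
from toil.common import Toil

os.environ.setdefault('TOIL_WORKDIR', '/tmp')            # must name an existing directory
workflow_dir = Toil.getWorkflowDir('example-workflow')    # hypothetical workflow ID
print(workflow_dir)  # e.g. /tmp/toil-example-workflow-<node id>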
tradenity/python-sdk
tradenity/resources/table_rate_rule.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/table_rate_rule.py#L492-L512
def get_table_rate_rule_by_id(cls, table_rate_rule_id, **kwargs): """Find TableRateRule Return single instance of TableRateRule by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_table_rate_rule_by_id(table_rate_rule_id, async=True) >>> result = thread.get() :param async bool :param str table_rate_rule_id: ID of tableRateRule to return (required) :return: TableRateRule If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_table_rate_rule_by_id_with_http_info(table_rate_rule_id, **kwargs) else: (data) = cls._get_table_rate_rule_by_id_with_http_info(table_rate_rule_id, **kwargs) return data
[ "def", "get_table_rate_rule_by_id", "(", "cls", ",", "table_rate_rule_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_get_table_rate_rule_by_id_with_http_info", "(", "table_rate_rule_id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_get_table_rate_rule_by_id_with_http_info", "(", "table_rate_rule_id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Find TableRateRule Return single instance of TableRateRule by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_table_rate_rule_by_id(table_rate_rule_id, async=True) >>> result = thread.get() :param async bool :param str table_rate_rule_id: ID of tableRateRule to return (required) :return: TableRateRule If the method is called asynchronously, returns the request thread.
[ "Find", "TableRateRule" ]
python
train
45.142857
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py#L1380-L1394
def get_media_detail_output_interface_interface_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_media_detail = ET.Element("get_media_detail") config = get_media_detail output = ET.SubElement(get_media_detail, "output") interface = ET.SubElement(output, "interface") interface_type_key = ET.SubElement(interface, "interface-type") interface_type_key.text = kwargs.pop('interface_type') interface_name = ET.SubElement(interface, "interface-name") interface_name.text = kwargs.pop('interface_name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_media_detail_output_interface_interface_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_media_detail", "=", "ET", ".", "Element", "(", "\"get_media_detail\"", ")", "config", "=", "get_media_detail", "output", "=", "ET", ".", "SubElement", "(", "get_media_detail", ",", "\"output\"", ")", "interface", "=", "ET", ".", "SubElement", "(", "output", ",", "\"interface\"", ")", "interface_type_key", "=", "ET", ".", "SubElement", "(", "interface", ",", "\"interface-type\"", ")", "interface_type_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_type'", ")", "interface_name", "=", "ET", ".", "SubElement", "(", "interface", ",", "\"interface-name\"", ")", "interface_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
46.333333