Dataset schema (column: type, value range):
repo: string, 7 to 54 chars
path: string, 4 to 192 chars
url: string, 87 to 284 chars
code: string, 78 to 104k chars
code_tokens: list
docstring: string, 1 to 46.9k chars
docstring_tokens: list
language: string, 1 distinct value
partition: string, 3 distinct values
merll/docker-map
dockermap/map/policy/cache.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/policy/cache.py#L85-L94
def refresh(self): """ Fetches all current volume names from the client. """ if not self._client: return current_volumes = self._client.volumes()['Volumes'] self.clear() if current_volumes: self.update(vol['Name'] for vol in current_volumes)
[ "def", "refresh", "(", "self", ")", ":", "if", "not", "self", ".", "_client", ":", "return", "current_volumes", "=", "self", ".", "_client", ".", "volumes", "(", ")", "[", "'Volumes'", "]", "self", ".", "clear", "(", ")", "if", "current_volumes", ":", "self", ".", "update", "(", "vol", "[", "'Name'", "]", "for", "vol", "in", "current_volumes", ")" ]
Fetches all current volume names from the client.
[ "Fetches", "all", "current", "network", "names", "from", "the", "client", "." ]
python
train
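The refresh() pattern above rebuilds a set-backed cache from a single client call. Below is a minimal runnable sketch of that pattern; FakeClient is a hypothetical stand-in for the docker-py APIClient that the original code assumes.

```python
class FakeClient:
    """Hypothetical stand-in for docker-py's APIClient."""
    def volumes(self):
        # docker-py returns {'Volumes': [{'Name': ...}, ...]} or {'Volumes': None}
        return {'Volumes': [{'Name': 'data'}, {'Name': 'logs'}]}

class VolumeCache(set):
    """Set-backed cache of volume names, refreshed on demand."""
    def __init__(self, client):
        super().__init__()
        self._client = client

    def refresh(self):
        if not self._client:
            return
        current_volumes = self._client.volumes()['Volumes']
        self.clear()  # drop stale names before repopulating
        if current_volumes:
            self.update(vol['Name'] for vol in current_volumes)

cache = VolumeCache(FakeClient())
cache.refresh()
print(sorted(cache))  # ['data', 'logs']
```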
rwl/pylon
contrib/public/services/simplejson/__init__.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/contrib/public/services/simplejson/__init__.py#L83-L124
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, **kw): """ Serialize ``obj`` as a JSON formatted stream to ``fp`` (a ``.write()``-supporting file-like object). If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) will be skipped instead of raising a ``TypeError``. If ``ensure_ascii`` is ``False``, then some chunks written to ``fp`` may be ``unicode`` instances, subject to normal Python ``str`` to ``unicode`` coercion rules. Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter()``) this is likely to cause an error. If ``check_circular`` is ``False``, then the circular reference check for container types will be skipped and a circular reference will result in an ``OverflowError`` (or worse). If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in strict compliance of the JSON specification, instead of using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). If ``indent`` is a non-negative integer, then JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. ``None`` is the most compact representation. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the ``.default()`` method to serialize additional types), specify it with the ``cls`` kwarg. """ if cls is None: cls = JSONEncoder iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, indent=indent, **kw).iterencode(obj) # could accelerate with writelines in some versions of Python, at # a debuggability cost for chunk in iterable: fp.write(chunk)
[ "def", "dump", "(", "obj", ",", "fp", ",", "skipkeys", "=", "False", ",", "ensure_ascii", "=", "True", ",", "check_circular", "=", "True", ",", "allow_nan", "=", "True", ",", "cls", "=", "None", ",", "indent", "=", "None", ",", "*", "*", "kw", ")", ":", "if", "cls", "is", "None", ":", "cls", "=", "JSONEncoder", "iterable", "=", "cls", "(", "skipkeys", "=", "skipkeys", ",", "ensure_ascii", "=", "ensure_ascii", ",", "check_circular", "=", "check_circular", ",", "allow_nan", "=", "allow_nan", ",", "indent", "=", "indent", ",", "*", "*", "kw", ")", ".", "iterencode", "(", "obj", ")", "# could accelerate with writelines in some versions of Python, at", "# a debuggability cost", "for", "chunk", "in", "iterable", ":", "fp", ".", "write", "(", "chunk", ")" ]
Serialize ``obj`` as a JSON formatted stream to ``fp`` (a ``.write()``-supporting file-like object). If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) will be skipped instead of raising a ``TypeError``. If ``ensure_ascii`` is ``False``, then some chunks written to ``fp`` may be ``unicode`` instances, subject to normal Python ``str`` to ``unicode`` coercion rules. Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter()``) this is likely to cause an error. If ``check_circular`` is ``False``, then the circular reference check for container types will be skipped and a circular reference will result in an ``OverflowError`` (or worse). If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in strict compliance of the JSON specification, instead of using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). If ``indent`` is a non-negative integer, then JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. ``None`` is the most compact representation. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the ``.default()`` method to serialize additional types), specify it with the ``cls`` kwarg.
[ "Serialize", "obj", "as", "a", "JSON", "formatted", "stream", "to", "fp", "(", "a", ".", "write", "()", "-", "supporting", "file", "-", "like", "object", ")", "." ]
python
train
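The docstring above is the classic simplejson dump() contract, which the standard-library json module inherited. A quick usage sketch with stdlib json, whose dump() keeps the same core signature:

```python
import io
import json  # stdlib json descends from simplejson and keeps this interface

buf = io.StringIO()  # any .write()-supporting object works as fp
json.dump({'b': 1, 'a': [1.5, None]}, buf, indent=2)
print(buf.getvalue())  # pretty-printed JSON with a two-space indent

# skipkeys=True silently drops dict keys that are not basic types,
# instead of raising TypeError on the tuple key below.
json.dump({('tuple', 'key'): 1, 'ok': 2}, io.StringIO(), skipkeys=True)
```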
sykora/djournal
djournal/views.py
https://github.com/sykora/djournal/blob/c074e1f94e07e2630034a00c7dbd768e933f85e2/djournal/views.py#L10-L26
def entry_index(request, limit=0, template='djournal/entry_index.html'): '''Returns a response of a fixed number of entries; all of them, by default. ''' entries = Entry.public.all() if limit > 0: entries = entries[:limit] context = { 'entries': entries, } return render_to_response( template, context, context_instance=RequestContext(request), )
[ "def", "entry_index", "(", "request", ",", "limit", "=", "0", ",", "template", "=", "'djournal/entry_index.html'", ")", ":", "entries", "=", "Entry", ".", "public", ".", "all", "(", ")", "if", "limit", ">", "0", ":", "entries", "=", "entries", "[", ":", "limit", "]", "context", "=", "{", "'entries'", ":", "entries", ",", "}", "return", "render_to_response", "(", "template", ",", "context", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ",", ")" ]
Returns a response of a fixed number of entries; all of them, by default.
[ "Returns", "a", "reponse", "of", "a", "fixed", "number", "of", "entries", ";", "all", "of", "them", "by", "default", "." ]
python
train
jlmadurga/listenclosely
listenclosely/services/console.py
https://github.com/jlmadurga/listenclosely/blob/d6df9110c3ed6fd337e0236cccbe4d931bf217b0/listenclosely/services/console.py#L16-L28
def send_message(self, id_service, content): """Write all messages to the stream in a thread-safe way.""" if not content: return with self._lock: try: message = "Message: %s to %s" % (content, id_service) self.write_message(message) self.stream.flush() # flush after each message return "message_id" except Exception: if not self.fail_silently: raise
[ "def", "send_message", "(", "self", ",", "id_service", ",", "content", ")", ":", "if", "not", "content", ":", "return", "with", "self", ".", "_lock", ":", "try", ":", "message", "=", "\"Message: %s to %s\"", "%", "(", "content", ",", "id_service", ")", "self", ".", "write_message", "(", "message", ")", "self", ".", "stream", ".", "flush", "(", ")", "# flush after each message", "return", "\"message_id\"", "except", "Exception", ":", "if", "not", "self", ".", "fail_silently", ":", "raise" ]
Write all messages to the stream in a thread-safe way.
[ "Write", "all", "messages", "to", "the", "stream", "in", "a", "thread", "-", "safe", "way", "." ]
python
train
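A self-contained sketch of the lock-guarded write-and-flush pattern send_message() uses above; this class is a stripped-down illustration, not the listenclosely implementation:

```python
import sys
import threading

class ConsoleService:
    """Writes messages to a stream, serialized by a lock."""
    def __init__(self, stream=sys.stdout, fail_silently=False):
        self.stream = stream
        self.fail_silently = fail_silently
        self._lock = threading.RLock()

    def send_message(self, id_service, content):
        if not content:
            return None
        with self._lock:  # one writer at a time, so lines never interleave
            try:
                self.stream.write("Message: %s to %s\n" % (content, id_service))
                self.stream.flush()  # flush after each message
                return "message_id"
            except Exception:
                if not self.fail_silently:
                    raise

service = ConsoleService()
service.send_message("42", "hello")  # prints: Message: hello to 42
```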
happyleavesaoc/python-firetv
firetv/__main__.py
https://github.com/happyleavesaoc/python-firetv/blob/3dd953376c0d5af502e775ae14ed0afe03224781/firetv/__main__.py#L78-L93
def add(device_id, host, adbkey='', adb_server_ip='', adb_server_port=5037): """ Add a device. Creates FireTV instance associated with device identifier. :param device_id: Device identifier. :param host: Host in <address>:<port> format. :param adbkey: The path to the "adbkey" file :param adb_server_ip: the IP address for the ADB server :param adb_server_port: the port for the ADB server :returns: Added successfully or not. """ valid = is_valid_device_id(device_id) and is_valid_host(host) if valid: devices[device_id] = FireTV(str(host), str(adbkey), str(adb_server_ip), str(adb_server_port)) return valid
[ "def", "add", "(", "device_id", ",", "host", ",", "adbkey", "=", "''", ",", "adb_server_ip", "=", "''", ",", "adb_server_port", "=", "5037", ")", ":", "valid", "=", "is_valid_device_id", "(", "device_id", ")", "and", "is_valid_host", "(", "host", ")", "if", "valid", ":", "devices", "[", "device_id", "]", "=", "FireTV", "(", "str", "(", "host", ")", ",", "str", "(", "adbkey", ")", ",", "str", "(", "adb_server_ip", ")", ",", "str", "(", "adb_server_port", ")", ")", "return", "valid" ]
Add a device. Creates FireTV instance associated with device identifier. :param device_id: Device identifier. :param host: Host in <address>:<port> format. :param adbkey: The path to the "adbkey" file :param adb_server_ip: the IP address for the ADB server :param adb_server_port: the port for the ADB server :returns: Added successfully or not.
[ "Add", "a", "device", "." ]
python
train
dtmilano/AndroidViewClient
src/com/dtmilano/android/viewclient.py
https://github.com/dtmilano/AndroidViewClient/blob/7e6e83fde63af99e5e4ab959712ecf94f9881aa2/src/com/dtmilano/android/viewclient.py#L3781-L3789
def findViewsContainingPoint(self, (x, y), _filter=None): ''' Finds the list of Views that contain the point (x, y). ''' if not _filter: _filter = lambda v: True return [v for v in self.views if (v.containsPoint((x,y)) and _filter(v))]
[ "def", "findViewsContainingPoint", "(", "self", ",", "(", "x", ",", "y", ")", ",", "_filter", "=", "None", ")", ":", "if", "not", "_filter", ":", "_filter", "=", "lambda", "v", ":", "True", "return", "[", "v", "for", "v", "in", "self", ".", "views", "if", "(", "v", ".", "containsPoint", "(", "(", "x", ",", "y", ")", ")", "and", "_filter", "(", "v", ")", ")", "]" ]
Finds the list of Views that contain the point (x, y).
[ "Finds", "the", "list", "of", "Views", "that", "contain", "the", "point", "(", "x", "y", ")", "." ]
python
train
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/master_controller/master_controller_healthcheck.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/master_controller/master_controller_healthcheck.py#L66-L85
def get_overall_services_health(self) -> str: """Get the overall health of all the services. Returns: str, overall health status """ services_health_status = self.get_services_health() # Evaluate overall health health_status = all(status == "Healthy" for status in services_health_status.values()) # Converting from bool to str if health_status: overall_status = "Healthy" else: overall_status = "Unhealthy" return overall_status
[ "def", "get_overall_services_health", "(", "self", ")", "->", "str", ":", "services_health_status", "=", "self", ".", "get_services_health", "(", ")", "# Evaluate overall health", "health_status", "=", "all", "(", "status", "==", "\"Healthy\"", "for", "status", "in", "services_health_status", ".", "values", "(", ")", ")", "# Converting from bool to str", "if", "health_status", ":", "overall_status", "=", "\"Healthy\"", "else", ":", "overall_status", "=", "\"Unhealthy\"", "return", "overall_status" ]
Get the overall health of all the services. Returns: str, overall health status
[ "Get", "the", "overall", "health", "of", "all", "the", "services", "." ]
python
train
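The rollup above is all() over the per-service statuses. A tiny standalone version; note that an empty mapping counts as healthy, because all() of an empty iterable is True:

```python
def overall_health(services):
    # Collapse per-service statuses into a single overall flag.
    healthy = all(status == "Healthy" for status in services.values())
    return "Healthy" if healthy else "Unhealthy"

print(overall_health({'db': 'Healthy', 'queue': 'Healthy'}))    # Healthy
print(overall_health({'db': 'Healthy', 'queue': 'Unhealthy'}))  # Unhealthy
print(overall_health({}))                                       # Healthy (vacuously true)
```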
dslackw/slpkg
slpkg/tracking.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/tracking.py#L170-L181
def check_used(self, pkg): """Check if the package is used as a dependency """ used = [] dep_path = self.meta.log_path + "dep/" logs = find_package("", dep_path) for log in logs: deps = Utils().read_file(dep_path + log) for dep in deps.splitlines(): if pkg == dep: used.append(log) return used
[ "def", "check_used", "(", "self", ",", "pkg", ")", ":", "used", "=", "[", "]", "dep_path", "=", "self", ".", "meta", ".", "log_path", "+", "\"dep/\"", "logs", "=", "find_package", "(", "\"\"", ",", "dep_path", ")", "for", "log", "in", "logs", ":", "deps", "=", "Utils", "(", ")", ".", "read_file", "(", "dep_path", "+", "log", ")", "for", "dep", "in", "deps", ".", "splitlines", "(", ")", ":", "if", "pkg", "==", "dep", ":", "used", ".", "append", "(", "log", ")", "return", "used" ]
Check if the package is used as a dependency
[ "Check", "if", "dependencies", "used" ]
python
train
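check_used() walks per-package dependency logs looking for an exact package-name match. A hedged, standard-library-only sketch of the same scan; the directory layout below is invented for illustration:

```python
import os
import tempfile

def check_used(pkg, dep_path):
    """Return the names of log files that list `pkg` as a dependency."""
    used = []
    for log in sorted(os.listdir(dep_path)):
        with open(os.path.join(dep_path, log)) as f:
            if pkg in f.read().splitlines():  # exact line match, as above
                used.append(log)
    return used

dep_path = tempfile.mkdtemp()
with open(os.path.join(dep_path, 'foo'), 'w') as f:
    f.write('zlib\nopenssl\n')
print(check_used('openssl', dep_path))  # ['foo']
```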
gem/oq-engine
openquake/hazardlib/gsim/campbell_bozorgnia_2014.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/campbell_bozorgnia_2014.py#L234-L241
def _get_hanging_wall_coeffs_rrup(self, dists): """ Returns the hanging wall rrup term defined in equation 13 """ fhngrrup = np.ones(len(dists.rrup)) idx = dists.rrup > 0.0 fhngrrup[idx] = (dists.rrup[idx] - dists.rjb[idx]) / dists.rrup[idx] return fhngrrup
[ "def", "_get_hanging_wall_coeffs_rrup", "(", "self", ",", "dists", ")", ":", "fhngrrup", "=", "np", ".", "ones", "(", "len", "(", "dists", ".", "rrup", ")", ")", "idx", "=", "dists", ".", "rrup", ">", "0.0", "fhngrrup", "[", "idx", "]", "=", "(", "dists", ".", "rrup", "[", "idx", "]", "-", "dists", ".", "rjb", "[", "idx", "]", ")", "/", "dists", ".", "rrup", "[", "idx", "]", "return", "fhngrrup" ]
Returns the hanging wall rrup term defined in equation 13
[ "Returns", "the", "hanging", "wall", "rrup", "term", "defined", "in", "equation", "13" ]
python
train
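The function above sidesteps division by zero by defaulting the term to 1 and dividing only where rrup > 0. The same boolean-mask trick in isolation:

```python
import numpy as np

rrup = np.array([0.0, 2.0, 10.0])
rjb = np.array([0.0, 1.0, 10.0])

fhngrrup = np.ones(len(rrup))   # default value where rrup == 0
idx = rrup > 0.0                # boolean mask of safe divisions
fhngrrup[idx] = (rrup[idx] - rjb[idx]) / rrup[idx]
print(fhngrrup)  # [1.  0.5 0. ]
```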
stevelittlefish/littlefish
littlefish/pager.py
https://github.com/stevelittlefish/littlefish/blob/6deee7f81fab30716c743efe2e94e786c6e17016/littlefish/pager.py#L153-L160
def render_seo_links(self, scheme=None): """Render the rel=canonical, rel=prev and rel=next links to a Markup object for injection into a template""" out = self.render_prev_next_links(scheme=scheme) if self.total_pages == 1: out += self.render_canonical_link(scheme=scheme) return out
[ "def", "render_seo_links", "(", "self", ",", "scheme", "=", "None", ")", ":", "out", "=", "self", ".", "render_prev_next_links", "(", "scheme", "=", "scheme", ")", "if", "self", ".", "total_pages", "==", "1", ":", "out", "+=", "self", ".", "render_canonical_link", "(", "scheme", "=", "scheme", ")", "return", "out" ]
Render the rel=canonical, rel=prev and rel=next links to a Markup object for injection into a template
[ "Render", "the", "rel", "=", "canonical", "rel", "=", "prev", "and", "rel", "=", "next", "links", "to", "a", "Markup", "object", "for", "injection", "into", "a", "template" ]
python
test
twisted/txaws
txaws/wsdl.py
https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/wsdl.py#L281-L285
def _get_namespace_tag(self, tag): """Return the given C{tag} with the namespace prefix added, if any.""" if self._namespace is not None: tag = "{%s}%s" % (self._namespace, tag) return tag
[ "def", "_get_namespace_tag", "(", "self", ",", "tag", ")", ":", "if", "self", ".", "_namespace", "is", "not", "None", ":", "tag", "=", "\"{%s}%s\"", "%", "(", "self", ".", "_namespace", ",", "tag", ")", "return", "tag" ]
Return the given C{tag} with the namespace prefix added, if any.
[ "Return", "the", "given", "C", "{", "tag", "}", "with", "the", "namespace", "prefix", "added", "if", "any", "." ]
python
train
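_get_namespace_tag() builds ElementTree-style Clark notation, {namespace-uri}localname. The same logic as a free function, for illustration:

```python
def get_namespace_tag(namespace, tag):
    # Clark notation, as used by xml.etree.ElementTree: {uri}localname
    if namespace is not None:
        tag = "{%s}%s" % (namespace, tag)
    return tag

print(get_namespace_tag("http://example.com/ns", "Item"))  # {http://example.com/ns}Item
print(get_namespace_tag(None, "Item"))                     # Item
```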
gwastro/pycbc-glue
pycbc_glue/pipeline.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/pipeline.py#L3841-L3848
def set_end(self,time): """ Set the end time of the datafind query. @param time: GPS end time of query. """ self.add_var_opt('gps-end-time', time) self.__end = time self.__set_output()
[ "def", "set_end", "(", "self", ",", "time", ")", ":", "self", ".", "add_var_opt", "(", "'gps-end-time'", ",", "time", ")", "self", ".", "__end", "=", "time", "self", ".", "__set_output", "(", ")" ]
Set the end time of the datafind query. @param time: GPS end time of query.
[ "Set", "the", "end", "time", "of", "the", "datafind", "query", "." ]
python
train
phaethon/kamene
kamene/modules/p0f.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/modules/p0f.py#L329-L497
def p0f_impersonate(pkt, osgenre=None, osdetails=None, signature=None, extrahops=0, mtu=1500, uptime=None): """Modifies pkt so that p0f will think it has been sent by a specific OS. If osdetails is None, then we randomly pick up a personality matching osgenre. If osgenre and signature are also None, we use a local signature (using p0f_getlocalsigs). If signature is specified (as a tuple), we use the signature. For now, only TCP Syn packets are supported. Some specifications of the p0f.fp file are not (yet) implemented.""" pkt = pkt.copy() #pkt = pkt.__class__(str(pkt)) while pkt.haslayer(IP) and pkt.haslayer(TCP): pkt = pkt.getlayer(IP) if isinstance(pkt.payload, TCP): break pkt = pkt.payload if not isinstance(pkt, IP) or not isinstance(pkt.payload, TCP): raise TypeError("Not a TCP/IP packet") if uptime is None: uptime = random.randint(120,100*60*60*24*365) db = p0f_selectdb(pkt.payload.flags) if osgenre: pb = db.get_base() if pb is None: pb = [] #pb = filter(lambda x: x[6] == osgenre, pb) pb = [ x for x in pb if x[6] == osgenre ] if osdetails: #pb = filter(lambda x: x[7] == osdetails, pb) pb = [ x for x in pb if x[7] == osdetails ] elif signature: pb = [signature] else: pb = p0f_getlocalsigs()[db] if db == p0fr_kdb: # 'K' quirk <=> RST+ACK if pkt.payload.flags & 0x4 == 0x4: #pb = filter(lambda x: 'K' in x[5], pb) pb = [ x for x in pb if 'K' in x[5] ] else: #pb = filter(lambda x: 'K' not in x[5], pb) pb = [ x for x in pb if 'K' not in x[5] ] if not pb: raise Kamene_Exception("No match in the p0f database") pers = pb[random.randint(0, len(pb) - 1)] # options (we start with options because of MSS) ## TODO: let the options already set if they are valid options = [] if pers[4] != '.': for opt in pers[4].split(','): if opt[0] == 'M': # MSS might have a maximum size because of window size # specification if pers[0][0] == 'S': maxmss = (2**16-1) / int(pers[0][1:]) else: maxmss = (2**16-1) # If we have to randomly pick up a value, we cannot use # kamene RandXXX() functions, because the value has to be # set in case we need it for the window size value. That's # why we use random.randint() if opt[1:] == '*': options.append(('MSS', random.randint(1,maxmss))) elif opt[1] == '%': coef = int(opt[2:]) options.append(('MSS', coef*random.randint(1,maxmss/coef))) else: options.append(('MSS', int(opt[1:]))) elif opt[0] == 'W': if opt[1:] == '*': options.append(('WScale', RandByte())) elif opt[1] == '%': coef = int(opt[2:]) options.append(('WScale', coef*RandNum(min=1, max=(2**8-1)/coef))) else: options.append(('WScale', int(opt[1:]))) elif opt == 'T0': options.append(('Timestamp', (0, 0))) elif opt == 'T': if 'T' in pers[5]: # FIXME: RandInt() here does not work (bug (?) in # TCPOptionsField.m2i often raises "OverflowError: # long int too large to convert to int" in: # oval = struct.pack(ofmt, *oval)" # Actually, this is enough to often raise the error: # struct.pack('I', RandInt()) options.append(('Timestamp', (uptime, random.randint(1,2**32-1)))) else: options.append(('Timestamp', (uptime, 0))) elif opt == 'S': options.append(('SAckOK', '')) elif opt == 'N': options.append(('NOP', None)) elif opt == 'E': options.append(('EOL', None)) elif opt[0] == '?': if int(opt[1:]) in TCPOptions[0]: optname = TCPOptions[0][int(opt[1:])][0] optstruct = TCPOptions[0][int(opt[1:])][1] options.append((optname, struct.unpack(optstruct, RandString(struct.calcsize(optstruct))._fix()))) else: options.append((int(opt[1:]), '')) ## FIXME: qqP not handled else: warning("unhandled TCP option " + opt) pkt.payload.options = options # window size if pers[0] == '*': pkt.payload.window = RandShort() elif pers[0].isdigit(): pkt.payload.window = int(pers[0]) elif pers[0][0] == '%': coef = int(pers[0][1:]) pkt.payload.window = coef * RandNum(min=1,max=(2**16-1)/coef) elif pers[0][0] == 'T': pkt.payload.window = mtu * int(pers[0][1:]) elif pers[0][0] == 'S': ## needs MSS set #MSS = filter(lambda x: x[0] == 'MSS', options) MSS = [ x for x in options if x[0] == 'MSS' ] if not MSS: raise Kamene_Exception("TCP window value requires MSS, and MSS option not set") pkt.payload.window = MSS[0][1] * int(pers[0][1:]) else: raise Kamene_Exception('Unhandled window size specification') # ttl pkt.ttl = pers[1]-extrahops # DF flag pkt.flags |= (2 * pers[2]) ## FIXME: ss (packet size) not handled (how ? may be with D quirk ## if present) # Quirks if pers[5] != '.': for qq in pers[5]: ## FIXME: not handled: P, I, X, ! # T handled with the Timestamp option if qq == 'Z': pkt.id = 0 elif qq == 'U': pkt.payload.urgptr = RandShort() elif qq == 'A': pkt.payload.ack = RandInt() elif qq == 'F': #if db == p0fo_kdb: # pkt.payload.flags |= 0x20 # U #else: pkt.payload.flags |= RandChoice(8, 32, 40) #P / U / PU elif qq == 'D' and db != p0fo_kdb: pkt /= conf.raw_layer(load=RandString(random.randint(1, 10))) # XXX p0fo.fp elif qq == 'Q': pkt.payload.seq = pkt.payload.ack #elif qq == '0': pkt.payload.seq = 0 #if db == p0fr_kdb: # '0' quirk is actually not only for p0fr.fp (see # packet2p0f()) if '0' in pers[5]: pkt.payload.seq = 0 elif pkt.payload.seq == 0: pkt.payload.seq = RandInt() while pkt.underlayer: pkt = pkt.underlayer return pkt
[ "def", "p0f_impersonate", "(", "pkt", ",", "osgenre", "=", "None", ",", "osdetails", "=", "None", ",", "signature", "=", "None", ",", "extrahops", "=", "0", ",", "mtu", "=", "1500", ",", "uptime", "=", "None", ")", ":", "pkt", "=", "pkt", ".", "copy", "(", ")", "#pkt = pkt.__class__(str(pkt))", "while", "pkt", ".", "haslayer", "(", "IP", ")", "and", "pkt", ".", "haslayer", "(", "TCP", ")", ":", "pkt", "=", "pkt", ".", "getlayer", "(", "IP", ")", "if", "isinstance", "(", "pkt", ".", "payload", ",", "TCP", ")", ":", "break", "pkt", "=", "pkt", ".", "payload", "if", "not", "isinstance", "(", "pkt", ",", "IP", ")", "or", "not", "isinstance", "(", "pkt", ".", "payload", ",", "TCP", ")", ":", "raise", "TypeError", "(", "\"Not a TCP/IP packet\"", ")", "if", "uptime", "is", "None", ":", "uptime", "=", "random", ".", "randint", "(", "120", ",", "100", "*", "60", "*", "60", "*", "24", "*", "365", ")", "db", "=", "p0f_selectdb", "(", "pkt", ".", "payload", ".", "flags", ")", "if", "osgenre", ":", "pb", "=", "db", ".", "get_base", "(", ")", "if", "pb", "is", "None", ":", "pb", "=", "[", "]", "#pb = filter(lambda x: x[6] == osgenre, pb)", "pb", "=", "[", "x", "for", "x", "in", "pb", "if", "x", "[", "6", "]", "==", "osgenre", "]", "if", "osdetails", ":", "#pb = filter(lambda x: x[7] == osdetails, pb)", "pb", "=", "[", "x", "for", "x", "in", "pb", "if", "x", "[", "7", "]", "==", "osdetails", "]", "elif", "signature", ":", "pb", "=", "[", "signature", "]", "else", ":", "pb", "=", "p0f_getlocalsigs", "(", ")", "[", "db", "]", "if", "db", "==", "p0fr_kdb", ":", "# 'K' quirk <=> RST+ACK", "if", "pkt", ".", "payload", ".", "flags", "&", "0x4", "==", "0x4", ":", "#pb = filter(lambda x: 'K' in x[5], pb)", "pb", "=", "[", "x", "for", "x", "in", "pb", "if", "'K'", "in", "x", "[", "5", "]", "]", "else", ":", "#pb = filter(lambda x: 'K' not in x[5], pb)", "pb", "=", "[", "x", "for", "x", "in", "pb", "if", "'K'", "not", "in", "x", "[", "5", "]", "]", "if", "not", "pb", ":", "raise", "Kamene_Exception", "(", "\"No match in the p0f database\"", ")", "pers", "=", "pb", "[", "random", ".", "randint", "(", "0", ",", "len", "(", "pb", ")", "-", "1", ")", "]", "# options (we start with options because of MSS)", "## TODO: let the options already set if they are valid", "options", "=", "[", "]", "if", "pers", "[", "4", "]", "!=", "'.'", ":", "for", "opt", "in", "pers", "[", "4", "]", ".", "split", "(", "','", ")", ":", "if", "opt", "[", "0", "]", "==", "'M'", ":", "# MSS might have a maximum size because of window size", "# specification", "if", "pers", "[", "0", "]", "[", "0", "]", "==", "'S'", ":", "maxmss", "=", "(", "2", "**", "16", "-", "1", ")", "/", "int", "(", "pers", "[", "0", "]", "[", "1", ":", "]", ")", "else", ":", "maxmss", "=", "(", "2", "**", "16", "-", "1", ")", "# If we have to randomly pick up a value, we cannot use", "# kamene RandXXX() functions, because the value has to be", "# set in case we need it for the window size value. 
That's", "# why we use random.randint()", "if", "opt", "[", "1", ":", "]", "==", "'*'", ":", "options", ".", "append", "(", "(", "'MSS'", ",", "random", ".", "randint", "(", "1", ",", "maxmss", ")", ")", ")", "elif", "opt", "[", "1", "]", "==", "'%'", ":", "coef", "=", "int", "(", "opt", "[", "2", ":", "]", ")", "options", ".", "append", "(", "(", "'MSS'", ",", "coef", "*", "random", ".", "randint", "(", "1", ",", "maxmss", "/", "coef", ")", ")", ")", "else", ":", "options", ".", "append", "(", "(", "'MSS'", ",", "int", "(", "opt", "[", "1", ":", "]", ")", ")", ")", "elif", "opt", "[", "0", "]", "==", "'W'", ":", "if", "opt", "[", "1", ":", "]", "==", "'*'", ":", "options", ".", "append", "(", "(", "'WScale'", ",", "RandByte", "(", ")", ")", ")", "elif", "opt", "[", "1", "]", "==", "'%'", ":", "coef", "=", "int", "(", "opt", "[", "2", ":", "]", ")", "options", ".", "append", "(", "(", "'WScale'", ",", "coef", "*", "RandNum", "(", "min", "=", "1", ",", "max", "=", "(", "2", "**", "8", "-", "1", ")", "/", "coef", ")", ")", ")", "else", ":", "options", ".", "append", "(", "(", "'WScale'", ",", "int", "(", "opt", "[", "1", ":", "]", ")", ")", ")", "elif", "opt", "==", "'T0'", ":", "options", ".", "append", "(", "(", "'Timestamp'", ",", "(", "0", ",", "0", ")", ")", ")", "elif", "opt", "==", "'T'", ":", "if", "'T'", "in", "pers", "[", "5", "]", ":", "# FIXME: RandInt() here does not work (bug (?) in", "# TCPOptionsField.m2i often raises \"OverflowError:", "# long int too large to convert to int\" in:", "# oval = struct.pack(ofmt, *oval)\"", "# Actually, this is enough to often raise the error:", "# struct.pack('I', RandInt())", "options", ".", "append", "(", "(", "'Timestamp'", ",", "(", "uptime", ",", "random", ".", "randint", "(", "1", ",", "2", "**", "32", "-", "1", ")", ")", ")", ")", "else", ":", "options", ".", "append", "(", "(", "'Timestamp'", ",", "(", "uptime", ",", "0", ")", ")", ")", "elif", "opt", "==", "'S'", ":", "options", ".", "append", "(", "(", "'SAckOK'", ",", "''", ")", ")", "elif", "opt", "==", "'N'", ":", "options", ".", "append", "(", "(", "'NOP'", ",", "None", ")", ")", "elif", "opt", "==", "'E'", ":", "options", ".", "append", "(", "(", "'EOL'", ",", "None", ")", ")", "elif", "opt", "[", "0", "]", "==", "'?'", ":", "if", "int", "(", "opt", "[", "1", ":", "]", ")", "in", "TCPOptions", "[", "0", "]", ":", "optname", "=", "TCPOptions", "[", "0", "]", "[", "int", "(", "opt", "[", "1", ":", "]", ")", "]", "[", "0", "]", "optstruct", "=", "TCPOptions", "[", "0", "]", "[", "int", "(", "opt", "[", "1", ":", "]", ")", "]", "[", "1", "]", "options", ".", "append", "(", "(", "optname", ",", "struct", ".", "unpack", "(", "optstruct", ",", "RandString", "(", "struct", ".", "calcsize", "(", "optstruct", ")", ")", ".", "_fix", "(", ")", ")", ")", ")", "else", ":", "options", ".", "append", "(", "(", "int", "(", "opt", "[", "1", ":", "]", ")", ",", "''", ")", ")", "## FIXME: qqP not handled", "else", ":", "warning", "(", "\"unhandled TCP option \"", "+", "opt", ")", "pkt", ".", "payload", ".", "options", "=", "options", "# window size", "if", "pers", "[", "0", "]", "==", "'*'", ":", "pkt", ".", "payload", ".", "window", "=", "RandShort", "(", ")", "elif", "pers", "[", "0", "]", ".", "isdigit", "(", ")", ":", "pkt", ".", "payload", ".", "window", "=", "int", "(", "pers", "[", "0", "]", ")", "elif", "pers", "[", "0", "]", "[", "0", "]", "==", "'%'", ":", "coef", "=", "int", "(", "pers", "[", "0", "]", "[", "1", ":", "]", ")", "pkt", ".", "payload", ".", "window", "=", "coef", "*", 
"RandNum", "(", "min", "=", "1", ",", "max", "=", "(", "2", "**", "16", "-", "1", ")", "/", "coef", ")", "elif", "pers", "[", "0", "]", "[", "0", "]", "==", "'T'", ":", "pkt", ".", "payload", ".", "window", "=", "mtu", "*", "int", "(", "pers", "[", "0", "]", "[", "1", ":", "]", ")", "elif", "pers", "[", "0", "]", "[", "0", "]", "==", "'S'", ":", "## needs MSS set", "#MSS = filter(lambda x: x[0] == 'MSS', options)", "MSS", "=", "[", "x", "for", "x", "in", "options", "if", "x", "[", "0", "]", "==", "'MSS'", "]", "if", "not", "MSS", ":", "raise", "Kamene_Exception", "(", "\"TCP window value requires MSS, and MSS option not set\"", ")", "pkt", ".", "payload", ".", "window", "=", "MSS", "[", "0", "]", "[", "1", "]", "*", "int", "(", "pers", "[", "0", "]", "[", "1", ":", "]", ")", "else", ":", "raise", "Kamene_Exception", "(", "'Unhandled window size specification'", ")", "# ttl", "pkt", ".", "ttl", "=", "pers", "[", "1", "]", "-", "extrahops", "# DF flag", "pkt", ".", "flags", "|=", "(", "2", "*", "pers", "[", "2", "]", ")", "## FIXME: ss (packet size) not handled (how ? may be with D quirk", "## if present)", "# Quirks", "if", "pers", "[", "5", "]", "!=", "'.'", ":", "for", "qq", "in", "pers", "[", "5", "]", ":", "## FIXME: not handled: P, I, X, !", "# T handled with the Timestamp option", "if", "qq", "==", "'Z'", ":", "pkt", ".", "id", "=", "0", "elif", "qq", "==", "'U'", ":", "pkt", ".", "payload", ".", "urgptr", "=", "RandShort", "(", ")", "elif", "qq", "==", "'A'", ":", "pkt", ".", "payload", ".", "ack", "=", "RandInt", "(", ")", "elif", "qq", "==", "'F'", ":", "#if db == p0fo_kdb:", "# pkt.payload.flags |= 0x20 # U", "#else:", "pkt", ".", "payload", ".", "flags", "|=", "RandChoice", "(", "8", ",", "32", ",", "40", ")", "#P / U / PU", "elif", "qq", "==", "'D'", "and", "db", "!=", "p0fo_kdb", ":", "pkt", "/=", "conf", ".", "raw_layer", "(", "load", "=", "RandString", "(", "random", ".", "randint", "(", "1", ",", "10", ")", ")", ")", "# XXX p0fo.fp", "elif", "qq", "==", "'Q'", ":", "pkt", ".", "payload", ".", "seq", "=", "pkt", ".", "payload", ".", "ack", "#elif qq == '0': pkt.payload.seq = 0", "#if db == p0fr_kdb:", "# '0' quirk is actually not only for p0fr.fp (see", "# packet2p0f())", "if", "'0'", "in", "pers", "[", "5", "]", ":", "pkt", ".", "payload", ".", "seq", "=", "0", "elif", "pkt", ".", "payload", ".", "seq", "==", "0", ":", "pkt", ".", "payload", ".", "seq", "=", "RandInt", "(", ")", "while", "pkt", ".", "underlayer", ":", "pkt", "=", "pkt", ".", "underlayer", "return", "pkt" ]
Modifies pkt so that p0f will think it has been sent by a specific OS. If osdetails is None, then we randomly pick up a personality matching osgenre. If osgenre and signature are also None, we use a local signature (using p0f_getlocalsigs). If signature is specified (as a tuple), we use the signature. For now, only TCP Syn packets are supported. Some specifications of the p0f.fp file are not (yet) implemented.
[ "Modifies", "pkt", "so", "that", "p0f", "will", "think", "it", "has", "been", "sent", "by", "a", "specific", "OS", ".", "If", "osdetails", "is", "None", "then", "we", "randomly", "pick", "up", "a", "personality", "matching", "osgenre", ".", "If", "osgenre", "and", "signature", "are", "also", "None", "we", "use", "a", "local", "signature", "(", "using", "p0f_getlocalsigs", ")", ".", "If", "signature", "is", "specified", "(", "as", "a", "tuple", ")", "we", "use", "the", "signature", "." ]
python
train
sorgerlab/indra
indra/sources/sofia/api.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sofia/api.py#L35-L80
def process_text(text, out_file='sofia_output.json', auth=None): """Return processor by processing text given as a string. Parameters ---------- text : str A string containing the text to be processed with Sofia. out_file : Optional[str] The path to a file to save the reader's output into. Default: sofia_output.json auth : Optional[list] A username/password pair for the Sofia web service. If not given, the SOFIA_USERNAME and SOFIA_PASSWORD values are loaded from either the INDRA config or the environment. Returns ------- sp : indra.sources.sofia.processor.SofiaProcessor A SofiaProcessor object which has a list of extracted INDRA Statements as its statements attribute. If the API did not process the text, None is returned. """ text_json = {'text': text} if not auth: user, password = _get_sofia_auth() else: user, password = auth if not user or not password: raise ValueError('Could not use SOFIA web service since' ' authentication information is missing. Please' ' set SOFIA_USERNAME and SOFIA_PASSWORD in the' ' INDRA configuration file or as environmental' ' variables.') json_response, status_code, process_status = \ _text_processing(text_json=text_json, user=user, password=password) # Check response status if process_status != 'Done' or status_code != 200: return None # Cache reading output if out_file: with open(out_file, 'w') as fh: json.dump(json_response, fh, indent=1) return process_json(json_response)
[ "def", "process_text", "(", "text", ",", "out_file", "=", "'sofia_output.json'", ",", "auth", "=", "None", ")", ":", "text_json", "=", "{", "'text'", ":", "text", "}", "if", "not", "auth", ":", "user", ",", "password", "=", "_get_sofia_auth", "(", ")", "else", ":", "user", ",", "password", "=", "auth", "if", "not", "user", "or", "not", "password", ":", "raise", "ValueError", "(", "'Could not use SOFIA web service since'", "' authentication information is missing. Please'", "' set SOFIA_USERNAME and SOFIA_PASSWORD in the'", "' INDRA configuration file or as environmental'", "' variables.'", ")", "json_response", ",", "status_code", ",", "process_status", "=", "_text_processing", "(", "text_json", "=", "text_json", ",", "user", "=", "user", ",", "password", "=", "password", ")", "# Check response status", "if", "process_status", "!=", "'Done'", "or", "status_code", "!=", "200", ":", "return", "None", "# Cache reading output", "if", "out_file", ":", "with", "open", "(", "out_file", ",", "'w'", ")", "as", "fh", ":", "json", ".", "dump", "(", "json_response", ",", "fh", ",", "indent", "=", "1", ")", "return", "process_json", "(", "json_response", ")" ]
Return processor by processing text given as a string. Parameters ---------- text : str A string containing the text to be processed with Sofia. out_file : Optional[str] The path to a file to save the reader's output into. Default: sofia_output.json auth : Optional[list] A username/password pair for the Sofia web service. If not given, the SOFIA_USERNAME and SOFIA_PASSWORD values are loaded from either the INDRA config or the environment. Returns ------- sp : indra.sources.sofia.processor.SofiaProcessor A SofiaProcessor object which has a list of extracted INDRA Statements as its statements attribute. If the API did not process the text, None is returned.
[ "Return", "processor", "by", "processing", "text", "given", "as", "a", "string", "." ]
python
train
SeabornGames/Table
seaborn_table/table.py
https://github.com/SeabornGames/Table/blob/0c474ef2fb00db0e7cf47e8af91e3556c2e7485a/seaborn_table/table.py#L577-L590
def obj_to_txt(self, file_path=None, deliminator=None, tab=None, quote_numbers=True, quote_empty_str=False): """ This will return a simple str table. :param file_path: str of the path to the file :param deliminator: str of the separator to use between columns :param tab: string of offset of the table :param quote_numbers: bool if True will quote numbers that are strings :param quote_empty_str: bool if True will quote empty strings :return: str of the converted text table """ return self.obj_to_str(file_path=file_path, deliminator=deliminator, tab=tab, quote_numbers=quote_numbers, quote_empty_str=quote_empty_str)
[ "def", "obj_to_txt", "(", "self", ",", "file_path", "=", "None", ",", "deliminator", "=", "None", ",", "tab", "=", "None", ",", "quote_numbers", "=", "True", ",", "quote_empty_str", "=", "False", ")", ":", "return", "self", ".", "obj_to_str", "(", "file_path", "=", "file_path", ",", "deliminator", "=", "deliminator", ",", "tab", "=", "tab", ",", "quote_numbers", "=", "quote_numbers", ",", "quote_empty_str", "=", "quote_empty_str", ")" ]
This will return a simple str table. :param file_path: str of the path to the file :param deliminator: str of the separator to use between columns :param tab: string of offset of the table :param quote_numbers: bool if True will quote numbers that are strings :param quote_empty_str: bool if True will quote empty strings :return: str of the converted text table
[ "This", "will", "return", "a", "simple", "str", "table", ".", ":", "param", "file_path", ":", "str", "of", "the", "path", "to", "the", "file", ":", "param", "keys", ":", "list", "of", "str", "of", "the", "order", "of", "keys", "to", "use", ":", "param", "tab", ":", "string", "of", "offset", "of", "the", "table", ":", "param", "quote_numbers", ":", "bool", "if", "True", "will", "quote", "numbers", "that", "are", "strings", ":", "param", "quote_empty_str", ":", "bool", "if", "True", "will", "quote", "empty", "strings", ":", "return", ":", "str", "of", "the", "converted", "markdown", "tables" ]
python
train
gem/oq-engine
openquake/hazardlib/geo/geodetic.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/geodetic.py#L528-L540
def _prepare_coords(lons1, lats1, lons2, lats2): """ Convert two pairs of spherical coordinates in decimal degrees to numpy arrays of radians. Makes sure that respective coordinates in pairs have the same shape. """ lons1 = numpy.radians(lons1) lats1 = numpy.radians(lats1) assert lons1.shape == lats1.shape lons2 = numpy.radians(lons2) lats2 = numpy.radians(lats2) assert lons2.shape == lats2.shape return lons1, lats1, lons2, lats2
[ "def", "_prepare_coords", "(", "lons1", ",", "lats1", ",", "lons2", ",", "lats2", ")", ":", "lons1", "=", "numpy", ".", "radians", "(", "lons1", ")", "lats1", "=", "numpy", ".", "radians", "(", "lats1", ")", "assert", "lons1", ".", "shape", "==", "lats1", ".", "shape", "lons2", "=", "numpy", ".", "radians", "(", "lons2", ")", "lats2", "=", "numpy", ".", "radians", "(", "lats2", ")", "assert", "lons2", ".", "shape", "==", "lats2", ".", "shape", "return", "lons1", ",", "lats1", ",", "lons2", ",", "lats2" ]
Convert two pairs of spherical coordinates in decimal degrees to numpy arrays of radians. Makes sure that respective coordinates in pairs have the same shape.
[ "Convert", "two", "pairs", "of", "spherical", "coordinates", "in", "decimal", "degrees", "to", "numpy", "arrays", "of", "radians", ".", "Makes", "sure", "that", "respective", "coordinates", "in", "pairs", "have", "the", "same", "shape", "." ]
python
train
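numpy.radians, which _prepare_coords leans on, accepts scalars and arrays alike; scalars come back as 0-d values whose .shape is (), so the shape assertions hold either way:

```python
import numpy

print(numpy.radians(180.0))                     # 3.141592653589793
print(numpy.radians(numpy.array([0.0, 90.0])))  # [0.         1.57079633]
print(numpy.radians(180.0).shape)               # () -- 0-d, but still has .shape
```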
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/eplusdata.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/eplusdata.py#L146-L174
def initdict(self, fname): """create a blank dictionary""" if isinstance(fname, Idd): self.dt, self.dtls = fname.dt, fname.dtls return self.dt, self.dtls astr = mylib2.readfile(fname) nocom = removecomment(astr, '!') idfst = nocom alist = idfst.split(';') lss = [] for element in alist: lst = element.split(',') lss.append(lst) for i in range(0, len(lss)): for j in range(0, len(lss[i])): lss[i][j] = lss[i][j].strip() dt = {} dtls = [] for element in lss: if element[0] == '': continue dt[element[0].upper()] = [] dtls.append(element[0].upper()) self.dt, self.dtls = dt, dtls return dt, dtls
[ "def", "initdict", "(", "self", ",", "fname", ")", ":", "if", "isinstance", "(", "fname", ",", "Idd", ")", ":", "self", ".", "dt", ",", "self", ".", "dtls", "=", "fname", ".", "dt", ",", "fname", ".", "dtls", "return", "self", ".", "dt", ",", "self", ".", "dtls", "astr", "=", "mylib2", ".", "readfile", "(", "fname", ")", "nocom", "=", "removecomment", "(", "astr", ",", "'!'", ")", "idfst", "=", "nocom", "alist", "=", "idfst", ".", "split", "(", "';'", ")", "lss", "=", "[", "]", "for", "element", "in", "alist", ":", "lst", "=", "element", ".", "split", "(", "','", ")", "lss", ".", "append", "(", "lst", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "lss", ")", ")", ":", "for", "j", "in", "range", "(", "0", ",", "len", "(", "lss", "[", "i", "]", ")", ")", ":", "lss", "[", "i", "]", "[", "j", "]", "=", "lss", "[", "i", "]", "[", "j", "]", ".", "strip", "(", ")", "dt", "=", "{", "}", "dtls", "=", "[", "]", "for", "element", "in", "lss", ":", "if", "element", "[", "0", "]", "==", "''", ":", "continue", "dt", "[", "element", "[", "0", "]", ".", "upper", "(", ")", "]", "=", "[", "]", "dtls", ".", "append", "(", "element", "[", "0", "]", ".", "upper", "(", ")", ")", "self", ".", "dt", ",", "self", ".", "dtls", "=", "dt", ",", "dtls", "return", "dt", ",", "dtls" ]
create a blank dictionary
[ "create", "a", "blank", "dictionary" ]
python
train
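initdict() strips comments, splits objects on ';' and fields on ',', then keys the dictionary on the upper-cased first field. A compressed sketch of that parse on a made-up IDF-like snippet; removecomment below is a one-line stand-in for the helper the original imports:

```python
raw = """
Version, 8.9;  ! a comment
Building, MyBuilding, 0.0;
Building, Other;
"""

def removecomment(astr, marker='!'):
    # Drop everything after the comment marker on each line.
    return '\n'.join(line.split(marker)[0] for line in astr.splitlines())

# Objects end at ';', fields split on ',', and the upper-cased first
# field becomes the dictionary key -- the same steps as initdict().
lss = [[field.strip() for field in element.split(',')]
       for element in removecomment(raw).split(';')]
dt = {element[0].upper(): [] for element in lss if element[0]}
print(dt)  # {'VERSION': [], 'BUILDING': []}
```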
Yelp/detect-secrets
detect_secrets/core/secrets_collection.py
https://github.com/Yelp/detect-secrets/blob/473923ea71f1ac2b5ea1eacc49b98f97967e3d05/detect_secrets/core/secrets_collection.py#L274-L294
def _results_accumulator(self, filename): """ :type filename: str :param filename: name of file, used as a key to store in self.data :yields: (dict, detect_secrets.plugins.base.BasePlugin) Caller is responsible for updating the dictionary with results of plugin analysis. """ file_results = {} for plugin in self.plugins: yield file_results, plugin if not file_results: return if filename not in self.data: self.data[filename] = file_results else: self.data[filename].update(file_results)
[ "def", "_results_accumulator", "(", "self", ",", "filename", ")", ":", "file_results", "=", "{", "}", "for", "plugin", "in", "self", ".", "plugins", ":", "yield", "file_results", ",", "plugin", "if", "not", "file_results", ":", "return", "if", "filename", "not", "in", "self", ".", "data", ":", "self", ".", "data", "[", "filename", "]", "=", "file_results", "else", ":", "self", ".", "data", "[", "filename", "]", ".", "update", "(", "file_results", ")" ]
:type filename: str :param filename: name of file, used as a key to store in self.data :yields: (dict, detect_secrets.plugins.base.BasePlugin) Caller is responsible for updating the dictionary with results of plugin analysis.
[ ":", "type", "filename", ":", "str", ":", "param", "filename", ":", "name", "of", "file", "used", "as", "a", "key", "to", "store", "in", "self", ".", "data" ]
python
train
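_results_accumulator() is a generator used as an accumulator: the caller mutates the yielded dict, and the code after the yield loop runs only once the generator is fully consumed. A minimal sketch of that control flow; Collector and the lambda plugin are hypothetical:

```python
class Collector:
    def __init__(self, plugins):
        self.plugins = plugins
        self.data = {}

    def _results_accumulator(self, filename):
        file_results = {}
        for plugin in self.plugins:
            yield file_results, plugin  # caller fills file_results here
        # Runs on exhaustion, i.e. only if the caller consumed every yield.
        if file_results:
            self.data.setdefault(filename, {}).update(file_results)

    def scan(self, filename, line):
        for results, plugin in self._results_accumulator(filename):
            results.update(plugin(line))

c = Collector([lambda line: {'hits': line.count('secret')}])
c.scan('config.yml', 'secret=hunter2')
print(c.data)  # {'config.yml': {'hits': 1}}
```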
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L157-L165
def inspect_commit(self, commit): """ Returns info about a specific Commit. Params: * commit: A tuple, string, or Commit object representing the commit. """ req = proto.InspectCommitRequest(commit=commit_from(commit)) return self.stub.InspectCommit(req, metadata=self.metadata)
[ "def", "inspect_commit", "(", "self", ",", "commit", ")", ":", "req", "=", "proto", ".", "InspectCommitRequest", "(", "commit", "=", "commit_from", "(", "commit", ")", ")", "return", "self", ".", "stub", ".", "InspectCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")" ]
Returns info about a specific Commit. Params: * commit: A tuple, string, or Commit object representing the commit.
[ "Returns", "info", "about", "a", "specific", "Commit", "." ]
python
train
bioidiap/bob.bio.spear
bob/bio/spear/utils/extraction.py
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/utils/extraction.py#L40-L51
def calc_std(c0, c1=[]): """ Calculates the standard deviation of the data.""" if c1 == []: return numpy.std(c0, 0) prop = float(len(c0)) / float(len(c1)) if prop < 1: p0 = int(math.ceil(1 / prop)) p1 = 1 else: p0 = 1 p1 = int(math.ceil(prop)) return numpy.std(numpy.vstack(p0 * [c0] + p1 * [c1]), 0)
[ "def", "calc_std", "(", "c0", ",", "c1", "=", "[", "]", ")", ":", "if", "c1", "==", "[", "]", ":", "return", "numpy", ".", "std", "(", "c0", ",", "0", ")", "prop", "=", "float", "(", "len", "(", "c0", ")", ")", "/", "float", "(", "len", "(", "c1", ")", ")", "if", "prop", "<", "1", ":", "p0", "=", "int", "(", "math", ".", "ceil", "(", "1", "/", "prop", ")", ")", "p1", "=", "1", "else", ":", "p0", "=", "1", "p1", "=", "int", "(", "math", ".", "ceil", "(", "prop", ")", ")", "return", "numpy", ".", "std", "(", "numpy", ".", "vstack", "(", "p0", "*", "[", "c0", "]", "+", "p1", "*", "[", "c1", "]", ")", ",", "0", ")" ]
Calculates the standard deviation of the data.
[ "Calculates", "the", "variance", "of", "the", "data", "." ]
python
train
DataDog/integrations-core
agent_metrics/datadog_checks/agent_metrics/agent_metrics.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/agent_metrics/datadog_checks/agent_metrics/agent_metrics.py#L87-L117
def _register_psutil_metrics(self, stats, names_to_metric_types, tags=None): """ Saves sample metrics from psutil :param stats: a dictionary that looks like: { 'memory_info': OrderedDict([('rss', 24395776), ('vms', 144666624)]), 'io_counters': OrderedDict([('read_count', 4536), ('write_count', 100), ('read_bytes', 0), ('write_bytes', 61440)]) ... } This creates a metric like `datadog.agent.collector.{key_1}.{key_2}` where key_1 is a top-level key in `stats`, and key_2 is a nested key. E.g. datadog.agent.collector.memory_info.rss """ if tags is None: tags = [] base_metric = 'datadog.agent.collector.{0}.{1}' # TODO: May have to call self.normalize(metric_name) to get a compliant name for k, v in stats.iteritems(): metric_type = names_to_metric_types[k] if isinstance(v, dict): for _k, _v in v.iteritems(): full_metric_name = base_metric.format(k, _k) self._send_single_metric(full_metric_name, _v, metric_type, tags) else: full_metric_name = 'datadog.agent.collector.{0}'.format(k) self._send_single_metric(full_metric_name, v, metric_type, tags)
[ "def", "_register_psutil_metrics", "(", "self", ",", "stats", ",", "names_to_metric_types", ",", "tags", "=", "None", ")", ":", "if", "tags", "is", "None", ":", "tags", "=", "[", "]", "base_metric", "=", "'datadog.agent.collector.{0}.{1}'", "# TODO: May have to call self.normalize(metric_name) to get a compliant name", "for", "k", ",", "v", "in", "stats", ".", "iteritems", "(", ")", ":", "metric_type", "=", "names_to_metric_types", "[", "k", "]", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "for", "_k", ",", "_v", "in", "v", ".", "iteritems", "(", ")", ":", "full_metric_name", "=", "base_metric", ".", "format", "(", "k", ",", "_k", ")", "self", ".", "_send_single_metric", "(", "full_metric_name", ",", "_v", ",", "metric_type", ",", "tags", ")", "else", ":", "full_metric_name", "=", "'datadog.agent.collector.{0}'", ".", "format", "(", "k", ")", "self", ".", "_send_single_metric", "(", "full_metric_name", ",", "v", ",", "metric_type", ",", "tags", ")" ]
Saves sample metrics from psutil :param stats: a dictionary that looks like: { 'memory_info': OrderedDict([('rss', 24395776), ('vms', 144666624)]), 'io_counters': OrderedDict([('read_count', 4536), ('write_count', 100), ('read_bytes', 0), ('write_bytes', 61440)]) ... } This creates a metric like `datadog.agent.collector.{key_1}.{key_2}` where key_1 is a top-level key in `stats`, and key_2 is a nested key. E.g. datadog.agent.collector.memory_info.rss
[ "Saves", "sample", "metrics", "from", "psutil" ]
python
train
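The registration above flattens one level of dict nesting into dotted metric names (the original's iteritems() marks it as Python 2 code). The same flattening in Python 3, minus the Agent plumbing:

```python
stats = {
    'memory_info': {'rss': 24395776, 'vms': 144666624},
    'threads': 12,
}

metrics = {}
for k, v in stats.items():
    if isinstance(v, dict):  # nested stats become "<key_1>.<key_2>" names
        for _k, _v in v.items():
            metrics['datadog.agent.collector.{0}.{1}'.format(k, _k)] = _v
    else:
        metrics['datadog.agent.collector.{0}'.format(k)] = v

print(sorted(metrics))
# ['datadog.agent.collector.memory_info.rss',
#  'datadog.agent.collector.memory_info.vms',
#  'datadog.agent.collector.threads']
```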
bslatkin/dpxdt
dpxdt/server/api.py
https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/api.py#L598-L617
def upload(): """Uploads an artifact referenced by a run.""" build = g.build utils.jsonify_assert(len(request.files) == 1, 'Need exactly one uploaded file') file_storage = request.files.values()[0] data = file_storage.read() content_type, _ = mimetypes.guess_type(file_storage.filename) artifact = _save_artifact(build, data, content_type) db.session.add(artifact) db.session.commit() return flask.jsonify( success=True, build_id=build.id, sha1sum=artifact.id, content_type=content_type)
[ "def", "upload", "(", ")", ":", "build", "=", "g", ".", "build", "utils", ".", "jsonify_assert", "(", "len", "(", "request", ".", "files", ")", "==", "1", ",", "'Need exactly one uploaded file'", ")", "file_storage", "=", "request", ".", "files", ".", "values", "(", ")", "[", "0", "]", "data", "=", "file_storage", ".", "read", "(", ")", "content_type", ",", "_", "=", "mimetypes", ".", "guess_type", "(", "file_storage", ".", "filename", ")", "artifact", "=", "_save_artifact", "(", "build", ",", "data", ",", "content_type", ")", "db", ".", "session", ".", "add", "(", "artifact", ")", "db", ".", "session", ".", "commit", "(", ")", "return", "flask", ".", "jsonify", "(", "success", "=", "True", ",", "build_id", "=", "build", ".", "id", ",", "sha1sum", "=", "artifact", ".", "id", ",", "content_type", "=", "content_type", ")" ]
Uploads an artifact referenced by a run.
[ "Uploads", "an", "artifact", "referenced", "by", "a", "run", "." ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L20184-L20277
def process_create_ex(self, executable, arguments, environment_changes, flags, timeout_ms, priority, affinity): """Creates a new process running in the guest with the extended options for setting the process priority and affinity. See :py:func:`IGuestSession.process_create` for more information. in executable of type str Full path to the file to execute in the guest. The file has to exist in the guest VM with executable right to the session user in order to succeed. If empty/null, the first entry in the @a arguments array will be used instead (i.e. argv[0]). in arguments of type str Array of arguments passed to the new process. Starting with VirtualBox 5.0 this array starts with argument 0 instead of argument 1 as in previous versions. Whether the zeroth argument can be passed to the guest depends on the VBoxService version running there. If you depend on this, check that the :py:func:`IGuestSession.protocol_version` is 3 or higher. in environment_changes of type str Set of environment changes to complement :py:func:`IGuestSession.environment_changes` . Takes precedence over the session ones. The changes are in putenv format, i.e. "VAR=VALUE" for setting and "VAR" for unsetting. The changes are applied to the base environment of the impersonated guest user (:py:func:`IGuestSession.environment_base` ) when creating the process. (This is done on the guest side of things in order to be compatible with older guest additions. That is one of the motivations for not passing in the whole environment here.) in flags of type :class:`ProcessCreateFlag` Process creation flags, see :py:class:`ProcessCreateFlag` for detailed description of available flags. in timeout_ms of type int Timeout (in ms) for limiting the guest process' running time. Pass 0 for an infinite timeout. On timeout the guest process will be killed and its status will be put to an appropriate value. See :py:class:`ProcessStatus` for more information. in priority of type :class:`ProcessPriority` Process priority to use for execution, see :py:class:`ProcessPriority` for available priority levels. This is silently ignored if not supported by guest additions. in affinity of type int Processor affinity to set for the new process. This is a list of guest CPU numbers the process is allowed to run on. This is silently ignored if the guest does not support setting the affinity of processes, or if the guest additions does not implement this feature. return guest_process of type :class:`IGuestProcess` Guest process object of the newly created process. """ if not isinstance(executable, basestring): raise TypeError("executable can only be an instance of type basestring") if not isinstance(arguments, list): raise TypeError("arguments can only be an instance of type list") for a in arguments[:10]: if not isinstance(a, basestring): raise TypeError( "array can only contain objects of type basestring") if not isinstance(environment_changes, list): raise TypeError("environment_changes can only be an instance of type list") for a in environment_changes[:10]: if not isinstance(a, basestring): raise TypeError( "array can only contain objects of type basestring") if not isinstance(flags, list): raise TypeError("flags can only be an instance of type list") for a in flags[:10]: if not isinstance(a, ProcessCreateFlag): raise TypeError( "array can only contain objects of type ProcessCreateFlag") if not isinstance(timeout_ms, baseinteger): raise TypeError("timeout_ms can only be an instance of type baseinteger") if not isinstance(priority, ProcessPriority): raise TypeError("priority can only be an instance of type ProcessPriority") if not isinstance(affinity, list): raise TypeError("affinity can only be an instance of type list") for a in affinity[:10]: if not isinstance(a, baseinteger): raise TypeError( "array can only contain objects of type baseinteger") guest_process = self._call("processCreateEx", in_p=[executable, arguments, environment_changes, flags, timeout_ms, priority, affinity]) guest_process = IGuestProcess(guest_process) return guest_process
[ "def", "process_create_ex", "(", "self", ",", "executable", ",", "arguments", ",", "environment_changes", ",", "flags", ",", "timeout_ms", ",", "priority", ",", "affinity", ")", ":", "if", "not", "isinstance", "(", "executable", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"executable can only be an instance of type basestring\"", ")", "if", "not", "isinstance", "(", "arguments", ",", "list", ")", ":", "raise", "TypeError", "(", "\"arguments can only be an instance of type list\"", ")", "for", "a", "in", "arguments", "[", ":", "10", "]", ":", "if", "not", "isinstance", "(", "a", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"array can only contain objects of type basestring\"", ")", "if", "not", "isinstance", "(", "environment_changes", ",", "list", ")", ":", "raise", "TypeError", "(", "\"environment_changes can only be an instance of type list\"", ")", "for", "a", "in", "environment_changes", "[", ":", "10", "]", ":", "if", "not", "isinstance", "(", "a", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"array can only contain objects of type basestring\"", ")", "if", "not", "isinstance", "(", "flags", ",", "list", ")", ":", "raise", "TypeError", "(", "\"flags can only be an instance of type list\"", ")", "for", "a", "in", "flags", "[", ":", "10", "]", ":", "if", "not", "isinstance", "(", "a", ",", "ProcessCreateFlag", ")", ":", "raise", "TypeError", "(", "\"array can only contain objects of type ProcessCreateFlag\"", ")", "if", "not", "isinstance", "(", "timeout_ms", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"timeout_ms can only be an instance of type baseinteger\"", ")", "if", "not", "isinstance", "(", "priority", ",", "ProcessPriority", ")", ":", "raise", "TypeError", "(", "\"priority can only be an instance of type ProcessPriority\"", ")", "if", "not", "isinstance", "(", "affinity", ",", "list", ")", ":", "raise", "TypeError", "(", "\"affinity can only be an instance of type list\"", ")", "for", "a", "in", "affinity", "[", ":", "10", "]", ":", "if", "not", "isinstance", "(", "a", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"array can only contain objects of type baseinteger\"", ")", "guest_process", "=", "self", ".", "_call", "(", "\"processCreateEx\"", ",", "in_p", "=", "[", "executable", ",", "arguments", ",", "environment_changes", ",", "flags", ",", "timeout_ms", ",", "priority", ",", "affinity", "]", ")", "guest_process", "=", "IGuestProcess", "(", "guest_process", ")", "return", "guest_process" ]
Creates a new process running in the guest with the extended options for setting the process priority and affinity. See :py:func:`IGuestSession.process_create` for more information. in executable of type str Full path to the file to execute in the guest. The file has to exist in the guest VM with executable right to the session user in order to succeed. If empty/null, the first entry in the @a arguments array will be used instead (i.e. argv[0]). in arguments of type str Array of arguments passed to the new process. Starting with VirtualBox 5.0 this array starts with argument 0 instead of argument 1 as in previous versions. Whether the zeroth argument can be passed to the guest depends on the VBoxService version running there. If you depend on this, check that the :py:func:`IGuestSession.protocol_version` is 3 or higher. in environment_changes of type str Set of environment changes to complement :py:func:`IGuestSession.environment_changes` . Takes precedence over the session ones. The changes are in putenv format, i.e. "VAR=VALUE" for setting and "VAR" for unsetting. The changes are applied to the base environment of the impersonated guest user (:py:func:`IGuestSession.environment_base` ) when creating the process. (This is done on the guest side of things in order to be compatible with older guest additions. That is one of the motivations for not passing in the whole environment here.) in flags of type :class:`ProcessCreateFlag` Process creation flags, see :py:class:`ProcessCreateFlag` for detailed description of available flags. in timeout_ms of type int Timeout (in ms) for limiting the guest process' running time. Pass 0 for an infinite timeout. On timeout the guest process will be killed and its status will be put to an appropriate value. See :py:class:`ProcessStatus` for more information. in priority of type :class:`ProcessPriority` Process priority to use for execution, see :py:class:`ProcessPriority` for available priority levels. This is silently ignored if not supported by guest additions. in affinity of type int Processor affinity to set for the new process. This is a list of guest CPU numbers the process is allowed to run on. This is silently ignored if the guest does not support setting the affinity of processes, or if the guest additions does not implement this feature. return guest_process of type :class:`IGuestProcess` Guest process object of the newly created process.
[ "Creates", "a", "new", "process", "running", "in", "the", "guest", "with", "the", "extended", "options", "for", "setting", "the", "process", "priority", "and", "affinity", ".", "See", ":", "py", ":", "func", ":", "IGuestSession", ".", "process_create", "for", "more", "information", "." ]
python
train
CameronLonsdale/lantern
lantern/modules/simplesubstitution.py
https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/modules/simplesubstitution.py#L63-L81
def decrypt(key, ciphertext):
    """Decrypt Simple Substitution enciphered ``ciphertext`` using ``key``.

    Example:
        >>> decrypt("PQSTUVWXYZCODEBRAKINGFHJLM", "XUOOB")
        HELLO

    Args:
        key (iterable): The key to use
        ciphertext (str): The text to decrypt

    Returns:
        Decrypted ciphertext
    """
    # TODO: Is it worth keeping this here or should I only accept strings?
    key = ''.join(key)

    alphabet = string.ascii_letters
    cipher_alphabet = key.lower() + key.upper()

    return ciphertext.translate(str.maketrans(cipher_alphabet, alphabet))
[ "def", "decrypt", "(", "key", ",", "ciphertext", ")", ":", "# TODO: Is it worth keeping this here I should I only accept strings?", "key", "=", "''", ".", "join", "(", "key", ")", "alphabet", "=", "string", ".", "ascii_letters", "cipher_alphabet", "=", "key", ".", "lower", "(", ")", "+", "key", ".", "upper", "(", ")", "return", "ciphertext", ".", "translate", "(", "str", ".", "maketrans", "(", "cipher_alphabet", ",", "alphabet", ")", ")" ]
Decrypt Simple Substitution enciphered ``ciphertext`` using ``key``. Example: >>> decrypt("PQSTUVWXYZCODEBRAKINGFHJLM", "XUOOB") HELLO Args: key (iterable): The key to use ciphertext (str): The text to decrypt Returns: Decrypted ciphertext
[ "Decrypt", "Simple", "Substitution", "enciphered", "ciphertext", "using", "key", "." ]
python
train
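The decrypt record above is self-contained apart from the string module; a minimal standalone sketch of the same translation-table idea (standard library only, the lantern package itself is not assumed):

import string

def decrypt(key, ciphertext):
    # Map the cipher alphabet (key in lower + upper case) back onto a-zA-Z.
    key = ''.join(key)
    cipher_alphabet = key.lower() + key.upper()
    return ciphertext.translate(str.maketrans(cipher_alphabet, string.ascii_letters))

print(decrypt("PQSTUVWXYZCODEBRAKINGFHJLM", "XUOOB"))  # HELLO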
pybel/pybel-tools
src/pybel_tools/mutation/expansion.py
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/mutation/expansion.py#L216-L221
def enrich_complexes(graph: BELGraph) -> None: """Add all of the members of the complex abundances to the graph.""" nodes = list(get_nodes_by_function(graph, COMPLEX)) for u in nodes: for v in u.members: graph.add_has_component(u, v)
[ "def", "enrich_complexes", "(", "graph", ":", "BELGraph", ")", "->", "None", ":", "nodes", "=", "list", "(", "get_nodes_by_function", "(", "graph", ",", "COMPLEX", ")", ")", "for", "u", "in", "nodes", ":", "for", "v", "in", "u", ".", "members", ":", "graph", ".", "add_has_component", "(", "u", ",", "v", ")" ]
Add all of the members of the complex abundances to the graph.
[ "Add", "all", "of", "the", "members", "of", "the", "complex", "abundances", "to", "the", "graph", "." ]
python
valid
WhyNotHugo/django-afip
django_afip/models.py
https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L307-L319
def get_certificate_expiration(self): """ Gets the certificate expiration from the certificate Gets the certificate expiration from the certificate file. Note that this value is stored into ``certificate_expiration`` when an instance is saved, so you should generally prefer that method (since this one requires reading and parsing the entire certificate). """ datestring = self.certificate_object.get_notAfter().decode() dt = datetime.strptime(datestring, '%Y%m%d%H%M%SZ') return dt.replace(tzinfo=timezone.utc)
[ "def", "get_certificate_expiration", "(", "self", ")", ":", "datestring", "=", "self", ".", "certificate_object", ".", "get_notAfter", "(", ")", ".", "decode", "(", ")", "dt", "=", "datetime", ".", "strptime", "(", "datestring", ",", "'%Y%m%d%H%M%SZ'", ")", "return", "dt", ".", "replace", "(", "tzinfo", "=", "timezone", ".", "utc", ")" ]
Gets the certificate expiration from the certificate Gets the certificate expiration from the certificate file. Note that this value is stored into ``certificate_expiration`` when an instance is saved, so you should generally prefer that method (since this one requires reading and parsing the entire certificate).
[ "Gets", "the", "certificate", "expiration", "from", "the", "certificate" ]
python
train
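The date handling in get_certificate_expiration above is plain standard-library parsing of an OpenSSL notAfter string; a self-contained illustration (the byte string is a made-up expiry, not real certificate data):

from datetime import datetime, timezone

datestring = b'20301231235959Z'.decode()  # hypothetical notAfter value
dt = datetime.strptime(datestring, '%Y%m%d%H%M%SZ').replace(tzinfo=timezone.utc)
print(dt.isoformat())  # 2030-12-31T23:59:59+00:00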
hellosign/hellosign-python-sdk
hellosign_sdk/hsclient.py
https://github.com/hellosign/hellosign-python-sdk/blob/4325a29ad5766380a214eac3914511f62f7ecba4/hellosign_sdk/hsclient.py#L517-L533
def cancel_signature_request(self, signature_request_id):
    ''' Cancels a SignatureRequest

    Cancels a SignatureRequest. After canceling, no one will be able to sign
    or access the SignatureRequest or its documents. Only the requester can
    cancel and only before everyone has signed.

    Args:

        signature_request_id (str): The id of the signature request to cancel

    Returns:
        None

    '''
    request = self._get_request()
    request.post(url=self.SIGNATURE_REQUEST_CANCEL_URL + signature_request_id, get_json=False)
[ "def", "cancel_signature_request", "(", "self", ",", "signature_request_id", ")", ":", "request", "=", "self", ".", "_get_request", "(", ")", "request", ".", "post", "(", "url", "=", "self", ".", "SIGNATURE_REQUEST_CANCEL_URL", "+", "signature_request_id", ",", "get_json", "=", "False", ")" ]
Cancels a SignatureRequest

Cancels a SignatureRequest. After canceling, no one will be able to sign or access the SignatureRequest or its documents. Only the requester can cancel and only before everyone has signed.

Args:

    signature_request_id (str): The id of the signature request to cancel

Returns:
    None
[ "Cancels", "a", "SignatureRequest" ]
python
train
sosy-lab/benchexec
benchexec/runexecutor.py
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/runexecutor.py#L1094-L1111
def check_for_new_files_in_home(self): """Check that the user account's home directory now does not contain more files than when this instance was created, and warn otherwise. Does nothing if no user account was given to RunExecutor. @return set of newly created files """ if not self._user: return None try: created_files = set(self._listdir(self._home_dir)).difference(self._home_dir_content) except (subprocess.CalledProcessError, IOError): # Probably home directory does not exist created_files = [] if created_files: logging.warning('The tool created the following files in %s, ' 'this may influence later runs:\n\t%s', self._home_dir, '\n\t'.join(created_files)) return created_files
[ "def", "check_for_new_files_in_home", "(", "self", ")", ":", "if", "not", "self", ".", "_user", ":", "return", "None", "try", ":", "created_files", "=", "set", "(", "self", ".", "_listdir", "(", "self", ".", "_home_dir", ")", ")", ".", "difference", "(", "self", ".", "_home_dir_content", ")", "except", "(", "subprocess", ".", "CalledProcessError", ",", "IOError", ")", ":", "# Probably home directory does not exist", "created_files", "=", "[", "]", "if", "created_files", ":", "logging", ".", "warning", "(", "'The tool created the following files in %s, '", "'this may influence later runs:\\n\\t%s'", ",", "self", ".", "_home_dir", ",", "'\\n\\t'", ".", "join", "(", "created_files", ")", ")", "return", "created_files" ]
Check that the user account's home directory now does not contain more files than when this instance was created, and warn otherwise. Does nothing if no user account was given to RunExecutor. @return set of newly created files
[ "Check", "that", "the", "user", "account", "s", "home", "directory", "now", "does", "not", "contain", "more", "files", "than", "when", "this", "instance", "was", "created", "and", "warn", "otherwise", ".", "Does", "nothing", "if", "no", "user", "account", "was", "given", "to", "RunExecutor", "." ]
python
train
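The core of check_for_new_files_in_home above is a set difference between two directory snapshots; a minimal standalone version of that idea (the path and the tool run are illustrative):

import os

home = os.path.expanduser('~')
before = set(os.listdir(home))
# ... run the tool under test here ...
created = set(os.listdir(home)) - before
if created:
    print('The tool created the following files:\n\t' + '\n\t'.join(sorted(created)))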
bronto/javasphinx
javasphinx/compiler.py
https://github.com/bronto/javasphinx/blob/cd1df27f1d70efaae079b74573efdd8e069ff02d/javasphinx/compiler.py#L221-L288
def compile_type_document(self, imports_block, package, name, declaration): """ Compile a complete document, documenting a type and its members """ outer_type = name.rpartition('.')[0] document = util.Document() document.add(imports_block) document.add_heading(name, '=') method_summary = util.StringBuilder() document.add_object(method_summary) package_dir = util.Directive('java:package', package) package_dir.add_option('noindex') document.add_object(package_dir) # Add type-level documentation type_dir = self.compile_type(declaration) if outer_type: type_dir.add_option('outertype', outer_type) document.add_object(type_dir) if isinstance(declaration, javalang.tree.EnumDeclaration): enum_constants = list(declaration.body.constants) enum_constants.sort(key=lambda c: c.name) document.add_heading('Enum Constants') for enum_constant in enum_constants: if self.member_headers: document.add_heading(enum_constant.name, '^') c = self.compile_enum_constant(name, enum_constant) c.add_option('outertype', name) document.add_object(c) fields = list(filter(self.filter, declaration.fields)) if fields: document.add_heading('Fields', '-') fields.sort(key=lambda f: f.declarators[0].name) for field in fields: if self.member_headers: document.add_heading(field.declarators[0].name, '^') f = self.compile_field(field) f.add_option('outertype', name) document.add_object(f) constructors = list(filter(self.filter, declaration.constructors)) if constructors: document.add_heading('Constructors', '-') constructors.sort(key=lambda c: c.name) for constructor in constructors: if self.member_headers: document.add_heading(constructor.name, '^') c = self.compile_constructor(constructor) c.add_option('outertype', name) document.add_object(c) methods = list(filter(self.filter, declaration.methods)) if methods: document.add_heading('Methods', '-') methods.sort(key=lambda m: m.name) for method in methods: if self.member_headers: document.add_heading(method.name, '^') m = self.compile_method(method) m.add_option('outertype', name) document.add_object(m) return document
[ "def", "compile_type_document", "(", "self", ",", "imports_block", ",", "package", ",", "name", ",", "declaration", ")", ":", "outer_type", "=", "name", ".", "rpartition", "(", "'.'", ")", "[", "0", "]", "document", "=", "util", ".", "Document", "(", ")", "document", ".", "add", "(", "imports_block", ")", "document", ".", "add_heading", "(", "name", ",", "'='", ")", "method_summary", "=", "util", ".", "StringBuilder", "(", ")", "document", ".", "add_object", "(", "method_summary", ")", "package_dir", "=", "util", ".", "Directive", "(", "'java:package'", ",", "package", ")", "package_dir", ".", "add_option", "(", "'noindex'", ")", "document", ".", "add_object", "(", "package_dir", ")", "# Add type-level documentation", "type_dir", "=", "self", ".", "compile_type", "(", "declaration", ")", "if", "outer_type", ":", "type_dir", ".", "add_option", "(", "'outertype'", ",", "outer_type", ")", "document", ".", "add_object", "(", "type_dir", ")", "if", "isinstance", "(", "declaration", ",", "javalang", ".", "tree", ".", "EnumDeclaration", ")", ":", "enum_constants", "=", "list", "(", "declaration", ".", "body", ".", "constants", ")", "enum_constants", ".", "sort", "(", "key", "=", "lambda", "c", ":", "c", ".", "name", ")", "document", ".", "add_heading", "(", "'Enum Constants'", ")", "for", "enum_constant", "in", "enum_constants", ":", "if", "self", ".", "member_headers", ":", "document", ".", "add_heading", "(", "enum_constant", ".", "name", ",", "'^'", ")", "c", "=", "self", ".", "compile_enum_constant", "(", "name", ",", "enum_constant", ")", "c", ".", "add_option", "(", "'outertype'", ",", "name", ")", "document", ".", "add_object", "(", "c", ")", "fields", "=", "list", "(", "filter", "(", "self", ".", "filter", ",", "declaration", ".", "fields", ")", ")", "if", "fields", ":", "document", ".", "add_heading", "(", "'Fields'", ",", "'-'", ")", "fields", ".", "sort", "(", "key", "=", "lambda", "f", ":", "f", ".", "declarators", "[", "0", "]", ".", "name", ")", "for", "field", "in", "fields", ":", "if", "self", ".", "member_headers", ":", "document", ".", "add_heading", "(", "field", ".", "declarators", "[", "0", "]", ".", "name", ",", "'^'", ")", "f", "=", "self", ".", "compile_field", "(", "field", ")", "f", ".", "add_option", "(", "'outertype'", ",", "name", ")", "document", ".", "add_object", "(", "f", ")", "constructors", "=", "list", "(", "filter", "(", "self", ".", "filter", ",", "declaration", ".", "constructors", ")", ")", "if", "constructors", ":", "document", ".", "add_heading", "(", "'Constructors'", ",", "'-'", ")", "constructors", ".", "sort", "(", "key", "=", "lambda", "c", ":", "c", ".", "name", ")", "for", "constructor", "in", "constructors", ":", "if", "self", ".", "member_headers", ":", "document", ".", "add_heading", "(", "constructor", ".", "name", ",", "'^'", ")", "c", "=", "self", ".", "compile_constructor", "(", "constructor", ")", "c", ".", "add_option", "(", "'outertype'", ",", "name", ")", "document", ".", "add_object", "(", "c", ")", "methods", "=", "list", "(", "filter", "(", "self", ".", "filter", ",", "declaration", ".", "methods", ")", ")", "if", "methods", ":", "document", ".", "add_heading", "(", "'Methods'", ",", "'-'", ")", "methods", ".", "sort", "(", "key", "=", "lambda", "m", ":", "m", ".", "name", ")", "for", "method", "in", "methods", ":", "if", "self", ".", "member_headers", ":", "document", ".", "add_heading", "(", "method", ".", "name", ",", "'^'", ")", "m", "=", "self", ".", "compile_method", "(", "method", ")", "m", ".", "add_option", "(", 
"'outertype'", ",", "name", ")", "document", ".", "add_object", "(", "m", ")", "return", "document" ]
Compile a complete document, documenting a type and its members
[ "Compile", "a", "complete", "document", "documenting", "a", "type", "and", "its", "members" ]
python
train
jobovy/galpy
galpy/df/streamgapdf.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/df/streamgapdf.py#L1617-L1679
def impulse_deltav_plummerstream_curvedstream(v,x,t,b,w,x0,v0,GSigma,rs,
                                              galpot,tmin=None,tmax=None):
    """
    NAME:

       impulse_deltav_plummerstream_curvedstream

    PURPOSE:

       calculate the delta velocity due to an encounter with a Plummer sphere in the impulse approximation; allows for arbitrary velocity vectors, and arbitrary position along the stream; velocities and positions are assumed to lie along an orbit

    INPUT:

       v - velocity of the stream (nstar,3)

       x - position along the stream (nstar,3)

       t - times at which (v,x) are reached, wrt the closest impact t=0 (nstar)

       b - impact parameter

       w - velocity of the Plummer sphere (3)

       x0 - point of closest approach

       v0 - velocity of point of closest approach

       GSigma - surface density of the Plummer-softened stream (in natural units); should be a function of time

       rs - size of the Plummer sphere

       galpot - galpy Potential object or list thereof

       tmin, tmax= (None) minimum and maximum time to consider for GSigma

    OUTPUT:

       deltav (nstar,3)

    HISTORY:

       2015-11-20 - Written based on Plummer sphere above - Bovy (UofT)

    """
    galpot= flatten_potential(galpot)
    if len(v.shape) == 1: v= numpy.reshape(v,(1,3))
    if len(x.shape) == 1: x= numpy.reshape(x,(1,3))
    # Integrate an orbit to use to figure out where each (v,x) is at each time
    R, phi, z= bovy_coords.rect_to_cyl(x0[0],x0[1],x0[2])
    vR, vT, vz= bovy_coords.rect_to_cyl_vec(v0[0],v0[1],v0[2],R,phi,z,cyl=True)
    # First back, then forward to cover the entire range with 1 orbit
    o= Orbit([R,vR,vT,z,vz,phi]).flip()
    ts= numpy.linspace(0.,numpy.fabs(numpy.amin(t)+tmin),101)
    o.integrate(ts,galpot)
    o= o(ts[-1]).flip()
    ts= numpy.linspace(0.,numpy.amax(t)+tmax-numpy.amin(t)-tmin,201)
    o.integrate(ts,galpot)
    # Calculate kicks
    b0 = numpy.cross(w,v0)
    b0 *= b/numpy.sqrt(numpy.sum(b0**2))
    return numpy.array(list(map(lambda i:_astream_integrate(\
                b0-x0,o,i,w,GSigma,rs**2.,numpy.amin(t)+tmin,
                tmin,tmax),t)))
[ "def", "impulse_deltav_plummerstream_curvedstream", "(", "v", ",", "x", ",", "t", ",", "b", ",", "w", ",", "x0", ",", "v0", ",", "GSigma", ",", "rs", ",", "galpot", ",", "tmin", "=", "None", ",", "tmax", "=", "None", ")", ":", "galpot", "=", "flatten_potential", "(", "galpot", ")", "if", "len", "(", "v", ".", "shape", ")", "==", "1", ":", "v", "=", "numpy", ".", "reshape", "(", "v", ",", "(", "1", ",", "3", ")", ")", "if", "len", "(", "x", ".", "shape", ")", "==", "1", ":", "x", "=", "numpy", ".", "reshape", "(", "x", ",", "(", "1", ",", "3", ")", ")", "# Integrate an orbit to use to figure out where each (v,x) is at each time", "R", ",", "phi", ",", "z", "=", "bovy_coords", ".", "rect_to_cyl", "(", "x0", "[", "0", "]", ",", "x0", "[", "1", "]", ",", "x0", "[", "2", "]", ")", "vR", ",", "vT", ",", "vz", "=", "bovy_coords", ".", "rect_to_cyl_vec", "(", "v0", "[", "0", "]", ",", "v0", "[", "1", "]", ",", "v0", "[", "2", "]", ",", "R", ",", "phi", ",", "z", ",", "cyl", "=", "True", ")", "# First back, then forward to cover the entire range with 1 orbit", "o", "=", "Orbit", "(", "[", "R", ",", "vR", ",", "vT", ",", "z", ",", "vz", ",", "phi", "]", ")", ".", "flip", "(", ")", "ts", "=", "numpy", ".", "linspace", "(", "0.", ",", "numpy", ".", "fabs", "(", "numpy", ".", "amin", "(", "t", ")", "+", "tmin", ")", ",", "101", ")", "o", ".", "integrate", "(", "ts", ",", "galpot", ")", "o", "=", "o", "(", "ts", "[", "-", "1", "]", ")", ".", "flip", "(", ")", "ts", "=", "numpy", ".", "linspace", "(", "0.", ",", "numpy", ".", "amax", "(", "t", ")", "+", "tmax", "-", "numpy", ".", "amin", "(", "t", ")", "-", "tmin", ",", "201", ")", "o", ".", "integrate", "(", "ts", ",", "galpot", ")", "# Calculate kicks", "b0", "=", "numpy", ".", "cross", "(", "w", ",", "v0", ")", "b0", "*=", "b", "/", "numpy", ".", "sqrt", "(", "numpy", ".", "sum", "(", "b0", "**", "2", ")", ")", "return", "numpy", ".", "array", "(", "list", "(", "map", "(", "lambda", "i", ":", "_astream_integrate", "(", "b0", "-", "x0", ",", "o", ",", "i", ",", "w", ",", "GSigma", ",", "rs", "**", "2.", ",", "numpy", ".", "amin", "(", "t", ")", "+", "tmin", ",", "tmin", ",", "tmax", ")", ",", "t", ")", ")", ")" ]
NAME:

   impulse_deltav_plummerstream_curvedstream

PURPOSE:

   calculate the delta velocity due to an encounter with a Plummer sphere in the impulse approximation; allows for arbitrary velocity vectors, and arbitrary position along the stream; velocities and positions are assumed to lie along an orbit

INPUT:

   v - velocity of the stream (nstar,3)

   x - position along the stream (nstar,3)

   t - times at which (v,x) are reached, wrt the closest impact t=0 (nstar)

   b - impact parameter

   w - velocity of the Plummer sphere (3)

   x0 - point of closest approach

   v0 - velocity of point of closest approach

   GSigma - surface density of the Plummer-softened stream (in natural units); should be a function of time

   rs - size of the Plummer sphere

   galpot - galpy Potential object or list thereof

   tmin, tmax= (None) minimum and maximum time to consider for GSigma

OUTPUT:

   deltav (nstar,3)

HISTORY:

   2015-11-20 - Written based on Plummer sphere above - Bovy (UofT)
[ "NAME", ":" ]
python
train
python-xlib/python-xlib
Xlib/protocol/rq.py
https://github.com/python-xlib/python-xlib/blob/8901e831737e79fe5645f48089d70e1d1046d2f2/Xlib/protocol/rq.py#L457-L469
def pack_value(self, val): """Convert 8-byte string into 16-byte list""" if isinstance(val, bytes): val = list(iterbytes(val)) slen = len(val) if self.pad: pad = b'\0\0' * (slen % 2) else: pad = b'' return struct.pack('>' + 'H' * slen, *val) + pad, slen, None
[ "def", "pack_value", "(", "self", ",", "val", ")", ":", "if", "isinstance", "(", "val", ",", "bytes", ")", ":", "val", "=", "list", "(", "iterbytes", "(", "val", ")", ")", "slen", "=", "len", "(", "val", ")", "if", "self", ".", "pad", ":", "pad", "=", "b'\\0\\0'", "*", "(", "slen", "%", "2", ")", "else", ":", "pad", "=", "b''", "return", "struct", ".", "pack", "(", "'>'", "+", "'H'", "*", "slen", ",", "*", "val", ")", "+", "pad", ",", "slen", ",", "None" ]
Convert 8-byte string into 16-byte list
[ "Convert", "8", "-", "byte", "string", "into", "16", "-", "byte", "list" ]
python
train
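What the struct.pack call in pack_value above produces for a short byte string, shown with the standard library alone: each input byte becomes a big-endian 16-bit value.

import struct

val = list(b'ab')                                # [97, 98]
packed = struct.pack('>' + 'H' * len(val), *val)
print(packed)                                    # b'\x00a\x00b'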
watson-developer-cloud/python-sdk
ibm_watson/tone_analyzer_v3.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/tone_analyzer_v3.py#L342-L354
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'tones') and self.tones is not None: _dict['tones'] = [x._to_dict() for x in self.tones] if hasattr(self, 'tone_categories') and self.tone_categories is not None: _dict['tone_categories'] = [ x._to_dict() for x in self.tone_categories ] if hasattr(self, 'warning') and self.warning is not None: _dict['warning'] = self.warning return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'tones'", ")", "and", "self", ".", "tones", "is", "not", "None", ":", "_dict", "[", "'tones'", "]", "=", "[", "x", ".", "_to_dict", "(", ")", "for", "x", "in", "self", ".", "tones", "]", "if", "hasattr", "(", "self", ",", "'tone_categories'", ")", "and", "self", ".", "tone_categories", "is", "not", "None", ":", "_dict", "[", "'tone_categories'", "]", "=", "[", "x", ".", "_to_dict", "(", ")", "for", "x", "in", "self", ".", "tone_categories", "]", "if", "hasattr", "(", "self", ",", "'warning'", ")", "and", "self", ".", "warning", "is", "not", "None", ":", "_dict", "[", "'warning'", "]", "=", "self", ".", "warning", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
planetarypy/pvl
pvl/__init__.py
https://github.com/planetarypy/pvl/blob/ed92b284c4208439b033d28c9c176534c0faac0e/pvl/__init__.py#L100-L113
def loads(data, cls=PVLDecoder, strict=True, **kwargs):
    """Deserialize ``data`` as a pvl module.

    :param data: a pvl module as a byte or unicode string

    :param cls: the decoder class used to deserialize the pvl module. You may
        use the default ``PVLDecoder`` class or provide a custom subclass.

    :param **kwargs: the keyword arguments to pass to the decoder class.
    """
    decoder = __create_decoder(cls, strict, **kwargs)
    if not isinstance(data, bytes):
        data = data.encode('utf-8')
    return decoder.decode(data)
[ "def", "loads", "(", "data", ",", "cls", "=", "PVLDecoder", ",", "strict", "=", "True", ",", "*", "*", "kwargs", ")", ":", "decoder", "=", "__create_decoder", "(", "cls", ",", "strict", ",", "*", "*", "kwargs", ")", "if", "not", "isinstance", "(", "data", ",", "bytes", ")", ":", "data", "=", "data", ".", "encode", "(", "'utf-8'", ")", "return", "decoder", ".", "decode", "(", "data", ")" ]
Deserialize ``data`` as a pvl module.

:param data: a pvl module as a byte or unicode string

:param cls: the decoder class used to deserialize the pvl module. You may
    use the default ``PVLDecoder`` class or provide a custom subclass.

:param **kwargs: the keyword arguments to pass to the decoder class.
[ "Deserialize", "data", "as", "a", "pvl", "module", "." ]
python
train
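A hedged usage sketch for loads, based only on the docstring above; the one-statement PVL text and the expected result are assumptions, not verified output:

import pvl

module = pvl.loads('foo = bar')   # str input is encoded to UTF-8 internally
print(module['foo'])              # expected: 'bar'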
sys-git/certifiable
certifiable/complex.py
https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/complex.py#L326-L372
def certify_tuple(value, certifier=None, min_len=None, max_len=None, required=True, schema=None):
    """
    Validates a tuple, checking it against an optional schema.

    The schema should be a list of expected values replaced by functions which will be called
    with the corresponding value in the input.

    A simple example:

        >>> certifier = certify_tuple(schema=(
        ...     certify_key(kind='Model'),
        ...     certify_int(min=0),
        ... ))
        >>> certifier((self.key, self.count))

    :param tuple value:
        The value to be certified.
    :param func certifier:
        A function to be called on each value in the iterable to check that it is valid.
    :param int min_len:
        The minimum acceptable length for the iterable. If None, the minimum length is not checked.
    :param int max_len:
        The maximum acceptable length for the iterable. If None, the maximum length is not checked.
    :param bool required:
        Whether the value can't be `None`. Defaults to True.
    :param tuple schema:
        The schema against which the value should be checked.
        For single-item tuple make sure to add comma at the end of schema tuple, that is,
        for example: schema=(certify_int(),)
    :return:
        The certified tuple.
    :rtype:
        tuple
    :raises CertifierTypeError:
        The type is invalid
    :raises CertifierValueError:
        The value is invalid
    """
    certify_iterable(
        value=value,
        types=tuple([tuple]),
        certifier=certifier,
        min_len=min_len,
        max_len=max_len,
        schema=schema,
        required=required,
    )
[ "def", "certify_tuple", "(", "value", ",", "certifier", "=", "None", ",", "min_len", "=", "None", ",", "max_len", "=", "None", ",", "required", "=", "True", ",", "schema", "=", "None", ")", ":", "certify_iterable", "(", "value", "=", "value", ",", "types", "=", "tuple", "(", "[", "tuple", "]", ")", ",", "certifier", "=", "certifier", ",", "min_len", "=", "min_len", ",", "max_len", "=", "max_len", ",", "schema", "=", "schema", ",", "required", "=", "required", ",", ")" ]
Validates a tuple, checking it against an optional schema.

The schema should be a list of expected values replaced by functions which will be called
with the corresponding value in the input.

A simple example:

    >>> certifier = certify_tuple(schema=(
    ...     certify_key(kind='Model'),
    ...     certify_int(min=0),
    ... ))
    >>> certifier((self.key, self.count))

:param tuple value:
    The value to be certified.
:param func certifier:
    A function to be called on each value in the iterable to check that it is valid.
:param int min_len:
    The minimum acceptable length for the iterable. If None, the minimum length is not checked.
:param int max_len:
    The maximum acceptable length for the iterable. If None, the maximum length is not checked.
:param bool required:
    Whether the value can't be `None`. Defaults to True.
:param tuple schema:
    The schema against which the value should be checked.
    For single-item tuple make sure to add comma at the end of schema tuple, that is,
    for example: schema=(certify_int(),)
:return:
    The certified tuple.
:rtype:
    tuple
:raises CertifierTypeError:
    The type is invalid
:raises CertifierValueError:
    The value is invalid
[ "Validates", "a", "tuple", "checking", "it", "against", "an", "optional", "schema", "." ]
python
train
waqasbhatti/astrobase
astrobase/varbase/trends.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varbase/trends.py#L150-L175
def smooth_magseries_savgol(mags, windowsize, polyorder=2):
    '''This smooths the magseries with a Savitsky-Golay filter.

    Parameters
    ----------

    mags : np.array
        The input mags/flux time-series to smooth.

    windowsize : int
        This is an odd integer containing the smoothing window size.

    polyorder : int
        This is an integer containing the polynomial degree order to use when
        generating the Savitsky-Golay filter.

    Returns
    -------

    np.array
        The smoothed mag/flux time-series array.

    '''

    smoothed = savgol_filter(mags, windowsize, polyorder)

    return smoothed
[ "def", "smooth_magseries_savgol", "(", "mags", ",", "windowsize", ",", "polyorder", "=", "2", ")", ":", "smoothed", "=", "savgol_filter", "(", "mags", ",", "windowsize", ",", "polyorder", ")", "return", "smoothed" ]
This smooths the magseries with a Savitsky-Golay filter.

Parameters
----------

mags : np.array
    The input mags/flux time-series to smooth.

windowsize : int
    This is an odd integer containing the smoothing window size.

polyorder : int
    This is an integer containing the polynomial degree order to use when
    generating the Savitsky-Golay filter.

Returns
-------

np.array
    The smoothed mag/flux time-series array.
[ "This", "smooths", "the", "magseries", "with", "a", "Savitsky", "-", "Golay", "filter", "." ]
python
valid
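savgol_filter here is scipy.signal.savgol_filter; a minimal smoothing demo on synthetic data (the window length must be an odd integer, as the docstring notes):

import numpy as np
from scipy.signal import savgol_filter

mags = np.sin(np.linspace(0, 4 * np.pi, 100)) + 0.1 * np.random.randn(100)
smoothed = savgol_filter(mags, 11, 2)  # windowsize=11, polyorder=2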
pyrogram/pyrogram
pyrogram/client/methods/password/enable_cloud_password.py
https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/methods/password/enable_cloud_password.py#L27-L75
def enable_cloud_password( self, password: str, hint: str = "", email: str = None ) -> bool: """Use this method to enable the Two-Step Verification security feature (Cloud Password) on your account. This password will be asked when you log-in on a new device in addition to the SMS code. Args: password (``str``): Your password. hint (``str``, *optional*): A password hint. email (``str``, *optional*): Recovery e-mail. Returns: True on success. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. ``ValueError`` in case there is already a cloud password enabled. """ r = self.send(functions.account.GetPassword()) if r.has_password: raise ValueError("There is already a cloud password enabled") r.new_algo.salt1 += os.urandom(32) new_hash = btoi(compute_hash(r.new_algo, password)) new_hash = itob(pow(r.new_algo.g, new_hash, btoi(r.new_algo.p))) self.send( functions.account.UpdatePasswordSettings( password=types.InputCheckPasswordEmpty(), new_settings=types.account.PasswordInputSettings( new_algo=r.new_algo, new_password_hash=new_hash, hint=hint, email=email ) ) ) return True
[ "def", "enable_cloud_password", "(", "self", ",", "password", ":", "str", ",", "hint", ":", "str", "=", "\"\"", ",", "email", ":", "str", "=", "None", ")", "->", "bool", ":", "r", "=", "self", ".", "send", "(", "functions", ".", "account", ".", "GetPassword", "(", ")", ")", "if", "r", ".", "has_password", ":", "raise", "ValueError", "(", "\"There is already a cloud password enabled\"", ")", "r", ".", "new_algo", ".", "salt1", "+=", "os", ".", "urandom", "(", "32", ")", "new_hash", "=", "btoi", "(", "compute_hash", "(", "r", ".", "new_algo", ",", "password", ")", ")", "new_hash", "=", "itob", "(", "pow", "(", "r", ".", "new_algo", ".", "g", ",", "new_hash", ",", "btoi", "(", "r", ".", "new_algo", ".", "p", ")", ")", ")", "self", ".", "send", "(", "functions", ".", "account", ".", "UpdatePasswordSettings", "(", "password", "=", "types", ".", "InputCheckPasswordEmpty", "(", ")", ",", "new_settings", "=", "types", ".", "account", ".", "PasswordInputSettings", "(", "new_algo", "=", "r", ".", "new_algo", ",", "new_password_hash", "=", "new_hash", ",", "hint", "=", "hint", ",", "email", "=", "email", ")", ")", ")", "return", "True" ]
Use this method to enable the Two-Step Verification security feature (Cloud Password) on your account. This password will be asked when you log-in on a new device in addition to the SMS code. Args: password (``str``): Your password. hint (``str``, *optional*): A password hint. email (``str``, *optional*): Recovery e-mail. Returns: True on success. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. ``ValueError`` in case there is already a cloud password enabled.
[ "Use", "this", "method", "to", "enable", "the", "Two", "-", "Step", "Verification", "security", "feature", "(", "Cloud", "Password", ")", "on", "your", "account", "." ]
python
train
xolox/python-vcs-repo-mgr
vcs_repo_mgr/__init__.py
https://github.com/xolox/python-vcs-repo-mgr/blob/fdad2441a3e7ba5deeeddfa1c2f5ebc00c393aed/vcs_repo_mgr/__init__.py#L1372-L1391
def generate_control_field(self, revision=None):
    """
    Generate a Debian control file field referring to this repository and revision.

    :param revision: A reference to a revision, most likely the name of a
                     branch (a string, defaults to :attr:`default_revision`).
    :returns: A tuple with two strings: The name of the field and the value.

    This generates a `Vcs-Bzr` field for Bazaar repositories, a `Vcs-Git`
    field for Git repositories and a `Vcs-Hg` field for Mercurial
    repositories. Here's an example based on the public git repository of
    the `vcs-repo-mgr` project:

    >>> from vcs_repo_mgr import coerce_repository
    >>> repository = coerce_repository('https://github.com/xolox/python-vcs-repo-mgr.git')
    >>> repository.generate_control_field()
    ('Vcs-Git', 'https://github.com/xolox/python-vcs-repo-mgr.git#b617731b6c0ca746665f597d2f24b8814b137ebc')
    """
    value = "%s#%s" % (self.remote or self.local, self.find_revision_id(revision))
    return self.control_field, value
[ "def", "generate_control_field", "(", "self", ",", "revision", "=", "None", ")", ":", "value", "=", "\"%s#%s\"", "%", "(", "self", ".", "remote", "or", "self", ".", "local", ",", "self", ".", "find_revision_id", "(", "revision", ")", ")", "return", "self", ".", "control_field", ",", "value" ]
Generate a Debian control file field referring to this repository and revision.

:param revision: A reference to a revision, most likely the name of a
                 branch (a string, defaults to :attr:`default_revision`).
:returns: A tuple with two strings: The name of the field and the value.

This generates a `Vcs-Bzr` field for Bazaar repositories, a `Vcs-Git`
field for Git repositories and a `Vcs-Hg` field for Mercurial
repositories. Here's an example based on the public git repository of
the `vcs-repo-mgr` project:

>>> from vcs_repo_mgr import coerce_repository
>>> repository = coerce_repository('https://github.com/xolox/python-vcs-repo-mgr.git')
>>> repository.generate_control_field()
('Vcs-Git', 'https://github.com/xolox/python-vcs-repo-mgr.git#b617731b6c0ca746665f597d2f24b8814b137ebc')
[ "Generate", "a", "Debian", "control", "file", "field", "referring", "for", "this", "repository", "and", "revision", "." ]
python
train
potash/drain
drain/model.py
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/model.py#L394-L424
def perturb(estimator, X, bins, columns=None):
    """
    Predict on perturbations of a feature vector
    estimator: a fitted sklearn estimator
    index: the index of the example to perturb
    bins: a dictionary of column:bins arrays
    columns: list of columns if bins doesn't cover all columns
    TODO make this work when index is multiple rows
    """
    if columns is None:
        if len(bins) != X.shape[1]:
            raise ValueError("Must specify columns when not perturbing all columns")
        else:
            columns = X.columns

    n = np.concatenate(([0], np.cumsum([len(b) for b in bins])))
    X_test = np.empty((n[-1]*X.shape[0], X.shape[1]))
    r = pd.DataFrame(columns=['value', 'feature', 'index'],
                     index=np.arange(n[-1]*X.shape[0]))

    for j, index in enumerate(X.index):
        X_test[j*n[-1]:(j+1)*n[-1], :] = X.values[j, :]

        for i, c in enumerate(columns):
            s = slice(j*n[-1] + n[i], j*n[-1] + n[i+1])
            r['value'].values[s] = bins[i]
            r['feature'].values[s] = c
            r['index'].values[s] = [index]*(n[i+1]-n[i])
            X_test[s, (X.columns == c).argmax()] = bins[i]

    y = estimator.predict_proba(X_test)[:, 1]
    r['y'] = y
    return r
[ "def", "perturb", "(", "estimator", ",", "X", ",", "bins", ",", "columns", "=", "None", ")", ":", "if", "columns", "is", "None", ":", "if", "len", "(", "bins", ")", "!=", "X", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "\"Must specify columns when not perturbing all columns\"", ")", "else", ":", "columns", "=", "X", ".", "columns", "n", "=", "np", ".", "concatenate", "(", "(", "[", "0", "]", ",", "np", ".", "cumsum", "(", "[", "len", "(", "b", ")", "for", "b", "in", "bins", "]", ")", ")", ")", "X_test", "=", "np", ".", "empty", "(", "(", "n", "[", "-", "1", "]", "*", "X", ".", "shape", "[", "0", "]", ",", "X", ".", "shape", "[", "1", "]", ")", ")", "r", "=", "pd", ".", "DataFrame", "(", "columns", "=", "[", "'value'", ",", "'feature'", ",", "'index'", "]", ",", "index", "=", "np", ".", "arange", "(", "n", "[", "-", "1", "]", "*", "X", ".", "shape", "[", "0", "]", ")", ")", "for", "j", ",", "index", "in", "enumerate", "(", "X", ".", "index", ")", ":", "X_test", "[", "j", "*", "n", "[", "-", "1", "]", ":", "(", "j", "+", "1", ")", "*", "n", "[", "-", "1", "]", ",", ":", "]", "=", "X", ".", "values", "[", "j", ",", ":", "]", "for", "i", ",", "c", "in", "enumerate", "(", "columns", ")", ":", "s", "=", "slice", "(", "j", "*", "n", "[", "-", "1", "]", "+", "n", "[", "i", "]", ",", "j", "*", "n", "[", "-", "1", "]", "+", "n", "[", "i", "+", "1", "]", ")", "r", "[", "'value'", "]", ".", "values", "[", "s", "]", "=", "bins", "[", "i", "]", "r", "[", "'feature'", "]", ".", "values", "[", "s", "]", "=", "c", "r", "[", "'index'", "]", ".", "values", "[", "s", "]", "=", "[", "index", "]", "*", "(", "n", "[", "i", "+", "1", "]", "-", "n", "[", "i", "]", ")", "X_test", "[", "s", ",", "(", "X", ".", "columns", "==", "c", ")", ".", "argmax", "(", ")", "]", "=", "bins", "[", "i", "]", "y", "=", "estimator", ".", "predict_proba", "(", "X_test", ")", "[", ":", ",", "1", "]", "r", "[", "'y'", "]", "=", "y", "return", "r" ]
Predict on perturbations of a feature vector
estimator: a fitted sklearn estimator
index: the index of the example to perturb
bins: a dictionary of column:bins arrays
columns: list of columns if bins doesn't cover all columns
TODO make this work when index is multiple rows
[ "Predict", "on", "peturbations", "of", "a", "feature", "vector", "estimator", ":", "a", "fitted", "sklearn", "estimator", "index", ":", "the", "index", "of", "the", "example", "to", "perturb", "bins", ":", "a", "dictionary", "of", "column", ":", "bins", "arrays", "columns", ":", "list", "of", "columns", "if", "bins", "doesn", "t", "cover", "all", "columns", "TODO", "make", "this", "work", "when", "index", "is", "multiple", "rows" ]
python
train
rigetti/quantumflow
quantumflow/ops.py
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/ops.py#L168-L172
def evolve(self, rho: Density) -> Density: """Apply the action of this gate upon a density""" # TODO: implement without explicit channel creation? chan = self.aschannel() return chan.evolve(rho)
[ "def", "evolve", "(", "self", ",", "rho", ":", "Density", ")", "->", "Density", ":", "# TODO: implement without explicit channel creation?", "chan", "=", "self", ".", "aschannel", "(", ")", "return", "chan", ".", "evolve", "(", "rho", ")" ]
Apply the action of this gate upon a density
[ "Apply", "the", "action", "of", "this", "gate", "upon", "a", "density" ]
python
train
saltstack/salt
salt/modules/nilrt_ip.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nilrt_ip.py#L109-L117
def _get_services(): ''' Returns a list with all connman services ''' serv = [] services = pyconnman.ConnManager().get_services() for path, _ in services: serv.append(six.text_type(path[len(SERVICE_PATH):])) return serv
[ "def", "_get_services", "(", ")", ":", "serv", "=", "[", "]", "services", "=", "pyconnman", ".", "ConnManager", "(", ")", ".", "get_services", "(", ")", "for", "path", ",", "_", "in", "services", ":", "serv", ".", "append", "(", "six", ".", "text_type", "(", "path", "[", "len", "(", "SERVICE_PATH", ")", ":", "]", ")", ")", "return", "serv" ]
Returns a list with all connman services
[ "Returns", "a", "list", "with", "all", "connman", "services" ]
python
train
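The path handling in _get_services above is a plain prefix strip; a standalone illustration with a hypothetical SERVICE_PATH value and D-Bus object path:

SERVICE_PATH = '/net/connman/service/'  # assumed prefix, for illustration only
path = SERVICE_PATH + 'ethernet_0800273d4d5e_cable'
print(path[len(SERVICE_PATH):])         # ethernet_0800273d4d5e_cable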
kgaughan/dbkit
dbkit.py
https://github.com/kgaughan/dbkit/blob/2aef6376a60965d7820c91692046f4bcf7d43640/dbkit.py#L693-L702
def execute(stmt, args=()): """ Execute an SQL statement. Returns the number of affected rows. """ ctx = Context.current() with ctx.mdr: cursor = ctx.execute(stmt, args) row_count = cursor.rowcount _safe_close(cursor) return row_count
[ "def", "execute", "(", "stmt", ",", "args", "=", "(", ")", ")", ":", "ctx", "=", "Context", ".", "current", "(", ")", "with", "ctx", ".", "mdr", ":", "cursor", "=", "ctx", ".", "execute", "(", "stmt", ",", "args", ")", "row_count", "=", "cursor", ".", "rowcount", "_safe_close", "(", "cursor", ")", "return", "row_count" ]
Execute an SQL statement. Returns the number of affected rows.
[ "Execute", "an", "SQL", "statement", ".", "Returns", "the", "number", "of", "affected", "rows", "." ]
python
train
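A hedged usage sketch for execute: dbkit statements run inside a connection context (dbkit.connect per the project's documented pattern; treat the exact API as an assumption):

import sqlite3
import dbkit

with dbkit.connect(sqlite3, ':memory:'):
    dbkit.execute("CREATE TABLE t (x INTEGER)")
    affected = dbkit.execute("INSERT INTO t (x) VALUES (?)", (1,))
    print(affected)  # 1 row affected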
Lucretiel/Dispatch
dispatching.py
https://github.com/Lucretiel/Dispatch/blob/dffbce6bacb4370c4ecd11652e5ba8a6aaf2b5b4/dispatching.py#L113-L118
def dispatch(self, func): ''' Adds the decorated function to this dispatch. ''' self.callees.append(self._make_dispatch(func)) return self._make_wrapper(func)
[ "def", "dispatch", "(", "self", ",", "func", ")", ":", "self", ".", "callees", ".", "append", "(", "self", ".", "_make_dispatch", "(", "func", ")", ")", "return", "self", ".", "_make_wrapper", "(", "func", ")" ]
Adds the decorated function to this dispatch.
[ "Adds", "the", "decorated", "function", "to", "this", "dispatch", "." ]
python
valid
rflamary/POT
ot/utils.py
https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/utils.py#L169-L199
def cost_normalization(C, norm=None):
    """ Apply normalization to the loss matrix

    Parameters
    ----------
    C : np.array (n1, n2)
        The cost matrix to normalize.
    norm : str
        type of normalization from 'median','max','log','loglog'. Any other
        value does not normalize.

    Returns
    -------
    C : np.array (n1, n2)
        The input cost matrix normalized according to given norm.
    """

    if norm == "median":
        C /= float(np.median(C))
    elif norm == "max":
        C /= float(np.max(C))
    elif norm == "log":
        C = np.log(1 + C)
    elif norm == "loglog":
        C = np.log(1 + np.log(1 + C))

    return C
[ "def", "cost_normalization", "(", "C", ",", "norm", "=", "None", ")", ":", "if", "norm", "==", "\"median\"", ":", "C", "/=", "float", "(", "np", ".", "median", "(", "C", ")", ")", "elif", "norm", "==", "\"max\"", ":", "C", "/=", "float", "(", "np", ".", "max", "(", "C", ")", ")", "elif", "norm", "==", "\"log\"", ":", "C", "=", "np", ".", "log", "(", "1", "+", "C", ")", "elif", "norm", "==", "\"loglog\"", ":", "C", "=", "np", ".", "log", "(", "1", "+", "np", ".", "log", "(", "1", "+", "C", ")", ")", "return", "C" ]
Apply normalization to the loss matrix

Parameters
----------
C : np.array (n1, n2)
    The cost matrix to normalize.
norm : str
    type of normalization from 'median','max','log','loglog'. Any other
    value does not normalize.

Returns
-------
C : np.array (n1, n2)
    The input cost matrix normalized according to given norm.
[ "Apply", "normalization", "to", "the", "loss", "matrix" ]
python
train
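A standalone demo of two of the normalization branches above on a toy cost matrix:

import numpy as np

C = np.array([[0.0, 2.0], [4.0, 8.0]])
print(C / float(np.max(C)))   # norm="max": rescale by the largest cost
print(np.log(1 + C))          # norm="log": compress large costs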
trombastic/PyScada
pyscada/utils/scheduler.py
https://github.com/trombastic/PyScada/blob/c5fc348a25f0df1340336f694ee9bc1aea62516a/pyscada/utils/scheduler.py#L598-L614
def pre_init_process(self): """ will be executed after process fork """ db.connections.close_all() # update process info BackgroundProcess.objects.filter(pk=self.process_id).update( pid=self.pid, last_update=now(), running_since=now(), done=False, failed=False, message='init process..', ) [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS] # reset [signal.signal(s, self.signal) for s in self.SIGNALS]
[ "def", "pre_init_process", "(", "self", ")", ":", "db", ".", "connections", ".", "close_all", "(", ")", "# update process info", "BackgroundProcess", ".", "objects", ".", "filter", "(", "pk", "=", "self", ".", "process_id", ")", ".", "update", "(", "pid", "=", "self", ".", "pid", ",", "last_update", "=", "now", "(", ")", ",", "running_since", "=", "now", "(", ")", ",", "done", "=", "False", ",", "failed", "=", "False", ",", "message", "=", "'init process..'", ",", ")", "[", "signal", ".", "signal", "(", "s", ",", "signal", ".", "SIG_DFL", ")", "for", "s", "in", "self", ".", "SIGNALS", "]", "# reset", "[", "signal", ".", "signal", "(", "s", ",", "self", ".", "signal", ")", "for", "s", "in", "self", ".", "SIGNALS", "]" ]
will be executed after process fork
[ "will", "be", "executed", "after", "process", "fork" ]
python
train
saltstack/salt-pylint
saltpylint/pep8.py
https://github.com/saltstack/salt-pylint/blob/524a419d3bfc7dbd91c9c85040bc64935a275b24/saltpylint/pep8.py#L462-L479
def register(linter): ''' required method to auto register this checker ''' if HAS_PEP8 is False: return linter.register_checker(PEP8Indentation(linter)) linter.register_checker(PEP8Whitespace(linter)) linter.register_checker(PEP8BlankLine(linter)) linter.register_checker(PEP8Import(linter)) linter.register_checker(PEP8LineLength(linter)) linter.register_checker(PEP8Statement(linter)) linter.register_checker(PEP8Runtime(linter)) linter.register_checker(PEP8IndentationWarning(linter)) linter.register_checker(PEP8WhitespaceWarning(linter)) linter.register_checker(PEP8BlankLineWarning(linter)) linter.register_checker(PEP8DeprecationWarning(linter))
[ "def", "register", "(", "linter", ")", ":", "if", "HAS_PEP8", "is", "False", ":", "return", "linter", ".", "register_checker", "(", "PEP8Indentation", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "PEP8Whitespace", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "PEP8BlankLine", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "PEP8Import", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "PEP8LineLength", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "PEP8Statement", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "PEP8Runtime", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "PEP8IndentationWarning", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "PEP8WhitespaceWarning", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "PEP8BlankLineWarning", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "PEP8DeprecationWarning", "(", "linter", ")", ")" ]
required method to auto register this checker
[ "required", "method", "to", "auto", "register", "this", "checker" ]
python
train
redapple/parslepy
parslepy/base.py
https://github.com/redapple/parslepy/blob/a8bc4c0592824459629018c8f4c6ae3dad6cc3cc/parslepy/base.py#L338-L429
def _compile(self, parselet_node, level=0): """ Build part of the abstract Parsley extraction tree Arguments: parselet_node (dict) -- part of the Parsley tree to compile (can be the root dict/node) level (int) -- current recursion depth (used for debug) """ if self.DEBUG: debug_offset = "".join([" " for x in range(level)]) if self.DEBUG: print(debug_offset, "%s::compile(%s)" % ( self.__class__.__name__, parselet_node)) if isinstance(parselet_node, dict): parselet_tree = ParsleyNode() for k, v in list(parselet_node.items()): # we parse the key raw elements but without much # interpretation (which is done by the SelectorHandler) try: m = self.REGEX_PARSELET_KEY.match(k) if not m: if self.DEBUG: print(debug_offset, "could not parse key", k) raise InvalidKeySyntax(k) except: raise InvalidKeySyntax("Key %s is not valid" % k) key = m.group('key') # by default, fields are required key_required = True operator = m.group('operator') if operator == '?': key_required = False # FIXME: "!" operator not supported (complete array) scope = m.group('scope') # example: get list of H3 tags # { "titles": ["h3"] } # FIXME: should we support multiple selectors in list? # e.g. { "titles": ["h1", "h2", "h3", "h4"] } if isinstance(v, (list, tuple)): v = v[0] iterate = True else: iterate = False # keys in the abstract Parsley trees are of type `ParsleyContext` try: parsley_context = ParsleyContext( key, operator=operator, required=key_required, scope=self.selector_handler.make(scope) if scope else None, iterate=iterate) except SyntaxError: if self.DEBUG: print("Invalid scope:", k, scope) raise if self.DEBUG: print(debug_offset, "current context:", parsley_context) # go deeper in the Parsley tree... try: child_tree = self._compile(v, level=level+1) except SyntaxError: if self.DEBUG: print("Invalid value: ", v) raise except: raise if self.DEBUG: print(debug_offset, "child tree:", child_tree) parselet_tree[parsley_context] = child_tree return parselet_tree # a string leaf should match some kind of selector, # let the selector handler deal with it elif isstr(parselet_node): return self.selector_handler.make(parselet_node) else: raise ValueError( "Unsupported type(%s) for Parselet node <%s>" % ( type(parselet_node), parselet_node))
[ "def", "_compile", "(", "self", ",", "parselet_node", ",", "level", "=", "0", ")", ":", "if", "self", ".", "DEBUG", ":", "debug_offset", "=", "\"\"", ".", "join", "(", "[", "\" \"", "for", "x", "in", "range", "(", "level", ")", "]", ")", "if", "self", ".", "DEBUG", ":", "print", "(", "debug_offset", ",", "\"%s::compile(%s)\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "parselet_node", ")", ")", "if", "isinstance", "(", "parselet_node", ",", "dict", ")", ":", "parselet_tree", "=", "ParsleyNode", "(", ")", "for", "k", ",", "v", "in", "list", "(", "parselet_node", ".", "items", "(", ")", ")", ":", "# we parse the key raw elements but without much", "# interpretation (which is done by the SelectorHandler)", "try", ":", "m", "=", "self", ".", "REGEX_PARSELET_KEY", ".", "match", "(", "k", ")", "if", "not", "m", ":", "if", "self", ".", "DEBUG", ":", "print", "(", "debug_offset", ",", "\"could not parse key\"", ",", "k", ")", "raise", "InvalidKeySyntax", "(", "k", ")", "except", ":", "raise", "InvalidKeySyntax", "(", "\"Key %s is not valid\"", "%", "k", ")", "key", "=", "m", ".", "group", "(", "'key'", ")", "# by default, fields are required", "key_required", "=", "True", "operator", "=", "m", ".", "group", "(", "'operator'", ")", "if", "operator", "==", "'?'", ":", "key_required", "=", "False", "# FIXME: \"!\" operator not supported (complete array)", "scope", "=", "m", ".", "group", "(", "'scope'", ")", "# example: get list of H3 tags", "# { \"titles\": [\"h3\"] }", "# FIXME: should we support multiple selectors in list?", "# e.g. { \"titles\": [\"h1\", \"h2\", \"h3\", \"h4\"] }", "if", "isinstance", "(", "v", ",", "(", "list", ",", "tuple", ")", ")", ":", "v", "=", "v", "[", "0", "]", "iterate", "=", "True", "else", ":", "iterate", "=", "False", "# keys in the abstract Parsley trees are of type `ParsleyContext`", "try", ":", "parsley_context", "=", "ParsleyContext", "(", "key", ",", "operator", "=", "operator", ",", "required", "=", "key_required", ",", "scope", "=", "self", ".", "selector_handler", ".", "make", "(", "scope", ")", "if", "scope", "else", "None", ",", "iterate", "=", "iterate", ")", "except", "SyntaxError", ":", "if", "self", ".", "DEBUG", ":", "print", "(", "\"Invalid scope:\"", ",", "k", ",", "scope", ")", "raise", "if", "self", ".", "DEBUG", ":", "print", "(", "debug_offset", ",", "\"current context:\"", ",", "parsley_context", ")", "# go deeper in the Parsley tree...", "try", ":", "child_tree", "=", "self", ".", "_compile", "(", "v", ",", "level", "=", "level", "+", "1", ")", "except", "SyntaxError", ":", "if", "self", ".", "DEBUG", ":", "print", "(", "\"Invalid value: \"", ",", "v", ")", "raise", "except", ":", "raise", "if", "self", ".", "DEBUG", ":", "print", "(", "debug_offset", ",", "\"child tree:\"", ",", "child_tree", ")", "parselet_tree", "[", "parsley_context", "]", "=", "child_tree", "return", "parselet_tree", "# a string leaf should match some kind of selector,", "# let the selector handler deal with it", "elif", "isstr", "(", "parselet_node", ")", ":", "return", "self", ".", "selector_handler", ".", "make", "(", "parselet_node", ")", "else", ":", "raise", "ValueError", "(", "\"Unsupported type(%s) for Parselet node <%s>\"", "%", "(", "type", "(", "parselet_node", ")", ",", "parselet_node", ")", ")" ]
Build part of the abstract Parsley extraction tree Arguments: parselet_node (dict) -- part of the Parsley tree to compile (can be the root dict/node) level (int) -- current recursion depth (used for debug)
[ "Build", "part", "of", "the", "abstract", "Parsley", "extraction", "tree" ]
python
valid
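For orientation, the shape of input this _compile method walks; a hypothetical parselet matching the key grammar above ('?' marks an optional key, a parenthesised scope restricts the selector, and a list value means iterate):

parselet = {
    "title": "h1",             # plain key -> required selector leaf
    "subtitle?": "h2",         # '?' operator -> optional field
    "links(div#menu)": [{      # scope + list value -> iterate within div#menu
        "text": "a",
    }],
}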
open511/open511
open511/converter/o5xml.py
https://github.com/open511/open511/blob/3d573f59d7efa06ff1b5419ea5ff4d90a90b3cf8/open511/converter/o5xml.py#L116-L146
def geojson_to_gml(gj, set_srs=True): """Given a dict deserialized from a GeoJSON object, returns an lxml Element of the corresponding GML geometry.""" tag = G(gj['type']) if set_srs: tag.set('srsName', 'urn:ogc:def:crs:EPSG::4326') if gj['type'] == 'Point': tag.append(G.pos(_reverse_geojson_coords(gj['coordinates']))) elif gj['type'] == 'LineString': tag.append(G.posList(' '.join(_reverse_geojson_coords(ll) for ll in gj['coordinates']))) elif gj['type'] == 'Polygon': rings = [ G.LinearRing( G.posList(' '.join(_reverse_geojson_coords(ll) for ll in ring)) ) for ring in gj['coordinates'] ] tag.append(G.exterior(rings.pop(0))) for ring in rings: tag.append(G.interior(ring)) elif gj['type'] in ('MultiPoint', 'MultiLineString', 'MultiPolygon'): single_type = gj['type'][5:] member_tag = single_type[0].lower() + single_type[1:] + 'Member' for coord in gj['coordinates']: tag.append( G(member_tag, geojson_to_gml({'type': single_type, 'coordinates': coord}, set_srs=False)) ) else: raise NotImplementedError return tag
[ "def", "geojson_to_gml", "(", "gj", ",", "set_srs", "=", "True", ")", ":", "tag", "=", "G", "(", "gj", "[", "'type'", "]", ")", "if", "set_srs", ":", "tag", ".", "set", "(", "'srsName'", ",", "'urn:ogc:def:crs:EPSG::4326'", ")", "if", "gj", "[", "'type'", "]", "==", "'Point'", ":", "tag", ".", "append", "(", "G", ".", "pos", "(", "_reverse_geojson_coords", "(", "gj", "[", "'coordinates'", "]", ")", ")", ")", "elif", "gj", "[", "'type'", "]", "==", "'LineString'", ":", "tag", ".", "append", "(", "G", ".", "posList", "(", "' '", ".", "join", "(", "_reverse_geojson_coords", "(", "ll", ")", "for", "ll", "in", "gj", "[", "'coordinates'", "]", ")", ")", ")", "elif", "gj", "[", "'type'", "]", "==", "'Polygon'", ":", "rings", "=", "[", "G", ".", "LinearRing", "(", "G", ".", "posList", "(", "' '", ".", "join", "(", "_reverse_geojson_coords", "(", "ll", ")", "for", "ll", "in", "ring", ")", ")", ")", "for", "ring", "in", "gj", "[", "'coordinates'", "]", "]", "tag", ".", "append", "(", "G", ".", "exterior", "(", "rings", ".", "pop", "(", "0", ")", ")", ")", "for", "ring", "in", "rings", ":", "tag", ".", "append", "(", "G", ".", "interior", "(", "ring", ")", ")", "elif", "gj", "[", "'type'", "]", "in", "(", "'MultiPoint'", ",", "'MultiLineString'", ",", "'MultiPolygon'", ")", ":", "single_type", "=", "gj", "[", "'type'", "]", "[", "5", ":", "]", "member_tag", "=", "single_type", "[", "0", "]", ".", "lower", "(", ")", "+", "single_type", "[", "1", ":", "]", "+", "'Member'", "for", "coord", "in", "gj", "[", "'coordinates'", "]", ":", "tag", ".", "append", "(", "G", "(", "member_tag", ",", "geojson_to_gml", "(", "{", "'type'", ":", "single_type", ",", "'coordinates'", ":", "coord", "}", ",", "set_srs", "=", "False", ")", ")", ")", "else", ":", "raise", "NotImplementedError", "return", "tag" ]
Given a dict deserialized from a GeoJSON object, returns an lxml Element of the corresponding GML geometry.
[ "Given", "a", "dict", "deserialized", "from", "a", "GeoJSON", "object", "returns", "an", "lxml", "Element", "of", "the", "corresponding", "GML", "geometry", "." ]
python
valid
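A minimal call of geojson_to_gml above on a GeoJSON Point (assumes lxml is installed and that the import path below, taken from the record's path field, is correct); note the lon/lat pair is reversed inside the gml:pos element:

from lxml import etree
from open511.converter.o5xml import geojson_to_gml  # import path assumed

point = {'type': 'Point', 'coordinates': [-73.57, 45.50]}
gml = geojson_to_gml(point)
print(etree.tostring(gml, pretty_print=True).decode())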
gem/oq-engine
openquake/hazardlib/gsim/dowrickrhoades_2005.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/dowrickrhoades_2005.py#L145-L190
def _get_site_class(self, vs30, mmi_mean):
    """
    Return site class flag for:
    Class E - Very Soft Soil        vs30 < 180
    Class D - Deep or Soft Soil     vs30 >= 180 and vs30 <= 360
    Class C - Shallow Soil          vs30 > 360 and vs30 <= 760
    Class B - Rock                  vs30 > 760 and vs30 <= 1500
    Class A - Strong Rock           vs30 > 1500
    The S site class is equal to
        S = c1                      if MMI <= 7
        S = c1 - d * (MMI - 7.0)    if 7 < MMI < 9.5
        S = c2                      if MMI >= 9.5
    """
    if vs30[0] < 180:
        c1 = 1.0
        c2 = -0.25
        d = 0.5
    elif vs30[0] >= 180 and vs30[0] <= 360:
        c1 = 0.5
        c2 = -0.125
        d = 0.25
    elif vs30[0] > 360 and vs30[0] <= 760:
        c1 = 0.
        c2 = 0.
        d = 0.
    elif vs30[0] > 760 and vs30[0] <= 1500:
        c1 = -0.5
        c2 = 0.125
        d = -0.25
    elif vs30[0] > 1500:
        c1 = -1.0
        c2 = 0.25
        d = -0.5

    S = np.zeros_like(vs30)
    for i in range(vs30.size):
        if mmi_mean[i] <= 7.0:
            S[i] += c1
        elif mmi_mean[i] > 7 and mmi_mean[i] < 9.5:
            S[i] += c1 - d * (mmi_mean[i] - 7.0)
        else:
            S[i] += c2
    return S
[ "def", "_get_site_class", "(", "self", ",", "vs30", ",", "mmi_mean", ")", ":", "if", "vs30", "[", "0", "]", "<", "180", ":", "c1", "=", "1.0", "c2", "=", "-", "0.25", "d", "=", "0.5", "elif", "vs30", "[", "0", "]", ">=", "180", "and", "vs30", "[", "0", "]", "<=", "360", ":", "c1", "=", "0.5", "c2", "=", "-", "0.125", "d", "=", "0.25", "elif", "vs30", "[", "0", "]", ">", "360", "and", "vs30", "[", "0", "]", "<=", "760", ":", "c1", "=", "0.", "c2", "=", "0.", "d", "=", "0.", "elif", "vs30", "[", "0", "]", ">", "760", "and", "vs30", "[", "0", "]", "<=", "1500", ":", "c1", "=", "-", "0.5", "c2", "=", "0.125", "d", "=", "-", "0.25", "elif", "vs30", "[", "0", "]", ">", "1500", ":", "c1", "=", "-", "1.0", "c2", "=", "0.25", "d", "=", "-", "0.5", "S", "=", "np", ".", "zeros_like", "(", "vs30", ")", "for", "i", "in", "range", "(", "vs30", ".", "size", ")", ":", "if", "mmi_mean", "[", "i", "]", "<=", "7.0", ":", "S", "[", "i", "]", "+=", "c1", "elif", "mmi_mean", "[", "i", "]", ">", "7", "and", "mmi_mean", "[", "i", "]", "<", "9.5", ":", "S", "[", "i", "]", "+=", "c1", "-", "d", "*", "(", "mmi_mean", "[", "i", "]", "-", "7.0", ")", "else", ":", "S", "[", "i", "]", "+=", "c2", "return", "S" ]
Return site class flag for:
Class E - Very Soft Soil        vs30 < 180
Class D - Deep or Soft Soil     vs30 >= 180 and vs30 <= 360
Class C - Shallow Soil          vs30 > 360 and vs30 <= 760
Class B - Rock                  vs30 > 760 and vs30 <= 1500
Class A - Strong Rock           vs30 > 1500
The S site class is equal to
    S = c1                      if MMI <= 7
    S = c1 - d * (MMI - 7.0)    if 7 < MMI < 9.5
    S = c2                      if MMI >= 9.5
[ "Return", "site", "class", "flag", "for", ":", "Class", "E", "-", "Very", "Soft", "Soil", "vs30", "<", "180", "Class", "D", "-", "Deep", "or", "Soft", "Soil", "vs30", ">", "=", "180", "and", "vs30", "<", "=", "360", "Class", "C", "-", "Shallow", "Soil", "vs30", ">", "360", "and", "vs30", "<", "=", "760", "Class", "B", "-", "Rock", "vs30", ">", "760", "and", "vs30", "<", "=", "1500", "Class", "A", "-", "Strong", "Rock", "vs30", ">", "=", "180", "and", "vs30", "<", "=", "360", "The", "S", "site", "class", "is", "equal", "to", "S", "=", "c1", "if", "MMI", "<", "=", "7", "S", "=", "c1", "-", "d", "*", "(", "MMI", "-", "7", ".", "0", ")", "if", "7<MMI<9", ".", "5", "S", "=", "c2", "if", "MMI", ">", "=", "9", ".", "5" ]
python
train
mehmetg/streak_client
streak_client/streak_client.py
https://github.com/mehmetg/streak_client/blob/46575510b4e4163a4a3cc06f7283a1ae377cdce6/streak_client/streak_client.py#L222-L234
def delete_all_pipelines(self): '''Deletes all pipelines. Returns: OK for overall success or last error code, resp data. ''' code, data = self.get_pipeline() if code == requests.codes.ok: for pl_data in data: c, d = self.delete_pipeline(pl_data['pipelineKey']) if c != requests.codes.ok: code = c data = d return code, data
[ "def", "delete_all_pipelines", "(", "self", ")", ":", "code", ",", "data", "=", "self", ".", "get_pipeline", "(", ")", "if", "code", "==", "requests", ".", "codes", ".", "ok", ":", "for", "pl_data", "in", "data", ":", "c", ",", "d", "=", "self", ".", "delete_pipeline", "(", "pl_data", "[", "'pipelineKey'", "]", ")", "if", "c", "!=", "requests", ".", "codes", ".", "ok", ":", "code", "=", "c", "data", "=", "d", "return", "code", ",", "data" ]
Deletes all pipelines. Returns: OK for overall success or last error code, resp data.
[ "Deletes", "all", "pipelines", "Args", ":", "returns", "OK", "for", "overall", "success", "or", "last", "error", "code", "resp", "data", "." ]
python
train
gaqzi/py-gocd
gocd/api/template_config.py
https://github.com/gaqzi/py-gocd/blob/6fe5b62dea51e665c11a343aba5fc98e130c5c63/gocd/api/template_config.py#L77-L92
def delete(self): """Delete template config for specified template name. .. __: https://api.go.cd/current/#delete-a-template Returns: Response: :class:`gocd.api.response.Response` object """ headers = self._default_headers() return self._request(self.name, ok_status=None, data=None, headers=headers, method="DELETE")
[ "def", "delete", "(", "self", ")", ":", "headers", "=", "self", ".", "_default_headers", "(", ")", "return", "self", ".", "_request", "(", "self", ".", "name", ",", "ok_status", "=", "None", ",", "data", "=", "None", ",", "headers", "=", "headers", ",", "method", "=", "\"DELETE\"", ")" ]
Delete template config for specified template name. .. __: https://api.go.cd/current/#delete-a-template Returns: Response: :class:`gocd.api.response.Response` object
[ "Delete", "template", "config", "for", "specified", "template", "name", "." ]
python
valid
asweigart/pybresenham
pybresenham/__init__.py
https://github.com/asweigart/pybresenham/blob/5183f39af58d899cf736075d2b27c892824bb563/pybresenham/__init__.py#L437-L514
def floodFill(points, startx, starty): """ Returns a set of the (x, y) points of a filled in area. `points` is an iterable of (x, y) tuples of an arbitrary shape. `startx` and `starty` mark the starting point (likely inside the arbitrary shape) to begin filling from. >>> drawPoints(polygon(5, 5, 4, 5)) ,,,O,,, ,,O,O,, ,O,,,O, O,,,,,O O,,,,,O O,,,,,O ,O,,,O, ,OOOOO, >>> pentagonOutline = list(polygon(5, 5, 4, 5)) >>> floodFill(pentagonOutline, 5, 5) {(7, 3), (4, 7), (4, 8), (5, 6), (6, 6), (7, 7), (6, 2), (5, 1), (3, 7), (2, 5), (8, 5), (5, 8), (6, 7), (3, 3), (5, 5), (7, 6), (4, 4), (6, 3), (3, 6), (3, 4), (8, 6), (6, 4), (5, 4), (2, 6), (4, 5), (5, 2), (7, 5), (4, 2), (6, 5), (5, 3), (3, 5), (6, 8), (4, 6), (5, 7), (3, 8), (7, 4), (4, 3), (7, 8), (2, 4), (8, 4)} >>> drawPoints(floodFill(pentagonOutline, 5, 5)) ,,,O,,, ,,OOO,, ,OOOOO, OOOOOOO OOOOOOO OOOOOOO ,OOOOO, ,OOOOO, """ # Note: We're not going to use recursion here because 1) recursion is # overrated 2) on a large enough shape it would cause a stackoverflow # 3) flood fill doesn't strictly need recursion because it doesn't require # a stack and 4) recursion is overrated. allPoints = set(points) # Use a set because the look ups will be faster. # Find the min/max x/y values to get the "boundaries" of this shape, to # prevent an infinite loop. minx = miny = maxx = maxy = None for bpx, bpy in points: if minx is None: # This is the first point, so set all the min/max to it. minx = maxx = bpx miny = maxy = bpy continue if bpx < minx: minx = bpx if bpx > maxx: maxx = bpx if bpy < miny: miny = bpy if bpy > maxy: maxy = bpy pointsToProcess = [(startx, starty)] while pointsToProcess: x, y = pointsToProcess.pop() # Process point to the right of x, y. if x + 1 < maxx and (x + 1, y) not in allPoints: pointsToProcess.append((x + 1, y)) allPoints.add((x + 1, y)) # Process point to the left of x, y. if x - 1 > minx and (x - 1, y) not in allPoints: pointsToProcess.append((x - 1, y)) allPoints.add((x - 1, y)) # Process point below x, y. if y + 1 < maxy and (x, y + 1) not in allPoints: pointsToProcess.append((x, y + 1)) allPoints.add((x, y + 1)) # Process point above x, y. if y - 1 > miny and (x, y - 1) not in allPoints: pointsToProcess.append((x, y - 1)) allPoints.add((x, y - 1)) return allPoints
[ "def", "floodFill", "(", "points", ",", "startx", ",", "starty", ")", ":", "# Note: We're not going to use recursion here because 1) recursion is", "# overrated 2) on a large enough shape it would cause a stackoverflow", "# 3) flood fill doesn't strictly need recursion because it doesn't require", "# a stack and 4) recursion is overrated.", "allPoints", "=", "set", "(", "points", ")", "# Use a set because the look ups will be faster.", "# Find the min/max x/y values to get the \"boundaries\" of this shape, to", "# prevent an infinite loop.", "minx", "=", "miny", "=", "maxx", "=", "maxy", "=", "None", "for", "bpx", ",", "bpy", "in", "points", ":", "if", "minx", "is", "None", ":", "# This is the first point, so set all the min/max to it.", "minx", "=", "maxx", "=", "bpx", "miny", "=", "maxy", "=", "bpy", "continue", "if", "bpx", "<", "minx", ":", "minx", "=", "bpx", "if", "bpx", ">", "maxx", ":", "maxx", "=", "bpx", "if", "bpy", "<", "miny", ":", "miny", "=", "bpy", "if", "bpy", ">", "maxy", ":", "maxy", "=", "bpy", "pointsToProcess", "=", "[", "(", "startx", ",", "starty", ")", "]", "while", "pointsToProcess", ":", "x", ",", "y", "=", "pointsToProcess", ".", "pop", "(", ")", "# Process point to right left of x, y.", "if", "x", "+", "1", "<", "maxx", "and", "(", "x", "+", "1", ",", "y", ")", "not", "in", "allPoints", ":", "pointsToProcess", ".", "append", "(", "(", "x", "+", "1", ",", "y", ")", ")", "allPoints", ".", "add", "(", "(", "x", "+", "1", ",", "y", ")", ")", "# Process point to the left of x, y.", "if", "x", "-", "1", ">", "minx", "and", "(", "x", "-", "1", ",", "y", ")", "not", "in", "allPoints", ":", "pointsToProcess", ".", "append", "(", "(", "x", "-", "1", ",", "y", ")", ")", "allPoints", ".", "add", "(", "(", "x", "-", "1", ",", "y", ")", ")", "# Process point below x, y.", "if", "y", "+", "1", "<", "maxy", "and", "(", "x", ",", "y", "+", "1", ")", "not", "in", "allPoints", ":", "pointsToProcess", ".", "append", "(", "(", "x", ",", "y", "+", "1", ")", ")", "allPoints", ".", "add", "(", "(", "x", ",", "y", "+", "1", ")", ")", "# Process point above x, y.", "if", "y", "-", "1", ">", "miny", "and", "(", "x", ",", "y", "-", "1", ")", "not", "in", "allPoints", ":", "pointsToProcess", ".", "append", "(", "(", "x", ",", "y", "-", "1", ")", ")", "allPoints", ".", "add", "(", "(", "x", ",", "y", "-", "1", ")", ")", "return", "allPoints" ]
Returns a set of the (x, y) points of a filled in area. `points` is an iterable of (x, y) tuples of an arbitrary shape. `startx` and `starty` mark the starting point (likely inside the arbitrary shape) to begin filling from. >>> drawPoints(polygon(5, 5, 4, 5)) ,,,O,,, ,,O,O,, ,O,,,O, O,,,,,O O,,,,,O O,,,,,O ,O,,,O, ,OOOOO, >>> pentagonOutline = list(polygon(5, 5, 4, 5)) >>> floodFill(pentagonOutline, 5, 5) {(7, 3), (4, 7), (4, 8), (5, 6), (6, 6), (7, 7), (6, 2), (5, 1), (3, 7), (2, 5), (8, 5), (5, 8), (6, 7), (3, 3), (5, 5), (7, 6), (4, 4), (6, 3), (3, 6), (3, 4), (8, 6), (6, 4), (5, 4), (2, 6), (4, 5), (5, 2), (7, 5), (4, 2), (6, 5), (5, 3), (3, 5), (6, 8), (4, 6), (5, 7), (3, 8), (7, 4), (4, 3), (7, 8), (2, 4), (8, 4)} >>> drawPoints(floodFill(pentagonOutline, 5, 5)) ,,,O,,, ,,OOO,, ,OOOOO, OOOOOOO OOOOOOO OOOOOOO ,OOOOO, ,OOOOO,
[ "Returns", "a", "set", "of", "the", "(", "x", "y", ")", "points", "of", "a", "filled", "in", "area", "." ]
python
train
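A quick usage sketch of floodFill on a hand-built 5x5 square outline; it assumes floodFill and drawPoints are imported from pybresenham as in the docstring above:

outline = ([(x, 0) for x in range(5)] + [(x, 4) for x in range(5)] +
           [(0, y) for y in range(5)] + [(4, y) for y in range(5)])
filled = floodFill(outline, 2, 2)  # start from an interior point
drawPoints(filled)  # prints a solid 5x5 block: the 3x3 interior is filled in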
eng-tools/geofound
geofound/models.py
https://github.com/eng-tools/geofound/blob/6b1b097d5db998907bdcb5b4798fb4629674c770/geofound/models.py#L37-L51
def create_foundation(length, width, depth=0.0, height=0.0): """ Can define a Foundation Object from dimensions. :param length: Foundation length :param width: Foundation width :param depth: Foundation depth :param height: Foundation height :return: A Foundation object """ a_foundation = FoundationRaft() a_foundation.length = length a_foundation.width = width a_foundation.depth = depth a_foundation.height = height return a_foundation
[ "def", "create_foundation", "(", "length", ",", "width", ",", "depth", "=", "0.0", ",", "height", "=", "0.0", ")", ":", "a_foundation", "=", "FoundationRaft", "(", ")", "a_foundation", ".", "length", "=", "length", "a_foundation", ".", "width", "=", "width", "a_foundation", ".", "depth", "=", "depth", "a_foundation", ".", "height", "=", "height", "return", "a_foundation" ]
Can define a Foundation Object from dimensions. :param length: Foundation length :param width: Foundation width :param depth: Foundation depth :param height: Foundation height :return: A Foundation object
[ "Can", "define", "a", "Foundation", "Object", "from", "dimensions", ".", ":", "param", "length", ":", "Foundation", "length", ":", "param", "width", ":", "Foundation", "width", ":", "param", "depth", ":", "Foundation", "depth", ":", "param", "height", ":", "Foundation", "height", ":", "return", ":", "A", "Foundation", "object" ]
python
test
saltstack/salt
salt/netapi/rest_cherrypy/event_processor.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_cherrypy/event_processor.py#L35-L48
def publish_minions(self): ''' Publishes minions as a list of dicts. ''' minions = [] for minion, minion_info in six.iteritems(self.minions): curr_minion = {} curr_minion.update(minion_info) curr_minion.update({'id': minion}) minions.append(curr_minion) ret = {'minions': minions} self.handler.send(salt.utils.json.dumps(ret), False)
[ "def", "publish_minions", "(", "self", ")", ":", "minions", "=", "[", "]", "for", "minion", ",", "minion_info", "in", "six", ".", "iteritems", "(", "self", ".", "minions", ")", ":", "curr_minion", "=", "{", "}", "curr_minion", ".", "update", "(", "minion_info", ")", "curr_minion", ".", "update", "(", "{", "'id'", ":", "minion", "}", ")", "minions", ".", "append", "(", "curr_minion", ")", "ret", "=", "{", "'minions'", ":", "minions", "}", "self", ".", "handler", ".", "send", "(", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "ret", ")", ",", "False", ")" ]
Publishes minions as a list of dicts.
[ "Publishes", "minions", "as", "a", "list", "of", "dicts", "." ]
python
train
apache/incubator-mxnet
python/mxnet/gluon/parameter.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L287-L298
def _init_impl(self, data, ctx_list): """Sets data and grad.""" self._ctx_list = list(ctx_list) self._ctx_map = [[], []] for i, ctx in enumerate(self._ctx_list): dev_list = self._ctx_map[ctx.device_typeid&1] while len(dev_list) <= ctx.device_id: dev_list.append(None) dev_list[ctx.device_id] = i self._data = [data.copyto(ctx) for ctx in self._ctx_list] self._init_grad()
[ "def", "_init_impl", "(", "self", ",", "data", ",", "ctx_list", ")", ":", "self", ".", "_ctx_list", "=", "list", "(", "ctx_list", ")", "self", ".", "_ctx_map", "=", "[", "[", "]", ",", "[", "]", "]", "for", "i", ",", "ctx", "in", "enumerate", "(", "self", ".", "_ctx_list", ")", ":", "dev_list", "=", "self", ".", "_ctx_map", "[", "ctx", ".", "device_typeid", "&", "1", "]", "while", "len", "(", "dev_list", ")", "<=", "ctx", ".", "device_id", ":", "dev_list", ".", "append", "(", "None", ")", "dev_list", "[", "ctx", ".", "device_id", "]", "=", "i", "self", ".", "_data", "=", "[", "data", ".", "copyto", "(", "ctx", ")", "for", "ctx", "in", "self", ".", "_ctx_list", "]", "self", ".", "_init_grad", "(", ")" ]
Sets data and grad.
[ "Sets", "data", "and", "grad", "." ]
python
train
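A standalone sketch of the ctx-map bookkeeping above, using a stand-in Ctx class; the real mxnet Context exposes the same two attributes, and the typeid values below (1 for cpu, 2 for gpu) are assumptions for illustration only:

class Ctx:
    def __init__(self, device_typeid, device_id):
        self.device_typeid = device_typeid
        self.device_id = device_id

def build_ctx_map(ctx_list):
    # Two buckets keyed by device_typeid & 1; each bucket maps a
    # device_id to the context's index in ctx_list, so a context can
    # be resolved later without a linear scan.
    ctx_map = [[], []]
    for i, ctx in enumerate(ctx_list):
        dev_list = ctx_map[ctx.device_typeid & 1]
        while len(dev_list) <= ctx.device_id:
            dev_list.append(None)
        dev_list[ctx.device_id] = i
    return ctx_map

print(build_ctx_map([Ctx(2, 0), Ctx(2, 1), Ctx(1, 0)]))  # [[0, 1], [2]]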
Qiskit/qiskit-terra
qiskit/visualization/bloch.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/visualization/bloch.py#L380-L415
def render(self, title=''): """ Render the Bloch sphere and its data sets on the given figure and axes. """ if self._rendered: self.axes.clear() self._rendered = True # Figure instance for Bloch sphere plot if not self._ext_fig: self.fig = plt.figure(figsize=self.figsize) if not self._ext_axes: self.axes = Axes3D(self.fig, azim=self.view[0], elev=self.view[1]) if self.background: self.axes.clear() self.axes.set_xlim3d(-1.3, 1.3) self.axes.set_ylim3d(-1.3, 1.3) self.axes.set_zlim3d(-1.3, 1.3) else: self.plot_axes() self.axes.set_axis_off() self.axes.set_xlim3d(-0.7, 0.7) self.axes.set_ylim3d(-0.7, 0.7) self.axes.set_zlim3d(-0.7, 0.7) self.axes.grid(False) self.plot_back() self.plot_points() self.plot_vectors() self.plot_front() self.plot_axes_labels() self.plot_annotations() self.axes.set_title(title, fontsize=self.font_size, y=1.08)
[ "def", "render", "(", "self", ",", "title", "=", "''", ")", ":", "if", "self", ".", "_rendered", ":", "self", ".", "axes", ".", "clear", "(", ")", "self", ".", "_rendered", "=", "True", "# Figure instance for Bloch sphere plot", "if", "not", "self", ".", "_ext_fig", ":", "self", ".", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "self", ".", "figsize", ")", "if", "not", "self", ".", "_ext_axes", ":", "self", ".", "axes", "=", "Axes3D", "(", "self", ".", "fig", ",", "azim", "=", "self", ".", "view", "[", "0", "]", ",", "elev", "=", "self", ".", "view", "[", "1", "]", ")", "if", "self", ".", "background", ":", "self", ".", "axes", ".", "clear", "(", ")", "self", ".", "axes", ".", "set_xlim3d", "(", "-", "1.3", ",", "1.3", ")", "self", ".", "axes", ".", "set_ylim3d", "(", "-", "1.3", ",", "1.3", ")", "self", ".", "axes", ".", "set_zlim3d", "(", "-", "1.3", ",", "1.3", ")", "else", ":", "self", ".", "plot_axes", "(", ")", "self", ".", "axes", ".", "set_axis_off", "(", ")", "self", ".", "axes", ".", "set_xlim3d", "(", "-", "0.7", ",", "0.7", ")", "self", ".", "axes", ".", "set_ylim3d", "(", "-", "0.7", ",", "0.7", ")", "self", ".", "axes", ".", "set_zlim3d", "(", "-", "0.7", ",", "0.7", ")", "self", ".", "axes", ".", "grid", "(", "False", ")", "self", ".", "plot_back", "(", ")", "self", ".", "plot_points", "(", ")", "self", ".", "plot_vectors", "(", ")", "self", ".", "plot_front", "(", ")", "self", ".", "plot_axes_labels", "(", ")", "self", ".", "plot_annotations", "(", ")", "self", ".", "axes", ".", "set_title", "(", "title", ",", "fontsize", "=", "self", ".", "font_size", ",", "y", "=", "1.08", ")" ]
Render the Bloch sphere and its data sets on the given figure and axes.
[ "Render", "the", "Bloch", "sphere", "and", "its", "data", "sets", "in", "on", "given", "figure", "and", "axes", "." ]
python
test
kubernetes-client/python
kubernetes/client/apis/apps_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/apps_v1_api.py#L4848-L4871
def read_namespaced_daemon_set(self, name, namespace, **kwargs): """ read the specified DaemonSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_daemon_set(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the DaemonSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18. :param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18. :return: V1DaemonSet If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_daemon_set_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_daemon_set_with_http_info(name, namespace, **kwargs) return data
[ "def", "read_namespaced_daemon_set", "(", "self", ",", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "read_namespaced_daemon_set_with_http_info", "(", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "read_namespaced_daemon_set_with_http_info", "(", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", "return", "data" ]
read the specified DaemonSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_daemon_set(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the DaemonSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18. :param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18. :return: V1DaemonSet If the method is called asynchronously, returns the request thread.
[ "read", "the", "specified", "DaemonSet", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "read_namespaced_daemon_set", "(", "name", "namespace", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
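A hypothetical usage sketch; it assumes a reachable cluster with a configured kubeconfig, and the 'kube-proxy' DaemonSet in 'kube-system' is a common default but an assumption here:

from kubernetes import client, config

config.load_kube_config()  # read credentials from the local kubeconfig
apps_v1 = client.AppsV1Api()
ds = apps_v1.read_namespaced_daemon_set('kube-proxy', 'kube-system')
print(ds.metadata.name, ds.status.desired_number_scheduled)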
omza/azurestoragewrap
azurestoragewrap/queue.py
https://github.com/omza/azurestoragewrap/blob/976878e95d82ff0f7d8a00a5e4a7a3fb6268ab08/azurestoragewrap/queue.py#L361-L376
def delete(self, storagemodel:object, modeldefinition = None) -> bool: """ delete the message in queue """ deleted = False if (storagemodel.id != '') and (storagemodel.pop_receipt != '') and (not storagemodel.id is None) and (not storagemodel.pop_receipt is None): try: modeldefinition['queueservice'].delete_message(storagemodel._queuename, storagemodel.id, storagemodel.pop_receipt) deleted = True except Exception as e: msg = 'can not delete queue message: queue {} with message.id {!s} because {!s}'.format(storagemodel._queuename, storagemodel.id, e) raise AzureStorageWrapException(msg=msg) else: log.info('cant delete queuemessage {} due to missing id and pop_receipt'.format(storagemodel._queuename)) return deleted
[ "def", "delete", "(", "self", ",", "storagemodel", ":", "object", ",", "modeldefinition", "=", "None", ")", "->", "bool", ":", "deleted", "=", "False", "if", "(", "storagemodel", ".", "id", "!=", "''", ")", "and", "(", "storagemodel", ".", "pop_receipt", "!=", "''", ")", "and", "(", "not", "storagemodel", ".", "id", "is", "None", ")", "and", "(", "not", "storagemodel", ".", "pop_receipt", "is", "None", ")", ":", "try", ":", "modeldefinition", "[", "'queueservice'", "]", ".", "delete_message", "(", "storagemodel", ".", "_queuename", ",", "storagemodel", ".", "id", ",", "storagemodel", ".", "pop_receipt", ")", "deleted", "=", "True", "except", "Exception", "as", "e", ":", "msg", "=", "'can not delete queue message: queue {} with message.id {!s} because {!s}'", ".", "format", "(", "storagemodel", ".", "_queuename", ",", "storagemodel", ".", "id", ",", "e", ")", "raise", "AzureStorageWrapException", "(", "msg", "=", "msg", ")", "else", ":", "log", ".", "info", "(", "'cant update queuemessage {} due to missing id and pop_receipt'", ".", "format", "(", "storagemodel", ".", "_queuename", ")", ")", "return", "deleted" ]
delete the message in queue
[ "delete", "the", "message", "in", "queue" ]
python
train
mitsei/dlkit
dlkit/records/assessment/analytic/irt.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/analytic/irt.py#L121-L125
def set_difficulty_value(self, difficulty): """stub""" if not isinstance(difficulty, float): raise InvalidArgument('difficulty value must be a decimal') self.add_decimal_value(difficulty, 'difficulty')
[ "def", "set_difficulty_value", "(", "self", ",", "difficulty", ")", ":", "if", "not", "isinstance", "(", "difficulty", ",", "float", ")", ":", "raise", "InvalidArgument", "(", "'difficulty value must be a decimal'", ")", "self", ".", "add_decimal_value", "(", "difficulty", ",", "'difficulty'", ")" ]
stub
[ "stub" ]
python
train
deepmipt/DeepPavlov
deeppavlov/utils/alexa/conversation.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/conversation.py#L101-L116
def _act(self, utterance: str) -> list: """Infers DeepPavlov agent with raw user input extracted from Alexa request. Args: utterance: Raw user input extracted from Alexa request. Returns: response: DeepPavlov agent response. """ if self.stateful: utterance = [[utterance], [self.key]] else: utterance = [[utterance]] agent_response: list = self.agent(*utterance) return agent_response
[ "def", "_act", "(", "self", ",", "utterance", ":", "str", ")", "->", "list", ":", "if", "self", ".", "stateful", ":", "utterance", "=", "[", "[", "utterance", "]", ",", "[", "self", ".", "key", "]", "]", "else", ":", "utterance", "=", "[", "[", "utterance", "]", "]", "agent_response", ":", "list", "=", "self", ".", "agent", "(", "*", "utterance", ")", "return", "agent_response" ]
Infers DeepPavlov agent with raw user input extracted from Alexa request. Args: utterance: Raw user input extracted from Alexa request. Returns: response: DeepPavlov agent response.
[ "Infers", "DeepPavlov", "agent", "with", "raw", "user", "input", "extracted", "from", "Alexa", "request", "." ]
python
test
tensorflow/lucid
lucid/misc/tfutil.py
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/tfutil.py#L19-L31
def create_session(target='', timeout_sec=10): '''Create an interactive TensorFlow session. Helper function that creates a TF session that uses growing GPU memory allocation and operation timeout. 'allow_growth' flag prevents TF from allocating the whole GPU memory at once, which is useful when having multiple python sessions sharing the same GPU. ''' graph = tf.Graph() config = tf.ConfigProto() config.gpu_options.allow_growth = True config.operation_timeout_in_ms = int(timeout_sec*1000) return tf.InteractiveSession(target=target, graph=graph, config=config)
[ "def", "create_session", "(", "target", "=", "''", ",", "timeout_sec", "=", "10", ")", ":", "graph", "=", "tf", ".", "Graph", "(", ")", "config", "=", "tf", ".", "ConfigProto", "(", ")", "config", ".", "gpu_options", ".", "allow_growth", "=", "True", "config", ".", "operation_timeout_in_ms", "=", "int", "(", "timeout_sec", "*", "1000", ")", "return", "tf", ".", "InteractiveSession", "(", "target", "=", "target", ",", "graph", "=", "graph", ",", "config", "=", "config", ")" ]
Create an interactive TensorFlow session. Helper function that creates a TF session that uses growing GPU memory allocation and operation timeout. 'allow_growth' flag prevents TF from allocating the whole GPU memory at once, which is useful when having multiple python sessions sharing the same GPU.
[ "Create", "an", "intractive", "TensorFlow", "session", "." ]
python
train
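A usage sketch under TensorFlow 1.x, matching the APIs in the function above; InteractiveSession installs itself and its graph as the defaults, so ops built afterwards land in the session's own graph:

import tensorflow as tf  # TF 1.x assumed, as in the code above

sess = create_session(timeout_sec=5)
x = tf.constant([1.0, 2.0])        # created in the session's graph
print(sess.run(tf.reduce_sum(x)))  # 3.0
sess.close()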
Scifabric/pybossa-client
pbclient/__init__.py
https://github.com/Scifabric/pybossa-client/blob/998d7cb0207ff5030dc800f0c2577c5692316c2c/pbclient/__init__.py#L329-L345
def get_category(category_id): """Return a PYBOSSA Category for the category_id. :param category_id: PYBOSSA Category ID :type category_id: integer :rtype: PYBOSSA Category :returns: A PYBOSSA Category object """ try: res = _pybossa_req('get', 'category', category_id) if res.get('id'): return Category(res) else: return res except: # pragma: no cover raise
[ "def", "get_category", "(", "category_id", ")", ":", "try", ":", "res", "=", "_pybossa_req", "(", "'get'", ",", "'category'", ",", "category_id", ")", "if", "res", ".", "get", "(", "'id'", ")", ":", "return", "Category", "(", "res", ")", "else", ":", "return", "res", "except", ":", "# pragma: no cover", "raise" ]
Return a PYBOSSA Category for the category_id. :param category_id: PYBOSSA Category ID :type category_id: integer :rtype: PYBOSSA Category :returns: A PYBOSSA Category object
[ "Return", "a", "PYBOSSA", "Category", "for", "the", "category_id", "." ]
python
valid
codeforamerica/epa_python
epa/pcs/pcs.py
https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/pcs/pcs.py#L24-L33
def admin_penalty(self, column=None, value=None, **kwargs): """ An enforcement action that results in levying the permit holder with a penalty or fine. It is used to track judicial hearing dates, penalty amounts, and type of administrative penalty order. >>> PCS().admin_penalty('enfor_action_date', '16-MAR-01') """ return self._resolve_call('PCS_ADMIN_PENALTY_ORDER', column, value, **kwargs)
[ "def", "admin_penalty", "(", "self", ",", "column", "=", "None", ",", "value", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_resolve_call", "(", "'PCS_ADMIN_PENALTY_ORDER'", ",", "column", ",", "value", ",", "*", "*", "kwargs", ")" ]
An enforcement action that results in levying the permit holder with a penalty or fine. It is used to track judicial hearing dates, penalty amounts, and type of administrative penalty order. >>> PCS().admin_penalty('enfor_action_date', '16-MAR-01')
[ "An", "enforcement", "action", "that", "results", "in", "levying", "the", "permit", "holder", "with", "a", "penalty", "or", "fine", ".", "It", "is", "used", "to", "track", "judicial", "hearing", "dates", "penalty", "amounts", "and", "type", "of", "administrative", "penalty", "order", "." ]
python
train
pandas-dev/pandas
pandas/core/groupby/groupby.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1251-L1324
def _add_numeric_operations(cls): """ Add numeric operations to the GroupBy generically. """ def groupby_function(name, alias, npfunc, numeric_only=True, _convert=False, min_count=-1): _local_template = "Compute %(f)s of group values" @Substitution(name='groupby', f=name) @Appender(_common_see_also) @Appender(_local_template) def f(self, **kwargs): if 'numeric_only' not in kwargs: kwargs['numeric_only'] = numeric_only if 'min_count' not in kwargs: kwargs['min_count'] = min_count self._set_group_selection() try: return self._cython_agg_general( alias, alt=npfunc, **kwargs) except AssertionError as e: raise SpecificationError(str(e)) except Exception: result = self.aggregate( lambda x: npfunc(x, axis=self.axis)) if _convert: result = result._convert(datetime=True) return result set_function_name(f, name, cls) return f def first_compat(x, axis=0): def first(x): x = x.to_numpy() x = x[notna(x)] if len(x) == 0: return np.nan return x[0] if isinstance(x, DataFrame): return x.apply(first, axis=axis) else: return first(x) def last_compat(x, axis=0): def last(x): x = x.to_numpy() x = x[notna(x)] if len(x) == 0: return np.nan return x[-1] if isinstance(x, DataFrame): return x.apply(last, axis=axis) else: return last(x) cls.sum = groupby_function('sum', 'add', np.sum, min_count=0) cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0) cls.min = groupby_function('min', 'min', np.min, numeric_only=False) cls.max = groupby_function('max', 'max', np.max, numeric_only=False) cls.first = groupby_function('first', 'first', first_compat, numeric_only=False) cls.last = groupby_function('last', 'last', last_compat, numeric_only=False)
[ "def", "_add_numeric_operations", "(", "cls", ")", ":", "def", "groupby_function", "(", "name", ",", "alias", ",", "npfunc", ",", "numeric_only", "=", "True", ",", "_convert", "=", "False", ",", "min_count", "=", "-", "1", ")", ":", "_local_template", "=", "\"Compute %(f)s of group values\"", "@", "Substitution", "(", "name", "=", "'groupby'", ",", "f", "=", "name", ")", "@", "Appender", "(", "_common_see_also", ")", "@", "Appender", "(", "_local_template", ")", "def", "f", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "'numeric_only'", "not", "in", "kwargs", ":", "kwargs", "[", "'numeric_only'", "]", "=", "numeric_only", "if", "'min_count'", "not", "in", "kwargs", ":", "kwargs", "[", "'min_count'", "]", "=", "min_count", "self", ".", "_set_group_selection", "(", ")", "try", ":", "return", "self", ".", "_cython_agg_general", "(", "alias", ",", "alt", "=", "npfunc", ",", "*", "*", "kwargs", ")", "except", "AssertionError", "as", "e", ":", "raise", "SpecificationError", "(", "str", "(", "e", ")", ")", "except", "Exception", ":", "result", "=", "self", ".", "aggregate", "(", "lambda", "x", ":", "npfunc", "(", "x", ",", "axis", "=", "self", ".", "axis", ")", ")", "if", "_convert", ":", "result", "=", "result", ".", "_convert", "(", "datetime", "=", "True", ")", "return", "result", "set_function_name", "(", "f", ",", "name", ",", "cls", ")", "return", "f", "def", "first_compat", "(", "x", ",", "axis", "=", "0", ")", ":", "def", "first", "(", "x", ")", ":", "x", "=", "x", ".", "to_numpy", "(", ")", "x", "=", "x", "[", "notna", "(", "x", ")", "]", "if", "len", "(", "x", ")", "==", "0", ":", "return", "np", ".", "nan", "return", "x", "[", "0", "]", "if", "isinstance", "(", "x", ",", "DataFrame", ")", ":", "return", "x", ".", "apply", "(", "first", ",", "axis", "=", "axis", ")", "else", ":", "return", "first", "(", "x", ")", "def", "last_compat", "(", "x", ",", "axis", "=", "0", ")", ":", "def", "last", "(", "x", ")", ":", "x", "=", "x", ".", "to_numpy", "(", ")", "x", "=", "x", "[", "notna", "(", "x", ")", "]", "if", "len", "(", "x", ")", "==", "0", ":", "return", "np", ".", "nan", "return", "x", "[", "-", "1", "]", "if", "isinstance", "(", "x", ",", "DataFrame", ")", ":", "return", "x", ".", "apply", "(", "last", ",", "axis", "=", "axis", ")", "else", ":", "return", "last", "(", "x", ")", "cls", ".", "sum", "=", "groupby_function", "(", "'sum'", ",", "'add'", ",", "np", ".", "sum", ",", "min_count", "=", "0", ")", "cls", ".", "prod", "=", "groupby_function", "(", "'prod'", ",", "'prod'", ",", "np", ".", "prod", ",", "min_count", "=", "0", ")", "cls", ".", "min", "=", "groupby_function", "(", "'min'", ",", "'min'", ",", "np", ".", "min", ",", "numeric_only", "=", "False", ")", "cls", ".", "max", "=", "groupby_function", "(", "'max'", ",", "'max'", ",", "np", ".", "max", ",", "numeric_only", "=", "False", ")", "cls", ".", "first", "=", "groupby_function", "(", "'first'", ",", "'first'", ",", "first_compat", ",", "numeric_only", "=", "False", ")", "cls", ".", "last", "=", "groupby_function", "(", "'last'", ",", "'last'", ",", "last_compat", ",", "numeric_only", "=", "False", ")" ]
Add numeric operations to the GroupBy generically.
[ "Add", "numeric", "operations", "to", "the", "GroupBy", "generically", "." ]
python
train
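A minimal standalone sketch of the same method-factory pattern: build reduction methods in a loop and attach them to the class. The pandas version additionally wires in Cython fast paths, docstring templating, and a fallback aggregation path, all omitted here:

import numpy as np

class Grouped:
    def __init__(self, groups):  # groups: dict of name -> array
        self.groups = groups

def _make_reduction(name, npfunc):
    def f(self):
        return {k: npfunc(v) for k, v in self.groups.items()}
    f.__name__ = name
    return f

for _name, _func in [('sum', np.sum), ('min', np.min), ('max', np.max)]:
    setattr(Grouped, _name, _make_reduction(_name, _func))

g = Grouped({'a': np.array([1, 2]), 'b': np.array([3, 4])})
print(g.sum())  # per-group sums: a -> 3, b -> 7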
mehcode/python-saml
saml/signature.py
https://github.com/mehcode/python-saml/blob/33ed62018efa9ec15b551f309429de510fa44321/saml/signature.py#L11-L110
def sign(xml, stream, password=None): """ Sign an XML document with the given private key file. This will add a <Signature> element to the document. :param lxml.etree._Element xml: The document to sign :param file stream: The private key to sign the document with :param str password: The password used to access the private key :rtype: None Example usage: :: from saml import schema from lxml import etree document = schema.AuthenticationRequest() xml_document = document.serialize() with open('my_key_file.pem', 'r+') as stream: sign(xml_document, stream) print etree.tostring(xml_document) Produces the following XML document: .. code-block:: xml <samlp:AuthnRequest xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" Version="2.0" ID="_6087de0b111b44349a70ff40191a4c0c" IssueInstant="2015-03-16T21:06:39Z"> <Signature xmlns="http://www.w3.org/2000/09/xmldsig#"> <SignedInfo> <CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/> <SignatureMethod Algorithm="http://www.w3.org/2000/ 09/xmldsig#rsa-sha1"/> <Reference> <Transforms> <Transform Algorithm="http://www.w3.org/2000/ 09/xmldsig#enveloped-signature"/> </Transforms> <DigestMethod Algorithm="http://www.w3.org/2000/ 09/xmldsig#sha1"/> <DigestValue> 94O1FOjRE4JQYVDqStkYzne9StQ= </DigestValue> </Reference> </SignedInfo> <SignatureValue> aFYRRjtB3bDyLLJzLZmsn0K4SXmOpFYJ+8R8D31VojgiF37FOElbE56UFbm8BAjn l2AixrUGXP4djxoxxnfBD/reYw5yVuIVXlMxKec784nF2V4GyrfwJOKaNmlVPkq5 c8SI+EkKJ02mwiail0Zvjb9FzwvlYD+osMSXvJXVqnGHQDVFlhwbBRRVB6t44/M3 TzC4mLSVhuvcpsm4GTQSpGkHP7HvweKN/OTc0aTy8Kh/YUrImwnUCii+J0EW4nGg 71eZyq/IiSPnTD09WDHsWe3g29kpicZXqrQCWeLE2zfVKtyxxs7PyEmodH19jXyz wh9hQ8t6PFO47Ros5aV0bw== </SignatureValue> </Signature> </samlp:AuthnRequest> """ # Import xmlsec here to delay initializing the C library in # case we don't need it. import xmlsec # Resolve the SAML/2.0 element in question. from saml.schema.base import _element_registry element = _element_registry.get(xml.tag) # Create a signature template for RSA-SHA1 enveloped signature. signature_node = xmlsec.template.create( xml, xmlsec.Transform.EXCL_C14N, xmlsec.Transform.RSA_SHA1) # Add the <ds:Signature/> node to the document. xml.insert(element.meta.signature_index, signature_node) # Add the <ds:Reference/> node to the signature template. ref = xmlsec.template.add_reference( signature_node, xmlsec.Transform.SHA1) # Add the enveloped transform descriptor. xmlsec.template.add_transform(ref, xmlsec.Transform.ENVELOPED) # Create a digital signature context (no key manager is needed). ctx = xmlsec.SignatureContext() # Load private key. key = xmlsec.Key.from_memory(stream, xmlsec.KeyFormat.PEM, password) # Set the key on the context. ctx.key = key # Sign the template. ctx.sign(signature_node)
[ "def", "sign", "(", "xml", ",", "stream", ",", "password", "=", "None", ")", ":", "# Import xmlsec here to delay initializing the C library in", "# case we don't need it.", "import", "xmlsec", "# Resolve the SAML/2.0 element in question.", "from", "saml", ".", "schema", ".", "base", "import", "_element_registry", "element", "=", "_element_registry", ".", "get", "(", "xml", ".", "tag", ")", "# Create a signature template for RSA-SHA1 enveloped signature.", "signature_node", "=", "xmlsec", ".", "template", ".", "create", "(", "xml", ",", "xmlsec", ".", "Transform", ".", "EXCL_C14N", ",", "xmlsec", ".", "Transform", ".", "RSA_SHA1", ")", "# Add the <ds:Signature/> node to the document.", "xml", ".", "insert", "(", "element", ".", "meta", ".", "signature_index", ",", "signature_node", ")", "# Add the <ds:Reference/> node to the signature template.", "ref", "=", "xmlsec", ".", "template", ".", "add_reference", "(", "signature_node", ",", "xmlsec", ".", "Transform", ".", "SHA1", ")", "# Add the enveloped transform descriptor.", "xmlsec", ".", "template", ".", "add_transform", "(", "ref", ",", "xmlsec", ".", "Transform", ".", "ENVELOPED", ")", "# Create a digital signature context (no key manager is needed).", "ctx", "=", "xmlsec", ".", "SignatureContext", "(", ")", "# Load private key.", "key", "=", "xmlsec", ".", "Key", ".", "from_memory", "(", "stream", ",", "xmlsec", ".", "KeyFormat", ".", "PEM", ",", "password", ")", "# Set the key on the context.", "ctx", ".", "key", "=", "key", "# Sign the template.", "ctx", ".", "sign", "(", "signature_node", ")" ]
Sign an XML document with the given private key file. This will add a <Signature> element to the document. :param lxml.etree._Element xml: The document to sign :param file stream: The private key to sign the document with :param str password: The password used to access the private key :rtype: None Example usage: :: from saml import schema from lxml import etree document = schema.AuthenticationRequest() xml_document = document.serialize() with open('my_key_file.pem', 'r+') as stream: sign(xml_document, stream) print etree.tostring(xml_document) Produces the following XML document: .. code-block:: xml <samlp:AuthnRequest xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" Version="2.0" ID="_6087de0b111b44349a70ff40191a4c0c" IssueInstant="2015-03-16T21:06:39Z"> <Signature xmlns="http://www.w3.org/2000/09/xmldsig#"> <SignedInfo> <CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/> <SignatureMethod Algorithm="http://www.w3.org/2000/ 09/xmldsig#rsa-sha1"/> <Reference> <Transforms> <Transform Algorithm="http://www.w3.org/2000/ 09/xmldsig#enveloped-signature"/> </Transforms> <DigestMethod Algorithm="http://www.w3.org/2000/ 09/xmldsig#sha1"/> <DigestValue> 94O1FOjRE4JQYVDqStkYzne9StQ= </DigestValue> </Reference> </SignedInfo> <SignatureValue> aFYRRjtB3bDyLLJzLZmsn0K4SXmOpFYJ+8R8D31VojgiF37FOElbE56UFbm8BAjn l2AixrUGXP4djxoxxnfBD/reYw5yVuIVXlMxKec784nF2V4GyrfwJOKaNmlVPkq5 c8SI+EkKJ02mwiail0Zvjb9FzwvlYD+osMSXvJXVqnGHQDVFlhwbBRRVB6t44/M3 TzC4mLSVhuvcpsm4GTQSpGkHP7HvweKN/OTc0aTy8Kh/YUrImwnUCii+J0EW4nGg 71eZyq/IiSPnTD09WDHsWe3g29kpicZXqrQCWeLE2zfVKtyxxs7PyEmodH19jXyz wh9hQ8t6PFO47Ros5aV0bw== </SignatureValue> </Signature> </samlp:AuthnRequest>
[ "Sign", "an", "XML", "document", "with", "the", "given", "private", "key", "file", ".", "This", "will", "add", "a", "<Signature", ">", "element", "to", "the", "document", "." ]
python
valid
CityOfZion/neo-python-core
neocore/Cryptography/ECCurve.py
https://github.com/CityOfZion/neo-python-core/blob/786c02cc2f41712d70b1f064ae3d67f86167107f/neocore/Cryptography/ECCurve.py#L864-L870
def secp256k1(): """ create the secp256k1 curve """ GFp = FiniteField(2 ** 256 - 2 ** 32 - 977) # This is P from below... aka FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F ec = EllipticCurve(GFp, 0, 7) return ECDSA(ec, ec.point(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798, 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8), 2 ** 256 - 432420386565659656852420866394968145599)
[ "def", "secp256k1", "(", ")", ":", "GFp", "=", "FiniteField", "(", "2", "**", "256", "-", "2", "**", "32", "-", "977", ")", "# This is P from below... aka FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", "ec", "=", "EllipticCurve", "(", "GFp", ",", "0", ",", "7", ")", "return", "ECDSA", "(", "ec", ",", "ec", ".", "point", "(", "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", ",", "0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", ")", ",", "2", "**", "256", "-", "432420386565659656852420866394968145599", ")" ]
create the secp256k1 curve
[ "create", "the", "secp256k1", "curve" ]
python
train
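A quick standalone check, in plain integer arithmetic, that the generator point hard-coded above actually lies on the secp256k1 curve y^2 = x^3 + 7 over GF(p):

p = 2 ** 256 - 2 ** 32 - 977
Gx = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
Gy = 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8
assert (Gy * Gy - Gx ** 3 - 7) % p == 0  # generator satisfies the curve equation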
Projectplace/basepage
basepage/base_page.py
https://github.com/Projectplace/basepage/blob/735476877eb100db0981590a6d12140e68652167/basepage/base_page.py#L418-L430
def get_visible_element(self, locator, params=None, timeout=None): """ Get element both present AND visible in the DOM. If timeout is 0 (zero) return WebElement instance or None, else we wait and retry for timeout and raise TimeoutException should the element not be found. :param locator: locator tuple :param params: (optional) locator params :param timeout: (optional) time to wait for element (default: self._explicit_wait) :return: WebElement instance """ return self.get_present_element(locator, params, timeout, True)
[ "def", "get_visible_element", "(", "self", ",", "locator", ",", "params", "=", "None", ",", "timeout", "=", "None", ")", ":", "return", "self", ".", "get_present_element", "(", "locator", ",", "params", ",", "timeout", ",", "True", ")" ]
Get element both present AND visible in the DOM. If timeout is 0 (zero) return WebElement instance or None, else we wait and retry for timeout and raise TimeoutException should the element not be found. :param locator: locator tuple :param params: (optional) locator params :param timeout: (optional) time to wait for element (default: self._explicit_wait) :return: WebElement instance
[ "Get", "element", "both", "present", "AND", "visible", "in", "the", "DOM", "." ]
python
train
svenkreiss/databench
databench/datastore.py
https://github.com/svenkreiss/databench/blob/99d4adad494b60a42af6b8bfba94dd0c41ba0786/databench/datastore.py#L98-L109
def set(self, key, value): """Set a value at key and return a Future. :rtype: Iterable[tornado.concurrent.Future] """ value_encoded = encode(value) if key in self.data and self.data[key] == value_encoded: return [] self.data[key] = value_encoded return self.trigger_callbacks(key)
[ "def", "set", "(", "self", ",", "key", ",", "value", ")", ":", "value_encoded", "=", "encode", "(", "value", ")", "if", "key", "in", "self", ".", "data", "and", "self", ".", "data", "[", "key", "]", "==", "value_encoded", ":", "return", "[", "]", "self", ".", "data", "[", "key", "]", "=", "value_encoded", "return", "self", ".", "trigger_callbacks", "(", "key", ")" ]
Set a value at key and return a Future. :rtype: Iterable[tornado.concurrent.Future]
[ "Set", "a", "value", "at", "key", "and", "return", "a", "Future", "." ]
python
train
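A minimal standalone sketch of the same write-skip pattern: encode the value and fire callbacks only when the stored encoding actually changes. json.dumps stands in for the module's own encode():

import json

class TinyStore:
    def __init__(self):
        self.data = {}
        self.callbacks = []

    def set(self, key, value):
        encoded = json.dumps(value, sort_keys=True)
        if key in self.data and self.data[key] == encoded:
            return []  # unchanged: no callbacks fire
        self.data[key] = encoded
        return [cb(key) for cb in self.callbacks]

store = TinyStore()
store.callbacks.append(lambda key: print('changed:', key))
store.set('a', 1)  # fires the callback
store.set('a', 1)  # skipped, value unchanged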
pkkid/python-plexapi
plexapi/library.py
https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/library.py#L423-L426
def cancelUpdate(self): """ Cancel update of this Library Section. """ key = '/library/sections/%s/refresh' % self.key self._server.query(key, method=self._server._session.delete)
[ "def", "cancelUpdate", "(", "self", ")", ":", "key", "=", "'/library/sections/%s/refresh'", "%", "self", ".", "key", "self", ".", "_server", ".", "query", "(", "key", ",", "method", "=", "self", ".", "_server", ".", "_session", ".", "delete", ")" ]
Cancel update of this Library Section.
[ "Cancel", "update", "of", "this", "Library", "Section", "." ]
python
train
saltstack/salt
salt/modules/heat.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/heat.py#L286-L314
def list_stack(profile=None): ''' Return a list of available stacks (heat stack-list) profile Profile to use CLI Example: .. code-block:: bash salt '*' heat.list_stack profile=openstack1 ''' ret = {} h_client = _auth(profile) for stack in h_client.stacks.list(): links = {} for link in stack.links: links[link['rel']] = link['href'] ret[stack.stack_name] = { 'status': stack.stack_status, 'id': stack.id, 'name': stack.stack_name, 'creation': stack.creation_time, 'owner': stack.stack_owner, 'reason': stack.stack_status_reason, 'links': links, } return ret
[ "def", "list_stack", "(", "profile", "=", "None", ")", ":", "ret", "=", "{", "}", "h_client", "=", "_auth", "(", "profile", ")", "for", "stack", "in", "h_client", ".", "stacks", ".", "list", "(", ")", ":", "links", "=", "{", "}", "for", "link", "in", "stack", ".", "links", ":", "links", "[", "link", "[", "'rel'", "]", "]", "=", "link", "[", "'href'", "]", "ret", "[", "stack", ".", "stack_name", "]", "=", "{", "'status'", ":", "stack", ".", "stack_status", ",", "'id'", ":", "stack", ".", "id", ",", "'name'", ":", "stack", ".", "stack_name", ",", "'creation'", ":", "stack", ".", "creation_time", ",", "'owner'", ":", "stack", ".", "stack_owner", ",", "'reason'", ":", "stack", ".", "stack_status_reason", ",", "'links'", ":", "links", ",", "}", "return", "ret" ]
Return a list of available stacks (heat stack-list) profile Profile to use CLI Example: .. code-block:: bash salt '*' heat.list_stack profile=openstack1
[ "Return", "a", "list", "of", "available", "stack", "(", "heat", "stack", "-", "list", ")" ]
python
train
pandas-dev/pandas
pandas/core/ops.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L1628-L1646
def _align_method_SERIES(left, right, align_asobject=False): """ align lhs and rhs Series """ # ToDo: Different from _align_method_FRAME, list, tuple and ndarray # are not coerced here # because Series has inconsistencies described in #13637 if isinstance(right, ABCSeries): # avoid repeated alignment if not left.index.equals(right.index): if align_asobject: # to keep original value's dtype for bool ops left = left.astype(object) right = right.astype(object) left, right = left.align(right, copy=False) return left, right
[ "def", "_align_method_SERIES", "(", "left", ",", "right", ",", "align_asobject", "=", "False", ")", ":", "# ToDo: Different from _align_method_FRAME, list, tuple and ndarray", "# are not coerced here", "# because Series has inconsistencies described in #13637", "if", "isinstance", "(", "right", ",", "ABCSeries", ")", ":", "# avoid repeated alignment", "if", "not", "left", ".", "index", ".", "equals", "(", "right", ".", "index", ")", ":", "if", "align_asobject", ":", "# to keep original value's dtype for bool ops", "left", "=", "left", ".", "astype", "(", "object", ")", "right", "=", "right", ".", "astype", "(", "object", ")", "left", ",", "right", "=", "left", ".", "align", "(", "right", ",", "copy", "=", "False", ")", "return", "left", ",", "right" ]
align lhs and rhs Series
[ "align", "lhs", "and", "rhs", "Series" ]
python
train
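A usage sketch of the underlying Series.align call: indexes are unioned and missing labels become NaN before the binary op runs:

import pandas as pd

s1 = pd.Series([1, 2], index=['a', 'b'])
s2 = pd.Series([10, 20], index=['b', 'c'])
left, right = s1.align(s2)
print(left.index.tolist())      # ['a', 'b', 'c']
print((left + right).tolist())  # [nan, 12.0, nan]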
mukulhase/WebWhatsapp-Wrapper
sample/flask/webapi.py
https://github.com/mukulhase/WebWhatsapp-Wrapper/blob/81b918ee4e0cd0cb563807a72baa167f670d70cb/sample/flask/webapi.py#L473-L479
def create_client(): """Create a new client driver. The driver is automatically created in before_request function.""" result = False if g.client_id in drivers: result = True return jsonify({'Success': result})
[ "def", "create_client", "(", ")", ":", "result", "=", "False", "if", "g", ".", "client_id", "in", "drivers", ":", "result", "=", "True", "return", "jsonify", "(", "{", "'Success'", ":", "result", "}", ")" ]
Create a new client driver. The driver is automatically created in before_request function.
[ "Create", "a", "new", "client", "driver", ".", "The", "driver", "is", "automatically", "created", "in", "before_request", "function", "." ]
python
train
StorjOld/pyp2p
pyp2p/rendezvous_client.py
https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_client.py#L544-L557
def parse_remote_port(self, reply): """ Parses a remote port from a Rendezvous Server's response. """ remote_port = re.findall("^REMOTE (TCP|UDP) ([0-9]+)$", reply) if not len(remote_port): remote_port = 0 else: remote_port = int(remote_port[0][1]) if remote_port < 1 or remote_port > 65535: remote_port = 0 return remote_port
[ "def", "parse_remote_port", "(", "self", ",", "reply", ")", ":", "remote_port", "=", "re", ".", "findall", "(", "\"^REMOTE (TCP|UDP) ([0-9]+)$\"", ",", "reply", ")", "if", "not", "len", "(", "remote_port", ")", ":", "remote_port", "=", "0", "else", ":", "remote_port", "=", "int", "(", "remote_port", "[", "0", "]", "[", "1", "]", ")", "if", "remote_port", "<", "1", "or", "remote_port", ">", "65535", ":", "remote_port", "=", "0", "return", "remote_port" ]
Parses a remote port from a Rendezvous Server's response.
[ "Parses", "a", "remote", "port", "from", "a", "Rendezvous", "Server", "s", "response", "." ]
python
train
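A standalone sketch of the same parse-and-validate logic, lifted out of the class:

import re

def parse_remote_port(reply):
    match = re.findall("^REMOTE (TCP|UDP) ([0-9]+)$", reply)
    port = int(match[0][1]) if match else 0
    return port if 1 <= port <= 65535 else 0

print(parse_remote_port("REMOTE TCP 8080"))   # 8080
print(parse_remote_port("REMOTE UDP 99999"))  # 0 (out of range)
print(parse_remote_port("garbage"))           # 0 (no match)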
HDI-Project/BTB
btb/selection/best.py
https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/selection/best.py#L71-L81
def compute_rewards(self, scores): """Compute the velocity of the best scores The velocities are the k distances between the k+1 best scores. """ k = self.k m = max(len(scores) - k, 0) best_scores = sorted(scores)[-k - 1:] velocities = np.diff(best_scores) nans = np.full(m, np.nan) return list(velocities) + list(nans)
[ "def", "compute_rewards", "(", "self", ",", "scores", ")", ":", "k", "=", "self", ".", "k", "m", "=", "max", "(", "len", "(", "scores", ")", "-", "k", ",", "0", ")", "best_scores", "=", "sorted", "(", "scores", ")", "[", "-", "k", "-", "1", ":", "]", "velocities", "=", "np", ".", "diff", "(", "best_scores", ")", "nans", "=", "np", ".", "full", "(", "m", ",", "np", ".", "nan", ")", "return", "list", "(", "velocities", ")", "+", "list", "(", "nans", ")" ]
Compute the velocity of the best scores The velocities are the k distances between the k+1 best scores.
[ "Compute", "the", "velocity", "of", "the", "best", "scores" ]
python
train
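A standalone numpy walk-through of the velocity computation for k=3: take the k+1 best scores, diff them, and pad with NaN so the output length matches the input:

import numpy as np

scores = [0.2, 0.9, 0.5, 0.7, 0.8]
k = 3
best = sorted(scores)[-k - 1:]  # [0.5, 0.7, 0.8, 0.9]
velocities = np.diff(best)      # approximately [0.2, 0.1, 0.1]
rewards = list(velocities) + [np.nan] * max(len(scores) - k, 0)
print(rewards)                  # three velocities plus two NaN pads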
Unidata/MetPy
metpy/calc/cross_sections.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/calc/cross_sections.py#L221-L258
def tangential_component(data_x, data_y, index='index'): r"""Obtain the tangential component of a cross-section of a vector field. Parameters ---------- data_x : `xarray.DataArray` The input DataArray of the x-component (in terms of data projection) of the vector field. data_y : `xarray.DataArray` The input DataArray of the y-component (in terms of data projection) of the vector field. Returns ------- component_tangential: `xarray.DataArray` The component of the vector field in the tangential directions. See Also -------- cross_section_components, normal_component Notes ----- The coordinates of `data_x` and `data_y` must match. """ # Get the unit vectors unit_tang, _ = unit_vectors_from_cross_section(data_x, index=index) # Take the dot products component_tang = data_x * unit_tang[0] + data_y * unit_tang[1] # Reattach only reliable attributes after operation for attr in ('units', 'grid_mapping'): if attr in data_x.attrs: component_tang.attrs[attr] = data_x.attrs[attr] return component_tang
[ "def", "tangential_component", "(", "data_x", ",", "data_y", ",", "index", "=", "'index'", ")", ":", "# Get the unit vectors", "unit_tang", ",", "_", "=", "unit_vectors_from_cross_section", "(", "data_x", ",", "index", "=", "index", ")", "# Take the dot products", "component_tang", "=", "data_x", "*", "unit_tang", "[", "0", "]", "+", "data_y", "*", "unit_tang", "[", "1", "]", "# Reattach only reliable attributes after operation", "for", "attr", "in", "(", "'units'", ",", "'grid_mapping'", ")", ":", "if", "attr", "in", "data_x", ".", "attrs", ":", "component_tang", ".", "attrs", "[", "attr", "]", "=", "data_x", ".", "attrs", "[", "attr", "]", "return", "component_tang" ]
r"""Obtain the tangential component of a cross-section of a vector field. Parameters ---------- data_x : `xarray.DataArray` The input DataArray of the x-component (in terms of data projection) of the vector field. data_y : `xarray.DataArray` The input DataArray of the y-component (in terms of data projection) of the vector field. Returns ------- component_tangential: `xarray.DataArray` The component of the vector field in the tangential directions. See Also -------- cross_section_components, normal_component Notes ----- The coordinates of `data_x` and `data_y` must match.
[ "r", "Obtain", "the", "tangential", "component", "of", "a", "cross", "-", "section", "of", "a", "vector", "field", "." ]
python
train
google/transitfeed
transitfeed/schedule.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/schedule.py#L770-L804
def GenerateDateTripsDeparturesList(self, date_start, date_end): """Return a list of (date object, number of trips, number of departures). The list is generated for dates in the range [date_start, date_end). Args: date_start: The first date in the list, a date object date_end: The first date after the list, a date object Returns: a list of (date object, number of trips, number of departures) tuples """ service_id_to_trips = defaultdict(lambda: 0) service_id_to_departures = defaultdict(lambda: 0) for trip in self.GetTripList(): headway_start_times = trip.GetFrequencyStartTimes() if headway_start_times: trip_runs = len(headway_start_times) else: trip_runs = 1 service_id_to_trips[trip.service_id] += trip_runs service_id_to_departures[trip.service_id] += ( (trip.GetCountStopTimes() - 1) * trip_runs) date_services = self.GetServicePeriodsActiveEachDate(date_start, date_end) date_trips = [] for date, services in date_services: day_trips = sum(service_id_to_trips[s.service_id] for s in services) day_departures = sum( service_id_to_departures[s.service_id] for s in services) date_trips.append((date, day_trips, day_departures)) return date_trips
[ "def", "GenerateDateTripsDeparturesList", "(", "self", ",", "date_start", ",", "date_end", ")", ":", "service_id_to_trips", "=", "defaultdict", "(", "lambda", ":", "0", ")", "service_id_to_departures", "=", "defaultdict", "(", "lambda", ":", "0", ")", "for", "trip", "in", "self", ".", "GetTripList", "(", ")", ":", "headway_start_times", "=", "trip", ".", "GetFrequencyStartTimes", "(", ")", "if", "headway_start_times", ":", "trip_runs", "=", "len", "(", "headway_start_times", ")", "else", ":", "trip_runs", "=", "1", "service_id_to_trips", "[", "trip", ".", "service_id", "]", "+=", "trip_runs", "service_id_to_departures", "[", "trip", ".", "service_id", "]", "+=", "(", "(", "trip", ".", "GetCountStopTimes", "(", ")", "-", "1", ")", "*", "trip_runs", ")", "date_services", "=", "self", ".", "GetServicePeriodsActiveEachDate", "(", "date_start", ",", "date_end", ")", "date_trips", "=", "[", "]", "for", "date", ",", "services", "in", "date_services", ":", "day_trips", "=", "sum", "(", "service_id_to_trips", "[", "s", ".", "service_id", "]", "for", "s", "in", "services", ")", "day_departures", "=", "sum", "(", "service_id_to_departures", "[", "s", ".", "service_id", "]", "for", "s", "in", "services", ")", "date_trips", ".", "append", "(", "(", "date", ",", "day_trips", ",", "day_departures", ")", ")", "return", "date_trips" ]
Return a list of (date object, number of trips, number of departures). The list is generated for dates in the range [date_start, date_end). Args: date_start: The first date in the list, a date object date_end: The first date after the list, a date object Returns: a list of (date object, number of trips, number of departures) tuples
[ "Return", "a", "list", "of", "(", "date", "object", "number", "of", "trips", "number", "of", "departures", ")", "." ]
python
train
10gen/mongo-orchestration
mongo_orchestration/servers.py
https://github.com/10gen/mongo-orchestration/blob/81fd2224205922ea2178b08190b53a33aec47261/mongo_orchestration/servers.py#L525-L532
def remove(self, server_id): """remove server and data stuff Args: server_id - server identity """ server = self._storage.pop(server_id) server.stop() server.cleanup()
[ "def", "remove", "(", "self", ",", "server_id", ")", ":", "server", "=", "self", ".", "_storage", ".", "pop", "(", "server_id", ")", "server", ".", "stop", "(", ")", "server", ".", "cleanup", "(", ")" ]
remove server and data stuff Args: server_id - server identity
[ "remove", "server", "and", "data", "stuff", "Args", ":", "server_id", "-", "server", "identity" ]
python
train
yeraydiazdiaz/lunr.py
lunr/pipeline.py
https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/pipeline.py#L58-L68
def add(self, *args): """Adds new functions to the end of the pipeline. Functions must accept three arguments: - Token: A lunr.Token object which will be updated - i: The index of the token in the set - tokens: A list of tokens representing the set """ for fn in args: self.warn_if_function_not_registered(fn) self._stack.append(fn)
[ "def", "add", "(", "self", ",", "*", "args", ")", ":", "for", "fn", "in", "args", ":", "self", ".", "warn_if_function_not_registered", "(", "fn", ")", "self", ".", "_stack", ".", "append", "(", "fn", ")" ]
Adds new functions to the end of the pipeline. Functions must accept three arguments: - Token: A lunr.Token object which will be updated - i: The index of the token in the set - tokens: A list of tokens representing the set
[ "Adds", "new", "functions", "to", "the", "end", "of", "the", "pipeline", "." ]
python
train
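A minimal standalone sketch of the stack-of-functions idea: each stage receives (token, i, tokens) and returns a replacement token. The real lunr pipeline also supports registration warnings and stages that drop or expand tokens, omitted here:

class TinyPipeline:
    def __init__(self):
        self._stack = []

    def add(self, *fns):
        self._stack.extend(fns)

    def run(self, tokens):
        for fn in self._stack:
            tokens = [fn(tok, i, tokens) for i, tok in enumerate(tokens)]
        return tokens

pipeline = TinyPipeline()
pipeline.add(lambda tok, i, tokens: tok.lower(),
             lambda tok, i, tokens: tok.strip('.,'))
print(pipeline.run(['Hello,', 'World.']))  # ['hello', 'world']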
globality-corp/microcosm-flask
microcosm_flask/conventions/crud.py
https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/conventions/crud.py#L139-L171
def configure_updatebatch(self, ns, definition): """ Register an update batch endpoint. The definition's func should be an update function, which must: - accept kwargs for the request and path data - return a new item :param ns: the namespace :param definition: the endpoint definition """ operation = Operation.UpdateBatch @self.add_route(ns.collection_path, operation, ns) @request(definition.request_schema) @response(definition.response_schema) @wraps(definition.func) def update_batch(**path_data): headers = dict() request_data = load_request_data(definition.request_schema) response_data = definition.func(**merge_data(path_data, request_data)) definition.header_func(headers, response_data) response_format = self.negotiate_response_content(definition.response_formats) return dump_response_data( definition.response_schema, response_data, status_code=operation.value.default_code, headers=headers, response_format=response_format, ) update_batch.__doc__ = "Update a batch of {}".format(ns.subject_name)
[ "def", "configure_updatebatch", "(", "self", ",", "ns", ",", "definition", ")", ":", "operation", "=", "Operation", ".", "UpdateBatch", "@", "self", ".", "add_route", "(", "ns", ".", "collection_path", ",", "operation", ",", "ns", ")", "@", "request", "(", "definition", ".", "request_schema", ")", "@", "response", "(", "definition", ".", "response_schema", ")", "@", "wraps", "(", "definition", ".", "func", ")", "def", "update_batch", "(", "*", "*", "path_data", ")", ":", "headers", "=", "dict", "(", ")", "request_data", "=", "load_request_data", "(", "definition", ".", "request_schema", ")", "response_data", "=", "definition", ".", "func", "(", "*", "*", "merge_data", "(", "path_data", ",", "request_data", ")", ")", "definition", ".", "header_func", "(", "headers", ",", "response_data", ")", "response_format", "=", "self", ".", "negotiate_response_content", "(", "definition", ".", "response_formats", ")", "return", "dump_response_data", "(", "definition", ".", "response_schema", ",", "response_data", ",", "status_code", "=", "operation", ".", "value", ".", "default_code", ",", "headers", "=", "headers", ",", "response_format", "=", "response_format", ",", ")", "update_batch", ".", "__doc__", "=", "\"Update a batch of {}\"", ".", "format", "(", "ns", ".", "subject_name", ")" ]
Register an update batch endpoint. The definition's func should be an update function, which must: - accept kwargs for the request and path data - return a new item :param ns: the namespace :param definition: the endpoint definition
[ "Register", "an", "update", "batch", "endpoint", "." ]
python
train
mattupstate/cubric
cubric/providers/amazon.py
https://github.com/mattupstate/cubric/blob/a648ce00e4467cd14d71e754240ef6c1f87a34b5/cubric/providers/amazon.py#L11-L55
def create_server(): """Creates an EC2 Server""" try: import boto except ImportError: sys.exit("boto library required for creating servers with Amazon.") print(green("Creating EC2 server")) conn = boto.connect_ec2( get_or_prompt('ec2_key', 'API Key'), get_or_prompt('ec2_secret', 'API Secret')) reservation = conn.run_instances( get_or_prompt( 'ec2_ami', 'AMI ID', 'ami-fd589594'), instance_type=get_or_prompt( 'ec2_instancetype', 'Instance Type', 't1.micro'), key_name=get_or_prompt( 'ec2_keypair', 'Key Pair'), security_groups=get_or_prompt_list( 'ec2_secgroups', 'Security Groups')) instance = reservation.instances[0] time.sleep(3) tag = get_or_prompt('ec2_tag', 'Instance Tag (blank for none)', '').strip() if len(tag) > 0: conn.create_tags([instance.id], {"Name": tag}) while instance.state != u'running': print(yellow("Instance state: %s" % instance.state)) time.sleep(10) instance.update() print(green("Instance state: %s" % instance.state)) print(green("Public dns: %s" % instance.public_dns_name)) print(green("Waiting 30 seconds for server to boot")) time.sleep(30) return instance.public_dns_name
[ "def", "create_server", "(", ")", ":", "try", ":", "import", "boto", "except", "ImportError", ":", "sys", ".", "exit", "(", "\"boto library required for creating servers with Amazon.\"", ")", "print", "(", "green", "(", "\"Creating EC2 server\"", ")", ")", "conn", "=", "boto", ".", "connect_ec2", "(", "get_or_prompt", "(", "'ec2_key'", ",", "'API Key'", ")", ",", "get_or_prompt", "(", "'ec2_secret'", ",", "'API Secret'", ")", ")", "reservation", "=", "conn", ".", "run_instances", "(", "get_or_prompt", "(", "'ec2_ami'", ",", "'AMI ID'", ",", "'ami-fd589594'", ")", ",", "instance_type", "=", "get_or_prompt", "(", "'ec2_instancetype'", ",", "'Instance Type'", ",", "'t1.micro'", ")", ",", "key_name", "=", "get_or_prompt", "(", "'ec2_keypair'", ",", "'Key Pair'", ")", ",", "security_groups", "=", "get_or_prompt_list", "(", "'ec2_secgroups'", ",", "'Security Groups'", ")", ")", "instance", "=", "reservation", ".", "instances", "[", "0", "]", "time", ".", "sleep", "(", "3", ")", "tag", "=", "get_or_prompt", "(", "'ec2_tag'", ",", "'Instance Tag (blank for none)'", ",", "''", ")", ".", "strip", "(", ")", "if", "len", "(", "tag", ")", ">", "0", ":", "conn", ".", "create_tags", "(", "[", "instance", ".", "id", "]", ",", "{", "\"Name\"", ":", "tag", "}", ")", "while", "instance", ".", "state", "!=", "u'running'", ":", "print", "(", "yellow", "(", "\"Instance state: %s\"", "%", "instance", ".", "state", ")", ")", "time", ".", "sleep", "(", "10", ")", "instance", ".", "update", "(", ")", "print", "(", "green", "(", "\"Instance state: %s\"", "%", "instance", ".", "state", ")", ")", "print", "(", "green", "(", "\"Public dns: %s\"", "%", "instance", ".", "public_dns_name", ")", ")", "print", "(", "green", "(", "\"Waiting 30 seconds for server to boot\"", ")", ")", "time", ".", "sleep", "(", "30", ")", "return", "instance", ".", "public_dns_name" ]
Creates an EC2 Server
[ "Creates", "an", "EC2", "Server" ]
python
train
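A sketch of driving create_server() non-interactively; the fabric-style env keys are inferred from the get_or_prompt calls in the code and may differ in practice, and the credential values are placeholders.

from fabric.api import env
from cubric.providers.amazon import create_server

env.ec2_key = 'AKIA...'        # placeholder API key
env.ec2_secret = '...'         # placeholder API secret
env.ec2_keypair = 'my-keypair'
env.ec2_secgroups = 'default'  # consumed by get_or_prompt_list

host = create_server()         # blocks until the instance is running
print('new host:', host)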
chrisjsewell/jsonextended
jsonextended/ejson.py
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/ejson.py#L137-L211
def jkeys(jfile, key_path=None, in_memory=True, ignore_prefix=('.', '_')): """ get keys for initial json level, or at level after following key_path Parameters ---------- jfile : str, file_like or path_like if str, must be existing file or folder, if file_like, must have 'read' method if path_like, must have 'iterdir' method (see pathlib.Path) key_path : list[str] a list of keys to index into the json before returning keys in_memory : bool if true reads json into memory before finding keys (this is faster but uses more memory) ignore_prefix : list[str] ignore folders beginning with these prefixes Examples -------- >>> from jsonextended.utils import MockPath >>> file_obj = MockPath('test.json',is_file=True, ... content=''' ... { ... "a": 1, ... "b": [1.1,2.1], ... "c": {"d":"e","f":"g"} ... } ... ''') ... >>> jkeys(file_obj) ['a', 'b', 'c'] >>> jkeys(file_obj,["c"]) ['d', 'f'] >>> from jsonextended.utils import get_test_path >>> path = get_test_path() >>> jkeys(path) ['dir1', 'dir2', 'dir3'] >>> path = get_test_path() >>> jkeys(path, ['dir1','file1'], in_memory=True) ['initial', 'meta', 'optimised', 'units'] """ key_path = [] if key_path is None else key_path def eval_file(file_obj): if not in_memory: return _get_keys_ijson(file_obj, key_path) else: return _get_keys(file_obj, key_path) if isinstance(jfile, basestring): if not os.path.exists(jfile): raise IOError('jfile does not exist: {}'.format(jfile)) if os.path.isdir(jfile): jpath = pathlib.Path(jfile) return _get_keys_folder(jpath, key_path, in_memory, ignore_prefix) else: with open(jfile, 'r') as file_obj: return eval_file(file_obj) elif hasattr(jfile, 'read'): return eval_file(jfile) elif hasattr(jfile, 'iterdir'): if jfile.is_file(): with jfile.open('r') as file_obj: return eval_file(file_obj) else: return _get_keys_folder(jfile, key_path, in_memory, ignore_prefix) else: raise ValueError( 'jfile should be a str, ' 'file_like or path_like object: {}'.format(jfile))
[ "def", "jkeys", "(", "jfile", ",", "key_path", "=", "None", ",", "in_memory", "=", "True", ",", "ignore_prefix", "=", "(", "'.'", ",", "'_'", ")", ")", ":", "key_path", "=", "[", "]", "if", "key_path", "is", "None", "else", "key_path", "def", "eval_file", "(", "file_obj", ")", ":", "if", "not", "in_memory", ":", "return", "_get_keys_ijson", "(", "file_obj", ",", "key_path", ")", "else", ":", "return", "_get_keys", "(", "file_obj", ",", "key_path", ")", "if", "isinstance", "(", "jfile", ",", "basestring", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "jfile", ")", ":", "raise", "IOError", "(", "'jfile does not exist: {}'", ".", "format", "(", "jfile", ")", ")", "if", "os", ".", "path", ".", "isdir", "(", "jfile", ")", ":", "jpath", "=", "pathlib", ".", "Path", "(", "jfile", ")", "return", "_get_keys_folder", "(", "jpath", ",", "key_path", ",", "in_memory", ",", "ignore_prefix", ")", "else", ":", "with", "open", "(", "jfile", ",", "'r'", ")", "as", "file_obj", ":", "return", "eval_file", "(", "file_obj", ")", "elif", "hasattr", "(", "jfile", ",", "'read'", ")", ":", "return", "eval_file", "(", "jfile", ")", "elif", "hasattr", "(", "jfile", ",", "'iterdir'", ")", ":", "if", "jfile", ".", "is_file", "(", ")", ":", "with", "jfile", ".", "open", "(", "'r'", ")", "as", "file_obj", ":", "return", "eval_file", "(", "file_obj", ")", "else", ":", "return", "_get_keys_folder", "(", "jfile", ",", "key_path", ",", "in_memory", ",", "ignore_prefix", ")", "else", ":", "raise", "ValueError", "(", "'jfile should be a str, '", "'file_like or path_like object: {}'", ".", "format", "(", "jfile", ")", ")" ]
get keys for initial json level, or at level after following key_path Parameters ---------- jfile : str, file_like or path_like if str, must be existing file or folder, if file_like, must have 'read' method if path_like, must have 'iterdir' method (see pathlib.Path) key_path : list[str] a list of keys to index into the json before returning keys in_memory : bool if true reads json into memory before finding keys (this is faster but uses more memory) ignore_prefix : list[str] ignore folders beginning with these prefixes Examples -------- >>> from jsonextended.utils import MockPath >>> file_obj = MockPath('test.json',is_file=True, ... content=''' ... { ... "a": 1, ... "b": [1.1,2.1], ... "c": {"d":"e","f":"g"} ... } ... ''') ... >>> jkeys(file_obj) ['a', 'b', 'c'] >>> jkeys(file_obj,["c"]) ['d', 'f'] >>> from jsonextended.utils import get_test_path >>> path = get_test_path() >>> jkeys(path) ['dir1', 'dir2', 'dir3'] >>> path = get_test_path() >>> jkeys(path, ['dir1','file1'], in_memory=True) ['initial', 'meta', 'optimised', 'units']
[ "get", "keys", "for", "initial", "json", "level", "or", "at", "level", "after", "following", "key_path" ]
python
train
VingtCinq/python-resize-image
resizeimage/resizeimage.py
https://github.com/VingtCinq/python-resize-image/blob/a4e645792ef30c5fcc558df6da6de18b1ecb95ea/resizeimage/resizeimage.py#L78-L95
def resize_cover(image, size, resample=Image.LANCZOS): """ Resize image according to size. image: a Pillow image instance size: a list of two integers [width, height] """ img_format = image.format img = image.copy() img_size = img.size ratio = max(size[0] / img_size[0], size[1] / img_size[1]) new_size = [ int(math.ceil(img_size[0] * ratio)), int(math.ceil(img_size[1] * ratio)) ] img = img.resize((new_size[0], new_size[1]), resample) img = resize_crop(img, size) img.format = img_format return img
[ "def", "resize_cover", "(", "image", ",", "size", ",", "resample", "=", "Image", ".", "LANCZOS", ")", ":", "img_format", "=", "image", ".", "format", "img", "=", "image", ".", "copy", "(", ")", "img_size", "=", "img", ".", "size", "ratio", "=", "max", "(", "size", "[", "0", "]", "/", "img_size", "[", "0", "]", ",", "size", "[", "1", "]", "/", "img_size", "[", "1", "]", ")", "new_size", "=", "[", "int", "(", "math", ".", "ceil", "(", "img_size", "[", "0", "]", "*", "ratio", ")", ")", ",", "int", "(", "math", ".", "ceil", "(", "img_size", "[", "1", "]", "*", "ratio", ")", ")", "]", "img", "=", "img", ".", "resize", "(", "(", "new_size", "[", "0", "]", ",", "new_size", "[", "1", "]", ")", ",", "resample", ")", "img", "=", "resize_crop", "(", "img", ",", "size", ")", "img", ".", "format", "=", "img_format", "return", "img" ]
Resize image according to size. image: a Pillow image instance size: a list of two integers [width, height]
[ "Resize", "image", "according", "to", "size", ".", "image", ":", "a", "Pillow", "image", "instance", "size", ":", "a", "list", "of", "two", "integers", "[", "width", "height", "]" ]
python
test
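A usage sketch for resize_cover following the library's open-resize-save pattern; the file names are placeholders.

from PIL import Image
from resizeimage import resizeimage

with open('photo.jpg', 'rb') as f:
    img = Image.open(f)
    # Scale so the image covers 200x100 entirely, then center-crop to it.
    cover = resizeimage.resize_cover(img, [200, 100])
    cover.save('photo-cover.jpg', cover.format)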
bxlab/bx-python
lib/bx_extras/pyparsing.py
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/pyparsing.py#L1019-L1052
def parseString( self, instring, parseAll=False ):
        """Execute the parse expression with the given string.
           This is the main interface to the client code, once the complete
           expression has been built.

           If you want the grammar to require that the entire input string be
           successfully parsed, then set parseAll to True (equivalent to ending
           the grammar with StringEnd()).

           Note: parseString implicitly calls expandtabs() on the input string,
           in order to report proper column numbers in parse actions.
           If the input string contains tabs and
           the grammar uses parse actions that use the loc argument to index into the
           string being parsed, you can ensure you have a consistent view of the input
           string by:
           - calling parseWithTabs on your grammar before calling parseString
             (see L{I{parseWithTabs}<parseWithTabs>})
           - define your parse action using the full (s,loc,toks) signature, and
             reference the input string using the parse action's s argument
           - explicitly expand the tabs in your input string before calling
             parseString
        """
        ParserElement.resetCache()
        if not self.streamlined:
            self.streamline()
            #~ self.saveAsList = True
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            instring = instring.expandtabs()
        loc, tokens = self._parse( instring, 0 )
        if parseAll:
            StringEnd()._parse( instring, loc )
        return tokens
[ "def", "parseString", "(", "self", ",", "instring", ",", "parseAll", "=", "False", ")", ":", "ParserElement", ".", "resetCache", "(", ")", "if", "not", "self", ".", "streamlined", ":", "self", ".", "streamline", "(", ")", "#~ self.saveAsList = True", "for", "e", "in", "self", ".", "ignoreExprs", ":", "e", ".", "streamline", "(", ")", "if", "not", "self", ".", "keepTabs", ":", "instring", "=", "instring", ".", "expandtabs", "(", ")", "loc", ",", "tokens", "=", "self", ".", "_parse", "(", "instring", ",", "0", ")", "if", "parseAll", ":", "StringEnd", "(", ")", ".", "_parse", "(", "instring", ",", "loc", ")", "return", "tokens" ]
Execute the parse expression with the given string. This is the main interface to the client code, once the complete expression has been built.

If you want the grammar to require that the entire input string be successfully parsed, then set parseAll to True (equivalent to ending the grammar with StringEnd()).

Note: parseString implicitly calls expandtabs() on the input string, in order to report proper column numbers in parse actions. If the input string contains tabs and the grammar uses parse actions that use the loc argument to index into the string being parsed, you can ensure you have a consistent view of the input string by: - calling parseWithTabs on your grammar before calling parseString (see L{I{parseWithTabs}<parseWithTabs>}) - define your parse action using the full (s,loc,toks) signature, and reference the input string using the parse action's s argument - explicitly expand the tabs in your input string before calling parseString
[ "Execute", "the", "parse", "expression", "with", "the", "given", "string", ".", "This", "is", "the", "main", "interface", "to", "the", "client", "code", "once", "the", "complete", "expression", "has", "been", "built", "." ]
python
train
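A small sketch of parseString and the parseAll flag, written against the standalone pyparsing distribution that this bundled copy mirrors.

from pyparsing import Word, alphas, nums, ParseException

grammar = Word(alphas) + Word(nums)
print(grammar.parseString('answer 42'))   # -> ['answer', '42']

try:
    # parseAll=True requires the whole input to match (implicit StringEnd).
    grammar.parseString('answer 42 trailing junk', parseAll=True)
except ParseException as err:
    print('rejected:', err)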
alaudet/hcsr04sensor
recipes/imperial_distance.py
https://github.com/alaudet/hcsr04sensor/blob/74caf5c825e3f700c9daa9985542c061ae04b002/recipes/imperial_distance.py#L6-L31
def main():
    '''Calculate the distance of an object in inches using an HCSR04 sensor
       and a Raspberry Pi'''

    trig_pin = 17
    echo_pin = 27

    # Default values
    # unit = 'metric'
    # temperature = 20
    # round_to = 1

    # Create a distance reading with the hcsr04 sensor module
    # and override the default values for temp, unit and rounding
    value = sensor.Measurement(trig_pin,
                               echo_pin,
                               temperature=68,
                               unit='imperial',
                               round_to=2
                               )
    raw_measurement = value.raw_distance()

    # Calculate the distance in inches
    imperial_distance = value.distance_imperial(raw_measurement)

    print("The Distance = {} inches".format(imperial_distance))
[ "def", "main", "(", ")", ":", "trig_pin", "=", "17", "echo_pin", "=", "27", "# Default values", "# unit = 'metric'", "# temperature = 20", "# round_to = 1", "# Create a distance reading with the hcsr04 sensor module", "# and override the default values for temp, unit and rounding", "value", "=", "sensor", ".", "Measurement", "(", "trig_pin", ",", "echo_pin", ",", "temperature", "=", "68", ",", "unit", "=", "'imperial'", ",", "round_to", "=", "2", ")", "raw_measurement", "=", "value", ".", "raw_distance", "(", ")", "# Calculate the distance in inches", "imperial_distance", "=", "value", ".", "distance_imperial", "(", "raw_measurement", ")", "print", "(", "\"The Distance = {} inches\"", ".", "format", "(", "imperial_distance", ")", ")" ]
Calculate the distance of an object in inches using an HCSR04 sensor and a Raspberry Pi
[ "Calculate", "the", "distance", "of", "an", "object", "in", "inches", "using", "an", "HCSR04", "sensor", "and", "a", "Raspberry", "Pi" ]
python
train
globality-corp/microcosm-flask
microcosm_flask/audit.py
https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/audit.py#L290-L306
def post_process_response_headers(self, dct):
        """
        Rewrite X-<Name>-Id response headers into audit log entries.

        """
        if not self.response_headers:
            return

        for key, value in self.response_headers.items():
            parts = key.split("-")
            if len(parts) != 3:
                continue
            if parts[0] != "X":
                continue
            if parts[-1] != "Id":
                continue
            dct["{}_id".format(underscore(parts[1]))] = value
[ "def", "post_process_response_headers", "(", "self", ",", "dct", ")", ":", "if", "not", "self", ".", "response_headers", ":", "return", "for", "key", ",", "value", "in", "self", ".", "response_headers", ".", "items", "(", ")", ":", "parts", "=", "key", ".", "split", "(", "\"-\"", ")", "if", "len", "(", "parts", ")", "!=", "3", ":", "continue", "if", "parts", "[", "0", "]", "!=", "\"X\"", ":", "continue", "if", "parts", "[", "-", "1", "]", "!=", "\"Id\"", ":", "continue", "dct", "[", "\"{}_id\"", ".", "format", "(", "underscore", "(", "parts", "[", "1", "]", ")", ")", "]", "=", "value" ]
Rewrite X-<Name>-Id response headers into audit log entries.
[ "Rewrite", "X", "-", "<", "Name", ">", "-", "Id", "response", "headers", "into", "audit", "log", "entries", "." ]
python
train
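A standalone sketch of the header-to-key transformation performed above, using inflection.underscore as a stand-in for the underscore helper imported by this module; the header values are placeholders.

from inflection import underscore   # stand-in for this module's underscore

response_headers = {
    'X-Request-Id': 'abc123',
    'X-MyResource-Id': '42',
    'Content-Type': 'application/json',   # skipped: not an X-<Name>-Id header
}

dct = {}
for key, value in response_headers.items():
    parts = key.split('-')
    if len(parts) == 3 and parts[0] == 'X' and parts[-1] == 'Id':
        dct['{}_id'.format(underscore(parts[1]))] = value

print(dct)   # {'request_id': 'abc123', 'my_resource_id': '42'}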
gwastro/pycbc
pycbc/inference/sampler/base_mcmc.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/sampler/base_mcmc.py#L828-L874
def compute_acl(cls, filename, start_index=None, end_index=None,
                    min_nsamples=10):
        """Computes the autocorrelation length for all model params in the
        given file.

        Parameter values are averaged over all walkers at each iteration.
        The ACL is then calculated over the averaged chain. If an ACL cannot
        be calculated because there are not enough samples, it will be set
        to ``inf``.

        Parameters
        ----------
        filename : str
            Name of a samples file to compute ACLs for.
        start_index : int, optional
            The start index to compute the acl from. If None, will try to use
            the number of burn-in iterations in the file; otherwise, will start
            at the first sample.
        end_index : int, optional
            The end index to compute the acl to. If None, will go to the end
            of the current iteration.
        min_nsamples : int, optional
            Require a minimum number of samples to compute an ACL. If the
            number of samples per walker is less than this, will just set to
            ``inf``. Default is 10.

        Returns
        -------
        dict
            A dictionary giving the ACL for each parameter.
        """
        acls = {}
        with cls._io(filename, 'r') as fp:
            for param in fp.variable_params:
                samples = fp.read_raw_samples(
                    param, thin_start=start_index, thin_interval=1,
                    thin_end=end_index, flatten=False)[param]
                samples = samples.mean(axis=0)
                # if < min number of samples, just set to inf
                if samples.size < min_nsamples:
                    acl = numpy.inf
                else:
                    acl = autocorrelation.calculate_acl(samples)
                if acl <= 0:
                    acl = numpy.inf
                acls[param] = acl
        return acls
[ "def", "compute_acl", "(", "cls", ",", "filename", ",", "start_index", "=", "None", ",", "end_index", "=", "None", ",", "min_nsamples", "=", "10", ")", ":", "acls", "=", "{", "}", "with", "cls", ".", "_io", "(", "filename", ",", "'r'", ")", "as", "fp", ":", "for", "param", "in", "fp", ".", "variable_params", ":", "samples", "=", "fp", ".", "read_raw_samples", "(", "param", ",", "thin_start", "=", "start_index", ",", "thin_interval", "=", "1", ",", "thin_end", "=", "end_index", ",", "flatten", "=", "False", ")", "[", "param", "]", "samples", "=", "samples", ".", "mean", "(", "axis", "=", "0", ")", "# if < min number of samples, just set to inf", "if", "samples", ".", "size", "<", "min_nsamples", ":", "acl", "=", "numpy", ".", "inf", "else", ":", "acl", "=", "autocorrelation", ".", "calculate_acl", "(", "samples", ")", "if", "acl", "<=", "0", ":", "acl", "=", "numpy", ".", "inf", "acls", "[", "param", "]", "=", "acl", "return", "acls" ]
Computes the autocorrelation length for all model params in the given file.

Parameter values are averaged over all walkers at each iteration. The ACL is then calculated over the averaged chain. If an ACL cannot be calculated because there are not enough samples, it will be set to ``inf``.

Parameters
----------
filename : str
    Name of a samples file to compute ACLs for.
start_index : int, optional
    The start index to compute the acl from. If None, will try to use
    the number of burn-in iterations in the file; otherwise, will start
    at the first sample.
end_index : int, optional
    The end index to compute the acl to. If None, will go to the end
    of the current iteration.
min_nsamples : int, optional
    Require a minimum number of samples to compute an ACL. If the
    number of samples per walker is less than this, will just set to
    ``inf``. Default is 10.

Returns
-------
dict
    A dictionary giving the ACL for each parameter.
[ "Computes", "the", "autocorrelation", "length", "for", "all", "model", "params", "in", "the", "given", "file", "." ]
python
train
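A toy illustration of the walker-averaging and inf-thresholding above; the crude ACL estimator here is only a stand-in for pycbc's autocorrelation.calculate_acl.

import numpy

def toy_acl(chains, min_nsamples=10):
    samples = chains.mean(axis=0)            # average over walkers
    if samples.size < min_nsamples:
        return numpy.inf                     # too few samples to trust
    centered = samples - samples.mean()
    acf = numpy.correlate(centered, centered, mode='full')
    acf = acf[acf.size // 2:] / acf[acf.size // 2]   # normalized, lags >= 0
    acl = 1 + 2 * acf[1:].sum()              # crude integrated ACL
    return acl if acl > 0 else numpy.inf     # non-positive estimates -> inf

chains = numpy.random.randn(8, 200)          # 8 walkers, 200 iterations
print(toy_acl(chains))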
jilljenn/tryalgo
tryalgo/graph.py
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/graph.py#L155-L173
def tree_adj_to_prec(graph, root=0): """Transforms a tree given as adjacency list into predecessor table form. if graph is not a tree: will return a DFS spanning tree :param graph: directed graph in listlist or listdict format :returns: tree in predecessor table representation :complexity: linear """ prec = [None] * len(graph) prec[root] = root # mark to visit root only once to_visit = [root] while to_visit: # DFS node = to_visit.pop() for neighbor in graph[node]: if prec[neighbor] is None: prec[neighbor] = node to_visit.append(neighbor) prec[root] = None # put the standard mark for root return prec
[ "def", "tree_adj_to_prec", "(", "graph", ",", "root", "=", "0", ")", ":", "prec", "=", "[", "None", "]", "*", "len", "(", "graph", ")", "prec", "[", "root", "]", "=", "root", "# mark to visit root only once", "to_visit", "=", "[", "root", "]", "while", "to_visit", ":", "# DFS", "node", "=", "to_visit", ".", "pop", "(", ")", "for", "neighbor", "in", "graph", "[", "node", "]", ":", "if", "prec", "[", "neighbor", "]", "is", "None", ":", "prec", "[", "neighbor", "]", "=", "node", "to_visit", ".", "append", "(", "neighbor", ")", "prec", "[", "root", "]", "=", "None", "# put the standard mark for root", "return", "prec" ]
Transforms a tree given as adjacency list into predecessor table form. if graph is not a tree: will return a DFS spanning tree :param graph: directed graph in listlist or listdict format :returns: tree in predecessor table representation :complexity: linear
[ "Transforms", "a", "tree", "given", "as", "adjacency", "list", "into", "predecessor", "table", "form", ".", "if", "graph", "is", "not", "a", "tree", ":", "will", "return", "a", "DFS", "spanning", "tree" ]
python
train
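A worked example for tree_adj_to_prec; the outputs below were traced by hand against the DFS above.

from tryalgo.graph import tree_adj_to_prec

# The tree with edges 0-1, 0-2, 1-3 as an undirected adjacency list.
graph = [[1, 2], [0, 3], [0], [1]]
print(tree_adj_to_prec(graph))            # [None, 0, 0, 1]
print(tree_adj_to_prec(graph, root=1))    # [1, None, 0, 1]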
StackStorm/pybind
pybind/slxos/v17r_2_00/keychain/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_2_00/keychain/__init__.py#L94-L120
def _set_name_of_keychain(self, v, load=False): """ Setter method for name_of_keychain, mapped from YANG variable /keychain/name_of_keychain (string) If this variable is read-only (config: false) in the source YANG file, then _set_name_of_keychain is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_name_of_keychain() directly. """ parent = getattr(self, "_parent", None) if parent is not None and load is False: raise AttributeError("Cannot set keys directly when" + " within an instantiated list") if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'4..32']}), is_leaf=True, yang_name="name-of-keychain", rest_name="name-of-keychain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'name of the keychain', u'cli-full-command': None, u'cli-full-no': None, u'cli-suppress-range': None, u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-keychain', defining_module='brocade-keychain', yang_type='string', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """name_of_keychain must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'4..32']}), is_leaf=True, yang_name="name-of-keychain", rest_name="name-of-keychain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'name of the keychain', u'cli-full-command': None, u'cli-full-no': None, u'cli-suppress-range': None, u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-keychain', defining_module='brocade-keychain', yang_type='string', is_config=True)""", }) self.__name_of_keychain = t if hasattr(self, '_set'): self._set()
[ "def", "_set_name_of_keychain", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "parent", "=", "getattr", "(", "self", ",", "\"_parent\"", ",", "None", ")", "if", "parent", "is", "not", "None", "and", "load", "is", "False", ":", "raise", "AttributeError", "(", "\"Cannot set keys directly when\"", "+", "\" within an instantiated list\"", ")", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "RestrictedClassType", "(", "base_type", "=", "unicode", ",", "restriction_dict", "=", "{", "'length'", ":", "[", "u'4..32'", "]", "}", ")", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"name-of-keychain\"", ",", "rest_name", "=", "\"name-of-keychain\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'name of the keychain'", ",", "u'cli-full-command'", ":", "None", ",", "u'cli-full-no'", ":", "None", ",", "u'cli-suppress-range'", ":", "None", ",", "u'cli-drop-node-name'", ":", "None", "}", "}", ",", "is_keyval", "=", "True", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-keychain'", ",", "defining_module", "=", "'brocade-keychain'", ",", "yang_type", "=", "'string'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"name_of_keychain must be of a type compatible with string\"\"\"", ",", "'defined-type'", ":", "\"string\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'4..32']}), is_leaf=True, yang_name=\"name-of-keychain\", rest_name=\"name-of-keychain\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'name of the keychain', u'cli-full-command': None, u'cli-full-no': None, u'cli-suppress-range': None, u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-keychain', defining_module='brocade-keychain', yang_type='string', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__name_of_keychain", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for name_of_keychain, mapped from YANG variable /keychain/name_of_keychain (string) If this variable is read-only (config: false) in the source YANG file, then _set_name_of_keychain is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_name_of_keychain() directly.
[ "Setter", "method", "for", "name_of_keychain", "mapped", "from", "YANG", "variable", "/", "keychain", "/", "name_of_keychain", "(", "string", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_name_of_keychain", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_name_of_keychain", "()", "directly", "." ]
python
train
saltstack/salt
salt/platform/win.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/platform/win.py#L1062-L1115
def enumerate_tokens(sid=None, session_id=None, privs=None): ''' Enumerate tokens from any existing processes that can be accessed. Optionally filter by sid. ''' for p in psutil.process_iter(): if p.pid == 0: continue try: ph = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, 0, p.pid) except win32api.error as exc: if exc.winerror == 5: log.debug("Unable to OpenProcess pid=%d name=%s", p.pid, p.name()) continue raise exc try: access = ( win32security.TOKEN_DUPLICATE | win32security.TOKEN_QUERY | win32security.TOKEN_IMPERSONATE | win32security.TOKEN_ASSIGN_PRIMARY ) th = win32security.OpenProcessToken(ph, access) except Exception as exc: log.debug("OpenProcessToken failed pid=%d name=%s user%s", p.pid, p.name(), p.username()) continue try: process_sid = win32security.GetTokenInformation(th, win32security.TokenUser)[0] except Exception as exc: log.exception("GetTokenInformation pid=%d name=%s user%s", p.pid, p.name(), p.username()) continue proc_sid = win32security.ConvertSidToStringSid(process_sid) if sid and sid != proc_sid: log.debug("Token for pid does not match user sid: %s", sid) continue if session_id and win32security.GetTokenInformation(th, win32security.TokenSessionId) != session_id: continue def has_priv(tok, priv): luid = win32security.LookupPrivilegeValue(None, priv) for priv_luid, flags in win32security.GetTokenInformation(tok, win32security.TokenPrivileges): if priv_luid == luid: return True return False if privs: has_all = True for name in privs: if not has_priv(th, name): has_all = False if not has_all: continue yield dup_token(th)
[ "def", "enumerate_tokens", "(", "sid", "=", "None", ",", "session_id", "=", "None", ",", "privs", "=", "None", ")", ":", "for", "p", "in", "psutil", ".", "process_iter", "(", ")", ":", "if", "p", ".", "pid", "==", "0", ":", "continue", "try", ":", "ph", "=", "win32api", ".", "OpenProcess", "(", "win32con", ".", "PROCESS_ALL_ACCESS", ",", "0", ",", "p", ".", "pid", ")", "except", "win32api", ".", "error", "as", "exc", ":", "if", "exc", ".", "winerror", "==", "5", ":", "log", ".", "debug", "(", "\"Unable to OpenProcess pid=%d name=%s\"", ",", "p", ".", "pid", ",", "p", ".", "name", "(", ")", ")", "continue", "raise", "exc", "try", ":", "access", "=", "(", "win32security", ".", "TOKEN_DUPLICATE", "|", "win32security", ".", "TOKEN_QUERY", "|", "win32security", ".", "TOKEN_IMPERSONATE", "|", "win32security", ".", "TOKEN_ASSIGN_PRIMARY", ")", "th", "=", "win32security", ".", "OpenProcessToken", "(", "ph", ",", "access", ")", "except", "Exception", "as", "exc", ":", "log", ".", "debug", "(", "\"OpenProcessToken failed pid=%d name=%s user%s\"", ",", "p", ".", "pid", ",", "p", ".", "name", "(", ")", ",", "p", ".", "username", "(", ")", ")", "continue", "try", ":", "process_sid", "=", "win32security", ".", "GetTokenInformation", "(", "th", ",", "win32security", ".", "TokenUser", ")", "[", "0", "]", "except", "Exception", "as", "exc", ":", "log", ".", "exception", "(", "\"GetTokenInformation pid=%d name=%s user%s\"", ",", "p", ".", "pid", ",", "p", ".", "name", "(", ")", ",", "p", ".", "username", "(", ")", ")", "continue", "proc_sid", "=", "win32security", ".", "ConvertSidToStringSid", "(", "process_sid", ")", "if", "sid", "and", "sid", "!=", "proc_sid", ":", "log", ".", "debug", "(", "\"Token for pid does not match user sid: %s\"", ",", "sid", ")", "continue", "if", "session_id", "and", "win32security", ".", "GetTokenInformation", "(", "th", ",", "win32security", ".", "TokenSessionId", ")", "!=", "session_id", ":", "continue", "def", "has_priv", "(", "tok", ",", "priv", ")", ":", "luid", "=", "win32security", ".", "LookupPrivilegeValue", "(", "None", ",", "priv", ")", "for", "priv_luid", ",", "flags", "in", "win32security", ".", "GetTokenInformation", "(", "tok", ",", "win32security", ".", "TokenPrivileges", ")", ":", "if", "priv_luid", "==", "luid", ":", "return", "True", "return", "False", "if", "privs", ":", "has_all", "=", "True", "for", "name", "in", "privs", ":", "if", "not", "has_priv", "(", "th", ",", "name", ")", ":", "has_all", "=", "False", "if", "not", "has_all", ":", "continue", "yield", "dup_token", "(", "th", ")" ]
Enumerate tokens from any existing processes that can be accessed. Optionally filter by sid.
[ "Enumerate", "tokens", "from", "any", "existing", "processes", "that", "can", "be", "accessed", ".", "Optionally", "filter", "by", "sid", "." ]
python
train
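A hedged sketch of calling enumerate_tokens; Windows-only, requires psutil and pywin32, and typically admin rights. The SID (LocalSystem here) and privilege names are illustrative.

needed = ['SeAssignPrimaryTokenPrivilege', 'SeIncreaseQuotaPrivilege']
for token in enumerate_tokens(sid='S-1-5-18', privs=needed):
    print('usable token found:', token)
    break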
openpermissions/koi
koi/keygen.py
https://github.com/openpermissions/koi/blob/d721f8e1dfa8f07ad265d9dec32e8aaf80a9f281/koi/keygen.py#L92-L104
def gen_cert_request(filepath, keyfile, config, silent=False):
    """
    generate certificate request

    :param filepath: file path to the certificate request
    :param keyfile: file path to the private key
    :param config: file path to the openssl config
    :param silent: whether to suppress output
    """
    message = 'generate ssl certificate request'
    cmd = (
        'openssl req -new -key {} -out {} -subj "{}"'
        ' -extensions v3_req -config {}').format(
            keyfile, filepath, SUBJECT, config)
    call_openssl(cmd, message, silent)
[ "def", "gen_cert_request", "(", "filepath", ",", "keyfile", ",", "config", ",", "silent", "=", "False", ")", ":", "message", "=", "'generate ssl certificate request'", "cmd", "=", "(", "'openssl req -new -key {} -out {} -subj \"{}\"'", "' -extensions v3_req -config {}'", ")", ".", "format", "(", "keyfile", ",", "filepath", ",", "SUBJECT", ",", "config", ")", "call_openssl", "(", "cmd", ",", "message", ",", "silent", ")" ]
generate certificate request

:param filepath: file path to the certificate request
:param keyfile: file path to the private key
:param config: file path to the openssl config
:param silent: whether to suppress output
[ "generate", "certificate", "request", ":", "param", "filepath", ":", "file", "path", "to", "the", "certificate", "request", ":", "param", "keyfile", ":", "file", "path", "to", "the", "private", "key", ":", "param", "config", ":", "file", "path", "to", "the", "openssl", "config", ":", "param", "silent", ":", "whether", "to", "suppress", "output" ]
python
train
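A short usage sketch for gen_cert_request; the file names are placeholders and SUBJECT comes from this module's constants.

gen_cert_request('server.csr', 'server.key', 'openssl.cnf', silent=True)
# Shells out to roughly:
#   openssl req -new -key server.key -out server.csr \
#       -subj "<SUBJECT>" -extensions v3_req -config openssl.cnf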
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py#L9688-L9707
def sensor_offsets_encode(self, mag_ofs_x, mag_ofs_y, mag_ofs_z, mag_declination, raw_press, raw_temp, gyro_cal_x, gyro_cal_y, gyro_cal_z, accel_cal_x, accel_cal_y, accel_cal_z):
                '''
                Offsets and calibration values for hardware sensors. This makes it
                easier to debug the calibration process.

                mag_ofs_x                 : magnetometer X offset (int16_t)
                mag_ofs_y                 : magnetometer Y offset (int16_t)
                mag_ofs_z                 : magnetometer Z offset (int16_t)
                mag_declination           : magnetic declination (radians) (float)
                raw_press                 : raw pressure from barometer (int32_t)
                raw_temp                  : raw temperature from barometer (int32_t)
                gyro_cal_x                : gyro X calibration (float)
                gyro_cal_y                : gyro Y calibration (float)
                gyro_cal_z                : gyro Z calibration (float)
                accel_cal_x               : accel X calibration (float)
                accel_cal_y               : accel Y calibration (float)
                accel_cal_z               : accel Z calibration (float)

                '''
                return MAVLink_sensor_offsets_message(mag_ofs_x, mag_ofs_y, mag_ofs_z, mag_declination, raw_press, raw_temp, gyro_cal_x, gyro_cal_y, gyro_cal_z, accel_cal_x, accel_cal_y, accel_cal_z)
[ "def", "sensor_offsets_encode", "(", "self", ",", "mag_ofs_x", ",", "mag_ofs_y", ",", "mag_ofs_z", ",", "mag_declination", ",", "raw_press", ",", "raw_temp", ",", "gyro_cal_x", ",", "gyro_cal_y", ",", "gyro_cal_z", ",", "accel_cal_x", ",", "accel_cal_y", ",", "accel_cal_z", ")", ":", "return", "MAVLink_sensor_offsets_message", "(", "mag_ofs_x", ",", "mag_ofs_y", ",", "mag_ofs_z", ",", "mag_declination", ",", "raw_press", ",", "raw_temp", ",", "gyro_cal_x", ",", "gyro_cal_y", ",", "gyro_cal_z", ",", "accel_cal_x", ",", "accel_cal_y", ",", "accel_cal_z", ")" ]
Offsets and calibration values for hardware sensors. This makes it easier to debug the calibration process.

mag_ofs_x                 : magnetometer X offset (int16_t)
mag_ofs_y                 : magnetometer Y offset (int16_t)
mag_ofs_z                 : magnetometer Z offset (int16_t)
mag_declination           : magnetic declination (radians) (float)
raw_press                 : raw pressure from barometer (int32_t)
raw_temp                  : raw temperature from barometer (int32_t)
gyro_cal_x                : gyro X calibration (float)
gyro_cal_y                : gyro Y calibration (float)
gyro_cal_z                : gyro Z calibration (float)
accel_cal_x               : accel X calibration (float)
accel_cal_y               : accel Y calibration (float)
accel_cal_z               : accel Z calibration (float)
[ "Offsets", "and", "calibration", "values", "for", "hardware", "sensors", ".", "This", "makes", "it", "easier", "to", "debug", "the", "calibration", "process", "." ]
python
train
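A sketch showing the argument order when building (not sending) this message; mav is assumed to be an initialized MAVLink instance from this dialect, and the values are placeholders.

msg = mav.sensor_offsets_encode(
    mag_ofs_x=10, mag_ofs_y=-3, mag_ofs_z=7,     # magnetometer offsets
    mag_declination=0.05,                        # radians
    raw_press=101325, raw_temp=2100,             # raw barometer readings
    gyro_cal_x=0.0, gyro_cal_y=0.0, gyro_cal_z=0.0,
    accel_cal_x=1.0, accel_cal_y=1.0, accel_cal_z=1.0,
)
# mav.send(msg) would then transmit it; the generated
# sensor_offsets_send() helper combines both steps.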
ejeschke/ginga
ginga/mockw/ImageViewMock.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/mockw/ImageViewMock.py#L75-L89
def render_image(self, rgbobj, dst_x, dst_y): """Render the image represented by (rgbobj) at dst_x, dst_y in the offscreen pixmap. """ self.logger.debug("redraw pixmap=%s" % (self.pixmap)) if self.pixmap is None: return self.logger.debug("drawing to pixmap") # Prepare array for rendering arr = rgbobj.get_array(self.rgb_order, dtype=np.uint8) (height, width) = arr.shape[:2] return self._render_offscreen(self.pixmap, arr, dst_x, dst_y, width, height)
[ "def", "render_image", "(", "self", ",", "rgbobj", ",", "dst_x", ",", "dst_y", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"redraw pixmap=%s\"", "%", "(", "self", ".", "pixmap", ")", ")", "if", "self", ".", "pixmap", "is", "None", ":", "return", "self", ".", "logger", ".", "debug", "(", "\"drawing to pixmap\"", ")", "# Prepare array for rendering", "arr", "=", "rgbobj", ".", "get_array", "(", "self", ".", "rgb_order", ",", "dtype", "=", "np", ".", "uint8", ")", "(", "height", ",", "width", ")", "=", "arr", ".", "shape", "[", ":", "2", "]", "return", "self", ".", "_render_offscreen", "(", "self", ".", "pixmap", ",", "arr", ",", "dst_x", ",", "dst_y", ",", "width", ",", "height", ")" ]
Render the image represented by (rgbobj) at dst_x, dst_y in the offscreen pixmap.
[ "Render", "the", "image", "represented", "by", "(", "rgbobj", ")", "at", "dst_x", "dst_y", "in", "the", "offscreen", "pixmap", "." ]
python
train
androguard/androguard
androguard/core/bytecodes/dvm.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/dvm.py#L3345-L3353
def get_methods(self): """ Return direct and virtual methods :rtype: a list of :class:`EncodedMethod` objects """ return [x for x in self.direct_methods] + [x for x in self.virtual_methods]
[ "def", "get_methods", "(", "self", ")", ":", "return", "[", "x", "for", "x", "in", "self", ".", "direct_methods", "]", "+", "[", "x", "for", "x", "in", "self", ".", "virtual_methods", "]" ]
Return direct and virtual methods :rtype: a list of :class:`EncodedMethod` objects
[ "Return", "direct", "and", "virtual", "methods" ]
python
train
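A sketch iterating the direct and virtual methods per class; AnalyzeAPK and the file path are assumptions about the surrounding androguard API.

from androguard.misc import AnalyzeAPK

a, d, dx = AnalyzeAPK('app.apk')       # placeholder APK path
for dex in d:
    for cls in dex.get_classes():
        for method in cls.get_methods():   # direct + virtual, as above
            print(cls.get_name(), method.get_name())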
mlperf/training
reinforcement/tensorflow/minigo/oneoffs/oneoff_utils.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/oneoffs/oneoff_utils.py#L34-L41
def parse_sgf_to_examples(sgf_path):
    """Return supervised examples from positions

    NOTE: the last move is not included because there is no p.next_move
    after it.
    """
    return zip(*[(p.position, p.next_move, p.result)
                 for p in sgf_wrapper.replay_sgf_file(sgf_path)])
[ "def", "parse_sgf_to_examples", "(", "sgf_path", ")", ":", "return", "zip", "(", "*", "[", "(", "p", ".", "position", ",", "p", ".", "next_move", ",", "p", ".", "result", ")", "for", "p", "in", "sgf_wrapper", ".", "replay_sgf_file", "(", "sgf_path", ")", "]", ")" ]
Return supervised examples from positions

NOTE: the last move is not included because there is no p.next_move after it.
[ "Return", "supervised", "examples", "from", "positions" ]
python
train
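A short sketch of consuming the result; 'game.sgf' is a placeholder path. zip(*) transposes the per-position triples into three parallel tuples.

positions, moves, results = parse_sgf_to_examples('game.sgf')
print(len(positions), 'examples; first result:', results[0])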
equinor/segyio
python/segyio/trace.py
https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/trace.py#L253-L266
def raw(self): """ An eager version of Trace Returns ------- raw : RawTrace """ return RawTrace(self.filehandle, self.dtype, len(self), self.shape, self.readonly, )
[ "def", "raw", "(", "self", ")", ":", "return", "RawTrace", "(", "self", ".", "filehandle", ",", "self", ".", "dtype", ",", "len", "(", "self", ")", ",", "self", ".", "shape", ",", "self", ".", "readonly", ",", ")" ]
An eager version of Trace Returns ------- raw : RawTrace
[ "An", "eager", "version", "of", "Trace" ]
python
train
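The standard eager-read idiom for the raw property above; the file path is a placeholder.

import segyio

with segyio.open('file.sgy') as f:
    data = f.trace.raw[:]      # every trace in one ndarray, single read
    first = f.trace.raw[0]     # one trace, read eagerly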
4degrees/riffle
source/riffle/icon_factory.py
https://github.com/4degrees/riffle/blob/e5a0d908df8c93ff1ee7abdda8875fd1667df53d/source/riffle/icon_factory.py#L25-L54
def icon(self, specification): '''Return appropriate icon for *specification*. *specification* should be either: * An instance of :py:class:`riffle.model.Item` * One of the defined icon types (:py:class:`IconType`) ''' if isinstance(specification, riffle.model.Item): specification = self.type(specification) icon = None if specification == IconType.Computer: icon = QtGui.QIcon(':riffle/icon/computer') elif specification == IconType.Mount: icon = QtGui.QIcon(':riffle/icon/drive') elif specification == IconType.Directory: icon = QtGui.QIcon(':riffle/icon/folder') elif specification == IconType.File: icon = QtGui.QIcon(':riffle/icon/file') elif specification == IconType.Collection: icon = QtGui.QIcon(':riffle/icon/collection') return icon
[ "def", "icon", "(", "self", ",", "specification", ")", ":", "if", "isinstance", "(", "specification", ",", "riffle", ".", "model", ".", "Item", ")", ":", "specification", "=", "self", ".", "type", "(", "specification", ")", "icon", "=", "None", "if", "specification", "==", "IconType", ".", "Computer", ":", "icon", "=", "QtGui", ".", "QIcon", "(", "':riffle/icon/computer'", ")", "elif", "specification", "==", "IconType", ".", "Mount", ":", "icon", "=", "QtGui", ".", "QIcon", "(", "':riffle/icon/drive'", ")", "elif", "specification", "==", "IconType", ".", "Directory", ":", "icon", "=", "QtGui", ".", "QIcon", "(", "':riffle/icon/folder'", ")", "elif", "specification", "==", "IconType", ".", "File", ":", "icon", "=", "QtGui", ".", "QIcon", "(", "':riffle/icon/file'", ")", "elif", "specification", "==", "IconType", ".", "Collection", ":", "icon", "=", "QtGui", ".", "QIcon", "(", "':riffle/icon/collection'", ")", "return", "icon" ]
Return appropriate icon for *specification*. *specification* should be either: * An instance of :py:class:`riffle.model.Item` * One of the defined icon types (:py:class:`IconType`)
[ "Return", "appropriate", "icon", "for", "*", "specification", "*", "." ]
python
test
librosa/librosa
librosa/core/time_frequency.py
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L1014-L1067
def times_like(X, sr=22050, hop_length=512, n_fft=None, axis=-1): """Return an array of time values to match the time axis from a feature matrix. Parameters ---------- X : np.ndarray or scalar - If ndarray, X is a feature matrix, e.g. STFT, chromagram, or mel spectrogram. - If scalar, X represents the number of frames. sr : number > 0 [scalar] audio sampling rate hop_length : int > 0 [scalar] number of samples between successive frames n_fft : None or int > 0 [scalar] Optional: length of the FFT window. If given, time conversion will include an offset of `n_fft / 2` to counteract windowing effects when using a non-centered STFT. axis : int [scalar] The axis representing the time axis of X. By default, the last axis (-1) is taken. Returns ------- times : np.ndarray [shape=(n,)] ndarray of times (in seconds) corresponding to each frame of X. See Also -------- samples_like : Return an array of sample indices to match the time axis from a feature matrix. Examples -------- Provide a feature matrix input: >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> X = librosa.stft(y) >>> times = librosa.times_like(X) >>> times array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ..., 6.13935601e+01, 6.14167800e+01, 6.14400000e+01]) Provide a scalar input: >>> n_frames = 2647 >>> times = librosa.times_like(n_frames) >>> times array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ..., 6.13935601e+01, 6.14167800e+01, 6.14400000e+01]) """ samples = samples_like(X, hop_length=hop_length, n_fft=n_fft, axis=axis) return samples_to_time(samples, sr=sr)
[ "def", "times_like", "(", "X", ",", "sr", "=", "22050", ",", "hop_length", "=", "512", ",", "n_fft", "=", "None", ",", "axis", "=", "-", "1", ")", ":", "samples", "=", "samples_like", "(", "X", ",", "hop_length", "=", "hop_length", ",", "n_fft", "=", "n_fft", ",", "axis", "=", "axis", ")", "return", "samples_to_time", "(", "samples", ",", "sr", "=", "sr", ")" ]
Return an array of time values to match the time axis from a feature matrix. Parameters ---------- X : np.ndarray or scalar - If ndarray, X is a feature matrix, e.g. STFT, chromagram, or mel spectrogram. - If scalar, X represents the number of frames. sr : number > 0 [scalar] audio sampling rate hop_length : int > 0 [scalar] number of samples between successive frames n_fft : None or int > 0 [scalar] Optional: length of the FFT window. If given, time conversion will include an offset of `n_fft / 2` to counteract windowing effects when using a non-centered STFT. axis : int [scalar] The axis representing the time axis of X. By default, the last axis (-1) is taken. Returns ------- times : np.ndarray [shape=(n,)] ndarray of times (in seconds) corresponding to each frame of X. See Also -------- samples_like : Return an array of sample indices to match the time axis from a feature matrix. Examples -------- Provide a feature matrix input: >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> X = librosa.stft(y) >>> times = librosa.times_like(X) >>> times array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ..., 6.13935601e+01, 6.14167800e+01, 6.14400000e+01]) Provide a scalar input: >>> n_frames = 2647 >>> times = librosa.times_like(n_frames) >>> times array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ..., 6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
[ "Return", "an", "array", "of", "time", "values", "to", "match", "the", "time", "axis", "from", "a", "feature", "matrix", "." ]
python
test
feliphebueno/Rinzler
rinzler/core/route_mapping.py
https://github.com/feliphebueno/Rinzler/blob/7f6d5445b5662cba2e8938bb82c7f3ef94e5ded8/rinzler/core/route_mapping.py#L72-L82
def __set_route(self, type_route, route):
        """
        Registers the given route under type_route in the route mapping
        :rtype: object
        """
        if type_route in self.__routes:
            if not self.verify_route_already_bound(type_route, route):
                self.__routes[type_route].append(route)
        else:
            self.__routes[type_route] = [route]
        return RouteMapping
[ "def", "__set_route", "(", "self", ",", "type_route", ",", "route", ")", ":", "if", "type_route", "in", "self", ".", "__routes", ":", "if", "not", "self", ".", "verify_route_already_bound", "(", "type_route", ",", "route", ")", ":", "self", ".", "__routes", "[", "type_route", "]", ".", "append", "(", "route", ")", "else", ":", "self", ".", "__routes", "[", "type_route", "]", "=", "[", "route", "]", "return", "RouteMapping" ]
Registers the given route under type_route in the route mapping
:rtype: object
[ "Registers", "the", "given", "route", "under", "type_route", "in", "the", "route", "mapping", ":", "rtype", ":", "object" ]
python
train
kislyuk/aegea
aegea/packages/github3/pulls.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/pulls.py#L414-L427
def reply(self, body): """Reply to this review comment with a new review comment. :param str body: The text of the comment. :returns: The created review comment. :rtype: :class:`~github3.pulls.ReviewComment` """ url = self._build_url('comments', base_url=self.pull_request_url) index = self._api.rfind('/') + 1 in_reply_to = self._api[index:] json = self._json(self._post(url, data={ 'body': body, 'in_reply_to': in_reply_to }), 201) return ReviewComment(json, self) if json else None
[ "def", "reply", "(", "self", ",", "body", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'comments'", ",", "base_url", "=", "self", ".", "pull_request_url", ")", "index", "=", "self", ".", "_api", ".", "rfind", "(", "'/'", ")", "+", "1", "in_reply_to", "=", "self", ".", "_api", "[", "index", ":", "]", "json", "=", "self", ".", "_json", "(", "self", ".", "_post", "(", "url", ",", "data", "=", "{", "'body'", ":", "body", ",", "'in_reply_to'", ":", "in_reply_to", "}", ")", ",", "201", ")", "return", "ReviewComment", "(", "json", ",", "self", ")", "if", "json", "else", "None" ]
Reply to this review comment with a new review comment. :param str body: The text of the comment. :returns: The created review comment. :rtype: :class:`~github3.pulls.ReviewComment`
[ "Reply", "to", "this", "review", "comment", "with", "a", "new", "review", "comment", "." ]
python
train
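A hedged sketch of replying to an existing review comment; the login token, repository and PR number are placeholders.

from github3 import login

gh = login(token='...')                      # placeholder token
pr = gh.pull_request('owner', 'repo', 7)     # placeholder repo/PR
comment = next(pr.review_comments())
follow_up = comment.reply('Good catch - fixed in the next commit.')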
IvanMalison/okcupyd
okcupyd/util/__init__.py
https://github.com/IvanMalison/okcupyd/blob/46f4eaa9419098f6c299738ce148af55c64deb64/okcupyd/util/__init__.py#L54-L62
def bust_self(self, obj): """Remove the value that is being stored on `obj` for this :class:`.cached_property` object. :param obj: The instance on which to bust the cache. """ if self.func.__name__ in obj.__dict__: delattr(obj, self.func.__name__)
[ "def", "bust_self", "(", "self", ",", "obj", ")", ":", "if", "self", ".", "func", ".", "__name__", "in", "obj", ".", "__dict__", ":", "delattr", "(", "obj", ",", "self", ".", "func", ".", "__name__", ")" ]
Remove the value that is being stored on `obj` for this :class:`.cached_property` object. :param obj: The instance on which to bust the cache.
[ "Remove", "the", "value", "that", "is", "being", "stored", "on", "obj", "for", "this", ":", "class", ":", ".", "cached_property", "object", "." ]
python
train
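A demonstration of busting the per-instance cache; `expensive` stands in for any costly computation. The descriptor is fetched via the class __dict__ to bypass __get__.

from okcupyd.util import cached_property

class Profile(object):
    @cached_property
    def expensive(self):
        print('computing...')
        return 42

p = Profile()
p.expensive                                  # computes and caches on p
p.expensive                                  # served from p.__dict__
Profile.__dict__['expensive'].bust_self(p)   # drop the cached value
p.expensive                                  # computed again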
crossbario/txaio
txaio/__init__.py
https://github.com/crossbario/txaio/blob/29c77ff1210cabd4cc03f16f34672612e7eef704/txaio/__init__.py#L130-L140
def _use_framework(module):
    """
    Internal helper to set this module's methods to those of the specified
    framework helper module.
    """
    import txaio
    for method_name in __all__:
        if method_name in ['use_twisted', 'use_asyncio']:
            continue
        setattr(txaio, method_name,
                getattr(module, method_name))
[ "def", "_use_framework", "(", "module", ")", ":", "import", "txaio", "for", "method_name", "in", "__all__", ":", "if", "method_name", "in", "[", "'use_twisted'", ",", "'use_asyncio'", "]", ":", "continue", "setattr", "(", "txaio", ",", "method_name", ",", "getattr", "(", "module", ",", "method_name", ")", ")" ]
Internal helper to set this module's methods to those of the specified framework helper module.
[ "Internal", "helper", "to", "set", "this", "module", "'s", "methods", "to", "those", "of", "the", "specified", "framework", "helper", "module", "." ]
python
train
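A caller-side sketch of the effect of this helper; use_twisted/use_asyncio apply it internally, after which the generic names resolve to the chosen framework's helpers.

import txaio
txaio.use_twisted()          # or txaio.use_asyncio()

f = txaio.create_future()    # now backed by the selected framework
txaio.resolve(f, 'done')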