Dataset schema (field name, type, observed range):

field             type     range / classes
----------------  -------  -------------------
repo              string   lengths 7 to 55
path              string   lengths 4 to 223
url               string   lengths 87 to 315
code              string   lengths 75 to 104k
code_tokens       list     -
docstring         string   lengths 1 to 46.9k
docstring_tokens  list     -
language          string   1 class
partition         string   3 classes
avg_line_len      float64  7.91 to 980
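Each record below lists these ten fields in schema order, one after another. As a minimal sketch of how such a dump can be inspected, assuming it is available locally as a Parquet file (the filename code_docstring.parquet is a placeholder, not the dataset's actual location) and that pandas is installed:

import pandas as pd

# Placeholder filename: the dump's real storage location and format are
# not stated here, so this path is an assumption.
df = pd.read_parquet("code_docstring.parquet")

print(df.dtypes)                       # two list columns, the rest strings plus one float
print(df["partition"].value_counts())  # 3 classes; train, valid and test all appear below
print(df["language"].unique())         # 1 class; every record below is "python"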
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L1313-L1319
def system_generate_batch_inputs(input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /system/generateBatchInputs API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method:-/system/generateBatchInputs
    """
    return DXHTTPRequest('/system/generateBatchInputs', input_params, always_retry=always_retry, **kwargs)
[ "def", "system_generate_batch_inputs", "(", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/system/generateBatchInputs'", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", "*", "*", "kwargs", ")" ]
Invokes the /system/generateBatchInputs API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method:-/system/generateBatchInputs
[ "Invokes", "the", "/", "system", "/", "generateBatchInputs", "API", "method", "." ]
python
train
53.571429
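A plausible reading of avg_line_len, judging from the record above, is the mean character count per line of the code field; this is an assumption, since the dump does not define the column. A quick check under that assumption (the snippet path is hypothetical, standing in for the record's original newline-preserving code):

def mean_line_length(code: str) -> float:
    """Average number of characters per line of a code snippet."""
    lines = code.splitlines()
    return sum(len(line) for line in lines) / len(lines)

# Hypothetical local copy of the record's code field with its original
# newlines; compare the result against the recorded 53.571429. A small
# deviation would only reflect whitespace lost when the dump was flattened.
snippet = open("dxpy_api_snippet.py").read()
print(round(mean_line_length(snippet), 6))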
apache/airflow
airflow/models/dag.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dag.py#L1494-L1526
def create_dagrun(self,
                  run_id,
                  state,
                  execution_date,
                  start_date=None,
                  external_trigger=False,
                  conf=None,
                  session=None):
    """
    Creates a dag run from this dag including the tasks associated with this dag.
    Returns the dag run.

    :param run_id: defines the run id for this dag run
    :type run_id: str
    :param execution_date: the execution date of this dag run
    :type execution_date: datetime.datetime
    :param state: the state of the dag run
    :type state: airflow.utils.state.State
    :param start_date: the date this dag run should be evaluated
    :type start_date: datetime.datetime
    :param external_trigger: whether this dag run is externally triggered
    :type external_trigger: bool
    :param session: database session
    :type session: sqlalchemy.orm.session.Session
    """
    return self.get_dag().create_dagrun(run_id=run_id,
                                        state=state,
                                        execution_date=execution_date,
                                        start_date=start_date,
                                        external_trigger=external_trigger,
                                        conf=conf,
                                        session=session)
[ "def", "create_dagrun", "(", "self", ",", "run_id", ",", "state", ",", "execution_date", ",", "start_date", "=", "None", ",", "external_trigger", "=", "False", ",", "conf", "=", "None", ",", "session", "=", "None", ")", ":", "return", "self", ".", "get_dag", "(", ")", ".", "create_dagrun", "(", "run_id", "=", "run_id", ",", "state", "=", "state", ",", "execution_date", "=", "execution_date", ",", "start_date", "=", "start_date", ",", "external_trigger", "=", "external_trigger", ",", "conf", "=", "conf", ",", "session", "=", "session", ")" ]
Creates a dag run from this dag including the tasks associated with this dag. Returns the dag run. :param run_id: defines the run id for this dag run :type run_id: str :param execution_date: the execution date of this dag run :type execution_date: datetime.datetime :param state: the state of the dag run :type state: airflow.utils.state.State :param start_date: the date this dag run should be evaluated :type start_date: datetime.datetime :param external_trigger: whether this dag run is externally triggered :type external_trigger: bool :param session: database session :type session: sqlalchemy.orm.session.Session
[ "Creates", "a", "dag", "run", "from", "this", "dag", "including", "the", "tasks", "associated", "with", "this", "dag", ".", "Returns", "the", "dag", "run", "." ]
python
test
44.151515
nerdvegas/rez
src/rez/solver.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L1928-L1936
def resolved_packages(self):
    """Return a list of PackageVariant objects, or None if the resolve did
    not complete or was unsuccessful.
    """
    if (self.status != SolverStatus.solved):
        return None

    final_phase = self.phase_stack[-1]
    return final_phase._get_solved_variants()
[ "def", "resolved_packages", "(", "self", ")", ":", "if", "(", "self", ".", "status", "!=", "SolverStatus", ".", "solved", ")", ":", "return", "None", "final_phase", "=", "self", ".", "phase_stack", "[", "-", "1", "]", "return", "final_phase", ".", "_get_solved_variants", "(", ")" ]
Return a list of PackageVariant objects, or None if the resolve did not complete or was unsuccessful.
[ "Return", "a", "list", "of", "PackageVariant", "objects", "or", "None", "if", "the", "resolve", "did", "not", "complete", "or", "was", "unsuccessful", "." ]
python
train
35.555556
wbond/oscrypto
oscrypto/_win/asymmetric.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_win/asymmetric.py#L2997-L3051
def _advapi32_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False):
    """
    Encrypts a value using an RSA public key via CryptoAPI

    :param certificate_or_public_key:
        A Certificate or PublicKey instance to encrypt with

    :param data:
        A byte string of the data to encrypt

    :param rsa_oaep_padding:
        If OAEP padding should be used instead of PKCS#1 v1.5

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the ciphertext
    """

    flags = 0
    if rsa_oaep_padding:
        flags = Advapi32Const.CRYPT_OAEP

    out_len = new(advapi32, 'DWORD *', len(data))
    res = advapi32.CryptEncrypt(
        certificate_or_public_key.ex_key_handle,
        null(),
        True,
        flags,
        null(),
        out_len,
        0
    )
    handle_error(res)

    buffer_len = deref(out_len)
    buffer = buffer_from_bytes(buffer_len)
    write_to_buffer(buffer, data)
    pointer_set(out_len, len(data))

    res = advapi32.CryptEncrypt(
        certificate_or_public_key.ex_key_handle,
        null(),
        True,
        flags,
        buffer,
        out_len,
        buffer_len
    )
    handle_error(res)

    return bytes_from_buffer(buffer, deref(out_len))[::-1]
[ "def", "_advapi32_encrypt", "(", "certificate_or_public_key", ",", "data", ",", "rsa_oaep_padding", "=", "False", ")", ":", "flags", "=", "0", "if", "rsa_oaep_padding", ":", "flags", "=", "Advapi32Const", ".", "CRYPT_OAEP", "out_len", "=", "new", "(", "advapi32", ",", "'DWORD *'", ",", "len", "(", "data", ")", ")", "res", "=", "advapi32", ".", "CryptEncrypt", "(", "certificate_or_public_key", ".", "ex_key_handle", ",", "null", "(", ")", ",", "True", ",", "flags", ",", "null", "(", ")", ",", "out_len", ",", "0", ")", "handle_error", "(", "res", ")", "buffer_len", "=", "deref", "(", "out_len", ")", "buffer", "=", "buffer_from_bytes", "(", "buffer_len", ")", "write_to_buffer", "(", "buffer", ",", "data", ")", "pointer_set", "(", "out_len", ",", "len", "(", "data", ")", ")", "res", "=", "advapi32", ".", "CryptEncrypt", "(", "certificate_or_public_key", ".", "ex_key_handle", ",", "null", "(", ")", ",", "True", ",", "flags", ",", "buffer", ",", "out_len", ",", "buffer_len", ")", "handle_error", "(", "res", ")", "return", "bytes_from_buffer", "(", "buffer", ",", "deref", "(", "out_len", ")", ")", "[", ":", ":", "-", "1", "]" ]
Encrypts a value using an RSA public key via CryptoAPI :param certificate_or_public_key: A Certificate or PublicKey instance to encrypt with :param data: A byte string of the data to encrypt :param rsa_oaep_padding: If OAEP padding should be used instead of PKCS#1 v1.5 :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the ciphertext
[ "Encrypts", "a", "value", "using", "an", "RSA", "public", "key", "via", "CryptoAPI" ]
python
valid
25.036364
pinax/pinax-webanalytics
pinax/webanalytics/activity.py
https://github.com/pinax/pinax-webanalytics/blob/bc84f6bcefa022bfd2532187e5a949a391494578/pinax/webanalytics/activity.py#L25-L33
def add(request, kind, method, *args):
    """
    add(request, "mixpanel", "track", "purchase", {order: "1234", amount: "100"})
    add(request, "google", "push", ["_addTrans", "1234", "Gondor", "100"])
    """
    request.session.setdefault(_key_name(kind), []).append({
        "method": method,
        "args": args
    })
[ "def", "add", "(", "request", ",", "kind", ",", "method", ",", "*", "args", ")", ":", "request", ".", "session", ".", "setdefault", "(", "_key_name", "(", "kind", ")", ",", "[", "]", ")", ".", "append", "(", "{", "\"method\"", ":", "method", ",", "\"args\"", ":", "args", "}", ")" ]
add(request, "mixpanel", "track", "purchase", {order: "1234", amount: "100"}) add(request, "google", "push", ["_addTrans", "1234", "Gondor", "100"])
[ "add", "(", "request", "mixpanel", "track", "purchase", "{", "order", ":", "1234", "amount", ":", "100", "}", ")", "add", "(", "request", "google", "push", "[", "_addTrans", "1234", "Gondor", "100", "]", ")" ]
python
train
35.333333
mdeous/fatbotslim
fatbotslim/cli.py
https://github.com/mdeous/fatbotslim/blob/341595d24454a79caee23750eac271f9d0626c88/fatbotslim/cli.py#L117-L132
def main(bot):
    """
    Entry point for the command line launcher.

    :param bot: the IRC bot to run
    :type bot: :class:`fatbotslim.irc.bot.IRC`
    """
    greenlet = spawn(bot.run)
    try:
        greenlet.join()
    except KeyboardInterrupt:
        print ''  # cosmetics matters
        log.info("Killed by user, disconnecting...")
        bot.disconnect()
    finally:
        greenlet.kill()
[ "def", "main", "(", "bot", ")", ":", "greenlet", "=", "spawn", "(", "bot", ".", "run", ")", "try", ":", "greenlet", ".", "join", "(", ")", "except", "KeyboardInterrupt", ":", "print", "''", "# cosmetics matters", "log", ".", "info", "(", "\"Killed by user, disconnecting...\"", ")", "bot", ".", "disconnect", "(", ")", "finally", ":", "greenlet", ".", "kill", "(", ")" ]
Entry point for the command line launcher. :param bot: the IRC bot to run :type bot: :class:`fatbotslim.irc.bot.IRC`
[ "Entry", "point", "for", "the", "command", "line", "launcher", "." ]
python
train
24.4375
pilosus/ForgeryPy3
forgery_py/forgery/lorem_ipsum.py
https://github.com/pilosus/ForgeryPy3/blob/e15f2e59538deb4cbfceaac314f5ea897f2d5450/forgery_py/forgery/lorem_ipsum.py#L143-L165
def text(what="sentence", *args, **kwargs):
    """An aggregator for all above defined public methods."""
    if what == "character":
        return character(*args, **kwargs)
    elif what == "characters":
        return characters(*args, **kwargs)
    elif what == "word":
        return word(*args, **kwargs)
    elif what == "words":
        return words(*args, **kwargs)
    elif what == "sentence":
        return sentence(*args, **kwargs)
    elif what == "sentences":
        return sentences(*args, **kwargs)
    elif what == "paragraph":
        return paragraph(*args, **kwargs)
    elif what == "paragraphs":
        return paragraphs(*args, **kwargs)
    elif what == "title":
        return title(*args, **kwargs)
    else:
        raise NameError('No such method')
[ "def", "text", "(", "what", "=", "\"sentence\"", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "what", "==", "\"character\"", ":", "return", "character", "(", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "what", "==", "\"characters\"", ":", "return", "characters", "(", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "what", "==", "\"word\"", ":", "return", "word", "(", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "what", "==", "\"words\"", ":", "return", "words", "(", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "what", "==", "\"sentence\"", ":", "return", "sentence", "(", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "what", "==", "\"sentences\"", ":", "return", "sentences", "(", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "what", "==", "\"paragraph\"", ":", "return", "paragraph", "(", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "what", "==", "\"paragraphs\"", ":", "return", "paragraphs", "(", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "what", "==", "\"title\"", ":", "return", "title", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "NameError", "(", "'No such method'", ")" ]
An aggregator for all above defined public methods.
[ "An", "aggregator", "for", "all", "above", "defined", "public", "methods", "." ]
python
valid
32.956522
materialsproject/pymatgen
pymatgen/core/surface.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/surface.py#L1105-L1175
def repair_broken_bonds(self, slab, bonds):
    """
    This method will find undercoordinated atoms due to slab
    cleaving specified by the bonds parameter and move them
    to the other surface to make sure the bond is kept intact.
    In a future release of surface.py, the ghost_sites will be
    used to tell us how the repair bonds should look like.

    Arg:
        slab (structure): A structure object representing a slab.
        bonds ({(specie1, specie2): max_bond_dist}: bonds are
            specified as a dict of tuples: float of specie1, specie2
            and the max bonding distance. For example, PO4 groups may be
            defined as {("P", "O"): 3}.

    Returns:
        (Slab) A Slab object with a particular shifted oriented unit cell.
    """

    for pair in bonds.keys():
        blength = bonds[pair]

        # First lets determine which element should be the
        # reference (center element) to determine broken bonds.
        # e.g. P for a PO4 bond. Find integer coordination
        # numbers of the pair of elements wrt to each other
        cn_dict = {}
        for i, el in enumerate(pair):
            cnlist = []
            for site in self.oriented_unit_cell:
                poly_coord = 0
                if site.species_string == el:
                    for nn in self.oriented_unit_cell.get_neighbors(
                            site, blength):
                        if nn[0].species_string == pair[i-1]:
                            poly_coord += 1
                    cnlist.append(poly_coord)
            cn_dict[el] = cnlist

        # We make the element with the higher coordination our reference
        if max(cn_dict[pair[0]]) > max(cn_dict[pair[1]]):
            element1, element2 = pair
        else:
            element2, element1 = pair

        for i, site in enumerate(slab):
            # Determine the coordination of our reference
            if site.species_string == element1:
                poly_coord = 0
                for neighbor in slab.get_neighbors(site, blength):
                    poly_coord += 1 if neighbor[0].species_string == element2 else 0

                # suppose we find an undercoordinated reference atom
                if poly_coord not in cn_dict[element1]:
                    # We get the reference atom of the broken bonds
                    # (undercoordinated), move it to the other surface
                    slab = self.move_to_other_side(slab, [i])

                    # find its NNs with the corresponding
                    # species it should be coordinated with
                    neighbors = slab.get_neighbors(slab[i], blength,
                                                   include_index=True)
                    tomove = [nn[2] for nn in neighbors
                              if nn[0].species_string == element2]
                    tomove.append(i)
                    # and then move those NNs along with the central
                    # atom back to the other side of the slab again
                    slab = self.move_to_other_side(slab, tomove)

    return slab
[ "def", "repair_broken_bonds", "(", "self", ",", "slab", ",", "bonds", ")", ":", "for", "pair", "in", "bonds", ".", "keys", "(", ")", ":", "blength", "=", "bonds", "[", "pair", "]", "# First lets determine which element should be the", "# reference (center element) to determine broken bonds.", "# e.g. P for a PO4 bond. Find integer coordination", "# numbers of the pair of elements wrt to each other", "cn_dict", "=", "{", "}", "for", "i", ",", "el", "in", "enumerate", "(", "pair", ")", ":", "cnlist", "=", "[", "]", "for", "site", "in", "self", ".", "oriented_unit_cell", ":", "poly_coord", "=", "0", "if", "site", ".", "species_string", "==", "el", ":", "for", "nn", "in", "self", ".", "oriented_unit_cell", ".", "get_neighbors", "(", "site", ",", "blength", ")", ":", "if", "nn", "[", "0", "]", ".", "species_string", "==", "pair", "[", "i", "-", "1", "]", ":", "poly_coord", "+=", "1", "cnlist", ".", "append", "(", "poly_coord", ")", "cn_dict", "[", "el", "]", "=", "cnlist", "# We make the element with the higher coordination our reference", "if", "max", "(", "cn_dict", "[", "pair", "[", "0", "]", "]", ")", ">", "max", "(", "cn_dict", "[", "pair", "[", "1", "]", "]", ")", ":", "element1", ",", "element2", "=", "pair", "else", ":", "element2", ",", "element1", "=", "pair", "for", "i", ",", "site", "in", "enumerate", "(", "slab", ")", ":", "# Determine the coordination of our reference", "if", "site", ".", "species_string", "==", "element1", ":", "poly_coord", "=", "0", "for", "neighbor", "in", "slab", ".", "get_neighbors", "(", "site", ",", "blength", ")", ":", "poly_coord", "+=", "1", "if", "neighbor", "[", "0", "]", ".", "species_string", "==", "element2", "else", "0", "# suppose we find an undercoordinated reference atom", "if", "poly_coord", "not", "in", "cn_dict", "[", "element1", "]", ":", "# We get the reference atom of the broken bonds", "# (undercoordinated), move it to the other surface", "slab", "=", "self", ".", "move_to_other_side", "(", "slab", ",", "[", "i", "]", ")", "# find its NNs with the corresponding", "# species it should be coordinated with", "neighbors", "=", "slab", ".", "get_neighbors", "(", "slab", "[", "i", "]", ",", "blength", ",", "include_index", "=", "True", ")", "tomove", "=", "[", "nn", "[", "2", "]", "for", "nn", "in", "neighbors", "if", "nn", "[", "0", "]", ".", "species_string", "==", "element2", "]", "tomove", ".", "append", "(", "i", ")", "# and then move those NNs along with the central", "# atom back to the other side of the slab again", "slab", "=", "self", ".", "move_to_other_side", "(", "slab", ",", "tomove", ")", "return", "slab" ]
This method will find undercoordinated atoms due to slab cleaving specified by the bonds parameter and move them to the other surface to make sure the bond is kept intact. In a future release of surface.py, the ghost_sites will be used to tell us how the repair bonds should look like. Arg: slab (structure): A structure object representing a slab. bonds ({(specie1, specie2): max_bond_dist}: bonds are specified as a dict of tuples: float of specie1, specie2 and the max bonding distance. For example, PO4 groups may be defined as {("P", "O"): 3}. Returns: (Slab) A Slab object with a particular shifted oriented unit cell.
[ "This", "method", "will", "find", "undercoordinated", "atoms", "due", "to", "slab", "cleaving", "specified", "by", "the", "bonds", "parameter", "and", "move", "them", "to", "the", "other", "surface", "to", "make", "sure", "the", "bond", "is", "kept", "intact", ".", "In", "a", "future", "release", "of", "surface", ".", "py", "the", "ghost_sites", "will", "be", "used", "to", "tell", "us", "how", "the", "repair", "bonds", "should", "look", "like", "." ]
python
train
46.042254
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAARP/QAPortfolio.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAPortfolio.py#L310-L324
def get_account(self, account):
    '''
    check the account whether in the portfolio dict or not
    :param account: QA_Account
    :return: QA_Account if in dict
             None not in list
    '''
    try:
        return self.get_account_by_cookie(account.account_cookie)
    except:
        QA_util_log_info(
            'Can not find this account with cookies %s' %
            account.account_cookie
        )
        return None
[ "def", "get_account", "(", "self", ",", "account", ")", ":", "try", ":", "return", "self", ".", "get_account_by_cookie", "(", "account", ".", "account_cookie", ")", "except", ":", "QA_util_log_info", "(", "'Can not find this account with cookies %s'", "%", "account", ".", "account_cookie", ")", "return", "None" ]
check the account whether in the portfolio dict or not :param account: QA_Account :return: QA_Account if in dict None not in list
[ "check", "the", "account", "whether", "in", "the", "protfolio", "dict", "or", "not", ":", "param", "account", ":", "QA_Account", ":", "return", ":", "QA_Account", "if", "in", "dict", "None", "not", "in", "list" ]
python
train
32.066667
meejah/txtorcon
txtorcon/onion.py
https://github.com/meejah/txtorcon/blob/14053b95adf0b4bd9dd9c317bece912a26578a93/txtorcon/onion.py#L389-L504
def _await_descriptor_upload(tor_protocol, onion, progress, await_all_uploads):
    """
    Internal helper.

    :param tor_protocol: ITorControlProtocol instance

    :param onion: IOnionService instance

    :param progress: a progress callback, or None

    :returns: a Deferred that fires once we've detected at least one
        descriptor upload for the service (as detected by listening for
        HS_DESC events)
    """
    # For v3 services, Tor attempts to upload to 16 services; we'll
    # assume that for now but also cap it (we want to show some
    # progress for "attempting uploads" but we need to decide how
    # much) .. so we leave 50% of the "progress" for attempts, and the
    # other 50% for "are we done" (which is either "one thing
    # uploaded" or "all the things uploaded")
    attempted_uploads = set()
    confirmed_uploads = set()
    failed_uploads = set()
    uploaded = defer.Deferred()
    await_all = False if await_all_uploads is None else await_all_uploads

    def translate_progress(tag, description):
        if progress:
            done = len(confirmed_uploads) + len(failed_uploads)
            done_endpoint = float(len(attempted_uploads)) if await_all else 1.0
            done_pct = 0 if not attempted_uploads else float(done) / done_endpoint
            started_pct = float(min(16, len(attempted_uploads))) / 16.0
            try:
                progress(
                    (done_pct * 50.0) + (started_pct * 50.0),
                    tag,
                    description,
                )
            except Exception:
                log.err()

    def hostname_matches(hostname):
        if IAuthenticatedOnionClients.providedBy(onion):
            return hostname[:-6] == onion.get_permanent_id()
        else:
            # provides IOnionService
            return onion.hostname == hostname

    def hs_desc(evt):
        """
        From control-spec:
        "650" SP "HS_DESC" SP Action SP HSAddress SP AuthType SP HsDir
        [SP DescriptorID] [SP "REASON=" Reason] [SP "REPLICA=" Replica]
        """
        args = evt.split()
        subtype = args[0]
        if subtype == 'UPLOAD':
            if hostname_matches('{}.onion'.format(args[1])):
                attempted_uploads.add(args[3])
                translate_progress(
                    "wait_descriptor",
                    "Upload to {} started".format(args[3])
                )

        elif subtype == 'UPLOADED':
            # we only need ONE successful upload to happen for the
            # HS to be reachable.
            # unused? addr = args[1]

            # XXX FIXME I think tor is sending the onion-address
            # properly with these now, so we can use those
            # (i.e. instead of matching to "attempted_uploads")
            if args[3] in attempted_uploads:
                confirmed_uploads.add(args[3])
                log.msg("Uploaded '{}' to '{}'".format(args[1], args[3]))
                translate_progress(
                    "wait_descriptor",
                    "Successful upload to {}".format(args[3])
                )
                if not uploaded.called:
                    if await_all:
                        if (len(failed_uploads) + len(confirmed_uploads)) == len(attempted_uploads):
                            uploaded.callback(onion)
                    else:
                        uploaded.callback(onion)

        elif subtype == 'FAILED':
            if hostname_matches('{}.onion'.format(args[1])):
                failed_uploads.add(args[3])
                translate_progress(
                    "wait_descriptor",
                    "Failed upload to {}".format(args[3])
                )
                if failed_uploads == attempted_uploads:
                    msg = "Failed to upload '{}' to: {}".format(
                        args[1],
                        ', '.join(failed_uploads),
                    )
                    uploaded.errback(RuntimeError(msg))

    # the first 'yield' should be the add_event_listener so that a
    # caller can do "d = _await_descriptor_upload()", then add the
    # service.
    yield tor_protocol.add_event_listener('HS_DESC', hs_desc)
    yield uploaded
    yield tor_protocol.remove_event_listener('HS_DESC', hs_desc)

    # ensure we show "100%" at the end
    if progress:
        if await_all_uploads:
            msg = "Completed descriptor uploads"
        else:
            msg = "At least one descriptor uploaded"
        try:
            progress(100.0, "wait_descriptor", msg)
        except Exception:
            log.err()
[ "def", "_await_descriptor_upload", "(", "tor_protocol", ",", "onion", ",", "progress", ",", "await_all_uploads", ")", ":", "# For v3 services, Tor attempts to upload to 16 services; we'll", "# assume that for now but also cap it (we want to show some", "# progress for \"attempting uploads\" but we need to decide how", "# much) .. so we leave 50% of the \"progress\" for attempts, and the", "# other 50% for \"are we done\" (which is either \"one thing", "# uploaded\" or \"all the things uploaded\")", "attempted_uploads", "=", "set", "(", ")", "confirmed_uploads", "=", "set", "(", ")", "failed_uploads", "=", "set", "(", ")", "uploaded", "=", "defer", ".", "Deferred", "(", ")", "await_all", "=", "False", "if", "await_all_uploads", "is", "None", "else", "await_all_uploads", "def", "translate_progress", "(", "tag", ",", "description", ")", ":", "if", "progress", ":", "done", "=", "len", "(", "confirmed_uploads", ")", "+", "len", "(", "failed_uploads", ")", "done_endpoint", "=", "float", "(", "len", "(", "attempted_uploads", ")", ")", "if", "await_all", "else", "1.0", "done_pct", "=", "0", "if", "not", "attempted_uploads", "else", "float", "(", "done", ")", "/", "done_endpoint", "started_pct", "=", "float", "(", "min", "(", "16", ",", "len", "(", "attempted_uploads", ")", ")", ")", "/", "16.0", "try", ":", "progress", "(", "(", "done_pct", "*", "50.0", ")", "+", "(", "started_pct", "*", "50.0", ")", ",", "tag", ",", "description", ",", ")", "except", "Exception", ":", "log", ".", "err", "(", ")", "def", "hostname_matches", "(", "hostname", ")", ":", "if", "IAuthenticatedOnionClients", ".", "providedBy", "(", "onion", ")", ":", "return", "hostname", "[", ":", "-", "6", "]", "==", "onion", ".", "get_permanent_id", "(", ")", "else", ":", "# provides IOnionService", "return", "onion", ".", "hostname", "==", "hostname", "def", "hs_desc", "(", "evt", ")", ":", "\"\"\"\n From control-spec:\n \"650\" SP \"HS_DESC\" SP Action SP HSAddress SP AuthType SP HsDir\n [SP DescriptorID] [SP \"REASON=\" Reason] [SP \"REPLICA=\" Replica]\n \"\"\"", "args", "=", "evt", ".", "split", "(", ")", "subtype", "=", "args", "[", "0", "]", "if", "subtype", "==", "'UPLOAD'", ":", "if", "hostname_matches", "(", "'{}.onion'", ".", "format", "(", "args", "[", "1", "]", ")", ")", ":", "attempted_uploads", ".", "add", "(", "args", "[", "3", "]", ")", "translate_progress", "(", "\"wait_descriptor\"", ",", "\"Upload to {} started\"", ".", "format", "(", "args", "[", "3", "]", ")", ")", "elif", "subtype", "==", "'UPLOADED'", ":", "# we only need ONE successful upload to happen for the", "# HS to be reachable.", "# unused? addr = args[1]", "# XXX FIXME I think tor is sending the onion-address", "# properly with these now, so we can use those", "# (i.e. 
instead of matching to \"attempted_uploads\")", "if", "args", "[", "3", "]", "in", "attempted_uploads", ":", "confirmed_uploads", ".", "add", "(", "args", "[", "3", "]", ")", "log", ".", "msg", "(", "\"Uploaded '{}' to '{}'\"", ".", "format", "(", "args", "[", "1", "]", ",", "args", "[", "3", "]", ")", ")", "translate_progress", "(", "\"wait_descriptor\"", ",", "\"Successful upload to {}\"", ".", "format", "(", "args", "[", "3", "]", ")", ")", "if", "not", "uploaded", ".", "called", ":", "if", "await_all", ":", "if", "(", "len", "(", "failed_uploads", ")", "+", "len", "(", "confirmed_uploads", ")", ")", "==", "len", "(", "attempted_uploads", ")", ":", "uploaded", ".", "callback", "(", "onion", ")", "else", ":", "uploaded", ".", "callback", "(", "onion", ")", "elif", "subtype", "==", "'FAILED'", ":", "if", "hostname_matches", "(", "'{}.onion'", ".", "format", "(", "args", "[", "1", "]", ")", ")", ":", "failed_uploads", ".", "add", "(", "args", "[", "3", "]", ")", "translate_progress", "(", "\"wait_descriptor\"", ",", "\"Failed upload to {}\"", ".", "format", "(", "args", "[", "3", "]", ")", ")", "if", "failed_uploads", "==", "attempted_uploads", ":", "msg", "=", "\"Failed to upload '{}' to: {}\"", ".", "format", "(", "args", "[", "1", "]", ",", "', '", ".", "join", "(", "failed_uploads", ")", ",", ")", "uploaded", ".", "errback", "(", "RuntimeError", "(", "msg", ")", ")", "# the first 'yield' should be the add_event_listener so that a", "# caller can do \"d = _await_descriptor_upload()\", then add the", "# service.", "yield", "tor_protocol", ".", "add_event_listener", "(", "'HS_DESC'", ",", "hs_desc", ")", "yield", "uploaded", "yield", "tor_protocol", ".", "remove_event_listener", "(", "'HS_DESC'", ",", "hs_desc", ")", "# ensure we show \"100%\" at the end", "if", "progress", ":", "if", "await_all_uploads", ":", "msg", "=", "\"Completed descriptor uploads\"", "else", ":", "msg", "=", "\"At least one descriptor uploaded\"", "try", ":", "progress", "(", "100.0", ",", "\"wait_descriptor\"", ",", "msg", ")", "except", "Exception", ":", "log", ".", "err", "(", ")" ]
Internal helper. :param tor_protocol: ITorControlProtocol instance :param onion: IOnionService instance :param progress: a progress callback, or None :returns: a Deferred that fires once we've detected at least one descriptor upload for the service (as detected by listening for HS_DESC events)
[ "Internal", "helper", "." ]
python
train
38.577586
abe-winter/pg13-py
pg13/sqex.py
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/sqex.py#L273-L282
def eval_nonagg_call(self, exp):
    "helper for eval_callx; evaluator for CallX that consume a single value"
    # todo: get more concrete about argument counts
    args=self.eval(exp.args)
    if exp.f=='coalesce':
        a,b=args # todo: does coalesce take more than 2 args?
        return b if a is None else a
    elif exp.f=='unnest':
        return self.eval(exp.args)[0] # note: run_select does some work in this case too
    elif exp.f in ('to_tsquery','to_tsvector'):
        return set(self.eval(exp.args.children[0]).split())
    else:
        raise NotImplementedError('unk_function',exp.f)
[ "def", "eval_nonagg_call", "(", "self", ",", "exp", ")", ":", "# todo: get more concrete about argument counts", "args", "=", "self", ".", "eval", "(", "exp", ".", "args", ")", "if", "exp", ".", "f", "==", "'coalesce'", ":", "a", ",", "b", "=", "args", "# todo: does coalesce take more than 2 args?", "return", "b", "if", "a", "is", "None", "else", "a", "elif", "exp", ".", "f", "==", "'unnest'", ":", "return", "self", ".", "eval", "(", "exp", ".", "args", ")", "[", "0", "]", "# note: run_select does some work in this case too", "elif", "exp", ".", "f", "in", "(", "'to_tsquery'", ",", "'to_tsvector'", ")", ":", "return", "set", "(", "self", ".", "eval", "(", "exp", ".", "args", ".", "children", "[", "0", "]", ")", ".", "split", "(", ")", ")", "else", ":", "raise", "NotImplementedError", "(", "'unk_function'", ",", "exp", ".", "f", ")" ]
helper for eval_callx; evaluator for CallX that consume a single value
[ "helper", "for", "eval_callx", ";", "evaluator", "for", "CallX", "that", "consume", "a", "single", "value" ]
python
train
56.7
materialsproject/pymatgen
pymatgen/transformations/advanced_transformations.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/transformations/advanced_transformations.py#L729-L755
def _add_spin_magnitudes(self, structure):
    """
    Replaces Spin.up/Spin.down with spin magnitudes specified
    by mag_species_spin.
    :param structure:
    :return:
    """
    for idx, site in enumerate(structure):
        if getattr(site.specie, '_properties', None):
            spin = site.specie._properties.get('spin', None)
            sign = int(spin) if spin else 0
            if spin:
                new_properties = site.specie._properties.copy()
                # this very hacky bit of code only works because we know
                # that on disordered sites in this class, all species are the same
                # but have different spins, and this is comma-delimited
                sp = str(site.specie).split(",")[0]
                new_properties.update({
                    'spin': sign * self.mag_species_spin.get(sp, 0)
                })
                new_specie = Specie(site.specie.symbol,
                                    getattr(site.specie, 'oxi_state', None),
                                    new_properties)
                structure.replace(idx, new_specie,
                                  properties=site.properties)
    logger.debug('Structure with spin magnitudes:\n{}'.format(str(structure)))
    return structure
[ "def", "_add_spin_magnitudes", "(", "self", ",", "structure", ")", ":", "for", "idx", ",", "site", "in", "enumerate", "(", "structure", ")", ":", "if", "getattr", "(", "site", ".", "specie", ",", "'_properties'", ",", "None", ")", ":", "spin", "=", "site", ".", "specie", ".", "_properties", ".", "get", "(", "'spin'", ",", "None", ")", "sign", "=", "int", "(", "spin", ")", "if", "spin", "else", "0", "if", "spin", ":", "new_properties", "=", "site", ".", "specie", ".", "_properties", ".", "copy", "(", ")", "# this very hacky bit of code only works because we know", "# that on disordered sites in this class, all species are the same", "# but have different spins, and this is comma-delimited", "sp", "=", "str", "(", "site", ".", "specie", ")", ".", "split", "(", "\",\"", ")", "[", "0", "]", "new_properties", ".", "update", "(", "{", "'spin'", ":", "sign", "*", "self", ".", "mag_species_spin", ".", "get", "(", "sp", ",", "0", ")", "}", ")", "new_specie", "=", "Specie", "(", "site", ".", "specie", ".", "symbol", ",", "getattr", "(", "site", ".", "specie", ",", "'oxi_state'", ",", "None", ")", ",", "new_properties", ")", "structure", ".", "replace", "(", "idx", ",", "new_specie", ",", "properties", "=", "site", ".", "properties", ")", "logger", ".", "debug", "(", "'Structure with spin magnitudes:\\n{}'", ".", "format", "(", "str", "(", "structure", ")", ")", ")", "return", "structure" ]
Replaces Spin.up/Spin.down with spin magnitudes specified by mag_species_spin. :param structure: :return:
[ "Replaces", "Spin", ".", "up", "/", "Spin", ".", "down", "with", "spin", "magnitudes", "specified", "by", "mag_species_spin", ".", ":", "param", "structure", ":", ":", "return", ":" ]
python
train
50
openpermissions/perch
perch/migrate.py
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/migrate.py#L224-L248
def _migrate_subresource(subresource, parent, migrations):
    """
    Migrate a resource's subresource

    :param subresource: the perch.SubResource instance
    :param parent: the parent perch.Document instance
    :param migrations: the migrations for a resource
    """
    for key, doc in getattr(parent, subresource.parent_key, {}).items():
        for migration in migrations['migrations']:
            instance = migration(subresource(id=key, **doc))
            parent._resource['doc_version'] = unicode(migration.version)

        instance = _migrate_subresources(
            instance,
            migrations['subresources']
        )

        doc = instance._resource
        doc.pop('id', None)
        doc.pop(instance.resource_type + '_id', None)

        getattr(parent, subresource.parent_key)[key] = doc

    return parent
[ "def", "_migrate_subresource", "(", "subresource", ",", "parent", ",", "migrations", ")", ":", "for", "key", ",", "doc", "in", "getattr", "(", "parent", ",", "subresource", ".", "parent_key", ",", "{", "}", ")", ".", "items", "(", ")", ":", "for", "migration", "in", "migrations", "[", "'migrations'", "]", ":", "instance", "=", "migration", "(", "subresource", "(", "id", "=", "key", ",", "*", "*", "doc", ")", ")", "parent", ".", "_resource", "[", "'doc_version'", "]", "=", "unicode", "(", "migration", ".", "version", ")", "instance", "=", "_migrate_subresources", "(", "instance", ",", "migrations", "[", "'subresources'", "]", ")", "doc", "=", "instance", ".", "_resource", "doc", ".", "pop", "(", "'id'", ",", "None", ")", "doc", ".", "pop", "(", "instance", ".", "resource_type", "+", "'_id'", ",", "None", ")", "getattr", "(", "parent", ",", "subresource", ".", "parent_key", ")", "[", "key", "]", "=", "doc", "return", "parent" ]
Migrate a resource's subresource :param subresource: the perch.SubResource instance :param parent: the parent perch.Document instance :param migrations: the migrations for a resource
[ "Migrate", "a", "resource", "s", "subresource" ]
python
train
33.32
facelessuser/backrefs
tools/unipropgen.py
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/tools/unipropgen.py#L412-L459
def gen_age(output, ascii_props=False, append=False, prefix=""):
    """Generate `age` property."""

    obj = {}
    all_chars = ALL_ASCII if ascii_props else ALL_CHARS
    with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'DerivedAge.txt'), 'r', 'utf-8') as uf:
        for line in uf:
            if not line.startswith('#'):
                data = line.split('#')[0].split(';')
                if len(data) < 2:
                    continue
                span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
                name = format_name(data[1])

                if name not in obj:
                    obj[name] = []
                if span is None:
                    continue
                obj[name].extend(span)

    unassigned = set()
    for x in obj.values():
        unassigned |= set(x)
    obj['na'] = list(all_chars - unassigned)

    for name in list(obj.keys()):
        s = set(obj[name])
        obj[name] = sorted(s)

    # Convert characters values to ranges
    char2range(obj, is_bytes=ascii_props)

    with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
        if not append:
            f.write(HEADER)
        # Write out the Unicode properties
        f.write('%s_age = {\n' % prefix)
        count = len(obj) - 1
        i = 0
        for k1, v1 in sorted(obj.items()):
            f.write(' "%s": "%s"' % (k1, v1))
            if i == count:
                f.write('\n}\n')
            else:
                f.write(',\n')
            i += 1
[ "def", "gen_age", "(", "output", ",", "ascii_props", "=", "False", ",", "append", "=", "False", ",", "prefix", "=", "\"\"", ")", ":", "obj", "=", "{", "}", "all_chars", "=", "ALL_ASCII", "if", "ascii_props", "else", "ALL_CHARS", "with", "codecs", ".", "open", "(", "os", ".", "path", ".", "join", "(", "HOME", ",", "'unicodedata'", ",", "UNIVERSION", ",", "'DerivedAge.txt'", ")", ",", "'r'", ",", "'utf-8'", ")", "as", "uf", ":", "for", "line", "in", "uf", ":", "if", "not", "line", ".", "startswith", "(", "'#'", ")", ":", "data", "=", "line", ".", "split", "(", "'#'", ")", "[", "0", "]", ".", "split", "(", "';'", ")", "if", "len", "(", "data", ")", "<", "2", ":", "continue", "span", "=", "create_span", "(", "[", "int", "(", "i", ",", "16", ")", "for", "i", "in", "data", "[", "0", "]", ".", "strip", "(", ")", ".", "split", "(", "'..'", ")", "]", ",", "is_bytes", "=", "ascii_props", ")", "name", "=", "format_name", "(", "data", "[", "1", "]", ")", "if", "name", "not", "in", "obj", ":", "obj", "[", "name", "]", "=", "[", "]", "if", "span", "is", "None", ":", "continue", "obj", "[", "name", "]", ".", "extend", "(", "span", ")", "unassigned", "=", "set", "(", ")", "for", "x", "in", "obj", ".", "values", "(", ")", ":", "unassigned", "|=", "set", "(", "x", ")", "obj", "[", "'na'", "]", "=", "list", "(", "all_chars", "-", "unassigned", ")", "for", "name", "in", "list", "(", "obj", ".", "keys", "(", ")", ")", ":", "s", "=", "set", "(", "obj", "[", "name", "]", ")", "obj", "[", "name", "]", "=", "sorted", "(", "s", ")", "# Convert characters values to ranges", "char2range", "(", "obj", ",", "is_bytes", "=", "ascii_props", ")", "with", "codecs", ".", "open", "(", "output", ",", "'a'", "if", "append", "else", "'w'", ",", "'utf-8'", ")", "as", "f", ":", "if", "not", "append", ":", "f", ".", "write", "(", "HEADER", ")", "# Write out the Unicode properties", "f", ".", "write", "(", "'%s_age = {\\n'", "%", "prefix", ")", "count", "=", "len", "(", "obj", ")", "-", "1", "i", "=", "0", "for", "k1", ",", "v1", "in", "sorted", "(", "obj", ".", "items", "(", ")", ")", ":", "f", ".", "write", "(", "' \"%s\": \"%s\"'", "%", "(", "k1", ",", "v1", ")", ")", "if", "i", "==", "count", ":", "f", ".", "write", "(", "'\\n}\\n'", ")", "else", ":", "f", ".", "write", "(", "',\\n'", ")", "i", "+=", "1" ]
Generate `age` property.
[ "Generate", "age", "property", "." ]
python
train
31.375
peterldowns/djoauth2
djoauth2/authorization.py
https://github.com/peterldowns/djoauth2/blob/151c7619d1d7a91d720397cfecf3a29fcc9747a9/djoauth2/authorization.py#L328-L353
def make_success_redirect(self):
    """ Return a Django ``HttpResponseRedirect`` describing the request success.

    The custom authorization endpoint should return the result of this method
    when the user grants the Client's authorization request. The request is
    assumed to have successfully been vetted by the :py:meth:`validate` method.
    """
    new_authorization_code = AuthorizationCode.objects.create(
        user=self.user,
        client=self.client,
        redirect_uri=(self.redirect_uri if self.request_redirect_uri else None)
    )
    new_authorization_code.scopes = self.valid_scope_objects
    new_authorization_code.save()

    response_params = {'code': new_authorization_code.value}
    # From http://tools.ietf.org/html/rfc6749#section-4.1.2 :
    #
    #     REQUIRED if the "state" parameter was present in the client
    #     authorization request. The exact value received from the
    #     client.
    #
    if self.state is not None:
        response_params['state'] = self.state

    return HttpResponseRedirect(
        update_parameters(self.redirect_uri, response_params))
[ "def", "make_success_redirect", "(", "self", ")", ":", "new_authorization_code", "=", "AuthorizationCode", ".", "objects", ".", "create", "(", "user", "=", "self", ".", "user", ",", "client", "=", "self", ".", "client", ",", "redirect_uri", "=", "(", "self", ".", "redirect_uri", "if", "self", ".", "request_redirect_uri", "else", "None", ")", ")", "new_authorization_code", ".", "scopes", "=", "self", ".", "valid_scope_objects", "new_authorization_code", ".", "save", "(", ")", "response_params", "=", "{", "'code'", ":", "new_authorization_code", ".", "value", "}", "# From http://tools.ietf.org/html/rfc6749#section-4.1.2 :", "#", "# REQUIRED if the \"state\" parameter was present in the client", "# authorization request. The exact value received from the", "# client.", "#", "if", "self", ".", "state", "is", "not", "None", ":", "response_params", "[", "'state'", "]", "=", "self", ".", "state", "return", "HttpResponseRedirect", "(", "update_parameters", "(", "self", ".", "redirect_uri", ",", "response_params", ")", ")" ]
Return a Django ``HttpResponseRedirect`` describing the request success. The custom authorization endpoint should return the result of this method when the user grants the Client's authorization request. The request is assumed to have successfully been vetted by the :py:meth:`validate` method.
[ "Return", "a", "Django", "HttpResponseRedirect", "describing", "the", "request", "success", "." ]
python
train
41.923077
pydata/xarray
xarray/core/formatting.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/formatting.py#L380-L391
def short_dask_repr(array, show_dtype=True):
    """Similar to dask.array.DataArray.__repr__, but without
    redundant information that's already printed by the repr
    function of the xarray wrapper.
    """
    chunksize = tuple(c[0] for c in array.chunks)
    if show_dtype:
        return 'dask.array<shape={}, dtype={}, chunksize={}>'.format(
            array.shape, array.dtype, chunksize)
    else:
        return 'dask.array<shape={}, chunksize={}>'.format(
            array.shape, chunksize)
[ "def", "short_dask_repr", "(", "array", ",", "show_dtype", "=", "True", ")", ":", "chunksize", "=", "tuple", "(", "c", "[", "0", "]", "for", "c", "in", "array", ".", "chunks", ")", "if", "show_dtype", ":", "return", "'dask.array<shape={}, dtype={}, chunksize={}>'", ".", "format", "(", "array", ".", "shape", ",", "array", ".", "dtype", ",", "chunksize", ")", "else", ":", "return", "'dask.array<shape={}, chunksize={}>'", ".", "format", "(", "array", ".", "shape", ",", "chunksize", ")" ]
Similar to dask.array.DataArray.__repr__, but without redundant information that's already printed by the repr function of the xarray wrapper.
[ "Similar", "to", "dask", ".", "array", ".", "DataArray", ".", "__repr__", "but", "without", "redundant", "information", "that", "s", "already", "printed", "by", "the", "repr", "function", "of", "the", "xarray", "wrapper", "." ]
python
train
41.083333
pingali/dgit
dgitcore/datasets/files.py
https://github.com/pingali/dgit/blob/ecde01f40b98f0719dbcfb54452270ed2f86686d/dgitcore/datasets/files.py#L349-L431
def add(repo, args, targetdir,
        execute=False, generator=False,
        includes=[], script=False,
        source=None):
    """
    Add files to the repository by explicitly specifying them or by
    specifying a pattern over files accessed during execution of an
    executable.

    Parameters
    ----------

    repo: Repository

    args: files or command line
         (a) If simply adding files, then the list of files that must
         be added (including any additional arguments to be passed to
         git)
         (b) If files to be added are an output of a command line, then
         args is the command line

    targetdir: Target directory to store the files

    execute: Args are not files to be added but scripts that must be run.

    includes: patterns used to select files to

    script: Is this a script?

    generator: Is this a generator

    source: Link to the original source of the data
    """

    # Gather the files...
    if not execute:
        files = add_files(args=args,
                          targetdir=targetdir,
                          source=source,
                          script=script,
                          generator=generator)
    else:
        files = run_executable(repo, args, includes)

    if files is None or len(files) == 0:
        return repo

    # Update the repo package but with only those that have changed.
    filtered_files = []
    package = repo.package
    for h in files:
        found = False
        for i, r in enumerate(package['resources']):
            if h['relativepath'] == r['relativepath']:
                found = True
                if h['sha256'] == r['sha256']:
                    change = False
                    for attr in ['source']:
                        if h[attr] != r[attr]:
                            r[attr] = h[attr]
                            change = True
                    if change:
                        filtered_files.append(h)
                    continue
                else:
                    filtered_files.append(h)
                    package['resources'][i] = h
                break
        if not found:
            filtered_files.append(h)
            package['resources'].append(h)

    if len(filtered_files) == 0:
        return 0

    # Copy the files
    repo.manager.add_files(repo, filtered_files)

    # Write to disk...
    rootdir = repo.rootdir
    with cd(rootdir):
        datapath = "datapackage.json"
        with open(datapath, 'w') as fd:
            fd.write(json.dumps(package, indent=4))

    return len(filtered_files)
[ "def", "add", "(", "repo", ",", "args", ",", "targetdir", ",", "execute", "=", "False", ",", "generator", "=", "False", ",", "includes", "=", "[", "]", ",", "script", "=", "False", ",", "source", "=", "None", ")", ":", "# Gather the files...", "if", "not", "execute", ":", "files", "=", "add_files", "(", "args", "=", "args", ",", "targetdir", "=", "targetdir", ",", "source", "=", "source", ",", "script", "=", "script", ",", "generator", "=", "generator", ")", "else", ":", "files", "=", "run_executable", "(", "repo", ",", "args", ",", "includes", ")", "if", "files", "is", "None", "or", "len", "(", "files", ")", "==", "0", ":", "return", "repo", "# Update the repo package but with only those that have changed.", "filtered_files", "=", "[", "]", "package", "=", "repo", ".", "package", "for", "h", "in", "files", ":", "found", "=", "False", "for", "i", ",", "r", "in", "enumerate", "(", "package", "[", "'resources'", "]", ")", ":", "if", "h", "[", "'relativepath'", "]", "==", "r", "[", "'relativepath'", "]", ":", "found", "=", "True", "if", "h", "[", "'sha256'", "]", "==", "r", "[", "'sha256'", "]", ":", "change", "=", "False", "for", "attr", "in", "[", "'source'", "]", ":", "if", "h", "[", "attr", "]", "!=", "r", "[", "attr", "]", ":", "r", "[", "attr", "]", "=", "h", "[", "attr", "]", "change", "=", "True", "if", "change", ":", "filtered_files", ".", "append", "(", "h", ")", "continue", "else", ":", "filtered_files", ".", "append", "(", "h", ")", "package", "[", "'resources'", "]", "[", "i", "]", "=", "h", "break", "if", "not", "found", ":", "filtered_files", ".", "append", "(", "h", ")", "package", "[", "'resources'", "]", ".", "append", "(", "h", ")", "if", "len", "(", "filtered_files", ")", "==", "0", ":", "return", "0", "# Copy the files", "repo", ".", "manager", ".", "add_files", "(", "repo", ",", "filtered_files", ")", "# Write to disk...", "rootdir", "=", "repo", ".", "rootdir", "with", "cd", "(", "rootdir", ")", ":", "datapath", "=", "\"datapackage.json\"", "with", "open", "(", "datapath", ",", "'w'", ")", "as", "fd", ":", "fd", ".", "write", "(", "json", ".", "dumps", "(", "package", ",", "indent", "=", "4", ")", ")", "return", "len", "(", "filtered_files", ")" ]
Add files to the repository by explicitly specifying them or by specifying a pattern over files accessed during execution of an executable. Parameters ---------- repo: Repository args: files or command line (a) If simply adding files, then the list of files that must be added (including any additional arguments to be passed to git) (b) If files to be added are an output of a command line, then args is the command line targetdir: Target directory to store the files execute: Args are not files to be added but scripts that must be run. includes: patterns used to select files to script: Is this a script? generator: Is this a generator source: Link to the original source of the data
[ "Add", "files", "to", "the", "repository", "by", "explicitly", "specifying", "them", "or", "by", "specifying", "a", "pattern", "over", "files", "accessed", "during", "execution", "of", "an", "executable", "." ]
python
valid
30.156627
graphql-python/graphql-core-next
graphql/utilities/build_ast_schema.py
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/utilities/build_ast_schema.py#L71-L164
def build_ast_schema(
    document_ast: DocumentNode,
    assume_valid: bool = False,
    assume_valid_sdl: bool = False,
) -> GraphQLSchema:
    """Build a GraphQL Schema from a given AST.

    This takes the ast of a schema document produced by the parse function in
    src/language/parser.py.

    If no schema definition is provided, then it will look for types named
    Query and Mutation.

    Given that AST it constructs a GraphQLSchema. The resulting schema has no
    resolve methods, so execution will use default resolvers.

    When building a schema from a GraphQL service's introspection result, it
    might be safe to assume the schema is valid. Set `assume_valid` to True
    to assume the produced schema is valid. Set `assume_valid_sdl` to True to
    assume it is already a valid SDL document.
    """
    if not isinstance(document_ast, DocumentNode):
        raise TypeError("Must provide a Document AST.")

    if not (assume_valid or assume_valid_sdl):
        from ..validation.validate import assert_valid_sdl

        assert_valid_sdl(document_ast)

    schema_def: Optional[SchemaDefinitionNode] = None
    type_defs: List[TypeDefinitionNode] = []
    directive_defs: List[DirectiveDefinitionNode] = []
    append_directive_def = directive_defs.append
    for def_ in document_ast.definitions:
        if isinstance(def_, SchemaDefinitionNode):
            schema_def = def_
        elif isinstance(def_, TypeDefinitionNode):
            def_ = cast(TypeDefinitionNode, def_)
            type_defs.append(def_)
        elif isinstance(def_, DirectiveDefinitionNode):
            append_directive_def(def_)

    def resolve_type(type_name: str) -> GraphQLNamedType:
        type_ = type_map.get(type_name)
        if not type:
            raise TypeError(f"Type '{type_name}' not found in document.")
        return type_

    ast_builder = ASTDefinitionBuilder(
        assume_valid=assume_valid, resolve_type=resolve_type
    )

    type_map = {node.name.value: ast_builder.build_type(node) for node in type_defs}
    if schema_def:
        operation_types = get_operation_types(schema_def)
    else:
        operation_types = {
            OperationType.QUERY: "Query",
            OperationType.MUTATION: "Mutation",
            OperationType.SUBSCRIPTION: "Subscription",
        }

    directives = [
        ast_builder.build_directive(directive_def) for directive_def in directive_defs
    ]

    # If specified directives were not explicitly declared, add them.
    if not any(directive.name == "skip" for directive in directives):
        directives.append(GraphQLSkipDirective)
    if not any(directive.name == "include" for directive in directives):
        directives.append(GraphQLIncludeDirective)
    if not any(directive.name == "deprecated" for directive in directives):
        directives.append(GraphQLDeprecatedDirective)

    query_type = operation_types.get(OperationType.QUERY)
    mutation_type = operation_types.get(OperationType.MUTATION)
    subscription_type = operation_types.get(OperationType.SUBSCRIPTION)
    return GraphQLSchema(
        # Note: While this could make early assertions to get the correctly
        # typed values below, that would throw immediately while type system
        # validation with `validate_schema()` will produce more actionable results.
        query=cast(GraphQLObjectType, type_map.get(query_type))
        if query_type
        else None,
        mutation=cast(GraphQLObjectType, type_map.get(mutation_type))
        if mutation_type
        else None,
        subscription=cast(GraphQLObjectType, type_map.get(subscription_type))
        if subscription_type
        else None,
        types=list(type_map.values()),
        directives=directives,
        ast_node=schema_def,
        assume_valid=assume_valid,
    )
[ "def", "build_ast_schema", "(", "document_ast", ":", "DocumentNode", ",", "assume_valid", ":", "bool", "=", "False", ",", "assume_valid_sdl", ":", "bool", "=", "False", ",", ")", "->", "GraphQLSchema", ":", "if", "not", "isinstance", "(", "document_ast", ",", "DocumentNode", ")", ":", "raise", "TypeError", "(", "\"Must provide a Document AST.\"", ")", "if", "not", "(", "assume_valid", "or", "assume_valid_sdl", ")", ":", "from", ".", ".", "validation", ".", "validate", "import", "assert_valid_sdl", "assert_valid_sdl", "(", "document_ast", ")", "schema_def", ":", "Optional", "[", "SchemaDefinitionNode", "]", "=", "None", "type_defs", ":", "List", "[", "TypeDefinitionNode", "]", "=", "[", "]", "directive_defs", ":", "List", "[", "DirectiveDefinitionNode", "]", "=", "[", "]", "append_directive_def", "=", "directive_defs", ".", "append", "for", "def_", "in", "document_ast", ".", "definitions", ":", "if", "isinstance", "(", "def_", ",", "SchemaDefinitionNode", ")", ":", "schema_def", "=", "def_", "elif", "isinstance", "(", "def_", ",", "TypeDefinitionNode", ")", ":", "def_", "=", "cast", "(", "TypeDefinitionNode", ",", "def_", ")", "type_defs", ".", "append", "(", "def_", ")", "elif", "isinstance", "(", "def_", ",", "DirectiveDefinitionNode", ")", ":", "append_directive_def", "(", "def_", ")", "def", "resolve_type", "(", "type_name", ":", "str", ")", "->", "GraphQLNamedType", ":", "type_", "=", "type_map", ".", "get", "(", "type_name", ")", "if", "not", "type", ":", "raise", "TypeError", "(", "f\"Type '{type_name}' not found in document.\"", ")", "return", "type_", "ast_builder", "=", "ASTDefinitionBuilder", "(", "assume_valid", "=", "assume_valid", ",", "resolve_type", "=", "resolve_type", ")", "type_map", "=", "{", "node", ".", "name", ".", "value", ":", "ast_builder", ".", "build_type", "(", "node", ")", "for", "node", "in", "type_defs", "}", "if", "schema_def", ":", "operation_types", "=", "get_operation_types", "(", "schema_def", ")", "else", ":", "operation_types", "=", "{", "OperationType", ".", "QUERY", ":", "\"Query\"", ",", "OperationType", ".", "MUTATION", ":", "\"Mutation\"", ",", "OperationType", ".", "SUBSCRIPTION", ":", "\"Subscription\"", ",", "}", "directives", "=", "[", "ast_builder", ".", "build_directive", "(", "directive_def", ")", "for", "directive_def", "in", "directive_defs", "]", "# If specified directives were not explicitly declared, add them.", "if", "not", "any", "(", "directive", ".", "name", "==", "\"skip\"", "for", "directive", "in", "directives", ")", ":", "directives", ".", "append", "(", "GraphQLSkipDirective", ")", "if", "not", "any", "(", "directive", ".", "name", "==", "\"include\"", "for", "directive", "in", "directives", ")", ":", "directives", ".", "append", "(", "GraphQLIncludeDirective", ")", "if", "not", "any", "(", "directive", ".", "name", "==", "\"deprecated\"", "for", "directive", "in", "directives", ")", ":", "directives", ".", "append", "(", "GraphQLDeprecatedDirective", ")", "query_type", "=", "operation_types", ".", "get", "(", "OperationType", ".", "QUERY", ")", "mutation_type", "=", "operation_types", ".", "get", "(", "OperationType", ".", "MUTATION", ")", "subscription_type", "=", "operation_types", ".", "get", "(", "OperationType", ".", "SUBSCRIPTION", ")", "return", "GraphQLSchema", "(", "# Note: While this could make early assertions to get the correctly", "# typed values below, that would throw immediately while type system", "# validation with `validate_schema()` will produce more actionable results.", "query", "=", "cast", "(", 
"GraphQLObjectType", ",", "type_map", ".", "get", "(", "query_type", ")", ")", "if", "query_type", "else", "None", ",", "mutation", "=", "cast", "(", "GraphQLObjectType", ",", "type_map", ".", "get", "(", "mutation_type", ")", ")", "if", "mutation_type", "else", "None", ",", "subscription", "=", "cast", "(", "GraphQLObjectType", ",", "type_map", ".", "get", "(", "subscription_type", ")", ")", "if", "subscription_type", "else", "None", ",", "types", "=", "list", "(", "type_map", ".", "values", "(", ")", ")", ",", "directives", "=", "directives", ",", "ast_node", "=", "schema_def", ",", "assume_valid", "=", "assume_valid", ",", ")" ]
Build a GraphQL Schema from a given AST. This takes the ast of a schema document produced by the parse function in src/language/parser.py. If no schema definition is provided, then it will look for types named Query and Mutation. Given that AST it constructs a GraphQLSchema. The resulting schema has no resolve methods, so execution will use default resolvers. When building a schema from a GraphQL service's introspection result, it might be safe to assume the schema is valid. Set `assume_valid` to True to assume the produced schema is valid. Set `assume_valid_sdl` to True to assume it is already a valid SDL document.
[ "Build", "a", "GraphQL", "Schema", "from", "a", "given", "AST", "." ]
python
train
39.43617
matthieugouel/gibica
gibica/parser.py
https://github.com/matthieugouel/gibica/blob/65f937f7a6255078cc22eb7691a2897466032909/gibica/parser.py#L132-L139
def variable_declaration(self):
    """
    variable_declaration: 'let' assignment ';'
    """
    self._process(Nature.LET)
    node = VariableDeclaration(assignment=self.assignment())
    self._process(Nature.SEMI)
    return node
[ "def", "variable_declaration", "(", "self", ")", ":", "self", ".", "_process", "(", "Nature", ".", "LET", ")", "node", "=", "VariableDeclaration", "(", "assignment", "=", "self", ".", "assignment", "(", ")", ")", "self", ".", "_process", "(", "Nature", ".", "SEMI", ")", "return", "node" ]
variable_declaration: 'let' assignment ';'
[ "variable_declaration", ":", "let", "assignment", ";" ]
python
train
31.625
ethereum/pyethereum
ethereum/slogging.py
https://github.com/ethereum/pyethereum/blob/b704a5c6577863edc539a1ec3d2620a443b950fb/ethereum/slogging.py#L349-L356
def DEBUG(msg, *args, **kwargs):
    """temporary logger during development that is always on"""
    logger = getLogger("DEBUG")
    if len(logger.handlers) == 0:
        logger.addHandler(StreamHandler())
    logger.propagate = False
    logger.setLevel(logging.DEBUG)
    logger.DEV(msg, *args, **kwargs)
[ "def", "DEBUG", "(", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "logger", "=", "getLogger", "(", "\"DEBUG\"", ")", "if", "len", "(", "logger", ".", "handlers", ")", "==", "0", ":", "logger", ".", "addHandler", "(", "StreamHandler", "(", ")", ")", "logger", ".", "propagate", "=", "False", "logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "logger", ".", "DEV", "(", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
temporary logger during development that is always on
[ "temporary", "logger", "during", "development", "that", "is", "always", "on" ]
python
train
37.375
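The record's helper relies on slogging's custom DEV level; below is a self-contained sketch of the same always-on pattern using only the standard library (names here are illustrative, not from the record):

import logging

def debug_log(msg, *args, **kwargs):
    logger = logging.getLogger("DEBUG")
    if not logger.handlers:  # attach a stream handler exactly once
        logger.addHandler(logging.StreamHandler())
    logger.propagate = False
    logger.setLevel(logging.DEBUG)
    logger.debug(msg, *args, **kwargs)

debug_log("value is %s", 42)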
martinpitt/python-dbusmock
dbusmock/mockobject.py
https://github.com/martinpitt/python-dbusmock/blob/26f65f78bc0ed347233f699a8d6ee0e6880e7eb0/dbusmock/mockobject.py#L368-L394
def AddProperty(self, interface, name, value):
    '''Add property to this object

    interface: D-Bus interface to add this to. For convenience you can
               specify '' here to add the property to the object's main
               interface (as specified on construction).
    name: Property name.
    value: Property value.
    '''
    if not interface:
        interface = self.interface
    try:
        self.props[interface][name]
        raise dbus.exceptions.DBusException(
            'property %s already exists' % name,
            name=self.interface + '.PropertyExists')
    except KeyError:
        # this is what we expect
        pass

    # copy.copy removes one level of variant-ness, which means that the
    # types get exported in introspection data correctly, but we can't do
    # this for container types.
    if not (isinstance(value, dbus.Dictionary) or isinstance(value, dbus.Array)):
        value = copy.copy(value)

    self.props.setdefault(interface, {})[name] = value
[ "def", "AddProperty", "(", "self", ",", "interface", ",", "name", ",", "value", ")", ":", "if", "not", "interface", ":", "interface", "=", "self", ".", "interface", "try", ":", "self", ".", "props", "[", "interface", "]", "[", "name", "]", "raise", "dbus", ".", "exceptions", ".", "DBusException", "(", "'property %s already exists'", "%", "name", ",", "name", "=", "self", ".", "interface", "+", "'.PropertyExists'", ")", "except", "KeyError", ":", "# this is what we expect", "pass", "# copy.copy removes one level of variant-ness, which means that the", "# types get exported in introspection data correctly, but we can't do", "# this for container types.", "if", "not", "(", "isinstance", "(", "value", ",", "dbus", ".", "Dictionary", ")", "or", "isinstance", "(", "value", ",", "dbus", ".", "Array", ")", ")", ":", "value", "=", "copy", ".", "copy", "(", "value", ")", "self", ".", "props", ".", "setdefault", "(", "interface", ",", "{", "}", ")", "[", "name", "]", "=", "value" ]
Add property to this object interface: D-Bus interface to add this to. For convenience you can specify '' here to add the property to the object's main interface (as specified on construction). name: Property name. value: Property value.
[ "Add", "property", "to", "this", "object" ]
python
train
39.777778
doakey3/DashTable
dashtable/dashutils/ensure_table_strings.py
https://github.com/doakey3/DashTable/blob/744cfb6a717fa75a8092c83ebcd49b2668023681/dashtable/dashutils/ensure_table_strings.py#L1-L16
def ensure_table_strings(table):
    """
    Force each cell in the table to be a string

    Parameters
    ----------
    table : list of lists

    Returns
    -------
    table : list of lists of str
    """
    for row in range(len(table)):
        for column in range(len(table[row])):
            table[row][column] = str(table[row][column])
    return table
[ "def", "ensure_table_strings", "(", "table", ")", ":", "for", "row", "in", "range", "(", "len", "(", "table", ")", ")", ":", "for", "column", "in", "range", "(", "len", "(", "table", "[", "row", "]", ")", ")", ":", "table", "[", "row", "]", "[", "column", "]", "=", "str", "(", "table", "[", "row", "]", "[", "column", "]", ")", "return", "table" ]
Force each cell in the table to be a string Parameters ---------- table : list of lists Returns ------- table : list of lists of str
[ "Force", "each", "cell", "in", "the", "table", "to", "be", "a", "string" ]
python
train
21.875
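Quick usage sketch, assuming the function from the record above is in scope:

table = [[1, 2.5], [None, True]]
print(ensure_table_strings(table))
# [['1', '2.5'], ['None', 'True']]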
lepture/terminal
terminal/log.py
https://github.com/lepture/terminal/blob/5226d1cac53077f12624aa51f64de7b5b05d9cb8/terminal/log.py#L100-L113
def verbose(self):
    """
    Make it the verbose log.

    A verbose log can be only shown when user want to see more logs.
    It works as::

        log.verbose.warn('this is a verbose warn')
        log.verbose.info('this is a verbose info')
    """
    log = copy.copy(self)
    log._is_verbose = True
    return log
[ "def", "verbose", "(", "self", ")", ":", "log", "=", "copy", ".", "copy", "(", "self", ")", "log", ".", "_is_verbose", "=", "True", "return", "log" ]
Make it the verbose log. A verbose log can be only shown when user want to see more logs. It works as:: log.verbose.warn('this is a verbose warn') log.verbose.info('this is a verbose info')
[ "Make", "it", "the", "verbose", "log", "." ]
python
train
25
UCL-INGI/INGInious
inginious/frontend/pages/course_admin/task_edit_file.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/course_admin/task_edit_file.py#L19-L38
def GET_AUTH(self, courseid, taskid):  # pylint: disable=arguments-differ
    """ Edit a task """
    if not id_checker(taskid):
        raise Exception("Invalid task id")

    self.get_course_and_check_rights(courseid, allow_all_staff=False)

    request = web.input()
    if request.get("action") == "download" and request.get('path') is not None:
        return self.action_download(courseid, taskid, request.get('path'))
    elif request.get("action") == "delete" and request.get('path') is not None:
        return self.action_delete(courseid, taskid, request.get('path'))
    elif request.get("action") == "rename" and request.get('path') is not None and request.get('new_path') is not None:
        return self.action_rename(courseid, taskid, request.get('path'), request.get('new_path'))
    elif request.get("action") == "create" and request.get('path') is not None:
        return self.action_create(courseid, taskid, request.get('path'))
    elif request.get("action") == "edit" and request.get('path') is not None:
        return self.action_edit(courseid, taskid, request.get('path'))
    else:
        return self.show_tab_file(courseid, taskid)
[ "def", "GET_AUTH", "(", "self", ",", "courseid", ",", "taskid", ")", ":", "# pylint: disable=arguments-differ", "if", "not", "id_checker", "(", "taskid", ")", ":", "raise", "Exception", "(", "\"Invalid task id\"", ")", "self", ".", "get_course_and_check_rights", "(", "courseid", ",", "allow_all_staff", "=", "False", ")", "request", "=", "web", ".", "input", "(", ")", "if", "request", ".", "get", "(", "\"action\"", ")", "==", "\"download\"", "and", "request", ".", "get", "(", "'path'", ")", "is", "not", "None", ":", "return", "self", ".", "action_download", "(", "courseid", ",", "taskid", ",", "request", ".", "get", "(", "'path'", ")", ")", "elif", "request", ".", "get", "(", "\"action\"", ")", "==", "\"delete\"", "and", "request", ".", "get", "(", "'path'", ")", "is", "not", "None", ":", "return", "self", ".", "action_delete", "(", "courseid", ",", "taskid", ",", "request", ".", "get", "(", "'path'", ")", ")", "elif", "request", ".", "get", "(", "\"action\"", ")", "==", "\"rename\"", "and", "request", ".", "get", "(", "'path'", ")", "is", "not", "None", "and", "request", ".", "get", "(", "'new_path'", ")", "is", "not", "None", ":", "return", "self", ".", "action_rename", "(", "courseid", ",", "taskid", ",", "request", ".", "get", "(", "'path'", ")", ",", "request", ".", "get", "(", "'new_path'", ")", ")", "elif", "request", ".", "get", "(", "\"action\"", ")", "==", "\"create\"", "and", "request", ".", "get", "(", "'path'", ")", "is", "not", "None", ":", "return", "self", ".", "action_create", "(", "courseid", ",", "taskid", ",", "request", ".", "get", "(", "'path'", ")", ")", "elif", "request", ".", "get", "(", "\"action\"", ")", "==", "\"edit\"", "and", "request", ".", "get", "(", "'path'", ")", "is", "not", "None", ":", "return", "self", ".", "action_edit", "(", "courseid", ",", "taskid", ",", "request", ".", "get", "(", "'path'", ")", ")", "else", ":", "return", "self", ".", "show_tab_file", "(", "courseid", ",", "taskid", ")" ]
Edit a task
[ "Edit", "a", "task" ]
python
train
60.4
lk-geimfari/mimesis
mimesis/providers/address.py
https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/providers/address.py#L221-L227
def latitude(self, dms: bool = False) -> Union[str, float]:
    """Generate a random value of latitude.

    :param dms: DMS format.
    :return: Value of latitude.
    """
    return self._get_fs('lt', dms)
[ "def", "latitude", "(", "self", ",", "dms", ":", "bool", "=", "False", ")", "->", "Union", "[", "str", ",", "float", "]", ":", "return", "self", ".", "_get_fs", "(", "'lt'", ",", "dms", ")" ]
Generate a random value of latitude. :param dms: DMS format. :return: Value of latitude.
[ "Generate", "a", "random", "value", "of", "latitude", "." ]
python
train
31.714286
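Usage sketch per mimesis' documented API; outputs are random, so the values shown are only examples:

from mimesis import Address

address = Address()
print(address.latitude())          # e.g. -45.327171
print(address.latitude(dms=True))  # e.g. a DMS-formatted string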
Contraz/demosys-py
demosys/effects/registry.py
https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/effects/registry.py#L9-L34
def parse_package_string(path):
    """
    Parse the effect package string.
    Can contain the package python path or path to effect class in an effect package.

    Examples::

        # Path to effect package
        examples.cubes

        # Path to effect class
        examples.cubes.Cubes

    Args:
        path: python path to effect package. May also include effect class name.

    Returns:
        tuple: (package_path, effect_class)
    """
    parts = path.split('.')

    # Is the last entry in the path capitalized?
    if parts[-1][0].isupper():
        return ".".join(parts[:-1]), parts[-1]

    return path, ""
[ "def", "parse_package_string", "(", "path", ")", ":", "parts", "=", "path", ".", "split", "(", "'.'", ")", "# Is the last entry in the path capitalized?", "if", "parts", "[", "-", "1", "]", "[", "0", "]", ".", "isupper", "(", ")", ":", "return", "\".\"", ".", "join", "(", "parts", "[", ":", "-", "1", "]", ")", ",", "parts", "[", "-", "1", "]", "return", "path", ",", "\"\"" ]
Parse the effect package string. Can contain the package python path or path to effect class in an effect package. Examples:: # Path to effect package examples.cubes # Path to effect class examples.cubes.Cubes Args: path: python path to effect package. May also include effect class name. Returns: tuple: (package_path, effect_class)
[ "Parse", "the", "effect", "package", "string", ".", "Can", "contain", "the", "package", "python", "path", "or", "path", "to", "effect", "class", "in", "an", "effect", "package", "." ]
python
valid
23.307692
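The docstring's two examples, exercised directly; the import path below is assumed from the record's path field:

from demosys.effects.registry import parse_package_string

# no capitalized last segment -> whole string is the package path
assert parse_package_string("examples.cubes") == ("examples.cubes", "")
# capitalized last segment -> split off as the effect class name
assert parse_package_string("examples.cubes.Cubes") == ("examples.cubes", "Cubes")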
PyCQA/astroid
astroid/rebuilder.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/rebuilder.py#L775-L777
def visit_pass(self, node, parent):
    """visit a Pass node by returning a fresh instance of it"""
    return nodes.Pass(node.lineno, node.col_offset, parent)
[ "def", "visit_pass", "(", "self", ",", "node", ",", "parent", ")", ":", "return", "nodes", ".", "Pass", "(", "node", ".", "lineno", ",", "node", ".", "col_offset", ",", "parent", ")" ]
visit a Pass node by returning a fresh instance of it
[ "visit", "a", "Pass", "node", "by", "returning", "a", "fresh", "instance", "of", "it" ]
python
train
55
googlefonts/fontmake
Lib/fontmake/font_project.py
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L187-L205
def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)):
    """Remove overlaps in UFOs' glyphs' contours."""
    from booleanOperations import union, BooleanOperationsError

    for ufo in ufos:
        font_name = self._font_name(ufo)
        logger.info("Removing overlaps for " + font_name)
        for glyph in ufo:
            if not glyph_filter(glyph):
                continue
            contours = list(glyph)
            glyph.clearContours()
            try:
                union(contours, glyph.getPointPen())
            except BooleanOperationsError:
                logger.error(
                    "Failed to remove overlaps for %s: %r", font_name, glyph.name
                )
                raise
[ "def", "remove_overlaps", "(", "self", ",", "ufos", ",", "glyph_filter", "=", "lambda", "g", ":", "len", "(", "g", ")", ")", ":", "from", "booleanOperations", "import", "union", ",", "BooleanOperationsError", "for", "ufo", "in", "ufos", ":", "font_name", "=", "self", ".", "_font_name", "(", "ufo", ")", "logger", ".", "info", "(", "\"Removing overlaps for \"", "+", "font_name", ")", "for", "glyph", "in", "ufo", ":", "if", "not", "glyph_filter", "(", "glyph", ")", ":", "continue", "contours", "=", "list", "(", "glyph", ")", "glyph", ".", "clearContours", "(", ")", "try", ":", "union", "(", "contours", ",", "glyph", ".", "getPointPen", "(", ")", ")", "except", "BooleanOperationsError", ":", "logger", ".", "error", "(", "\"Failed to remove overlaps for %s: %r\"", ",", "font_name", ",", "glyph", ".", "name", ")", "raise" ]
Remove overlaps in UFOs' glyphs' contours.
[ "Remove", "overlaps", "in", "UFOs", "glyphs", "contours", "." ]
python
train
40.842105
wtsi-hgi/python-git-subrepo
gitsubrepo/_common.py
https://github.com/wtsi-hgi/python-git-subrepo/blob/bb2eb2bd9a7e51b862298ddb4168cc5b8633dad0/gitsubrepo/_common.py#L10-L27
def run(arguments: List[str], execution_directory: str=None, execution_environment: Dict=None) -> str:
    """
    Runs the given arguments from the given directory (if given, else resorts
    to the (undefined) current directory).
    :param arguments: the CLI arguments to run
    :param execution_directory: the directory to execute the arguments in
    :param execution_environment: the environment to execute in
    :return: what is written to stdout following execution
    :exception RunException: called if the execution has a non-zero return code
    """
    process = subprocess.Popen(
        arguments, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
        stderr=subprocess.PIPE, cwd=execution_directory,
        env=execution_environment)
    out, error = process.communicate()
    stdout = out.decode(_DATA_ENCODING).rstrip()
    if process.returncode == _SUCCESS_RETURN_CODE:
        return stdout
    else:
        raise RunException(stdout, error.decode(_DATA_ENCODING).rstrip(), arguments, execution_directory)
[ "def", "run", "(", "arguments", ":", "List", "[", "str", "]", ",", "execution_directory", ":", "str", "=", "None", ",", "execution_environment", ":", "Dict", "=", "None", ")", "->", "str", ":", "process", "=", "subprocess", ".", "Popen", "(", "arguments", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "cwd", "=", "execution_directory", ",", "env", "=", "execution_environment", ")", "out", ",", "error", "=", "process", ".", "communicate", "(", ")", "stdout", "=", "out", ".", "decode", "(", "_DATA_ENCODING", ")", ".", "rstrip", "(", ")", "if", "process", ".", "returncode", "==", "_SUCCESS_RETURN_CODE", ":", "return", "stdout", "else", ":", "raise", "RunException", "(", "stdout", ",", "error", ".", "decode", "(", "_DATA_ENCODING", ")", ".", "rstrip", "(", ")", ",", "arguments", ",", "execution_directory", ")" ]
Runs the given arguments from the given directory (if given, else resorts to the (undefined) current directory). :param arguments: the CLI arguments to run :param execution_directory: the directory to execute the arguments in :param execution_environment: the environment to execute in :return: what is written to stdout following execution :exception RunException: called if the execution has a non-zero return code
[ "Runs", "the", "given", "arguments", "from", "the", "given", "directory", "(", "if", "given", "else", "resorts", "to", "the", "(", "undefined", ")", "current", "directory", ")", ".", ":", "param", "arguments", ":", "the", "CLI", "arguments", "to", "run", ":", "param", "execution_directory", ":", "the", "directory", "to", "execute", "the", "arguments", "in", ":", "param", "execution_environment", ":", "the", "environment", "to", "execute", "in", ":", "return", ":", "what", "is", "written", "to", "stdout", "following", "execution", ":", "exception", "RunException", ":", "called", "if", "the", "execution", "has", "a", "non", "-", "zero", "return", "code" ]
python
train
55.611111
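Usage sketch, assuming the wrapper and its RunException from the record above are in scope and that `git` is installed; the commands are illustrative:

print(run(["git", "--version"]))  # e.g. "git version 2.39.2"

try:
    run(["git", "no-such-subcommand"])
except RunException:
    print("non-zero exit code surfaced as RunException")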
gem/oq-engine
openquake/commonlib/readinput.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/readinput.py#L1113-L1142
def get_pmap_from_nrml(oqparam, fname):
    """
    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param fname:
        an XML file containing hazard curves
    :returns: site mesh, curve array
    """
    hcurves_by_imt = {}
    oqparam.hazard_imtls = imtls = {}
    for hcurves in nrml.read(fname):
        imt = hcurves['IMT']
        oqparam.investigation_time = hcurves['investigationTime']
        if imt == 'SA':
            imt += '(%s)' % hcurves['saPeriod']
        imtls[imt] = ~hcurves.IMLs
        data = sorted((~node.Point.pos, ~node.poEs) for node in hcurves[1:])
        hcurves_by_imt[imt] = numpy.array([d[1] for d in data])
    lons, lats = [], []
    for xy, poes in data:
        lons.append(xy[0])
        lats.append(xy[1])
    mesh = geo.Mesh(numpy.array(lons), numpy.array(lats))
    num_levels = sum(len(v) for v in imtls.values())
    array = numpy.zeros((len(mesh), num_levels))
    imtls = DictArray(imtls)
    for imt_ in hcurves_by_imt:
        array[:, imtls(imt_)] = hcurves_by_imt[imt_]
    return mesh, ProbabilityMap.from_array(array, range(len(mesh)))
[ "def", "get_pmap_from_nrml", "(", "oqparam", ",", "fname", ")", ":", "hcurves_by_imt", "=", "{", "}", "oqparam", ".", "hazard_imtls", "=", "imtls", "=", "{", "}", "for", "hcurves", "in", "nrml", ".", "read", "(", "fname", ")", ":", "imt", "=", "hcurves", "[", "'IMT'", "]", "oqparam", ".", "investigation_time", "=", "hcurves", "[", "'investigationTime'", "]", "if", "imt", "==", "'SA'", ":", "imt", "+=", "'(%s)'", "%", "hcurves", "[", "'saPeriod'", "]", "imtls", "[", "imt", "]", "=", "~", "hcurves", ".", "IMLs", "data", "=", "sorted", "(", "(", "~", "node", ".", "Point", ".", "pos", ",", "~", "node", ".", "poEs", ")", "for", "node", "in", "hcurves", "[", "1", ":", "]", ")", "hcurves_by_imt", "[", "imt", "]", "=", "numpy", ".", "array", "(", "[", "d", "[", "1", "]", "for", "d", "in", "data", "]", ")", "lons", ",", "lats", "=", "[", "]", ",", "[", "]", "for", "xy", ",", "poes", "in", "data", ":", "lons", ".", "append", "(", "xy", "[", "0", "]", ")", "lats", ".", "append", "(", "xy", "[", "1", "]", ")", "mesh", "=", "geo", ".", "Mesh", "(", "numpy", ".", "array", "(", "lons", ")", ",", "numpy", ".", "array", "(", "lats", ")", ")", "num_levels", "=", "sum", "(", "len", "(", "v", ")", "for", "v", "in", "imtls", ".", "values", "(", ")", ")", "array", "=", "numpy", ".", "zeros", "(", "(", "len", "(", "mesh", ")", ",", "num_levels", ")", ")", "imtls", "=", "DictArray", "(", "imtls", ")", "for", "imt_", "in", "hcurves_by_imt", ":", "array", "[", ":", ",", "imtls", "(", "imt_", ")", "]", "=", "hcurves_by_imt", "[", "imt_", "]", "return", "mesh", ",", "ProbabilityMap", ".", "from_array", "(", "array", ",", "range", "(", "len", "(", "mesh", ")", ")", ")" ]
:param oqparam: an :class:`openquake.commonlib.oqvalidation.OqParam` instance :param fname: an XML file containing hazard curves :returns: site mesh, curve array
[ ":", "param", "oqparam", ":", "an", ":", "class", ":", "openquake", ".", "commonlib", ".", "oqvalidation", ".", "OqParam", "instance", ":", "param", "fname", ":", "an", "XML", "file", "containing", "hazard", "curves", ":", "returns", ":", "site", "mesh", "curve", "array" ]
python
train
37.066667
apache/incubator-mxnet
tools/rec2idx.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/rec2idx.py#L65-L70
def tell(self):
    """Returns the current position of read head.
    """
    pos = ctypes.c_size_t()
    check_call(_LIB.MXRecordIOReaderTell(self.handle, ctypes.byref(pos)))
    return pos.value
[ "def", "tell", "(", "self", ")", ":", "pos", "=", "ctypes", ".", "c_size_t", "(", ")", "check_call", "(", "_LIB", ".", "MXRecordIOReaderTell", "(", "self", ".", "handle", ",", "ctypes", ".", "byref", "(", "pos", ")", ")", ")", "return", "pos", ".", "value" ]
Returns the current position of read head.
[ "Returns", "the", "current", "position", "of", "read", "head", "." ]
python
train
35.166667
ArchiveTeam/wpull
wpull/processor/web.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/processor/web.py#L370-L389
def _add_post_data(self, request: Request):
    '''Add data to the payload.'''
    if self._item_session.url_record.post_data:
        data = wpull.string.to_bytes(self._item_session.url_record.post_data)
    else:
        data = wpull.string.to_bytes(
            self._processor.fetch_params.post_data
        )

    request.method = 'POST'
    request.fields['Content-Type'] = 'application/x-www-form-urlencoded'
    request.fields['Content-Length'] = str(len(data))

    _logger.debug('Posting with data {0}.', data)

    if not request.body:
        request.body = Body(io.BytesIO())

    with wpull.util.reset_file_offset(request.body):
        request.body.write(data)
[ "def", "_add_post_data", "(", "self", ",", "request", ":", "Request", ")", ":", "if", "self", ".", "_item_session", ".", "url_record", ".", "post_data", ":", "data", "=", "wpull", ".", "string", ".", "to_bytes", "(", "self", ".", "_item_session", ".", "url_record", ".", "post_data", ")", "else", ":", "data", "=", "wpull", ".", "string", ".", "to_bytes", "(", "self", ".", "_processor", ".", "fetch_params", ".", "post_data", ")", "request", ".", "method", "=", "'POST'", "request", ".", "fields", "[", "'Content-Type'", "]", "=", "'application/x-www-form-urlencoded'", "request", ".", "fields", "[", "'Content-Length'", "]", "=", "str", "(", "len", "(", "data", ")", ")", "_logger", ".", "debug", "(", "'Posting with data {0}.'", ",", "data", ")", "if", "not", "request", ".", "body", ":", "request", ".", "body", "=", "Body", "(", "io", ".", "BytesIO", "(", ")", ")", "with", "wpull", ".", "util", ".", "reset_file_offset", "(", "request", ".", "body", ")", ":", "request", ".", "body", ".", "write", "(", "data", ")" ]
Add data to the payload.
[ "Add", "data", "to", "the", "payload", "." ]
python
train
35.8
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/process.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/process.py#L4660-L4693
def scan_processes_fast(self):
    """
    Populates the snapshot with running processes.
    Only the PID is retrieved for each process.

    Dead processes are removed.
    Threads and modules of living processes are ignored.

    Typically you don't need to call this method directly, if unsure use
    L{scan} instead.

    @note: This method uses the PSAPI. It may be faster for scanning,
        but some information may be missing, outdated or slower to obtain.
        This could be a good tradeoff under some circumstances.
    """

    # Get the new and old list of pids
    new_pids = set( win32.EnumProcesses() )
    old_pids = set( compat.iterkeys(self.__processDict) )

    # Ignore our own pid
    our_pid = win32.GetCurrentProcessId()
    if our_pid in new_pids:
        new_pids.remove(our_pid)
    if our_pid in old_pids:
        old_pids.remove(our_pid)

    # Add newly found pids
    for pid in new_pids.difference(old_pids):
        self._add_process( Process(pid) )

    # Remove missing pids
    for pid in old_pids.difference(new_pids):
        self._del_process(pid)
[ "def", "scan_processes_fast", "(", "self", ")", ":", "# Get the new and old list of pids", "new_pids", "=", "set", "(", "win32", ".", "EnumProcesses", "(", ")", ")", "old_pids", "=", "set", "(", "compat", ".", "iterkeys", "(", "self", ".", "__processDict", ")", ")", "# Ignore our own pid", "our_pid", "=", "win32", ".", "GetCurrentProcessId", "(", ")", "if", "our_pid", "in", "new_pids", ":", "new_pids", ".", "remove", "(", "our_pid", ")", "if", "our_pid", "in", "old_pids", ":", "old_pids", ".", "remove", "(", "our_pid", ")", "# Add newly found pids", "for", "pid", "in", "new_pids", ".", "difference", "(", "old_pids", ")", ":", "self", ".", "_add_process", "(", "Process", "(", "pid", ")", ")", "# Remove missing pids", "for", "pid", "in", "old_pids", ".", "difference", "(", "new_pids", ")", ":", "self", ".", "_del_process", "(", "pid", ")" ]
Populates the snapshot with running processes. Only the PID is retrieved for each process. Dead processes are removed. Threads and modules of living processes are ignored. Typically you don't need to call this method directly, if unsure use L{scan} instead. @note: This method uses the PSAPI. It may be faster for scanning, but some information may be missing, outdated or slower to obtain. This could be a good tradeoff under some circumstances.
[ "Populates", "the", "snapshot", "with", "running", "processes", ".", "Only", "the", "PID", "is", "retrieved", "for", "each", "process", "." ]
python
train
34.235294
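The set-difference idiom the method is built on, in isolation (pid values illustrative):

old_pids = {100, 200, 300}
new_pids = {200, 300, 400}

print(new_pids.difference(old_pids))  # {400} -> processes to add
print(old_pids.difference(new_pids))  # {100} -> processes to remove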
materialsproject/pymatgen
pymatgen/core/tensors.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/tensors.py#L917-L922
def get_uvec(vec):
    """ Gets a unit vector parallel to input vector"""
    l = np.linalg.norm(vec)
    if l < 1e-8:
        return vec
    return vec / l
[ "def", "get_uvec", "(", "vec", ")", ":", "l", "=", "np", ".", "linalg", ".", "norm", "(", "vec", ")", "if", "l", "<", "1e-8", ":", "return", "vec", "return", "vec", "/", "l" ]
Gets a unit vector parallel to input vector
[ "Gets", "a", "unit", "vector", "parallel", "to", "input", "vector" ]
python
train
25.166667
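Behaviour sketch with numpy, assuming the helper from the record above is in scope; the tolerance guard returns near-zero vectors unchanged rather than dividing by ~0:

import numpy as np

print(get_uvec(np.array([3.0, 4.0])))  # [0.6 0.8]
print(get_uvec(np.zeros(3)))           # [0. 0. 0.] - returned as-is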
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/exportxml.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/exportxml.py#L396-L421
def add_ne(self, ne):
    """
    Parameters
    ----------
    ne : etree.Element
        etree representation of a <ne> element
        (marks a text span -- (one or more <node> or <word> elements)
        as a named entity)

    Example
    -------
    <ne xml:id="ne_23" type="PER">
        <word xml:id="s3_2" form="Ute" pos="NE" morph="nsf" lemma="Ute" func="-" parent="s3_501" dephead="s3_1" deprel="APP"/>
        <word xml:id="s3_3" form="Wedemeier" pos="NE" morph="nsf" lemma="Wedemeier" func="-" parent="s3_501" dephead="s3_2" deprel="APP"/>
    </ne>
    """
    ne_id = self.get_element_id(ne)
    ne_label = 'ne:'+ne.attrib['type']
    self.add_node(ne_id, layers={self.ns, self.ns+':ne'},
                  attr_dict=self.element_attribs_to_dict(ne),
                  label=ne_label)
    # possible children: [('word', 78703), ('node', 11152), ('ne', 49)]
    for child in ne.iterchildren():
        child_id = self.get_element_id(child)
        self.add_edge(ne_id, child_id, layers={self.ns, self.ns+':ne'},
                      edge_type=dg.EdgeTypes.spanning_relation,
                      label=ne_label)
[ "def", "add_ne", "(", "self", ",", "ne", ")", ":", "ne_id", "=", "self", ".", "get_element_id", "(", "ne", ")", "ne_label", "=", "'ne:'", "+", "ne", ".", "attrib", "[", "'type'", "]", "self", ".", "add_node", "(", "ne_id", ",", "layers", "=", "{", "self", ".", "ns", ",", "self", ".", "ns", "+", "':ne'", "}", ",", "attr_dict", "=", "self", ".", "element_attribs_to_dict", "(", "ne", ")", ",", "label", "=", "ne_label", ")", "# possible children: [('word', 78703), ('node', 11152), ('ne', 49)]", "for", "child", "in", "ne", ".", "iterchildren", "(", ")", ":", "child_id", "=", "self", ".", "get_element_id", "(", "child", ")", "self", ".", "add_edge", "(", "ne_id", ",", "child_id", ",", "layers", "=", "{", "self", ".", "ns", ",", "self", ".", "ns", "+", "':ne'", "}", ",", "edge_type", "=", "dg", ".", "EdgeTypes", ".", "spanning_relation", ",", "label", "=", "ne_label", ")" ]
Parameters ---------- ne : etree.Element etree representation of a <ne> element (marks a text span -- (one or more <node> or <word> elements) as a named entity) Example ------- <ne xml:id="ne_23" type="PER"> <word xml:id="s3_2" form="Ute" pos="NE" morph="nsf" lemma="Ute" func="-" parent="s3_501" dephead="s3_1" deprel="APP"/> <word xml:id="s3_3" form="Wedemeier" pos="NE" morph="nsf" lemma="Wedemeier" func="-" parent="s3_501" dephead="s3_2" deprel="APP"/> </ne>
[ "Parameters", "----------", "ne", ":", "etree", ".", "Element", "etree", "representation", "of", "a", "<ne", ">", "element", "(", "marks", "a", "text", "span", "--", "(", "one", "or", "more", "<node", ">", "or", "<word", ">", "elements", ")", "as", "a", "named", "entity", ")" ]
python
train
46.153846
project-ncl/pnc-cli
pnc_cli/swagger_client/apis/licenses_api.py
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/swagger_client/apis/licenses_api.py#L145-L169
def delete(self, id, **kwargs):
    """
    Deletes an existing License
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.delete(id, callback=callback_function)

    :param callback function: The callback function for asynchronous request. (optional)
    :param int id: License id (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.delete_with_http_info(id, **kwargs)
    else:
        (data) = self.delete_with_http_info(id, **kwargs)
        return data
[ "def", "delete", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'callback'", ")", ":", "return", "self", ".", "delete_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "delete_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Deletes an existing License This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: License id (required) :return: None If the method is called asynchronously, returns the request thread.
[ "Deletes", "an", "existing", "License", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "define", "a", "callback", "function", "to", "be", "invoked", "when", "receiving", "the", "response", ".", ">>>", "def", "callback_function", "(", "response", ")", ":", ">>>", "pprint", "(", "response", ")", ">>>", ">>>", "thread", "=", "api", ".", "delete", "(", "id", "callback", "=", "callback_function", ")" ]
python
train
37.84
staggerpkg/stagger
stagger/frames.py
https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/frames.py#L113-L120
def _in_version(self, *versions):
    "Returns true if this frame is in any of the specified versions of ID3."
    for version in versions:
        if (self._version == version
            or (isinstance(self._version, collections.Container)
                and version in self._version)):
            return True
    return False
[ "def", "_in_version", "(", "self", ",", "*", "versions", ")", ":", "for", "version", "in", "versions", ":", "if", "(", "self", ".", "_version", "==", "version", "or", "(", "isinstance", "(", "self", ".", "_version", ",", "collections", ".", "Container", ")", "and", "version", "in", "self", ".", "_version", ")", ")", ":", "return", "True", "return", "False" ]
Returns true if this frame is in any of the specified versions of ID3.
[ "Returns", "true", "if", "this", "frame", "is", "in", "any", "of", "the", "specified", "versions", "of", "ID3", "." ]
python
train
44
jic-dtool/dtoolcore
dtoolcore/utils.py
https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/utils.py#L42-L59
def generous_parse_uri(uri):
    """Return a urlparse.ParseResult object with the results of parsing the
    given URI. This has the same properties as the result of parse_uri.

    When passed a relative path, it determines the absolute path, sets the
    scheme to file, the netloc to localhost and returns a parse of the result.
    """
    parse_result = urlparse(uri)

    if parse_result.scheme == '':
        abspath = os.path.abspath(parse_result.path)
        if IS_WINDOWS:
            abspath = windows_to_unix_path(abspath)
        fixed_uri = "file://{}".format(abspath)
        parse_result = urlparse(fixed_uri)

    return parse_result
[ "def", "generous_parse_uri", "(", "uri", ")", ":", "parse_result", "=", "urlparse", "(", "uri", ")", "if", "parse_result", ".", "scheme", "==", "''", ":", "abspath", "=", "os", ".", "path", ".", "abspath", "(", "parse_result", ".", "path", ")", "if", "IS_WINDOWS", ":", "abspath", "=", "windows_to_unix_path", "(", "abspath", ")", "fixed_uri", "=", "\"file://{}\"", ".", "format", "(", "abspath", ")", "parse_result", "=", "urlparse", "(", "fixed_uri", ")", "return", "parse_result" ]
Return a urlparse.ParseResult object with the results of parsing the given URI. This has the same properties as the result of parse_uri. When passed a relative path, it determines the absolute path, sets the scheme to file, the netloc to localhost and returns a parse of the result.
[ "Return", "a", "urlparse", ".", "ParseResult", "object", "with", "the", "results", "of", "parsing", "the", "given", "URI", ".", "This", "has", "the", "same", "properties", "as", "the", "result", "of", "parse_uri", "." ]
python
train
35.277778
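Usage sketch on a POSIX system; the relative path is illustrative and the import path is taken from the record:

from dtoolcore.utils import generous_parse_uri

result = generous_parse_uri("data/my_dataset")
print(result.scheme)  # file
print(result.path)    # absolute path to data/my_dataset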
IRC-SPHERE/HyperStream
hyperstream/utils/time_utils.py
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/utils/time_utils.py#L90-L98
def reconstruct_interval(experiment_id):
    """
    Reverse the construct_experiment_id operation

    :param experiment_id: The experiment id
    :return: time interval
    """
    start, end = map(lambda x: udatetime.utcfromtimestamp(x / 1000.0),
                     map(float, experiment_id.split("-")))
    from ..time_interval import TimeInterval
    return TimeInterval(start, end)
[ "def", "reconstruct_interval", "(", "experiment_id", ")", ":", "start", ",", "end", "=", "map", "(", "lambda", "x", ":", "udatetime", ".", "utcfromtimestamp", "(", "x", "/", "1000.0", ")", ",", "map", "(", "float", ",", "experiment_id", ".", "split", "(", "\"-\"", ")", ")", ")", "from", ".", ".", "time_interval", "import", "TimeInterval", "return", "TimeInterval", "(", "start", ",", "end", ")" ]
Reverse the construct_experiment_id operation :param experiment_id: The experiment id :return: time interval
[ "Reverse", "the", "construct_experiment_id", "operation", ":", "param", "experiment_id", ":", "The", "experiment", "id", ":", "return", ":", "time", "interval" ]
python
train
39.888889
cloudmesh-cmd3/cmd3
fabfile/doc.py
https://github.com/cloudmesh-cmd3/cmd3/blob/92e33c96032fd3921f159198a0e57917c4dc34ed/fabfile/doc.py#L70-L80
def html(theme_name='readthedocs'):
    """build the doc locally and view"""
    # disable Flask RSTPAGES due to sphinx incompatibility
    os.environ['RSTPAGES'] = 'FALSE'
    theme(theme_name)
    api()
    man()
    clean()
    local("cd docs; make html")
    local("fab security.check")
    local("touch docs/build/html/.nojekyll")
[ "def", "html", "(", "theme_name", "=", "'readthedocs'", ")", ":", "# disable Flask RSTPAGES due to sphinx incompatibility", "os", ".", "environ", "[", "'RSTPAGES'", "]", "=", "'FALSE'", "theme", "(", "theme_name", ")", "api", "(", ")", "man", "(", ")", "clean", "(", ")", "local", "(", "\"cd docs; make html\"", ")", "local", "(", "\"fab security.check\"", ")", "local", "(", "\"touch docs/build/html/.nojekyll\"", ")" ]
build the doc locally and view
[ "build", "the", "doc", "locally", "and", "view" ]
python
train
29.545455
plivo/plivohelper-python
plivohelper.py
https://github.com/plivo/plivohelper-python/blob/a2f706d69e2138fbb973f792041341f662072d26/plivohelper.py#L160-L165
def bulk_call(self, call_params):
    """REST BulkCalls Helper
    """
    path = '/' + self.api_version + '/BulkCall/'
    method = 'POST'
    return self.request(path, method, call_params)
[ "def", "bulk_call", "(", "self", ",", "call_params", ")", ":", "path", "=", "'/'", "+", "self", ".", "api_version", "+", "'/BulkCall/'", "method", "=", "'POST'", "return", "self", ".", "request", "(", "path", ",", "method", ",", "call_params", ")" ]
REST BulkCalls Helper
[ "REST", "BulkCalls", "Helper" ]
python
valid
34.166667
sebp/scikit-survival
sksurv/svm/survival_svm.py
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/svm/survival_svm.py#L825-L851
def predict(self, X):
    """Rank samples according to survival times

    Lower ranks indicate shorter survival, higher ranks longer survival.

    Parameters
    ----------
    X : array-like, shape = (n_samples, n_features)
        The input samples.

    Returns
    -------
    y : ndarray, shape = (n_samples,)
        Predicted ranks.
    """
    val = numpy.dot(X, self.coef_)
    if hasattr(self, "intercept_"):
        val += self.intercept_

    # Order by increasing survival time if objective is pure ranking
    if self.rank_ratio == 1:
        val *= -1
    else:
        # model was fitted on log(time), transform to original scale
        val = numpy.exp(val)

    return val
[ "def", "predict", "(", "self", ",", "X", ")", ":", "val", "=", "numpy", ".", "dot", "(", "X", ",", "self", ".", "coef_", ")", "if", "hasattr", "(", "self", ",", "\"intercept_\"", ")", ":", "val", "+=", "self", ".", "intercept_", "# Order by increasing survival time if objective is pure ranking", "if", "self", ".", "rank_ratio", "==", "1", ":", "val", "*=", "-", "1", "else", ":", "# model was fitted on log(time), transform to original scale", "val", "=", "numpy", ".", "exp", "(", "val", ")", "return", "val" ]
Rank samples according to survival times Lower ranks indicate shorter survival, higher ranks longer survival. Parameters ---------- X : array-like, shape = (n_samples, n_features) The input samples. Returns ------- y : ndarray, shape = (n_samples,) Predicted ranks.
[ "Rank", "samples", "according", "to", "survival", "times" ]
python
train
27.777778
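A hedged end-to-end sketch with scikit-survival's FastSurvivalSVM, one of the estimators this method belongs to; the dataset helpers are part of sksurv's public API, and the hyperparameters are illustrative:

from sksurv.datasets import load_veterans_lung_cancer
from sksurv.column import encode_categorical
from sksurv.svm import FastSurvivalSVM

X, y = load_veterans_lung_cancer()
X = encode_categorical(X)

model = FastSurvivalSVM(rank_ratio=1.0, max_iter=40, random_state=0)
model.fit(X.values, y)
risk_ranks = model.predict(X.values)  # lower rank = shorter predicted survival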
chaoss/grimoirelab-kingarthur
arthur/arthur.py
https://github.com/chaoss/grimoirelab-kingarthur/blob/9d6a638bee68d5e5c511f045eeebf06340fd3252/arthur/arthur.py#L106-L117
def items(self):
    """Get the items fetched by the jobs."""
    # Get and remove queued items in an atomic transaction
    pipe = self.conn.pipeline()
    pipe.lrange(Q_STORAGE_ITEMS, 0, -1)
    pipe.ltrim(Q_STORAGE_ITEMS, 1, 0)
    items = pipe.execute()[0]

    for item in items:
        item = pickle.loads(item)
        yield item
[ "def", "items", "(", "self", ")", ":", "# Get and remove queued items in an atomic transaction", "pipe", "=", "self", ".", "conn", ".", "pipeline", "(", ")", "pipe", ".", "lrange", "(", "Q_STORAGE_ITEMS", ",", "0", ",", "-", "1", ")", "pipe", ".", "ltrim", "(", "Q_STORAGE_ITEMS", ",", "1", ",", "0", ")", "items", "=", "pipe", ".", "execute", "(", ")", "[", "0", "]", "for", "item", "in", "items", ":", "item", "=", "pickle", ".", "loads", "(", "item", ")", "yield", "item" ]
Get the items fetched by the jobs.
[ "Get", "the", "items", "fetched", "by", "the", "jobs", "." ]
python
test
30.25
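The same atomic read-and-clear idiom with redis-py directly; this requires a reachable Redis server, and the queue name is illustrative:

import pickle
import redis

conn = redis.Redis()
pipe = conn.pipeline()
pipe.lrange("items", 0, -1)  # read the whole list...
pipe.ltrim("items", 1, 0)    # ...and empty it in the same transaction
raw_items = pipe.execute()[0]

objects = [pickle.loads(raw) for raw in raw_items]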
ph4r05/monero-serialize
monero_serialize/xmrobj.py
https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrobj.py#L70-L88
async def dump_blob(elem, elem_type=None):
    """
    Dumps blob message.
    Supports both blob and raw value.

    :param writer:
    :param elem:
    :param elem_type:
    :param params:
    :return:
    """
    elem_is_blob = isinstance(elem, x.BlobType)
    data = getattr(elem, x.BlobType.DATA_ATTR) if elem_is_blob else elem
    if data is None or len(data) == 0:
        return b''
    if isinstance(data, (bytes, bytearray, list)):
        return base64.b16encode(bytes(data))
    else:
        raise ValueError('Unknown blob type')
[ "async", "def", "dump_blob", "(", "elem", ",", "elem_type", "=", "None", ")", ":", "elem_is_blob", "=", "isinstance", "(", "elem", ",", "x", ".", "BlobType", ")", "data", "=", "getattr", "(", "elem", ",", "x", ".", "BlobType", ".", "DATA_ATTR", ")", "if", "elem_is_blob", "else", "elem", "if", "data", "is", "None", "or", "len", "(", "data", ")", "==", "0", ":", "return", "b''", "if", "isinstance", "(", "data", ",", "(", "bytes", ",", "bytearray", ",", "list", ")", ")", ":", "return", "base64", ".", "b16encode", "(", "bytes", "(", "data", ")", ")", "else", ":", "raise", "ValueError", "(", "'Unknown blob type'", ")" ]
Dumps blob message. Supports both blob and raw value. :param writer: :param elem: :param elem_type: :param params: :return:
[ "Dumps", "blob", "message", ".", "Supports", "both", "blob", "and", "raw", "value", "." ]
python
train
27.578947
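What the raw-value branch above returns, in isolation:

import base64

print(base64.b16encode(bytes([0xde, 0xad, 0xbe, 0xef])))  # b'DEADBEEF'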
pyviz/geoviews
geoviews/element/geo.py
https://github.com/pyviz/geoviews/blob/cc70ac2d5a96307769bc6192eaef8576c3d24b30/geoviews/element/geo.py#L661-L692
def from_shapefile(cls, shapefile, *args, **kwargs):
    """
    Loads a shapefile from disk and optionally merges it with a
    dataset. See ``from_records`` for full signature.

    Parameters
    ----------
    records: list of cartopy.io.shapereader.Record
       Iterator containing Records.
    dataset: holoviews.Dataset
       Any HoloViews Dataset type.
    on: str or list or dict
       A mapping between the attribute names in the records and the
       dimensions in the dataset.
    value: str
       The value dimension in the dataset the values will be drawn
       from.
    index: str or list
       One or more dimensions in the dataset the Shapes will be
       indexed by.
    drop_missing: boolean
       Whether to drop shapes which are missing from the provided
       dataset.

    Returns
    -------
    shapes: Polygons or Path object
       A Polygons or Path object containing the geometries
    """
    reader = Reader(shapefile)
    return cls.from_records(reader.records(), *args, **kwargs)
[ "def", "from_shapefile", "(", "cls", ",", "shapefile", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "reader", "=", "Reader", "(", "shapefile", ")", "return", "cls", ".", "from_records", "(", "reader", ".", "records", "(", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Loads a shapefile from disk and optionally merges it with a dataset. See ``from_records`` for full signature. Parameters ---------- records: list of cartopy.io.shapereader.Record Iterator containing Records. dataset: holoviews.Dataset Any HoloViews Dataset type. on: str or list or dict A mapping between the attribute names in the records and the dimensions in the dataset. value: str The value dimension in the dataset the values will be drawn from. index: str or list One or more dimensions in the dataset the Shapes will be indexed by. drop_missing: boolean Whether to drop shapes which are missing from the provided dataset. Returns ------- shapes: Polygons or Path object A Polygons or Path object containing the geometries
[ "Loads", "a", "shapefile", "from", "disk", "and", "optionally", "merges", "it", "with", "a", "dataset", ".", "See", "from_records", "for", "full", "signature", "." ]
python
train
34.46875
celiao/tmdbsimple
tmdbsimple/tv.py
https://github.com/celiao/tmdbsimple/blob/ff17893110c99771d6398a62c35d36dd9735f4b9/tmdbsimple/tv.py#L177-L193
def similar(self, **kwargs):
    """
    Get the similar TV series for a specific TV series id.

    Args:
        page: (optional) Minimum value of 1. Expected value is an integer.
        language: (optional) ISO 639-1 code.
        append_to_response: (optional) Comma separated, any TV method.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    path = self._get_id_path('similar')

    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
[ "def", "similar", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_id_path", "(", "'similar'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(", "response", ")", "return", "response" ]
Get the similar TV series for a specific TV series id. Args: page: (optional) Minimum value of 1. Expected value is an integer. language: (optional) ISO 639-1 code. append_to_response: (optional) Comma separated, any TV method. Returns: A dict representation of the JSON returned from the API.
[ "Get", "the", "similar", "TV", "series", "for", "a", "specific", "TV", "series", "id", "." ]
python
test
33.058824
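Usage sketch following tmdbsimple's documented pattern; the API key and TV id are placeholders and a live network call is made:

import tmdbsimple as tmdb

tmdb.API_KEY = "YOUR_API_KEY"  # placeholder
show = tmdb.TV(1396)           # illustrative series id
show.similar(page=1)           # sets attributes from the response
for series in show.results:
    print(series["name"])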
saltstack/salt
salt/modules/vsphere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L3864-L3877
def _apply_dvs_capability(capability_spec, capability_dict):
    '''
    Applies the values of the capability_dict dictionary to a DVS capability
    object (vim.vim.DVSCapability)
    '''
    if 'operation_supported' in capability_dict:
        capability_spec.dvsOperationSupported = \
            capability_dict['operation_supported']
    if 'port_operation_supported' in capability_dict:
        capability_spec.dvPortOperationSupported = \
            capability_dict['port_operation_supported']
    if 'portgroup_operation_supported' in capability_dict:
        capability_spec.dvPortGroupOperationSupported = \
            capability_dict['portgroup_operation_supported']
[ "def", "_apply_dvs_capability", "(", "capability_spec", ",", "capability_dict", ")", ":", "if", "'operation_supported'", "in", "capability_dict", ":", "capability_spec", ".", "dvsOperationSupported", "=", "capability_dict", "[", "'operation_supported'", "]", "if", "'port_operation_supported'", "in", "capability_dict", ":", "capability_spec", ".", "dvPortOperationSupported", "=", "capability_dict", "[", "'port_operation_supported'", "]", "if", "'portgroup_operation_supported'", "in", "capability_dict", ":", "capability_spec", ".", "dvPortGroupOperationSupported", "=", "capability_dict", "[", "'portgroup_operation_supported'", "]" ]
Applies the values of the capability_dict dictionary to a DVS capability object (vim.vim.DVSCapability)
[ "Applies", "the", "values", "of", "the", "capability_dict", "dictionary", "to", "a", "DVS", "capability", "object", "(", "vim", ".", "vim", ".", "DVSCapability", ")" ]
python
train
48.428571
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/germline.py
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/germline.py#L379-L416
def parse_manifest(path_to_manifest):
    """
    Parses manifest file for Toil Germline Pipeline

    :param str path_to_manifest: Path to sample manifest file
    :return: List of GermlineSample namedtuples
    :rtype: list[GermlineSample]
    """
    bam_re = r"^(?P<uuid>\S+)\s(?P<url>\S+[bsc][r]?am)"
    fq_re = r"^(?P<uuid>\S+)\s(?P<url>\S+)\s(?P<paired_url>\S+)?\s?(?P<rg_line>@RG\S+)"
    samples = []
    with open(path_to_manifest, 'r') as f:
        for line in f.readlines():
            line = line.strip()
            if line.startswith('#'):
                continue
            bam_match = re.match(bam_re, line)
            fastq_match = re.match(fq_re, line)
            if bam_match:
                uuid = bam_match.group('uuid')
                url = bam_match.group('url')
                paired_url = None
                rg_line = None
                require('.bam' in url.lower(),
                        'Expected .bam extension:\n{}:\t{}'.format(uuid, url))
            elif fastq_match:
                uuid = fastq_match.group('uuid')
                url = fastq_match.group('url')
                paired_url = fastq_match.group('paired_url')
                rg_line = fastq_match.group('rg_line')
                require('.fq' in url.lower() or '.fastq' in url.lower(),
                        'Expected .fq extension:\n{}:\t{}'.format(uuid, url))
            else:
                raise ValueError('Could not parse entry in manifest: %s\n%s' % (f.name, line))
            # Checks that URL has a scheme
            require(urlparse(url).scheme, 'Invalid URL passed for {}'.format(url))
            samples.append(GermlineSample(uuid, url, paired_url, rg_line))
    return samples
[ "def", "parse_manifest", "(", "path_to_manifest", ")", ":", "bam_re", "=", "r\"^(?P<uuid>\\S+)\\s(?P<url>\\S+[bsc][r]?am)\"", "fq_re", "=", "r\"^(?P<uuid>\\S+)\\s(?P<url>\\S+)\\s(?P<paired_url>\\S+)?\\s?(?P<rg_line>@RG\\S+)\"", "samples", "=", "[", "]", "with", "open", "(", "path_to_manifest", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "continue", "bam_match", "=", "re", ".", "match", "(", "bam_re", ",", "line", ")", "fastq_match", "=", "re", ".", "match", "(", "fq_re", ",", "line", ")", "if", "bam_match", ":", "uuid", "=", "bam_match", ".", "group", "(", "'uuid'", ")", "url", "=", "bam_match", ".", "group", "(", "'url'", ")", "paired_url", "=", "None", "rg_line", "=", "None", "require", "(", "'.bam'", "in", "url", ".", "lower", "(", ")", ",", "'Expected .bam extension:\\n{}:\\t{}'", ".", "format", "(", "uuid", ",", "url", ")", ")", "elif", "fastq_match", ":", "uuid", "=", "fastq_match", ".", "group", "(", "'uuid'", ")", "url", "=", "fastq_match", ".", "group", "(", "'url'", ")", "paired_url", "=", "fastq_match", ".", "group", "(", "'paired_url'", ")", "rg_line", "=", "fastq_match", ".", "group", "(", "'rg_line'", ")", "require", "(", "'.fq'", "in", "url", ".", "lower", "(", ")", "or", "'.fastq'", "in", "url", ".", "lower", "(", ")", ",", "'Expected .fq extension:\\n{}:\\t{}'", ".", "format", "(", "uuid", ",", "url", ")", ")", "else", ":", "raise", "ValueError", "(", "'Could not parse entry in manifest: %s\\n%s'", "%", "(", "f", ".", "name", ",", "line", ")", ")", "# Checks that URL has a scheme", "require", "(", "urlparse", "(", "url", ")", ".", "scheme", ",", "'Invalid URL passed for {}'", ".", "format", "(", "url", ")", ")", "samples", ".", "append", "(", "GermlineSample", "(", "uuid", ",", "url", ",", "paired_url", ",", "rg_line", ")", ")", "return", "samples" ]
Parses manifest file for Toil Germline Pipeline :param str path_to_manifest: Path to sample manifest file :return: List of GermlineSample namedtuples :rtype: list[GermlineSample]
[ "Parses", "manifest", "file", "for", "Toil", "Germline", "Pipeline" ]
python
train
44.078947
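A worked example of what the bam regex above accepts (the uuid and URL are illustrative):

import re

bam_re = r"^(?P<uuid>\S+)\s(?P<url>\S+[bsc][r]?am)"
m = re.match(bam_re, "sample-1 s3://bucket/sample1.bam")
print(m.group("uuid"), m.group("url"))  # sample-1 s3://bucket/sample1.bam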
MacHu-GWU/rolex-project
rolex/generator.py
https://github.com/MacHu-GWU/rolex-project/blob/a1111b410ed04b4b6eddd81df110fa2dacfa6537/rolex/generator.py#L259-L278
def rnd_date(start=date(1970, 1, 1), end=None, **kwargs):
    """
    Generate a random date between ``start`` and ``end``.

    :param start: Left bound
    :type start: string or datetime.date, (default date(1970, 1, 1))
    :param end: Right bound
    :type end: string or datetime.date, (default date.today())
    :return: a datetime.date object

    (Chinese documentation, translated): Randomly generate a date between
    ``start`` and ``end``.
    """
    if end is None:
        end = date.today()
    start = parser.parse_date(start)
    end = parser.parse_date(end)
    _assert_correct_start_end(start, end)
    return _rnd_date(start, end)
[ "def", "rnd_date", "(", "start", "=", "date", "(", "1970", ",", "1", ",", "1", ")", ",", "end", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "end", "is", "None", ":", "end", "=", "date", ".", "today", "(", ")", "start", "=", "parser", ".", "parse_date", "(", "start", ")", "end", "=", "parser", ".", "parse_date", "(", "end", ")", "_assert_correct_start_end", "(", "start", ",", "end", ")", "return", "_rnd_date", "(", "start", ",", "end", ")" ]
Generate a random date between ``start`` and ``end``. :param start: Left bound :type start: string or datetime.date, (default date(1970, 1, 1)) :param end: Right bound :type end: string or datetime.date, (default date.today()) :return: a datetime.date object (Chinese documentation, translated): Randomly generate a date between ``start`` and ``end``.
[ "Generate", "a", "random", "date", "between", "start", "to", "end", "." ]
python
train
29.2
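Usage sketch, assuming the generator from the record above is in scope; output is random by design:

from datetime import date

print(rnd_date(date(2020, 1, 1), date(2020, 12, 31)))  # e.g. 2020-07-19
print(rnd_date())  # anywhere between 1970-01-01 and today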
alfred82santa/aio-service-client
service_client/__init__.py
https://github.com/alfred82santa/aio-service-client/blob/dd9ad49e23067b22178534915aa23ba24f6ff39b/service_client/__init__.py#L222-L229
def close(self):
    """
    Close service client and its plugins.
    """
    self._execute_plugin_hooks_sync(hook='close')
    if not self.session.closed:
        ensure_future(self.session.close(), loop=self.loop)
[ "def", "close", "(", "self", ")", ":", "self", ".", "_execute_plugin_hooks_sync", "(", "hook", "=", "'close'", ")", "if", "not", "self", ".", "session", ".", "closed", ":", "ensure_future", "(", "self", ".", "session", ".", "close", "(", ")", ",", "loop", "=", "self", ".", "loop", ")" ]
Close service client and its plugins.
[ "Close", "service", "client", "and", "its", "plugins", "." ]
python
train
29.25
apache/airflow
airflow/models/taskinstance.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L1303-L1352
def xcom_pull(
        self,
        task_ids=None,
        dag_id=None,
        key=XCOM_RETURN_KEY,
        include_prior_dates=False):
    """
    Pull XComs that optionally meet certain criteria.

    The default value for `key` limits the search to XComs
    that were returned by other tasks (as opposed to those that were pushed
    manually). To remove this filter, pass key=None (or any desired value).

    If a single task_id string is provided, the result is the value of the
    most recent matching XCom from that task_id. If multiple task_ids are
    provided, a tuple of matching values is returned. None is returned
    whenever no matches are found.

    :param key: A key for the XCom. If provided, only XComs with matching
        keys will be returned. The default key is 'return_value', also
        available as a constant XCOM_RETURN_KEY. This key is automatically
        given to XComs returned by tasks (as opposed to being pushed
        manually). To remove the filter, pass key=None.
    :type key: str
    :param task_ids: Only XComs from tasks with matching ids will be
        pulled. Can pass None to remove the filter.
    :type task_ids: str or iterable of strings (representing task_ids)
    :param dag_id: If provided, only pulls XComs from this DAG.
        If None (default), the DAG of the calling task is used.
    :type dag_id: str
    :param include_prior_dates: If False, only XComs from the current
        execution_date are returned. If True, XComs from previous dates
        are returned as well.
    :type include_prior_dates: bool
    """
    if dag_id is None:
        dag_id = self.dag_id

    pull_fn = functools.partial(
        XCom.get_one,
        execution_date=self.execution_date,
        key=key,
        dag_id=dag_id,
        include_prior_dates=include_prior_dates)

    if is_container(task_ids):
        return tuple(pull_fn(task_id=t) for t in task_ids)
    else:
        return pull_fn(task_id=task_ids)
[ "def", "xcom_pull", "(", "self", ",", "task_ids", "=", "None", ",", "dag_id", "=", "None", ",", "key", "=", "XCOM_RETURN_KEY", ",", "include_prior_dates", "=", "False", ")", ":", "if", "dag_id", "is", "None", ":", "dag_id", "=", "self", ".", "dag_id", "pull_fn", "=", "functools", ".", "partial", "(", "XCom", ".", "get_one", ",", "execution_date", "=", "self", ".", "execution_date", ",", "key", "=", "key", ",", "dag_id", "=", "dag_id", ",", "include_prior_dates", "=", "include_prior_dates", ")", "if", "is_container", "(", "task_ids", ")", ":", "return", "tuple", "(", "pull_fn", "(", "task_id", "=", "t", ")", "for", "t", "in", "task_ids", ")", "else", ":", "return", "pull_fn", "(", "task_id", "=", "task_ids", ")" ]
Pull XComs that optionally meet certain criteria. The default value for `key` limits the search to XComs that were returned by other tasks (as opposed to those that were pushed manually). To remove this filter, pass key=None (or any desired value). If a single task_id string is provided, the result is the value of the most recent matching XCom from that task_id. If multiple task_ids are provided, a tuple of matching values is returned. None is returned whenever no matches are found. :param key: A key for the XCom. If provided, only XComs with matching keys will be returned. The default key is 'return_value', also available as a constant XCOM_RETURN_KEY. This key is automatically given to XComs returned by tasks (as opposed to being pushed manually). To remove the filter, pass key=None. :type key: str :param task_ids: Only XComs from tasks with matching ids will be pulled. Can pass None to remove the filter. :type task_ids: str or iterable of strings (representing task_ids) :param dag_id: If provided, only pulls XComs from this DAG. If None (default), the DAG of the calling task is used. :type dag_id: str :param include_prior_dates: If False, only XComs from the current execution_date are returned. If True, XComs from previous dates are returned as well. :type include_prior_dates: bool
[ "Pull", "XComs", "that", "optionally", "meet", "certain", "criteria", "." ]
python
test
41.94
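Usage sketch inside a downstream task callable, matching the docstring's semantics; the task ids are placeholders:

def downstream_callable(**context):
    ti = context["ti"]
    # latest 'return_value' XCom pushed by a single upstream task
    value = ti.xcom_pull(task_ids="extract")
    # tuple of values when several task ids are given
    values = ti.xcom_pull(task_ids=["extract", "transform"])
    # any key, from any task in this DAG run
    anything = ti.xcom_pull(task_ids=None, key=None)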
senaite/senaite.core
bika/lims/content/worksheet.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/worksheet.py#L1300-L1308
def getAnalystName(self):
    """ Returns the name of the currently assigned analyst
    """
    mtool = getToolByName(self, 'portal_membership')
    analyst = self.getAnalyst().strip()
    analyst_member = mtool.getMemberById(analyst)
    if analyst_member is not None:
        return analyst_member.getProperty('fullname')
    return analyst
[ "def", "getAnalystName", "(", "self", ")", ":", "mtool", "=", "getToolByName", "(", "self", ",", "'portal_membership'", ")", "analyst", "=", "self", ".", "getAnalyst", "(", ")", ".", "strip", "(", ")", "analyst_member", "=", "mtool", ".", "getMemberById", "(", "analyst", ")", "if", "analyst_member", "is", "not", "None", ":", "return", "analyst_member", ".", "getProperty", "(", "'fullname'", ")", "return", "analyst" ]
Returns the name of the currently assigned analyst
[ "Returns", "the", "name", "of", "the", "currently", "assigned", "analyst" ]
python
train
40.777778
asaskevich/binario
binario/reader.py
https://github.com/asaskevich/binario/blob/8d40337952ab77f02da0edeae7fa761eadf6ab45/binario/reader.py#L72-L76
def seek(self, pos): """ Move to new input file position. If position is negative or out of file, raise Exception. """ if (pos > self.file_size) or (pos < 0): raise Exception("Unable to seek - position out of file!") self.file.seek(pos)
[ "def", "seek", "(", "self", ",", "pos", ")", ":", "if", "(", "pos", ">", "self", ".", "file_size", ")", "or", "(", "pos", "<", "0", ")", ":", "raise", "Exception", "(", "\"Unable to seek - position out of file!\"", ")", "self", ".", "file", ".", "seek", "(", "pos", ")" ]
Move to new input file position. If position is negative or out of file, raise Exception.
[ "Move", "to", "new", "input", "file", "position", ".", "If", "position", "is", "negative", "or", "out", "of", "file", "raise", "Exception", "." ]
python
train
53.6
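A short behaviour sketch for seek above; the Reader constructor is assumed from the module name and is not shown in this excerpt.

# Assumption: binario.Reader opens a file and exposes its length as file_size.
r = Reader('data.bin')
r.seek(0)            # move to the start of the file
r.seek(r.file_size)  # a position equal to the file size is still accepted
r.seek(-1)           # raises Exception: position out of file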
flo-compbio/genometools
genometools/basic/gene_set.py
https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/basic/gene_set.py#L165-L184
def to_list(self): """Converts the GeneSet object to a flat list of strings. Note: see also :meth:`from_list`. Parameters ---------- Returns ------- list of str The data from the GeneSet object as a flat list. """ src = self._source or '' coll = self._collection or '' desc = self._description or '' l = [self._id, src, coll, self._name, ','.join(sorted(self._genes)), desc] return l
[ "def", "to_list", "(", "self", ")", ":", "src", "=", "self", ".", "_source", "or", "''", "coll", "=", "self", ".", "_collection", "or", "''", "desc", "=", "self", ".", "_description", "or", "''", "l", "=", "[", "self", ".", "_id", ",", "src", ",", "coll", ",", "self", ".", "_name", ",", "','", ".", "join", "(", "sorted", "(", "self", ".", "_genes", ")", ")", ",", "desc", "]", "return", "l" ]
Converts the GeneSet object to a flat list of strings. Note: see also :meth:`from_list`. Parameters ---------- Returns ------- list of str The data from the GeneSet object as a flat list.
[ "Converts", "the", "GeneSet", "object", "to", "a", "flat", "list", "of", "strings", "." ]
python
train
24.85
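A sketch of the flat-list shape produced by to_list; the GeneSet constructor arguments are assumed for illustration.

# Hypothetical gene set with the optional source/collection/description unset;
# unset fields become empty strings and genes are sorted and comma-joined.
gs = GeneSet(id='GS001', name='my set', genes=['TP53', 'BRCA1'])
gs.to_list()
# -> ['GS001', '', '', 'my set', 'BRCA1,TP53', '']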
bennyrowland/suspect
suspect/processing/frequency_correction.py
https://github.com/bennyrowland/suspect/blob/c09ab0a5013c5a199218214cdd791659243d7e41/suspect/processing/frequency_correction.py#L23-L67
def spectral_registration(data, target, initial_guess=(0.0, 0.0), frequency_range=None):
    """
    Performs the spectral registration method to calculate the frequency and
    phase shifts between the input data and the reference spectrum target. The
    frequency range over which the two spectra are compared can be specified to
    exclude regions where the spectra differ.

    :param data:
    :param target:
    :param initial_guess:
    :param frequency_range:
    :return:
    """

    # make sure that there are no extra dimensions in the data
    data = data.squeeze()
    target = target.squeeze()

    # the supplied frequency range can be none, in which case we use the whole
    # spectrum, or it can be a tuple defining two frequencies in Hz, in which
    # case we use the spectral points between those two frequencies, or it can
    # be a numpy.array of the same size as the data in which case we simply use
    # that array as the weightings for the comparison
    if type(frequency_range) is tuple:
        # combine the two comparisons explicitly: `&` binds tighter than `<`
        # in Python, so a bare `a < x & x < b` does not mean a range check
        spectral_weights = numpy.logical_and(
            frequency_range[0] < data.frequency_axis(),
            data.frequency_axis() < frequency_range[1])
    else:
        spectral_weights = frequency_range

    # define a residual function for the optimizer to use
    def residual(input_vector):
        transformed_data = transform_fid(data, input_vector[0], input_vector[1])
        residual_data = transformed_data - target
        if frequency_range is not None:
            spectrum = residual_data.spectrum()
            # weight the frequency-domain residual (the weights are per
            # spectral point), then transform back to the time domain
            weighted_spectrum = spectrum * spectral_weights
            # remove zero-elements
            weighted_spectrum = weighted_spectrum[weighted_spectrum != 0]
            residual_data = numpy.fft.ifft(numpy.fft.ifftshift(weighted_spectrum))
        return_vector = numpy.zeros(len(residual_data) * 2)
        return_vector[:len(residual_data)] = residual_data.real
        return_vector[len(residual_data):] = residual_data.imag
        return return_vector

    out = scipy.optimize.leastsq(residual, initial_guess)
    return -out[0][0], -out[0][1]
[ "def", "spectral_registration", "(", "data", ",", "target", ",", "initial_guess", "=", "(", "0.0", ",", "0.0", ")", ",", "frequency_range", "=", "None", ")", ":", "# make sure that there are no extra dimensions in the data", "data", "=", "data", ".", "squeeze", "(", ")", "target", "=", "target", ".", "squeeze", "(", ")", "# the supplied frequency range can be none, in which case we use the whole", "# spectrum, or it can be a tuple defining two frequencies in Hz, in which", "# case we use the spectral points between those two frequencies, or it can", "# be a numpy.array of the same size as the data in which case we simply use", "# that array as the weightings for the comparison", "if", "type", "(", "frequency_range", ")", "is", "tuple", ":", "spectral_weights", "=", "frequency_range", "[", "0", "]", "<", "data", ".", "frequency_axis", "(", ")", "&", "data", ".", "frequency_axis", "(", ")", "<", "frequency_range", "[", "1", "]", "else", ":", "spectral_weights", "=", "frequency_range", "# define a residual function for the optimizer to use", "def", "residual", "(", "input_vector", ")", ":", "transformed_data", "=", "transform_fid", "(", "data", ",", "input_vector", "[", "0", "]", ",", "input_vector", "[", "1", "]", ")", "residual_data", "=", "transformed_data", "-", "target", "if", "frequency_range", "is", "not", "None", ":", "spectrum", "=", "residual_data", ".", "spectrum", "(", ")", "weighted_spectrum", "=", "residual_data", "*", "spectral_weights", "# remove zero-elements", "weighted_spectrum", "=", "weighted_spectrum", "[", "weighted_spectrum", "!=", "0", "]", "residual_data", "=", "numpy", ".", "fft", ".", "ifft", "(", "numpy", ".", "fft", ".", "ifftshift", "(", "weighted_spectrum", ")", ")", "return_vector", "=", "numpy", ".", "zeros", "(", "len", "(", "residual_data", ")", "*", "2", ")", "return_vector", "[", ":", "len", "(", "residual_data", ")", "]", "=", "residual_data", ".", "real", "return_vector", "[", "len", "(", "residual_data", ")", ":", "]", "=", "residual_data", ".", "imag", "return", "return_vector", "out", "=", "scipy", ".", "optimize", ".", "leastsq", "(", "residual", ",", "initial_guess", ")", "return", "-", "out", "[", "0", "]", "[", "0", "]", ",", "-", "out", "[", "0", "]", "[", "1", "]" ]
Performs the spectral registration method to calculate the frequency and phase shifts between the input data and the reference spectrum target. The frequency range over which the two spectra are compared can be specified to exclude regions where the spectra differ. :param data: :param target: :param initial_guess: :param frequency_range: :return:
[ "Performs", "the", "spectral", "registration", "method", "to", "calculate", "the", "frequency", "and", "phase", "shifts", "between", "the", "input", "data", "and", "the", "reference", "spectrum", "target", ".", "The", "frequency", "range", "over", "which", "the", "two", "spectra", "are", "compared", "can", "be", "specified", "to", "exclude", "regions", "where", "the", "spectra", "differ", "." ]
python
train
44.911111
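A hedged call sketch for spectral_registration; `data` and `target` stand for suspect FID objects exposing frequency_axis(), and the Hz limits are invented.

# Sketch: restrict the comparison to an invented -150..150 Hz window.
freq_shift, phase_shift = spectral_registration(
    data, target, initial_guess=(0.0, 0.0), frequency_range=(-150.0, 150.0))
# the returned shifts can then be fed back into transform_fid to align
# `data` with `target`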
push-things/django-th
th_github/my_github.py
https://github.com/push-things/django-th/blob/86c999d16bcf30b6224206e5b40824309834ac8c/th_github/my_github.py#L178-L188
def callback(self, request, **kwargs):
    """
        Called from the Service when the user accepts to activate it
        :param request: request object
        :return: callback url
        :rtype: string, path to the template
    """
    access_token = request.session['oauth_token'] + "#TH#"
    access_token += str(request.session['oauth_id'])
    kwargs = {'access_token': access_token}
    return super(ServiceGithub, self).callback(request, **kwargs)
[ "def", "callback", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "access_token", "=", "request", ".", "session", "[", "'oauth_token'", "]", "+", "\"#TH#\"", "access_token", "+=", "str", "(", "request", ".", "session", "[", "'oauth_id'", "]", ")", "kwargs", "=", "{", "'access_token'", ":", "access_token", "}", "return", "super", "(", "ServiceGithub", ",", "self", ")", ".", "callback", "(", "request", ",", "*", "*", "kwargs", ")" ]
Called from the Service when the user accepts to activate it
:param request: request object
:return: callback url
:rtype: string, path to the template
python
train
44.454545
oasis-open/cti-stix-validator
stix2validator/v21/shoulds.py
https://github.com/oasis-open/cti-stix-validator/blob/a607014e3fa500a7678f8b61b278456ca581f9d0/stix2validator/v21/shoulds.py#L68-L78
def custom_object_prefix_lax(instance): """Ensure custom objects follow lenient naming style conventions for forward-compatibility. """ if (instance['type'] not in enums.TYPES and instance['type'] not in enums.RESERVED_OBJECTS and not CUSTOM_TYPE_LAX_PREFIX_RE.match(instance['type'])): yield JSONError("Custom object type '%s' should start with 'x-' in " "order to be compatible with future versions of the " "STIX 2 specification." % instance['type'], instance['id'], 'custom-prefix-lax')
[ "def", "custom_object_prefix_lax", "(", "instance", ")", ":", "if", "(", "instance", "[", "'type'", "]", "not", "in", "enums", ".", "TYPES", "and", "instance", "[", "'type'", "]", "not", "in", "enums", ".", "RESERVED_OBJECTS", "and", "not", "CUSTOM_TYPE_LAX_PREFIX_RE", ".", "match", "(", "instance", "[", "'type'", "]", ")", ")", ":", "yield", "JSONError", "(", "\"Custom object type '%s' should start with 'x-' in \"", "\"order to be compatible with future versions of the \"", "\"STIX 2 specification.\"", "%", "instance", "[", "'type'", "]", ",", "instance", "[", "'id'", "]", ",", "'custom-prefix-lax'", ")" ]
Ensure custom objects follow lenient naming style conventions for forward-compatibility.
[ "Ensure", "custom", "objects", "follow", "lenient", "naming", "style", "conventions", "for", "forward", "-", "compatibility", "." ]
python
train
54.545455
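A sketch of an instance that trips the check above; the type and id values are invented.

# Custom type without the recommended 'x-' prefix: iterating the generator
# yields one JSONError.
instance = {'type': 'my-custom-object', 'id': 'my-custom-object--1234'}
for error in custom_object_prefix_lax(instance):
    print(error)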
joequant/cryptoexchange
cryptoexchange/util/bitmex-generate-api-key.py
https://github.com/joequant/cryptoexchange/blob/6690fbd9a2ba00e40d7484425808c84d44233f0c/cryptoexchange/util/bitmex-generate-api-key.py#L98-L108
def enable_key(self):
    """Enable an existing API Key."""
    print("This command will enable a disabled key.")
    apiKeyID = input("API Key ID: ")
    try:
        key = self._curl_bitmex("/apiKey/enable",
                                postdict={"apiKeyID": apiKeyID})
        print("Key with ID %s enabled." % key["id"])
    except Exception:
        # catch Exception rather than everything, so Ctrl-C still exits
        print("Unable to enable key, please try again.")
        self.enable_key()
[ "def", "enable_key", "(", "self", ")", ":", "print", "(", "\"This command will enable a disabled key.\"", ")", "apiKeyID", "=", "input", "(", "\"API Key ID: \"", ")", "try", ":", "key", "=", "self", ".", "_curl_bitmex", "(", "\"/apiKey/enable\"", ",", "postdict", "=", "{", "\"apiKeyID\"", ":", "apiKeyID", "}", ")", "print", "(", "\"Key with ID %s enabled.\"", "%", "key", "[", "\"id\"", "]", ")", "except", ":", "print", "(", "\"Unable to enable key, please try again.\"", ")", "self", ".", "enable_key", "(", ")" ]
Enable an existing API Key.
[ "Enable", "an", "existing", "API", "Key", "." ]
python
train
41.090909
ldo/dbussy
dbussy.py
https://github.com/ldo/dbussy/blob/59e4fbe8b8111ceead884e50d1973901a0a2d240/dbussy.py#L5672-L5685
def split_path(path) : "convenience routine for splitting a path into a list of components." if isinstance(path, (tuple, list)) : result = path # assume already split elif path == "/" : result = [] else : if not path.startswith("/") or path.endswith("/") : raise DBusError(DBUS.ERROR_INVALID_ARGS, "invalid path %s" % repr(path)) #end if result = path.split("/")[1:] #end if return \ result
[ "def", "split_path", "(", "path", ")", ":", "if", "isinstance", "(", "path", ",", "(", "tuple", ",", "list", ")", ")", ":", "result", "=", "path", "# assume already split", "elif", "path", "==", "\"/\"", ":", "result", "=", "[", "]", "else", ":", "if", "not", "path", ".", "startswith", "(", "\"/\"", ")", "or", "path", ".", "endswith", "(", "\"/\"", ")", ":", "raise", "DBusError", "(", "DBUS", ".", "ERROR_INVALID_ARGS", ",", "\"invalid path %s\"", "%", "repr", "(", "path", ")", ")", "#end if", "result", "=", "path", ".", "split", "(", "\"/\"", ")", "[", "1", ":", "]", "#end if", "return", "result" ]
convenience routine for splitting a path into a list of components.
[ "convenience", "routine", "for", "splitting", "a", "path", "into", "a", "list", "of", "components", "." ]
python
train
32.928571
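A behaviour sketch for the pure helper above, following the code directly.

split_path('/')                 # -> []
split_path('/org/example/Obj')  # -> ['org', 'example', 'Obj']
split_path(['org', 'example'])  # returned unchanged: assumed already split
split_path('org/example')       # raises DBusError: missing leading slash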
tomprince/txgithub
txgithub/api.py
https://github.com/tomprince/txgithub/blob/3bd5eebb25db013e2193e6a102a91049f356710d/txgithub/api.py#L398-L413
def replyToComment(self, repo_user, repo_name, pull_number, body, in_reply_to): """ POST /repos/:owner/:repo/pulls/:number/comments Like create, but reply to an existing comment. :param body: The text of the comment. :param in_reply_to: The comment ID to reply to. """ return self.api.makeRequest( ["repos", repo_user, repo_name, "pulls", str(pull_number), "comments"], method="POST", data=dict(body=body, in_reply_to=in_reply_to))
[ "def", "replyToComment", "(", "self", ",", "repo_user", ",", "repo_name", ",", "pull_number", ",", "body", ",", "in_reply_to", ")", ":", "return", "self", ".", "api", ".", "makeRequest", "(", "[", "\"repos\"", ",", "repo_user", ",", "repo_name", ",", "\"pulls\"", ",", "str", "(", "pull_number", ")", ",", "\"comments\"", "]", ",", "method", "=", "\"POST\"", ",", "data", "=", "dict", "(", "body", "=", "body", ",", "in_reply_to", "=", "in_reply_to", ")", ")" ]
POST /repos/:owner/:repo/pulls/:number/comments Like create, but reply to an existing comment. :param body: The text of the comment. :param in_reply_to: The comment ID to reply to.
[ "POST", "/", "repos", "/", ":", "owner", "/", ":", "repo", "/", "pulls", "/", ":", "number", "/", "comments" ]
python
train
35.5
merry-bits/DBQuery
src/dbquery/query.py
https://github.com/merry-bits/DBQuery/blob/5f46dc94e2721129f8a799b5f613373e6cd9cb73/src/dbquery/query.py#L262-L267
def _produce_return(self, cursor): """ Calls callback once with generator. :rtype: None """ self.callback(self._row_generator(cursor), *self.cb_args) return None
[ "def", "_produce_return", "(", "self", ",", "cursor", ")", ":", "self", ".", "callback", "(", "self", ".", "_row_generator", "(", "cursor", ")", ",", "*", "self", ".", "cb_args", ")", "return", "None" ]
Calls callback once with generator. :rtype: None
[ "Calls", "callback", "once", "with", "generator", ".", ":", "rtype", ":", "None" ]
python
train
33.333333
rochacbruno/manage
manage/cli.py
https://github.com/rochacbruno/manage/blob/e904c451862f036f4be8723df5704a9844103c74/manage/cli.py#L70-L90
def init(banner, hidden, backup): """Initialize a manage shell in current directory $ manage init --banner="My awesome app shell" initializing manage... creating manage.yml """ manage_file = HIDDEN_MANAGE_FILE if hidden else MANAGE_FILE if os.path.exists(manage_file): if not click.confirm('Rewrite {0}?'.format(manage_file)): return if backup: bck = '.bck_{0}'.format(manage_file) with open(manage_file, 'r') as source, open(bck, 'w') as bck_file: bck_file.write(source.read()) with open(manage_file, 'w') as output: data = default_manage_dict if banner: data['shell']['banner']['message'] = banner output.write(yaml.dump(data, default_flow_style=False))
[ "def", "init", "(", "banner", ",", "hidden", ",", "backup", ")", ":", "manage_file", "=", "HIDDEN_MANAGE_FILE", "if", "hidden", "else", "MANAGE_FILE", "if", "os", ".", "path", ".", "exists", "(", "manage_file", ")", ":", "if", "not", "click", ".", "confirm", "(", "'Rewrite {0}?'", ".", "format", "(", "manage_file", ")", ")", ":", "return", "if", "backup", ":", "bck", "=", "'.bck_{0}'", ".", "format", "(", "manage_file", ")", "with", "open", "(", "manage_file", ",", "'r'", ")", "as", "source", ",", "open", "(", "bck", ",", "'w'", ")", "as", "bck_file", ":", "bck_file", ".", "write", "(", "source", ".", "read", "(", ")", ")", "with", "open", "(", "manage_file", ",", "'w'", ")", "as", "output", ":", "data", "=", "default_manage_dict", "if", "banner", ":", "data", "[", "'shell'", "]", "[", "'banner'", "]", "[", "'message'", "]", "=", "banner", "output", ".", "write", "(", "yaml", ".", "dump", "(", "data", ",", "default_flow_style", "=", "False", ")", ")" ]
Initialize a manage shell in current directory $ manage init --banner="My awesome app shell" initializing manage... creating manage.yml
[ "Initialize", "a", "manage", "shell", "in", "current", "directory", "$", "manage", "init", "--", "banner", "=", "My", "awesome", "app", "shell", "initializing", "manage", "...", "creating", "manage", ".", "yml" ]
python
train
37.380952
gwpy/gwpy
gwpy/plot/rc.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/rc.py#L100-L138
def rc_params(usetex=None):
    """Returns a new `matplotlib.RcParams` with updated GWpy parameters

    The updated parameters are globally stored as
    `gwpy.plot.rc.GWPY_RCPARAMS`, with the updated TeX parameters as
    `gwpy.plot.rc.GWPY_TEX_RCPARAMS`.

    .. note::

       This function doesn't apply the new `RcParams` in any way, just
       creates something that can be used to set `matplotlib.rcParams`.

    Parameters
    ----------
    usetex : `bool`, `None`
        value to set for `text.usetex`; if `None` determine automatically
        using the ``GWPY_USETEX`` environment variable, and whether `tex`
        is available on the system. If `True` is given (or determined)
        a number of other parameters are updated to improve TeX formatting.

    Examples
    --------
    >>> import matplotlib
    >>> from gwpy.plot.rc import rc_params as gwpy_rc_params
    >>> matplotlib.rcParams.update(gwpy_rc_params(usetex=False))
    """
    # if user didn't specify to use tex or not, guess based on
    # the `GWPY_USETEX` environment variable, or whether tex is
    # installed at all.
    if usetex is None:
        usetex = bool_env(
            'GWPY_USETEX',
            default=rcParams['text.usetex'] or tex.has_tex())
    # build RcParams from matplotlib.rcParams with GWpy extras
    rcp = GWPY_RCPARAMS.copy()
    if usetex:
        rcp.update(GWPY_TEX_RCPARAMS)
    return rcp
[ "def", "rc_params", "(", "usetex", "=", "None", ")", ":", "# if user didn't specify to use tex or not, guess based on", "# the `GWPY_USETEX` environment variable, or whether tex is", "# installed at all.", "if", "usetex", "is", "None", ":", "usetex", "=", "bool_env", "(", "'GWPY_USETEX'", ",", "default", "=", "rcParams", "[", "'text.usetex'", "]", "or", "tex", ".", "has_tex", "(", ")", ")", "# build RcParams from matplotlib.rcParams with GWpy extras", "rcp", "=", "GWPY_RCPARAMS", ".", "copy", "(", ")", "if", "usetex", ":", "rcp", ".", "update", "(", "GWPY_TEX_RCPARAMS", ")", "return", "rcp" ]
Returns a new `matplotlib.RcParams` with updated GWpy parameters

The updated parameters are globally stored as
`gwpy.plot.rc.GWPY_RCPARAMS`, with the updated TeX parameters as
`gwpy.plot.rc.GWPY_TEX_RCPARAMS`.

.. note::

   This function doesn't apply the new `RcParams` in any way, just
   creates something that can be used to set `matplotlib.rcParams`.

Parameters
----------
usetex : `bool`, `None`
    value to set for `text.usetex`; if `None` determine automatically
    using the ``GWPY_USETEX`` environment variable, and whether `tex`
    is available on the system. If `True` is given (or determined)
    a number of other parameters are updated to improve TeX formatting.

Examples
--------
>>> import matplotlib
>>> from gwpy.plot.rc import rc_params as gwpy_rc_params
>>> matplotlib.rcParams.update(gwpy_rc_params(usetex=False))
[ "Returns", "a", "new", "matplotlib", ".", "RcParams", "with", "updated", "GWpy", "parameters" ]
python
train
35.230769
hydpy-dev/hydpy
hydpy/models/dam/dam_model.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/dam/dam_model.py#L734-L757
def calc_allowedremoterelieve_v1(self): """Get the allowed remote relieve of the last simulation step. Required log sequence: |LoggedAllowedRemoteRelieve| Calculated flux sequence: |AllowedRemoteRelieve| Basic equation: :math:`AllowedRemoteRelieve = LoggedAllowedRemoteRelieve` Example: >>> from hydpy.models.dam import * >>> parameterstep() >>> logs.loggedallowedremoterelieve = 2.0 >>> model.calc_allowedremoterelieve_v1() >>> fluxes.allowedremoterelieve allowedremoterelieve(2.0) """ flu = self.sequences.fluxes.fastaccess log = self.sequences.logs.fastaccess flu.allowedremoterelieve = log.loggedallowedremoterelieve[0]
[ "def", "calc_allowedremoterelieve_v1", "(", "self", ")", ":", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fastaccess", "log", "=", "self", ".", "sequences", ".", "logs", ".", "fastaccess", "flu", ".", "allowedremoterelieve", "=", "log", ".", "loggedallowedremoterelieve", "[", "0", "]" ]
Get the allowed remote relieve of the last simulation step. Required log sequence: |LoggedAllowedRemoteRelieve| Calculated flux sequence: |AllowedRemoteRelieve| Basic equation: :math:`AllowedRemoteRelieve = LoggedAllowedRemoteRelieve` Example: >>> from hydpy.models.dam import * >>> parameterstep() >>> logs.loggedallowedremoterelieve = 2.0 >>> model.calc_allowedremoterelieve_v1() >>> fluxes.allowedremoterelieve allowedremoterelieve(2.0)
[ "Get", "the", "allowed", "remote", "relieve", "of", "the", "last", "simulation", "step", "." ]
python
train
29.458333
dls-controls/pymalcolm
malcolm/modules/scanning/controllers/runnablecontroller.py
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/modules/scanning/controllers/runnablecontroller.py#L221-L257
def validate(self, generator, axesToMove=None, **kwargs): # type: (AGenerator, AAxesToMove, **Any) -> AConfigureParams """Validate configuration parameters and return validated parameters. Doesn't take device state into account so can be run in any state """ iterations = 10 # We will return this, so make sure we fill in defaults for k, default in self._block.configure.defaults.items(): if k not in kwargs: kwargs[k] = default # The validated parameters we will eventually return params = ConfigureParams(generator, axesToMove, **kwargs) # Make some tasks just for validate part_contexts = self.create_part_contexts() # Get any status from all parts status_part_info = self.run_hooks( ReportStatusHook(p, c) for p, c in part_contexts.items()) while iterations > 0: # Try up to 10 times to get a valid set of parameters iterations -= 1 # Validate the params with all the parts validate_part_info = self.run_hooks( ValidateHook(p, c, status_part_info, **kwargs) for p, c, kwargs in self._part_params(part_contexts, params)) tweaks = ParameterTweakInfo.filter_values(validate_part_info) if tweaks: for tweak in tweaks: deserialized = self._block.configure.takes.elements[ tweak.parameter].validate(tweak.value) setattr(params, tweak.parameter, deserialized) self.log.debug( "Tweaking %s to %s", tweak.parameter, deserialized) else: # Consistent set, just return the params return params raise ValueError("Could not get a consistent set of parameters")
[ "def", "validate", "(", "self", ",", "generator", ",", "axesToMove", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# type: (AGenerator, AAxesToMove, **Any) -> AConfigureParams", "iterations", "=", "10", "# We will return this, so make sure we fill in defaults", "for", "k", ",", "default", "in", "self", ".", "_block", ".", "configure", ".", "defaults", ".", "items", "(", ")", ":", "if", "k", "not", "in", "kwargs", ":", "kwargs", "[", "k", "]", "=", "default", "# The validated parameters we will eventually return", "params", "=", "ConfigureParams", "(", "generator", ",", "axesToMove", ",", "*", "*", "kwargs", ")", "# Make some tasks just for validate", "part_contexts", "=", "self", ".", "create_part_contexts", "(", ")", "# Get any status from all parts", "status_part_info", "=", "self", ".", "run_hooks", "(", "ReportStatusHook", "(", "p", ",", "c", ")", "for", "p", ",", "c", "in", "part_contexts", ".", "items", "(", ")", ")", "while", "iterations", ">", "0", ":", "# Try up to 10 times to get a valid set of parameters", "iterations", "-=", "1", "# Validate the params with all the parts", "validate_part_info", "=", "self", ".", "run_hooks", "(", "ValidateHook", "(", "p", ",", "c", ",", "status_part_info", ",", "*", "*", "kwargs", ")", "for", "p", ",", "c", ",", "kwargs", "in", "self", ".", "_part_params", "(", "part_contexts", ",", "params", ")", ")", "tweaks", "=", "ParameterTweakInfo", ".", "filter_values", "(", "validate_part_info", ")", "if", "tweaks", ":", "for", "tweak", "in", "tweaks", ":", "deserialized", "=", "self", ".", "_block", ".", "configure", ".", "takes", ".", "elements", "[", "tweak", ".", "parameter", "]", ".", "validate", "(", "tweak", ".", "value", ")", "setattr", "(", "params", ",", "tweak", ".", "parameter", ",", "deserialized", ")", "self", ".", "log", ".", "debug", "(", "\"Tweaking %s to %s\"", ",", "tweak", ".", "parameter", ",", "deserialized", ")", "else", ":", "# Consistent set, just return the params", "return", "params", "raise", "ValueError", "(", "\"Could not get a consistent set of parameters\"", ")" ]
Validate configuration parameters and return validated parameters. Doesn't take device state into account so can be run in any state
[ "Validate", "configuration", "parameters", "and", "return", "validated", "parameters", "." ]
python
train
49.918919
ocaballeror/LyricFetch
lyricfetch/song.py
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/song.py#L212-L232
def get_current_cmus(): """ Get the current song from cmus. """ result = subprocess.run('cmus-remote -Q'.split(' '), check=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) info = {} for line in result.stdout.decode().split('\n'): line = line.split(' ') if line[0] != 'tag': continue key = line[1] if key in ['album', 'title', 'artist', 'albumartist'] and\ key not in info: info[key] = ' '.join(line[2:]) if 'albumartist' in info: info['artist'] = info['albumartist'] del info['albumartist'] return Song(**info)
[ "def", "get_current_cmus", "(", ")", ":", "result", "=", "subprocess", ".", "run", "(", "'cmus-remote -Q'", ".", "split", "(", "' '", ")", ",", "check", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "DEVNULL", ")", "info", "=", "{", "}", "for", "line", "in", "result", ".", "stdout", ".", "decode", "(", ")", ".", "split", "(", "'\\n'", ")", ":", "line", "=", "line", ".", "split", "(", "' '", ")", "if", "line", "[", "0", "]", "!=", "'tag'", ":", "continue", "key", "=", "line", "[", "1", "]", "if", "key", "in", "[", "'album'", ",", "'title'", ",", "'artist'", ",", "'albumartist'", "]", "and", "key", "not", "in", "info", ":", "info", "[", "key", "]", "=", "' '", ".", "join", "(", "line", "[", "2", ":", "]", ")", "if", "'albumartist'", "in", "info", ":", "info", "[", "'artist'", "]", "=", "info", "[", "'albumartist'", "]", "del", "info", "[", "'albumartist'", "]", "return", "Song", "(", "*", "*", "info", ")" ]
Get the current song from cmus.
[ "Get", "the", "current", "song", "from", "cmus", "." ]
python
train
30.619048
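An invented sample of the `cmus-remote -Q` output this parser consumes, to make the tag handling concrete.

# Invented output (abridged):
#   status playing
#   tag artist Some Artist
#   tag albumartist Various Artists
#   tag album Some Album
#   tag title Some Title
# Only the first occurrence of each wanted tag is kept, and an
# 'albumartist' tag replaces 'artist' before Song(**info) is built.
song = get_current_cmus()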
pipermerriam/flex
flex/validation/common.py
https://github.com/pipermerriam/flex/blob/233f8149fb851a6255753bcec948cb6fefb2723b/flex/validation/common.py#L248-L264
def validate_unique_items(value, **kwargs): """ Validator for ARRAY types to enforce that all array items must be unique. """ # we can't just look at the items themselves since 0 and False are treated # the same as dictionary keys, and objects aren't hashable. counter = collections.Counter(( json.dumps(v, sort_keys=True) for v in value )) dupes = [json.loads(v) for v, count in counter.items() if count > 1] if dupes: raise ValidationError( MESSAGES['unique_items']['invalid'].format( repr(dupes), ), )
[ "def", "validate_unique_items", "(", "value", ",", "*", "*", "kwargs", ")", ":", "# we can't just look at the items themselves since 0 and False are treated", "# the same as dictionary keys, and objects aren't hashable.", "counter", "=", "collections", ".", "Counter", "(", "(", "json", ".", "dumps", "(", "v", ",", "sort_keys", "=", "True", ")", "for", "v", "in", "value", ")", ")", "dupes", "=", "[", "json", ".", "loads", "(", "v", ")", "for", "v", ",", "count", "in", "counter", ".", "items", "(", ")", "if", "count", ">", "1", "]", "if", "dupes", ":", "raise", "ValidationError", "(", "MESSAGES", "[", "'unique_items'", "]", "[", "'invalid'", "]", ".", "format", "(", "repr", "(", "dupes", ")", ",", ")", ",", ")" ]
Validator for ARRAY types to enforce that all array items must be unique.
[ "Validator", "for", "ARRAY", "types", "to", "enforce", "that", "all", "array", "items", "must", "be", "unique", "." ]
python
train
34.647059
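A behaviour sketch showing why the items are serialised before counting: 0 and False collide as dict/Counter keys, and dicts are not hashable at all.

validate_unique_items([0, False])            # passes: '0' != 'false' once dumped
validate_unique_items([{'a': 1}, {'b': 2}])  # passes: distinct objects
validate_unique_items([{'a': 1}, {'a': 1}])  # raises ValidationError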
daviddrysdale/python-phonenumbers
python/phonenumbers/phonenumbermatcher.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/phonenumbermatcher.py#L584-L613
def _extract_inner_match(self, candidate, offset): """Attempts to extract a match from candidate if the whole candidate does not qualify as a match. Arguments: candidate -- The candidate text that might contain a phone number offset -- The current offset of candidate within text Returns the match found, None if none can be found """ for possible_inner_match in _INNER_MATCHES: group_match = possible_inner_match.search(candidate) is_first_match = True while group_match and self._max_tries > 0: if is_first_match: # We should handle any group before this one too. group = self._trim_after_first_match(_UNWANTED_END_CHAR_PATTERN, candidate[:group_match.start()]) match = self._parse_and_verify(group, offset) if match is not None: return match self._max_tries -= 1 is_first_match = False group = self._trim_after_first_match(_UNWANTED_END_CHAR_PATTERN, group_match.group(1)) match = self._parse_and_verify(group, offset + group_match.start(1)) if match is not None: return match self._max_tries -= 1 group_match = possible_inner_match.search(candidate, group_match.start() + 1) return None
[ "def", "_extract_inner_match", "(", "self", ",", "candidate", ",", "offset", ")", ":", "for", "possible_inner_match", "in", "_INNER_MATCHES", ":", "group_match", "=", "possible_inner_match", ".", "search", "(", "candidate", ")", "is_first_match", "=", "True", "while", "group_match", "and", "self", ".", "_max_tries", ">", "0", ":", "if", "is_first_match", ":", "# We should handle any group before this one too.", "group", "=", "self", ".", "_trim_after_first_match", "(", "_UNWANTED_END_CHAR_PATTERN", ",", "candidate", "[", ":", "group_match", ".", "start", "(", ")", "]", ")", "match", "=", "self", ".", "_parse_and_verify", "(", "group", ",", "offset", ")", "if", "match", "is", "not", "None", ":", "return", "match", "self", ".", "_max_tries", "-=", "1", "is_first_match", "=", "False", "group", "=", "self", ".", "_trim_after_first_match", "(", "_UNWANTED_END_CHAR_PATTERN", ",", "group_match", ".", "group", "(", "1", ")", ")", "match", "=", "self", ".", "_parse_and_verify", "(", "group", ",", "offset", "+", "group_match", ".", "start", "(", "1", ")", ")", "if", "match", "is", "not", "None", ":", "return", "match", "self", ".", "_max_tries", "-=", "1", "group_match", "=", "possible_inner_match", ".", "search", "(", "candidate", ",", "group_match", ".", "start", "(", ")", "+", "1", ")", "return", "None" ]
Attempts to extract a match from candidate if the whole candidate does not qualify as a match. Arguments: candidate -- The candidate text that might contain a phone number offset -- The current offset of candidate within text Returns the match found, None if none can be found
[ "Attempts", "to", "extract", "a", "match", "from", "candidate", "if", "the", "whole", "candidate", "does", "not", "qualify", "as", "a", "match", "." ]
python
train
51.333333
iotile/coretools
transport_plugins/native_ble/iotile_transport_native_ble/virtual_ble.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/native_ble/iotile_transport_native_ble/virtual_ble.py#L416-L444
def _send_rpc_response(self, *packets): """Send an RPC response. It is executed in the baBLE working thread: should not be blocking. The RPC response is notified in one or two packets depending on whether or not response data is included. If there is a temporary error sending one of the packets it is retried automatically. If there is a permanent error, it is logged and the response is abandoned. """ if len(packets) == 0: return handle, payload = packets[0] try: self._send_notification(handle, payload) except bable_interface.BaBLEException as err: if err.packet.status == 'Rejected': # If we are streaming too fast, back off and try again time.sleep(0.05) self._defer(self._send_rpc_response, list(packets)) else: self._audit('ErrorSendingRPCResponse') self._logger.exception("Error while sending RPC response, handle=%s, payload=%s", handle, payload) return if len(packets) > 1: self._defer(self._send_rpc_response, list(packets[1:]))
[ "def", "_send_rpc_response", "(", "self", ",", "*", "packets", ")", ":", "if", "len", "(", "packets", ")", "==", "0", ":", "return", "handle", ",", "payload", "=", "packets", "[", "0", "]", "try", ":", "self", ".", "_send_notification", "(", "handle", ",", "payload", ")", "except", "bable_interface", ".", "BaBLEException", "as", "err", ":", "if", "err", ".", "packet", ".", "status", "==", "'Rejected'", ":", "# If we are streaming too fast, back off and try again", "time", ".", "sleep", "(", "0.05", ")", "self", ".", "_defer", "(", "self", ".", "_send_rpc_response", ",", "list", "(", "packets", ")", ")", "else", ":", "self", ".", "_audit", "(", "'ErrorSendingRPCResponse'", ")", "self", ".", "_logger", ".", "exception", "(", "\"Error while sending RPC response, handle=%s, payload=%s\"", ",", "handle", ",", "payload", ")", "return", "if", "len", "(", "packets", ")", ">", "1", ":", "self", ".", "_defer", "(", "self", ".", "_send_rpc_response", ",", "list", "(", "packets", "[", "1", ":", "]", ")", ")" ]
Send an RPC response. It is executed in the baBLE working thread: should not be blocking. The RPC response is notified in one or two packets depending on whether or not response data is included. If there is a temporary error sending one of the packets it is retried automatically. If there is a permanent error, it is logged and the response is abandoned.
[ "Send", "an", "RPC", "response", ".", "It", "is", "executed", "in", "the", "baBLE", "working", "thread", ":", "should", "not", "be", "blocking", "." ]
python
train
39.724138
crs4/pydoop
pydoop/hdfs/path.py
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/path.py#L389-L400
def stat(path, user=None): """ Performs the equivalent of :func:`os.stat` on ``path``, returning a :class:`StatResult` object. """ host, port, path_ = split(path, user) fs = hdfs_fs.hdfs(host, port, user) retval = StatResult(fs.get_path_info(path_)) if not host: _update_stat(retval, path_) fs.close() return retval
[ "def", "stat", "(", "path", ",", "user", "=", "None", ")", ":", "host", ",", "port", ",", "path_", "=", "split", "(", "path", ",", "user", ")", "fs", "=", "hdfs_fs", ".", "hdfs", "(", "host", ",", "port", ",", "user", ")", "retval", "=", "StatResult", "(", "fs", ".", "get_path_info", "(", "path_", ")", ")", "if", "not", "host", ":", "_update_stat", "(", "retval", ",", "path_", ")", "fs", ".", "close", "(", ")", "return", "retval" ]
Performs the equivalent of :func:`os.stat` on ``path``, returning a :class:`StatResult` object.
[ "Performs", "the", "equivalent", "of", ":", "func", ":", "os", ".", "stat", "on", "path", "returning", "a", ":", "class", ":", "StatResult", "object", "." ]
python
train
29.333333
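A hedged usage sketch; the namenode address and path are invented, and the StatResult attribute names are assumed to mirror os.stat_result.

info = stat('hdfs://namenode:8020/user/alice/data.txt')
print(info.st_size, info.st_mtime)  # assumed os.stat-style attributes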
saltstack/salt
salt/utils/docker/translate/container.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/docker/translate/container.py#L44-L64
def _merge_keys(kwargs): ''' The log_config is a mixture of the CLI options --log-driver and --log-opt (which we support in Salt as log_driver and log_opt, respectively), but it must be submitted to the host config in the format {'Type': log_driver, 'Config': log_opt}. So, we need to construct this argument to be passed to the API from those two arguments. ''' log_driver = kwargs.pop('log_driver', helpers.NOTSET) log_opt = kwargs.pop('log_opt', helpers.NOTSET) if 'log_config' not in kwargs: if log_driver is not helpers.NOTSET \ or log_opt is not helpers.NOTSET: kwargs['log_config'] = { 'Type': log_driver if log_driver is not helpers.NOTSET else 'none', 'Config': log_opt if log_opt is not helpers.NOTSET else {} }
[ "def", "_merge_keys", "(", "kwargs", ")", ":", "log_driver", "=", "kwargs", ".", "pop", "(", "'log_driver'", ",", "helpers", ".", "NOTSET", ")", "log_opt", "=", "kwargs", ".", "pop", "(", "'log_opt'", ",", "helpers", ".", "NOTSET", ")", "if", "'log_config'", "not", "in", "kwargs", ":", "if", "log_driver", "is", "not", "helpers", ".", "NOTSET", "or", "log_opt", "is", "not", "helpers", ".", "NOTSET", ":", "kwargs", "[", "'log_config'", "]", "=", "{", "'Type'", ":", "log_driver", "if", "log_driver", "is", "not", "helpers", ".", "NOTSET", "else", "'none'", ",", "'Config'", ":", "log_opt", "if", "log_opt", "is", "not", "helpers", ".", "NOTSET", "else", "{", "}", "}" ]
The log_config is a mixture of the CLI options --log-driver and --log-opt (which we support in Salt as log_driver and log_opt, respectively), but it must be submitted to the host config in the format {'Type': log_driver, 'Config': log_opt}. So, we need to construct this argument to be passed to the API from those two arguments.
[ "The", "log_config", "is", "a", "mixture", "of", "the", "CLI", "options", "--", "log", "-", "driver", "and", "--", "log", "-", "opt", "(", "which", "we", "support", "in", "Salt", "as", "log_driver", "and", "log_opt", "respectively", ")", "but", "it", "must", "be", "submitted", "to", "the", "host", "config", "in", "the", "format", "{", "Type", ":", "log_driver", "Config", ":", "log_opt", "}", ".", "So", "we", "need", "to", "construct", "this", "argument", "to", "be", "passed", "to", "the", "API", "from", "those", "two", "arguments", "." ]
python
train
42.904762
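A behaviour sketch of the key merge above (helpers.NOTSET marks an argument the user did not supply).

kwargs = {'log_driver': 'json-file', 'log_opt': {'max-size': '10m'}}
_merge_keys(kwargs)
# kwargs is now:
# {'log_config': {'Type': 'json-file', 'Config': {'max-size': '10m'}}}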
keleshev/schema
schema.py
https://github.com/keleshev/schema/blob/4a0bf6f509e6b69956a8f2fd4e1c3873fc419be8/schema.py#L242-L255
def _priority(s): """Return priority for a given object.""" if type(s) in (list, tuple, set, frozenset): return ITERABLE if type(s) is dict: return DICT if issubclass(type(s), type): return TYPE if hasattr(s, "validate"): return VALIDATOR if callable(s): return CALLABLE else: return COMPARABLE
[ "def", "_priority", "(", "s", ")", ":", "if", "type", "(", "s", ")", "in", "(", "list", ",", "tuple", ",", "set", ",", "frozenset", ")", ":", "return", "ITERABLE", "if", "type", "(", "s", ")", "is", "dict", ":", "return", "DICT", "if", "issubclass", "(", "type", "(", "s", ")", ",", "type", ")", ":", "return", "TYPE", "if", "hasattr", "(", "s", ",", "\"validate\"", ")", ":", "return", "VALIDATOR", "if", "callable", "(", "s", ")", ":", "return", "CALLABLE", "else", ":", "return", "COMPARABLE" ]
Return priority for a given object.
[ "Return", "priority", "for", "a", "given", "object", "." ]
python
train
25.5
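Resolution order for a few example schema specs, following the branches above.

_priority([1, 2])       # ITERABLE
_priority({'k': int})   # DICT
_priority(int)          # TYPE
_priority(lambda x: x)  # CALLABLE
_priority('literal')    # COMPARABLE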
kgori/treeCl
treeCl/collection.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/collection.py#L489-L513
def permuted_copy(self, partition=None): """ Return a copy of the collection with all alignment columns permuted """ def take(n, iterable): return [next(iterable) for _ in range(n)] if partition is None: partition = Partition([1] * len(self)) index_tuples = partition.get_membership() alignments = [] for ix in index_tuples: concat = Concatenation(self, ix) sites = concat.alignment.get_sites() random.shuffle(sites) d = dict(zip(concat.alignment.get_names(), [iter(x) for x in zip(*sites)])) new_seqs = [[(k, ''.join(take(l, d[k]))) for k in d] for l in concat.lengths] for seqs, datatype, name in zip(new_seqs, concat.datatypes, concat.names): alignment = Alignment(seqs, datatype) alignment.name = name alignments.append(alignment) return self.__class__(records=sorted(alignments, key=lambda x: SORT_KEY(x.name)))
[ "def", "permuted_copy", "(", "self", ",", "partition", "=", "None", ")", ":", "def", "take", "(", "n", ",", "iterable", ")", ":", "return", "[", "next", "(", "iterable", ")", "for", "_", "in", "range", "(", "n", ")", "]", "if", "partition", "is", "None", ":", "partition", "=", "Partition", "(", "[", "1", "]", "*", "len", "(", "self", ")", ")", "index_tuples", "=", "partition", ".", "get_membership", "(", ")", "alignments", "=", "[", "]", "for", "ix", "in", "index_tuples", ":", "concat", "=", "Concatenation", "(", "self", ",", "ix", ")", "sites", "=", "concat", ".", "alignment", ".", "get_sites", "(", ")", "random", ".", "shuffle", "(", "sites", ")", "d", "=", "dict", "(", "zip", "(", "concat", ".", "alignment", ".", "get_names", "(", ")", ",", "[", "iter", "(", "x", ")", "for", "x", "in", "zip", "(", "*", "sites", ")", "]", ")", ")", "new_seqs", "=", "[", "[", "(", "k", ",", "''", ".", "join", "(", "take", "(", "l", ",", "d", "[", "k", "]", ")", ")", ")", "for", "k", "in", "d", "]", "for", "l", "in", "concat", ".", "lengths", "]", "for", "seqs", ",", "datatype", ",", "name", "in", "zip", "(", "new_seqs", ",", "concat", ".", "datatypes", ",", "concat", ".", "names", ")", ":", "alignment", "=", "Alignment", "(", "seqs", ",", "datatype", ")", "alignment", ".", "name", "=", "name", "alignments", ".", "append", "(", "alignment", ")", "return", "self", ".", "__class__", "(", "records", "=", "sorted", "(", "alignments", ",", "key", "=", "lambda", "x", ":", "SORT_KEY", "(", "x", ".", "name", ")", ")", ")" ]
Return a copy of the collection with all alignment columns permuted
[ "Return", "a", "copy", "of", "the", "collection", "with", "all", "alignment", "columns", "permuted" ]
python
train
40.2
ruipgil/TrackToTrip
tracktotrip/track.py
https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/track.py#L286-L303
def bounds(self, thr=0): """ Gets the bounds of this segment Returns: (float, float, float, float): Bounds, with min latitude, min longitude, max latitude and max longitude """ min_lat = float("inf") min_lon = float("inf") max_lat = -float("inf") max_lon = -float("inf") for segment in self.segments: milat, milon, malat, malon = segment.bounds(thr=thr) min_lat = min(milat, min_lat) min_lon = min(milon, min_lon) max_lat = max(malat, max_lat) max_lon = max(malon, max_lon) return min_lat, min_lon, max_lat, max_lon
[ "def", "bounds", "(", "self", ",", "thr", "=", "0", ")", ":", "min_lat", "=", "float", "(", "\"inf\"", ")", "min_lon", "=", "float", "(", "\"inf\"", ")", "max_lat", "=", "-", "float", "(", "\"inf\"", ")", "max_lon", "=", "-", "float", "(", "\"inf\"", ")", "for", "segment", "in", "self", ".", "segments", ":", "milat", ",", "milon", ",", "malat", ",", "malon", "=", "segment", ".", "bounds", "(", "thr", "=", "thr", ")", "min_lat", "=", "min", "(", "milat", ",", "min_lat", ")", "min_lon", "=", "min", "(", "milon", ",", "min_lon", ")", "max_lat", "=", "max", "(", "malat", ",", "max_lat", ")", "max_lon", "=", "max", "(", "malon", ",", "max_lon", ")", "return", "min_lat", ",", "min_lon", ",", "max_lat", ",", "max_lon" ]
Gets the bounds of this segment Returns: (float, float, float, float): Bounds, with min latitude, min longitude, max latitude and max longitude
[ "Gets", "the", "bounds", "of", "this", "segment" ]
python
train
36.611111
aouyar/PyMunin
pysysinfo/netiface.py
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/netiface.py#L96-L113
def getRoutes(self):
    """Get routing table.
    
    @return: List of routes.
    
    """
    routes = []
    try:
        out = subprocess.Popen([routeCmd, "-n"], 
                               stdout=subprocess.PIPE).communicate()[0]
    except Exception:
        # report the command that was actually executed
        raise Exception('Execution of command %s failed.' % routeCmd)
    lines = out.splitlines()
    if len(lines) > 1:
        headers = [col.lower() for col in lines[1].split()]
        for line in lines[2:]:
            routes.append(dict(zip(headers, line.split())))
    return routes
[ "def", "getRoutes", "(", "self", ")", ":", "routes", "=", "[", "]", "try", ":", "out", "=", "subprocess", ".", "Popen", "(", "[", "routeCmd", ",", "\"-n\"", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ".", "communicate", "(", ")", "[", "0", "]", "except", ":", "raise", "Exception", "(", "'Execution of command %s failed.'", "%", "ipCmd", ")", "lines", "=", "out", ".", "splitlines", "(", ")", "if", "len", "(", "lines", ")", ">", "1", ":", "headers", "=", "[", "col", ".", "lower", "(", ")", "for", "col", "in", "lines", "[", "1", "]", ".", "split", "(", ")", "]", "for", "line", "in", "lines", "[", "2", ":", "]", ":", "routes", ".", "append", "(", "dict", "(", "zip", "(", "headers", ",", "line", ".", "split", "(", ")", ")", ")", ")", "return", "routes" ]
Get routing table. @return: List of routes.
[ "Get", "routing", "table", "." ]
python
train
32.833333
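An invented `route -n` output and the parsed result, to make the header/row zipping concrete.

# Invented output:
#   Kernel IP routing table
#   Destination  Gateway      Genmask  Flags Metric Ref Use Iface
#   0.0.0.0      192.168.1.1  0.0.0.0  UG    0      0   0   eth0
# Parsed result (headers lowercased, one dict per row):
# [{'destination': '0.0.0.0', 'gateway': '192.168.1.1', 'genmask': '0.0.0.0',
#   'flags': 'UG', 'metric': '0', 'ref': '0', 'use': '0', 'iface': 'eth0'}]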
Kami/python-yubico-client
yubico_client/yubico.py
https://github.com/Kami/python-yubico-client/blob/3334b2ee1b5b996af3ef6be57a4ea52b8e45e764/yubico_client/yubico.py#L163-L219
def verify_multi(self, otp_list, max_time_window=DEFAULT_MAX_TIME_WINDOW, sl=None, timeout=None): """ Verify a provided list of OTPs. :param max_time_window: Maximum number of seconds which can pass between the first and last OTP generation for the OTP to still be considered valid. :type max_time_window: ``int`` """ # Create the OTP objects otps = [] for otp in otp_list: otps.append(OTP(otp, self.translate_otp)) if len(otp_list) < 2: raise ValueError('otp_list needs to contain at least two OTPs') device_ids = set() for otp in otps: device_ids.add(otp.device_id) # Check that all the OTPs contain same device id if len(device_ids) != 1: raise Exception('OTPs contain different device ids') # Now we verify the OTPs and save the server response for each OTP. # We need the server response, to retrieve the timestamp. # It's possible to retrieve this value locally, without querying the # server but in this case, user would need to provide his AES key. for otp in otps: response = self.verify(otp.otp, True, sl, timeout, return_response=True) if not response: return False otp.timestamp = int(response['timestamp']) count = len(otps) delta = otps[count - 1].timestamp - otps[0].timestamp # OTPs have an 8Hz timestamp counter so we need to divide it to get # seconds delta = delta / 8 if delta < 0: raise Exception('delta is smaller than zero. First OTP appears to ' 'be older than the last one') if delta > max_time_window: raise Exception(('More than %s seconds have passed between ' 'generating the first and the last OTP.') % (max_time_window)) return True
[ "def", "verify_multi", "(", "self", ",", "otp_list", ",", "max_time_window", "=", "DEFAULT_MAX_TIME_WINDOW", ",", "sl", "=", "None", ",", "timeout", "=", "None", ")", ":", "# Create the OTP objects", "otps", "=", "[", "]", "for", "otp", "in", "otp_list", ":", "otps", ".", "append", "(", "OTP", "(", "otp", ",", "self", ".", "translate_otp", ")", ")", "if", "len", "(", "otp_list", ")", "<", "2", ":", "raise", "ValueError", "(", "'otp_list needs to contain at least two OTPs'", ")", "device_ids", "=", "set", "(", ")", "for", "otp", "in", "otps", ":", "device_ids", ".", "add", "(", "otp", ".", "device_id", ")", "# Check that all the OTPs contain same device id", "if", "len", "(", "device_ids", ")", "!=", "1", ":", "raise", "Exception", "(", "'OTPs contain different device ids'", ")", "# Now we verify the OTPs and save the server response for each OTP.", "# We need the server response, to retrieve the timestamp.", "# It's possible to retrieve this value locally, without querying the", "# server but in this case, user would need to provide his AES key.", "for", "otp", "in", "otps", ":", "response", "=", "self", ".", "verify", "(", "otp", ".", "otp", ",", "True", ",", "sl", ",", "timeout", ",", "return_response", "=", "True", ")", "if", "not", "response", ":", "return", "False", "otp", ".", "timestamp", "=", "int", "(", "response", "[", "'timestamp'", "]", ")", "count", "=", "len", "(", "otps", ")", "delta", "=", "otps", "[", "count", "-", "1", "]", ".", "timestamp", "-", "otps", "[", "0", "]", ".", "timestamp", "# OTPs have an 8Hz timestamp counter so we need to divide it to get", "# seconds", "delta", "=", "delta", "/", "8", "if", "delta", "<", "0", ":", "raise", "Exception", "(", "'delta is smaller than zero. First OTP appears to '", "'be older than the last one'", ")", "if", "delta", ">", "max_time_window", ":", "raise", "Exception", "(", "(", "'More than %s seconds have passed between '", "'generating the first and the last OTP.'", ")", "%", "(", "max_time_window", ")", ")", "return", "True" ]
Verify a provided list of OTPs. :param max_time_window: Maximum number of seconds which can pass between the first and last OTP generation for the OTP to still be considered valid. :type max_time_window: ``int``
[ "Verify", "a", "provided", "list", "of", "OTPs", "." ]
python
train
36.070175
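A worked example of the timestamp arithmetic above: the YubiKey counter ticks at 8 Hz, so tick deltas divide by 8 to give seconds.

first, last = 1000, 1080    # hypothetical 8 Hz counter values from two OTPs
delta = (last - first) / 8  # -> 10.0 seconds between first and last OTP
# accepted when max_time_window >= 10, rejected otherwise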
jssimporter/python-jss
jss/contrib/mount_shares_better.py
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/contrib/mount_shares_better.py#L67-L94
def mount_share_at_path(share_path, mount_path): """Mounts a share at the specified path Args: share_path: String URL with all auth info to connect to file share. mount_path: Path to mount share on. Returns: The mount point or raises an error """ sh_url = CFURLCreateWithString(None, share_path, None) mo_url = CFURLCreateWithString(None, mount_path, None) # Set UI to reduced interaction open_options = {NetFS.kNAUIOptionKey: NetFS.kNAUIOptionNoUI} # Allow mounting sub-directories of root shares # Also specify the share should be mounted directly at (not under) # mount_path mount_options = {NetFS.kNetFSAllowSubMountsKey: True, NetFS.kNetFSMountAtMountDirKey: True} # Mount! result, output = NetFS.NetFSMountURLSync(sh_url, mo_url, None, None, open_options, mount_options, None) # Check if it worked if result != 0: raise Exception('Error mounting url "%s" at path "%s": %s' % (share_path, mount_path, output)) # Return the mountpath return str(output[0])
[ "def", "mount_share_at_path", "(", "share_path", ",", "mount_path", ")", ":", "sh_url", "=", "CFURLCreateWithString", "(", "None", ",", "share_path", ",", "None", ")", "mo_url", "=", "CFURLCreateWithString", "(", "None", ",", "mount_path", ",", "None", ")", "# Set UI to reduced interaction", "open_options", "=", "{", "NetFS", ".", "kNAUIOptionKey", ":", "NetFS", ".", "kNAUIOptionNoUI", "}", "# Allow mounting sub-directories of root shares", "# Also specify the share should be mounted directly at (not under)", "# mount_path", "mount_options", "=", "{", "NetFS", ".", "kNetFSAllowSubMountsKey", ":", "True", ",", "NetFS", ".", "kNetFSMountAtMountDirKey", ":", "True", "}", "# Mount!", "result", ",", "output", "=", "NetFS", ".", "NetFSMountURLSync", "(", "sh_url", ",", "mo_url", ",", "None", ",", "None", ",", "open_options", ",", "mount_options", ",", "None", ")", "# Check if it worked", "if", "result", "!=", "0", ":", "raise", "Exception", "(", "'Error mounting url \"%s\" at path \"%s\": %s'", "%", "(", "share_path", ",", "mount_path", ",", "output", ")", ")", "# Return the mountpath", "return", "str", "(", "output", "[", "0", "]", ")" ]
Mounts a share at the specified path Args: share_path: String URL with all auth info to connect to file share. mount_path: Path to mount share on. Returns: The mount point or raises an error
[ "Mounts", "a", "share", "at", "the", "specified", "path" ]
python
train
40.285714
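A hedged usage sketch (macOS NetFS only); the server, credentials and mount point are invented.

mount_point = mount_share_at_path(
    'smb://user:secret@fileserver.example.com/share', '/Volumes/share')
print(mount_point)  # e.g. '/Volumes/share'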
cltk/cltk
cltk/prosody/latin/syllabifier.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/syllabifier.py#L353-L372
def get_syllable_count(self, syllables: List[str]) -> int:
    """
    Counts the number of syllable groups that would occur after elision.

    Often we will want to preserve the position and separation of syllables so that they
    can be used to reconstitute a line, and apply stresses to the original word positions.
    However, we also want to be able to count the number of syllables accurately.

    :param syllables:
    :return:

    >>> syllabifier = Syllabifier()
    >>> print(syllabifier.get_syllable_count([
    ... 'Jām', 'tūm', 'c', 'au', 'sus', 'es', 'u', 'nus', 'I', 'ta', 'lo', 'rum']))
    11
    """
    tmp_syllables = copy.deepcopy(syllables)
    return len(string_utils.remove_blank_spaces(
        string_utils.move_consonant_right(tmp_syllables,
                                          self._find_solo_consonant(tmp_syllables))))
[ "def", "get_syllable_count", "(", "self", ",", "syllables", ":", "List", "[", "str", "]", ")", "->", "int", ":", "tmp_syllables", "=", "copy", ".", "deepcopy", "(", "syllables", ")", "return", "len", "(", "string_utils", ".", "remove_blank_spaces", "(", "string_utils", ".", "move_consonant_right", "(", "tmp_syllables", ",", "self", ".", "_find_solo_consonant", "(", "tmp_syllables", ")", ")", ")", ")" ]
Counts the number of syllable groups that would occur after elision.

Often we will want to preserve the position and separation of syllables so that they
can be used to reconstitute a line, and apply stresses to the original word positions.
However, we also want to be able to count the number of syllables accurately.

:param syllables:
:return:

>>> syllabifier = Syllabifier()
>>> print(syllabifier.get_syllable_count([
... 'Jām', 'tūm', 'c', 'au', 'sus', 'es', 'u', 'nus', 'I', 'ta', 'lo', 'rum']))
11
[ "Counts", "the", "number", "of", "syllable", "groups", "that", "would", "occur", "after", "elision", "." ]
python
train
45
Nic30/hwt
hwt/hdl/types/bitValFunctions.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/hdl/types/bitValFunctions.py#L17-L25
def signFix(val, width):
    """
    Convert positive int to negative int which has the same bits set
    """
    if val > 0:
        msb = 1 << (width - 1)
        if val & msb:
            val -= mask(width) + 1
    return val
[ "def", "signFix", "(", "val", ",", "width", ")", ":", "if", "val", ">", "0", ":", "msb", "=", "1", "<<", "(", "width", "-", "1", ")", "if", "val", "&", "msb", ":", "val", "-=", "mask", "(", "width", ")", "+", "1", "return", "val" ]
Convert positive int to negative int which has the same bits set
[ "Convert", "positive", "int", "to", "negative", "int", "which", "has", "the", "same", "bits", "set" ]
python
test
24
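Worked examples for signFix with width=4, assuming the mask helper returns (1 << width) - 1, i.e. mask(4) == 15.

signFix(0b0111, 4)  # -> 7: sign bit clear, value unchanged
signFix(0b1111, 4)  # -> -1: 15 - 16
signFix(0b1000, 4)  # -> -8: 8 - 16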
numenta/nupic
src/nupic/swarming/hypersearch_v2.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L134-L308
def update(self, modelID, modelParams, modelParamsHash, metricResult, completed, completionReason, matured, numRecords): """ Insert a new entry or update an existing one. If this is an update of an existing entry, then modelParams will be None Parameters: -------------------------------------------------------------------- modelID: globally unique modelID of this model modelParams: params dict for this model, or None if this is just an update of a model that it already previously reported on. See the comments for the createModels() method for a description of this dict. modelParamsHash: hash of the modelParams dict, generated by the worker that put it into the model database. metricResult: value on the optimizeMetric for this model. May be None if we have no results yet. completed: True if the model has completed evaluation, False if it is still running (and these are online results) completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates matured: True if this model has matured numRecords: Number of records that have been processed so far by this model. retval: Canonicalized result on the optimize metric """ # The modelParamsHash must always be provided - it can change after a # model is inserted into the models table if it got detected as an # orphan assert (modelParamsHash is not None) # We consider a model metricResult as "final" if it has completed or # matured. By default, assume anything that has completed has matured if completed: matured = True # Get the canonicalized optimize metric results. For this metric, lower # is always better if metricResult is not None and matured and \ completionReason in [ClientJobsDAO.CMPL_REASON_EOF, ClientJobsDAO.CMPL_REASON_STOPPED]: # Canonicalize the error score so that lower is better if self._hsObj._maximize: errScore = -1 * metricResult else: errScore = metricResult if errScore < self._bestResult: self._bestResult = errScore self._bestModelID = modelID self._hsObj.logger.info("New best model after %d evaluations: errScore " "%g on model %s" % (len(self._allResults), self._bestResult, self._bestModelID)) else: errScore = numpy.inf # If this model completed with an unacceptable completion reason, set the # errScore to infinite and essentially make this model invisible to # further queries if completed and completionReason in [ClientJobsDAO.CMPL_REASON_ORPHAN]: errScore = numpy.inf hidden = True else: hidden = False # Update our set of erred models and completed models. These are used # to determine if we should abort the search because of too many errors if completed: self._completedModels.add(modelID) self._numCompletedModels = len(self._completedModels) if completionReason == ClientJobsDAO.CMPL_REASON_ERROR: self._errModels.add(modelID) self._numErrModels = len(self._errModels) # Are we creating a new entry? wasHidden = False if modelID not in self._modelIDToIdx: assert (modelParams is not None) entry = dict(modelID=modelID, modelParams=modelParams, modelParamsHash=modelParamsHash, errScore=errScore, completed=completed, matured=matured, numRecords=numRecords, hidden=hidden) self._allResults.append(entry) entryIdx = len(self._allResults) - 1 self._modelIDToIdx[modelID] = entryIdx self._paramsHashToIndexes[modelParamsHash] = entryIdx swarmId = modelParams['particleState']['swarmId'] if not hidden: # Update the list of particles in each swarm if swarmId in self._swarmIdToIndexes: self._swarmIdToIndexes[swarmId].append(entryIdx) else: self._swarmIdToIndexes[swarmId] = [entryIdx] # Update number of particles at each generation in this swarm genIdx = modelParams['particleState']['genIdx'] numPsEntry = self._swarmNumParticlesPerGeneration.get(swarmId, [0]) while genIdx >= len(numPsEntry): numPsEntry.append(0) numPsEntry[genIdx] += 1 self._swarmNumParticlesPerGeneration[swarmId] = numPsEntry # Replacing an existing one else: entryIdx = self._modelIDToIdx.get(modelID, None) assert (entryIdx is not None) entry = self._allResults[entryIdx] wasHidden = entry['hidden'] # If the paramsHash changed, note that. This can happen for orphaned # models if entry['modelParamsHash'] != modelParamsHash: self._paramsHashToIndexes.pop(entry['modelParamsHash']) self._paramsHashToIndexes[modelParamsHash] = entryIdx entry['modelParamsHash'] = modelParamsHash # Get the model params, swarmId, and genIdx modelParams = entry['modelParams'] swarmId = modelParams['particleState']['swarmId'] genIdx = modelParams['particleState']['genIdx'] # If this particle just became hidden, remove it from our swarm counts if hidden and not wasHidden: assert (entryIdx in self._swarmIdToIndexes[swarmId]) self._swarmIdToIndexes[swarmId].remove(entryIdx) self._swarmNumParticlesPerGeneration[swarmId][genIdx] -= 1 # Update the entry for the latest info entry['errScore'] = errScore entry['completed'] = completed entry['matured'] = matured entry['numRecords'] = numRecords entry['hidden'] = hidden # Update the particle best errScore particleId = modelParams['particleState']['id'] genIdx = modelParams['particleState']['genIdx'] if matured and not hidden: (oldResult, pos) = self._particleBest.get(particleId, (numpy.inf, None)) if errScore < oldResult: pos = Particle.getPositionFromState(modelParams['particleState']) self._particleBest[particleId] = (errScore, pos) # Update the particle latest generation index prevGenIdx = self._particleLatestGenIdx.get(particleId, -1) if not hidden and genIdx > prevGenIdx: self._particleLatestGenIdx[particleId] = genIdx elif hidden and not wasHidden and genIdx == prevGenIdx: self._particleLatestGenIdx[particleId] = genIdx-1 # Update the swarm best score if not hidden: swarmId = modelParams['particleState']['swarmId'] if not swarmId in self._swarmBestOverall: self._swarmBestOverall[swarmId] = [] bestScores = self._swarmBestOverall[swarmId] while genIdx >= len(bestScores): bestScores.append((None, numpy.inf)) if errScore < bestScores[genIdx][1]: bestScores[genIdx] = (modelID, errScore) # Update the self._modifiedSwarmGens flags to support the # getMaturedSwarmGenerations() call. if not hidden: key = (swarmId, genIdx) if not key in self._maturedSwarmGens: self._modifiedSwarmGens.add(key) return errScore
[ "def", "update", "(", "self", ",", "modelID", ",", "modelParams", ",", "modelParamsHash", ",", "metricResult", ",", "completed", ",", "completionReason", ",", "matured", ",", "numRecords", ")", ":", "# The modelParamsHash must always be provided - it can change after a", "# model is inserted into the models table if it got detected as an", "# orphan", "assert", "(", "modelParamsHash", "is", "not", "None", ")", "# We consider a model metricResult as \"final\" if it has completed or", "# matured. By default, assume anything that has completed has matured", "if", "completed", ":", "matured", "=", "True", "# Get the canonicalized optimize metric results. For this metric, lower", "# is always better", "if", "metricResult", "is", "not", "None", "and", "matured", "and", "completionReason", "in", "[", "ClientJobsDAO", ".", "CMPL_REASON_EOF", ",", "ClientJobsDAO", ".", "CMPL_REASON_STOPPED", "]", ":", "# Canonicalize the error score so that lower is better", "if", "self", ".", "_hsObj", ".", "_maximize", ":", "errScore", "=", "-", "1", "*", "metricResult", "else", ":", "errScore", "=", "metricResult", "if", "errScore", "<", "self", ".", "_bestResult", ":", "self", ".", "_bestResult", "=", "errScore", "self", ".", "_bestModelID", "=", "modelID", "self", ".", "_hsObj", ".", "logger", ".", "info", "(", "\"New best model after %d evaluations: errScore \"", "\"%g on model %s\"", "%", "(", "len", "(", "self", ".", "_allResults", ")", ",", "self", ".", "_bestResult", ",", "self", ".", "_bestModelID", ")", ")", "else", ":", "errScore", "=", "numpy", ".", "inf", "# If this model completed with an unacceptable completion reason, set the", "# errScore to infinite and essentially make this model invisible to", "# further queries", "if", "completed", "and", "completionReason", "in", "[", "ClientJobsDAO", ".", "CMPL_REASON_ORPHAN", "]", ":", "errScore", "=", "numpy", ".", "inf", "hidden", "=", "True", "else", ":", "hidden", "=", "False", "# Update our set of erred models and completed models. 
These are used", "# to determine if we should abort the search because of too many errors", "if", "completed", ":", "self", ".", "_completedModels", ".", "add", "(", "modelID", ")", "self", ".", "_numCompletedModels", "=", "len", "(", "self", ".", "_completedModels", ")", "if", "completionReason", "==", "ClientJobsDAO", ".", "CMPL_REASON_ERROR", ":", "self", ".", "_errModels", ".", "add", "(", "modelID", ")", "self", ".", "_numErrModels", "=", "len", "(", "self", ".", "_errModels", ")", "# Are we creating a new entry?", "wasHidden", "=", "False", "if", "modelID", "not", "in", "self", ".", "_modelIDToIdx", ":", "assert", "(", "modelParams", "is", "not", "None", ")", "entry", "=", "dict", "(", "modelID", "=", "modelID", ",", "modelParams", "=", "modelParams", ",", "modelParamsHash", "=", "modelParamsHash", ",", "errScore", "=", "errScore", ",", "completed", "=", "completed", ",", "matured", "=", "matured", ",", "numRecords", "=", "numRecords", ",", "hidden", "=", "hidden", ")", "self", ".", "_allResults", ".", "append", "(", "entry", ")", "entryIdx", "=", "len", "(", "self", ".", "_allResults", ")", "-", "1", "self", ".", "_modelIDToIdx", "[", "modelID", "]", "=", "entryIdx", "self", ".", "_paramsHashToIndexes", "[", "modelParamsHash", "]", "=", "entryIdx", "swarmId", "=", "modelParams", "[", "'particleState'", "]", "[", "'swarmId'", "]", "if", "not", "hidden", ":", "# Update the list of particles in each swarm", "if", "swarmId", "in", "self", ".", "_swarmIdToIndexes", ":", "self", ".", "_swarmIdToIndexes", "[", "swarmId", "]", ".", "append", "(", "entryIdx", ")", "else", ":", "self", ".", "_swarmIdToIndexes", "[", "swarmId", "]", "=", "[", "entryIdx", "]", "# Update number of particles at each generation in this swarm", "genIdx", "=", "modelParams", "[", "'particleState'", "]", "[", "'genIdx'", "]", "numPsEntry", "=", "self", ".", "_swarmNumParticlesPerGeneration", ".", "get", "(", "swarmId", ",", "[", "0", "]", ")", "while", "genIdx", ">=", "len", "(", "numPsEntry", ")", ":", "numPsEntry", ".", "append", "(", "0", ")", "numPsEntry", "[", "genIdx", "]", "+=", "1", "self", ".", "_swarmNumParticlesPerGeneration", "[", "swarmId", "]", "=", "numPsEntry", "# Replacing an existing one", "else", ":", "entryIdx", "=", "self", ".", "_modelIDToIdx", ".", "get", "(", "modelID", ",", "None", ")", "assert", "(", "entryIdx", "is", "not", "None", ")", "entry", "=", "self", ".", "_allResults", "[", "entryIdx", "]", "wasHidden", "=", "entry", "[", "'hidden'", "]", "# If the paramsHash changed, note that. 
This can happen for orphaned", "# models", "if", "entry", "[", "'modelParamsHash'", "]", "!=", "modelParamsHash", ":", "self", ".", "_paramsHashToIndexes", ".", "pop", "(", "entry", "[", "'modelParamsHash'", "]", ")", "self", ".", "_paramsHashToIndexes", "[", "modelParamsHash", "]", "=", "entryIdx", "entry", "[", "'modelParamsHash'", "]", "=", "modelParamsHash", "# Get the model params, swarmId, and genIdx", "modelParams", "=", "entry", "[", "'modelParams'", "]", "swarmId", "=", "modelParams", "[", "'particleState'", "]", "[", "'swarmId'", "]", "genIdx", "=", "modelParams", "[", "'particleState'", "]", "[", "'genIdx'", "]", "# If this particle just became hidden, remove it from our swarm counts", "if", "hidden", "and", "not", "wasHidden", ":", "assert", "(", "entryIdx", "in", "self", ".", "_swarmIdToIndexes", "[", "swarmId", "]", ")", "self", ".", "_swarmIdToIndexes", "[", "swarmId", "]", ".", "remove", "(", "entryIdx", ")", "self", ".", "_swarmNumParticlesPerGeneration", "[", "swarmId", "]", "[", "genIdx", "]", "-=", "1", "# Update the entry for the latest info", "entry", "[", "'errScore'", "]", "=", "errScore", "entry", "[", "'completed'", "]", "=", "completed", "entry", "[", "'matured'", "]", "=", "matured", "entry", "[", "'numRecords'", "]", "=", "numRecords", "entry", "[", "'hidden'", "]", "=", "hidden", "# Update the particle best errScore", "particleId", "=", "modelParams", "[", "'particleState'", "]", "[", "'id'", "]", "genIdx", "=", "modelParams", "[", "'particleState'", "]", "[", "'genIdx'", "]", "if", "matured", "and", "not", "hidden", ":", "(", "oldResult", ",", "pos", ")", "=", "self", ".", "_particleBest", ".", "get", "(", "particleId", ",", "(", "numpy", ".", "inf", ",", "None", ")", ")", "if", "errScore", "<", "oldResult", ":", "pos", "=", "Particle", ".", "getPositionFromState", "(", "modelParams", "[", "'particleState'", "]", ")", "self", ".", "_particleBest", "[", "particleId", "]", "=", "(", "errScore", ",", "pos", ")", "# Update the particle latest generation index", "prevGenIdx", "=", "self", ".", "_particleLatestGenIdx", ".", "get", "(", "particleId", ",", "-", "1", ")", "if", "not", "hidden", "and", "genIdx", ">", "prevGenIdx", ":", "self", ".", "_particleLatestGenIdx", "[", "particleId", "]", "=", "genIdx", "elif", "hidden", "and", "not", "wasHidden", "and", "genIdx", "==", "prevGenIdx", ":", "self", ".", "_particleLatestGenIdx", "[", "particleId", "]", "=", "genIdx", "-", "1", "# Update the swarm best score", "if", "not", "hidden", ":", "swarmId", "=", "modelParams", "[", "'particleState'", "]", "[", "'swarmId'", "]", "if", "not", "swarmId", "in", "self", ".", "_swarmBestOverall", ":", "self", ".", "_swarmBestOverall", "[", "swarmId", "]", "=", "[", "]", "bestScores", "=", "self", ".", "_swarmBestOverall", "[", "swarmId", "]", "while", "genIdx", ">=", "len", "(", "bestScores", ")", ":", "bestScores", ".", "append", "(", "(", "None", ",", "numpy", ".", "inf", ")", ")", "if", "errScore", "<", "bestScores", "[", "genIdx", "]", "[", "1", "]", ":", "bestScores", "[", "genIdx", "]", "=", "(", "modelID", ",", "errScore", ")", "# Update the self._modifiedSwarmGens flags to support the", "# getMaturedSwarmGenerations() call.", "if", "not", "hidden", ":", "key", "=", "(", "swarmId", ",", "genIdx", ")", "if", "not", "key", "in", "self", ".", "_maturedSwarmGens", ":", "self", ".", "_modifiedSwarmGens", ".", "add", "(", "key", ")", "return", "errScore" ]
Insert a new entry or update an existing one. If this is an update of an existing entry, then modelParams will be None Parameters: -------------------------------------------------------------------- modelID: globally unique modelID of this model modelParams: params dict for this model, or None if this is just an update of a model that it already previously reported on. See the comments for the createModels() method for a description of this dict. modelParamsHash: hash of the modelParams dict, generated by the worker that put it into the model database. metricResult: value on the optimizeMetric for this model. May be None if we have no results yet. completed: True if the model has completed evaluation, False if it is still running (and these are online results) completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates matured: True if this model has matured numRecords: Number of records that have been processed so far by this model. retval: Canonicalized result on the optimize metric
[ "Insert", "a", "new", "entry", "or", "update", "an", "existing", "one", ".", "If", "this", "is", "an", "update", "of", "an", "existing", "entry", "then", "modelParams", "will", "be", "None" ]
python
valid
40.725714
lumpywizard/check_email_status
check_email_status/__init__.py
https://github.com/lumpywizard/check_email_status/blob/3a4c5dc42ada61325d5d9baad9e2b1b78084ee2f/check_email_status/__init__.py#L6-L59
def check_email_status(mx_resolver, recipient_address, sender_address, smtp_timeout=10, helo_hostname=None): """ Checks if an email might be valid by getting the status from the SMTP server. :param mx_resolver: MXResolver :param recipient_address: string :param sender_address: string :param smtp_timeout: integer :param helo_hostname: string :return: dict """ domain = recipient_address[recipient_address.find('@') + 1:] if helo_hostname is None: helo_hostname = domain ret = {'status': 101, 'extended_status': None, 'message': "The server is unable to connect."} records = [] try: records = mx_resolver.get_mx_records(helo_hostname) except socket.gaierror: ret['status'] = 512 ret['extended_status'] = "5.1.2 Domain name address resolution failed in MX lookup." smtp = smtplib.SMTP(timeout=smtp_timeout) for mx in records: try: connection_status, connection_message = smtp.connect(mx.exchange) if connection_status == 220: smtp.helo(domain) smtp.mail(sender_address) status, message = smtp.rcpt(recipient_address) ret['status'] = status pattern = re.compile('(\d+\.\d+\.\d+)') matches = re.match(pattern, message) if matches: ret['extended_status'] = matches.group(1) ret['message'] = message smtp.quit() break except smtplib.SMTPConnectError: ret['status'] = 111 ret['message'] = "Connection refused or unable to open an SMTP stream." except smtplib.SMTPServerDisconnected: ret['status'] = 111 ret['extended_status'] = "SMTP Server disconnected" except socket.gaierror: ret['status'] = 512 ret['extended_status'] = "5.1.2 Domain name address resolution failed." return ret
[ "def", "check_email_status", "(", "mx_resolver", ",", "recipient_address", ",", "sender_address", ",", "smtp_timeout", "=", "10", ",", "helo_hostname", "=", "None", ")", ":", "domain", "=", "recipient_address", "[", "recipient_address", ".", "find", "(", "'@'", ")", "+", "1", ":", "]", "if", "helo_hostname", "is", "None", ":", "helo_hostname", "=", "domain", "ret", "=", "{", "'status'", ":", "101", ",", "'extended_status'", ":", "None", ",", "'message'", ":", "\"The server is unable to connect.\"", "}", "records", "=", "[", "]", "try", ":", "records", "=", "mx_resolver", ".", "get_mx_records", "(", "helo_hostname", ")", "except", "socket", ".", "gaierror", ":", "ret", "[", "'status'", "]", "=", "512", "ret", "[", "'extended_status'", "]", "=", "\"5.1.2 Domain name address resolution failed in MX lookup.\"", "smtp", "=", "smtplib", ".", "SMTP", "(", "timeout", "=", "smtp_timeout", ")", "for", "mx", "in", "records", ":", "try", ":", "connection_status", ",", "connection_message", "=", "smtp", ".", "connect", "(", "mx", ".", "exchange", ")", "if", "connection_status", "==", "220", ":", "smtp", ".", "helo", "(", "domain", ")", "smtp", ".", "mail", "(", "sender_address", ")", "status", ",", "message", "=", "smtp", ".", "rcpt", "(", "recipient_address", ")", "ret", "[", "'status'", "]", "=", "status", "pattern", "=", "re", ".", "compile", "(", "'(\\d+\\.\\d+\\.\\d+)'", ")", "matches", "=", "re", ".", "match", "(", "pattern", ",", "message", ")", "if", "matches", ":", "ret", "[", "'extended_status'", "]", "=", "matches", ".", "group", "(", "1", ")", "ret", "[", "'message'", "]", "=", "message", "smtp", ".", "quit", "(", ")", "break", "except", "smtplib", ".", "SMTPConnectError", ":", "ret", "[", "'status'", "]", "=", "111", "ret", "[", "'message'", "]", "=", "\"Connection refused or unable to open an SMTP stream.\"", "except", "smtplib", ".", "SMTPServerDisconnected", ":", "ret", "[", "'status'", "]", "=", "111", "ret", "[", "'extended_status'", "]", "=", "\"SMTP Server disconnected\"", "except", "socket", ".", "gaierror", ":", "ret", "[", "'status'", "]", "=", "512", "ret", "[", "'extended_status'", "]", "=", "\"5.1.2 Domain name address resolution failed.\"", "return", "ret" ]
Checks if an email might be valid by getting the status from the SMTP server. :param mx_resolver: MXResolver :param recipient_address: string :param sender_address: string :param smtp_timeout: integer :param helo_hostname: string :return: dict
[ "Checks", "if", "an", "email", "might", "be", "valid", "by", "getting", "the", "status", "from", "the", "SMTP", "server", "." ]
python
train
35.814815
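A hedged usage sketch for the record above. StubMXResolver is a made-up stand-in, not part of the package: any object with a get_mx_records(domain) method returning records that expose an .exchange attribute satisfies the interface the function expects, and a real deployment would back it with DNS lookups.

class StubMXResolver:
    # Toy resolver returning a fixed MX host; real code would query DNS.
    class _MX:
        def __init__(self, exchange):
            self.exchange = exchange

    def get_mx_records(self, domain):
        return [self._MX('mail.example.com')]

result = check_email_status(StubMXResolver(),
                            '[email protected]',
                            '[email protected]')
print(result['status'], result['extended_status'], result['message'])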
partofthething/ace
ace/samples/breiman85.py
https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/samples/breiman85.py#L39-L61
def run_breiman2(): """Run Breiman's other sample problem.""" x, y = build_sample_ace_problem_breiman2(500) ace_solver = ace.ACESolver() ace_solver.specify_data_set(x, y) ace_solver.solve() try: plt = ace.plot_transforms(ace_solver, None) except ImportError: pass plt.subplot(1, 2, 1) phi = numpy.sin(2.0 * numpy.pi * x[0]) plt.plot(x[0], phi, label='analytic') plt.legend() plt.subplot(1, 2, 2) y = numpy.exp(phi) plt.plot(y, phi, label='analytic') plt.legend(loc='lower right') # plt.show() plt.savefig('no_noise_linear_x.png') return ace_solver
[ "def", "run_breiman2", "(", ")", ":", "x", ",", "y", "=", "build_sample_ace_problem_breiman2", "(", "500", ")", "ace_solver", "=", "ace", ".", "ACESolver", "(", ")", "ace_solver", ".", "specify_data_set", "(", "x", ",", "y", ")", "ace_solver", ".", "solve", "(", ")", "try", ":", "plt", "=", "ace", ".", "plot_transforms", "(", "ace_solver", ",", "None", ")", "except", "ImportError", ":", "pass", "plt", ".", "subplot", "(", "1", ",", "2", ",", "1", ")", "phi", "=", "numpy", ".", "sin", "(", "2.0", "*", "numpy", ".", "pi", "*", "x", "[", "0", "]", ")", "plt", ".", "plot", "(", "x", "[", "0", "]", ",", "phi", ",", "label", "=", "'analytic'", ")", "plt", ".", "legend", "(", ")", "plt", ".", "subplot", "(", "1", ",", "2", ",", "2", ")", "y", "=", "numpy", ".", "exp", "(", "phi", ")", "plt", ".", "plot", "(", "y", ",", "phi", ",", "label", "=", "'analytic'", ")", "plt", ".", "legend", "(", "loc", "=", "'lower right'", ")", "# plt.show()", "plt", ".", "savefig", "(", "'no_noise_linear_x.png'", ")", "return", "ace_solver" ]
Run Breiman's other sample problem.
[ "Run", "Breiman", "s", "other", "sample", "problem", "." ]
python
train
26.73913
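Worth flagging in the sample above: if ace.plot_transforms raises ImportError (matplotlib missing), plt is never bound and the following plt.subplot call raises NameError. A more defensive version of that guard, sketched here as a suggestion rather than the project's actual code:

try:
    plt = ace.plot_transforms(ace_solver, None)
except ImportError:
    return ace_solver  # matplotlib unavailable; skip all plotting below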
tensorflow/tensor2tensor
tensor2tensor/models/lstm.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/lstm.py#L305-L325
def lstm_seq2seq_internal_attention_bid_encoder(inputs, targets, hparams, train): """LSTM seq2seq model with attention, main step used for training.""" with tf.variable_scope("lstm_seq2seq_attention_bid_encoder"): inputs_length = common_layers.length_from_embedding(inputs) # Flatten inputs. inputs = common_layers.flatten4d3d(inputs) # LSTM encoder. encoder_outputs, final_encoder_state = lstm_bid_encoder( inputs, inputs_length, hparams, train, "encoder") # LSTM decoder with attention shifted_targets = common_layers.shift_right(targets) # Add 1 to account for the padding added to the left from shift_right targets_length = common_layers.length_from_embedding(shifted_targets) + 1 hparams_decoder = copy.copy(hparams) hparams_decoder.hidden_size = 2 * hparams.hidden_size decoder_outputs = lstm_attention_decoder( common_layers.flatten4d3d(shifted_targets), hparams_decoder, train, "decoder", final_encoder_state, encoder_outputs, inputs_length, targets_length) return tf.expand_dims(decoder_outputs, axis=2)
[ "def", "lstm_seq2seq_internal_attention_bid_encoder", "(", "inputs", ",", "targets", ",", "hparams", ",", "train", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"lstm_seq2seq_attention_bid_encoder\"", ")", ":", "inputs_length", "=", "common_layers", ".", "length_from_embedding", "(", "inputs", ")", "# Flatten inputs.", "inputs", "=", "common_layers", ".", "flatten4d3d", "(", "inputs", ")", "# LSTM encoder.", "encoder_outputs", ",", "final_encoder_state", "=", "lstm_bid_encoder", "(", "inputs", ",", "inputs_length", ",", "hparams", ",", "train", ",", "\"encoder\"", ")", "# LSTM decoder with attention", "shifted_targets", "=", "common_layers", ".", "shift_right", "(", "targets", ")", "# Add 1 to account for the padding added to the left from shift_right", "targets_length", "=", "common_layers", ".", "length_from_embedding", "(", "shifted_targets", ")", "+", "1", "hparams_decoder", "=", "copy", ".", "copy", "(", "hparams", ")", "hparams_decoder", ".", "hidden_size", "=", "2", "*", "hparams", ".", "hidden_size", "decoder_outputs", "=", "lstm_attention_decoder", "(", "common_layers", ".", "flatten4d3d", "(", "shifted_targets", ")", ",", "hparams_decoder", ",", "train", ",", "\"decoder\"", ",", "final_encoder_state", ",", "encoder_outputs", ",", "inputs_length", ",", "targets_length", ")", "return", "tf", ".", "expand_dims", "(", "decoder_outputs", ",", "axis", "=", "2", ")" ]
LSTM seq2seq model with attention, main step used for training.
[ "LSTM", "seq2seq", "model", "with", "attention", "main", "step", "used", "for", "training", "." ]
python
train
53.714286
codito/pyqtkeybind
pyqtkeybind/x11/keybindutil.py
https://github.com/codito/pyqtkeybind/blob/fbfedc654c4f77778d565b52ac76470631036255/pyqtkeybind/x11/keybindutil.py#L177-L186
def get_keyboard_mapping_unchecked(conn): """ Return an unchecked keyboard mapping cookie that can be used to fetch the table of keysyms in the current X environment. :rtype: xcb.xproto.GetKeyboardMappingCookie """ mn, mx = get_min_max_keycode() return conn.core.GetKeyboardMappingUnchecked(mn, mx - mn + 1)
[ "def", "get_keyboard_mapping_unchecked", "(", "conn", ")", ":", "mn", ",", "mx", "=", "get_min_max_keycode", "(", ")", "return", "conn", ".", "core", ".", "GetKeyboardMappingUnchecked", "(", "mn", ",", "mx", "-", "mn", "+", "1", ")" ]
Return an unchecked keyboard mapping cookie that can be used to fetch the table of keysyms in the current X environment. :rtype: xcb.xproto.GetKeyboardMappingCookie
[ "Return", "an", "unchecked", "keyboard", "mapping", "cookie", "that", "can", "be", "used", "to", "fetch", "the", "table", "of", "keysyms", "in", "the", "current", "X", "environment", "." ]
python
train
32.8
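A hedged sketch of driving the cookie above, assuming an X11 display is reachable and the xcffib bindings the library is written against are installed:

import xcffib
import xcffib.xproto

conn = xcffib.connect()                        # connects to $DISPLAY
cookie = get_keyboard_mapping_unchecked(conn)
reply = cookie.reply()                         # unchecked cookies resolve on .reply()
print(reply.keysyms_per_keycode)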
atlassian-api/atlassian-python-api
atlassian/jira.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/jira.py#L1021-L1035
def tempo_account_get_customers(self, query=None, count_accounts=None): """ Gets all or some Attribute whose key or name contain a specific substring. Attributes can be a Category or Customer. :param query: OPTIONAL: query for search :param count_accounts: bool OPTIONAL: provide how many associated Accounts with Customer :return: list of customers """ params = {} if query is not None: params['query'] = query if count_accounts is not None: params['countAccounts'] = count_accounts url = 'rest/tempo-accounts/1/customer' return self.get(url, params=params)
[ "def", "tempo_account_get_customers", "(", "self", ",", "query", "=", "None", ",", "count_accounts", "=", "None", ")", ":", "params", "=", "{", "}", "if", "query", "is", "not", "None", ":", "params", "[", "'query'", "]", "=", "query", "if", "count_accounts", "is", "not", "None", ":", "params", "[", "'countAccounts'", "]", "=", "count_accounts", "url", "=", "'rest/tempo-accounts/1/customer'", "return", "self", ".", "get", "(", "url", ",", "params", "=", "params", ")" ]
Gets all or some Attribute whose key or name contain a specific substring. Attributes can be a Category or Customer. :param query: OPTIONAL: query for search :param count_accounts: bool OPTIONAL: provide how many associated Accounts with Customer :return: list of customers
[ "Gets", "all", "or", "some", "Attribute", "whose", "key", "or", "name", "contain", "a", "specific", "substring", ".", "Attributes", "can", "be", "a", "Category", "or", "Customer", ".", ":", "param", "query", ":", "OPTIONAL", ":", "query", "for", "search", ":", "param", "count_accounts", ":", "bool", "OPTIONAL", ":", "provide", "how", "many", "associated", "Accounts", "with", "Customer", ":", "return", ":", "list", "of", "customers" ]
python
train
44.266667
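A hypothetical call site for the method above; the connection details are placeholders, and the Tempo plugin must be installed on the Jira server for the rest/tempo-accounts endpoint to exist:

from atlassian import Jira

jira = Jira(url='https://jira.example.com', username='bot', password='secret')
customers = jira.tempo_account_get_customers(query='Acme', count_accounts=True)
for customer in customers:
    print(customer.get('name'))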
noahbenson/neuropythy
neuropythy/geometry/util.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/geometry/util.py#L216-L243
def line_intersection_2D(abarg, cdarg): ''' line_intersection((a, b), (c, d)) yields the intersection point between the lines that pass through the given pairs of points. If any lines are parallel, (numpy.nan, numpy.nan) is returned; note that a, b, c, and d can all be 2 x n matrices of x and y coordinate row-vectors. ''' ((x1,y1),(x2,y2)) = abarg ((x3,y3),(x4,y4)) = cdarg dx12 = (x1 - x2) dx34 = (x3 - x4) dy12 = (y1 - y2) dy34 = (y3 - y4) denom = dx12*dy34 - dy12*dx34 unit = np.isclose(denom, 0) if unit is True: return (np.nan, np.nan) denom = unit + denom q12 = (x1*y2 - y1*x2) / denom q34 = (x3*y4 - y3*x4) / denom xi = q12*dx34 - q34*dx12 yi = q12*dy34 - q34*dy12 if unit is False: return (xi, yi) elif unit is True: return (np.nan, np.nan) else: xi = np.asarray(xi) yi = np.asarray(yi) xi[unit] = np.nan yi[unit] = np.nan return (xi, yi)
[ "def", "line_intersection_2D", "(", "abarg", ",", "cdarg", ")", ":", "(", "(", "x1", ",", "y1", ")", ",", "(", "x2", ",", "y2", ")", ")", "=", "abarg", "(", "(", "x3", ",", "y3", ")", ",", "(", "x4", ",", "y4", ")", ")", "=", "cdarg", "dx12", "=", "(", "x1", "-", "x2", ")", "dx34", "=", "(", "x3", "-", "x4", ")", "dy12", "=", "(", "y1", "-", "y2", ")", "dy34", "=", "(", "y3", "-", "y4", ")", "denom", "=", "dx12", "*", "dy34", "-", "dy12", "*", "dx34", "unit", "=", "np", ".", "isclose", "(", "denom", ",", "0", ")", "if", "unit", "is", "True", ":", "return", "(", "np", ".", "nan", ",", "np", ".", "nan", ")", "denom", "=", "unit", "+", "denom", "q12", "=", "(", "x1", "*", "y2", "-", "y1", "*", "x2", ")", "/", "denom", "q34", "=", "(", "x3", "*", "y4", "-", "y3", "*", "x4", ")", "/", "denom", "xi", "=", "q12", "*", "dx34", "-", "q34", "*", "dx12", "yi", "=", "q12", "*", "dy34", "-", "q34", "*", "dy12", "if", "unit", "is", "False", ":", "return", "(", "xi", ",", "yi", ")", "elif", "unit", "is", "True", ":", "return", "(", "np", ".", "nan", ",", "np", ".", "nan", ")", "else", ":", "xi", "=", "np", ".", "asarray", "(", "xi", ")", "yi", "=", "np", ".", "asarray", "(", "yi", ")", "xi", "[", "unit", "]", "=", "np", ".", "nan", "yi", "[", "unit", "]", "=", "np", ".", "nan", "return", "(", "xi", ",", "yi", ")" ]
line_intersection((a, b), (c, d)) yields the intersection point between the lines that pass through the given pairs of points. If any lines are parallel, (numpy.nan, numpy.nan) is returned; note that a, b, c, and d can all be 2 x n matrices of x and y coordinate row-vectors.
[ "line_intersection", "((", "a", "b", ")", "(", "c", "d", "))", "yields", "the", "intersection", "point", "between", "the", "lines", "that", "pass", "through", "the", "given", "pairs", "of", "points", ".", "If", "any", "lines", "are", "parallel", "(", "numpy", ".", "nan", "numpy", ".", "nan", ")", "is", "returned", ";", "note", "that", "a", "b", "c", "and", "d", "can", "all", "be", "2", "x", "n", "matrices", "of", "x", "and", "y", "coordinate", "row", "-", "vectors", "." ]
python
train
34
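A quick runnable check of the function above, assuming numpy is imported as np as in the function body; the lines y = x and y = 1 - x should cross at (0.5, 0.5):

import numpy as np

a = (np.array([0.0]), np.array([0.0]))  # first point on line 1, as (x, y)
b = (np.array([1.0]), np.array([1.0]))
c = (np.array([0.0]), np.array([1.0]))  # first point on line 2
d = (np.array([1.0]), np.array([0.0]))
xi, yi = line_intersection_2D((a, b), (c, d))
print(xi, yi)  # [0.5] [0.5]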
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/lib/irunner.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/lib/irunner.py#L50-L81
def pexpect_monkeypatch(): """Patch pexpect to prevent unhandled exceptions at VM teardown. Calling this function will monkeypatch the pexpect.spawn class and modify its __del__ method to make it more robust in the face of failures that can occur if it is called when the Python VM is shutting down. Since Python may fire __del__ methods arbitrarily late, it's possible for them to execute during the teardown of the Python VM itself. At this point, various builtin modules have been reset to None. Thus, the call to self.close() will trigger an exception because it tries to call os.close(), and os is now None. """ if pexpect.__version__[:3] >= '2.2': # No need to patch, fix is already the upstream version. return def __del__(self): """This makes sure that no system resources are left open. Python only garbage collects Python objects. OS file descriptors are not Python objects, so they must be handled explicitly. If the child file descriptor was opened outside of this class (passed to the constructor) then this does not close it. """ if not self.closed: try: self.close() except AttributeError: pass pexpect.spawn.__del__ = __del__
[ "def", "pexpect_monkeypatch", "(", ")", ":", "if", "pexpect", ".", "__version__", "[", ":", "3", "]", ">=", "'2.2'", ":", "# No need to patch, fix is already the upstream version.", "return", "def", "__del__", "(", "self", ")", ":", "\"\"\"This makes sure that no system resources are left open.\n Python only garbage collects Python objects. OS file descriptors\n are not Python objects, so they must be handled explicitly.\n If the child file descriptor was opened outside of this class\n (passed to the constructor) then this does not close it.\n \"\"\"", "if", "not", "self", ".", "closed", ":", "try", ":", "self", ".", "close", "(", ")", "except", "AttributeError", ":", "pass", "pexpect", ".", "spawn", ".", "__del__", "=", "__del__" ]
Patch pexpect to prevent unhandled exceptions at VM teardown. Calling this function will monkeypatch the pexpect.spawn class and modify its __del__ method to make it more robust in the face of failures that can occur if it is called when the Python VM is shutting down. Since Python may fire __del__ methods arbitrarily late, it's possible for them to execute during the teardown of the Python VM itself. At this point, various builtin modules have been reset to None. Thus, the call to self.close() will trigger an exception because it tries to call os.close(), and os is now None.
[ "Patch", "pexpect", "to", "prevent", "unhandled", "exceptions", "at", "VM", "teardown", "." ]
python
test
40.53125
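Usage is just a matter of calling the patch before any spawn objects are created; on pexpect >= 2.2 it is a no-op, as the version guard above shows. A minimal sketch:

import pexpect

pexpect_monkeypatch()                  # safe to call unconditionally
child = pexpect.spawn('echo hello')
child.expect(pexpect.EOF)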
billyshambrook/taskman
taskman/queue.py
https://github.com/billyshambrook/taskman/blob/7e293ce9ea89ec6fc7e8b5a687f02ec9d4ad235e/taskman/queue.py#L36-L43
def get(self): """Get a task from the queue.""" tasks = self._get_avaliable_tasks() if not tasks: return None name, data = tasks[0] self._client.kv.delete(name) return data
[ "def", "get", "(", "self", ")", ":", "tasks", "=", "self", ".", "_get_avaliable_tasks", "(", ")", "if", "not", "tasks", ":", "return", "None", "name", ",", "data", "=", "tasks", "[", "0", "]", "self", ".", "_client", ".", "kv", ".", "delete", "(", "name", ")", "return", "data" ]
Get a task from the queue.
[ "Get", "a", "task", "from", "the", "queue", "." ]
python
train
28.125
corpusops/pdbclone
lib/pdb_clone/pdb.py
https://github.com/corpusops/pdbclone/blob/f781537c243a4874b246d43dbdef8c4279f0094d/lib/pdb_clone/pdb.py#L1650-L1683
def do_alias(self, arg): """alias [name [command [parameter parameter ...] ]] Create an alias called 'name' that executes 'command'. The command must *not* be enclosed in quotes. Replaceable parameters can be indicated by %1, %2, and so on, while %* is replaced by all the parameters. If no command is given, the current alias for name is shown. If no name is given, all aliases are listed. Aliases may be nested and can contain anything that can be legally typed at the pdb prompt. Note! You *can* override internal pdb commands with aliases! Those internal commands are then hidden until the alias is removed. Aliasing is recursively applied to the first word of the command line; all other words in the line are left alone. As an example, here are two useful aliases (especially when placed in the .pdbrc file): # Print instance variables (usage "pi classInst") alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k]) # Print instance variables in self alias ps pi self """ args = arg.split() if len(args) == 0: keys = sorted(self.aliases.keys()) for alias in keys: self.message("%s = %s" % (alias, self.aliases[alias])) return if args[0] in self.aliases and len(args) == 1: self.message("%s = %s" % (args[0], self.aliases[args[0]])) else: self.aliases[args[0]] = ' '.join(args[1:])
[ "def", "do_alias", "(", "self", ",", "arg", ")", ":", "args", "=", "arg", ".", "split", "(", ")", "if", "len", "(", "args", ")", "==", "0", ":", "keys", "=", "sorted", "(", "self", ".", "aliases", ".", "keys", "(", ")", ")", "for", "alias", "in", "keys", ":", "self", ".", "message", "(", "\"%s = %s\"", "%", "(", "alias", ",", "self", ".", "aliases", "[", "alias", "]", ")", ")", "return", "if", "args", "[", "0", "]", "in", "self", ".", "aliases", "and", "len", "(", "args", ")", "==", "1", ":", "self", ".", "message", "(", "\"%s = %s\"", "%", "(", "args", "[", "0", "]", ",", "self", ".", "aliases", "[", "args", "[", "0", "]", "]", ")", ")", "else", ":", "self", ".", "aliases", "[", "args", "[", "0", "]", "]", "=", "' '", ".", "join", "(", "args", "[", "1", ":", "]", ")" ]
alias [name [command [parameter parameter ...] ]] Create an alias called 'name' that executes 'command'. The command must *not* be enclosed in quotes. Replaceable parameters can be indicated by %1, %2, and so on, while %* is replaced by all the parameters. If no command is given, the current alias for name is shown. If no name is given, all aliases are listed. Aliases may be nested and can contain anything that can be legally typed at the pdb prompt. Note! You *can* override internal pdb commands with aliases! Those internal commands are then hidden until the alias is removed. Aliasing is recursively applied to the first word of the command line; all other words in the line are left alone. As an example, here are two useful aliases (especially when placed in the .pdbrc file): # Print instance variables (usage "pi classInst") alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k]) # Print instance variables in self alias ps pi self
[ "alias", "[", "name", "[", "command", "[", "parameter", "parameter", "...", "]", "]]", "Create", "an", "alias", "called", "name", "that", "executes", "command", ".", "The", "command", "must", "*", "not", "*", "be", "enclosed", "in", "quotes", ".", "Replaceable", "parameters", "can", "be", "indicated", "by", "%1", "%2", "and", "so", "on", "while", "%", "*", "is", "replaced", "by", "all", "the", "parameters", ".", "If", "no", "command", "is", "given", "the", "current", "alias", "for", "name", "is", "shown", ".", "If", "no", "name", "is", "given", "all", "aliases", "are", "listed", "." ]
python
train
45.470588
pyroscope/pyrocore
src/pyrocore/util/metafile.py
https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/src/pyrocore/util/metafile.py#L421-L429
def _set_datapath(self, datapath): """ Set a datapath. """ if datapath: self._datapath = datapath.rstrip(os.sep) self._fifo = int(stat.S_ISFIFO(os.stat(self.datapath).st_mode)) else: self._datapath = None self._fifo = False
[ "def", "_set_datapath", "(", "self", ",", "datapath", ")", ":", "if", "datapath", ":", "self", ".", "_datapath", "=", "datapath", ".", "rstrip", "(", "os", ".", "sep", ")", "self", ".", "_fifo", "=", "int", "(", "stat", ".", "S_ISFIFO", "(", "os", ".", "stat", "(", "self", ".", "datapath", ")", ".", "st_mode", ")", ")", "else", ":", "self", ".", "_datapath", "=", "None", "self", ".", "_fifo", "=", "False" ]
Set a datapath.
[ "Set", "a", "datapath", "." ]
python
train
32.777778
dereneaton/ipyrad
ipyrad/analysis/tetrad.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1354-L1383
def count_snps(mat): """ get dstats from the count array and return as a float tuple """ ## get [aabb, baba, abba, aaab] snps = np.zeros(4, dtype=np.uint32) ## get concordant (aabb) pis sites snps[0] = np.uint32(\ mat[0, 5] + mat[0, 10] + mat[0, 15] + \ mat[5, 0] + mat[5, 10] + mat[5, 15] + \ mat[10, 0] + mat[10, 5] + mat[10, 15] + \ mat[15, 0] + mat[15, 5] + mat[15, 10]) ## get discordant (baba) sites for i in range(16): if i % 5: snps[1] += mat[i, i] ## get discordant (abba) sites snps[2] = mat[1, 4] + mat[2, 8] + mat[3, 12] +\ mat[4, 1] + mat[6, 9] + mat[7, 13] +\ mat[8, 2] + mat[9, 6] + mat[11, 14] +\ mat[12, 3] + mat[13, 7] + mat[14, 11] ## get autapomorphy sites snps[3] = (mat.sum() - np.diag(mat).sum()) - snps[2] return snps
[ "def", "count_snps", "(", "mat", ")", ":", "## get [aabb, baba, abba, aaab] ", "snps", "=", "np", ".", "zeros", "(", "4", ",", "dtype", "=", "np", ".", "uint32", ")", "## get concordant (aabb) pis sites", "snps", "[", "0", "]", "=", "np", ".", "uint32", "(", "mat", "[", "0", ",", "5", "]", "+", "mat", "[", "0", ",", "10", "]", "+", "mat", "[", "0", ",", "15", "]", "+", "mat", "[", "5", ",", "0", "]", "+", "mat", "[", "5", ",", "10", "]", "+", "mat", "[", "5", ",", "15", "]", "+", "mat", "[", "10", ",", "0", "]", "+", "mat", "[", "10", ",", "5", "]", "+", "mat", "[", "10", ",", "15", "]", "+", "mat", "[", "15", ",", "0", "]", "+", "mat", "[", "15", ",", "5", "]", "+", "mat", "[", "15", ",", "10", "]", ")", "## get discordant (baba) sites", "for", "i", "in", "range", "(", "16", ")", ":", "if", "i", "%", "5", ":", "snps", "[", "1", "]", "+=", "mat", "[", "i", ",", "i", "]", "## get discordant (abba) sites", "snps", "[", "2", "]", "=", "mat", "[", "1", ",", "4", "]", "+", "mat", "[", "2", ",", "8", "]", "+", "mat", "[", "3", ",", "12", "]", "+", "mat", "[", "4", ",", "1", "]", "+", "mat", "[", "6", ",", "9", "]", "+", "mat", "[", "7", ",", "13", "]", "+", "mat", "[", "8", ",", "2", "]", "+", "mat", "[", "9", ",", "6", "]", "+", "mat", "[", "11", ",", "14", "]", "+", "mat", "[", "12", ",", "3", "]", "+", "mat", "[", "13", ",", "7", "]", "+", "mat", "[", "14", ",", "11", "]", "## get autapomorphy sites", "snps", "[", "3", "]", "=", "(", "mat", ".", "sum", "(", ")", "-", "np", ".", "diag", "(", "mat", ")", ".", "sum", "(", ")", ")", "-", "snps", "[", "2", "]", "return", "snps" ]
get dstats from the count array and return as a float tuple
[ "get", "dstats", "from", "the", "count", "array", "and", "return", "as", "a", "float", "tuple" ]
python
valid
29.566667
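A toy invocation of the counter above; the 16x16 site-count matrix below is fabricated purely for illustration:

import numpy as np

mat = np.zeros((16, 16), dtype=np.uint32)
mat[0, 5] = 3   # concordant (aabb) sites
mat[6, 6] = 2   # discordant (baba) sites sit on the diagonal where i % 5 != 0
mat[1, 4] = 1   # discordant (abba) site
print(count_snps(mat))  # [3 2 1 3]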
bxlab/bx-python
lib/bx_extras/stats.py
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/stats.py#L505-L541
def lhistogram (inlist,numbins=10,defaultreallimits=None,printextras=0): """ Returns (i) a list of histogram bin counts, (ii) the smallest value of the histogram binning, and (iii) the bin width (the last 2 are not necessarily integers). Default number of bins is 10. If no sequence object is given for defaultreallimits, the routine picks (usually non-pretty) bins spanning all the numbers in the inlist. Usage: lhistogram (inlist, numbins=10, defaultreallimits=None, printextras=0) Returns: list of bin values, lowerreallimit, binsize, extrapoints """ if (defaultreallimits != None): if type(defaultreallimits) not in [ListType,TupleType] or len(defaultreallimits)==1: # only one limit given, assumed to be lower one & upper is calc'd lowerreallimit = defaultreallimits upperreallimit = 1.0001 * max(inlist) else: # assume both limits given lowerreallimit = defaultreallimits[0] upperreallimit = defaultreallimits[1] binsize = (upperreallimit-lowerreallimit)/float(numbins) else: # no limits given for histogram, both must be calc'd estbinwidth=(max(inlist)-min(inlist))/float(numbins) + 1 # 1=>cover all binsize = ((max(inlist)-min(inlist)+estbinwidth))/float(numbins) lowerreallimit = min(inlist) - binsize/2 #lower real limit,1st bin bins = [0]*(numbins) extrapoints = 0 for num in inlist: try: if (num-lowerreallimit) < 0: extrapoints = extrapoints + 1 else: bintoincrement = int((num-lowerreallimit)/float(binsize)) bins[bintoincrement] = bins[bintoincrement] + 1 except: extrapoints = extrapoints + 1 if (extrapoints > 0 and printextras == 1): print('\nPoints outside given histogram range =',extrapoints) return (bins, lowerreallimit, binsize, extrapoints)
[ "def", "lhistogram", "(", "inlist", ",", "numbins", "=", "10", ",", "defaultreallimits", "=", "None", ",", "printextras", "=", "0", ")", ":", "if", "(", "defaultreallimits", "!=", "None", ")", ":", "if", "type", "(", "defaultreallimits", ")", "not", "in", "[", "ListType", ",", "TupleType", "]", "or", "len", "(", "defaultreallimits", ")", "==", "1", ":", "# only one limit given, assumed to be lower one & upper is calc'd", "lowerreallimit", "=", "defaultreallimits", "upperreallimit", "=", "1.0001", "*", "max", "(", "inlist", ")", "else", ":", "# assume both limits given", "lowerreallimit", "=", "defaultreallimits", "[", "0", "]", "upperreallimit", "=", "defaultreallimits", "[", "1", "]", "binsize", "=", "(", "upperreallimit", "-", "lowerreallimit", ")", "/", "float", "(", "numbins", ")", "else", ":", "# no limits given for histogram, both must be calc'd", "estbinwidth", "=", "(", "max", "(", "inlist", ")", "-", "min", "(", "inlist", ")", ")", "/", "float", "(", "numbins", ")", "+", "1", "# 1=>cover all", "binsize", "=", "(", "(", "max", "(", "inlist", ")", "-", "min", "(", "inlist", ")", "+", "estbinwidth", ")", ")", "/", "float", "(", "numbins", ")", "lowerreallimit", "=", "min", "(", "inlist", ")", "-", "binsize", "/", "2", "#lower real limit,1st bin", "bins", "=", "[", "0", "]", "*", "(", "numbins", ")", "extrapoints", "=", "0", "for", "num", "in", "inlist", ":", "try", ":", "if", "(", "num", "-", "lowerreallimit", ")", "<", "0", ":", "extrapoints", "=", "extrapoints", "+", "1", "else", ":", "bintoincrement", "=", "int", "(", "(", "num", "-", "lowerreallimit", ")", "/", "float", "(", "binsize", ")", ")", "bins", "[", "bintoincrement", "]", "=", "bins", "[", "bintoincrement", "]", "+", "1", "except", ":", "extrapoints", "=", "extrapoints", "+", "1", "if", "(", "extrapoints", ">", "0", "and", "printextras", "==", "1", ")", ":", "print", "(", "'\\nPoints outside given histogram range ='", ",", "extrapoints", ")", "return", "(", "bins", ",", "lowerreallimit", ",", "binsize", ",", "extrapoints", ")" ]
Returns (i) a list of histogram bin counts, (ii) the smallest value of the histogram binning, and (iii) the bin width (the last 2 are not necessarily integers). Default number of bins is 10. If no sequence object is given for defaultreallimits, the routine picks (usually non-pretty) bins spanning all the numbers in the inlist. Usage: lhistogram (inlist, numbins=10, defaultreallimits=None, printextras=0) Returns: list of bin values, lowerreallimit, binsize, extrapoints
[ "Returns", "(", "i", ")", "a", "list", "of", "histogram", "bin", "counts", "(", "ii", ")", "the", "smallest", "value", "of", "the", "histogram", "binning", "and", "(", "iii", ")", "the", "bin", "width", "(", "the", "last", "2", "are", "not", "necessarily", "integers", ")", ".", "Default", "number", "of", "bins", "is", "10", ".", "If", "no", "sequence", "object", "is", "given", "for", "defaultreallimits", "the", "routine", "picks", "(", "usually", "non", "-", "pretty", ")", "bins", "spanning", "all", "the", "numbers", "in", "the", "inlist", "." ]
python
train
50.702703
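A small runnable example of the default-limits path above (the defaultreallimits branch depends on the Python-2-era ListType/TupleType names, so it is left untouched here by passing no limits):

data = [1, 2, 2, 3, 3, 3, 4, 9]
bins, lower, width, extra = lhistogram(data, numbins=4)
print(bins, lower, width, extra)  # [3, 4, 0, 1] -0.375 2.75 0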
federico123579/Trading212-API
tradingAPI/utils.py
https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/utils.py#L59-L117
def get_pip(mov=None, api=None, name=None): """get value of pip""" # ~ check args if mov is None and api is None: logger.error("need at least one of those") raise ValueError() elif mov is not None and api is not None: logger.error("mov and api are exclusive") raise ValueError() if api is not None: if name is None: logger.error("need a name") raise ValueError() mov = api.new_mov(name) mov.open() if mov is not None: mov._check_open() # find in the collection try: logger.debug(len(Glob().theCollector.collection)) pip = Glob().theCollector.collection['pip'] if name is not None: pip_res = pip[name] elif mov is not None: pip_res = pip[mov.product] logger.debug("pip found in the collection") return pip_res except KeyError: logger.debug("pip not found in the collection") # ~ vars records = [] intervals = [10, 20, 30] def _check_price(interval=10): timeout = time.time() + interval while time.time() < timeout: records.append(mov.get_price()) time.sleep(0.5) # find variation for interval in intervals: _check_price(interval) if min(records) == max(records): logger.debug("no variation in %d seconds" % interval) if interval == intervals[-1]: raise TimeoutError("no variation") else: break # find longer price for price in records: if 'best_price' not in locals(): best_price = price if len(str(price)) > len(str(best_price)): logger.debug("found new best_price %f" % price) best_price = price # get pip pip = get_number_unit(best_price) Glob().pipHandler.add_val({mov.product: pip}) return pip
[ "def", "get_pip", "(", "mov", "=", "None", ",", "api", "=", "None", ",", "name", "=", "None", ")", ":", "# ~ check args", "if", "mov", "is", "None", "and", "api", "is", "None", ":", "logger", ".", "error", "(", "\"need at least one of those\"", ")", "raise", "ValueError", "(", ")", "elif", "mov", "is", "not", "None", "and", "api", "is", "not", "None", ":", "logger", ".", "error", "(", "\"mov and api are exclusive\"", ")", "raise", "ValueError", "(", ")", "if", "api", "is", "not", "None", ":", "if", "name", "is", "None", ":", "logger", ".", "error", "(", "\"need a name\"", ")", "raise", "ValueError", "(", ")", "mov", "=", "api", ".", "new_mov", "(", "name", ")", "mov", ".", "open", "(", ")", "if", "mov", "is", "not", "None", ":", "mov", ".", "_check_open", "(", ")", "# find in the collection", "try", ":", "logger", ".", "debug", "(", "len", "(", "Glob", "(", ")", ".", "theCollector", ".", "collection", ")", ")", "pip", "=", "Glob", "(", ")", ".", "theCollector", ".", "collection", "[", "'pip'", "]", "if", "name", "is", "not", "None", ":", "pip_res", "=", "pip", "[", "name", "]", "elif", "mov", "is", "not", "None", ":", "pip_res", "=", "pip", "[", "mov", ".", "product", "]", "logger", ".", "debug", "(", "\"pip found in the collection\"", ")", "return", "pip_res", "except", "KeyError", ":", "logger", ".", "debug", "(", "\"pip not found in the collection\"", ")", "# ~ vars", "records", "=", "[", "]", "intervals", "=", "[", "10", ",", "20", ",", "30", "]", "def", "_check_price", "(", "interval", "=", "10", ")", ":", "timeout", "=", "time", ".", "time", "(", ")", "+", "interval", "while", "time", ".", "time", "(", ")", "<", "timeout", ":", "records", ".", "append", "(", "mov", ".", "get_price", "(", ")", ")", "time", ".", "sleep", "(", "0.5", ")", "# find variation", "for", "interval", "in", "intervals", ":", "_check_price", "(", "interval", ")", "if", "min", "(", "records", ")", "==", "max", "(", "records", ")", ":", "logger", ".", "debug", "(", "\"no variation in %d seconds\"", "%", "interval", ")", "if", "interval", "==", "intervals", "[", "-", "1", "]", ":", "raise", "TimeoutError", "(", "\"no variation\"", ")", "else", ":", "break", "# find longer price", "for", "price", "in", "records", ":", "if", "'best_price'", "not", "in", "locals", "(", ")", ":", "best_price", "=", "price", "if", "len", "(", "str", "(", "price", ")", ")", ">", "len", "(", "str", "(", "best_price", ")", ")", ":", "logger", ".", "debug", "(", "\"found new best_price %f\"", "%", "price", ")", "best_price", "=", "price", "# get pip", "pip", "=", "get_number_unit", "(", "best_price", ")", "Glob", "(", ")", ".", "pipHandler", ".", "add_val", "(", "{", "mov", ".", "product", ":", "pip", "}", ")", "return", "pip" ]
get value of pip
[ "get", "value", "of", "pip" ]
python
train
31.542373
JoelBender/bacpypes
py25/bacpypes/object.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/object.py#L610-L645
def _dict_contents(self, use_dict=None, as_class=dict): """Return the contents of an object as a dict.""" if _debug: Object._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class) # make/extend the dictionary of content if use_dict is None: use_dict = as_class() klasses = list(self.__class__.__mro__) klasses.reverse() # build a list of property identifiers "bottom up" property_names = [] properties_seen = set() for c in klasses: for prop in getattr(c, 'properties', []): if prop.identifier not in properties_seen: property_names.append(prop.identifier) properties_seen.add(prop.identifier) # extract the values for property_name in property_names: # get the value property_value = self._properties.get(property_name).ReadProperty(self) if property_value is None: continue # if the value has a way to convert it to a dict, use it if hasattr(property_value, "dict_contents"): property_value = property_value.dict_contents(as_class=as_class) # save the value use_dict.__setitem__(property_name, property_value) # return what we built/updated return use_dict
[ "def", "_dict_contents", "(", "self", ",", "use_dict", "=", "None", ",", "as_class", "=", "dict", ")", ":", "if", "_debug", ":", "Object", ".", "_debug", "(", "\"dict_contents use_dict=%r as_class=%r\"", ",", "use_dict", ",", "as_class", ")", "# make/extend the dictionary of content", "if", "use_dict", "is", "None", ":", "use_dict", "=", "as_class", "(", ")", "klasses", "=", "list", "(", "self", ".", "__class__", ".", "__mro__", ")", "klasses", ".", "reverse", "(", ")", "# build a list of property identifiers \"bottom up\"", "property_names", "=", "[", "]", "properties_seen", "=", "set", "(", ")", "for", "c", "in", "klasses", ":", "for", "prop", "in", "getattr", "(", "c", ",", "'properties'", ",", "[", "]", ")", ":", "if", "prop", ".", "identifier", "not", "in", "properties_seen", ":", "property_names", ".", "append", "(", "prop", ".", "identifier", ")", "properties_seen", ".", "add", "(", "prop", ".", "identifier", ")", "# extract the values", "for", "property_name", "in", "property_names", ":", "# get the value", "property_value", "=", "self", ".", "_properties", ".", "get", "(", "property_name", ")", ".", "ReadProperty", "(", "self", ")", "if", "property_value", "is", "None", ":", "continue", "# if the value has a way to convert it to a dict, use it", "if", "hasattr", "(", "property_value", ",", "\"dict_contents\"", ")", ":", "property_value", "=", "property_value", ".", "dict_contents", "(", "as_class", "=", "as_class", ")", "# save the value", "use_dict", ".", "__setitem__", "(", "property_name", ",", "property_value", ")", "# return what we built/updated", "return", "use_dict" ]
Return the contents of an object as a dict.
[ "Return", "the", "contents", "of", "an", "object", "as", "a", "dict", "." ]
python
train
37.5
dw/mitogen
mitogen/minify.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/minify.py#L86-L118
def strip_docstrings(tokens): """Replace docstring tokens with NL tokens in a `tokenize` stream. Any STRING token not part of an expression is deemed a docstring. Indented docstrings are not yet recognised. """ stack = [] state = 'wait_string' for t in tokens: typ = t[0] if state == 'wait_string': if typ in (tokenize.NL, tokenize.COMMENT): yield t elif typ in (tokenize.DEDENT, tokenize.INDENT, tokenize.STRING): stack.append(t) elif typ == tokenize.NEWLINE: stack.append(t) start_line, end_line = stack[0][2][0], stack[-1][3][0]+1 for i in range(start_line, end_line): yield tokenize.NL, '\n', (i, 0), (i,1), '\n' for t in stack: if t[0] in (tokenize.DEDENT, tokenize.INDENT): yield t[0], t[1], (i+1, t[2][1]), (i+1, t[3][1]), t[4] del stack[:] else: stack.append(t) for t in stack: yield t del stack[:] state = 'wait_newline' elif state == 'wait_newline': if typ == tokenize.NEWLINE: state = 'wait_string' yield t
[ "def", "strip_docstrings", "(", "tokens", ")", ":", "stack", "=", "[", "]", "state", "=", "'wait_string'", "for", "t", "in", "tokens", ":", "typ", "=", "t", "[", "0", "]", "if", "state", "==", "'wait_string'", ":", "if", "typ", "in", "(", "tokenize", ".", "NL", ",", "tokenize", ".", "COMMENT", ")", ":", "yield", "t", "elif", "typ", "in", "(", "tokenize", ".", "DEDENT", ",", "tokenize", ".", "INDENT", ",", "tokenize", ".", "STRING", ")", ":", "stack", ".", "append", "(", "t", ")", "elif", "typ", "==", "tokenize", ".", "NEWLINE", ":", "stack", ".", "append", "(", "t", ")", "start_line", ",", "end_line", "=", "stack", "[", "0", "]", "[", "2", "]", "[", "0", "]", ",", "stack", "[", "-", "1", "]", "[", "3", "]", "[", "0", "]", "+", "1", "for", "i", "in", "range", "(", "start_line", ",", "end_line", ")", ":", "yield", "tokenize", ".", "NL", ",", "'\\n'", ",", "(", "i", ",", "0", ")", ",", "(", "i", ",", "1", ")", ",", "'\\n'", "for", "t", "in", "stack", ":", "if", "t", "[", "0", "]", "in", "(", "tokenize", ".", "DEDENT", ",", "tokenize", ".", "INDENT", ")", ":", "yield", "t", "[", "0", "]", ",", "t", "[", "1", "]", ",", "(", "i", "+", "1", ",", "t", "[", "2", "]", "[", "1", "]", ")", ",", "(", "i", "+", "1", ",", "t", "[", "3", "]", "[", "1", "]", ")", ",", "t", "[", "4", "]", "del", "stack", "[", ":", "]", "else", ":", "stack", ".", "append", "(", "t", ")", "for", "t", "in", "stack", ":", "yield", "t", "del", "stack", "[", ":", "]", "state", "=", "'wait_newline'", "elif", "state", "==", "'wait_newline'", ":", "if", "typ", "==", "tokenize", ".", "NEWLINE", ":", "state", "=", "'wait_string'", "yield", "t" ]
Replace docstring tokens with NL tokens in a `tokenize` stream. Any STRING token not part of an expression is deemed a docstring. Indented docstrings are not yet recognised.
[ "Replace", "docstring", "tokens", "with", "NL", "tokens", "in", "a", "tokenize", "stream", "." ]
python
train
38.363636
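A quick demonstration of the generator above, round-tripping through Python 3's tokenize module; a module-level docstring is the simple case the docstring promises to handle:

import io
import tokenize

src = '"""Module docstring."""\nx = 1\n'
toks = tokenize.generate_tokens(io.StringIO(src).readline)
# The docstring line comes back as a bare newline; the code survives.
print(tokenize.untokenize(strip_docstrings(toks)))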
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L211-L220
def get_brandings(self): """ Get all account brandings @return List of brandings """ connection = Connection(self.token) connection.set_url(self.production, self.BRANDINGS_URL) return connection.get_request()
[ "def", "get_brandings", "(", "self", ")", ":", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "self", ".", "BRANDINGS_URL", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get all account brandings @return List of brandings
[ "Get", "all", "account", "brandings" ]
python
train
25.7
dmwilcox/vcard-tools
vcardtools/vcf_merge.py
https://github.com/dmwilcox/vcard-tools/blob/1b0f62a0f4c128c7a212ecdca34ff2acb746b262/vcardtools/vcf_merge.py#L65-L70
def CopyVcardFields(new_vcard, auth_vcard, field_names): """Copy vCard field values from an authoritative vCard into a new one.""" for field in field_names: value_list = auth_vcard.contents.get(field) new_vcard = SetVcardField(new_vcard, field, value_list) return new_vcard
[ "def", "CopyVcardFields", "(", "new_vcard", ",", "auth_vcard", ",", "field_names", ")", ":", "for", "field", "in", "field_names", ":", "value_list", "=", "auth_vcard", ".", "contents", ".", "get", "(", "field", ")", "new_vcard", "=", "SetVcardField", "(", "new_vcard", ",", "field", ",", "value_list", ")", "return", "new_vcard" ]
Copy vCard field values from an authoritative vCard into a new one.
[ "Copy", "vCard", "field", "values", "from", "an", "authoritative", "vCard", "into", "a", "new", "one", "." ]
python
train
49.333333
sibirrer/lenstronomy
lenstronomy/LensModel/Profiles/nfw.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/nfw.py#L231-L251
def F_(self, X): """ computes h() :param X: :return: """ if self._interpol: if not hasattr(self, '_F_interp'): if self._lookup: x = self._x_lookup F_x = self._f_lookup else: x = np.linspace(0, self._max_interp_X, self._num_interp_X) F_x = self._F(x) self._F_interp = interp.interp1d(x, F_x, kind='linear', axis=-1, copy=False, bounds_error=False, fill_value=0, assume_sorted=True) return self._F_interp(X) else: return self._F(X)
[ "def", "F_", "(", "self", ",", "X", ")", ":", "if", "self", ".", "_interpol", ":", "if", "not", "hasattr", "(", "self", ",", "'_F_interp'", ")", ":", "if", "self", ".", "_lookup", ":", "x", "=", "self", ".", "_x_lookup", "F_x", "=", "self", ".", "_f_lookup", "else", ":", "x", "=", "np", ".", "linspace", "(", "0", ",", "self", ".", "_max_interp_X", ",", "self", ".", "_num_interp_X", ")", "F_x", "=", "self", ".", "_F", "(", "x", ")", "self", ".", "_F_interp", "=", "interp", ".", "interp1d", "(", "x", ",", "F_x", ",", "kind", "=", "'linear'", ",", "axis", "=", "-", "1", ",", "copy", "=", "False", ",", "bounds_error", "=", "False", ",", "fill_value", "=", "0", ",", "assume_sorted", "=", "True", ")", "return", "self", ".", "_F_interp", "(", "X", ")", "else", ":", "return", "self", ".", "_F", "(", "X", ")" ]
computes h() :param X: :return:
[ "computes", "h", "()" ]
python
train
32.380952
wummel/linkchecker
linkcheck/logger/html.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logger/html.py#L233-L238
def write_info (self, url_data): """Write url_data.info.""" sep = u"<br/>"+os.linesep text = sep.join(cgi.escape(x) for x in url_data.info) self.writeln(u'<tr><td valign="top">' + self.part("info")+ u"</td><td>"+text+u"</td></tr>")
[ "def", "write_info", "(", "self", ",", "url_data", ")", ":", "sep", "=", "u\"<br/>\"", "+", "os", ".", "linesep", "text", "=", "sep", ".", "join", "(", "cgi", ".", "escape", "(", "x", ")", "for", "x", "in", "url_data", ".", "info", ")", "self", ".", "writeln", "(", "u'<tr><td valign=\"top\">'", "+", "self", ".", "part", "(", "\"info\"", ")", "+", "u\"</td><td>\"", "+", "text", "+", "u\"</td></tr>\"", ")" ]
Write url_data.info.
[ "Write", "url_data", ".", "info", "." ]
python
train
45.5
spyder-ide/spyder
spyder/dependencies.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/dependencies.py#L87-L101
def status(deps=DEPENDENCIES, linesep=os.linesep): """Return a status of dependencies""" maxwidth = 0 col1 = [] col2 = [] for dependency in deps: title1 = dependency.modname title1 += ' ' + dependency.required_version col1.append(title1) maxwidth = max([maxwidth, len(title1)]) col2.append(dependency.get_installed_version()) text = "" for index in range(len(deps)): text += col1[index].ljust(maxwidth) + ': ' + col2[index] + linesep return text[:-1]
[ "def", "status", "(", "deps", "=", "DEPENDENCIES", ",", "linesep", "=", "os", ".", "linesep", ")", ":", "maxwidth", "=", "0", "col1", "=", "[", "]", "col2", "=", "[", "]", "for", "dependency", "in", "deps", ":", "title1", "=", "dependency", ".", "modname", "title1", "+=", "' '", "+", "dependency", ".", "required_version", "col1", ".", "append", "(", "title1", ")", "maxwidth", "=", "max", "(", "[", "maxwidth", ",", "len", "(", "title1", ")", "]", ")", "col2", ".", "append", "(", "dependency", ".", "get_installed_version", "(", ")", ")", "text", "=", "\"\"", "for", "index", "in", "range", "(", "len", "(", "deps", ")", ")", ":", "text", "+=", "col1", "[", "index", "]", ".", "ljust", "(", "maxwidth", ")", "+", "': '", "+", "col2", "[", "index", "]", "+", "linesep", "return", "text", "[", ":", "-", "1", "]" ]
Return a status of dependencies
[ "Return", "a", "status", "of", "dependencies" ]
python
train
35.4
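A self-contained sketch of the formatter above with stand-in dependency objects; the real Dependency class lives elsewhere in spyder.dependencies, so the stub below only mimics the three members the function touches:

import os

class StubDep:
    # Mimics the interface status() relies on; purely illustrative.
    def __init__(self, modname, required_version, installed):
        self.modname = modname
        self.required_version = required_version
        self._installed = installed

    def get_installed_version(self):
        return self._installed

deps = [StubDep('numpy', '>=1.7', '1.16.4'),
        StubDep('qtpy', '>=1.5.0', '1.9.0')]
print(status(deps, linesep=os.linesep))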