Dataset columns:

  repo              string  (7 to 55 chars)
  path              string  (4 to 223 chars)
  url               string  (87 to 315 chars)
  code              string  (75 to 104k chars)
  code_tokens       list
  docstring         string  (1 to 46.9k chars)
  docstring_tokens  list
  language          string  (1 distinct value)
  partition         string  (3 distinct values)
  avg_line_len      float64 (7.91 to 980)
materialsproject/pymatgen
pymatgen/__init__.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/__init__.py#L52-L73
def get_structure_from_mp(formula):
    """
    Convenience method to get a crystal from the Materials Project database
    via the API. Requires PMG_MAPI_KEY to be set.

    Args:
        formula (str): A formula

    Returns:
        (Structure) The lowest energy structure in Materials Project with
        that formula.
    """
    m = MPRester()
    entries = m.get_entries(formula, inc_structure="final")
    if len(entries) == 0:
        raise ValueError("No structure with formula %s in Materials Project!" %
                         formula)
    elif len(entries) > 1:
        warnings.warn("%d structures with formula %s found in Materials "
                      "Project. The lowest energy structure will be returned." %
                      (len(entries), formula))
    return min(entries, key=lambda e: e.energy_per_atom).structure
[ "def", "get_structure_from_mp", "(", "formula", ")", ":", "m", "=", "MPRester", "(", ")", "entries", "=", "m", ".", "get_entries", "(", "formula", ",", "inc_structure", "=", "\"final\"", ")", "if", "len", "(", "entries", ")", "==", "0", ":", "raise", "ValueError", "(", "\"No structure with formula %s in Materials Project!\"", "%", "formula", ")", "elif", "len", "(", "entries", ")", ">", "1", ":", "warnings", ".", "warn", "(", "\"%d structures with formula %s found in Materials \"", "\"Project. The lowest energy structure will be returned.\"", "%", "(", "len", "(", "entries", ")", ",", "formula", ")", ")", "return", "min", "(", "entries", ",", "key", "=", "lambda", "e", ":", "e", ".", "energy_per_atom", ")", ".", "structure" ]
Convenience method to get a crystal from the Materials Project database via the API. Requires PMG_MAPI_KEY to be set.

Args:
    formula (str): A formula

Returns:
    (Structure) The lowest energy structure in Materials Project with that formula.
[ "Convenience", "method", "to", "get", "a", "crystal", "from", "the", "Materials", "Project", "database", "via", "the", "API", ".", "Requires", "PMG_MAPI_KEY", "to", "be", "set", "." ]
python
train
37.681818
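A minimal usage sketch (hypothetical, not part of the record): it assumes pymatgen is installed, PMG_MAPI_KEY is set in the environment, and uses "Fe2O3" as an arbitrary example formula.

    # Hypothetical usage; needs a valid PMG_MAPI_KEY for the Materials Project API.
    from pymatgen import get_structure_from_mp

    structure = get_structure_from_mp("Fe2O3")  # lowest-energy Fe2O3 entry
    print(structure.composition.reduced_formula, structure.lattice.abc)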
HIPS/autograd
examples/ica.py
https://github.com/HIPS/autograd/blob/e3b525302529d7490769d5c0bcfc7457e24e3b3e/examples/ica.py#L13-L41
def make_ica_funs(observed_dimension, latent_dimension):
    """These functions implement independent component analysis.

    The model is:
    latents are drawn i.i.d. for each data point from a product of student-ts.
    weights are the same across all datapoints.
    each data = latents * weights + noise."""

    def sample(weights, n_samples, noise_std, rs):
        latents = rs.randn(latent_dimension, n_samples)
        latents = np.array(sorted(latents.T, key=lambda a_entry: a_entry[0])).T
        noise = rs.randn(n_samples, observed_dimension) * noise_std
        observed = predict(weights, latents) + noise
        return latents, observed

    def predict(weights, latents):
        return np.dot(weights, latents).T

    def logprob(weights, latents, noise_std, observed):
        preds = predict(weights, latents)
        log_lik = np.sum(t.logpdf(preds, 2.4, observed, noise_std))
        return log_lik

    num_weights = observed_dimension * latent_dimension

    def unpack_weights(weights):
        return np.reshape(weights, (observed_dimension, latent_dimension))

    return num_weights, sample, logprob, unpack_weights
[ "def", "make_ica_funs", "(", "observed_dimension", ",", "latent_dimension", ")", ":", "def", "sample", "(", "weights", ",", "n_samples", ",", "noise_std", ",", "rs", ")", ":", "latents", "=", "rs", ".", "randn", "(", "latent_dimension", ",", "n_samples", ")", "latents", "=", "np", ".", "array", "(", "sorted", "(", "latents", ".", "T", ",", "key", "=", "lambda", "a_entry", ":", "a_entry", "[", "0", "]", ")", ")", ".", "T", "noise", "=", "rs", ".", "randn", "(", "n_samples", ",", "observed_dimension", ")", "*", "noise_std", "observed", "=", "predict", "(", "weights", ",", "latents", ")", "+", "noise", "return", "latents", ",", "observed", "def", "predict", "(", "weights", ",", "latents", ")", ":", "return", "np", ".", "dot", "(", "weights", ",", "latents", ")", ".", "T", "def", "logprob", "(", "weights", ",", "latents", ",", "noise_std", ",", "observed", ")", ":", "preds", "=", "predict", "(", "weights", ",", "latents", ")", "log_lik", "=", "np", ".", "sum", "(", "t", ".", "logpdf", "(", "preds", ",", "2.4", ",", "observed", ",", "noise_std", ")", ")", "return", "log_lik", "num_weights", "=", "observed_dimension", "*", "latent_dimension", "def", "unpack_weights", "(", "weights", ")", ":", "return", "np", ".", "reshape", "(", "weights", ",", "(", "observed_dimension", ",", "latent_dimension", ")", ")", "return", "num_weights", ",", "sample", ",", "logprob", ",", "unpack_weights" ]
These functions implement independent component analysis. The model is: latents are drawn i.i.d. for each data point from a product of student-ts. weights are the same across all datapoints. each data = latents * weights + noise.
[ "These", "functions", "implement", "independent", "component", "analysis", "." ]
python
train
38.517241
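A usage sketch for the returned closures (hypothetical dimensions and weights; assumes the example module's own imports, i.e. autograd.numpy as np and scipy.stats.t):

    import numpy.random as npr

    num_weights, sample, logprob, unpack_weights = make_ica_funs(
        observed_dimension=5, latent_dimension=2)
    weights = unpack_weights(0.1 * np.ones(num_weights))   # (5, 2) mixing matrix
    latents, observed = sample(weights, n_samples=100, noise_std=0.1,
                               rs=npr.RandomState(0))
    print(logprob(weights, latents, noise_std=0.1, observed=observed))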
scanny/python-pptx
pptx/dml/fill.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/dml/fill.py#L131-L140
def patterned(self):
    """Selects the pattern fill type.

    Note that calling this method does not by itself set a foreground or
    background color of the pattern. Rather it enables subsequent
    assignments to properties like fore_color to set the pattern and
    colors.
    """
    pattFill = self._xPr.get_or_change_to_pattFill()
    self._fill = _PattFill(pattFill)
[ "def", "patterned", "(", "self", ")", ":", "pattFill", "=", "self", ".", "_xPr", ".", "get_or_change_to_pattFill", "(", ")", "self", ".", "_fill", "=", "_PattFill", "(", "pattFill", ")" ]
Selects the pattern fill type. Note that calling this method does not by itself set a foreground or background color of the pattern. Rather it enables subsequent assignments to properties like fore_color to set the pattern and colors.
[ "Selects", "the", "pattern", "fill", "type", "." ]
python
train
40
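A hypothetical usage sketch ("deck.pptx" and the shape index are placeholders): after fill.patterned(), foreground and background colors can be assigned.

    from pptx import Presentation
    from pptx.dml.color import RGBColor

    prs = Presentation("deck.pptx")              # assumed existing file
    fill = prs.slides[0].shapes[0].fill
    fill.patterned()                             # switch to pattern fill
    fill.fore_color.rgb = RGBColor(0xAA, 0x22, 0x22)
    fill.back_color.rgb = RGBColor(0xFF, 0xFF, 0xFF)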
zhanglab/psamm
psamm/commands/chargecheck.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/commands/chargecheck.py#L48-L81
def run(self):
    """Run charge balance command"""

    # Load compound information
    def compound_name(id):
        if id not in self._model.compounds:
            return id
        return self._model.compounds[id].properties.get('name', id)

    # Create a set of excluded reactions
    exclude = set(self._args.exclude)
    count = 0
    unbalanced = 0
    unchecked = 0
    for reaction, charge in charge_balance(self._model):
        count += 1
        if reaction.id in exclude or reaction.equation is None:
            continue
        if math.isnan(charge):
            logger.debug('Not checking reaction {};'
                         ' missing charge'.format(reaction.id))
            unchecked += 1
        elif abs(charge) > self._args.epsilon:
            unbalanced += 1
            rxt = reaction.equation.translated_compounds(compound_name)
            print('{}\t{}\t{}'.format(reaction.id, charge, rxt))

    logger.info('Unbalanced reactions: {}/{}'.format(unbalanced, count))
    logger.info('Unchecked reactions due to missing charge: {}/{}'.format(
        unchecked, count))
    logger.info('Reactions excluded from check: {}/{}'.format(
        len(exclude), count))
[ "def", "run", "(", "self", ")", ":", "# Load compound information", "def", "compound_name", "(", "id", ")", ":", "if", "id", "not", "in", "self", ".", "_model", ".", "compounds", ":", "return", "id", "return", "self", ".", "_model", ".", "compounds", "[", "id", "]", ".", "properties", ".", "get", "(", "'name'", ",", "id", ")", "# Create a set of excluded reactions", "exclude", "=", "set", "(", "self", ".", "_args", ".", "exclude", ")", "count", "=", "0", "unbalanced", "=", "0", "unchecked", "=", "0", "for", "reaction", ",", "charge", "in", "charge_balance", "(", "self", ".", "_model", ")", ":", "count", "+=", "1", "if", "reaction", ".", "id", "in", "exclude", "or", "reaction", ".", "equation", "is", "None", ":", "continue", "if", "math", ".", "isnan", "(", "charge", ")", ":", "logger", ".", "debug", "(", "'Not checking reaction {};'", "' missing charge'", ".", "format", "(", "reaction", ".", "id", ")", ")", "unchecked", "+=", "1", "elif", "abs", "(", "charge", ")", ">", "self", ".", "_args", ".", "epsilon", ":", "unbalanced", "+=", "1", "rxt", "=", "reaction", ".", "equation", ".", "translated_compounds", "(", "compound_name", ")", "print", "(", "'{}\\t{}\\t{}'", ".", "format", "(", "reaction", ".", "id", ",", "charge", ",", "rxt", ")", ")", "logger", ".", "info", "(", "'Unbalanced reactions: {}/{}'", ".", "format", "(", "unbalanced", ",", "count", ")", ")", "logger", ".", "info", "(", "'Unchecked reactions due to missing charge: {}/{}'", ".", "format", "(", "unchecked", ",", "count", ")", ")", "logger", ".", "info", "(", "'Reactions excluded from check: {}/{}'", ".", "format", "(", "len", "(", "exclude", ")", ",", "count", ")", ")" ]
Run charge balance command
[ "Run", "charge", "balance", "command" ]
python
train
37.470588
ga4gh/ga4gh-server
ga4gh/server/backend.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/backend.py#L338-L349
def variantsGenerator(self, request):
    """
    Returns a generator over the (variant, nextPageToken) pairs defined
    by the specified request.
    """
    compoundId = datamodel.VariantSetCompoundId \
        .parse(request.variant_set_id)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    variantSet = dataset.getVariantSet(compoundId.variant_set_id)
    intervalIterator = paging.VariantsIntervalIterator(
        request, variantSet)
    return intervalIterator
[ "def", "variantsGenerator", "(", "self", ",", "request", ")", ":", "compoundId", "=", "datamodel", ".", "VariantSetCompoundId", ".", "parse", "(", "request", ".", "variant_set_id", ")", "dataset", "=", "self", ".", "getDataRepository", "(", ")", ".", "getDataset", "(", "compoundId", ".", "dataset_id", ")", "variantSet", "=", "dataset", ".", "getVariantSet", "(", "compoundId", ".", "variant_set_id", ")", "intervalIterator", "=", "paging", ".", "VariantsIntervalIterator", "(", "request", ",", "variantSet", ")", "return", "intervalIterator" ]
Returns a generator over the (variant, nextPageToken) pairs defined by the specified request.
[ "Returns", "a", "generator", "over", "the", "(", "variant", "nextPageToken", ")", "pairs", "defined", "by", "the", "specified", "request", "." ]
python
train
44.083333
mozilla/taar
taar/flask_app.py
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/flask_app.py#L41-L86
def flaskrun(app, default_host="127.0.0.1", default_port="8000"):
    """
    Takes a flask.Flask instance and runs it. Parses
    command-line flags to configure the app.
    """
    # Set up the command-line options
    parser = optparse.OptionParser()
    parser.add_option(
        "-H",
        "--host",
        help="Hostname of the Flask app " + "[default %s]" % default_host,
        default=default_host,
    )
    parser.add_option(
        "-P",
        "--port",
        help="Port for the Flask app " + "[default %s]" % default_port,
        default=default_port,
    )

    # Two options useful for debugging purposes, but
    # a bit dangerous so not exposed in the help message.
    parser.add_option(
        "-d", "--debug", action="store_true", dest="debug", help=optparse.SUPPRESS_HELP
    )
    parser.add_option(
        "-p",
        "--profile",
        action="store_true",
        dest="profile",
        help=optparse.SUPPRESS_HELP,
    )

    options, _ = parser.parse_args()

    # If the user selects the profiling option, then we need
    # to do a little extra setup
    if options.profile:
        from werkzeug.contrib.profiler import ProfilerMiddleware

        app.config["PROFILE"] = True
        app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
        options.debug = True

    app.run(debug=options.debug, host=options.host, port=int(options.port))
[ "def", "flaskrun", "(", "app", ",", "default_host", "=", "\"127.0.0.1\"", ",", "default_port", "=", "\"8000\"", ")", ":", "# Set up the command-line options", "parser", "=", "optparse", ".", "OptionParser", "(", ")", "parser", ".", "add_option", "(", "\"-H\"", ",", "\"--host\"", ",", "help", "=", "\"Hostname of the Flask app \"", "+", "\"[default %s]\"", "%", "default_host", ",", "default", "=", "default_host", ",", ")", "parser", ".", "add_option", "(", "\"-P\"", ",", "\"--port\"", ",", "help", "=", "\"Port for the Flask app \"", "+", "\"[default %s]\"", "%", "default_port", ",", "default", "=", "default_port", ",", ")", "# Two options useful for debugging purposes, but", "# a bit dangerous so not exposed in the help message.", "parser", ".", "add_option", "(", "\"-d\"", ",", "\"--debug\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"debug\"", ",", "help", "=", "optparse", ".", "SUPPRESS_HELP", ")", "parser", ".", "add_option", "(", "\"-p\"", ",", "\"--profile\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"profile\"", ",", "help", "=", "optparse", ".", "SUPPRESS_HELP", ",", ")", "options", ",", "_", "=", "parser", ".", "parse_args", "(", ")", "# If the user selects the profiling option, then we need", "# to do a little extra setup", "if", "options", ".", "profile", ":", "from", "werkzeug", ".", "contrib", ".", "profiler", "import", "ProfilerMiddleware", "app", ".", "config", "[", "\"PROFILE\"", "]", "=", "True", "app", ".", "wsgi_app", "=", "ProfilerMiddleware", "(", "app", ".", "wsgi_app", ",", "restrictions", "=", "[", "30", "]", ")", "options", ".", "debug", "=", "True", "app", ".", "run", "(", "debug", "=", "options", ".", "debug", ",", "host", "=", "options", ".", "host", ",", "port", "=", "int", "(", "options", ".", "port", ")", ")" ]
Takes a flask.Flask instance and runs it. Parses command-line flags to configure the app.
[ "Takes", "a", "flask", ".", "Flask", "instance", "and", "runs", "it", ".", "Parses", "command", "-", "line", "flags", "to", "configure", "the", "app", "." ]
python
train
29.630435
jasonrbriggs/stomp.py
stomp/transport.py
https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/transport.py#L130-L137
def set_connected(self, connected):
    """
    :param bool connected:
    """
    with self.__connect_wait_condition:
        self.connected = connected
        if connected:
            self.__connect_wait_condition.notify()
[ "def", "set_connected", "(", "self", ",", "connected", ")", ":", "with", "self", ".", "__connect_wait_condition", ":", "self", ".", "connected", "=", "connected", "if", "connected", ":", "self", ".", "__connect_wait_condition", ".", "notify", "(", ")" ]
:param bool connected:
[ ":", "param", "bool", "connected", ":" ]
python
train
30.875
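The notify above pairs with a wait on the consumer side; a generic sketch of the pattern with plain threading (not stomp.py's actual code):

    import threading

    class ConnectionGate(object):
        """Minimal illustration of the notify/wait pairing used above."""

        def __init__(self):
            self.connected = False
            self._connect_wait_condition = threading.Condition()

        def set_connected(self, connected):
            with self._connect_wait_condition:
                self.connected = connected
                if connected:
                    self._connect_wait_condition.notify()

        def wait_for_connection(self):
            # Blocks until set_connected(True) fires the notify above.
            with self._connect_wait_condition:
                while not self.connected:
                    self._connect_wait_condition.wait()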
phaethon/kamene
kamene/packet.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/packet.py#L1250-L1273
def ls(obj=None):
    """List available layers, or infos on a given layer"""
    if obj is None:
        import builtins
        all = builtins.__dict__.copy()
        all.update(globals())
        objlst = sorted(conf.layers, key=lambda x: x.__name__)
        for o in objlst:
            print("%-10s : %s" % (o.__name__, o.name))
    else:
        if isinstance(obj, type) and issubclass(obj, Packet):
            for f in obj.fields_desc:
                print("%-10s : %-20s = (%s)" % (f.name, f.__class__.__name__, repr(f.default)))
        elif isinstance(obj, Packet):
            for f in obj.fields_desc:
                print("%-10s : %-20s = %-15s (%s)" % (f.name, f.__class__.__name__, repr(getattr(obj, f.name)), repr(f.default)))
            if not isinstance(obj.payload, NoPayload):
                print("--")
                ls(obj.payload)
        else:
            print("Not a packet class. Type 'ls()' to list packet classes.")
[ "def", "ls", "(", "obj", "=", "None", ")", ":", "if", "obj", "is", "None", ":", "import", "builtins", "all", "=", "builtins", ".", "__dict__", ".", "copy", "(", ")", "all", ".", "update", "(", "globals", "(", ")", ")", "objlst", "=", "sorted", "(", "conf", ".", "layers", ",", "key", "=", "lambda", "x", ":", "x", ".", "__name__", ")", "for", "o", "in", "objlst", ":", "print", "(", "\"%-10s : %s\"", "%", "(", "o", ".", "__name__", ",", "o", ".", "name", ")", ")", "else", ":", "if", "isinstance", "(", "obj", ",", "type", ")", "and", "issubclass", "(", "obj", ",", "Packet", ")", ":", "for", "f", "in", "obj", ".", "fields_desc", ":", "print", "(", "\"%-10s : %-20s = (%s)\"", "%", "(", "f", ".", "name", ",", "f", ".", "__class__", ".", "__name__", ",", "repr", "(", "f", ".", "default", ")", ")", ")", "elif", "isinstance", "(", "obj", ",", "Packet", ")", ":", "for", "f", "in", "obj", ".", "fields_desc", ":", "print", "(", "\"%-10s : %-20s = %-15s (%s)\"", "%", "(", "f", ".", "name", ",", "f", ".", "__class__", ".", "__name__", ",", "repr", "(", "getattr", "(", "obj", ",", "f", ".", "name", ")", ")", ",", "repr", "(", "f", ".", "default", ")", ")", ")", "if", "not", "isinstance", "(", "obj", ".", "payload", ",", "NoPayload", ")", ":", "print", "(", "\"--\"", ")", "ls", "(", "obj", ".", "payload", ")", "else", ":", "print", "(", "\"Not a packet class. Type 'ls()' to list packet classes.\"", ")" ]
List available layers, or infos on a given layer
[ "List", "available", "layers", "or", "infos", "on", "a", "given", "layer" ]
python
train
39.666667
notanumber/xapian-haystack
xapian_backend.py
https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L552-L564
def _build_models_query(self, query):
    """
    Builds a query from `query` that filters to documents only from registered models.
    """
    registered_models_ct = self.build_models_list()
    if registered_models_ct:
        restrictions = [xapian.Query('%s%s' % (TERM_PREFIXES[DJANGO_CT], model_ct))
                        for model_ct in registered_models_ct]
        limit_query = xapian.Query(xapian.Query.OP_OR, restrictions)

        query = xapian.Query(xapian.Query.OP_AND, query, limit_query)

    return query
[ "def", "_build_models_query", "(", "self", ",", "query", ")", ":", "registered_models_ct", "=", "self", ".", "build_models_list", "(", ")", "if", "registered_models_ct", ":", "restrictions", "=", "[", "xapian", ".", "Query", "(", "'%s%s'", "%", "(", "TERM_PREFIXES", "[", "DJANGO_CT", "]", ",", "model_ct", ")", ")", "for", "model_ct", "in", "registered_models_ct", "]", "limit_query", "=", "xapian", ".", "Query", "(", "xapian", ".", "Query", ".", "OP_OR", ",", "restrictions", ")", "query", "=", "xapian", ".", "Query", "(", "xapian", ".", "Query", ".", "OP_AND", ",", "query", ",", "limit_query", ")", "return", "query" ]
Builds a query from `query` that filters to documents only from registered models.
[ "Builds", "a", "query", "from", "query", "that", "filters", "to", "documents", "only", "from", "registered", "models", "." ]
python
train
42.538462
T-002/pycast
pycast/common/matrix.py
https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/common/matrix.py#L154-L172
def _initialize_with_array(self, data, rowBased=True):
    """Set the matrix values from a two dimensional list."""
    if rowBased:
        self.matrix = []
        if len(data) != self._rows:
            raise ValueError("Size of Matrix does not match")
        for col in xrange(self._columns):
            self.matrix.append([])
            for row in xrange(self._rows):
                if len(data[row]) != self._columns:
                    raise ValueError("Size of Matrix does not match")
                self.matrix[col].append(data[row][col])
    else:
        if len(data) != self._columns:
            raise ValueError("Size of Matrix does not match")
        for col in data:
            if len(col) != self._rows:
                raise ValueError("Size of Matrix does not match")
        self.matrix = copy.deepcopy(data)
[ "def", "_initialize_with_array", "(", "self", ",", "data", ",", "rowBased", "=", "True", ")", ":", "if", "rowBased", ":", "self", ".", "matrix", "=", "[", "]", "if", "len", "(", "data", ")", "!=", "self", ".", "_rows", ":", "raise", "ValueError", "(", "\"Size of Matrix does not match\"", ")", "for", "col", "in", "xrange", "(", "self", ".", "_columns", ")", ":", "self", ".", "matrix", ".", "append", "(", "[", "]", ")", "for", "row", "in", "xrange", "(", "self", ".", "_rows", ")", ":", "if", "len", "(", "data", "[", "row", "]", ")", "!=", "self", ".", "_columns", ":", "raise", "ValueError", "(", "\"Size of Matrix does not match\"", ")", "self", ".", "matrix", "[", "col", "]", ".", "append", "(", "data", "[", "row", "]", "[", "col", "]", ")", "else", ":", "if", "len", "(", "data", ")", "!=", "self", ".", "_columns", ":", "raise", "ValueError", "(", "\"Size of Matrix does not match\"", ")", "for", "col", "in", "data", ":", "if", "len", "(", "col", ")", "!=", "self", ".", "_rows", ":", "raise", "ValueError", "(", "\"Size of Matrix does not match\"", ")", "self", ".", "matrix", "=", "copy", ".", "deepcopy", "(", "data", ")" ]
Set the matrix values from a two dimensional list.
[ "Set", "the", "matrix", "values", "from", "a", "two", "dimensional", "list", "." ]
python
train
46.842105
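To make the two accepted layouts concrete, here is the same hypothetical 2x3 matrix in each form:

    # Row-based: one inner list per row (rowBased=True transposes it).
    row_based = [[1, 2, 3],
                 [4, 5, 6]]
    # Column-based: one inner list per column (rowBased=False deep-copies it).
    col_based = [[1, 4],
                 [2, 5],
                 [3, 6]]
    # Either way, self.matrix ends up column-major: [[1, 4], [2, 5], [3, 6]].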
mitsei/dlkit
dlkit/services/assessment.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/assessment.py#L3643-L3650
def save_assessment_part(self, assessment_part_form, *args, **kwargs):
    """Pass through to provider AssessmentPartAdminSession.update_assessment_part"""
    # Implemented from kitosid template for -
    # osid.resource.ResourceAdminSession.update_resource
    if assessment_part_form.is_for_update():
        return self.update_assessment_part(assessment_part_form, *args, **kwargs)
    else:
        return self.create_assessment_part(assessment_part_form, *args, **kwargs)
[ "def", "save_assessment_part", "(", "self", ",", "assessment_part_form", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Implemented from kitosid template for -", "# osid.resource.ResourceAdminSession.update_resource", "if", "assessment_part_form", ".", "is_for_update", "(", ")", ":", "return", "self", ".", "update_assessment_part", "(", "assessment_part_form", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "return", "self", ".", "create_assessment_part", "(", "assessment_part_form", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Pass through to provider AssessmentPartAdminSession.update_assessment_part
[ "Pass", "through", "to", "provider", "AssessmentPartAdminSession", ".", "update_assessment_part" ]
python
train
62.25
enkore/i3pystatus
i3pystatus/sabnzbd.py
https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/sabnzbd.py#L52-L91
def run(self):
    """Connect to SABnzbd and get the data."""
    try:
        answer = urlopen(self.url + "&mode=queue").read().decode()
    except (HTTPError, URLError) as error:
        self.output = {
            "full_text": str(error.reason),
            "color": "#FF0000"
        }
        return

    answer = json.loads(answer)

    # if answer["status"] exists and is False, an error occurred
    if not answer.get("status", True):
        self.output = {
            "full_text": answer["error"],
            "color": "#FF0000"
        }
        return

    queue = answer["queue"]
    self.status = queue["status"]

    if self.is_paused():
        color = self.color_paused
    elif self.is_downloading():
        color = self.color_downloading
    else:
        color = self.color

    if self.is_downloading():
        full_text = self.format.format(**queue)
    else:
        full_text = self.format_paused.format(**queue)

    self.output = {
        "full_text": full_text,
        "color": color
    }
[ "def", "run", "(", "self", ")", ":", "try", ":", "answer", "=", "urlopen", "(", "self", ".", "url", "+", "\"&mode=queue\"", ")", ".", "read", "(", ")", ".", "decode", "(", ")", "except", "(", "HTTPError", ",", "URLError", ")", "as", "error", ":", "self", ".", "output", "=", "{", "\"full_text\"", ":", "str", "(", "error", ".", "reason", ")", ",", "\"color\"", ":", "\"#FF0000\"", "}", "return", "answer", "=", "json", ".", "loads", "(", "answer", ")", "# if answer[\"status\"] exists and is False, an error occured", "if", "not", "answer", ".", "get", "(", "\"status\"", ",", "True", ")", ":", "self", ".", "output", "=", "{", "\"full_text\"", ":", "answer", "[", "\"error\"", "]", ",", "\"color\"", ":", "\"#FF0000\"", "}", "return", "queue", "=", "answer", "[", "\"queue\"", "]", "self", ".", "status", "=", "queue", "[", "\"status\"", "]", "if", "self", ".", "is_paused", "(", ")", ":", "color", "=", "self", ".", "color_paused", "elif", "self", ".", "is_downloading", "(", ")", ":", "color", "=", "self", ".", "color_downloading", "else", ":", "color", "=", "self", ".", "color", "if", "self", ".", "is_downloading", "(", ")", ":", "full_text", "=", "self", ".", "format", ".", "format", "(", "*", "*", "queue", ")", "else", ":", "full_text", "=", "self", ".", "format_paused", ".", "format", "(", "*", "*", "queue", ")", "self", ".", "output", "=", "{", "\"full_text\"", ":", "full_text", ",", "\"color\"", ":", "color", "}" ]
Connect to SABnzbd and get the data.
[ "Connect", "to", "SABnzbd", "and", "get", "the", "data", "." ]
python
train
27.825
CEA-COSMIC/ModOpt
modopt/opt/linear.py
https://github.com/CEA-COSMIC/ModOpt/blob/019b189cb897cbb4d210c44a100daaa08468830c/modopt/opt/linear.py#L235-L256
def _op_method(self, data):
    """Operator

    This method returns the input data operated on by all of the operators

    Parameters
    ----------
    data : np.ndarray
        Input data array

    Returns
    -------
    np.ndarray linear operation results

    """
    res = np.empty(len(self.operators), dtype=np.ndarray)

    for i in range(len(self.operators)):
        res[i] = self.operators[i].op(data)

    return res
[ "def", "_op_method", "(", "self", ",", "data", ")", ":", "res", "=", "np", ".", "empty", "(", "len", "(", "self", ".", "operators", ")", ",", "dtype", "=", "np", ".", "ndarray", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "operators", ")", ")", ":", "res", "[", "i", "]", "=", "self", ".", "operators", "[", "i", "]", ".", "op", "(", "data", ")", "return", "res" ]
Operator

This method returns the input data operated on by all of the operators

Parameters
----------
data : np.ndarray
    Input data array

Returns
-------
np.ndarray linear operation results
[ "Operator" ]
python
train
21.227273
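A sketch of the fan-out behaviour with stand-in operators (Scale is hypothetical; anything exposing .op() works):

    import numpy as np

    class Scale(object):
        def __init__(self, factor):
            self.factor = factor

        def op(self, data):
            return self.factor * data

    operators = [Scale(1.0), Scale(2.0)]
    data = np.arange(4.0)
    res = np.empty(len(operators), dtype=np.ndarray)
    for i in range(len(operators)):
        res[i] = operators[i].op(data)   # res[0] == data, res[1] == 2 * data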
apple/turicreate
src/unity/python/turicreate/data_structures/sframe.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sframe.py#L2268-L2278
def _row_selector(self, other):
    """
    Where other is an SArray of identical length as the current Frame,
    this returns a selection of a subset of rows in the current SFrame
    where the corresponding row in the selector is non-zero.
    """
    if type(other) is SArray:
        if self.__has_size__() and other.__has_size__() and len(other) != len(self):
            raise IndexError("Cannot perform logical indexing on arrays of different length.")
        with cython_context():
            return SFrame(_proxy=self.__proxy__.logical_filter(other.__proxy__))
[ "def", "_row_selector", "(", "self", ",", "other", ")", ":", "if", "type", "(", "other", ")", "is", "SArray", ":", "if", "self", ".", "__has_size__", "(", ")", "and", "other", ".", "__has_size__", "(", ")", "and", "len", "(", "other", ")", "!=", "len", "(", "self", ")", ":", "raise", "IndexError", "(", "\"Cannot perform logical indexing on arrays of different length.\"", ")", "with", "cython_context", "(", ")", ":", "return", "SFrame", "(", "_proxy", "=", "self", ".", "__proxy__", ".", "logical_filter", "(", "other", ".", "__proxy__", ")", ")" ]
Where other is an SArray of identical length as the current Frame, this returns a selection of a subset of rows in the current SFrame where the corresponding row in the selector is non-zero.
[ "Where", "other", "is", "an", "SArray", "of", "identical", "length", "as", "the", "current", "Frame", "this", "returns", "a", "selection", "of", "a", "subset", "of", "rows", "in", "the", "current", "SFrame", "where", "the", "corresponding", "row", "in", "the", "selector", "is", "non", "-", "zero", "." ]
python
train
54.727273
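A hypothetical usage sketch: _row_selector is what backs SFrame's boolean indexing syntax.

    import turicreate as tc

    sf = tc.SFrame({'x': [1, 2, 3], 'y': ['a', 'b', 'c']})
    mask = sf['x'] > 1     # SArray of 0/1 values, same length as sf
    subset = sf[mask]      # keeps rows where the selector is non-zero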
mitsei/dlkit
dlkit/json_/assessment/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L749-L788
def submit_response(self, assessment_section_id, item_id, answer_form):
    """Submits an answer to an item.

    arg:    assessment_section_id (osid.id.Id): ``Id`` of the
            ``AssessmentSection``
    arg:    item_id (osid.id.Id): ``Id`` of the ``Item``
    arg:    answer_form (osid.assessment.AnswerForm): the response
    raise:  IllegalState - ``has_assessment_section_begun()`` is
            ``false or is_assessment_section_over()`` is ``true``
    raise:  InvalidArgument - one or more of the elements in the form
            is invalid
    raise:  NotFound - ``assessment_section_id`` or ``item_id`` is not
            found, or ``item_id`` not part of ``assessment_section_id``
    raise:  NullArgument - ``assessment_section_id, item_id,`` or
            ``answer_form`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    raise:  Unsupported - ``answer_form`` is not of this service
    *compliance: mandatory -- This method must be implemented.*

    """
    if not isinstance(answer_form, ABCAnswerForm):
        raise errors.InvalidArgument('argument type is not an AnswerForm')
    # OK, so the following should actually NEVER be true. Remove it?
    if answer_form.is_for_update():
        raise errors.InvalidArgument('the AnswerForm is for update only, not submit')
    #
    try:
        if self._forms[answer_form.get_id().get_identifier()] == SUBMITTED:
            raise errors.IllegalState('answer_form already used in a submit transaction')
    except KeyError:
        raise errors.Unsupported('answer_form did not originate from this assessment session')
    if not answer_form.is_valid():
        raise errors.InvalidArgument('one or more of the form elements is invalid')
    answer_form._my_map['_id'] = ObjectId()
    self.get_assessment_section(assessment_section_id).submit_response(item_id, answer_form)
    self._forms[answer_form.get_id().get_identifier()] = SUBMITTED
[ "def", "submit_response", "(", "self", ",", "assessment_section_id", ",", "item_id", ",", "answer_form", ")", ":", "if", "not", "isinstance", "(", "answer_form", ",", "ABCAnswerForm", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'argument type is not an AnswerForm'", ")", "# OK, so the following should actually NEVER be true. Remove it?", "if", "answer_form", ".", "is_for_update", "(", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'the AnswerForm is for update only, not submit'", ")", "#", "try", ":", "if", "self", ".", "_forms", "[", "answer_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "==", "SUBMITTED", ":", "raise", "errors", ".", "IllegalState", "(", "'answer_form already used in a submit transaction'", ")", "except", "KeyError", ":", "raise", "errors", ".", "Unsupported", "(", "'answer_form did not originate from this assessment session'", ")", "if", "not", "answer_form", ".", "is_valid", "(", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'one or more of the form elements is invalid'", ")", "answer_form", ".", "_my_map", "[", "'_id'", "]", "=", "ObjectId", "(", ")", "self", ".", "get_assessment_section", "(", "assessment_section_id", ")", ".", "submit_response", "(", "item_id", ",", "answer_form", ")", "self", ".", "_forms", "[", "answer_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "=", "SUBMITTED" ]
Submits an answer to an item.

arg:    assessment_section_id (osid.id.Id): ``Id`` of the ``AssessmentSection``
arg:    item_id (osid.id.Id): ``Id`` of the ``Item``
arg:    answer_form (osid.assessment.AnswerForm): the response
raise:  IllegalState - ``has_assessment_section_begun()`` is ``false or is_assessment_section_over()`` is ``true``
raise:  InvalidArgument - one or more of the elements in the form is invalid
raise:  NotFound - ``assessment_section_id`` or ``item_id`` is not found, or ``item_id`` not part of ``assessment_section_id``
raise:  NullArgument - ``assessment_section_id, item_id,`` or ``answer_form`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
raise:  Unsupported - ``answer_form`` is not of this service
*compliance: mandatory -- This method must be implemented.*
[ "Submits", "an", "answer", "to", "an", "item", "." ]
python
train
52.55
smarie/python-valid8
valid8/base.py
https://github.com/smarie/python-valid8/blob/5e15d1de11602933c5114eb9f73277ad91d97800/valid8/base.py#L515-L539
def _none_rejecter(validation_callable  # type: Callable
                   ):
    # type: (...) -> Callable
    """
    Wraps the given validation callable to reject None values. When a None
    value is received by the wrapper, it is not passed to the
    validation_callable and instead this function will raise a
    WrappingFailure. When any other value is received the
    validation_callable is called as usual.

    :param validation_callable:
    :return:
    """
    # option (a) use the `decorate()` helper method to preserve name and signature of the inner object
    # ==> NO, we want to support also non-function callable objects
    # option (b) simply create a wrapper manually
    def reject_none(x):
        if x is not None:
            return validation_callable(x)
        else:
            raise ValueIsNone(wrong_value=x)

    # set a name so that the error messages are more user-friendly ==> NO ! here we want to see the checker
    reject_none.__name__ = 'reject_none({})'.format(get_callable_name(validation_callable))

    return reject_none
[ "def", "_none_rejecter", "(", "validation_callable", "# type: Callable", ")", ":", "# type: (...) -> Callable", "# option (a) use the `decorate()` helper method to preserve name and signature of the inner object", "# ==> NO, we want to support also non-function callable objects", "# option (b) simply create a wrapper manually", "def", "reject_none", "(", "x", ")", ":", "if", "x", "is", "not", "None", ":", "return", "validation_callable", "(", "x", ")", "else", ":", "raise", "ValueIsNone", "(", "wrong_value", "=", "x", ")", "# set a name so that the error messages are more user-friendly ==> NO ! here we want to see the checker", "reject_none", ".", "__name__", "=", "'reject_none({})'", ".", "format", "(", "get_callable_name", "(", "validation_callable", ")", ")", "return", "reject_none" ]
Wraps the given validation callable to reject None values. When a None value is received by the wrapper, it is not passed to the validation_callable and instead this function will raise a WrappingFailure. When any other value is received the validation_callable is called as usual.

:param validation_callable:
:return:
[ "Wraps", "the", "given", "validation", "callable", "to", "reject", "None", "values", ".", "When", "a", "None", "value", "is", "received", "by", "the", "wrapper", "it", "is", "not", "passed", "to", "the", "validation_callable", "and", "instead", "this", "function", "will", "raise", "a", "WrappingFailure", ".", "When", "any", "other", "value", "is", "received", "the", "validation_callable", "is", "called", "as", "usual", "." ]
python
train
41.52
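A sketch of the wrapper's effect with a stand-in validator (is_positive is hypothetical):

    def is_positive(x):
        return x > 0

    checker = _none_rejecter(is_positive)
    checker(5)       # True: forwarded to is_positive
    checker(None)    # raises ValueIsNone instead of evaluating None > 0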
niemasd/TreeSwift
treeswift/Node.py
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Node.py#L238-L261
def traverse_bfs(self, include_self=True):
    '''Perform a Breadth-First Search (BFS) starting at this ``Node`` object. Yields (``Node``, distance) tuples

    Args:
        ``include_self`` (``bool``): ``True`` to include self in the traversal, otherwise ``False``
    '''
    if not isinstance(include_self, bool):
        raise TypeError("include_self must be a bool")
    q = deque()
    dist = dict()
    dist[self] = 0
    q.append((self, 0))
    while len(q) != 0:
        curr = q.popleft()
        if include_self or curr[0] is not self:
            yield curr
        for c in curr[0].children:
            if c not in dist:
                if c.edge_length is None:
                    el = 0
                else:
                    el = c.edge_length
                dist[c] = dist[curr[0]] + el
                q.append((c, dist[c]))
        if curr[0].parent is not None and curr[0].parent not in dist:
            if curr[0].edge_length is None:
                el = 0
            else:
                el = curr[0].edge_length
            dist[curr[0].parent] = dist[curr[0]] + el
            q.append((curr[0].parent, dist[curr[0].parent]))
[ "def", "traverse_bfs", "(", "self", ")", ":", "if", "not", "isinstance", "(", "include_self", ",", "bool", ")", ":", "raise", "TypeError", "(", "\"include_self must be a bool\"", ")", "q", "=", "deque", "(", ")", "dist", "=", "dict", "(", ")", "dist", "[", "self", "]", "=", "0", "q", ".", "append", "(", "(", "self", ",", "0", ")", ")", "while", "len", "(", "q", ")", "!=", "0", ":", "curr", "=", "q", ".", "popleft", "(", ")", "yield", "curr", "for", "c", "in", "curr", "[", "0", "]", ".", "children", ":", "if", "c", "not", "in", "dist", ":", "if", "c", ".", "edge_length", "is", "None", ":", "el", "=", "0", "else", ":", "el", "=", "c", ".", "edge_length", "dist", "[", "c", "]", "=", "dist", "[", "curr", "[", "0", "]", "]", "+", "el", "q", ".", "append", "(", "(", "c", ",", "dist", "[", "c", "]", ")", ")", "if", "curr", "[", "0", "]", ".", "parent", "is", "not", "None", "and", "curr", "[", "0", "]", ".", "parent", "not", "in", "dist", ":", "if", "curr", "[", "0", "]", ".", "edge_length", "is", "None", ":", "el", "=", "0", "else", ":", "el", "=", "curr", "[", "0", "]", ".", "edge_length", "dist", "[", "curr", "[", "0", "]", ".", "parent", "]", "=", "dist", "[", "curr", "[", "0", "]", "]", "+", "el", "q", ".", "append", "(", "(", "curr", "[", "0", "]", ".", "parent", ",", "dist", "[", "curr", "[", "0", "]", ".", "parent", "]", ")", ")" ]
Perform a Breadth-First Search (BFS) starting at this ``Node`` object. Yields (``Node``, distance) tuples

Args:
    ``include_self`` (``bool``): ``True`` to include self in the traversal, otherwise ``False``
[ "Perform", "a", "Breadth", "-", "First", "Search", "(", "BFS", ")", "starting", "at", "this", "Node", "object", ".", "Yields", "(", "Node", "distance", ")", "tuples", "Args", ":", "include_self", "(", "bool", ")", ":", "True", "to", "include", "self", "in", "the", "traversal", "otherwise", "False" ]
python
train
46.583333
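A hypothetical usage sketch with a small Newick tree (treeswift's read_tree_newick builds the tree; distances accumulate edge lengths):

    from treeswift import read_tree_newick

    tree = read_tree_newick("((A:1,B:2):1,C:3);")
    for node, dist in tree.root.traverse_bfs():
        print(node, dist)   # root at distance 0, then neighbours outward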
saltstack/salt
salt/modules/daemontools.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/daemontools.py#L157-L173
def status(name, sig=None):
    '''
    Return the status for a service via daemontools, return pid if running

    CLI Example:

    .. code-block:: bash

        salt '*' daemontools.status <service name>
    '''
    cmd = 'svstat {0}'.format(_service_path(name))
    out = __salt__['cmd.run_stdout'](cmd, python_shell=False)
    try:
        pid = re.search(r'\(pid (\d+)\)', out).group(1)
    except AttributeError:
        pid = ''
    return pid
[ "def", "status", "(", "name", ",", "sig", "=", "None", ")", ":", "cmd", "=", "'svstat {0}'", ".", "format", "(", "_service_path", "(", "name", ")", ")", "out", "=", "__salt__", "[", "'cmd.run_stdout'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "try", ":", "pid", "=", "re", ".", "search", "(", "r'\\(pid (\\d+)\\)'", ",", "out", ")", ".", "group", "(", "1", ")", "except", "AttributeError", ":", "pid", "=", "''", "return", "pid" ]
Return the status for a service via daemontools, return pid if running

CLI Example:

.. code-block:: bash

    salt '*' daemontools.status <service name>
[ "Return", "the", "status", "for", "a", "service", "via", "daemontools", "return", "pid", "if", "running" ]
python
train
25.588235
lisael/fastidious
fastidious/parser_base.py
https://github.com/lisael/fastidious/blob/2542db9de779ddabc3a64e9eb19a4e2de99741dc/fastidious/parser_base.py#L163-L173
def p_startswith(self, st, ignorecase=False):
    "Return True if the input starts with `st` at current position"
    length = len(st)
    matcher = result = self.input[self.pos:self.pos + length]
    if ignorecase:
        matcher = result.lower()
        st = st.lower()
    if matcher == st:
        self.pos += length
        return result
    return False
[ "def", "p_startswith", "(", "self", ",", "st", ",", "ignorecase", "=", "False", ")", ":", "length", "=", "len", "(", "st", ")", "matcher", "=", "result", "=", "self", ".", "input", "[", "self", ".", "pos", ":", "self", ".", "pos", "+", "length", "]", "if", "ignorecase", ":", "matcher", "=", "result", ".", "lower", "(", ")", "st", "=", "st", ".", "lower", "(", ")", "if", "matcher", "==", "st", ":", "self", ".", "pos", "+=", "length", "return", "result", "return", "False" ]
Return True if the input starts with `st` at current position
[ "Return", "True", "if", "the", "input", "starts", "with", "st", "at", "current", "position" ]
python
train
35.454545
aws/aws-encryption-sdk-python
src/aws_encryption_sdk/internal/formatting/__init__.py
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/internal/formatting/__init__.py#L32-L44
def _non_framed_body_length(header, plaintext_length):
    """Calculates the length of a non-framed message body, given a complete header.

    :param header: Complete message header object
    :type header: aws_encryption_sdk.structures.MessageHeader
    :param int plaintext_length: Length of plaintext in bytes
    :rtype: int
    """
    body_length = header.algorithm.iv_len  # IV
    body_length += 8  # Encrypted Content Length
    body_length += plaintext_length  # Encrypted Content
    body_length += header.algorithm.auth_len  # Authentication Tag
    return body_length
[ "def", "_non_framed_body_length", "(", "header", ",", "plaintext_length", ")", ":", "body_length", "=", "header", ".", "algorithm", ".", "iv_len", "# IV", "body_length", "+=", "8", "# Encrypted Content Length", "body_length", "+=", "plaintext_length", "# Encrypted Content", "body_length", "+=", "header", ".", "algorithm", ".", "auth_len", "# Authentication Tag", "return", "body_length" ]
Calculates the length of a non-framed message body, given a complete header.

:param header: Complete message header object
:type header: aws_encryption_sdk.structures.MessageHeader
:param int plaintext_length: Length of plaintext in bytes
:rtype: int
[ "Calculates", "the", "length", "of", "a", "non", "-", "framed", "message", "body", "given", "a", "complete", "header", "." ]
python
train
43.769231
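A worked example with illustrative sizes (a 12-byte IV and 16-byte auth tag are typical AES-GCM values; the real numbers come from header.algorithm):

    iv_len, auth_len, plaintext_length = 12, 16, 1024
    body_length = iv_len + 8 + plaintext_length + auth_len   # 1060 bytes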
googledatalab/pydatalab
solutionbox/structured_data/mltoolbox/_structured_data/_package.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/_package.py#L550-L592
def predict(data, training_dir=None, model_name=None, model_version=None, cloud=False):
  """Runs prediction locally or on the cloud.

  Args:
    data: List of csv strings or a Pandas DataFrame that match the model schema.
    training_dir: local path to the trained output folder.
    model_name: deployed model name
    model_version: deployed model version
    cloud: bool. If False, does local prediction and data and training_dir
        must be set. If True, does cloud prediction and data, model_name,
        and model_version must be set.

  For cloud prediction, the model must be created. This can be done by running
  two gcloud commands::

    1) gcloud beta ml models create NAME
    2) gcloud beta ml versions create VERSION --model NAME --origin gs://BUCKET/training_dir/model

  or these datalab commands:

    1) import google.datalab as datalab
       model = datalab.ml.ModelVersions(MODEL_NAME)
       model.deploy(version_name=VERSION, path='gs://BUCKET/training_dir/model')

  Note that the model must be on GCS.

  Returns:
    Pandas DataFrame.
  """
  if cloud:
    if not model_version or not model_name:
      raise ValueError('model_version or model_name is not set')
    if training_dir:
      raise ValueError('training_dir not needed when cloud is True')

    with warnings.catch_warnings():
      warnings.simplefilter("ignore")
      return cloud_predict(model_name, model_version, data)
  else:
    if not training_dir:
      raise ValueError('training_dir is not set')
    if model_version or model_name:
      raise ValueError('model_name and model_version not needed when cloud is '
                       'False.')

    with warnings.catch_warnings():
      warnings.simplefilter("ignore")
      return local_predict(training_dir, data)
[ "def", "predict", "(", "data", ",", "training_dir", "=", "None", ",", "model_name", "=", "None", ",", "model_version", "=", "None", ",", "cloud", "=", "False", ")", ":", "if", "cloud", ":", "if", "not", "model_version", "or", "not", "model_name", ":", "raise", "ValueError", "(", "'model_version or model_name is not set'", ")", "if", "training_dir", ":", "raise", "ValueError", "(", "'training_dir not needed when cloud is True'", ")", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "return", "cloud_predict", "(", "model_name", ",", "model_version", ",", "data", ")", "else", ":", "if", "not", "training_dir", ":", "raise", "ValueError", "(", "'training_dir is not set'", ")", "if", "model_version", "or", "model_name", ":", "raise", "ValueError", "(", "'model_name and model_version not needed when cloud is '", "'False.'", ")", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "return", "local_predict", "(", "training_dir", ",", "data", ")" ]
Runs prediction locally or on the cloud.

Args:
  data: List of csv strings or a Pandas DataFrame that match the model schema.
  training_dir: local path to the trained output folder.
  model_name: deployed model name
  model_version: deployed model version
  cloud: bool. If False, does local prediction and data and training_dir must be set. If True, does cloud prediction and data, model_name, and model_version must be set.

For cloud prediction, the model must be created. This can be done by running two gcloud commands::

  1) gcloud beta ml models create NAME
  2) gcloud beta ml versions create VERSION --model NAME --origin gs://BUCKET/training_dir/model

or these datalab commands:

  1) import google.datalab as datalab
     model = datalab.ml.ModelVersions(MODEL_NAME)
     model.deploy(version_name=VERSION, path='gs://BUCKET/training_dir/model')

Note that the model must be on GCS.

Returns:
  Pandas DataFrame.
[ "Runs", "prediction", "locally", "or", "on", "the", "cloud", "." ]
python
train
40.139535
OpenAgInitiative/openag_python
openag/cli/firmware/__init__.py
https://github.com/OpenAgInitiative/openag_python/blob/f6202340292bbf7185e1a7d4290188c0dacbb8d0/openag/cli/firmware/__init__.py#L386-L413
def load_plugin(plugin_name):
    """
    Given a plugin name, load plugin cls from plugin directory.
    Will throw an exception if no plugin can be found.
    """
    plugin_cls = plugin_map.get(plugin_name, None)
    if not plugin_cls:
        try:
            plugin_module_name, plugin_cls_name = plugin_name.split(":")
            plugin_module = import_module(plugin_module_name)
            plugin_cls = getattr(plugin_module, plugin_cls_name)
        except ValueError:
            raise click.ClickException(
                '"{}" is not a valid plugin path'.format(plugin_name)
            )
        except ImportError:
            raise click.ClickException(
                '"{}" does not name a Python module'.format(
                    plugin_module_name
                )
            )
        except AttributeError:
            raise click.ClickException(
                'Module "{}" does not contain the class "{}"'.format(
                    plugin_module_name, plugin_cls_name
                )
            )
    return plugin_cls
[ "def", "load_plugin", "(", "plugin_name", ")", ":", "plugin_cls", "=", "plugin_map", ".", "get", "(", "plugin_name", ",", "None", ")", "if", "not", "plugin_cls", ":", "try", ":", "plugin_module_name", ",", "plugin_cls_name", "=", "plugin_name", ".", "split", "(", "\":\"", ")", "plugin_module", "=", "import_module", "(", "plugin_module_name", ")", "plugin_cls", "=", "getattr", "(", "plugin_module", ",", "plugin_cls_name", ")", "except", "ValueError", ":", "raise", "click", ".", "ClickException", "(", "'\"{}\" is not a valid plugin path'", ".", "format", "(", "plugin_name", ")", ")", "except", "ImportError", ":", "raise", "click", ".", "ClickException", "(", "'\"{}\" does not name a Python module'", ".", "format", "(", "plugin_module_name", ")", ")", "except", "AttributeError", ":", "raise", "click", ".", "ClickException", "(", "'Module \"{}\" does not contain the class \"{}\"'", ".", "format", "(", "plugin_module_name", ",", "plugin_cls_name", ")", ")", "return", "plugin_cls" ]
Given a plugin name, load plugin cls from plugin directory. Will throw an exception if no plugin can be found.
[ "Given", "a", "plugin", "name", "load", "plugin", "cls", "from", "plugin", "directory", ".", "Will", "throw", "an", "exception", "if", "no", "plugin", "can", "be", "found", "." ]
python
train
36.642857
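A hypothetical usage sketch of the dotted-path branch (json:JSONDecoder is just a stand-in for a real plugin class):

    cls = load_plugin("json:JSONDecoder")   # imports json, grabs JSONDecoder
    plugin = cls()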
NyashniyVladya/MarkovTextGenerator
MarkovTextGenerator/markov_text_generator.py
https://github.com/NyashniyVladya/MarkovTextGenerator/blob/3d90e02a507939709773ef01c7ff3ec68b2b8d4b/MarkovTextGenerator/markov_text_generator.py#L185-L211
def get_start_array(self, *start_words, **kwargs):
    """
    Generates the beginning of a sentence.

    :start_words: Try to start the sentence with these words.
    """
    if not self.start_arrays:
        # Message reads: "Nothing to start generation from."
        raise MarkovTextExcept("Не с чего начинать генерацию.")
    if not start_words:
        return choice(self.start_arrays)
    _variants = []
    _weights = []
    for tokens in self.start_arrays:
        weight = 0b1
        for word in start_words:
            word = word.strip().lower()
            for token in self.ONLY_WORDS.finditer(word):
                if token.group() in tokens:
                    weight <<= 1
        if weight > 0b1:
            _variants.append(tokens)
            _weights.append(weight)
    if not _variants:
        return choice(self.start_arrays)
    return choices(_variants, weights=_weights, k=1)[0]
[ "def", "get_start_array", "(", "self", ",", "*", "start_words", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "start_arrays", ":", "raise", "MarkovTextExcept", "(", "\"Не с чего начинать генерацию.\")", "", "if", "not", "start_words", ":", "return", "choice", "(", "self", ".", "start_arrays", ")", "_variants", "=", "[", "]", "_weights", "=", "[", "]", "for", "tokens", "in", "self", ".", "start_arrays", ":", "weight", "=", "0b1", "for", "word", "in", "start_words", ":", "word", "=", "word", ".", "strip", "(", ")", ".", "lower", "(", ")", "for", "token", "in", "self", ".", "ONLY_WORDS", ".", "finditer", "(", "word", ")", ":", "if", "token", ".", "group", "(", ")", "in", "tokens", ":", "weight", "<<=", "1", "if", "weight", ">", "0b1", ":", "_variants", ".", "append", "(", "tokens", ")", "_weights", ".", "append", "(", "weight", ")", "if", "not", "_variants", ":", "return", "choice", "(", "self", ".", "start_arrays", ")", "return", "choices", "(", "_variants", ",", "weights", "=", "_weights", ",", "k", "=", "1", ")", "[", "0", "]" ]
Generates the beginning of a sentence.

:start_words: Try to start the sentence with these words.
[ "Генерирует", "начало", "предложения", ".", ":", "start_words", ":", "Попытаться", "начать", "предложение", "с", "этих", "слов", "." ]
python
valid
33.666667
pymc-devs/pymc
pymc/Model.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L221-L246
def sample(self, iter, length=None, verbose=0):
    """
    Draws iter samples from the posterior.
    """
    self._cur_trace_index = 0
    self.max_trace_length = iter
    self._iter = iter
    self.verbose = verbose or 0
    self.seed()

    # Assign Trace instances to tallyable objects.
    self.db.connect_model(self)

    # Initialize database -> initialize traces.
    if length is None:
        length = iter
    self.db._initialize(self._funs_to_tally, length)

    # Put traces on objects
    for v in self._variables_to_tally:
        v.trace = self.db._traces[v.__name__]

    # Loop
    self._current_iter = 0
    self._loop()
    self._finalize()
[ "def", "sample", "(", "self", ",", "iter", ",", "length", "=", "None", ",", "verbose", "=", "0", ")", ":", "self", ".", "_cur_trace_index", "=", "0", "self", ".", "max_trace_length", "=", "iter", "self", ".", "_iter", "=", "iter", "self", ".", "verbose", "=", "verbose", "or", "0", "self", ".", "seed", "(", ")", "# Assign Trace instances to tallyable objects.", "self", ".", "db", ".", "connect_model", "(", "self", ")", "# Initialize database -> initialize traces.", "if", "length", "is", "None", ":", "length", "=", "iter", "self", ".", "db", ".", "_initialize", "(", "self", ".", "_funs_to_tally", ",", "length", ")", "# Put traces on objects", "for", "v", "in", "self", ".", "_variables_to_tally", ":", "v", ".", "trace", "=", "self", ".", "db", ".", "_traces", "[", "v", ".", "__name__", "]", "# Loop", "self", ".", "_current_iter", "=", "0", "self", ".", "_loop", "(", ")", "self", ".", "_finalize", "(", ")" ]
Draws iter samples from the posterior.
[ "Draws", "iter", "samples", "from", "the", "posterior", "." ]
python
train
27.692308
alimanfoo/vcfnp
vcfnp/array.py
https://github.com/alimanfoo/vcfnp/blob/c3f63fb11ada56d4a88076c61c81f99b8ee78b8f/vcfnp/array.py#L203-L216
def _filenames_from_arg(filename):
    """Utility function to deal with polymorphic filenames argument."""
    if isinstance(filename, string_types):
        filenames = [filename]
    elif isinstance(filename, (list, tuple)):
        filenames = filename
    else:
        raise Exception('filename argument must be string, list or tuple')
    for fn in filenames:
        if not os.path.exists(fn):
            raise ValueError('file not found: %s' % fn)
        if not os.path.isfile(fn):
            raise ValueError('not a file: %s' % fn)
    return filenames
[ "def", "_filenames_from_arg", "(", "filename", ")", ":", "if", "isinstance", "(", "filename", ",", "string_types", ")", ":", "filenames", "=", "[", "filename", "]", "elif", "isinstance", "(", "filename", ",", "(", "list", ",", "tuple", ")", ")", ":", "filenames", "=", "filename", "else", ":", "raise", "Exception", "(", "'filename argument must be string, list or tuple'", ")", "for", "fn", "in", "filenames", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "fn", ")", ":", "raise", "ValueError", "(", "'file not found: %s'", "%", "fn", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "fn", ")", ":", "raise", "ValueError", "(", "'not a file: %s'", "%", "fn", ")", "return", "filenames" ]
Utility function to deal with polymorphic filenames argument.
[ "Utility", "function", "to", "deal", "with", "polymorphic", "filenames", "argument", "." ]
python
train
39.357143
dmwm/DBS
Server/Python/src/dbs/web/DBSWriterModel.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/web/DBSWriterModel.py#L73-L98
def insertPrimaryDataset(self):
    """
    API to insert A primary dataset in DBS

    :param primaryDSObj: primary dataset object
    :type primaryDSObj: dict
    :key primary_ds_type: TYPE (out of valid types in DBS, MC, DATA) (Required)
    :key primary_ds_name: Name of the primary dataset (Required)
    """
    try:
        body = request.body.read()
        indata = cjson.decode(body)
        indata = validateJSONInputNoCopy("primds", indata)
        indata.update({"creation_date": dbsUtils().getTime(),
                       "create_by": dbsUtils().getCreateBy()})
        self.dbsPrimaryDataset.insertPrimaryDataset(indata)
    except cjson.DecodeError as dc:
        dbsExceptionHandler("dbsException-invalid-input2",
                            "Wrong format/data from insert PrimaryDataset input",
                            self.logger.exception, str(dc))
    except dbsException as de:
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
    except HTTPError as he:
        raise he
    except Exception as ex:
        sError = "DBSWriterModel/insertPrimaryDataset. %s\n Exception trace: \n %s" \
            % (ex, traceback.format_exc())
        dbsExceptionHandler('dbsException-server-error',
                            dbsExceptionCode['dbsException-server-error'],
                            self.logger.exception, sError)
[ "def", "insertPrimaryDataset", "(", "self", ")", ":", "try", ":", "body", "=", "request", ".", "body", ".", "read", "(", ")", "indata", "=", "cjson", ".", "decode", "(", "body", ")", "indata", "=", "validateJSONInputNoCopy", "(", "\"primds\"", ",", "indata", ")", "indata", ".", "update", "(", "{", "\"creation_date\"", ":", "dbsUtils", "(", ")", ".", "getTime", "(", ")", ",", "\"create_by\"", ":", "dbsUtils", "(", ")", ".", "getCreateBy", "(", ")", "}", ")", "self", ".", "dbsPrimaryDataset", ".", "insertPrimaryDataset", "(", "indata", ")", "except", "cjson", ".", "DecodeError", "as", "dc", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"Wrong format/data from insert PrimaryDataset input\"", ",", "self", ".", "logger", ".", "exception", ",", "str", "(", "dc", ")", ")", "except", "dbsException", "as", "de", ":", "dbsExceptionHandler", "(", "de", ".", "eCode", ",", "de", ".", "message", ",", "self", ".", "logger", ".", "exception", ",", "de", ".", "message", ")", "except", "HTTPError", "as", "he", ":", "raise", "he", "except", "Exception", "as", "ex", ":", "sError", "=", "\"DBSWriterModel/insertPrimaryDataset. %s\\n Exception trace: \\n %s\"", "%", "(", "ex", ",", "traceback", ".", "format_exc", "(", ")", ")", "dbsExceptionHandler", "(", "'dbsException-server-error'", ",", "dbsExceptionCode", "[", "'dbsException-server-error'", "]", ",", "self", ".", "logger", ".", "exception", ",", "sError", ")" ]
API to insert A primary dataset in DBS

:param primaryDSObj: primary dataset object
:type primaryDSObj: dict
:key primary_ds_type: TYPE (out of valid types in DBS, MC, DATA) (Required)
:key primary_ds_name: Name of the primary dataset (Required)
[ "API", "to", "insert", "A", "primary", "dataset", "in", "DBS" ]
python
train
51.076923
PySimpleGUI/PySimpleGUI
PySimpleGUIWeb/Demo Programs/widgets_overview_app.py
https://github.com/PySimpleGUI/PySimpleGUI/blob/08184197f5bd4580ab5e5aca28bdda30f87b86fc/PySimpleGUIWeb/Demo Programs/widgets_overview_app.py#L281-L285
def list_view_on_selected(self, widget, selected_item_key):
    """ The selection event of the listView, returns a key of the clicked event.
        You can retrieve the item rapidly
    """
    self.lbl.set_text('List selection: ' + self.listView.children[selected_item_key].get_text())
[ "def", "list_view_on_selected", "(", "self", ",", "widget", ",", "selected_item_key", ")", ":", "self", ".", "lbl", ".", "set_text", "(", "'List selection: '", "+", "self", ".", "listView", ".", "children", "[", "selected_item_key", "]", ".", "get_text", "(", ")", ")" ]
The selection event of the listView, returns a key of the clicked event. You can retrieve the item rapidly
[ "The", "selection", "event", "of", "the", "listView", "returns", "a", "key", "of", "the", "clicked", "event", ".", "You", "can", "retrieve", "the", "item", "rapidly" ]
python
train
59.8
dlecocq/nsq-py
nsq/backoff.py
https://github.com/dlecocq/nsq-py/blob/3ecacf6ab7719d38031179277113d875554a0c16/nsq/backoff.py#L83-L88
def ready(self): '''Whether or not enough time has passed since the last failure''' if self._last_failed: delta = time.time() - self._last_failed return delta >= self.backoff() return True
[ "def", "ready", "(", "self", ")", ":", "if", "self", ".", "_last_failed", ":", "delta", "=", "time", ".", "time", "(", ")", "-", "self", ".", "_last_failed", "return", "delta", ">=", "self", ".", "backoff", "(", ")", "return", "True" ]
Whether or not enough time has passed since the last failure
[ "Whether", "or", "not", "enough", "time", "has", "passed", "since", "the", "last", "failure" ]
python
train
38.5
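A minimal sketch of how a ready() gate like the one above is typically used. The SimpleBackoff class here is hypothetical: it only mirrors the _last_failed/backoff() contract visible in the snippet, not nsq-py's actual backoff classes.

import time

class SimpleBackoff(object):
    """Hypothetical stand-in mirroring the ready()/backoff() contract above."""
    def __init__(self, delay=1.0):
        self._delay = delay
        self._last_failed = None

    def backoff(self):
        # Fixed delay; nsq-py's real classes compute this from failure history.
        return self._delay

    def failed(self):
        self._last_failed = time.time()

    def ready(self):
        if self._last_failed:
            return (time.time() - self._last_failed) >= self.backoff()
        return True

b = SimpleBackoff(delay=0.1)
b.failed()
while not b.ready():
    time.sleep(0.01)  # wait out the backoff window before retrying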
hozn/stravalib
stravalib/client.py
https://github.com/hozn/stravalib/blob/5500ebc39e0bf4706bb1ca4c27b25e56becaaa5f/stravalib/client.py#L1479-L1522
def create_subscription(self, client_id, client_secret, callback_url,
                            object_type=model.Subscription.OBJECT_TYPE_ACTIVITY,
                            aspect_type=model.Subscription.ASPECT_TYPE_CREATE,
                            verify_token=model.Subscription.VERIFY_TOKEN_DEFAULT):
        """
        Creates a webhook event subscription.

        http://strava.github.io/api/partner/v3/events/#create-a-subscription

        :param client_id: application's ID, obtained during registration
        :type client_id: int

        :param client_secret: application's secret, obtained during registration
        :type client_secret: str

        :param callback_url: callback URL where Strava will first send a GET request to validate, then subsequently send POST requests with updates
        :type callback_url: str

        :param object_type: object_type (currently only `activity` is supported)
        :type object_type: str

        :param aspect_type: aspect_type (currently only `create` is supported)
        :type aspect_type: str

        :param verify_token: a token you can use to verify Strava's GET callback request
        :type verify_token: str

        :return: An instance of :class:`stravalib.model.Subscription`.

        :rtype: :class:`stravalib.model.Subscription`

        Notes:

        `object_type` and `aspect_type` are given defaults because there is currently only one valid value for each.

        `verify_token` is set to a default in the event that the author doesn't want to specify one.

        The application must have permission to make use of the webhook API. Access can be requested by contacting developers -at- strava.com.
        """
        params = dict(client_id=client_id, client_secret=client_secret,
                      object_type=object_type, aspect_type=aspect_type,
                      callback_url=callback_url, verify_token=verify_token)
        raw = self.protocol.post('/push_subscriptions', use_webhook_server=True, **params)
        return model.Subscription.deserialize(raw, bind_client=self)
[ "def", "create_subscription", "(", "self", ",", "client_id", ",", "client_secret", ",", "callback_url", ",", "object_type", "=", "model", ".", "Subscription", ".", "OBJECT_TYPE_ACTIVITY", ",", "aspect_type", "=", "model", ".", "Subscription", ".", "ASPECT_TYPE_CREATE", ",", "verify_token", "=", "model", ".", "Subscription", ".", "VERIFY_TOKEN_DEFAULT", ")", ":", "params", "=", "dict", "(", "client_id", "=", "client_id", ",", "client_secret", "=", "client_secret", ",", "object_type", "=", "object_type", ",", "aspect_type", "=", "aspect_type", ",", "callback_url", "=", "callback_url", ",", "verify_token", "=", "verify_token", ")", "raw", "=", "self", ".", "protocol", ".", "post", "(", "'/push_subscriptions'", ",", "use_webhook_server", "=", "True", ",", "*", "*", "params", ")", "return", "model", ".", "Subscription", ".", "deserialize", "(", "raw", ",", "bind_client", "=", "self", ")" ]
Creates a webhook event subscription.

http://strava.github.io/api/partner/v3/events/#create-a-subscription

:param client_id: application's ID, obtained during registration
:type client_id: int

:param client_secret: application's secret, obtained during registration
:type client_secret: str

:param callback_url: callback URL where Strava will first send a GET request to validate, then subsequently send POST requests with updates
:type callback_url: str

:param object_type: object_type (currently only `activity` is supported)
:type object_type: str

:param aspect_type: aspect_type (currently only `create` is supported)
:type aspect_type: str

:param verify_token: a token you can use to verify Strava's GET callback request
:type verify_token: str

:return: An instance of :class:`stravalib.model.Subscription`.

:rtype: :class:`stravalib.model.Subscription`

Notes:

`object_type` and `aspect_type` are given defaults because there is currently only one valid value for each.

`verify_token` is set to a default in the event that the author doesn't want to specify one.

The application must have permission to make use of the webhook API. Access can be requested by contacting developers -at- strava.com.
[ "Creates", "a", "webhook", "event", "subscription", "." ]
python
train
47.272727
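A hedged usage sketch for the method above; the access token, client id/secret and callback URL are all placeholders, and the call assumes the application has been granted webhook access as the Notes describe.

from stravalib.client import Client

client = Client(access_token='REPLACE_WITH_TOKEN')  # placeholder token
# client_id/client_secret come from the Strava app registration; Strava
# issues a GET to callback_url (carrying verify_token) before subscribing.
subscription = client.create_subscription(
    client_id=1234,                                     # placeholder
    client_secret='REPLACE_WITH_SECRET',                # placeholder
    callback_url='https://example.com/strava/webhook',  # placeholder
)
print(subscription)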
mrooney/mintapi
mintapi/api.py
https://github.com/mrooney/mintapi/blob/44fddbeac79a68da657ad8118e02fcde968f8dfe/mintapi/api.py#L662-L672
def get_transactions(self, include_investment=False): """Returns the transaction data as a Pandas DataFrame.""" assert_pd() s = StringIO(self.get_transactions_csv( include_investment=include_investment)) s.seek(0) df = pd.read_csv(s, parse_dates=['Date']) df.columns = [c.lower().replace(' ', '_') for c in df.columns] df.category = (df.category.str.lower() .replace('uncategorized', pd.np.nan)) return df
[ "def", "get_transactions", "(", "self", ",", "include_investment", "=", "False", ")", ":", "assert_pd", "(", ")", "s", "=", "StringIO", "(", "self", ".", "get_transactions_csv", "(", "include_investment", "=", "include_investment", ")", ")", "s", ".", "seek", "(", "0", ")", "df", "=", "pd", ".", "read_csv", "(", "s", ",", "parse_dates", "=", "[", "'Date'", "]", ")", "df", ".", "columns", "=", "[", "c", ".", "lower", "(", ")", ".", "replace", "(", "' '", ",", "'_'", ")", "for", "c", "in", "df", ".", "columns", "]", "df", ".", "category", "=", "(", "df", ".", "category", ".", "str", ".", "lower", "(", ")", ".", "replace", "(", "'uncategorized'", ",", "pd", ".", "np", ".", "nan", ")", ")", "return", "df" ]
Returns the transaction data as a Pandas DataFrame.
[ "Returns", "the", "transaction", "data", "as", "a", "Pandas", "DataFrame", "." ]
python
train
44.909091
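A minimal usage sketch for get_transactions; the credentials are placeholders, and the call assumes pandas is installed (assert_pd() enforces this).

import mintapi

# Placeholder credentials; constructing Mint performs a real login session.
mint = mintapi.Mint('user@example.com', 'password')
df = mint.get_transactions(include_investment=False)
print(df.columns.tolist())  # headers are lower-cased with underscores
print(df.category.head())   # 'uncategorized' rows are mapped to NaN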
farzadghanei/distutilazy
distutilazy/clean.py
https://github.com/farzadghanei/distutilazy/blob/c3c7d062f7cb79abb7677cac57dd752127ff78e7/distutilazy/clean.py#L228-L237
def clean_extra(self): """Clean extra files/directories specified by get_extra_paths()""" extra_paths = self.get_extra_paths() for path in extra_paths: if not os.path.exists(path): continue if os.path.isdir(path): self._clean_directory(path) else: self._clean_file(path)
[ "def", "clean_extra", "(", "self", ")", ":", "extra_paths", "=", "self", ".", "get_extra_paths", "(", ")", "for", "path", "in", "extra_paths", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "continue", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "self", ".", "_clean_directory", "(", "path", ")", "else", ":", "self", ".", "_clean_file", "(", "path", ")" ]
Clean extra files/directories specified by get_extra_paths()
[ "Clean", "extra", "files", "/", "directories", "specified", "by", "get_extra_paths", "()" ]
python
train
36.9
bcbio/bcbio-nextgen
bcbio/bam/counts.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/counts.py#L41-L45
def coverage_pileup(self, space, start, end): """Retrieve pileup coverage across a specified region. """ return ((col.pos, self._normalize(col.n, self._total)) for col in self._bam.pileup(space, start, end))
[ "def", "coverage_pileup", "(", "self", ",", "space", ",", "start", ",", "end", ")", ":", "return", "(", "(", "col", ".", "pos", ",", "self", ".", "_normalize", "(", "col", ".", "n", ",", "self", ".", "_total", ")", ")", "for", "col", "in", "self", ".", "_bam", ".", "pileup", "(", "space", ",", "start", ",", "end", ")", ")" ]
Retrieve pileup coverage across a specified region.
[ "Retrieve", "pileup", "coverage", "across", "a", "specified", "region", "." ]
python
train
48.6
lrq3000/pyFileFixity
pyFileFixity/lib/aux_funcs.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L27-L33
def is_dir(dirname): '''Checks if a path is an actual directory that exists''' if not os.path.isdir(dirname): msg = "{0} is not a directory".format(dirname) raise argparse.ArgumentTypeError(msg) else: return dirname
[ "def", "is_dir", "(", "dirname", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "dirname", ")", ":", "msg", "=", "\"{0} is not a directory\"", ".", "format", "(", "dirname", ")", "raise", "argparse", ".", "ArgumentTypeError", "(", "msg", ")", "else", ":", "return", "dirname" ]
Checks if a path is an actual directory that exists
[ "Checks", "if", "a", "path", "is", "an", "actual", "directory", "that", "exists" ]
python
train
35
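Because is_dir raises argparse.ArgumentTypeError, it plugs directly into argparse as a type= validator. A self-contained sketch (the validator is repeated so the snippet runs on its own):

import argparse
import os

def is_dir(dirname):
    '''Same validator as above, repeated so this sketch is self-contained'''
    if not os.path.isdir(dirname):
        msg = "{0} is not a directory".format(dirname)
        raise argparse.ArgumentTypeError(msg)
    else:
        return dirname

parser = argparse.ArgumentParser()
# argparse calls is_dir on the raw string and turns ArgumentTypeError
# into a normal usage error if the path does not exist.
parser.add_argument('--workdir', type=is_dir, required=True)
args = parser.parse_args(['--workdir', os.getcwd()])
print(args.workdir)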
adamcharnock/swiftwind
swiftwind/costs/models.py
https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L152-L160
def get_amount_arrears_transactions(self, billing_cycle): """Get the sum of all transaction legs in to_account during given billing cycle""" previous_billing_cycle = billing_cycle.get_previous() if not previous_billing_cycle: return Decimal(0) return self.to_account.balance( transaction__date__lt=previous_billing_cycle.date_range.upper, transaction__date__gte=previous_billing_cycle.date_range.lower, )
[ "def", "get_amount_arrears_transactions", "(", "self", ",", "billing_cycle", ")", ":", "previous_billing_cycle", "=", "billing_cycle", ".", "get_previous", "(", ")", "if", "not", "previous_billing_cycle", ":", "return", "Decimal", "(", "0", ")", "return", "self", ".", "to_account", ".", "balance", "(", "transaction__date__lt", "=", "previous_billing_cycle", ".", "date_range", ".", "upper", ",", "transaction__date__gte", "=", "previous_billing_cycle", ".", "date_range", ".", "lower", ",", ")" ]
Get the sum of all transaction legs in to_account during given billing cycle
[ "Get", "the", "sum", "of", "all", "transaction", "legs", "in", "to_account", "during", "given", "billing", "cycle" ]
python
train
52.444444
genialis/resolwe
resolwe/flow/serializers/fields.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/serializers/fields.py#L38-L57
def to_internal_value(self, data): """Convert to internal value.""" user = getattr(self.context.get('request'), 'user') queryset = self.get_queryset() permission = get_full_perm('view', queryset.model) try: return get_objects_for_user( user, permission, queryset.filter(**{self.slug_field: data}), ).latest() except ObjectDoesNotExist: self.fail( 'does_not_exist', slug_name=self.slug_field, value=smart_text(data), model_name=queryset.model._meta.model_name, # pylint: disable=protected-access ) except (TypeError, ValueError): self.fail('invalid')
[ "def", "to_internal_value", "(", "self", ",", "data", ")", ":", "user", "=", "getattr", "(", "self", ".", "context", ".", "get", "(", "'request'", ")", ",", "'user'", ")", "queryset", "=", "self", ".", "get_queryset", "(", ")", "permission", "=", "get_full_perm", "(", "'view'", ",", "queryset", ".", "model", ")", "try", ":", "return", "get_objects_for_user", "(", "user", ",", "permission", ",", "queryset", ".", "filter", "(", "*", "*", "{", "self", ".", "slug_field", ":", "data", "}", ")", ",", ")", ".", "latest", "(", ")", "except", "ObjectDoesNotExist", ":", "self", ".", "fail", "(", "'does_not_exist'", ",", "slug_name", "=", "self", ".", "slug_field", ",", "value", "=", "smart_text", "(", "data", ")", ",", "model_name", "=", "queryset", ".", "model", ".", "_meta", ".", "model_name", ",", "# pylint: disable=protected-access", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "self", ".", "fail", "(", "'invalid'", ")" ]
Convert to internal value.
[ "Convert", "to", "internal", "value", "." ]
python
train
37.95
mozilla/DeepSpeech
bin/benchmark_nc.py
https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/bin/benchmark_nc.py#L41-L61
def exec_command(command, cwd=None): r''' Helper to exec locally (subprocess) or remotely (paramiko) ''' rc = None stdout = stderr = None if ssh_conn is None: ld_library_path = {'LD_LIBRARY_PATH': '.:%s' % os.environ.get('LD_LIBRARY_PATH', '')} p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=ld_library_path, cwd=cwd) stdout, stderr = p.communicate() rc = p.returncode else: # environment= requires paramiko >= 2.1 (fails with 2.0.2) final_command = command if cwd is None else 'cd %s && %s %s' % (cwd, 'LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH', command) ssh_stdin, ssh_stdout, ssh_stderr = ssh_conn.exec_command(final_command) stdout = ''.join(ssh_stdout.readlines()) stderr = ''.join(ssh_stderr.readlines()) rc = ssh_stdout.channel.recv_exit_status() return rc, stdout, stderr
[ "def", "exec_command", "(", "command", ",", "cwd", "=", "None", ")", ":", "rc", "=", "None", "stdout", "=", "stderr", "=", "None", "if", "ssh_conn", "is", "None", ":", "ld_library_path", "=", "{", "'LD_LIBRARY_PATH'", ":", "'.:%s'", "%", "os", ".", "environ", ".", "get", "(", "'LD_LIBRARY_PATH'", ",", "''", ")", "}", "p", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "True", ",", "env", "=", "ld_library_path", ",", "cwd", "=", "cwd", ")", "stdout", ",", "stderr", "=", "p", ".", "communicate", "(", ")", "rc", "=", "p", ".", "returncode", "else", ":", "# environment= requires paramiko >= 2.1 (fails with 2.0.2)", "final_command", "=", "command", "if", "cwd", "is", "None", "else", "'cd %s && %s %s'", "%", "(", "cwd", ",", "'LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH'", ",", "command", ")", "ssh_stdin", ",", "ssh_stdout", ",", "ssh_stderr", "=", "ssh_conn", ".", "exec_command", "(", "final_command", ")", "stdout", "=", "''", ".", "join", "(", "ssh_stdout", ".", "readlines", "(", ")", ")", "stderr", "=", "''", ".", "join", "(", "ssh_stderr", ".", "readlines", "(", ")", ")", "rc", "=", "ssh_stdout", ".", "channel", ".", "recv_exit_status", "(", ")", "return", "rc", ",", "stdout", ",", "stderr" ]
r''' Helper to exec locally (subprocess) or remotely (paramiko)
[ "r", "Helper", "to", "exec", "locally", "(", "subprocess", ")", "or", "remotely", "(", "paramiko", ")" ]
python
train
43.52381
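A usage sketch, assuming the function above is importable and the module-level ssh_conn is None, so the local subprocess branch runs; the command and cwd are placeholders.

# Local path: ssh_conn is None, so the command runs via subprocess with
# LD_LIBRARY_PATH pointing at the current directory.
rc, stdout, stderr = exec_command('ls -l', cwd='/tmp')
if rc != 0:
    print('command failed (%d): %s' % (rc, stderr))
else:
    print(stdout)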
apache/airflow
airflow/contrib/hooks/aws_sns_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/aws_sns_hook.py#L40-L60
def publish_to_target(self, target_arn, message):
        """
        Publish a message to a topic or an endpoint.

        :param target_arn: either a TopicArn or an EndpointArn
        :type target_arn: str
        :param message: the default message you want to send
        :type message: str
        """

        conn = self.get_conn()

        messages = {
            'default': message
        }

        return conn.publish(
            TargetArn=target_arn,
            Message=json.dumps(messages),
            MessageStructure='json'
        )
[ "def", "publish_to_target", "(", "self", ",", "target_arn", ",", "message", ")", ":", "conn", "=", "self", ".", "get_conn", "(", ")", "messages", "=", "{", "'default'", ":", "message", "}", "return", "conn", ".", "publish", "(", "TargetArn", "=", "target_arn", ",", "Message", "=", "json", ".", "dumps", "(", "messages", ")", ",", "MessageStructure", "=", "'json'", ")" ]
Publish a message to a topic or an endpoint.

:param target_arn: either a TopicArn or an EndpointArn
:type target_arn: str
:param message: the default message you want to send
:type message: str
[ "Publish", "a", "message", "to", "a", "topic", "or", "an", "endpoint", "." ]
python
test
25.52381
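A usage sketch for the hook above; 'aws_default' is Airflow's stock connection id and the topic ARN is a placeholder.

from airflow.contrib.hooks.aws_sns_hook import AwsSnsHook

hook = AwsSnsHook(aws_conn_id='aws_default')
# The message is wrapped under the 'default' key, so it is delivered to
# every protocol subscribed to the topic.
response = hook.publish_to_target(
    target_arn='arn:aws:sns:us-east-1:123456789012:my-topic',  # placeholder
    message='job finished',
)
print(response['MessageId'])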
brutasse/graphite-api
graphite_api/_vendor/whisper.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/_vendor/whisper.py#L535-L549
def update(path,value,timestamp=None): """update(path,value,timestamp=None) path is a string value is a float timestamp is either an int or float """ value = float(value) fh = None try: fh = open(path,'r+b') return file_update(fh, value, timestamp) finally: if fh: fh.close()
[ "def", "update", "(", "path", ",", "value", ",", "timestamp", "=", "None", ")", ":", "value", "=", "float", "(", "value", ")", "fh", "=", "None", "try", ":", "fh", "=", "open", "(", "path", ",", "'r+b'", ")", "return", "file_update", "(", "fh", ",", "value", ",", "timestamp", ")", "finally", ":", "if", "fh", ":", "fh", ".", "close", "(", ")" ]
update(path,value,timestamp=None) path is a string value is a float timestamp is either an int or float
[ "update", "(", "path", "value", "timestamp", "=", "None", ")" ]
python
train
19.333333
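A usage sketch against the standalone whisper package, which this vendored module mirrors; the archive layout and path are placeholder choices, and create() fails if the file already exists.

import time
import whisper

path = '/tmp/example.wsp'  # placeholder path
# One archive: 60-second resolution retained for 1440 points (one day).
whisper.create(path, [(60, 1440)])
whisper.update(path, 42.0)                               # timestamp defaults to now
whisper.update(path, 43.0, timestamp=time.time() - 60)   # backfill one minute ago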
laginha/django-mobileesp
src/django_mobileesp/mdetect.py
https://github.com/laginha/django-mobileesp/blob/91d4babb2343b992970bdb076508d380680c8b7e/src/django_mobileesp/mdetect.py#L310-L320
def detectAndroid(self): """Return detection of an Android device Detects *any* Android OS-based device: phone, tablet, and multi-media player. Also detects Google TV. """ if UAgentInfo.deviceAndroid in self.__userAgent \ or self.detectGoogleTV(): return True return False
[ "def", "detectAndroid", "(", "self", ")", ":", "if", "UAgentInfo", ".", "deviceAndroid", "in", "self", ".", "__userAgent", "or", "self", ".", "detectGoogleTV", "(", ")", ":", "return", "True", "return", "False" ]
Return detection of an Android device Detects *any* Android OS-based device: phone, tablet, and multi-media player. Also detects Google TV.
[ "Return", "detection", "of", "an", "Android", "device" ]
python
train
30.454545
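A usage sketch for the detector; the user-agent string is a made-up Android example and the import path follows the file shown above.

from django_mobileesp.mdetect import UAgentInfo

ua = ('Mozilla/5.0 (Linux; Android 9; Pixel 3) AppleWebKit/537.36 '
      '(KHTML, like Gecko) Chrome/80.0.3987.99 Mobile Safari/537.36')
agent = UAgentInfo(userAgent=ua, httpAccept='text/html')
print(agent.detectAndroid())  # True: matches the Android token (or Google TV)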
r4fek/django-cassandra-engine
django_cassandra_engine/models/__init__.py
https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/models/__init__.py#L80-L84
def add_field(self, field, **kwargs): """Add each field as a private field.""" getattr(self, self._private_fields_name).append(field) self._expire_cache(reverse=True) self._expire_cache(reverse=False)
[ "def", "add_field", "(", "self", ",", "field", ",", "*", "*", "kwargs", ")", ":", "getattr", "(", "self", ",", "self", ".", "_private_fields_name", ")", ".", "append", "(", "field", ")", "self", ".", "_expire_cache", "(", "reverse", "=", "True", ")", "self", ".", "_expire_cache", "(", "reverse", "=", "False", ")" ]
Add each field as a private field.
[ "Add", "each", "field", "as", "a", "private", "field", "." ]
python
train
45.6
FNNDSC/chrisapp
chrisapp/base.py
https://github.com/FNNDSC/chrisapp/blob/b176655f97206240fe173dfe86736f82f0d85bc4/chrisapp/base.py#L292-L306
def launch(self, args=None): """ This method triggers the parsing of arguments. """ self.options = self.parse_args(args) if self.options.saveinputmeta: # save original input options self.save_input_meta() if self.options.inputmeta: # read new options from JSON file self.options = self.get_options_from_file(self.options.inputmeta) self.run(self.options) # if required save meta data for the output after running the plugin app if self.options.saveoutputmeta: self.save_output_meta()
[ "def", "launch", "(", "self", ",", "args", "=", "None", ")", ":", "self", ".", "options", "=", "self", ".", "parse_args", "(", "args", ")", "if", "self", ".", "options", ".", "saveinputmeta", ":", "# save original input options", "self", ".", "save_input_meta", "(", ")", "if", "self", ".", "options", ".", "inputmeta", ":", "# read new options from JSON file", "self", ".", "options", "=", "self", ".", "get_options_from_file", "(", "self", ".", "options", ".", "inputmeta", ")", "self", ".", "run", "(", "self", ".", "options", ")", "# if required save meta data for the output after running the plugin app", "if", "self", ".", "options", ".", "saveoutputmeta", ":", "self", ".", "save_output_meta", "(", ")" ]
This method triggers the parsing of arguments.
[ "This", "method", "triggers", "the", "parsing", "of", "arguments", "." ]
python
train
40.066667
RJT1990/pyflux
pyflux/gas/gasrank.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/gas/gasrank.py#L89-L95
def _create_ids(self, home_teams, away_teams): """ Creates IDs for both players/teams """ categories = pd.Categorical(np.append(home_teams,away_teams)) home_id, away_id = categories.codes[0:int(len(categories)/2)], categories.codes[int(len(categories)/2):len(categories)+1] return home_id, away_id
[ "def", "_create_ids", "(", "self", ",", "home_teams", ",", "away_teams", ")", ":", "categories", "=", "pd", ".", "Categorical", "(", "np", ".", "append", "(", "home_teams", ",", "away_teams", ")", ")", "home_id", ",", "away_id", "=", "categories", ".", "codes", "[", "0", ":", "int", "(", "len", "(", "categories", ")", "/", "2", ")", "]", ",", "categories", ".", "codes", "[", "int", "(", "len", "(", "categories", ")", "/", "2", ")", ":", "len", "(", "categories", ")", "+", "1", "]", "return", "home_id", ",", "away_id" ]
Creates IDs for both players/teams
[ "Creates", "IDs", "for", "both", "players", "/", "teams" ]
python
train
48.428571
koalalorenzo/python-digitalocean
digitalocean/Manager.py
https://github.com/koalalorenzo/python-digitalocean/blob/d0221b57856fb1e131cafecf99d826f7b07a947c/digitalocean/Manager.py#L229-L239
def get_all_floating_ips(self): """ This function returns a list of FloatingIP objects. """ data = self.get_data("floating_ips") floating_ips = list() for jsoned in data['floating_ips']: floating_ip = FloatingIP(**jsoned) floating_ip.token = self.token floating_ips.append(floating_ip) return floating_ips
[ "def", "get_all_floating_ips", "(", "self", ")", ":", "data", "=", "self", ".", "get_data", "(", "\"floating_ips\"", ")", "floating_ips", "=", "list", "(", ")", "for", "jsoned", "in", "data", "[", "'floating_ips'", "]", ":", "floating_ip", "=", "FloatingIP", "(", "*", "*", "jsoned", ")", "floating_ip", ".", "token", "=", "self", ".", "token", "floating_ips", ".", "append", "(", "floating_ip", ")", "return", "floating_ips" ]
This function returns a list of FloatingIP objects.
[ "This", "function", "returns", "a", "list", "of", "FloatingIP", "objects", "." ]
python
valid
35.545455
Parsl/parsl
parsl/providers/aws/aws.py
https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/providers/aws/aws.py#L222-L275
def create_session(self): """Create a session. First we look in self.key_file for a path to a json file with the credentials. The key file should have 'AWSAccessKeyId' and 'AWSSecretKey'. Next we look at self.profile for a profile name and try to use the Session call to automatically pick up the keys for the profile from the user default keys file ~/.aws/config. Finally, boto3 will look for the keys in environment variables: AWS_ACCESS_KEY_ID: The access key for your AWS account. AWS_SECRET_ACCESS_KEY: The secret key for your AWS account. AWS_SESSION_TOKEN: The session key for your AWS account. This is only needed when you are using temporary credentials. The AWS_SECURITY_TOKEN environment variable can also be used, but is only supported for backwards compatibility purposes. AWS_SESSION_TOKEN is supported by multiple AWS SDKs besides python. """ session = None if self.key_file is not None: credfile = os.path.expandvars(os.path.expanduser(self.key_file)) try: with open(credfile, 'r') as f: creds = json.load(f) except json.JSONDecodeError as e: logger.error( "EC2Provider '{}': json decode error in credential file {}".format(self.label, credfile) ) raise e except Exception as e: logger.debug( "EC2Provider '{0}' caught exception while reading credential file: {1}".format( self.label, credfile ) ) raise e logger.debug("EC2Provider '{}': Using credential file to create session".format(self.label)) session = boto3.session.Session(region_name=self.region, **creds) elif self.profile is not None: logger.debug("EC2Provider '{}': Using profile name to create session".format(self.label)) session = boto3.session.Session( profile_name=self.profile, region_name=self.region ) else: logger.debug("EC2Provider '{}': Using environment variables to create session".format(self.label)) session = boto3.session.Session(region_name=self.region) return session
[ "def", "create_session", "(", "self", ")", ":", "session", "=", "None", "if", "self", ".", "key_file", "is", "not", "None", ":", "credfile", "=", "os", ".", "path", ".", "expandvars", "(", "os", ".", "path", ".", "expanduser", "(", "self", ".", "key_file", ")", ")", "try", ":", "with", "open", "(", "credfile", ",", "'r'", ")", "as", "f", ":", "creds", "=", "json", ".", "load", "(", "f", ")", "except", "json", ".", "JSONDecodeError", "as", "e", ":", "logger", ".", "error", "(", "\"EC2Provider '{}': json decode error in credential file {}\"", ".", "format", "(", "self", ".", "label", ",", "credfile", ")", ")", "raise", "e", "except", "Exception", "as", "e", ":", "logger", ".", "debug", "(", "\"EC2Provider '{0}' caught exception while reading credential file: {1}\"", ".", "format", "(", "self", ".", "label", ",", "credfile", ")", ")", "raise", "e", "logger", ".", "debug", "(", "\"EC2Provider '{}': Using credential file to create session\"", ".", "format", "(", "self", ".", "label", ")", ")", "session", "=", "boto3", ".", "session", ".", "Session", "(", "region_name", "=", "self", ".", "region", ",", "*", "*", "creds", ")", "elif", "self", ".", "profile", "is", "not", "None", ":", "logger", ".", "debug", "(", "\"EC2Provider '{}': Using profile name to create session\"", ".", "format", "(", "self", ".", "label", ")", ")", "session", "=", "boto3", ".", "session", ".", "Session", "(", "profile_name", "=", "self", ".", "profile", ",", "region_name", "=", "self", ".", "region", ")", "else", ":", "logger", ".", "debug", "(", "\"EC2Provider '{}': Using environment variables to create session\"", ".", "format", "(", "self", ".", "label", ")", ")", "session", "=", "boto3", ".", "session", ".", "Session", "(", "region_name", "=", "self", ".", "region", ")", "return", "session" ]
Create a session. First we look in self.key_file for a path to a json file with the credentials. The key file should have 'AWSAccessKeyId' and 'AWSSecretKey'. Next we look at self.profile for a profile name and try to use the Session call to automatically pick up the keys for the profile from the user default keys file ~/.aws/config. Finally, boto3 will look for the keys in environment variables: AWS_ACCESS_KEY_ID: The access key for your AWS account. AWS_SECRET_ACCESS_KEY: The secret key for your AWS account. AWS_SESSION_TOKEN: The session key for your AWS account. This is only needed when you are using temporary credentials. The AWS_SECURITY_TOKEN environment variable can also be used, but is only supported for backwards compatibility purposes. AWS_SESSION_TOKEN is supported by multiple AWS SDKs besides python.
[ "Create", "a", "session", "." ]
python
valid
43.407407
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_port_profile_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_port_profile_ext.py#L277-L295
def get_port_profile_status_output_port_profile_mac_association_applied_interface_interface_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_port_profile_status = ET.Element("get_port_profile_status") config = get_port_profile_status output = ET.SubElement(get_port_profile_status, "output") port_profile = ET.SubElement(output, "port-profile") name_key = ET.SubElement(port_profile, "name") name_key.text = kwargs.pop('name') mac_association = ET.SubElement(port_profile, "mac-association") mac_key = ET.SubElement(mac_association, "mac") mac_key.text = kwargs.pop('mac') applied_interface = ET.SubElement(mac_association, "applied-interface") interface_type = ET.SubElement(applied_interface, "interface-type") interface_type.text = kwargs.pop('interface_type') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_port_profile_status_output_port_profile_mac_association_applied_interface_interface_type", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_port_profile_status", "=", "ET", ".", "Element", "(", "\"get_port_profile_status\"", ")", "config", "=", "get_port_profile_status", "output", "=", "ET", ".", "SubElement", "(", "get_port_profile_status", ",", "\"output\"", ")", "port_profile", "=", "ET", ".", "SubElement", "(", "output", ",", "\"port-profile\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "port_profile", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "mac_association", "=", "ET", ".", "SubElement", "(", "port_profile", ",", "\"mac-association\"", ")", "mac_key", "=", "ET", ".", "SubElement", "(", "mac_association", ",", "\"mac\"", ")", "mac_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'mac'", ")", "applied_interface", "=", "ET", ".", "SubElement", "(", "mac_association", ",", "\"applied-interface\"", ")", "interface_type", "=", "ET", ".", "SubElement", "(", "applied_interface", ",", "\"interface-type\"", ")", "interface_type", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_type'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
52.105263
knipknap/SpiffWorkflow
SpiffWorkflow/specs/ThreadSplit.py
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/specs/ThreadSplit.py#L70-L78
def connect(self, task_spec): """ Connect the *following* task to this one. In other words, the given task is added as an output task. task -- the task to connect to. """ self.thread_starter.outputs.append(task_spec) task_spec._connect_notify(self.thread_starter)
[ "def", "connect", "(", "self", ",", "task_spec", ")", ":", "self", ".", "thread_starter", ".", "outputs", ".", "append", "(", "task_spec", ")", "task_spec", ".", "_connect_notify", "(", "self", ".", "thread_starter", ")" ]
Connect the *following* task to this one. In other words, the given task is added as an output task. task -- the task to connect to.
[ "Connect", "the", "*", "following", "*", "task", "to", "this", "one", ".", "In", "other", "words", "the", "given", "task", "is", "added", "as", "an", "output", "task", "." ]
python
valid
34.666667
python-gitlab/python-gitlab
gitlab/mixins.py
https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/mixins.py#L508-L521
def time_estimate(self, duration, **kwargs): """Set an estimated time of work for the object. Args: duration (str): Duration in human format (e.g. 3h30) **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabTimeTrackingError: If the time tracking update cannot be done """ path = '%s/%s/time_estimate' % (self.manager.path, self.get_id()) data = {'duration': duration} return self.manager.gitlab.http_post(path, post_data=data, **kwargs)
[ "def", "time_estimate", "(", "self", ",", "duration", ",", "*", "*", "kwargs", ")", ":", "path", "=", "'%s/%s/time_estimate'", "%", "(", "self", ".", "manager", ".", "path", ",", "self", ".", "get_id", "(", ")", ")", "data", "=", "{", "'duration'", ":", "duration", "}", "return", "self", ".", "manager", ".", "gitlab", ".", "http_post", "(", "path", ",", "post_data", "=", "data", ",", "*", "*", "kwargs", ")" ]
Set an estimated time of work for the object. Args: duration (str): Duration in human format (e.g. 3h30) **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabTimeTrackingError: If the time tracking update cannot be done
[ "Set", "an", "estimated", "time", "of", "work", "for", "the", "object", "." ]
python
train
43.428571
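A usage sketch through python-gitlab's object layer (time_estimate is mixed into issues, among other objects); the URL, token, project path and issue iid are placeholders.

import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')  # placeholders
issue = gl.projects.get('group/project').issues.get(7)  # placeholder issue iid
# Duration uses GitLab's human format described above, e.g. '3h30'.
print(issue.time_estimate('3h30'))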
aws/sagemaker-python-sdk
src/sagemaker/chainer/estimator.py
https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/chainer/estimator.py#L113-L137
def create_model(self, model_server_workers=None, role=None, vpc_config_override=VPC_CONFIG_DEFAULT): """Create a SageMaker ``ChainerModel`` object that can be deployed to an ``Endpoint``. Args: role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during transform jobs. If not specified, the role from the Estimator will be used. model_server_workers (int): Optional. The number of worker processes used by the inference server. If None, server will use one worker per vCPU. vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the model. Default: use subnets and security groups from this Estimator. * 'Subnets' (list[str]): List of subnet ids. * 'SecurityGroupIds' (list[str]): List of security group ids. Returns: sagemaker.chainer.model.ChainerModel: A SageMaker ``ChainerModel`` object. See :func:`~sagemaker.chainer.model.ChainerModel` for full details. """ role = role or self.role return ChainerModel(self.model_data, role, self.entry_point, source_dir=self._model_source_dir(), enable_cloudwatch_metrics=self.enable_cloudwatch_metrics, name=self._current_job_name, container_log_level=self.container_log_level, code_location=self.code_location, py_version=self.py_version, framework_version=self.framework_version, model_server_workers=model_server_workers, image=self.image_name, sagemaker_session=self.sagemaker_session, vpc_config=self.get_vpc_config(vpc_config_override), dependencies=self.dependencies)
[ "def", "create_model", "(", "self", ",", "model_server_workers", "=", "None", ",", "role", "=", "None", ",", "vpc_config_override", "=", "VPC_CONFIG_DEFAULT", ")", ":", "role", "=", "role", "or", "self", ".", "role", "return", "ChainerModel", "(", "self", ".", "model_data", ",", "role", ",", "self", ".", "entry_point", ",", "source_dir", "=", "self", ".", "_model_source_dir", "(", ")", ",", "enable_cloudwatch_metrics", "=", "self", ".", "enable_cloudwatch_metrics", ",", "name", "=", "self", ".", "_current_job_name", ",", "container_log_level", "=", "self", ".", "container_log_level", ",", "code_location", "=", "self", ".", "code_location", ",", "py_version", "=", "self", ".", "py_version", ",", "framework_version", "=", "self", ".", "framework_version", ",", "model_server_workers", "=", "model_server_workers", ",", "image", "=", "self", ".", "image_name", ",", "sagemaker_session", "=", "self", ".", "sagemaker_session", ",", "vpc_config", "=", "self", ".", "get_vpc_config", "(", "vpc_config_override", ")", ",", "dependencies", "=", "self", ".", "dependencies", ")" ]
Create a SageMaker ``ChainerModel`` object that can be deployed to an ``Endpoint``. Args: role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during transform jobs. If not specified, the role from the Estimator will be used. model_server_workers (int): Optional. The number of worker processes used by the inference server. If None, server will use one worker per vCPU. vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the model. Default: use subnets and security groups from this Estimator. * 'Subnets' (list[str]): List of subnet ids. * 'SecurityGroupIds' (list[str]): List of security group ids. Returns: sagemaker.chainer.model.ChainerModel: A SageMaker ``ChainerModel`` object. See :func:`~sagemaker.chainer.model.ChainerModel` for full details.
[ "Create", "a", "SageMaker", "ChainerModel", "object", "that", "can", "be", "deployed", "to", "an", "Endpoint", "." ]
python
train
72.8
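A hedged end-to-end sketch around create_model, using the pre-2.0 SDK keyword names contemporary with this snippet; the role ARN, script, instance types and S3 paths are all placeholders.

from sagemaker.chainer.estimator import Chainer

estimator = Chainer(entry_point='train.py',                                  # placeholder script
                    role='arn:aws:iam::123456789012:role/SageMakerRole',     # placeholder role
                    train_instance_count=1,
                    train_instance_type='ml.p3.2xlarge',
                    framework_version='4.1.0')
estimator.fit({'training': 's3://my-bucket/chainer/train'})  # placeholder channel
# Two worker processes per container instead of the one-per-vCPU default.
model = estimator.create_model(model_server_workers=2)
predictor = model.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')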
argaen/aiocache
aiocache/base.py
https://github.com/argaen/aiocache/blob/fdd282f37283ca04e22209f4d2ae4900f29e1688/aiocache/base.py#L440-L459
async def raw(self, command, *args, _conn=None, **kwargs): """ Send the raw command to the underlying client. Note that by using this CMD you will lose compatibility with other backends. Due to limitations with aiomcache client, args have to be provided as bytes. For rest of backends, str. :param command: str with the command. :param timeout: int or float in seconds specifying maximum timeout for the operations to last :returns: whatever the underlying client returns :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout """ start = time.monotonic() ret = await self._raw( command, *args, encoding=self.serializer.encoding, _conn=_conn, **kwargs ) logger.debug("%s (%.4f)s", command, time.monotonic() - start) return ret
[ "async", "def", "raw", "(", "self", ",", "command", ",", "*", "args", ",", "_conn", "=", "None", ",", "*", "*", "kwargs", ")", ":", "start", "=", "time", ".", "monotonic", "(", ")", "ret", "=", "await", "self", ".", "_raw", "(", "command", ",", "*", "args", ",", "encoding", "=", "self", ".", "serializer", ".", "encoding", ",", "_conn", "=", "_conn", ",", "*", "*", "kwargs", ")", "logger", ".", "debug", "(", "\"%s (%.4f)s\"", ",", "command", ",", "time", ".", "monotonic", "(", ")", "-", "start", ")", "return", "ret" ]
Send the raw command to the underlying client. Note that by using this CMD you will lose compatibility with other backends. Due to limitations with aiomcache client, args have to be provided as bytes. For rest of backends, str. :param command: str with the command. :param timeout: int or float in seconds specifying maximum timeout for the operations to last :returns: whatever the underlying client returns :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
[ "Send", "the", "raw", "command", "to", "the", "underlying", "client", ".", "Note", "that", "by", "using", "this", "CMD", "you", "will", "lose", "compatibility", "with", "other", "backends", "." ]
python
train
43.6
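A usage sketch, assuming a Redis server on localhost and the RedisCache backend that aiocache shipped at the time of this snippet; 'get' is forwarded verbatim to the underlying client as the docstring describes.

import asyncio
from aiocache import RedisCache

async def main():
    cache = RedisCache()                  # assumes redis on localhost:6379
    await cache.set('k', 'v')
    value = await cache.raw('get', 'k')   # raw backend command, str args
    print(value)

asyncio.get_event_loop().run_until_complete(main())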
FocusLab/Albertson
albertson/base.py
https://github.com/FocusLab/Albertson/blob/a42f9873559df9188c40c34fdffb079d78eaa3fe/albertson/base.py#L55-L63
def get_conn(self, aws_access_key=None, aws_secret_key=None): ''' Hook point for overriding how the CounterPool gets its connection to AWS. ''' return boto.connect_dynamodb( aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key, )
[ "def", "get_conn", "(", "self", ",", "aws_access_key", "=", "None", ",", "aws_secret_key", "=", "None", ")", ":", "return", "boto", ".", "connect_dynamodb", "(", "aws_access_key_id", "=", "aws_access_key", ",", "aws_secret_access_key", "=", "aws_secret_key", ",", ")" ]
Hook point for overriding how the CounterPool gets its connection to AWS.
[ "Hook", "point", "for", "overriding", "how", "the", "CounterPool", "gets", "its", "connection", "to", "AWS", "." ]
python
valid
34.555556
pgmpy/pgmpy
pgmpy/estimators/ConstraintBasedEstimator.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/estimators/ConstraintBasedEstimator.py#L444-L534
def build_skeleton(nodes, independencies):
        """Estimates a graph skeleton (UndirectedGraph) from a set of independencies
        using (the first part of) the PC algorithm. The independencies can either be
        provided as an instance of the `Independencies`-class or by passing a
        decision function that decides any conditional independency assertion.
        Returns a tuple `(skeleton, separating_sets)`.

        If an Independencies-instance is passed, the contained IndependenceAssertions
        have to admit a faithful BN representation. This is the case if
        they are obtained as a set of d-separations of some Bayesian network or
        if the independence assertions are closed under the semi-graphoid axioms.
        Otherwise the procedure may fail to identify the correct structure.

        Parameters
        ----------
        nodes: list, array-like
            A list of node/variable names of the network skeleton.

        independencies: Independencies-instance or function.
            The source of independency information from which to build the skeleton.
            The provided Independencies should admit a faithful representation.
            Can either be provided as an Independencies()-instance or by passing a
            function `f(X, Y, Zs)` that returns `True` when X _|_ Y | Zs,
            otherwise `False`. (X, Y being individual nodes and Zs a list of nodes).

        Returns
        -------
        skeleton: UndirectedGraph
            An estimate for the undirected graph skeleton of the BN underlying the data.

        separating_sets: dict
            A dict containing for each pair of not directly connected nodes a
            separating set ("witnessing set") of variables that makes them
            conditionally independent. (needed for edge orientation procedures)

        Reference
        ---------
        [1] Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550)
            http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
        [2] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
            Section 3.4.2.1 (page 85), Algorithm 3.3

        Examples
        --------
        >>> from pgmpy.estimators import ConstraintBasedEstimator
        >>> from pgmpy.models import DAG
        >>> from pgmpy.independencies import Independencies

        >>> # build skeleton from list of independencies:
        ... ind = Independencies(['B', 'C'], ['A', ['B', 'C'], 'D'])
        >>> # we need to compute closure, otherwise this set of independencies doesn't
        ... # admit a faithful representation:
        ... ind = ind.closure()
        >>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton("ABCD", ind)
        >>> print(skel.edges())
        [('A', 'D'), ('B', 'D'), ('C', 'D')]

        >>> # build skeleton from d-separations of DAG:
        ... model = DAG([('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')])
        >>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton(model.nodes(), model.get_independencies())
        >>> print(skel.edges())
        [('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')]
        """

        nodes = list(nodes)

        if isinstance(independencies, Independencies):
            def is_independent(X, Y, Zs):
                return IndependenceAssertion(X, Y, Zs) in independencies
        elif callable(independencies):
            is_independent = independencies
        else:
            raise ValueError("'independencies' must be either Independencies-instance " +
                             "or a ternary function that decides independencies.")

        graph = UndirectedGraph(combinations(nodes, 2))
        lim_neighbors = 0
        separating_sets = dict()
        while not all([len(list(graph.neighbors(node))) < lim_neighbors for node in nodes]):
            for node in nodes:
                for neighbor in list(graph.neighbors(node)):
                    # search if there is a set of neighbors (of size lim_neighbors)
                    # that makes X and Y independent:
                    for separating_set in combinations(set(graph.neighbors(node)) - set([neighbor]),
                                                       lim_neighbors):
                        if is_independent(node, neighbor, separating_set):
                            separating_sets[frozenset((node, neighbor))] = separating_set
                            graph.remove_edge(node, neighbor)
                            break
            lim_neighbors += 1

        return graph, separating_sets
[ "def", "build_skeleton", "(", "nodes", ",", "independencies", ")", ":", "nodes", "=", "list", "(", "nodes", ")", "if", "isinstance", "(", "independencies", ",", "Independencies", ")", ":", "def", "is_independent", "(", "X", ",", "Y", ",", "Zs", ")", ":", "return", "IndependenceAssertion", "(", "X", ",", "Y", ",", "Zs", ")", "in", "independencies", "elif", "callable", "(", "independencies", ")", ":", "is_independent", "=", "independencies", "else", ":", "raise", "ValueError", "(", "\"'independencies' must be either Independencies-instance \"", "+", "\"or a ternary function that decides independencies.\"", ")", "graph", "=", "UndirectedGraph", "(", "combinations", "(", "nodes", ",", "2", ")", ")", "lim_neighbors", "=", "0", "separating_sets", "=", "dict", "(", ")", "while", "not", "all", "(", "[", "len", "(", "list", "(", "graph", ".", "neighbors", "(", "node", ")", ")", ")", "<", "lim_neighbors", "for", "node", "in", "nodes", "]", ")", ":", "for", "node", "in", "nodes", ":", "for", "neighbor", "in", "list", "(", "graph", ".", "neighbors", "(", "node", ")", ")", ":", "# search if there is a set of neighbors (of size lim_neighbors)", "# that makes X and Y independent:", "for", "separating_set", "in", "combinations", "(", "set", "(", "graph", ".", "neighbors", "(", "node", ")", ")", "-", "set", "(", "[", "neighbor", "]", ")", ",", "lim_neighbors", ")", ":", "if", "is_independent", "(", "node", ",", "neighbor", ",", "separating_set", ")", ":", "separating_sets", "[", "frozenset", "(", "(", "node", ",", "neighbor", ")", ")", "]", "=", "separating_set", "graph", ".", "remove_edge", "(", "node", ",", "neighbor", ")", "break", "lim_neighbors", "+=", "1", "return", "graph", ",", "separating_sets" ]
Estimates a graph skeleton (UndirectedGraph) from a set of independencies using (the first part of) the PC algorithm. The independencies can either be provided as an instance of the `Independencies`-class or by passing a decision function that decides any conditional independency assertion. Returns a tuple `(skeleton, separating_sets)`.

If an Independencies-instance is passed, the contained IndependenceAssertions have to admit a faithful BN representation. This is the case if they are obtained as a set of d-separations of some Bayesian network or if the independence assertions are closed under the semi-graphoid axioms. Otherwise the procedure may fail to identify the correct structure.

Parameters
----------
nodes: list, array-like
    A list of node/variable names of the network skeleton.

independencies: Independencies-instance or function.
    The source of independency information from which to build the skeleton.
    The provided Independencies should admit a faithful representation.
    Can either be provided as an Independencies()-instance or by passing a
    function `f(X, Y, Zs)` that returns `True` when X _|_ Y | Zs,
    otherwise `False`. (X, Y being individual nodes and Zs a list of nodes).

Returns
-------
skeleton: UndirectedGraph
    An estimate for the undirected graph skeleton of the BN underlying the data.

separating_sets: dict
    A dict containing for each pair of not directly connected nodes a
    separating set ("witnessing set") of variables that makes them
    conditionally independent. (needed for edge orientation procedures)

Reference
---------
[1] Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550)
    http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
[2] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
    Section 3.4.2.1 (page 85), Algorithm 3.3

Examples
--------
>>> from pgmpy.estimators import ConstraintBasedEstimator
>>> from pgmpy.models import DAG
>>> from pgmpy.independencies import Independencies

>>> # build skeleton from list of independencies:
... ind = Independencies(['B', 'C'], ['A', ['B', 'C'], 'D'])
>>> # we need to compute closure, otherwise this set of independencies doesn't
... # admit a faithful representation:
... ind = ind.closure()
>>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton("ABCD", ind)
>>> print(skel.edges())
[('A', 'D'), ('B', 'D'), ('C', 'D')]

>>> # build skeleton from d-separations of DAG:
... model = DAG([('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')])
>>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton(model.nodes(), model.get_independencies())
>>> print(skel.edges())
[('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')]
[ "Estimates", "a", "graph", "skeleton", "(", "UndirectedGraph", ")", "from", "a", "set", "of", "independencies", "using", "(", "the", "first", "part", "of", ")", "the", "PC", "algorithm", ".", "The", "independencies", "can", "either", "be", "provided", "as", "an", "instance", "of", "the", "Independencies", "-", "class", "or", "by", "passing", "a", "decision", "function", "that", "decides", "any", "conditional", "independency", "assertion", ".", "Returns", "a", "tuple", "(", "skeleton", "separating_sets", ")", "." ]
python
train
49.505495
gmr/tinman
tinman/application.py
https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/application.py#L102-L114
def _import_module(self, module_path): """Dynamically import a module returning a handle to it. :param str module_path: The module path :rtype: module """ LOGGER.debug('Importing %s', module_path) try: return __import__(module_path) except ImportError as error: LOGGER.critical('Could not import %s: %s', module_path, error) return None
[ "def", "_import_module", "(", "self", ",", "module_path", ")", ":", "LOGGER", ".", "debug", "(", "'Importing %s'", ",", "module_path", ")", "try", ":", "return", "__import__", "(", "module_path", ")", "except", "ImportError", "as", "error", ":", "LOGGER", ".", "critical", "(", "'Could not import %s: %s'", ",", "module_path", ",", "error", ")", "return", "None" ]
Dynamically import a module returning a handle to it. :param str module_path: The module path :rtype: module
[ "Dynamically", "import", "a", "module", "returning", "a", "handle", "to", "it", "." ]
python
train
32.153846
hobson/aima
aima/logic.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/logic.py#L547-L561
def pl_resolution(KB, alpha): "Propositional-logic resolution: say if alpha follows from KB. [Fig. 7.12]" clauses = KB.clauses + conjuncts(to_cnf(~alpha)) new = set() while True: n = len(clauses) pairs = [(clauses[i], clauses[j]) for i in range(n) for j in range(i+1, n)] for (ci, cj) in pairs: resolvents = pl_resolve(ci, cj) if FALSE in resolvents: return True new = new.union(set(resolvents)) if new.issubset(set(clauses)): return False for c in new: if c not in clauses: clauses.append(c)
[ "def", "pl_resolution", "(", "KB", ",", "alpha", ")", ":", "clauses", "=", "KB", ".", "clauses", "+", "conjuncts", "(", "to_cnf", "(", "~", "alpha", ")", ")", "new", "=", "set", "(", ")", "while", "True", ":", "n", "=", "len", "(", "clauses", ")", "pairs", "=", "[", "(", "clauses", "[", "i", "]", ",", "clauses", "[", "j", "]", ")", "for", "i", "in", "range", "(", "n", ")", "for", "j", "in", "range", "(", "i", "+", "1", ",", "n", ")", "]", "for", "(", "ci", ",", "cj", ")", "in", "pairs", ":", "resolvents", "=", "pl_resolve", "(", "ci", ",", "cj", ")", "if", "FALSE", "in", "resolvents", ":", "return", "True", "new", "=", "new", ".", "union", "(", "set", "(", "resolvents", ")", ")", "if", "new", ".", "issubset", "(", "set", "(", "clauses", ")", ")", ":", "return", "False", "for", "c", "in", "new", ":", "if", "c", "not", "in", "clauses", ":", "clauses", ".", "append", "(", "c", ")" ]
Propositional-logic resolution: say if alpha follows from KB. [Fig. 7.12]
[ "Propositional", "-", "logic", "resolution", ":", "say", "if", "alpha", "follows", "from", "KB", ".", "[", "Fig", ".", "7", ".", "12", "]" ]
python
valid
39.933333
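A usage sketch with AIMA's propositional KB helpers from the same module; the clauses form a toy example.

from aima.logic import PropKB, expr, pl_resolution

kb = PropKB()
kb.tell(expr('P ==> Q'))  # AIMA's expr parses ==> as implication
kb.tell(expr('P'))
# Resolution refutation: KB & ~Q yields the empty clause, so Q follows.
print(pl_resolution(kb, expr('Q')))  # True
print(pl_resolution(kb, expr('R')))  # False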
incf-nidash/nidmresults
nidmresults/graph.py
https://github.com/incf-nidash/nidmresults/blob/438f7cce6abc4a4379b629bd76f4d427891e033f/nidmresults/graph.py#L358-L376
def _get_model_fitting(self, con_est_id):
        """
        Retrieve the model fitting that corresponds to the contrast with
        identifier 'con_est_id' from the list of model fitting objects stored
        in self.model_fittings
        """
        for (mpe_id, pe_ids), contrasts in self.contrasts.items():
            for contrast in contrasts:
                if contrast.estimation.id == con_est_id:
                    model_fitting_id = mpe_id
                    pe_map_ids = pe_ids
                    break

        for model_fitting in self.model_fittings:
            if model_fitting.activity.id == model_fitting_id:
                return (model_fitting, pe_map_ids)

        raise Exception("Model fitting of contrast : " + str(con_est_id) +
                        " not found.")
[ "def", "_get_model_fitting", "(", "self", ",", "con_est_id", ")", ":", "for", "(", "mpe_id", ",", "pe_ids", ")", ",", "contrasts", "in", "self", ".", "contrasts", ".", "items", "(", ")", ":", "for", "contrast", "in", "contrasts", ":", "if", "contrast", ".", "estimation", ".", "id", "==", "con_est_id", ":", "model_fitting_id", "=", "mpe_id", "pe_map_ids", "=", "pe_ids", "break", "for", "model_fitting", "in", "self", ".", "model_fittings", ":", "if", "model_fitting", ".", "activity", ".", "id", "==", "model_fitting_id", ":", "return", "(", "model_fitting", ",", "pe_map_ids", ")", "raise", "Exception", "(", "\"Model fitting of contrast : \"", "+", "str", "(", "con_est_id", ")", "+", "\" not found.\"", ")" ]
Retrieve the model fitting that corresponds to the contrast with identifier 'con_est_id' from the list of model fitting objects stored in self.model_fittings
[ "Retreive", "model", "fitting", "that", "corresponds", "to", "contrast", "with", "identifier", "con_id", "from", "the", "list", "of", "model", "fitting", "objects", "stored", "in", "self", ".", "model_fittings" ]
python
train
40.578947
gem/oq-engine
openquake/commonlib/source.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/source.py#L394-L417
def new(self, sources_by_grp):
        """
        Generate a new CompositeSourceModel from the given dictionary.

        :param sources_by_grp: a dictionary grp_id -> sources
        :returns: a new CompositeSourceModel instance
        """
        source_models = []
        for sm in self.source_models:
            src_groups = []
            for src_group in sm.src_groups:
                sg = copy.copy(src_group)
                sg.sources = sorted(sources_by_grp.get(sg.id, []),
                                    key=operator.attrgetter('id'))
                src_groups.append(sg)
            newsm = logictree.LtSourceModel(
                sm.names, sm.weight, sm.path, src_groups,
                sm.num_gsim_paths, sm.ordinal, sm.samples)
            source_models.append(newsm)
        new = self.__class__(self.gsim_lt, self.source_model_lt, source_models,
                             self.optimize_same_id)
        new.info.update_eff_ruptures(new.get_num_ruptures())
        new.info.tot_weight = new.get_weight()
        return new
[ "def", "new", "(", "self", ",", "sources_by_grp", ")", ":", "source_models", "=", "[", "]", "for", "sm", "in", "self", ".", "source_models", ":", "src_groups", "=", "[", "]", "for", "src_group", "in", "sm", ".", "src_groups", ":", "sg", "=", "copy", ".", "copy", "(", "src_group", ")", "sg", ".", "sources", "=", "sorted", "(", "sources_by_grp", ".", "get", "(", "sg", ".", "id", ",", "[", "]", ")", ",", "key", "=", "operator", ".", "attrgetter", "(", "'id'", ")", ")", "src_groups", ".", "append", "(", "sg", ")", "newsm", "=", "logictree", ".", "LtSourceModel", "(", "sm", ".", "names", ",", "sm", ".", "weight", ",", "sm", ".", "path", ",", "src_groups", ",", "sm", ".", "num_gsim_paths", ",", "sm", ".", "ordinal", ",", "sm", ".", "samples", ")", "source_models", ".", "append", "(", "newsm", ")", "new", "=", "self", ".", "__class__", "(", "self", ".", "gsim_lt", ",", "self", ".", "source_model_lt", ",", "source_models", ",", "self", ".", "optimize_same_id", ")", "new", ".", "info", ".", "update_eff_ruptures", "(", "new", ".", "get_num_ruptures", "(", ")", ")", "new", ".", "info", ".", "tot_weight", "=", "new", ".", "get_weight", "(", ")", "return", "new" ]
Generate a new CompositeSourceModel from the given dictionary.

:param sources_by_grp: a dictionary grp_id -> sources
:returns: a new CompositeSourceModel instance
[ "Generate", "a", "new", "CompositeSourceModel", "from", "the", "given", "dictionary", "." ]
python
train
43.041667
tensorflow/cleverhans
examples/nips17_adversarial_competition/dev_toolkit/run_attacks_and_defenses.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/run_attacks_and_defenses.py#L16-L44
def parse_args(): """Parses command line arguments.""" parser = argparse.ArgumentParser( description='Tool to run attacks and defenses.') parser.add_argument('--attacks_dir', required=True, help='Location of all attacks.') parser.add_argument('--targeted_attacks_dir', required=True, help='Location of all targeted attacks.') parser.add_argument('--defenses_dir', required=True, help='Location of all defenses.') parser.add_argument('--dataset_dir', required=True, help='Location of the dataset.') parser.add_argument('--dataset_metadata', required=True, help='Location of the dataset metadata.') parser.add_argument('--intermediate_results_dir', required=True, help='Directory to store intermediate results.') parser.add_argument('--output_dir', required=True, help=('Output directory.')) parser.add_argument('--epsilon', required=False, type=int, default=16, help='Maximum allowed size of adversarial perturbation') parser.add_argument('--gpu', dest='use_gpu', action='store_true') parser.add_argument('--nogpu', dest='use_gpu', action='store_false') parser.set_defaults(use_gpu=False) parser.add_argument('--save_all_classification', dest='save_all_classification', action='store_true') parser.add_argument('--nosave_all_classification', dest='save_all_classification', action='store_false') parser.set_defaults(save_all_classification=False) return parser.parse_args()
[ "def", "parse_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Tool to run attacks and defenses.'", ")", "parser", ".", "add_argument", "(", "'--attacks_dir'", ",", "required", "=", "True", ",", "help", "=", "'Location of all attacks.'", ")", "parser", ".", "add_argument", "(", "'--targeted_attacks_dir'", ",", "required", "=", "True", ",", "help", "=", "'Location of all targeted attacks.'", ")", "parser", ".", "add_argument", "(", "'--defenses_dir'", ",", "required", "=", "True", ",", "help", "=", "'Location of all defenses.'", ")", "parser", ".", "add_argument", "(", "'--dataset_dir'", ",", "required", "=", "True", ",", "help", "=", "'Location of the dataset.'", ")", "parser", ".", "add_argument", "(", "'--dataset_metadata'", ",", "required", "=", "True", ",", "help", "=", "'Location of the dataset metadata.'", ")", "parser", ".", "add_argument", "(", "'--intermediate_results_dir'", ",", "required", "=", "True", ",", "help", "=", "'Directory to store intermediate results.'", ")", "parser", ".", "add_argument", "(", "'--output_dir'", ",", "required", "=", "True", ",", "help", "=", "(", "'Output directory.'", ")", ")", "parser", ".", "add_argument", "(", "'--epsilon'", ",", "required", "=", "False", ",", "type", "=", "int", ",", "default", "=", "16", ",", "help", "=", "'Maximum allowed size of adversarial perturbation'", ")", "parser", ".", "add_argument", "(", "'--gpu'", ",", "dest", "=", "'use_gpu'", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'--nogpu'", ",", "dest", "=", "'use_gpu'", ",", "action", "=", "'store_false'", ")", "parser", ".", "set_defaults", "(", "use_gpu", "=", "False", ")", "parser", ".", "add_argument", "(", "'--save_all_classification'", ",", "dest", "=", "'save_all_classification'", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'--nosave_all_classification'", ",", "dest", "=", "'save_all_classification'", ",", "action", "=", "'store_false'", ")", "parser", ".", "set_defaults", "(", "save_all_classification", "=", "False", ")", "return", "parser", ".", "parse_args", "(", ")" ]
Parses command line arguments.
[ "Parses", "command", "line", "arguments", "." ]
python
train
55.310345
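For illustration, a minimal sketch of driving this parser directly; every path below is a placeholder, not a value shipped with the toolkit:

    import sys
    # All of the --*_dir flags are required, so a bare invocation exits with an error.
    sys.argv = ['run_attacks_and_defenses.py',
                '--attacks_dir', '/tmp/attacks',
                '--targeted_attacks_dir', '/tmp/targeted',
                '--defenses_dir', '/tmp/defenses',
                '--dataset_dir', '/tmp/dataset',
                '--dataset_metadata', '/tmp/metadata.csv',
                '--intermediate_results_dir', '/tmp/intermediate',
                '--output_dir', '/tmp/output',
                '--gpu']
    args = parse_args()
    assert args.use_gpu and args.epsilon == 16  # epsilon keeps its default of 16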
astropy/astropy-helpers
astropy_helpers/setup_helpers.py
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/setup_helpers.py#L484-L505
def iter_setup_packages(srcdir, packages): """ A generator that finds and imports all of the ``setup_package.py`` modules in the source packages. Returns ------- modgen : generator A generator that yields (modname, mod), where `mod` is the module and `modname` is the module name for the ``setup_package.py`` modules. """ for packagename in packages: package_parts = packagename.split('.') package_path = os.path.join(srcdir, *package_parts) setup_package = os.path.relpath( os.path.join(package_path, 'setup_package.py')) if os.path.isfile(setup_package): module = import_file(setup_package, name=packagename + '.setup_package') yield module
[ "def", "iter_setup_packages", "(", "srcdir", ",", "packages", ")", ":", "for", "packagename", "in", "packages", ":", "package_parts", "=", "packagename", ".", "split", "(", "'.'", ")", "package_path", "=", "os", ".", "path", ".", "join", "(", "srcdir", ",", "*", "package_parts", ")", "setup_package", "=", "os", ".", "path", ".", "relpath", "(", "os", ".", "path", ".", "join", "(", "package_path", ",", "'setup_package.py'", ")", ")", "if", "os", ".", "path", ".", "isfile", "(", "setup_package", ")", ":", "module", "=", "import_file", "(", "setup_package", ",", "name", "=", "packagename", "+", "'.setup_package'", ")", "yield", "module" ]
A generator that finds and imports all of the ``setup_package.py`` modules in the source packages. Returns ------- modgen : generator A generator that yields (modname, mod), where `mod` is the module and `modname` is the module name for the ``setup_package.py`` modules.
[ "A", "generator", "that", "finds", "and", "imports", "all", "of", "the", "setup_package", ".", "py", "modules", "in", "the", "source", "packages", "." ]
python
train
35
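A small usage sketch, assuming a hypothetical source tree with a package named mypkg that ships a setup_package.py:

    # srcdir layout (hypothetical):
    #   ./mypkg/setup_package.py
    #   ./mypkg/sub/             (no setup_package.py, so it is skipped)
    for module in iter_setup_packages('.', ['mypkg', 'mypkg.sub']):
        print(module.__name__)   # only 'mypkg.setup_package' is yielded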
blockstack-packages/blockstack-gpg
blockstack_gpg/gpg.py
https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L206-L232
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.
    Return the ASCII-armored key
    """
    config_dir = get_config_dir( config_dir )
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None
    try:
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError as e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None
    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
[ "def", "gpg_download_key", "(", "key_id", ",", "key_server", ",", "config_dir", "=", "None", ")", ":", "config_dir", "=", "get_config_dir", "(", "config_dir", ")", "tmpdir", "=", "make_gpg_tmphome", "(", "prefix", "=", "\"download\"", ",", "config_dir", "=", "config_dir", ")", "gpg", "=", "gnupg", ".", "GPG", "(", "homedir", "=", "tmpdir", ")", "recvdat", "=", "gpg", ".", "recv_keys", "(", "key_server", ",", "key_id", ")", "fingerprint", "=", "None", "try", ":", "assert", "recvdat", ".", "count", "==", "1", "assert", "len", "(", "recvdat", ".", "fingerprints", ")", "==", "1", "fingerprint", "=", "recvdat", ".", "fingerprints", "[", "0", "]", "except", "AssertionError", ",", "e", ":", "log", ".", "exception", "(", "e", ")", "log", ".", "error", "(", "\"Failed to fetch key '%s' from '%s'\"", "%", "(", "key_id", ",", "key_server", ")", ")", "shutil", ".", "rmtree", "(", "tmpdir", ")", "return", "None", "keydat", "=", "gpg", ".", "export_keys", "(", "[", "fingerprint", "]", ")", "shutil", ".", "rmtree", "(", "tmpdir", ")", "return", "str", "(", "keydat", ")" ]
Download a GPG key from a key server. Do not import it into any keyrings. Return the ASCII-armored key
[ "Download", "a", "GPG", "key", "from", "a", "key", "server", ".", "Do", "not", "import", "it", "into", "any", "keyrings", ".", "Return", "the", "ASCII", "-", "armored", "key" ]
python
train
30.333333
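A usage sketch; the key id and key server below are placeholders:

    keydat = gpg_download_key('0x90AFE9B0', 'pgp.mit.edu')
    if keydat is None:
        print('key not found or fetch failed')
    else:
        # The key comes back ASCII-armored and is never imported into a keyring.
        assert keydat.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----')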
kejbaly2/metrique
metrique/reporting.py
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/reporting.py#L87-L99
def add_image(self, figure, dpi=72):
    '''
    Adds an image to the last chapter/section.
    The image will be stored in the `{self.title}_files` directory.

    :param matplotlib.figure figure: A matplotlib figure to be saved into the report
    '''
    name = os.path.join(self._dir, 'fig%s.png' % self.fig_counter)
    self.fig_counter += 1
    figure.savefig(name, dpi=dpi)
    plt.close(figure)
    self.body += '<img src="%s" />\n' % name
[ "def", "add_image", "(", "self", ",", "figure", ",", "dpi", "=", "72", ")", ":", "name", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_dir", ",", "'/fig%s.png'", "%", "self", ".", "fig_counter", ")", "self", ".", "fig_counter", "+=", "1", "figure", ".", "savefig", "(", "name", ",", "dpi", "=", "dpi", ")", "plt", ".", "close", "(", "figure", ")", "self", ".", "body", "+=", "'<img src=\"%s\" />\\n'", "%", "name" ]
Adds an image to the last chapter/section. The image will be stored in the `{self.title}_files` directory. :param matplotlib.figure figure: A matplotlib figure to be saved into the report
[ "Adds", "an", "image", "to", "the", "last", "chapter", "/", "section", ".", "The", "image", "will", "be", "stored", "in", "the", "{", "self", ".", "title", "}", "_files", "directory", "." ]
python
train
37.538462
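A usage sketch, where report stands in for an instance of the surrounding reporting class:

    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])
    report.add_image(fig, dpi=150)   # saves fig<N>.png and closes the figure
    # An <img> tag pointing at the saved file is appended to report.body.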
ptmcg/littletable
littletable.py
https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1535-L1556
def dump(self, out=sys.stdout, row_fn=repr, limit=-1, indent=0): """Dump out the contents of this table in a nested listing. @param out: output stream to write to @param row_fn: function to call to display individual rows @param limit: number of records to show at deepest level of pivot (-1=show all) @param indent: current nesting level """ NL = '\n' if indent: out.write(" "*indent + self.pivot_key_str()) else: out.write("Pivot: %s" % ','.join(self._pivot_attrs)) out.write(NL) if self.has_subtables(): do_all(sub.dump(out, row_fn, limit, indent+1) for sub in self.subtables if sub) else: if limit >= 0: showslice = slice(0, limit) else: showslice = slice(None, None) do_all(out.write(" "*(indent+1) + row_fn(r) + NL) for r in self.obs[showslice]) out.flush()
[ "def", "dump", "(", "self", ",", "out", "=", "sys", ".", "stdout", ",", "row_fn", "=", "repr", ",", "limit", "=", "-", "1", ",", "indent", "=", "0", ")", ":", "NL", "=", "'\\n'", "if", "indent", ":", "out", ".", "write", "(", "\" \"", "*", "indent", "+", "self", ".", "pivot_key_str", "(", ")", ")", "else", ":", "out", ".", "write", "(", "\"Pivot: %s\"", "%", "','", ".", "join", "(", "self", ".", "_pivot_attrs", ")", ")", "out", ".", "write", "(", "NL", ")", "if", "self", ".", "has_subtables", "(", ")", ":", "do_all", "(", "sub", ".", "dump", "(", "out", ",", "row_fn", ",", "limit", ",", "indent", "+", "1", ")", "for", "sub", "in", "self", ".", "subtables", "if", "sub", ")", "else", ":", "if", "limit", ">=", "0", ":", "showslice", "=", "slice", "(", "0", ",", "limit", ")", "else", ":", "showslice", "=", "slice", "(", "None", ",", "None", ")", "do_all", "(", "out", ".", "write", "(", "\" \"", "*", "(", "indent", "+", "1", ")", "+", "row_fn", "(", "r", ")", "+", "NL", ")", "for", "r", "in", "self", ".", "obs", "[", "showslice", "]", ")", "out", ".", "flush", "(", ")" ]
Dump out the contents of this table in a nested listing. @param out: output stream to write to @param row_fn: function to call to display individual rows @param limit: number of records to show at deepest level of pivot (-1=show all) @param indent: current nesting level
[ "Dump", "out", "the", "contents", "of", "this", "table", "in", "a", "nested", "listing", "." ]
python
train
43.818182
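A usage sketch, assuming a populated littletable Table that has already been pivoted on two hypothetical attributes:

    import sys
    pivot = table.pivot('state city')                 # 'table' is a populated Table
    pivot.dump(out=sys.stdout, row_fn=str, limit=3)   # at most 3 rows per leaf table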
saltstack/salt
salt/modules/xbpspkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xbpspkg.py#L433-L484
def remove(name=None, pkgs=None, recursive=True, **kwargs): ''' name The name of the package to be deleted. recursive Also remove dependent packages (not required elsewhere). Default mode: enabled. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> [recursive=False] salt '*' pkg.remove <package1>,<package2>,<package3> [recursive=False] salt '*' pkg.remove pkgs='["foo", "bar"]' [recursive=False] ''' try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( name, pkgs ) except MinionError as exc: raise CommandExecutionError(exc) if not pkg_params: return {} old = list_pkgs() # keep only installed packages targets = [x for x in pkg_params if x in old] if not targets: return {} cmd = ['xbps-remove', '-y'] if recursive: cmd.append('-R') cmd.extend(targets) __salt__['cmd.run'](cmd, output_loglevel='trace') __context__.pop('pkg.list_pkgs', None) new = list_pkgs() return salt.utils.data.compare_dicts(old, new)
[ "def", "remove", "(", "name", "=", "None", ",", "pkgs", "=", "None", ",", "recursive", "=", "True", ",", "*", "*", "kwargs", ")", ":", "try", ":", "pkg_params", ",", "pkg_type", "=", "__salt__", "[", "'pkg_resource.parse_targets'", "]", "(", "name", ",", "pkgs", ")", "except", "MinionError", "as", "exc", ":", "raise", "CommandExecutionError", "(", "exc", ")", "if", "not", "pkg_params", ":", "return", "{", "}", "old", "=", "list_pkgs", "(", ")", "# keep only installed packages", "targets", "=", "[", "x", "for", "x", "in", "pkg_params", "if", "x", "in", "old", "]", "if", "not", "targets", ":", "return", "{", "}", "cmd", "=", "[", "'xbps-remove'", ",", "'-y'", "]", "if", "recursive", ":", "cmd", ".", "append", "(", "'-R'", ")", "cmd", ".", "extend", "(", "targets", ")", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "output_loglevel", "=", "'trace'", ")", "__context__", ".", "pop", "(", "'pkg.list_pkgs'", ",", "None", ")", "new", "=", "list_pkgs", "(", ")", "return", "salt", ".", "utils", ".", "data", ".", "compare_dicts", "(", "old", ",", "new", ")" ]
name The name of the package to be deleted. recursive Also remove dependent packages (not required elsewhere). Default mode: enabled. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> [recursive=False] salt '*' pkg.remove <package1>,<package2>,<package3> [recursive=False] salt '*' pkg.remove pkgs='["foo", "bar"]' [recursive=False]
[ "name", "The", "name", "of", "the", "package", "to", "be", "deleted", "." ]
python
train
25.538462
BD2KGenomics/protect
src/protect/mutation_calling/fusion.py
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/fusion.py#L287-L352
def split_fusion_transcript(annotation_path, transcripts): """ Finds the breakpoint in the fusion transcript and splits the 5' donor from the 3' acceptor :param str annotation_path: Path to transcript annotation file :param dict transcripts: Dictionary of fusion transcripts :return: 5' donor sequences and 3' acceptor sequences :rtype: tuple """ annotation = collections.defaultdict(dict) forward = 'ACGTN' reverse = 'TGCAN' trans = string.maketrans(forward, reverse) # Pull in assembled transcript annotation five_pr_splits = collections.defaultdict(dict) three_pr_splits = collections.defaultdict(dict) regex = re.compile(r'ID=(?P<ID>.*);Name=(?P<Name>.*);Target=(?P<Target>.*)\s(?P<start>\d+)\s(?P<stop>\d+)') with open(annotation_path, 'r') as gff: for line in gff: print(line) if line.startswith('#'): _, eyd, fusion = line.strip().split() fusion, start_stop = fusion.split(':') left_break, right_break = start_stop.split('-') annotation[fusion][eyd] = {} annotation[fusion][eyd]['left_break'] = left_break annotation[fusion][eyd]['right_break'] = right_break else: line = line.strip().split('\t') fusion = line[0] strand = line[6] block_start = line[3] block_stop = line[4] attr = line[8] m = regex.search(attr) if m: transcript_id = m.group('Name') rb = any([block_start == annotation[fusion][transcript_id]['right_break'], block_stop == annotation[fusion][transcript_id]['right_break']]) lb = any([block_start == annotation[fusion][transcript_id]['left_break'], block_stop == annotation[fusion][transcript_id]['left_break']]) if strand == '-' and rb: transcript_split = int(m.group('stop')) + 1 # Off by one # Take the reverse complement to orient transcripts from 5' to 3' five_seq = transcripts[transcript_id][transcript_split:] five_pr_splits[fusion][transcript_id] = five_seq.translate(trans)[::-1] three_seq = transcripts[transcript_id][:transcript_split] three_pr_splits[fusion][transcript_id] = three_seq.translate(trans)[::-1] elif strand == '+' and lb: transcript_split = int(m.group('stop')) s1 = transcripts[transcript_id][:transcript_split] five_pr_splits[fusion][transcript_id] = s1 s2 = transcripts[transcript_id][transcript_split:] three_pr_splits[fusion][transcript_id] = s2 return five_pr_splits, three_pr_splits
[ "def", "split_fusion_transcript", "(", "annotation_path", ",", "transcripts", ")", ":", "annotation", "=", "collections", ".", "defaultdict", "(", "dict", ")", "forward", "=", "'ACGTN'", "reverse", "=", "'TGCAN'", "trans", "=", "string", ".", "maketrans", "(", "forward", ",", "reverse", ")", "# Pull in assembled transcript annotation", "five_pr_splits", "=", "collections", ".", "defaultdict", "(", "dict", ")", "three_pr_splits", "=", "collections", ".", "defaultdict", "(", "dict", ")", "regex", "=", "re", ".", "compile", "(", "r'ID=(?P<ID>.*);Name=(?P<Name>.*);Target=(?P<Target>.*)\\s(?P<start>\\d+)\\s(?P<stop>\\d+)'", ")", "with", "open", "(", "annotation_path", ",", "'r'", ")", "as", "gff", ":", "for", "line", "in", "gff", ":", "print", "(", "line", ")", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "_", ",", "eyd", ",", "fusion", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "fusion", ",", "start_stop", "=", "fusion", ".", "split", "(", "':'", ")", "left_break", ",", "right_break", "=", "start_stop", ".", "split", "(", "'-'", ")", "annotation", "[", "fusion", "]", "[", "eyd", "]", "=", "{", "}", "annotation", "[", "fusion", "]", "[", "eyd", "]", "[", "'left_break'", "]", "=", "left_break", "annotation", "[", "fusion", "]", "[", "eyd", "]", "[", "'right_break'", "]", "=", "right_break", "else", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "fusion", "=", "line", "[", "0", "]", "strand", "=", "line", "[", "6", "]", "block_start", "=", "line", "[", "3", "]", "block_stop", "=", "line", "[", "4", "]", "attr", "=", "line", "[", "8", "]", "m", "=", "regex", ".", "search", "(", "attr", ")", "if", "m", ":", "transcript_id", "=", "m", ".", "group", "(", "'Name'", ")", "rb", "=", "any", "(", "[", "block_start", "==", "annotation", "[", "fusion", "]", "[", "transcript_id", "]", "[", "'right_break'", "]", ",", "block_stop", "==", "annotation", "[", "fusion", "]", "[", "transcript_id", "]", "[", "'right_break'", "]", "]", ")", "lb", "=", "any", "(", "[", "block_start", "==", "annotation", "[", "fusion", "]", "[", "transcript_id", "]", "[", "'left_break'", "]", ",", "block_stop", "==", "annotation", "[", "fusion", "]", "[", "transcript_id", "]", "[", "'left_break'", "]", "]", ")", "if", "strand", "==", "'-'", "and", "rb", ":", "transcript_split", "=", "int", "(", "m", ".", "group", "(", "'stop'", ")", ")", "+", "1", "# Off by one", "# Take the reverse complement to orient transcripts from 5' to 3'", "five_seq", "=", "transcripts", "[", "transcript_id", "]", "[", "transcript_split", ":", "]", "five_pr_splits", "[", "fusion", "]", "[", "transcript_id", "]", "=", "five_seq", ".", "translate", "(", "trans", ")", "[", ":", ":", "-", "1", "]", "three_seq", "=", "transcripts", "[", "transcript_id", "]", "[", ":", "transcript_split", "]", "three_pr_splits", "[", "fusion", "]", "[", "transcript_id", "]", "=", "three_seq", ".", "translate", "(", "trans", ")", "[", ":", ":", "-", "1", "]", "elif", "strand", "==", "'+'", "and", "lb", ":", "transcript_split", "=", "int", "(", "m", ".", "group", "(", "'stop'", ")", ")", "s1", "=", "transcripts", "[", "transcript_id", "]", "[", ":", "transcript_split", "]", "five_pr_splits", "[", "fusion", "]", "[", "transcript_id", "]", "=", "s1", "s2", "=", "transcripts", "[", "transcript_id", "]", "[", "transcript_split", ":", "]", "three_pr_splits", "[", "fusion", "]", "[", "transcript_id", "]", "=", "s2", "return", "five_pr_splits", ",", "three_pr_splits" ]
Finds the breakpoint in the fusion transcript and splits the 5' donor from the 3' acceptor :param str annotation_path: Path to transcript annotation file :param dict transcripts: Dictionary of fusion transcripts :return: 5' donor sequences and 3' acceptor sequences :rtype: tuple
[ "Finds", "the", "breakpoint", "in", "the", "fusion", "transcript", "and", "splits", "the", "5", "donor", "from", "the", "3", "acceptor" ]
python
train
44.651515
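The 5'/3' splitting relies on the translate-then-reverse idiom for reverse complements; a minimal sketch of just that step (Python 2, matching the string.maketrans call above):

    import string
    trans = string.maketrans('ACGTN', 'TGCAN')
    seq = 'ATCGN'
    print(seq.translate(trans)[::-1])   # 'NCGAT': complement each base, then reverse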
angr/pyvex
pyvex/lifting/__init__.py
https://github.com/angr/pyvex/blob/c418edc1146982b2a0579bf56e5993c1c7046b19/pyvex/lifting/__init__.py#L216-L229
def register(lifter, arch_name):
    """
    Registers a Lifter or Postprocessor to be used by pyvex. Lifters are
    given priority based on the order in which they are registered. Postprocessors will be run in registration order.

    :param lifter:       The Lifter or Postprocessor to register
    :vartype lifter:     :class:`Lifter` or :class:`Postprocessor`
    """
    if issubclass(lifter, Lifter):
        l.debug("Registering lifter %s for architecture %s.", lifter.__name__, arch_name)
        lifters[arch_name].append(lifter)
    if issubclass(lifter, Postprocessor):
        l.debug("Registering postprocessor %s for architecture %s.", lifter.__name__, arch_name)
        postprocessors[arch_name].append(lifter)
[ "def", "register", "(", "lifter", ",", "arch_name", ")", ":", "if", "issubclass", "(", "lifter", ",", "Lifter", ")", ":", "l", ".", "debug", "(", "\"Registering lifter %s for architecture %s.\"", ",", "lifter", ".", "__name__", ",", "arch_name", ")", "lifters", "[", "arch_name", "]", ".", "append", "(", "lifter", ")", "if", "issubclass", "(", "lifter", ",", "Postprocessor", ")", ":", "l", ".", "debug", "(", "\"Registering postprocessor %s for architecture %s.\"", ",", "lifter", ".", "__name__", ",", "arch_name", ")", "postprocessors", "[", "arch_name", "]", ".", "append", "(", "lifter", ")" ]
Registers a Lifter or Postprocessor to be used by pyvex. Lifters are
given priority based on the order in which they are registered. Postprocessors will be run in registration order.

:param lifter: The Lifter or Postprocessor to register
:vartype lifter: :class:`Lifter` or :class:`Postprocessor`
[ "Registers", "a", "Lifter", "or", "Postprocessor", "to", "be", "used", "by", "pyvex", ".", "Lifters", "are", "are", "given", "priority", "based", "on", "the", "order", "in", "which", "they", "are", "registered", ".", "Postprocessors", "will", "be", "run", "in", "registration", "order", "." ]
python
train
51.285714
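A registration sketch with a hypothetical lifter class and architecture name:

    class MyLifter(Lifter):          # hypothetical lifter for a custom architecture
        def lift(self):
            pass                     # real lifters populate the IRSB here

    register(MyLifter, 'MYARCH')     # appended to lifters['MYARCH'], preserving priority order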
seb-m/pyinotify
python3/pyinotify.py
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python3/pyinotify.py#L256-L264
def logger_init(): """Initialize logger instance.""" log = logging.getLogger("pyinotify") console_handler = logging.StreamHandler() console_handler.setFormatter( logging.Formatter("[%(asctime)s %(name)s %(levelname)s] %(message)s")) log.addHandler(console_handler) log.setLevel(20) return log
[ "def", "logger_init", "(", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "\"pyinotify\"", ")", "console_handler", "=", "logging", ".", "StreamHandler", "(", ")", "console_handler", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "\"[%(asctime)s %(name)s %(levelname)s] %(message)s\"", ")", ")", "log", ".", "addHandler", "(", "console_handler", ")", "log", ".", "setLevel", "(", "20", ")", "return", "log" ]
Initialize logger instance.
[ "Initialize", "logger", "instance", "." ]
python
train
35.555556
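The numeric level 20 passed to setLevel is the value of logging.INFO; a short usage sketch:

    import logging
    log = logger_init()
    assert log.level == logging.INFO   # setLevel(20) == setLevel(logging.INFO)
    log.info('pyinotify logger ready')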
wandb/client
wandb/apis/internal.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L1096-L1103
def get_file_stream_api(self): """This creates a new file pusher thread. Call start to initiate the thread that talks to W&B""" if not self._file_stream_api: if self._current_run_id is None: raise UsageError( 'Must have a current run to use file stream API.') self._file_stream_api = FileStreamApi(self, self._current_run_id) return self._file_stream_api
[ "def", "get_file_stream_api", "(", "self", ")", ":", "if", "not", "self", ".", "_file_stream_api", ":", "if", "self", ".", "_current_run_id", "is", "None", ":", "raise", "UsageError", "(", "'Must have a current run to use file stream API.'", ")", "self", ".", "_file_stream_api", "=", "FileStreamApi", "(", "self", ",", "self", ".", "_current_run_id", ")", "return", "self", ".", "_file_stream_api" ]
This creates a new file pusher thread. Call start to initiate the thread that talks to W&B
[ "This", "creates", "a", "new", "file", "pusher", "thread", ".", "Call", "start", "to", "initiate", "the", "thread", "that", "talks", "to", "W&B" ]
python
train
54
OpenAgInitiative/openag_python
openag/couch.py
https://github.com/OpenAgInitiative/openag_python/blob/f6202340292bbf7185e1a7d4290188c0dacbb8d0/openag/couch.py#L111-L128
def push_design_documents(self, design_path): """ Push the design documents stored in `design_path` to the server """ for db_name in os.listdir(design_path): if db_name.startswith("__") or db_name.startswith("."): continue db_path = os.path.join(design_path, db_name) doc = self._folder_to_dict(db_path) doc_id = "_design/openag" doc["_id"] = doc_id db = self[db_name] if doc_id in db: old_doc = db[doc_id] doc["_rev"] = old_doc["_rev"] if doc == old_doc: continue db[doc_id] = doc
[ "def", "push_design_documents", "(", "self", ",", "design_path", ")", ":", "for", "db_name", "in", "os", ".", "listdir", "(", "design_path", ")", ":", "if", "db_name", ".", "startswith", "(", "\"__\"", ")", "or", "db_name", ".", "startswith", "(", "\".\"", ")", ":", "continue", "db_path", "=", "os", ".", "path", ".", "join", "(", "design_path", ",", "db_name", ")", "doc", "=", "self", ".", "_folder_to_dict", "(", "db_path", ")", "doc_id", "=", "\"_design/openag\"", "doc", "[", "\"_id\"", "]", "=", "doc_id", "db", "=", "self", "[", "db_name", "]", "if", "doc_id", "in", "db", ":", "old_doc", "=", "db", "[", "doc_id", "]", "doc", "[", "\"_rev\"", "]", "=", "old_doc", "[", "\"_rev\"", "]", "if", "doc", "==", "old_doc", ":", "continue", "db", "[", "doc_id", "]", "=", "doc" ]
Push the design documents stored in `design_path` to the server
[ "Push", "the", "design", "documents", "stored", "in", "design_path", "to", "the", "server" ]
python
train
37.555556
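A usage sketch, assuming the surrounding class wraps a CouchDB server object and that design_path holds one folder per database:

    # designs/ (hypothetical layout)
    #   recipes/views/by_name/map.js   -> becomes _design/openag in the 'recipes' db
    server.push_design_documents('designs')   # documents identical to the stored copy are skipped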
openego/ding0
ding0/tools/pypsa_io.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/tools/pypsa_io.py#L298-L365
def edges_to_dict_of_dataframes(grid, edges): """ Export edges to DataFrame Parameters ---------- grid: ding0.Network edges: list Edges of Ding0.Network graph Returns ------- edges_dict: dict """ omega = 2 * pi * 50 srid = int(cfg_ding0.get('geo', 'srid')) lines = {'line_id': [], 'bus0': [], 'bus1': [], 'x': [], 'r': [], 's_nom': [], 'length': [], 'cables': [], 'geom': [], 'grid_id': []} # iterate over edges and add them one by one for edge in edges: line_name = '_'.join(['MV', str(grid.id_db), 'lin', str(edge['branch'].id_db)]) # TODO: find the real cause for being L, C, I_th_max type of Series if (isinstance(edge['branch'].type['L'], Series) or isinstance(edge['branch'].type['C'], Series)): x = omega * edge['branch'].type['L'].values[0] * 1e-3 else: x = omega * edge['branch'].type['L'] * 1e-3 if isinstance(edge['branch'].type['R'], Series): r = edge['branch'].type['R'].values[0] else: r = edge['branch'].type['R'] if (isinstance(edge['branch'].type['I_max_th'], Series) or isinstance(edge['branch'].type['U_n'], Series)): s_nom = sqrt(3) * edge['branch'].type['I_max_th'].values[0] * \ edge['branch'].type['U_n'].values[0] else: s_nom = sqrt(3) * edge['branch'].type['I_max_th'] * \ edge['branch'].type['U_n'] # get lengths of line l = edge['branch'].length / 1e3 lines['line_id'].append(line_name) lines['bus0'].append(edge['adj_nodes'][0].pypsa_id) lines['bus1'].append(edge['adj_nodes'][1].pypsa_id) lines['x'].append(x * l) lines['r'].append(r * l) lines['s_nom'].append(s_nom) lines['length'].append(l) lines['cables'].append(3) lines['geom'].append(from_shape( LineString([edge['adj_nodes'][0].geo_data, edge['adj_nodes'][1].geo_data]), srid=srid)) lines['grid_id'].append(grid.id_db) return {'Line': DataFrame(lines).set_index('line_id')}
[ "def", "edges_to_dict_of_dataframes", "(", "grid", ",", "edges", ")", ":", "omega", "=", "2", "*", "pi", "*", "50", "srid", "=", "int", "(", "cfg_ding0", ".", "get", "(", "'geo'", ",", "'srid'", ")", ")", "lines", "=", "{", "'line_id'", ":", "[", "]", ",", "'bus0'", ":", "[", "]", ",", "'bus1'", ":", "[", "]", ",", "'x'", ":", "[", "]", ",", "'r'", ":", "[", "]", ",", "'s_nom'", ":", "[", "]", ",", "'length'", ":", "[", "]", ",", "'cables'", ":", "[", "]", ",", "'geom'", ":", "[", "]", ",", "'grid_id'", ":", "[", "]", "}", "# iterate over edges and add them one by one", "for", "edge", "in", "edges", ":", "line_name", "=", "'_'", ".", "join", "(", "[", "'MV'", ",", "str", "(", "grid", ".", "id_db", ")", ",", "'lin'", ",", "str", "(", "edge", "[", "'branch'", "]", ".", "id_db", ")", "]", ")", "# TODO: find the real cause for being L, C, I_th_max type of Series", "if", "(", "isinstance", "(", "edge", "[", "'branch'", "]", ".", "type", "[", "'L'", "]", ",", "Series", ")", "or", "isinstance", "(", "edge", "[", "'branch'", "]", ".", "type", "[", "'C'", "]", ",", "Series", ")", ")", ":", "x", "=", "omega", "*", "edge", "[", "'branch'", "]", ".", "type", "[", "'L'", "]", ".", "values", "[", "0", "]", "*", "1e-3", "else", ":", "x", "=", "omega", "*", "edge", "[", "'branch'", "]", ".", "type", "[", "'L'", "]", "*", "1e-3", "if", "isinstance", "(", "edge", "[", "'branch'", "]", ".", "type", "[", "'R'", "]", ",", "Series", ")", ":", "r", "=", "edge", "[", "'branch'", "]", ".", "type", "[", "'R'", "]", ".", "values", "[", "0", "]", "else", ":", "r", "=", "edge", "[", "'branch'", "]", ".", "type", "[", "'R'", "]", "if", "(", "isinstance", "(", "edge", "[", "'branch'", "]", ".", "type", "[", "'I_max_th'", "]", ",", "Series", ")", "or", "isinstance", "(", "edge", "[", "'branch'", "]", ".", "type", "[", "'U_n'", "]", ",", "Series", ")", ")", ":", "s_nom", "=", "sqrt", "(", "3", ")", "*", "edge", "[", "'branch'", "]", ".", "type", "[", "'I_max_th'", "]", ".", "values", "[", "0", "]", "*", "edge", "[", "'branch'", "]", ".", "type", "[", "'U_n'", "]", ".", "values", "[", "0", "]", "else", ":", "s_nom", "=", "sqrt", "(", "3", ")", "*", "edge", "[", "'branch'", "]", ".", "type", "[", "'I_max_th'", "]", "*", "edge", "[", "'branch'", "]", ".", "type", "[", "'U_n'", "]", "# get lengths of line", "l", "=", "edge", "[", "'branch'", "]", ".", "length", "/", "1e3", "lines", "[", "'line_id'", "]", ".", "append", "(", "line_name", ")", "lines", "[", "'bus0'", "]", ".", "append", "(", "edge", "[", "'adj_nodes'", "]", "[", "0", "]", ".", "pypsa_id", ")", "lines", "[", "'bus1'", "]", ".", "append", "(", "edge", "[", "'adj_nodes'", "]", "[", "1", "]", ".", "pypsa_id", ")", "lines", "[", "'x'", "]", ".", "append", "(", "x", "*", "l", ")", "lines", "[", "'r'", "]", ".", "append", "(", "r", "*", "l", ")", "lines", "[", "'s_nom'", "]", ".", "append", "(", "s_nom", ")", "lines", "[", "'length'", "]", ".", "append", "(", "l", ")", "lines", "[", "'cables'", "]", ".", "append", "(", "3", ")", "lines", "[", "'geom'", "]", ".", "append", "(", "from_shape", "(", "LineString", "(", "[", "edge", "[", "'adj_nodes'", "]", "[", "0", "]", ".", "geo_data", ",", "edge", "[", "'adj_nodes'", "]", "[", "1", "]", ".", "geo_data", "]", ")", ",", "srid", "=", "srid", ")", ")", "lines", "[", "'grid_id'", "]", ".", "append", "(", "grid", ".", "id_db", ")", "return", "{", "'Line'", ":", "DataFrame", "(", "lines", ")", ".", "set_index", "(", "'line_id'", ")", "}" ]
Export edges to DataFrame Parameters ---------- grid: ding0.Network edges: list Edges of Ding0.Network graph Returns ------- edges_dict: dict
[ "Export", "edges", "to", "DataFrame" ]
python
train
33.014706
Kozea/pygal
pygal/graph/graph.py
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/graph.py#L887-L894
def _secondary_max(self): """Getter for the maximum series value""" return ( self.secondary_range[1] if (self.secondary_range and self.secondary_range[1] is not None) else (max(self._secondary_values) if self._secondary_values else None) )
[ "def", "_secondary_max", "(", "self", ")", ":", "return", "(", "self", ".", "secondary_range", "[", "1", "]", "if", "(", "self", ".", "secondary_range", "and", "self", ".", "secondary_range", "[", "1", "]", "is", "not", "None", ")", "else", "(", "max", "(", "self", ".", "_secondary_values", ")", "if", "self", ".", "_secondary_values", "else", "None", ")", ")" ]
Getter for the maximum series value
[ "Getter", "for", "the", "maximum", "series", "value" ]
python
train
38.5
theolind/pymysensors
mysensors/gateway_serial.py
https://github.com/theolind/pymysensors/blob/a139ab6e2f6b71ebaf37282f69bfd0f7fe6193b6/mysensors/gateway_serial.py#L44-L66
def _connect(self): """Connect to the serial port. This should be run in a new thread.""" while self.protocol: _LOGGER.info('Trying to connect to %s', self.port) try: ser = serial.serial_for_url( self.port, self.baud, timeout=self.timeout) except serial.SerialException: _LOGGER.error('Unable to connect to %s', self.port) _LOGGER.info( 'Waiting %s secs before trying to connect again', self.reconnect_timeout) time.sleep(self.reconnect_timeout) else: transport = serial.threaded.ReaderThread( ser, lambda: self.protocol) transport.daemon = False poll_thread = threading.Thread(target=self._poll_queue) self._stop_event.clear() poll_thread.start() transport.start() transport.connect() return
[ "def", "_connect", "(", "self", ")", ":", "while", "self", ".", "protocol", ":", "_LOGGER", ".", "info", "(", "'Trying to connect to %s'", ",", "self", ".", "port", ")", "try", ":", "ser", "=", "serial", ".", "serial_for_url", "(", "self", ".", "port", ",", "self", ".", "baud", ",", "timeout", "=", "self", ".", "timeout", ")", "except", "serial", ".", "SerialException", ":", "_LOGGER", ".", "error", "(", "'Unable to connect to %s'", ",", "self", ".", "port", ")", "_LOGGER", ".", "info", "(", "'Waiting %s secs before trying to connect again'", ",", "self", ".", "reconnect_timeout", ")", "time", ".", "sleep", "(", "self", ".", "reconnect_timeout", ")", "else", ":", "transport", "=", "serial", ".", "threaded", ".", "ReaderThread", "(", "ser", ",", "lambda", ":", "self", ".", "protocol", ")", "transport", ".", "daemon", "=", "False", "poll_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_poll_queue", ")", "self", ".", "_stop_event", ".", "clear", "(", ")", "poll_thread", ".", "start", "(", ")", "transport", ".", "start", "(", ")", "transport", ".", "connect", "(", ")", "return" ]
Connect to the serial port. This should be run in a new thread.
[ "Connect", "to", "the", "serial", "port", ".", "This", "should", "be", "run", "in", "a", "new", "thread", "." ]
python
train
43.73913
sashahart/cookies
cookies.py
https://github.com/sashahart/cookies/blob/ab8185e06f221eaf65305f15e05852393723ac95/cookies.py#L995-L1016
def add(self, *args, **kwargs): """Add Cookie objects by their names, or create new ones under specified names. Any unnamed arguments are interpreted as existing cookies, and are added under the value in their .name attribute. With keyword arguments, the key is interpreted as the cookie name and the value as the UNENCODED value stored in the cookie. """ # Only the first one is accessible through the main interface, # others accessible through get_all (all_cookies). for cookie in args: self.all_cookies.append(cookie) if cookie.name in self: continue self[cookie.name] = cookie for key, value in kwargs.items(): cookie = self.cookie_class(key, value) self.all_cookies.append(cookie) if key in self: continue self[key] = cookie
[ "def", "add", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Only the first one is accessible through the main interface,", "# others accessible through get_all (all_cookies).", "for", "cookie", "in", "args", ":", "self", ".", "all_cookies", ".", "append", "(", "cookie", ")", "if", "cookie", ".", "name", "in", "self", ":", "continue", "self", "[", "cookie", ".", "name", "]", "=", "cookie", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "cookie", "=", "self", ".", "cookie_class", "(", "key", ",", "value", ")", "self", ".", "all_cookies", ".", "append", "(", "cookie", ")", "if", "key", "in", "self", ":", "continue", "self", "[", "key", "]", "=", "cookie" ]
Add Cookie objects by their names, or create new ones under specified names. Any unnamed arguments are interpreted as existing cookies, and are added under the value in their .name attribute. With keyword arguments, the key is interpreted as the cookie name and the value as the UNENCODED value stored in the cookie.
[ "Add", "Cookie", "objects", "by", "their", "names", "or", "create", "new", "ones", "under", "specified", "names", "." ]
python
train
41.545455
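A usage sketch, assuming Cookies is the dict-like container this method belongs to and Cookie its item class:

    jar = Cookies()
    jar.add(session='abc123')            # keyword form: name 'session', unencoded value
    jar.add(Cookie('theme', 'dark'))     # positional form: stored under the cookie's .name
    assert jar['session'].value == 'abc123'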
DistrictDataLabs/yellowbrick
yellowbrick/utils/helpers.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/utils/helpers.py#L33-L60
def get_model_name(model): """ Detects the model name for a Scikit-Learn model or pipeline. Parameters ---------- model: class or instance The object to determine the name for. If the model is an estimator it returns the class name; if it is a Pipeline it returns the class name of the final transformer or estimator in the Pipeline. Returns ------- name : string The name of the model or pipeline. """ if not is_estimator(model): raise YellowbrickTypeError( "Cannot detect the model name for non estimator: '{}'".format( type(model) ) ) else: if isinstance(model, Pipeline): return get_model_name(model.steps[-1][-1]) else: return model.__class__.__name__
[ "def", "get_model_name", "(", "model", ")", ":", "if", "not", "is_estimator", "(", "model", ")", ":", "raise", "YellowbrickTypeError", "(", "\"Cannot detect the model name for non estimator: '{}'\"", ".", "format", "(", "type", "(", "model", ")", ")", ")", "else", ":", "if", "isinstance", "(", "model", ",", "Pipeline", ")", ":", "return", "get_model_name", "(", "model", ".", "steps", "[", "-", "1", "]", "[", "-", "1", "]", ")", "else", ":", "return", "model", ".", "__class__", ".", "__name__" ]
Detects the model name for a Scikit-Learn model or pipeline. Parameters ---------- model: class or instance The object to determine the name for. If the model is an estimator it returns the class name; if it is a Pipeline it returns the class name of the final transformer or estimator in the Pipeline. Returns ------- name : string The name of the model or pipeline.
[ "Detects", "the", "model", "name", "for", "a", "Scikit", "-", "Learn", "model", "or", "pipeline", "." ]
python
train
28.714286
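A usage sketch showing both branches with scikit-learn estimators:

    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.linear_model import Ridge

    assert get_model_name(Ridge()) == 'Ridge'
    pipe = Pipeline([('scale', StandardScaler()), ('reg', Ridge())])
    assert get_model_name(pipe) == 'Ridge'   # recurses into the final pipeline step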
rjw57/starman
starman/kalman.py
https://github.com/rjw57/starman/blob/1f9475e2354c9630a61f4898ad871de1d2fdbc71/starman/kalman.py#L131-L204
def predict(self, control=None, control_matrix=None, process_matrix=None, process_covariance=None): """ Predict the next *a priori* state mean and covariance given the last posterior. As a special case the first call to this method will initialise the posterior and prior estimates from the *initial_state_estimate* and *initial_covariance* arguments passed when this object was created. In this case the *process_matrix* and *process_covariance* arguments are unused but are still recorded in the :py:attr:`.process_matrices` and :py:attr:`.process_covariances` attributes. Args: control (array or None): If specified, the control input for this predict step. control_matrix (array or None): If specified, the control matrix to use for this time step. process_matrix (array or None): If specified, the process matrix to use for this time step. process_covariance (array or None): If specified, the process covariance to use for this time step. """ # Sanitise arguments if process_matrix is None: process_matrix = self._defaults['process_matrix'] if process_covariance is None: process_covariance = self._defaults['process_covariance'] if control_matrix is None: control_matrix = self._defaults['control_matrix'] if len(self.prior_state_estimates) == 0: # Special case: first call self.prior_state_estimates.append(self._initial_state_estimate) else: # Usual case process_matrix = as_square_array(process_matrix) process_covariance = as_square_array(process_covariance) if process_matrix.shape[0] != process_covariance.shape[0]: raise ValueError("Process matrix and noise have incompatible " \ "shapes: {} vs {}".format( process_matrix.shape, process_covariance.shape)) if control_matrix is not None: control_matrix = np.atleast_2d(control_matrix) if control is not None: control = np.atleast_1d(control) # Update state mean and covariance prev_posterior_mean = self.posterior_state_estimates[-1].mean prev_posterior_cov = self.posterior_state_estimates[-1].cov prior_mean = process_matrix.dot(prev_posterior_mean) if control is not None: prior_mean += control_matrix.dot(control) prior_cov = process_matrix.dot(prev_posterior_cov).dot( process_matrix.T) + process_covariance self.prior_state_estimates.append( MultivariateNormal(mean=prior_mean, cov=prior_cov)) # Record transition matrix self.process_matrices.append(process_matrix) self.process_covariances.append(process_covariance) # Append empty list to measurements for this time step self.measurements.append([]) self.measurement_matrices.append([]) # Seed posterior estimates with the prior one. self.posterior_state_estimates.append(self.prior_state_estimates[-1])
[ "def", "predict", "(", "self", ",", "control", "=", "None", ",", "control_matrix", "=", "None", ",", "process_matrix", "=", "None", ",", "process_covariance", "=", "None", ")", ":", "# Sanitise arguments", "if", "process_matrix", "is", "None", ":", "process_matrix", "=", "self", ".", "_defaults", "[", "'process_matrix'", "]", "if", "process_covariance", "is", "None", ":", "process_covariance", "=", "self", ".", "_defaults", "[", "'process_covariance'", "]", "if", "control_matrix", "is", "None", ":", "control_matrix", "=", "self", ".", "_defaults", "[", "'control_matrix'", "]", "if", "len", "(", "self", ".", "prior_state_estimates", ")", "==", "0", ":", "# Special case: first call", "self", ".", "prior_state_estimates", ".", "append", "(", "self", ".", "_initial_state_estimate", ")", "else", ":", "# Usual case", "process_matrix", "=", "as_square_array", "(", "process_matrix", ")", "process_covariance", "=", "as_square_array", "(", "process_covariance", ")", "if", "process_matrix", ".", "shape", "[", "0", "]", "!=", "process_covariance", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "\"Process matrix and noise have incompatible \"", "\"shapes: {} vs {}\"", ".", "format", "(", "process_matrix", ".", "shape", ",", "process_covariance", ".", "shape", ")", ")", "if", "control_matrix", "is", "not", "None", ":", "control_matrix", "=", "np", ".", "atleast_2d", "(", "control_matrix", ")", "if", "control", "is", "not", "None", ":", "control", "=", "np", ".", "atleast_1d", "(", "control", ")", "# Update state mean and covariance", "prev_posterior_mean", "=", "self", ".", "posterior_state_estimates", "[", "-", "1", "]", ".", "mean", "prev_posterior_cov", "=", "self", ".", "posterior_state_estimates", "[", "-", "1", "]", ".", "cov", "prior_mean", "=", "process_matrix", ".", "dot", "(", "prev_posterior_mean", ")", "if", "control", "is", "not", "None", ":", "prior_mean", "+=", "control_matrix", ".", "dot", "(", "control", ")", "prior_cov", "=", "process_matrix", ".", "dot", "(", "prev_posterior_cov", ")", ".", "dot", "(", "process_matrix", ".", "T", ")", "+", "process_covariance", "self", ".", "prior_state_estimates", ".", "append", "(", "MultivariateNormal", "(", "mean", "=", "prior_mean", ",", "cov", "=", "prior_cov", ")", ")", "# Record transition matrix", "self", ".", "process_matrices", ".", "append", "(", "process_matrix", ")", "self", ".", "process_covariances", ".", "append", "(", "process_covariance", ")", "# Append empty list to measurements for this time step", "self", ".", "measurements", ".", "append", "(", "[", "]", ")", "self", ".", "measurement_matrices", ".", "append", "(", "[", "]", ")", "# Seed posterior estimates with the prior one.", "self", ".", "posterior_state_estimates", ".", "append", "(", "self", ".", "prior_state_estimates", "[", "-", "1", "]", ")" ]
Predict the next *a priori* state mean and covariance given the last posterior. As a special case the first call to this method will initialise the posterior and prior estimates from the *initial_state_estimate* and *initial_covariance* arguments passed when this object was created. In this case the *process_matrix* and *process_covariance* arguments are unused but are still recorded in the :py:attr:`.process_matrices` and :py:attr:`.process_covariances` attributes. Args: control (array or None): If specified, the control input for this predict step. control_matrix (array or None): If specified, the control matrix to use for this time step. process_matrix (array or None): If specified, the process matrix to use for this time step. process_covariance (array or None): If specified, the process covariance to use for this time step.
[ "Predict", "the", "next", "*", "a", "priori", "*", "state", "mean", "and", "covariance", "given", "the", "last", "posterior", ".", "As", "a", "special", "case", "the", "first", "call", "to", "this", "method", "will", "initialise", "the", "posterior", "and", "prior", "estimates", "from", "the", "*", "initial_state_estimate", "*", "and", "*", "initial_covariance", "*", "arguments", "passed", "when", "this", "object", "was", "created", ".", "In", "this", "case", "the", "*", "process_matrix", "*", "and", "*", "process_covariance", "*", "arguments", "are", "unused", "but", "are", "still", "recorded", "in", "the", ":", "py", ":", "attr", ":", ".", "process_matrices", "and", ":", "py", ":", "attr", ":", ".", "process_covariances", "attributes", "." ]
python
train
43.891892
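In the usual (non-first) case the method computes the standard Kalman prediction: prior mean F·x (plus B·u when a control is supplied) and prior covariance F·P·Fᵀ + Q. A numeric sketch of just that arithmetic, with a hypothetical constant-velocity model:

    import numpy as np
    F = np.array([[1., 1.], [0., 1.]])   # process matrix: position += velocity
    Q = 0.01 * np.eye(2)                 # process covariance
    x, P = np.array([0., 1.]), np.eye(2)
    prior_mean = F.dot(x)                # -> [1., 1.]
    prior_cov = F.dot(P).dot(F.T) + Q    # what predict() stores as the new prior estimate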
twilio/twilio-python
twilio/rest/preview/hosted_numbers/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/hosted_numbers/__init__.py#L38-L44
def hosted_number_orders(self): """ :rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderList """ if self._hosted_number_orders is None: self._hosted_number_orders = HostedNumberOrderList(self) return self._hosted_number_orders
[ "def", "hosted_number_orders", "(", "self", ")", ":", "if", "self", ".", "_hosted_number_orders", "is", "None", ":", "self", ".", "_hosted_number_orders", "=", "HostedNumberOrderList", "(", "self", ")", "return", "self", ".", "_hosted_number_orders" ]
:rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderList
[ ":", "rtype", ":", "twilio", ".", "rest", ".", "preview", ".", "hosted_numbers", ".", "hosted_number_order", ".", "HostedNumberOrderList" ]
python
train
42.857143
mjj4791/python-buienradar
buienradar/buienradar.py
https://github.com/mjj4791/python-buienradar/blob/a70436f54e007ce921d5210cb296cf3e4adf9d09/buienradar/buienradar.py#L41-L52
def condition_from_code(condcode): """Get the condition name from the condition code.""" if condcode in __BRCONDITIONS: cond_data = __BRCONDITIONS[condcode] return {CONDCODE: condcode, CONDITION: cond_data[0], DETAILED: cond_data[1], EXACT: cond_data[2], EXACTNL: cond_data[3], } return None
[ "def", "condition_from_code", "(", "condcode", ")", ":", "if", "condcode", "in", "__BRCONDITIONS", ":", "cond_data", "=", "__BRCONDITIONS", "[", "condcode", "]", "return", "{", "CONDCODE", ":", "condcode", ",", "CONDITION", ":", "cond_data", "[", "0", "]", ",", "DETAILED", ":", "cond_data", "[", "1", "]", ",", "EXACT", ":", "cond_data", "[", "2", "]", ",", "EXACTNL", ":", "cond_data", "[", "3", "]", ",", "}", "return", "None" ]
Get the condition name from the condition code.
[ "Get", "the", "condition", "name", "from", "the", "condition", "code", "." ]
python
train
32.416667
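A usage sketch; 'a' here is only a guess at a valid buienradar condition code:

    cond = condition_from_code('a')
    if cond is not None:
        print(cond[CONDCODE], cond[CONDITION], cond[EXACTNL])
    # Unknown codes fall through to None rather than raising.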
danilobellini/audiolazy
audiolazy/lazy_poly.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_poly.py#L202-L206
def is_polynomial(self): """ Tells whether it is a linear combination of natural powers of ``x``. """ return all(isinstance(k, INT_TYPES) and k >= 0 for k in self._data)
[ "def", "is_polynomial", "(", "self", ")", ":", "return", "all", "(", "isinstance", "(", "k", ",", "INT_TYPES", ")", "and", "k", ">=", "0", "for", "k", "in", "self", ".", "_data", ")" ]
Tells whether it is a linear combination of natural powers of ``x``.
[ "Tells", "whether", "it", "is", "a", "linear", "combination", "of", "natural", "powers", "of", "x", "." ]
python
train
36.2
heronotears/lazyxml
demo/dump.py
https://github.com/heronotears/lazyxml/blob/e3f1ebd3f34cfa03d022ddec90e17d60c1c81953/demo/dump.py#L54-L142
def main():
    data = {'demo':{'foo': '<foo>', 'bar': ['1', '2']}}

    # Write XML to a file, given a file name
    lazyxml.dump(data, 'xml/dump.xml')

    # Write XML to a file, given a file handle
    with open('xml/dump-fp.xml', 'w') as fp:
        lazyxml.dump(data, fp)

    # Write XML to a file-like object
    from cStringIO import StringIO
    buffer = StringIO()
    lazyxml.dump(data, buffer)
    print buffer.getvalue()
    # <?xml version="1.0" encoding="utf-8"?><demo><foo><![CDATA[1]]></foo><bar><![CDATA[2]]></bar></demo>
    buffer.close()

    # Defaults
    print lazyxml.dumps(data)
    # '<?xml version="1.0" encoding="utf-8"?><demo><foo><![CDATA[<foo>]]></foo><bar><![CDATA[1]]></bar><bar><![CDATA[2]]></bar></demo>'

    # Without the XML header declaration
    print lazyxml.dumps(data, header_declare=False)
    # '<demo><foo><![CDATA[<foo>]]></foo><bar><![CDATA[1]]></bar><bar><![CDATA[2]]></bar></demo>'

    # Without CDATA wrapping
    print lazyxml.dumps(data, cdata=False)
    # '<?xml version="1.0" encoding="utf-8"?><demo><foo>&lt;foo&gt;</foo><bar>1</bar><bar>2</bar></demo>'

    # Indented, pretty-printed XML
    print lazyxml.dumps(data, indent=' ' * 4)
    # <?xml version="1.0" encoding="utf-8"?>
    # <demo>
    #     <foo><![CDATA[<foo>]]></foo>
    #     <bar><![CDATA[1]]></bar>
    #     <bar><![CDATA[2]]></bar>
    # </demo>

    # Sort by tag name
    print lazyxml.dumps(data, ksort=True)
    # '<?xml version="1.0" encoding="utf-8"?><demo><bar><![CDATA[1]]></bar><bar><![CDATA[2]]></bar><foo><![CDATA[<foo>]]></foo></demo>'

    # Sort by tag name in reverse order
    print lazyxml.dumps(data, ksort=True, reverse=True)
    # '<?xml version="1.0" encoding="utf-8"?><demo><foo><![CDATA[<foo>]]></foo><bar><![CDATA[1]]></bar><bar><![CDATA[2]]></bar></demo>'

    # XML data with attributes
    kw = {
        'hasattr': True,
        'ksort': True,
        'indent': ' ' * 4,
        'attrkey': ATTRKEY,
        'valuekey': VALUEKEY
    }
    print lazyxml.dumps(ATTRDATA, **kw)
    """
    <root a1="1" a2="2">
        <test1 a="1" b="2" c="3">
            <normal index="5" required="false">
                <bar><![CDATA[1]]></bar>
                <bar><![CDATA[2]]></bar>
                <foo><![CDATA[<foo-1>]]></foo>
            </normal>
            <repeat1 index="1" required="false">
                <bar><![CDATA[1]]></bar>
                <bar><![CDATA[2]]></bar>
                <foo><![CDATA[<foo-1>]]></foo>
            </repeat1>
            <repeat1 index="1" required="false">
                <bar><![CDATA[3]]></bar>
                <bar><![CDATA[4]]></bar>
                <foo><![CDATA[<foo-2>]]></foo>
            </repeat1>
            <repeat2 index="2" required="true"><![CDATA[1]]></repeat2>
            <repeat2 index="2" required="true"><![CDATA[2]]></repeat2>
            <repeat3 index="3" required="true">
                <sub><![CDATA[1]]></sub>
                <sub><![CDATA[2]]></sub>
            </repeat3>
            <repeat3 index="4" required="true">
                <sub><![CDATA[1]]></sub>
                <sub><![CDATA[2]]></sub>
                <sub><![CDATA[3]]></sub>
            </repeat3>
        </test1>
        <test2 a="1" b="2" c="3"><![CDATA[测试用]]></test2>
    </root>
    """
[ "def", "main", "(", ")", ":", "data", "=", "{", "'demo'", ":", "{", "'foo'", ":", "'<foo>'", ",", "'bar'", ":", "[", "'1'", ",", "'2'", "]", "}", "}", "# xml写入文件 提供文件名", "lazyxml", ".", "dump", "(", "data", ",", "'xml/dump.xml'", ")", "# xml写入文件 提供文件句柄", "with", "open", "(", "'xml/dump-fp.xml'", ",", "'w'", ")", "as", "fp", ":", "lazyxml", ".", "dump", "(", "data", ",", "fp", ")", "# xml写入文件 提供类文件对象", "from", "cStringIO", "import", "StringIO", "buffer", "=", "StringIO", "(", ")", "lazyxml", ".", "dump", "(", "data", ",", "buffer", ")", "print", "buffer", ".", "getvalue", "(", ")", "# <?xml version=\"1.0\" encoding=\"utf-8\"?><demo><foo><![CDATA[1]]></foo><bar><![CDATA[2]]></bar></demo>", "buffer", ".", "close", "(", ")", "# 默认", "print", "lazyxml", ".", "dumps", "(", "data", ")", "# '<?xml version=\"1.0\" encoding=\"utf-8\"?><demo><foo><![CDATA[<foo>]]></foo><bar><![CDATA[1]]></bar><bar><![CDATA[2]]></bar></demo>'", "# 不声明xml头部", "print", "lazyxml", ".", "dumps", "(", "data", ",", "header_declare", "=", "False", ")", "# '<demo><foo><![CDATA[<foo>]]></foo><bar><![CDATA[1]]></bar><bar><![CDATA[2]]></bar></demo>'", "# 不使用CDATA格式", "print", "lazyxml", ".", "dumps", "(", "data", ",", "cdata", "=", "False", ")", "# '<?xml version=\"1.0\" encoding=\"utf-8\"?><demo><foo>&lt;foo&gt;</foo><bar>1</bar><bar>2</bar></demo>'", "# 缩进和美观xml", "print", "lazyxml", ".", "dumps", "(", "data", ",", "indent", "=", "' '", "*", "4", ")", "# <?xml version=\"1.0\" encoding=\"utf-8\"?>", "# <demo>", "# <foo><![CDATA[<foo>]]></foo>", "# <bar><![CDATA[1]]></bar>", "# <bar><![CDATA[2]]></bar>", "# </demo>", "# 使用标签名称排序", "print", "lazyxml", ".", "dumps", "(", "data", ",", "ksort", "=", "True", ")", "# '<?xml version=\"1.0\" encoding=\"utf-8\"?><demo><bar><![CDATA[1]]></bar><bar><![CDATA[2]]></bar><foo><![CDATA[<foo>]]></foo></demo>'", "# 使用标签名称倒序排序", "print", "lazyxml", ".", "dumps", "(", "data", ",", "ksort", "=", "True", ",", "reverse", "=", "True", ")", "# '<?xml version=\"1.0\" encoding=\"utf-8\"?><demo><foo><![CDATA[<foo>]]></foo><bar><![CDATA[1]]></bar><bar><![CDATA[2]]></bar></demo>'", "# 含有属性的xml数据", "kw", "=", "{", "'hasattr'", ":", "True", ",", "'ksort'", ":", "True", ",", "'indent'", ":", "' '", "*", "4", ",", "'attrkey'", ":", "ATTRKEY", ",", "'valuekey'", ":", "VALUEKEY", "}", "print", "lazyxml", ".", "dumps", "(", "ATTRDATA", ",", "*", "*", "kw", ")" ]
<root a1="1" a2="2"> <test1 a="1" b="2" c="3"> <normal index="5" required="false"> <bar><![CDATA[1]]></bar> <bar><![CDATA[2]]></bar> <foo><![CDATA[<foo-1>]]></foo> </normal> <repeat1 index="1" required="false"> <bar><![CDATA[1]]></bar> <bar><![CDATA[2]]></bar> <foo><![CDATA[<foo-1>]]></foo> </repeat1> <repeat1 index="1" required="false"> <bar><![CDATA[3]]></bar> <bar><![CDATA[4]]></bar> <foo><![CDATA[<foo-2>]]></foo> </repeat1> <repeat2 index="2" required="true"><![CDATA[1]]></repeat2> <repeat2 index="2" required="true"><![CDATA[2]]></repeat2> <repeat3 index="3" required="true"> <sub><![CDATA[1]]></sub> <sub><![CDATA[2]]></sub> </repeat3> <repeat3 index="4" required="true"> <sub><![CDATA[1]]></sub> <sub><![CDATA[2]]></sub> <sub><![CDATA[3]]></sub> </repeat3> </test1> <test2 a="1" b="2" c="3"><![CDATA[测试用]]></test2> </root>
[ "<root", "a1", "=", "1", "a2", "=", "2", ">", "<test1", "a", "=", "1", "b", "=", "2", "c", "=", "3", ">", "<normal", "index", "=", "5", "required", "=", "false", ">", "<bar", ">", "<!", "[", "CDATA", "[", "1", "]]", ">", "<", "/", "bar", ">", "<bar", ">", "<!", "[", "CDATA", "[", "2", "]]", ">", "<", "/", "bar", ">", "<foo", ">", "<!", "[", "CDATA", "[", "<foo", "-", "1", ">", "]]", ">", "<", "/", "foo", ">", "<", "/", "normal", ">", "<repeat1", "index", "=", "1", "required", "=", "false", ">", "<bar", ">", "<!", "[", "CDATA", "[", "1", "]]", ">", "<", "/", "bar", ">", "<bar", ">", "<!", "[", "CDATA", "[", "2", "]]", ">", "<", "/", "bar", ">", "<foo", ">", "<!", "[", "CDATA", "[", "<foo", "-", "1", ">", "]]", ">", "<", "/", "foo", ">", "<", "/", "repeat1", ">", "<repeat1", "index", "=", "1", "required", "=", "false", ">", "<bar", ">", "<!", "[", "CDATA", "[", "3", "]]", ">", "<", "/", "bar", ">", "<bar", ">", "<!", "[", "CDATA", "[", "4", "]]", ">", "<", "/", "bar", ">", "<foo", ">", "<!", "[", "CDATA", "[", "<foo", "-", "2", ">", "]]", ">", "<", "/", "foo", ">", "<", "/", "repeat1", ">", "<repeat2", "index", "=", "2", "required", "=", "true", ">", "<!", "[", "CDATA", "[", "1", "]]", ">", "<", "/", "repeat2", ">", "<repeat2", "index", "=", "2", "required", "=", "true", ">", "<!", "[", "CDATA", "[", "2", "]]", ">", "<", "/", "repeat2", ">", "<repeat3", "index", "=", "3", "required", "=", "true", ">", "<sub", ">", "<!", "[", "CDATA", "[", "1", "]]", ">", "<", "/", "sub", ">", "<sub", ">", "<!", "[", "CDATA", "[", "2", "]]", ">", "<", "/", "sub", ">", "<", "/", "repeat3", ">", "<repeat3", "index", "=", "4", "required", "=", "true", ">", "<sub", ">", "<!", "[", "CDATA", "[", "1", "]]", ">", "<", "/", "sub", ">", "<sub", ">", "<!", "[", "CDATA", "[", "2", "]]", ">", "<", "/", "sub", ">", "<sub", ">", "<!", "[", "CDATA", "[", "3", "]]", ">", "<", "/", "sub", ">", "<", "/", "repeat3", ">", "<", "/", "test1", ">", "<test2", "a", "=", "1", "b", "=", "2", "c", "=", "3", ">", "<!", "[", "CDATA", "[", "测试用", "]]", ">", "<", "/", "test2", ">", "<", "/", "root", ">" ]
python
train
33.898876
nickmckay/LiPD-utilities
Python/lipd/excel.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/excel.py#L1614-L1636
def cells_rt_meta(workbook, sheet, row, col): """ Traverse all cells in a row. If you find new data in a cell, add it to the list. :param obj workbook: :param str sheet: :param int row: :param int col: :return list: Cell data for a specific row """ logger_excel.info("enter cells_rt_meta") col_loop = 0 cell_data = [] temp_sheet = workbook.sheet_by_name(sheet) while col_loop < temp_sheet.ncols: col += 1 col_loop += 1 try: if temp_sheet.cell_value(row, col) != xlrd.empty_cell and temp_sheet.cell_value(row, col) != '': cell_data.append(temp_sheet.cell_value(row, col)) except IndexError as e: logger_excel.warn("cells_rt_meta: IndexError: sheet: {}, row: {}, col: {}, {}".format(sheet, row, col, e)) logger_excel.info("exit cells_right_meta") return cell_data
[ "def", "cells_rt_meta", "(", "workbook", ",", "sheet", ",", "row", ",", "col", ")", ":", "logger_excel", ".", "info", "(", "\"enter cells_rt_meta\"", ")", "col_loop", "=", "0", "cell_data", "=", "[", "]", "temp_sheet", "=", "workbook", ".", "sheet_by_name", "(", "sheet", ")", "while", "col_loop", "<", "temp_sheet", ".", "ncols", ":", "col", "+=", "1", "col_loop", "+=", "1", "try", ":", "if", "temp_sheet", ".", "cell_value", "(", "row", ",", "col", ")", "!=", "xlrd", ".", "empty_cell", "and", "temp_sheet", ".", "cell_value", "(", "row", ",", "col", ")", "!=", "''", ":", "cell_data", ".", "append", "(", "temp_sheet", ".", "cell_value", "(", "row", ",", "col", ")", ")", "except", "IndexError", "as", "e", ":", "logger_excel", ".", "warn", "(", "\"cells_rt_meta: IndexError: sheet: {}, row: {}, col: {}, {}\"", ".", "format", "(", "sheet", ",", "row", ",", "col", ",", "e", ")", ")", "logger_excel", ".", "info", "(", "\"exit cells_right_meta\"", ")", "return", "cell_data" ]
Traverse all cells in a row. If you find new data in a cell, add it to the list. :param obj workbook: :param str sheet: :param int row: :param int col: :return list: Cell data for a specific row
[ "Traverse", "all", "cells", "in", "a", "row", ".", "If", "you", "find", "new", "data", "in", "a", "cell", "add", "it", "to", "the", "list", ".", ":", "param", "obj", "workbook", ":", ":", "param", "str", "sheet", ":", ":", "param", "int", "row", ":", ":", "param", "int", "col", ":", ":", "return", "list", ":", "Cell", "data", "for", "a", "specific", "row" ]
python
train
37.869565
Trebek/pydealer
pydealer/stack.py
https://github.com/Trebek/pydealer/blob/2ac583dd8c55715658c740b614387775f4dda333/pydealer/stack.py#L470-L516
def get_list(self, terms, limit=0, sort=False, ranks=None): """ Get the specified cards from the stack. :arg term: The search term. Can be a card full name, value, suit, abbreviation, or stack indice. :arg int limit: The number of items to retrieve for each term. :arg bool sort: Whether or not to sort the results, by poker ranks. :arg dict ranks: The rank dict to reference for sorting. If ``None``, it will default to ``DEFAULT_RANKS``. :returns: A list of the specified cards, if found. """ ranks = ranks or self.ranks got_cards = [] try: indices = self.find_list(terms, limit=limit) got_cards = [self.cards[i] for i in indices if self.cards[i] not in got_cards] self.cards = [v for i, v in enumerate(self.cards) if i not in indices] except: indices = [] for item in terms: try: card = self.cards[item] if card not in got_cards: got_cards.append(card) indices.append(item) except: indices += self.find(item, limit=limit) got_cards += [self.cards[i] for i in indices if self.cards[i] not in got_cards] self.cards = [v for i, v in enumerate(self.cards) if i not in indices] if sort: got_cards = sort_cards(got_cards, ranks) return got_cards
[ "def", "get_list", "(", "self", ",", "terms", ",", "limit", "=", "0", ",", "sort", "=", "False", ",", "ranks", "=", "None", ")", ":", "ranks", "=", "ranks", "or", "self", ".", "ranks", "got_cards", "=", "[", "]", "try", ":", "indices", "=", "self", ".", "find_list", "(", "terms", ",", "limit", "=", "limit", ")", "got_cards", "=", "[", "self", ".", "cards", "[", "i", "]", "for", "i", "in", "indices", "if", "self", ".", "cards", "[", "i", "]", "not", "in", "got_cards", "]", "self", ".", "cards", "=", "[", "v", "for", "i", ",", "v", "in", "enumerate", "(", "self", ".", "cards", ")", "if", "i", "not", "in", "indices", "]", "except", ":", "indices", "=", "[", "]", "for", "item", "in", "terms", ":", "try", ":", "card", "=", "self", ".", "cards", "[", "item", "]", "if", "card", "not", "in", "got_cards", ":", "got_cards", ".", "append", "(", "card", ")", "indices", ".", "append", "(", "item", ")", "except", ":", "indices", "+=", "self", ".", "find", "(", "item", ",", "limit", "=", "limit", ")", "got_cards", "+=", "[", "self", ".", "cards", "[", "i", "]", "for", "i", "in", "indices", "if", "self", ".", "cards", "[", "i", "]", "not", "in", "got_cards", "]", "self", ".", "cards", "=", "[", "v", "for", "i", ",", "v", "in", "enumerate", "(", "self", ".", "cards", ")", "if", "i", "not", "in", "indices", "]", "if", "sort", ":", "got_cards", "=", "sort_cards", "(", "got_cards", ",", "ranks", ")", "return", "got_cards" ]
Get the specified cards from the stack. :arg term: The search term. Can be a card full name, value, suit, abbreviation, or stack index. :arg int limit: The number of items to retrieve for each term. :arg bool sort: Whether or not to sort the results, by poker ranks. :arg dict ranks: The rank dict to reference for sorting. If ``None``, it will default to ``DEFAULT_RANKS``. :returns: A list of the specified cards, if found.
[ "Get", "the", "specified", "cards", "from", "the", "stack", "." ]
python
train
34.340426
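A hedged usage sketch; it assumes the surrounding pydealer package, where Deck builds on this Stack class, and that the named cards exist in a standard 52-card deck.

import pydealer

deck = pydealer.Deck()   # Deck inherits this Stack behaviour
deck.shuffle()
# Full card names are one accepted search term; matches are removed from the deck.
hand = deck.get_list(['Ace of Spades', 'Queen of Hearts'])
print([str(card) for card in hand])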
psd-tools/psd-tools
src/psd_tools/api/psd_image.py
https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/api/psd_image.py#L442-L450
def _get_pattern(self, pattern_id): """Get pattern item by id.""" for key in ('PATTERNS1', 'PATTERNS2', 'PATTERNS3'): if key in self.tagged_blocks: data = self.tagged_blocks.get_data(key) for pattern in data: if pattern.pattern_id == pattern_id: return pattern return None
[ "def", "_get_pattern", "(", "self", ",", "pattern_id", ")", ":", "for", "key", "in", "(", "'PATTERNS1'", ",", "'PATTERNS2'", ",", "'PATTERNS3'", ")", ":", "if", "key", "in", "self", ".", "tagged_blocks", ":", "data", "=", "self", ".", "tagged_blocks", ".", "get_data", "(", "key", ")", "for", "pattern", "in", "data", ":", "if", "pattern", ".", "pattern_id", "==", "pattern_id", ":", "return", "pattern", "return", "None" ]
Get pattern item by id.
[ "Get", "pattern", "item", "by", "id", "." ]
python
train
41.777778
hasgeek/coaster
coaster/sqlalchemy/mixins.py
https://github.com/hasgeek/coaster/blob/07f7eb5d5f516e22fa14fdf4dc70e0ae13ee398d/coaster/sqlalchemy/mixins.py#L112-L117
def uuid(cls): """UUID column, or synonym to existing :attr:`id` column if that is a UUID""" if hasattr(cls, '__uuid_primary_key__') and cls.__uuid_primary_key__: return synonym('id') else: return immutable(Column(UUIDType(binary=False), default=uuid_.uuid4, unique=True, nullable=False))
[ "def", "uuid", "(", "cls", ")", ":", "if", "hasattr", "(", "cls", ",", "'__uuid_primary_key__'", ")", "and", "cls", ".", "__uuid_primary_key__", ":", "return", "synonym", "(", "'id'", ")", "else", ":", "return", "immutable", "(", "Column", "(", "UUIDType", "(", "binary", "=", "False", ")", ",", "default", "=", "uuid_", ".", "uuid4", ",", "unique", "=", "True", ",", "nullable", "=", "False", ")", ")" ]
UUID column, or synonym to existing :attr:`id` column if that is a UUID
[ "UUID", "column", "or", "synonym", "to", "existing", ":", "attr", ":", "id", "column", "if", "that", "is", "a", "UUID" ]
python
train
55.166667
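An illustrative, self-contained sketch of the same branching written against plain SQLAlchemy (1.4+ import paths) plus sqlalchemy_utils; coaster's immutable() wrapper is omitted, and the Widget model is invented for the example.

import uuid as uuid_

from sqlalchemy import Column, Integer
from sqlalchemy.orm import declarative_base, declared_attr, synonym
from sqlalchemy_utils import UUIDType

Base = declarative_base()

class UuidMixin:
    @declared_attr
    def uuid(cls):
        # Alias the primary key when it is already a UUID; otherwise add a column.
        if getattr(cls, '__uuid_primary_key__', False):
            return synonym('id')
        return Column(UUIDType(binary=False), default=uuid_.uuid4,
                      unique=True, nullable=False)

class Widget(UuidMixin, Base):
    __tablename__ = 'widget'
    id = Column(Integer, primary_key=True)  # integer PK, so uuid becomes a real column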
ga4gh/ga4gh-server
ga4gh/server/backend.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/backend.py#L405-L421
def continuousGenerator(self, request): """ Returns a generator over the (continuous, nextPageToken) pairs defined by the (JSON string) request. """ compoundId = None if request.continuous_set_id != "": compoundId = datamodel.ContinuousSetCompoundId.parse( request.continuous_set_id) if compoundId is None: raise exceptions.ContinuousSetNotSpecifiedException() dataset = self.getDataRepository().getDataset( compoundId.dataset_id) continuousSet = dataset.getContinuousSet(request.continuous_set_id) iterator = paging.ContinuousIterator(request, continuousSet) return iterator
[ "def", "continuousGenerator", "(", "self", ",", "request", ")", ":", "compoundId", "=", "None", "if", "request", ".", "continuous_set_id", "!=", "\"\"", ":", "compoundId", "=", "datamodel", ".", "ContinuousSetCompoundId", ".", "parse", "(", "request", ".", "continuous_set_id", ")", "if", "compoundId", "is", "None", ":", "raise", "exceptions", ".", "ContinuousSetNotSpecifiedException", "(", ")", "dataset", "=", "self", ".", "getDataRepository", "(", ")", ".", "getDataset", "(", "compoundId", ".", "dataset_id", ")", "continuousSet", "=", "dataset", ".", "getContinuousSet", "(", "request", ".", "continuous_set_id", ")", "iterator", "=", "paging", ".", "ContinuousIterator", "(", "request", ",", "continuousSet", ")", "return", "iterator" ]
Returns a generator over the (continuous, nextPageToken) pairs defined by the (JSON string) request.
[ "Returns", "a", "generator", "over", "the", "(", "continuous", "nextPageToken", ")", "pairs", "defined", "by", "the", "(", "JSON", "string", ")", "request", "." ]
python
train
41.176471
twisted/mantissa
xmantissa/people.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/people.py#L2357-L2366
def render_addPersonForm(self, ctx, data): """ Create and return a L{liveform.LiveForm} for creating a new L{Person}. """ addPersonForm = liveform.LiveForm( self.addPerson, self._baseParameters, description='Add Person') addPersonForm.compact() addPersonForm.jsClass = u'Mantissa.People.AddPersonForm' addPersonForm.setFragmentParent(self) return addPersonForm
[ "def", "render_addPersonForm", "(", "self", ",", "ctx", ",", "data", ")", ":", "addPersonForm", "=", "liveform", ".", "LiveForm", "(", "self", ".", "addPerson", ",", "self", ".", "_baseParameters", ",", "description", "=", "'Add Person'", ")", "addPersonForm", ".", "compact", "(", ")", "addPersonForm", ".", "jsClass", "=", "u'Mantissa.People.AddPersonForm'", "addPersonForm", ".", "setFragmentParent", "(", "self", ")", "return", "addPersonForm" ]
Create and return a L{liveform.LiveForm} for creating a new L{Person}.
[ "Create", "and", "return", "a", "L", "{", "liveform", ".", "LiveForm", "}", "for", "creating", "a", "new", "L", "{", "Person", "}", "." ]
python
train
42.7
LionelAuroux/pyrser
pyrser/dsl.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/dsl.py#L599-L624
def add_rpt(self, sequence, mod, pt): """Add a repeater to the previous sequence""" modstr = self.value(mod) if modstr == '!!': # cursor on the REPEATER self._stream.restore_context() # log the error self.diagnostic.notify( error.Severity.ERROR, "Cannot repeat a lookahead rule", error.LocationInfo.from_stream(self._stream, is_error=True) ) raise self.diagnostic if modstr == '!': # cursor on the REPEATER self._stream.restore_context() # log the error self.diagnostic.notify( error.Severity.ERROR, "Cannot repeat a negated rule", error.LocationInfo.from_stream(self._stream, is_error=True) ) raise self.diagnostic oldnode = sequence sequence.parser_tree = pt.functor(oldnode.parser_tree) return True
[ "def", "add_rpt", "(", "self", ",", "sequence", ",", "mod", ",", "pt", ")", ":", "modstr", "=", "self", ".", "value", "(", "mod", ")", "if", "modstr", "==", "'!!'", ":", "# cursor on the REPEATER", "self", ".", "_stream", ".", "restore_context", "(", ")", "# log the error", "self", ".", "diagnostic", ".", "notify", "(", "error", ".", "Severity", ".", "ERROR", ",", "\"Cannot repeat a lookahead rule\"", ",", "error", ".", "LocationInfo", ".", "from_stream", "(", "self", ".", "_stream", ",", "is_error", "=", "True", ")", ")", "raise", "self", ".", "diagnostic", "if", "modstr", "==", "'!'", ":", "# cursor on the REPEATER", "self", ".", "_stream", ".", "restore_context", "(", ")", "# log the error", "self", ".", "diagnostic", ".", "notify", "(", "error", ".", "Severity", ".", "ERROR", ",", "\"Cannot repeat a negated rule\"", ",", "error", ".", "LocationInfo", ".", "from_stream", "(", "self", ".", "_stream", ",", "is_error", "=", "True", ")", ")", "raise", "self", ".", "diagnostic", "oldnode", "=", "sequence", "sequence", ".", "parser_tree", "=", "pt", ".", "functor", "(", "oldnode", ".", "parser_tree", ")", "return", "True" ]
Add a repeater to the previous sequence
[ "Add", "a", "repeater", "to", "the", "previous", "sequence" ]
python
test
33.538462
pysal/mapclassify
mapclassify/classifiers.py
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L2178-L2183
def _ss(self, class_def): """calculates sum of squares for a class""" yc = self.y[class_def] css = yc - yc.mean() css *= css return sum(css)
[ "def", "_ss", "(", "self", ",", "class_def", ")", ":", "yc", "=", "self", ".", "y", "[", "class_def", "]", "css", "=", "yc", "-", "yc", ".", "mean", "(", ")", "css", "*=", "css", "return", "sum", "(", "css", ")" ]
calculates sum of squares for a class
[ "calculates", "sum", "of", "squares", "for", "a", "class" ]
python
train
29.166667
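The method above is a plain within-class sum of squares; the standalone numpy equivalent below uses invented data to make the arithmetic concrete.

import numpy as np

y = np.array([3.0, 5.0, 8.0, 13.0, 21.0])   # invented sample values
class_def = [1, 2, 3]                        # indices of one class's members
yc = y[class_def]
ss = np.sum((yc - yc.mean()) ** 2)           # same quantity _ss(class_def) returns
print(ss)                                    # 32.666...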
chop-dbhi/varify-data-warehouse
vdw/samples/migrations/0018_create_project_groups.py
https://github.com/chop-dbhi/varify-data-warehouse/blob/1600ee1bc5fae6c68fd03b23624467298570cca8/vdw/samples/migrations/0018_create_project_groups.py#L22-L30
def backwards(self, orm): "Write your backwards methods here." from django.contrib.auth.models import Group projects = orm['samples.Project'].objects.all() names = [PROJECT_GROUP_TEMPLATE.format(p.name) for p in projects] # Remove groups named after these teams Group.objects.filter(name__in=names).delete()
[ "def", "backwards", "(", "self", ",", "orm", ")", ":", "from", "django", ".", "contrib", ".", "auth", ".", "models", "import", "Group", "projects", "=", "orm", "[", "'samples.Project'", "]", ".", "objects", ".", "all", "(", ")", "names", "=", "[", "PROJECT_GROUP_TEMPLATE", ".", "format", "(", "p", ".", "name", ")", "for", "p", "in", "projects", "]", "# Remove groups named after these teams", "Group", ".", "objects", ".", "filter", "(", "name__in", "=", "names", ")", ".", "delete", "(", ")" ]
Write your backwards methods here.
[ "Write", "your", "backwards", "methods", "here", "." ]
python
train
38.777778
jupyterhub/chartpress
chartpress.py
https://github.com/jupyterhub/chartpress/blob/541f132f31c9f3a66750d7847fb28c7ce5a0ca6d/chartpress.py#L45-L54
def git_remote(git_repo): """Return the URL for remote git repository. Depending on the system setup it returns ssh or https remote. """ github_token = os.getenv(GITHUB_TOKEN_KEY) if github_token: return 'https://{0}@github.com/{1}'.format( github_token, git_repo) return '[email protected]:{0}'.format(git_repo)
[ "def", "git_remote", "(", "git_repo", ")", ":", "github_token", "=", "os", ".", "getenv", "(", "GITHUB_TOKEN_KEY", ")", "if", "github_token", ":", "return", "'https://{0}@github.com/{1}'", ".", "format", "(", "github_token", ",", "git_repo", ")", "return", "'[email protected]:{0}'", ".", "format", "(", "git_repo", ")" ]
Return the URL for remote git repository. Depending on the system setup it returns ssh or https remote.
[ "Return", "the", "URL", "for", "remote", "git", "repository", "." ]
python
train
34.5
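A usage sketch exercising both branches; it assumes the module-level GITHUB_TOKEN_KEY constant resolves to the 'GITHUB_TOKEN' environment variable, which the code suggests but does not show here.

import os

os.environ.pop('GITHUB_TOKEN', None)        # assumed key name
print(git_remote('jupyterhub/chartpress'))  # -> [email protected]:jupyterhub/chartpress

os.environ['GITHUB_TOKEN'] = 'dummy-token'  # placeholder token, not a real credential
print(git_remote('jupyterhub/chartpress'))  # -> https://[email protected]/jupyterhub/chartpress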
apache/spark
python/pyspark/ml/param/__init__.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L138-L146
def toListInt(value): """ Convert a value to list of ints, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_integer(v), value)): return [int(v) for v in value] raise TypeError("Could not convert %s to list of ints" % value)
[ "def", "toListInt", "(", "value", ")", ":", "if", "TypeConverters", ".", "_can_convert_to_list", "(", "value", ")", ":", "value", "=", "TypeConverters", ".", "toList", "(", "value", ")", "if", "all", "(", "map", "(", "lambda", "v", ":", "TypeConverters", ".", "_is_integer", "(", "v", ")", ",", "value", ")", ")", ":", "return", "[", "int", "(", "v", ")", "for", "v", "in", "value", "]", "raise", "TypeError", "(", "\"Could not convert %s to list of ints\"", "%", "value", ")" ]
Convert a value to list of ints, if possible.
[ "Convert", "a", "value", "to", "list", "of", "ints", "if", "possible", "." ]
python
train
43
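A small usage sketch via the enclosing TypeConverters class in pyspark.ml.param; the input values are arbitrary.

from pyspark.ml.param import TypeConverters

print(TypeConverters.toListInt([1.0, 2.0, 3.0]))  # [1, 2, 3] (integral floats accepted)
print(TypeConverters.toListInt((4, 5)))           # [4, 5] (tuples convert to lists)
# TypeConverters.toListInt([2.5]) raises TypeError, since 2.5 is not integral.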
WebarchivCZ/WA-KAT
src/wa_kat/templates/static/js/Lib/site-packages/components/input_controller.py
https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/templates/static/js/Lib/site-packages/components/input_controller.py#L65-L114
def _set_typeahead(cls, el, value): """ Convert given `el` to typeahead input and set it to `value`. This method also sets the dropdown icons and descriptors. Args: el (obj): Element reference to the input you want to convert to typeahead. value (list): List of dicts with two keys: ``source`` and ``val``. """ PlaceholderHandler.reset_placeholder_dropdown(el) # if there is no elements, show alert icon in glyph if not value and not el.value: DropdownHandler.set_dropdown_glyph(el.id, "glyphicon-alert") return # if there is only one element, don't use typeahead, just put the # information to the input, set different dropdown glyph and put source # to the dropdown if len(value) == 1: source = value[0]["source"].strip() dropdown_el = DropdownHandler.set_dropdown_glyph( el.id, "glyphicon-eye-open" ) dropdown_content = "<span class='gray_text'>&nbsp;(%s)</span>" # save the source to the dropdown menu if source: dropdown_el.html = dropdown_content % source[::-1] el.value = value[0]["val"] return # get reference to parent element parent_id = el.parent.id if "typeahead" not in parent_id.lower(): parent_id = el.parent.parent.id if parent_id in cls._set_by_typeahead: window.destroy_typeahead_tag("#" + parent_id) # if there are multiple elements, put them to the typeahead and show # dropdown glyph window.make_typeahead_tag("#" + parent_id, value) DropdownHandler.set_dropdown_glyph(el.id, "glyphicon-menu-down") PlaceholderHandler.set_placeholder_dropdown(el) cls._set_by_typeahead.add(parent_id)
[ "def", "_set_typeahead", "(", "cls", ",", "el", ",", "value", ")", ":", "PlaceholderHandler", ".", "reset_placeholder_dropdown", "(", "el", ")", "# if there is no elements, show alert icon in glyph", "if", "not", "value", "and", "not", "el", ".", "value", ":", "DropdownHandler", ".", "set_dropdown_glyph", "(", "el", ".", "id", ",", "\"glyphicon-alert\"", ")", "return", "# if there is only one element, don't use typeahead, just put the", "# information to the input, set different dropdown glyph and put source", "# to the dropdown", "if", "len", "(", "value", ")", "==", "1", ":", "source", "=", "value", "[", "0", "]", "[", "\"source\"", "]", ".", "strip", "(", ")", "dropdown_el", "=", "DropdownHandler", ".", "set_dropdown_glyph", "(", "el", ".", "id", ",", "\"glyphicon-eye-open\"", ")", "dropdown_content", "=", "\"<span class='gray_text'>&nbsp;(%s)</span>\"", "# save the source to the dropdown menu", "if", "source", ":", "dropdown_el", ".", "html", "=", "dropdown_content", "%", "source", "[", ":", ":", "-", "1", "]", "el", ".", "value", "=", "value", "[", "0", "]", "[", "\"val\"", "]", "return", "# get reference to parent element", "parent_id", "=", "el", ".", "parent", ".", "id", "if", "\"typeahead\"", "not", "in", "parent_id", ".", "lower", "(", ")", ":", "parent_id", "=", "el", ".", "parent", ".", "parent", ".", "id", "if", "parent_id", "in", "cls", ".", "_set_by_typeahead", ":", "window", ".", "destroy_typeahead_tag", "(", "\"#\"", "+", "parent_id", ")", "# if there are multiple elements, put them to the typeahead and show", "# dropdown glyph", "window", ".", "make_typeahead_tag", "(", "\"#\"", "+", "parent_id", ",", "value", ")", "DropdownHandler", ".", "set_dropdown_glyph", "(", "el", ".", "id", ",", "\"glyphicon-menu-down\"", ")", "PlaceholderHandler", ".", "set_placeholder_dropdown", "(", "el", ")", "cls", ".", "_set_by_typeahead", ".", "add", "(", "parent_id", ")" ]
Convert given `el` to typeahead input and set it to `value`. This method also sets the dropdown icons and descriptors. Args: el (obj): Element reference to the input you want to convert to typeahead. value (list): List of dicts with two keys: ``source`` and ``val``.
[ "Convert", "given", "el", "to", "typeahead", "input", "and", "set", "it", "to", "value", "." ]
python
train
37.44
sibirrer/lenstronomy
lenstronomy/Analysis/lens_properties.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/Analysis/lens_properties.py#L52-L80
def velocity_dispersion(self, kwargs_lens, kwargs_lens_light, lens_light_model_bool_list=None, aniso_param=1, r_eff=None, R_slit=0.81, dR_slit=0.1, psf_fwhm=0.7, num_evaluate=1000): """ computes the LOS velocity dispersion of the lens within a slit of size R_slit x dR_slit and seeing psf_fwhm. The assumptions are a Hernquist light profile and the spherical power-law lens model at the first position. Further information can be found in the AnalyticKinematics() class. :param kwargs_lens: lens model parameters :param kwargs_lens_light: deflector light parameters :param aniso_param: scaled r_ani with respect to the half light radius :param r_eff: half light radius, if not provided, will be computed from the lens light model :param R_slit: width of the slit :param dR_slit: length of the slit :param psf_fwhm: full width at half maximum of the seeing condition :param num_evaluate: number of spectral rendering of the light distribution that end up on the slit :return: velocity dispersion in units [km/s] """ gamma = kwargs_lens[0]['gamma'] if 'center_x' in kwargs_lens_light[0]: center_x, center_y = kwargs_lens_light[0]['center_x'], kwargs_lens_light[0]['center_y'] else: center_x, center_y = 0, 0 if r_eff is None: r_eff = self.lens_analysis.half_light_radius_lens(kwargs_lens_light, center_x=center_x, center_y=center_y, model_bool_list=lens_light_model_bool_list) theta_E = kwargs_lens[0]['theta_E'] r_ani = aniso_param * r_eff sigma2 = self.analytic_kinematics.vel_disp(gamma, theta_E, r_eff, r_ani, R_slit, dR_slit, FWHM=psf_fwhm, rendering_number=num_evaluate) return sigma2
[ "def", "velocity_dispersion", "(", "self", ",", "kwargs_lens", ",", "kwargs_lens_light", ",", "lens_light_model_bool_list", "=", "None", ",", "aniso_param", "=", "1", ",", "r_eff", "=", "None", ",", "R_slit", "=", "0.81", ",", "dR_slit", "=", "0.1", ",", "psf_fwhm", "=", "0.7", ",", "num_evaluate", "=", "1000", ")", ":", "gamma", "=", "kwargs_lens", "[", "0", "]", "[", "'gamma'", "]", "if", "'center_x'", "in", "kwargs_lens_light", "[", "0", "]", ":", "center_x", ",", "center_y", "=", "kwargs_lens_light", "[", "0", "]", "[", "'center_x'", "]", ",", "kwargs_lens_light", "[", "0", "]", "[", "'center_y'", "]", "else", ":", "center_x", ",", "center_y", "=", "0", ",", "0", "if", "r_eff", "is", "None", ":", "r_eff", "=", "self", ".", "lens_analysis", ".", "half_light_radius_lens", "(", "kwargs_lens_light", ",", "center_x", "=", "center_x", ",", "center_y", "=", "center_y", ",", "model_bool_list", "=", "lens_light_model_bool_list", ")", "theta_E", "=", "kwargs_lens", "[", "0", "]", "[", "'theta_E'", "]", "r_ani", "=", "aniso_param", "*", "r_eff", "sigma2", "=", "self", ".", "analytic_kinematics", ".", "vel_disp", "(", "gamma", ",", "theta_E", ",", "r_eff", ",", "r_ani", ",", "R_slit", ",", "dR_slit", ",", "FWHM", "=", "psf_fwhm", ",", "rendering_number", "=", "num_evaluate", ")", "return", "sigma2" ]
computes the LOS velocity dispersion of the lens within a slit of size R_slit x dR_slit and seeing psf_fwhm. The assumptions are a Hernquist light profile and the spherical power-law lens model at the first position. Further information can be found in the AnalyticKinematics() class. :param kwargs_lens: lens model parameters :param kwargs_lens_light: deflector light parameters :param aniso_param: scaled r_ani with respect to the half light radius :param r_eff: half light radius, if not provided, will be computed from the lens light model :param R_slit: width of the slit :param dR_slit: length of the slit :param psf_fwhm: full width at half maximum of the seeing condition :param num_evaluate: number of spectral rendering of the light distribution that end up on the slit :return: velocity dispersion in units [km/s]
[ "computes", "the", "LOS", "velocity", "dispersion", "of", "the", "lens", "within", "a", "slit", "of", "size", "R_slit", "x", "dR_slit", "and", "seeing", "psf_fwhm", ".", "The", "assumptions", "are", "a", "Hernquist", "light", "profile", "and", "the", "spherical", "power", "-", "law", "lens", "model", "at", "the", "first", "position", "." ]
python
train
62.137931
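An illustrative call shape only: constructing the enclosing analysis object (here called lens_prop, a hypothetical name) is omitted, and every numeric value is a placeholder rather than a fitted parameter. The first lens model is assumed to be a spherical power law, as the docstring requires.

# Placeholder parameter dictionaries for a power-law lens and a Hernquist light profile.
kwargs_lens = [{'theta_E': 1.2, 'gamma': 2.0, 'center_x': 0.0, 'center_y': 0.0}]
kwargs_lens_light = [{'amp': 10.0, 'Rs': 0.5, 'center_x': 0.0, 'center_y': 0.0}]

# sigma2 = lens_prop.velocity_dispersion(kwargs_lens, kwargs_lens_light,
#                                        aniso_param=1, R_slit=0.81, dR_slit=0.1,
#                                        psf_fwhm=0.7, num_evaluate=1000)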
maxalbert/tohu
tohu/v6/base.py
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/base.py#L121-L129
def reset(self, seed): """ Reset this generator's seed generator and any clones. """ logger.debug(f'Resetting {self} (seed={seed})') self.seed_generator.reset(seed) for c in self.clones: c.reset(seed)
[ "def", "reset", "(", "self", ",", "seed", ")", ":", "logger", ".", "debug", "(", "f'Resetting {self} (seed={seed})'", ")", "self", ".", "seed_generator", ".", "reset", "(", "seed", ")", "for", "c", "in", "self", ".", "clones", ":", "c", ".", "reset", "(", "seed", ")" ]
Reset this generator's seed generator and any clones.
[ "Reset", "this", "generator", "s", "seed", "generator", "and", "any", "clones", "." ]
python
train
28.111111
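A stand-alone sketch of the reset cascade; SeedGenerator and Generator here are stubs invented for illustration, not tohu's real classes.

import random

class SeedGenerator:
    def reset(self, seed):
        self.rng = random.Random(seed)

class Generator:
    def __init__(self):
        self.seed_generator = SeedGenerator()
        self.clones = []

    def reset(self, seed):
        # Mirrors the method above: reseed self, then propagate to every clone.
        self.seed_generator.reset(seed)
        for c in self.clones:
            c.reset(seed)

parent, child = Generator(), Generator()
parent.clones.append(child)
parent.reset(12345)   # both generators are now reseeded with 12345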
mongodb/mongo-python-driver
pymongo/message.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/message.py#L715-L729
def _query_compressed(options, collection_name, num_to_skip, num_to_return, query, field_selector, opts, check_keys=False, ctx=None): """Internal compressed query message helper.""" op_query, max_bson_size = _query( options, collection_name, num_to_skip, num_to_return, query, field_selector, opts, check_keys) rid, msg = _compress(2004, op_query, ctx) return rid, msg, max_bson_size
[ "def", "_query_compressed", "(", "options", ",", "collection_name", ",", "num_to_skip", ",", "num_to_return", ",", "query", ",", "field_selector", ",", "opts", ",", "check_keys", "=", "False", ",", "ctx", "=", "None", ")", ":", "op_query", ",", "max_bson_size", "=", "_query", "(", "options", ",", "collection_name", ",", "num_to_skip", ",", "num_to_return", ",", "query", ",", "field_selector", ",", "opts", ",", "check_keys", ")", "rid", ",", "msg", "=", "_compress", "(", "2004", ",", "op_query", ",", "ctx", ")", "return", "rid", ",", "msg", ",", "max_bson_size" ]
Internal compressed query message helper.
[ "Internal", "compressed", "query", "message", "helper", "." ]
python
train
32.866667
cloud9ers/gurumate
environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/pkg_resources.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/pkg_resources.py#L2505-L2530
def _compute_dependencies(self): """Recompute this distribution's dependencies.""" from _markerlib import compile as compile_marker dm = self.__dep_map = {None: []} reqs = [] # Including any condition expressions for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: distvers, mark = self._preparse_requirement(req) parsed = parse_requirements(distvers).next() parsed.marker_fn = compile_marker(mark) reqs.append(parsed) def reqs_for_extra(extra): for req in reqs: if req.marker_fn(override={'extra':extra}): yield req common = frozenset(reqs_for_extra(None)) dm[None].extend(common) for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: extra = safe_extra(extra.strip()) dm[extra] = list(frozenset(reqs_for_extra(extra)) - common) return dm
[ "def", "_compute_dependencies", "(", "self", ")", ":", "from", "_markerlib", "import", "compile", "as", "compile_marker", "dm", "=", "self", ".", "__dep_map", "=", "{", "None", ":", "[", "]", "}", "reqs", "=", "[", "]", "# Including any condition expressions", "for", "req", "in", "self", ".", "_parsed_pkg_info", ".", "get_all", "(", "'Requires-Dist'", ")", "or", "[", "]", ":", "distvers", ",", "mark", "=", "self", ".", "_preparse_requirement", "(", "req", ")", "parsed", "=", "parse_requirements", "(", "distvers", ")", ".", "next", "(", ")", "parsed", ".", "marker_fn", "=", "compile_marker", "(", "mark", ")", "reqs", ".", "append", "(", "parsed", ")", "def", "reqs_for_extra", "(", "extra", ")", ":", "for", "req", "in", "reqs", ":", "if", "req", ".", "marker_fn", "(", "override", "=", "{", "'extra'", ":", "extra", "}", ")", ":", "yield", "req", "common", "=", "frozenset", "(", "reqs_for_extra", "(", "None", ")", ")", "dm", "[", "None", "]", ".", "extend", "(", "common", ")", "for", "extra", "in", "self", ".", "_parsed_pkg_info", ".", "get_all", "(", "'Provides-Extra'", ")", "or", "[", "]", ":", "extra", "=", "safe_extra", "(", "extra", ".", "strip", "(", ")", ")", "dm", "[", "extra", "]", "=", "list", "(", "frozenset", "(", "reqs_for_extra", "(", "extra", ")", ")", "-", "common", ")", "return", "dm" ]
Recompute this distribution's dependencies.
[ "Recompute", "this", "distribution", "s", "dependencies", "." ]
python
test
36.692308
mosdef-hub/mbuild
mbuild/formats/lammpsdata.py
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/formats/lammpsdata.py#L14-L250
def write_lammpsdata(structure, filename, atom_style='full'):
    """Output a LAMMPS data file.

    Outputs a LAMMPS data file in the 'full' atom style format. Assumes use
    of 'real' units. See http://lammps.sandia.gov/doc/atom_style.html for
    more information on atom styles.

    Parameters
    ----------
    structure : parmed.Structure
        ParmEd structure object
    filename : str
        Path of the output file
    atom_style: str
        Defines the style of atoms to be saved in a LAMMPS data file. The
        following atom styles are currently supported: 'full', 'atomic',
        'charge', 'molecular'
        see http://lammps.sandia.gov/doc/atom_style.html for more
        information on atom styles.

    Notes
    -----
    See http://lammps.sandia.gov/doc/2001/data_format.html for a full
    description of the LAMMPS data format. Currently the following sections
    are supported (in addition to the header): *Masses*, *Nonbond Coeffs*,
    *Bond Coeffs*, *Angle Coeffs*, *Dihedral Coeffs*, *Atoms*, *Bonds*,
    *Angles*, *Dihedrals*

    Some of this function has been adopted from `mdtraj`'s support of the
    LAMMPSTRJ trajectory format. See
    https://github.com/mdtraj/mdtraj/blob/master/mdtraj/formats/lammpstrj.py
    for details.
    """
    if atom_style not in ['atomic', 'charge', 'molecular', 'full']:
        raise ValueError('Atom style "{}" is invalid or is not currently supported'.format(atom_style))

    xyz = np.array([[atom.xx, atom.xy, atom.xz] for atom in structure.atoms])

    forcefield = True
    if structure[0].type == '':
        forcefield = False

    # Internally use nm
    box = Box(lengths=np.array([0.1 * val for val in structure.box[0:3]]),
              angles=structure.box[3:6])

    if forcefield:
        types = [atom.type for atom in structure.atoms]
    else:
        types = [atom.name for atom in structure.atoms]

    unique_types = list(set(types))
    unique_types.sort(key=natural_sort)

    charges = [atom.charge for atom in structure.atoms]

    bonds = [[bond.atom1.idx + 1, bond.atom2.idx + 1] for bond in structure.bonds]
    angles = [[angle.atom1.idx + 1, angle.atom2.idx + 1, angle.atom3.idx + 1]
              for angle in structure.angles]
    dihedrals = [[dihedral.atom1.idx + 1, dihedral.atom2.idx + 1,
                  dihedral.atom3.idx + 1, dihedral.atom4.idx + 1]
                 for dihedral in structure.rb_torsions]

    if bonds:
        if len(structure.bond_types) == 0:
            bond_types = np.ones(len(bonds), dtype=int)
        else:
            unique_bond_types = dict(enumerate(set(
                [(round(bond.type.k, 3), round(bond.type.req, 3))
                 for bond in structure.bonds])))
            unique_bond_types = OrderedDict([(y, x + 1) for x, y in unique_bond_types.items()])
            bond_types = [unique_bond_types[(round(bond.type.k, 3), round(bond.type.req, 3))]
                          for bond in structure.bonds]
    if angles:
        unique_angle_types = dict(enumerate(set(
            [(round(angle.type.k, 3), round(angle.type.theteq, 3))
             for angle in structure.angles])))
        unique_angle_types = OrderedDict([(y, x + 1) for x, y in unique_angle_types.items()])
        angle_types = [unique_angle_types[(round(angle.type.k, 3), round(angle.type.theteq, 3))]
                       for angle in structure.angles]
    if dihedrals:
        unique_dihedral_types = dict(enumerate(set(
            [(round(dihedral.type.c0, 3), round(dihedral.type.c1, 3),
              round(dihedral.type.c2, 3), round(dihedral.type.c3, 3),
              round(dihedral.type.c4, 3), round(dihedral.type.c5, 3),
              round(dihedral.type.scee, 1), round(dihedral.type.scnb, 1))
             for dihedral in structure.rb_torsions])))
        unique_dihedral_types = OrderedDict([(y, x + 1) for x, y in unique_dihedral_types.items()])
        dihedral_types = [unique_dihedral_types[(round(dihedral.type.c0, 3),
                                                 round(dihedral.type.c1, 3),
                                                 round(dihedral.type.c2, 3),
                                                 round(dihedral.type.c3, 3),
                                                 round(dihedral.type.c4, 3),
                                                 round(dihedral.type.c5, 3),
                                                 round(dihedral.type.scee, 1),
                                                 round(dihedral.type.scnb, 1))]
                          for dihedral in structure.rb_torsions]

    with open(filename, 'w') as data:
        data.write(filename + ' - created by mBuild\n\n')
        data.write('{:d} atoms\n'.format(len(structure.atoms)))
        if atom_style in ['full', 'molecular']:
            data.write('{:d} bonds\n'.format(len(bonds)))
            data.write('{:d} angles\n'.format(len(angles)))
            data.write('{:d} dihedrals\n\n'.format(len(dihedrals)))

        data.write('{:d} atom types\n'.format(len(set(types))))
        if atom_style in ['full', 'molecular']:
            if bonds:
                data.write('{:d} bond types\n'.format(len(set(bond_types))))
            if angles:
                data.write('{:d} angle types\n'.format(len(set(angle_types))))
            if dihedrals:
                data.write('{:d} dihedral types\n'.format(len(set(dihedral_types))))
        data.write('\n')

        # Box data
        if np.allclose(box.angles, np.array([90, 90, 90])):
            for i, dim in enumerate(['x', 'y', 'z']):
                data.write('{0:.6f} {1:.6f} {2}lo {2}hi\n'.format(
                    10.0 * box.mins[i], 10.0 * box.maxs[i], dim))
        else:
            a, b, c = 10.0 * box.lengths
            alpha, beta, gamma = np.radians(box.angles)

            lx = a
            xy = b * np.cos(gamma)
            xz = c * np.cos(beta)
            ly = np.sqrt(b**2 - xy**2)
            yz = (b*c*np.cos(alpha) - xy*xz) / ly
            lz = np.sqrt(c**2 - xz**2 - yz**2)

            xlo, ylo, zlo = 10.0 * box.mins
            xhi = xlo + lx
            yhi = ylo + ly
            zhi = zlo + lz

            xlo_bound = xlo + np.min([0.0, xy, xz, xy+xz])
            xhi_bound = xhi + np.max([0.0, xy, xz, xy+xz])
            ylo_bound = ylo + np.min([0.0, yz])
            yhi_bound = yhi + np.max([0.0, yz])
            zlo_bound = zlo
            zhi_bound = zhi

            data.write('{0:.6f} {1:.6f} xlo xhi\n'.format(xlo_bound, xhi_bound))
            data.write('{0:.6f} {1:.6f} ylo yhi\n'.format(ylo_bound, yhi_bound))
            data.write('{0:.6f} {1:.6f} zlo zhi\n'.format(zlo_bound, zhi_bound))
            data.write('{0:.6f} {1:.6f} {2:6f} xy xz yz\n'.format(xy, xz, yz))

        # Mass data
        masses = [atom.mass for atom in structure.atoms]
        mass_dict = dict([(unique_types.index(atom_type) + 1, mass)
                          for atom_type, mass in zip(types, masses)])

        data.write('\nMasses\n\n')
        for atom_type, mass in mass_dict.items():
            data.write('{:d}\t{:.6f}\t# {}\n'.format(atom_type, mass, unique_types[atom_type - 1]))

        if forcefield:
            # Pair coefficients
            epsilons = [atom.epsilon for atom in structure.atoms]
            sigmas = [atom.sigma for atom in structure.atoms]
            epsilon_dict = dict([(unique_types.index(atom_type) + 1, epsilon)
                                 for atom_type, epsilon in zip(types, epsilons)])
            sigma_dict = dict([(unique_types.index(atom_type) + 1, sigma)
                               for atom_type, sigma in zip(types, sigmas)])

            data.write('\nPair Coeffs # lj\n\n')
            for idx, epsilon in epsilon_dict.items():
                data.write('{}\t{:.5f}\t{:.5f}\n'.format(idx, epsilon, sigma_dict[idx]))

            # Bond coefficients
            if bonds:
                data.write('\nBond Coeffs # harmonic\n\n')
                for params, idx in unique_bond_types.items():
                    data.write('{}\t{}\t{}\n'.format(idx, *params))

            # Angle coefficients
            if angles:
                data.write('\nAngle Coeffs # harmonic\n\n')
                for params, idx in unique_angle_types.items():
                    data.write('{}\t{}\t{:.5f}\n'.format(idx, *params))

            # Dihedral coefficients
            if dihedrals:
                data.write('\nDihedral Coeffs # opls\n\n')
                for params, idx in unique_dihedral_types.items():
                    opls_coeffs = RB_to_OPLS(params[0], params[1], params[2],
                                             params[3], params[4], params[5])
                    data.write('{}\t{:.5f}\t{:.5f}\t{:.5f}\t{:.5f}\n'.format(idx, *opls_coeffs))

        # Atom data
        data.write('\nAtoms\n\n')
        if atom_style == 'atomic':
            atom_line = '{index:d}\t{type_index:d}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n'
        elif atom_style == 'charge':
            atom_line = '{index:d}\t{type_index:d}\t{charge:.6f}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n'
        elif atom_style == 'molecular':
            atom_line = '{index:d}\t{zero:d}\t{type_index:d}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n'
        elif atom_style == 'full':
            atom_line = '{index:d}\t{zero:d}\t{type_index:d}\t{charge:.6f}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n'

        for i, coords in enumerate(xyz):
            data.write(atom_line.format(
                index=i + 1, type_index=unique_types.index(types[i]) + 1,
                zero=0, charge=charges[i],
                x=coords[0], y=coords[1], z=coords[2]))

        if atom_style in ['full', 'molecular']:
            # Bond data
            if bonds:
                data.write('\nBonds\n\n')
                for i, bond in enumerate(bonds):
                    data.write('{:d}\t{:d}\t{:d}\t{:d}\n'.format(
                        i + 1, bond_types[i], bond[0], bond[1]))

            # Angle data
            if angles:
                data.write('\nAngles\n\n')
                for i, angle in enumerate(angles):
                    data.write('{:d}\t{:d}\t{:d}\t{:d}\t{:d}\n'.format(
                        i + 1, angle_types[i], angle[0], angle[1], angle[2]))

            # Dihedral data
            if dihedrals:
                data.write('\nDihedrals\n\n')
                for i, dihedral in enumerate(dihedrals):
                    data.write('{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\n'.format(
                        i + 1, dihedral_types[i], dihedral[0],
                        dihedral[1], dihedral[2], dihedral[3]))
[ "def", "write_lammpsdata", "(", "structure", ",", "filename", ",", "atom_style", "=", "'full'", ")", ":", "if", "atom_style", "not", "in", "[", "'atomic'", ",", "'charge'", ",", "'molecular'", ",", "'full'", "]", ":", "raise", "ValueError", "(", "'Atom style \"{}\" is invalid or is not currently supported'", ".", "format", "(", "atom_style", ")", ")", "xyz", "=", "np", ".", "array", "(", "[", "[", "atom", ".", "xx", ",", "atom", ".", "xy", ",", "atom", ".", "xz", "]", "for", "atom", "in", "structure", ".", "atoms", "]", ")", "forcefield", "=", "True", "if", "structure", "[", "0", "]", ".", "type", "==", "''", ":", "forcefield", "=", "False", "# Internally use nm", "box", "=", "Box", "(", "lengths", "=", "np", ".", "array", "(", "[", "0.1", "*", "val", "for", "val", "in", "structure", ".", "box", "[", "0", ":", "3", "]", "]", ")", ",", "angles", "=", "structure", ".", "box", "[", "3", ":", "6", "]", ")", "if", "forcefield", ":", "types", "=", "[", "atom", ".", "type", "for", "atom", "in", "structure", ".", "atoms", "]", "else", ":", "types", "=", "[", "atom", ".", "name", "for", "atom", "in", "structure", ".", "atoms", "]", "unique_types", "=", "list", "(", "set", "(", "types", ")", ")", "unique_types", ".", "sort", "(", "key", "=", "natural_sort", ")", "charges", "=", "[", "atom", ".", "charge", "for", "atom", "in", "structure", ".", "atoms", "]", "bonds", "=", "[", "[", "bond", ".", "atom1", ".", "idx", "+", "1", ",", "bond", ".", "atom2", ".", "idx", "+", "1", "]", "for", "bond", "in", "structure", ".", "bonds", "]", "angles", "=", "[", "[", "angle", ".", "atom1", ".", "idx", "+", "1", ",", "angle", ".", "atom2", ".", "idx", "+", "1", ",", "angle", ".", "atom3", ".", "idx", "+", "1", "]", "for", "angle", "in", "structure", ".", "angles", "]", "dihedrals", "=", "[", "[", "dihedral", ".", "atom1", ".", "idx", "+", "1", ",", "dihedral", ".", "atom2", ".", "idx", "+", "1", ",", "dihedral", ".", "atom3", ".", "idx", "+", "1", ",", "dihedral", ".", "atom4", ".", "idx", "+", "1", "]", "for", "dihedral", "in", "structure", ".", "rb_torsions", "]", "if", "bonds", ":", "if", "len", "(", "structure", ".", "bond_types", ")", "==", "0", ":", "bond_types", "=", "np", ".", "ones", "(", "len", "(", "bonds", ")", ",", "dtype", "=", "int", ")", "else", ":", "unique_bond_types", "=", "dict", "(", "enumerate", "(", "set", "(", "[", "(", "round", "(", "bond", ".", "type", ".", "k", ",", "3", ")", ",", "round", "(", "bond", ".", "type", ".", "req", ",", "3", ")", ")", "for", "bond", "in", "structure", ".", "bonds", "]", ")", ")", ")", "unique_bond_types", "=", "OrderedDict", "(", "[", "(", "y", ",", "x", "+", "1", ")", "for", "x", ",", "y", "in", "unique_bond_types", ".", "items", "(", ")", "]", ")", "bond_types", "=", "[", "unique_bond_types", "[", "(", "round", "(", "bond", ".", "type", ".", "k", ",", "3", ")", ",", "round", "(", "bond", ".", "type", ".", "req", ",", "3", ")", ")", "]", "for", "bond", "in", "structure", ".", "bonds", "]", "if", "angles", ":", "unique_angle_types", "=", "dict", "(", "enumerate", "(", "set", "(", "[", "(", "round", "(", "angle", ".", "type", ".", "k", ",", "3", ")", ",", "round", "(", "angle", ".", "type", ".", "theteq", ",", "3", ")", ")", "for", "angle", "in", "structure", ".", "angles", "]", ")", ")", ")", "unique_angle_types", "=", "OrderedDict", "(", "[", "(", "y", ",", "x", "+", "1", ")", "for", "x", ",", "y", "in", "unique_angle_types", ".", "items", "(", ")", "]", ")", "angle_types", "=", "[", "unique_angle_types", "[", "(", "round", "(", "angle", ".", "type", ".", 
"k", ",", "3", ")", ",", "round", "(", "angle", ".", "type", ".", "theteq", ",", "3", ")", ")", "]", "for", "angle", "in", "structure", ".", "angles", "]", "if", "dihedrals", ":", "unique_dihedral_types", "=", "dict", "(", "enumerate", "(", "set", "(", "[", "(", "round", "(", "dihedral", ".", "type", ".", "c0", ",", "3", ")", ",", "round", "(", "dihedral", ".", "type", ".", "c1", ",", "3", ")", ",", "round", "(", "dihedral", ".", "type", ".", "c2", ",", "3", ")", ",", "round", "(", "dihedral", ".", "type", ".", "c3", ",", "3", ")", ",", "round", "(", "dihedral", ".", "type", ".", "c4", ",", "3", ")", ",", "round", "(", "dihedral", ".", "type", ".", "c5", ",", "3", ")", ",", "round", "(", "dihedral", ".", "type", ".", "scee", ",", "1", ")", ",", "round", "(", "dihedral", ".", "type", ".", "scnb", ",", "1", ")", ")", "for", "dihedral", "in", "structure", ".", "rb_torsions", "]", ")", ")", ")", "unique_dihedral_types", "=", "OrderedDict", "(", "[", "(", "y", ",", "x", "+", "1", ")", "for", "x", ",", "y", "in", "unique_dihedral_types", ".", "items", "(", ")", "]", ")", "dihedral_types", "=", "[", "unique_dihedral_types", "[", "(", "round", "(", "dihedral", ".", "type", ".", "c0", ",", "3", ")", ",", "round", "(", "dihedral", ".", "type", ".", "c1", ",", "3", ")", ",", "round", "(", "dihedral", ".", "type", ".", "c2", ",", "3", ")", ",", "round", "(", "dihedral", ".", "type", ".", "c3", ",", "3", ")", ",", "round", "(", "dihedral", ".", "type", ".", "c4", ",", "3", ")", ",", "round", "(", "dihedral", ".", "type", ".", "c5", ",", "3", ")", ",", "round", "(", "dihedral", ".", "type", ".", "scee", ",", "1", ")", ",", "round", "(", "dihedral", ".", "type", ".", "scnb", ",", "1", ")", ")", "]", "for", "dihedral", "in", "structure", ".", "rb_torsions", "]", "with", "open", "(", "filename", ",", "'w'", ")", "as", "data", ":", "data", ".", "write", "(", "filename", "+", "' - created by mBuild\\n\\n'", ")", "data", ".", "write", "(", "'{:d} atoms\\n'", ".", "format", "(", "len", "(", "structure", ".", "atoms", ")", ")", ")", "if", "atom_style", "in", "[", "'full'", ",", "'molecular'", "]", ":", "data", ".", "write", "(", "'{:d} bonds\\n'", ".", "format", "(", "len", "(", "bonds", ")", ")", ")", "data", ".", "write", "(", "'{:d} angles\\n'", ".", "format", "(", "len", "(", "angles", ")", ")", ")", "data", ".", "write", "(", "'{:d} dihedrals\\n\\n'", ".", "format", "(", "len", "(", "dihedrals", ")", ")", ")", "data", ".", "write", "(", "'{:d} atom types\\n'", ".", "format", "(", "len", "(", "set", "(", "types", ")", ")", ")", ")", "if", "atom_style", "in", "[", "'full'", ",", "'molecular'", "]", ":", "if", "bonds", ":", "data", ".", "write", "(", "'{:d} bond types\\n'", ".", "format", "(", "len", "(", "set", "(", "bond_types", ")", ")", ")", ")", "if", "angles", ":", "data", ".", "write", "(", "'{:d} angle types\\n'", ".", "format", "(", "len", "(", "set", "(", "angle_types", ")", ")", ")", ")", "if", "dihedrals", ":", "data", ".", "write", "(", "'{:d} dihedral types\\n'", ".", "format", "(", "len", "(", "set", "(", "dihedral_types", ")", ")", ")", ")", "data", ".", "write", "(", "'\\n'", ")", "# Box data", "if", "np", ".", "allclose", "(", "box", ".", "angles", ",", "np", ".", "array", "(", "[", "90", ",", "90", ",", "90", "]", ")", ")", ":", "for", "i", ",", "dim", "in", "enumerate", "(", "[", "'x'", ",", "'y'", ",", "'z'", "]", ")", ":", "data", ".", "write", "(", "'{0:.6f} {1:.6f} {2}lo {2}hi\\n'", ".", "format", "(", "10.0", "*", "box", ".", "mins", "[", "i", "]", ",", "10.0", "*", "box", ".", 
"maxs", "[", "i", "]", ",", "dim", ")", ")", "else", ":", "a", ",", "b", ",", "c", "=", "10.0", "*", "box", ".", "lengths", "alpha", ",", "beta", ",", "gamma", "=", "np", ".", "radians", "(", "box", ".", "angles", ")", "lx", "=", "a", "xy", "=", "b", "*", "np", ".", "cos", "(", "gamma", ")", "xz", "=", "c", "*", "np", ".", "cos", "(", "beta", ")", "ly", "=", "np", ".", "sqrt", "(", "b", "**", "2", "-", "xy", "**", "2", ")", "yz", "=", "(", "b", "*", "c", "*", "np", ".", "cos", "(", "alpha", ")", "-", "xy", "*", "xz", ")", "/", "ly", "lz", "=", "np", ".", "sqrt", "(", "c", "**", "2", "-", "xz", "**", "2", "-", "yz", "**", "2", ")", "xlo", ",", "ylo", ",", "zlo", "=", "10.0", "*", "box", ".", "mins", "xhi", "=", "xlo", "+", "lx", "yhi", "=", "ylo", "+", "ly", "zhi", "=", "zlo", "+", "lz", "xlo_bound", "=", "xlo", "+", "np", ".", "min", "(", "[", "0.0", ",", "xy", ",", "xz", ",", "xy", "+", "xz", "]", ")", "xhi_bound", "=", "xhi", "+", "np", ".", "max", "(", "[", "0.0", ",", "xy", ",", "xz", ",", "xy", "+", "xz", "]", ")", "ylo_bound", "=", "ylo", "+", "np", ".", "min", "(", "[", "0.0", ",", "yz", "]", ")", "yhi_bound", "=", "yhi", "+", "np", ".", "max", "(", "[", "0.0", ",", "yz", "]", ")", "zlo_bound", "=", "zlo", "zhi_bound", "=", "zhi", "data", ".", "write", "(", "'{0:.6f} {1:.6f} xlo xhi\\n'", ".", "format", "(", "xlo_bound", ",", "xhi_bound", ")", ")", "data", ".", "write", "(", "'{0:.6f} {1:.6f} ylo yhi\\n'", ".", "format", "(", "ylo_bound", ",", "yhi_bound", ")", ")", "data", ".", "write", "(", "'{0:.6f} {1:.6f} zlo zhi\\n'", ".", "format", "(", "zlo_bound", ",", "zhi_bound", ")", ")", "data", ".", "write", "(", "'{0:.6f} {1:.6f} {2:6f} xy xz yz\\n'", ".", "format", "(", "xy", ",", "xz", ",", "yz", ")", ")", "# Mass data", "masses", "=", "[", "atom", ".", "mass", "for", "atom", "in", "structure", ".", "atoms", "]", "mass_dict", "=", "dict", "(", "[", "(", "unique_types", ".", "index", "(", "atom_type", ")", "+", "1", ",", "mass", ")", "for", "atom_type", ",", "mass", "in", "zip", "(", "types", ",", "masses", ")", "]", ")", "data", ".", "write", "(", "'\\nMasses\\n\\n'", ")", "for", "atom_type", ",", "mass", "in", "mass_dict", ".", "items", "(", ")", ":", "data", ".", "write", "(", "'{:d}\\t{:.6f}\\t# {}\\n'", ".", "format", "(", "atom_type", ",", "mass", ",", "unique_types", "[", "atom_type", "-", "1", "]", ")", ")", "if", "forcefield", ":", "# Pair coefficients", "epsilons", "=", "[", "atom", ".", "epsilon", "for", "atom", "in", "structure", ".", "atoms", "]", "sigmas", "=", "[", "atom", ".", "sigma", "for", "atom", "in", "structure", ".", "atoms", "]", "epsilon_dict", "=", "dict", "(", "[", "(", "unique_types", ".", "index", "(", "atom_type", ")", "+", "1", ",", "epsilon", ")", "for", "atom_type", ",", "epsilon", "in", "zip", "(", "types", ",", "epsilons", ")", "]", ")", "sigma_dict", "=", "dict", "(", "[", "(", "unique_types", ".", "index", "(", "atom_type", ")", "+", "1", ",", "sigma", ")", "for", "atom_type", ",", "sigma", "in", "zip", "(", "types", ",", "sigmas", ")", "]", ")", "data", ".", "write", "(", "'\\nPair Coeffs # lj\\n\\n'", ")", "for", "idx", ",", "epsilon", "in", "epsilon_dict", ".", "items", "(", ")", ":", "data", ".", "write", "(", "'{}\\t{:.5f}\\t{:.5f}\\n'", ".", "format", "(", "idx", ",", "epsilon", ",", "sigma_dict", "[", "idx", "]", ")", ")", "# Bond coefficients", "if", "bonds", ":", "data", ".", "write", "(", "'\\nBond Coeffs # harmonic\\n\\n'", ")", "for", "params", ",", "idx", "in", "unique_bond_types", ".", "items", "(", ")", ":", "data", ".", "write", 
"(", "'{}\\t{}\\t{}\\n'", ".", "format", "(", "idx", ",", "*", "params", ")", ")", "# Angle coefficients", "if", "angles", ":", "data", ".", "write", "(", "'\\nAngle Coeffs # harmonic\\n\\n'", ")", "for", "params", ",", "idx", "in", "unique_angle_types", ".", "items", "(", ")", ":", "data", ".", "write", "(", "'{}\\t{}\\t{:.5f}\\n'", ".", "format", "(", "idx", ",", "*", "params", ")", ")", "# Dihedral coefficients", "if", "dihedrals", ":", "data", ".", "write", "(", "'\\nDihedral Coeffs # opls\\n\\n'", ")", "for", "params", ",", "idx", "in", "unique_dihedral_types", ".", "items", "(", ")", ":", "opls_coeffs", "=", "RB_to_OPLS", "(", "params", "[", "0", "]", ",", "params", "[", "1", "]", ",", "params", "[", "2", "]", ",", "params", "[", "3", "]", ",", "params", "[", "4", "]", ",", "params", "[", "5", "]", ")", "data", ".", "write", "(", "'{}\\t{:.5f}\\t{:.5f}\\t{:.5f}\\t{:.5f}\\n'", ".", "format", "(", "idx", ",", "*", "opls_coeffs", ")", ")", "# Atom data", "data", ".", "write", "(", "'\\nAtoms\\n\\n'", ")", "if", "atom_style", "==", "'atomic'", ":", "atom_line", "=", "'{index:d}\\t{type_index:d}\\t{x:.6f}\\t{y:.6f}\\t{z:.6f}\\n'", "elif", "atom_style", "==", "'charge'", ":", "atom_line", "=", "'{index:d}\\t{type_index:d}\\t{charge:.6f}\\t{x:.6f}\\t{y:.6f}\\t{z:.6f}\\n'", "elif", "atom_style", "==", "'molecular'", ":", "atom_line", "=", "'{index:d}\\t{zero:d}\\t{type_index:d}\\t{x:.6f}\\t{y:.6f}\\t{z:.6f}\\n'", "elif", "atom_style", "==", "'full'", ":", "atom_line", "=", "'{index:d}\\t{zero:d}\\t{type_index:d}\\t{charge:.6f}\\t{x:.6f}\\t{y:.6f}\\t{z:.6f}\\n'", "for", "i", ",", "coords", "in", "enumerate", "(", "xyz", ")", ":", "data", ".", "write", "(", "atom_line", ".", "format", "(", "index", "=", "i", "+", "1", ",", "type_index", "=", "unique_types", ".", "index", "(", "types", "[", "i", "]", ")", "+", "1", ",", "zero", "=", "0", ",", "charge", "=", "charges", "[", "i", "]", ",", "x", "=", "coords", "[", "0", "]", ",", "y", "=", "coords", "[", "1", "]", ",", "z", "=", "coords", "[", "2", "]", ")", ")", "if", "atom_style", "in", "[", "'full'", ",", "'molecular'", "]", ":", "# Bond data", "if", "bonds", ":", "data", ".", "write", "(", "'\\nBonds\\n\\n'", ")", "for", "i", ",", "bond", "in", "enumerate", "(", "bonds", ")", ":", "data", ".", "write", "(", "'{:d}\\t{:d}\\t{:d}\\t{:d}\\n'", ".", "format", "(", "i", "+", "1", ",", "bond_types", "[", "i", "]", ",", "bond", "[", "0", "]", ",", "bond", "[", "1", "]", ")", ")", "# Angle data", "if", "angles", ":", "data", ".", "write", "(", "'\\nAngles\\n\\n'", ")", "for", "i", ",", "angle", "in", "enumerate", "(", "angles", ")", ":", "data", ".", "write", "(", "'{:d}\\t{:d}\\t{:d}\\t{:d}\\t{:d}\\n'", ".", "format", "(", "i", "+", "1", ",", "angle_types", "[", "i", "]", ",", "angle", "[", "0", "]", ",", "angle", "[", "1", "]", ",", "angle", "[", "2", "]", ")", ")", "# Dihedral data", "if", "dihedrals", ":", "data", ".", "write", "(", "'\\nDihedrals\\n\\n'", ")", "for", "i", ",", "dihedral", "in", "enumerate", "(", "dihedrals", ")", ":", "data", ".", "write", "(", "'{:d}\\t{:d}\\t{:d}\\t{:d}\\t{:d}\\t{:d}\\n'", ".", "format", "(", "i", "+", "1", ",", "dihedral_types", "[", "i", "]", ",", "dihedral", "[", "0", "]", ",", "dihedral", "[", "1", "]", ",", "dihedral", "[", "2", "]", ",", "dihedral", "[", "3", "]", ")", ")" ]
Output a LAMMPS data file. Outputs a LAMMPS data file in the 'full' atom style format. Assumes use of 'real' units. See http://lammps.sandia.gov/doc/atom_style.html for more information on atom styles. Parameters ---------- structure : parmed.Structure ParmEd structure object filename : str Path of the output file atom_style: str Defines the style of atoms to be saved in a LAMMPS data file. The following atom styles are currently supported: 'full', 'atomic', 'charge', 'molecular' see http://lammps.sandia.gov/doc/atom_style.html for more information on atom styles. Notes ----- See http://lammps.sandia.gov/doc/2001/data_format.html for a full description of the LAMMPS data format. Currently the following sections are supported (in addition to the header): *Masses*, *Nonbond Coeffs*, *Bond Coeffs*, *Angle Coeffs*, *Dihedral Coeffs*, *Atoms*, *Bonds*, *Angles*, *Dihedrals* Some of this function has been adopted from `mdtraj`'s support of the LAMMPSTRJ trajectory format. See https://github.com/mdtraj/mdtraj/blob/master/mdtraj/formats/lammpstrj.py for details.
[ "Output", "a", "LAMMPS", "data", "file", ".", "Outputs", "a", "LAMMPS", "data", "file", "in", "the", "full", "atom", "style", "format", ".", "Assumes", "use", "of", "real", "units", ".", "See", "http", ":", "//", "lammps", ".", "sandia", ".", "gov", "/", "doc", "/", "atom_style", ".", "html", "for", "more", "information", "on", "atom", "styles", "." ]
python
train
45.590717
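A hedged end-to-end sketch; it assumes mbuild can build an ethane Compound from SMILES (which needs an openbabel/rdkit backend installed) and that Compound.to_parmed accepts a box argument, as recent mbuild versions do.

import mbuild as mb
from mbuild.formats.lammpsdata import write_lammpsdata

ethane = mb.load('CC', smiles=True)  # requires a SMILES backend
structure = ethane.to_parmed(box=mb.Box(lengths=[3.0, 3.0, 3.0]))
# No forcefield applied, so atom names are used as types (the forcefield=False branch).
write_lammpsdata(structure, 'ethane.lammps', atom_style='full')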
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L192-L217
def get_profile_histogram(x, y, n_bins=100): '''Takes 2D point data (x,y) and creates a profile histogram similar to the TProfile in ROOT. It calculates the y mean for every bin at the bin center and gives the y mean error as error bars. Parameters ---------- x : array like data x positions y : array like data y positions n_bins : int the number of bins used to create the histogram ''' if len(x) != len(y): raise ValueError('x and y dimensions have to be the same') y = y.astype(np.float32) n, bin_edges = np.histogram(x, bins=n_bins) # needed to calculate the number of points per bin sy = np.histogram(x, bins=n_bins, weights=y)[0] # the sum of the bin values sy2 = np.histogram(x, bins=n_bins, weights=y * y)[0] # the quadratic sum of the bin values bin_centers = (bin_edges[1:] + bin_edges[:-1]) / 2 # calculate the bin center for all bins mean = sy / n # calculate the mean of all bins std = np.sqrt((sy2 / n - mean * mean)) # TODO: not understood, need check if this is really the standard deviation std_mean = std / np.sqrt((n - 1)) mean[np.isnan(mean)] = 0. std_mean[np.isnan(std_mean)] = 0. return bin_centers, mean, std_mean
[ "def", "get_profile_histogram", "(", "x", ",", "y", ",", "n_bins", "=", "100", ")", ":", "if", "len", "(", "x", ")", "!=", "len", "(", "y", ")", ":", "raise", "ValueError", "(", "'x and y dimensions have to be the same'", ")", "y", "=", "y", ".", "astype", "(", "np", ".", "float32", ")", "n", ",", "bin_edges", "=", "np", ".", "histogram", "(", "x", ",", "bins", "=", "n_bins", ")", "# needed to calculate the number of points per bin", "sy", "=", "np", ".", "histogram", "(", "x", ",", "bins", "=", "n_bins", ",", "weights", "=", "y", ")", "[", "0", "]", "# the sum of the bin values", "sy2", "=", "np", ".", "histogram", "(", "x", ",", "bins", "=", "n_bins", ",", "weights", "=", "y", "*", "y", ")", "[", "0", "]", "# the quadratic sum of the bin values", "bin_centers", "=", "(", "bin_edges", "[", "1", ":", "]", "+", "bin_edges", "[", ":", "-", "1", "]", ")", "/", "2", "# calculate the bin center for all bins", "mean", "=", "sy", "/", "n", "# calculate the mean of all bins", "std", "=", "np", ".", "sqrt", "(", "(", "sy2", "/", "n", "-", "mean", "*", "mean", ")", ")", "# TODO: not understood, need check if this is really the standard deviation", "std_mean", "=", "std", "/", "np", ".", "sqrt", "(", "(", "n", "-", "1", ")", ")", "mean", "[", "np", ".", "isnan", "(", "mean", ")", "]", "=", "0.", "std_mean", "[", "np", ".", "isnan", "(", "std_mean", ")", "]", "=", "0.", "return", "bin_centers", ",", "mean", ",", "std_mean" ]
Takes 2D point data (x,y) and creates a profile histogram similar to the TProfile in ROOT. It calculates the y mean for every bin at the bin center and gives the y mean error as error bars. Parameters ---------- x : array like data x positions y : array like data y positions n_bins : int the number of bins used to create the histogram
[ "Takes", "2D", "point", "data", "(", "x", "y", ")", "and", "creates", "a", "profile", "histogram", "similar", "to", "the", "TProfile", "in", "ROOT", ".", "It", "calculates", "the", "y", "mean", "for", "every", "bin", "at", "the", "bin", "center", "and", "gives", "the", "y", "mean", "error", "as", "error", "bars", "." ]
python
train
47.346154
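A self-contained usage sketch with synthetic data; the matplotlib lines show one natural way to plot the profile and are commented out to keep the snippet dependency-light.

import numpy as np

rng = np.random.RandomState(0)
x = rng.uniform(0, 10, 10000)
y = 2.0 * x + rng.normal(0, 1, 10000)   # linear trend plus noise

bin_centers, mean, std_mean = get_profile_histogram(x, y, n_bins=20)

# import matplotlib.pyplot as plt
# plt.errorbar(bin_centers, mean, yerr=std_mean, fmt='o')
# plt.show()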
kytos/python-openflow
pyof/foundation/basic_types.py
https://github.com/kytos/python-openflow/blob/4f2d0d08ab28e102ed88fe57a4ee17729f1e1bb7/pyof/foundation/basic_types.py#L307-L326
def unpack(self, buff, offset=0): """Unpack a binary message into this object's attributes. Unpack the binary value *buff* and update this object attributes based on the results. Args: buff (bytes): Binary data package to be unpacked. offset (int): Where to begin unpacking. Raises: Exception: If there is a struct unpacking error. """ try: unpacked_data = struct.unpack('!4B', buff[offset:offset+4]) self._value = '.'.join([str(x) for x in unpacked_data]) except struct.error as exception: raise exceptions.UnpackException('%s; %s: %s' % (exception, offset, buff))
[ "def", "unpack", "(", "self", ",", "buff", ",", "offset", "=", "0", ")", ":", "try", ":", "unpacked_data", "=", "struct", ".", "unpack", "(", "'!4B'", ",", "buff", "[", "offset", ":", "offset", "+", "4", "]", ")", "self", ".", "_value", "=", "'.'", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "unpacked_data", "]", ")", "except", "struct", ".", "error", "as", "exception", ":", "raise", "exceptions", ".", "UnpackException", "(", "'%s; %s: %s'", "%", "(", "exception", ",", "offset", ",", "buff", ")", ")" ]
Unpack a binary message into this object's attributes. Unpack the binary value *buff* and update this object attributes based on the results. Args: buff (bytes): Binary data package to be unpacked. offset (int): Where to begin unpacking. Raises: UnpackException: If there is a struct unpacking error.
[ "Unpack", "a", "binary", "message", "into", "this", "object", "s", "attributes", "." ]
python
train
37.35
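A usage sketch assuming the enclosing class is python-openflow's IPAddress basic type, which is consistent with the '!4B' format string and the dotted-quad join; the byte string is arbitrary.

from pyof.foundation.basic_types import IPAddress

addr = IPAddress()
addr.unpack(b'\xc0\xa8\x00\x01')   # four raw bytes, offset defaults to 0
# The internal value is now the dotted-quad string '192.168.0.1'.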
drj11/pypng
code/png.py
https://github.com/drj11/pypng/blob/b8220ca9f58e4c5bc1d507e713744fcb8c049225/code/png.py#L1468-L1518
def _deinterlace(self, raw): """ Read raw pixel data, undo filters, deinterlace, and flatten. Return a single array of values. """ # Values per row (of the target image) vpr = self.width * self.planes # Values per image vpi = vpr * self.height # Interleaving writes to the output array randomly # (well, not quite), so the entire output array must be in memory. # Make a result array, and make it big enough. if self.bitdepth > 8: a = array('H', [0] * vpi) else: a = bytearray([0] * vpi) source_offset = 0 for lines in adam7_generate(self.width, self.height): # The previous (reconstructed) scanline. # `None` at the beginning of a pass # to indicate that there is no previous line. recon = None for x, y, xstep in lines: # Pixels per row (reduced pass image) ppr = int(math.ceil((self.width - x) / float(xstep))) # Row size in bytes for this pass. row_size = int(math.ceil(self.psize * ppr)) filter_type = raw[source_offset] source_offset += 1 scanline = raw[source_offset: source_offset + row_size] source_offset += row_size recon = self.undo_filter(filter_type, scanline, recon) # Convert so that there is one element per pixel value flat = self._bytes_to_values(recon, width=ppr) if xstep == 1: assert x == 0 offset = y * vpr a[offset: offset + vpr] = flat else: offset = y * vpr + x * self.planes end_offset = (y + 1) * vpr skip = self.planes * xstep for i in range(self.planes): a[offset + i: end_offset: skip] = \ flat[i:: self.planes] return a
[ "def", "_deinterlace", "(", "self", ",", "raw", ")", ":", "# Values per row (of the target image)", "vpr", "=", "self", ".", "width", "*", "self", ".", "planes", "# Values per image", "vpi", "=", "vpr", "*", "self", ".", "height", "# Interleaving writes to the output array randomly", "# (well, not quite), so the entire output array must be in memory.", "# Make a result array, and make it big enough.", "if", "self", ".", "bitdepth", ">", "8", ":", "a", "=", "array", "(", "'H'", ",", "[", "0", "]", "*", "vpi", ")", "else", ":", "a", "=", "bytearray", "(", "[", "0", "]", "*", "vpi", ")", "source_offset", "=", "0", "for", "lines", "in", "adam7_generate", "(", "self", ".", "width", ",", "self", ".", "height", ")", ":", "# The previous (reconstructed) scanline.", "# `None` at the beginning of a pass", "# to indicate that there is no previous line.", "recon", "=", "None", "for", "x", ",", "y", ",", "xstep", "in", "lines", ":", "# Pixels per row (reduced pass image)", "ppr", "=", "int", "(", "math", ".", "ceil", "(", "(", "self", ".", "width", "-", "x", ")", "/", "float", "(", "xstep", ")", ")", ")", "# Row size in bytes for this pass.", "row_size", "=", "int", "(", "math", ".", "ceil", "(", "self", ".", "psize", "*", "ppr", ")", ")", "filter_type", "=", "raw", "[", "source_offset", "]", "source_offset", "+=", "1", "scanline", "=", "raw", "[", "source_offset", ":", "source_offset", "+", "row_size", "]", "source_offset", "+=", "row_size", "recon", "=", "self", ".", "undo_filter", "(", "filter_type", ",", "scanline", ",", "recon", ")", "# Convert so that there is one element per pixel value", "flat", "=", "self", ".", "_bytes_to_values", "(", "recon", ",", "width", "=", "ppr", ")", "if", "xstep", "==", "1", ":", "assert", "x", "==", "0", "offset", "=", "y", "*", "vpr", "a", "[", "offset", ":", "offset", "+", "vpr", "]", "=", "flat", "else", ":", "offset", "=", "y", "*", "vpr", "+", "x", "*", "self", ".", "planes", "end_offset", "=", "(", "y", "+", "1", ")", "*", "vpr", "skip", "=", "self", ".", "planes", "*", "xstep", "for", "i", "in", "range", "(", "self", ".", "planes", ")", ":", "a", "[", "offset", "+", "i", ":", "end_offset", ":", "skip", "]", "=", "flat", "[", "i", ":", ":", "self", ".", "planes", "]", "return", "a" ]
Read raw pixel data, undo filters, deinterlace, and flatten. Return a single array of values.
[ "Read", "raw", "pixel", "data", "undo", "filters", "deinterlace", "and", "flatten", ".", "Return", "a", "single", "array", "of", "values", "." ]
python
train
39.568627
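A minimal usage sketch (not part of the record): pypng's public Reader API drives _deinterlace internally for Adam7-interlaced images, so callers never invoke it directly. The filename below is a placeholder.

import png  # pypng; Reader calls _deinterlace for Adam7 images

r = png.Reader(filename='interlaced.png')   # placeholder path
width, height, rows, info = r.read()        # rows yields deinterlaced scanlines
pixels = list(rows)                         # materialise the flattened rows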
StackStorm/pybind
pybind/slxos/v17s_1_02/telemetry/collector/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/telemetry/collector/__init__.py#L168-L189
def _set_collector_profile(self, v, load=False): """ Setter method for collector_profile, mapped from YANG variable /telemetry/collector/collector_profile (list) If this variable is read-only (config: false) in the source YANG file, then _set_collector_profile is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_collector_profile() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("collector_profiletype collector_profilename",collector_profile.collector_profile, yang_name="collector-profile", rest_name="profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='collector-profiletype collector-profilename', extensions={u'tailf-common': {u'info': u'Create a profile for Collector', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'callpoint': u'CollectorProfile'}}), is_container='list', yang_name="collector-profile", rest_name="profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Create a profile for Collector', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'callpoint': u'CollectorProfile'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """collector_profile must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("collector_profiletype collector_profilename",collector_profile.collector_profile, yang_name="collector-profile", rest_name="profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='collector-profiletype collector-profilename', extensions={u'tailf-common': {u'info': u'Create a profile for Collector', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'callpoint': u'CollectorProfile'}}), is_container='list', yang_name="collector-profile", rest_name="profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Create a profile for Collector', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'callpoint': u'CollectorProfile'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""", }) self.__collector_profile = t if hasattr(self, '_set'): self._set()
[ "def", "_set_collector_profile", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"collector_profiletype collector_profilename\"", ",", "collector_profile", ".", "collector_profile", ",", "yang_name", "=", "\"collector-profile\"", ",", "rest_name", "=", "\"profile\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'collector-profiletype collector-profilename'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Create a profile for Collector'", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'alt-name'", ":", "u'profile'", ",", "u'cli-full-command'", ":", "None", ",", "u'callpoint'", ":", "u'CollectorProfile'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"collector-profile\"", ",", "rest_name", "=", "\"profile\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Create a profile for Collector'", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'alt-name'", ":", "u'profile'", ",", "u'cli-full-command'", ":", "None", ",", "u'callpoint'", ":", "u'CollectorProfile'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-telemetry'", ",", "defining_module", "=", "'brocade-telemetry'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"collector_profile must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"collector_profiletype collector_profilename\",collector_profile.collector_profile, yang_name=\"collector-profile\", rest_name=\"profile\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='collector-profiletype collector-profilename', extensions={u'tailf-common': {u'info': u'Create a profile for Collector', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'callpoint': u'CollectorProfile'}}), is_container='list', yang_name=\"collector-profile\", rest_name=\"profile\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Create a profile for Collector', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'callpoint': u'CollectorProfile'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__collector_profile", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for collector_profile, mapped from YANG variable /telemetry/collector/collector_profile (list) If this variable is read-only (config: false) in the source YANG file, then _set_collector_profile is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_collector_profile() directly.
[ "Setter", "method", "for", "collector_profile", "mapped", "from", "YANG", "variable", "/", "telemetry", "/", "collector", "/", "collector_profile", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_collector_profile", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_collector_profile", "()", "directly", "." ]
python
train
131.272727
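A hedged sketch of how the generated setter surfaces type errors. The import path follows the record's path field, and the bad assignment is purely illustrative; pyangbind normally exposes collector_profile as a property whose setter is _set_collector_profile.

from pybind.slxos.v17s_1_02.telemetry.collector import collector

c = collector()
try:
    # a non-list value trips the YANGDynClass type check
    c.collector_profile = 42
except ValueError as err:
    details = err.args[0]  # dict with 'error-string', 'defined-type', 'generated-type'
    print(details['error-string'])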
kgori/treeCl
treeCl/tree.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/tree.py#L1091-L1114
def autocorrelated_relaxed_clock(self, root_rate, autocorrel, distribution='lognormal'):
    """
    Attaches rates to each node according to the autocorrelated lognormal
    model from Kishino et al. (2001), or an autocorrelated exponential model
    """
    optioncheck(distribution, ['exponential', 'lognormal'])

    if autocorrel == 0:
        for node in self._tree.preorder_node_iter():
            node.rate = root_rate
        return

    for node in self._tree.preorder_node_iter():
        if node == self._tree.seed_node:
            node.rate = root_rate
        else:
            parent_rate = node.parent_node.rate
            bl = node.edge_length
            if distribution == 'lognormal':
                node.rate = logn_correlated_rate(parent_rate, bl,
                                                 autocorrel)
            else:
                node.rate = np.random.exponential(parent_rate)
[ "def", "autocorrelated_relaxed_clock", "(", "self", ",", "root_rate", ",", "autocorrel", ",", "distribution", "=", "'lognormal'", ")", ":", "optioncheck", "(", "distribution", ",", "[", "'exponential'", ",", "'lognormal'", "]", ")", "if", "autocorrel", "==", "0", ":", "for", "node", "in", "self", ".", "_tree", ".", "preorder_node_iter", "(", ")", ":", "node", ".", "rate", "=", "root_rate", "return", "for", "node", "in", "self", ".", "_tree", ".", "preorder_node_iter", "(", ")", ":", "if", "node", "==", "self", ".", "_tree", ".", "seed_node", ":", "node", ".", "rate", "=", "root_rate", "else", ":", "parent_rate", "=", "node", ".", "parent_node", ".", "rate", "bl", "=", "node", ".", "edge_length", "if", "distribution", "==", "'lognormal'", ":", "node", ".", "rate", "=", "logn_correlated_rate", "(", "parent_rate", ",", "bl", ",", "autocorrel", ")", "else", ":", "node", ".", "rate", "=", "np", ".", "random", ".", "exponential", "(", "parent_rate", ")" ]
Attaches rates to each node according to the autocorrelated lognormal model from Kishino et al. (2001), or an autocorrelated exponential model
[ "Attaches", "rates", "to", "each", "node", "according", "to", "autocorrelated", "lognormal", "model", "from", "Kishino", "et", "al", ".", "(", "2001", ")", "or", "autocorrelated", "exponential" ]
python
train
41.458333
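A minimal sketch under stated assumptions: the import path and constructor signature (a newick string) are assumed from the record's path field; after the call every node carries a .rate attribute drawn from the chosen model.

from treeCl.tree import Tree  # assumed import path

t = Tree('((a:1.0,b:1.0):0.5,(c:1.0,d:1.0):0.5);')
t.autocorrelated_relaxed_clock(root_rate=1.0, autocorrel=0.1,
                               distribution='lognormal')
# with autocorrel=0 every node would simply inherit root_rate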
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L11447-L11471
def hil_gps_send(self, time_usec, fix_type, lat, lon, alt, eph, epv, vel, vn, ve, vd, cog, satellites_visible, force_mavlink1=False):
    '''
    The global position, as returned by the Global Positioning System
    (GPS). This is NOT the global position estimate of the system, but
    rather a RAW sensor value. See message GLOBAL_POSITION for the
    global position estimate. Coordinate frame is right-handed, Z-axis
    up (GPS frame).

    time_usec          : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
    fix_type           : 0-1: no fix, 2: 2D fix, 3: 3D fix. Some applications will not use the value of this field unless it is at least two, so always correctly fill in the fix. (uint8_t)
    lat                : Latitude (WGS84), in degrees * 1E7 (int32_t)
    lon                : Longitude (WGS84), in degrees * 1E7 (int32_t)
    alt                : Altitude (AMSL, not WGS84), in meters * 1000 (positive for up) (int32_t)
    eph                : GPS HDOP horizontal dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t)
    epv                : GPS VDOP vertical dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t)
    vel                : GPS ground speed (m/s * 100). If unknown, set to: 65535 (uint16_t)
    vn                 : GPS velocity in cm/s in NORTH direction in earth-fixed NED frame (int16_t)
    ve                 : GPS velocity in cm/s in EAST direction in earth-fixed NED frame (int16_t)
    vd                 : GPS velocity in cm/s in DOWN direction in earth-fixed NED frame (int16_t)
    cog                : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: 65535 (uint16_t)
    satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t)
    '''
    return self.send(self.hil_gps_encode(time_usec, fix_type, lat, lon, alt, eph, epv, vel, vn, ve, vd, cog, satellites_visible), force_mavlink1=force_mavlink1)
[ "def", "hil_gps_send", "(", "self", ",", "time_usec", ",", "fix_type", ",", "lat", ",", "lon", ",", "alt", ",", "eph", ",", "epv", ",", "vel", ",", "vn", ",", "ve", ",", "vd", ",", "cog", ",", "satellites_visible", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "hil_gps_encode", "(", "time_usec", ",", "fix_type", ",", "lat", ",", "lon", ",", "alt", ",", "eph", ",", "epv", ",", "vel", ",", "vn", ",", "ve", ",", "vd", ",", "cog", ",", "satellites_visible", ")", ",", "force_mavlink1", "=", "force_mavlink1", ")" ]
The global position, as returned by the Global Positioning System (GPS). This is NOT the global position estimate of the system, but rather a RAW sensor value. See message GLOBAL_POSITION for the global position estimate. Coordinate frame is right-handed, Z-axis up (GPS frame).

                time_usec                 : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
                fix_type                  : 0-1: no fix, 2: 2D fix, 3: 3D fix. Some applications will not use the value of this field unless it is at least two, so always correctly fill in the fix. (uint8_t)
                lat                       : Latitude (WGS84), in degrees * 1E7 (int32_t)
                lon                       : Longitude (WGS84), in degrees * 1E7 (int32_t)
                alt                       : Altitude (AMSL, not WGS84), in meters * 1000 (positive for up) (int32_t)
                eph                       : GPS HDOP horizontal dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t)
                epv                       : GPS VDOP vertical dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t)
                vel                       : GPS ground speed (m/s * 100). If unknown, set to: 65535 (uint16_t)
                vn                        : GPS velocity in cm/s in NORTH direction in earth-fixed NED frame (int16_t)
                ve                        : GPS velocity in cm/s in EAST direction in earth-fixed NED frame (int16_t)
                vd                        : GPS velocity in cm/s in DOWN direction in earth-fixed NED frame (int16_t)
                cog                       : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: 65535 (uint16_t)
                satellites_visible        : Number of satellites visible. If unknown, set to 255 (uint8_t)
[ "The", "global", "position", "as", "returned", "by", "the", "Global", "Positioning", "System", "(", "GPS", ")", ".", "This", "is", "NOT", "the", "global", "position", "estimate", "of", "the", "sytem", "but", "rather", "a", "RAW", "sensor", "value", ".", "See", "message", "GLOBAL_POSITION", "for", "the", "global", "position", "estimate", ".", "Coordinate", "frame", "is", "right", "-", "handed", "Z", "-", "axis", "up", "(", "GPS", "frame", ")", "." ]
python
train
95.04
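An illustrative call showing the unit conventions from the docstring (degrees * 1E7, mm, cm, cm/s); the connection string is a placeholder, and a live pymavlink mavutil connection is assumed.

import time
from pymavlink import mavutil

mav = mavutil.mavlink_connection('udpout:127.0.0.1:14550')  # placeholder endpoint
mav.mav.hil_gps_send(
    int(time.time() * 1e6),   # time_usec
    3,                        # fix_type: 3D fix
    int(47.3977 * 1e7),       # lat, degrees * 1E7
    int(8.5456 * 1e7),        # lon, degrees * 1E7
    488 * 1000,               # alt, mm AMSL
    100, 100,                 # eph/epv, HDOP/VDOP in cm
    0,                        # vel, ground speed in cm/s
    0, 0, 0,                  # vn/ve/vd, cm/s in earth-fixed NED
    0,                        # cog, degrees * 100
    10)                       # satellites_visible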
theislab/scanpy
scanpy/plotting/_tools/scatterplots.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/plotting/_tools/scatterplots.py#L289-L304
def tsne(adata, **kwargs) -> Union[Axes, List[Axes], None]: """\ Scatter plot in tSNE basis. Parameters ---------- {adata_color_etc} {edges_arrows} {scatter_bulk} {show_save_ax} Returns ------- If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it. """ return plot_scatter(adata, 'tsne', **kwargs)
[ "def", "tsne", "(", "adata", ",", "*", "*", "kwargs", ")", "->", "Union", "[", "Axes", ",", "List", "[", "Axes", "]", ",", "None", "]", ":", "return", "plot_scatter", "(", "adata", ",", "'tsne'", ",", "*", "*", "kwargs", ")" ]
\ Scatter plot in tSNE basis. Parameters ---------- {adata_color_etc} {edges_arrows} {scatter_bulk} {show_save_ax} Returns ------- If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
[ "\\", "Scatter", "plot", "in", "tSNE", "basis", "." ]
python
train
21.75
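A short usage sketch: pbmc68k_reduced is a demo AnnData bundled with scanpy, and 'bulk_labels' is one of its annotation keys; with show=False the wrapper returns the Axes, as the docstring states.

import scanpy as sc

adata = sc.datasets.pbmc68k_reduced()      # demo dataset shipped with scanpy
sc.tl.tsne(adata)                          # computes adata.obsm['X_tsne']
ax = sc.pl.tsne(adata, color='bulk_labels', show=False)  # returns Axes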