Dataset columns:
    repo              string, length 7 to 55
    path              string, length 4 to 223
    url               string, length 87 to 315
    code              string, length 75 to 104k
    code_tokens       list
    docstring         string, length 1 to 46.9k
    docstring_tokens  list
    language          string, 1 distinct value
    partition         string, 3 distinct values
    avg_line_len      float64, 7.91 to 980
repo: bwohlberg/sporco
path: sporco/dictlrn/cbpdndl.py
url: https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/dictlrn/cbpdndl.py#L126-L136
code:
def ccmod_class_label_lookup(label):
    """Get a CCMOD class from a label string."""

    clsmod = {'ism': admm_ccmod.ConvCnstrMOD_IterSM,
              'cg': admm_ccmod.ConvCnstrMOD_CG,
              'cns': admm_ccmod.ConvCnstrMOD_Consensus,
              'fista': fista_ccmod.ConvCnstrMOD}
    if label in clsmod:
        return clsmod[label]
    else:
        raise ValueError('Unknown ConvCnstrMOD solver method %s' % label)
docstring: Get a CCMOD class from a label string.
language: python
partition: train
avg_line_len: 38.090909
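As a quick, hypothetical illustration of the dictionary-dispatch pattern used by ccmod_class_label_lookup above (SolverA and SolverB are made-up stand-ins, not sporco's solver classes):
class SolverA: pass
class SolverB: pass

def class_label_lookup(label):
    # Map label strings to classes; unknown labels raise, as in the record above.
    clsmod = {'a': SolverA, 'b': SolverB}
    if label in clsmod:
        return clsmod[label]
    raise ValueError('Unknown solver method %s' % label)

print(class_label_lookup('a'))  # <class '__main__.SolverA'>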
repo: JdeRobot/base
path: src/libs/comm_py/comm/ros/publisherMotors.py
url: https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/libs/comm_py/comm/ros/publisherMotors.py#L58-L65
code:
def publish (self):
    '''
    Function to publish cmdvel.
    '''
    self.lock.acquire()
    tw = cmdvel2Twist(self.data)
    self.lock.release()
    self.pub.publish(tw)
docstring: Function to publish cmdvel.
language: python
partition: train
avg_line_len: 24.375
repo: ewels/MultiQC
path: multiqc/modules/rseqc/read_distribution.py
url: https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/rseqc/read_distribution.py#L16-L111
code:
def parse_reports(self):
    """ Find RSeQC read_distribution reports and parse their data """

    # Set up vars
    self.read_dist = dict()

    first_regexes = {
        'total_reads': r"Total Reads\s+(\d+)\s*",
        'total_tags': r"Total Tags\s+(\d+)\s*",
        'total_assigned_tags': r"Total Assigned Tags\s+(\d+)\s*",
    }
    second_regexes = {
        'cds_exons': r"CDS_Exons\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
        '5_utr_exons': r"5'UTR_Exons\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
        '3_utr_exons': r"3'UTR_Exons\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
        'introns': r"Introns\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
        'tss_up_1kb': r"TSS_up_1kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
        'tss_up_5kb': r"TSS_up_5kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
        'tss_up_10kb': r"TSS_up_10kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
        'tes_down_1kb': r"TES_down_1kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
        'tes_down_5kb': r"TES_down_5kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
        'tes_down_10kb': r"TES_down_10kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
    }

    # Go through files and parse data using regexes
    for f in self.find_log_files('rseqc/read_distribution'):
        d = dict()
        for k, r in first_regexes.items():
            r_search = re.search(r, f['f'], re.MULTILINE)
            if r_search:
                d[k] = int(r_search.group(1))
        for k, r in second_regexes.items():
            r_search = re.search(r, f['f'], re.MULTILINE)
            if r_search:
                d['{}_total_bases'.format(k)] = int(r_search.group(1))
                d['{}_tag_count'.format(k)] = int(r_search.group(2))
                d['{}_tags_kb'.format(k)] = float(r_search.group(2))
        d['other_intergenic_tag_count'] = d['total_tags'] - d['total_assigned_tags']

        # Calculate some percentages for parsed file
        if 'total_tags' in d:
            t = float(d['total_tags'])
            pcts = dict()
            for k in d:
                if k.endswith('_tag_count'):
                    pk = '{}_tag_pct'.format(k[:-10])
                    pcts[pk] = (float(d[k]) / t) * 100.0
            d.update(pcts)

        if len(d) > 0:
            if f['s_name'] in self.read_dist:
                log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
            self.add_data_source(f, section='read_distribution')
            self.read_dist[f['s_name']] = d

    # Filter to strip out ignored sample names
    self.read_dist = self.ignore_samples(self.read_dist)

    if len(self.read_dist) > 0:

        # Write to file
        self.write_data_file(self.read_dist, 'multiqc_rseqc_read_distribution')

        # Plot bar graph of groups
        keys = OrderedDict()
        keys['cds_exons_tag_count'] = {'name': "CDS_Exons"}
        keys['5_utr_exons_tag_count'] = {'name': "5'UTR_Exons"}
        keys['3_utr_exons_tag_count'] = {'name': "3'UTR_Exons"}
        keys['introns_tag_count'] = {'name': "Introns"}
        keys['tss_up_1kb_tag_count'] = {'name': "TSS_up_1kb"}
        keys['tss_up_5kb_tag_count'] = {'name': "TSS_up_5kb"}
        keys['tss_up_10kb_tag_count'] = {'name': "TSS_up_10kb"}
        keys['tes_down_1kb_tag_count'] = {'name': "TES_down_1kb"}
        keys['tes_down_5kb_tag_count'] = {'name': "TES_down_5kb"}
        keys['tes_down_10kb_tag_count'] = {'name': "TES_down_10kb"}
        keys['other_intergenic_tag_count'] = {'name': "Other_intergenic"}

        # Config for the plot
        pconfig = {
            'id': 'rseqc_read_distribution_plot',
            'title': 'RSeQC: Read Distribution',
            'ylab': '# Tags',
            'cpswitch_counts_label': 'Number of Tags',
            'cpswitch_c_active': False
        }

        self.add_section(
            name='Read Distribution',
            anchor='rseqc-read_distribution',
            description='<a href="http://rseqc.sourceforge.net/#read-distribution-py" target="_blank">Read Distribution</a>'
                " calculates how mapped reads are distributed over genome features.",
            plot=bargraph.plot(self.read_dist, keys, pconfig)
        )

    # Return number of samples found
    return len(self.read_dist)
docstring: Find RSeQC read_distribution reports and parse their data
language: python
partition: train
avg_line_len: 42.53125
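A small self-contained sketch of the parsing approach used by parse_reports above: apply one of the single-group regexes to a snippet of read_distribution-style output and cast the capture to int (the sample text below is invented for the example):
import re

sample = """Total Reads                   1000000
Total Tags                    1200000
Total Assigned Tags           1100000
"""

# Same call pattern as the method: search with MULTILINE and take group(1).
r_search = re.search(r"Total Assigned Tags\s+(\d+)\s*", sample, re.MULTILINE)
if r_search:
    total_assigned_tags = int(r_search.group(1))
    print(total_assigned_tags)  # 1100000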
repo: RiotGames/cloud-inquisitor
path: backend/cloud_inquisitor/config.py
url: https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/config.py#L188-L209
code:
def delete(self, namespace, key):
    """Remove a configuration item from the database

    Args:
        namespace (`str`): Namespace of the config item
        key (`str`): Key to delete

    Returns:
        `None`
    """
    if self.key_exists(namespace, key):
        obj = db.ConfigItem.find_one(
            ConfigItem.namespace_prefix == namespace,
            ConfigItem.key == key
        )
        del self.__data[namespace][key]
        db.session.delete(obj)
        db.session.commit()
    else:
        raise KeyError('{}/{}'.format(namespace, key))
docstring: Remove a configuration item from the database Args: namespace (`str`): Namespace of the config item key (`str`): Key to delete Returns: `None`
language: python
partition: train
avg_line_len: 27.909091
repo: ic-labs/django-icekit
path: icekit_events/forms.py
url: https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit_events/forms.py#L171-L176
code:
def _set_queryset(self, queryset):
    """
    Set the queryset on the ``ModelChoiceField`` and choices on the widget.
    """
    self.fields[0].queryset = self.widget.queryset = queryset
    self.widget.choices = self.fields[0].choices
docstring: Set the queryset on the ``ModelChoiceField`` and choices on the widget.
language: python
partition: train
avg_line_len: 42
repo: fermiPy/fermipy
path: fermipy/version.py
url: https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/version.py#L134-L139
code:
def write_release_version(version):
    """Write the release version to ``_version.py``."""
    dirname = os.path.abspath(os.path.dirname(__file__))
    f = open(os.path.join(dirname, "_version.py"), "wt")
    f.write("__version__ = '%s'\n" % version)
    f.close()
docstring: Write the release version to ``_version.py``.
language: python
partition: train
avg_line_len: 43.333333
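A hedged sketch of what write_release_version produces: a one-line _version.py next to the module. Here the same pattern is imitated in a temporary directory instead of the package directory, with a placeholder version string:
import os
import tempfile

version = "1.3.8"  # placeholder version string
dirname = tempfile.mkdtemp()
path = os.path.join(dirname, "_version.py")
with open(path, "wt") as f:
    f.write("__version__ = '%s'\n" % version)

print(open(path).read())  # __version__ = '1.3.8'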
repo: tanghaibao/jcvi
path: jcvi/apps/fetch.py
url: https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/fetch.py#L360-L468
code:
def entrez(args):
    """
    %prog entrez <filename|term>

    `filename` contains a list of terms to search. Or just one term. If the
    results are small in size, e.g. "--format=acc", use "--batchsize=100" to
    speed the download.
    """
    p = OptionParser(entrez.__doc__)

    allowed_databases = {"fasta": ["genome", "nuccore", "nucgss", "protein", "nucest"],
                         "asn.1": ["genome", "nuccore", "nucgss", "protein", "gene"],
                         "xml": ["genome", "nuccore", "nucgss", "nucest", "gene"],
                         "gb": ["genome", "nuccore", "nucgss"],
                         "est": ["nucest"],
                         "gss": ["nucgss"],
                         "acc": ["nuccore"],
                         }

    valid_formats = tuple(allowed_databases.keys())
    valid_databases = ("genome", "nuccore", "nucest", "nucgss", "protein", "gene")

    p.add_option("--noversion", dest="noversion", default=False, action="store_true",
                 help="Remove trailing accession versions")
    p.add_option("--format", default="fasta", choices=valid_formats,
                 help="download format [default: %default]")
    p.add_option("--database", default="nuccore", choices=valid_databases,
                 help="search database [default: %default]")
    p.add_option("--retmax", default=1000000, type="int",
                 help="how many results to return [default: %default]")
    p.add_option("--skipcheck", default=False, action="store_true",
                 help="turn off prompt to check file existence [default: %default]")
    p.add_option("--batchsize", default=500, type="int",
                 help="download the results in batch for speed-up [default: %default]")
    p.set_outdir(outdir=None)
    p.add_option("--outprefix", default="out",
                 help="output file name prefix [default: %default]")
    p.set_email()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(p.print_help())

    filename, = args
    if op.exists(filename):
        pf = filename.rsplit(".", 1)[0]
        list_of_terms = [row.strip() for row in open(filename)]
        if opts.noversion:
            list_of_terms = [x.rsplit(".", 1)[0] for x in list_of_terms]
    else:
        pf = filename  # the filename is the search term
        list_of_terms = [filename.strip()]

    fmt = opts.format
    database = opts.database
    batchsize = opts.batchsize

    assert database in allowed_databases[fmt], \
        "For output format '{0}', allowed databases are: {1}".\
        format(fmt, allowed_databases[fmt])
    assert batchsize >= 1, "batchsize must >= 1"
    if " " in pf:
        pf = opts.outprefix

    outfile = "{0}.{1}".format(pf, fmt)

    outdir = opts.outdir
    if outdir:
        mkdir(outdir)

    # If noprompt, will not check file existence
    if not outdir:
        fw = must_open(outfile, "w", checkexists=True, skipcheck=opts.skipcheck)
        if fw is None:
            return

    seen = set()
    totalsize = 0
    for id, size, term, handle in batch_entrez(list_of_terms, retmax=opts.retmax,
                                               rettype=fmt, db=database,
                                               batchsize=batchsize, email=opts.email):
        if outdir:
            outfile = urljoin(outdir, "{0}.{1}".format(term, fmt))
            fw = must_open(outfile, "w", checkexists=True, skipcheck=opts.skipcheck)
            if fw is None:
                continue

        rec = handle.read()
        if id in seen:
            logging.error("Duplicate key ({0}) found".format(rec))
            continue

        totalsize += size
        print(rec, file=fw)
        print(file=fw)

        seen.add(id)

    if seen:
        print("A total of {0} {1} records downloaded.".
              format(totalsize, fmt.upper()), file=sys.stderr)

    return outfile
docstring: %prog entrez <filename|term> `filename` contains a list of terms to search. Or just one term. If the results are small in size, e.g. "--format=acc", use "--batchsize=100" to speed the download.
language: python
partition: train
avg_line_len: 35.568807
repo: QInfer/python-qinfer
path: src/qinfer/ipy.py
url: https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/ipy.py#L94-L104
code:
def start(self, max):
    """
    Displays the progress bar for a given maximum value.

    :param float max: Maximum value of the progress bar.
    """
    try:
        self.widget.max = max
        display(self.widget)
    except:
        pass
docstring: Displays the progress bar for a given maximum value. :param float max: Maximum value of the progress bar.
language: python
partition: train
avg_line_len: 24.636364
repo: markchil/gptools
path: gptools/kernel/core.py
url: https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/kernel/core.py#L709-L732
code:
def _compute_dk_dtau_on_partition(self, tau, p):
    """Evaluate the term inside the sum of Faa di Bruno's formula for the given partition.

    Parameters
    ----------
    tau : :py:class:`Matrix`, (`M`, `D`)
        `M` inputs with dimension `D`.
    p : list of :py:class:`Array`
        Each element is a block of the partition representing the derivative
        orders to use.

    Returns
    -------
    dk_dtau : :py:class:`Array`, (`M`,)
        The specified derivatives over the given partition at the specified
        locations.
    """
    y, r2l2 = self._compute_y(tau, return_r2l2=True)
    # Compute the d^(|pi|)f/dy term:
    dk_dtau = self._compute_dk_dy(y, len(p))
    # Multiply in each of the block terms:
    for b in p:
        dk_dtau *= self._compute_dy_dtau(tau, b, r2l2)
    return dk_dtau
docstring: Evaluate the term inside the sum of Faa di Bruno's formula for the given partition. Parameters ---------- tau : :py:class:`Matrix`, (`M`, `D`) `M` inputs with dimension `D`. p : list of :py:class:`Array` Each element is a block of the partition representing the derivative orders to use. Returns ------- dk_dtau : :py:class:`Array`, (`M`,) The specified derivatives over the given partition at the specified locations.
language: python
partition: train
avg_line_len: 37.375
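For context, the method above computes one summand of the set-partition form of Faa di Bruno's formula; a sketch in LaTeX, with k playing the role of the outer function and y = y(tau) the inner map as suggested by the docstring (the notation here is a generic rendering, not taken from the gptools source):
\[
\frac{\partial^{n}}{\partial\tau_{1}\cdots\partial\tau_{n}}\, k\bigl(y(\tau)\bigr)
  = \sum_{\pi \in \Pi_{n}} \frac{d^{|\pi|} k}{dy^{|\pi|}}\bigl(y(\tau)\bigr)
    \prod_{B \in \pi} \frac{\partial^{|B|} y}{\prod_{j \in B} \partial\tau_{j}}
\]
Here Pi_n is the set of partitions of the derivative orders, and for a single partition p the method returns the factor d^{|p|}k/dy^{|p|} multiplied by the product over the blocks of p.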
repo: Vital-Fernandez/dazer
path: bin/lib/Astro_Libraries/f2n.py
url: https://github.com/Vital-Fernandez/dazer/blob/3c9ae8ae6d40ea33f22cc20dc11365d6d6e65244/bin/lib/Astro_Libraries/f2n.py#L928-L937
code:
def tonet(self, outfile):
    """
    Writes the PIL image into a png.
    We do not want to flip the image at this stage, as you might have written on it !
    """
    self.checkforpilimage()
    if self.verbose:
        print "Writing image to %s...\n%i x %i pixels, mode %s" % (outfile, self.pilimage.size[0], self.pilimage.size[1], self.pilimage.mode)
    self.pilimage.save(outfile, "PNG")
docstring: Writes the PIL image into a png. We do not want to flip the image at this stage, as you might have written on it !
language: python
partition: train
avg_line_len: 42.7
repo: blockchain/api-v1-client-python
path: blockchain/v2/receive.py
url: https://github.com/blockchain/api-v1-client-python/blob/52ea562f824f04303e75239364e06722bec8620f/blockchain/v2/receive.py#L26-L42
code:
def receive(xpub, callback, api_key):
    """Call the '/v2/receive' endpoint and create a forwarding address.

    :param str xpub: extended public key to generate payment address
    :param str callback: callback URI that will be called upon payment
    :param str api_key: Blockchain.info API V2 key
    :return: an instance of :class:`ReceiveResponse` class
    """
    params = {'xpub': xpub, 'key': api_key, 'callback': callback}
    resource = 'v2/receive?' + util.urlencode(params)
    resp = util.call_api(resource, base_url='https://api.blockchain.info/')
    json_resp = json.loads(resp)
    payment_response = ReceiveResponse(json_resp['address'],
                                       json_resp['index'],
                                       json_resp['callback'])
    return payment_response
docstring: Call the '/v2/receive' endpoint and create a forwarding address. :param str xpub: extended public key to generate payment address :param str callback: callback URI that will be called upon payment :param str api_key: Blockchain.info API V2 key :return: an instance of :class:`ReceiveResponse` class
language: python
partition: train
avg_line_len: 46.823529
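A hedged sketch of how the request resource is assembled before the HTTP call in receive above, using only the standard library; the xpub, callback and API key are placeholders and no network request is made:
from urllib.parse import urlencode

params = {
    'xpub': 'xpub6...placeholder',
    'key': 'your-api-key',
    'callback': 'https://example.com/payment-callback',
}
# Same shape as the resource built in the record above.
resource = 'v2/receive?' + urlencode(params)
url = 'https://api.blockchain.info/' + resource
print(url)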
repo: h2oai/h2o-3
path: h2o-py/h2o/h2o.py
url: https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/h2o.py#L115-L146
code:
def version_check():
    """Used to verify that h2o-python module and the H2O server are compatible with each other."""
    from .__init__ import __version__ as ver_pkg
    ci = h2oconn.cluster
    if not ci:
        raise H2OConnectionError("Connection not initialized. Did you run h2o.connect()?")
    ver_h2o = ci.version
    if ver_pkg == "SUBST_PROJECT_VERSION":
        ver_pkg = "UNKNOWN"
    if str(ver_h2o) != str(ver_pkg):
        branch_name_h2o = ci.branch_name
        build_number_h2o = ci.build_number
        if build_number_h2o is None or build_number_h2o == "unknown":
            raise H2OConnectionError(
                "Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
                "Upgrade H2O and h2o-Python to latest stable version - "
                "http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
                "".format(ver_h2o, ver_pkg))
        elif build_number_h2o == "99999":
            raise H2OConnectionError(
                "Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
                "This is a developer build, please contact your developer."
                "".format(ver_h2o, ver_pkg))
        else:
            raise H2OConnectionError(
                "Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
                "Install the matching h2o-Python version from - "
                "http://h2o-release.s3.amazonaws.com/h2o/{2}/{3}/index.html."
                "".format(ver_h2o, ver_pkg, branch_name_h2o, build_number_h2o))
    # Check age of the install
    if ci.build_too_old:
        print("Warning: Your H2O cluster version is too old ({})! Please download and install the latest "
              "version from http://h2o.ai/download/".format(ci.build_age))
docstring: Used to verify that h2o-python module and the H2O server are compatible with each other.
language: python
partition: test
avg_line_len: 56.09375
repo: SheffieldML/GPy
path: GPy/mappings/linear.py
url: https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/mappings/linear.py#L41-L53
code:
def to_dict(self):
    """
    Convert the object into a json serializable dictionary.

    Note: It uses the private method _save_to_input_dict of the parent.

    :return dict: json serializable dictionary containing the needed information to instantiate the object
    """
    input_dict = super(Linear, self)._save_to_input_dict()
    input_dict["class"] = "GPy.mappings.Linear"
    input_dict["A"] = self.A.values.tolist()
    return input_dict
docstring: Convert the object into a json serializable dictionary. Note: It uses the private method _save_to_input_dict of the parent. :return dict: json serializable dictionary containing the needed information to instantiate the object
language: python
partition: train
avg_line_len: 36.461538
repo: SuperCowPowers/bat
path: bat/utils/net_utils.py
url: https://github.com/SuperCowPowers/bat/blob/069e6bc52843dc07760969c531cc442ca7da8e0c/bat/utils/net_utils.py#L22-L32
code:
def str_to_mac(mac_string):
    """Convert a readable string to a MAC address

       Args:
           mac_string (str): a readable string (e.g. '01:02:03:04:05:06')
       Returns:
           str: a MAC address in hex form
    """
    sp = mac_string.split(':')
    mac_string = ''.join(sp)
    return binascii.unhexlify(mac_string)
docstring: Convert a readable string to a MAC address Args: mac_string (str): a readable string (e.g. '01:02:03:04:05:06') Returns: str: a MAC address in hex form
language: python
partition: train
avg_line_len: 31.181818
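Usage sketch for str_to_mac above: stripping the colons and unhexlifying yields the six raw address bytes (the input string is just an example value):
import binascii

mac_string = '01:02:03:04:05:06'
# Drop the colons, then decode the hex digits to bytes, as in the record above.
raw = binascii.unhexlify(''.join(mac_string.split(':')))
print(raw)  # b'\x01\x02\x03\x04\x05\x06'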
repo: log2timeline/dfvfs
path: examples/source_analyzer.py
url: https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/examples/source_analyzer.py#L138-L189
code:
def Analyze(self, source_path, output_writer):
    """Analyzes the source.

    Args:
        source_path (str): the source path.
        output_writer (StdoutWriter): the output writer.

    Raises:
        RuntimeError: if the source path does not exists, or if the source path
            is not a file or directory, or if the format of or within the source
            file is not supported.
    """
    if not os.path.exists(source_path):
        raise RuntimeError('No such source: {0:s}.'.format(source_path))

    scan_context = source_scanner.SourceScannerContext()
    scan_path_spec = None
    scan_step = 0

    scan_context.OpenSourcePath(source_path)

    while True:
        self._source_scanner.Scan(
            scan_context, auto_recurse=self._auto_recurse,
            scan_path_spec=scan_path_spec)

        if not scan_context.updated:
            break

        if not self._auto_recurse:
            output_writer.WriteScanContext(scan_context, scan_step=scan_step)
        scan_step += 1

        # The source is a directory or file.
        if scan_context.source_type in [
                definitions.SOURCE_TYPE_DIRECTORY, definitions.SOURCE_TYPE_FILE]:
            break

        # The source scanner found a locked volume, e.g. an encrypted volume,
        # and we need a credential to unlock the volume.
        for locked_scan_node in scan_context.locked_scan_nodes:
            self._PromptUserForEncryptedVolumeCredential(
                scan_context, locked_scan_node, output_writer)

        if not self._auto_recurse:
            scan_node = scan_context.GetUnscannedScanNode()
            if not scan_node:
                return
            scan_path_spec = scan_node.path_spec

    if self._auto_recurse:
        output_writer.WriteScanContext(scan_context)
docstring: Analyzes the source. Args: source_path (str): the source path. output_writer (StdoutWriter): the output writer. Raises: RuntimeError: if the source path does not exists, or if the source path is not a file or directory, or if the format of or within the source file is not supported.
language: python
partition: train
avg_line_len: 32.019231
repo: kevinpt/hdlparse
path: hdlparse/vhdl_parser.py
url: https://github.com/kevinpt/hdlparse/blob/be7cdab08a8c18815cc4504003ce9ca7fff41022/hdlparse/vhdl_parser.py#L692-L700
code:
def register_array_types_from_sources(self, source_files):
    '''Add array type definitions from a file list to internal registry

    Args:
      source_files (list of str): Files to parse for array definitions
    '''
    for fname in source_files:
        if is_vhdl(fname):
            self._register_array_types(self.extract_objects(fname))
docstring: Add array type definitions from a file list to internal registry Args: source_files (list of str): Files to parse for array definitions
language: python
partition: train
avg_line_len: 36.888889
repo: klavinslab/coral
path: coral/seqio/_dna.py
url: https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/seqio/_dna.py#L204-L265
code:
def _seqfeature_to_coral(feature):
    '''Convert a Biopython SeqFeature to a coral.Feature.

    :param feature: Biopython SeqFeature
    :type feature: Bio.SeqFeature

    '''
    # Some genomic sequences don't have a label attribute
    # TODO: handle genomic cases differently than others. Some features lack
    # a label but should still be incorporated somehow.
    qualifiers = feature.qualifiers
    if 'label' in qualifiers:
        feature_name = qualifiers['label'][0]
    elif 'locus_tag' in qualifiers:
        feature_name = qualifiers['locus_tag'][0]
    else:
        raise FeatureNameError('Unrecognized feature name')
    # Features with gaps are special, require looking at subfeatures
    # Assumption: subfeatures are never more than one level deep
    if feature.location_operator == 'join':
        # Feature has gaps. Have to figure out start/stop from subfeatures,
        # calculate gap indices. A nested feature model may be required
        # eventually.
        # Reorder the sub_feature list by start location
        # Assumption: none of the subfeatures overlap so the last entry in
        # the reordered list also has the final stop point of the feature.
        # FIXME: Getting a deprecation warning about using sub_features
        # instead of feature.location being a CompoundFeatureLocation
        reordered = sorted(feature.location.parts,
                           key=lambda location: location.start)
        starts = [int(location.start) for location in reordered]
        stops = [int(location.end) for location in reordered]
        feature_start = starts.pop(0)
        feature_stop = stops.pop(-1)
        starts = [start - feature_start for start in starts]
        stops = [stop - feature_start for stop in stops]
        feature_gaps = list(zip(stops, starts))
    else:
        # Feature doesn't have gaps. Ignore subfeatures.
        feature_start = int(feature.location.start)
        feature_stop = int(feature.location.end)
        feature_gaps = []
    feature_type = _process_feature_type(feature.type)
    if feature.location.strand == -1:
        feature_strand = 1
    else:
        feature_strand = 0
    if 'gene' in qualifiers:
        gene = qualifiers['gene']
    else:
        gene = []
    if 'locus_tag' in qualifiers:
        locus_tag = qualifiers['locus_tag']
    else:
        locus_tag = []
    coral_feature = coral.Feature(feature_name, feature_start, feature_stop,
                                  feature_type, gene=gene, locus_tag=locus_tag,
                                  qualifiers=qualifiers, strand=feature_strand,
                                  gaps=feature_gaps)
    return coral_feature
docstring: Convert a Biopython SeqFeature to a coral.Feature. :param feature: Biopython SeqFeature :type feature: Bio.SeqFeature
language: python
partition: train
avg_line_len: 43.403226
repo: vlukes/dicom2fem
path: dicom2fem/base.py
url: https://github.com/vlukes/dicom2fem/blob/3056c977ca7119e01984d3aa0c4448a1c6c2430f/dicom2fem/base.py#L34-L95
code:
def get_debug():
    """
    Utility function providing ``debug()`` function.
    """
    try:
        import IPython
    except ImportError:
        debug = None
    else:
        old_excepthook = sys.excepthook

        def debug(frame=None):
            if IPython.__version__ >= '0.11':
                from IPython.core.debugger import Pdb
                try:
                    ip = get_ipython()
                except NameError:
                    from IPython.frontend.terminal.embed \
                         import InteractiveShellEmbed
                    ip = InteractiveShellEmbed()
                colors = ip.colors
            else:
                from IPython.Debugger import Pdb
                from IPython.Shell import IPShell
                from IPython import ipapi

                ip = ipapi.get()
                if ip is None:
                    IPShell(argv=[''])
                    ip = ipapi.get()
                colors = ip.options.colors

            sys.excepthook = old_excepthook

            if frame is None:
                frame = sys._getframe().f_back

            Pdb(colors).set_trace(frame)

    if debug is None:
        import pdb
        debug = pdb.set_trace

    debug.__doc__ = """
    Start debugger on line where it is called, roughly equivalent to::

        import pdb; pdb.set_trace()

    First, this function tries to start an `IPython`-enabled
    debugger using the `IPython` API.

    When this fails, the plain old `pdb` is used instead.
    """

    return debug
docstring: Utility function providing ``debug()`` function.
language: python
partition: train
avg_line_len: 23.854839
repo: iotile/coretools
path: iotileemulate/iotile/emulate/reference/controller_features/config_database.py
url: https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/reference/controller_features/config_database.py#L109-L122
code:
def compact(self):
    """Remove all invalid config entries."""

    saved_length = 0
    to_remove = []

    for i, entry in enumerate(self.entries):
        if not entry.valid:
            to_remove.append(i)
            saved_length += entry.data_space()

    for i in reversed(to_remove):
        del self.entries[i]

    self.data_index -= saved_length
docstring: Remove all invalid config entries.
language: python
partition: train
avg_line_len: 27.357143
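To illustrate the two-pass compaction pattern in compact above with a self-contained stand-in (FakeEntry is hypothetical; the real config-database entry objects differ):
class FakeEntry:
    # Minimal stand-in exposing only what compact() touches.
    def __init__(self, valid, size):
        self.valid = valid
        self._size = size

    def data_space(self):
        return self._size

entries = [FakeEntry(True, 8), FakeEntry(False, 4), FakeEntry(True, 16)]
data_index = 28

# First pass: note the invalid entries and the space they occupied.
to_remove = [i for i, e in enumerate(entries) if not e.valid]
saved_length = sum(entries[i].data_space() for i in to_remove)
# Second pass: delete in reverse so earlier indices stay valid.
for i in reversed(to_remove):
    del entries[i]
data_index -= saved_length

print(len(entries), data_index)  # 2 24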
repo: modin-project/modin
path: modin/backends/pandas/query_compiler.py
url: https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L181-L209
code:
def numeric_function_clean_dataframe(self, axis):
    """Preprocesses numeric functions to clean dataframe and pick numeric indices.

    Args:
        axis: '0' if columns and '1' if rows.

    Returns:
        Tuple with return value(if any), indices to apply func to & cleaned Manager.
    """
    result = None
    query_compiler = self
    # If no numeric columns and over columns, then return empty Series
    if not axis and len(self.index) == 0:
        result = pandas.Series(dtype=np.int64)

    nonnumeric = [
        col
        for col, dtype in zip(self.columns, self.dtypes)
        if not is_numeric_dtype(dtype)
    ]
    if len(nonnumeric) == len(self.columns):
        # If over rows and no numeric columns, return this
        if axis:
            result = pandas.Series([np.nan for _ in self.index])
        else:
            result = pandas.Series([0 for _ in self.index])
    else:
        query_compiler = self.drop(columns=nonnumeric)
    return result, query_compiler
docstring: Preprocesses numeric functions to clean dataframe and pick numeric indices. Args: axis: '0' if columns and '1' if rows. Returns: Tuple with return value(if any), indices to apply func to & cleaned Manager.
language: python
partition: train
avg_line_len: 36.724138
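The column-screening step above can be illustrated with plain pandas, assuming pandas is installed (the frame below is made up for the example):
import pandas
from pandas.api.types import is_numeric_dtype

df = pandas.DataFrame({'a': [1, 2], 'b': ['x', 'y'], 'c': [0.5, 1.5]})
# Same list comprehension shape as the method: keep columns whose dtype is not numeric.
nonnumeric = [col for col, dtype in zip(df.columns, df.dtypes)
              if not is_numeric_dtype(dtype)]
print(nonnumeric)  # ['b']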
repo: rocky/python-xdis
path: xdis/opcodes/base.py
url: https://github.com/rocky/python-xdis/blob/46a2902ae8f5d8eee495eed67ac0690fd545453d/xdis/opcodes/base.py#L120-L156
code:
def rm_op(l, name, op):
    """Remove an opcode. This is used when basing a new Python release off
    of another one, and there is an opcode that is in the old release
    that was removed in the new release.
    We are pretty aggressive about removing traces of the op.
    """

    # opname is an array, so we need to keep the position in there.
    l['opname'][op] = '<%s>' % op

    if op in l['hasconst']:
        l['hasconst'].remove(op)
    if op in l['hascompare']:
        l['hascompare'].remove(op)
    if op in l['hascondition']:
        l['hascondition'].remove(op)
    if op in l['hasfree']:
        l['hasfree'].remove(op)
    if op in l['hasjabs']:
        l['hasjabs'].remove(op)
    if op in l['hasname']:
        l['hasname'].remove(op)
    if op in l['hasjrel']:
        l['hasjrel'].remove(op)
    if op in l['haslocal']:
        l['haslocal'].remove(op)
    if op in l['hasname']:
        l['hasname'].remove(op)
    if op in l['hasnargs']:
        l['hasnargs'].remove(op)
    if op in l['hasvargs']:
        l['hasvargs'].remove(op)
    if op in l['nofollow']:
        l['nofollow'].remove(op)

    assert l['opmap'][name] == op
    del l['opmap'][name]
docstring: Remove an opcode. This is used when basing a new Python release off of another one, and there is an opcode that is in the old release that was removed in the new release. We are pretty aggressive about removing traces of the op.
language: python
partition: train
avg_line_len: 30.513514
repo: phaethon/kamene
path: kamene/layers/bluetooth.py
url: https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/layers/bluetooth.py#L197-L202
code:
def srbt(peer, pkts, inter=0.1, *args, **kargs):
    """send and receive using a bluetooth socket"""
    s = conf.BTsocket(peer=peer)
    a, b = sndrcv(s, pkts, inter=inter, *args, **kargs)
    s.close()
    return a, b
[ "def", "srbt", "(", "peer", ",", "pkts", ",", "inter", "=", "0.1", ",", "*", "args", ",", "*", "*", "kargs", ")", ":", "s", "=", "conf", ".", "BTsocket", "(", "peer", "=", "peer", ")", "a", ",", "b", "=", "sndrcv", "(", "s", ",", "pkts", ",", "inter", "=", "inter", ",", "*", "args", ",", "*", "*", "kargs", ")", "s", ".", "close", "(", ")", "return", "a", ",", "b" ]
send and receive using a bluetooth socket
[ "send", "and", "receive", "using", "a", "bluetooth", "socket" ]
python
train
34.666667
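An illustrative call for srbt. It assumes a kamene environment where conf.BTsocket can open a connection to the given Bluetooth address; the address and payload below are placeholders, and a real probe would build proper HCI/L2CAP layers rather than a Raw blob:

pkt = Raw(b"ping")                                   # placeholder payload
answered, unanswered = srbt("AA:BB:CC:DD:EE:FF", pkt, inter=0.2)
for sent, received in answered:
    received.show()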
ga4gh/ga4gh-server
oidc-provider/simple_op/src/provider/server/server.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/oidc-provider/simple_op/src/provider/server/server.py#L70-L83
def pyoidcMiddleware(func): """Common wrapper for the underlying pyoidc library functions. Reads GET params and POST data before passing it on the library and converts the response from oic.utils.http_util to wsgi. :param func: underlying library function """ def wrapper(environ, start_response): data = get_or_post(environ) cookies = environ.get("HTTP_COOKIE", "") resp = func(request=data, cookie=cookies) return resp(environ, start_response) return wrapper
[ "def", "pyoidcMiddleware", "(", "func", ")", ":", "def", "wrapper", "(", "environ", ",", "start_response", ")", ":", "data", "=", "get_or_post", "(", "environ", ")", "cookies", "=", "environ", ".", "get", "(", "\"HTTP_COOKIE\"", ",", "\"\"", ")", "resp", "=", "func", "(", "request", "=", "data", ",", "cookie", "=", "cookies", ")", "return", "resp", "(", "environ", ",", "start_response", ")", "return", "wrapper" ]
Common wrapper for the underlying pyoidc library functions.
Reads GET params and POST data before passing it on to the library and
converts the response from oic.utils.http_util to wsgi.
:param func: underlying library function
[ "Common", "wrapper", "for", "the", "underlying", "pyoidc", "library", "functions", ".", "Reads", "GET", "params", "and", "POST", "data", "before", "passing", "it", "on", "the", "library", "and", "converts", "the", "response", "from", "oic", ".", "utils", ".", "http_util", "to", "wsgi", ".", ":", "param", "func", ":", "underlying", "library", "function" ]
python
train
36.357143
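A sketch of how pyoidcMiddleware might be wired into a WSGI routing table. The provider object and URL patterns are illustrative; the only assumption taken from the record is that each wrapped callable accepts request= and cookie= keyword arguments and returns a WSGI-callable response:

# Hypothetical routing table: every pyoidc endpoint callable is wrapped once.
urls = [
    ("^authorization", pyoidcMiddleware(provider.authorization_endpoint)),
    ("^token", pyoidcMiddleware(provider.token_endpoint)),
]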
myint/autoflake
autoflake.py
https://github.com/myint/autoflake/blob/68fea68646922b920d55975f9f2adaeafd84df4f/autoflake.py#L577-L583
def get_indentation(line): """Return leading whitespace.""" if line.strip(): non_whitespace_index = len(line) - len(line.lstrip()) return line[:non_whitespace_index] else: return ''
[ "def", "get_indentation", "(", "line", ")", ":", "if", "line", ".", "strip", "(", ")", ":", "non_whitespace_index", "=", "len", "(", "line", ")", "-", "len", "(", "line", ".", "lstrip", "(", ")", ")", "return", "line", "[", ":", "non_whitespace_index", "]", "else", ":", "return", "''" ]
Return leading whitespace.
[ "Return", "leading", "whitespace", "." ]
python
test
30.142857
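The behaviour of get_indentation is easy to check directly; blank and whitespace-only lines fall through to the empty string:

assert get_indentation("    x = 1\n") == "    "
assert get_indentation("\tif x:\n") == "\t"
assert get_indentation("   \n") == ""      # whitespace-only line
assert get_indentation("") == ""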
src-d/modelforge
modelforge/registry.py
https://github.com/src-d/modelforge/blob/4f73c2bf0318261ac01bc8b6c0d4250a5d303418/modelforge/registry.py#L17-L37
def initialize_registry(args: argparse.Namespace, backend: StorageBackend, log: logging.Logger): """ Initialize the registry and the index. :param args: :class:`argparse.Namespace` with "backend", "args", "force" and "log_level". :param backend: Backend which is responsible for working with model files. :param log: Logger supplied by supply_backend :return: None """ try: backend.reset(args.force) except ExistingBackendError: return 1 log.info("Resetting the index ...") backend.index.reset() try: backend.index.upload("reset", {}) except ValueError: return 1 log.info("Successfully initialized")
[ "def", "initialize_registry", "(", "args", ":", "argparse", ".", "Namespace", ",", "backend", ":", "StorageBackend", ",", "log", ":", "logging", ".", "Logger", ")", ":", "try", ":", "backend", ".", "reset", "(", "args", ".", "force", ")", "except", "ExistingBackendError", ":", "return", "1", "log", ".", "info", "(", "\"Resetting the index ...\"", ")", "backend", ".", "index", ".", "reset", "(", ")", "try", ":", "backend", ".", "index", ".", "upload", "(", "\"reset\"", ",", "{", "}", ")", "except", "ValueError", ":", "return", "1", "log", ".", "info", "(", "\"Successfully initialized\"", ")" ]
Initialize the registry and the index. :param args: :class:`argparse.Namespace` with "backend", "args", "force" and "log_level". :param backend: Backend which is responsible for working with model files. :param log: Logger supplied by supply_backend :return: None
[ "Initialize", "the", "registry", "and", "the", "index", "." ]
python
train
31.857143
Phylliade/ikpy
src/ikpy/chain.py
https://github.com/Phylliade/ikpy/blob/60e36d6163136942bf520d952db17123c658d0b6/src/ikpy/chain.py#L48-L83
def forward_kinematics(self, joints, full_kinematics=False): """Returns the transformation matrix of the forward kinematics Parameters ---------- joints: list The list of the positions of each joint. Note : Inactive joints must be in the list. full_kinematics: bool Return the transformation matrices of each joint Returns ------- frame_matrix: The transformation matrix """ frame_matrix = np.eye(4) if full_kinematics: frame_matrixes = [] if len(self.links) != len(joints): raise ValueError("Your joints vector length is {} but you have {} links".format(len(joints), len(self.links))) for index, (link, joint_angle) in enumerate(zip(self.links, joints)): # Compute iteratively the position # NB : Use asarray to avoid old sympy problems frame_matrix = np.dot(frame_matrix, np.asarray(link.get_transformation_matrix(joint_angle))) if full_kinematics: # rotation_axe = np.dot(frame_matrix, link.rotation) frame_matrixes.append(frame_matrix) # Return the matrix, or matrixes if full_kinematics: return frame_matrixes else: return frame_matrix
[ "def", "forward_kinematics", "(", "self", ",", "joints", ",", "full_kinematics", "=", "False", ")", ":", "frame_matrix", "=", "np", ".", "eye", "(", "4", ")", "if", "full_kinematics", ":", "frame_matrixes", "=", "[", "]", "if", "len", "(", "self", ".", "links", ")", "!=", "len", "(", "joints", ")", ":", "raise", "ValueError", "(", "\"Your joints vector length is {} but you have {} links\"", ".", "format", "(", "len", "(", "joints", ")", ",", "len", "(", "self", ".", "links", ")", ")", ")", "for", "index", ",", "(", "link", ",", "joint_angle", ")", "in", "enumerate", "(", "zip", "(", "self", ".", "links", ",", "joints", ")", ")", ":", "# Compute iteratively the position", "# NB : Use asarray to avoid old sympy problems", "frame_matrix", "=", "np", ".", "dot", "(", "frame_matrix", ",", "np", ".", "asarray", "(", "link", ".", "get_transformation_matrix", "(", "joint_angle", ")", ")", ")", "if", "full_kinematics", ":", "# rotation_axe = np.dot(frame_matrix, link.rotation)", "frame_matrixes", ".", "append", "(", "frame_matrix", ")", "# Return the matrix, or matrixes", "if", "full_kinematics", ":", "return", "frame_matrixes", "else", ":", "return", "frame_matrix" ]
Returns the transformation matrix of the forward kinematics Parameters ---------- joints: list The list of the positions of each joint. Note : Inactive joints must be in the list. full_kinematics: bool Return the transformation matrices of each joint Returns ------- frame_matrix: The transformation matrix
[ "Returns", "the", "transformation", "matrix", "of", "the", "forward", "kinematics" ]
python
train
36.194444
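A usage sketch for forward_kinematics, assuming an ikpy Chain built from a URDF file (the file name is a placeholder). The joints list must contain one value per link, inactive joints included, or the ValueError above is raised:

from ikpy.chain import Chain

chain = Chain.from_urdf_file("my_arm.urdf")       # placeholder URDF path
joints = [0.0] * len(chain.links)                 # one value per link
frame = chain.forward_kinematics(joints)          # 4x4 homogeneous transform
position = frame[:3, 3]                           # translation part of the end effector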
NASA-AMMOS/AIT-Core
ait/core/db.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/db.py#L467-L479
def _create_table(self, packet_defn): ''' Creates a database table for the given PacketDefinition Arguments packet_defn The :class:`ait.core.tlm.PacketDefinition` instance for which a table entry should be made. ''' cols = ('%s %s' % (defn.name, self._getTypename(defn)) for defn in packet_defn.fields) sql = 'CREATE TABLE IF NOT EXISTS %s (%s)' % (packet_defn.name, ', '.join(cols)) self._conn.execute(sql) self._conn.commit()
[ "def", "_create_table", "(", "self", ",", "packet_defn", ")", ":", "cols", "=", "(", "'%s %s'", "%", "(", "defn", ".", "name", ",", "self", ".", "_getTypename", "(", "defn", ")", ")", "for", "defn", "in", "packet_defn", ".", "fields", ")", "sql", "=", "'CREATE TABLE IF NOT EXISTS %s (%s)'", "%", "(", "packet_defn", ".", "name", ",", "', '", ".", "join", "(", "cols", ")", ")", "self", ".", "_conn", ".", "execute", "(", "sql", ")", "self", ".", "_conn", ".", "commit", "(", ")" ]
Creates a database table for the given PacketDefinition Arguments packet_defn The :class:`ait.core.tlm.PacketDefinition` instance for which a table entry should be made.
[ "Creates", "a", "database", "table", "for", "the", "given", "PacketDefinition" ]
python
train
39.846154
BD2KGenomics/toil-lib
src/toil_lib/spark.py
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/spark.py#L300-L317
def stop(self, fileStore): """ Stop spark and hdfs worker containers :param job: The underlying job. """ subprocess.call(["docker", "exec", self.sparkContainerID, "rm", "-r", "/ephemeral/spark"]) subprocess.call(["docker", "stop", self.sparkContainerID]) subprocess.call(["docker", "rm", self.sparkContainerID]) _log.info("Stopped Spark worker.") subprocess.call(["docker", "exec", self.hdfsContainerID, "rm", "-r", "/ephemeral/hdfs"]) subprocess.call(["docker", "stop", self.hdfsContainerID]) subprocess.call(["docker", "rm", self.hdfsContainerID]) _log.info("Stopped HDFS datanode.") return
[ "def", "stop", "(", "self", ",", "fileStore", ")", ":", "subprocess", ".", "call", "(", "[", "\"docker\"", ",", "\"exec\"", ",", "self", ".", "sparkContainerID", ",", "\"rm\"", ",", "\"-r\"", ",", "\"/ephemeral/spark\"", "]", ")", "subprocess", ".", "call", "(", "[", "\"docker\"", ",", "\"stop\"", ",", "self", ".", "sparkContainerID", "]", ")", "subprocess", ".", "call", "(", "[", "\"docker\"", ",", "\"rm\"", ",", "self", ".", "sparkContainerID", "]", ")", "_log", ".", "info", "(", "\"Stopped Spark worker.\"", ")", "subprocess", ".", "call", "(", "[", "\"docker\"", ",", "\"exec\"", ",", "self", ".", "hdfsContainerID", ",", "\"rm\"", ",", "\"-r\"", ",", "\"/ephemeral/hdfs\"", "]", ")", "subprocess", ".", "call", "(", "[", "\"docker\"", ",", "\"stop\"", ",", "self", ".", "hdfsContainerID", "]", ")", "subprocess", ".", "call", "(", "[", "\"docker\"", ",", "\"rm\"", ",", "self", ".", "hdfsContainerID", "]", ")", "_log", ".", "info", "(", "\"Stopped HDFS datanode.\"", ")", "return" ]
Stop spark and hdfs worker containers :param job: The underlying job.
[ "Stop", "spark", "and", "hdfs", "worker", "containers" ]
python
test
37.944444
wavycloud/pyboto3
pyboto3/appstream.py
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/appstream.py#L74-L181
def create_fleet(Name=None, ImageName=None, InstanceType=None, ComputeCapacity=None, VpcConfig=None, MaxUserDurationInSeconds=None, DisconnectTimeoutInSeconds=None, Description=None, DisplayName=None, EnableDefaultInternetAccess=None): """ Creates a new fleet. See also: AWS API Documentation :example: response = client.create_fleet( Name='string', ImageName='string', InstanceType='string', ComputeCapacity={ 'DesiredInstances': 123 }, VpcConfig={ 'SubnetIds': [ 'string', ] }, MaxUserDurationInSeconds=123, DisconnectTimeoutInSeconds=123, Description='string', DisplayName='string', EnableDefaultInternetAccess=True|False ) :type Name: string :param Name: [REQUIRED] A unique identifier for the fleet. :type ImageName: string :param ImageName: [REQUIRED] Unique name of the image used by the fleet. :type InstanceType: string :param InstanceType: [REQUIRED] The instance type of compute resources for the fleet. Fleet instances are launched from this instance type. :type ComputeCapacity: dict :param ComputeCapacity: [REQUIRED] The parameters for the capacity allocated to the fleet. DesiredInstances (integer) -- [REQUIRED]The desired number of streaming instances. :type VpcConfig: dict :param VpcConfig: The VPC configuration for the fleet. SubnetIds (list) --The list of subnets to which a network interface is established from the fleet instance. (string) -- :type MaxUserDurationInSeconds: integer :param MaxUserDurationInSeconds: The maximum time for which a streaming session can run. The input can be any numeric value in seconds between 600 and 57600. :type DisconnectTimeoutInSeconds: integer :param DisconnectTimeoutInSeconds: The time after disconnection when a session is considered to have ended. If a user who got disconnected reconnects within this timeout interval, the user is connected back to their previous session. The input can be any numeric value in seconds between 60 and 57600. :type Description: string :param Description: The description of the fleet. :type DisplayName: string :param DisplayName: The display name of the fleet. :type EnableDefaultInternetAccess: boolean :param EnableDefaultInternetAccess: Enables or disables default Internet access for the fleet. :rtype: dict :return: { 'Fleet': { 'Arn': 'string', 'Name': 'string', 'DisplayName': 'string', 'Description': 'string', 'ImageName': 'string', 'InstanceType': 'string', 'ComputeCapacityStatus': { 'Desired': 123, 'Running': 123, 'InUse': 123, 'Available': 123 }, 'MaxUserDurationInSeconds': 123, 'DisconnectTimeoutInSeconds': 123, 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED', 'VpcConfig': { 'SubnetIds': [ 'string', ] }, 'CreatedTime': datetime(2015, 1, 1), 'FleetErrors': [ { 'ErrorCode': 'IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION'|'NETWORK_INTERFACE_LIMIT_EXCEEDED'|'INTERNAL_SERVICE_ERROR'|'IAM_SERVICE_ROLE_IS_MISSING'|'SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION'|'SUBNET_NOT_FOUND'|'IMAGE_NOT_FOUND'|'INVALID_SUBNET_CONFIGURATION', 'ErrorMessage': 'string' }, ], 'EnableDefaultInternetAccess': True|False } } :returns: (string) -- """ pass
[ "def", "create_fleet", "(", "Name", "=", "None", ",", "ImageName", "=", "None", ",", "InstanceType", "=", "None", ",", "ComputeCapacity", "=", "None", ",", "VpcConfig", "=", "None", ",", "MaxUserDurationInSeconds", "=", "None", ",", "DisconnectTimeoutInSeconds", "=", "None", ",", "Description", "=", "None", ",", "DisplayName", "=", "None", ",", "EnableDefaultInternetAccess", "=", "None", ")", ":", "pass" ]
Creates a new fleet. See also: AWS API Documentation :example: response = client.create_fleet( Name='string', ImageName='string', InstanceType='string', ComputeCapacity={ 'DesiredInstances': 123 }, VpcConfig={ 'SubnetIds': [ 'string', ] }, MaxUserDurationInSeconds=123, DisconnectTimeoutInSeconds=123, Description='string', DisplayName='string', EnableDefaultInternetAccess=True|False ) :type Name: string :param Name: [REQUIRED] A unique identifier for the fleet. :type ImageName: string :param ImageName: [REQUIRED] Unique name of the image used by the fleet. :type InstanceType: string :param InstanceType: [REQUIRED] The instance type of compute resources for the fleet. Fleet instances are launched from this instance type. :type ComputeCapacity: dict :param ComputeCapacity: [REQUIRED] The parameters for the capacity allocated to the fleet. DesiredInstances (integer) -- [REQUIRED]The desired number of streaming instances. :type VpcConfig: dict :param VpcConfig: The VPC configuration for the fleet. SubnetIds (list) --The list of subnets to which a network interface is established from the fleet instance. (string) -- :type MaxUserDurationInSeconds: integer :param MaxUserDurationInSeconds: The maximum time for which a streaming session can run. The input can be any numeric value in seconds between 600 and 57600. :type DisconnectTimeoutInSeconds: integer :param DisconnectTimeoutInSeconds: The time after disconnection when a session is considered to have ended. If a user who got disconnected reconnects within this timeout interval, the user is connected back to their previous session. The input can be any numeric value in seconds between 60 and 57600. :type Description: string :param Description: The description of the fleet. :type DisplayName: string :param DisplayName: The display name of the fleet. :type EnableDefaultInternetAccess: boolean :param EnableDefaultInternetAccess: Enables or disables default Internet access for the fleet. :rtype: dict :return: { 'Fleet': { 'Arn': 'string', 'Name': 'string', 'DisplayName': 'string', 'Description': 'string', 'ImageName': 'string', 'InstanceType': 'string', 'ComputeCapacityStatus': { 'Desired': 123, 'Running': 123, 'InUse': 123, 'Available': 123 }, 'MaxUserDurationInSeconds': 123, 'DisconnectTimeoutInSeconds': 123, 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED', 'VpcConfig': { 'SubnetIds': [ 'string', ] }, 'CreatedTime': datetime(2015, 1, 1), 'FleetErrors': [ { 'ErrorCode': 'IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION'|'NETWORK_INTERFACE_LIMIT_EXCEEDED'|'INTERNAL_SERVICE_ERROR'|'IAM_SERVICE_ROLE_IS_MISSING'|'SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION'|'SUBNET_NOT_FOUND'|'IMAGE_NOT_FOUND'|'INVALID_SUBNET_CONFIGURATION', 'ErrorMessage': 'string' }, ], 'EnableDefaultInternetAccess': True|False } } :returns: (string) --
[ "Creates", "a", "new", "fleet", ".", "See", "also", ":", "AWS", "API", "Documentation", ":", "example", ":", "response", "=", "client", ".", "create_fleet", "(", "Name", "=", "string", "ImageName", "=", "string", "InstanceType", "=", "string", "ComputeCapacity", "=", "{", "DesiredInstances", ":", "123", "}", "VpcConfig", "=", "{", "SubnetIds", ":", "[", "string", "]", "}", "MaxUserDurationInSeconds", "=", "123", "DisconnectTimeoutInSeconds", "=", "123", "Description", "=", "string", "DisplayName", "=", "string", "EnableDefaultInternetAccess", "=", "True|False", ")", ":", "type", "Name", ":", "string", ":", "param", "Name", ":", "[", "REQUIRED", "]", "A", "unique", "identifier", "for", "the", "fleet", "." ]
python
train
36.675926
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/tools/common.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/common.py#L404-L422
def check_tool_aux(command): """ Checks if 'command' can be found either in path or is a full name to an existing file. """ assert isinstance(command, basestring) dirname = os.path.dirname(command) if dirname: if os.path.exists(command): return command # Both NT and Cygwin will run .exe files by their unqualified names. elif on_windows() and os.path.exists(command + '.exe'): return command # Only NT will run .bat files by their unqualified names. elif os_name() == 'NT' and os.path.exists(command + '.bat'): return command else: paths = path.programs_path() if path.glob(paths, [command]): return command
[ "def", "check_tool_aux", "(", "command", ")", ":", "assert", "isinstance", "(", "command", ",", "basestring", ")", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "command", ")", "if", "dirname", ":", "if", "os", ".", "path", ".", "exists", "(", "command", ")", ":", "return", "command", "# Both NT and Cygwin will run .exe files by their unqualified names.", "elif", "on_windows", "(", ")", "and", "os", ".", "path", ".", "exists", "(", "command", "+", "'.exe'", ")", ":", "return", "command", "# Only NT will run .bat files by their unqualified names.", "elif", "os_name", "(", ")", "==", "'NT'", "and", "os", ".", "path", ".", "exists", "(", "command", "+", "'.bat'", ")", ":", "return", "command", "else", ":", "paths", "=", "path", ".", "programs_path", "(", ")", "if", "path", ".", "glob", "(", "paths", ",", "[", "command", "]", ")", ":", "return", "command" ]
Checks if 'command' can be found either in path or is a full name to an existing file.
[ "Checks", "if", "command", "can", "be", "found", "either", "in", "path", "or", "is", "a", "full", "name", "to", "an", "existing", "file", "." ]
python
train
38.210526
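check_tool_aux accepts either a bare command name (looked up on the configured programs path) or a concrete path; both calls below are illustrative, and the function returns None when nothing is found:

found = check_tool_aux('gcc')                # searched via path.programs_path()
found_abs = check_tool_aux('/usr/bin/gcc')   # returned as-is if the file exists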
paramiko/paramiko
paramiko/sftp_file.py
https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/sftp_file.py#L565-L570
def _check_exception(self): """if there's a saved exception, raise & clear it""" if self._saved_exception is not None: x = self._saved_exception self._saved_exception = None raise x
[ "def", "_check_exception", "(", "self", ")", ":", "if", "self", ".", "_saved_exception", "is", "not", "None", ":", "x", "=", "self", ".", "_saved_exception", "self", ".", "_saved_exception", "=", "None", "raise", "x" ]
if there's a saved exception, raise & clear it
[ "if", "there", "s", "a", "saved", "exception", "raise", "&", "clear", "it" ]
python
train
38
scanny/python-pptx
pptx/opc/pkgreader.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/opc/pkgreader.py#L76-L83
def _srels_for(phys_reader, source_uri): """ Return |_SerializedRelationshipCollection| instance populated with relationships for source identified by *source_uri*. """ rels_xml = phys_reader.rels_xml_for(source_uri) return _SerializedRelationshipCollection.load_from_xml( source_uri.baseURI, rels_xml)
[ "def", "_srels_for", "(", "phys_reader", ",", "source_uri", ")", ":", "rels_xml", "=", "phys_reader", ".", "rels_xml_for", "(", "source_uri", ")", "return", "_SerializedRelationshipCollection", ".", "load_from_xml", "(", "source_uri", ".", "baseURI", ",", "rels_xml", ")" ]
Return |_SerializedRelationshipCollection| instance populated with relationships for source identified by *source_uri*.
[ "Return", "|_SerializedRelationshipCollection|", "instance", "populated", "with", "relationships", "for", "source", "identified", "by", "*", "source_uri", "*", "." ]
python
train
44.375
denniskempin/safetynet
safetynet.py
https://github.com/denniskempin/safetynet/blob/fbcc4a112370fc20696f003d901114b4fe26d984/safetynet.py#L459-L470
def _TypecheckDecorator(subject=None, **kwargs): """Dispatches type checks based on what the subject is. Functions or methods are annotated directly. If this method is called with keyword arguments only, return a decorator. """ if subject is None: return _TypecheckDecoratorFactory(kwargs) elif inspect.isfunction(subject) or inspect.ismethod(subject): return _TypecheckFunction(subject, {}, 2, None) else: raise TypeError()
[ "def", "_TypecheckDecorator", "(", "subject", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "subject", "is", "None", ":", "return", "_TypecheckDecoratorFactory", "(", "kwargs", ")", "elif", "inspect", ".", "isfunction", "(", "subject", ")", "or", "inspect", ".", "ismethod", "(", "subject", ")", ":", "return", "_TypecheckFunction", "(", "subject", ",", "{", "}", ",", "2", ",", "None", ")", "else", ":", "raise", "TypeError", "(", ")" ]
Dispatches type checks based on what the subject is. Functions or methods are annotated directly. If this method is called with keyword arguments only, return a decorator.
[ "Dispatches", "type", "checks", "based", "on", "what", "the", "subject", "is", "." ]
python
train
36.666667
pgxcentre/geneparse
geneparse/readers/dataframe.py
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/readers/dataframe.py#L70-L98
def get_variant_by_name(self, name): """Get the genotypes for a given variant (by name). Args: name (str): The name of the variant to retrieve the genotypes. Returns: list: A list of Genotypes. This is a list in order to keep the same behaviour as the other functions. """ try: geno = self.df.loc[:, name].values info = self.map_info.loc[name, :] except KeyError: # The variant is not in the data, so we return an empty # list logging.variant_name_not_found(name) return [] else: return [Genotypes( Variant(info.name, info.chrom, info.pos, [info.a1, info.a2]), geno, reference=info.a2, coded=info.a1, multiallelic=False, )]
[ "def", "get_variant_by_name", "(", "self", ",", "name", ")", ":", "try", ":", "geno", "=", "self", ".", "df", ".", "loc", "[", ":", ",", "name", "]", ".", "values", "info", "=", "self", ".", "map_info", ".", "loc", "[", "name", ",", ":", "]", "except", "KeyError", ":", "# The variant is not in the data, so we return an empty", "# list", "logging", ".", "variant_name_not_found", "(", "name", ")", "return", "[", "]", "else", ":", "return", "[", "Genotypes", "(", "Variant", "(", "info", ".", "name", ",", "info", ".", "chrom", ",", "info", ".", "pos", ",", "[", "info", ".", "a1", ",", "info", ".", "a2", "]", ")", ",", "geno", ",", "reference", "=", "info", ".", "a2", ",", "coded", "=", "info", ".", "a1", ",", "multiallelic", "=", "False", ",", ")", "]" ]
Get the genotypes for a given variant (by name). Args: name (str): The name of the variant to retrieve the genotypes. Returns: list: A list of Genotypes. This is a list in order to keep the same behaviour as the other functions.
[ "Get", "the", "genotypes", "for", "a", "given", "variant", "(", "by", "name", ")", "." ]
python
train
29.931034
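A small sketch of the lookup behaviour shown in get_variant_by_name; the reader construction is omitted and the variant name is a placeholder. The only behaviour relied on is what the record shows: an empty list when the name is absent, otherwise a one-element list of Genotypes:

genotypes = reader.get_variant_by_name("rs12345")    # hypothetical variant name
if not genotypes:
    print("variant not in the data")
else:
    geno = genotypes[0]    # single Genotypes object wrapping the dosage vector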
annayqho/TheCannon
code/lamost/xcalib_5labels/cross_validation.py
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/xcalib_5labels/cross_validation.py#L26-L32
def group_data(): """ Load the reference data, and assign each object a random integer from 0 to 7. Save the IDs. """ tr_obj = np.load("%s/ref_id.npz" %direc_ref)['arr_0'] groups = np.random.randint(0, 8, size=len(tr_obj)) np.savez("ref_groups.npz", groups)
[ "def", "group_data", "(", ")", ":", "tr_obj", "=", "np", ".", "load", "(", "\"%s/ref_id.npz\"", "%", "direc_ref", ")", "[", "'arr_0'", "]", "groups", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "8", ",", "size", "=", "len", "(", "tr_obj", ")", ")", "np", ".", "savez", "(", "\"ref_groups.npz\"", ",", "groups", ")" ]
Load the reference data, and assign each object a random integer from 0 to 7. Save the IDs.
[ "Load", "the", "reference", "data", "and", "assign", "each", "object", "a", "random", "integer", "from", "0", "to", "7", ".", "Save", "the", "IDs", "." ]
python
train
38.857143
ray-project/ray
python/ray/tune/suggest/suggestion.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/suggest/suggestion.py#L73-L102
def _generate_trials(self, experiment_spec, output_path=""): """Generates trials with configurations from `_suggest`. Creates a trial_id that is passed into `_suggest`. Yields: Trial objects constructed according to `spec` """ if "run" not in experiment_spec: raise TuneError("Must specify `run` in {}".format(experiment_spec)) for _ in range(experiment_spec.get("num_samples", 1)): trial_id = Trial.generate_id() while True: suggested_config = self._suggest(trial_id) if suggested_config is None: yield None else: break spec = copy.deepcopy(experiment_spec) spec["config"] = merge_dicts(spec["config"], suggested_config) flattened_config = resolve_nested_dict(spec["config"]) self._counter += 1 tag = "{0}_{1}".format( str(self._counter), format_vars(flattened_config)) yield create_trial_from_spec( spec, output_path, self._parser, experiment_tag=tag, trial_id=trial_id)
[ "def", "_generate_trials", "(", "self", ",", "experiment_spec", ",", "output_path", "=", "\"\"", ")", ":", "if", "\"run\"", "not", "in", "experiment_spec", ":", "raise", "TuneError", "(", "\"Must specify `run` in {}\"", ".", "format", "(", "experiment_spec", ")", ")", "for", "_", "in", "range", "(", "experiment_spec", ".", "get", "(", "\"num_samples\"", ",", "1", ")", ")", ":", "trial_id", "=", "Trial", ".", "generate_id", "(", ")", "while", "True", ":", "suggested_config", "=", "self", ".", "_suggest", "(", "trial_id", ")", "if", "suggested_config", "is", "None", ":", "yield", "None", "else", ":", "break", "spec", "=", "copy", ".", "deepcopy", "(", "experiment_spec", ")", "spec", "[", "\"config\"", "]", "=", "merge_dicts", "(", "spec", "[", "\"config\"", "]", ",", "suggested_config", ")", "flattened_config", "=", "resolve_nested_dict", "(", "spec", "[", "\"config\"", "]", ")", "self", ".", "_counter", "+=", "1", "tag", "=", "\"{0}_{1}\"", ".", "format", "(", "str", "(", "self", ".", "_counter", ")", ",", "format_vars", "(", "flattened_config", ")", ")", "yield", "create_trial_from_spec", "(", "spec", ",", "output_path", ",", "self", ".", "_parser", ",", "experiment_tag", "=", "tag", ",", "trial_id", "=", "trial_id", ")" ]
Generates trials with configurations from `_suggest`. Creates a trial_id that is passed into `_suggest`. Yields: Trial objects constructed according to `spec`
[ "Generates", "trials", "with", "configurations", "from", "_suggest", "." ]
python
train
39.9
flatangle/flatlib
flatlib/chart.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/chart.py#L105-L108
def getFixedStars(self): """ Returns a list with all fixed stars. """ IDs = const.LIST_FIXED_STARS return ephem.getFixedStarList(IDs, self.date)
[ "def", "getFixedStars", "(", "self", ")", ":", "IDs", "=", "const", ".", "LIST_FIXED_STARS", "return", "ephem", ".", "getFixedStarList", "(", "IDs", ",", "self", ".", "date", ")" ]
Returns a list with all fixed stars.
[ "Returns", "a", "list", "with", "all", "fixed", "stars", "." ]
python
train
41.25
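An illustrative flatlib call chain around getFixedStars; the date and location values are placeholders in the style of the library's documentation:

from flatlib.datetime import Datetime
from flatlib.geopos import GeoPos
from flatlib.chart import Chart

date = Datetime('2015/03/13', '17:00', '+00:00')
pos = GeoPos('38n32', '8w54')
chart = Chart(date, pos)
stars = chart.getFixedStars()     # one entry per ID in const.LIST_FIXED_STARS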
rocky/python-uncompyle6
uncompyle6/main.py
https://github.com/rocky/python-uncompyle6/blob/c5d7944e657f0ad05a0e2edd34e1acb27001abc0/uncompyle6/main.py#L47-L120
def decompile( bytecode_version, co, out=None, showasm=None, showast=False, timestamp=None, showgrammar=False, code_objects={}, source_size=None, is_pypy=None, magic_int=None, mapstream=None, do_fragments=False): """ ingests and deparses a given code block 'co' if `bytecode_version` is None, use the current Python intepreter version. Caller is responsible for closing `out` and `mapstream` """ if bytecode_version is None: bytecode_version = sysinfo2float() # store final output stream for case of error real_out = out or sys.stdout def write(s): s += '\n' real_out.write(s) assert iscode(co) co_pypy_str = 'PyPy ' if is_pypy else '' run_pypy_str = 'PyPy ' if IS_PYPY else '' sys_version_lines = sys.version.split('\n') write('# uncompyle6 version %s\n' '# %sPython bytecode %s%s\n# Decompiled from: %sPython %s' % (VERSION, co_pypy_str, bytecode_version, " (%s)" % str(magic_int) if magic_int else "", run_pypy_str, '\n# '.join(sys_version_lines))) if co.co_filename: write('# Embedded file name: %s' % co.co_filename,) if timestamp: write('# Compiled at: %s' % datetime.datetime.fromtimestamp(timestamp)) if source_size: write('# Size of source mod 2**32: %d bytes' % source_size) debug_opts = { 'asm': showasm, 'ast': showast, 'grammar': showgrammar } try: if mapstream: if isinstance(mapstream, str): mapstream = _get_outstream(mapstream) deparsed = deparse_code_with_map(bytecode_version, co, out, showasm, showast, showgrammar, code_objects = code_objects, is_pypy = is_pypy, ) header_count = 3+len(sys_version_lines) linemap = [(line_no, deparsed.source_linemap[line_no]+header_count) for line_no in sorted(deparsed.source_linemap.keys())] mapstream.write("\n\n# %s\n" % linemap) else: if do_fragments: deparse_fn = code_deparse_fragments else: deparse_fn = code_deparse deparsed = deparse_fn(co, out, bytecode_version, debug_opts = debug_opts, is_pypy=is_pypy) pass return deparsed except pysource.SourceWalkerError as e: # deparsing failed raise pysource.SourceWalkerError(str(e))
[ "def", "decompile", "(", "bytecode_version", ",", "co", ",", "out", "=", "None", ",", "showasm", "=", "None", ",", "showast", "=", "False", ",", "timestamp", "=", "None", ",", "showgrammar", "=", "False", ",", "code_objects", "=", "{", "}", ",", "source_size", "=", "None", ",", "is_pypy", "=", "None", ",", "magic_int", "=", "None", ",", "mapstream", "=", "None", ",", "do_fragments", "=", "False", ")", ":", "if", "bytecode_version", "is", "None", ":", "bytecode_version", "=", "sysinfo2float", "(", ")", "# store final output stream for case of error", "real_out", "=", "out", "or", "sys", ".", "stdout", "def", "write", "(", "s", ")", ":", "s", "+=", "'\\n'", "real_out", ".", "write", "(", "s", ")", "assert", "iscode", "(", "co", ")", "co_pypy_str", "=", "'PyPy '", "if", "is_pypy", "else", "''", "run_pypy_str", "=", "'PyPy '", "if", "IS_PYPY", "else", "''", "sys_version_lines", "=", "sys", ".", "version", ".", "split", "(", "'\\n'", ")", "write", "(", "'# uncompyle6 version %s\\n'", "'# %sPython bytecode %s%s\\n# Decompiled from: %sPython %s'", "%", "(", "VERSION", ",", "co_pypy_str", ",", "bytecode_version", ",", "\" (%s)\"", "%", "str", "(", "magic_int", ")", "if", "magic_int", "else", "\"\"", ",", "run_pypy_str", ",", "'\\n# '", ".", "join", "(", "sys_version_lines", ")", ")", ")", "if", "co", ".", "co_filename", ":", "write", "(", "'# Embedded file name: %s'", "%", "co", ".", "co_filename", ",", ")", "if", "timestamp", ":", "write", "(", "'# Compiled at: %s'", "%", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "timestamp", ")", ")", "if", "source_size", ":", "write", "(", "'# Size of source mod 2**32: %d bytes'", "%", "source_size", ")", "debug_opts", "=", "{", "'asm'", ":", "showasm", ",", "'ast'", ":", "showast", ",", "'grammar'", ":", "showgrammar", "}", "try", ":", "if", "mapstream", ":", "if", "isinstance", "(", "mapstream", ",", "str", ")", ":", "mapstream", "=", "_get_outstream", "(", "mapstream", ")", "deparsed", "=", "deparse_code_with_map", "(", "bytecode_version", ",", "co", ",", "out", ",", "showasm", ",", "showast", ",", "showgrammar", ",", "code_objects", "=", "code_objects", ",", "is_pypy", "=", "is_pypy", ",", ")", "header_count", "=", "3", "+", "len", "(", "sys_version_lines", ")", "linemap", "=", "[", "(", "line_no", ",", "deparsed", ".", "source_linemap", "[", "line_no", "]", "+", "header_count", ")", "for", "line_no", "in", "sorted", "(", "deparsed", ".", "source_linemap", ".", "keys", "(", ")", ")", "]", "mapstream", ".", "write", "(", "\"\\n\\n# %s\\n\"", "%", "linemap", ")", "else", ":", "if", "do_fragments", ":", "deparse_fn", "=", "code_deparse_fragments", "else", ":", "deparse_fn", "=", "code_deparse", "deparsed", "=", "deparse_fn", "(", "co", ",", "out", ",", "bytecode_version", ",", "debug_opts", "=", "debug_opts", ",", "is_pypy", "=", "is_pypy", ")", "pass", "return", "deparsed", "except", "pysource", ".", "SourceWalkerError", "as", "e", ":", "# deparsing failed", "raise", "pysource", ".", "SourceWalkerError", "(", "str", "(", "e", ")", ")" ]
ingests and deparses a given code block 'co' if `bytecode_version` is None, use the current Python intepreter version. Caller is responsible for closing `out` and `mapstream`
[ "ingests", "and", "deparses", "a", "given", "code", "block", "co" ]
python
train
35.891892
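A minimal sketch of calling decompile on a live code object, following the signature above; passing None as bytecode_version makes it use the running interpreter's version:

import sys

def sample(a, b):
    return a + b

decompile(None, sample.__code__, out=sys.stdout)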
helixyte/everest
everest/representers/mapping.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/representers/mapping.py#L249-L272
def map_to_resource(self, data_element, resource=None): """ Maps the given data element to a new resource or updates the given resource. :raises ValueError: If :param:`data_element` does not provide :class:`everest.representers.interfaces.IDataElement`. """ if not IDataElement.providedBy(data_element): # pylint:disable=E1101 raise ValueError('Expected data element, got %s.' % data_element) if resource is None: coll = \ create_staging_collection(data_element.mapping.mapped_class) agg = coll.get_aggregate() agg.add(data_element) if IMemberDataElement.providedBy(data_element): # pylint: disable=E1101 ent = next(iter(agg)) resource = \ data_element.mapping.mapped_class.create_from_entity(ent) else: resource = coll else: resource.update(data_element) return resource
[ "def", "map_to_resource", "(", "self", ",", "data_element", ",", "resource", "=", "None", ")", ":", "if", "not", "IDataElement", ".", "providedBy", "(", "data_element", ")", ":", "# pylint:disable=E1101", "raise", "ValueError", "(", "'Expected data element, got %s.'", "%", "data_element", ")", "if", "resource", "is", "None", ":", "coll", "=", "create_staging_collection", "(", "data_element", ".", "mapping", ".", "mapped_class", ")", "agg", "=", "coll", ".", "get_aggregate", "(", ")", "agg", ".", "add", "(", "data_element", ")", "if", "IMemberDataElement", ".", "providedBy", "(", "data_element", ")", ":", "# pylint: disable=E1101", "ent", "=", "next", "(", "iter", "(", "agg", ")", ")", "resource", "=", "data_element", ".", "mapping", ".", "mapped_class", ".", "create_from_entity", "(", "ent", ")", "else", ":", "resource", "=", "coll", "else", ":", "resource", ".", "update", "(", "data_element", ")", "return", "resource" ]
Maps the given data element to a new resource or updates the given resource. :raises ValueError: If :param:`data_element` does not provide :class:`everest.representers.interfaces.IDataElement`.
[ "Maps", "the", "given", "data", "element", "to", "a", "new", "resource", "or", "updates", "the", "given", "resource", "." ]
python
train
41.625
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L4210-L4217
def relaxNGValidatePushElement(self, ctxt, elem): """Push a new element start on the RelaxNG validation stack. """ if ctxt is None: ctxt__o = None else: ctxt__o = ctxt._o if elem is None: elem__o = None else: elem__o = elem._o ret = libxml2mod.xmlRelaxNGValidatePushElement(ctxt__o, self._o, elem__o) return ret
[ "def", "relaxNGValidatePushElement", "(", "self", ",", "ctxt", ",", "elem", ")", ":", "if", "ctxt", "is", "None", ":", "ctxt__o", "=", "None", "else", ":", "ctxt__o", "=", "ctxt", ".", "_o", "if", "elem", "is", "None", ":", "elem__o", "=", "None", "else", ":", "elem__o", "=", "elem", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlRelaxNGValidatePushElement", "(", "ctxt__o", ",", "self", ".", "_o", ",", "elem__o", ")", "return", "ret" ]
Push a new element start on the RelaxNG validation stack.
[ "Push", "a", "new", "element", "start", "on", "the", "RelaxNG", "validation", "stack", "." ]
python
train
45
peterldowns/python-mustache
mustache/utils.py
https://github.com/peterldowns/python-mustache/blob/ea3753696ea9886b6eb39cc5de27db7054adc069/mustache/utils.py#L12-L14
def html_escape(s, encoding='utf-8', encoding_errors='strict'): """ Return the HTML-escaped version of an input. """ return escape(make_unicode(s, encoding, encoding_errors), quote=True)
[ "def", "html_escape", "(", "s", ",", "encoding", "=", "'utf-8'", ",", "encoding_errors", "=", "'strict'", ")", ":", "return", "escape", "(", "make_unicode", "(", "s", ",", "encoding", ",", "encoding_errors", ")", ",", "quote", "=", "True", ")" ]
Return the HTML-escaped version of an input.
[ "Return", "the", "HTML", "-", "escaped", "version", "of", "an", "input", "." ]
python
train
64
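html_escape defers to escape(..., quote=True) after coercing its input to unicode, so a call like the one below should escape angle brackets, ampersands and double quotes; the exact entities depend on the underlying escape helper, so the output shown is only indicative:

html_escape('<a href="/x?a=1&b=2">link</a>')
# e.g. '&lt;a href=&quot;/x?a=1&amp;b=2&quot;&gt;link&lt;/a&gt;'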
SpamScope/mail-parser
mailparser/mailparser.py
https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L191-L204
def from_file_msg(cls, fp): """ Init a new object from a Outlook message file, mime type: application/vnd.ms-outlook Args: fp (string): file path of raw Outlook email Returns: Instance of MailParser """ log.debug("Parsing email from file Outlook") f, _ = msgconvert(fp) return cls.from_file(f, True)
[ "def", "from_file_msg", "(", "cls", ",", "fp", ")", ":", "log", ".", "debug", "(", "\"Parsing email from file Outlook\"", ")", "f", ",", "_", "=", "msgconvert", "(", "fp", ")", "return", "cls", ".", "from_file", "(", "f", ",", "True", ")" ]
Init a new object from an Outlook message file,
mime type: application/vnd.ms-outlook

Args:
    fp (string): file path of raw Outlook email

Returns:
    Instance of MailParser
[ "Init", "a", "new", "object", "from", "a", "Outlook", "message", "file", "mime", "type", ":", "application", "/", "vnd", ".", "ms", "-", "outlook" ]
python
train
27.428571
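Illustrative use of from_file_msg, assuming the surrounding MailParser class and an Outlook .msg file on disk; the path is a placeholder and the printed attribute is a usual mail-parser property rather than anything shown in this record:

mail = MailParser.from_file_msg("/tmp/message.msg")
print(mail.subject)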
shanbay/sea
sea/local.py
https://github.com/shanbay/sea/blob/a4484a571e3d68cc333411264077a59ea20cc5f1/sea/local.py#L58-L71
def _get_current_object(self): """Get current object. This is useful if you want the real object behind the proxy at a time for performance reasons or because you want to pass the object into a different context. """ loc = object.__getattribute__(self, '_Proxy__local') if not hasattr(loc, '__release_local__'): return loc(*self.__args, **self.__kwargs) try: # pragma: no cover # not sure what this is about return getattr(loc, self.__name__) except AttributeError: # pragma: no cover raise RuntimeError('no object bound to {0.__name__}'.format(self))
[ "def", "_get_current_object", "(", "self", ")", ":", "loc", "=", "object", ".", "__getattribute__", "(", "self", ",", "'_Proxy__local'", ")", "if", "not", "hasattr", "(", "loc", ",", "'__release_local__'", ")", ":", "return", "loc", "(", "*", "self", ".", "__args", ",", "*", "*", "self", ".", "__kwargs", ")", "try", ":", "# pragma: no cover", "# not sure what this is about", "return", "getattr", "(", "loc", ",", "self", ".", "__name__", ")", "except", "AttributeError", ":", "# pragma: no cover", "raise", "RuntimeError", "(", "'no object bound to {0.__name__}'", ".", "format", "(", "self", ")", ")" ]
Get current object. This is useful if you want the real object behind the proxy at a time for performance reasons or because you want to pass the object into a different context.
[ "Get", "current", "object", ".", "This", "is", "useful", "if", "you", "want", "the", "real", "object", "behind", "the", "proxy", "at", "a", "time", "for", "performance", "reasons", "or", "because", "you", "want", "to", "pass", "the", "object", "into", "a", "different", "context", "." ]
python
test
47.142857
hellosign/hellosign-python-sdk
hellosign_sdk/hsclient.py
https://github.com/hellosign/hellosign-python-sdk/blob/4325a29ad5766380a214eac3914511f62f7ecba4/hellosign_sdk/hsclient.py#L948-L961
def remove_team_member(self, account_id=None, email_address=None): ''' Remove a user from your Team Args: account_id (str): The id of the account of the user to remove from your team. email_address (str): The email address of the account to remove from your team. The account id prevails if both account_id and email_address are provided. Returns: A Team object ''' return self._add_remove_team_member(self.TEAM_REMOVE_MEMBER_URL, email_address, account_id)
[ "def", "remove_team_member", "(", "self", ",", "account_id", "=", "None", ",", "email_address", "=", "None", ")", ":", "return", "self", ".", "_add_remove_team_member", "(", "self", ".", "TEAM_REMOVE_MEMBER_URL", ",", "email_address", ",", "account_id", ")" ]
Remove a user from your Team Args: account_id (str): The id of the account of the user to remove from your team. email_address (str): The email address of the account to remove from your team. The account id prevails if both account_id and email_address are provided. Returns: A Team object
[ "Remove", "a", "user", "from", "your", "Team" ]
python
train
38.142857
biolink/ontobio
ontobio/slimmer.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/slimmer.py#L5-L75
def get_minimal_subgraph(g, nodes): """ given a set of nodes, extract a subgraph that excludes non-informative nodes - i.e. those that are not MRCAs of pairs of existing nodes. Note: no property chain reasoning is performed. As a result, edge labels are lost. """ logging.info("Slimming {} to {}".format(g,nodes)) # maps ancestor nodes to members of the focus node set they subsume mm = {} subnodes = set() for n in nodes: subnodes.add(n) ancs = nx.ancestors(g, n) ancs.add(n) for a in ancs: subnodes.add(a) if a not in mm: mm[a] = set() mm[a].add(n) # merge graph egraph = nx.MultiDiGraph() # TODO: ensure edge labels are preserved for a, aset in mm.items(): for p in g.predecessors(a): logging.info(" cmp {} -> {} // {} {}".format(len(aset),len(mm[p]), a, p)) if p in mm and len(aset) == len(mm[p]): egraph.add_edge(p, a) egraph.add_edge(a, p) logging.info("will merge {} <-> {} (members identical)".format(p,a)) nmap = {} leafmap = {} disposable = set() for cliq in nx.strongly_connected_components(egraph): leaders = set() leafs = set() for n in cliq: is_src = False if n in nodes: logging.info("Preserving: {} in {}".format(n,cliq)) leaders.add(n) is_src = True is_leaf = True for p in g.successors(n): if p in cliq: is_leaf = False if not(is_leaf or is_src): disposable.add(n) if is_leaf: logging.info("Clique leaf: {} in {}".format(n,cliq)) leafs.add(n) leader = None if len(leaders) > 1: logging.info("UHOH: {}".format(leaders)) if len(leaders) > 0: leader = list(leaders)[0] else: leader = list(leafs)[0] leafmap[n] = leafs subg = g.subgraph(subnodes) fg = remove_nodes(subg, disposable) return fg
[ "def", "get_minimal_subgraph", "(", "g", ",", "nodes", ")", ":", "logging", ".", "info", "(", "\"Slimming {} to {}\"", ".", "format", "(", "g", ",", "nodes", ")", ")", "# maps ancestor nodes to members of the focus node set they subsume", "mm", "=", "{", "}", "subnodes", "=", "set", "(", ")", "for", "n", "in", "nodes", ":", "subnodes", ".", "add", "(", "n", ")", "ancs", "=", "nx", ".", "ancestors", "(", "g", ",", "n", ")", "ancs", ".", "add", "(", "n", ")", "for", "a", "in", "ancs", ":", "subnodes", ".", "add", "(", "a", ")", "if", "a", "not", "in", "mm", ":", "mm", "[", "a", "]", "=", "set", "(", ")", "mm", "[", "a", "]", ".", "add", "(", "n", ")", "# merge graph", "egraph", "=", "nx", ".", "MultiDiGraph", "(", ")", "# TODO: ensure edge labels are preserved", "for", "a", ",", "aset", "in", "mm", ".", "items", "(", ")", ":", "for", "p", "in", "g", ".", "predecessors", "(", "a", ")", ":", "logging", ".", "info", "(", "\" cmp {} -> {} // {} {}\"", ".", "format", "(", "len", "(", "aset", ")", ",", "len", "(", "mm", "[", "p", "]", ")", ",", "a", ",", "p", ")", ")", "if", "p", "in", "mm", "and", "len", "(", "aset", ")", "==", "len", "(", "mm", "[", "p", "]", ")", ":", "egraph", ".", "add_edge", "(", "p", ",", "a", ")", "egraph", ".", "add_edge", "(", "a", ",", "p", ")", "logging", ".", "info", "(", "\"will merge {} <-> {} (members identical)\"", ".", "format", "(", "p", ",", "a", ")", ")", "nmap", "=", "{", "}", "leafmap", "=", "{", "}", "disposable", "=", "set", "(", ")", "for", "cliq", "in", "nx", ".", "strongly_connected_components", "(", "egraph", ")", ":", "leaders", "=", "set", "(", ")", "leafs", "=", "set", "(", ")", "for", "n", "in", "cliq", ":", "is_src", "=", "False", "if", "n", "in", "nodes", ":", "logging", ".", "info", "(", "\"Preserving: {} in {}\"", ".", "format", "(", "n", ",", "cliq", ")", ")", "leaders", ".", "add", "(", "n", ")", "is_src", "=", "True", "is_leaf", "=", "True", "for", "p", "in", "g", ".", "successors", "(", "n", ")", ":", "if", "p", "in", "cliq", ":", "is_leaf", "=", "False", "if", "not", "(", "is_leaf", "or", "is_src", ")", ":", "disposable", ".", "add", "(", "n", ")", "if", "is_leaf", ":", "logging", ".", "info", "(", "\"Clique leaf: {} in {}\"", ".", "format", "(", "n", ",", "cliq", ")", ")", "leafs", ".", "add", "(", "n", ")", "leader", "=", "None", "if", "len", "(", "leaders", ")", ">", "1", ":", "logging", ".", "info", "(", "\"UHOH: {}\"", ".", "format", "(", "leaders", ")", ")", "if", "len", "(", "leaders", ")", ">", "0", ":", "leader", "=", "list", "(", "leaders", ")", "[", "0", "]", "else", ":", "leader", "=", "list", "(", "leafs", ")", "[", "0", "]", "leafmap", "[", "n", "]", "=", "leafs", "subg", "=", "g", ".", "subgraph", "(", "subnodes", ")", "fg", "=", "remove_nodes", "(", "subg", ",", "disposable", ")", "return", "fg" ]
given a set of nodes, extract a subgraph that excludes non-informative nodes - i.e. those that are not MRCAs of pairs of existing nodes. Note: no property chain reasoning is performed. As a result, edge labels are lost.
[ "given", "a", "set", "of", "nodes", "extract", "a", "subgraph", "that", "excludes", "non", "-", "informative", "nodes", "-", "i", ".", "e", ".", "those", "that", "are", "not", "MRCAs", "of", "pairs", "of", "existing", "nodes", ".", "Note", ":", "no", "property", "chain", "reasoning", "is", "performed", ".", "As", "a", "result", "edge", "labels", "are", "lost", "." ]
python
train
30.28169
fermiPy/fermipy
fermipy/validate/tools.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/validate/tools.py#L398-L430
def calc_containment(self): """Calculate PSF containment.""" hists = self.hists hists_out = self._hists_eff quantiles = [0.34, 0.68, 0.90, 0.95] cth_axis_idx = dict(evclass=2, evtype=3) for k in ['evclass']: # ,'evtype']: print(k) non = hists['%s_psf_on' % k] noff = hists['%s_psf_off' % k] alpha = hists['%s_alpha' % k][..., None] if k == 'evclass': sep = self._sep_bins[None, :, None, 1:] else: sep = self._sep_bins[None, None, :, None, 1:] qval, qerr = calc_quantiles(sep, non, noff, alpha, quantiles) for i, q in enumerate(quantiles): hists_out['%s_cth_q%2i' % (k, q * 100)] = qval[i] hists_out['%s_cth_q%2i_err' % (k, q * 100)] = qerr[i] non = np.sum(non, axis=cth_axis_idx[k]) noff = np.sum(noff, axis=cth_axis_idx[k]) alpha = np.squeeze(alpha, axis=cth_axis_idx[k]) sep = np.squeeze(sep, axis=cth_axis_idx[k]) qval, qerr = calc_quantiles(sep, non, noff, alpha, quantiles) for i, q in enumerate(quantiles): hists_out['%s_q%2i' % (k, q * 100)] = qval[i] hists_out['%s_q%2i_err' % (k, q * 100)] = qerr[i]
[ "def", "calc_containment", "(", "self", ")", ":", "hists", "=", "self", ".", "hists", "hists_out", "=", "self", ".", "_hists_eff", "quantiles", "=", "[", "0.34", ",", "0.68", ",", "0.90", ",", "0.95", "]", "cth_axis_idx", "=", "dict", "(", "evclass", "=", "2", ",", "evtype", "=", "3", ")", "for", "k", "in", "[", "'evclass'", "]", ":", "# ,'evtype']:", "print", "(", "k", ")", "non", "=", "hists", "[", "'%s_psf_on'", "%", "k", "]", "noff", "=", "hists", "[", "'%s_psf_off'", "%", "k", "]", "alpha", "=", "hists", "[", "'%s_alpha'", "%", "k", "]", "[", "...", ",", "None", "]", "if", "k", "==", "'evclass'", ":", "sep", "=", "self", ".", "_sep_bins", "[", "None", ",", ":", ",", "None", ",", "1", ":", "]", "else", ":", "sep", "=", "self", ".", "_sep_bins", "[", "None", ",", "None", ",", ":", ",", "None", ",", "1", ":", "]", "qval", ",", "qerr", "=", "calc_quantiles", "(", "sep", ",", "non", ",", "noff", ",", "alpha", ",", "quantiles", ")", "for", "i", ",", "q", "in", "enumerate", "(", "quantiles", ")", ":", "hists_out", "[", "'%s_cth_q%2i'", "%", "(", "k", ",", "q", "*", "100", ")", "]", "=", "qval", "[", "i", "]", "hists_out", "[", "'%s_cth_q%2i_err'", "%", "(", "k", ",", "q", "*", "100", ")", "]", "=", "qerr", "[", "i", "]", "non", "=", "np", ".", "sum", "(", "non", ",", "axis", "=", "cth_axis_idx", "[", "k", "]", ")", "noff", "=", "np", ".", "sum", "(", "noff", ",", "axis", "=", "cth_axis_idx", "[", "k", "]", ")", "alpha", "=", "np", ".", "squeeze", "(", "alpha", ",", "axis", "=", "cth_axis_idx", "[", "k", "]", ")", "sep", "=", "np", ".", "squeeze", "(", "sep", ",", "axis", "=", "cth_axis_idx", "[", "k", "]", ")", "qval", ",", "qerr", "=", "calc_quantiles", "(", "sep", ",", "non", ",", "noff", ",", "alpha", ",", "quantiles", ")", "for", "i", ",", "q", "in", "enumerate", "(", "quantiles", ")", ":", "hists_out", "[", "'%s_q%2i'", "%", "(", "k", ",", "q", "*", "100", ")", "]", "=", "qval", "[", "i", "]", "hists_out", "[", "'%s_q%2i_err'", "%", "(", "k", ",", "q", "*", "100", ")", "]", "=", "qerr", "[", "i", "]" ]
Calculate PSF containment.
[ "Calculate", "PSF", "containment", "." ]
python
train
39.242424
iotile/coretools
iotilebuild/iotile/build/config/site_scons/trub_script.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/site_scons/trub_script.py#L15-L61
def build_update_script(file_name, slot_assignments=None, os_info=None, sensor_graph=None, app_info=None, use_safeupdate=False): """Build a trub script that loads given firmware into the given slots. slot_assignments should be a list of tuples in the following form: ("slot X" or "controller", firmware_image_name) The output of this autobuild action will be a trub script in build/output/<file_name> that assigns the given firmware to each slot in the order specified in the slot_assignments list. Args: file_name (str): The name of the output file that we should create. This file name should end in .trub slot_assignments (list of (str, str)): A list of tuples containing the slot name and the firmware image that we should use to build our update script. Optional os_info (tuple(int, str)): A tuple of OS version tag and X.Y version number that will be set as part of the OTA script if included. Optional. sensor_graph (str): Name of sgf file. Optional. app_info (tuple(int, str)): A tuple of App version tag and X.Y version number that will be set as part of the OTA script if included. Optional. use_safeupdate (bool): Enables safe firmware update """ resolver = ProductResolver.Create() env = Environment(tools=[]) files = [] if slot_assignments is not None: slots = [_parse_slot(x[0]) for x in slot_assignments] files = [ensure_image_is_hex(resolver.find_unique("firmware_image", x[1]).full_path) for x in slot_assignments] env['SLOTS'] = slots else: env['SLOTS'] = None env['USE_SAFEUPDATE'] = use_safeupdate env['OS_INFO'] = os_info env['APP_INFO'] = app_info env['UPDATE_SENSORGRAPH'] = False if sensor_graph is not None: files.append(sensor_graph) env['UPDATE_SENSORGRAPH'] = True env.Command([os.path.join('build', 'output', file_name)], files, action=Action(_build_reflash_script_action, "Building TRUB script at $TARGET"))
[ "def", "build_update_script", "(", "file_name", ",", "slot_assignments", "=", "None", ",", "os_info", "=", "None", ",", "sensor_graph", "=", "None", ",", "app_info", "=", "None", ",", "use_safeupdate", "=", "False", ")", ":", "resolver", "=", "ProductResolver", ".", "Create", "(", ")", "env", "=", "Environment", "(", "tools", "=", "[", "]", ")", "files", "=", "[", "]", "if", "slot_assignments", "is", "not", "None", ":", "slots", "=", "[", "_parse_slot", "(", "x", "[", "0", "]", ")", "for", "x", "in", "slot_assignments", "]", "files", "=", "[", "ensure_image_is_hex", "(", "resolver", ".", "find_unique", "(", "\"firmware_image\"", ",", "x", "[", "1", "]", ")", ".", "full_path", ")", "for", "x", "in", "slot_assignments", "]", "env", "[", "'SLOTS'", "]", "=", "slots", "else", ":", "env", "[", "'SLOTS'", "]", "=", "None", "env", "[", "'USE_SAFEUPDATE'", "]", "=", "use_safeupdate", "env", "[", "'OS_INFO'", "]", "=", "os_info", "env", "[", "'APP_INFO'", "]", "=", "app_info", "env", "[", "'UPDATE_SENSORGRAPH'", "]", "=", "False", "if", "sensor_graph", "is", "not", "None", ":", "files", ".", "append", "(", "sensor_graph", ")", "env", "[", "'UPDATE_SENSORGRAPH'", "]", "=", "True", "env", ".", "Command", "(", "[", "os", ".", "path", ".", "join", "(", "'build'", ",", "'output'", ",", "file_name", ")", "]", ",", "files", ",", "action", "=", "Action", "(", "_build_reflash_script_action", ",", "\"Building TRUB script at $TARGET\"", ")", ")" ]
Build a trub script that loads given firmware into the given slots. slot_assignments should be a list of tuples in the following form: ("slot X" or "controller", firmware_image_name) The output of this autobuild action will be a trub script in build/output/<file_name> that assigns the given firmware to each slot in the order specified in the slot_assignments list. Args: file_name (str): The name of the output file that we should create. This file name should end in .trub slot_assignments (list of (str, str)): A list of tuples containing the slot name and the firmware image that we should use to build our update script. Optional os_info (tuple(int, str)): A tuple of OS version tag and X.Y version number that will be set as part of the OTA script if included. Optional. sensor_graph (str): Name of sgf file. Optional. app_info (tuple(int, str)): A tuple of App version tag and X.Y version number that will be set as part of the OTA script if included. Optional. use_safeupdate (bool): Enables safe firmware update
[ "Build", "a", "trub", "script", "that", "loads", "given", "firmware", "into", "the", "given", "slots", "." ]
python
train
44.06383
benhoff/vexbot
vexbot/adapters/shell/interfaces.py
https://github.com/benhoff/vexbot/blob/9b844eb20e84eea92a0e7db7d86a90094956c38f/vexbot/adapters/shell/interfaces.py#L17-L26
def _remove_word(completer): """ Used to remove words from the completors """ def inner(word: str): try: completer.words.remove(word) except Exception: pass return inner
[ "def", "_remove_word", "(", "completer", ")", ":", "def", "inner", "(", "word", ":", "str", ")", ":", "try", ":", "completer", ".", "words", ".", "remove", "(", "word", ")", "except", "Exception", ":", "pass", "return", "inner" ]
Used to remove words from the completors
[ "Used", "to", "remove", "words", "from", "the", "completors" ]
python
train
22
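A small sketch of the closure returned by _remove_word. Any object with a mutable words list works; a prompt_toolkit WordCompleter is used here only as a convenient example:

from prompt_toolkit.completion import WordCompleter

completer = WordCompleter(['start', 'stop', 'status'])
remove = _remove_word(completer)

remove('stop')        # 'stop' is dropped from completer.words
remove('missing')     # unknown words are silently ignored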
helixyte/everest
everest/resources/utils.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/resources/utils.py#L103-L122
def as_member(entity, parent=None): """ Adapts an object to a location aware member resource. :param entity: a domain object for which a resource adapter has been registered :type entity: an object implementing :class:`everest.entities.interfaces.IEntity` :param parent: optional parent collection resource to make the new member a child of :type parent: an object implementing :class:`everest.resources.interfaces.ICollectionResource` :returns: an object implementing :class:`everest.resources.interfaces.IMemberResource` """ reg = get_current_registry() rc = reg.getAdapter(entity, IMemberResource) if not parent is None: rc.__parent__ = parent # interface method pylint: disable=E1121 return rc
[ "def", "as_member", "(", "entity", ",", "parent", "=", "None", ")", ":", "reg", "=", "get_current_registry", "(", ")", "rc", "=", "reg", ".", "getAdapter", "(", "entity", ",", "IMemberResource", ")", "if", "not", "parent", "is", "None", ":", "rc", ".", "__parent__", "=", "parent", "# interface method pylint: disable=E1121", "return", "rc" ]
Adapts an object to a location aware member resource. :param entity: a domain object for which a resource adapter has been registered :type entity: an object implementing :class:`everest.entities.interfaces.IEntity` :param parent: optional parent collection resource to make the new member a child of :type parent: an object implementing :class:`everest.resources.interfaces.ICollectionResource` :returns: an object implementing :class:`everest.resources.interfaces.IMemberResource`
[ "Adapts", "an", "object", "to", "a", "location", "aware", "member", "resource", "." ]
python
train
38.75
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/config/loader.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/config/loader.py#L648-L672
def _convert_to_config(self): """self.parsed_data->self.config, parse unrecognized extra args via KVLoader.""" # remove subconfigs list from namespace before transforming the Namespace if '_flags' in self.parsed_data: subcs = self.parsed_data._flags del self.parsed_data._flags else: subcs = [] for k, v in vars(self.parsed_data).iteritems(): if v is None: # it was a flag that shares the name of an alias subcs.append(self.alias_flags[k]) else: # eval the KV assignment self._exec_config_str(k, v) for subc in subcs: self._load_flag(subc) if self.extra_args: sub_parser = KeyValueConfigLoader() sub_parser.load_config(self.extra_args) self.config._merge(sub_parser.config) self.extra_args = sub_parser.extra_args
[ "def", "_convert_to_config", "(", "self", ")", ":", "# remove subconfigs list from namespace before transforming the Namespace", "if", "'_flags'", "in", "self", ".", "parsed_data", ":", "subcs", "=", "self", ".", "parsed_data", ".", "_flags", "del", "self", ".", "parsed_data", ".", "_flags", "else", ":", "subcs", "=", "[", "]", "for", "k", ",", "v", "in", "vars", "(", "self", ".", "parsed_data", ")", ".", "iteritems", "(", ")", ":", "if", "v", "is", "None", ":", "# it was a flag that shares the name of an alias", "subcs", ".", "append", "(", "self", ".", "alias_flags", "[", "k", "]", ")", "else", ":", "# eval the KV assignment", "self", ".", "_exec_config_str", "(", "k", ",", "v", ")", "for", "subc", "in", "subcs", ":", "self", ".", "_load_flag", "(", "subc", ")", "if", "self", ".", "extra_args", ":", "sub_parser", "=", "KeyValueConfigLoader", "(", ")", "sub_parser", ".", "load_config", "(", "self", ".", "extra_args", ")", "self", ".", "config", ".", "_merge", "(", "sub_parser", ".", "config", ")", "self", ".", "extra_args", "=", "sub_parser", ".", "extra_args" ]
self.parsed_data->self.config, parse unrecognized extra args via KVLoader.
[ "self", ".", "parsed_data", "-", ">", "self", ".", "config", "parse", "unrecognized", "extra", "args", "via", "KVLoader", "." ]
python
test
37.28
mikedh/trimesh
trimesh/util.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/util.py#L1109-L1139
def decode_keys(store, encoding='utf-8'): """ If a dictionary has keys that are bytes decode them to a str. Parameters --------- store : dict Dictionary with data Returns --------- result : dict Values are untouched but keys that were bytes are converted to ASCII strings. Example ----------- In [1]: d Out[1]: {1020: 'nah', b'hi': 'stuff'} In [2]: trimesh.util.decode_keys(d) Out[2]: {1020: 'nah', 'hi': 'stuff'} """ keys = store.keys() for key in keys: if hasattr(key, 'decode'): decoded = key.decode(encoding) if key != decoded: store[key.decode(encoding)] = store[key] store.pop(key) return store
[ "def", "decode_keys", "(", "store", ",", "encoding", "=", "'utf-8'", ")", ":", "keys", "=", "store", ".", "keys", "(", ")", "for", "key", "in", "keys", ":", "if", "hasattr", "(", "key", ",", "'decode'", ")", ":", "decoded", "=", "key", ".", "decode", "(", "encoding", ")", "if", "key", "!=", "decoded", ":", "store", "[", "key", ".", "decode", "(", "encoding", ")", "]", "=", "store", "[", "key", "]", "store", ".", "pop", "(", "key", ")", "return", "store" ]
If a dictionary has keys that are bytes decode them to a str. Parameters --------- store : dict Dictionary with data Returns --------- result : dict Values are untouched but keys that were bytes are converted to ASCII strings. Example ----------- In [1]: d Out[1]: {1020: 'nah', b'hi': 'stuff'} In [2]: trimesh.util.decode_keys(d) Out[2]: {1020: 'nah', 'hi': 'stuff'}
[ "If", "a", "dictionary", "has", "keys", "that", "are", "bytes", "decode", "them", "to", "a", "str", "." ]
python
train
23.483871
flatangle/flatlib
flatlib/predictives/primarydirections.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/predictives/primarydirections.py#L118-L131
def _buildTerms(self): """ Builds a data structure indexing the terms longitude by sign and object. """ termLons = tables.termLons(tables.EGYPTIAN_TERMS) res = {} for (ID, sign, lon) in termLons: try: res[sign][ID] = lon except KeyError: res[sign] = {} res[sign][ID] = lon return res
[ "def", "_buildTerms", "(", "self", ")", ":", "termLons", "=", "tables", ".", "termLons", "(", "tables", ".", "EGYPTIAN_TERMS", ")", "res", "=", "{", "}", "for", "(", "ID", ",", "sign", ",", "lon", ")", "in", "termLons", ":", "try", ":", "res", "[", "sign", "]", "[", "ID", "]", "=", "lon", "except", "KeyError", ":", "res", "[", "sign", "]", "=", "{", "}", "res", "[", "sign", "]", "[", "ID", "]", "=", "lon", "return", "res" ]
Builds a data structure indexing the terms longitude by sign and object.
[ "Builds", "a", "data", "structure", "indexing", "the", "terms", "longitude", "by", "sign", "and", "object", "." ]
python
train
29.071429
PSPC-SPAC-buyandsell/von_anchor
von_anchor/wallet/wallet.py
https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/wallet/wallet.py#L1136-L1159
async def sign(self, message: bytes, verkey: str = None) -> bytes: """ Derive signing key and Sign message; return signature. Raise WalletState if wallet is closed. Raise AbsentMessage for missing message, or WalletState if wallet is closed. :param message: Content to sign, as bytes :param verkey: verification key corresponding to private signing key (default anchor's own) :return: signature, as bytes """ LOGGER.debug('Wallet.sign >>> message: %s, verkey: %s', message, verkey) if not message: LOGGER.debug('Wallet.sign <!< No message to sign') raise AbsentMessage('No message to sign') if not self.handle: LOGGER.debug('Wallet.sign <!< Wallet %s is closed', self.name) raise WalletState('Wallet {} is closed'.format(self.name)) rv = await crypto.crypto_sign(self.handle, verkey or self.verkey, message) LOGGER.debug('Wallet.sign <<< %s', rv) return rv
[ "async", "def", "sign", "(", "self", ",", "message", ":", "bytes", ",", "verkey", ":", "str", "=", "None", ")", "->", "bytes", ":", "LOGGER", ".", "debug", "(", "'Wallet.sign >>> message: %s, verkey: %s'", ",", "message", ",", "verkey", ")", "if", "not", "message", ":", "LOGGER", ".", "debug", "(", "'Wallet.sign <!< No message to sign'", ")", "raise", "AbsentMessage", "(", "'No message to sign'", ")", "if", "not", "self", ".", "handle", ":", "LOGGER", ".", "debug", "(", "'Wallet.sign <!< Wallet %s is closed'", ",", "self", ".", "name", ")", "raise", "WalletState", "(", "'Wallet {} is closed'", ".", "format", "(", "self", ".", "name", ")", ")", "rv", "=", "await", "crypto", ".", "crypto_sign", "(", "self", ".", "handle", ",", "verkey", "or", "self", ".", "verkey", ",", "message", ")", "LOGGER", ".", "debug", "(", "'Wallet.sign <<< %s'", ",", "rv", ")", "return", "rv" ]
Derive signing key and Sign message; return signature. Raise WalletState if wallet is closed. Raise AbsentMessage for missing message, or WalletState if wallet is closed. :param message: Content to sign, as bytes :param verkey: verification key corresponding to private signing key (default anchor's own) :return: signature, as bytes
[ "Derive", "signing", "key", "and", "Sign", "message", ";", "return", "signature", ".", "Raise", "WalletState", "if", "wallet", "is", "closed", ".", "Raise", "AbsentMessage", "for", "missing", "message", "or", "WalletState", "if", "wallet", "is", "closed", "." ]
python
train
41.291667
pantsbuild/pants
src/python/pants/util/contextutil.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/util/contextutil.py#L137-L153
def stdio_as(stdout_fd, stderr_fd, stdin_fd): """Redirect sys.{stdout, stderr, stdin} to alternate file descriptors. As a special case, if a given destination fd is `-1`, we will replace it with an open file handle to `/dev/null`. NB: If the filehandles for sys.{stdout, stderr, stdin} have previously been closed, it's possible that the OS has repurposed fds `0, 1, 2` to represent other files or sockets. It's impossible for this method to locate all python objects which refer to those fds, so it's up to the caller to guarantee that `0, 1, 2` are safe to replace. In Python3, the streams expect unicode. To write and read bytes, access their buffer, e.g. `stdin.buffer.read()`. """ with _stdio_stream_as(stdin_fd, 0, 'stdin', 'r'),\ _stdio_stream_as(stdout_fd, 1, 'stdout', 'w'),\ _stdio_stream_as(stderr_fd, 2, 'stderr', 'w'): yield
[ "def", "stdio_as", "(", "stdout_fd", ",", "stderr_fd", ",", "stdin_fd", ")", ":", "with", "_stdio_stream_as", "(", "stdin_fd", ",", "0", ",", "'stdin'", ",", "'r'", ")", ",", "_stdio_stream_as", "(", "stdout_fd", ",", "1", ",", "'stdout'", ",", "'w'", ")", ",", "_stdio_stream_as", "(", "stderr_fd", ",", "2", ",", "'stderr'", ",", "'w'", ")", ":", "yield" ]
Redirect sys.{stdout, stderr, stdin} to alternate file descriptors. As a special case, if a given destination fd is `-1`, we will replace it with an open file handle to `/dev/null`. NB: If the filehandles for sys.{stdout, stderr, stdin} have previously been closed, it's possible that the OS has repurposed fds `0, 1, 2` to represent other files or sockets. It's impossible for this method to locate all python objects which refer to those fds, so it's up to the caller to guarantee that `0, 1, 2` are safe to replace. In Python3, the streams expect unicode. To write and read bytes, access their buffer, e.g. `stdin.buffer.read()`.
[ "Redirect", "sys", ".", "{", "stdout", "stderr", "stdin", "}", "to", "alternate", "file", "descriptors", "." ]
python
train
50.823529
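A sketch of using stdio_as to capture stdout and stderr in a scratch file, with stdin pointed at /dev/null via the -1 convention described in the docstring. This assumes stdio_as is used as the context manager it is in pants (the @contextmanager decorator sits outside the snippet above), and the log file name is a placeholder.

import os

with open('captured.log', 'w') as log:
    fd = log.fileno()
    # -1 for stdin means "replace with an open handle to /dev/null".
    with stdio_as(stdout_fd=fd, stderr_fd=fd, stdin_fd=-1):
        print('this line ends up in captured.log')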
BlueBrain/NeuroM
neurom/core/_neuron.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/_neuron.py#L54-L90
def iter_neurites(obj, mapfun=None, filt=None, neurite_order=NeuriteIter.FileOrder): '''Iterator to a neurite, neuron or neuron population Applies optional neurite filter and mapping functions. Parameters: obj: a neurite, neuron or neuron population. mapfun: optional neurite mapping function. filt: optional neurite filter function. neurite_order (NeuriteIter): order upon which neurites should be iterated - NeuriteIter.FileOrder: order of appearance in the file - NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical Examples: Get the number of points in each neurite in a neuron population >>> from neurom.core import iter_neurites >>> n_points = [n for n in iter_neurites(pop, lambda x : len(x.points))] Get the number of points in each axon in a neuron population >>> import neurom as nm >>> from neurom.core import iter_neurites >>> filter = lambda n : n.type == nm.AXON >>> mapping = lambda n : len(n.points) >>> n_points = [n for n in iter_neurites(pop, mapping, filter)] ''' neurites = ((obj,) if isinstance(obj, Neurite) else obj.neurites if hasattr(obj, 'neurites') else obj) if neurite_order == NeuriteIter.NRN: last_position = max(NRN_ORDER.values()) + 1 neurites = sorted(neurites, key=lambda neurite: NRN_ORDER.get(neurite.type, last_position)) neurite_iter = iter(neurites) if filt is None else filter(filt, neurites) return neurite_iter if mapfun is None else map(mapfun, neurite_iter)
[ "def", "iter_neurites", "(", "obj", ",", "mapfun", "=", "None", ",", "filt", "=", "None", ",", "neurite_order", "=", "NeuriteIter", ".", "FileOrder", ")", ":", "neurites", "=", "(", "(", "obj", ",", ")", "if", "isinstance", "(", "obj", ",", "Neurite", ")", "else", "obj", ".", "neurites", "if", "hasattr", "(", "obj", ",", "'neurites'", ")", "else", "obj", ")", "if", "neurite_order", "==", "NeuriteIter", ".", "NRN", ":", "last_position", "=", "max", "(", "NRN_ORDER", ".", "values", "(", ")", ")", "+", "1", "neurites", "=", "sorted", "(", "neurites", ",", "key", "=", "lambda", "neurite", ":", "NRN_ORDER", ".", "get", "(", "neurite", ".", "type", ",", "last_position", ")", ")", "neurite_iter", "=", "iter", "(", "neurites", ")", "if", "filt", "is", "None", "else", "filter", "(", "filt", ",", "neurites", ")", "return", "neurite_iter", "if", "mapfun", "is", "None", "else", "map", "(", "mapfun", ",", "neurite_iter", ")" ]
Iterator to a neurite, neuron or neuron population Applies optional neurite filter and mapping functions. Parameters: obj: a neurite, neuron or neuron population. mapfun: optional neurite mapping function. filt: optional neurite filter function. neurite_order (NeuriteIter): order upon which neurites should be iterated - NeuriteIter.FileOrder: order of appearance in the file - NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical Examples: Get the number of points in each neurite in a neuron population >>> from neurom.core import iter_neurites >>> n_points = [n for n in iter_neurites(pop, lambda x : len(x.points))] Get the number of points in each axon in a neuron population >>> import neurom as nm >>> from neurom.core import iter_neurites >>> filter = lambda n : n.type == nm.AXON >>> mapping = lambda n : len(n.points) >>> n_points = [n for n in iter_neurites(pop, mapping, filter)]
[ "Iterator", "to", "a", "neurite", "neuron", "or", "neuron", "population" ]
python
train
42.945946
koordinates/python-client
koordinates/metadata.py
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/metadata.py#L47-L61
def get_xml(self, fp, format=FORMAT_NATIVE): """ Returns the XML metadata for this source, converted to the requested format. Converted metadata may not contain all the same information as the native format. :param file fp: A path, or an open file-like object which the content should be written to. :param str format: desired format for the output. This should be one of the available formats from :py:meth:`.get_formats`, or :py:attr:`.FORMAT_NATIVE` for the native format. If you pass this function an open file-like object as the fp parameter, the function will not close that file for you. """ r = self._client.request('GET', getattr(self, format), stream=True) filename = stream.stream_response_to_file(r, path=fp) return filename
[ "def", "get_xml", "(", "self", ",", "fp", ",", "format", "=", "FORMAT_NATIVE", ")", ":", "r", "=", "self", ".", "_client", ".", "request", "(", "'GET'", ",", "getattr", "(", "self", ",", "format", ")", ",", "stream", "=", "True", ")", "filename", "=", "stream", ".", "stream_response_to_file", "(", "r", ",", "path", "=", "fp", ")", "return", "filename" ]
Returns the XML metadata for this source, converted to the requested format. Converted metadata may not contain all the same information as the native format. :param file fp: A path, or an open file-like object which the content should be written to. :param str format: desired format for the output. This should be one of the available formats from :py:meth:`.get_formats`, or :py:attr:`.FORMAT_NATIVE` for the native format. If you pass this function an open file-like object as the fp parameter, the function will not close that file for you.
[ "Returns", "the", "XML", "metadata", "for", "this", "source", "converted", "to", "the", "requested", "format", ".", "Converted", "metadata", "may", "not", "contain", "all", "the", "same", "information", "as", "the", "native", "format", "." ]
python
train
54.933333
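A hedged sketch of fetching a layer's metadata XML with the koordinates client. The host, API token, and layer id are placeholders following the python-client README pattern; get_xml is called with its default native format and a path, so the XML is written to that file.

from koordinates import Client

# Host, token and layer id are placeholders.
client = Client(host='labs.koordinates.com', token='MY_API_TOKEN')
layer = client.layers.get(12345)
if layer.metadata:
    # FORMAT_NATIVE is the default; passing a path writes the XML to that file.
    layer.metadata.get_xml('layer-12345-metadata.xml')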
DLR-RM/RAFCON
source/rafcon/gui/controllers/execution_ticker.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/execution_ticker.py#L72-L82
def on_config_value_changed(self, config_m, prop_name, info): """Callback when a config value has been changed :param ConfigModel config_m: The config model that has been changed :param str prop_name: Should always be 'config' :param dict info: Information e.g. about the changed config key """ config_key = info['args'][1] if config_key in ["EXECUTION_TICKER_ENABLED"]: self.check_configuration()
[ "def", "on_config_value_changed", "(", "self", ",", "config_m", ",", "prop_name", ",", "info", ")", ":", "config_key", "=", "info", "[", "'args'", "]", "[", "1", "]", "if", "config_key", "in", "[", "\"EXECUTION_TICKER_ENABLED\"", "]", ":", "self", ".", "check_configuration", "(", ")" ]
Callback when a config value has been changed :param ConfigModel config_m: The config model that has been changed :param str prop_name: Should always be 'config' :param dict info: Information e.g. about the changed config key
[ "Callback", "when", "a", "config", "value", "has", "been", "changed" ]
python
train
41.545455
xolox/python-coloredlogs
coloredlogs/__init__.py
https://github.com/xolox/python-coloredlogs/blob/1cbf0c6bbee400c6ddbc43008143809934ec3e79/coloredlogs/__init__.py#L1363-L1370
def get_pattern(self, field_name): """ Get a regular expression to match a formatting directive that references the given field name. :param field_name: The name of the field to match (a string). :returns: A compiled regular expression object. """ return re.compile(self.raw_pattern.replace(r'\w+', field_name), re.VERBOSE)
[ "def", "get_pattern", "(", "self", ",", "field_name", ")", ":", "return", "re", ".", "compile", "(", "self", ".", "raw_pattern", ".", "replace", "(", "r'\\w+'", ",", "field_name", ")", ",", "re", ".", "VERBOSE", ")" ]
Get a regular expression to match a formatting directive that references the given field name. :param field_name: The name of the field to match (a string). :returns: A compiled regular expression object.
[ "Get", "a", "regular", "expression", "to", "match", "a", "formatting", "directive", "that", "references", "the", "given", "field", "name", "." ]
python
train
45.625
openstates/billy
billy/importers/bills.py
https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/importers/bills.py#L429-L449
def populate_current_fields(abbr): """ Set/update _current_term and _current_session fields on all bills for a given location. """ meta = db.metadata.find_one({'_id': abbr}) current_term = meta['terms'][-1] current_session = current_term['sessions'][-1] for bill in db.bills.find({settings.LEVEL_FIELD: abbr}): if bill['session'] == current_session: bill['_current_session'] = True else: bill['_current_session'] = False if bill['session'] in current_term['sessions']: bill['_current_term'] = True else: bill['_current_term'] = False db.bills.save(bill, safe=True)
[ "def", "populate_current_fields", "(", "abbr", ")", ":", "meta", "=", "db", ".", "metadata", ".", "find_one", "(", "{", "'_id'", ":", "abbr", "}", ")", "current_term", "=", "meta", "[", "'terms'", "]", "[", "-", "1", "]", "current_session", "=", "current_term", "[", "'sessions'", "]", "[", "-", "1", "]", "for", "bill", "in", "db", ".", "bills", ".", "find", "(", "{", "settings", ".", "LEVEL_FIELD", ":", "abbr", "}", ")", ":", "if", "bill", "[", "'session'", "]", "==", "current_session", ":", "bill", "[", "'_current_session'", "]", "=", "True", "else", ":", "bill", "[", "'_current_session'", "]", "=", "False", "if", "bill", "[", "'session'", "]", "in", "current_term", "[", "'sessions'", "]", ":", "bill", "[", "'_current_term'", "]", "=", "True", "else", ":", "bill", "[", "'_current_term'", "]", "=", "False", "db", ".", "bills", ".", "save", "(", "bill", ",", "safe", "=", "True", ")" ]
Set/update _current_term and _current_session fields on all bills for a given location.
[ "Set", "/", "update", "_current_term", "and", "_current_session", "fields", "on", "all", "bills", "for", "a", "given", "location", "." ]
python
train
31.761905
planetarypy/planetaryimage
planetaryimage/cubefile.py
https://github.com/planetarypy/planetaryimage/blob/ee9aef4746ff7a003b1457565acb13f5f1db0375/planetaryimage/cubefile.py#L161-L199
def apply_numpy_specials(self, copy=True): """Convert isis special pixel values to numpy special pixel values. ======= ======= Isis Numpy ======= ======= Null nan Lrs -inf Lis -inf His inf Hrs inf ======= ======= Parameters ---------- copy : bool [True] Whether to apply the new special values to a copy of the pixel data and leave the original unaffected Returns ------- Numpy Array A numpy array with special values converted to numpy's nan, inf, and -inf """ if copy: data = self.data.astype(numpy.float64) elif self.data.dtype != numpy.float64: data = self.data = self.data.astype(numpy.float64) else: data = self.data data[data == self.specials['Null']] = numpy.nan data[data < self.specials['Min']] = numpy.NINF data[data > self.specials['Max']] = numpy.inf return data
[ "def", "apply_numpy_specials", "(", "self", ",", "copy", "=", "True", ")", ":", "if", "copy", ":", "data", "=", "self", ".", "data", ".", "astype", "(", "numpy", ".", "float64", ")", "elif", "self", ".", "data", ".", "dtype", "!=", "numpy", ".", "float64", ":", "data", "=", "self", ".", "data", "=", "self", ".", "data", ".", "astype", "(", "numpy", ".", "float64", ")", "else", ":", "data", "=", "self", ".", "data", "data", "[", "data", "==", "self", ".", "specials", "[", "'Null'", "]", "]", "=", "numpy", ".", "nan", "data", "[", "data", "<", "self", ".", "specials", "[", "'Min'", "]", "]", "=", "numpy", ".", "NINF", "data", "[", "data", ">", "self", ".", "specials", "[", "'Max'", "]", "]", "=", "numpy", ".", "inf", "return", "data" ]
Convert isis special pixel values to numpy special pixel values. ======= ======= Isis Numpy ======= ======= Null nan Lrs -inf Lis -inf His inf Hrs inf ======= ======= Parameters ---------- copy : bool [True] Whether to apply the new special values to a copy of the pixel data and leave the original unaffected Returns ------- Numpy Array A numpy array with special values converted to numpy's nan, inf, and -inf
[ "Convert", "isis", "special", "pixel", "values", "to", "numpy", "special", "pixel", "values", "." ]
python
train
27.897436
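A short sketch of calling apply_numpy_specials on a cube loaded with planetaryimage. The .cub path is a placeholder for a real ISIS cube on disk; the method returns a float64 copy with Null/Lrs/His specials mapped to nan, -inf, and inf, which makes masking with numpy straightforward.

import numpy
from planetaryimage import CubeFile

# 'observation.cub' is a placeholder path to an ISIS cube file.
cube = CubeFile.open('observation.cub')
clean = cube.apply_numpy_specials()        # copy with specials as nan/-inf/inf
finite = clean[numpy.isfinite(clean)]      # drop special pixels before statistics
print(finite.mean())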
biolink/ontobio
ontobio/ontol.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L294-L303
def prefixes(self): """ list all prefixes used """ pset = set() for n in self.nodes(): pfx = self.prefix(n) if pfx is not None: pset.add(pfx) return list(pset)
[ "def", "prefixes", "(", "self", ")", ":", "pset", "=", "set", "(", ")", "for", "n", "in", "self", ".", "nodes", "(", ")", ":", "pfx", "=", "self", ".", "prefix", "(", "n", ")", "if", "pfx", "is", "not", "None", ":", "pset", ".", "add", "(", "pfx", ")", "return", "list", "(", "pset", ")" ]
list all prefixes used
[ "list", "all", "prefixes", "used" ]
python
train
23.8
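A hedged sketch of listing prefixes on an ontobio ontology. The 'go' handle is a placeholder; OntologyFactory is assumed to resolve handles as described in the ontobio documentation.

from ontobio.ontol_factory import OntologyFactory

# 'go' is a placeholder ontology handle resolved by the factory.
ont = OntologyFactory().create('go')
print(ont.prefixes())   # list of CURIE prefixes used by the ontology's nodes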
log2timeline/plaso
plaso/storage/fake/writer.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/storage/fake/writer.py#L112-L137
def AddEvent(self, event): """Adds an event. Args: event (EventObject): event. Raises: IOError: when the storage writer is closed or if the event data identifier type is not supported. OSError: when the storage writer is closed or if the event data identifier type is not supported. """ self._RaiseIfNotWritable() # TODO: change to no longer allow event_data_identifier is None # after refactoring every parser to generate event data. event_data_identifier = event.GetEventDataIdentifier() if event_data_identifier: if not isinstance(event_data_identifier, identifiers.FakeIdentifier): raise IOError('Unsupported event data identifier type: {0:s}'.format( type(event_data_identifier))) event = self._PrepareAttributeContainer(event) self._events.append(event) self.number_of_events += 1
[ "def", "AddEvent", "(", "self", ",", "event", ")", ":", "self", ".", "_RaiseIfNotWritable", "(", ")", "# TODO: change to no longer allow event_data_identifier is None", "# after refactoring every parser to generate event data.", "event_data_identifier", "=", "event", ".", "GetEventDataIdentifier", "(", ")", "if", "event_data_identifier", ":", "if", "not", "isinstance", "(", "event_data_identifier", ",", "identifiers", ".", "FakeIdentifier", ")", ":", "raise", "IOError", "(", "'Unsupported event data identifier type: {0:s}'", ".", "format", "(", "type", "(", "event_data_identifier", ")", ")", ")", "event", "=", "self", ".", "_PrepareAttributeContainer", "(", "event", ")", "self", ".", "_events", ".", "append", "(", "event", ")", "self", ".", "number_of_events", "+=", "1" ]
Adds an event. Args: event (EventObject): event. Raises: IOError: when the storage writer is closed or if the event data identifier type is not supported. OSError: when the storage writer is closed or if the event data identifier type is not supported.
[ "Adds", "an", "event", "." ]
python
train
33.769231
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3612-L3626
def get_items_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1): """ Get items of delivery note per page :param delivery_note_id: the delivery note id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list """ return self._get_resource_per_page( resource=DELIVERY_NOTE_ITEMS, per_page=per_page, page=page, params={'delivery_note_id': delivery_note_id}, )
[ "def", "get_items_of_delivery_note_per_page", "(", "self", ",", "delivery_note_id", ",", "per_page", "=", "1000", ",", "page", "=", "1", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "DELIVERY_NOTE_ITEMS", ",", "per_page", "=", "per_page", ",", "page", "=", "page", ",", "params", "=", "{", "'delivery_note_id'", ":", "delivery_note_id", "}", ",", ")" ]
Get items of delivery note per page :param delivery_note_id: the delivery note id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list
[ "Get", "items", "of", "delivery", "note", "per", "page" ]
python
train
35.8
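A hedged usage sketch for the paginated delivery-note items call. The credentials and delivery note id are placeholders, and the constructor arguments follow the billomapy README pattern (billomat_id, api_key, app_id, app_secret).

from billomapy import Billomapy

# All credentials and the delivery note id below are placeholders.
client = Billomapy('my-billomat-id', 'api-key', 'app-id', 'app-secret')
items = client.get_items_of_delivery_note_per_page(
    delivery_note_id=1234, per_page=500, page=1
)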
nabetama/slacky
slacky/rest/rest.py
https://github.com/nabetama/slacky/blob/dde62ce49af9b8f581729c36d2ac790310b570e4/slacky/rest/rest.py#L467-L472
def list(self, **kwargs): """ https://api.slack.com/methods/groups.list """ if kwargs: self.params.update(kwargs) return FromUrl('https://slack.com/api/groups.list', self._requests)(data=self.params).get()
[ "def", "list", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ":", "self", ".", "params", ".", "update", "(", "kwargs", ")", "return", "FromUrl", "(", "'https://slack.com/api/groups.list'", ",", "self", ".", "_requests", ")", "(", "data", "=", "self", ".", "params", ")", ".", "get", "(", ")" ]
https://api.slack.com/methods/groups.list
[ "https", ":", "//", "api", ".", "slack", ".", "com", "/", "methods", "/", "groups", ".", "list" ]
python
train
40.666667
polyaxon/polyaxon-cli
polyaxon_cli/cli/tensorboard.py
https://github.com/polyaxon/polyaxon-cli/blob/a7f5eed74d4d909cad79059f3c21c58606881449/polyaxon_cli/cli/tensorboard.py#L134-L238
def start(ctx, file): # pylint:disable=redefined-builtin """Start a tensorboard deployment for project/experiment/experiment group. Project tensorboard will aggregate all experiments under the project. Experiment group tensorboard will aggregate all experiments under the group. Experiment tensorboard will show all metrics for an experiment. Uses [Caching](/references/polyaxon-cli/#caching) Example: using the default tensorflow image 1.4.1. \b ```bash $ polyaxon tensorboard start ``` Example: with custom image and resources \b ```bash $ polyaxon tensorboard start -f file -f file_override ... ``` Example: starting a tensorboard for an experiment group \b ```bash $ polyaxon tensorboard -g 1 start -f file ``` Example: starting a tensorboard for an experiment \b ```bash $ polyaxon tensorboard -xp 112 start -f file ``` """ specification = None job_config = None if file: specification = check_polyaxonfile(file, log=False).specification if specification: # pylint:disable=protected-access check_polyaxonfile_kind(specification=specification, kind=specification._TENSORBOARD) job_config = specification.parsed_data user, project_name = get_project_or_local(ctx.obj.get('project')) group = ctx.obj.get('group') experiment = ctx.obj.get('experiment') if experiment: try: response = PolyaxonClient().experiment.start_tensorboard( username=user, project_name=project_name, experiment_id=experiment, job_config=job_config) obj = 'experiment `{}`'.format(experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not start tensorboard experiment `{}`.'.format(experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) elif group: try: response = PolyaxonClient().experiment_group.start_tensorboard( username=user, project_name=project_name, group_id=group, job_config=job_config) obj = 'group `{}`'.format(group) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not start tensorboard group `{}`.'.format(group)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) else: try: response = PolyaxonClient().project.start_tensorboard( username=user, project_name=project_name, job_config=job_config) obj = 'project `{}`'.format(project_name) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not start tensorboard project `{}`.'.format(project_name)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if response.status_code == 200: Printer.print_header("A tensorboard for this {} is already running on:".format(obj)) click.echo(get_tensorboard_url(user=user, project_name=project_name, experiment=experiment, group=group)) sys.exit(0) if response.status_code != 201: Printer.print_error('Something went wrong, Tensorboard was not created.') sys.exit(1) Printer.print_success('Tensorboard is being deployed for {}'.format(obj)) clint.textui.puts("It may take some time before you can access tensorboard.\n") clint.textui.puts("Your tensorboard will be available on:\n") with clint.textui.indent(4): clint.textui.puts(get_tensorboard_url(user, project_name, experiment, group))
[ "def", "start", "(", "ctx", ",", "file", ")", ":", "# pylint:disable=redefined-builtin", "specification", "=", "None", "job_config", "=", "None", "if", "file", ":", "specification", "=", "check_polyaxonfile", "(", "file", ",", "log", "=", "False", ")", ".", "specification", "if", "specification", ":", "# pylint:disable=protected-access", "check_polyaxonfile_kind", "(", "specification", "=", "specification", ",", "kind", "=", "specification", ".", "_TENSORBOARD", ")", "job_config", "=", "specification", ".", "parsed_data", "user", ",", "project_name", "=", "get_project_or_local", "(", "ctx", ".", "obj", ".", "get", "(", "'project'", ")", ")", "group", "=", "ctx", ".", "obj", ".", "get", "(", "'group'", ")", "experiment", "=", "ctx", ".", "obj", ".", "get", "(", "'experiment'", ")", "if", "experiment", ":", "try", ":", "response", "=", "PolyaxonClient", "(", ")", ".", "experiment", ".", "start_tensorboard", "(", "username", "=", "user", ",", "project_name", "=", "project_name", ",", "experiment_id", "=", "experiment", ",", "job_config", "=", "job_config", ")", "obj", "=", "'experiment `{}`'", ".", "format", "(", "experiment", ")", "except", "(", "PolyaxonHTTPError", ",", "PolyaxonShouldExitError", ",", "PolyaxonClientException", ")", "as", "e", ":", "Printer", ".", "print_error", "(", "'Could not start tensorboard experiment `{}`.'", ".", "format", "(", "experiment", ")", ")", "Printer", ".", "print_error", "(", "'Error message `{}`.'", ".", "format", "(", "e", ")", ")", "sys", ".", "exit", "(", "1", ")", "elif", "group", ":", "try", ":", "response", "=", "PolyaxonClient", "(", ")", ".", "experiment_group", ".", "start_tensorboard", "(", "username", "=", "user", ",", "project_name", "=", "project_name", ",", "group_id", "=", "group", ",", "job_config", "=", "job_config", ")", "obj", "=", "'group `{}`'", ".", "format", "(", "group", ")", "except", "(", "PolyaxonHTTPError", ",", "PolyaxonShouldExitError", ",", "PolyaxonClientException", ")", "as", "e", ":", "Printer", ".", "print_error", "(", "'Could not start tensorboard group `{}`.'", ".", "format", "(", "group", ")", ")", "Printer", ".", "print_error", "(", "'Error message `{}`.'", ".", "format", "(", "e", ")", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "try", ":", "response", "=", "PolyaxonClient", "(", ")", ".", "project", ".", "start_tensorboard", "(", "username", "=", "user", ",", "project_name", "=", "project_name", ",", "job_config", "=", "job_config", ")", "obj", "=", "'project `{}`'", ".", "format", "(", "project_name", ")", "except", "(", "PolyaxonHTTPError", ",", "PolyaxonShouldExitError", ",", "PolyaxonClientException", ")", "as", "e", ":", "Printer", ".", "print_error", "(", "'Could not start tensorboard project `{}`.'", ".", "format", "(", "project_name", ")", ")", "Printer", ".", "print_error", "(", "'Error message `{}`.'", ".", "format", "(", "e", ")", ")", "sys", ".", "exit", "(", "1", ")", "if", "response", ".", "status_code", "==", "200", ":", "Printer", ".", "print_header", "(", "\"A tensorboard for this {} is already running on:\"", ".", "format", "(", "obj", ")", ")", "click", ".", "echo", "(", "get_tensorboard_url", "(", "user", "=", "user", ",", "project_name", "=", "project_name", ",", "experiment", "=", "experiment", ",", "group", "=", "group", ")", ")", "sys", ".", "exit", "(", "0", ")", "if", "response", ".", "status_code", "!=", "201", ":", "Printer", ".", "print_error", "(", "'Something went wrong, Tensorboard was not created.'", ")", "sys", ".", "exit", "(", "1", ")", "Printer", ".", 
"print_success", "(", "'Tensorboard is being deployed for {}'", ".", "format", "(", "obj", ")", ")", "clint", ".", "textui", ".", "puts", "(", "\"It may take some time before you can access tensorboard.\\n\"", ")", "clint", ".", "textui", ".", "puts", "(", "\"Your tensorboard will be available on:\\n\"", ")", "with", "clint", ".", "textui", ".", "indent", "(", "4", ")", ":", "clint", ".", "textui", ".", "puts", "(", "get_tensorboard_url", "(", "user", ",", "project_name", ",", "experiment", ",", "group", ")", ")" ]
Start a tensorboard deployment for project/experiment/experiment group. Project tensorboard will aggregate all experiments under the project. Experiment group tensorboard will aggregate all experiments under the group. Experiment tensorboard will show all metrics for an experiment. Uses [Caching](/references/polyaxon-cli/#caching) Example: using the default tensorflow image 1.4.1. \b ```bash $ polyaxon tensorboard start ``` Example: with custom image and resources \b ```bash $ polyaxon tensorboard start -f file -f file_override ... ``` Example: starting a tensorboard for an experiment group \b ```bash $ polyaxon tensorboard -g 1 start -f file ``` Example: starting a tensorboard for an experiment \b ```bash $ polyaxon tensorboard -xp 112 start -f file ```
[ "Start", "a", "tensorboard", "deployment", "for", "project", "/", "experiment", "/", "experiment", "group", "." ]
python
valid
37.009524
gwpy/gwpy
gwpy/detector/io/clf.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/io/clf.py#L90-L136
def read_channel_list_file(*source): """Read a `~gwpy.detector.ChannelList` from a Channel List File """ # read file(s) config = configparser.ConfigParser(dict_type=OrderedDict) source = file_list(source) success_ = config.read(*source) if len(success_) != len(source): raise IOError("Failed to read one or more CLF files") # create channel list out = ChannelList() out.source = source append = out.append # loop over all groups and channels for group in config.sections(): params = OrderedDict(config.items(group)) channels = params.pop('channels').strip('\n').split('\n') if 'flow' in params or 'fhigh' in params: low = params.pop('flow', 0) high = params.pop('fhigh', inf) if isinstance(high, string_types) and high.lower() == 'nyquist': high = inf frange = float(low), float(high) else: frange = None for channel in channels: try: match = CHANNEL_DEFINITION.match(channel).groupdict() except AttributeError as exc: exc.args = ('Cannot parse %r as channel list entry' % channel,) raise # remove Nones from match match = dict((k, v) for k, v in match.items() if v is not None) match.setdefault('safe', 'safe') match.setdefault('fidelity', 'clean') # create channel and copy group params safe = match.get('safe', 'safe').lower() != 'unsafe' channel = Channel(match.pop('name'), frequency_range=frange, safe=safe, sample_rate=match.pop('sample_rate')) channel.params = params.copy() channel.params.update(match) channel.group = group # extract those params for which the Channel has an attribute for key in ['frametype']: setattr(channel, key, channel.params.pop(key, None)) append(channel) return out
[ "def", "read_channel_list_file", "(", "*", "source", ")", ":", "# read file(s)", "config", "=", "configparser", ".", "ConfigParser", "(", "dict_type", "=", "OrderedDict", ")", "source", "=", "file_list", "(", "source", ")", "success_", "=", "config", ".", "read", "(", "*", "source", ")", "if", "len", "(", "success_", ")", "!=", "len", "(", "source", ")", ":", "raise", "IOError", "(", "\"Failed to read one or more CLF files\"", ")", "# create channel list", "out", "=", "ChannelList", "(", ")", "out", ".", "source", "=", "source", "append", "=", "out", ".", "append", "# loop over all groups and channels", "for", "group", "in", "config", ".", "sections", "(", ")", ":", "params", "=", "OrderedDict", "(", "config", ".", "items", "(", "group", ")", ")", "channels", "=", "params", ".", "pop", "(", "'channels'", ")", ".", "strip", "(", "'\\n'", ")", ".", "split", "(", "'\\n'", ")", "if", "'flow'", "in", "params", "or", "'fhigh'", "in", "params", ":", "low", "=", "params", ".", "pop", "(", "'flow'", ",", "0", ")", "high", "=", "params", ".", "pop", "(", "'fhigh'", ",", "inf", ")", "if", "isinstance", "(", "high", ",", "string_types", ")", "and", "high", ".", "lower", "(", ")", "==", "'nyquist'", ":", "high", "=", "inf", "frange", "=", "float", "(", "low", ")", ",", "float", "(", "high", ")", "else", ":", "frange", "=", "None", "for", "channel", "in", "channels", ":", "try", ":", "match", "=", "CHANNEL_DEFINITION", ".", "match", "(", "channel", ")", ".", "groupdict", "(", ")", "except", "AttributeError", "as", "exc", ":", "exc", ".", "args", "=", "(", "'Cannot parse %r as channel list entry'", "%", "channel", ",", ")", "raise", "# remove Nones from match", "match", "=", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "match", ".", "items", "(", ")", "if", "v", "is", "not", "None", ")", "match", ".", "setdefault", "(", "'safe'", ",", "'safe'", ")", "match", ".", "setdefault", "(", "'fidelity'", ",", "'clean'", ")", "# create channel and copy group params", "safe", "=", "match", ".", "get", "(", "'safe'", ",", "'safe'", ")", ".", "lower", "(", ")", "!=", "'unsafe'", "channel", "=", "Channel", "(", "match", ".", "pop", "(", "'name'", ")", ",", "frequency_range", "=", "frange", ",", "safe", "=", "safe", ",", "sample_rate", "=", "match", ".", "pop", "(", "'sample_rate'", ")", ")", "channel", ".", "params", "=", "params", ".", "copy", "(", ")", "channel", ".", "params", ".", "update", "(", "match", ")", "channel", ".", "group", "=", "group", "# extract those params for which the Channel has an attribute", "for", "key", "in", "[", "'frametype'", "]", ":", "setattr", "(", "channel", ",", "key", ",", "channel", ".", "params", ".", "pop", "(", "key", ",", "None", ")", ")", "append", "(", "channel", ")", "return", "out" ]
Read a `~gwpy.detector.ChannelList` from a Channel List File
[ "Read", "a", "~gwpy", ".", "detector", ".", "ChannelList", "from", "a", "Channel", "List", "File" ]
python
train
42.744681
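A small sketch calling the reader directly. The file name is a placeholder for a channel list file in the CLF/ini layout the parser expects (group sections, each with a 'channels' entry and optional flow/fhigh and frametype parameters).

# 'h1-detchar.ini' is a placeholder channel list file.
channels = read_channel_list_file('h1-detchar.ini')
for chan in channels:
    print(chan.name, chan.frametype, chan.group)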
aio-libs/aioftp
aioftp/client.py
https://github.com/aio-libs/aioftp/blob/b45395b1aba41301b898040acade7010e6878a08/aioftp/client.py#L562-L576
async def connect(self, host, port=DEFAULT_PORT): """ :py:func:`asyncio.coroutine` Connect to server. :param host: host name for connection :type host: :py:class:`str` :param port: port number for connection :type port: :py:class:`int` """ await super().connect(host, port) code, info = await self.command(None, "220", "120") return info
[ "async", "def", "connect", "(", "self", ",", "host", ",", "port", "=", "DEFAULT_PORT", ")", ":", "await", "super", "(", ")", ".", "connect", "(", "host", ",", "port", ")", "code", ",", "info", "=", "await", "self", ".", "command", "(", "None", ",", "\"220\"", ",", "\"120\"", ")", "return", "info" ]
:py:func:`asyncio.coroutine` Connect to server. :param host: host name for connection :type host: :py:class:`str` :param port: port number for connection :type port: :py:class:`int`
[ ":", "py", ":", "func", ":", "asyncio", ".", "coroutine" ]
python
valid
27.6
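A short sketch of the connect coroutine inside a client session; the host is a placeholder and the login is anonymous.

import asyncio
import aioftp

async def main():
    client = aioftp.Client()
    # 'ftp.example.com' is a placeholder host.
    welcome = await client.connect('ftp.example.com', 21)
    print(welcome)        # server banner from the 220/120 reply
    await client.login()  # anonymous login
    await client.quit()

asyncio.run(main())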
pallets/werkzeug
src/werkzeug/wrappers/base_request.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/wrappers/base_request.py#L480-L488
def values(self): """A :class:`werkzeug.datastructures.CombinedMultiDict` that combines :attr:`args` and :attr:`form`.""" args = [] for d in self.args, self.form: if not isinstance(d, MultiDict): d = MultiDict(d) args.append(d) return CombinedMultiDict(args)
[ "def", "values", "(", "self", ")", ":", "args", "=", "[", "]", "for", "d", "in", "self", ".", "args", ",", "self", ".", "form", ":", "if", "not", "isinstance", "(", "d", ",", "MultiDict", ")", ":", "d", "=", "MultiDict", "(", "d", ")", "args", ".", "append", "(", "d", ")", "return", "CombinedMultiDict", "(", "args", ")" ]
A :class:`werkzeug.datastructures.CombinedMultiDict` that combines :attr:`args` and :attr:`form`.
[ "A", ":", "class", ":", "werkzeug", ".", "datastructures", ".", "CombinedMultiDict", "that", "combines", ":", "attr", ":", "args", "and", ":", "attr", ":", "form", "." ]
python
train
36.666667
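A small sketch, using werkzeug's own test utilities, of a request that carries both query-string args and form data, showing how `values` exposes the combined view.

from werkzeug.test import EnvironBuilder
from werkzeug.wrappers import Request

builder = EnvironBuilder(method='POST', query_string='a=1', data={'b': '2'})
request = Request(builder.get_environ())
print(request.values['a'], request.values['b'])   # '1' '2', from args and form combined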
arviz-devs/arviz
arviz/plots/violinplot.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/plots/violinplot.py#L127-L137
def cat_hist(val, shade, ax, **kwargs_shade): """Auxiliary function to plot discrete-violinplots.""" bins = get_bins(val) binned_d, _ = np.histogram(val, bins=bins, normed=True) bin_edges = np.linspace(np.min(val), np.max(val), len(bins)) centers = 0.5 * (bin_edges + np.roll(bin_edges, 1))[:-1] heights = np.diff(bin_edges) lefts = -0.5 * binned_d ax.barh(centers, binned_d, height=heights, left=lefts, alpha=shade, **kwargs_shade)
[ "def", "cat_hist", "(", "val", ",", "shade", ",", "ax", ",", "*", "*", "kwargs_shade", ")", ":", "bins", "=", "get_bins", "(", "val", ")", "binned_d", ",", "_", "=", "np", ".", "histogram", "(", "val", ",", "bins", "=", "bins", ",", "normed", "=", "True", ")", "bin_edges", "=", "np", ".", "linspace", "(", "np", ".", "min", "(", "val", ")", ",", "np", ".", "max", "(", "val", ")", ",", "len", "(", "bins", ")", ")", "centers", "=", "0.5", "*", "(", "bin_edges", "+", "np", ".", "roll", "(", "bin_edges", ",", "1", ")", ")", "[", ":", "-", "1", "]", "heights", "=", "np", ".", "diff", "(", "bin_edges", ")", "lefts", "=", "-", "0.5", "*", "binned_d", "ax", ".", "barh", "(", "centers", ",", "binned_d", ",", "height", "=", "heights", ",", "left", "=", "lefts", ",", "alpha", "=", "shade", ",", "*", "*", "kwargs_shade", ")" ]
Auxiliary function to plot discrete-violinplots.
[ "Auxiliary", "function", "to", "plot", "discrete", "-", "violinplots", "." ]
python
train
41.454545
saltstack/salt
salt/modules/saltutil.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/saltutil.py#L1376-L1389
def term_all_jobs(): ''' Sends a termination signal (SIGTERM 15) to all currently running jobs CLI Example: .. code-block:: bash salt '*' saltutil.term_all_jobs ''' ret = [] for data in running(): ret.append(signal_job(data['jid'], signal.SIGTERM)) return ret
[ "def", "term_all_jobs", "(", ")", ":", "ret", "=", "[", "]", "for", "data", "in", "running", "(", ")", ":", "ret", ".", "append", "(", "signal_job", "(", "data", "[", "'jid'", "]", ",", "signal", ".", "SIGTERM", ")", ")", "return", "ret" ]
Sends a termination signal (SIGTERM 15) to all currently running jobs CLI Example: .. code-block:: bash salt '*' saltutil.term_all_jobs
[ "Sends", "a", "termination", "signal", "(", "SIGTERM", "15", ")", "to", "all", "currently", "running", "jobs" ]
python
train
21.214286
lwgray/pyEntrezId
PyEntrezId/Conversion.py
https://github.com/lwgray/pyEntrezId/blob/28286cf21b876dd4894bf21a222dfd1022441b75/PyEntrezId/Conversion.py#L27-L47
def convert_ensembl_to_entrez(self, ensembl): """Convert Ensembl Id to Entrez Gene Id""" if 'ENST' in ensembl: pass else: raise (IndexError) # Submit resquest to NCBI eutils/Gene database server = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?" + self.options + "&db=gene&term={0}".format( ensembl) r = requests.get(server, headers={"Content-Type": "text/xml"}) if not r.ok: r.raise_for_status() sys.exit() # Process Request response = r.text info = xmltodict.parse(response) try: geneId = info['eSearchResult']['IdList']['Id'] except TypeError: raise (TypeError) return geneId
[ "def", "convert_ensembl_to_entrez", "(", "self", ",", "ensembl", ")", ":", "if", "'ENST'", "in", "ensembl", ":", "pass", "else", ":", "raise", "(", "IndexError", ")", "# Submit resquest to NCBI eutils/Gene database", "server", "=", "\"http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?\"", "+", "self", ".", "options", "+", "\"&db=gene&term={0}\"", ".", "format", "(", "ensembl", ")", "r", "=", "requests", ".", "get", "(", "server", ",", "headers", "=", "{", "\"Content-Type\"", ":", "\"text/xml\"", "}", ")", "if", "not", "r", ".", "ok", ":", "r", ".", "raise_for_status", "(", ")", "sys", ".", "exit", "(", ")", "# Process Request", "response", "=", "r", ".", "text", "info", "=", "xmltodict", ".", "parse", "(", "response", ")", "try", ":", "geneId", "=", "info", "[", "'eSearchResult'", "]", "[", "'IdList'", "]", "[", "'Id'", "]", "except", "TypeError", ":", "raise", "(", "TypeError", ")", "return", "geneId" ]
Convert Ensembl Id to Entrez Gene Id
[ "Convert", "Ensembl", "Id", "to", "Entrez", "Gene", "Id" ]
python
train
36.095238
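A hedged usage sketch following the pyEntrezId README pattern, where Conversion is constructed with a contact e-mail address (NCBI asks for one). The e-mail is a placeholder, the transcript id is only an example, and the call performs a live request against the eutils API.

from PyEntrezId.Conversion import Conversion

# Placeholder e-mail; the call below hits the live NCBI eutils service.
converter = Conversion('[email protected]')
entrez_id = converter.convert_ensembl_to_entrez('ENST00000407559')
print(entrez_id)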
user-cont/conu
conu/backend/nspawn/container.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/nspawn/container.py#L128-L145
def get_metadata(self, refresh=True): """ return cached metadata by default :param refresh: bool, returns up to date metadata if set to True :return: dict """ if refresh or not self._metadata: ident = self._id or self.name if not ident: raise ConuException( "This container does not have a valid identifier.") out = run_cmd(["machinectl", "--no-pager", "show", ident], return_output=True, ignore_status=True) if "Could not get path to machine" in out: self._metadata = {} else: self._metadata = convert_kv_to_dict(out) return self._metadata
[ "def", "get_metadata", "(", "self", ",", "refresh", "=", "True", ")", ":", "if", "refresh", "or", "not", "self", ".", "_metadata", ":", "ident", "=", "self", ".", "_id", "or", "self", ".", "name", "if", "not", "ident", ":", "raise", "ConuException", "(", "\"This container does not have a valid identifier.\"", ")", "out", "=", "run_cmd", "(", "[", "\"machinectl\"", ",", "\"--no-pager\"", ",", "\"show\"", ",", "ident", "]", ",", "return_output", "=", "True", ",", "ignore_status", "=", "True", ")", "if", "\"Could not get path to machine\"", "in", "out", ":", "self", ".", "_metadata", "=", "{", "}", "else", ":", "self", ".", "_metadata", "=", "convert_kv_to_dict", "(", "out", ")", "return", "self", ".", "_metadata" ]
return cached metadata by default :param refresh: bool, returns up to date metadata if set to True :return: dict
[ "return", "cached", "metadata", "by", "default" ]
python
train
39.333333
BerkeleyAutomation/perception
perception/image.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/image.py#L1548-L1568
def threshold(self, front_thresh=0.0, rear_thresh=100.0): """Creates a new DepthImage by setting all depths less than front_thresh and greater than rear_thresh to 0. Parameters ---------- front_thresh : float The lower-bound threshold. rear_thresh : float The upper bound threshold. Returns ------- :obj:`DepthImage` A new DepthImage created from the thresholding operation. """ data = np.copy(self._data) data[data < front_thresh] = 0.0 data[data > rear_thresh] = 0.0 return DepthImage(data, self._frame)
[ "def", "threshold", "(", "self", ",", "front_thresh", "=", "0.0", ",", "rear_thresh", "=", "100.0", ")", ":", "data", "=", "np", ".", "copy", "(", "self", ".", "_data", ")", "data", "[", "data", "<", "front_thresh", "]", "=", "0.0", "data", "[", "data", ">", "rear_thresh", "]", "=", "0.0", "return", "DepthImage", "(", "data", ",", "self", ".", "_frame", ")" ]
Creates a new DepthImage by setting all depths less than front_thresh and greater than rear_thresh to 0. Parameters ---------- front_thresh : float The lower-bound threshold. rear_thresh : float The upper bound threshold. Returns ------- :obj:`DepthImage` A new DepthImage created from the thresholding operation.
[ "Creates", "a", "new", "DepthImage", "by", "setting", "all", "depths", "less", "than", "front_thresh", "and", "greater", "than", "rear_thresh", "to", "0", "." ]
python
train
30.285714
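A sketch of thresholding a depth map. The synthetic array stands in for real sensor data, the 'camera' frame name is arbitrary, and the import assumes the package exports DepthImage at the top level as in its documentation.

import numpy as np
from perception import DepthImage

# Synthetic depth map in meters; 'camera' is an arbitrary frame name.
depth = np.random.uniform(0.0, 2.0, size=(480, 640)).astype(np.float32)
im = DepthImage(depth, frame='camera')
clipped = im.threshold(front_thresh=0.3, rear_thresh=1.5)  # depths outside [0.3, 1.5] become 0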
storborg/replaylib
replaylib/noseplugin.py
https://github.com/storborg/replaylib/blob/16bc3752bb992e3fb364fce9bd7c3f95e887a42d/replaylib/noseplugin.py#L14-L25
def options(self, parser, env=os.environ): "Add options to nosetests." parser.add_option("--%s-record" % self.name, action="store", metavar="FILE", dest="record_filename", help="Record actions to this file.") parser.add_option("--%s-playback" % self.name, action="store", metavar="FILE", dest="playback_filename", help="Playback actions from this file.")
[ "def", "options", "(", "self", ",", "parser", ",", "env", "=", "os", ".", "environ", ")", ":", "parser", ".", "add_option", "(", "\"--%s-record\"", "%", "self", ".", "name", ",", "action", "=", "\"store\"", ",", "metavar", "=", "\"FILE\"", ",", "dest", "=", "\"record_filename\"", ",", "help", "=", "\"Record actions to this file.\"", ")", "parser", ".", "add_option", "(", "\"--%s-playback\"", "%", "self", ".", "name", ",", "action", "=", "\"store\"", ",", "metavar", "=", "\"FILE\"", ",", "dest", "=", "\"playback_filename\"", ",", "help", "=", "\"Playback actions from this file.\"", ")" ]
Add options to nosetests.
[ "Add", "options", "to", "nosetests", "." ]
python
train
47.916667
wummel/dosage
dosagelib/director.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/director.py#L91-L96
def getStrips(self, scraperobj): """Download comic strips.""" with lock: host_lock = get_host_lock(scraperobj.url) with host_lock: self._getStrips(scraperobj)
[ "def", "getStrips", "(", "self", ",", "scraperobj", ")", ":", "with", "lock", ":", "host_lock", "=", "get_host_lock", "(", "scraperobj", ".", "url", ")", "with", "host_lock", ":", "self", ".", "_getStrips", "(", "scraperobj", ")" ]
Download comic strips.
[ "Download", "comic", "strips", "." ]
python
train
33.5
nooperpudd/weibopy
weibopy/auth.py
https://github.com/nooperpudd/weibopy/blob/61f3fb0502c1f07a591388aaa7526e74c63eaeb1/weibopy/auth.py#L92-L123
def request(self, method, suffix, data): """ :param method: str, http method ["GET","POST","PUT"] :param suffix: the url suffix :param data: :return: """ url = self.site_url + suffix response = self.session.request(method, url, data=data) if response.status_code == 200: json_obj = response.json() if isinstance(json_obj, dict) and json_obj.get("error_code"): raise WeiboOauth2Error( json_obj.get("error_code"), json_obj.get("error"), json_obj.get('error_description') ) else: return json_obj else: raise WeiboRequestError( "Weibo API request error: status code: {code} url:{url} ->" " method:{method}: data={data}".format( code=response.status_code, url=response.url, method=method, data=data ) )
[ "def", "request", "(", "self", ",", "method", ",", "suffix", ",", "data", ")", ":", "url", "=", "self", ".", "site_url", "+", "suffix", "response", "=", "self", ".", "session", ".", "request", "(", "method", ",", "url", ",", "data", "=", "data", ")", "if", "response", ".", "status_code", "==", "200", ":", "json_obj", "=", "response", ".", "json", "(", ")", "if", "isinstance", "(", "json_obj", ",", "dict", ")", "and", "json_obj", ".", "get", "(", "\"error_code\"", ")", ":", "raise", "WeiboOauth2Error", "(", "json_obj", ".", "get", "(", "\"error_code\"", ")", ",", "json_obj", ".", "get", "(", "\"error\"", ")", ",", "json_obj", ".", "get", "(", "'error_description'", ")", ")", "else", ":", "return", "json_obj", "else", ":", "raise", "WeiboRequestError", "(", "\"Weibo API request error: status code: {code} url:{url} ->\"", "\" method:{method}: data={data}\"", ".", "format", "(", "code", "=", "response", ".", "status_code", ",", "url", "=", "response", ".", "url", ",", "method", "=", "method", ",", "data", "=", "data", ")", ")" ]
:param method: str, http method ["GET","POST","PUT"] :param suffix: the url suffix :param data: :return:
[ ":", "param", "method", ":", "str", "http", "method", "[", "GET", "POST", "PUT", "]", ":", "param", "suffix", ":", "the", "url", "suffix", ":", "param", "data", ":", ":", "return", ":" ]
python
train
32.65625
PmagPy/PmagPy
pmagpy/ipmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1408-L1423
def lat_from_pole(ref_loc_lon, ref_loc_lat, pole_plon, pole_plat): """ Calculate paleolatitude for a reference location based on a paleomagnetic pole Required Parameters ---------- ref_loc_lon: longitude of reference location in degrees ref_loc_lat: latitude of reference location pole_plon: paleopole longitude in degrees pole_plat: paleopole latitude in degrees """ ref_loc = (ref_loc_lon, ref_loc_lat) pole = (pole_plon, pole_plat) paleo_lat = 90 - pmag.angle(pole, ref_loc) return float(paleo_lat)
[ "def", "lat_from_pole", "(", "ref_loc_lon", ",", "ref_loc_lat", ",", "pole_plon", ",", "pole_plat", ")", ":", "ref_loc", "=", "(", "ref_loc_lon", ",", "ref_loc_lat", ")", "pole", "=", "(", "pole_plon", ",", "pole_plat", ")", "paleo_lat", "=", "90", "-", "pmag", ".", "angle", "(", "pole", ",", "ref_loc", ")", "return", "float", "(", "paleo_lat", ")" ]
Calculate paleolatitude for a reference location based on a paleomagnetic pole Required Parameters ---------- ref_loc_lon: longitude of reference location in degrees ref_loc_lat: latitude of reference location pole_plon: paleopole longitude in degrees pole_plat: paleopole latitude in degrees
[ "Calculate", "paleolatitude", "for", "a", "reference", "location", "based", "on", "a", "paleomagnetic", "pole" ]
python
train
33.75
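A quick numeric check of the function: when the paleopole sits at the geographic north pole, the angular distance to the site equals its colatitude, so the paleolatitude should equal the site latitude.

from pmagpy import ipmag

# Pole at plon=0, plat=90 (geographic north); a site at 45 N should give 45.
print(ipmag.lat_from_pole(ref_loc_lon=0, ref_loc_lat=45, pole_plon=0, pole_plat=90))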
dogoncouch/logdissect
logdissect/parsers/linejson.py
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/parsers/linejson.py#L41-L59
def parse_file(self, sourcepath): """Parse an object-per-line JSON file into a log data dict""" # Open input file and read JSON array: with open(sourcepath, 'r') as logfile: jsonlist = logfile.readlines() # Set our attributes for this entry and add it to data.entries: data = {} data['entries'] = [] for line in jsonlist: entry = self.parse_line(line) data['entries'].append(entry) if self.tzone: for e in data['entries']: e['tzone'] = self.tzone # Return the parsed data return data
[ "def", "parse_file", "(", "self", ",", "sourcepath", ")", ":", "# Open input file and read JSON array:", "with", "open", "(", "sourcepath", ",", "'r'", ")", "as", "logfile", ":", "jsonlist", "=", "logfile", ".", "readlines", "(", ")", "# Set our attributes for this entry and add it to data.entries:", "data", "=", "{", "}", "data", "[", "'entries'", "]", "=", "[", "]", "for", "line", "in", "jsonlist", ":", "entry", "=", "self", ".", "parse_line", "(", "line", ")", "data", "[", "'entries'", "]", ".", "append", "(", "entry", ")", "if", "self", ".", "tzone", ":", "for", "e", "in", "data", "[", "'entries'", "]", ":", "e", "[", "'tzone'", "]", "=", "self", ".", "tzone", "# Return the parsed data", "return", "data" ]
Parse an object-per-line JSON file into a log data dict
[ "Parse", "an", "object", "-", "per", "-", "line", "JSON", "file", "into", "a", "log", "data", "dict" ]
python
train
32.210526
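parse_file above reads object-per-line (NDJSON) input; a self-contained sketch of the same idea using only the standard json module (the file name and field names are hypothetical, not from logdissect).

import json

# Each line of 'events.ndjson' is assumed to hold one JSON object, e.g. {"ts": "...", "msg": "..."}.
data = {'entries': []}
with open('events.ndjson', 'r') as logfile:
    for line in logfile:
        line = line.strip()
        if line:  # skip blank lines
            data['entries'].append(json.loads(line))
print(len(data['entries']), 'entries parsed')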
JarryShaw/PyPCAPKit
src/protocols/internet/ipv4.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/ipv4.py#L448-L505
def _read_mode_route(self, size, kind): """Read options with route data. Positional arguments: * size - int, length of option * kind - int, 7/131/137 (RR/LSR/SSR) Returns: * dict -- extracted option with route data Structure of these options: * [RFC 791] Loose Source Route +--------+--------+--------+---------//--------+ |10000011| length | pointer| route data | +--------+--------+--------+---------//--------+ * [RFC 791] Strict Source Route +--------+--------+--------+---------//--------+ |10001001| length | pointer| route data | +--------+--------+--------+---------//--------+ * [RFC 791] Record Route +--------+--------+--------+---------//--------+ |00000111| length | pointer| route data | +--------+--------+--------+---------//--------+ Octets Bits Name Description 0 0 ip.opt.kind Kind (7/131/137) 0 0 ip.opt.type.copy Copied Flag (0) 0 1 ip.opt.type.class Option Class (0/1) 0 3 ip.opt.type.number Option Number (3/7/9) 1 8 ip.opt.length Length 2 16 ip.opt.pointer Pointer (≥4) 3 24 ip.opt.data Route Data """ if size < 3 or (size - 3) % 4 != 0: raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format') _rptr = self._read_unpack(1) if _rptr < 4: raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format') data = dict( kind=kind, type=self._read_opt_type(kind), length=size, pointer=_rptr, ) counter = 4 address = list() endpoint = min(_rptr, size) while counter < endpoint: counter += 4 address.append(self._read_ipv4_addr()) data['ip'] = address or None return data
[ "def", "_read_mode_route", "(", "self", ",", "size", ",", "kind", ")", ":", "if", "size", "<", "3", "or", "(", "size", "-", "3", ")", "%", "4", "!=", "0", ":", "raise", "ProtocolError", "(", "f'{self.alias}: [Optno {kind}] invalid format'", ")", "_rptr", "=", "self", ".", "_read_unpack", "(", "1", ")", "if", "_rptr", "<", "4", ":", "raise", "ProtocolError", "(", "f'{self.alias}: [Optno {kind}] invalid format'", ")", "data", "=", "dict", "(", "kind", "=", "kind", ",", "type", "=", "self", ".", "_read_opt_type", "(", "kind", ")", ",", "length", "=", "size", ",", "pointer", "=", "_rptr", ",", ")", "counter", "=", "4", "address", "=", "list", "(", ")", "endpoint", "=", "min", "(", "_rptr", ",", "size", ")", "while", "counter", "<", "endpoint", ":", "counter", "+=", "4", "address", ".", "append", "(", "self", ".", "_read_ipv4_addr", "(", ")", ")", "data", "[", "'ip'", "]", "=", "address", "or", "None", "return", "data" ]
Read options with route data. Positional arguments: * size - int, length of option * kind - int, 7/131/137 (RR/LSR/SSR) Returns: * dict -- extracted option with route data Structure of these options: * [RFC 791] Loose Source Route +--------+--------+--------+---------//--------+ |10000011| length | pointer| route data | +--------+--------+--------+---------//--------+ * [RFC 791] Strict Source Route +--------+--------+--------+---------//--------+ |10001001| length | pointer| route data | +--------+--------+--------+---------//--------+ * [RFC 791] Record Route +--------+--------+--------+---------//--------+ |00000111| length | pointer| route data | +--------+--------+--------+---------//--------+ Octets Bits Name Description 0 0 ip.opt.kind Kind (7/131/137) 0 0 ip.opt.type.copy Copied Flag (0) 0 1 ip.opt.type.class Option Class (0/1) 0 3 ip.opt.type.number Option Number (3/7/9) 1 8 ip.opt.length Length 2 16 ip.opt.pointer Pointer (≥4) 3 24 ip.opt.data Route Data
[ "Read", "options", "with", "route", "data", "." ]
python
train
38.172414
lreis2415/PyGeoC
pygeoc/TauDEM.py
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L643-L763
def watershed_delineation(np, dem, outlet_file=None, thresh=0, singlebasin=False, workingdir=None, mpi_bin=None, bin_dir=None, logfile=None, runtime_file=None, hostfile=None): """Watershed Delineation.""" # 1. Check directories if not os.path.exists(dem): TauDEM.error('DEM: %s is not existed!' % dem) dem = os.path.abspath(dem) if workingdir is None: workingdir = os.path.dirname(dem) namecfg = TauDEMFilesUtils(workingdir) workingdir = namecfg.workspace UtilClass.mkdir(workingdir) # 2. Check log file if logfile is not None and FileClass.is_file_exists(logfile): os.remove(logfile) # 3. Get predefined intermediate file names filled_dem = namecfg.filldem flow_dir = namecfg.d8flow slope = namecfg.slp flow_dir_dinf = namecfg.dinf slope_dinf = namecfg.dinf_slp dir_code_dinf = namecfg.dinf_d8dir weight_dinf = namecfg.dinf_weight acc = namecfg.d8acc stream_raster = namecfg.stream_raster default_outlet = namecfg.outlet_pre modified_outlet = namecfg.outlet_m stream_skeleton = namecfg.stream_pd acc_with_weight = namecfg.d8acc_weight stream_order = namecfg.stream_order ch_network = namecfg.channel_net ch_coord = namecfg.channel_coord stream_net = namecfg.streamnet_shp subbasin = namecfg.subbsn dist2_stream_d8 = namecfg.dist2stream_d8 # 4. perform calculation UtilClass.writelog(logfile, '[Output] %d..., %s' % (10, 'pitremove DEM...'), 'a') TauDEM.pitremove(np, dem, filled_dem, workingdir, mpi_bin, bin_dir, log_file=logfile, runtime_file=runtime_file, hostfile=hostfile) UtilClass.writelog(logfile, '[Output] %d..., %s' % (20, 'Calculating D8 and Dinf flow direction...'), 'a') TauDEM.d8flowdir(np, filled_dem, flow_dir, slope, workingdir, mpi_bin, bin_dir, log_file=logfile, runtime_file=runtime_file, hostfile=hostfile) TauDEM.dinfflowdir(np, filled_dem, flow_dir_dinf, slope_dinf, workingdir, mpi_bin, bin_dir, log_file=logfile, runtime_file=runtime_file, hostfile=hostfile) DinfUtil.output_compressed_dinf(flow_dir_dinf, dir_code_dinf, weight_dinf) UtilClass.writelog(logfile, '[Output] %d..., %s' % (30, 'D8 flow accumulation...'), 'a') TauDEM.aread8(np, flow_dir, acc, None, None, False, workingdir, mpi_bin, bin_dir, log_file=logfile, runtime_file=runtime_file, hostfile=hostfile) UtilClass.writelog(logfile, '[Output] %d..., %s' % (40, 'Generating stream raster initially...'), 'a') min_accum, max_accum, mean_accum, std_accum = RasterUtilClass.raster_statistics(acc) TauDEM.threshold(np, acc, stream_raster, mean_accum, workingdir, mpi_bin, bin_dir, log_file=logfile, runtime_file=runtime_file, hostfile=hostfile) UtilClass.writelog(logfile, '[Output] %d..., %s' % (50, 'Moving outlet to stream...'), 'a') if outlet_file is None: outlet_file = default_outlet TauDEM.connectdown(np, flow_dir, acc, outlet_file, wtsd=None, workingdir=workingdir, mpiexedir=mpi_bin, exedir=bin_dir, log_file=logfile, runtime_file=runtime_file, hostfile=hostfile) TauDEM.moveoutletstostrm(np, flow_dir, stream_raster, outlet_file, modified_outlet, workingdir, mpi_bin, bin_dir, log_file=logfile, runtime_file=runtime_file, hostfile=hostfile) UtilClass.writelog(logfile, '[Output] %d..., %s' % (60, 'Generating stream skeleton...'), 'a') TauDEM.peukerdouglas(np, filled_dem, stream_skeleton, workingdir, mpi_bin, bin_dir, log_file=logfile, runtime_file=runtime_file, hostfile=hostfile) UtilClass.writelog(logfile, '[Output] %d..., %s' % (70, 'Flow accumulation with outlet...'), 'a') tmp_outlet = None if singlebasin: tmp_outlet = modified_outlet TauDEM.aread8(np, flow_dir, acc_with_weight, tmp_outlet, stream_skeleton, False, workingdir, mpi_bin, bin_dir, 
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile) if thresh <= 0: # find the optimal threshold using dropanalysis function UtilClass.writelog(logfile, '[Output] %d..., %s' % (75, 'Drop analysis to select optimal threshold...'), 'a') min_accum, max_accum, mean_accum, std_accum = \ RasterUtilClass.raster_statistics(acc_with_weight) if mean_accum - std_accum < 0: minthresh = mean_accum else: minthresh = mean_accum - std_accum maxthresh = mean_accum + std_accum numthresh = 20 logspace = 'true' drp_file = namecfg.drptxt TauDEM.dropanalysis(np, filled_dem, flow_dir, acc_with_weight, acc_with_weight, modified_outlet, minthresh, maxthresh, numthresh, logspace, drp_file, workingdir, mpi_bin, bin_dir, log_file=logfile, runtime_file=runtime_file, hostfile=hostfile) if not FileClass.is_file_exists(drp_file): raise RuntimeError('Dropanalysis failed and drp.txt was not created!') with open(drp_file, 'r', encoding='utf-8') as drpf: temp_contents = drpf.read() (beg, thresh) = temp_contents.rsplit(' ', 1) print(thresh) UtilClass.writelog(logfile, '[Output] %d..., %s' % (80, 'Generating stream raster...'), 'a') TauDEM.threshold(np, acc_with_weight, stream_raster, float(thresh), workingdir, mpi_bin, bin_dir, log_file=logfile, runtime_file=runtime_file, hostfile=hostfile) UtilClass.writelog(logfile, '[Output] %d..., %s' % (90, 'Generating stream net...'), 'a') TauDEM.streamnet(np, filled_dem, flow_dir, acc_with_weight, stream_raster, modified_outlet, stream_order, ch_network, ch_coord, stream_net, subbasin, workingdir, mpi_bin, bin_dir, log_file=logfile, runtime_file=runtime_file, hostfile=hostfile) UtilClass.writelog(logfile, '[Output] %d..., %s' % (95, 'Calculating distance to stream (D8)...'), 'a') TauDEM.d8hdisttostrm(np, flow_dir, stream_raster, dist2_stream_d8, 1, workingdir, mpi_bin, bin_dir, log_file=logfile, runtime_file=runtime_file, hostfile=hostfile) UtilClass.writelog(logfile, '[Output] %d.., %s' % (100, 'Original subbasin delineation is finished!'), 'a')
[ "def", "watershed_delineation", "(", "np", ",", "dem", ",", "outlet_file", "=", "None", ",", "thresh", "=", "0", ",", "singlebasin", "=", "False", ",", "workingdir", "=", "None", ",", "mpi_bin", "=", "None", ",", "bin_dir", "=", "None", ",", "logfile", "=", "None", ",", "runtime_file", "=", "None", ",", "hostfile", "=", "None", ")", ":", "# 1. Check directories", "if", "not", "os", ".", "path", ".", "exists", "(", "dem", ")", ":", "TauDEM", ".", "error", "(", "'DEM: %s is not existed!'", "%", "dem", ")", "dem", "=", "os", ".", "path", ".", "abspath", "(", "dem", ")", "if", "workingdir", "is", "None", ":", "workingdir", "=", "os", ".", "path", ".", "dirname", "(", "dem", ")", "namecfg", "=", "TauDEMFilesUtils", "(", "workingdir", ")", "workingdir", "=", "namecfg", ".", "workspace", "UtilClass", ".", "mkdir", "(", "workingdir", ")", "# 2. Check log file", "if", "logfile", "is", "not", "None", "and", "FileClass", ".", "is_file_exists", "(", "logfile", ")", ":", "os", ".", "remove", "(", "logfile", ")", "# 3. Get predefined intermediate file names", "filled_dem", "=", "namecfg", ".", "filldem", "flow_dir", "=", "namecfg", ".", "d8flow", "slope", "=", "namecfg", ".", "slp", "flow_dir_dinf", "=", "namecfg", ".", "dinf", "slope_dinf", "=", "namecfg", ".", "dinf_slp", "dir_code_dinf", "=", "namecfg", ".", "dinf_d8dir", "weight_dinf", "=", "namecfg", ".", "dinf_weight", "acc", "=", "namecfg", ".", "d8acc", "stream_raster", "=", "namecfg", ".", "stream_raster", "default_outlet", "=", "namecfg", ".", "outlet_pre", "modified_outlet", "=", "namecfg", ".", "outlet_m", "stream_skeleton", "=", "namecfg", ".", "stream_pd", "acc_with_weight", "=", "namecfg", ".", "d8acc_weight", "stream_order", "=", "namecfg", ".", "stream_order", "ch_network", "=", "namecfg", ".", "channel_net", "ch_coord", "=", "namecfg", ".", "channel_coord", "stream_net", "=", "namecfg", ".", "streamnet_shp", "subbasin", "=", "namecfg", ".", "subbsn", "dist2_stream_d8", "=", "namecfg", ".", "dist2stream_d8", "# 4. 
perform calculation", "UtilClass", ".", "writelog", "(", "logfile", ",", "'[Output] %d..., %s'", "%", "(", "10", ",", "'pitremove DEM...'", ")", ",", "'a'", ")", "TauDEM", ".", "pitremove", "(", "np", ",", "dem", ",", "filled_dem", ",", "workingdir", ",", "mpi_bin", ",", "bin_dir", ",", "log_file", "=", "logfile", ",", "runtime_file", "=", "runtime_file", ",", "hostfile", "=", "hostfile", ")", "UtilClass", ".", "writelog", "(", "logfile", ",", "'[Output] %d..., %s'", "%", "(", "20", ",", "'Calculating D8 and Dinf flow direction...'", ")", ",", "'a'", ")", "TauDEM", ".", "d8flowdir", "(", "np", ",", "filled_dem", ",", "flow_dir", ",", "slope", ",", "workingdir", ",", "mpi_bin", ",", "bin_dir", ",", "log_file", "=", "logfile", ",", "runtime_file", "=", "runtime_file", ",", "hostfile", "=", "hostfile", ")", "TauDEM", ".", "dinfflowdir", "(", "np", ",", "filled_dem", ",", "flow_dir_dinf", ",", "slope_dinf", ",", "workingdir", ",", "mpi_bin", ",", "bin_dir", ",", "log_file", "=", "logfile", ",", "runtime_file", "=", "runtime_file", ",", "hostfile", "=", "hostfile", ")", "DinfUtil", ".", "output_compressed_dinf", "(", "flow_dir_dinf", ",", "dir_code_dinf", ",", "weight_dinf", ")", "UtilClass", ".", "writelog", "(", "logfile", ",", "'[Output] %d..., %s'", "%", "(", "30", ",", "'D8 flow accumulation...'", ")", ",", "'a'", ")", "TauDEM", ".", "aread8", "(", "np", ",", "flow_dir", ",", "acc", ",", "None", ",", "None", ",", "False", ",", "workingdir", ",", "mpi_bin", ",", "bin_dir", ",", "log_file", "=", "logfile", ",", "runtime_file", "=", "runtime_file", ",", "hostfile", "=", "hostfile", ")", "UtilClass", ".", "writelog", "(", "logfile", ",", "'[Output] %d..., %s'", "%", "(", "40", ",", "'Generating stream raster initially...'", ")", ",", "'a'", ")", "min_accum", ",", "max_accum", ",", "mean_accum", ",", "std_accum", "=", "RasterUtilClass", ".", "raster_statistics", "(", "acc", ")", "TauDEM", ".", "threshold", "(", "np", ",", "acc", ",", "stream_raster", ",", "mean_accum", ",", "workingdir", ",", "mpi_bin", ",", "bin_dir", ",", "log_file", "=", "logfile", ",", "runtime_file", "=", "runtime_file", ",", "hostfile", "=", "hostfile", ")", "UtilClass", ".", "writelog", "(", "logfile", ",", "'[Output] %d..., %s'", "%", "(", "50", ",", "'Moving outlet to stream...'", ")", ",", "'a'", ")", "if", "outlet_file", "is", "None", ":", "outlet_file", "=", "default_outlet", "TauDEM", ".", "connectdown", "(", "np", ",", "flow_dir", ",", "acc", ",", "outlet_file", ",", "wtsd", "=", "None", ",", "workingdir", "=", "workingdir", ",", "mpiexedir", "=", "mpi_bin", ",", "exedir", "=", "bin_dir", ",", "log_file", "=", "logfile", ",", "runtime_file", "=", "runtime_file", ",", "hostfile", "=", "hostfile", ")", "TauDEM", ".", "moveoutletstostrm", "(", "np", ",", "flow_dir", ",", "stream_raster", ",", "outlet_file", ",", "modified_outlet", ",", "workingdir", ",", "mpi_bin", ",", "bin_dir", ",", "log_file", "=", "logfile", ",", "runtime_file", "=", "runtime_file", ",", "hostfile", "=", "hostfile", ")", "UtilClass", ".", "writelog", "(", "logfile", ",", "'[Output] %d..., %s'", "%", "(", "60", ",", "'Generating stream skeleton...'", ")", ",", "'a'", ")", "TauDEM", ".", "peukerdouglas", "(", "np", ",", "filled_dem", ",", "stream_skeleton", ",", "workingdir", ",", "mpi_bin", ",", "bin_dir", ",", "log_file", "=", "logfile", ",", "runtime_file", "=", "runtime_file", ",", "hostfile", "=", "hostfile", ")", "UtilClass", ".", "writelog", "(", "logfile", ",", "'[Output] %d..., %s'", "%", "(", "70", ",", "'Flow accumulation with 
outlet...'", ")", ",", "'a'", ")", "tmp_outlet", "=", "None", "if", "singlebasin", ":", "tmp_outlet", "=", "modified_outlet", "TauDEM", ".", "aread8", "(", "np", ",", "flow_dir", ",", "acc_with_weight", ",", "tmp_outlet", ",", "stream_skeleton", ",", "False", ",", "workingdir", ",", "mpi_bin", ",", "bin_dir", ",", "log_file", "=", "logfile", ",", "runtime_file", "=", "runtime_file", ",", "hostfile", "=", "hostfile", ")", "if", "thresh", "<=", "0", ":", "# find the optimal threshold using dropanalysis function", "UtilClass", ".", "writelog", "(", "logfile", ",", "'[Output] %d..., %s'", "%", "(", "75", ",", "'Drop analysis to select optimal threshold...'", ")", ",", "'a'", ")", "min_accum", ",", "max_accum", ",", "mean_accum", ",", "std_accum", "=", "RasterUtilClass", ".", "raster_statistics", "(", "acc_with_weight", ")", "if", "mean_accum", "-", "std_accum", "<", "0", ":", "minthresh", "=", "mean_accum", "else", ":", "minthresh", "=", "mean_accum", "-", "std_accum", "maxthresh", "=", "mean_accum", "+", "std_accum", "numthresh", "=", "20", "logspace", "=", "'true'", "drp_file", "=", "namecfg", ".", "drptxt", "TauDEM", ".", "dropanalysis", "(", "np", ",", "filled_dem", ",", "flow_dir", ",", "acc_with_weight", ",", "acc_with_weight", ",", "modified_outlet", ",", "minthresh", ",", "maxthresh", ",", "numthresh", ",", "logspace", ",", "drp_file", ",", "workingdir", ",", "mpi_bin", ",", "bin_dir", ",", "log_file", "=", "logfile", ",", "runtime_file", "=", "runtime_file", ",", "hostfile", "=", "hostfile", ")", "if", "not", "FileClass", ".", "is_file_exists", "(", "drp_file", ")", ":", "raise", "RuntimeError", "(", "'Dropanalysis failed and drp.txt was not created!'", ")", "with", "open", "(", "drp_file", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "drpf", ":", "temp_contents", "=", "drpf", ".", "read", "(", ")", "(", "beg", ",", "thresh", ")", "=", "temp_contents", ".", "rsplit", "(", "' '", ",", "1", ")", "print", "(", "thresh", ")", "UtilClass", ".", "writelog", "(", "logfile", ",", "'[Output] %d..., %s'", "%", "(", "80", ",", "'Generating stream raster...'", ")", ",", "'a'", ")", "TauDEM", ".", "threshold", "(", "np", ",", "acc_with_weight", ",", "stream_raster", ",", "float", "(", "thresh", ")", ",", "workingdir", ",", "mpi_bin", ",", "bin_dir", ",", "log_file", "=", "logfile", ",", "runtime_file", "=", "runtime_file", ",", "hostfile", "=", "hostfile", ")", "UtilClass", ".", "writelog", "(", "logfile", ",", "'[Output] %d..., %s'", "%", "(", "90", ",", "'Generating stream net...'", ")", ",", "'a'", ")", "TauDEM", ".", "streamnet", "(", "np", ",", "filled_dem", ",", "flow_dir", ",", "acc_with_weight", ",", "stream_raster", ",", "modified_outlet", ",", "stream_order", ",", "ch_network", ",", "ch_coord", ",", "stream_net", ",", "subbasin", ",", "workingdir", ",", "mpi_bin", ",", "bin_dir", ",", "log_file", "=", "logfile", ",", "runtime_file", "=", "runtime_file", ",", "hostfile", "=", "hostfile", ")", "UtilClass", ".", "writelog", "(", "logfile", ",", "'[Output] %d..., %s'", "%", "(", "95", ",", "'Calculating distance to stream (D8)...'", ")", ",", "'a'", ")", "TauDEM", ".", "d8hdisttostrm", "(", "np", ",", "flow_dir", ",", "stream_raster", ",", "dist2_stream_d8", ",", "1", ",", "workingdir", ",", "mpi_bin", ",", "bin_dir", ",", "log_file", "=", "logfile", ",", "runtime_file", "=", "runtime_file", ",", "hostfile", "=", "hostfile", ")", "UtilClass", ".", "writelog", "(", "logfile", ",", "'[Output] %d.., %s'", "%", "(", "100", ",", "'Original subbasin delineation is finished!'", ")", ",", 
"'a'", ")" ]
Watershed Delineation.
[ "Watershed", "Delineation", "." ]
python
train
59.330579
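A hedged call sketch for watershed_delineation; the import location is assumed (PyGeoC groups its TauDEM helpers in pygeoc.TauDEM), and the DEM path, process count, and executable directories are placeholders.

from pygeoc.TauDEM import TauDEMWorkflow  # assumed import; adjust to the installed PyGeoC layout

# 4 MPI processes, a local DEM, thresh=0 so drop analysis picks the accumulation threshold.
TauDEMWorkflow.watershed_delineation(4, 'dem_30m.tif',
                                     outlet_file=None,       # let connectdown derive an outlet
                                     thresh=0,
                                     singlebasin=False,
                                     workingdir='taudem_ws',
                                     mpi_bin='/usr/bin',      # placeholder: directory containing mpiexec
                                     bin_dir='/opt/taudem')   # placeholder: directory containing TauDEM binaries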
StackStorm/pybind
pybind/nos/v6_0_2f/brocade_zone_rpc/show_zoning_enabled_configuration/output/enabled_configuration/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/brocade_zone_rpc/show_zoning_enabled_configuration/output/enabled_configuration/__init__.py#L127-L148
def _set_enabled_zone(self, v, load=False): """ Setter method for enabled_zone, mapped from YANG variable /brocade_zone_rpc/show_zoning_enabled_configuration/output/enabled_configuration/enabled_zone (list) If this variable is read-only (config: false) in the source YANG file, then _set_enabled_zone is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_enabled_zone() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("zone_name",enabled_zone.enabled_zone, yang_name="enabled-zone", rest_name="enabled-zone", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='zone-name', extensions={u'tailf-common': {u'info': u'List of enabled Zones'}}), is_container='list', yang_name="enabled-zone", rest_name="enabled-zone", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'List of enabled Zones'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """enabled_zone must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("zone_name",enabled_zone.enabled_zone, yang_name="enabled-zone", rest_name="enabled-zone", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='zone-name', extensions={u'tailf-common': {u'info': u'List of enabled Zones'}}), is_container='list', yang_name="enabled-zone", rest_name="enabled-zone", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'List of enabled Zones'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True)""", }) self.__enabled_zone = t if hasattr(self, '_set'): self._set()
[ "def", "_set_enabled_zone", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"zone_name\"", ",", "enabled_zone", ".", "enabled_zone", ",", "yang_name", "=", "\"enabled-zone\"", ",", "rest_name", "=", "\"enabled-zone\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'zone-name'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'List of enabled Zones'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"enabled-zone\"", ",", "rest_name", "=", "\"enabled-zone\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "False", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'List of enabled Zones'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-zone'", ",", "defining_module", "=", "'brocade-zone'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"enabled_zone must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"zone_name\",enabled_zone.enabled_zone, yang_name=\"enabled-zone\", rest_name=\"enabled-zone\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='zone-name', extensions={u'tailf-common': {u'info': u'List of enabled Zones'}}), is_container='list', yang_name=\"enabled-zone\", rest_name=\"enabled-zone\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'List of enabled Zones'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__enabled_zone", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for enabled_zone, mapped from YANG variable /brocade_zone_rpc/show_zoning_enabled_configuration/output/enabled_configuration/enabled_zone (list) If this variable is read-only (config: false) in the source YANG file, then _set_enabled_zone is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_enabled_zone() directly.
[ "Setter", "method", "for", "enabled_zone", "mapped", "from", "YANG", "variable", "/", "brocade_zone_rpc", "/", "show_zoning_enabled_configuration", "/", "output", "/", "enabled_configuration", "/", "enabled_zone", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_enabled_zone", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_enabled_zone", "()", "directly", "." ]
python
train
96.318182
hayd/pep8radius
pep8radius/main.py
https://github.com/hayd/pep8radius/blob/0c1d14835d390f7feeb602f35a768e52ce306a0a/pep8radius/main.py#L243-L264
def apply_config_defaults(parser, args, root): """Update the parser's defaults from either the arguments' config_arg or the config files given in config_files(root).""" if root is None: try: from pep8radius.vcs import VersionControl root = VersionControl.which().root_dir() except NotImplementedError: pass # don't update local, could be using as module config = SafeConfigParser() config.read(args.global_config) if root and not args.ignore_local_config: config.read(local_config_files(root)) try: defaults = dict((k.lstrip('-').replace('-', '_'), v) for k, v in config.items("pep8")) parser.set_defaults(**defaults) except NoSectionError: pass # just do nothing, potentially this could raise ? return parser
[ "def", "apply_config_defaults", "(", "parser", ",", "args", ",", "root", ")", ":", "if", "root", "is", "None", ":", "try", ":", "from", "pep8radius", ".", "vcs", "import", "VersionControl", "root", "=", "VersionControl", ".", "which", "(", ")", ".", "root_dir", "(", ")", "except", "NotImplementedError", ":", "pass", "# don't update local, could be using as module", "config", "=", "SafeConfigParser", "(", ")", "config", ".", "read", "(", "args", ".", "global_config", ")", "if", "root", "and", "not", "args", ".", "ignore_local_config", ":", "config", ".", "read", "(", "local_config_files", "(", "root", ")", ")", "try", ":", "defaults", "=", "dict", "(", "(", "k", ".", "lstrip", "(", "'-'", ")", ".", "replace", "(", "'-'", ",", "'_'", ")", ",", "v", ")", "for", "k", ",", "v", "in", "config", ".", "items", "(", "\"pep8\"", ")", ")", "parser", ".", "set_defaults", "(", "*", "*", "defaults", ")", "except", "NoSectionError", ":", "pass", "# just do nothing, potentially this could raise ?", "return", "parser" ]
Update the parser's defaults from either the arguments' config_arg or the config files given in config_files(root).
[ "Update", "the", "parser", "s", "defaults", "from", "either", "the", "arguments", "config_arg", "or", "the", "config", "files", "given", "in", "config_files", "(", "root", ")", "." ]
python
train
38
ewiger/mlab
src/mlab/awmstools.py
https://github.com/ewiger/mlab/blob/72a98adf6499f548848ad44c604f74d68f07fe4f/src/mlab/awmstools.py#L692-L713
def atIndices(indexable, indices, default=__unique): r"""Return a list of items in `indexable` at positions `indices`. Examples: >>> atIndices([1,2,3], [1,1,0]) [2, 2, 1] >>> atIndices([1,2,3], [1,1,0,4], 'default') [2, 2, 1, 'default'] >>> atIndices({'a':3, 'b':0}, ['a']) [3] """ if default is __unique: return [indexable[i] for i in indices] else: res = [] for i in indices: try: res.append(indexable[i]) except (IndexError, KeyError): res.append(default) return res
[ "def", "atIndices", "(", "indexable", ",", "indices", ",", "default", "=", "__unique", ")", ":", "if", "default", "is", "__unique", ":", "return", "[", "indexable", "[", "i", "]", "for", "i", "in", "indices", "]", "else", ":", "res", "=", "[", "]", "for", "i", "in", "indices", ":", "try", ":", "res", ".", "append", "(", "indexable", "[", "i", "]", ")", "except", "(", "IndexError", ",", "KeyError", ")", ":", "res", ".", "append", "(", "default", ")", "return", "res" ]
r"""Return a list of items in `indexable` at positions `indices`. Examples: >>> atIndices([1,2,3], [1,1,0]) [2, 2, 1] >>> atIndices([1,2,3], [1,1,0,4], 'default') [2, 2, 1, 'default'] >>> atIndices({'a':3, 'b':0}, ['a']) [3]
[ "r", "Return", "a", "list", "of", "items", "in", "indexable", "at", "positions", "indices", "." ]
python
train
26.454545
python-rope/rope
rope/base/oi/transform.py
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/oi/transform.py#L20-L29
def transform(self, pyobject): """Transform a `PyObject` to textual form""" if pyobject is None: return ('none',) object_type = type(pyobject) try: method = getattr(self, object_type.__name__ + '_to_textual') return method(pyobject) except AttributeError: return ('unknown',)
[ "def", "transform", "(", "self", ",", "pyobject", ")", ":", "if", "pyobject", "is", "None", ":", "return", "(", "'none'", ",", ")", "object_type", "=", "type", "(", "pyobject", ")", "try", ":", "method", "=", "getattr", "(", "self", ",", "object_type", ".", "__name__", "+", "'_to_textual'", ")", "return", "method", "(", "pyobject", ")", "except", "AttributeError", ":", "return", "(", "'unknown'", ",", ")" ]
Transform a `PyObject` to textual form
[ "Transform", "a", "PyObject", "to", "textual", "form" ]
python
train
35.4
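transform above dispatches on the runtime type name via getattr; a tiny standalone sketch of that pattern (class and method names here are illustrative, not rope's own).

class ToTextual(object):
    """Dispatch to <TypeName>_to_textual, falling back to ('unknown',)."""

    def transform(self, obj):
        if obj is None:
            return ('none',)
        method = getattr(self, type(obj).__name__ + '_to_textual', None)
        return method(obj) if method else ('unknown',)

    def int_to_textual(self, obj):
        return ('int', str(obj))

t = ToTextual()
print(t.transform(7))        # ('int', '7')
print(t.transform([1, 2]))   # ('unknown',)
print(t.transform(None))     # ('none',)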
lalinsky/python-phoenixdb
phoenixdb/avatica/client.py
https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L343-L358
def create_statement(self, connection_id): """Creates a new statement. :param connection_id: ID of the current connection. :returns: New statement ID. """ request = requests_pb2.CreateStatementRequest() request.connection_id = connection_id response_data = self._apply(request) response = responses_pb2.CreateStatementResponse() response.ParseFromString(response_data) return response.statement_id
[ "def", "create_statement", "(", "self", ",", "connection_id", ")", ":", "request", "=", "requests_pb2", ".", "CreateStatementRequest", "(", ")", "request", ".", "connection_id", "=", "connection_id", "response_data", "=", "self", ".", "_apply", "(", "request", ")", "response", "=", "responses_pb2", ".", "CreateStatementResponse", "(", ")", "response", ".", "ParseFromString", "(", "response_data", ")", "return", "response", ".", "statement_id" ]
Creates a new statement. :param connection_id: ID of the current connection. :returns: New statement ID.
[ "Creates", "a", "new", "statement", "." ]
python
train
30.5625
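create_statement above is the low-level Avatica RPC; most callers reach it through phoenixdb's DB-API layer, roughly as below (the query server URL is a placeholder and a running Phoenix Query Server is assumed).

import phoenixdb

conn = phoenixdb.connect('http://localhost:8765/', autocommit=True)  # placeholder PQS URL
cursor = conn.cursor()  # cursor creation issues a CreateStatementRequest under the hood
cursor.execute("CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, username VARCHAR)")
cursor.execute("UPSERT INTO users VALUES (?, ?)", (1, 'admin'))
cursor.execute("SELECT * FROM users")
print(cursor.fetchall())
cursor.close()
conn.close()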
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L1297-L1349
def ckw05(handle, subtype, degree, begtim, endtim, inst, ref, avflag, segid, sclkdp, packts, rate, nints, starts): """ Write a type 5 segment to a CK file. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckw05_c.html :param handle: Handle of an open CK file. :type handle: int :param subtype: CK type 5 subtype code. Can be: 0, 1, 2, 3 see naif docs via link above. :type subtype: int :param degree: Degree of interpolating polynomials. :type degree: int :param begtim: The beginning encoded SCLK of the segment. :type begtim: float :param endtim: The ending encoded SCLK of the segment. :type endtim: float :param inst: The NAIF instrument ID code. :type inst: int :param ref: The reference frame of the segment. :type ref: str :param avflag: True if the segment will contain angular velocity. :type avflag: bool :param segid: Segment identifier. :type segid: str :param sclkdp: Encoded SCLK times. :type sclkdp: Array of floats :param packts: Array of packets. :type packts: Some NxM vector of floats :param rate: Nominal SCLK rate in seconds per tick. :type rate: float :param nints: Number of intervals. :type nints: int :param starts: Encoded SCLK interval start times. :type starts: Array of floats """ handle = ctypes.c_int(handle) subtype = ctypes.c_int(subtype) degree = ctypes.c_int(degree) begtim = ctypes.c_double(begtim) endtim = ctypes.c_double(endtim) inst = ctypes.c_int(inst) ref = stypes.stringToCharP(ref) avflag = ctypes.c_int(avflag) segid = stypes.stringToCharP(segid) n = ctypes.c_int(len(packts)) sclkdp = stypes.toDoubleVector(sclkdp) packts = stypes.toDoubleMatrix(packts) rate = ctypes.c_double(rate) nints = ctypes.c_int(nints) starts = stypes.toDoubleVector(starts) libspice.ckw05_c(handle, subtype, degree, begtim, endtim, inst, ref, avflag, segid, n, sclkdp, packts, rate, nints, starts)
[ "def", "ckw05", "(", "handle", ",", "subtype", ",", "degree", ",", "begtim", ",", "endtim", ",", "inst", ",", "ref", ",", "avflag", ",", "segid", ",", "sclkdp", ",", "packts", ",", "rate", ",", "nints", ",", "starts", ")", ":", "handle", "=", "ctypes", ".", "c_int", "(", "handle", ")", "subtype", "=", "ctypes", ".", "c_int", "(", "subtype", ")", "degree", "=", "ctypes", ".", "c_int", "(", "degree", ")", "begtim", "=", "ctypes", ".", "c_double", "(", "begtim", ")", "endtim", "=", "ctypes", ".", "c_double", "(", "endtim", ")", "inst", "=", "ctypes", ".", "c_int", "(", "inst", ")", "ref", "=", "stypes", ".", "stringToCharP", "(", "ref", ")", "avflag", "=", "ctypes", ".", "c_int", "(", "avflag", ")", "segid", "=", "stypes", ".", "stringToCharP", "(", "segid", ")", "n", "=", "ctypes", ".", "c_int", "(", "len", "(", "packts", ")", ")", "sclkdp", "=", "stypes", ".", "toDoubleVector", "(", "sclkdp", ")", "packts", "=", "stypes", ".", "toDoubleMatrix", "(", "packts", ")", "rate", "=", "ctypes", ".", "c_double", "(", "rate", ")", "nints", "=", "ctypes", ".", "c_int", "(", "nints", ")", "starts", "=", "stypes", ".", "toDoubleVector", "(", "starts", ")", "libspice", ".", "ckw05_c", "(", "handle", ",", "subtype", ",", "degree", ",", "begtim", ",", "endtim", ",", "inst", ",", "ref", ",", "avflag", ",", "segid", ",", "n", ",", "sclkdp", ",", "packts", ",", "rate", ",", "nints", ",", "starts", ")" ]
Write a type 5 segment to a CK file. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckw05_c.html :param handle: Handle of an open CK file. :type handle: int :param subtype: CK type 5 subtype code. Can be: 0, 1, 2, 3 see naif docs via link above. :type subtype: int :param degree: Degree of interpolating polynomials. :type degree: int :param begtim: The beginning encoded SCLK of the segment. :type begtim: float :param endtim: The ending encoded SCLK of the segment. :type endtim: float :param inst: The NAIF instrument ID code. :type inst: int :param ref: The reference frame of the segment. :type ref: str :param avflag: True if the segment will contain angular velocity. :type avflag: bool :param segid: Segment identifier. :type segid: str :param sclkdp: Encoded SCLK times. :type sclkdp: Array of floats :param packts: Array of packets. :type packts: Some NxM vector of floats :param rate: Nominal SCLK rate in seconds per tick. :type rate: float :param nints: Number of intervals. :type nints: int :param starts: Encoded SCLK interval start times. :type starts: Array of floats
[ "Write", "a", "type", "5", "segment", "to", "a", "CK", "file", "." ]
python
train
38.264151
ReFirmLabs/binwalk
src/binwalk/modules/signature.py
https://github.com/ReFirmLabs/binwalk/blob/a0c5315fd2bae167e5c3d8469ce95d5defc743c2/src/binwalk/modules/signature.py#L107-L134
def validate(self, r): ''' Called automatically by self.result. ''' if self.show_invalid: r.valid = True elif r.valid: if not r.description: r.valid = False if r.size and (r.size + r.offset) > r.file.size: r.valid = False if r.jump and (r.jump + r.offset) > r.file.size: r.valid = False if hasattr(r, "location") and (r.location != r.offset): r.valid = False if r.valid: # Don't keep displaying signatures that repeat a bunch of times # (e.g., JFFS2 nodes) if r.id == self.one_of_many: r.display = False elif r.many: self.one_of_many = r.id else: self.one_of_many = None
[ "def", "validate", "(", "self", ",", "r", ")", ":", "if", "self", ".", "show_invalid", ":", "r", ".", "valid", "=", "True", "elif", "r", ".", "valid", ":", "if", "not", "r", ".", "description", ":", "r", ".", "valid", "=", "False", "if", "r", ".", "size", "and", "(", "r", ".", "size", "+", "r", ".", "offset", ")", ">", "r", ".", "file", ".", "size", ":", "r", ".", "valid", "=", "False", "if", "r", ".", "jump", "and", "(", "r", ".", "jump", "+", "r", ".", "offset", ")", ">", "r", ".", "file", ".", "size", ":", "r", ".", "valid", "=", "False", "if", "hasattr", "(", "r", ",", "\"location\"", ")", "and", "(", "r", ".", "location", "!=", "r", ".", "offset", ")", ":", "r", ".", "valid", "=", "False", "if", "r", ".", "valid", ":", "# Don't keep displaying signatures that repeat a bunch of times", "# (e.g., JFFS2 nodes)", "if", "r", ".", "id", "==", "self", ".", "one_of_many", ":", "r", ".", "display", "=", "False", "elif", "r", ".", "many", ":", "self", ".", "one_of_many", "=", "r", ".", "id", "else", ":", "self", ".", "one_of_many", "=", "None" ]
Called automatically by self.result.
[ "Called", "automatically", "by", "self", ".", "result", "." ]
python
train
29.535714
Toilal/rebulk
rebulk/loose.py
https://github.com/Toilal/rebulk/blob/7511a4671f2fd9493e3df1e5177b7656789069e8/rebulk/loose.py#L44-L60
def call(function, *args, **kwargs):
    """
    Call a function or constructor with given args and kwargs after removing args and kwargs that doesn't match
    function or constructor signature

    :param function: Function or constructor to call
    :type function: callable
    :param args:
    :type args:
    :param kwargs:
    :type kwargs:
    :return: same value as default function call
    :rtype: object
    """
    func = constructor_args if inspect.isclass(function) else function_args
    call_args, call_kwargs = func(function, *args, **kwargs)
    return function(*call_args, **call_kwargs)
[ "def", "call", "(", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "func", "=", "constructor_args", "if", "inspect", ".", "isclass", "(", "function", ")", "else", "function_args", "call_args", ",", "call_kwargs", "=", "func", "(", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "function", "(", "*", "call_args", ",", "*", "*", "call_kwargs", ")" ]
Call a function or constructor with given args and kwargs after removing args and kwargs that doesn't match
    function or constructor signature

    :param function: Function or constructor to call
    :type function: callable
    :param args:
    :type args:
    :param kwargs:
    :type kwargs:
    :return: same value as default function call
    :rtype: object
[ "Call", "a", "function", "or", "constructor", "with", "given", "args", "and", "kwargs", "after", "removing", "args", "and", "kwargs", "that", "doesn", "t", "match", "function", "or", "constructor", "signature" ]
python
train
34.764706
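A hedged illustration of what call above is for: arguments that the target function cannot accept are dropped before invocation instead of raising TypeError (the greet function and its output are illustrative).

from rebulk.loose import call  # assumes rebulk is installed

def greet(name, punctuation='!'):
    return 'hello ' + name + punctuation

# 'color' is not a parameter of greet(), so call() filters it out before invoking.
print(call(greet, name='world', color='red'))   # expected: 'hello world!'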
serge-sans-paille/pythran
pythran/toolchain.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/toolchain.py#L118-L128
def generate_py(module_name, code, optimizations=None, module_dir=None): '''python + pythran spec -> py code Prints and returns the optimized python code. ''' pm, ir, _, _ = front_middle_end(module_name, code, optimizations, module_dir) return pm.dump(Python, ir)
[ "def", "generate_py", "(", "module_name", ",", "code", ",", "optimizations", "=", "None", ",", "module_dir", "=", "None", ")", ":", "pm", ",", "ir", ",", "_", ",", "_", "=", "front_middle_end", "(", "module_name", ",", "code", ",", "optimizations", ",", "module_dir", ")", "return", "pm", ".", "dump", "(", "Python", ",", "ir", ")" ]
python + pythran spec -> py code Prints and returns the optimized python code.
[ "python", "+", "pythran", "spec", "-", ">", "py", "code" ]
python
train
28.454545
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/snmp_server/user/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/snmp_server/user/__init__.py#L234-L255
def _set_priv(self, v, load=False): """ Setter method for priv, mapped from YANG variable /rbridge_id/snmp_server/user/priv (enumeration) If this variable is read-only (config: false) in the source YANG file, then _set_priv is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_priv() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'AES128': {'value': 2}, u'DES': {'value': 0}, u'nopriv': {'value': 1}},), default=unicode("nopriv"), is_leaf=True, yang_name="priv", rest_name="priv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Privacy protocol for username (Default=nopriv)'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='enumeration', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """priv must be of a type compatible with enumeration""", 'defined-type': "brocade-snmp:enumeration", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'AES128': {'value': 2}, u'DES': {'value': 0}, u'nopriv': {'value': 1}},), default=unicode("nopriv"), is_leaf=True, yang_name="priv", rest_name="priv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Privacy protocol for username (Default=nopriv)'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='enumeration', is_config=True)""", }) self.__priv = t if hasattr(self, '_set'): self._set()
[ "def", "_set_priv", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "RestrictedClassType", "(", "base_type", "=", "unicode", ",", "restriction_type", "=", "\"dict_key\"", ",", "restriction_arg", "=", "{", "u'AES128'", ":", "{", "'value'", ":", "2", "}", ",", "u'DES'", ":", "{", "'value'", ":", "0", "}", ",", "u'nopriv'", ":", "{", "'value'", ":", "1", "}", "}", ",", ")", ",", "default", "=", "unicode", "(", "\"nopriv\"", ")", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"priv\"", ",", "rest_name", "=", "\"priv\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Privacy protocol for username (Default=nopriv)'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-snmp'", ",", "defining_module", "=", "'brocade-snmp'", ",", "yang_type", "=", "'enumeration'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"priv must be of a type compatible with enumeration\"\"\"", ",", "'defined-type'", ":", "\"brocade-snmp:enumeration\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'AES128': {'value': 2}, u'DES': {'value': 0}, u'nopriv': {'value': 1}},), default=unicode(\"nopriv\"), is_leaf=True, yang_name=\"priv\", rest_name=\"priv\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Privacy protocol for username (Default=nopriv)'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='enumeration', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__priv", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for priv, mapped from YANG variable /rbridge_id/snmp_server/user/priv (enumeration) If this variable is read-only (config: false) in the source YANG file, then _set_priv is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_priv() directly.
[ "Setter", "method", "for", "priv", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "snmp_server", "/", "user", "/", "priv", "(", "enumeration", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_priv", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_priv", "()", "directly", "." ]
python
train
91.772727
facebook/watchman
python/pywatchman/__init__.py
https://github.com/facebook/watchman/blob/d416c249dd8f463dc69fc2691d0f890598c045a9/python/pywatchman/__init__.py#L1113-L1143
def query(self, *args): """ Send a query to the watchman service and return the response This call will block until the response is returned. If any unilateral responses are sent by the service in between the request-response they will be buffered up in the client object and NOT returned via this method. """ log("calling client.query") self._connect() try: self.sendConn.send(args) res = self.receive() while self.isUnilateralResponse(res): res = self.receive() return res except EnvironmentError as ee: # When we can depend on Python 3, we can use PEP 3134 # exception chaining here. raise WatchmanEnvironmentError( "I/O error communicating with watchman daemon", ee.errno, ee.strerror, args, ) except WatchmanError as ex: ex.setCommand(args) raise
[ "def", "query", "(", "self", ",", "*", "args", ")", ":", "log", "(", "\"calling client.query\"", ")", "self", ".", "_connect", "(", ")", "try", ":", "self", ".", "sendConn", ".", "send", "(", "args", ")", "res", "=", "self", ".", "receive", "(", ")", "while", "self", ".", "isUnilateralResponse", "(", "res", ")", ":", "res", "=", "self", ".", "receive", "(", ")", "return", "res", "except", "EnvironmentError", "as", "ee", ":", "# When we can depend on Python 3, we can use PEP 3134", "# exception chaining here.", "raise", "WatchmanEnvironmentError", "(", "\"I/O error communicating with watchman daemon\"", ",", "ee", ".", "errno", ",", "ee", ".", "strerror", ",", "args", ",", ")", "except", "WatchmanError", "as", "ex", ":", "ex", ".", "setCommand", "(", "args", ")", "raise" ]
Send a query to the watchman service and return the response This call will block until the response is returned. If any unilateral responses are sent by the service in between the request-response they will be buffered up in the client object and NOT returned via this method.
[ "Send", "a", "query", "to", "the", "watchman", "service", "and", "return", "the", "response" ]
python
train
32.677419
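A short usage sketch for query above; 'version' and 'watch-list' are standard watchman commands, and a running watchman daemon is assumed.

import pywatchman

client = pywatchman.client()           # connects lazily; query() triggers the actual connect
try:
    print(client.query('version'))     # e.g. {'version': '...'}
    print(client.query('watch-list'))  # roots currently being watched
finally:
    client.close()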
saltstack/salt
salt/modules/grains.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/grains.py#L150-L173
def items(sanitize=False): ''' Return all of the minion's grains CLI Example: .. code-block:: bash salt '*' grains.items Sanitized CLI Example: .. code-block:: bash salt '*' grains.items sanitize=True ''' if salt.utils.data.is_true(sanitize): out = dict(__grains__) for key, func in six.iteritems(_SANITIZERS): if key in out: out[key] = func(out[key]) return out else: return __grains__
[ "def", "items", "(", "sanitize", "=", "False", ")", ":", "if", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "sanitize", ")", ":", "out", "=", "dict", "(", "__grains__", ")", "for", "key", ",", "func", "in", "six", ".", "iteritems", "(", "_SANITIZERS", ")", ":", "if", "key", "in", "out", ":", "out", "[", "key", "]", "=", "func", "(", "out", "[", "key", "]", ")", "return", "out", "else", ":", "return", "__grains__" ]
Return all of the minion's grains CLI Example: .. code-block:: bash salt '*' grains.items Sanitized CLI Example: .. code-block:: bash salt '*' grains.items sanitize=True
[ "Return", "all", "of", "the", "minion", "s", "grains" ]
python
train
20.041667
nitmir/django-cas-server
cas_server/cas.py
https://github.com/nitmir/django-cas-server/blob/d106181b94c444f1946269da5c20f6c904840ad3/cas_server/cas.py#L112-L115
def get_proxy_url(self, pgt): """Returns proxy url, given the proxy granting ticket""" params = urllib_parse.urlencode({'pgt': pgt, 'targetService': self.service_url}) return "%s/proxy?%s" % (self.server_url, params)
[ "def", "get_proxy_url", "(", "self", ",", "pgt", ")", ":", "params", "=", "urllib_parse", ".", "urlencode", "(", "{", "'pgt'", ":", "pgt", ",", "'targetService'", ":", "self", ".", "service_url", "}", ")", "return", "\"%s/proxy?%s\"", "%", "(", "self", ".", "server_url", ",", "params", ")" ]
Returns proxy url, given the proxy granting ticket
[ "Returns", "proxy", "url", "given", "the", "proxy", "granting", "ticket" ]
python
train
59.25
bitlabstudio/django-document-library
document_library/south_migrations/0019_set_persistent_categories_to_published.py
https://github.com/bitlabstudio/django-document-library/blob/508737277455f182e81780cfca8d8eceb989a45b/document_library/south_migrations/0019_set_persistent_categories_to_published.py#L24-L28
def forwards(self, orm): "Write your forwards methods here." for category in orm['document_library.DocumentCategory'].objects.all(): category.is_published = True category.save()
[ "def", "forwards", "(", "self", ",", "orm", ")", ":", "for", "category", "in", "orm", "[", "'document_library.DocumentCategory'", "]", ".", "objects", ".", "all", "(", ")", ":", "category", ".", "is_published", "=", "True", "category", ".", "save", "(", ")" ]
Write your forwards methods here.
[ "Write", "your", "forwards", "methods", "here", "." ]
python
train
42.6
spacetelescope/drizzlepac
drizzlepac/util.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/util.py#L860-L868
def isCommaList(inputFilelist): """Return True if the input is a comma separated list of names.""" if isinstance(inputFilelist, int) or isinstance(inputFilelist, np.int32): ilist = str(inputFilelist) else: ilist = inputFilelist if "," in ilist: return True return False
[ "def", "isCommaList", "(", "inputFilelist", ")", ":", "if", "isinstance", "(", "inputFilelist", ",", "int", ")", "or", "isinstance", "(", "inputFilelist", ",", "np", ".", "int32", ")", ":", "ilist", "=", "str", "(", "inputFilelist", ")", "else", ":", "ilist", "=", "inputFilelist", "if", "\",\"", "in", "ilist", ":", "return", "True", "return", "False" ]
Return True if the input is a comma separated list of names.
[ "Return", "True", "if", "the", "input", "is", "a", "comma", "separated", "list", "of", "names", "." ]
python
train
33.888889
moble/quaternion
calculus.py
https://github.com/moble/quaternion/blob/7a323e81b391d6892e2874073e495e0beb057e85/calculus.py#L9-L28
def derivative(f, t): """Fourth-order finite-differencing with non-uniform time steps The formula for this finite difference comes from Eq. (A 5b) of "Derivative formulas and errors for non-uniformly spaced points" by M. K. Bowen and Ronald Smith. As explained in their Eqs. (B 9b) and (B 10b), this is a fourth-order formula -- though that's a squishy concept with non-uniform time steps. TODO: If there are fewer than five points, the function should revert to simpler (lower-order) formulas. """ dfdt = np.empty_like(f) if (f.ndim == 1): _derivative(f, t, dfdt) elif (f.ndim == 2): _derivative_2d(f, t, dfdt) elif (f.ndim == 3): _derivative_3d(f, t, dfdt) else: raise NotImplementedError("Taking derivatives of {0}-dimensional arrays is not yet implemented".format(f.ndim)) return dfdt
[ "def", "derivative", "(", "f", ",", "t", ")", ":", "dfdt", "=", "np", ".", "empty_like", "(", "f", ")", "if", "(", "f", ".", "ndim", "==", "1", ")", ":", "_derivative", "(", "f", ",", "t", ",", "dfdt", ")", "elif", "(", "f", ".", "ndim", "==", "2", ")", ":", "_derivative_2d", "(", "f", ",", "t", ",", "dfdt", ")", "elif", "(", "f", ".", "ndim", "==", "3", ")", ":", "_derivative_3d", "(", "f", ",", "t", ",", "dfdt", ")", "else", ":", "raise", "NotImplementedError", "(", "\"Taking derivatives of {0}-dimensional arrays is not yet implemented\"", ".", "format", "(", "f", ".", "ndim", ")", ")", "return", "dfdt" ]
Fourth-order finite-differencing with non-uniform time steps The formula for this finite difference comes from Eq. (A 5b) of "Derivative formulas and errors for non-uniformly spaced points" by M. K. Bowen and Ronald Smith. As explained in their Eqs. (B 9b) and (B 10b), this is a fourth-order formula -- though that's a squishy concept with non-uniform time steps. TODO: If there are fewer than five points, the function should revert to simpler (lower-order) formulas.
[ "Fourth", "-", "order", "finite", "-", "differencing", "with", "non", "-", "uniform", "time", "steps" ]
python
train
42.65
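A quick numerical check of derivative above on non-uniform time steps (the import path quaternion.calculus is assumed from the repo layout); for f = t**2 the finite-difference result should match 2*t to rounding error, since the five-point formula is exact for low-order polynomials.

import numpy as np
from quaternion.calculus import derivative  # assumed module path

t = np.sort(np.random.default_rng(0).uniform(0.0, 10.0, 200))  # non-uniform, increasing times
f = t**2
dfdt = derivative(f, t)
print(np.max(np.abs(dfdt - 2*t)))  # should be tiny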
lesscpy/lesscpy
lesscpy/lessc/utility.py
https://github.com/lesscpy/lesscpy/blob/51e392fb4a3cd4ccfb6175e0e42ce7d2f6b78126/lesscpy/lessc/utility.py#L258-L276
def convergent_round(value, ndigits=0):
    """Convergent rounding.

    Round to nearest even, similar to Python3's round() method.
    """
    if sys.version_info[0] < 3:
        if value < 0.0:
            return -convergent_round(-value)

        epsilon = 0.0000001
        integral_part, _ = divmod(value, 1)
        if abs(value - (integral_part + 0.5)) < epsilon:
            if integral_part % 2.0 < epsilon:
                return integral_part
            else:
                nearest_even = integral_part + 0.5
                return math.ceil(nearest_even)
    return round(value, ndigits)
[ "def", "convergent_round", "(", "value", ",", "ndigits", "=", "0", ")", ":", "if", "sys", ".", "version_info", "[", "0", "]", "<", "3", ":", "if", "value", "<", "0.0", ":", "return", "-", "convergent_round", "(", "-", "value", ")", "epsilon", "=", "0.0000001", "integral_part", ",", "_", "=", "divmod", "(", "value", ",", "1", ")", "if", "abs", "(", "value", "-", "(", "integral_part", "+", "0.5", ")", ")", "<", "epsilon", ":", "if", "integral_part", "%", "2.0", "<", "epsilon", ":", "return", "integral_part", "else", ":", "nearest_even", "=", "integral_part", "+", "0.5", "return", "math", ".", "ceil", "(", "nearest_even", ")", "return", "round", "(", "value", ",", "ndigits", ")" ]
Convergent rounding.

    Round to nearest even, similar to Python3's round() method.
[ "Convergent", "rounding", "." ]
python
valid
30.842105
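The point of convergent (banker's) rounding is that halfway cases go to the nearest even integer; a small check of the behaviour the Python 2 branch above emulates (on Python 3 it defers to the built-in round()).

from lesscpy.lessc.utility import convergent_round  # assumes lesscpy is installed

for x in (0.5, 1.5, 2.5, 3.5):
    print(x, '->', convergent_round(x))
# Expected: ties go to the even neighbour, i.e. 0.0, 2.0, 2.0, 4.0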
intake/intake
intake/gui/source/gui.py
https://github.com/intake/intake/blob/277b96bfdee39d8a3048ea5408c6d6716d568336/intake/gui/source/gui.py#L112-L121
def callback(self, sources): """When a source is selected, enable widgets that depend on that condition and do done_callback""" enable = bool(sources) if not enable: self.plot_widget.value = False enable_widget(self.plot_widget, enable) if self.done_callback: self.done_callback(sources)
[ "def", "callback", "(", "self", ",", "sources", ")", ":", "enable", "=", "bool", "(", "sources", ")", "if", "not", "enable", ":", "self", ".", "plot_widget", ".", "value", "=", "False", "enable_widget", "(", "self", ".", "plot_widget", ",", "enable", ")", "if", "self", ".", "done_callback", ":", "self", ".", "done_callback", "(", "sources", ")" ]
When a source is selected, enable widgets that depend on that condition and do done_callback
[ "When", "a", "source", "is", "selected", "enable", "widgets", "that", "depend", "on", "that", "condition", "and", "do", "done_callback" ]
python
train
35.1
chrippa/python-librtmp
librtmp/stream.py
https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L76-L81
def unpause(self): """Unpauses the stream.""" res = librtmp.RTMP_Pause(self.client.rtmp, 0) if res < 1: raise RTMPError("Failed to unpause")
[ "def", "unpause", "(", "self", ")", ":", "res", "=", "librtmp", ".", "RTMP_Pause", "(", "self", ".", "client", ".", "rtmp", ",", "0", ")", "if", "res", "<", "1", ":", "raise", "RTMPError", "(", "\"Failed to unpause\"", ")" ]
Unpauses the stream.
[ "Unpauses", "the", "stream", "." ]
python
train
28.666667
tmontaigu/pylas
pylas/lasdatas/base.py
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasdatas/base.py#L185-L210
def add_extra_dim(self, name, type, description=""): """ Adds a new extra dimension to the point record Parameters ---------- name: str the name of the dimension type: str type of the dimension (eg 'uint8') description: str, optional a small description of the dimension """ name = name.replace(" ", "_") type_id = extradims.get_id_for_extra_dim_type(type) extra_byte = ExtraBytesStruct( data_type=type_id, name=name.encode(), description=description.encode() ) try: extra_bytes_vlr = self.vlrs.get("ExtraBytesVlr")[0] except IndexError: extra_bytes_vlr = ExtraBytesVlr() self.vlrs.append(extra_bytes_vlr) finally: extra_bytes_vlr.extra_bytes_structs.append(extra_byte) self.points_data.add_extra_dims([(name, type)])
[ "def", "add_extra_dim", "(", "self", ",", "name", ",", "type", ",", "description", "=", "\"\"", ")", ":", "name", "=", "name", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", "type_id", "=", "extradims", ".", "get_id_for_extra_dim_type", "(", "type", ")", "extra_byte", "=", "ExtraBytesStruct", "(", "data_type", "=", "type_id", ",", "name", "=", "name", ".", "encode", "(", ")", ",", "description", "=", "description", ".", "encode", "(", ")", ")", "try", ":", "extra_bytes_vlr", "=", "self", ".", "vlrs", ".", "get", "(", "\"ExtraBytesVlr\"", ")", "[", "0", "]", "except", "IndexError", ":", "extra_bytes_vlr", "=", "ExtraBytesVlr", "(", ")", "self", ".", "vlrs", ".", "append", "(", "extra_bytes_vlr", ")", "finally", ":", "extra_bytes_vlr", ".", "extra_bytes_structs", ".", "append", "(", "extra_byte", ")", "self", ".", "points_data", ".", "add_extra_dims", "(", "[", "(", "name", ",", "type", ")", "]", ")" ]
Adds a new extra dimension to the point record Parameters ---------- name: str the name of the dimension type: str type of the dimension (eg 'uint8') description: str, optional a small description of the dimension
[ "Adds", "a", "new", "extra", "dimension", "to", "the", "point", "record" ]
python
test
35.153846
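A usage sketch for add_extra_dim above, assuming pylas is installed and an existing LAS file is at hand (the file name and dimension name are placeholders).

import pylas

las = pylas.read('input.las')  # placeholder input file
las.add_extra_dim('lake_id', 'uint16', 'ID of intersecting lake polygon')

# The new dimension is now addressable like any other point field of the record.
print(las.lake_id.dtype, las.lake_id.shape)
las.write('with_extra_dim.las')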