repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (token sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (token sequence) | language (string, 1 class) | partition (string, 3 classes) |
---|---|---|---|---|---|---|---|---|
SBRG/ssbio | ssbio/pipeline/atlas2.py | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/atlas2.py#L722-L785 | def calculate_residue_counts_perstrain(protein_pickle_path, outdir, pdbflex_keys_file, wt_pid_cutoff=None, force_rerun=False):
"""Writes out a feather file for a PROTEIN counting amino acid occurrences for ALL STRAINS along with SUBSEQUENCES"""
from collections import defaultdict
from ssbio.protein.sequence.seqprop import SeqProp
from ssbio.protein.sequence.properties.residues import _aa_property_dict_one
log = logging.getLogger(__name__)
protein_id = op.splitext(op.basename(protein_pickle_path))[0].split('_')[0]
protein_df_outfile = op.join(outdir, '{}_protein_strain_properties.fthr'.format(protein_id))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=protein_df_outfile):
protein = ssbio.io.load_pickle(protein_pickle_path)
# First calculate disorder cuz i forgot to
protein.get_all_disorder_predictions(representative_only=True)
# Then get all subsequences
all_protein_subseqs = protein.get_all_subsequences(pdbflex_keys_file=pdbflex_keys_file)
if not all_protein_subseqs:
log.error('{}: cannot run subsequence calculator'.format(protein.id))
return
# Each strain gets a dictionary
strain_to_infodict = defaultdict(dict)
for seqprop_to_analyze in protein.sequences:
if seqprop_to_analyze.id == protein.representative_sequence.id:
strain_id = 'K12'
elif type(seqprop_to_analyze) == SeqProp and seqprop_to_analyze.id != protein.id: # This is to filter out other KEGGProps or UniProtProps
strain_id = seqprop_to_analyze.id.split('_', 1)[1] # This split should work for all strains
else:
continue
## Additional filtering for genes marked as orthologous but actually have large deletions or something
## TODO: experiment with other cutoffs?
if wt_pid_cutoff:
aln = protein.sequence_alignments.get_by_id('{0}_{0}_{1}'.format(protein.id, seqprop_to_analyze.id))
if aln.annotations['percent_identity'] < wt_pid_cutoff:
continue
###### Calculate "all" properties ######
seqprop_to_analyze.get_biopython_pepstats()
# [ALL] aa_count
if 'amino_acids_percent-biop' not in seqprop_to_analyze.annotations: # May not run if weird amino acids in the sequence
log.warning('Protein {}, sequence {}: skipping, unable to run Biopython ProteinAnalysis'.format(protein.id,
seqprop_to_analyze.id))
continue
strain_to_infodict[strain_id].update({'aa_count_{}'.format(k): v for k, v in seqprop_to_analyze.annotations['amino_acids_content-biop'].items()})
# [ALL] aa_count_total
strain_to_infodict[strain_id]['aa_count_total'] = seqprop_to_analyze.seq_len
###### Calculate subsequence properties ######
for prop, propdict in all_protein_subseqs.items():
strain_to_infodict[strain_id].update(protein.get_subseq_props(property_dict=propdict, property_name=prop,
seqprop=seqprop_to_analyze))
protein_df = pd.DataFrame(strain_to_infodict)
protein_df.reset_index().to_feather(protein_df_outfile)
return protein_pickle_path, protein_df_outfile | python | train |
tornadoweb/tornado | tornado/web.py | https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/web.py#L951-L971 | def render_linked_css(self, css_files: Iterable[str]) -> str:
"""Default method used to render the final css links for the
    rendered webpage.

    Override this method in a sub-classed controller to change the output.
    """
    paths = []
    unique_paths = set()  # type: Set[str]
    for path in css_files:
        if not is_absolute(path):
            path = self.static_url(path)
        if path not in unique_paths:
            paths.append(path)
            unique_paths.add(path)
    return "".join(
        '<link href="' + escape.xhtml_escape(p) + '" '
        'type="text/css" rel="stylesheet"/>'
        for p in paths
    ) | python | train |
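The core of the method above is order-preserving de-duplication of the stylesheet paths. A minimal, self-contained sketch of that pattern (plain Python, no Tornado dependency, names invented for the demo):

```python
def dedupe_preserving_order(items):
    """Drop repeated items while keeping the first-seen order."""
    seen = set()
    out = []
    for item in items:
        if item not in seen:
            out.append(item)
            seen.add(item)
    return out

print(dedupe_preserving_order(["a.css", "b.css", "a.css"]))  # ['a.css', 'b.css']
```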
jasonrbriggs/stomp.py | stomp/protocol.py | https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/protocol.py#L445-L454 | def _escape_headers(self, headers):
"""
    :param dict(str,str) headers:
    """
    for key, val in headers.items():
        try:
            val = val.replace('\\', '\\\\').replace('\n', '\\n').replace(':', '\\c').replace('\r', '\\r')
        except:
            pass
        headers[key] = val | python | train |
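The method above applies STOMP-style header escaping (backslash, newline, colon, carriage return). A standalone sketch of that transformation on a single made-up value:

```python
val = "content-type:text/plain\nsecond line"
escaped = (val.replace('\\', '\\\\')
              .replace('\n', '\\n')
              .replace(':', '\\c')
              .replace('\r', '\\r'))
print(escaped)  # content-type\ctext/plain\nsecond line
```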
biocore/deblur | deblur/workflow.py | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L366-L493 | def remove_artifacts_seqs(seqs_fp,
ref_fp,
working_dir,
ref_db_fp,
negate=False,
threads=1,
verbose=False,
sim_thresh=None,
coverage_thresh=None):
"""Remove artifacts from FASTA file using SortMeRNA.
Parameters
----------
seqs_fp: string
file path to FASTA input sequence file
ref_fp: tuple
file path(s) to FASTA database file
working_dir: string
working directory path
ref_db_fp: tuple
file path(s) to indexed FASTA database
negate: boolean, optional
if True, discard all input sequences aligning
to reference database
threads: integer, optional
number of threads to use for SortMeRNA
verbose: boolean, optional
If true, output SortMeRNA errors
sim_thresh: float, optional
The minimal similarity threshold (between 0 and 1)
for keeping the sequence
if None, the default values used are 0.65 for negate=False,
0.95 for negate=True
coverage_thresh: float, optional
The minimal coverage threshold (between 0 and 1)
for alignments for keeping the sequence
if None, the default values used are 0.5 for negate=False,
0.95 for negate=True
Returns
-------
output_fp : str
Name of the artifact removed fasta file
okseqs : int
The number of sequences left after artifact removal
tmp_files : list of str
Names of the tmp files created
"""
logger = logging.getLogger(__name__)
logger.info('remove_artifacts_seqs file %s' % seqs_fp)
if stat(seqs_fp).st_size == 0:
logger.warn('file %s has size 0, continuing' % seqs_fp)
return None, 0, []
if coverage_thresh is None:
if negate:
coverage_thresh = 0.95 * 100
else:
coverage_thresh = 0.5 * 100
if sim_thresh is None:
if negate:
sim_thresh = 0.95 * 100
else:
sim_thresh = 0.65 * 100
# the minimal average bitscore per nucleotide
bitscore_thresh = 0.65
output_fp = join(working_dir,
"%s.no_artifacts" % basename(seqs_fp))
blast_output = join(working_dir,
'%s.sortmerna' % basename(seqs_fp))
aligned_seq_ids = set()
for i, db in enumerate(ref_fp):
logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
% (db, working_dir, ref_db_fp[i], seqs_fp))
# run SortMeRNA
# we use -e 100 to remove E-value based filtering by sortmerna
# since we use bitscore/identity/coverage filtering instead
params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
(db, ref_db_fp[i]),
'--aligned', blast_output, '--blast', '3', '--best', '1',
'--print_all_reads', '-v', '-e', '100']
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('sortmerna error on file %s' % seqs_fp)
logger.error('stdout : %s' % sout)
logger.error('stderr : %s' % serr)
return output_fp, 0, []
blast_output_filename = '%s.blast' % blast_output
with open(blast_output_filename, 'r') as bfl:
for line in bfl:
line = line.strip().split('\t')
# if * means no match
if line[1] == '*':
continue
# check if % identity[2] and coverage[13] are large enough
if (float(line[2]) >= sim_thresh) and \
(float(line[13]) >= coverage_thresh) and \
(float(line[11]) >= bitscore_thresh * len(line[0])):
aligned_seq_ids.add(line[0])
if negate:
def op(x): return x not in aligned_seq_ids
else:
def op(x): return x in aligned_seq_ids
# if negate = False, only output sequences
# matching to at least one of the databases
totalseqs = 0
okseqs = 0
badseqs = 0
with open(output_fp, 'w') as out_f:
for label, seq in sequence_generator(seqs_fp):
totalseqs += 1
label = label.split()[0]
if op(label):
out_f.write(">%s\n%s\n" % (label, seq))
okseqs += 1
else:
badseqs += 1
logger.info('total sequences %d, passing sequences %d, '
'failing sequences %d' % (totalseqs, okseqs, badseqs))
return output_fp, okseqs, [blast_output_filename] | python | train |
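The filtering step above keeps a read only if its best hit passes the identity, coverage, and per-nucleotide bitscore thresholds. A small, self-contained sketch of that logic on an in-memory blast-like table (the rows here are made up; the column indices mirror the snippet: identity at 2, bitscore at 11, coverage at 13):

```python
sim_thresh, coverage_thresh, bitscore_thresh = 65.0, 50.0, 0.65

rows = [
    "read1\tref1\t97.0\t0\t0\t0\t0\t0\t0\t0\t0\t180\t0\t99.0",
    "read2\t*\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0",          # no hit
]

aligned = set()
for line in rows:
    fields = line.strip().split('\t')
    if fields[1] == '*':
        continue
    if (float(fields[2]) >= sim_thresh
            and float(fields[13]) >= coverage_thresh
            and float(fields[11]) >= bitscore_thresh * len(fields[0])):
        aligned.add(fields[0])

print(aligned)  # {'read1'}
```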
humilis/humilis-lambdautils | lambdautils/state.py | https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L137-L143 | def get_secret(key, *args, **kwargs):
"""Retrieves a secret."""
    env_value = os.environ.get(key.replace('.', '_').upper())
    if not env_value:
        # Backwards compatibility: the deprecated secrets vault
        return _get_secret_from_vault(key, *args, **kwargs)
    return env_value | python | train |
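The lookup above maps a dotted secret name to an upper-cased, underscore-separated environment variable before falling back to the vault. A quick illustration of just that key transformation (the variable name and value here are invented for the demo):

```python
import os

os.environ["MY_SERVICE_API_KEY"] = "s3cr3t"            # hypothetical secret
key = "my.service.api.key"
print(os.environ.get(key.replace('.', '_').upper()))   # s3cr3t
```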
wiredrive/wtframework | wtframework/wtf/web/web_utils.py | https://github.com/wiredrive/wtframework/blob/ef7f86c4d4cf7fb17745fd627b3cc4a41f4c0216/wtframework/wtf/web/web_utils.py#L57-L79 | def get_base_url(webdriver):
"""
    Get the current base URL.

    Args:
        webdriver: Selenium WebDriver instance.

    Returns:
        str - base URL.

    Usage::
        driver.get("http://www.google.com/?q=hello+world")
        WebUtils.get_base_url(driver)
        #returns 'http://www.google.com'
    """
    current_url = webdriver.current_url
    try:
        return re.findall("^[^/]+//[^/$]+", current_url)[0]
    except:
        raise RuntimeError(
            u("Unable to process base url: {0}").format(current_url)) | python | train |
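The base URL is extracted with a plain regular expression, so the behaviour can be checked without a Selenium driver (example URL made up):

```python
import re

url = "http://www.example.com/search?q=hello+world"
print(re.findall("^[^/]+//[^/$]+", url)[0])  # http://www.example.com
```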
aws/aws-iot-device-sdk-python | AWSIoTPythonSDK/MQTTLib.py | https://github.com/aws/aws-iot-device-sdk-python/blob/f0aa2ce34b21dd2e44f4fb7e1d058656aaf2fc62/AWSIoTPythonSDK/MQTTLib.py#L671-L707 | def subscribeAsync(self, topic, QoS, ackCallback=None, messageCallback=None):
"""
**Description**
Subscribe to the desired topic and register a message callback with SUBACK callback.
**Syntax**
.. code:: python
# Subscribe to "myTopic" with QoS0, custom SUBACK callback and a message callback
myAWSIoTMQTTClient.subscribe("myTopic", 0, ackCallback=mySubackCallback, messageCallback=customMessageCallback)
# Subscribe to "myTopic/#" with QoS1, custom SUBACK callback and a message callback
myAWSIoTMQTTClient.subscribe("myTopic/#", 1, ackCallback=mySubackCallback, messageCallback=customMessageCallback)
**Parameters**
*topic* - Topic name or filter to subscribe to.
*QoS* - Quality of Service. Could be 0 or 1.
*ackCallback* - Callback to be invoked when the client receives a SUBACK. Should be in form
:code:`customCallback(mid, data)`, where :code:`mid` is the packet id for the disconnect request and
:code:`data` is the granted QoS for this subscription.
*messageCallback* - Function to be called when a new message for the subscribed topic
comes in. Should be in form :code:`customCallback(client, userdata, message)`, where
:code:`message` contains :code:`topic` and :code:`payload`. Note that :code:`client` and :code:`userdata` are
here just to be aligned with the underneath Paho callback function signature. These fields are pending to be
deprecated and should not be depended on.
**Returns**
Subscribe request packet id, for tracking purpose in the corresponding callback.
"""
return self._mqtt_core.subscribe_async(topic, QoS, ackCallback, messageCallback) | python | train |
DataONEorg/d1_python | lib_common/src/d1_common/checksum.py | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/checksum.py#L289-L301 | def format_checksum(checksum_pyxb):
"""Create string representation of a PyXB Checksum object.
    Args:
        PyXB Checksum object

    Returns:
        str : Combined hexadecimal value and algorithm name.
    """
    return '{}/{}'.format(
        checksum_pyxb.algorithm.upper().replace('-', ''), checksum_pyxb.value().lower()
    ) | python | train |
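The function only needs an object exposing `.algorithm` and `.value()`, so its behaviour can be sketched with a stand-in object instead of a real PyXB Checksum (class and digest below are invented for the demo):

```python
class FakeChecksum:
    """Minimal stand-in for a PyXB Checksum object (illustration only)."""
    algorithm = "SHA-1"

    def value(self):
        return "0A393F2C"  # hypothetical digest

chk = FakeChecksum()
print('{}/{}'.format(chk.algorithm.upper().replace('-', ''), chk.value().lower()))
# SHA1/0a393f2c
```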
opendatateam/udata | udata/sentry.py | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/sentry.py#L24-L31 | def public_dsn(dsn):
    '''Transform a standard Sentry DSN into a public one'''
    m = RE_DSN.match(dsn)
    if not m:
        log.error('Unable to parse Sentry DSN')
    public = '{scheme}://{client_id}@{domain}/{site_id}'.format(
        **m.groupdict())
    return public | python | train |
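`RE_DSN` is defined elsewhere in the module and is not part of the snippet; the pattern below is an assumed equivalent with the named groups the format string expects (`scheme`, `client_id`, `domain`, `site_id`), just to show how the secret part of the DSN is dropped from the public form:

```python
import re

# Assumed pattern; the real RE_DSN in udata may differ.
RE_DSN = re.compile(
    r'(?P<scheme>https?)://(?P<client_id>[^:@]+)(:[^@]+)?@(?P<domain>[^/]+)/(?P<site_id>\d+)'
)

dsn = "https://public123:secret456@sentry.example.com/42"
m = RE_DSN.match(dsn)
print('{scheme}://{client_id}@{domain}/{site_id}'.format(**m.groupdict()))
# https://public123@sentry.example.com/42
```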
mcocdawc/chemcoord | version.py | https://github.com/mcocdawc/chemcoord/blob/95561ce387c142227c38fb14a1d182179aef8f5f/version.py#L136-L160 | def get_version(pep440=False):
"""Tracks the version number.
    pep440: bool
        When True, this function returns a version string suitable for
        a release as defined by PEP 440. When False, the githash (if
        available) will be appended to the version string.

    The file VERSION holds the version information. If this is not a git
    repository, then it is reasonable to assume that the version is not
    being incremented and the version returned will be the release version as
    read from the file.

    However, if the script is located within an active git repository,
    git-describe is used to get the version information.

    The file VERSION will need to be changed manually. This should be done
    before running git tag (set to the same as the version in the tag).
    """
    git_version = format_git_describe(call_git_describe(), pep440=pep440)
    if git_version is None:  # not a git repository
        return read_release_version()
    return git_version | python | train |
librosa/librosa | librosa/core/audio.py | https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/audio.py#L1020-L1118 | def chirp(fmin, fmax, sr=22050, length=None, duration=None, linear=False, phi=None):
"""Returns a chirp signal that goes from frequency `fmin` to frequency `fmax`
Parameters
----------
fmin : float > 0
initial frequency
fmax : float > 0
final frequency
sr : number > 0
desired sampling rate of the output signal
length : int > 0
desired number of samples in the output signal.
When both `duration` and `length` are defined, `length` would take priority.
duration : float > 0
desired duration in seconds.
When both `duration` and `length` are defined, `length` would take priority.
linear : boolean
- If `True`, use a linear sweep, i.e., frequency changes linearly with time
- If `False`, use an exponential sweep.
Default is `False`.
phi : float or None
phase offset, in radians.
If unspecified, defaults to `-np.pi * 0.5`.
Returns
-------
chirp_signal : np.ndarray [shape=(length,), dtype=float64]
Synthesized chirp signal
Raises
------
ParameterError
- If either `fmin` or `fmax` are not provided.
- If neither `length` nor `duration` are provided.
See Also
--------
scipy.signal.chirp
Examples
--------
>>> # Generate an exponential chirp from A4 to A5
>>> exponential_chirp = librosa.chirp(440, 880, duration=1)
>>> # Or generate the same signal using `length`
>>> exponential_chirp = librosa.chirp(440, 880, sr=22050, length=22050)
>>> # Or generate a linear chirp instead
>>> linear_chirp = librosa.chirp(440, 880, duration=1, linear=True)
Display spectrogram for both exponential and linear chirps
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> S_exponential = librosa.feature.melspectrogram(y=exponential_chirp)
>>> ax = plt.subplot(2,1,1)
>>> librosa.display.specshow(librosa.power_to_db(S_exponential, ref=np.max),
... x_axis='time', y_axis='mel')
>>> plt.subplot(2,1,2, sharex=ax)
>>> S_linear = librosa.feature.melspectrogram(y=linear_chirp)
>>> librosa.display.specshow(librosa.power_to_db(S_linear, ref=np.max),
... x_axis='time', y_axis='mel')
>>> plt.tight_layout()
"""
if fmin is None or fmax is None:
raise ParameterError('both "fmin" and "fmax" must be provided')
# Compute signal duration
period = 1.0 / sr
if length is None:
if duration is None:
raise ParameterError('either "length" or "duration" must be provided')
else:
duration = period * length
if phi is None:
phi = -np.pi * 0.5
method = 'linear' if linear else 'logarithmic'
return scipy.signal.chirp(
np.arange(duration, step=period),
fmin,
duration,
fmax,
method=method,
phi=phi / np.pi * 180, # scipy.signal.chirp uses degrees for phase offset
) | python | test |
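The wrapper above ultimately delegates to `scipy.signal.chirp`, converting the radian phase offset to degrees. A minimal direct use of that underlying call (one second of an exponential sweep from 440 Hz to 880 Hz at a 22050 Hz sample rate; parameter choices are illustrative):

```python
import numpy as np
import scipy.signal

sr = 22050
t = np.arange(1.0, step=1.0 / sr)                    # one second of sample times
y = scipy.signal.chirp(t, f0=440, t1=1.0, f1=880,
                       method='logarithmic', phi=-90)
print(y.shape)  # (22050,)
```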
google/budou | budou/chunk.py | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L210-L220 | def swap(self, old_chunks, new_chunk):
"""Swaps old consecutive chunks with new chunk.
    Args:
        old_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks to
            be removed.
        new_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.
    """
    indexes = [self.index(chunk) for chunk in old_chunks]
    del self[indexes[0]:indexes[-1] + 1]
    self.insert(indexes[0], new_chunk) | python | train |
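Since the `ChunkList` behaves like a Python list here, the slice-delete-and-insert trick can be shown with a plain list (example data invented):

```python
items = ['I', 'love', 'New', 'York', '.']
old = ['New', 'York']                      # consecutive items to merge
indexes = [items.index(x) for x in old]
del items[indexes[0]:indexes[-1] + 1]
items.insert(indexes[0], 'New York')
print(items)  # ['I', 'love', 'New York', '.']
```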
BernardFW/bernard | src/bernard/platforms/facebook/layers.py | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/layers.py#L289-L297 | def is_sharable(self):
"""
    Can only be sharable if marked as such and no child element is blocking
    sharing due to security reasons.
    """
    return bool(
        self.sharable and
        all(x.is_sharable() for x in self.elements)
    ) | python | train |
svenkreiss/databench | databench/analysis.py | https://github.com/svenkreiss/databench/blob/99d4adad494b60a42af6b8bfba94dd0c41ba0786/databench/analysis.py#L189-L205 | def emit(self, signal, message='__nomessagetoken__'):
"""Emit a signal to the frontend.
    :param str signal: name of the signal
    :param message: message to send
    :returns: return value from frontend emit function
    :rtype: tornado.concurrent.Future
    """
    # call pre-emit hooks
    if signal == 'log':
        self.log_backend.info(message)
    elif signal == 'warn':
        self.log_backend.warn(message)
    elif signal == 'error':
        self.log_backend.error(message)
    return self.emit_to_frontend(signal, message) | python | train |
armet/python-armet | armet/query/parser.py | https://github.com/armet/python-armet/blob/d61eca9082256cb1e7f7f3c7f2fbc4b697157de7/armet/query/parser.py#L295-L366 | def parse_segment(text):
"we expect foo=bar"
if not len(text):
return NoopQuerySegment()
q = QuerySegment()
# First we need to split the segment into key/value pairs. This is done
# by attempting to split the sequence for each equality comparison. Then
# discard any that did not split properly. Then chose the smallest key
# (greedily chose the first comparator we encounter in the string)
# followed by the smallest value (greedily chose the largest comparator
# possible.)
# translate into [('=', 'foo=bar')]
equalities = zip(constants.OPERATOR_EQUALITIES, itertools.repeat(text))
# Translate into [('=', ['foo', 'bar'])]
equalities = map(lambda x: (x[0], x[1].split(x[0], 1)), equalities)
# Remove unsplit entries and translate into [('=': ['foo', 'bar'])]
# Note that the result from this stage is iterated over twice.
equalities = list(filter(lambda x: len(x[1]) > 1, equalities))
# Get the smallest key and use the length of that to remove other items
key_len = len(min((x[1][0] for x in equalities), key=len))
equalities = filter(lambda x: len(x[1][0]) == key_len, equalities)
# Get the smallest value length. thus we have the earliest key and the
# smallest value.
op, (key, value) = min(equalities, key=lambda x: len(x[1][1]))
key, directive = parse_directive(key)
if directive:
op = constants.OPERATOR_EQUALITY_FALLBACK
q.directive = directive
# Process negation. This comes in both foo.not= and foo!= forms.
path = key.split(constants.SEP_PATH)
last = path[-1]
# Check for !=
if last.endswith(constants.OPERATOR_NEGATION):
last = last[:-1]
q.negated = not q.negated
# Check for foo.not=
if last == constants.PATH_NEGATION:
path.pop(-1)
q.negated = not q.negated
q.values = value.split(constants.SEP_VALUE)
# Check for suffixed operators (foo.gte=bar). Prioritize suffixed
# entries over actual equality checks.
if path[-1] in constants.OPERATOR_SUFFIXES:
# The case where foo.gte<=bar, which obviously makes no sense.
if op not in constants.OPERATOR_FALLBACK:
raise ValueError(
'Both path-style operator and equality style operator '
'provided. Please provide only a single style operator.')
q.operator = constants.OPERATOR_SUFFIX_MAP[path[-1]]
path.pop(-1)
else:
q.operator = constants.OPERATOR_EQUALITY_MAP[op]
if not len(path):
raise ValueError('No attribute navigation path provided.')
q.path = path
return q | python | valid |
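The first stage of `parse_segment` picks the separator by trying every equality operator, keeping only successful splits, then preferring the shortest key followed by the shortest value. That selection logic can be sketched on its own; the operator tuple below is a made-up stand-in for `constants.OPERATOR_EQUALITIES`:

```python
OPERATORS = ('<=', '>=', '!=', '=')   # hypothetical subset of the real operator table

def split_on_operator(text):
    candidates = [(op, text.split(op, 1)) for op in OPERATORS]
    candidates = [c for c in candidates if len(c[1]) > 1]            # keep real splits
    key_len = len(min((parts[0] for _, parts in candidates), key=len))
    candidates = [c for c in candidates if len(c[1][0]) == key_len]
    return min(candidates, key=lambda c: len(c[1][1]))               # smallest value wins

print(split_on_operator('age>=21'))    # ('>=', ['age', '21'])
print(split_on_operator('name=joe'))   # ('=', ['name', 'joe'])
```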
google/openhtf | openhtf/util/console_output.py | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/console_output.py#L65-L68 | def _printed_len(some_string):
"""Compute the visible length of the string when printed."""
return len([x for x in ANSI_ESC_RE.sub('', some_string)
if x in string.printable]) | python | train |
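`ANSI_ESC_RE` is defined elsewhere in the module; with a simplified escape-sequence pattern (an assumption for this demo) the idea looks like:

```python
import re
import string

ANSI_ESC_RE = re.compile(r'\x1b\[[0-9;]*m')   # simplified stand-in for the real pattern

text = '\x1b[31mred\x1b[0m'
visible = [c for c in ANSI_ESC_RE.sub('', text) if c in string.printable]
print(len(visible))  # 3
```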
smdabdoub/phylotoast | bin/biom_phyla_summary.py | https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/biom_phyla_summary.py#L27-L46 | def summarize_taxa(biom):
"""
    Given an abundance table, group the counts by every
    taxonomic level.
    """
    tamtcounts = defaultdict(int)
    tot_seqs = 0.0

    for row, col, amt in biom['data']:
        tot_seqs += amt
        rtax = biom['rows'][row]['metadata']['taxonomy']
        for i, t in enumerate(rtax):
            t = t.strip()
            if i == len(rtax)-1 and len(t) > 3 and len(rtax[-1]) > 3:
                t = 's__'+rtax[i-1].strip().split('_')[-1]+'_'+t.split('_')[-1]
            tamtcounts[t] += amt

    lvlData = {lvl: levelData(tamtcounts, tot_seqs, lvl) for lvl in ['k', 'p', 'c', 'o', 'f', 'g', 's']}
    return tot_seqs, lvlData | python | train |
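The accumulation over taxonomy strings is just a `defaultdict(int)` keyed by each level label. A tiny self-contained version of that counting step (made-up data, and without the `levelData` summary, which is defined elsewhere in the module):

```python
from collections import defaultdict

data = [
    (['k__Bacteria', 'p__Firmicutes'], 10),
    (['k__Bacteria', 'p__Bacteroidetes'], 5),
]

counts = defaultdict(int)
for taxonomy, amount in data:
    for level in taxonomy:
        counts[level.strip()] += amount

print(dict(counts))
# {'k__Bacteria': 15, 'p__Firmicutes': 10, 'p__Bacteroidetes': 5}
```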
django-danceschool/django-danceschool | danceschool/payments/square/cms_plugins.py | https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/payments/square/cms_plugins.py#L18-L26 | def render(self, context, instance, placeholder):
    ''' Add the cart-specific context to this form '''
    context = super(SquareCheckoutFormPlugin, self).render(context, instance, placeholder)
    context.update({
        'squareApplicationId': getattr(settings, 'SQUARE_APPLICATION_ID', ''),
    })
    return context | python | train |
kisom/pypcapfile | pcapfile/protocols/linklayer/wifi.py | https://github.com/kisom/pypcapfile/blob/67520cfbb6c2e9ab3e7c181a8012ddc56ec5cad8/pcapfile/protocols/linklayer/wifi.py#L958-L972 | def strip_rx_flags(self, idx):
"""strip(2 byte) radiotap.rxflags
    :idx: int
    :return: int
        idx
    :return: collections.namedtuple
    """
    rx_flags = collections.namedtuple('rx_flags', ['reserved', 'badplcp'])
    idx = Radiotap.align(idx, 2)
    flags, = struct.unpack_from('<H', self._rtap, idx)
    flag_bits = format(flags, '08b')[::-1]
    rx_flags.reserved = int(flag_bits[0])
    rx_flags.badplcp = int(flag_bits[1])
    return idx + 2, rx_flags | python | valid |
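The flag parsing is a little-endian 16-bit unpack followed by reading individual bits from an LSB-first bit string. Isolated from the radiotap buffer handling, that step looks like this (the packed value is made up):

```python
import struct

buf = struct.pack('<H', 0b10)                 # rx-flags field with only the "bad PLCP" bit set
flags, = struct.unpack_from('<H', buf, 0)
flag_bits = format(flags, '08b')[::-1]        # LSB-first string of flag bits
print(int(flag_bits[0]), int(flag_bits[1]))   # reserved=0, badplcp=1
```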
astropy/photutils | photutils/psf/utils.py | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/utils.py#L15-L106 | def prepare_psf_model(psfmodel, xname=None, yname=None, fluxname=None,
renormalize_psf=True):
"""
Convert a 2D PSF model to one suitable for use with
`BasicPSFPhotometry` or its subclasses.
The resulting model may be a composite model, but should have only
the x, y, and flux related parameters un-fixed.
Parameters
----------
psfmodel : a 2D model
The model to assume as representative of the PSF.
xname : str or None
The name of the ``psfmodel`` parameter that corresponds to the
x-axis center of the PSF. If None, the model will be assumed to
be centered at x=0, and a new parameter will be added for the
offset.
yname : str or None
The name of the ``psfmodel`` parameter that corresponds to the
y-axis center of the PSF. If None, the model will be assumed to
be centered at y=0, and a new parameter will be added for the
offset.
fluxname : str or None
The name of the ``psfmodel`` parameter that corresponds to the
total flux of the star. If None, a scaling factor will be added
to the model.
renormalize_psf : bool
If True, the model will be integrated from -inf to inf and
re-scaled so that the total integrates to 1. Note that this
renormalization only occurs *once*, so if the total flux of
``psfmodel`` depends on position, this will *not* be correct.
Returns
-------
outmod : a model
A new model ready to be passed into `BasicPSFPhotometry` or its
subclasses.
"""
if xname is None:
xinmod = models.Shift(0, name='x_offset')
xname = 'offset_0'
else:
xinmod = models.Identity(1)
xname = xname + '_2'
xinmod.fittable = True
if yname is None:
yinmod = models.Shift(0, name='y_offset')
yname = 'offset_1'
else:
yinmod = models.Identity(1)
yname = yname + '_2'
yinmod.fittable = True
outmod = (xinmod & yinmod) | psfmodel
if fluxname is None:
outmod = outmod * models.Const2D(1, name='flux_scaling')
fluxname = 'amplitude_3'
else:
fluxname = fluxname + '_2'
if renormalize_psf:
# we do the import here because other machinery works w/o scipy
from scipy import integrate
integrand = integrate.dblquad(psfmodel, -np.inf, np.inf,
lambda x: -np.inf, lambda x: np.inf)[0]
normmod = models.Const2D(1./integrand, name='renormalize_scaling')
outmod = outmod * normmod
# final setup of the output model - fix all the non-offset/scale
# parameters
for pnm in outmod.param_names:
outmod.fixed[pnm] = pnm not in (xname, yname, fluxname)
# and set the names so that BasicPSFPhotometry knows what to do
outmod.xname = xname
outmod.yname = yname
outmod.fluxname = fluxname
# now some convenience aliases if reasonable
outmod.psfmodel = outmod[2]
if 'x_0' not in outmod.param_names and 'y_0' not in outmod.param_names:
outmod.x_0 = getattr(outmod, xname)
outmod.y_0 = getattr(outmod, yname)
if 'flux' not in outmod.param_names:
outmod.flux = getattr(outmod, fluxname)
return outmod | [
"def",
"prepare_psf_model",
"(",
"psfmodel",
",",
"xname",
"=",
"None",
",",
"yname",
"=",
"None",
",",
"fluxname",
"=",
"None",
",",
"renormalize_psf",
"=",
"True",
")",
":",
"if",
"xname",
"is",
"None",
":",
"xinmod",
"=",
"models",
".",
"Shift",
"(",
"0",
",",
"name",
"=",
"'x_offset'",
")",
"xname",
"=",
"'offset_0'",
"else",
":",
"xinmod",
"=",
"models",
".",
"Identity",
"(",
"1",
")",
"xname",
"=",
"xname",
"+",
"'_2'",
"xinmod",
".",
"fittable",
"=",
"True",
"if",
"yname",
"is",
"None",
":",
"yinmod",
"=",
"models",
".",
"Shift",
"(",
"0",
",",
"name",
"=",
"'y_offset'",
")",
"yname",
"=",
"'offset_1'",
"else",
":",
"yinmod",
"=",
"models",
".",
"Identity",
"(",
"1",
")",
"yname",
"=",
"yname",
"+",
"'_2'",
"yinmod",
".",
"fittable",
"=",
"True",
"outmod",
"=",
"(",
"xinmod",
"&",
"yinmod",
")",
"|",
"psfmodel",
"if",
"fluxname",
"is",
"None",
":",
"outmod",
"=",
"outmod",
"*",
"models",
".",
"Const2D",
"(",
"1",
",",
"name",
"=",
"'flux_scaling'",
")",
"fluxname",
"=",
"'amplitude_3'",
"else",
":",
"fluxname",
"=",
"fluxname",
"+",
"'_2'",
"if",
"renormalize_psf",
":",
"# we do the import here because other machinery works w/o scipy",
"from",
"scipy",
"import",
"integrate",
"integrand",
"=",
"integrate",
".",
"dblquad",
"(",
"psfmodel",
",",
"-",
"np",
".",
"inf",
",",
"np",
".",
"inf",
",",
"lambda",
"x",
":",
"-",
"np",
".",
"inf",
",",
"lambda",
"x",
":",
"np",
".",
"inf",
")",
"[",
"0",
"]",
"normmod",
"=",
"models",
".",
"Const2D",
"(",
"1.",
"/",
"integrand",
",",
"name",
"=",
"'renormalize_scaling'",
")",
"outmod",
"=",
"outmod",
"*",
"normmod",
"# final setup of the output model - fix all the non-offset/scale",
"# parameters",
"for",
"pnm",
"in",
"outmod",
".",
"param_names",
":",
"outmod",
".",
"fixed",
"[",
"pnm",
"]",
"=",
"pnm",
"not",
"in",
"(",
"xname",
",",
"yname",
",",
"fluxname",
")",
"# and set the names so that BasicPSFPhotometry knows what to do",
"outmod",
".",
"xname",
"=",
"xname",
"outmod",
".",
"yname",
"=",
"yname",
"outmod",
".",
"fluxname",
"=",
"fluxname",
"# now some convenience aliases if reasonable",
"outmod",
".",
"psfmodel",
"=",
"outmod",
"[",
"2",
"]",
"if",
"'x_0'",
"not",
"in",
"outmod",
".",
"param_names",
"and",
"'y_0'",
"not",
"in",
"outmod",
".",
"param_names",
":",
"outmod",
".",
"x_0",
"=",
"getattr",
"(",
"outmod",
",",
"xname",
")",
"outmod",
".",
"y_0",
"=",
"getattr",
"(",
"outmod",
",",
"yname",
")",
"if",
"'flux'",
"not",
"in",
"outmod",
".",
"param_names",
":",
"outmod",
".",
"flux",
"=",
"getattr",
"(",
"outmod",
",",
"fluxname",
")",
"return",
"outmod"
] | Convert a 2D PSF model to one suitable for use with
`BasicPSFPhotometry` or its subclasses.
The resulting model may be a composite model, but should have only
the x, y, and flux related parameters un-fixed.
Parameters
----------
psfmodel : a 2D model
The model to assume as representative of the PSF.
xname : str or None
The name of the ``psfmodel`` parameter that corresponds to the
x-axis center of the PSF. If None, the model will be assumed to
be centered at x=0, and a new parameter will be added for the
offset.
yname : str or None
The name of the ``psfmodel`` parameter that corresponds to the
y-axis center of the PSF. If None, the model will be assumed to
be centered at y=0, and a new parameter will be added for the
offset.
fluxname : str or None
The name of the ``psfmodel`` parameter that corresponds to the
total flux of the star. If None, a scaling factor will be added
to the model.
renormalize_psf : bool
If True, the model will be integrated from -inf to inf and
re-scaled so that the total integrates to 1. Note that this
renormalization only occurs *once*, so if the total flux of
``psfmodel`` depends on position, this will *not* be correct.
Returns
-------
outmod : a model
A new model ready to be passed into `BasicPSFPhotometry` or its
subclasses. | [
"Convert",
"a",
"2D",
"PSF",
"model",
"to",
"one",
"suitable",
"for",
"use",
"with",
"BasicPSFPhotometry",
"or",
"its",
"subclasses",
"."
] | python | train |
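The docstring above matches photutils' PSF-preparation helper (prepare_psf_model). A minimal, hedged usage sketch follows; the import path, the Gaussian2D stand-in for a real PSF, and the parameter names are assumptions added for illustration, not part of the record.

# Hedged sketch: wrap an analytic PSF so only the centre and flux stay free.
# Import paths are assumptions; astropy and photutils must be installed.
from astropy.modeling import models
from photutils.psf import prepare_psf_model  # assumed to be the routine documented above

psf = models.Gaussian2D(amplitude=1.0, x_mean=0.0, y_mean=0.0,
                        x_stddev=1.5, y_stddev=1.5)

# x_mean / y_mean are named as the centre parameters; fluxname=None asks the
# routine to append a Const2D flux-scaling term instead of reusing a parameter.
fittable_psf = prepare_psf_model(psf, xname='x_mean', yname='y_mean',
                                 fluxname=None, renormalize_psf=True)

# Everything except the centre and the flux scaling should now be fixed.
print([p for p in fittable_psf.param_names if not fittable_psf.fixed[p]])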
vertical-knowledge/ripozo-sqlalchemy | ripozo_sqlalchemy/alchemymanager.py | https://github.com/vertical-knowledge/ripozo-sqlalchemy/blob/4bcc57ec6db1b39b84b50553bb264e4950ce4ec2/ripozo_sqlalchemy/alchemymanager.py#L172-L214 | def retrieve_list(self, session, filters, *args, **kwargs):
"""
Retrieves a list of the model for this manager.
It is restricted by the filters provided.
:param Session session: The SQLAlchemy session to use
:param dict filters: The filters to restrict the returned
models on
:return: A tuple of the list of dictionary representation
of the models and the dictionary of meta data
:rtype: list, dict
"""
query = self.queryset(session)
translator = IntegerField('tmp')
pagination_count = translator.translate(
filters.pop(self.pagination_count_query_arg, self.paginate_by)
)
pagination_pk = translator.translate(
filters.pop(self.pagination_pk_query_arg, 1)
)
pagination_pk -= 1 # logic works zero based. Pagination shouldn't be though
query = query.filter_by(**filters)
if pagination_pk:
query = query.offset(pagination_pk * pagination_count)
if pagination_count:
query = query.limit(pagination_count + 1)
count = query.count()
next_link = None
previous_link = None
if count > pagination_count:
next_link = {self.pagination_pk_query_arg: pagination_pk + 2,
self.pagination_count_query_arg: pagination_count}
if pagination_pk > 0:
previous_link = {self.pagination_pk_query_arg: pagination_pk,
self.pagination_count_query_arg: pagination_count}
field_dict = self.dot_field_list_to_dict(self.list_fields)
props = self.serialize_model(query[:pagination_count], field_dict=field_dict)
meta = dict(links=dict(next=next_link, previous=previous_link))
return props, meta | [
"def",
"retrieve_list",
"(",
"self",
",",
"session",
",",
"filters",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"query",
"=",
"self",
".",
"queryset",
"(",
"session",
")",
"translator",
"=",
"IntegerField",
"(",
"'tmp'",
")",
"pagination_count",
"=",
"translator",
".",
"translate",
"(",
"filters",
".",
"pop",
"(",
"self",
".",
"pagination_count_query_arg",
",",
"self",
".",
"paginate_by",
")",
")",
"pagination_pk",
"=",
"translator",
".",
"translate",
"(",
"filters",
".",
"pop",
"(",
"self",
".",
"pagination_pk_query_arg",
",",
"1",
")",
")",
"pagination_pk",
"-=",
"1",
"# logic works zero based. Pagination shouldn't be though",
"query",
"=",
"query",
".",
"filter_by",
"(",
"*",
"*",
"filters",
")",
"if",
"pagination_pk",
":",
"query",
"=",
"query",
".",
"offset",
"(",
"pagination_pk",
"*",
"pagination_count",
")",
"if",
"pagination_count",
":",
"query",
"=",
"query",
".",
"limit",
"(",
"pagination_count",
"+",
"1",
")",
"count",
"=",
"query",
".",
"count",
"(",
")",
"next_link",
"=",
"None",
"previous_link",
"=",
"None",
"if",
"count",
">",
"pagination_count",
":",
"next_link",
"=",
"{",
"self",
".",
"pagination_pk_query_arg",
":",
"pagination_pk",
"+",
"2",
",",
"self",
".",
"pagination_count_query_arg",
":",
"pagination_count",
"}",
"if",
"pagination_pk",
">",
"0",
":",
"previous_link",
"=",
"{",
"self",
".",
"pagination_pk_query_arg",
":",
"pagination_pk",
",",
"self",
".",
"pagination_count_query_arg",
":",
"pagination_count",
"}",
"field_dict",
"=",
"self",
".",
"dot_field_list_to_dict",
"(",
"self",
".",
"list_fields",
")",
"props",
"=",
"self",
".",
"serialize_model",
"(",
"query",
"[",
":",
"pagination_count",
"]",
",",
"field_dict",
"=",
"field_dict",
")",
"meta",
"=",
"dict",
"(",
"links",
"=",
"dict",
"(",
"next",
"=",
"next_link",
",",
"previous",
"=",
"previous_link",
")",
")",
"return",
"props",
",",
"meta"
] | Retrieves a list of the model for this manager.
It is restricted by the filters provided.
:param Session session: The SQLAlchemy session to use
:param dict filters: The filters to restrict the returned
models on
:return: A tuple of the list of dictionary representation
of the models and the dictionary of meta data
:rtype: list, dict | [
"Retrieves",
"a",
"list",
"of",
"the",
"model",
"for",
"this",
"manager",
".",
"It",
"is",
"restricted",
"by",
"the",
"filters",
"provided",
"."
] | python | train |
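A hedged sketch of driving this pagination logic. The SQLAlchemy model, the session, and the manager subclass below are illustrative assumptions, not code from the record; only the attribute and method names shown in the excerpt are relied on.

# Illustrative only: Person, session and the constructor call are assumptions.
from ripozo_sqlalchemy import AlchemyManager

class PersonManager(AlchemyManager):
    model = Person                       # a SQLAlchemy declarative model (assumed)
    fields = ('id', 'name')
    list_fields = ('id', 'name')
    paginate_by = 10

manager = PersonManager()                # constructor arguments assumed trivial

# Whatever remains in `filters` after the pagination keys are popped is passed
# straight to query.filter_by(); here we ask for the second page of 10 rows.
filters = {'name': 'Ada',
           manager.pagination_count_query_arg: 10,
           manager.pagination_pk_query_arg: 2}
props, meta = manager.retrieve_list(session, filters)
print(meta['links'])     # {'next': {...} or None, 'previous': {...} or None}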
sunt05/SuPy | src/supy/supy_util.py | https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L756-L808 | def gen_req_sfc(lat_x, lon_x, start, end, grid=[0.125, 0.125], scale=0):
'''generate a dict of reqs kwargs for (lat_x,lon_x) spanning [start, end]
Parameters
----------
lat_x : [type]
[description]
lon_x : [type]
[description]
start : [type]
[description]
end : [type]
[description]
grid : list, optional
[description] (the default is [0.125, 0.125], which [default_description])
scale : int, optional
[description] (the default is 0, which [default_description])
Returns
-------
[type]
[description]
Examples
--------
>>> gen_req_sfc(28, 116, '2015-01', '2015-01-31 23', grid=[0.125, 0.125], scale=0)
'''
# scale is a factor to rescale grid size
size = grid[0]*scale
# generate pd.Series for timestamps
ser_datetime = pd.date_range(start, end, freq='1h').to_series()
# surface requests
lat_c, lon_c = (roundPartial(x, grid[0]) for x in [lat_x, lon_x])
area = [lat_c+size, lon_c-size, lat_c-size, lon_c+size]
dict_req_sfc = {
'variable': list_var_sfc,
'product_type': 'reanalysis',
'area': area,
'grid': grid,
'format': 'netcdf'
}
list_dict_req_sfc = [
{**dict_req_sfc, **dict_dt}
for dict_dt
in list(gen_dict_dt_sub(ser_datetime).values())
]
dict_req_sfc = {
gen_fn(dict_req): gen_dict_proc(dict_req)
for dict_req in list_dict_req_sfc
}
return dict_req_sfc | [
"def",
"gen_req_sfc",
"(",
"lat_x",
",",
"lon_x",
",",
"start",
",",
"end",
",",
"grid",
"=",
"[",
"0.125",
",",
"0.125",
"]",
",",
"scale",
"=",
"0",
")",
":",
"# scale is a factor to rescale grid size",
"size",
"=",
"grid",
"[",
"0",
"]",
"*",
"scale",
"# generate pd.Series for timestamps",
"ser_datetime",
"=",
"pd",
".",
"date_range",
"(",
"start",
",",
"end",
",",
"freq",
"=",
"'1h'",
")",
".",
"to_series",
"(",
")",
"# surface requests",
"lat_c",
",",
"lon_c",
"=",
"(",
"roundPartial",
"(",
"x",
",",
"grid",
"[",
"0",
"]",
")",
"for",
"x",
"in",
"[",
"lat_x",
",",
"lon_x",
"]",
")",
"area",
"=",
"[",
"lat_c",
"+",
"size",
",",
"lon_c",
"-",
"size",
",",
"lat_c",
"-",
"size",
",",
"lon_c",
"+",
"size",
"]",
"dict_req_sfc",
"=",
"{",
"'variable'",
":",
"list_var_sfc",
",",
"'product_type'",
":",
"'reanalysis'",
",",
"'area'",
":",
"area",
",",
"'grid'",
":",
"grid",
",",
"'format'",
":",
"'netcdf'",
"}",
"list_dict_req_sfc",
"=",
"[",
"{",
"*",
"*",
"dict_req_sfc",
",",
"*",
"*",
"dict_dt",
"}",
"for",
"dict_dt",
"in",
"list",
"(",
"gen_dict_dt_sub",
"(",
"ser_datetime",
")",
".",
"values",
"(",
")",
")",
"]",
"dict_req_sfc",
"=",
"{",
"gen_fn",
"(",
"dict_req",
")",
":",
"gen_dict_proc",
"(",
"dict_req",
")",
"for",
"dict_req",
"in",
"list_dict_req_sfc",
"}",
"return",
"dict_req_sfc"
] | generate a dict of reqs kwargs for (lat_x,lon_x) spanning [start, end]
Parameters
----------
lat_x : [type]
[description]
lon_x : [type]
[description]
start : [type]
[description]
end : [type]
[description]
grid : list, optional
[description] (the default is [0.125, 0.125], which [default_description])
scale : int, optional
[description] (the default is 0, which [default_description])
Returns
-------
[type]
[description]
Examples
--------
>>> gen_req_sfc(28, 116, '2015-01', '2015-01-31 23', grid=[0.125, 0.125], scale=0) | [
"generate",
"a",
"dict",
"of",
"reqs",
"kwargs",
"for",
"(",
"lat_x",
"lon_x",
")",
"spanning",
"[",
"start",
"end",
"]"
] | python | train |
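A short usage sketch for the request generator above. The import path is inferred from the record's file location and should be treated as an assumption; the function body implies the result maps generated file names to processed CDS-API request dicts.

# Sketch: ERA5 surface requests for one 0.125-degree cell around (28 N, 116 E)
# covering January 2015.
from supy.supy_util import gen_req_sfc   # assumed import path

dict_req = gen_req_sfc(28, 116, '2015-01', '2015-01-31 23',
                       grid=[0.125, 0.125], scale=0)

# One entry per month in the requested span; keys are generated file names.
print(len(dict_req), 'requests generated')
for fname in sorted(dict_req):
    print(fname)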
brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_maps_ext.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_maps_ext.py#L93-L105 | def maps_get_default_rules_output_rules_groupname(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
maps_get_default_rules = ET.Element("maps_get_default_rules")
config = maps_get_default_rules
output = ET.SubElement(maps_get_default_rules, "output")
rules = ET.SubElement(output, "rules")
groupname = ET.SubElement(rules, "groupname")
groupname.text = kwargs.pop('groupname')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"maps_get_default_rules_output_rules_groupname",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"maps_get_default_rules",
"=",
"ET",
".",
"Element",
"(",
"\"maps_get_default_rules\"",
")",
"config",
"=",
"maps_get_default_rules",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"maps_get_default_rules",
",",
"\"output\"",
")",
"rules",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"rules\"",
")",
"groupname",
"=",
"ET",
".",
"SubElement",
"(",
"rules",
",",
"\"groupname\"",
")",
"groupname",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'groupname'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
olitheolix/qtmacs | qtmacs/extensions/qtmacsscintilla_macros.py | https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/extensions/qtmacsscintilla_macros.py#L1588-L1594 | def qteToBeKilled(self):
"""
Remove all selections and install the original lexer.
"""
self.clearHighlighting()
self.qteWidget.qteSetLexer(self.originalLexer)
self.qteText.textChanged.disconnect(self.qteTextChanged) | [
"def",
"qteToBeKilled",
"(",
"self",
")",
":",
"self",
".",
"clearHighlighting",
"(",
")",
"self",
".",
"qteWidget",
".",
"qteSetLexer",
"(",
"self",
".",
"originalLexer",
")",
"self",
".",
"qteText",
".",
"textChanged",
".",
"disconnect",
"(",
"self",
".",
"qteTextChanged",
")"
] | Remove all selections and install the original lexer. | [
"Remove",
"all",
"selections",
"and",
"install",
"the",
"original",
"lexer",
"."
] | python | train |
astropy/astropy-helpers | astropy_helpers/utils.py | https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/utils.py#L37-L44 | def _get_platlib_dir(cmd):
"""
Given a build command, return the name of the appropriate platform-specific
build subdirectory (e.g. build/lib.linux-x86_64-2.7)
"""
plat_specifier = '.{0}-{1}'.format(cmd.plat_name, sys.version[0:3])
return os.path.join(cmd.build_base, 'lib' + plat_specifier) | [
"def",
"_get_platlib_dir",
"(",
"cmd",
")",
":",
"plat_specifier",
"=",
"'.{0}-{1}'",
".",
"format",
"(",
"cmd",
".",
"plat_name",
",",
"sys",
".",
"version",
"[",
"0",
":",
"3",
"]",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"cmd",
".",
"build_base",
",",
"'lib'",
"+",
"plat_specifier",
")"
] | Given a build command, return the name of the appropriate platform-specific
build subdirectory (e.g. build/lib.linux-x86_64-2.7) | [
"Given",
"a",
"build",
"command",
"return",
"the",
"name",
"of",
"the",
"appropriate",
"platform",
"-",
"specific",
"build",
"subdirectory",
"directory",
"(",
"e",
".",
"g",
".",
"build",
"/",
"lib",
".",
"linux",
"-",
"x86_64",
"-",
"2",
".",
"7",
")"
] | python | train |
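A tiny illustration of the naming scheme the helper produces. The Namespace stand-in only mimics the two attributes the helper reads; it is not how setuptools build commands are normally constructed.

# Reproduces the helper's path construction with a fake build command object.
import os
import sys
from argparse import Namespace

cmd = Namespace(plat_name='linux-x86_64', build_base='build')
plat_specifier = '.{0}-{1}'.format(cmd.plat_name, sys.version[0:3])
print(os.path.join(cmd.build_base, 'lib' + plat_specifier))
# e.g. build/lib.linux-x86_64-2.7 under Python 2.7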
clusterpoint/python-client-api | pycps/converters.py | https://github.com/clusterpoint/python-client-api/blob/fabf9bd8355aa54ba08fd6649e48f16e2c35eacd/pycps/converters.py#L85-L138 | def dict_to_etree(source, root_tag=None):
""" Recursively load dict/list representation of an XML tree into an etree representation.
Args:
source -- A dictionary representing an XML document where identical children tags are
contained in a list.
Keyword args:
root_tag -- A parent tag in which to wrap the xml tree. If None, and the source dict
contains multiple root items, a list of etree's Elements will be returned.
Returns:
An ET.Element which is the root of an XML tree or a list of these.
>>> dict_to_etree({'foo': 'lorem'}) #doctest: +ELLIPSIS
<Element foo at 0x...>
>>> dict_to_etree({'foo': 'lorem', 'bar': 'ipsum'}) #doctest: +ELLIPSIS
[<Element foo at 0x...>, <Element bar at 0x...>]
>>> ET.tostring(dict_to_etree({'document': {'item1': 'foo', 'item2': 'bar'}}))
'<document><item2>bar</item2><item1>foo</item1></document>'
>>> ET.tostring(dict_to_etree({'foo': 'baz'}, root_tag='document'))
'<document><foo>baz</foo></document>'
>>> ET.tostring(dict_to_etree({'title': 'foo', 'list': [{'li':1}, {'li':2}]}, root_tag='document'))
'<document><list><li>1</li><li>2</li></list><title>foo</title></document>'
"""
def dict_to_etree_recursive(source, parent):
if hasattr(source, 'keys'):
for key, value in source.iteritems():
sub = ET.SubElement(parent, key)
dict_to_etree_recursive(value, sub)
elif isinstance(source, list):
for element in source:
dict_to_etree_recursive(element, parent)
else: # TODO: Add feature to include xml literals as special objects or a etree subtree
parent.text = source
if root_tag is None:
if len(source) == 1:
root_tag = source.keys()[0]
source = source[root_tag]
else:
roots = []
for tag, content in source.iteritems():
root = ET.Element(tag)
dict_to_etree_recursive(content, root)
roots.append(root)
return roots
root = ET.Element(root_tag)
dict_to_etree_recursive(source, root)
return root | [
"def",
"dict_to_etree",
"(",
"source",
",",
"root_tag",
"=",
"None",
")",
":",
"def",
"dict_to_etree_recursive",
"(",
"source",
",",
"parent",
")",
":",
"if",
"hasattr",
"(",
"source",
",",
"'keys'",
")",
":",
"for",
"key",
",",
"value",
"in",
"source",
".",
"iteritems",
"(",
")",
":",
"sub",
"=",
"ET",
".",
"SubElement",
"(",
"parent",
",",
"key",
")",
"dict_to_etree_recursive",
"(",
"value",
",",
"sub",
")",
"elif",
"isinstance",
"(",
"source",
",",
"list",
")",
":",
"for",
"element",
"in",
"source",
":",
"dict_to_etree_recursive",
"(",
"element",
",",
"parent",
")",
"else",
":",
"# TODO: Add feature to include xml literals as special objects or a etree subtree",
"parent",
".",
"text",
"=",
"source",
"if",
"root_tag",
"is",
"None",
":",
"if",
"len",
"(",
"source",
")",
"==",
"1",
":",
"root_tag",
"=",
"source",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"source",
"=",
"source",
"[",
"root_tag",
"]",
"else",
":",
"roots",
"=",
"[",
"]",
"for",
"tag",
",",
"content",
"in",
"source",
".",
"iteritems",
"(",
")",
":",
"root",
"=",
"ET",
".",
"Element",
"(",
"tag",
")",
"dict_to_etree_recursive",
"(",
"content",
",",
"root",
")",
"roots",
".",
"append",
"(",
"root",
")",
"return",
"roots",
"root",
"=",
"ET",
".",
"Element",
"(",
"root_tag",
")",
"dict_to_etree_recursive",
"(",
"source",
",",
"root",
")",
"return",
"root"
] | Recursively load dict/list representation of an XML tree into an etree representation.
Args:
source -- A dictionary representing an XML document where identical children tags are
contained in a list.
Keyword args:
root_tag -- A parent tag in which to wrap the xml tree. If None, and the source dict
contains multiple root items, a list of etree's Elements will be returned.
Returns:
An ET.Element which is the root of an XML tree or a list of these.
>>> dict_to_etree({'foo': 'lorem'}) #doctest: +ELLIPSIS
<Element foo at 0x...>
>>> dict_to_etree({'foo': 'lorem', 'bar': 'ipsum'}) #doctest: +ELLIPSIS
[<Element foo at 0x...>, <Element bar at 0x...>]
>>> ET.tostring(dict_to_etree({'document': {'item1': 'foo', 'item2': 'bar'}}))
'<document><item2>bar</item2><item1>foo</item1></document>'
>>> ET.tostring(dict_to_etree({'foo': 'baz'}, root_tag='document'))
'<document><foo>baz</foo></document>'
>>> ET.tostring(dict_to_etree({'title': 'foo', 'list': [{'li':1}, {'li':2}]}, root_tag='document'))
'<document><list><li>1</li><li>2</li></list><title>foo</title></document>' | [
"Recursively",
"load",
"dict",
"/",
"list",
"representation",
"of",
"an",
"XML",
"tree",
"into",
"an",
"etree",
"representation",
"."
] | python | train |
tadashi-aikawa/owlmixin | owlmixin/transformers.py | https://github.com/tadashi-aikawa/owlmixin/blob/7c4a042c3008abddc56a8e8e55ae930d276071f5/owlmixin/transformers.py#L230-L266 | def to_pretty_json(self, ignore_none: bool=True, ignore_empty: bool=False) -> str:
"""From instance to pretty json string
:param ignore_none: Properties which is None are excluded if True
:param ignore_empty: Properties which is empty are excluded if True
:return: Json string
Usage:
>>> from owlmixin.samples import Human
>>> human = Human.from_dict({
... "id": 1,
... "name": "Tom",
... "favorites": [
... {"name": "Apple", "names_by_lang": {"en": "Apple", "de": "Apfel"}},
... {"name": "Orange"}
... ]
... })
>>> print(human.to_pretty_json())
{
"favorites": [
{
"name": "Apple",
"names_by_lang": {
"de": "Apfel",
"en": "Apple"
}
},
{
"name": "Orange"
}
],
"id": 1,
"name": "Tom"
}
"""
return self.to_json(4, ignore_none, ignore_empty) | [
"def",
"to_pretty_json",
"(",
"self",
",",
"ignore_none",
":",
"bool",
"=",
"True",
",",
"ignore_empty",
":",
"bool",
"=",
"False",
")",
"->",
"str",
":",
"return",
"self",
".",
"to_json",
"(",
"4",
",",
"ignore_none",
",",
"ignore_empty",
")"
] | From instance to pretty json string
:param ignore_none: Properties which is None are excluded if True
:param ignore_empty: Properties which is empty are excluded if True
:return: Json string
Usage:
>>> from owlmixin.samples import Human
>>> human = Human.from_dict({
... "id": 1,
... "name": "Tom",
... "favorites": [
... {"name": "Apple", "names_by_lang": {"en": "Apple", "de": "Apfel"}},
... {"name": "Orange"}
... ]
... })
>>> print(human.to_pretty_json())
{
"favorites": [
{
"name": "Apple",
"names_by_lang": {
"de": "Apfel",
"en": "Apple"
}
},
{
"name": "Orange"
}
],
"id": 1,
"name": "Tom"
} | [
"From",
"instance",
"to",
"pretty",
"json",
"string"
] | python | train |
JukeboxPipeline/jukebox-core | src/jukeboxcore/reftrack.py | https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/reftrack.py#L2107-L2121 | def import_taskfile(self, refobj, taskfileinfo):
"""Import the given taskfileinfo and update the refobj
This will call :meth:`ReftypeInterface.import_taskfile`.
:param refobj: the refobject
:type refobj: refobject
:param taskfileinfo: the taskfileinfo to reference
:type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
:returns: None
:rtype: None
:raises: None
"""
inter = self.get_typ_interface(self.get_typ(refobj))
inter.import_taskfile(refobj, taskfileinfo) | [
"def",
"import_taskfile",
"(",
"self",
",",
"refobj",
",",
"taskfileinfo",
")",
":",
"inter",
"=",
"self",
".",
"get_typ_interface",
"(",
"self",
".",
"get_typ",
"(",
"refobj",
")",
")",
"inter",
".",
"import_taskfile",
"(",
"refobj",
",",
"taskfileinfo",
")"
] | Import the given taskfileinfo and update the refobj
This will call :meth:`ReftypeInterface.import_taskfile`.
:param refobj: the refobject
:type refobj: refobject
:param taskfileinfo: the taskfileinfo to reference
:type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
:returns: None
:rtype: None
:raises: None | [
"Import",
"the",
"given",
"taskfileinfo",
"and",
"update",
"the",
"refobj"
] | python | train |
okpy/ok-client | client/utils/auth.py | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/utils/auth.py#L72-L87 | def pick_free_port(hostname=REDIRECT_HOST, port=0):
""" Try to bind a port. Default=0 selects a free port. """
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((hostname, port)) # port=0 finds an open port
except OSError as e:
log.warning("Could not bind to %s:%s %s", hostname, port, e)
if port == 0:
print('Unable to find an open port for authentication.')
raise AuthenticationException(e)
else:
return pick_free_port(hostname, 0)
addr, port = s.getsockname()
s.close()
return port | [
"def",
"pick_free_port",
"(",
"hostname",
"=",
"REDIRECT_HOST",
",",
"port",
"=",
"0",
")",
":",
"import",
"socket",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"try",
":",
"s",
".",
"bind",
"(",
"(",
"hostname",
",",
"port",
")",
")",
"# port=0 finds an open port",
"except",
"OSError",
"as",
"e",
":",
"log",
".",
"warning",
"(",
"\"Could not bind to %s:%s %s\"",
",",
"hostname",
",",
"port",
",",
"e",
")",
"if",
"port",
"==",
"0",
":",
"print",
"(",
"'Unable to find an open port for authentication.'",
")",
"raise",
"AuthenticationException",
"(",
"e",
")",
"else",
":",
"return",
"pick_free_port",
"(",
"hostname",
",",
"0",
")",
"addr",
",",
"port",
"=",
"s",
".",
"getsockname",
"(",
")",
"s",
".",
"close",
"(",
")",
"return",
"port"
] | Try to bind a port. Default=0 selects a free port. | [
"Try",
"to",
"bind",
"a",
"port",
".",
"Default",
"=",
"0",
"selects",
"a",
"free",
"port",
"."
] | python | train |
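A hedged usage sketch: ask for a free local port and then bind a server socket to it. The import path is taken from the record's file location and is an assumption; note the helper releases the port before returning, so a small race with other processes is possible.

# Assumed import path; '127.0.0.1' is used instead of the module's REDIRECT_HOST.
import socket
from client.utils.auth import pick_free_port

port = pick_free_port('127.0.0.1', 0)        # 0 lets the OS choose
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('127.0.0.1', port))             # race window: the port was freed above
server.listen(1)
print('listening on', server.getsockname())
server.close()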
HdrHistogram/HdrHistogram_py | hdrh/log.py | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/log.py#L51-L92 | def output_interval_histogram(self,
histogram,
start_time_stamp_sec=0,
end_time_stamp_sec=0,
max_value_unit_ratio=1000000.0):
'''Output an interval histogram, with the given timestamp and a
configurable maxValueUnitRatio.
(note that the specified timestamp will be used, and the timestamp in
the actual histogram will be ignored).
The max value reported with the interval line will be scaled by the
given max_value_unit_ratio.
The histogram start and end timestamps are assumed to be in msec units.
Logging will be in seconds, relative to a base time.
The default base time is 0.
By convention, histogram start/end times are generally stamped with
absolute times in msec since the epoch. For logging with absolute time
stamps, the base time would remain zero. For logging with relative time
stamps (time since a start point), the base time should be set to that start point.
Params:
histogram The interval histogram to log.
start_time_stamp_sec The start timestamp to log with the
interval histogram, in seconds.
default: using the start/end timestamp indicated in the histogram
end_time_stamp_sec The end timestamp to log with the interval
histogram, in seconds.
default: using the start/end timestamp indicated in the histogram
max_value_unit_ratio The ratio by which to divide the histogram's max
value when reporting on it.
default: 1,000,000 (which is the msec : nsec ratio)
'''
if not start_time_stamp_sec:
start_time_stamp_sec = \
(histogram.get_start_time_stamp() - self.base_time) / 1000.0
if not end_time_stamp_sec:
end_time_stamp_sec = (histogram.get_end_time_stamp() - self.base_time) / 1000.0
cpayload = histogram.encode()
self.log.write("%f,%f,%f,%s\n" %
(start_time_stamp_sec,
end_time_stamp_sec - start_time_stamp_sec,
histogram.get_max_value() // max_value_unit_ratio,
cpayload.decode('utf-8'))) | [
"def",
"output_interval_histogram",
"(",
"self",
",",
"histogram",
",",
"start_time_stamp_sec",
"=",
"0",
",",
"end_time_stamp_sec",
"=",
"0",
",",
"max_value_unit_ratio",
"=",
"1000000.0",
")",
":",
"if",
"not",
"start_time_stamp_sec",
":",
"start_time_stamp_sec",
"=",
"(",
"histogram",
".",
"get_start_time_stamp",
"(",
")",
"-",
"self",
".",
"base_time",
")",
"/",
"1000.0",
"if",
"not",
"end_time_stamp_sec",
":",
"end_time_stamp_sec",
"=",
"(",
"histogram",
".",
"get_end_time_stamp",
"(",
")",
"-",
"self",
".",
"base_time",
")",
"/",
"1000.0",
"cpayload",
"=",
"histogram",
".",
"encode",
"(",
")",
"self",
".",
"log",
".",
"write",
"(",
"\"%f,%f,%f,%s\\n\"",
"%",
"(",
"start_time_stamp_sec",
",",
"end_time_stamp_sec",
"-",
"start_time_stamp_sec",
",",
"histogram",
".",
"get_max_value",
"(",
")",
"//",
"max_value_unit_ratio",
",",
"cpayload",
".",
"decode",
"(",
"'utf-8'",
")",
")",
")"
] | Output an interval histogram, with the given timestamp and a
configurable maxValueUnitRatio.
(note that the specified timestamp will be used, and the timestamp in
the actual histogram will be ignored).
The max value reported with the interval line will be scaled by the
given max_value_unit_ratio.
The histogram start and end timestamps are assumed to be in msec units.
Logging will be in seconds, relative to a base time.
The default base time is 0.
By convention, histogram start/end times are generally stamped with
absolute times in msec since the epoch. For logging with absolute time
stamps, the base time would remain zero. For logging with relative time
stamps (time since a start point), the base time should be set to that start point.
Params:
histogram The interval histogram to log.
start_time_stamp_sec The start timestamp to log with the
interval histogram, in seconds.
default: using the start/end timestamp indicated in the histogram
end_time_stamp_sec The end timestamp to log with the interval
histogram, in seconds.
default: using the start/end timestamp indicated in the histogram
max_value_unit_ratio The ratio by which to divide the histogram's max
value when reporting on it.
default: 1,000,000 (which is the msec : nsec ratio) | [
"Output",
"an",
"interval",
"histogram",
"with",
"the",
"given",
"timestamp",
"and",
"a",
"configurable",
"maxValueUnitRatio",
".",
"(",
"note",
"that",
"the",
"specified",
"timestamp",
"will",
"be",
"used",
"and",
"the",
"timestamp",
"in",
"the",
"actual",
"histogram",
"will",
"be",
"ignored",
")",
".",
"The",
"max",
"value",
"reported",
"with",
"the",
"interval",
"line",
"will",
"be",
"scaled",
"by",
"the",
"given",
"max_value_unit_ratio",
".",
"The",
"histogram",
"start",
"and",
"end",
"timestamps",
"are",
"assumed",
"to",
"be",
"in",
"msec",
"units",
".",
"Logging",
"will",
"be",
"in",
"seconds",
"realtive",
"by",
"a",
"base",
"time",
"The",
"default",
"base",
"time",
"is",
"0",
"."
] | python | train |
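A hedged sketch of logging a single interval with the method above. The HdrHistogram constructor arguments and the writer class wrapping this method are assumptions based on the hdrh package layout, not details shown in the record.

# Record a few millisecond latencies and append one interval line to a log.
from hdrh.histogram import HdrHistogram
from hdrh.log import HistogramLogWriter      # class name assumed

histogram = HdrHistogram(1, 60 * 60 * 1000, 3)   # 1 ms .. 1 h, 3 significant figures
for latency_ms in (5, 7, 12, 250):
    histogram.record_value(latency_ms)

with open('latency.hlog', 'w') as log_file:
    writer = HistogramLogWriter(log_file)
    # Interval 0..10 s; the reported max is divided by the default msec:nsec ratio.
    writer.output_interval_histogram(histogram, 0, 10)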
hubo1016/vlcp | vlcp/event/matchtree.py | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/matchtree.py#L67-L81 | def insert(self, matcher, obj):
'''
Insert a new matcher
:param matcher: an EventMatcher
:param obj: object to return
'''
current = self.subtree(matcher, True)
#current.matchers[(obj, matcher)] = None
if current._use_dict:
current.matchers_dict[(obj, matcher)] = None
else:
current.matchers_list.append((obj, matcher))
return current | [
"def",
"insert",
"(",
"self",
",",
"matcher",
",",
"obj",
")",
":",
"current",
"=",
"self",
".",
"subtree",
"(",
"matcher",
",",
"True",
")",
"#current.matchers[(obj, matcher)] = None",
"if",
"current",
".",
"_use_dict",
":",
"current",
".",
"matchers_dict",
"[",
"(",
"obj",
",",
"matcher",
")",
"]",
"=",
"None",
"else",
":",
"current",
".",
"matchers_list",
".",
"append",
"(",
"(",
"obj",
",",
"matcher",
")",
")",
"return",
"current"
] | Insert a new matcher
:param matcher: an EventMatcher
:param obj: object to return | [
"Insert",
"a",
"new",
"matcher",
":",
"param",
"matcher",
":",
"an",
"EventMatcher",
":",
"param",
"obj",
":",
"object",
"to",
"return"
] | python | train |
vtraag/leidenalg | setup.py | https://github.com/vtraag/leidenalg/blob/a9e15116973a81048edf02ef7cf800d54debe1cc/setup.py#L71-L75 | def ensure_dir_does_not_exist(*args):
"""Ensures that the given directory does not exist."""
path = os.path.join(*args)
if os.path.isdir(path):
shutil.rmtree(path) | [
"def",
"ensure_dir_does_not_exist",
"(",
"*",
"args",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"*",
"args",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"shutil",
".",
"rmtree",
"(",
"path",
")"
] | Ensures that the given directory does not exist. | [
"Ensures",
"that",
"the",
"given",
"directory",
"does",
"not",
"exist",
"."
] | python | train |
MacHu-GWU/pathlib_mate-project | pathlib_mate/mate_path_filters.py | https://github.com/MacHu-GWU/pathlib_mate-project/blob/f9fb99dd7cc9ea05d1bec8b9ce8f659e8d97b0f1/pathlib_mate/mate_path_filters.py#L156-L170 | def select_by_ext(self, ext, recursive=True):
"""
Select file path by extension.
:param ext:
**Chinese documentation**
Select files whose extension matches one of the predefined extensions.
"""
ext = [ext.strip().lower() for ext in ensure_list(ext)]
def filters(p): return p.suffix.lower() in ext
return self.select_file(filters, recursive) | [
"def",
"select_by_ext",
"(",
"self",
",",
"ext",
",",
"recursive",
"=",
"True",
")",
":",
"ext",
"=",
"[",
"ext",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"for",
"ext",
"in",
"ensure_list",
"(",
"ext",
")",
"]",
"def",
"filters",
"(",
"p",
")",
":",
"return",
"p",
".",
"suffix",
".",
"lower",
"(",
")",
"in",
"ext",
"return",
"self",
".",
"select_file",
"(",
"filters",
",",
"recursive",
")"
] | Select file path by extension.
:param ext:
**Chinese documentation**
Select files whose extension matches one of the predefined extensions. | [
"Select",
"file",
"path",
"by",
"extension",
"."
] | python | valid |
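A short usage sketch; it assumes pathlib_mate's Path subclass exposes the method shown above, and the extensions are only an example.

# List all .jpg / .png files under the current directory tree.
from pathlib_mate import Path   # assumed import path

for p in Path('.').select_by_ext(['.jpg', '.png'], recursive=True):
    print(p)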
Adarnof/adarnauth-esi | esi/managers.py | https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L105-L112 | def equivalent_to(self, token):
"""
Gets all tokens which match the character and scopes of a reference token
:param token: :class:`esi.models.Token`
:return: :class:`esi.managers.TokenQueryset`
"""
return self.filter(character_id=token.character_id).require_scopes_exact(token.scopes.all()).filter(
models.Q(user=token.user) | models.Q(user__isnull=True)).exclude(pk=token.pk) | [
"def",
"equivalent_to",
"(",
"self",
",",
"token",
")",
":",
"return",
"self",
".",
"filter",
"(",
"character_id",
"=",
"token",
".",
"character_id",
")",
".",
"require_scopes_exact",
"(",
"token",
".",
"scopes",
".",
"all",
"(",
")",
")",
".",
"filter",
"(",
"models",
".",
"Q",
"(",
"user",
"=",
"token",
".",
"user",
")",
"|",
"models",
".",
"Q",
"(",
"user__isnull",
"=",
"True",
")",
")",
".",
"exclude",
"(",
"pk",
"=",
"token",
".",
"pk",
")"
] | Gets all tokens which match the character and scopes of a reference token
:param token: :class:`esi.models.Token`
:return: :class:`esi.managers.TokenQueryset` | [
"Gets",
"all",
"tokens",
"which",
"match",
"the",
"character",
"and",
"scopes",
"of",
"a",
"reference",
"token",
":",
"param",
"token",
":",
":",
"class",
":",
"esi",
".",
"models",
".",
"Token",
":",
"return",
":",
":",
"class",
":",
"esi",
".",
"managers",
".",
"TokenQueryset"
] | python | train |
log2timeline/dfvfs | dfvfs/analyzer/tsk_analyzer_helper.py | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/analyzer/tsk_analyzer_helper.py#L20-L57 | def GetFormatSpecification(self):
"""Retrieves the format specification.
Returns:
FormatSpecification: format specification or None if the format cannot
be defined by a specification object.
"""
format_specification = specification.FormatSpecification(
self.type_indicator)
# FAT volume header signature.
format_specification.AddNewSignature(b'\x55\xaa', offset=510)
if definitions.PREFERRED_NTFS_BACK_END == self.TYPE_INDICATOR:
# NTFS file system signature.
format_specification.AddNewSignature(b'NTFS ', offset=3)
# HFS boot block signature.
format_specification.AddNewSignature(b'LK', offset=0)
# HFS master directory block signature.
format_specification.AddNewSignature(b'BD', offset=0)
# HFS+ file system signature.
format_specification.AddNewSignature(b'H+', offset=1024)
# HFSX file system signature.
format_specification.AddNewSignature(b'HX', offset=1024)
# Ext file system signature.
format_specification.AddNewSignature(b'\x53\xef', offset=1080)
# ISO9660 file system signature.
format_specification.AddNewSignature(b'CD001', offset=32769)
# YAFFS file system signature.
return format_specification | [
"def",
"GetFormatSpecification",
"(",
"self",
")",
":",
"format_specification",
"=",
"specification",
".",
"FormatSpecification",
"(",
"self",
".",
"type_indicator",
")",
"# FAT volume header signature.",
"format_specification",
".",
"AddNewSignature",
"(",
"b'\\x55\\xaa'",
",",
"offset",
"=",
"510",
")",
"if",
"definitions",
".",
"PREFERRED_NTFS_BACK_END",
"==",
"self",
".",
"TYPE_INDICATOR",
":",
"# NTFS file system signature.",
"format_specification",
".",
"AddNewSignature",
"(",
"b'NTFS '",
",",
"offset",
"=",
"3",
")",
"# HFS boot block signature.",
"format_specification",
".",
"AddNewSignature",
"(",
"b'LK'",
",",
"offset",
"=",
"0",
")",
"# HFS master directory block signature.",
"format_specification",
".",
"AddNewSignature",
"(",
"b'BD'",
",",
"offset",
"=",
"0",
")",
"# HFS+ file system signature.",
"format_specification",
".",
"AddNewSignature",
"(",
"b'H+'",
",",
"offset",
"=",
"1024",
")",
"# HFSX file system signature.",
"format_specification",
".",
"AddNewSignature",
"(",
"b'HX'",
",",
"offset",
"=",
"1024",
")",
"# Ext file system signature.",
"format_specification",
".",
"AddNewSignature",
"(",
"b'\\x53\\xef'",
",",
"offset",
"=",
"1080",
")",
"# ISO9660 file system signature.",
"format_specification",
".",
"AddNewSignature",
"(",
"b'CD001'",
",",
"offset",
"=",
"32769",
")",
"# YAFFS file system signature.",
"return",
"format_specification"
] | Retrieves the format specification.
Returns:
FormatSpecification: format specification or None if the format cannot
be defined by a specification object. | [
"Retrieves",
"the",
"format",
"specification",
"."
] | python | train |
mitsei/dlkit | dlkit/json_/relationship/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/relationship/sessions.py#L2065-L2085 | def get_root_families(self):
"""Gets the root families in the family hierarchy.
A node with no parents is an orphan. While all family ``Ids``
are known to the hierarchy, an orphan does not appear in the
hierarchy unless explicitly added as a root node or child of
another node.
return: (osid.relationship.FamilyList) - the root families
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_root_bins
if self._catalog_session is not None:
return self._catalog_session.get_root_catalogs()
return FamilyLookupSession(
self._proxy,
self._runtime).get_families_by_ids(list(self.get_root_family_ids())) | [
"def",
"get_root_families",
"(",
"self",
")",
":",
"# Implemented from template for",
"# osid.resource.BinHierarchySession.get_root_bins",
"if",
"self",
".",
"_catalog_session",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_catalog_session",
".",
"get_root_catalogs",
"(",
")",
"return",
"FamilyLookupSession",
"(",
"self",
".",
"_proxy",
",",
"self",
".",
"_runtime",
")",
".",
"get_families_by_ids",
"(",
"list",
"(",
"self",
".",
"get_root_family_ids",
"(",
")",
")",
")"
] | Gets the root families in the family hierarchy.
A node with no parents is an orphan. While all family ``Ids``
are known to the hierarchy, an orphan does not appear in the
hierarchy unless explicitly added as a root node or child of
another node.
return: (osid.relationship.FamilyList) - the root families
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | [
"Gets",
"the",
"root",
"families",
"in",
"the",
"family",
"hierarchy",
"."
] | python | train |
ArchiveTeam/wpull | wpull/network/connection.py | https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/network/connection.py#L248-L259 | def read(self, amount: int=-1) -> bytes:
'''Read data.'''
assert self._state == ConnectionState.created, \
'Expect conn created. Got {}.'.format(self._state)
data = yield from \
self.run_network_operation(
self.reader.read(amount),
close_timeout=self._timeout,
name='Read')
return data | [
"def",
"read",
"(",
"self",
",",
"amount",
":",
"int",
"=",
"-",
"1",
")",
"->",
"bytes",
":",
"assert",
"self",
".",
"_state",
"==",
"ConnectionState",
".",
"created",
",",
"'Expect conn created. Got {}.'",
".",
"format",
"(",
"self",
".",
"_state",
")",
"data",
"=",
"yield",
"from",
"self",
".",
"run_network_operation",
"(",
"self",
".",
"reader",
".",
"read",
"(",
"amount",
")",
",",
"close_timeout",
"=",
"self",
".",
"_timeout",
",",
"name",
"=",
"'Read'",
")",
"return",
"data"
] | Read data. | [
"Read",
"data",
"."
] | python | train |
NASA-AMMOS/AIT-Core | ait/core/geom.py | https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/geom.py#L700-L705 | def segments (self):
"""Return the Line segments that comprise this Polygon."""
for n in xrange(len(self.vertices) - 1):
yield Line(self.vertices[n], self.vertices[n + 1])
yield Line(self.vertices[-1], self.vertices[0]) | [
"def",
"segments",
"(",
"self",
")",
":",
"for",
"n",
"in",
"xrange",
"(",
"len",
"(",
"self",
".",
"vertices",
")",
"-",
"1",
")",
":",
"yield",
"Line",
"(",
"self",
".",
"vertices",
"[",
"n",
"]",
",",
"self",
".",
"vertices",
"[",
"n",
"+",
"1",
"]",
")",
"yield",
"Line",
"(",
"self",
".",
"vertices",
"[",
"-",
"1",
"]",
",",
"self",
".",
"vertices",
"[",
"0",
"]",
")"
] | Return the Line segments that comprise this Polygon. | [
"Return",
"the",
"Line",
"segments",
"that",
"comprise",
"this",
"Polygon",
"."
] | python | train |
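The generator yields one Line per consecutive vertex pair plus a closing Line from the last vertex back to the first. The plain-tuple sketch below mirrors that closing behaviour without the AIT geometry types.

# Stand-alone illustration of the same iteration, using plain (x, y) tuples.
vertices = [(0, 0), (1, 0), (0, 1)]
segments = [(vertices[n], vertices[n + 1]) for n in range(len(vertices) - 1)]
segments.append((vertices[-1], vertices[0]))        # closing segment
print(segments)
# [((0, 0), (1, 0)), ((1, 0), (0, 1)), ((0, 1), (0, 0))]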
RJT1990/pyflux | pyflux/garch/segarch.py | https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/garch/segarch.py#L155-L217 | def _mb_model(self, beta, mini_batch):
""" Creates the structure of the model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
mini_batch : int
Mini batch size for the data sampling
Returns
----------
lambda : np.array
Contains the values for the conditional volatility series
Y : np.array
Contains the length-adjusted time series (accounting for lags)
scores : np.array
Contains the score terms for the time series
"""
# Transform latent variables
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
rand_int = np.random.randint(low=0, high=self.data_length-mini_batch+1)
sample = np.arange(start=rand_int, stop=rand_int+mini_batch)
sampled_data = self.data[sample]
Y = np.array(sampled_data[self.max_lag:])
X = np.ones(Y.shape[0])
scores = np.zeros(Y.shape[0])
lmda = np.ones(Y.shape[0])*parm[0]
theta = np.ones(Y.shape[0])*parm[-1]
# Loop over time series
for t in range(0,Y.shape[0]):
if t < self.max_lag:
lmda[t] = parm[0]/(1-np.sum(parm[1:(self.p+1)]))
theta[t] += (parm[-3] - (1.0/parm[-3]))*np.exp(lmda[t])*(np.sqrt(parm[-2])*sp.gamma((parm[-2]-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(parm[-2]/2.0))
else:
# Loop over GARCH terms
for p_term in range(0,self.p):
lmda[t] += parm[1+p_term]*lmda[t-p_term-1]
# Loop over Score terms
for q_term in range(0,self.q):
lmda[t] += parm[1+self.p+q_term]*scores[t-q_term-1]
if self.leverage is True:
lmda[t] += parm[-4]*np.sign(-(Y[t-1]-theta[t-1]))*(scores[t-1]+1)
theta[t] += (parm[-3] - (1.0/parm[-3]))*np.exp(lmda[t]/2.0)*(np.sqrt(parm[-2])*sp.gamma((parm[-2]-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(parm[-2]/2.0))
if (Y[t]-theta[t])>=0:
scores[t] = (((parm[-2]+1.0)*np.power(Y[t]-theta[t],2))/float(np.power(parm[-3], 2)*parm[-2]*np.exp(lmda[t]) + np.power(Y[t]-theta[t],2))) - 1.0
else:
scores[t] = (((parm[-2]+1.0)*np.power(Y[t]-theta[t],2))/float(np.power(parm[-3],-2)*parm[-2]*np.exp(lmda[t]) + np.power(Y[t]-theta[t],2))) - 1.0
return lmda, Y, scores, theta | [
"def",
"_mb_model",
"(",
"self",
",",
"beta",
",",
"mini_batch",
")",
":",
"# Transform latent variables",
"parm",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"latent_variables",
".",
"z_list",
"[",
"k",
"]",
".",
"prior",
".",
"transform",
"(",
"beta",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"range",
"(",
"beta",
".",
"shape",
"[",
"0",
"]",
")",
"]",
")",
"rand_int",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"low",
"=",
"0",
",",
"high",
"=",
"self",
".",
"data_length",
"-",
"mini_batch",
"+",
"1",
")",
"sample",
"=",
"np",
".",
"arange",
"(",
"start",
"=",
"rand_int",
",",
"stop",
"=",
"rand_int",
"+",
"mini_batch",
")",
"sampled_data",
"=",
"self",
".",
"data",
"[",
"sample",
"]",
"Y",
"=",
"np",
".",
"array",
"(",
"sampled_data",
"[",
"self",
".",
"max_lag",
":",
"]",
")",
"X",
"=",
"np",
".",
"ones",
"(",
"Y",
".",
"shape",
"[",
"0",
"]",
")",
"scores",
"=",
"np",
".",
"zeros",
"(",
"Y",
".",
"shape",
"[",
"0",
"]",
")",
"lmda",
"=",
"np",
".",
"ones",
"(",
"Y",
".",
"shape",
"[",
"0",
"]",
")",
"*",
"parm",
"[",
"0",
"]",
"theta",
"=",
"np",
".",
"ones",
"(",
"Y",
".",
"shape",
"[",
"0",
"]",
")",
"*",
"parm",
"[",
"-",
"1",
"]",
"# Loop over time series",
"for",
"t",
"in",
"range",
"(",
"0",
",",
"Y",
".",
"shape",
"[",
"0",
"]",
")",
":",
"if",
"t",
"<",
"self",
".",
"max_lag",
":",
"lmda",
"[",
"t",
"]",
"=",
"parm",
"[",
"0",
"]",
"/",
"(",
"1",
"-",
"np",
".",
"sum",
"(",
"parm",
"[",
"1",
":",
"(",
"self",
".",
"p",
"+",
"1",
")",
"]",
")",
")",
"theta",
"[",
"t",
"]",
"+=",
"(",
"parm",
"[",
"-",
"3",
"]",
"-",
"(",
"1.0",
"/",
"parm",
"[",
"-",
"3",
"]",
")",
")",
"*",
"np",
".",
"exp",
"(",
"lmda",
"[",
"t",
"]",
")",
"*",
"(",
"np",
".",
"sqrt",
"(",
"parm",
"[",
"-",
"2",
"]",
")",
"*",
"sp",
".",
"gamma",
"(",
"(",
"parm",
"[",
"-",
"2",
"]",
"-",
"1.0",
")",
"/",
"2.0",
")",
")",
"/",
"(",
"np",
".",
"sqrt",
"(",
"np",
".",
"pi",
")",
"*",
"sp",
".",
"gamma",
"(",
"parm",
"[",
"-",
"2",
"]",
"/",
"2.0",
")",
")",
"else",
":",
"# Loop over GARCH terms",
"for",
"p_term",
"in",
"range",
"(",
"0",
",",
"self",
".",
"p",
")",
":",
"lmda",
"[",
"t",
"]",
"+=",
"parm",
"[",
"1",
"+",
"p_term",
"]",
"*",
"lmda",
"[",
"t",
"-",
"p_term",
"-",
"1",
"]",
"# Loop over Score terms",
"for",
"q_term",
"in",
"range",
"(",
"0",
",",
"self",
".",
"q",
")",
":",
"lmda",
"[",
"t",
"]",
"+=",
"parm",
"[",
"1",
"+",
"self",
".",
"p",
"+",
"q_term",
"]",
"*",
"scores",
"[",
"t",
"-",
"q_term",
"-",
"1",
"]",
"if",
"self",
".",
"leverage",
"is",
"True",
":",
"lmda",
"[",
"t",
"]",
"+=",
"parm",
"[",
"-",
"4",
"]",
"*",
"np",
".",
"sign",
"(",
"-",
"(",
"Y",
"[",
"t",
"-",
"1",
"]",
"-",
"theta",
"[",
"t",
"-",
"1",
"]",
")",
")",
"*",
"(",
"scores",
"[",
"t",
"-",
"1",
"]",
"+",
"1",
")",
"theta",
"[",
"t",
"]",
"+=",
"(",
"parm",
"[",
"-",
"3",
"]",
"-",
"(",
"1.0",
"/",
"parm",
"[",
"-",
"3",
"]",
")",
")",
"*",
"np",
".",
"exp",
"(",
"lmda",
"[",
"t",
"]",
"/",
"2.0",
")",
"*",
"(",
"np",
".",
"sqrt",
"(",
"parm",
"[",
"-",
"2",
"]",
")",
"*",
"sp",
".",
"gamma",
"(",
"(",
"parm",
"[",
"-",
"2",
"]",
"-",
"1.0",
")",
"/",
"2.0",
")",
")",
"/",
"(",
"np",
".",
"sqrt",
"(",
"np",
".",
"pi",
")",
"*",
"sp",
".",
"gamma",
"(",
"parm",
"[",
"-",
"2",
"]",
"/",
"2.0",
")",
")",
"if",
"(",
"Y",
"[",
"t",
"]",
"-",
"theta",
"[",
"t",
"]",
")",
">=",
"0",
":",
"scores",
"[",
"t",
"]",
"=",
"(",
"(",
"(",
"parm",
"[",
"-",
"2",
"]",
"+",
"1.0",
")",
"*",
"np",
".",
"power",
"(",
"Y",
"[",
"t",
"]",
"-",
"theta",
"[",
"t",
"]",
",",
"2",
")",
")",
"/",
"float",
"(",
"np",
".",
"power",
"(",
"parm",
"[",
"-",
"3",
"]",
",",
"2",
")",
"*",
"parm",
"[",
"-",
"2",
"]",
"*",
"np",
".",
"exp",
"(",
"lmda",
"[",
"t",
"]",
")",
"+",
"np",
".",
"power",
"(",
"Y",
"[",
"t",
"]",
"-",
"theta",
"[",
"t",
"]",
",",
"2",
")",
")",
")",
"-",
"1.0",
"else",
":",
"scores",
"[",
"t",
"]",
"=",
"(",
"(",
"(",
"parm",
"[",
"-",
"2",
"]",
"+",
"1.0",
")",
"*",
"np",
".",
"power",
"(",
"Y",
"[",
"t",
"]",
"-",
"theta",
"[",
"t",
"]",
",",
"2",
")",
")",
"/",
"float",
"(",
"np",
".",
"power",
"(",
"parm",
"[",
"-",
"3",
"]",
",",
"-",
"2",
")",
"*",
"parm",
"[",
"-",
"2",
"]",
"*",
"np",
".",
"exp",
"(",
"lmda",
"[",
"t",
"]",
")",
"+",
"np",
".",
"power",
"(",
"Y",
"[",
"t",
"]",
"-",
"theta",
"[",
"t",
"]",
",",
"2",
")",
")",
")",
"-",
"1.0",
"return",
"lmda",
",",
"Y",
",",
"scores",
",",
"theta"
] | Creates the structure of the model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
mini_batch : int
Mini batch size for the data sampling
Returns
----------
lambda : np.array
Contains the values for the conditional volatility series
Y : np.array
Contains the length-adjusted time series (accounting for lags)
scores : np.array
Contains the score terms for the time series | [
"Creates",
"the",
"structure",
"of",
"the",
"model"
] | python | train |
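The mini-batch handling above samples one random contiguous window of observations and drops the first max_lag points before filtering. The stand-alone sketch below reproduces just that windowing step on synthetic data; it is not the full GARCH recursion.

# Mirror of the mini-batch window selection used by _mb_model.
import numpy as np

data = np.random.randn(500)           # synthetic "returns"
mini_batch, max_lag = 64, 1

rand_int = np.random.randint(low=0, high=data.shape[0] - mini_batch + 1)
window = data[rand_int:rand_int + mini_batch]
Y = window[max_lag:]                   # length mini_batch - max_lag
print(rand_int, Y.shape)               # e.g. 123 (63,)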
vertexproject/synapse | synapse/lib/gis.py | https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/gis.py#L13-L25 | def latlong(text):
'''
Chop a latlong string and return (float,float).
Does not perform validation on the coordinates.
Args:
text (str): A longitude,latitude string.
Returns:
(float,float): A longitude, latitude float tuple.
'''
nlat, nlon = text.split(',')
return (float(nlat), float(nlon)) | [
"def",
"latlong",
"(",
"text",
")",
":",
"nlat",
",",
"nlon",
"=",
"text",
".",
"split",
"(",
"','",
")",
"return",
"(",
"float",
"(",
"nlat",
")",
",",
"float",
"(",
"nlon",
")",
")"
] | Chop a latlong string and return (float,float).
Does not perform validation on the coordinates.
Args:
text (str): A longitude,latitude string.
Returns:
(float,float): A longitude, latitude float tuple. | [
"Chop",
"a",
"latlong",
"string",
"and",
"return",
"(",
"float",
"float",
")",
".",
"Does",
"not",
"perform",
"validation",
"on",
"the",
"coordinates",
"."
] | python | train |
spyder-ide/spyder | spyder/utils/programs.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/programs.py#L251-L270 | def get_python_args(fname, python_args, interact, debug, end_args):
"""Construct Python interpreter arguments"""
p_args = []
if python_args is not None:
p_args += python_args.split()
if interact:
p_args.append('-i')
if debug:
p_args.extend(['-m', 'pdb'])
if fname is not None:
if os.name == 'nt' and debug:
# When calling pdb on Windows, one has to replace backslashes by
# slashes to avoid confusion with escape characters (otherwise,
# for example, '\t' will be interpreted as a tabulation):
p_args.append(osp.normpath(fname).replace(os.sep, '/'))
else:
p_args.append(fname)
if end_args:
p_args.extend(shell_split(end_args))
return p_args | [
"def",
"get_python_args",
"(",
"fname",
",",
"python_args",
",",
"interact",
",",
"debug",
",",
"end_args",
")",
":",
"p_args",
"=",
"[",
"]",
"if",
"python_args",
"is",
"not",
"None",
":",
"p_args",
"+=",
"python_args",
".",
"split",
"(",
")",
"if",
"interact",
":",
"p_args",
".",
"append",
"(",
"'-i'",
")",
"if",
"debug",
":",
"p_args",
".",
"extend",
"(",
"[",
"'-m'",
",",
"'pdb'",
"]",
")",
"if",
"fname",
"is",
"not",
"None",
":",
"if",
"os",
".",
"name",
"==",
"'nt'",
"and",
"debug",
":",
"# When calling pdb on Windows, one has to replace backslashes by\r",
"# slashes to avoid confusion with escape characters (otherwise, \r",
"# for example, '\\t' will be interpreted as a tabulation):\r",
"p_args",
".",
"append",
"(",
"osp",
".",
"normpath",
"(",
"fname",
")",
".",
"replace",
"(",
"os",
".",
"sep",
",",
"'/'",
")",
")",
"else",
":",
"p_args",
".",
"append",
"(",
"fname",
")",
"if",
"end_args",
":",
"p_args",
".",
"extend",
"(",
"shell_split",
"(",
"end_args",
")",
")",
"return",
"p_args"
] | Construct Python interpreter arguments | [
"Construct",
"Python",
"interpreter",
"arguments"
] | python | train |
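A worked example of the argument list this helper builds. The import path is taken from the record's file location, and the commented result assumes a Windows run, since only there are the path separators flipped for pdb.

# Assumed import path; on non-Windows platforms the filename is appended as-is.
from spyder.utils.programs import get_python_args

args = get_python_args(r'C:\projects\script.py', '-O',
                       interact=False, debug=True, end_args='--verbose 2')
print(args)
# On Windows: ['-O', '-m', 'pdb', 'C:/projects/script.py', '--verbose', '2']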
dossier/dossier.models | dossier/models/etl/interface.py | https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/etl/interface.py#L152-L231 | def html_to_fc(html=None, clean_html=None, clean_visible=None, encoding=None, url=None,
timestamp=None, other_features=None):
'''`html` is expected to be a raw string received over the wire from a
remote webserver, and `encoding`, if provided, is used to decode
it. Typically, encoding comes from the Content-Type header field.
The :func:`~streamcorpus_pipeline._clean_html.make_clean_html`
function handles character encodings.
'''
def add_feature(name, xs):
if name not in fc:
fc[name] = StringCounter()
fc[name] += StringCounter(xs)
timestamp = timestamp or int(time.time() * 1000)
other_features = other_features or {}
if clean_html is None:
if html is not None:
try:
clean_html_utf8 = make_clean_html(html, encoding=encoding)
except:
logger.warn('dropping doc because:', exc_info=True)
return
clean_html = clean_html_utf8.decode('utf-8')
else:
clean_html_utf8 = u''
clean_html = u''
else:
clean_html_utf8 = u''
if clean_visible is None or len(clean_visible) == 0:
clean_visible = make_clean_visible(clean_html_utf8).decode('utf-8')
elif isinstance(clean_visible, str):
clean_visible = clean_visible.decode('utf-8')
fc = FeatureCollection()
fc[u'meta_raw'] = html and uni(html, encoding) or u''
fc[u'meta_clean_html'] = clean_html
fc[u'meta_clean_visible'] = clean_visible
fc[u'meta_timestamp'] = unicode(timestamp)
url = url or u''
fc[u'meta_url'] = uni(url)
add_feature(u'icq', features.ICQs(clean_visible))
add_feature(u'skype', features.skypes(clean_visible))
add_feature(u'phone', features.phones(clean_visible))
add_feature(u'email', features.emails(clean_visible))
bowNP, normalizations = features.noun_phrases(
cleanse(clean_visible), included_unnormalized=True)
add_feature(u'bowNP', bowNP)
bowNP_unnorm = chain(*normalizations.values())
add_feature(u'bowNP_unnorm', bowNP_unnorm)
add_feature(u'image_url', features.image_urls(clean_html))
add_feature(u'a_url', features.a_urls(clean_html))
## get parsed versions, extract usernames
fc[u'img_url_path_dirs'] = features.path_dirs(fc[u'image_url'])
fc[u'img_url_hostnames'] = features.host_names(fc[u'image_url'])
fc[u'usernames'] = features.usernames(fc[u'image_url'])
fc[u'a_url_path_dirs'] = features.path_dirs(fc[u'a_url'])
fc[u'a_url_hostnames'] = features.host_names(fc[u'a_url'])
fc[u'usernames'] += features.usernames(fc[u'a_url'])
#fc[u'usernames'] += features.usernames2(
# fc[u'meta_clean_visible'])
# beginning of treating this as a pipeline...
xform = features.entity_names()
fc = xform.process(fc)
for feat_name, feat_val in other_features.iteritems():
fc[feat_name] += StringCounter(feat_val)
return fc | [
"def",
"html_to_fc",
"(",
"html",
"=",
"None",
",",
"clean_html",
"=",
"None",
",",
"clean_visible",
"=",
"None",
",",
"encoding",
"=",
"None",
",",
"url",
"=",
"None",
",",
"timestamp",
"=",
"None",
",",
"other_features",
"=",
"None",
")",
":",
"def",
"add_feature",
"(",
"name",
",",
"xs",
")",
":",
"if",
"name",
"not",
"in",
"fc",
":",
"fc",
"[",
"name",
"]",
"=",
"StringCounter",
"(",
")",
"fc",
"[",
"name",
"]",
"+=",
"StringCounter",
"(",
"xs",
")",
"timestamp",
"=",
"timestamp",
"or",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1000",
")",
"other_features",
"=",
"other_features",
"or",
"{",
"}",
"if",
"clean_html",
"is",
"None",
":",
"if",
"html",
"is",
"not",
"None",
":",
"try",
":",
"clean_html_utf8",
"=",
"make_clean_html",
"(",
"html",
",",
"encoding",
"=",
"encoding",
")",
"except",
":",
"logger",
".",
"warn",
"(",
"'dropping doc because:'",
",",
"exc_info",
"=",
"True",
")",
"return",
"clean_html",
"=",
"clean_html_utf8",
".",
"decode",
"(",
"'utf-8'",
")",
"else",
":",
"clean_html_utf8",
"=",
"u''",
"clean_html",
"=",
"u''",
"else",
":",
"clean_html_utf8",
"=",
"u''",
"if",
"clean_visible",
"is",
"None",
"or",
"len",
"(",
"clean_visible",
")",
"==",
"0",
":",
"clean_visible",
"=",
"make_clean_visible",
"(",
"clean_html_utf8",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"elif",
"isinstance",
"(",
"clean_visible",
",",
"str",
")",
":",
"clean_visible",
"=",
"clean_visible",
".",
"decode",
"(",
"'utf-8'",
")",
"fc",
"=",
"FeatureCollection",
"(",
")",
"fc",
"[",
"u'meta_raw'",
"]",
"=",
"html",
"and",
"uni",
"(",
"html",
",",
"encoding",
")",
"or",
"u''",
"fc",
"[",
"u'meta_clean_html'",
"]",
"=",
"clean_html",
"fc",
"[",
"u'meta_clean_visible'",
"]",
"=",
"clean_visible",
"fc",
"[",
"u'meta_timestamp'",
"]",
"=",
"unicode",
"(",
"timestamp",
")",
"url",
"=",
"url",
"or",
"u''",
"fc",
"[",
"u'meta_url'",
"]",
"=",
"uni",
"(",
"url",
")",
"add_feature",
"(",
"u'icq'",
",",
"features",
".",
"ICQs",
"(",
"clean_visible",
")",
")",
"add_feature",
"(",
"u'skype'",
",",
"features",
".",
"skypes",
"(",
"clean_visible",
")",
")",
"add_feature",
"(",
"u'phone'",
",",
"features",
".",
"phones",
"(",
"clean_visible",
")",
")",
"add_feature",
"(",
"u'email'",
",",
"features",
".",
"emails",
"(",
"clean_visible",
")",
")",
"bowNP",
",",
"normalizations",
"=",
"features",
".",
"noun_phrases",
"(",
"cleanse",
"(",
"clean_visible",
")",
",",
"included_unnormalized",
"=",
"True",
")",
"add_feature",
"(",
"u'bowNP'",
",",
"bowNP",
")",
"bowNP_unnorm",
"=",
"chain",
"(",
"*",
"normalizations",
".",
"values",
"(",
")",
")",
"add_feature",
"(",
"u'bowNP_unnorm'",
",",
"bowNP_unnorm",
")",
"add_feature",
"(",
"u'image_url'",
",",
"features",
".",
"image_urls",
"(",
"clean_html",
")",
")",
"add_feature",
"(",
"u'a_url'",
",",
"features",
".",
"a_urls",
"(",
"clean_html",
")",
")",
"## get parsed versions, extract usernames",
"fc",
"[",
"u'img_url_path_dirs'",
"]",
"=",
"features",
".",
"path_dirs",
"(",
"fc",
"[",
"u'image_url'",
"]",
")",
"fc",
"[",
"u'img_url_hostnames'",
"]",
"=",
"features",
".",
"host_names",
"(",
"fc",
"[",
"u'image_url'",
"]",
")",
"fc",
"[",
"u'usernames'",
"]",
"=",
"features",
".",
"usernames",
"(",
"fc",
"[",
"u'image_url'",
"]",
")",
"fc",
"[",
"u'a_url_path_dirs'",
"]",
"=",
"features",
".",
"path_dirs",
"(",
"fc",
"[",
"u'a_url'",
"]",
")",
"fc",
"[",
"u'a_url_hostnames'",
"]",
"=",
"features",
".",
"host_names",
"(",
"fc",
"[",
"u'a_url'",
"]",
")",
"fc",
"[",
"u'usernames'",
"]",
"+=",
"features",
".",
"usernames",
"(",
"fc",
"[",
"u'a_url'",
"]",
")",
"#fc[u'usernames'] += features.usernames2(",
"# fc[u'meta_clean_visible'])",
"# beginning of treating this as a pipeline...",
"xform",
"=",
"features",
".",
"entity_names",
"(",
")",
"fc",
"=",
"xform",
".",
"process",
"(",
"fc",
")",
"for",
"feat_name",
",",
"feat_val",
"in",
"other_features",
".",
"iteritems",
"(",
")",
":",
"fc",
"[",
"feat_name",
"]",
"+=",
"StringCounter",
"(",
"feat_val",
")",
"return",
"fc"
] | `html` is expected to be a raw string received over the wire from a
remote webserver, and `encoding`, if provided, is used to decode
it. Typically, encoding comes from the Content-Type header field.
The :func:`~streamcorpus_pipeline._clean_html.make_clean_html`
function handles character encodings. | [
"html",
"is",
"expected",
"to",
"be",
"a",
"raw",
"string",
"received",
"over",
"the",
"wire",
"from",
"a",
"remote",
"webserver",
"and",
"encoding",
"if",
"provided",
"is",
"used",
"to",
"decode",
"it",
".",
"Typically",
"encoding",
"comes",
"from",
"the",
"Content",
"-",
"Type",
"header",
"field",
".",
"The",
":",
"func",
":",
"~streamcorpus_pipeline",
".",
"_clean_html",
".",
"make_clean_html",
"function",
"handles",
"character",
"encodings",
"."
] | python | train |
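A hedged usage sketch for the feature extractor above. It assumes the dossier.models ETL module and its streamcorpus dependencies are importable; the printed feature names are ones populated in the function body, and the exact counter contents depend on the extractor regexes.

# Illustrative call; import path assumed from the record's file location.
from dossier.models.etl.interface import html_to_fc

html = (u'<html><body>'
        u'<p>Reach me at [email protected] or skype: alice.w</p>'
        u'<a href="http://example.com/u/alice">profile</a>'
        u'</body></html>')

fc = html_to_fc(html=html, encoding='utf-8',
                url=u'http://example.com/post/1', timestamp=1400000000000)
if fc is not None:                        # None means HTML cleaning failed
    print(sorted(fc[u'email']))           # e.g. [u'[email protected]']
    print(sorted(fc[u'a_url_hostnames']))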
mardix/Mocha | mocha/extras/jinja_helpers.py | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/extras/jinja_helpers.py#L45-L72 | def img_src(url, class_="", responsive=False, lazy_load=False, id_=""):
"""
Create an image src
{{ xyz.jpg | img_src }}
:param url:
:param class_:
:param responsive:
:param lazy_load:
:param id_:
:return:
"""
if not url.startswith("http://") and not url.startswith("https://"):
url = static_url(url)
data_src = ""
if responsive:
class_ += " responsive"
if lazy_load:
data_src = url
# 1x1 image
url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNgYAAAAAMAASsJTYQAAAAASUVORK5CYII="
class_ += " lazy"
img = "<img src=\"{src}\" class=\"{class_}\" id=\"{id_}\" data-src={data_src}>" \
.format(src=url, class_=class_, id_=id_, data_src=data_src)
return Markup(img) | [
"def",
"img_src",
"(",
"url",
",",
"class_",
"=",
"\"\"",
",",
"responsive",
"=",
"False",
",",
"lazy_load",
"=",
"False",
",",
"id_",
"=",
"\"\"",
")",
":",
"if",
"not",
"url",
".",
"startswith",
"(",
"\"http://\"",
")",
"and",
"not",
"url",
".",
"startswith",
"(",
"\"https://\"",
")",
":",
"url",
"=",
"static_url",
"(",
"url",
")",
"data_src",
"=",
"\"\"",
"if",
"responsive",
":",
"class_",
"+=",
"\" responsive\"",
"if",
"lazy_load",
":",
"data_src",
"=",
"url",
"# 1x1 image",
"url",
"=",
"\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNgYAAAAAMAASsJTYQAAAAASUVORK5CYII=\"",
"class_",
"+=",
"\" lazy\"",
"img",
"=",
"\"<img src=\\\"{src}\\\" class=\\\"{class_}\\\" id=\\\"{id_}\\\" data-src={data_src}>\"",
".",
"format",
"(",
"src",
"=",
"url",
",",
"class_",
"=",
"class_",
",",
"id_",
"=",
"id_",
",",
"data_src",
"=",
"data_src",
")",
"return",
"Markup",
"(",
"img",
")"
] | Create an image src
{{ xyz.jpg | img_src }}
:param url:
:param class_:
:param responsive:
:param lazy_load:
:param id_:
:return: | [
"Create",
"an",
"image",
"src"
] | python | train |
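What the filter emits for a lazily-loaded, responsive image. In templates it is used as a Jinja filter, but calling it directly shows the generated markup; a full URL is used here so the static_url() branch (which needs an app context) is skipped.

# Direct call for illustration; in templates: {{ "photos/cat.jpg" | img_src }}
markup = img_src('https://cdn.example.com/cat.jpg', class_='avatar',
                 responsive=True, lazy_load=True, id_='cat-1')
print(markup)
# <img src="data:image/png;base64,iVBOR..." class="avatar responsive lazy"
#  id="cat-1" data-src=https://cdn.example.com/cat.jpg>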
bkg/greenwich | greenwich/raster.py | https://github.com/bkg/greenwich/blob/57ec644dadfe43ce0ecf2cfd32a2de71e0c8c141/greenwich/raster.py#L474-L488 | def get_offset(self, envelope):
"""Returns a 4-tuple pixel window (x_offset, y_offset, x_size, y_size).
Arguments:
envelope -- coordinate extent tuple or Envelope
"""
if isinstance(envelope, collections.Sequence):
envelope = Envelope(envelope)
if not (self.envelope.contains(envelope) or
self.envelope.intersects(envelope)):
raise ValueError('Envelope does not intersect with this extent')
coords = self.affine.transform((envelope.ul, envelope.lr))
nxy = [(min(dest, size) - origin) or 1
for size, origin, dest in zip(self.size, *coords)]
return coords[0] + tuple(nxy) | [
"def",
"get_offset",
"(",
"self",
",",
"envelope",
")",
":",
"if",
"isinstance",
"(",
"envelope",
",",
"collections",
".",
"Sequence",
")",
":",
"envelope",
"=",
"Envelope",
"(",
"envelope",
")",
"if",
"not",
"(",
"self",
".",
"envelope",
".",
"contains",
"(",
"envelope",
")",
"or",
"self",
".",
"envelope",
".",
"intersects",
"(",
"envelope",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Envelope does not intersect with this extent'",
")",
"coords",
"=",
"self",
".",
"affine",
".",
"transform",
"(",
"(",
"envelope",
".",
"ul",
",",
"envelope",
".",
"lr",
")",
")",
"nxy",
"=",
"[",
"(",
"min",
"(",
"dest",
",",
"size",
")",
"-",
"origin",
")",
"or",
"1",
"for",
"size",
",",
"origin",
",",
"dest",
"in",
"zip",
"(",
"self",
".",
"size",
",",
"*",
"coords",
")",
"]",
"return",
"coords",
"[",
"0",
"]",
"+",
"tuple",
"(",
"nxy",
")"
] | Returns a 4-tuple pixel window (x_offset, y_offset, x_size, y_size).
Arguments:
envelope -- coordinate extent tuple or Envelope | [
"Returns",
"a",
"4",
"-",
"tuple",
"pixel",
"window",
"(",
"x_offset",
"y_offset",
"x_size",
"y_size",
")",
"."
] | python | test |
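A hedged sketch of `get_offset` above; it assumes the method lives on greenwich's `Raster` class (same `greenwich/raster.py` module), and the file name and extent are placeholders:

    from greenwich.raster import Raster

    with Raster("elevation.tif") as ras:  # placeholder dataset
        # the envelope may be an Envelope instance or a plain (min_x, min_y, max_x, max_y) tuple
        xoff, yoff, xsize, ysize = ras.get_offset((-120.0, 38.0, -119.0, 39.0))
        print(xoff, yoff, xsize, ysize)
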
spyder-ide/spyder | spyder/plugins/console/widgets/shell.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/console/widgets/shell.py#L858-L875 | def show_completion_list(self, completions, completion_text=""):
"""Display the possible completions"""
if not completions:
return
if not isinstance(completions[0], tuple):
completions = [(c, '') for c in completions]
if len(completions) == 1 and completions[0][0] == completion_text:
return
self.completion_text = completion_text
# Sorting completion list (entries starting with underscore are
# put at the end of the list):
underscore = set([(comp, t) for (comp, t) in completions
if comp.startswith('_')])
completions = sorted(set(completions) - underscore,
key=lambda x: str_lower(x[0]))
completions += sorted(underscore, key=lambda x: str_lower(x[0]))
self.show_completion_widget(completions) | [
"def",
"show_completion_list",
"(",
"self",
",",
"completions",
",",
"completion_text",
"=",
"\"\"",
")",
":",
"if",
"not",
"completions",
":",
"return",
"if",
"not",
"isinstance",
"(",
"completions",
"[",
"0",
"]",
",",
"tuple",
")",
":",
"completions",
"=",
"[",
"(",
"c",
",",
"''",
")",
"for",
"c",
"in",
"completions",
"]",
"if",
"len",
"(",
"completions",
")",
"==",
"1",
"and",
"completions",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"completion_text",
":",
"return",
"self",
".",
"completion_text",
"=",
"completion_text",
"# Sorting completion list (entries starting with underscore are\r",
"# put at the end of the list):\r",
"underscore",
"=",
"set",
"(",
"[",
"(",
"comp",
",",
"t",
")",
"for",
"(",
"comp",
",",
"t",
")",
"in",
"completions",
"if",
"comp",
".",
"startswith",
"(",
"'_'",
")",
"]",
")",
"completions",
"=",
"sorted",
"(",
"set",
"(",
"completions",
")",
"-",
"underscore",
",",
"key",
"=",
"lambda",
"x",
":",
"str_lower",
"(",
"x",
"[",
"0",
"]",
")",
")",
"completions",
"+=",
"sorted",
"(",
"underscore",
",",
"key",
"=",
"lambda",
"x",
":",
"str_lower",
"(",
"x",
"[",
"0",
"]",
")",
")",
"self",
".",
"show_completion_widget",
"(",
"completions",
")"
] | Display the possible completions | [
"Display",
"the",
"possible",
"completions"
] | python | train |
assamite/creamas | creamas/core/simulation.py | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/core/simulation.py#L254-L270 | def next(self):
"""Trigger next agent to :py:meth:`~creamas.core.CreativeAgent.act` in
the current step.
"""
# all agents acted, init next step
t = time.time()
if len(self._agents_to_act) == 0:
self._init_step()
addr = self._agents_to_act.pop(0)
aiomas.run(until=self.env.trigger_act(addr=addr))
t2 = time.time()
self._step_processing_time += t2 - t
# all agents acted, finalize current step
if len(self._agents_to_act) == 0:
self._finalize_step() | [
"def",
"next",
"(",
"self",
")",
":",
"# all agents acted, init next step",
"t",
"=",
"time",
".",
"time",
"(",
")",
"if",
"len",
"(",
"self",
".",
"_agents_to_act",
")",
"==",
"0",
":",
"self",
".",
"_init_step",
"(",
")",
"addr",
"=",
"self",
".",
"_agents_to_act",
".",
"pop",
"(",
"0",
")",
"aiomas",
".",
"run",
"(",
"until",
"=",
"self",
".",
"env",
".",
"trigger_act",
"(",
"addr",
"=",
"addr",
")",
")",
"t2",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"_step_processing_time",
"+=",
"t2",
"-",
"t",
"# all agents acted, finalize current step",
"if",
"len",
"(",
"self",
".",
"_agents_to_act",
")",
"==",
"0",
":",
"self",
".",
"_finalize_step",
"(",
")"
] | Trigger next agent to :py:meth:`~creamas.core.CreativeAgent.act` in
the current step. | [
"Trigger",
"next",
"agent",
"to",
":",
"py",
":",
"meth",
":",
"~creamas",
".",
"core",
".",
"CreativeAgent",
".",
"act",
"in",
"the",
"current",
"step",
"."
] | python | train |
nephila/python-taiga | taiga/models/models.py | https://github.com/nephila/python-taiga/blob/5b471d6b8b59e5d410162a6f1c2f0d4188445a56/taiga/models/models.py#L617-L627 | def attach(self, attached_file, **attrs):
"""
Attach a file to the :class:`Task`
:param attached_file: file path to attach
:param attrs: optional attributes for the attached file
"""
return TaskAttachments(self.requester).create(
self.project, self.id,
attached_file, **attrs
) | [
"def",
"attach",
"(",
"self",
",",
"attached_file",
",",
"*",
"*",
"attrs",
")",
":",
"return",
"TaskAttachments",
"(",
"self",
".",
"requester",
")",
".",
"create",
"(",
"self",
".",
"project",
",",
"self",
".",
"id",
",",
"attached_file",
",",
"*",
"*",
"attrs",
")"
] | Attach a file to the :class:`Task`
:param attached_file: file path to attach
:param attrs: optional attributes for the attached file | [
"Attach",
"a",
"file",
"to",
"the",
":",
"class",
":",
"Task"
] | python | train |
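A hedged sketch of `attach` above. The authentication flow, the task id, and the `description` keyword are illustrative assumptions rather than part of this record; only `task.attach(path, **attrs)` itself is taken from it:

    from taiga import TaigaAPI

    api = TaigaAPI()
    api.auth(username="user", password="secret")        # placeholder credentials
    task = api.tasks.get(1234)                           # hypothetical task id
    task.attach("build.log", description="CI output")   # extra keywords travel through **attrs
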
onnx/onnx | onnx/helper.py | https://github.com/onnx/onnx/blob/2f7dc10f03a072526d94b6820cedbf2a1ec5a2c4/onnx/helper.py#L538-L551 | def strip_doc_string(proto): # type: (google.protobuf.message.Message) -> None
"""
Empties `doc_string` field on any nested protobuf messages
"""
assert isinstance(proto, google.protobuf.message.Message)
for descriptor in proto.DESCRIPTOR.fields:
if descriptor.name == 'doc_string':
proto.ClearField(descriptor.name)
elif descriptor.type == descriptor.TYPE_MESSAGE:
if descriptor.label == descriptor.LABEL_REPEATED:
for x in getattr(proto, descriptor.name):
strip_doc_string(x)
elif proto.HasField(descriptor.name):
strip_doc_string(getattr(proto, descriptor.name)) | [
"def",
"strip_doc_string",
"(",
"proto",
")",
":",
"# type: (google.protobuf.message.Message) -> None",
"assert",
"isinstance",
"(",
"proto",
",",
"google",
".",
"protobuf",
".",
"message",
".",
"Message",
")",
"for",
"descriptor",
"in",
"proto",
".",
"DESCRIPTOR",
".",
"fields",
":",
"if",
"descriptor",
".",
"name",
"==",
"'doc_string'",
":",
"proto",
".",
"ClearField",
"(",
"descriptor",
".",
"name",
")",
"elif",
"descriptor",
".",
"type",
"==",
"descriptor",
".",
"TYPE_MESSAGE",
":",
"if",
"descriptor",
".",
"label",
"==",
"descriptor",
".",
"LABEL_REPEATED",
":",
"for",
"x",
"in",
"getattr",
"(",
"proto",
",",
"descriptor",
".",
"name",
")",
":",
"strip_doc_string",
"(",
"x",
")",
"elif",
"proto",
".",
"HasField",
"(",
"descriptor",
".",
"name",
")",
":",
"strip_doc_string",
"(",
"getattr",
"(",
"proto",
",",
"descriptor",
".",
"name",
")",
")"
] | Empties `doc_string` field on any nested protobuf messages | [
"Empties",
"doc_string",
"field",
"on",
"any",
"nested",
"protobuf",
"messages"
] | python | train |
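A short usage sketch for `strip_doc_string` above; the file names are placeholders and `onnx.load`/`onnx.save` are the usual entry points for reading and writing a ModelProto:

    import onnx
    from onnx.helper import strip_doc_string

    model = onnx.load("model.onnx")        # placeholder input path
    strip_doc_string(model)                # clears doc_string on the model and every nested message, in place
    onnx.save(model, "model.stripped.onnx")
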
aestrivex/bctpy | bct/utils/visualization.py | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/utils/visualization.py#L476-L599 | def reorder_matrix(m1, cost='line', verbose=False, H=1e4, Texp=10, T0=1e-3, Hbrk=10):
'''
This function rearranges the nodes in matrix M1 such that the matrix
elements are squeezed along the main diagonal. The function uses a
version of simulated annealing.
Parameters
----------
M1 : NxN np.ndarray
connection matrix weighted/binary directed/undirected
cost : str
'line' or 'circ' for shape of lattice (linear or ring lattice).
Default is linear lattice.
verbose : bool
print out cost at each iteration. Default False.
H : int
annealing parameter, default value 1e4
Texp : int
annealing parameter, default value 10. Coefficient of H s.t.
Texp0=1-Texp/H
T0 : float
annealing parameter, default value 1e-3
Hbrk : int
annealing parameter, default value = 10. Coefficient of H s.t.
Hbrk0 = H/Hbrk
Returns
-------
Mreordered : NxN np.ndarray
reordered connection matrix
Mindices : Nx1 np.ndarray
reordered indices
Mcost : float
objective function cost of reordered matrix
Notes
-----
Note that in general, the outcome will depend on the initial condition
(the setting of the random number seed). Also, there is no good way to
determine optimal annealing parameters in advance - these parameters
will need to be adjusted "by hand" (particularly H, Texp, and T0).
For large and/or dense matrices, it is highly recommended to perform
exploratory runs varying the settings of 'H' and 'Texp' and then select
the best values.
Based on extensive testing, it appears that T0 and Hbrk can remain
unchanged in most cases. Texp may be varied from 1-1/H to 1-10/H, for
example. H is the most important parameter - set to larger values as
the problem size increases. It is advisable to run this function
multiple times and select the solution(s) with the lowest 'cost'.
Setting 'Texp' to zero cancels annealing and uses a greedy algorithm
instead.
'''
from scipy import linalg, stats
n = len(m1)
if n < 2:
raise BCTParamError("align_matrix will infinite loop on a singleton "
"or null matrix.")
# generate cost function
if cost == 'line':
profile = stats.norm.pdf(range(1, n + 1), loc=0, scale=n / 2)[::-1]
elif cost == 'circ':
profile = stats.norm.pdf(
range(1, n + 1), loc=n / 2, scale=n / 4)[::-1]
else:
raise BCTParamError('cost must be line or circ')
costf = linalg.toeplitz(profile, r=profile) * np.logical_not(np.eye(n))
costf /= np.sum(costf)
# establish maxcost, lowcost, mincost
maxcost = np.sum(np.sort(costf.flat) * np.sort(m1.flat))
lowcost = np.sum(m1 * costf) / maxcost
mincost = lowcost
# initialize
anew = np.arange(n)
amin = np.arange(n)
h = 0
hcnt = 0
# adjust annealing parameters
# H determines the maximal number of steps (user specified)
# Texp determines the steepness of the temperature gradient
Texp = 1 - Texp / H
# T0 sets the initial temperature and scales the energy term (user provided)
# Hbrk sets a break point for the stimulation
Hbrk = H / Hbrk
while h < H:
h += 1
hcnt += 1
# terminate if no new mincost has been found for some time
if hcnt > Hbrk:
break
T = T0 * Texp**h
atmp = anew.copy()
r1, r2 = rng.randint(n, size=(2,))
while r1 == r2:
r2 = rng.randint(n)
atmp[r1] = anew[r2]
atmp[r2] = anew[r1]
costnew = np.sum((m1[np.ix_(atmp, atmp)]) * costf) / maxcost
# annealing
if costnew < lowcost or rng.random_sample() < np.exp(-(costnew - lowcost) / T):
anew = atmp
lowcost = costnew
# is this a new absolute best?
if lowcost < mincost:
amin = anew
mincost = lowcost
if verbose:
print('step %i ... current lowest cost = %f' % (h, mincost))
hcnt = 0
if verbose:
print('step %i ... final lowest cost = %f' % (h, mincost))
M_reordered = m1[np.ix_(amin, amin)]
M_indices = amin
cost = mincost
return M_reordered, M_indices, cost | [
"def",
"reorder_matrix",
"(",
"m1",
",",
"cost",
"=",
"'line'",
",",
"verbose",
"=",
"False",
",",
"H",
"=",
"1e4",
",",
"Texp",
"=",
"10",
",",
"T0",
"=",
"1e-3",
",",
"Hbrk",
"=",
"10",
")",
":",
"from",
"scipy",
"import",
"linalg",
",",
"stats",
"n",
"=",
"len",
"(",
"m1",
")",
"if",
"n",
"<",
"2",
":",
"raise",
"BCTParamError",
"(",
"\"align_matrix will infinite loop on a singleton \"",
"\"or null matrix.\"",
")",
"# generate cost function",
"if",
"cost",
"==",
"'line'",
":",
"profile",
"=",
"stats",
".",
"norm",
".",
"pdf",
"(",
"range",
"(",
"1",
",",
"n",
"+",
"1",
")",
",",
"loc",
"=",
"0",
",",
"scale",
"=",
"n",
"/",
"2",
")",
"[",
":",
":",
"-",
"1",
"]",
"elif",
"cost",
"==",
"'circ'",
":",
"profile",
"=",
"stats",
".",
"norm",
".",
"pdf",
"(",
"range",
"(",
"1",
",",
"n",
"+",
"1",
")",
",",
"loc",
"=",
"n",
"/",
"2",
",",
"scale",
"=",
"n",
"/",
"4",
")",
"[",
":",
":",
"-",
"1",
"]",
"else",
":",
"raise",
"BCTParamError",
"(",
"'cost must be line or circ'",
")",
"costf",
"=",
"linalg",
".",
"toeplitz",
"(",
"profile",
",",
"r",
"=",
"profile",
")",
"*",
"np",
".",
"logical_not",
"(",
"np",
".",
"eye",
"(",
"n",
")",
")",
"costf",
"/=",
"np",
".",
"sum",
"(",
"costf",
")",
"# establish maxcost, lowcost, mincost",
"maxcost",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"sort",
"(",
"costf",
".",
"flat",
")",
"*",
"np",
".",
"sort",
"(",
"m1",
".",
"flat",
")",
")",
"lowcost",
"=",
"np",
".",
"sum",
"(",
"m1",
"*",
"costf",
")",
"/",
"maxcost",
"mincost",
"=",
"lowcost",
"# initialize",
"anew",
"=",
"np",
".",
"arange",
"(",
"n",
")",
"amin",
"=",
"np",
".",
"arange",
"(",
"n",
")",
"h",
"=",
"0",
"hcnt",
"=",
"0",
"# adjust annealing parameters",
"# H determines the maximal number of steps (user specified)",
"# Texp determines the steepness of the temperature gradient",
"Texp",
"=",
"1",
"-",
"Texp",
"/",
"H",
"# T0 sets the initial temperature and scales the energy term (user provided)",
"# Hbrk sets a break point for the stimulation",
"Hbrk",
"=",
"H",
"/",
"Hbrk",
"while",
"h",
"<",
"H",
":",
"h",
"+=",
"1",
"hcnt",
"+=",
"1",
"# terminate if no new mincost has been found for some time",
"if",
"hcnt",
">",
"Hbrk",
":",
"break",
"T",
"=",
"T0",
"*",
"Texp",
"**",
"h",
"atmp",
"=",
"anew",
".",
"copy",
"(",
")",
"r1",
",",
"r2",
"=",
"rng",
".",
"randint",
"(",
"n",
",",
"size",
"=",
"(",
"2",
",",
")",
")",
"while",
"r1",
"==",
"r2",
":",
"r2",
"=",
"rng",
".",
"randint",
"(",
"n",
")",
"atmp",
"[",
"r1",
"]",
"=",
"anew",
"[",
"r2",
"]",
"atmp",
"[",
"r2",
"]",
"=",
"anew",
"[",
"r1",
"]",
"costnew",
"=",
"np",
".",
"sum",
"(",
"(",
"m1",
"[",
"np",
".",
"ix_",
"(",
"atmp",
",",
"atmp",
")",
"]",
")",
"*",
"costf",
")",
"/",
"maxcost",
"# annealing",
"if",
"costnew",
"<",
"lowcost",
"or",
"rng",
".",
"random_sample",
"(",
")",
"<",
"np",
".",
"exp",
"(",
"-",
"(",
"costnew",
"-",
"lowcost",
")",
"/",
"T",
")",
":",
"anew",
"=",
"atmp",
"lowcost",
"=",
"costnew",
"# is this a new absolute best?",
"if",
"lowcost",
"<",
"mincost",
":",
"amin",
"=",
"anew",
"mincost",
"=",
"lowcost",
"if",
"verbose",
":",
"print",
"(",
"'step %i ... current lowest cost = %f'",
"%",
"(",
"h",
",",
"mincost",
")",
")",
"hcnt",
"=",
"0",
"if",
"verbose",
":",
"print",
"(",
"'step %i ... final lowest cost = %f'",
"%",
"(",
"h",
",",
"mincost",
")",
")",
"M_reordered",
"=",
"m1",
"[",
"np",
".",
"ix_",
"(",
"amin",
",",
"amin",
")",
"]",
"M_indices",
"=",
"amin",
"cost",
"=",
"mincost",
"return",
"M_reordered",
",",
"M_indices",
",",
"cost"
] | This function rearranges the nodes in matrix M1 such that the matrix
elements are squeezed along the main diagonal. The function uses a
version of simulated annealing.
Parameters
----------
M1 : NxN np.ndarray
connection matrix weighted/binary directed/undirected
cost : str
'line' or 'circ' for shape of lattice (linear or ring lattice).
Default is linear lattice.
verbose : bool
print out cost at each iteration. Default False.
H : int
annealing parameter, default value 1e4
Texp : int
annealing parameter, default value 10. Coefficient of H s.t.
Texp0=1-Texp/H
T0 : float
annealing parameter, default value 1e-3
Hbrk : int
annealing parameter, default value = 10. Coefficient of H s.t.
Hbrk0 = H/Hbrk
Returns
-------
Mreordered : NxN np.ndarray
reordered connection matrix
Mindices : Nx1 np.ndarray
reordered indices
Mcost : float
objective function cost of reordered matrix
Notes
-----
Note that in general, the outcome will depend on the initial condition
(the setting of the random number seed). Also, there is no good way to
determine optimal annealing parameters in advance - these parameters
will need to be adjusted "by hand" (particularly H, Texp, and T0).
For large and/or dense matrices, it is highly recommended to perform
exploratory runs varying the settings of 'H' and 'Texp' and then select
the best values.
Based on extensive testing, it appears that T0 and Hbrk can remain
unchanged in most cases. Texp may be varied from 1-1/H to 1-10/H, for
example. H is the most important parameter - set to larger values as
the problem size increases. It is advisable to run this function
multiple times and select the solution(s) with the lowest 'cost'.
Setting 'Texp' to zero cancels annealing and uses a greedy algorithm
instead. | [
"This",
"function",
"rearranges",
"the",
"nodes",
"in",
"matrix",
"M1",
"such",
"that",
"the",
"matrix",
"elements",
"are",
"squeezed",
"along",
"the",
"main",
"diagonal",
".",
"The",
"function",
"uses",
"a",
"version",
"of",
"simulated",
"annealing",
"."
] | python | train |
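A minimal sketch of `reorder_matrix` above on a random symmetric matrix; the annealing arguments simply restate the defaults from the signature:

    import numpy as np
    from bct.utils.visualization import reorder_matrix

    rng = np.random.default_rng(0)
    W = rng.random((30, 30))
    W = (W + W.T) / 2.0          # symmetric weights, as for an undirected network
    np.fill_diagonal(W, 0.0)

    M_re, idx, cost = reorder_matrix(W, cost='line', H=1e4, Texp=10, T0=1e-3, Hbrk=10)
    print(cost, idx[:5])
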
chrisspen/burlap | burlap/mysql.py | https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/mysql.py#L582-L591 | def database_exists(name, **kwargs):
"""
Check if a MySQL database exists.
"""
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = query("SHOW DATABASES LIKE '%(name)s';" % {
'name': name
}, **kwargs)
return res.succeeded and (res == name) | [
"def",
"database_exists",
"(",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
",",
"'stdout'",
",",
"'stderr'",
",",
"'warnings'",
")",
",",
"warn_only",
"=",
"True",
")",
":",
"res",
"=",
"query",
"(",
"\"SHOW DATABASES LIKE '%(name)s';\"",
"%",
"{",
"'name'",
":",
"name",
"}",
",",
"*",
"*",
"kwargs",
")",
"return",
"res",
".",
"succeeded",
"and",
"(",
"res",
"==",
"name",
")"
] | Check if a MySQL database exists. | [
"Check",
"if",
"a",
"MySQL",
"database",
"exists",
"."
] | python | valid |
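A hedged sketch of `database_exists` above; burlap drives the remote MySQL server through Fabric, so this assumes Fabric's env (hosts, credentials) is already configured the way burlap expects, and any extra keyword arguments are simply forwarded to `query()`:

    from burlap.mysql import database_exists

    def ensure_database(name="analytics"):
        # placeholder database name; returns True only when SHOW DATABASES echoes the name back
        if not database_exists(name):
            print("database %s is missing on the remote host" % name)
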
pypa/pipenv | pipenv/patched/notpip/_internal/vcs/git.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/vcs/git.py#L82-L100 | def get_current_branch(self, location):
"""
Return the current branch, or None if HEAD isn't at a branch
(e.g. detached HEAD).
"""
# git-symbolic-ref exits with empty stdout if "HEAD" is a detached
# HEAD rather than a symbolic ref. In addition, the -q causes the
# command to exit with status code 1 instead of 128 in this case
# and to suppress the message to stderr.
args = ['symbolic-ref', '-q', 'HEAD']
output = self.run_command(
args, extra_ok_returncodes=(1, ), show_stdout=False, cwd=location,
)
ref = output.strip()
if ref.startswith('refs/heads/'):
return ref[len('refs/heads/'):]
return None | [
"def",
"get_current_branch",
"(",
"self",
",",
"location",
")",
":",
"# git-symbolic-ref exits with empty stdout if \"HEAD\" is a detached",
"# HEAD rather than a symbolic ref. In addition, the -q causes the",
"# command to exit with status code 1 instead of 128 in this case",
"# and to suppress the message to stderr.",
"args",
"=",
"[",
"'symbolic-ref'",
",",
"'-q'",
",",
"'HEAD'",
"]",
"output",
"=",
"self",
".",
"run_command",
"(",
"args",
",",
"extra_ok_returncodes",
"=",
"(",
"1",
",",
")",
",",
"show_stdout",
"=",
"False",
",",
"cwd",
"=",
"location",
",",
")",
"ref",
"=",
"output",
".",
"strip",
"(",
")",
"if",
"ref",
".",
"startswith",
"(",
"'refs/heads/'",
")",
":",
"return",
"ref",
"[",
"len",
"(",
"'refs/heads/'",
")",
":",
"]",
"return",
"None"
] | Return the current branch, or None if HEAD isn't at a branch
(e.g. detached HEAD). | [
"Return",
"the",
"current",
"branch",
"or",
"None",
"if",
"HEAD",
"isn",
"t",
"at",
"a",
"branch",
"(",
"e",
".",
"g",
".",
"detached",
"HEAD",
")",
"."
] | python | train |
log2timeline/plaso | plaso/cli/image_export_tool.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/image_export_tool.py#L415-L427 | def _ParseNamesString(self, names_string):
"""Parses the name string.
Args:
names_string (str): comma separated filenames to filter.
"""
if not names_string:
return
names_string = names_string.lower()
names = [name.strip() for name in names_string.split(',')]
file_entry_filter = file_entry_filters.NamesFileEntryFilter(names)
self._filter_collection.AddFilter(file_entry_filter) | [
"def",
"_ParseNamesString",
"(",
"self",
",",
"names_string",
")",
":",
"if",
"not",
"names_string",
":",
"return",
"names_string",
"=",
"names_string",
".",
"lower",
"(",
")",
"names",
"=",
"[",
"name",
".",
"strip",
"(",
")",
"for",
"name",
"in",
"names_string",
".",
"split",
"(",
"','",
")",
"]",
"file_entry_filter",
"=",
"file_entry_filters",
".",
"NamesFileEntryFilter",
"(",
"names",
")",
"self",
".",
"_filter_collection",
".",
"AddFilter",
"(",
"file_entry_filter",
")"
] | Parses the name string.
Args:
names_string (str): comma separated filenames to filter. | [
"Parses",
"the",
"name",
"string",
"."
] | python | train |
chaoss/grimoirelab-sigils | src/migration/utils.py | https://github.com/chaoss/grimoirelab-sigils/blob/33d395195acb316287143a535a2c6e4009bf0528/src/migration/utils.py#L40-L58 | def replace(pretty, old_str, new_str):
""" Replace strings giving some info on where
the replacement was done
"""
out_str = ''
line_number = 1
changes = 0
for line in pretty.splitlines(keepends=True):
new_line = line.replace(old_str, new_str)
if line.find(old_str) != -1:
logging.debug('%s', line_number)
logging.debug('< %s', line)
logging.debug('> %s', new_line)
changes += 1
out_str += new_line
line_number += 1
logging.info('Total changes(%s): %s', old_str, changes)
return out_str | [
"def",
"replace",
"(",
"pretty",
",",
"old_str",
",",
"new_str",
")",
":",
"out_str",
"=",
"''",
"line_number",
"=",
"1",
"changes",
"=",
"0",
"for",
"line",
"in",
"pretty",
".",
"splitlines",
"(",
"keepends",
"=",
"True",
")",
":",
"new_line",
"=",
"line",
".",
"replace",
"(",
"old_str",
",",
"new_str",
")",
"if",
"line",
".",
"find",
"(",
"old_str",
")",
"!=",
"-",
"1",
":",
"logging",
".",
"debug",
"(",
"'%s'",
",",
"line_number",
")",
"logging",
".",
"debug",
"(",
"'< %s'",
",",
"line",
")",
"logging",
".",
"debug",
"(",
"'> %s'",
",",
"new_line",
")",
"changes",
"+=",
"1",
"out_str",
"+=",
"new_line",
"line_number",
"+=",
"1",
"logging",
".",
"info",
"(",
"'Total changes(%s): %s'",
",",
"old_str",
",",
"changes",
")",
"return",
"out_str"
] | Replace strings giving some info on where
the replacement was done | [
"Replace",
"strings",
"giving",
"some",
"info",
"on",
"where",
"the",
"replacement",
"was",
"done"
] | python | train |
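A small sketch of the `replace` helper above; it assumes `src/` is on the import path so `migration.utils` resolves, and the JSON snippet is a placeholder:

    import logging
    from migration.utils import replace

    logging.basicConfig(level=logging.INFO)

    pretty = '{\n  "index": "git_enrich",\n  "alias": "git_enrich"\n}\n'
    updated = replace(pretty, "git_enrich", "git")  # logs the total number of changed lines
    print(updated)
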
google/grr | grr/core/grr_response_core/lib/parsers/linux_file_parser.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/parsers/linux_file_parser.py#L663-L669 | def AddPassword(self, fileset):
"""Add the passwd entries to the shadow store."""
passwd = fileset.get("/etc/passwd")
if passwd:
self._ParseFile(passwd, self.ParsePasswdEntry)
else:
logging.debug("No /etc/passwd file.") | [
"def",
"AddPassword",
"(",
"self",
",",
"fileset",
")",
":",
"passwd",
"=",
"fileset",
".",
"get",
"(",
"\"/etc/passwd\"",
")",
"if",
"passwd",
":",
"self",
".",
"_ParseFile",
"(",
"passwd",
",",
"self",
".",
"ParsePasswdEntry",
")",
"else",
":",
"logging",
".",
"debug",
"(",
"\"No /etc/passwd file.\"",
")"
] | Add the passwd entries to the shadow store. | [
"Add",
"the",
"passwd",
"entries",
"to",
"the",
"shadow",
"store",
"."
] | python | train |
openstack/networking-cisco | networking_cisco/neutronclient/hostingdevicescheduler.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/neutronclient/hostingdevicescheduler.py#L70-L75 | def associate_hosting_device_with_config_agent(
self, client, config_agent_id, body):
"""Associates a hosting_device with a config agent."""
return client.post((ConfigAgentHandlingHostingDevice.resource_path +
CFG_AGENT_HOSTING_DEVICES) % config_agent_id,
body=body) | [
"def",
"associate_hosting_device_with_config_agent",
"(",
"self",
",",
"client",
",",
"config_agent_id",
",",
"body",
")",
":",
"return",
"client",
".",
"post",
"(",
"(",
"ConfigAgentHandlingHostingDevice",
".",
"resource_path",
"+",
"CFG_AGENT_HOSTING_DEVICES",
")",
"%",
"config_agent_id",
",",
"body",
"=",
"body",
")"
] | Associates a hosting_device with a config agent. | [
"Associates",
"a",
"hosting_device",
"with",
"a",
"config",
"agent",
"."
] | python | train |
datakortet/dkfileutils | tasks.py | https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/tasks.py#L92-L123 | def build(ctx, less=False, docs=False, js=False, force=False):
"""Build everything and collectstatic.
"""
specified = any([less, docs, js])
buildall = not specified
if buildall or less:
less_fname = ctx.pkg.source_less / ctx.pkg.name + '.less'
if less_fname.exists():
lessc.LessRule(
ctx,
src='{pkg.source_less}/{pkg.name}.less',
dst='{pkg.django_static}/{pkg.name}/css/{pkg.name}-{version}.min.css',
force=force
)
elif less:
print("WARNING: build --less specified, but no file at:", less_fname)
if buildall or docs:
if WARN_ABOUT_SETTINGS:
warnings.warn(
"autodoc might need a dummy settings file in the root of "
"your package. Since it runs in a separate process you cannot"
"use settings.configure()"
)
doctools.build(ctx, force=force)
if buildall or js:
build_js(ctx, force)
if HAVE_SETTINGS and (force or changed(ctx.pkg.django_static)):
collectstatic(ctx, DJANGO_SETTINGS_MODULE) | [
"def",
"build",
"(",
"ctx",
",",
"less",
"=",
"False",
",",
"docs",
"=",
"False",
",",
"js",
"=",
"False",
",",
"force",
"=",
"False",
")",
":",
"specified",
"=",
"any",
"(",
"[",
"less",
",",
"docs",
",",
"js",
"]",
")",
"buildall",
"=",
"not",
"specified",
"if",
"buildall",
"or",
"less",
":",
"less_fname",
"=",
"ctx",
".",
"pkg",
".",
"source_less",
"/",
"ctx",
".",
"pkg",
".",
"name",
"+",
"'.less'",
"if",
"less_fname",
".",
"exists",
"(",
")",
":",
"lessc",
".",
"LessRule",
"(",
"ctx",
",",
"src",
"=",
"'{pkg.source_less}/{pkg.name}.less'",
",",
"dst",
"=",
"'{pkg.django_static}/{pkg.name}/css/{pkg.name}-{version}.min.css'",
",",
"force",
"=",
"force",
")",
"elif",
"less",
":",
"print",
"(",
"\"WARNING: build --less specified, but no file at:\"",
",",
"less_fname",
")",
"if",
"buildall",
"or",
"docs",
":",
"if",
"WARN_ABOUT_SETTINGS",
":",
"warnings",
".",
"warn",
"(",
"\"autodoc might need a dummy settings file in the root of \"",
"\"your package. Since it runs in a separate process you cannot\"",
"\"use settings.configure()\"",
")",
"doctools",
".",
"build",
"(",
"ctx",
",",
"force",
"=",
"force",
")",
"if",
"buildall",
"or",
"js",
":",
"build_js",
"(",
"ctx",
",",
"force",
")",
"if",
"HAVE_SETTINGS",
"and",
"(",
"force",
"or",
"changed",
"(",
"ctx",
".",
"pkg",
".",
"django_static",
")",
")",
":",
"collectstatic",
"(",
"ctx",
",",
"DJANGO_SETTINGS_MODULE",
")"
] | Build everything and collectstatic. | [
"Build",
"everything",
"and",
"collectstatic",
"."
] | python | train |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/wallet/wallet.py | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/wallet/wallet.py#L1136-L1159 | async def sign(self, message: bytes, verkey: str = None) -> bytes:
"""
Derive signing key and Sign message; return signature. Raise WalletState if wallet is closed.
Raise AbsentMessage for missing message, or WalletState if wallet is closed.
:param message: Content to sign, as bytes
:param verkey: verification key corresponding to private signing key (default anchor's own)
:return: signature, as bytes
"""
LOGGER.debug('Wallet.sign >>> message: %s, verkey: %s', message, verkey)
if not message:
LOGGER.debug('Wallet.sign <!< No message to sign')
raise AbsentMessage('No message to sign')
if not self.handle:
LOGGER.debug('Wallet.sign <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
rv = await crypto.crypto_sign(self.handle, verkey or self.verkey, message)
LOGGER.debug('Wallet.sign <<< %s', rv)
return rv | [
"async",
"def",
"sign",
"(",
"self",
",",
"message",
":",
"bytes",
",",
"verkey",
":",
"str",
"=",
"None",
")",
"->",
"bytes",
":",
"LOGGER",
".",
"debug",
"(",
"'Wallet.sign >>> message: %s, verkey: %s'",
",",
"message",
",",
"verkey",
")",
"if",
"not",
"message",
":",
"LOGGER",
".",
"debug",
"(",
"'Wallet.sign <!< No message to sign'",
")",
"raise",
"AbsentMessage",
"(",
"'No message to sign'",
")",
"if",
"not",
"self",
".",
"handle",
":",
"LOGGER",
".",
"debug",
"(",
"'Wallet.sign <!< Wallet %s is closed'",
",",
"self",
".",
"name",
")",
"raise",
"WalletState",
"(",
"'Wallet {} is closed'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"rv",
"=",
"await",
"crypto",
".",
"crypto_sign",
"(",
"self",
".",
"handle",
",",
"verkey",
"or",
"self",
".",
"verkey",
",",
"message",
")",
"LOGGER",
".",
"debug",
"(",
"'Wallet.sign <<< %s'",
",",
"rv",
")",
"return",
"rv"
] | Derive signing key and Sign message; return signature. Raise WalletState if wallet is closed.
Raise AbsentMessage for missing message, or WalletState if wallet is closed.
:param message: Content to sign, as bytes
:param verkey: verification key corresponding to private signing key (default anchor's own)
:return: signature, as bytes | [
"Derive",
"signing",
"key",
"and",
"Sign",
"message",
";",
"return",
"signature",
".",
"Raise",
"WalletState",
"if",
"wallet",
"is",
"closed",
".",
"Raise",
"AbsentMessage",
"for",
"missing",
"message",
"or",
"WalletState",
"if",
"wallet",
"is",
"closed",
"."
] | python | train |
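A hedged coroutine sketch for `sign` above; it assumes `wallet` is an already-created and opened von_anchor `Wallet`, since wallet construction and opening are outside this record:

    async def sign_payload(wallet):
        sig = await wallet.sign(b"arbitrary payload")                          # anchor's own verkey
        other = await wallet.sign(b"arbitrary payload", verkey=wallet.verkey)  # or an explicit verkey
        return sig, other
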
dhocker/udmx-pyusb | pyudmx/pyudmx.py | https://github.com/dhocker/udmx-pyusb/blob/ee7d10604ecd83857154ed6739793de3b7bd5fc1/pyudmx/pyudmx.py#L43-L67 | def open(self, vendor_id: int = 0x16c0, product_id: int = 0x5dc, bus: int = None, address: int = None) -> bool:
"""
Open the first device that matches the search criteria. The default parameters
are set up for the likely most common case of a single uDMX interface.
However, for the case of multiple uDMX interfaces, you can use the
bus and address parameters to further specify the uDMX interface
to be opened.
:param vendor_id:
:param product_id:
:param bus: USB bus number 1-n
:param address: USB device address 1-n
:return: Returns true if a device was opened. Otherwise, returns false.
"""
kwargs = {}
if vendor_id:
kwargs["idVendor"] = vendor_id
if product_id:
kwargs["idProduct"] = product_id
if bus:
kwargs["bus"] = bus
if address:
kwargs["address"] = address
# Find the uDMX interface
self._dev = usb.core.find(**kwargs)
return self._dev is not None | [
"def",
"open",
"(",
"self",
",",
"vendor_id",
":",
"int",
"=",
"0x16c0",
",",
"product_id",
":",
"int",
"=",
"0x5dc",
",",
"bus",
":",
"int",
"=",
"None",
",",
"address",
":",
"int",
"=",
"None",
")",
"->",
"bool",
":",
"kwargs",
"=",
"{",
"}",
"if",
"vendor_id",
":",
"kwargs",
"[",
"\"idVendor\"",
"]",
"=",
"vendor_id",
"if",
"product_id",
":",
"kwargs",
"[",
"\"idProduct\"",
"]",
"=",
"product_id",
"if",
"bus",
":",
"kwargs",
"[",
"\"bus\"",
"]",
"=",
"bus",
"if",
"address",
":",
"kwargs",
"[",
"\"address\"",
"]",
"=",
"address",
"# Find the uDMX interface",
"self",
".",
"_dev",
"=",
"usb",
".",
"core",
".",
"find",
"(",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_dev",
"is",
"not",
"None"
] | Open the first device that matches the search criteria. The default parameters
are set up for the likely most common case of a single uDMX interface.
However, for the case of multiple uDMX interfaces, you can use the
bus and address parameters to further specify the uDMX interface
to be opened.
:param vendor_id:
:param product_id:
:param bus: USB bus number 1-n
:param address: USB device address 1-n
:return: Returns true if a device was opened. Otherwise, returns false. | [
"Open",
"the",
"first",
"device",
"that",
"matches",
"the",
"search",
"criteria",
".",
"Th",
"default",
"parameters",
"are",
"set",
"up",
"for",
"the",
"likely",
"most",
"common",
"case",
"of",
"a",
"single",
"uDMX",
"interface",
".",
"However",
"for",
"the",
"case",
"of",
"multiple",
"uDMX",
"interfaces",
"you",
"can",
"use",
"the",
"bus",
"and",
"address",
"paramters",
"to",
"further",
"specifiy",
"the",
"uDMX",
"interface",
"to",
"be",
"opened",
".",
":",
"param",
"vendor_id",
":",
":",
"param",
"product_id",
":",
":",
"param",
"bus",
":",
"USB",
"bus",
"number",
"1",
"-",
"n",
":",
"param",
"address",
":",
"USB",
"device",
"address",
"1",
"-",
"n",
":",
"return",
":",
"Returns",
"true",
"if",
"a",
"device",
"was",
"opened",
".",
"Otherwise",
"returns",
"false",
"."
] | python | train |
dshean/pygeotools | pygeotools/lib/geolib.py | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L2198-L2252 | def get_dem_mosaic_cmd(fn_list, o, fn_list_txt=None, tr=None, t_srs=None, t_projwin=None, georef_tile_size=None, threads=None, tile=None, stat=None):
"""
Create ASP dem_mosaic command
Useful for spawning many single-threaded mosaicing processes
"""
cmd = ['dem_mosaic',]
if o is None:
o = 'mos'
cmd.extend(['-o', o])
if threads is None:
from pygeotools.lib import iolib
threads = iolib.cpu_count()
cmd.extend(['--threads', threads])
if tr is not None:
cmd.extend(['--tr', tr])
if t_srs is not None:
#cmd.extend(['--t_srs', t_srs.ExportToProj4()])
cmd.extend(['--t_srs', '"%s"' % t_srs.ExportToProj4()])
#cmd.extend(['--t_srs', "%s" % t_srs.ExportToProj4()])
if t_projwin is not None:
cmd.append('--t_projwin')
cmd.extend(t_projwin)
cmd.append('--force-projwin')
if tile is not None:
#Not yet implemented
#cmd.extend(tile_list)
cmd.append('--tile-index')
cmd.append(tile)
if georef_tile_size is not None:
cmd.extend(['--georef-tile-size', georef_tile_size])
if stat is not None:
if stat == 'wmean':
stat = None
else:
cmd.append('--%s' % stat.replace('index',''))
if stat in ['lastindex', 'firstindex', 'medianindex']:
#This will write out the index map to -last.tif by default
cmd.append('--save-index-map')
#Make sure we don't have ndv that conflicts with 0-based DEM indices
cmd.extend(['--output-nodata-value','-9999'])
#else:
# cmd.extend(['--save-dem-weight', o+'_weight'])
#If user provided a file containing list of DEMs to mosaic (useful to avoid long bash command issues)
if fn_list_txt is not None:
if os.path.exists(fn_list_txt):
cmd.append('-l')
cmd.append(fn_list_txt)
else:
print("Could not find input text file containing list of inputs")
else:
cmd.extend(fn_list)
cmd = [str(i) for i in cmd]
#print(cmd)
#return subprocess.call(cmd)
return cmd | [
"def",
"get_dem_mosaic_cmd",
"(",
"fn_list",
",",
"o",
",",
"fn_list_txt",
"=",
"None",
",",
"tr",
"=",
"None",
",",
"t_srs",
"=",
"None",
",",
"t_projwin",
"=",
"None",
",",
"georef_tile_size",
"=",
"None",
",",
"threads",
"=",
"None",
",",
"tile",
"=",
"None",
",",
"stat",
"=",
"None",
")",
":",
"cmd",
"=",
"[",
"'dem_mosaic'",
",",
"]",
"if",
"o",
"is",
"None",
":",
"o",
"=",
"'mos'",
"cmd",
".",
"extend",
"(",
"[",
"'-o'",
",",
"o",
"]",
")",
"if",
"threads",
"is",
"None",
":",
"from",
"pygeotools",
".",
"lib",
"import",
"iolib",
"threads",
"=",
"iolib",
".",
"cpu_count",
"(",
")",
"cmd",
".",
"extend",
"(",
"[",
"'--threads'",
",",
"threads",
"]",
")",
"if",
"tr",
"is",
"not",
"None",
":",
"cmd",
".",
"extend",
"(",
"[",
"'--tr'",
",",
"tr",
"]",
")",
"if",
"t_srs",
"is",
"not",
"None",
":",
"#cmd.extend(['--t_srs', t_srs.ExportToProj4()])",
"cmd",
".",
"extend",
"(",
"[",
"'--t_srs'",
",",
"'\"%s\"'",
"%",
"t_srs",
".",
"ExportToProj4",
"(",
")",
"]",
")",
"#cmd.extend(['--t_srs', \"%s\" % t_srs.ExportToProj4()])",
"if",
"t_projwin",
"is",
"not",
"None",
":",
"cmd",
".",
"append",
"(",
"'--t_projwin'",
")",
"cmd",
".",
"extend",
"(",
"t_projwin",
")",
"cmd",
".",
"append",
"(",
"'--force-projwin'",
")",
"if",
"tile",
"is",
"not",
"None",
":",
"#Not yet implemented",
"#cmd.extend(tile_list)",
"cmd",
".",
"append",
"(",
"'--tile-index'",
")",
"cmd",
".",
"append",
"(",
"tile",
")",
"if",
"georef_tile_size",
"is",
"not",
"None",
":",
"cmd",
".",
"extend",
"(",
"[",
"'--georef-tile-size'",
",",
"georef_tile_size",
"]",
")",
"if",
"stat",
"is",
"not",
"None",
":",
"if",
"stat",
"==",
"'wmean'",
":",
"stat",
"=",
"None",
"else",
":",
"cmd",
".",
"append",
"(",
"'--%s'",
"%",
"stat",
".",
"replace",
"(",
"'index'",
",",
"''",
")",
")",
"if",
"stat",
"in",
"[",
"'lastindex'",
",",
"'firstindex'",
",",
"'medianindex'",
"]",
":",
"#This will write out the index map to -last.tif by default",
"cmd",
".",
"append",
"(",
"'--save-index-map'",
")",
"#Make sure we don't have ndv that conflicts with 0-based DEM indices",
"cmd",
".",
"extend",
"(",
"[",
"'--output-nodata-value'",
",",
"'-9999'",
"]",
")",
"#else:",
"# cmd.extend(['--save-dem-weight', o+'_weight'])",
"#If user provided a file containing list of DEMs to mosaic (useful to avoid long bash command issues)",
"if",
"fn_list_txt",
"is",
"not",
"None",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"fn_list_txt",
")",
":",
"cmd",
".",
"append",
"(",
"'-l'",
")",
"cmd",
".",
"append",
"(",
"fn_list_txt",
")",
"else",
":",
"print",
"(",
"\"Could not find input text file containing list of inputs\"",
")",
"else",
":",
"cmd",
".",
"extend",
"(",
"fn_list",
")",
"cmd",
"=",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"cmd",
"]",
"#print(cmd)",
"#return subprocess.call(cmd)",
"return",
"cmd"
] | Create ASP dem_mosaic command
Useful for spawning many single-threaded mosaicing processes | [
"Create",
"ASP",
"dem_mosaic",
"command",
"Useful",
"for",
"spawning",
"many",
"single",
"-",
"threaded",
"mosaicing",
"processes"
] | python | train |
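A usage sketch for `get_dem_mosaic_cmd` above; the DEM file names are placeholders, and the Ames Stereo Pipeline `dem_mosaic` binary has to be on PATH for the returned command to actually run:

    import subprocess
    from pygeotools.lib import geolib

    dem_list = ["dem_2015.tif", "dem_2016.tif"]   # placeholder inputs
    cmd = geolib.get_dem_mosaic_cmd(dem_list, "mos_median", tr=2.0, threads=4, stat="median")
    print(" ".join(cmd))
    subprocess.call(cmd)
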
jobovy/galpy | galpy/orbit/FullOrbit.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/FullOrbit.py#L759-L836 | def _fit_orbit_mlogl(new_vxvv,vxvv,vxvv_err,pot,radec,lb,
customsky,lb_to_customsky,pmllpmbb_to_customsky,
tmockAA,
ro,vo,obs):
"""The log likelihood for fitting an orbit"""
#Use this _parse_args routine, which does forward and backward integration
iR,ivR,ivT,iz,ivz,iphi= tmockAA._parse_args(True,False,
new_vxvv[0],
new_vxvv[1],
new_vxvv[2],
new_vxvv[3],
new_vxvv[4],
new_vxvv[5])
if radec or lb or customsky:
#Need to transform to (l,b), (ra,dec), or a custom set
#First transform to X,Y,Z,vX,vY,vZ (Galactic)
X,Y,Z = coords.galcencyl_to_XYZ(iR.flatten(),iphi.flatten(),
iz.flatten(),
Xsun=obs[0]/ro,
Zsun=obs[2]/ro).T
vX,vY,vZ = coords.galcencyl_to_vxvyvz(ivR.flatten(),ivT.flatten(),
ivz.flatten(),iphi.flatten(),
vsun=nu.array(\
obs[3:6])/vo,Xsun=obs[0]/ro,Zsun=obs[2]/ro).T
bad_indx= (X == 0.)*(Y == 0.)*(Z == 0.)
if True in bad_indx: X[bad_indx]+= ro/10000.
lbdvrpmllpmbb= coords.rectgal_to_sphergal(X*ro,Y*ro,Z*ro,
vX*vo,vY*vo,vZ*vo,
degree=True)
if lb:
orb_vxvv= nu.array([lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],
lbdvrpmllpmbb[:,2],
lbdvrpmllpmbb[:,4],
lbdvrpmllpmbb[:,5],
lbdvrpmllpmbb[:,3]]).T
elif radec:
#Further transform to ra,dec,pmra,pmdec
radec= coords.lb_to_radec(lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],degree=True,
epoch=None)
pmrapmdec= coords.pmllpmbb_to_pmrapmdec(lbdvrpmllpmbb[:,4],
lbdvrpmllpmbb[:,5],
lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],
degree=True,
epoch=None)
orb_vxvv= nu.array([radec[:,0],radec[:,1],
lbdvrpmllpmbb[:,2],
pmrapmdec[:,0],pmrapmdec[:,1],
lbdvrpmllpmbb[:,3]]).T
elif customsky:
#Further transform to ra,dec,pmra,pmdec
customradec= lb_to_customsky(lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],degree=True)
custompmrapmdec= pmllpmbb_to_customsky(lbdvrpmllpmbb[:,4],
lbdvrpmllpmbb[:,5],
lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],
degree=True)
orb_vxvv= nu.array([customradec[:,0],customradec[:,1],
lbdvrpmllpmbb[:,2],
custompmrapmdec[:,0],custompmrapmdec[:,1],
lbdvrpmllpmbb[:,3]]).T
else:
#shape=(2tintJ-1,6)
orb_vxvv= nu.array([iR.flatten(),ivR.flatten(),ivT.flatten(),
iz.flatten(),ivz.flatten(),iphi.flatten()]).T
out= 0.
for ii in range(vxvv.shape[0]):
sub_vxvv= (orb_vxvv-vxvv[ii,:].flatten())**2.
#print(sub_vxvv[nu.argmin(nu.sum(sub_vxvv,axis=1))])
if not vxvv_err is None:
sub_vxvv/= vxvv_err[ii,:]**2.
else:
sub_vxvv/= 0.01**2.
out+= logsumexp(-0.5*nu.sum(sub_vxvv,axis=1))
return -out | [
"def",
"_fit_orbit_mlogl",
"(",
"new_vxvv",
",",
"vxvv",
",",
"vxvv_err",
",",
"pot",
",",
"radec",
",",
"lb",
",",
"customsky",
",",
"lb_to_customsky",
",",
"pmllpmbb_to_customsky",
",",
"tmockAA",
",",
"ro",
",",
"vo",
",",
"obs",
")",
":",
"#Use this _parse_args routine, which does forward and backward integration",
"iR",
",",
"ivR",
",",
"ivT",
",",
"iz",
",",
"ivz",
",",
"iphi",
"=",
"tmockAA",
".",
"_parse_args",
"(",
"True",
",",
"False",
",",
"new_vxvv",
"[",
"0",
"]",
",",
"new_vxvv",
"[",
"1",
"]",
",",
"new_vxvv",
"[",
"2",
"]",
",",
"new_vxvv",
"[",
"3",
"]",
",",
"new_vxvv",
"[",
"4",
"]",
",",
"new_vxvv",
"[",
"5",
"]",
")",
"if",
"radec",
"or",
"lb",
"or",
"customsky",
":",
"#Need to transform to (l,b), (ra,dec), or a custom set",
"#First transform to X,Y,Z,vX,vY,vZ (Galactic)",
"X",
",",
"Y",
",",
"Z",
"=",
"coords",
".",
"galcencyl_to_XYZ",
"(",
"iR",
".",
"flatten",
"(",
")",
",",
"iphi",
".",
"flatten",
"(",
")",
",",
"iz",
".",
"flatten",
"(",
")",
",",
"Xsun",
"=",
"obs",
"[",
"0",
"]",
"/",
"ro",
",",
"Zsun",
"=",
"obs",
"[",
"2",
"]",
"/",
"ro",
")",
".",
"T",
"vX",
",",
"vY",
",",
"vZ",
"=",
"coords",
".",
"galcencyl_to_vxvyvz",
"(",
"ivR",
".",
"flatten",
"(",
")",
",",
"ivT",
".",
"flatten",
"(",
")",
",",
"ivz",
".",
"flatten",
"(",
")",
",",
"iphi",
".",
"flatten",
"(",
")",
",",
"vsun",
"=",
"nu",
".",
"array",
"(",
"obs",
"[",
"3",
":",
"6",
"]",
")",
"/",
"vo",
",",
"Xsun",
"=",
"obs",
"[",
"0",
"]",
"/",
"ro",
",",
"Zsun",
"=",
"obs",
"[",
"2",
"]",
"/",
"ro",
")",
".",
"T",
"bad_indx",
"=",
"(",
"X",
"==",
"0.",
")",
"*",
"(",
"Y",
"==",
"0.",
")",
"*",
"(",
"Z",
"==",
"0.",
")",
"if",
"True",
"in",
"bad_indx",
":",
"X",
"[",
"bad_indx",
"]",
"+=",
"ro",
"/",
"10000.",
"lbdvrpmllpmbb",
"=",
"coords",
".",
"rectgal_to_sphergal",
"(",
"X",
"*",
"ro",
",",
"Y",
"*",
"ro",
",",
"Z",
"*",
"ro",
",",
"vX",
"*",
"vo",
",",
"vY",
"*",
"vo",
",",
"vZ",
"*",
"vo",
",",
"degree",
"=",
"True",
")",
"if",
"lb",
":",
"orb_vxvv",
"=",
"nu",
".",
"array",
"(",
"[",
"lbdvrpmllpmbb",
"[",
":",
",",
"0",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"1",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"2",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"4",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"5",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"3",
"]",
"]",
")",
".",
"T",
"elif",
"radec",
":",
"#Further transform to ra,dec,pmra,pmdec",
"radec",
"=",
"coords",
".",
"lb_to_radec",
"(",
"lbdvrpmllpmbb",
"[",
":",
",",
"0",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"1",
"]",
",",
"degree",
"=",
"True",
",",
"epoch",
"=",
"None",
")",
"pmrapmdec",
"=",
"coords",
".",
"pmllpmbb_to_pmrapmdec",
"(",
"lbdvrpmllpmbb",
"[",
":",
",",
"4",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"5",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"0",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"1",
"]",
",",
"degree",
"=",
"True",
",",
"epoch",
"=",
"None",
")",
"orb_vxvv",
"=",
"nu",
".",
"array",
"(",
"[",
"radec",
"[",
":",
",",
"0",
"]",
",",
"radec",
"[",
":",
",",
"1",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"2",
"]",
",",
"pmrapmdec",
"[",
":",
",",
"0",
"]",
",",
"pmrapmdec",
"[",
":",
",",
"1",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"3",
"]",
"]",
")",
".",
"T",
"elif",
"customsky",
":",
"#Further transform to ra,dec,pmra,pmdec",
"customradec",
"=",
"lb_to_customsky",
"(",
"lbdvrpmllpmbb",
"[",
":",
",",
"0",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"1",
"]",
",",
"degree",
"=",
"True",
")",
"custompmrapmdec",
"=",
"pmllpmbb_to_customsky",
"(",
"lbdvrpmllpmbb",
"[",
":",
",",
"4",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"5",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"0",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"1",
"]",
",",
"degree",
"=",
"True",
")",
"orb_vxvv",
"=",
"nu",
".",
"array",
"(",
"[",
"customradec",
"[",
":",
",",
"0",
"]",
",",
"customradec",
"[",
":",
",",
"1",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"2",
"]",
",",
"custompmrapmdec",
"[",
":",
",",
"0",
"]",
",",
"custompmrapmdec",
"[",
":",
",",
"1",
"]",
",",
"lbdvrpmllpmbb",
"[",
":",
",",
"3",
"]",
"]",
")",
".",
"T",
"else",
":",
"#shape=(2tintJ-1,6)",
"orb_vxvv",
"=",
"nu",
".",
"array",
"(",
"[",
"iR",
".",
"flatten",
"(",
")",
",",
"ivR",
".",
"flatten",
"(",
")",
",",
"ivT",
".",
"flatten",
"(",
")",
",",
"iz",
".",
"flatten",
"(",
")",
",",
"ivz",
".",
"flatten",
"(",
")",
",",
"iphi",
".",
"flatten",
"(",
")",
"]",
")",
".",
"T",
"out",
"=",
"0.",
"for",
"ii",
"in",
"range",
"(",
"vxvv",
".",
"shape",
"[",
"0",
"]",
")",
":",
"sub_vxvv",
"=",
"(",
"orb_vxvv",
"-",
"vxvv",
"[",
"ii",
",",
":",
"]",
".",
"flatten",
"(",
")",
")",
"**",
"2.",
"#print(sub_vxvv[nu.argmin(nu.sum(sub_vxvv,axis=1))])",
"if",
"not",
"vxvv_err",
"is",
"None",
":",
"sub_vxvv",
"/=",
"vxvv_err",
"[",
"ii",
",",
":",
"]",
"**",
"2.",
"else",
":",
"sub_vxvv",
"/=",
"0.01",
"**",
"2.",
"out",
"+=",
"logsumexp",
"(",
"-",
"0.5",
"*",
"nu",
".",
"sum",
"(",
"sub_vxvv",
",",
"axis",
"=",
"1",
")",
")",
"return",
"-",
"out"
] | The log likelihood for fitting an orbit | [
"The",
"log",
"likelihood",
"for",
"fitting",
"an",
"orbit"
] | python | train |
tamasgal/km3pipe | km3pipe/math.py | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L425-L442 | def qrot(vector, quaternion):
"""Rotate a 3D vector using quaternion algebra.
Implemented by Vladimir Kulikovskiy.
Parameters
----------
vector: np.array
quaternion: np.array
Returns
-------
np.array
"""
t = 2 * np.cross(quaternion[1:], vector)
v_rot = vector + quaternion[0] * t + np.cross(quaternion[1:], t)
return v_rot | [
"def",
"qrot",
"(",
"vector",
",",
"quaternion",
")",
":",
"t",
"=",
"2",
"*",
"np",
".",
"cross",
"(",
"quaternion",
"[",
"1",
":",
"]",
",",
"vector",
")",
"v_rot",
"=",
"vector",
"+",
"quaternion",
"[",
"0",
"]",
"*",
"t",
"+",
"np",
".",
"cross",
"(",
"quaternion",
"[",
"1",
":",
"]",
",",
"t",
")",
"return",
"v_rot"
] | Rotate a 3D vector using quaternion algebra.
Implemented by Vladimir Kulikovskiy.
Parameters
----------
vector: np.array
quaternion: np.array
Returns
-------
np.array | [
"Rotate",
"a",
"3D",
"vector",
"using",
"quaternion",
"algebra",
"."
] | python | train |
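A worked example for `qrot` above: a 90 degree rotation about the z axis, with the quaternion written `[w, x, y, z]` as the function expects:

    import numpy as np
    from km3pipe.math import qrot

    theta = np.pi / 2
    q = np.array([np.cos(theta / 2), 0.0, 0.0, np.sin(theta / 2)])  # rotate 90 deg about z
    v = np.array([1.0, 0.0, 0.0])
    print(qrot(v, q))  # approximately [0, 1, 0]
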
core/uricore | uricore/wkz_wsgi.py | https://github.com/core/uricore/blob/dc5ef4be7bd93da4c39e5c1cbd1ae4f3ad3f1f2a/uricore/wkz_wsgi.py#L172-L178 | def make_limited_stream(stream, limit):
"""Makes a stream limited."""
if not isinstance(stream, LimitedStream):
if limit is None:
raise TypeError('stream not limited and no limit provided.')
stream = LimitedStream(stream, limit)
return stream | [
"def",
"make_limited_stream",
"(",
"stream",
",",
"limit",
")",
":",
"if",
"not",
"isinstance",
"(",
"stream",
",",
"LimitedStream",
")",
":",
"if",
"limit",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"'stream not limited and no limit provided.'",
")",
"stream",
"=",
"LimitedStream",
"(",
"stream",
",",
"limit",
")",
"return",
"stream"
] | Makes a stream limited. | [
"Makes",
"a",
"stream",
"limited",
"."
] | python | train |
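A small sketch of `make_limited_stream` above; `LimitedStream` here mirrors the Werkzeug behaviour of capping reads at `limit` bytes, so the wrapped stream stops after five bytes:

    import io
    from uricore.wkz_wsgi import make_limited_stream

    stream = make_limited_stream(io.BytesIO(b"hello world"), 5)
    print(stream.read())  # b'hello'
    # passing an existing LimitedStream returns it unchanged; omitting the limit raises TypeError
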
quantopian/zipline | zipline/data/bcolz_daily_bars.py | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bcolz_daily_bars.py#L645-L676 | def sid_day_index(self, sid, day):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
int
Index into the data tape for the given sid and day.
Raises a NoDataOnDate exception if the given day and sid is before
or after the date range of the equity.
"""
try:
day_loc = self.sessions.get_loc(day)
except Exception:
raise NoDataOnDate("day={0} is outside of calendar={1}".format(
day, self.sessions))
offset = day_loc - self._calendar_offsets[sid]
if offset < 0:
raise NoDataBeforeDate(
"No data on or before day={0} for sid={1}".format(
day, sid))
ix = self._first_rows[sid] + offset
if ix > self._last_rows[sid]:
raise NoDataAfterDate(
"No data on or after day={0} for sid={1}".format(
day, sid))
return ix | [
"def",
"sid_day_index",
"(",
"self",
",",
"sid",
",",
"day",
")",
":",
"try",
":",
"day_loc",
"=",
"self",
".",
"sessions",
".",
"get_loc",
"(",
"day",
")",
"except",
"Exception",
":",
"raise",
"NoDataOnDate",
"(",
"\"day={0} is outside of calendar={1}\"",
".",
"format",
"(",
"day",
",",
"self",
".",
"sessions",
")",
")",
"offset",
"=",
"day_loc",
"-",
"self",
".",
"_calendar_offsets",
"[",
"sid",
"]",
"if",
"offset",
"<",
"0",
":",
"raise",
"NoDataBeforeDate",
"(",
"\"No data on or before day={0} for sid={1}\"",
".",
"format",
"(",
"day",
",",
"sid",
")",
")",
"ix",
"=",
"self",
".",
"_first_rows",
"[",
"sid",
"]",
"+",
"offset",
"if",
"ix",
">",
"self",
".",
"_last_rows",
"[",
"sid",
"]",
":",
"raise",
"NoDataAfterDate",
"(",
"\"No data on or after day={0} for sid={1}\"",
".",
"format",
"(",
"day",
",",
"sid",
")",
")",
"return",
"ix"
] | Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
int
Index into the data tape for the given sid and day.
Raises a NoDataOnDate exception if the given day and sid is before
or after the date range of the equity. | [
"Parameters",
"----------",
"sid",
":",
"int",
"The",
"asset",
"identifier",
".",
"day",
":",
"datetime64",
"-",
"like",
"Midnight",
"of",
"the",
"day",
"for",
"which",
"data",
"is",
"requested",
"."
] | python | train |
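A hedged sketch for `sid_day_index` above; it assumes `reader` is an already-constructed `BcolzDailyBarReader` for an ingested bundle, since building one is outside this record:

    import pandas as pd

    def first_row_for(reader, sid=24, day="2016-01-05"):
        # reader is assumed to be an existing BcolzDailyBarReader
        ts = pd.Timestamp(day, tz="UTC")
        # raises NoDataBeforeDate / NoDataAfterDate outside the asset's date range
        return reader.sid_day_index(sid, ts)
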
saltstack/salt | salt/modules/bamboohr.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bamboohr.py#L247-L288 | def _query(action=None,
command=None,
args=None,
method='GET',
data=None):
'''
Make a web call to BambooHR
The password can be any random text, so we chose Salty text.
'''
subdomain = __opts__.get('bamboohr', {}).get('subdomain', None)
path = 'https://api.bamboohr.com/api/gateway.php/{0}/v1/'.format(
subdomain
)
if action:
path += action
if command:
path += '/{0}'.format(command)
log.debug('BambooHR URL: %s', path)
if not isinstance(args, dict):
args = {}
return_content = None
result = salt.utils.http.query(
path,
method,
username=_apikey(),
password='saltypork',
params=args,
data=data,
decode=False,
text=True,
status=True,
opts=__opts__,
)
log.debug('BambooHR Response Status Code: %s', result['status'])
return [result['status'], result['text']] | [
"def",
"_query",
"(",
"action",
"=",
"None",
",",
"command",
"=",
"None",
",",
"args",
"=",
"None",
",",
"method",
"=",
"'GET'",
",",
"data",
"=",
"None",
")",
":",
"subdomain",
"=",
"__opts__",
".",
"get",
"(",
"'bamboohr'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'subdomain'",
",",
"None",
")",
"path",
"=",
"'https://api.bamboohr.com/api/gateway.php/{0}/v1/'",
".",
"format",
"(",
"subdomain",
")",
"if",
"action",
":",
"path",
"+=",
"action",
"if",
"command",
":",
"path",
"+=",
"'/{0}'",
".",
"format",
"(",
"command",
")",
"log",
".",
"debug",
"(",
"'BambooHR URL: %s'",
",",
"path",
")",
"if",
"not",
"isinstance",
"(",
"args",
",",
"dict",
")",
":",
"args",
"=",
"{",
"}",
"return_content",
"=",
"None",
"result",
"=",
"salt",
".",
"utils",
".",
"http",
".",
"query",
"(",
"path",
",",
"method",
",",
"username",
"=",
"_apikey",
"(",
")",
",",
"password",
"=",
"'saltypork'",
",",
"params",
"=",
"args",
",",
"data",
"=",
"data",
",",
"decode",
"=",
"False",
",",
"text",
"=",
"True",
",",
"status",
"=",
"True",
",",
"opts",
"=",
"__opts__",
",",
")",
"log",
".",
"debug",
"(",
"'BambooHR Response Status Code: %s'",
",",
"result",
"[",
"'status'",
"]",
")",
"return",
"[",
"result",
"[",
"'status'",
"]",
",",
"result",
"[",
"'text'",
"]",
"]"
] | Make a web call to BambooHR
The password can be any random text, so we chose Salty text. | [
"Make",
"a",
"web",
"call",
"to",
"BambooHR"
] | python | train |
dw/mitogen | ansible_mitogen/target.py | https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/target.py#L554-L574 | def run_module_async(kwargs, job_id, timeout_secs, started_sender, econtext):
"""
Execute a module with its run status and result written to a file,
terminating the process on completion. This function must run in a child
forked using :func:`create_fork_child`.
@param mitogen.core.Sender started_sender:
A sender that will receive :data:`True` once the job has reached a
point where its initial job file has been written. This is required to
avoid a race where an overly eager controller can check for a task
before it has reached that point in execution, which is possible at
least on Python 2.4, where forking is not available for async tasks.
"""
arunner = AsyncRunner(
job_id,
timeout_secs,
started_sender,
econtext,
kwargs
)
arunner.run() | [
"def",
"run_module_async",
"(",
"kwargs",
",",
"job_id",
",",
"timeout_secs",
",",
"started_sender",
",",
"econtext",
")",
":",
"arunner",
"=",
"AsyncRunner",
"(",
"job_id",
",",
"timeout_secs",
",",
"started_sender",
",",
"econtext",
",",
"kwargs",
")",
"arunner",
".",
"run",
"(",
")"
] | Execute a module with its run status and result written to a file,
terminating the process on completion. This function must run in a child
forked using :func:`create_fork_child`.
@param mitogen.core.Sender started_sender:
A sender that will receive :data:`True` once the job has reached a
point where its initial job file has been written. This is required to
avoid a race where an overly eager controller can check for a task
before it has reached that point in execution, which is possible at
least on Python 2.4, where forking is not available for async tasks. | [
"Execute",
"a",
"module",
"with",
"its",
"run",
"status",
"and",
"result",
"written",
"to",
"a",
"file",
"terminating",
"on",
"the",
"process",
"on",
"completion",
".",
"This",
"function",
"must",
"run",
"in",
"a",
"child",
"forked",
"using",
":",
"func",
":",
"create_fork_child",
"."
] | python | train |
saltstack/salt | salt/client/ssh/state.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/state.py#L170-L259 | def prep_trans_tar(file_client, chunks, file_refs, pillar=None, id_=None, roster_grains=None):
'''
Generate the execution package from the saltenv file refs and a low state
data structure
'''
gendir = tempfile.mkdtemp()
trans_tar = salt.utils.files.mkstemp()
lowfn = os.path.join(gendir, 'lowstate.json')
pillarfn = os.path.join(gendir, 'pillar.json')
roster_grainsfn = os.path.join(gendir, 'roster_grains.json')
sync_refs = [
[salt.utils.url.create('_modules')],
[salt.utils.url.create('_states')],
[salt.utils.url.create('_grains')],
[salt.utils.url.create('_renderers')],
[salt.utils.url.create('_returners')],
[salt.utils.url.create('_output')],
[salt.utils.url.create('_utils')],
]
with salt.utils.files.fopen(lowfn, 'w+') as fp_:
salt.utils.json.dump(chunks, fp_)
if pillar:
with salt.utils.files.fopen(pillarfn, 'w+') as fp_:
salt.utils.json.dump(pillar, fp_)
if roster_grains:
with salt.utils.files.fopen(roster_grainsfn, 'w+') as fp_:
salt.utils.json.dump(roster_grains, fp_)
if id_ is None:
id_ = ''
try:
cachedir = os.path.join('salt-ssh', id_).rstrip(os.sep)
except AttributeError:
# Minion ID should always be a str, but don't let an int break this
cachedir = os.path.join('salt-ssh', six.text_type(id_)).rstrip(os.sep)
for saltenv in file_refs:
# Location where files in this saltenv will be cached
cache_dest_root = os.path.join(cachedir, 'files', saltenv)
file_refs[saltenv].extend(sync_refs)
env_root = os.path.join(gendir, saltenv)
if not os.path.isdir(env_root):
os.makedirs(env_root)
for ref in file_refs[saltenv]:
for name in ref:
short = salt.utils.url.parse(name)[0].lstrip('/')
cache_dest = os.path.join(cache_dest_root, short)
try:
path = file_client.cache_file(name, saltenv, cachedir=cachedir)
except IOError:
path = ''
if path:
tgt = os.path.join(env_root, short)
tgt_dir = os.path.dirname(tgt)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)
shutil.copy(path, tgt)
continue
try:
files = file_client.cache_dir(name, saltenv, cachedir=cachedir)
except IOError:
files = ''
if files:
for filename in files:
fn = filename[len(file_client.get_cachedir(cache_dest)):].strip('/')
tgt = os.path.join(
env_root,
short,
fn,
)
tgt_dir = os.path.dirname(tgt)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)
shutil.copy(filename, tgt)
continue
try:
# cwd may not exist if it was removed but salt was run from it
cwd = os.getcwd()
except OSError:
cwd = None
os.chdir(gendir)
with closing(tarfile.open(trans_tar, 'w:gz')) as tfp:
for root, dirs, files in salt.utils.path.os_walk(gendir):
for name in files:
full = os.path.join(root, name)
tfp.add(full[len(gendir):].lstrip(os.sep))
if cwd:
os.chdir(cwd)
shutil.rmtree(gendir)
return trans_tar | [
"def",
"prep_trans_tar",
"(",
"file_client",
",",
"chunks",
",",
"file_refs",
",",
"pillar",
"=",
"None",
",",
"id_",
"=",
"None",
",",
"roster_grains",
"=",
"None",
")",
":",
"gendir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"trans_tar",
"=",
"salt",
".",
"utils",
".",
"files",
".",
"mkstemp",
"(",
")",
"lowfn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"gendir",
",",
"'lowstate.json'",
")",
"pillarfn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"gendir",
",",
"'pillar.json'",
")",
"roster_grainsfn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"gendir",
",",
"'roster_grains.json'",
")",
"sync_refs",
"=",
"[",
"[",
"salt",
".",
"utils",
".",
"url",
".",
"create",
"(",
"'_modules'",
")",
"]",
",",
"[",
"salt",
".",
"utils",
".",
"url",
".",
"create",
"(",
"'_states'",
")",
"]",
",",
"[",
"salt",
".",
"utils",
".",
"url",
".",
"create",
"(",
"'_grains'",
")",
"]",
",",
"[",
"salt",
".",
"utils",
".",
"url",
".",
"create",
"(",
"'_renderers'",
")",
"]",
",",
"[",
"salt",
".",
"utils",
".",
"url",
".",
"create",
"(",
"'_returners'",
")",
"]",
",",
"[",
"salt",
".",
"utils",
".",
"url",
".",
"create",
"(",
"'_output'",
")",
"]",
",",
"[",
"salt",
".",
"utils",
".",
"url",
".",
"create",
"(",
"'_utils'",
")",
"]",
",",
"]",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"lowfn",
",",
"'w+'",
")",
"as",
"fp_",
":",
"salt",
".",
"utils",
".",
"json",
".",
"dump",
"(",
"chunks",
",",
"fp_",
")",
"if",
"pillar",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"pillarfn",
",",
"'w+'",
")",
"as",
"fp_",
":",
"salt",
".",
"utils",
".",
"json",
".",
"dump",
"(",
"pillar",
",",
"fp_",
")",
"if",
"roster_grains",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"roster_grainsfn",
",",
"'w+'",
")",
"as",
"fp_",
":",
"salt",
".",
"utils",
".",
"json",
".",
"dump",
"(",
"roster_grains",
",",
"fp_",
")",
"if",
"id_",
"is",
"None",
":",
"id_",
"=",
"''",
"try",
":",
"cachedir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'salt-ssh'",
",",
"id_",
")",
".",
"rstrip",
"(",
"os",
".",
"sep",
")",
"except",
"AttributeError",
":",
"# Minion ID should always be a str, but don't let an int break this",
"cachedir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'salt-ssh'",
",",
"six",
".",
"text_type",
"(",
"id_",
")",
")",
".",
"rstrip",
"(",
"os",
".",
"sep",
")",
"for",
"saltenv",
"in",
"file_refs",
":",
"# Location where files in this saltenv will be cached",
"cache_dest_root",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cachedir",
",",
"'files'",
",",
"saltenv",
")",
"file_refs",
"[",
"saltenv",
"]",
".",
"extend",
"(",
"sync_refs",
")",
"env_root",
"=",
"os",
".",
"path",
".",
"join",
"(",
"gendir",
",",
"saltenv",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"env_root",
")",
":",
"os",
".",
"makedirs",
"(",
"env_root",
")",
"for",
"ref",
"in",
"file_refs",
"[",
"saltenv",
"]",
":",
"for",
"name",
"in",
"ref",
":",
"short",
"=",
"salt",
".",
"utils",
".",
"url",
".",
"parse",
"(",
"name",
")",
"[",
"0",
"]",
".",
"lstrip",
"(",
"'/'",
")",
"cache_dest",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cache_dest_root",
",",
"short",
")",
"try",
":",
"path",
"=",
"file_client",
".",
"cache_file",
"(",
"name",
",",
"saltenv",
",",
"cachedir",
"=",
"cachedir",
")",
"except",
"IOError",
":",
"path",
"=",
"''",
"if",
"path",
":",
"tgt",
"=",
"os",
".",
"path",
".",
"join",
"(",
"env_root",
",",
"short",
")",
"tgt_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"tgt",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"tgt_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"tgt_dir",
")",
"shutil",
".",
"copy",
"(",
"path",
",",
"tgt",
")",
"continue",
"try",
":",
"files",
"=",
"file_client",
".",
"cache_dir",
"(",
"name",
",",
"saltenv",
",",
"cachedir",
"=",
"cachedir",
")",
"except",
"IOError",
":",
"files",
"=",
"''",
"if",
"files",
":",
"for",
"filename",
"in",
"files",
":",
"fn",
"=",
"filename",
"[",
"len",
"(",
"file_client",
".",
"get_cachedir",
"(",
"cache_dest",
")",
")",
":",
"]",
".",
"strip",
"(",
"'/'",
")",
"tgt",
"=",
"os",
".",
"path",
".",
"join",
"(",
"env_root",
",",
"short",
",",
"fn",
",",
")",
"tgt_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"tgt",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"tgt_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"tgt_dir",
")",
"shutil",
".",
"copy",
"(",
"filename",
",",
"tgt",
")",
"continue",
"try",
":",
"# cwd may not exist if it was removed but salt was run from it",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"except",
"OSError",
":",
"cwd",
"=",
"None",
"os",
".",
"chdir",
"(",
"gendir",
")",
"with",
"closing",
"(",
"tarfile",
".",
"open",
"(",
"trans_tar",
",",
"'w:gz'",
")",
")",
"as",
"tfp",
":",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"salt",
".",
"utils",
".",
"path",
".",
"os_walk",
"(",
"gendir",
")",
":",
"for",
"name",
"in",
"files",
":",
"full",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"name",
")",
"tfp",
".",
"add",
"(",
"full",
"[",
"len",
"(",
"gendir",
")",
":",
"]",
".",
"lstrip",
"(",
"os",
".",
"sep",
")",
")",
"if",
"cwd",
":",
"os",
".",
"chdir",
"(",
"cwd",
")",
"shutil",
".",
"rmtree",
"(",
"gendir",
")",
"return",
"trans_tar"
] | Generate the execution package from the saltenv file refs and a low state
data structure | [
"Generate",
"the",
"execution",
"package",
"from",
"the",
"saltenv",
"file",
"refs",
"and",
"a",
"low",
"state",
"data",
"structure"
] | python | train |
Unidata/MetPy | metpy/interpolate/grid.py | https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/interpolate/grid.py#L111-L133 | def get_boundary_coords(x, y, spatial_pad=0):
r"""Return bounding box based on given x and y coordinates assuming northern hemisphere.
x: numeric
x coordinates.
y: numeric
y coordinates.
spatial_pad: numeric
Number of meters to add to the x and y dimensions to reduce
edge effects.
Returns
-------
bbox: dictionary
dictionary containing coordinates for corners of study area
"""
west = np.min(x) - spatial_pad
east = np.max(x) + spatial_pad
north = np.max(y) + spatial_pad
south = np.min(y) - spatial_pad
return {'west': west, 'south': south, 'east': east, 'north': north} | [
"def",
"get_boundary_coords",
"(",
"x",
",",
"y",
",",
"spatial_pad",
"=",
"0",
")",
":",
"west",
"=",
"np",
".",
"min",
"(",
"x",
")",
"-",
"spatial_pad",
"east",
"=",
"np",
".",
"max",
"(",
"x",
")",
"+",
"spatial_pad",
"north",
"=",
"np",
".",
"max",
"(",
"y",
")",
"+",
"spatial_pad",
"south",
"=",
"np",
".",
"min",
"(",
"y",
")",
"-",
"spatial_pad",
"return",
"{",
"'west'",
":",
"west",
",",
"'south'",
":",
"south",
",",
"'east'",
":",
"east",
",",
"'north'",
":",
"north",
"}"
] | r"""Return bounding box based on given x and y coordinates assuming northern hemisphere.
x: numeric
x coordinates.
y: numeric
y coordinates.
spatial_pad: numeric
Number of meters to add to the x and y dimensions to reduce
edge effects.
Returns
-------
bbox: dictionary
dictionary containing coordinates for corners of study area | [
"r",
"Return",
"bounding",
"box",
"based",
"on",
"given",
"x",
"and",
"y",
"coordinates",
"assuming",
"northern",
"hemisphere",
"."
] | python | train |
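A short usage sketch for the get_boundary_coords record above. The import path is inferred from the record's path field (metpy/interpolate/grid.py) and is an assumption; the expected output follows directly from the definition shown.

import numpy as np
from metpy.interpolate.grid import get_boundary_coords  # import path assumed from the record's path field

x = np.array([0.0, 10.0, 25.0])
y = np.array([5.0, 15.0, 30.0])
bbox = get_boundary_coords(x, y, spatial_pad=2)
# With the definition above: {'west': -2.0, 'south': 3.0, 'east': 27.0, 'north': 32.0}
print(bbox)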
joke2k/faker | faker/providers/address/__init__.py | https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/address/__init__.py#L52-L57 | def street_name(self):
"""
:example 'Crist Parks'
"""
pattern = self.random_element(self.street_name_formats)
return self.generator.parse(pattern) | [
"def",
"street_name",
"(",
"self",
")",
":",
"pattern",
"=",
"self",
".",
"random_element",
"(",
"self",
".",
"street_name_formats",
")",
"return",
"self",
".",
"generator",
".",
"parse",
"(",
"pattern",
")"
] | :example 'Crist Parks' | [
":",
"example",
"Crist",
"Parks"
] | python | train |
PaulHancock/Aegean | AegeanTools/wcs_helpers.py | https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/wcs_helpers.py#L100-L118 | def from_file(cls, filename, beam=None):
"""
Create a new WCSHelper class from a given fits file.
Parameters
----------
filename : string
The file to be read
beam : :class:`AegeanTools.fits_image.Beam` or None
The synthesized beam. If the supplied beam is None then one is constructed from the header.
Returns
-------
obj : :class:`AegeanTools.wcs_helpers.WCSHelper`
A helper object
"""
header = fits.getheader(filename)
return cls.from_header(header, beam) | [
"def",
"from_file",
"(",
"cls",
",",
"filename",
",",
"beam",
"=",
"None",
")",
":",
"header",
"=",
"fits",
".",
"getheader",
"(",
"filename",
")",
"return",
"cls",
".",
"from_header",
"(",
"header",
",",
"beam",
")"
] | Create a new WCSHelper class from a given fits file.
Parameters
----------
filename : string
The file to be read
beam : :class:`AegeanTools.fits_image.Beam` or None
The synthesized beam. If the supplied beam is None then one is constructed from the header.
Returns
-------
obj : :class:`AegeanTools.wcs_helpers.WCSHelper`
A helper object | [
"Create",
"a",
"new",
"WCSHelper",
"class",
"from",
"a",
"given",
"fits",
"file",
"."
] | python | train |
MycroftAI/mycroft-precise | precise/scripts/train_generated.py | https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/train_generated.py#L217-L224 | def generate_samples(self):
"""Generate training samples (network inputs and outputs)"""
filenames = glob_all(self.args.random_data_folder, '*.wav')
shuffle(filenames)
while True:
for fn in filenames:
for x, y in self.vectors_from_fn(fn):
yield x, y | [
"def",
"generate_samples",
"(",
"self",
")",
":",
"filenames",
"=",
"glob_all",
"(",
"self",
".",
"args",
".",
"random_data_folder",
",",
"'*.wav'",
")",
"shuffle",
"(",
"filenames",
")",
"while",
"True",
":",
"for",
"fn",
"in",
"filenames",
":",
"for",
"x",
",",
"y",
"in",
"self",
".",
"vectors_from_fn",
"(",
"fn",
")",
":",
"yield",
"x",
",",
"y"
] | Generate training samples (network inputs and outputs) | [
"Generate",
"training",
"samples",
"(",
"network",
"inputs",
"and",
"outputs",
")"
] | python | train |
sentinel-hub/eo-learn | coregistration/eolearn/coregistration/coregistration_utilities.py | https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/coregistration/eolearn/coregistration/coregistration_utilities.py#L56-L62 | def random_partition(n, n_data):
"""return n random rows of data (and also the other len(data)-n rows)"""
all_idxs = np.arange(n_data)
np.random.shuffle(all_idxs)
idxs1 = all_idxs[:n]
idxs2 = all_idxs[n:]
return idxs1, idxs2 | [
"def",
"random_partition",
"(",
"n",
",",
"n_data",
")",
":",
"all_idxs",
"=",
"np",
".",
"arange",
"(",
"n_data",
")",
"np",
".",
"random",
".",
"shuffle",
"(",
"all_idxs",
")",
"idxs1",
"=",
"all_idxs",
"[",
":",
"n",
"]",
"idxs2",
"=",
"all_idxs",
"[",
"n",
":",
"]",
"return",
"idxs1",
",",
"idxs2"
] | return n random rows of data (and also the other len(data)-n rows) | [
"return",
"n",
"random",
"rows",
"of",
"data",
"(",
"and",
"also",
"the",
"other",
"len",
"(",
"data",
")",
"-",
"n",
"rows",
")"
] | python | train |
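A minimal sketch of calling the random_partition helper from the record above; the module path is inferred from the record's path field and is an assumption.

from eolearn.coregistration.coregistration_utilities import random_partition  # path assumed from the record

idxs1, idxs2 = random_partition(3, 10)
# idxs1 holds 3 randomly chosen indices from range(10); idxs2 holds the remaining 7
print(len(idxs1), len(idxs2))  # 3 7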
calmjs/calmjs.parse | src/calmjs/parse/rules.py | https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/rules.py#L67-L124 | def minify(drop_semi=True):
"""
Rules for minifying output.
Arguments:
drop_semi
Drop semicolons whenever possible. Note that if Dedent and
OptionalNewline has a handler defined, it will stop final break
statements from being resolved due to reliance on normalized
resolution.
"""
layout_handlers = {
OpenBlock: layout_handler_openbrace,
CloseBlock: layout_handler_closebrace,
EndStatement: layout_handler_semicolon,
Space: layout_handler_space_minimum,
OptionalSpace: layout_handler_space_minimum,
RequiredSpace: layout_handler_space_imply,
(Space, OpenBlock): layout_handler_openbrace,
(Space, EndStatement): layout_handler_semicolon,
(OptionalSpace, EndStatement): layout_handler_semicolon,
}
if drop_semi:
# if these are defined, they should be dropped; should really
# provide these as a flag.
# layout_handlers.update({
# OptionalNewline: None,
# Dedent: None,
# })
layout_handlers.update({
EndStatement: layout_handler_semicolon_optional,
# these two rules rely on the normalized resolution
(OptionalSpace, EndStatement): layout_handler_semicolon_optional,
(EndStatement, CloseBlock): layout_handler_closebrace,
# this is a fallback rule for when Dedent is defined by
# some other rule, which won't neuter all optional
# semicolons.
(EndStatement, Dedent): rule_handler_noop,
((OptionalSpace, EndStatement), CloseBlock):
layout_handler_closebrace,
})
def minify_rule():
return {
'layout_handlers': layout_handlers,
'deferrable_handlers': {
Literal: deferrable_handler_literal_continuation,
},
}
return minify_rule | [
"def",
"minify",
"(",
"drop_semi",
"=",
"True",
")",
":",
"layout_handlers",
"=",
"{",
"OpenBlock",
":",
"layout_handler_openbrace",
",",
"CloseBlock",
":",
"layout_handler_closebrace",
",",
"EndStatement",
":",
"layout_handler_semicolon",
",",
"Space",
":",
"layout_handler_space_minimum",
",",
"OptionalSpace",
":",
"layout_handler_space_minimum",
",",
"RequiredSpace",
":",
"layout_handler_space_imply",
",",
"(",
"Space",
",",
"OpenBlock",
")",
":",
"layout_handler_openbrace",
",",
"(",
"Space",
",",
"EndStatement",
")",
":",
"layout_handler_semicolon",
",",
"(",
"OptionalSpace",
",",
"EndStatement",
")",
":",
"layout_handler_semicolon",
",",
"}",
"if",
"drop_semi",
":",
"# if these are defined, they should be dropped; should really",
"# provide these as a flag.",
"# layout_handlers.update({",
"# OptionalNewline: None,",
"# Dedent: None,",
"# })",
"layout_handlers",
".",
"update",
"(",
"{",
"EndStatement",
":",
"layout_handler_semicolon_optional",
",",
"# these two rules rely on the normalized resolution",
"(",
"OptionalSpace",
",",
"EndStatement",
")",
":",
"layout_handler_semicolon_optional",
",",
"(",
"EndStatement",
",",
"CloseBlock",
")",
":",
"layout_handler_closebrace",
",",
"# this is a fallback rule for when Dedent is defined by",
"# some other rule, which won't neuter all optional",
"# semicolons.",
"(",
"EndStatement",
",",
"Dedent",
")",
":",
"rule_handler_noop",
",",
"(",
"(",
"OptionalSpace",
",",
"EndStatement",
")",
",",
"CloseBlock",
")",
":",
"layout_handler_closebrace",
",",
"}",
")",
"def",
"minify_rule",
"(",
")",
":",
"return",
"{",
"'layout_handlers'",
":",
"layout_handlers",
",",
"'deferrable_handlers'",
":",
"{",
"Literal",
":",
"deferrable_handler_literal_continuation",
",",
"}",
",",
"}",
"return",
"minify_rule"
] | Rules for minifying output.
Arguments:
drop_semi
Drop semicolons whenever possible. Note that if Dedent and
OptionalNewline has a handler defined, it will stop final break
statements from being resolved due to reliance on normalized
resolution. | [
"Rules",
"for",
"minifying",
"output",
"."
] | python | train |
Contraz/demosys-py | demosys/loaders/scene/gltf.py | https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/loaders/scene/gltf.py#L452-L455 | def get_bbox(self, primitive):
"""Get the bounding box for the mesh"""
accessor = primitive.attributes.get('POSITION')
return accessor.min, accessor.max | [
"def",
"get_bbox",
"(",
"self",
",",
"primitive",
")",
":",
"accessor",
"=",
"primitive",
".",
"attributes",
".",
"get",
"(",
"'POSITION'",
")",
"return",
"accessor",
".",
"min",
",",
"accessor",
".",
"max"
] | Get the bounding box for the mesh | [
"Get",
"the",
"bounding",
"box",
"for",
"the",
"mesh"
] | python | valid |
mcieslik-mctp/papy | src/numap/NuMap.py | https://github.com/mcieslik-mctp/papy/blob/708e50827b5db46bbea081982cb74b9b0e464064/src/numap/NuMap.py#L184-L248 | def _pool_put(pool_semaphore, tasks, put_to_pool_in, pool_size, id_self, \
is_stopping):
"""
(internal) Intended to be run in a separate thread. Feeds tasks into
to the pool whenever semaphore permits. Finishes if self._stopping is
set.
"""
log.debug('NuMap(%s) started pool_putter.' % id_self)
last_tasks = {}
for task in xrange(tasks.lenght):
last_tasks[task] = -1
stop_tasks = []
while True:
# are we stopping the Weaver?
if is_stopping():
log.debug('NuMap(%s) pool_putter has been told to stop.' % \
id_self)
tasks.stop()
# try to get a task
try:
log.debug('NuMap(%s) pool_putter waits for next task.' % \
id_self)
task = tasks.next()
log.debug('NuMap(%s) pool_putter received next task.' % id_self)
except StopIteration:
# Weaver raised a StopIteration
stop_task = tasks.i # current task
log.debug('NuMap(%s) pool_putter caught StopIteration from task %s.' % \
(id_self, stop_task))
if stop_task not in stop_tasks:
# task raised stop for the first time.
log.debug('NuMap(%s) pool_putter task %s first-time finished.' % \
(id_self, stop_task))
stop_tasks.append(stop_task)
pool_semaphore.acquire()
log.debug('NuMap(%s) pool_putter sends a sentinel for task %s.' % \
(id_self, stop_task))
put_to_pool_in((stop_task, None, last_tasks[stop_task]))
if len(stop_tasks) == tasks.lenght:
log.debug('NuMap(%s) pool_putter sent sentinels for all tasks.' % \
id_self)
# all tasks have been stopped
for _worker in xrange(pool_size):
put_to_pool_in(None)
log.debug('NuMap(%s) pool_putter sent sentinel for %s workers' % \
(id_self, pool_size))
# this kills the pool_putter
break
# multiple StopIterations for a tasks are ignored.
# This is for stride.
continue
# got task
last_tasks[tasks.i] = task[-1][0] # last valid result
log.debug('NuMap(%s) pool_putter waits for semaphore for task %s' % \
(id_self, task))
pool_semaphore.acquire()
log.debug('NuMap(%s) pool_putter gets semaphore for task %s' % \
(id_self, task))
#gc.disable()
put_to_pool_in(task)
#gc.enable()
log.debug('NuMap(%s) pool_putter submits task %s to worker.' % \
(id_self, task))
log.debug('NuMap(%s) pool_putter returns' % id_self) | [
"def",
"_pool_put",
"(",
"pool_semaphore",
",",
"tasks",
",",
"put_to_pool_in",
",",
"pool_size",
",",
"id_self",
",",
"is_stopping",
")",
":",
"log",
".",
"debug",
"(",
"'NuMap(%s) started pool_putter.'",
"%",
"id_self",
")",
"last_tasks",
"=",
"{",
"}",
"for",
"task",
"in",
"xrange",
"(",
"tasks",
".",
"lenght",
")",
":",
"last_tasks",
"[",
"task",
"]",
"=",
"-",
"1",
"stop_tasks",
"=",
"[",
"]",
"while",
"True",
":",
"# are we stopping the Weaver?",
"if",
"is_stopping",
"(",
")",
":",
"log",
".",
"debug",
"(",
"'NuMap(%s) pool_putter has been told to stop.'",
"%",
"id_self",
")",
"tasks",
".",
"stop",
"(",
")",
"# try to get a task",
"try",
":",
"log",
".",
"debug",
"(",
"'NuMap(%s) pool_putter waits for next task.'",
"%",
"id_self",
")",
"task",
"=",
"tasks",
".",
"next",
"(",
")",
"log",
".",
"debug",
"(",
"'NuMap(%s) pool_putter received next task.'",
"%",
"id_self",
")",
"except",
"StopIteration",
":",
"# Weaver raised a StopIteration",
"stop_task",
"=",
"tasks",
".",
"i",
"# current task",
"log",
".",
"debug",
"(",
"'NuMap(%s) pool_putter caught StopIteration from task %s.'",
"%",
"(",
"id_self",
",",
"stop_task",
")",
")",
"if",
"stop_task",
"not",
"in",
"stop_tasks",
":",
"# task raised stop for the first time.",
"log",
".",
"debug",
"(",
"'NuMap(%s) pool_putter task %s first-time finished.'",
"%",
"(",
"id_self",
",",
"stop_task",
")",
")",
"stop_tasks",
".",
"append",
"(",
"stop_task",
")",
"pool_semaphore",
".",
"acquire",
"(",
")",
"log",
".",
"debug",
"(",
"'NuMap(%s) pool_putter sends a sentinel for task %s.'",
"%",
"(",
"id_self",
",",
"stop_task",
")",
")",
"put_to_pool_in",
"(",
"(",
"stop_task",
",",
"None",
",",
"last_tasks",
"[",
"stop_task",
"]",
")",
")",
"if",
"len",
"(",
"stop_tasks",
")",
"==",
"tasks",
".",
"lenght",
":",
"log",
".",
"debug",
"(",
"'NuMap(%s) pool_putter sent sentinels for all tasks.'",
"%",
"id_self",
")",
"# all tasks have been stopped",
"for",
"_worker",
"in",
"xrange",
"(",
"pool_size",
")",
":",
"put_to_pool_in",
"(",
"None",
")",
"log",
".",
"debug",
"(",
"'NuMap(%s) pool_putter sent sentinel for %s workers'",
"%",
"(",
"id_self",
",",
"pool_size",
")",
")",
"# this kills the pool_putter",
"break",
"# multiple StopIterations for a tasks are ignored. ",
"# This is for stride.",
"continue",
"# got task",
"last_tasks",
"[",
"tasks",
".",
"i",
"]",
"=",
"task",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"# last valid result",
"log",
".",
"debug",
"(",
"'NuMap(%s) pool_putter waits for semaphore for task %s'",
"%",
"(",
"id_self",
",",
"task",
")",
")",
"pool_semaphore",
".",
"acquire",
"(",
")",
"log",
".",
"debug",
"(",
"'NuMap(%s) pool_putter gets semaphore for task %s'",
"%",
"(",
"id_self",
",",
"task",
")",
")",
"#gc.disable()",
"put_to_pool_in",
"(",
"task",
")",
"#gc.enable()",
"log",
".",
"debug",
"(",
"'NuMap(%s) pool_putter submits task %s to worker.'",
"%",
"(",
"id_self",
",",
"task",
")",
")",
"log",
".",
"debug",
"(",
"'NuMap(%s) pool_putter returns'",
"%",
"id_self",
")"
] | (internal) Intended to be run in a separate thread. Feeds tasks into
to the pool whenever semaphore permits. Finishes if self._stopping is
set. | [
"(",
"internal",
")",
"Intended",
"to",
"be",
"run",
"in",
"a",
"seperate",
"thread",
".",
"Feeds",
"tasks",
"into",
"to",
"the",
"pool",
"whenever",
"semaphore",
"permits",
".",
"Finishes",
"if",
"self",
".",
"_stopping",
"is",
"set",
"."
] | python | train |
amzn/ion-python | amazon/ion/reader_text.py | https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/reader_text.py#L369-L379 | def read_data_event(self, whence, complete=False, can_flush=False):
"""Creates a transition to a co-routine for retrieving data as bytes.
Args:
whence (Coroutine): The co-routine to return to after the data is satisfied.
complete (Optional[bool]): True if STREAM_END should be emitted if no bytes are read or
available; False if INCOMPLETE should be emitted in that case.
can_flush (Optional[bool]): True if NEXT may be requested after INCOMPLETE is emitted as a result of this
data request.
"""
return Transition(None, _read_data_handler(whence, self, complete, can_flush)) | [
"def",
"read_data_event",
"(",
"self",
",",
"whence",
",",
"complete",
"=",
"False",
",",
"can_flush",
"=",
"False",
")",
":",
"return",
"Transition",
"(",
"None",
",",
"_read_data_handler",
"(",
"whence",
",",
"self",
",",
"complete",
",",
"can_flush",
")",
")"
] | Creates a transition to a co-routine for retrieving data as bytes.
Args:
whence (Coroutine): The co-routine to return to after the data is satisfied.
complete (Optional[bool]): True if STREAM_END should be emitted if no bytes are read or
available; False if INCOMPLETE should be emitted in that case.
can_flush (Optional[bool]): True if NEXT may be requested after INCOMPLETE is emitted as a result of this
data request. | [
"Creates",
"a",
"transition",
"to",
"a",
"co",
"-",
"routine",
"for",
"retrieving",
"data",
"as",
"bytes",
"."
] | python | train |
sentinel-hub/sentinelhub-py | sentinelhub/ogc.py | https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/ogc.py#L171-L209 | def _get_common_url_parameters(request):
""" Returns parameters common dictionary for WMS, WCS and FIS request.
:param request: OGC-type request with specified bounding box, cloud coverage for specific product.
:type request: OgcRequest or GeopediaRequest
:return: dictionary with parameters
:rtype: dict
"""
params = {
'SERVICE': request.service_type.value
}
if hasattr(request, 'maxcc'):
params['MAXCC'] = 100.0 * request.maxcc
if hasattr(request, 'custom_url_params') and request.custom_url_params is not None:
params = {**params,
**{k.value: str(v) for k, v in request.custom_url_params.items()}}
if CustomUrlParam.EVALSCRIPT.value in params:
evalscript = params[CustomUrlParam.EVALSCRIPT.value]
params[CustomUrlParam.EVALSCRIPT.value] = b64encode(evalscript.encode()).decode()
if CustomUrlParam.GEOMETRY.value in params:
geometry = params[CustomUrlParam.GEOMETRY.value]
crs = request.bbox.crs
if isinstance(geometry, Geometry):
if geometry.crs is not crs:
raise ValueError('Geometry object in custom_url_params should have the same CRS as given BBox')
else:
geometry = Geometry(geometry, crs)
if geometry.crs is CRS.WGS84:
geometry = geometry.reverse()
params[CustomUrlParam.GEOMETRY.value] = geometry.wkt
return params | [
"def",
"_get_common_url_parameters",
"(",
"request",
")",
":",
"params",
"=",
"{",
"'SERVICE'",
":",
"request",
".",
"service_type",
".",
"value",
"}",
"if",
"hasattr",
"(",
"request",
",",
"'maxcc'",
")",
":",
"params",
"[",
"'MAXCC'",
"]",
"=",
"100.0",
"*",
"request",
".",
"maxcc",
"if",
"hasattr",
"(",
"request",
",",
"'custom_url_params'",
")",
"and",
"request",
".",
"custom_url_params",
"is",
"not",
"None",
":",
"params",
"=",
"{",
"*",
"*",
"params",
",",
"*",
"*",
"{",
"k",
".",
"value",
":",
"str",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"request",
".",
"custom_url_params",
".",
"items",
"(",
")",
"}",
"}",
"if",
"CustomUrlParam",
".",
"EVALSCRIPT",
".",
"value",
"in",
"params",
":",
"evalscript",
"=",
"params",
"[",
"CustomUrlParam",
".",
"EVALSCRIPT",
".",
"value",
"]",
"params",
"[",
"CustomUrlParam",
".",
"EVALSCRIPT",
".",
"value",
"]",
"=",
"b64encode",
"(",
"evalscript",
".",
"encode",
"(",
")",
")",
".",
"decode",
"(",
")",
"if",
"CustomUrlParam",
".",
"GEOMETRY",
".",
"value",
"in",
"params",
":",
"geometry",
"=",
"params",
"[",
"CustomUrlParam",
".",
"GEOMETRY",
".",
"value",
"]",
"crs",
"=",
"request",
".",
"bbox",
".",
"crs",
"if",
"isinstance",
"(",
"geometry",
",",
"Geometry",
")",
":",
"if",
"geometry",
".",
"crs",
"is",
"not",
"crs",
":",
"raise",
"ValueError",
"(",
"'Geometry object in custom_url_params should have the same CRS as given BBox'",
")",
"else",
":",
"geometry",
"=",
"Geometry",
"(",
"geometry",
",",
"crs",
")",
"if",
"geometry",
".",
"crs",
"is",
"CRS",
".",
"WGS84",
":",
"geometry",
"=",
"geometry",
".",
"reverse",
"(",
")",
"params",
"[",
"CustomUrlParam",
".",
"GEOMETRY",
".",
"value",
"]",
"=",
"geometry",
".",
"wkt",
"return",
"params"
] | Returns parameters common dictionary for WMS, WCS and FIS request.
:param request: OGC-type request with specified bounding box, cloud coverage for specific product.
:type request: OgcRequest or GeopediaRequest
:return: dictionary with parameters
:rtype: dict | [
"Returns",
"parameters",
"common",
"dictionary",
"for",
"WMS",
"WCS",
"and",
"FIS",
"request",
"."
] | python | train |
greenbone/ospd | ospd/ospd.py | https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/ospd.py#L618-L629 | def handle_stop_scan_command(self, scan_et):
""" Handles <stop_scan> command.
@return: Response string for <stop_scan> command.
"""
scan_id = scan_et.attrib.get('scan_id')
if scan_id is None or scan_id == '':
raise OSPDError('No scan_id attribute', 'stop_scan')
self.stop_scan(scan_id)
return simple_response_str('stop_scan', 200, 'OK') | [
"def",
"handle_stop_scan_command",
"(",
"self",
",",
"scan_et",
")",
":",
"scan_id",
"=",
"scan_et",
".",
"attrib",
".",
"get",
"(",
"'scan_id'",
")",
"if",
"scan_id",
"is",
"None",
"or",
"scan_id",
"==",
"''",
":",
"raise",
"OSPDError",
"(",
"'No scan_id attribute'",
",",
"'stop_scan'",
")",
"self",
".",
"stop_scan",
"(",
"scan_id",
")",
"return",
"simple_response_str",
"(",
"'stop_scan'",
",",
"200",
",",
"'OK'",
")"
] | Handles <stop_scan> command.
@return: Response string for <stop_scan> command. | [
"Handles",
"<stop_scan",
">",
"command",
"."
] | python | train |
exosite-labs/pyonep | pyonep/onep.py | https://github.com/exosite-labs/pyonep/blob/d27b621b00688a542e0adcc01f3e3354c05238a1/pyonep/onep.py#L58-L64 | def add(self, auth, method, args, notimeout=False):
"""Append a deferred request for a particular auth/CIK."""
authstr = self._authstr(auth)
self._requests.setdefault(authstr, []).append((method, args))
self._notimeouts.setdefault(authstr, False)
if notimeout:
self._notimeouts[authstr] = notimeout | [
"def",
"add",
"(",
"self",
",",
"auth",
",",
"method",
",",
"args",
",",
"notimeout",
"=",
"False",
")",
":",
"authstr",
"=",
"self",
".",
"_authstr",
"(",
"auth",
")",
"self",
".",
"_requests",
".",
"setdefault",
"(",
"authstr",
",",
"[",
"]",
")",
".",
"append",
"(",
"(",
"method",
",",
"args",
")",
")",
"self",
".",
"_notimeouts",
".",
"setdefault",
"(",
"authstr",
",",
"False",
")",
"if",
"notimeout",
":",
"self",
".",
"_notimeouts",
"[",
"authstr",
"]",
"=",
"notimeout"
] | Append a deferred request for a particular auth/CIK. | [
"Append",
"a",
"deferred",
"request",
"for",
"a",
"particular",
"auth",
"/",
"CIK",
"."
] | python | train |
pybel/pybel | src/pybel/struct/mutation/induction_expansion.py | https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/mutation/induction_expansion.py#L39-L49 | def get_multi_causal_downstream(graph, nbunch: Union[BaseEntity, Iterable[BaseEntity]]):
"""Get the union of all of the 2-level deep causal downstream subgraphs from the nbunch.
:param pybel.BELGraph graph: A BEL graph
:param nbunch: A BEL node or list of BEL nodes
:return: A subgraph of the original BEL graph
:rtype: pybel.BELGraph
"""
result = get_downstream_causal_subgraph(graph, nbunch)
expand_downstream_causal(graph, result)
return result | [
"def",
"get_multi_causal_downstream",
"(",
"graph",
",",
"nbunch",
":",
"Union",
"[",
"BaseEntity",
",",
"Iterable",
"[",
"BaseEntity",
"]",
"]",
")",
":",
"result",
"=",
"get_downstream_causal_subgraph",
"(",
"graph",
",",
"nbunch",
")",
"expand_downstream_causal",
"(",
"graph",
",",
"result",
")",
"return",
"result"
] | Get the union of all of the 2-level deep causal downstream subgraphs from the nbunch.
:param pybel.BELGraph graph: A BEL graph
:param nbunch: A BEL node or list of BEL nodes
:return: A subgraph of the original BEL graph
:rtype: pybel.BELGraph | [
"Get",
"the",
"union",
"of",
"all",
"of",
"the",
"2",
"-",
"level",
"deep",
"causal",
"downstream",
"subgraphs",
"from",
"the",
"nbunch",
"."
] | python | train |
nerdvegas/rez | src/rez/vendor/amqp/serialization.py | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/serialization.py#L451-L479 | def _load_properties(self, raw_bytes):
"""Given the raw bytes containing the property-flags and property-list
from a content-frame-header, parse and insert into a dictionary
stored in this object as an attribute named 'properties'."""
r = AMQPReader(raw_bytes)
#
# Read 16-bit shorts until we get one with a low bit set to zero
#
flags = []
while 1:
flag_bits = r.read_short()
flags.append(flag_bits)
if flag_bits & 1 == 0:
break
shift = 0
d = {}
for key, proptype in self.PROPERTIES:
if shift == 0:
if not flags:
break
flag_bits, flags = flags[0], flags[1:]
shift = 15
if flag_bits & (1 << shift):
d[key] = getattr(r, 'read_' + proptype)()
shift -= 1
self.properties = d | [
"def",
"_load_properties",
"(",
"self",
",",
"raw_bytes",
")",
":",
"r",
"=",
"AMQPReader",
"(",
"raw_bytes",
")",
"#",
"# Read 16-bit shorts until we get one with a low bit set to zero",
"#",
"flags",
"=",
"[",
"]",
"while",
"1",
":",
"flag_bits",
"=",
"r",
".",
"read_short",
"(",
")",
"flags",
".",
"append",
"(",
"flag_bits",
")",
"if",
"flag_bits",
"&",
"1",
"==",
"0",
":",
"break",
"shift",
"=",
"0",
"d",
"=",
"{",
"}",
"for",
"key",
",",
"proptype",
"in",
"self",
".",
"PROPERTIES",
":",
"if",
"shift",
"==",
"0",
":",
"if",
"not",
"flags",
":",
"break",
"flag_bits",
",",
"flags",
"=",
"flags",
"[",
"0",
"]",
",",
"flags",
"[",
"1",
":",
"]",
"shift",
"=",
"15",
"if",
"flag_bits",
"&",
"(",
"1",
"<<",
"shift",
")",
":",
"d",
"[",
"key",
"]",
"=",
"getattr",
"(",
"r",
",",
"'read_'",
"+",
"proptype",
")",
"(",
")",
"shift",
"-=",
"1",
"self",
".",
"properties",
"=",
"d"
] | Given the raw bytes containing the property-flags and property-list
from a content-frame-header, parse and insert into a dictionary
stored in this object as an attribute named 'properties'. | [
"Given",
"the",
"raw",
"bytes",
"containing",
"the",
"property",
"-",
"flags",
"and",
"property",
"-",
"list",
"from",
"a",
"content",
"-",
"frame",
"-",
"header",
"parse",
"and",
"insert",
"into",
"a",
"dictionary",
"stored",
"in",
"this",
"object",
"as",
"an",
"attribute",
"named",
"properties",
"."
] | python | train |
omza/azurestoragewrap | azurestoragewrap/queue.py | https://github.com/omza/azurestoragewrap/blob/976878e95d82ff0f7d8a00a5e4a7a3fb6268ab08/azurestoragewrap/queue.py#L314-L336 | def get(self, storagemodel:object, modeldefinition = None, hide = 0) -> StorageQueueModel:
""" get the next message in queue """
try:
if hide > 0:
messages = modeldefinition['queueservice'].get_messages(storagemodel._queuename, num_messages=1, visibility_timeout = hide)
else:
messages = modeldefinition['queueservice'].get_messages(storagemodel._queuename, num_messages=1)
""" parse retrieved message """
for message in messages:
storagemodel.mergemessage(message)
""" no message retrieved ?"""
if storagemodel.id is None:
storagemodel = None
except Exception as e:
storagemodel = None
msg = 'can not peek queue message: queue {} with message {} because {!s}'.format(storagemodel._queuename, storagemodel.content, e)
raise AzureStorageWrapException(msg=msg)
finally:
return storagemodel | [
"def",
"get",
"(",
"self",
",",
"storagemodel",
":",
"object",
",",
"modeldefinition",
"=",
"None",
",",
"hide",
"=",
"0",
")",
"->",
"StorageQueueModel",
":",
"try",
":",
"if",
"hide",
">",
"0",
":",
"messages",
"=",
"modeldefinition",
"[",
"'queueservice'",
"]",
".",
"get_messages",
"(",
"storagemodel",
".",
"_queuename",
",",
"num_messages",
"=",
"1",
",",
"visibility_timeout",
"=",
"hide",
")",
"else",
":",
"messages",
"=",
"modeldefinition",
"[",
"'queueservice'",
"]",
".",
"get_messages",
"(",
"storagemodel",
".",
"_queuename",
",",
"num_messages",
"=",
"1",
")",
"\"\"\" parse retrieved message \"\"\"",
"for",
"message",
"in",
"messages",
":",
"storagemodel",
".",
"mergemessage",
"(",
"message",
")",
"\"\"\" no message retrieved ?\"\"\"",
"if",
"storagemodel",
".",
"id",
"is",
"None",
":",
"storagemodel",
"=",
"None",
"except",
"Exception",
"as",
"e",
":",
"storagemodel",
"=",
"None",
"msg",
"=",
"'can not peek queue message: queue {} with message {} because {!s}'",
".",
"format",
"(",
"storagemodel",
".",
"_queuename",
",",
"storagemodel",
".",
"content",
",",
"e",
")",
"raise",
"AzureStorageWrapException",
"(",
"msg",
"=",
"msg",
")",
"finally",
":",
"return",
"storagemodel"
] | get the next message in queue | [
"get",
"the",
"next",
"message",
"in",
"queue"
] | python | train |
PSU-OIT-ARC/elasticmodels | elasticmodels/analysis.py | https://github.com/PSU-OIT-ARC/elasticmodels/blob/67870508096f66123ef10b89789bbac06571cc80/elasticmodels/analysis.py#L42-L49 | def diff_analysis(using):
"""
Returns a diff string comparing the analysis defined in ES, with
the analysis defined in Python land for the connection `using`
"""
python_analysis = collect_analysis(using)
es_analysis = existing_analysis(using)
return compare_dicts(es_analysis, python_analysis) | [
"def",
"diff_analysis",
"(",
"using",
")",
":",
"python_analysis",
"=",
"collect_analysis",
"(",
"using",
")",
"es_analysis",
"=",
"existing_analysis",
"(",
"using",
")",
"return",
"compare_dicts",
"(",
"es_analysis",
",",
"python_analysis",
")"
] | Returns a diff string comparing the analysis defined in ES, with
the analysis defined in Python land for the connection `using` | [
"Returns",
"a",
"diff",
"string",
"comparing",
"the",
"analysis",
"defined",
"in",
"ES",
"with",
"the",
"analysis",
"defined",
"in",
"Python",
"land",
"for",
"the",
"connection",
"using"
] | python | train |
rameshg87/pyremotevbox | pyremotevbox/ZSI/TC.py | https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/TC.py#L144-L157 | def SimpleHREF(self, elt, ps, tag):
'''Simple HREF for non-string and non-struct and non-array.
Parameters:
elt -- the DOM element being parsed
ps -- the ParsedSoap object.
tag --
'''
if len(_children(elt)): return elt
href = _find_href(elt)
if not href:
if self.minOccurs is 0: return None
raise EvaluateException('Required' + tag + ' missing',
ps.Backtrace(elt))
return ps.FindLocalHREF(href, elt, 0) | [
"def",
"SimpleHREF",
"(",
"self",
",",
"elt",
",",
"ps",
",",
"tag",
")",
":",
"if",
"len",
"(",
"_children",
"(",
"elt",
")",
")",
":",
"return",
"elt",
"href",
"=",
"_find_href",
"(",
"elt",
")",
"if",
"not",
"href",
":",
"if",
"self",
".",
"minOccurs",
"is",
"0",
":",
"return",
"None",
"raise",
"EvaluateException",
"(",
"'Required'",
"+",
"tag",
"+",
"' missing'",
",",
"ps",
".",
"Backtrace",
"(",
"elt",
")",
")",
"return",
"ps",
".",
"FindLocalHREF",
"(",
"href",
",",
"elt",
",",
"0",
")"
] | Simple HREF for non-string and non-struct and non-array.
Parameters:
elt -- the DOM element being parsed
ps -- the ParsedSoap object.
tag -- | [
"Simple",
"HREF",
"for",
"non",
"-",
"string",
"and",
"non",
"-",
"struct",
"and",
"non",
"-",
"array",
".",
"Parameters",
":",
"elt",
"--",
"the",
"DOM",
"element",
"being",
"parsed",
"ps",
"--",
"the",
"ParsedSoap",
"object",
".",
"tag",
"--"
] | python | train |
CalebBell/thermo | thermo/chemical.py | https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/chemical.py#L1297-L1311 | def mass_fractions(self):
r'''Dictionary of atom:mass-weighted fractional occurrence of elements.
Useful when performing mass balances. For atom-fraction occurrences, see
:obj:`atom_fractions`.
Examples
--------
>>> Chemical('water').mass_fractions
{'H': 0.11189834407236524, 'O': 0.8881016559276347}
'''
if self.__mass_fractions:
return self.__mass_fractions
else:
self.__mass_fractions = mass_fractions(self.atoms, self.MW)
return self.__mass_fractions | [
"def",
"mass_fractions",
"(",
"self",
")",
":",
"if",
"self",
".",
"__mass_fractions",
":",
"return",
"self",
".",
"__mass_fractions",
"else",
":",
"self",
".",
"__mass_fractions",
"=",
"mass_fractions",
"(",
"self",
".",
"atoms",
",",
"self",
".",
"MW",
")",
"return",
"self",
".",
"__mass_fractions"
] | r'''Dictionary of atom:mass-weighted fractional occurrence of elements.
Useful when performing mass balances. For atom-fraction occurrences, see
:obj:`atom_fractions`.
Examples
--------
>>> Chemical('water').mass_fractions
{'H': 0.11189834407236524, 'O': 0.8881016559276347} | [
"r",
"Dictionary",
"of",
"atom",
":",
"mass",
"-",
"weighted",
"fractional",
"occurence",
"of",
"elements",
".",
"Useful",
"when",
"performing",
"mass",
"balances",
".",
"For",
"atom",
"-",
"fraction",
"occurences",
"see",
":",
"obj",
":",
"atom_fractions",
"."
] | python | valid |
philippelt/netatmo-api-python | lnetatmo.py | https://github.com/philippelt/netatmo-api-python/blob/d749fca3637c07c2943aba7992f683fff1812f77/lnetatmo.py#L684-L695 | def motionDetected(self, home=None, camera=None):
"""
Return True if movement has been detected
"""
try:
cam_id = self.cameraByName(camera=camera, home=home)['id']
except TypeError:
logger.warning("personSeenByCamera: Camera name or home is unknown")
return False
if self.lastEvent[cam_id]['type'] == 'movement':
return True
return False | [
"def",
"motionDetected",
"(",
"self",
",",
"home",
"=",
"None",
",",
"camera",
"=",
"None",
")",
":",
"try",
":",
"cam_id",
"=",
"self",
".",
"cameraByName",
"(",
"camera",
"=",
"camera",
",",
"home",
"=",
"home",
")",
"[",
"'id'",
"]",
"except",
"TypeError",
":",
"logger",
".",
"warning",
"(",
"\"personSeenByCamera: Camera name or home is unknown\"",
")",
"return",
"False",
"if",
"self",
".",
"lastEvent",
"[",
"cam_id",
"]",
"[",
"'type'",
"]",
"==",
"'movement'",
":",
"return",
"True",
"return",
"False"
] | Return True if movement has been detected | [
"Return",
"True",
"if",
"movement",
"has",
"been",
"detected"
] | python | train |
relekang/python-semantic-release | semantic_release/ci_checks.py | https://github.com/relekang/python-semantic-release/blob/76123f410180599a19e7c48da413880185bbea20/semantic_release/ci_checks.py#L31-L39 | def travis(branch: str):
"""
Performs necessary checks to ensure that the travis build is one
that should create releases.
:param branch: The branch the environment should be running against.
"""
assert os.environ.get('TRAVIS_BRANCH') == branch
assert os.environ.get('TRAVIS_PULL_REQUEST') == 'false' | [
"def",
"travis",
"(",
"branch",
":",
"str",
")",
":",
"assert",
"os",
".",
"environ",
".",
"get",
"(",
"'TRAVIS_BRANCH'",
")",
"==",
"branch",
"assert",
"os",
".",
"environ",
".",
"get",
"(",
"'TRAVIS_PULL_REQUEST'",
")",
"==",
"'false'"
] | Performs necessary checks to ensure that the travis build is one
that should create releases.
:param branch: The branch the environment should be running against. | [
"Performs",
"necessary",
"checks",
"to",
"ensure",
"that",
"the",
"travis",
"build",
"is",
"one",
"that",
"should",
"create",
"releases",
"."
] | python | train |
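Given the definition shown in this record, the check can be exercised with a self-contained sketch (no import of the real package, so any decorator applied elsewhere in the library is ignored):

import os

def travis(branch):
    # mirrors the record above
    assert os.environ.get('TRAVIS_BRANCH') == branch
    assert os.environ.get('TRAVIS_PULL_REQUEST') == 'false'

os.environ['TRAVIS_BRANCH'] = 'master'
os.environ['TRAVIS_PULL_REQUEST'] = 'false'
travis('master')      # passes silently
# travis('develop')   # would raise AssertionError, since TRAVIS_BRANCH != 'develop'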
lablup/backend.ai-client-py | src/ai/backend/client/kernel.py | https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/kernel.py#L364-L418 | async def download(self, files: Sequence[Union[str, Path]],
dest: Union[str, Path] = '.',
show_progress: bool = False):
'''
Downloads the given list of files from the compute session.
:param files: The list of file paths in the compute session.
If they are relative paths, the path is calculated from
``/home/work`` in the compute session container.
:param dest: The destination directory in the client-side.
:param show_progress: Displays a progress bar during downloads.
'''
params = {}
if self.owner_access_key:
params['owner_access_key'] = self.owner_access_key
rqst = Request(self.session,
'GET', '/kernel/{}/download'.format(self.kernel_id),
params=params)
rqst.set_json({
'files': [*map(str, files)],
})
async with rqst.fetch() as resp:
chunk_size = 1 * 1024
file_names = None
tqdm_obj = tqdm(desc='Downloading files',
unit='bytes', unit_scale=True,
total=resp.content.total_bytes,
disable=not show_progress)
with tqdm_obj as pbar:
fp = None
while True:
chunk = await resp.aread(chunk_size)
if not chunk:
break
pbar.update(len(chunk))
# TODO: more elegant parsing of multipart response?
for part in chunk.split(b'\r\n'):
if part.startswith(b'--'):
if fp:
fp.close()
with tarfile.open(fp.name) as tarf:
tarf.extractall(path=dest)
file_names = tarf.getnames()
os.unlink(fp.name)
fp = tempfile.NamedTemporaryFile(suffix='.tar',
delete=False)
elif part.startswith(b'Content-') or part == b'':
continue
else:
fp.write(part)
if fp:
fp.close()
os.unlink(fp.name)
result = {'file_names': file_names}
return result | [
"async",
"def",
"download",
"(",
"self",
",",
"files",
":",
"Sequence",
"[",
"Union",
"[",
"str",
",",
"Path",
"]",
"]",
",",
"dest",
":",
"Union",
"[",
"str",
",",
"Path",
"]",
"=",
"'.'",
",",
"show_progress",
":",
"bool",
"=",
"False",
")",
":",
"params",
"=",
"{",
"}",
"if",
"self",
".",
"owner_access_key",
":",
"params",
"[",
"'owner_access_key'",
"]",
"=",
"self",
".",
"owner_access_key",
"rqst",
"=",
"Request",
"(",
"self",
".",
"session",
",",
"'GET'",
",",
"'/kernel/{}/download'",
".",
"format",
"(",
"self",
".",
"kernel_id",
")",
",",
"params",
"=",
"params",
")",
"rqst",
".",
"set_json",
"(",
"{",
"'files'",
":",
"[",
"*",
"map",
"(",
"str",
",",
"files",
")",
"]",
",",
"}",
")",
"async",
"with",
"rqst",
".",
"fetch",
"(",
")",
"as",
"resp",
":",
"chunk_size",
"=",
"1",
"*",
"1024",
"file_names",
"=",
"None",
"tqdm_obj",
"=",
"tqdm",
"(",
"desc",
"=",
"'Downloading files'",
",",
"unit",
"=",
"'bytes'",
",",
"unit_scale",
"=",
"True",
",",
"total",
"=",
"resp",
".",
"content",
".",
"total_bytes",
",",
"disable",
"=",
"not",
"show_progress",
")",
"with",
"tqdm_obj",
"as",
"pbar",
":",
"fp",
"=",
"None",
"while",
"True",
":",
"chunk",
"=",
"await",
"resp",
".",
"aread",
"(",
"chunk_size",
")",
"if",
"not",
"chunk",
":",
"break",
"pbar",
".",
"update",
"(",
"len",
"(",
"chunk",
")",
")",
"# TODO: more elegant parsing of multipart response?",
"for",
"part",
"in",
"chunk",
".",
"split",
"(",
"b'\\r\\n'",
")",
":",
"if",
"part",
".",
"startswith",
"(",
"b'--'",
")",
":",
"if",
"fp",
":",
"fp",
".",
"close",
"(",
")",
"with",
"tarfile",
".",
"open",
"(",
"fp",
".",
"name",
")",
"as",
"tarf",
":",
"tarf",
".",
"extractall",
"(",
"path",
"=",
"dest",
")",
"file_names",
"=",
"tarf",
".",
"getnames",
"(",
")",
"os",
".",
"unlink",
"(",
"fp",
".",
"name",
")",
"fp",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"'.tar'",
",",
"delete",
"=",
"False",
")",
"elif",
"part",
".",
"startswith",
"(",
"b'Content-'",
")",
"or",
"part",
"==",
"b''",
":",
"continue",
"else",
":",
"fp",
".",
"write",
"(",
"part",
")",
"if",
"fp",
":",
"fp",
".",
"close",
"(",
")",
"os",
".",
"unlink",
"(",
"fp",
".",
"name",
")",
"result",
"=",
"{",
"'file_names'",
":",
"file_names",
"}",
"return",
"result"
] | Downloads the given list of files from the compute session.
:param files: The list of file paths in the compute session.
If they are relative paths, the path is calculated from
``/home/work`` in the compute session container.
:param dest: The destination directory in the client-side.
:param show_progress: Displays a progress bar during downloads. | [
"Downloads",
"the",
"given",
"list",
"of",
"files",
"from",
"the",
"compute",
"session",
"."
] | python | train |
datastore/datastore | datastore/core/basic.py | https://github.com/datastore/datastore/blob/7ccf0cd4748001d3dbf5e6dda369b0f63e0269d3/datastore/core/basic.py#L451-L456 | def query(self, query):
'''Returns an iterable of objects matching criteria expressed in `query`.
LoggingDatastore logs the access.
'''
self.logger.info('%s: query %s' % (self, query))
return super(LoggingDatastore, self).query(query) | [
"def",
"query",
"(",
"self",
",",
"query",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'%s: query %s'",
"%",
"(",
"self",
",",
"query",
")",
")",
"return",
"super",
"(",
"LoggingDatastore",
",",
"self",
")",
".",
"query",
"(",
"query",
")"
] | Returns an iterable of objects matching criteria expressed in `query`.
LoggingDatastore logs the access. | [
"Returns",
"an",
"iterable",
"of",
"objects",
"matching",
"criteria",
"expressed",
"in",
"query",
".",
"LoggingDatastore",
"logs",
"the",
"access",
"."
] | python | train |
pmacosta/pmisc | pmisc/compat3.py | https://github.com/pmacosta/pmisc/blob/dd2bb32e59eee872f1ef2db2d9921a396ab9f50b/pmisc/compat3.py#L30-L38 | def _readlines(fname, fpointer1=open, fpointer2=open): # pragma: no cover
"""Read all lines from file."""
# fpointer1, fpointer2 arguments to ease testing
try:
with fpointer1(fname, "r") as fobj:
return fobj.readlines()
except UnicodeDecodeError: # pragma: no cover
with fpointer2(fname, "r", encoding="utf-8") as fobj:
return fobj.readlines() | [
"def",
"_readlines",
"(",
"fname",
",",
"fpointer1",
"=",
"open",
",",
"fpointer2",
"=",
"open",
")",
":",
"# pragma: no cover",
"# fpointer1, fpointer2 arguments to ease testing",
"try",
":",
"with",
"fpointer1",
"(",
"fname",
",",
"\"r\"",
")",
"as",
"fobj",
":",
"return",
"fobj",
".",
"readlines",
"(",
")",
"except",
"UnicodeDecodeError",
":",
"# pragma: no cover",
"with",
"fpointer2",
"(",
"fname",
",",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"fobj",
":",
"return",
"fobj",
".",
"readlines",
"(",
")"
] | Read all lines from file. | [
"Read",
"all",
"lines",
"from",
"file",
"."
] | python | train |
scot-dev/scot | scot/datatools.py | https://github.com/scot-dev/scot/blob/48598b79d4400dad893b134cd2194715511facda/scot/datatools.py#L28-L68 | def cut_segments(x2d, tr, start, stop):
"""Cut continuous signal into segments.
Parameters
----------
x2d : array, shape (m, n)
Input data with m signals and n samples.
tr : list of int
Trigger positions.
start : int
Window start (offset relative to trigger).
stop : int
Window end (offset relative to trigger).
Returns
-------
x3d : array, shape (len(tr), m, stop-start)
Segments cut from data. Individual segments are stacked along the first
dimension.
See also
--------
cat_trials : Concatenate segments.
Examples
--------
>>> data = np.random.randn(5, 1000) # 5 channels, 1000 samples
>>> tr = [750, 500, 250] # three segments
>>> x3d = cut_segments(data, tr, 50, 100) # each segment is 50 samples
>>> x3d.shape
(3, 5, 50)
"""
if start != int(start):
raise ValueError("start index must be an integer")
if stop != int(stop):
raise ValueError("stop index must be an integer")
x2d = np.atleast_2d(x2d)
tr = np.asarray(tr, dtype=int).ravel()
win = np.arange(start, stop, dtype=int)
return np.concatenate([x2d[np.newaxis, :, t + win] for t in tr]) | [
"def",
"cut_segments",
"(",
"x2d",
",",
"tr",
",",
"start",
",",
"stop",
")",
":",
"if",
"start",
"!=",
"int",
"(",
"start",
")",
":",
"raise",
"ValueError",
"(",
"\"start index must be an integer\"",
")",
"if",
"stop",
"!=",
"int",
"(",
"stop",
")",
":",
"raise",
"ValueError",
"(",
"\"stop index must be an integer\"",
")",
"x2d",
"=",
"np",
".",
"atleast_2d",
"(",
"x2d",
")",
"tr",
"=",
"np",
".",
"asarray",
"(",
"tr",
",",
"dtype",
"=",
"int",
")",
".",
"ravel",
"(",
")",
"win",
"=",
"np",
".",
"arange",
"(",
"start",
",",
"stop",
",",
"dtype",
"=",
"int",
")",
"return",
"np",
".",
"concatenate",
"(",
"[",
"x2d",
"[",
"np",
".",
"newaxis",
",",
":",
",",
"t",
"+",
"win",
"]",
"for",
"t",
"in",
"tr",
"]",
")"
] | Cut continuous signal into segments.
Parameters
----------
x2d : array, shape (m, n)
Input data with m signals and n samples.
tr : list of int
Trigger positions.
start : int
Window start (offset relative to trigger).
stop : int
Window end (offset relative to trigger).
Returns
-------
x3d : array, shape (len(tr), m, stop-start)
Segments cut from data. Individual segments are stacked along the first
dimension.
See also
--------
cat_trials : Concatenate segments.
Examples
--------
>>> data = np.random.randn(5, 1000) # 5 channels, 1000 samples
>>> tr = [750, 500, 250] # three segments
>>> x3d = cut_segments(data, tr, 50, 100) # each segment is 50 samples
>>> x3d.shape
(3, 5, 50) | [
"Cut",
"continuous",
"signal",
"into",
"segments",
"."
] | python | train |
mcs07/PubChemPy | pubchempy.py | https://github.com/mcs07/PubChemPy/blob/e3c4f4a9b6120433e5cc3383464c7a79e9b2b86e/pubchempy.py#L795-L799 | def sids(self):
"""Requires an extra request. Result is cached."""
if self.cid:
results = get_json(self.cid, operation='sids')
return results['InformationList']['Information'][0]['SID'] if results else [] | [
"def",
"sids",
"(",
"self",
")",
":",
"if",
"self",
".",
"cid",
":",
"results",
"=",
"get_json",
"(",
"self",
".",
"cid",
",",
"operation",
"=",
"'sids'",
")",
"return",
"results",
"[",
"'InformationList'",
"]",
"[",
"'Information'",
"]",
"[",
"0",
"]",
"[",
"'SID'",
"]",
"if",
"results",
"else",
"[",
"]"
] | Requires an extra request. Result is cached. | [
"Requires",
"an",
"extra",
"request",
".",
"Result",
"is",
"cached",
"."
] | python | train |
cokelaer/spectrum | src/spectrum/window.py | https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/src/spectrum/window.py#L1206-L1227 | def window_riesz(N):
r"""Riesz tapering window
:param N: window length
.. math:: w(n) = 1 - \left| \frac{n}{N/2} \right|^2
with :math:`-N/2 \leq n \leq N/2`.
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'riesz')
.. seealso:: :func:`create_window`, :class:`Window`
"""
n = linspace(-N/2., (N)/2., N)
w = 1 - abs(n/(N/2.))**2.
return w | [
"def",
"window_riesz",
"(",
"N",
")",
":",
"n",
"=",
"linspace",
"(",
"-",
"N",
"/",
"2.",
",",
"(",
"N",
")",
"/",
"2.",
",",
"N",
")",
"w",
"=",
"1",
"-",
"abs",
"(",
"n",
"/",
"(",
"N",
"/",
"2.",
")",
")",
"**",
"2.",
"return",
"w"
] | r"""Riesz tapering window
:param N: window length
.. math:: w(n) = 1 - \left| \frac{n}{N/2} \right|^2
with :math:`-N/2 \leq n \leq N/2`.
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'riesz')
.. seealso:: :func:`create_window`, :class:`Window` | [
"r",
"Riesz",
"tapering",
"window"
] | python | valid |
tensorflow/hub | tensorflow_hub/estimator.py | https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/estimator.py#L171-L214 | def _make_estimator_serving_session(estimator, serving_input_fn,
checkpoint_path):
"""Returns a session constructed using `estimator` and `serving_input_fn`.
The Estimator API does not provide an API to construct a graph and session,
making it necessary for this function to replicate how an estimator builds
a graph.
This code is based on `Estimator.export_savedmodel` (another function that
has to replicate how an estimator builds a graph).
Args:
estimator: tf.Estimator to use when constructing the session.
serving_input_fn: A function that takes no arguments and returns a
`ServingInputReceiver`. It is used to construct the session.
checkpoint_path: The checkpoint path to restore in the session. Must not
be None.
"""
with tf.Graph().as_default() as g:
mode = tf_v1.estimator.ModeKeys.PREDICT
tf_v1.train.create_global_step(g)
tf_v1.set_random_seed(estimator.config.tf_random_seed)
serving_input_receiver = serving_input_fn()
estimator_spec = estimator.model_fn(
features=serving_input_receiver.features,
labels=None,
mode=mode,
config=estimator.config)
# pylint: disable=protected-access
# Note that MonitoredSession(), despite the name is not a Session, and
# can't be used to export Modules as one can't use them with Savers.
# As so this must use a raw tf.Session().
session = tf_v1.Session(config=estimator._session_config)
# pylint: enable=protected-access
with session.as_default():
# TODO(b/71839662): Consider if this needs to support TPUEstimatorSpec
# which does not have a scaffold member.
saver_for_restore = estimator_spec.scaffold.saver or tf_v1.train.Saver(
sharded=True)
saver_for_restore.restore(session, checkpoint_path)
return session | [
"def",
"_make_estimator_serving_session",
"(",
"estimator",
",",
"serving_input_fn",
",",
"checkpoint_path",
")",
":",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
"as",
"g",
":",
"mode",
"=",
"tf_v1",
".",
"estimator",
".",
"ModeKeys",
".",
"PREDICT",
"tf_v1",
".",
"train",
".",
"create_global_step",
"(",
"g",
")",
"tf_v1",
".",
"set_random_seed",
"(",
"estimator",
".",
"config",
".",
"tf_random_seed",
")",
"serving_input_receiver",
"=",
"serving_input_fn",
"(",
")",
"estimator_spec",
"=",
"estimator",
".",
"model_fn",
"(",
"features",
"=",
"serving_input_receiver",
".",
"features",
",",
"labels",
"=",
"None",
",",
"mode",
"=",
"mode",
",",
"config",
"=",
"estimator",
".",
"config",
")",
"# pylint: disable=protected-access",
"# Note that MonitoredSession(), despite the name is not a Session, and",
"# can't be used to export Modules as one can't use them with Savers.",
"# As so this must use a raw tf.Session().",
"session",
"=",
"tf_v1",
".",
"Session",
"(",
"config",
"=",
"estimator",
".",
"_session_config",
")",
"# pylint: enable=protected-access",
"with",
"session",
".",
"as_default",
"(",
")",
":",
"# TODO(b/71839662): Consider if this needs to support TPUEstimatorSpec",
"# which does not have a scaffold member.",
"saver_for_restore",
"=",
"estimator_spec",
".",
"scaffold",
".",
"saver",
"or",
"tf_v1",
".",
"train",
".",
"Saver",
"(",
"sharded",
"=",
"True",
")",
"saver_for_restore",
".",
"restore",
"(",
"session",
",",
"checkpoint_path",
")",
"return",
"session"
] | Returns a session constructed using `estimator` and `serving_input_fn`.
The Estimator API does not provide an API to construct a graph and session,
making it necessary for this function to replicate how an estimator builds
a graph.
This code is based on `Estimator.export_savedmodel` (another function that
has to replicate how an estimator builds a graph).
Args:
estimator: tf.Estimator to use when constructing the session.
serving_input_fn: A function that takes no arguments and returns a
`ServingInputReceiver`. It is used to construct the session.
checkpoint_path: The checkpoint path to restore in the session. Must not
be None. | [
"Returns",
"a",
"session",
"constructed",
"using",
"estimator",
"and",
"serving_input_fn",
"."
] | python | train |
google/grumpy | third_party/stdlib/fpformat.py | https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/fpformat.py#L90-L104 | def fix(x, digs):
"""Format x as [-]ddd.ddd with 'digs' digits after the point
and at least one digit before.
If digs <= 0, the point is suppressed."""
if type(x) != type(''): x = repr(x)
try:
sign, intpart, fraction, expo = extract(x)
except NotANumber:
return x
intpart, fraction = unexpo(intpart, fraction, expo)
intpart, fraction = roundfrac(intpart, fraction, digs)
while intpart and intpart[0] == '0': intpart = intpart[1:]
if intpart == '': intpart = '0'
if digs > 0: return sign + intpart + '.' + fraction
else: return sign + intpart | [
"def",
"fix",
"(",
"x",
",",
"digs",
")",
":",
"if",
"type",
"(",
"x",
")",
"!=",
"type",
"(",
"''",
")",
":",
"x",
"=",
"repr",
"(",
"x",
")",
"try",
":",
"sign",
",",
"intpart",
",",
"fraction",
",",
"expo",
"=",
"extract",
"(",
"x",
")",
"except",
"NotANumber",
":",
"return",
"x",
"intpart",
",",
"fraction",
"=",
"unexpo",
"(",
"intpart",
",",
"fraction",
",",
"expo",
")",
"intpart",
",",
"fraction",
"=",
"roundfrac",
"(",
"intpart",
",",
"fraction",
",",
"digs",
")",
"while",
"intpart",
"and",
"intpart",
"[",
"0",
"]",
"==",
"'0'",
":",
"intpart",
"=",
"intpart",
"[",
"1",
":",
"]",
"if",
"intpart",
"==",
"''",
":",
"intpart",
"=",
"'0'",
"if",
"digs",
">",
"0",
":",
"return",
"sign",
"+",
"intpart",
"+",
"'.'",
"+",
"fraction",
"else",
":",
"return",
"sign",
"+",
"intpart"
] | Format x as [-]ddd.ddd with 'digs' digits after the point
and at least one digit before.
If digs <= 0, the point is suppressed. | [
"Format",
"x",
"as",
"[",
"-",
"]",
"ddd",
".",
"ddd",
"with",
"digs",
"digits",
"after",
"the",
"point",
"and",
"at",
"least",
"one",
"digit",
"before",
".",
"If",
"digs",
"<",
"=",
"0",
"the",
"point",
"is",
"suppressed",
"."
] | python | valid |
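The `fix` helper in this row pads or rounds the fractional part to exactly `digs` digits and strips leading zeros from the integer part. A couple of illustrative calls are sketched below; they assume a Python 2 interpreter (or Grumpy's bundled copy of the module), since `fpformat` was removed from the Python 3 standard library, and the expected outputs follow directly from the rounding/padding logic shown above.

```python
# Assumes Python 2 (or Grumpy), where the deprecated fpformat module is available.
import fpformat

print(fpformat.fix(3.14159, 2))  # '3.14'   - fraction rounded to 2 digits
print(fpformat.fix('42', 3))     # '42.000' - string input accepted, fraction zero-padded
```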
pantsbuild/pants | src/python/pants/build_graph/target.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/build_graph/target.py#L377-L382 | def assert_list(self, putative_list, expected_type=string_types, key_arg=None):
"""
:API: public
"""
return assert_list(putative_list, expected_type, key_arg=key_arg,
raise_type=lambda msg: TargetDefinitionException(self, msg)) | [
"def",
"assert_list",
"(",
"self",
",",
"putative_list",
",",
"expected_type",
"=",
"string_types",
",",
"key_arg",
"=",
"None",
")",
":",
"return",
"assert_list",
"(",
"putative_list",
",",
"expected_type",
",",
"key_arg",
"=",
"key_arg",
",",
"raise_type",
"=",
"lambda",
"msg",
":",
"TargetDefinitionException",
"(",
"self",
",",
"msg",
")",
")"
] | :API: public | [
":",
"API",
":",
"public"
] | python | train |
bitshares/uptick | uptick/vesting.py | https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/vesting.py#L56-L63 | def reserve(ctx, amount, symbol, account):
""" Reserve/Burn tokens
"""
print_tx(
ctx.bitshares.reserve(
Amount(amount, symbol, bitshares_instance=ctx.bitshares), account=account
)
) | [
"def",
"reserve",
"(",
"ctx",
",",
"amount",
",",
"symbol",
",",
"account",
")",
":",
"print_tx",
"(",
"ctx",
".",
"bitshares",
".",
"reserve",
"(",
"Amount",
"(",
"amount",
",",
"symbol",
",",
"bitshares_instance",
"=",
"ctx",
".",
"bitshares",
")",
",",
"account",
"=",
"account",
")",
")"
] | Reserve/Burn tokens | [
"Reserve",
"/",
"Burn",
"tokens"
] | python | train |
not-na/peng3d | peng3d/gui/menus.py | https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/gui/menus.py#L473-L487 | def update_progressbar(self):
"""
Updates the progressbar by re-calculating the label.
It is not required to manually call this method since setting any of the
properties of this class will automatically trigger a re-calculation.
"""
n,nmin,nmax = self.wprogressbar.n,self.wprogressbar.nmin,self.wprogressbar.nmax
if (nmax-nmin)==0:
percent = 0 # prevents ZeroDivisionError
else:
percent = max(min((n-nmin)/(nmax-nmin),1.),0.)*100
dat = {"value":round(n,4),"n":round(n,4),"nmin":round(nmin,4),"nmax":round(nmax,4),"percent":round(percent,4),"p":round(percent,4)}
txt = self._label_progressbar.format(**dat)
self.wprogresslabel.label = txt | [
"def",
"update_progressbar",
"(",
"self",
")",
":",
"n",
",",
"nmin",
",",
"nmax",
"=",
"self",
".",
"wprogressbar",
".",
"n",
",",
"self",
".",
"wprogressbar",
".",
"nmin",
",",
"self",
".",
"wprogressbar",
".",
"nmax",
"if",
"(",
"nmax",
"-",
"nmin",
")",
"==",
"0",
":",
"percent",
"=",
"0",
"# prevents ZeroDivisionError",
"else",
":",
"percent",
"=",
"max",
"(",
"min",
"(",
"(",
"n",
"-",
"nmin",
")",
"/",
"(",
"nmax",
"-",
"nmin",
")",
",",
"1.",
")",
",",
"0.",
")",
"*",
"100",
"dat",
"=",
"{",
"\"value\"",
":",
"round",
"(",
"n",
",",
"4",
")",
",",
"\"n\"",
":",
"round",
"(",
"n",
",",
"4",
")",
",",
"\"nmin\"",
":",
"round",
"(",
"nmin",
",",
"4",
")",
",",
"\"nmax\"",
":",
"round",
"(",
"nmax",
",",
"4",
")",
",",
"\"percent\"",
":",
"round",
"(",
"percent",
",",
"4",
")",
",",
"\"p\"",
":",
"round",
"(",
"percent",
",",
"4",
")",
"}",
"txt",
"=",
"self",
".",
"_label_progressbar",
".",
"format",
"(",
"*",
"*",
"dat",
")",
"self",
".",
"wprogresslabel",
".",
"label",
"=",
"txt"
] | Updates the progressbar by re-calculating the label.
It is not required to manually call this method since setting any of the
properties of this class will automatically trigger a re-calculation. | [
"Updates",
"the",
"progressbar",
"by",
"re",
"-",
"calculating",
"the",
"label",
".",
"It",
"is",
"not",
"required",
"to",
"manually",
"call",
"this",
"method",
"since",
"setting",
"any",
"of",
"the",
"properties",
"of",
"this",
"class",
"will",
"automatically",
"trigger",
"a",
"re",
"-",
"calculation",
"."
] | python | test |
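The label text produced by `update_progressbar` is driven entirely by the clamped-percentage arithmetic above. Below is a tiny standalone sketch of that calculation; the numeric values and the format string are hypothetical stand-ins, not the class defaults.

```python
# Standalone sketch of the percentage/label arithmetic; all values are hypothetical.
n, nmin, nmax = 42.0, 0.0, 128.0
percent = 0 if (nmax - nmin) == 0 else max(min((n - nmin) / (nmax - nmin), 1.), 0.) * 100
label_progressbar = "{n}/{nmax} ({p}%)"  # assumed format string, not the widget's default
print(label_progressbar.format(n=round(n, 4), nmax=round(nmax, 4), p=round(percent, 4)))
# -> 42.0/128.0 (32.8125%)
```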
googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/trainer/task.py | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/trainer/task.py#L30-L57 | def get_reader_input_fn(train_config, preprocess_output_dir, model_type,
data_paths, batch_size, shuffle, num_epochs=None):
"""Builds input layer for training."""
def get_input_features():
"""Read the input features from the given data paths."""
_, examples = util.read_examples(
input_files=data_paths,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs)
features = util.parse_example_tensor(examples=examples,
train_config=train_config,
keep_target=True)
target_name = train_config['target_column']
target = features.pop(target_name)
features, target = util.preprocess_input(
features=features,
target=target,
train_config=train_config,
preprocess_output_dir=preprocess_output_dir,
model_type=model_type)
return features, target
# Return a function to input the feaures into the model from a data path.
return get_input_features | [
"def",
"get_reader_input_fn",
"(",
"train_config",
",",
"preprocess_output_dir",
",",
"model_type",
",",
"data_paths",
",",
"batch_size",
",",
"shuffle",
",",
"num_epochs",
"=",
"None",
")",
":",
"def",
"get_input_features",
"(",
")",
":",
"\"\"\"Read the input features from the given data paths.\"\"\"",
"_",
",",
"examples",
"=",
"util",
".",
"read_examples",
"(",
"input_files",
"=",
"data_paths",
",",
"batch_size",
"=",
"batch_size",
",",
"shuffle",
"=",
"shuffle",
",",
"num_epochs",
"=",
"num_epochs",
")",
"features",
"=",
"util",
".",
"parse_example_tensor",
"(",
"examples",
"=",
"examples",
",",
"train_config",
"=",
"train_config",
",",
"keep_target",
"=",
"True",
")",
"target_name",
"=",
"train_config",
"[",
"'target_column'",
"]",
"target",
"=",
"features",
".",
"pop",
"(",
"target_name",
")",
"features",
",",
"target",
"=",
"util",
".",
"preprocess_input",
"(",
"features",
"=",
"features",
",",
"target",
"=",
"target",
",",
"train_config",
"=",
"train_config",
",",
"preprocess_output_dir",
"=",
"preprocess_output_dir",
",",
"model_type",
"=",
"model_type",
")",
"return",
"features",
",",
"target",
"# Return a function to input the feaures into the model from a data path.",
"return",
"get_input_features"
] | Builds input layer for training. | [
"Builds",
"input",
"layer",
"for",
"training",
"."
] | python | train |
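`get_reader_input_fn` follows the usual Estimator pattern of returning a zero-argument `input_fn` closure that captures its configuration. The toy version below illustrates only that closure pattern; it does no TensorFlow I/O, and every name in it is a placeholder rather than part of the mltoolbox API.

```python
# Toy illustration of the input_fn closure pattern; a stand-in, not the real TFRecord reader.
def make_input_fn(data_paths, batch_size):
    def input_fn():
        # The real get_input_features() would read and parse examples here.
        return {'paths': list(data_paths), 'batch_size': batch_size}, None
    return input_fn

train_input_fn = make_input_fn(['train-00000.tfrecord'], batch_size=64)
features, target = train_input_fn()
print(features, target)  # {'paths': ['train-00000.tfrecord'], 'batch_size': 64} None  (Python 3)
```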
annoviko/pyclustering | pyclustering/gcolor/sync.py | https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/gcolor/sync.py#L116-L127 | def _create_connections(self, graph_matrix):
"""!
@brief Creates connection in the network in line with graph.
@param[in] graph_matrix (list): Matrix representation of the graph.
"""
for row in range(0, len(graph_matrix)):
for column in range (0, len(graph_matrix[row])):
if (graph_matrix[row][column] > 0):
self.set_connection(row, column); | [
"def",
"_create_connections",
"(",
"self",
",",
"graph_matrix",
")",
":",
"for",
"row",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"graph_matrix",
")",
")",
":",
"for",
"column",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"graph_matrix",
"[",
"row",
"]",
")",
")",
":",
"if",
"(",
"graph_matrix",
"[",
"row",
"]",
"[",
"column",
"]",
">",
"0",
")",
":",
"self",
".",
"set_connection",
"(",
"row",
",",
"column",
")"
] | !
@brief Creates connection in the network in line with graph.
@param[in] graph_matrix (list): Matrix representation of the graph. | [
"!"
] | python | valid |
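The nested loop in `_create_connections` simply turns every positive cell of the adjacency matrix into a network connection. The snippet below reproduces that selection on a hypothetical 4-node path graph, without the oscillator network class itself.

```python
# Hypothetical adjacency matrix for a 4-node path graph 0-1-2-3.
graph_matrix = [[0, 1, 0, 0],
                [1, 0, 1, 0],
                [0, 1, 0, 1],
                [0, 0, 1, 0]]

# Mirrors the nested loop above: each positive cell (row, column) becomes a connection.
connections = [(row, column)
               for row in range(len(graph_matrix))
               for column in range(len(graph_matrix[row]))
               if graph_matrix[row][column] > 0]
print(connections)  # [(0, 1), (1, 0), (1, 2), (2, 1), (2, 3), (3, 2)]
```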
fboender/ansible-cmdb | src/ansible-cmdb.py | https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/src/ansible-cmdb.py#L48-L65 | def get_data_dir():
"""
Find out our installation prefix and data directory. These can be in
different places depending on how ansible-cmdb was installed.
"""
data_dir_paths = [
os.path.join(os.path.dirname(ansiblecmdb.__file__), 'data'),
os.path.join(os.path.dirname(sys.argv[0]), '..', 'lib', 'ansiblecmdb', 'data'),
'/usr/local/lib/ansiblecmdb/data',
'/usr/lib/ansiblecmdb/data',
]
data_dir = util.find_path(data_dir_paths, 'tpl/html_fancy.tpl')
if not data_dir:
sys.stdout.write("Couldn't find the data dir for the templates. I tried: {0}\n".format(", ".join(data_dir_paths)))
sys.exit(1)
return data_dir | [
"def",
"get_data_dir",
"(",
")",
":",
"data_dir_paths",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"ansiblecmdb",
".",
"__file__",
")",
",",
"'data'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
",",
"'..'",
",",
"'lib'",
",",
"'ansiblecmdb'",
",",
"'data'",
")",
",",
"'/usr/local/lib/ansiblecmdb/data'",
",",
"'/usr/lib/ansiblecmdb/data'",
",",
"]",
"data_dir",
"=",
"util",
".",
"find_path",
"(",
"data_dir_paths",
",",
"'tpl/html_fancy.tpl'",
")",
"if",
"not",
"data_dir",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Couldn't find the data dir for the templates. I tried: {0}\\n\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"data_dir_paths",
")",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"return",
"data_dir"
] | Find out our installation prefix and data directory. These can be in
different places depending on how ansible-cmdb was installed. | [
"Find",
"out",
"our",
"installation",
"prefix",
"and",
"data",
"directory",
".",
"These",
"can",
"be",
"in",
"different",
"places",
"depending",
"on",
"how",
"ansible",
"-",
"cmdb",
"was",
"installed",
"."
] | python | train |
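`get_data_dir` relies on `util.find_path` to probe each candidate directory for the template file `tpl/html_fancy.tpl`. The sketch below captures that probe-each-candidate idea; it is an assumption about what `find_path` does, not ansible-cmdb's actual implementation.

```python
import os

# Minimal sketch of the lookup idea; assumed behaviour, not the project's real util.find_path.
def find_path(paths, probe_file):
    for candidate in paths:
        if os.path.isfile(os.path.join(candidate, probe_file)):
            return candidate
    return None

print(find_path(['/usr/local/lib/ansiblecmdb/data', '/usr/lib/ansiblecmdb/data'],
                'tpl/html_fancy.tpl'))  # a data dir path, or None if not installed here
```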