repo (stringlengths 7–54) | path (stringlengths 4–192) | url (stringlengths 87–284) | code (stringlengths 78–104k) | code_tokens (list) | docstring (stringlengths 1–46.9k) | docstring_tokens (list) | language (stringclasses 1 value) | partition (stringclasses 3 values) |
---|---|---|---|---|---|---|---|---|
google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/signed_binary_utils.py#L278-L303 | def StreamSignedBinaryContents(blob_iterator,
                               chunk_size = 1024
                              ):
  """Yields the contents of the given binary in chunks of the given size.
  Args:
    blob_iterator: An Iterator over all the binary's blobs.
    chunk_size: Size, in bytes, of the chunks to yield.
  """
  all_blobs_read = False
  byte_buffer = io.BytesIO()
  while not all_blobs_read or byte_buffer.getvalue():
    while not all_blobs_read and byte_buffer.tell() < chunk_size:
      try:
        blob = next(blob_iterator)
      except StopIteration:
        all_blobs_read = True
        break
      byte_buffer.write(blob.data)
    if byte_buffer.tell() > 0:
      # Yield a chunk of the signed binary and reset the buffer to contain
      # only data that hasn't been sent yet.
      byte_buffer.seek(0)
      yield byte_buffer.read(chunk_size)
      byte_buffer = io.BytesIO(byte_buffer.read())
      byte_buffer.seek(0, io.SEEK_END) | [
"def",
"StreamSignedBinaryContents",
"(",
"blob_iterator",
",",
"chunk_size",
"=",
"1024",
")",
":",
"all_blobs_read",
"=",
"False",
"byte_buffer",
"=",
"io",
".",
"BytesIO",
"(",
")",
"while",
"not",
"all_blobs_read",
"or",
"byte_buffer",
".",
"getvalue",
"(",
")",
":",
"while",
"not",
"all_blobs_read",
"and",
"byte_buffer",
".",
"tell",
"(",
")",
"<",
"chunk_size",
":",
"try",
":",
"blob",
"=",
"next",
"(",
"blob_iterator",
")",
"except",
"StopIteration",
":",
"all_blobs_read",
"=",
"True",
"break",
"byte_buffer",
".",
"write",
"(",
"blob",
".",
"data",
")",
"if",
"byte_buffer",
".",
"tell",
"(",
")",
">",
"0",
":",
"# Yield a chunk of the signed binary and reset the buffer to contain",
"# only data that hasn't been sent yet.",
"byte_buffer",
".",
"seek",
"(",
"0",
")",
"yield",
"byte_buffer",
".",
"read",
"(",
"chunk_size",
")",
"byte_buffer",
"=",
"io",
".",
"BytesIO",
"(",
"byte_buffer",
".",
"read",
"(",
")",
")",
"byte_buffer",
".",
"seek",
"(",
"0",
",",
"io",
".",
"SEEK_END",
")"
]
| Yields the contents of the given binary in chunks of the given size.
Args:
blob_iterator: An Iterator over all the binary's blobs.
chunk_size: Size, in bytes, of the chunks to yield. | [
"Yields",
"the",
"contents",
"of",
"the",
"given",
"binary",
"in",
"chunks",
"of",
"the",
"given",
"size",
"."
]
| python | train |
log2timeline/plaso | plaso/lib/lexer.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/lib/lexer.py#L409-L423 | def StringEscape(self, string, match, **unused_kwargs):
  """Escape backslashes found inside a string quote.
  Backslashes followed by anything other than ['"rnbt] will just be included
  in the string.
  Args:
    string: The string that matched.
    match: the match object (instance of re.MatchObject).
           Where match.group(1) contains the escaped code.
  """
  if match.group(1) in '\'"rnbt':
    self.string += string.decode('unicode_escape')
  else:
    self.string += string | [
"def",
"StringEscape",
"(",
"self",
",",
"string",
",",
"match",
",",
"*",
"*",
"unused_kwargs",
")",
":",
"if",
"match",
".",
"group",
"(",
"1",
")",
"in",
"'\\'\"rnbt'",
":",
"self",
".",
"string",
"+=",
"string",
".",
"decode",
"(",
"'unicode_escape'",
")",
"else",
":",
"self",
".",
"string",
"+=",
"string"
]
| Escape backslashes found inside a string quote.
Backslashes followed by anything other than ['"rnbt] will just be included
in the string.
Args:
string: The string that matched.
match: the match object (instance of re.MatchObject).
Where match.group(1) contains the escaped code. | [
"Escape",
"backslashes",
"found",
"inside",
"a",
"string",
"quote",
"."
]
| python | train |
user-cont/colin | colin/utils/cmd_tools.py | https://github.com/user-cont/colin/blob/00bb80e6e91522e15361935f813e8cf13d7e76dc/colin/utils/cmd_tools.py#L100-L124 | def exit_after(s):
    """
    Use as decorator to exit process if
    function takes longer than s seconds.
    Direct call is available via exit_after(TIMEOUT_IN_S)(fce)(args).
    Inspired by https://stackoverflow.com/a/31667005
    """
    def outer(fn):
        def inner(*args, **kwargs):
            timer = threading.Timer(s, thread.interrupt_main)
            timer.start()
            try:
                result = fn(*args, **kwargs)
            except KeyboardInterrupt:
                raise TimeoutError("Function '{}' hit the timeout ({}s).".format(fn.__name__, s))
            finally:
                timer.cancel()
            return result
        return inner
    return outer | [
"def",
"exit_after",
"(",
"s",
")",
":",
"def",
"outer",
"(",
"fn",
")",
":",
"def",
"inner",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"timer",
"=",
"threading",
".",
"Timer",
"(",
"s",
",",
"thread",
".",
"interrupt_main",
")",
"timer",
".",
"start",
"(",
")",
"try",
":",
"result",
"=",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"KeyboardInterrupt",
":",
"raise",
"TimeoutError",
"(",
"\"Function '{}' hit the timeout ({}s).\"",
".",
"format",
"(",
"fn",
".",
"__name__",
",",
"s",
")",
")",
"finally",
":",
"timer",
".",
"cancel",
"(",
")",
"return",
"result",
"return",
"inner",
"return",
"outer"
]
| Use as decorator to exit process if
function takes longer than s seconds.
Direct call is available via exit_after(TIMEOUT_IN_S)(fce)(args).
Inspired by https://stackoverflow.com/a/31667005 | [
"Use",
"as",
"decorator",
"to",
"exit",
"process",
"if",
"function",
"takes",
"longer",
"than",
"s",
"seconds",
"."
]
| python | train |
jaredLunde/vital-tools | vital/debug/stats.py | https://github.com/jaredLunde/vital-tools/blob/ea924c9bbb6ec22aa66f8095f018b1ee0099ac04/vital/debug/stats.py#L19-L25 | def mean(data):
    """Return the sample arithmetic mean of data."""
    #: http://stackoverflow.com/a/27758326
    n = len(data)
    if n < 1:
        raise ValueError('mean requires at least one data point')
    return sum(data)/n | [
"def",
"mean",
"(",
"data",
")",
":",
"#: http://stackoverflow.com/a/27758326",
"n",
"=",
"len",
"(",
"data",
")",
"if",
"n",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'mean requires at least one data point'",
")",
"return",
"sum",
"(",
"data",
")",
"/",
"n"
]
| Return the sample arithmetic mean of data. | [
"Return",
"the",
"sample",
"arithmetic",
"mean",
"of",
"data",
"."
]
| python | train |
KelSolaar/Foundations | foundations/common.py | https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/common.py#L86-L97 | def ordered_uniqify(sequence):
    """
    Uniqifies the given hashable sequence while preserving its order.
    :param sequence: Sequence.
    :type sequence: object
    :return: Uniqified sequence.
    :rtype: list
    """
    items = set()
    return [key for key in sequence if key not in items and not items.add(key)] | [
"def",
"ordered_uniqify",
"(",
"sequence",
")",
":",
"items",
"=",
"set",
"(",
")",
"return",
"[",
"key",
"for",
"key",
"in",
"sequence",
"if",
"key",
"not",
"in",
"items",
"and",
"not",
"items",
".",
"add",
"(",
"key",
")",
"]"
]
| Uniqifies the given hashable sequence while preserving its order.
:param sequence: Sequence.
:type sequence: object
:return: Uniqified sequence.
:rtype: list | [
"Uniqifies",
"the",
"given",
"hashable",
"sequence",
"while",
"preserving",
"its",
"order",
"."
]
| python | train |
saltstack/salt | salt/ext/ipaddress.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/ext/ipaddress.py#L1249-L1278 | def _is_valid_netmask(self, netmask):
    """Verify that the netmask is valid.
    Args:
        netmask: A string, either a prefix or dotted decimal
            netmask.
    Returns:
        A boolean, True if the prefix represents a valid IPv4
        netmask.
    """
    mask = netmask.split('.')
    if len(mask) == 4:
        try:
            for x in mask:
                if int(x) not in self._valid_mask_octets:
                    return False
        except ValueError:
            # Found something that isn't an integer or isn't valid
            return False
        for idx, y in enumerate(mask):
            if idx > 0 and y > mask[idx - 1]:
                return False
        return True
    try:
        netmask = int(netmask)
    except ValueError:
        return False
    return 0 <= netmask <= self._max_prefixlen | [
"def",
"_is_valid_netmask",
"(",
"self",
",",
"netmask",
")",
":",
"mask",
"=",
"netmask",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"mask",
")",
"==",
"4",
":",
"try",
":",
"for",
"x",
"in",
"mask",
":",
"if",
"int",
"(",
"x",
")",
"not",
"in",
"self",
".",
"_valid_mask_octets",
":",
"return",
"False",
"except",
"ValueError",
":",
"# Found something that isn't an integer or isn't valid",
"return",
"False",
"for",
"idx",
",",
"y",
"in",
"enumerate",
"(",
"mask",
")",
":",
"if",
"idx",
">",
"0",
"and",
"y",
">",
"mask",
"[",
"idx",
"-",
"1",
"]",
":",
"return",
"False",
"return",
"True",
"try",
":",
"netmask",
"=",
"int",
"(",
"netmask",
")",
"except",
"ValueError",
":",
"return",
"False",
"return",
"0",
"<=",
"netmask",
"<=",
"self",
".",
"_max_prefixlen"
]
| Verify that the netmask is valid.
Args:
netmask: A string, either a prefix or dotted decimal
netmask.
Returns:
A boolean, True if the prefix represents a valid IPv4
netmask. | [
"Verify",
"that",
"the",
"netmask",
"is",
"valid",
"."
]
| python | train |
pymc-devs/pymc | pymc/gp/cov_funs/cov_utils.py | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/gp/cov_funs/cov_utils.py#L23-L49 | def regularize_array(A):
    """
    Takes an np.ndarray as an input.
    - If the array is one-dimensional, it's assumed to be an array of input values.
    - If the array is more than one-dimensional, its last index is assumed to curse
    over spatial dimension.
    Either way, the return value is at least two dimensional. A.shape[-1] gives the
    number of spatial dimensions.
    """
    if not isinstance(A,np.ndarray):
        A = np.array(A, dtype=float)
    else:
        A = np.asarray(A, dtype=float)
    if len(A.shape) <= 1:
        return A.reshape(-1,1)
    elif A.shape[-1]>1:
        return A.reshape(-1, A.shape[-1])
    else:
        return A | [
"def",
"regularize_array",
"(",
"A",
")",
":",
"if",
"not",
"isinstance",
"(",
"A",
",",
"np",
".",
"ndarray",
")",
":",
"A",
"=",
"np",
".",
"array",
"(",
"A",
",",
"dtype",
"=",
"float",
")",
"else",
":",
"A",
"=",
"np",
".",
"asarray",
"(",
"A",
",",
"dtype",
"=",
"float",
")",
"if",
"len",
"(",
"A",
".",
"shape",
")",
"<=",
"1",
":",
"return",
"A",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"elif",
"A",
".",
"shape",
"[",
"-",
"1",
"]",
">",
"1",
":",
"return",
"A",
".",
"reshape",
"(",
"-",
"1",
",",
"A",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"else",
":",
"return",
"A"
]
| Takes an np.ndarray as an input.
- If the array is one-dimensional, it's assumed to be an array of input values.
- If the array is more than one-dimensional, its last index is assumed to curse
over spatial dimension.
Either way, the return value is at least two dimensional. A.shape[-1] gives the
number of spatial dimensions. | [
"Takes",
"an",
"np",
".",
"ndarray",
"as",
"an",
"input",
"."
]
| python | train |
quodlibet/mutagen | mutagen/mp4/_atom.py | https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/mp4/_atom.py#L84-L89 | def read(self, fileobj):
    """Return if all data could be read and the atom payload"""
    fileobj.seek(self._dataoffset, 0)
    data = fileobj.read(self.datalength)
    return len(data) == self.datalength, data | [
"def",
"read",
"(",
"self",
",",
"fileobj",
")",
":",
"fileobj",
".",
"seek",
"(",
"self",
".",
"_dataoffset",
",",
"0",
")",
"data",
"=",
"fileobj",
".",
"read",
"(",
"self",
".",
"datalength",
")",
"return",
"len",
"(",
"data",
")",
"==",
"self",
".",
"datalength",
",",
"data"
]
| Return if all data could be read and the atom payload | [
"Return",
"if",
"all",
"data",
"could",
"be",
"read",
"and",
"the",
"atom",
"payload"
]
| python | train |
user-cont/conu | conu/backend/k8s/deployment.py | https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/k8s/deployment.py#L124-L136 | def all_pods_ready(self):
    """
    Check if number of replicas with same selector is equals to number of ready replicas
    :return: bool
    """
    if self.get_status().replicas and self.get_status().ready_replicas:
        if self.get_status().replicas == self.get_status().ready_replicas:
            logger.info("All pods are ready for deployment %s in namespace: %s",
                        self.name, self.namespace)
            return True
    return False | [
"def",
"all_pods_ready",
"(",
"self",
")",
":",
"if",
"self",
".",
"get_status",
"(",
")",
".",
"replicas",
"and",
"self",
".",
"get_status",
"(",
")",
".",
"ready_replicas",
":",
"if",
"self",
".",
"get_status",
"(",
")",
".",
"replicas",
"==",
"self",
".",
"get_status",
"(",
")",
".",
"ready_replicas",
":",
"logger",
".",
"info",
"(",
"\"All pods are ready for deployment %s in namespace: %s\"",
",",
"self",
".",
"name",
",",
"self",
".",
"namespace",
")",
"return",
"True",
"return",
"False"
]
| Check if number of replicas with same selector is equals to number of ready replicas
:return: bool | [
"Check",
"if",
"number",
"of",
"replicas",
"with",
"same",
"selector",
"is",
"equals",
"to",
"number",
"of",
"ready",
"replicas",
":",
"return",
":",
"bool"
]
| python | train |
StanfordBioinformatics/scgpm_seqresults_dnanexus | scgpm_seqresults_dnanexus/dnanexus_utils.py | https://github.com/StanfordBioinformatics/scgpm_seqresults_dnanexus/blob/2bdaae5ec5d38a07fec99e0c5379074a591d77b6/scgpm_seqresults_dnanexus/dnanexus_utils.py#L67-L84 | def select_newest_project(dx_project_ids):
    """
    Given a list of DNAnexus project IDs, returns the one that is newest as determined by creation date.
    Args:
        dx_project_ids: `list` of DNAnexus project IDs.
    Returns:
        `str`.
    """
    if len(dx_project_ids) == 1:
        return dx_project_ids[0]
    projects = [dxpy.DXProject(x) for x in dx_project_ids]
    created_times = [x.describe()["created"] for x in projects]
    paired = zip(created_times,projects)
    paired.sort(reverse=True)
    return paired[0][0] | [
"def",
"select_newest_project",
"(",
"dx_project_ids",
")",
":",
"if",
"len",
"(",
"dx_project_ids",
")",
"==",
"1",
":",
"return",
"dx_project_ids",
"[",
"0",
"]",
"projects",
"=",
"[",
"dxpy",
".",
"DXProject",
"(",
"x",
")",
"for",
"x",
"in",
"dx_project_ids",
"]",
"created_times",
"=",
"[",
"x",
".",
"describe",
"(",
")",
"[",
"\"created\"",
"]",
"for",
"x",
"in",
"projects",
"]",
"paired",
"=",
"zip",
"(",
"created_times",
",",
"projects",
")",
"paired",
".",
"sort",
"(",
"reverse",
"=",
"True",
")",
"return",
"paired",
"[",
"0",
"]",
"[",
"0",
"]"
]
| Given a list of DNAnexus project IDs, returns the one that is newest as determined by creation date.
Args:
dx_project_ids: `list` of DNAnexus project IDs.
Returns:
`str`. | [
"Given",
"a",
"list",
"of",
"DNAnexus",
"project",
"IDs",
"returns",
"the",
"one",
"that",
"is",
"newest",
"as",
"determined",
"by",
"creation",
"date",
".",
"Args",
":",
"dx_project_ids",
":",
"list",
"of",
"DNAnexus",
"project",
"IDs",
".",
"Returns",
":",
"str",
"."
]
| python | train |
SBRG/ssbio | ssbio/pipeline/atlas2.py | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/atlas2.py#L390-L407 | def build_strain_specific_models(self, joblib=False, cores=1, force_rerun=False):
    """Wrapper function for _build_strain_specific_model"""
    if len(self.df_orthology_matrix) == 0:
        raise RuntimeError('Empty orthology matrix, please calculate first!')
    ref_functional_genes = [g.id for g in self.reference_gempro.functional_genes]
    log.info('Building strain specific models...')
    if joblib:
        result = DictList(Parallel(n_jobs=cores)(delayed(self._build_strain_specific_model)(s, ref_functional_genes, self.df_orthology_matrix, force_rerun=force_rerun) for s in self.strain_ids))
        # if sc:
        #     strains_rdd = sc.parallelize(self.strain_ids)
        #     result = strains_rdd.map(self._build_strain_specific_model).collect()
    else:
        result = []
        for s in tqdm(self.strain_ids):
            result.append(self._build_strain_specific_model(s, ref_functional_genes, self.df_orthology_matrix, force_rerun=force_rerun))
    for strain_id, gp_noseqs_path in result:
        self.strain_infodict[strain_id]['gp_noseqs_path'] = gp_noseqs_path | [
"def",
"build_strain_specific_models",
"(",
"self",
",",
"joblib",
"=",
"False",
",",
"cores",
"=",
"1",
",",
"force_rerun",
"=",
"False",
")",
":",
"if",
"len",
"(",
"self",
".",
"df_orthology_matrix",
")",
"==",
"0",
":",
"raise",
"RuntimeError",
"(",
"'Empty orthology matrix, please calculate first!'",
")",
"ref_functional_genes",
"=",
"[",
"g",
".",
"id",
"for",
"g",
"in",
"self",
".",
"reference_gempro",
".",
"functional_genes",
"]",
"log",
".",
"info",
"(",
"'Building strain specific models...'",
")",
"if",
"joblib",
":",
"result",
"=",
"DictList",
"(",
"Parallel",
"(",
"n_jobs",
"=",
"cores",
")",
"(",
"delayed",
"(",
"self",
".",
"_build_strain_specific_model",
")",
"(",
"s",
",",
"ref_functional_genes",
",",
"self",
".",
"df_orthology_matrix",
",",
"force_rerun",
"=",
"force_rerun",
")",
"for",
"s",
"in",
"self",
".",
"strain_ids",
")",
")",
"# if sc:",
"# strains_rdd = sc.parallelize(self.strain_ids)",
"# result = strains_rdd.map(self._build_strain_specific_model).collect()",
"else",
":",
"result",
"=",
"[",
"]",
"for",
"s",
"in",
"tqdm",
"(",
"self",
".",
"strain_ids",
")",
":",
"result",
".",
"append",
"(",
"self",
".",
"_build_strain_specific_model",
"(",
"s",
",",
"ref_functional_genes",
",",
"self",
".",
"df_orthology_matrix",
",",
"force_rerun",
"=",
"force_rerun",
")",
")",
"for",
"strain_id",
",",
"gp_noseqs_path",
"in",
"result",
":",
"self",
".",
"strain_infodict",
"[",
"strain_id",
"]",
"[",
"'gp_noseqs_path'",
"]",
"=",
"gp_noseqs_path"
]
| Wrapper function for _build_strain_specific_model | [
"Wrapper",
"function",
"for",
"_build_strain_specific_model"
]
| python | train |
bjmorgan/lattice_mc | lattice_mc/simulation.py | https://github.com/bjmorgan/lattice_mc/blob/7fa7be85f2f23a2d8dfd0830ecdb89d0dbf2bfd5/lattice_mc/simulation.py#L81-L93 | def define_lattice_from_file( self, filename, cell_lengths ):
    """
    Set up the simulation lattice from a file containing site data.
    Uses `init_lattice.lattice_from_sites_file`, which defines the site file spec.
    Args:
        filename (Str): sites file filename.
        cell_lengths (List(x,y,z)): cell lengths for the simulation cell.
    Returns:
        None
    """
    self.lattice = init_lattice.lattice_from_sites_file( filename, cell_lengths = cell_lengths ) | [
"def",
"define_lattice_from_file",
"(",
"self",
",",
"filename",
",",
"cell_lengths",
")",
":",
"self",
".",
"lattice",
"=",
"init_lattice",
".",
"lattice_from_sites_file",
"(",
"filename",
",",
"cell_lengths",
"=",
"cell_lengths",
")"
]
| Set up the simulation lattice from a file containing site data.
Uses `init_lattice.lattice_from_sites_file`, which defines the site file spec.
Args:
filename (Str): sites file filename.
cell_lengths (List(x,y,z)): cell lengths for the simulation cell.
Returns:
None | [
"Set",
"up",
"the",
"simulation",
"lattice",
"from",
"a",
"file",
"containing",
"site",
"data",
".",
"Uses",
"init_lattice",
".",
"lattice_from_sites_file",
"which",
"defines",
"the",
"site",
"file",
"spec",
".",
"Args",
":",
"filename",
"(",
"Str",
")",
":",
"sites",
"file",
"filename",
".",
"cell_lengths",
"(",
"List",
"(",
"x",
"y",
"z",
"))",
":",
"cell",
"lengths",
"for",
"the",
"simulation",
"cell",
"."
]
| python | train |
roboogle/gtkmvc3 | gtkmvco/examples/undo/undo_manager.py | https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/undo/undo_manager.py#L165-L175 | def undo(self):
    """
    Raises IndexError if more than one group is open, otherwise closes it
    and invokes undo_nested_group.
    """
    if self.grouping_level() == 1:
        self.end_grouping()
    if self._open:
        raise IndexError
    self.undo_nested_group()
    self.notify() | [
"def",
"undo",
"(",
"self",
")",
":",
"if",
"self",
".",
"grouping_level",
"(",
")",
"==",
"1",
":",
"self",
".",
"end_grouping",
"(",
")",
"if",
"self",
".",
"_open",
":",
"raise",
"IndexError",
"self",
".",
"undo_nested_group",
"(",
")",
"self",
".",
"notify",
"(",
")"
]
| Raises IndexError if more than one group is open, otherwise closes it
and invokes undo_nested_group. | [
"Raises",
"IndexError",
"if",
"more",
"than",
"one",
"group",
"is",
"open",
"otherwise",
"closes",
"it",
"and",
"invokes",
"undo_nested_group",
"."
]
| python | train |
mozilla/build-mar | src/mardor/signing.py | https://github.com/mozilla/build-mar/blob/d8c3b3469e55654d31f430cb343fd89392196c4e/src/mardor/signing.py#L59-L101 | def get_signature_data(fileobj, filesize):
    """Read data from MAR file that is required for MAR signatures.
    Args:
        fileboj (file-like object): file-like object to read the MAR data from
        filesize (int): the total size of the file
    Yields:
        blocks of bytes representing the data required to generate or validate
        signatures.
    """
    # Read everything except the signature entries
    # The first 8 bytes are covered, as is everything from the beginning
    # of the additional section to the end of the file. The signature
    # algorithm id and size fields are also covered.
    fileobj.seek(0)
    marfile = mar.parse_stream(fileobj)
    if not marfile.signatures:
        raise IOError("Can't generate signature data for file without signature blocks")
    # MAR header
    fileobj.seek(0)
    block = fileobj.read(8)
    yield block
    # Signatures header
    sigs = sigs_header.parse_stream(fileobj)
    sig_types = [(sig.algorithm_id, sig.size) for sig in sigs.sigs]
    block = Int64ub.build(filesize) + Int32ub.build(sigs.count)
    yield block
    # Signature algorithm id and size per entry
    for algorithm_id, size in sig_types:
        block = Int32ub.build(algorithm_id) + Int32ub.build(size)
        yield block
    # Everything else in the file is covered
    for block in file_iter(fileobj):
        yield block | [
"def",
"get_signature_data",
"(",
"fileobj",
",",
"filesize",
")",
":",
"# Read everything except the signature entries",
"# The first 8 bytes are covered, as is everything from the beginning",
"# of the additional section to the end of the file. The signature",
"# algorithm id and size fields are also covered.",
"fileobj",
".",
"seek",
"(",
"0",
")",
"marfile",
"=",
"mar",
".",
"parse_stream",
"(",
"fileobj",
")",
"if",
"not",
"marfile",
".",
"signatures",
":",
"raise",
"IOError",
"(",
"\"Can't generate signature data for file without signature blocks\"",
")",
"# MAR header",
"fileobj",
".",
"seek",
"(",
"0",
")",
"block",
"=",
"fileobj",
".",
"read",
"(",
"8",
")",
"yield",
"block",
"# Signatures header",
"sigs",
"=",
"sigs_header",
".",
"parse_stream",
"(",
"fileobj",
")",
"sig_types",
"=",
"[",
"(",
"sig",
".",
"algorithm_id",
",",
"sig",
".",
"size",
")",
"for",
"sig",
"in",
"sigs",
".",
"sigs",
"]",
"block",
"=",
"Int64ub",
".",
"build",
"(",
"filesize",
")",
"+",
"Int32ub",
".",
"build",
"(",
"sigs",
".",
"count",
")",
"yield",
"block",
"# Signature algorithm id and size per entry",
"for",
"algorithm_id",
",",
"size",
"in",
"sig_types",
":",
"block",
"=",
"Int32ub",
".",
"build",
"(",
"algorithm_id",
")",
"+",
"Int32ub",
".",
"build",
"(",
"size",
")",
"yield",
"block",
"# Everything else in the file is covered",
"for",
"block",
"in",
"file_iter",
"(",
"fileobj",
")",
":",
"yield",
"block"
]
| Read data from MAR file that is required for MAR signatures.
Args:
fileboj (file-like object): file-like object to read the MAR data from
filesize (int): the total size of the file
Yields:
blocks of bytes representing the data required to generate or validate
signatures. | [
"Read",
"data",
"from",
"MAR",
"file",
"that",
"is",
"required",
"for",
"MAR",
"signatures",
"."
]
| python | train |
edx/i18n-tools | i18n/transifex.py | https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/transifex.py#L114-L125 | def clean_locale(configuration, locale):
    """
    Strips out the warning from all of a locale's translated po files
    about being an English source file.
    Iterates over machine-generated files.
    """
    dirname = configuration.get_messages_dir(locale)
    if not dirname.exists():
        # Happens when we have a supported locale that doesn't exist in Transifex
        return
    for filename in dirname.files('*.po'):
        clean_file(configuration, dirname.joinpath(filename)) | [
"def",
"clean_locale",
"(",
"configuration",
",",
"locale",
")",
":",
"dirname",
"=",
"configuration",
".",
"get_messages_dir",
"(",
"locale",
")",
"if",
"not",
"dirname",
".",
"exists",
"(",
")",
":",
"# Happens when we have a supported locale that doesn't exist in Transifex",
"return",
"for",
"filename",
"in",
"dirname",
".",
"files",
"(",
"'*.po'",
")",
":",
"clean_file",
"(",
"configuration",
",",
"dirname",
".",
"joinpath",
"(",
"filename",
")",
")"
]
| Strips out the warning from all of a locale's translated po files
about being an English source file.
Iterates over machine-generated files. | [
"Strips",
"out",
"the",
"warning",
"from",
"all",
"of",
"a",
"locale",
"s",
"translated",
"po",
"files",
"about",
"being",
"an",
"English",
"source",
"file",
".",
"Iterates",
"over",
"machine",
"-",
"generated",
"files",
"."
]
| python | train |
ungarj/mapchete | mapchete/formats/__init__.py | https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/__init__.py#L78-L93 | def available_input_formats():
    """
    Return all available input formats.
    Returns
    -------
    formats : list
        all available input formats
    """
    input_formats = []
    for v in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):
        logger.debug("driver found: %s", v)
        driver_ = v.load()
        if hasattr(driver_, "METADATA") and (driver_.METADATA["mode"] in ["r", "rw"]):
            input_formats.append(driver_.METADATA["driver_name"])
    return input_formats | [
"def",
"available_input_formats",
"(",
")",
":",
"input_formats",
"=",
"[",
"]",
"for",
"v",
"in",
"pkg_resources",
".",
"iter_entry_points",
"(",
"DRIVERS_ENTRY_POINT",
")",
":",
"logger",
".",
"debug",
"(",
"\"driver found: %s\"",
",",
"v",
")",
"driver_",
"=",
"v",
".",
"load",
"(",
")",
"if",
"hasattr",
"(",
"driver_",
",",
"\"METADATA\"",
")",
"and",
"(",
"driver_",
".",
"METADATA",
"[",
"\"mode\"",
"]",
"in",
"[",
"\"r\"",
",",
"\"rw\"",
"]",
")",
":",
"input_formats",
".",
"append",
"(",
"driver_",
".",
"METADATA",
"[",
"\"driver_name\"",
"]",
")",
"return",
"input_formats"
]
| Return all available input formats.
Returns
-------
formats : list
all available input formats | [
"Return",
"all",
"available",
"input",
"formats",
"."
]
| python | valid |
PmagPy/PmagPy | SPD/lib/lib_arai_plot_statistics.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/lib_arai_plot_statistics.py#L257-L263 | def get_b_wiggle(x, y, y_int):
    """returns instantaneous slope from the ratio of NRM lost to TRM gained at the ith step"""
    if x == 0:
        b_wiggle = 0
    else:
        b_wiggle = old_div((y_int - y), x)
    return b_wiggle | [
"def",
"get_b_wiggle",
"(",
"x",
",",
"y",
",",
"y_int",
")",
":",
"if",
"x",
"==",
"0",
":",
"b_wiggle",
"=",
"0",
"else",
":",
"b_wiggle",
"=",
"old_div",
"(",
"(",
"y_int",
"-",
"y",
")",
",",
"x",
")",
"return",
"b_wiggle"
]
| returns instantaneous slope from the ratio of NRM lost to TRM gained at the ith step | [
"returns",
"instantaneous",
"slope",
"from",
"the",
"ratio",
"of",
"NRM",
"lost",
"to",
"TRM",
"gained",
"at",
"the",
"ith",
"step"
]
| python | train |
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L371-L381 | def _collapse_attributes(self, line, header, indexes):
    """Combine attributes in multiple columns into single named tuple.
    """
    names = []
    vals = []
    pat = re.compile("[\W]+")
    for i in indexes:
        names.append(pat.sub("_", self._clean_header(header[i])))
        vals.append(line[i])
    Attrs = collections.namedtuple('Attrs', names)
    return Attrs(*vals) | [
"def",
"_collapse_attributes",
"(",
"self",
",",
"line",
",",
"header",
",",
"indexes",
")",
":",
"names",
"=",
"[",
"]",
"vals",
"=",
"[",
"]",
"pat",
"=",
"re",
".",
"compile",
"(",
"\"[\\W]+\"",
")",
"for",
"i",
"in",
"indexes",
":",
"names",
".",
"append",
"(",
"pat",
".",
"sub",
"(",
"\"_\"",
",",
"self",
".",
"_clean_header",
"(",
"header",
"[",
"i",
"]",
")",
")",
")",
"vals",
".",
"append",
"(",
"line",
"[",
"i",
"]",
")",
"Attrs",
"=",
"collections",
".",
"namedtuple",
"(",
"'Attrs'",
",",
"names",
")",
"return",
"Attrs",
"(",
"*",
"vals",
")"
]
| Combine attributes in multiple columns into single named tuple. | [
"Combine",
"attributes",
"in",
"multiple",
"columns",
"into",
"single",
"named",
"tuple",
"."
]
| python | train |
django-danceschool/django-danceschool | danceschool/vouchers/handlers.py | https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/vouchers/handlers.py#L20-L71 | def checkVoucherCode(sender,**kwargs):
    '''
    Check that the given voucher code is valid
    '''
    logger.debug('Signal to check RegistrationContactForm handled by vouchers app.')
    formData = kwargs.get('formData',{})
    request = kwargs.get('request',{})
    registration = kwargs.get('registration',None)
    session = getattr(request,'session',{}).get(REG_VALIDATION_STR,{})
    id = formData.get('gift','')
    first = formData.get('firstName')
    last = formData.get('lastName')
    email = formData.get('email')
    # Clean out the session data relating to vouchers so that we can revalidate it.
    session.pop('total_voucher_amount',0)
    session.pop('voucher_names',None)
    session.pop('gift',None)
    if id == '':
        return
    if not getConstant('vouchers__enableVouchers'):
        raise ValidationError({'gift': _('Vouchers are disabled.')})
    if session.get('gift','') != '':
        raise ValidationError({'gift': _('Can\'t have more than one voucher')})
    eventids = [x.event.id for x in registration.temporaryeventregistration_set.exclude(dropIn=True)]
    seriess = Series.objects.filter(id__in=eventids)
    obj = Voucher.objects.filter(voucherId=id).first()
    if not obj:
        raise ValidationError({'gift':_('Invalid Voucher Id')})
    else:
        customer = Customer.objects.filter(
            first_name=first,
            last_name=last,
            email=email).first()
        # This will raise any other errors that may be relevant
        try:
            obj.validateForCustomerAndSeriess(customer,seriess)
        except ValidationError as e:
            # Ensures that the error is applied to the correct field
            raise ValidationError({'gift': e})
    # If we got this far, then the voucher is determined to be valid, so the registration
    # can proceed with no errors.
    return | [
"def",
"checkVoucherCode",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"logger",
".",
"debug",
"(",
"'Signal to check RegistrationContactForm handled by vouchers app.'",
")",
"formData",
"=",
"kwargs",
".",
"get",
"(",
"'formData'",
",",
"{",
"}",
")",
"request",
"=",
"kwargs",
".",
"get",
"(",
"'request'",
",",
"{",
"}",
")",
"registration",
"=",
"kwargs",
".",
"get",
"(",
"'registration'",
",",
"None",
")",
"session",
"=",
"getattr",
"(",
"request",
",",
"'session'",
",",
"{",
"}",
")",
".",
"get",
"(",
"REG_VALIDATION_STR",
",",
"{",
"}",
")",
"id",
"=",
"formData",
".",
"get",
"(",
"'gift'",
",",
"''",
")",
"first",
"=",
"formData",
".",
"get",
"(",
"'firstName'",
")",
"last",
"=",
"formData",
".",
"get",
"(",
"'lastName'",
")",
"email",
"=",
"formData",
".",
"get",
"(",
"'email'",
")",
"# Clean out the session data relating to vouchers so that we can revalidate it.\r",
"session",
".",
"pop",
"(",
"'total_voucher_amount'",
",",
"0",
")",
"session",
".",
"pop",
"(",
"'voucher_names'",
",",
"None",
")",
"session",
".",
"pop",
"(",
"'gift'",
",",
"None",
")",
"if",
"id",
"==",
"''",
":",
"return",
"if",
"not",
"getConstant",
"(",
"'vouchers__enableVouchers'",
")",
":",
"raise",
"ValidationError",
"(",
"{",
"'gift'",
":",
"_",
"(",
"'Vouchers are disabled.'",
")",
"}",
")",
"if",
"session",
".",
"get",
"(",
"'gift'",
",",
"''",
")",
"!=",
"''",
":",
"raise",
"ValidationError",
"(",
"{",
"'gift'",
":",
"_",
"(",
"'Can\\'t have more than one voucher'",
")",
"}",
")",
"eventids",
"=",
"[",
"x",
".",
"event",
".",
"id",
"for",
"x",
"in",
"registration",
".",
"temporaryeventregistration_set",
".",
"exclude",
"(",
"dropIn",
"=",
"True",
")",
"]",
"seriess",
"=",
"Series",
".",
"objects",
".",
"filter",
"(",
"id__in",
"=",
"eventids",
")",
"obj",
"=",
"Voucher",
".",
"objects",
".",
"filter",
"(",
"voucherId",
"=",
"id",
")",
".",
"first",
"(",
")",
"if",
"not",
"obj",
":",
"raise",
"ValidationError",
"(",
"{",
"'gift'",
":",
"_",
"(",
"'Invalid Voucher Id'",
")",
"}",
")",
"else",
":",
"customer",
"=",
"Customer",
".",
"objects",
".",
"filter",
"(",
"first_name",
"=",
"first",
",",
"last_name",
"=",
"last",
",",
"email",
"=",
"email",
")",
".",
"first",
"(",
")",
"# This will raise any other errors that may be relevant\r",
"try",
":",
"obj",
".",
"validateForCustomerAndSeriess",
"(",
"customer",
",",
"seriess",
")",
"except",
"ValidationError",
"as",
"e",
":",
"# Ensures that the error is applied to the correct field\r",
"raise",
"ValidationError",
"(",
"{",
"'gift'",
":",
"e",
"}",
")",
"# If we got this far, then the voucher is determined to be valid, so the registration\r",
"# can proceed with no errors.\r",
"return"
]
| Check that the given voucher code is valid | [
"Check",
"that",
"the",
"given",
"voucher",
"code",
"is",
"valid"
]
| python | train |
azraq27/neural | neural/dsets.py | https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dsets.py#L273-L285 | def resample_dset(dset,template,prefix=None,resam='NN'):
    '''Resamples ``dset`` to the grid of ``template`` using resampling mode ``resam``.
    Default prefix is to suffix ``_resam`` at the end of ``dset``
    Available resampling modes:
        :NN: Nearest Neighbor
        :Li: Linear
        :Cu: Cubic
        :Bk: Blocky
    '''
    if prefix==None:
        prefix = nl.suffix(dset,'_resam')
    nl.run(['3dresample','-master',template,'-rmode',resam,'-prefix',prefix,'-inset',dset]) | [
"def",
"resample_dset",
"(",
"dset",
",",
"template",
",",
"prefix",
"=",
"None",
",",
"resam",
"=",
"'NN'",
")",
":",
"if",
"prefix",
"==",
"None",
":",
"prefix",
"=",
"nl",
".",
"suffix",
"(",
"dset",
",",
"'_resam'",
")",
"nl",
".",
"run",
"(",
"[",
"'3dresample'",
",",
"'-master'",
",",
"template",
",",
"'-rmode'",
",",
"resam",
",",
"'-prefix'",
",",
"prefix",
",",
"'-inset'",
",",
"dset",
"]",
")"
]
| Resamples ``dset`` to the grid of ``template`` using resampling mode ``resam``.
Default prefix is to suffix ``_resam`` at the end of ``dset``
Available resampling modes:
:NN: Nearest Neighbor
:Li: Linear
:Cu: Cubic
:Bk: Blocky | [
"Resamples",
"dset",
"to",
"the",
"grid",
"of",
"template",
"using",
"resampling",
"mode",
"resam",
".",
"Default",
"prefix",
"is",
"to",
"suffix",
"_resam",
"at",
"the",
"end",
"of",
"dset"
]
| python | train |
gagneurlab/concise | concise/preprocessing/splines.py | https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/splines.py#L93-L149 | def encodeSplines(x, n_bases=10, spline_order=3, start=None, end=None, warn=True):
    """**Deprecated**. Function version of the transformer class `EncodeSplines`.
    Get B-spline base-function expansion
    # Details
        First, the knots for B-spline basis functions are placed
        equidistantly on the [start, end] range.
        (inferred from the data if None). Next, b_n(x) value is
        is computed for each x and each n (spline-index) with
        `scipy.interpolate.splev`.
    # Arguments
        x: a numpy array of positions with 2 dimensions
        n_bases int: Number of spline bases.
        spline_order: 2 for quadratic, 3 for qubic splines
        start, end: range of values. If None, they are inferred from the data
        as minimum and maximum value.
        warn: Show warnings.
    # Returns
        `np.ndarray` of shape `(x.shape[0], x.shape[1], n_bases)`
    """
    # TODO - make it general...
    if len(x.shape) == 1:
        x = x.reshape((-1, 1))
    if start is None:
        start = np.nanmin(x)
    else:
        if x.min() < start:
            if warn:
                print("WARNING, x.min() < start for some elements. Truncating them to start: x[x < start] = start")
            x = _trunc(x, minval=start)
    if end is None:
        end = np.nanmax(x)
    else:
        if x.max() > end:
            if warn:
                print("WARNING, x.max() > end for some elements. Truncating them to end: x[x > end] = end")
            x = _trunc(x, maxval=end)
    bs = BSpline(start, end,
                 n_bases=n_bases,
                 spline_order=spline_order
                 )
    # concatenate x to long
    assert len(x.shape) == 2
    n_rows = x.shape[0]
    n_cols = x.shape[1]
    x_long = x.reshape((-1,))
    x_feat = bs.predict(x_long, add_intercept=False)  # shape = (n_rows * n_cols, n_bases)
    x_final = x_feat.reshape((n_rows, n_cols, n_bases))
    return x_final | [
"def",
"encodeSplines",
"(",
"x",
",",
"n_bases",
"=",
"10",
",",
"spline_order",
"=",
"3",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"warn",
"=",
"True",
")",
":",
"# TODO - make it general...",
"if",
"len",
"(",
"x",
".",
"shape",
")",
"==",
"1",
":",
"x",
"=",
"x",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"if",
"start",
"is",
"None",
":",
"start",
"=",
"np",
".",
"nanmin",
"(",
"x",
")",
"else",
":",
"if",
"x",
".",
"min",
"(",
")",
"<",
"start",
":",
"if",
"warn",
":",
"print",
"(",
"\"WARNING, x.min() < start for some elements. Truncating them to start: x[x < start] = start\"",
")",
"x",
"=",
"_trunc",
"(",
"x",
",",
"minval",
"=",
"start",
")",
"if",
"end",
"is",
"None",
":",
"end",
"=",
"np",
".",
"nanmax",
"(",
"x",
")",
"else",
":",
"if",
"x",
".",
"max",
"(",
")",
">",
"end",
":",
"if",
"warn",
":",
"print",
"(",
"\"WARNING, x.max() > end for some elements. Truncating them to end: x[x > end] = end\"",
")",
"x",
"=",
"_trunc",
"(",
"x",
",",
"maxval",
"=",
"end",
")",
"bs",
"=",
"BSpline",
"(",
"start",
",",
"end",
",",
"n_bases",
"=",
"n_bases",
",",
"spline_order",
"=",
"spline_order",
")",
"# concatenate x to long",
"assert",
"len",
"(",
"x",
".",
"shape",
")",
"==",
"2",
"n_rows",
"=",
"x",
".",
"shape",
"[",
"0",
"]",
"n_cols",
"=",
"x",
".",
"shape",
"[",
"1",
"]",
"x_long",
"=",
"x",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
")",
")",
"x_feat",
"=",
"bs",
".",
"predict",
"(",
"x_long",
",",
"add_intercept",
"=",
"False",
")",
"# shape = (n_rows * n_cols, n_bases)",
"x_final",
"=",
"x_feat",
".",
"reshape",
"(",
"(",
"n_rows",
",",
"n_cols",
",",
"n_bases",
")",
")",
"return",
"x_final"
]
| **Deprecated**. Function version of the transformer class `EncodeSplines`.
Get B-spline base-function expansion
# Details
First, the knots for B-spline basis functions are placed
equidistantly on the [start, end] range.
(inferred from the data if None). Next, b_n(x) value is
is computed for each x and each n (spline-index) with
`scipy.interpolate.splev`.
# Arguments
x: a numpy array of positions with 2 dimensions
n_bases int: Number of spline bases.
spline_order: 2 for quadratic, 3 for qubic splines
start, end: range of values. If None, they are inferred from the data
as minimum and maximum value.
warn: Show warnings.
# Returns
`np.ndarray` of shape `(x.shape[0], x.shape[1], n_bases)` | [
"**",
"Deprecated",
"**",
".",
"Function",
"version",
"of",
"the",
"transformer",
"class",
"EncodeSplines",
".",
"Get",
"B",
"-",
"spline",
"base",
"-",
"function",
"expansion"
]
| python | train |
dmwm/DBS | Server/Python/src/dbs/dao/Oracle/FileBuffer/List.py | https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/dao/Oracle/FileBuffer/List.py#L20-L31 | def execute(self, conn, block_id="", transaction=False):
    """
    simple execute
    """
    if not conn:
        dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/FileBuffer/List. Expects db connection from upper layer.")
    sql = self.sql
    binds = { "block_id" : block_id}
    cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
    result = self.formatCursor(cursors[0])
    return result | [
"def",
"execute",
"(",
"self",
",",
"conn",
",",
"block_id",
"=",
"\"\"",
",",
"transaction",
"=",
"False",
")",
":",
"if",
"not",
"conn",
":",
"dbsExceptionHandler",
"(",
"\"dbsException-db-conn-failed\"",
",",
"\"Oracle/FileBuffer/List. Expects db connection from upper layer.\"",
")",
"sql",
"=",
"self",
".",
"sql",
"binds",
"=",
"{",
"\"block_id\"",
":",
"block_id",
"}",
"cursors",
"=",
"self",
".",
"dbi",
".",
"processData",
"(",
"sql",
",",
"binds",
",",
"conn",
",",
"transaction",
",",
"returnCursor",
"=",
"True",
")",
"result",
"=",
"self",
".",
"formatCursor",
"(",
"cursors",
"[",
"0",
"]",
")",
"return",
"result"
]
| simple execute | [
"simple",
"execute"
]
| python | train |
project-rig/rig | rig/machine_control/machine_controller.py | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1803-L1840 | def get_chip_info(self, x, y):
    """Get general information about the resources available on a chip.
    Returns
    -------
    :py:class:`.ChipInfo`
        A named tuple indicating the number of working cores, the states of
        all working cores, the set of working links and the size of the
        largest free block in SDRAM and SRAM.
    """
    info = self._send_scp(x, y, 0, SCPCommands.info, expected_args=3)
    # Unpack values encoded in the argument fields
    num_cores = info.arg1 & 0x1F
    working_links = set(link for link in Links
                        if (info.arg1 >> (8 + link)) & 1)
    largest_free_rtr_mc_block = (info.arg1 >> 14) & 0x7FF
    ethernet_up = bool(info.arg1 & (1 << 25))
    # Unpack the values in the data payload
    data = struct.unpack_from("<18BHI", info.data)
    core_states = [consts.AppState(c) for c in data[:18]]
    local_ethernet_chip = ((data[18] >> 8) & 0xFF,
                           (data[18] >> 0) & 0xFF)
    ip_address = ".".join(str((data[19] >> i) & 0xFF)
                          for i in range(0, 32, 8))
    return ChipInfo(
        num_cores=num_cores,
        core_states=core_states[:num_cores],
        working_links=working_links,
        largest_free_sdram_block=info.arg2,
        largest_free_sram_block=info.arg3,
        largest_free_rtr_mc_block=largest_free_rtr_mc_block,
        ethernet_up=ethernet_up,
        ip_address=ip_address,
        local_ethernet_chip=local_ethernet_chip,
    ) | [
"def",
"get_chip_info",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"info",
"=",
"self",
".",
"_send_scp",
"(",
"x",
",",
"y",
",",
"0",
",",
"SCPCommands",
".",
"info",
",",
"expected_args",
"=",
"3",
")",
"# Unpack values encoded in the argument fields",
"num_cores",
"=",
"info",
".",
"arg1",
"&",
"0x1F",
"working_links",
"=",
"set",
"(",
"link",
"for",
"link",
"in",
"Links",
"if",
"(",
"info",
".",
"arg1",
">>",
"(",
"8",
"+",
"link",
")",
")",
"&",
"1",
")",
"largest_free_rtr_mc_block",
"=",
"(",
"info",
".",
"arg1",
">>",
"14",
")",
"&",
"0x7FF",
"ethernet_up",
"=",
"bool",
"(",
"info",
".",
"arg1",
"&",
"(",
"1",
"<<",
"25",
")",
")",
"# Unpack the values in the data payload",
"data",
"=",
"struct",
".",
"unpack_from",
"(",
"\"<18BHI\"",
",",
"info",
".",
"data",
")",
"core_states",
"=",
"[",
"consts",
".",
"AppState",
"(",
"c",
")",
"for",
"c",
"in",
"data",
"[",
":",
"18",
"]",
"]",
"local_ethernet_chip",
"=",
"(",
"(",
"data",
"[",
"18",
"]",
">>",
"8",
")",
"&",
"0xFF",
",",
"(",
"data",
"[",
"18",
"]",
">>",
"0",
")",
"&",
"0xFF",
")",
"ip_address",
"=",
"\".\"",
".",
"join",
"(",
"str",
"(",
"(",
"data",
"[",
"19",
"]",
">>",
"i",
")",
"&",
"0xFF",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"32",
",",
"8",
")",
")",
"return",
"ChipInfo",
"(",
"num_cores",
"=",
"num_cores",
",",
"core_states",
"=",
"core_states",
"[",
":",
"num_cores",
"]",
",",
"working_links",
"=",
"working_links",
",",
"largest_free_sdram_block",
"=",
"info",
".",
"arg2",
",",
"largest_free_sram_block",
"=",
"info",
".",
"arg3",
",",
"largest_free_rtr_mc_block",
"=",
"largest_free_rtr_mc_block",
",",
"ethernet_up",
"=",
"ethernet_up",
",",
"ip_address",
"=",
"ip_address",
",",
"local_ethernet_chip",
"=",
"local_ethernet_chip",
",",
")"
]
| Get general information about the resources available on a chip.
Returns
-------
:py:class:`.ChipInfo`
A named tuple indicating the number of working cores, the states of
all working cores, the set of working links and the size of the
largest free block in SDRAM and SRAM. | [
"Get",
"general",
"information",
"about",
"the",
"resources",
"available",
"on",
"a",
"chip",
"."
]
| python | train |
matthew-brett/delocate | delocate/delocating.py | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L104-L147 | def copy_recurse(lib_path, copy_filt_func = None, copied_libs = None):
    """ Analyze `lib_path` for library dependencies and copy libraries
    `lib_path` is a directory containing libraries. The libraries might
    themselves have dependencies. This function analyzes the dependencies and
    copies library dependencies that match the filter `copy_filt_func`. It also
    adjusts the depending libraries to use the copy. It keeps iterating over
    `lib_path` until all matching dependencies (of dependencies of dependencies
    ...) have been copied.
    Parameters
    ----------
    lib_path : str
        Directory containing libraries
    copy_filt_func : None or callable, optional
        If None, copy any library that found libraries depend on. If callable,
        called on each depended library name; copy where
        ``copy_filt_func(libname)`` is True, don't copy otherwise
    copied_libs : dict
        Dict with (key, value) pairs of (``copied_lib_path``,
        ``dependings_dict``) where ``copied_lib_path`` is the canonical path of
        a library that has been copied to `lib_path`, and ``dependings_dict``
        is a dictionary with (key, value) pairs of (``depending_lib_path``,
        ``install_name``). ``depending_lib_path`` is the canonical path of the
        library depending on ``copied_lib_path``, ``install_name`` is the name
        that ``depending_lib_path`` uses to refer to ``copied_lib_path`` (in
        its install names).
    Returns
    -------
    copied_libs : dict
        Input `copied_libs` dict with any extra libraries and / or dependencies
        added.
    """
    if copied_libs is None:
        copied_libs = {}
    else:
        copied_libs = dict(copied_libs)
    done = False
    while not done:
        in_len = len(copied_libs)
        _copy_required(lib_path, copy_filt_func, copied_libs)
        done = len(copied_libs) == in_len
    return copied_libs | [
"def",
"copy_recurse",
"(",
"lib_path",
",",
"copy_filt_func",
"=",
"None",
",",
"copied_libs",
"=",
"None",
")",
":",
"if",
"copied_libs",
"is",
"None",
":",
"copied_libs",
"=",
"{",
"}",
"else",
":",
"copied_libs",
"=",
"dict",
"(",
"copied_libs",
")",
"done",
"=",
"False",
"while",
"not",
"done",
":",
"in_len",
"=",
"len",
"(",
"copied_libs",
")",
"_copy_required",
"(",
"lib_path",
",",
"copy_filt_func",
",",
"copied_libs",
")",
"done",
"=",
"len",
"(",
"copied_libs",
")",
"==",
"in_len",
"return",
"copied_libs"
]
| Analyze `lib_path` for library dependencies and copy libraries
`lib_path` is a directory containing libraries. The libraries might
themselves have dependencies. This function analyzes the dependencies and
copies library dependencies that match the filter `copy_filt_func`. It also
adjusts the depending libraries to use the copy. It keeps iterating over
`lib_path` until all matching dependencies (of dependencies of dependencies
...) have been copied.
Parameters
----------
lib_path : str
Directory containing libraries
copy_filt_func : None or callable, optional
If None, copy any library that found libraries depend on. If callable,
called on each depended library name; copy where
``copy_filt_func(libname)`` is True, don't copy otherwise
copied_libs : dict
Dict with (key, value) pairs of (``copied_lib_path``,
``dependings_dict``) where ``copied_lib_path`` is the canonical path of
a library that has been copied to `lib_path`, and ``dependings_dict``
is a dictionary with (key, value) pairs of (``depending_lib_path``,
``install_name``). ``depending_lib_path`` is the canonical path of the
library depending on ``copied_lib_path``, ``install_name`` is the name
that ``depending_lib_path`` uses to refer to ``copied_lib_path`` (in
its install names).
Returns
-------
copied_libs : dict
Input `copied_libs` dict with any extra libraries and / or dependencies
added. | [
"Analyze",
"lib_path",
"for",
"library",
"dependencies",
"and",
"copy",
"libraries"
]
| python | train |
acorg/dark-matter | dark/blast/params.py | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/blast/params.py#L12-L46 | def checkCompatibleParams(initialParams, laterParams):
    """
    Check a later set of BLAST parameters against those originally found.
    @param initialParams: A C{dict} with the originally encountered BLAST
        parameter settings.
    @param laterParams: A C{dict} with BLAST parameter settings encountered
        later.
    @return: A C{str} summary of the parameter differences if the parameter
        sets differ, else C{None}.
    """
    # Note that although the params contains a 'date', its value is empty
    # (as far as I've seen). This could become an issue one day if it
    # becomes non-empty and differs between JSON files that we cat
    # together. In that case we may need to be more specific in our params
    # compatible checking.
    err = []
    for param in initialParams:
        if param in laterParams:
            if (param not in VARIABLE_PARAMS and
                    initialParams[param] != laterParams[param]):
                err.append(
                    '\tParam %r initial value %r differs from '
                    'later value %r' % (param, initialParams[param],
                                        laterParams[param]))
        else:
            err.append('\t%r found in initial parameters, not found '
                       'in later parameters' % param)
    for param in laterParams:
        if param not in initialParams:
            err.append('\t%r found in later parameters, not seen in '
                       'initial parameters' % param)
    return 'Summary of differences:\n%s' % '\n'.join(err) if err else None | [
"def",
"checkCompatibleParams",
"(",
"initialParams",
",",
"laterParams",
")",
":",
"# Note that although the params contains a 'date', its value is empty",
"# (as far as I've seen). This could become an issue one day if it",
"# becomes non-empty and differs between JSON files that we cat",
"# together. In that case we may need to be more specific in our params",
"# compatible checking.",
"err",
"=",
"[",
"]",
"for",
"param",
"in",
"initialParams",
":",
"if",
"param",
"in",
"laterParams",
":",
"if",
"(",
"param",
"not",
"in",
"VARIABLE_PARAMS",
"and",
"initialParams",
"[",
"param",
"]",
"!=",
"laterParams",
"[",
"param",
"]",
")",
":",
"err",
".",
"append",
"(",
"'\\tParam %r initial value %r differs from '",
"'later value %r'",
"%",
"(",
"param",
",",
"initialParams",
"[",
"param",
"]",
",",
"laterParams",
"[",
"param",
"]",
")",
")",
"else",
":",
"err",
".",
"append",
"(",
"'\\t%r found in initial parameters, not found '",
"'in later parameters'",
"%",
"param",
")",
"for",
"param",
"in",
"laterParams",
":",
"if",
"param",
"not",
"in",
"initialParams",
":",
"err",
".",
"append",
"(",
"'\\t%r found in later parameters, not seen in '",
"'initial parameters'",
"%",
"param",
")",
"return",
"'Summary of differences:\\n%s'",
"%",
"'\\n'",
".",
"join",
"(",
"err",
")",
"if",
"err",
"else",
"None"
]
| Check a later set of BLAST parameters against those originally found.
@param initialParams: A C{dict} with the originally encountered BLAST
parameter settings.
@param laterParams: A C{dict} with BLAST parameter settings encountered
later.
@return: A C{str} summary of the parameter differences if the parameter
sets differ, else C{None}. | [
"Check",
"a",
"later",
"set",
"of",
"BLAST",
"parameters",
"against",
"those",
"originally",
"found",
"."
]
| python | train |
iotile/coretools | iotilebuild/iotile/build/utilities/template.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/utilities/template.py#L79-L106 | def render_template(template_name, info, out_path=None):
    """Render a template using the variables in info.
    You can optionally render to a file by passing out_path.
    Args:
        template_name (str): The name of the template to load. This must
            be a file in config/templates inside this package
        out_path (str): An optional path of where to save the output
            file, otherwise it is just returned as a string.
        info (dict): A dictionary of variables passed into the template to
            perform substitutions.
    Returns:
        string: The rendered template data.
    """
    env = Environment(loader=PackageLoader('iotile.build', 'config/templates'),
                      trim_blocks=True, lstrip_blocks=True)
    template = env.get_template(template_name)
    result = template.render(info)
    if out_path is not None:
        with open(out_path, 'wb') as outfile:
            outfile.write(result.encode('utf-8'))
    return result | [
"def",
"render_template",
"(",
"template_name",
",",
"info",
",",
"out_path",
"=",
"None",
")",
":",
"env",
"=",
"Environment",
"(",
"loader",
"=",
"PackageLoader",
"(",
"'iotile.build'",
",",
"'config/templates'",
")",
",",
"trim_blocks",
"=",
"True",
",",
"lstrip_blocks",
"=",
"True",
")",
"template",
"=",
"env",
".",
"get_template",
"(",
"template_name",
")",
"result",
"=",
"template",
".",
"render",
"(",
"info",
")",
"if",
"out_path",
"is",
"not",
"None",
":",
"with",
"open",
"(",
"out_path",
",",
"'wb'",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"result",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"return",
"result"
]
| Render a template using the variables in info.
You can optionally render to a file by passing out_path.
Args:
template_name (str): The name of the template to load. This must
be a file in config/templates inside this package
out_path (str): An optional path of where to save the output
file, otherwise it is just returned as a string.
info (dict): A dictionary of variables passed into the template to
perform substitutions.
Returns:
string: The rendered template data. | [
"Render",
"a",
"template",
"using",
"the",
"variables",
"in",
"info",
"."
]
| python | train |
saltstack/salt | salt/modules/boto_apigateway.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_apigateway.py#L998-L1025 | def create_api_method_response(restApiId, resourcePath, httpMethod, statusCode, responseParameters=None,
                               responseModels=None, region=None, key=None, keyid=None, profile=None):
    '''
    Create API method response for a method on a given resource in the given API
    CLI Example:
    .. code-block:: bash
        salt myminion boto_apigateway.create_api_method_response restApiId resourcePath httpMethod \\
            statusCode responseParameters='{"name", "True|False"}' responseModels='{"content-type", "model"}'
    '''
    try:
        resource = describe_api_resource(restApiId, resourcePath, region=region,
                                         key=key, keyid=keyid, profile=profile).get('resource')
        if resource:
            responseParameters = dict() if responseParameters is None else responseParameters
            responseModels = dict() if responseModels is None else responseModels
            conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
            response = conn.put_method_response(restApiId=restApiId, resourceId=resource['id'],
                                                httpMethod=httpMethod, statusCode=str(statusCode),  # future lint: disable=blacklisted-function
                                                responseParameters=responseParameters, responseModels=responseModels)
            return {'created': True, 'response': response}
        return {'created': False, 'error': 'no such resource'}
    except ClientError as e:
        return {'created': False, 'error': __utils__['boto3.get_error'](e)} | [
"def",
"create_api_method_response",
"(",
"restApiId",
",",
"resourcePath",
",",
"httpMethod",
",",
"statusCode",
",",
"responseParameters",
"=",
"None",
",",
"responseModels",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"try",
":",
"resource",
"=",
"describe_api_resource",
"(",
"restApiId",
",",
"resourcePath",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
".",
"get",
"(",
"'resource'",
")",
"if",
"resource",
":",
"responseParameters",
"=",
"dict",
"(",
")",
"if",
"responseParameters",
"is",
"None",
"else",
"responseParameters",
"responseModels",
"=",
"dict",
"(",
")",
"if",
"responseModels",
"is",
"None",
"else",
"responseModels",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"response",
"=",
"conn",
".",
"put_method_response",
"(",
"restApiId",
"=",
"restApiId",
",",
"resourceId",
"=",
"resource",
"[",
"'id'",
"]",
",",
"httpMethod",
"=",
"httpMethod",
",",
"statusCode",
"=",
"str",
"(",
"statusCode",
")",
",",
"# future lint: disable=blacklisted-function",
"responseParameters",
"=",
"responseParameters",
",",
"responseModels",
"=",
"responseModels",
")",
"return",
"{",
"'created'",
":",
"True",
",",
"'response'",
":",
"response",
"}",
"return",
"{",
"'created'",
":",
"False",
",",
"'error'",
":",
"'no such resource'",
"}",
"except",
"ClientError",
"as",
"e",
":",
"return",
"{",
"'created'",
":",
"False",
",",
"'error'",
":",
"__utils__",
"[",
"'boto3.get_error'",
"]",
"(",
"e",
")",
"}"
]
| Create API method response for a method on a given resource in the given API
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.create_api_method_response restApiId resourcePath httpMethod \\
statusCode responseParameters='{"name", "True|False"}' responseModels='{"content-type", "model"}' | [
"Create",
"API",
"method",
"response",
"for",
"a",
"method",
"on",
"a",
"given",
"resource",
"in",
"the",
"given",
"API"
]
| python | train |
Gorialis/jishaku | jishaku/cog.py | https://github.com/Gorialis/jishaku/blob/fc7c479b9d510ede189a929c8aa6f7c8ef7f9a6e/jishaku/cog.py#L379-L396 | async def jsk_debug(self, ctx: commands.Context, *, command_string: str):
    """
    Run a command timing execution and catching exceptions.
    """
    alt_ctx = await copy_context_with(ctx, content=ctx.prefix + command_string)
    if alt_ctx.command is None:
        return await ctx.send(f'Command "{alt_ctx.invoked_with}" is not found')
    start = time.perf_counter()
    async with ReplResponseReactor(ctx.message):
        with self.submit(ctx):
            await alt_ctx.command.invoke(alt_ctx)
    end = time.perf_counter()
    return await ctx.send(f"Command `{alt_ctx.command.qualified_name}` finished in {end - start:.3f}s.") | [
"async",
"def",
"jsk_debug",
"(",
"self",
",",
"ctx",
":",
"commands",
".",
"Context",
",",
"*",
",",
"command_string",
":",
"str",
")",
":",
"alt_ctx",
"=",
"await",
"copy_context_with",
"(",
"ctx",
",",
"content",
"=",
"ctx",
".",
"prefix",
"+",
"command_string",
")",
"if",
"alt_ctx",
".",
"command",
"is",
"None",
":",
"return",
"await",
"ctx",
".",
"send",
"(",
"f'Command \"{alt_ctx.invoked_with}\" is not found'",
")",
"start",
"=",
"time",
".",
"perf_counter",
"(",
")",
"async",
"with",
"ReplResponseReactor",
"(",
"ctx",
".",
"message",
")",
":",
"with",
"self",
".",
"submit",
"(",
"ctx",
")",
":",
"await",
"alt_ctx",
".",
"command",
".",
"invoke",
"(",
"alt_ctx",
")",
"end",
"=",
"time",
".",
"perf_counter",
"(",
")",
"return",
"await",
"ctx",
".",
"send",
"(",
"f\"Command `{alt_ctx.command.qualified_name}` finished in {end - start:.3f}s.\"",
")"
]
| Run a command timing execution and catching exceptions. | [
"Run",
"a",
"command",
"timing",
"execution",
"and",
"catching",
"exceptions",
"."
]
| python | train |
saltstack/salt | salt/fileserver/__init__.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/__init__.py#L847-L871 | def symlink_list(self, load):
'''
Return a list of symlinked files and dirs
'''
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
ret = {}
if 'saltenv' not in load:
return {}
if not isinstance(load['saltenv'], six.string_types):
load['saltenv'] = six.text_type(load['saltenv'])
for fsb in self.backends(load.pop('fsbackend', None)):
symlstr = '{0}.symlink_list'.format(fsb)
if symlstr in self.servers:
ret = self.servers[symlstr](load)
# some *fs do not handle prefix. Ensure it is filtered
prefix = load.get('prefix', '').strip('/')
if prefix != '':
ret = dict([
(x, y) for x, y in six.iteritems(ret) if x.startswith(prefix)
])
return ret | [
"def",
"symlink_list",
"(",
"self",
",",
"load",
")",
":",
"if",
"'env'",
"in",
"load",
":",
"# \"env\" is not supported; Use \"saltenv\".",
"load",
".",
"pop",
"(",
"'env'",
")",
"ret",
"=",
"{",
"}",
"if",
"'saltenv'",
"not",
"in",
"load",
":",
"return",
"{",
"}",
"if",
"not",
"isinstance",
"(",
"load",
"[",
"'saltenv'",
"]",
",",
"six",
".",
"string_types",
")",
":",
"load",
"[",
"'saltenv'",
"]",
"=",
"six",
".",
"text_type",
"(",
"load",
"[",
"'saltenv'",
"]",
")",
"for",
"fsb",
"in",
"self",
".",
"backends",
"(",
"load",
".",
"pop",
"(",
"'fsbackend'",
",",
"None",
")",
")",
":",
"symlstr",
"=",
"'{0}.symlink_list'",
".",
"format",
"(",
"fsb",
")",
"if",
"symlstr",
"in",
"self",
".",
"servers",
":",
"ret",
"=",
"self",
".",
"servers",
"[",
"symlstr",
"]",
"(",
"load",
")",
"# some *fs do not handle prefix. Ensure it is filtered",
"prefix",
"=",
"load",
".",
"get",
"(",
"'prefix'",
",",
"''",
")",
".",
"strip",
"(",
"'/'",
")",
"if",
"prefix",
"!=",
"''",
":",
"ret",
"=",
"dict",
"(",
"[",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"six",
".",
"iteritems",
"(",
"ret",
")",
"if",
"x",
".",
"startswith",
"(",
"prefix",
")",
"]",
")",
"return",
"ret"
]
| Return a list of symlinked files and dirs | [
"Return",
"a",
"list",
"of",
"symlinked",
"files",
"and",
"dirs"
]
| python | train |
ReFirmLabs/binwalk | src/binwalk/core/display.py | https://github.com/ReFirmLabs/binwalk/blob/a0c5315fd2bae167e5c3d8469ce95d5defc743c2/src/binwalk/core/display.py#L155-L178 | def _append_to_data_parts(self, data, start, end):
'''
Intelligently appends data to self.string_parts.
For use by self._format.
'''
try:
while data[start] == ' ':
start += 1
if start == end:
end = len(data[start:])
self.string_parts.append(data[start:end])
except KeyboardInterrupt as e:
raise e
except Exception:
try:
self.string_parts.append(data[start:])
except KeyboardInterrupt as e:
raise e
except Exception:
pass
return start | [
"def",
"_append_to_data_parts",
"(",
"self",
",",
"data",
",",
"start",
",",
"end",
")",
":",
"try",
":",
"while",
"data",
"[",
"start",
"]",
"==",
"' '",
":",
"start",
"+=",
"1",
"if",
"start",
"==",
"end",
":",
"end",
"=",
"len",
"(",
"data",
"[",
"start",
":",
"]",
")",
"self",
".",
"string_parts",
".",
"append",
"(",
"data",
"[",
"start",
":",
"end",
"]",
")",
"except",
"KeyboardInterrupt",
"as",
"e",
":",
"raise",
"e",
"except",
"Exception",
":",
"try",
":",
"self",
".",
"string_parts",
".",
"append",
"(",
"data",
"[",
"start",
":",
"]",
")",
"except",
"KeyboardInterrupt",
"as",
"e",
":",
"raise",
"e",
"except",
"Exception",
":",
"pass",
"return",
"start"
]
| Intelligently appends data to self.string_parts.
For use by self._format. | [
"Intelligently",
"appends",
"data",
"to",
"self",
".",
"string_parts",
".",
"For",
"use",
"by",
"self",
".",
"_format",
"."
]
| python | train |
zeth/inputs | inputs.py | https://github.com/zeth/inputs/blob/a46681dbf77d6ab07834f550e5855c1f50701f99/inputs.py#L99-L102 | def chunks(raw):
"""Yield successive EVENT_SIZE sized chunks from raw."""
for i in range(0, len(raw), EVENT_SIZE):
yield struct.unpack(EVENT_FORMAT, raw[i:i+EVENT_SIZE]) | [
"def",
"chunks",
"(",
"raw",
")",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"raw",
")",
",",
"EVENT_SIZE",
")",
":",
"yield",
"struct",
".",
"unpack",
"(",
"EVENT_FORMAT",
",",
"raw",
"[",
"i",
":",
"i",
"+",
"EVENT_SIZE",
"]",
")"
]
| Yield successive EVENT_SIZE sized chunks from raw. | [
"Yield",
"successive",
"EVENT_SIZE",
"sized",
"chunks",
"from",
"raw",
"."
]
| python | train |
empymod/empymod | empymod/utils.py | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/utils.py#L1864-L1877 | def _check_shape(var, name, shape, shape2=None):
r"""Check that <var> has shape <shape>; if false raise ValueError(name)"""
varshape = np.shape(var)
if shape != varshape:
if shape2:
if shape2 != varshape:
print('* ERROR :: Parameter ' + name + ' has wrong shape!' +
' : ' + str(varshape) + ' instead of ' + str(shape) +
'or' + str(shape2) + '.')
raise ValueError(name)
else:
print('* ERROR :: Parameter ' + name + ' has wrong shape! : ' +
str(varshape) + ' instead of ' + str(shape) + '.')
raise ValueError(name) | [
"def",
"_check_shape",
"(",
"var",
",",
"name",
",",
"shape",
",",
"shape2",
"=",
"None",
")",
":",
"varshape",
"=",
"np",
".",
"shape",
"(",
"var",
")",
"if",
"shape",
"!=",
"varshape",
":",
"if",
"shape2",
":",
"if",
"shape2",
"!=",
"varshape",
":",
"print",
"(",
"'* ERROR :: Parameter '",
"+",
"name",
"+",
"' has wrong shape!'",
"+",
"' : '",
"+",
"str",
"(",
"varshape",
")",
"+",
"' instead of '",
"+",
"str",
"(",
"shape",
")",
"+",
"'or'",
"+",
"str",
"(",
"shape2",
")",
"+",
"'.'",
")",
"raise",
"ValueError",
"(",
"name",
")",
"else",
":",
"print",
"(",
"'* ERROR :: Parameter '",
"+",
"name",
"+",
"' has wrong shape! : '",
"+",
"str",
"(",
"varshape",
")",
"+",
"' instead of '",
"+",
"str",
"(",
"shape",
")",
"+",
"'.'",
")",
"raise",
"ValueError",
"(",
"name",
")"
]
| r"""Check that <var> has shape <shape>; if false raise ValueError(name) | [
"r",
"Check",
"that",
"<var",
">",
"has",
"shape",
"<shape",
">",
";",
"if",
"false",
"raise",
"ValueError",
"(",
"name",
")"
]
| python | train |
molmod/molmod | molmod/unit_cells.py | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/unit_cells.py#L128-L147 | def volume(self):
"""The volume of the unit cell
The actual definition of the volume depends on the number of active
directions:
* num_active == 0 -- always -1
* num_active == 1 -- length of the cell vector
* num_active == 2 -- surface of the parallelogram
* num_active == 3 -- volume of the parallelepiped
"""
active = self.active_inactive[0]
if len(active) == 0:
return -1
elif len(active) == 1:
return np.linalg.norm(self.matrix[:, active[0]])
elif len(active) == 2:
return np.linalg.norm(np.cross(self.matrix[:, active[0]], self.matrix[:, active[1]]))
elif len(active) == 3:
return abs(np.linalg.det(self.matrix)) | [
"def",
"volume",
"(",
"self",
")",
":",
"active",
"=",
"self",
".",
"active_inactive",
"[",
"0",
"]",
"if",
"len",
"(",
"active",
")",
"==",
"0",
":",
"return",
"-",
"1",
"elif",
"len",
"(",
"active",
")",
"==",
"1",
":",
"return",
"np",
".",
"linalg",
".",
"norm",
"(",
"self",
".",
"matrix",
"[",
":",
",",
"active",
"[",
"0",
"]",
"]",
")",
"elif",
"len",
"(",
"active",
")",
"==",
"2",
":",
"return",
"np",
".",
"linalg",
".",
"norm",
"(",
"np",
".",
"cross",
"(",
"self",
".",
"matrix",
"[",
":",
",",
"active",
"[",
"0",
"]",
"]",
",",
"self",
".",
"matrix",
"[",
":",
",",
"active",
"[",
"1",
"]",
"]",
")",
")",
"elif",
"len",
"(",
"active",
")",
"==",
"3",
":",
"return",
"abs",
"(",
"np",
".",
"linalg",
".",
"det",
"(",
"self",
".",
"matrix",
")",
")"
]
| The volume of the unit cell
The actual definition of the volume depends on the number of active
directions:
* num_active == 0 -- always -1
* num_active == 1 -- length of the cell vector
* num_active == 2 -- surface of the parallelogram
* num_active == 3 -- volume of the parallelepiped | [
"The",
"volume",
"of",
"the",
"unit",
"cell"
]
| python | train |
saltstack/salt | salt/modules/aptly.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aptly.py#L259-L292 | def get_repo(name, config_path=_DEFAULT_CONFIG_PATH, with_packages=False):
'''
Get detailed information about a local package repository.
:param str name: The name of the local repository.
:param str config_path: The path to the configuration file for the aptly instance.
:param bool with_packages: Return a list of packages in the repo.
:return: A dictionary containing information about the repository.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' aptly.get_repo name="test-repo"
'''
_validate_config(config_path)
with_packages = six.text_type(bool(with_packages)).lower()
ret = dict()
cmd = ['repo', 'show', '-config={}'.format(config_path),
'-with-packages={}'.format(with_packages), name]
cmd_ret = _cmd_run(cmd)
ret = _parse_show_output(cmd_ret=cmd_ret)
if ret:
log.debug('Found repository: %s', name)
else:
log.debug('Unable to find repository: %s', name)
return ret | [
"def",
"get_repo",
"(",
"name",
",",
"config_path",
"=",
"_DEFAULT_CONFIG_PATH",
",",
"with_packages",
"=",
"False",
")",
":",
"_validate_config",
"(",
"config_path",
")",
"with_packages",
"=",
"six",
".",
"text_type",
"(",
"bool",
"(",
"with_packages",
")",
")",
".",
"lower",
"(",
")",
"ret",
"=",
"dict",
"(",
")",
"cmd",
"=",
"[",
"'repo'",
",",
"'show'",
",",
"'-config={}'",
".",
"format",
"(",
"config_path",
")",
",",
"'-with-packages={}'",
".",
"format",
"(",
"with_packages",
")",
",",
"name",
"]",
"cmd_ret",
"=",
"_cmd_run",
"(",
"cmd",
")",
"ret",
"=",
"_parse_show_output",
"(",
"cmd_ret",
"=",
"cmd_ret",
")",
"if",
"ret",
":",
"log",
".",
"debug",
"(",
"'Found repository: %s'",
",",
"name",
")",
"else",
":",
"log",
".",
"debug",
"(",
"'Unable to find repository: %s'",
",",
"name",
")",
"return",
"ret"
]
| Get detailed information about a local package repository.
:param str name: The name of the local repository.
:param str config_path: The path to the configuration file for the aptly instance.
:param bool with_packages: Return a list of packages in the repo.
:return: A dictionary containing information about the repository.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' aptly.get_repo name="test-repo" | [
"Get",
"detailed",
"information",
"about",
"a",
"local",
"package",
"repository",
"."
]
| python | train |
ska-sa/katcp-python | katcp/sensortree.py | https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/sensortree.py#L411-L438 | def register_sensor(self, child):
"""Register a sensor required by an aggregate sensor registered with
add_delayed.
Parameters
----------
child : :class:`katcp.Sensor` object
A child sensor required by one or more delayed aggregate sensors.
"""
child_name = self._get_sensor_reference(child)
if child_name in self._registered_sensors:
raise ValueError("Sensor %r already registered with aggregate"
" tree" % child)
self._registered_sensors[child_name] = child
completed = []
for parent, (_rule, names, sensors) in \
self._incomplete_aggregates.iteritems():
if child_name in names:
names.remove(child_name)
sensors.add(child)
if not names:
completed.append(parent)
for parent in completed:
rule_function, _names, sensors = \
self._incomplete_aggregates[parent]
del self._incomplete_aggregates[parent]
self.add(parent, rule_function, sensors) | [
"def",
"register_sensor",
"(",
"self",
",",
"child",
")",
":",
"child_name",
"=",
"self",
".",
"_get_sensor_reference",
"(",
"child",
")",
"if",
"child_name",
"in",
"self",
".",
"_registered_sensors",
":",
"raise",
"ValueError",
"(",
"\"Sensor %r already registered with aggregate\"",
"\" tree\"",
"%",
"child",
")",
"self",
".",
"_registered_sensors",
"[",
"child_name",
"]",
"=",
"child",
"completed",
"=",
"[",
"]",
"for",
"parent",
",",
"(",
"_rule",
",",
"names",
",",
"sensors",
")",
"in",
"self",
".",
"_incomplete_aggregates",
".",
"iteritems",
"(",
")",
":",
"if",
"child_name",
"in",
"names",
":",
"names",
".",
"remove",
"(",
"child_name",
")",
"sensors",
".",
"add",
"(",
"child",
")",
"if",
"not",
"names",
":",
"completed",
".",
"append",
"(",
"parent",
")",
"for",
"parent",
"in",
"completed",
":",
"rule_function",
",",
"_names",
",",
"sensors",
"=",
"self",
".",
"_incomplete_aggregates",
"[",
"parent",
"]",
"del",
"self",
".",
"_incomplete_aggregates",
"[",
"parent",
"]",
"self",
".",
"add",
"(",
"parent",
",",
"rule_function",
",",
"sensors",
")"
]
| Register a sensor required by an aggregate sensor registered with
add_delayed.
Parameters
----------
child : :class:`katcp.Sensor` object
A child sensor required by one or more delayed aggregate sensors. | [
"Register",
"a",
"sensor",
"required",
"by",
"an",
"aggregate",
"sensor",
"registered",
"with",
"add_delayed",
"."
]
| python | train |
TissueMAPS/TmClient | src/python/tmclient/api.py | https://github.com/TissueMAPS/TmClient/blob/6fb40622af19142cb5169a64b8c2965993a25ab1/src/python/tmclient/api.py#L1839-L1870 | def rename_feature(self, mapobject_type_name, name, new_name):
'''Renames a feature.
Parameters
----------
mapobject_type_name: str
name of the segmented objects type
name: str
name of the feature that should be renamed
new_name: str
name that should be given to the feature
See also
--------
:func:`tmserver.api.feature.update_feature`
:class:`tmlib.models.feature.Feature`
'''
logger.info(
'rename feature "%s" of experiment "%s", mapobject type "%s"',
name, self.experiment_name, mapobject_type_name
)
content = {
'name': new_name,
}
feature_id = self._get_feature_id(mapobject_type_name, name)
url = self._build_api_url(
'/experiments/{experiment_id}/features/{feature_id}'.format(
experiment_id=self._experiment_id, feature_id=feature_id
)
)
res = self._session.put(url, json=content)
res.raise_for_status() | [
"def",
"rename_feature",
"(",
"self",
",",
"mapobject_type_name",
",",
"name",
",",
"new_name",
")",
":",
"logger",
".",
"info",
"(",
"'rename feature \"%s\" of experiment \"%s\", mapobject type \"%s\"'",
",",
"name",
",",
"self",
".",
"experiment_name",
",",
"mapobject_type_name",
")",
"content",
"=",
"{",
"'name'",
":",
"new_name",
",",
"}",
"feature_id",
"=",
"self",
".",
"_get_feature_id",
"(",
"mapobject_type_name",
",",
"name",
")",
"url",
"=",
"self",
".",
"_build_api_url",
"(",
"'/experiments/{experiment_id}/features/{feature_id}'",
".",
"format",
"(",
"experiment_id",
"=",
"self",
".",
"_experiment_id",
",",
"feature_id",
"=",
"feature_id",
")",
")",
"res",
"=",
"self",
".",
"_session",
".",
"put",
"(",
"url",
",",
"json",
"=",
"content",
")",
"res",
".",
"raise_for_status",
"(",
")"
]
| Renames a feature.
Parameters
----------
mapobject_type_name: str
name of the segmented objects type
name: str
name of the feature that should be renamed
new_name: str
name that should be given to the feature
See also
--------
:func:`tmserver.api.feature.update_feature`
:class:`tmlib.models.feature.Feature` | [
"Renames",
"a",
"feature",
"."
]
| python | train |
limodou/uliweb | uliweb/lib/werkzeug/templates.py | https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/templates.py#L377-L395 | def render(self, *args, **kwargs):
"""This function accepts either a dict or some keyword arguments which
will then be the context the template is evaluated in. The return
value will be the rendered template.
:param context: the function accepts the same arguments as the
:class:`dict` constructor.
:return: the rendered template as string
"""
ns = self.default_context.copy()
if len(args) == 1 and isinstance(args[0], MultiDict):
ns.update(args[0].to_dict(flat=True))
else:
ns.update(dict(*args))
if kwargs:
ns.update(kwargs)
context = Context(ns, self.charset, self.errors)
exec self.code in context.runtime, context
return context.get_value(self.unicode_mode) | [
"def",
"render",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"ns",
"=",
"self",
".",
"default_context",
".",
"copy",
"(",
")",
"if",
"len",
"(",
"args",
")",
"==",
"1",
"and",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"MultiDict",
")",
":",
"ns",
".",
"update",
"(",
"args",
"[",
"0",
"]",
".",
"to_dict",
"(",
"flat",
"=",
"True",
")",
")",
"else",
":",
"ns",
".",
"update",
"(",
"dict",
"(",
"*",
"args",
")",
")",
"if",
"kwargs",
":",
"ns",
".",
"update",
"(",
"kwargs",
")",
"context",
"=",
"Context",
"(",
"ns",
",",
"self",
".",
"charset",
",",
"self",
".",
"errors",
")",
"exec",
"self",
".",
"code",
"in",
"context",
".",
"runtime",
",",
"context",
"return",
"context",
".",
"get_value",
"(",
"self",
".",
"unicode_mode",
")"
]
| This function accepts either a dict or some keyword arguments which
will then be the context the template is evaluated in. The return
value will be the rendered template.
:param context: the function accepts the same arguments as the
:class:`dict` constructor.
:return: the rendered template as string | [
"This",
"function",
"accepts",
"either",
"a",
"dict",
"or",
"some",
"keyword",
"arguments",
"which",
"will",
"then",
"be",
"the",
"context",
"the",
"template",
"is",
"evaluated",
"in",
".",
"The",
"return",
"value",
"will",
"be",
"the",
"rendered",
"template",
"."
]
| python | train |
svinota/mdns | mdns/zeroconf.py | https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1926-L1931 | def update_record(self, now, rec):
"""Used to notify listeners of new information that has updated
a record."""
for listener in self.listeners:
listener.update_record(self, now, rec)
self.notify_all() | [
"def",
"update_record",
"(",
"self",
",",
"now",
",",
"rec",
")",
":",
"for",
"listener",
"in",
"self",
".",
"listeners",
":",
"listener",
".",
"update_record",
"(",
"self",
",",
"now",
",",
"rec",
")",
"self",
".",
"notify_all",
"(",
")"
]
| Used to notify listeners of new information that has updated
a record. | [
"Used",
"to",
"notify",
"listeners",
"of",
"new",
"information",
"that",
"has",
"updated",
"a",
"record",
"."
]
| python | train |
ihmeuw/vivarium | src/vivarium/framework/randomness.py | https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/framework/randomness.py#L630-L657 | def get_randomness_stream(self, decision_point: str, for_initialization: bool=False) -> RandomnessStream:
"""Provides a new source of random numbers for the given decision point.
Parameters
----------
decision_point :
A unique identifier for a stream of random numbers. Typically represents
a decision that needs to be made each time step like 'moves_left' or
'gets_disease'.
for_initialization :
A flag indicating whether this stream is used to generate key initialization information
that will be used to identify simulants in the Common Random Number framework. These streams
cannot be copied and should only be used to generate the state table columns specified
in ``builder.configuration.randomness.key_columns``.
Raises
------
RandomnessError :
If another location in the simulation has already created a randomness stream
with the same identifier.
"""
if decision_point in self._decision_points:
raise RandomnessError(f"Two separate places are attempting to create "
f"the same randomness stream for {decision_point}")
stream = RandomnessStream(key=decision_point, clock=self._clock, seed=self._seed,
index_map=self._key_mapping, manager=self, for_initialization=for_initialization)
self._decision_points[decision_point] = stream
return stream | [
"def",
"get_randomness_stream",
"(",
"self",
",",
"decision_point",
":",
"str",
",",
"for_initialization",
":",
"bool",
"=",
"False",
")",
"->",
"RandomnessStream",
":",
"if",
"decision_point",
"in",
"self",
".",
"_decision_points",
":",
"raise",
"RandomnessError",
"(",
"f\"Two separate places are attempting to create \"",
"f\"the same randomness stream for {decision_point}\"",
")",
"stream",
"=",
"RandomnessStream",
"(",
"key",
"=",
"decision_point",
",",
"clock",
"=",
"self",
".",
"_clock",
",",
"seed",
"=",
"self",
".",
"_seed",
",",
"index_map",
"=",
"self",
".",
"_key_mapping",
",",
"manager",
"=",
"self",
",",
"for_initialization",
"=",
"for_initialization",
")",
"self",
".",
"_decision_points",
"[",
"decision_point",
"]",
"=",
"stream",
"return",
"stream"
]
| Provides a new source of random numbers for the given decision point.
Parameters
----------
decision_point :
A unique identifier for a stream of random numbers. Typically represents
a decision that needs to be made each time step like 'moves_left' or
'gets_disease'.
for_initialization :
A flag indicating whether this stream is used to generate key initialization information
that will be used to identify simulants in the Common Random Number framework. These streams
cannot be copied and should only be used to generate the state table columns specified
in ``builder.configuration.randomness.key_columns``.
Raises
------
RandomnessError :
If another location in the simulation has already created a randomness stream
with the same identifier. | [
"Provides",
"a",
"new",
"source",
"of",
"random",
"numbers",
"for",
"the",
"given",
"decision",
"point",
"."
]
| python | train |
saltstack/salt | salt/modules/yumpkg.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/yumpkg.py#L3289-L3320 | def _complete_transaction(cleanup_only, recursive, max_attempts, run_count, cmd_ret_list):
'''
.. versionadded:: Fluorine
Called from ``complete_transaction`` to protect the arguments
used for tail recursion, ``run_count`` and ``cmd_ret_list``.
'''
cmd = ['yum-complete-transaction']
if cleanup_only:
cmd.append('--cleanup-only')
cmd_ret_list.append(__salt__['cmd.run_all'](
cmd,
output_loglevel='trace',
python_shell=False
))
if (cmd_ret_list[-1]['retcode'] == salt.defaults.exitcodes.EX_OK and
recursive and
'No unfinished transactions left.' not in cmd_ret_list[-1]['stdout']):
if run_count >= max_attempts:
cmd_ret_list[-1]['retcode'] = salt.defaults.exitcodes.EX_GENERIC
log.error('Attempt %s/%s exceeded `max_attempts` for command: `%s`',
run_count, max_attempts, ' '.join(cmd))
raise CommandExecutionError('The `max_attempts` limit was reached and unfinished transactions remain.'
' You may wish to increase `max_attempts` or re-execute this module.',
info={'results': cmd_ret_list})
else:
return _complete_transaction(cleanup_only, recursive, max_attempts, run_count + 1, cmd_ret_list)
return cmd_ret_list | [
"def",
"_complete_transaction",
"(",
"cleanup_only",
",",
"recursive",
",",
"max_attempts",
",",
"run_count",
",",
"cmd_ret_list",
")",
":",
"cmd",
"=",
"[",
"'yum-complete-transaction'",
"]",
"if",
"cleanup_only",
":",
"cmd",
".",
"append",
"(",
"'--cleanup-only'",
")",
"cmd_ret_list",
".",
"append",
"(",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"cmd",
",",
"output_loglevel",
"=",
"'trace'",
",",
"python_shell",
"=",
"False",
")",
")",
"if",
"(",
"cmd_ret_list",
"[",
"-",
"1",
"]",
"[",
"'retcode'",
"]",
"==",
"salt",
".",
"defaults",
".",
"exitcodes",
".",
"EX_OK",
"and",
"recursive",
"and",
"'No unfinished transactions left.'",
"not",
"in",
"cmd_ret_list",
"[",
"-",
"1",
"]",
"[",
"'stdout'",
"]",
")",
":",
"if",
"run_count",
">=",
"max_attempts",
":",
"cmd_ret_list",
"[",
"-",
"1",
"]",
"[",
"'retcode'",
"]",
"=",
"salt",
".",
"defaults",
".",
"exitcodes",
".",
"EX_GENERIC",
"log",
".",
"error",
"(",
"'Attempt %s/%s exceeded `max_attempts` for command: `%s`'",
",",
"run_count",
",",
"max_attempts",
",",
"' '",
".",
"join",
"(",
"cmd",
")",
")",
"raise",
"CommandExecutionError",
"(",
"'The `max_attempts` limit was reached and unfinished transactions remain.'",
"' You may wish to increase `max_attempts` or re-execute this module.'",
",",
"info",
"=",
"{",
"'results'",
":",
"cmd_ret_list",
"}",
")",
"else",
":",
"return",
"_complete_transaction",
"(",
"cleanup_only",
",",
"recursive",
",",
"max_attempts",
",",
"run_count",
"+",
"1",
",",
"cmd_ret_list",
")",
"return",
"cmd_ret_list"
]
| .. versionadded:: Fluorine
Called from ``complete_transaction`` to protect the arguments
used for tail recursion, ``run_count`` and ``cmd_ret_list``. | [
"..",
"versionadded",
"::",
"Fluorine"
]
| python | train |
Jajcus/pyxmpp2 | pyxmpp2/ext/vcard.py | https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/vcard.py#L1243-L1257 | def as_xml(self,parent):
"""Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode`"""
n=parent.newChild(None,self.name.upper(),None)
if self.type:
n.newTextChild(None,"TYPE",self.type)
n.newTextChild(None,"CRED",binascii.b2a_base64(self.cred))
return n | [
"def",
"as_xml",
"(",
"self",
",",
"parent",
")",
":",
"n",
"=",
"parent",
".",
"newChild",
"(",
"None",
",",
"self",
".",
"name",
".",
"upper",
"(",
")",
",",
"None",
")",
"if",
"self",
".",
"type",
":",
"n",
".",
"newTextChild",
"(",
"None",
",",
"\"TYPE\"",
",",
"self",
".",
"type",
")",
"n",
".",
"newTextChild",
"(",
"None",
",",
"\"CRED\"",
",",
"binascii",
".",
"b2a_base64",
"(",
"self",
".",
"cred",
")",
")",
"return",
"n"
]
| Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode` | [
"Create",
"vcard",
"-",
"tmp",
"XML",
"representation",
"of",
"the",
"field",
"."
]
| python | valid |
kakwa/ldapcherry | ldapcherry/__init__.py | https://github.com/kakwa/ldapcherry/blob/b5e7cb6a44065abc30d164e72981b3713a172dda/ldapcherry/__init__.py#L205-L229 | def _init_auth(self, config):
""" Init authentication
@dict: configuration of ldapcherry
"""
self.auth_mode = self._get_param('auth', 'auth.mode', config)
if self.auth_mode in ['and', 'or', 'none']:
pass
elif self.auth_mode == 'custom':
# load custom auth module
auth_module = self._get_param('auth', 'auth.module', config)
auth = __import__(auth_module, globals(), locals(), ['Auth'], 0)
self.auth = auth.Auth(config['auth'], cherrypy.log)
else:
raise WrongParamValue(
'auth.mode',
'auth',
['and', 'or', 'none', 'custom'],
)
self.roles_file = self._get_param('roles', 'roles.file', config)
cherrypy.log.error(
msg="loading roles file '%(file)s'" % {'file': self.roles_file},
severity=logging.DEBUG
)
self.roles = Roles(self.roles_file) | [
"def",
"_init_auth",
"(",
"self",
",",
"config",
")",
":",
"self",
".",
"auth_mode",
"=",
"self",
".",
"_get_param",
"(",
"'auth'",
",",
"'auth.mode'",
",",
"config",
")",
"if",
"self",
".",
"auth_mode",
"in",
"[",
"'and'",
",",
"'or'",
",",
"'none'",
"]",
":",
"pass",
"elif",
"self",
".",
"auth_mode",
"==",
"'custom'",
":",
"# load custom auth module",
"auth_module",
"=",
"self",
".",
"_get_param",
"(",
"'auth'",
",",
"'auth.module'",
",",
"config",
")",
"auth",
"=",
"__import__",
"(",
"auth_module",
",",
"globals",
"(",
")",
",",
"locals",
"(",
")",
",",
"[",
"'Auth'",
"]",
",",
"0",
")",
"self",
".",
"auth",
"=",
"auth",
".",
"Auth",
"(",
"config",
"[",
"'auth'",
"]",
",",
"cherrypy",
".",
"log",
")",
"else",
":",
"raise",
"WrongParamValue",
"(",
"'auth.mode'",
",",
"'auth'",
",",
"[",
"'and'",
",",
"'or'",
",",
"'none'",
",",
"'custom'",
"]",
",",
")",
"self",
".",
"roles_file",
"=",
"self",
".",
"_get_param",
"(",
"'roles'",
",",
"'roles.file'",
",",
"config",
")",
"cherrypy",
".",
"log",
".",
"error",
"(",
"msg",
"=",
"\"loading roles file '%(file)s'\"",
"%",
"{",
"'file'",
":",
"self",
".",
"roles_file",
"}",
",",
"severity",
"=",
"logging",
".",
"DEBUG",
")",
"self",
".",
"roles",
"=",
"Roles",
"(",
"self",
".",
"roles_file",
")"
]
| Init authentication
@dict: configuration of ldapcherry | [
"Init",
"authentication"
]
| python | train |
marcomusy/vtkplotter | vtkplotter/actors.py | https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/actors.py#L1773-L1806 | def addCurvatureScalars(self, method=0, lut=None):
"""
Build an ``Actor`` that contains the color coded surface
curvature calculated in three different ways.
:param int method: 0-gaussian, 1-mean, 2-max, 3-min curvature.
:param float lut: optional look up table.
:Example:
.. code-block:: python
from vtkplotter import *
t = Torus().addCurvatureScalars()
show(t)
|curvature|
"""
curve = vtk.vtkCurvatures()
curve.SetInputData(self.poly)
curve.SetCurvatureType(method)
curve.Update()
self.poly = curve.GetOutput()
scls = self.poly.GetPointData().GetScalars().GetRange()
print("curvature(): scalar range is", scls)
self.mapper.SetInputData(self.poly)
if lut:
self.mapper.SetLookupTable(lut)
self.mapper.SetUseLookupTableScalarRange(1)
self.mapper.Update()
self.Modified()
self.mapper.ScalarVisibilityOn()
return self | [
"def",
"addCurvatureScalars",
"(",
"self",
",",
"method",
"=",
"0",
",",
"lut",
"=",
"None",
")",
":",
"curve",
"=",
"vtk",
".",
"vtkCurvatures",
"(",
")",
"curve",
".",
"SetInputData",
"(",
"self",
".",
"poly",
")",
"curve",
".",
"SetCurvatureType",
"(",
"method",
")",
"curve",
".",
"Update",
"(",
")",
"self",
".",
"poly",
"=",
"curve",
".",
"GetOutput",
"(",
")",
"scls",
"=",
"self",
".",
"poly",
".",
"GetPointData",
"(",
")",
".",
"GetScalars",
"(",
")",
".",
"GetRange",
"(",
")",
"print",
"(",
"\"curvature(): scalar range is\"",
",",
"scls",
")",
"self",
".",
"mapper",
".",
"SetInputData",
"(",
"self",
".",
"poly",
")",
"if",
"lut",
":",
"self",
".",
"mapper",
".",
"SetLookupTable",
"(",
"lut",
")",
"self",
".",
"mapper",
".",
"SetUseLookupTableScalarRange",
"(",
"1",
")",
"self",
".",
"mapper",
".",
"Update",
"(",
")",
"self",
".",
"Modified",
"(",
")",
"self",
".",
"mapper",
".",
"ScalarVisibilityOn",
"(",
")",
"return",
"self"
]
| Build an ``Actor`` that contains the color coded surface
curvature calculated in three different ways.
:param int method: 0-gaussian, 1-mean, 2-max, 3-min curvature.
:param float lut: optional look up table.
:Example:
.. code-block:: python
from vtkplotter import *
t = Torus().addCurvatureScalars()
show(t)
|curvature| | [
"Build",
"an",
"Actor",
"that",
"contains",
"the",
"color",
"coded",
"surface",
"curvature",
"calculated",
"in",
"three",
"different",
"ways",
"."
]
| python | train |
fumitoh/modelx | modelx/core/system.py | https://github.com/fumitoh/modelx/blob/0180da34d052c44fb94dab9e115e218bbebfc9c3/modelx/core/system.py#L355-L373 | def custom_showtraceback(
self,
exc_tuple=None,
filename=None,
tb_offset=None,
exception_only=False,
running_compiled_code=False,
):
"""Custom showtraceback for monkey-patching IPython's InteractiveShell
https://stackoverflow.com/questions/1261668/cannot-override-sys-excepthook
"""
self.default_showtraceback(
exc_tuple,
filename,
tb_offset,
exception_only=True,
running_compiled_code=running_compiled_code,
) | [
"def",
"custom_showtraceback",
"(",
"self",
",",
"exc_tuple",
"=",
"None",
",",
"filename",
"=",
"None",
",",
"tb_offset",
"=",
"None",
",",
"exception_only",
"=",
"False",
",",
"running_compiled_code",
"=",
"False",
",",
")",
":",
"self",
".",
"default_showtraceback",
"(",
"exc_tuple",
",",
"filename",
",",
"tb_offset",
",",
"exception_only",
"=",
"True",
",",
"running_compiled_code",
"=",
"running_compiled_code",
",",
")"
]
| Custom showtraceback for monkey-patching IPython's InteractiveShell
https://stackoverflow.com/questions/1261668/cannot-override-sys-excepthook | [
"Custom",
"showtraceback",
"for",
"monkey",
"-",
"patching",
"IPython",
"s",
"InteractiveShell"
]
| python | valid |
RiotGames/cloud-inquisitor | backend/cloud_inquisitor/utils.py | https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/utils.py#L549-L585 | def send_notification(*, subsystem, recipients, subject, body_html, body_text):
"""Method to send a notification. A plugin may use only part of the information, but all fields are required.
Args:
subsystem (`str`): Name of the subsystem originating the notification
recipients (`list` of :obj:`NotificationContact`): List of recipients
subject (`str`): Subject / title of the notification
body_html (`str)`: HTML formatted version of the message
body_text (`str`): Text formatted version of the message
Returns:
`None`
"""
from cloud_inquisitor import CINQ_PLUGINS
if not body_html and not body_text:
raise ValueError('body_html or body_text must be provided')
# Make sure that we don't have any duplicate recipients
recipients = list(set(recipients))
notifiers = map(lambda plugin: plugin.load(), CINQ_PLUGINS['cloud_inquisitor.plugins.notifiers']['plugins'])
for cls in filter(lambda x: x.enabled(), notifiers):
for recipient in recipients:
if isinstance(recipient, NotificationContact):
if recipient.type == cls.notifier_type:
try:
notifier = cls()
notifier.notify(subsystem, recipient.value, subject, body_html, body_text)
except Exception:
log.exception('Failed sending notification for {}/{}'.format(
recipient.type,
recipient.value
))
else:
log.warning('Unexpected recipient {}'.format(recipient)) | [
"def",
"send_notification",
"(",
"*",
",",
"subsystem",
",",
"recipients",
",",
"subject",
",",
"body_html",
",",
"body_text",
")",
":",
"from",
"cloud_inquisitor",
"import",
"CINQ_PLUGINS",
"if",
"not",
"body_html",
"and",
"not",
"body_text",
":",
"raise",
"ValueError",
"(",
"'body_html or body_text must be provided'",
")",
"# Make sure that we don't have any duplicate recipients",
"recipients",
"=",
"list",
"(",
"set",
"(",
"recipients",
")",
")",
"notifiers",
"=",
"map",
"(",
"lambda",
"plugin",
":",
"plugin",
".",
"load",
"(",
")",
",",
"CINQ_PLUGINS",
"[",
"'cloud_inquisitor.plugins.notifiers'",
"]",
"[",
"'plugins'",
"]",
")",
"for",
"cls",
"in",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"enabled",
"(",
")",
",",
"notifiers",
")",
":",
"for",
"recipient",
"in",
"recipients",
":",
"if",
"isinstance",
"(",
"recipient",
",",
"NotificationContact",
")",
":",
"if",
"recipient",
".",
"type",
"==",
"cls",
".",
"notifier_type",
":",
"try",
":",
"notifier",
"=",
"cls",
"(",
")",
"notifier",
".",
"notify",
"(",
"subsystem",
",",
"recipient",
".",
"value",
",",
"subject",
",",
"body_html",
",",
"body_text",
")",
"except",
"Exception",
":",
"log",
".",
"exception",
"(",
"'Failed sending notification for {}/{}'",
".",
"format",
"(",
"recipient",
".",
"type",
",",
"recipient",
".",
"value",
")",
")",
"else",
":",
"log",
".",
"warning",
"(",
"'Unexpected recipient {}'",
".",
"format",
"(",
"recipient",
")",
")"
]
| Method to send a notification. A plugin may use only part of the information, but all fields are required.
Args:
subsystem (`str`): Name of the subsystem originating the notification
recipients (`list` of :obj:`NotificationContact`): List of recipients
subject (`str`): Subject / title of the notification
body_html (`str)`: HTML formatted version of the message
body_text (`str`): Text formatted version of the message
Returns:
`None` | [
"Method",
"to",
"send",
"a",
"notification",
".",
"A",
"plugin",
"may",
"use",
"only",
"part",
"of",
"the",
"information",
"but",
"all",
"fields",
"are",
"required",
"."
]
| python | train |
AnalogJ/lexicon | lexicon/providers/auto.py | https://github.com/AnalogJ/lexicon/blob/9330b871988753cad44fe2876a217b4c67b1fa0e/lexicon/providers/auto.py#L157-L206 | def authenticate(self): # pylint: disable=too-many-locals
"""
Launch the authentication process: for 'auto' provider, it means first to find the relevant
provider, then call its authenticate() method. Almost every subsequent operation will then
be delegated to that provider.
"""
mapping_override = self.config.resolve('lexicon:auto:mapping_override')
mapping_override_processed = {}
if mapping_override:
for one_mapping in mapping_override.split(','):
one_mapping_processed = one_mapping.split(':')
mapping_override_processed[one_mapping_processed[0]
] = one_mapping_processed[1]
override_provider = mapping_override_processed.get(self.domain)
if override_provider:
provider = [
element for element in AVAILABLE_PROVIDERS.items()
if element[0] == override_provider][0]
LOGGER.info('Provider authoritatively mapped for domain %s: %s.',
self.domain, provider.__name__)
(provider_name, provider_module) = provider
else:
(provider_name, provider_module) = _relevant_provider_for_domain(self.domain)
LOGGER.info('Provider discovered for domain %s: %s.',
self.domain, provider_name)
new_config = ConfigResolver()
new_config.with_dict({'lexicon:provider_name': provider_name})
target_prefix = 'auto_{0}_'.format(provider_name)
for config_source in self.config._config_sources: # pylint: disable=protected-access
if not isinstance(config_source, ArgsConfigSource):
new_config.with_config_source(config_source)
else:
# ArgsConfigSource needs to be reprocessed to rescope the provided
# args to the delegate provider
new_dict = {}
for key, value in config_source._parameters.items(): # pylint: disable=protected-access
if key.startswith(target_prefix):
new_param_name = re.sub(
'^{0}'.format(target_prefix), '', key)
new_dict['lexicon:{0}:{1}'.format(
provider_name, new_param_name)] = value
elif not key.startswith('auto_'):
new_dict['lexicon:{0}'.format(key)] = value
new_config.with_dict(new_dict)
self.proxy_provider = provider_module.Provider(new_config)
self.proxy_provider.authenticate() | [
"def",
"authenticate",
"(",
"self",
")",
":",
"# pylint: disable=too-many-locals",
"mapping_override",
"=",
"self",
".",
"config",
".",
"resolve",
"(",
"'lexicon:auto:mapping_override'",
")",
"mapping_override_processed",
"=",
"{",
"}",
"if",
"mapping_override",
":",
"for",
"one_mapping",
"in",
"mapping_override",
".",
"split",
"(",
"','",
")",
":",
"one_mapping_processed",
"=",
"one_mapping",
".",
"split",
"(",
"':'",
")",
"mapping_override_processed",
"[",
"one_mapping_processed",
"[",
"0",
"]",
"]",
"=",
"one_mapping_processed",
"[",
"1",
"]",
"override_provider",
"=",
"mapping_override_processed",
".",
"get",
"(",
"self",
".",
"domain",
")",
"if",
"override_provider",
":",
"provider",
"=",
"[",
"element",
"for",
"element",
"in",
"AVAILABLE_PROVIDERS",
".",
"items",
"(",
")",
"if",
"element",
"[",
"0",
"]",
"==",
"override_provider",
"]",
"[",
"0",
"]",
"LOGGER",
".",
"info",
"(",
"'Provider authoritatively mapped for domain %s: %s.'",
",",
"self",
".",
"domain",
",",
"provider",
".",
"__name__",
")",
"(",
"provider_name",
",",
"provider_module",
")",
"=",
"provider",
"else",
":",
"(",
"provider_name",
",",
"provider_module",
")",
"=",
"_relevant_provider_for_domain",
"(",
"self",
".",
"domain",
")",
"LOGGER",
".",
"info",
"(",
"'Provider discovered for domain %s: %s.'",
",",
"self",
".",
"domain",
",",
"provider_name",
")",
"new_config",
"=",
"ConfigResolver",
"(",
")",
"new_config",
".",
"with_dict",
"(",
"{",
"'lexicon:provider_name'",
":",
"provider_name",
"}",
")",
"target_prefix",
"=",
"'auto_{0}_'",
".",
"format",
"(",
"provider_name",
")",
"for",
"config_source",
"in",
"self",
".",
"config",
".",
"_config_sources",
":",
"# pylint: disable=protected-access",
"if",
"not",
"isinstance",
"(",
"config_source",
",",
"ArgsConfigSource",
")",
":",
"new_config",
".",
"with_config_source",
"(",
"config_source",
")",
"else",
":",
"# ArgsConfigSource needs to be reprocessed to rescope the provided",
"# args to the delegate provider",
"new_dict",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"config_source",
".",
"_parameters",
".",
"items",
"(",
")",
":",
"# pylint: disable=protected-access",
"if",
"key",
".",
"startswith",
"(",
"target_prefix",
")",
":",
"new_param_name",
"=",
"re",
".",
"sub",
"(",
"'^{0}'",
".",
"format",
"(",
"target_prefix",
")",
",",
"''",
",",
"key",
")",
"new_dict",
"[",
"'lexicon:{0}:{1}'",
".",
"format",
"(",
"provider_name",
",",
"new_param_name",
")",
"]",
"=",
"value",
"elif",
"not",
"key",
".",
"startswith",
"(",
"'auto_'",
")",
":",
"new_dict",
"[",
"'lexicon:{0}'",
".",
"format",
"(",
"key",
")",
"]",
"=",
"value",
"new_config",
".",
"with_dict",
"(",
"new_dict",
")",
"self",
".",
"proxy_provider",
"=",
"provider_module",
".",
"Provider",
"(",
"new_config",
")",
"self",
".",
"proxy_provider",
".",
"authenticate",
"(",
")"
]
| Launch the authentication process: for 'auto' provider, it means first to find the relevant
provider, then call its authenticate() method. Almost every subsequent operation will then
be delegated to that provider. | [
"Launch",
"the",
"authentication",
"process",
":",
"for",
"auto",
"provider",
"it",
"means",
"first",
"to",
"find",
"the",
"relevant",
"provider",
"then",
"call",
"its",
"authenticate",
"()",
"method",
".",
"Almost",
"every",
"subsequent",
"operation",
"will",
"then",
"be",
"delegated",
"to",
"that",
"provider",
"."
]
| python | train |
vatlab/SoS | src/sos/actions_r.py | https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/actions_r.py#L41-L124 | def Rmarkdown(script=None,
input=None,
output=None,
args='{input:r}, output_file={output:ar}',
**kwargs):
'''Convert input file to output using Rmarkdown
The input can be specified in three ways:
1. instant script, which is assumed to be in md format
Rmarkdown: output='report.html'
script
2. one or more input files. The format is determined by extension of input file
Rmarkdown(input, output='report.html')
3. input file specified by command line option `-r` .
Rmarkdown(output='report.html')
If no output is specified, it is assumed to be in html format
and is written to standard output.
You can specify more options using the args parameter of the action. The default value
of args is `${input!r} --output ${output!ar}'
'''
if not R_library('rmarkdown').target_exists():
raise RuntimeError('Library rmarkdown does not exist')
input = sos_targets(collect_input(script, input))
output = sos_targets(output)
if len(output) == 0:
write_to_stdout = True
output = sos_targets(
tempfile.NamedTemporaryFile(
mode='w+t', suffix='.html', delete=False).name)
else:
write_to_stdout = False
#
ret = 1
try:
# render(input, output_format = NULL, output_file = NULL, output_dir = NULL,
# output_options = NULL, intermediates_dir = NULL,
# runtime = c("auto", "static", "shiny"),
# clean = TRUE, params = NULL, knit_meta = NULL, envir = parent.frame(),
# run_Rmarkdown = TRUE, quiet = FALSE, encoding = getOption("encoding"))
cmd = interpolate(f'Rscript -e "rmarkdown::render({args})"', {
'input': input,
'output': output
})
if 'ACTION' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('ACTION', f'Running command "{cmd}"')
if env.config['run_mode'] == 'interactive':
# need to catch output and send to python output, which will in trun be hijacked by SoS notebook
p = subprocess.Popen(
cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
#pid = p.pid
out, err = p.communicate()
sys.stdout.write(out.decode())
sys.stderr.write(err.decode())
ret = p.returncode
else:
p = subprocess.Popen(cmd, shell=True)
#pid = p.pid
ret = p.wait()
except Exception as e:
env.logger.error(e)
if ret != 0:
temp_file = os.path.join('.sos', f'{"Rmarkdown"}_{os.getpid()}.md')
shutil.copyfile(str(input), temp_file)
cmd = interpolate(f'Rscript -e "rmarkdown::render({args})"', {
'input': input,
'output': sos_targets(temp_file)
})
raise RuntimeError(
f'Failed to execute script. Please use command \n"{cmd}"\nunder {os.getcwd()} to test it.'
)
if write_to_stdout:
with open(str(output[0])) as out:
sys.stdout.write(out.read())
else:
env.logger.info(f'Report saved to {output}') | [
"def",
"Rmarkdown",
"(",
"script",
"=",
"None",
",",
"input",
"=",
"None",
",",
"output",
"=",
"None",
",",
"args",
"=",
"'{input:r}, output_file={output:ar}'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"R_library",
"(",
"'rmarkdown'",
")",
".",
"target_exists",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"'Library rmarkdown does not exist'",
")",
"input",
"=",
"sos_targets",
"(",
"collect_input",
"(",
"script",
",",
"input",
")",
")",
"output",
"=",
"sos_targets",
"(",
"output",
")",
"if",
"len",
"(",
"output",
")",
"==",
"0",
":",
"write_to_stdout",
"=",
"True",
"output",
"=",
"sos_targets",
"(",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'w+t'",
",",
"suffix",
"=",
"'.html'",
",",
"delete",
"=",
"False",
")",
".",
"name",
")",
"else",
":",
"write_to_stdout",
"=",
"False",
"#",
"ret",
"=",
"1",
"try",
":",
"# render(input, output_format = NULL, output_file = NULL, output_dir = NULL,",
"# output_options = NULL, intermediates_dir = NULL,",
"# runtime = c(\"auto\", \"static\", \"shiny\"),",
"# clean = TRUE, params = NULL, knit_meta = NULL, envir = parent.frame(),",
"# run_Rmarkdown = TRUE, quiet = FALSE, encoding = getOption(\"encoding\"))",
"cmd",
"=",
"interpolate",
"(",
"f'Rscript -e \"rmarkdown::render({args})\"'",
",",
"{",
"'input'",
":",
"input",
",",
"'output'",
":",
"output",
"}",
")",
"if",
"'ACTION'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
"or",
"'ALL'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
":",
"env",
".",
"log_to_file",
"(",
"'ACTION'",
",",
"f'Running command \"{cmd}\"'",
")",
"if",
"env",
".",
"config",
"[",
"'run_mode'",
"]",
"==",
"'interactive'",
":",
"# need to catch output and send to python output, which will in trun be hijacked by SoS notebook",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"#pid = p.pid",
"out",
",",
"err",
"=",
"p",
".",
"communicate",
"(",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"out",
".",
"decode",
"(",
")",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"err",
".",
"decode",
"(",
")",
")",
"ret",
"=",
"p",
".",
"returncode",
"else",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
")",
"#pid = p.pid",
"ret",
"=",
"p",
".",
"wait",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"env",
".",
"logger",
".",
"error",
"(",
"e",
")",
"if",
"ret",
"!=",
"0",
":",
"temp_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'.sos'",
",",
"f'{\"Rmarkdown\"}_{os.getpid()}.md'",
")",
"shutil",
".",
"copyfile",
"(",
"str",
"(",
"input",
")",
",",
"temp_file",
")",
"cmd",
"=",
"interpolate",
"(",
"f'Rscript -e \"rmarkdown::render({args})\"'",
",",
"{",
"'input'",
":",
"input",
",",
"'output'",
":",
"sos_targets",
"(",
"temp_file",
")",
"}",
")",
"raise",
"RuntimeError",
"(",
"f'Failed to execute script. Please use command \\n\"{cmd}\"\\nunder {os.getcwd()} to test it.'",
")",
"if",
"write_to_stdout",
":",
"with",
"open",
"(",
"str",
"(",
"output",
"[",
"0",
"]",
")",
")",
"as",
"out",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"out",
".",
"read",
"(",
")",
")",
"else",
":",
"env",
".",
"logger",
".",
"info",
"(",
"f'Report saved to {output}'",
")"
]
| Convert input file to output using Rmarkdown
The input can be specified in three ways:
1. instant script, which is assumed to be in md format
Rmarkdown: output='report.html'
script
2. one or more input files. The format is determined by extension of input file
Rmarkdown(input, output='report.html')
3. input file specified by command line option `-r` .
Rmarkdown(output='report.html')
If no output is specified, it is assumed to be in html format
and is written to standard output.
You can specify more options using the args parameter of the action. The default value
of args is `${input!r} --output ${output!ar}' | [
"Convert",
"input",
"file",
"to",
"output",
"using",
"Rmarkdown"
]
| python | train |
ARMmbed/yotta | yotta/lib/utils.py | https://github.com/ARMmbed/yotta/blob/56bc1e56c602fa20307b23fe27518e9cd6c11af1/yotta/lib/utils.py#L9-L20 | def islast(generator):
''' indicate whether the current item is the last one in a generator
'''
next_x = None
first = True
for x in generator:
if not first:
yield (next_x, False)
next_x = x
first = False
if not first:
yield (next_x, True) | [
"def",
"islast",
"(",
"generator",
")",
":",
"next_x",
"=",
"None",
"first",
"=",
"True",
"for",
"x",
"in",
"generator",
":",
"if",
"not",
"first",
":",
"yield",
"(",
"next_x",
",",
"False",
")",
"next_x",
"=",
"x",
"first",
"=",
"False",
"if",
"not",
"first",
":",
"yield",
"(",
"next_x",
",",
"True",
")"
]
| indicate whether the current item is the last one in a generator | [
"indicate",
"whether",
"the",
"current",
"item",
"is",
"the",
"last",
"one",
"in",
"a",
"generator"
]
| python | valid |
tanghaibao/jcvi | jcvi/assembly/hic.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/hic.py#L1375-L1418 | def movieframe(args):
"""
%prog movieframe tour test.clm contigs.ref.anchors
Draw heatmap and synteny in the same plot.
"""
p = OptionParser(movieframe.__doc__)
p.add_option("--label", help="Figure title")
p.set_beds()
p.set_outfile(outfile=None)
opts, args, iopts = p.set_image_options(args, figsize="16x8",
style="white", cmap="coolwarm",
format="png", dpi=120)
if len(args) != 3:
sys.exit(not p.print_help())
tour, clmfile, anchorsfile = args
tour = tour.split(",")
image_name = opts.outfile or ("movieframe." + iopts.format)
label = opts.label or op.basename(image_name).rsplit(".", 1)[0]
clm = CLMFile(clmfile)
totalbins, bins, breaks = make_bins(tour, clm.tig_to_size)
M = read_clm(clm, totalbins, bins)
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1]) # whole canvas
ax1 = fig.add_axes([.05, .1, .4, .8]) # heatmap
ax2 = fig.add_axes([.55, .1, .4, .8]) # dot plot
ax2_root = fig.add_axes([.5, 0, .5, 1]) # dot plot canvas
# Left axis: heatmap
plot_heatmap(ax1, M, breaks, iopts)
# Right axis: synteny
qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts,
sorted=False)
dotplot(anchorsfile, qbed, sbed, fig, ax2_root, ax2, sep=False, title="")
root.text(.5, .98, clm.name, color="g", ha="center", va="center")
root.text(.5, .95, label, color="darkslategray", ha="center", va="center")
normalize_axes(root)
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | [
"def",
"movieframe",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"movieframe",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--label\"",
",",
"help",
"=",
"\"Figure title\"",
")",
"p",
".",
"set_beds",
"(",
")",
"p",
".",
"set_outfile",
"(",
"outfile",
"=",
"None",
")",
"opts",
",",
"args",
",",
"iopts",
"=",
"p",
".",
"set_image_options",
"(",
"args",
",",
"figsize",
"=",
"\"16x8\"",
",",
"style",
"=",
"\"white\"",
",",
"cmap",
"=",
"\"coolwarm\"",
",",
"format",
"=",
"\"png\"",
",",
"dpi",
"=",
"120",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"3",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"tour",
",",
"clmfile",
",",
"anchorsfile",
"=",
"args",
"tour",
"=",
"tour",
".",
"split",
"(",
"\",\"",
")",
"image_name",
"=",
"opts",
".",
"outfile",
"or",
"(",
"\"movieframe.\"",
"+",
"iopts",
".",
"format",
")",
"label",
"=",
"opts",
".",
"label",
"or",
"op",
".",
"basename",
"(",
"image_name",
")",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"clm",
"=",
"CLMFile",
"(",
"clmfile",
")",
"totalbins",
",",
"bins",
",",
"breaks",
"=",
"make_bins",
"(",
"tour",
",",
"clm",
".",
"tig_to_size",
")",
"M",
"=",
"read_clm",
"(",
"clm",
",",
"totalbins",
",",
"bins",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
"1",
",",
"(",
"iopts",
".",
"w",
",",
"iopts",
".",
"h",
")",
")",
"root",
"=",
"fig",
".",
"add_axes",
"(",
"[",
"0",
",",
"0",
",",
"1",
",",
"1",
"]",
")",
"# whole canvas",
"ax1",
"=",
"fig",
".",
"add_axes",
"(",
"[",
".05",
",",
".1",
",",
".4",
",",
".8",
"]",
")",
"# heatmap",
"ax2",
"=",
"fig",
".",
"add_axes",
"(",
"[",
".55",
",",
".1",
",",
".4",
",",
".8",
"]",
")",
"# dot plot",
"ax2_root",
"=",
"fig",
".",
"add_axes",
"(",
"[",
".5",
",",
"0",
",",
".5",
",",
"1",
"]",
")",
"# dot plot canvas",
"# Left axis: heatmap",
"plot_heatmap",
"(",
"ax1",
",",
"M",
",",
"breaks",
",",
"iopts",
")",
"# Right axis: synteny",
"qbed",
",",
"sbed",
",",
"qorder",
",",
"sorder",
",",
"is_self",
"=",
"check_beds",
"(",
"anchorsfile",
",",
"p",
",",
"opts",
",",
"sorted",
"=",
"False",
")",
"dotplot",
"(",
"anchorsfile",
",",
"qbed",
",",
"sbed",
",",
"fig",
",",
"ax2_root",
",",
"ax2",
",",
"sep",
"=",
"False",
",",
"title",
"=",
"\"\"",
")",
"root",
".",
"text",
"(",
".5",
",",
".98",
",",
"clm",
".",
"name",
",",
"color",
"=",
"\"g\"",
",",
"ha",
"=",
"\"center\"",
",",
"va",
"=",
"\"center\"",
")",
"root",
".",
"text",
"(",
".5",
",",
".95",
",",
"label",
",",
"color",
"=",
"\"darkslategray\"",
",",
"ha",
"=",
"\"center\"",
",",
"va",
"=",
"\"center\"",
")",
"normalize_axes",
"(",
"root",
")",
"savefig",
"(",
"image_name",
",",
"dpi",
"=",
"iopts",
".",
"dpi",
",",
"iopts",
"=",
"iopts",
")"
]
| %prog movieframe tour test.clm contigs.ref.anchors
Draw heatmap and synteny in the same plot. | [
"%prog",
"movieframe",
"tour",
"test",
".",
"clm",
"contigs",
".",
"ref",
".",
"anchors"
]
| python | train |
Alignak-monitoring/alignak | alignak/objects/host.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L1499-L1524 | def is_correct(self):
"""Check if the hosts list configuration is correct ::
* check if any loop exists in each host dependencies
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
state = True
# Internal checks before executing inherited function...
loop = self.no_loop_in_parents("self", "parents")
if loop:
self.add_error("Loop detected while checking hosts")
state = False
for uuid, item in list(self.items.items()):
for elem in loop:
if elem == uuid:
self.add_error("Host %s is parent in dependency defined in %s"
% (item.get_name(), item.imported_from))
elif elem in item.parents:
self.add_error("Host %s is child in dependency defined in %s"
% (self[elem].get_name(), self[elem].imported_from))
return super(Hosts, self).is_correct() and state | [
"def",
"is_correct",
"(",
"self",
")",
":",
"state",
"=",
"True",
"# Internal checks before executing inherited function...",
"loop",
"=",
"self",
".",
"no_loop_in_parents",
"(",
"\"self\"",
",",
"\"parents\"",
")",
"if",
"loop",
":",
"self",
".",
"add_error",
"(",
"\"Loop detected while checking hosts\"",
")",
"state",
"=",
"False",
"for",
"uuid",
",",
"item",
"in",
"list",
"(",
"self",
".",
"items",
".",
"items",
"(",
")",
")",
":",
"for",
"elem",
"in",
"loop",
":",
"if",
"elem",
"==",
"uuid",
":",
"self",
".",
"add_error",
"(",
"\"Host %s is parent in dependency defined in %s\"",
"%",
"(",
"item",
".",
"get_name",
"(",
")",
",",
"item",
".",
"imported_from",
")",
")",
"elif",
"elem",
"in",
"item",
".",
"parents",
":",
"self",
".",
"add_error",
"(",
"\"Host %s is child in dependency defined in %s\"",
"%",
"(",
"self",
"[",
"elem",
"]",
".",
"get_name",
"(",
")",
",",
"self",
"[",
"elem",
"]",
".",
"imported_from",
")",
")",
"return",
"super",
"(",
"Hosts",
",",
"self",
")",
".",
"is_correct",
"(",
")",
"and",
"state"
]
| Check if the hosts list configuration is correct ::
* check if any loop exists in each host dependencies
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool | [
"Check",
"if",
"the",
"hosts",
"list",
"configuration",
"is",
"correct",
"::"
]
| python | train |
TeamHG-Memex/MaybeDont | maybedont/predict.py | https://github.com/TeamHG-Memex/MaybeDont/blob/34721f67b69d426adda324a0ed905d3860828af9/maybedont/predict.py#L199-L214 | def _nodup_filter(self, min_hash, all_urls, max_sample=200):
""" This filters results that are considered not duplicates.
But we really need to check that, because lsh.query does not always
return ALL duplicates, esp. when there are a lot of them, so
here we double-check and return only urls that are NOT duplicates.
Return estimated number of not duplicates.
"""
if not all_urls:
return 0
urls = random.sample(all_urls, max_sample) \
if len(all_urls) > max_sample else all_urls
filtered = [
url for url in urls
if min_hash.jaccard(self.seen_urls[url].min_hash) <
self.jaccard_threshold]
return int(len(filtered) / len(urls) * len(all_urls)) | [
"def",
"_nodup_filter",
"(",
"self",
",",
"min_hash",
",",
"all_urls",
",",
"max_sample",
"=",
"200",
")",
":",
"if",
"not",
"all_urls",
":",
"return",
"0",
"urls",
"=",
"random",
".",
"sample",
"(",
"all_urls",
",",
"max_sample",
")",
"if",
"len",
"(",
"all_urls",
")",
">",
"max_sample",
"else",
"all_urls",
"filtered",
"=",
"[",
"url",
"for",
"url",
"in",
"urls",
"if",
"min_hash",
".",
"jaccard",
"(",
"self",
".",
"seen_urls",
"[",
"url",
"]",
".",
"min_hash",
")",
"<",
"self",
".",
"jaccard_threshold",
"]",
"return",
"int",
"(",
"len",
"(",
"filtered",
")",
"/",
"len",
"(",
"urls",
")",
"*",
"len",
"(",
"all_urls",
")",
")"
]
| This filters results that are considered not duplicates.
But we really need to check that, because lsh.query does not always
return ALL duplicates, esp. when there are a lot of them, so
here we double-check and return only urls that are NOT duplicates.
Return estimated number of not duplicates. | [
"This",
"filters",
"results",
"that",
"are",
"considered",
"not",
"duplicates",
".",
"But",
"we",
"really",
"need",
"to",
"check",
"that",
"because",
"lsh",
".",
"query",
"does",
"not",
"always",
"return",
"ALL",
"duplicates",
"esp",
".",
"when",
"there",
"are",
"a",
"lot",
"of",
"them",
"so",
"here",
"we",
"double",
"-",
"check",
"and",
"return",
"only",
"urls",
"that",
"are",
"NOT",
"duplicates",
".",
"Return",
"estimated",
"number",
"of",
"not",
"duplicates",
"."
]
| python | train |
ardexa/ardexaplugin | ardexaplugin.py | https://github.com/ardexa/ardexaplugin/blob/5068532f601ae3042bd87af1063057e8f274f670/ardexaplugin.py#L280-L300 | def parse_address_list(addrs):
"""Yield each integer from a complex range string like "1-9,12,15-20,23"
>>> list(parse_address_list('1-9,12,15-20,23'))
[1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 15, 16, 17, 18, 19, 20, 23]
>>> list(parse_address_list('1-9,12,15-20,2-3-4'))
Traceback (most recent call last):
...
ValueError: format error in 2-3-4
"""
for addr in addrs.split(','):
elem = addr.split('-')
if len(elem) == 1: # a number
yield int(elem[0])
elif len(elem) == 2: # a range inclusive
start, end = list(map(int, elem))
for i in range(start, end+1):
yield i
else: # more than one hyphen
raise ValueError('format error in %s' % addr) | [
"def",
"parse_address_list",
"(",
"addrs",
")",
":",
"for",
"addr",
"in",
"addrs",
".",
"split",
"(",
"','",
")",
":",
"elem",
"=",
"addr",
".",
"split",
"(",
"'-'",
")",
"if",
"len",
"(",
"elem",
")",
"==",
"1",
":",
"# a number",
"yield",
"int",
"(",
"elem",
"[",
"0",
"]",
")",
"elif",
"len",
"(",
"elem",
")",
"==",
"2",
":",
"# a range inclusive",
"start",
",",
"end",
"=",
"list",
"(",
"map",
"(",
"int",
",",
"elem",
")",
")",
"for",
"i",
"in",
"range",
"(",
"start",
",",
"end",
"+",
"1",
")",
":",
"yield",
"i",
"else",
":",
"# more than one hyphen",
"raise",
"ValueError",
"(",
"'format error in %s'",
"%",
"addr",
")"
]
| Yield each integer from a complex range string like "1-9,12,15-20,23"
>>> list(parse_address_list('1-9,12,15-20,23'))
[1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 15, 16, 17, 18, 19, 20, 23]
>>> list(parse_address_list('1-9,12,15-20,2-3-4'))
Traceback (most recent call last):
...
ValueError: format error in 2-3-4 | [
"Yield",
"each",
"integer",
"from",
"a",
"complex",
"range",
"string",
"like",
"1",
"-",
"9",
"12",
"15",
"-",
"20",
"23"
]
| python | valid |
Unidata/siphon | siphon/cdmr/cdmremote.py | https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/cdmremote.py#L46-L63 | def query(self):
"""Generate a new query for CDMRemote.
This handles turning on compression if necessary.
Returns
-------
HTTPQuery
The created query.
"""
q = super(CDMRemote, self).query()
# Turn on compression if it's been set on the object
if self.deflate:
q.add_query_parameter(deflate=self.deflate)
return q | [
"def",
"query",
"(",
"self",
")",
":",
"q",
"=",
"super",
"(",
"CDMRemote",
",",
"self",
")",
".",
"query",
"(",
")",
"# Turn on compression if it's been set on the object",
"if",
"self",
".",
"deflate",
":",
"q",
".",
"add_query_parameter",
"(",
"deflate",
"=",
"self",
".",
"deflate",
")",
"return",
"q"
]
| Generate a new query for CDMRemote.
This handles turning on compression if necessary.
Returns
-------
HTTPQuery
The created query. | [
"Generate",
"a",
"new",
"query",
"for",
"CDMRemote",
"."
]
| python | train |
tensorflow/probability | tensorflow_probability/python/optimizer/differential_evolution.py | https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L771-L785 | def _ensure_list(tensor_or_list):
"""Converts the input arg to a list if it is not a list already.
Args:
tensor_or_list: A `Tensor` or a Python list of `Tensor`s. The argument to
convert to a list of `Tensor`s.
Returns:
A tuple of two elements. The first is a Python list of `Tensor`s containing
the original arguments. The second is a boolean indicating whether
the original argument was a list or tuple already.
"""
if isinstance(tensor_or_list, (list, tuple)):
return list(tensor_or_list), True
return [tensor_or_list], False | [
"def",
"_ensure_list",
"(",
"tensor_or_list",
")",
":",
"if",
"isinstance",
"(",
"tensor_or_list",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"list",
"(",
"tensor_or_list",
")",
",",
"True",
"return",
"[",
"tensor_or_list",
"]",
",",
"False"
]
| Converts the input arg to a list if it is not a list already.
Args:
tensor_or_list: A `Tensor` or a Python list of `Tensor`s. The argument to
convert to a list of `Tensor`s.
Returns:
A tuple of two elements. The first is a Python list of `Tensor`s containing
the original arguments. The second is a boolean indicating whether
the original argument was a list or tuple already. | [
"Converts",
"the",
"input",
"arg",
"to",
"a",
"list",
"if",
"it",
"is",
"not",
"a",
"list",
"already",
"."
]
| python | test |
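A minimal usage sketch of the `_ensure_list` helper above, with plain Python values standing in for `Tensor`s; the sample inputs are hypothetical, and the helper is repeated only so the snippet runs on its own:

def _ensure_list(tensor_or_list):
    # Wrap a single value in a list and report whether the input was
    # already a list/tuple (same logic as in the record above).
    if isinstance(tensor_or_list, (list, tuple)):
        return list(tensor_or_list), True
    return [tensor_or_list], False

values, was_list = _ensure_list(3.0)          # -> [3.0], False
values, was_list = _ensure_list([1.0, 2.0])   # -> [1.0, 2.0], True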
illagrenan/django-upload-path | tasks.py | https://github.com/illagrenan/django-upload-path/blob/a3518f8f06826807f6cebb0cacc06906dbc2ebdd/tasks.py#L48-L54 | def coverage():
"""check code coverage quickly with the default Python"""
run("coverage run --source {PROJECT_NAME} -m py.test".format(PROJECT_NAME=PROJECT_NAME))
run("coverage report -m")
run("coverage html")
webbrowser.open('file://' + os.path.realpath("htmlcov/index.html"), new=2) | [
"def",
"coverage",
"(",
")",
":",
"run",
"(",
"\"coverage run --source {PROJECT_NAME} -m py.test\"",
".",
"format",
"(",
"PROJECT_NAME",
"=",
"PROJECT_NAME",
")",
")",
"run",
"(",
"\"coverage report -m\"",
")",
"run",
"(",
"\"coverage html\"",
")",
"webbrowser",
".",
"open",
"(",
"'file://'",
"+",
"os",
".",
"path",
".",
"realpath",
"(",
"\"htmlcov/index.html\"",
")",
",",
"new",
"=",
"2",
")"
]
| check code coverage quickly with the default Python | [
"check",
"code",
"coverage",
"quickly",
"with",
"the",
"default",
"Python"
]
| python | train |
mitsei/dlkit | dlkit/aws_adapter/repository/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/aws_adapter/repository/sessions.py#L986-L1002 | def update_asset(self, asset_form=None):
"""Updates an existing asset.
arg: asset_form (osid.repository.AssetForm): the form
containing the elements to be updated
raise: IllegalState - ``asset_form`` already used in an update
transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``asset_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``asset_form`` did not originate from
``get_asset_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
return Asset(self._provider_session.update_asset(asset_form), self._config_map) | [
"def",
"update_asset",
"(",
"self",
",",
"asset_form",
"=",
"None",
")",
":",
"return",
"Asset",
"(",
"self",
".",
"_provider_session",
".",
"update_asset",
"(",
"asset_form",
")",
",",
"self",
".",
"_config_map",
")"
]
| Updates an existing asset.
arg: asset_form (osid.repository.AssetForm): the form
containing the elements to be updated
raise: IllegalState - ``asset_form`` already used in an update
transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``asset_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``asset_form`` did not originate from
``get_asset_form_for_update()``
*compliance: mandatory -- This method must be implemented.* | [
"Updates",
"an",
"existing",
"asset",
"."
]
| python | train |
tswicegood/Dolt | dolt/__init__.py | https://github.com/tswicegood/Dolt/blob/e0da1918b7db18f885734a89f824b9e173cc30a5/dolt/__init__.py#L246-L271 | def _clone(self):
"""
Clones the state of the current operation.
The state is cloned so that you can freeze the state at a certain point for re-use.
::
>>> cat = dolt.cat
>>> cat.get_url()
'/cat'
>>> o = cat.foo
>>> o.get_url()
'/cat/foo'
>>> cat.get_url()
'/cat'
"""
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
q._params = self._params.copy()
q._headers = self._headers.copy()
q._attribute_stack = self._attribute_stack[:]
return q | [
"def",
"_clone",
"(",
"self",
")",
":",
"cls",
"=",
"self",
".",
"__class__",
"q",
"=",
"cls",
".",
"__new__",
"(",
"cls",
")",
"q",
".",
"__dict__",
"=",
"self",
".",
"__dict__",
".",
"copy",
"(",
")",
"q",
".",
"_params",
"=",
"self",
".",
"_params",
".",
"copy",
"(",
")",
"q",
".",
"_headers",
"=",
"self",
".",
"_headers",
".",
"copy",
"(",
")",
"q",
".",
"_attribute_stack",
"=",
"self",
".",
"_attribute_stack",
"[",
":",
"]",
"return",
"q"
]
| Clones the state of the current operation.
The state is cloned so that you can freeze the state at a certain point for re-use.
::
>>> cat = dolt.cat
>>> cat.get_url()
'/cat'
>>> o = cat.foo
>>> o.get_url()
'/cat/foo'
>>> cat.get_url()
'/cat' | [
"Clones",
"the",
"state",
"of",
"the",
"current",
"operation",
"."
]
| python | train |
jmvrbanac/lplight | lplight/client.py | https://github.com/jmvrbanac/lplight/blob/4d58b45e49ad9ba9e95f8c106d5c49e1658a69a7/lplight/client.py#L87-L95 | def get_bug_by_id(self, bug_id):
""" Retrieves a single bug by it's Launchpad bug_id
:param bug_id: The Launchpad id for the bug.
"""
uri = '{base}/bugs/{bug_id}'.format(base=self.BASE_URI, bug_id=bug_id)
resp = self._client.get(uri, model=models.Bug)
return resp | [
"def",
"get_bug_by_id",
"(",
"self",
",",
"bug_id",
")",
":",
"uri",
"=",
"'{base}/bugs/{bug_id}'",
".",
"format",
"(",
"base",
"=",
"self",
".",
"BASE_URI",
",",
"bug_id",
"=",
"bug_id",
")",
"resp",
"=",
"self",
".",
"_client",
".",
"get",
"(",
"uri",
",",
"model",
"=",
"models",
".",
"Bug",
")",
"return",
"resp"
]
| Retrieves a single bug by it's Launchpad bug_id
:param bug_id: The Launchpad id for the bug. | [
"Retrieves",
"a",
"single",
"bug",
"by",
"it",
"s",
"Launchpad",
"bug_id"
]
| python | train |
pmbarrett314/curses-menu | cursesmenu/items/submenu_item.py | https://github.com/pmbarrett314/curses-menu/blob/c76fc00ab9d518eab275e55434fc2941f49c6b30/cursesmenu/items/submenu_item.py#L45-L54 | def clean_up(self):
"""
This class overrides this method
"""
self.submenu.join()
self.menu.clear_screen()
curses.reset_prog_mode()
curses.curs_set(1) # reset doesn't do this right
curses.curs_set(0)
self.menu.resume() | [
"def",
"clean_up",
"(",
"self",
")",
":",
"self",
".",
"submenu",
".",
"join",
"(",
")",
"self",
".",
"menu",
".",
"clear_screen",
"(",
")",
"curses",
".",
"reset_prog_mode",
"(",
")",
"curses",
".",
"curs_set",
"(",
"1",
")",
"# reset doesn't do this right",
"curses",
".",
"curs_set",
"(",
"0",
")",
"self",
".",
"menu",
".",
"resume",
"(",
")"
]
| This class overrides this method | [
"This",
"class",
"overrides",
"this",
"method"
]
| python | test |
monkeython/scriba | scriba/schemes/data.py | https://github.com/monkeython/scriba/blob/fb8e7636ed07c3d035433fdd153599ac8b24dfc4/scriba/schemes/data.py#L23-L35 | def read(url, **args):
"""Loads an object from a data URI."""
info, data = url.path.split(',')
info = data_re.search(info).groupdict()
mediatype = info.setdefault('mediatype', 'text/plain;charset=US-ASCII')
if ';' in mediatype:
mimetype, params = mediatype.split(';', 1)
params = [p.split('=') for p in params.split(';')]
params = dict((k.strip(), v.strip()) for k, v in params)
else:
mimetype, params = mediatype, dict()
data = base64.b64decode(data) if info['base64'] else urllib.unquote(data)
return content_types.get(mimetype).parse(data, **params) | [
"def",
"read",
"(",
"url",
",",
"*",
"*",
"args",
")",
":",
"info",
",",
"data",
"=",
"url",
".",
"path",
".",
"split",
"(",
"','",
")",
"info",
"=",
"data_re",
".",
"search",
"(",
"info",
")",
".",
"groupdict",
"(",
")",
"mediatype",
"=",
"info",
".",
"setdefault",
"(",
"'mediatype'",
",",
"'text/plain;charset=US-ASCII'",
")",
"if",
"';'",
"in",
"mediatype",
":",
"mimetype",
",",
"params",
"=",
"mediatype",
".",
"split",
"(",
"';'",
",",
"1",
")",
"params",
"=",
"[",
"p",
".",
"split",
"(",
"'='",
")",
"for",
"p",
"in",
"params",
".",
"split",
"(",
"';'",
")",
"]",
"params",
"=",
"dict",
"(",
"(",
"k",
".",
"strip",
"(",
")",
",",
"v",
".",
"strip",
"(",
")",
")",
"for",
"k",
",",
"v",
"in",
"params",
")",
"else",
":",
"mimetype",
",",
"params",
"=",
"mediatype",
",",
"dict",
"(",
")",
"data",
"=",
"base64",
".",
"b64decode",
"(",
"data",
")",
"if",
"info",
"[",
"'base64'",
"]",
"else",
"urllib",
".",
"unquote",
"(",
"data",
")",
"return",
"content_types",
".",
"get",
"(",
"mimetype",
")",
".",
"parse",
"(",
"data",
",",
"*",
"*",
"params",
")"
]
| Loads an object from a data URI. | [
"Loads",
"an",
"object",
"from",
"a",
"data",
"URI",
"."
]
| python | train |
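A self-contained sketch of the same data-URI decoding steps using only the standard library; the URI is a made-up example, and the module's `data_re` pattern and `content_types` registry are not reproduced here:

import base64
from urllib.parse import unquote

uri = "data:text/plain;charset=US-ASCII;base64,aGVsbG8="   # hypothetical example
info, payload = uri[len("data:"):].split(",", 1)
is_base64 = info.endswith(";base64")
mediatype = info[:-len(";base64")] if is_base64 else (info or "text/plain;charset=US-ASCII")
data = base64.b64decode(payload) if is_base64 else unquote(payload).encode()
mimetype, _, params = mediatype.partition(";")
# mimetype -> 'text/plain', params -> 'charset=US-ASCII', data -> b'hello'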
GGiecold/Concurrent_AP | Concurrent_AP.py | https://github.com/GGiecold/Concurrent_AP/blob/d4cebe06268b5d520352a83cadb2f7520650460c/Concurrent_AP.py#L433-L471 | def get_sum(hdf5_file, path, array_out, out_lock, rows_slice):
"""Access an array at node 'path' of the 'hdf5_file', compute the sums
along a slice of rows specified by 'rows_slice' and add the resulting
vector to 'array_out'.
Parameters
----------
hdf5_file : string or file handle
The location of the HDF5 data structure containing the matrices of availabilities,
responsibilities and similarities among others.
path : string
Specify the node where the matrix whose row-sums are to be computed is located
within the given hierarchical data format.
array_out : multiprocessing.Array object
This ctypes array is allocated from shared memory and used by various
processes to store the outcome of their computations.
out_lock : multiprocessing.Lock object
Synchronize access to the values stored in 'array_out'.
rows_slice : slice object
Specifies a range of rows indices.
"""
Worker.hdf5_lock.acquire()
with tables.open_file(hdf5_file, 'r+') as fileh:
hdf5_array = fileh.get_node(path)
tmp = hdf5_array[rows_slice, ...]
Worker.hdf5_lock.release()
szum = np.sum(tmp, axis = 0)
with out_lock:
array_out += szum
del tmp | [
"def",
"get_sum",
"(",
"hdf5_file",
",",
"path",
",",
"array_out",
",",
"out_lock",
",",
"rows_slice",
")",
":",
"Worker",
".",
"hdf5_lock",
".",
"acquire",
"(",
")",
"with",
"tables",
".",
"open_file",
"(",
"hdf5_file",
",",
"'r+'",
")",
"as",
"fileh",
":",
"hdf5_array",
"=",
"fileh",
".",
"get_node",
"(",
"path",
")",
"tmp",
"=",
"hdf5_array",
"[",
"rows_slice",
",",
"...",
"]",
"Worker",
".",
"hdf5_lock",
".",
"release",
"(",
")",
"szum",
"=",
"np",
".",
"sum",
"(",
"tmp",
",",
"axis",
"=",
"0",
")",
"with",
"out_lock",
":",
"array_out",
"+=",
"szum",
"del",
"tmp"
]
| Access an array at node 'path' of the 'hdf5_file', compute the sums
along a slice of rows specified by 'rows_slice' and add the resulting
vector to 'array_out'.
Parameters
----------
hdf5_file : string or file handle
The location of the HDF5 data structure containing the matrices of availabilities,
responsibilities and similarities among others.
path : string
Specify the node where the matrix whose row-sums are to be computed is located
within the given hierarchical data format.
array_out : multiprocessing.Array object
This ctypes array is allocated from shared memory and used by various
processes to store the outcome of their computations.
out_lock : multiprocessing.Lock object
Synchronize access to the values stored in 'array_out'.
rows_slice : slice object
Specifies a range of rows indices. | [
"Access",
"an",
"array",
"at",
"node",
"path",
"of",
"the",
"hdf5_file",
"compute",
"the",
"sums",
"along",
"a",
"slice",
"of",
"rows",
"specified",
"by",
"rows_slice",
"and",
"add",
"the",
"resulting",
"vector",
"to",
"array_out",
".",
"Parameters",
"----------",
"hdf5_file",
":",
"string",
"or",
"file",
"handle",
"The",
"location",
"of",
"the",
"HDF5",
"data",
"structure",
"containing",
"the",
"matrices",
"of",
"availabitilites",
"responsibilities",
"and",
"similarities",
"among",
"others",
".",
"path",
":",
"string",
"Specify",
"the",
"node",
"where",
"the",
"matrix",
"whose",
"row",
"-",
"sums",
"are",
"to",
"be",
"computed",
"is",
"located",
"within",
"the",
"given",
"hierarchical",
"data",
"format",
".",
"array_out",
":",
"multiprocessing",
".",
"Array",
"object",
"This",
"ctypes",
"array",
"is",
"allocated",
"from",
"shared",
"memory",
"and",
"used",
"by",
"various",
"processes",
"to",
"store",
"the",
"outcome",
"of",
"their",
"computations",
".",
"out_lock",
":",
"multiprocessing",
".",
"Lock",
"object",
"Synchronize",
"access",
"to",
"the",
"values",
"stored",
"in",
"array_out",
".",
"rows_slice",
":",
"slice",
"object",
"Specifies",
"a",
"range",
"of",
"rows",
"indices",
"."
]
| python | train |
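A NumPy-only sketch of the accumulation pattern used by `get_sum` above: each worker sums its own slice of rows and adds the result to a shared output vector. The HDF5 access and multiprocessing locks are omitted, and the matrix is hypothetical:

import numpy as np

matrix = np.arange(12, dtype=float).reshape(4, 3)   # stands in for the HDF5 node
array_out = np.zeros(3)

# Each slice plays the role of one worker's rows_slice; in the record the
# "array_out += ..." step is guarded by out_lock.
for rows_slice in (slice(0, 2), slice(2, 4)):
    array_out += np.sum(matrix[rows_slice, ...], axis=0)

assert np.allclose(array_out, matrix.sum(axis=0))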
monarch-initiative/dipper | dipper/sources/OMIM.py | https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIM.py#L634-L756 | def _process_morbidmap(self, limit):
"""
This will process the morbidmap file to get the links between
omim genes and diseases. Here, we create anonymous nodes for some
variant loci that are variants of the gene that causes the disease.
Triples created:
<some_anonymous_variant_locus>
is_allele_of
<omim_gene_id>
<some_anonymous_variant_locus> causes condition <omim_disease_id>
<assoc> hasSubject <some_anonymous_variant_locus>
<assoc> hasObject <omim_disease_id>
<assoc> hasPredicate <causes condition>
<assoc> DC:evidence <eco_id>
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
assoc_count = 0
src_key = 'morbidmap'
col = self.files[src_key]['columns']
raw = '/'.join((self.rawdir, self.files[src_key]['file']))
with open(raw) as reader:
line = reader.readline() # Copyright
line = reader.readline() # Generated: 2016-04-11
line = reader.readline() # EOF for field spec
line = reader.readline().strip() # columns header
line_counter = 4
row = line.split('\t') # includes funky leading octothorpe
if row != col: # assert
LOG.error('Expected %s to have columns: %s', raw, col)
LOG.error('But Found %s to have columns: %s', raw, row)
raise AssertionError('Incomming data headers have changed.')
for line in reader:
line_counter += 1
line = line.strip()
# since there are comments at the end of the file as well,
if line[0] == '#':
continue
row = line.split('\t')
if len(row) != len(col):
LOG.warning(
'Unexpected input on line: %i got: %s', line_counter, row)
continue
disorder = row[col.index('# Phenotype')]
gene_symbols = row[col.index('Gene Symbols')]
gene_num = row[col.index('MIM Number')]
# loc = row[col.index('Cyto Location')]
# LOG.info("morbidmap disorder: %s", disorder) # too verbose
# disorder = disorder label , number (mapping key)
# 3-M syndrome 1, 273750 (3)|CUL7, 3M1|609577|6p21.1
# but note that for those diseases where they are genomic loci
# (not genes though), the omim id is only listed as the gene
# Alopecia areata 1 (2)|AA1|104000|18p11.3-p11.2
# when there's a gene and disease
disorder_match = self.disorder_regex.match(disorder)
nogene_match = self.nogene_regex.match(disorder)
if disorder_match is not None:
disorder_parts = disorder_match.groups()
(disorder_label, disorder_num, phene_key) = disorder_parts
if self.test_mode and (
int(disorder_num) not in self.test_ids or
int(gene_num) not in self.test_ids):
continue
assoc_count += 1
gene_symbols = gene_symbols.split(', ')
gene_id = 'OMIM:' + str(gene_num)
self._make_pheno_assoc(
graph, gene_id, disorder_num, disorder_label, phene_key)
elif nogene_match is not None:
# this is a case where the disorder
# a blended gene/phenotype
# we lookup the NCBIGene feature and make the association
(disorder_label, phene_key) = nogene_match.groups()
disorder_num = gene_num
# make what's in the gene column the disease
disorder_id = 'OMIM:' + str(disorder_num)
if self.test_mode and int(disorder_num) not in self.test_ids:
continue
if disorder_id in self.omim_ncbigene_idmap:
# get the gene ids
gene_ids = self.omim_ncbigene_idmap[disorder_id]
if gene_ids is None:
continue
for gene_num in gene_ids:
# TODO add gene filter for testMode and NCBIGenes
gene_id = 'NCBIGene:' + str(gene_num).strip()
assoc_count += 1
self._make_pheno_assoc(
graph, gene_id, disorder_num, disorder_label, phene_key)
else:
# we can create an anonymous feature
# to house this thing for example, 158900
feature_id = self._make_anonymous_feature(gene_num)
assoc_count += 1
self._make_pheno_assoc(
graph, feature_id, disorder_num, disorder_label, phene_key)
LOG.info(
"We don't have an NCBIGene feature id to link %s with %s",
disorder_id, disorder_label)
if self.test_mode and gene_num not in self.test_ids:
continue
else:
LOG.warning(
"There are misformatted rows %i:%s", line_counter, line)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Added %d G2P associations", assoc_count) | [
"def",
"_process_morbidmap",
"(",
"self",
",",
"limit",
")",
":",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"line_counter",
"=",
"0",
"assoc_count",
"=",
"0",
"src_key",
"=",
"'morbidmap'",
"col",
"=",
"self",
".",
"files",
"[",
"src_key",
"]",
"[",
"'columns'",
"]",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"src_key",
"]",
"[",
"'file'",
"]",
")",
")",
"with",
"open",
"(",
"raw",
")",
"as",
"reader",
":",
"line",
"=",
"reader",
".",
"readline",
"(",
")",
"# Copyright",
"line",
"=",
"reader",
".",
"readline",
"(",
")",
"# Generated: 2016-04-11",
"line",
"=",
"reader",
".",
"readline",
"(",
")",
"# EOF for field spec",
"line",
"=",
"reader",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"# columns header",
"line_counter",
"=",
"4",
"row",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"# includes funky leading octothorpe",
"if",
"row",
"!=",
"col",
":",
"# assert",
"LOG",
".",
"error",
"(",
"'Expected %s to have columns: %s'",
",",
"raw",
",",
"col",
")",
"LOG",
".",
"error",
"(",
"'But Found %s to have columns: %s'",
",",
"raw",
",",
"row",
")",
"raise",
"AssertionError",
"(",
"'Incomming data headers have changed.'",
")",
"for",
"line",
"in",
"reader",
":",
"line_counter",
"+=",
"1",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"# since there are comments at the end of the file as well,",
"if",
"line",
"[",
"0",
"]",
"==",
"'#'",
":",
"continue",
"row",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"if",
"len",
"(",
"row",
")",
"!=",
"len",
"(",
"col",
")",
":",
"LOG",
".",
"warning",
"(",
"'Unexpected input on line: %i got: %s'",
",",
"line_counter",
",",
"row",
")",
"continue",
"disorder",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'# Phenotype'",
")",
"]",
"gene_symbols",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'Gene Symbols'",
")",
"]",
"gene_num",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'MIM Number'",
")",
"]",
"# loc = row[col.index('Cyto Location')]",
"# LOG.info(\"morbidmap disorder: %s\", disorder) # too verbose",
"# disorder = disorder label , number (mapping key)",
"# 3-M syndrome 1, 273750 (3)|CUL7, 3M1|609577|6p21.1",
"# but note that for those diseases where they are genomic loci",
"# (not genes though), the omim id is only listed as the gene",
"# Alopecia areata 1 (2)|AA1|104000|18p11.3-p11.2",
"# when there's a gene and disease",
"disorder_match",
"=",
"self",
".",
"disorder_regex",
".",
"match",
"(",
"disorder",
")",
"nogene_match",
"=",
"self",
".",
"nogene_regex",
".",
"match",
"(",
"disorder",
")",
"if",
"disorder_match",
"is",
"not",
"None",
":",
"disorder_parts",
"=",
"disorder_match",
".",
"groups",
"(",
")",
"(",
"disorder_label",
",",
"disorder_num",
",",
"phene_key",
")",
"=",
"disorder_parts",
"if",
"self",
".",
"test_mode",
"and",
"(",
"int",
"(",
"disorder_num",
")",
"not",
"in",
"self",
".",
"test_ids",
"or",
"int",
"(",
"gene_num",
")",
"not",
"in",
"self",
".",
"test_ids",
")",
":",
"continue",
"assoc_count",
"+=",
"1",
"gene_symbols",
"=",
"gene_symbols",
".",
"split",
"(",
"', '",
")",
"gene_id",
"=",
"'OMIM:'",
"+",
"str",
"(",
"gene_num",
")",
"self",
".",
"_make_pheno_assoc",
"(",
"graph",
",",
"gene_id",
",",
"disorder_num",
",",
"disorder_label",
",",
"phene_key",
")",
"elif",
"nogene_match",
"is",
"not",
"None",
":",
"# this is a case where the disorder",
"# a blended gene/phenotype",
"# we lookup the NCBIGene feature and make the association",
"(",
"disorder_label",
",",
"phene_key",
")",
"=",
"nogene_match",
".",
"groups",
"(",
")",
"disorder_num",
"=",
"gene_num",
"# make what's in the gene column the disease",
"disorder_id",
"=",
"'OMIM:'",
"+",
"str",
"(",
"disorder_num",
")",
"if",
"self",
".",
"test_mode",
"and",
"int",
"(",
"disorder_num",
")",
"not",
"in",
"self",
".",
"test_ids",
":",
"continue",
"if",
"disorder_id",
"in",
"self",
".",
"omim_ncbigene_idmap",
":",
"# get the gene ids",
"gene_ids",
"=",
"self",
".",
"omim_ncbigene_idmap",
"[",
"disorder_id",
"]",
"if",
"gene_ids",
"is",
"None",
":",
"continue",
"for",
"gene_num",
"in",
"gene_ids",
":",
"# TODO add gene filter for testMode and NCBIGenes",
"gene_id",
"=",
"'NCBIGene:'",
"+",
"str",
"(",
"gene_num",
")",
".",
"strip",
"(",
")",
"assoc_count",
"+=",
"1",
"self",
".",
"_make_pheno_assoc",
"(",
"graph",
",",
"gene_id",
",",
"disorder_num",
",",
"disorder_label",
",",
"phene_key",
")",
"else",
":",
"# we can create an anonymous feature",
"# to house this thing for example, 158900",
"feature_id",
"=",
"self",
".",
"_make_anonymous_feature",
"(",
"gene_num",
")",
"assoc_count",
"+=",
"1",
"self",
".",
"_make_pheno_assoc",
"(",
"graph",
",",
"feature_id",
",",
"disorder_num",
",",
"disorder_label",
",",
"phene_key",
")",
"LOG",
".",
"info",
"(",
"\"We don't have an NCBIGene feature id to link %s with %s\"",
",",
"disorder_id",
",",
"disorder_label",
")",
"if",
"self",
".",
"test_mode",
"and",
"gene_num",
"not",
"in",
"self",
".",
"test_ids",
":",
"continue",
"else",
":",
"LOG",
".",
"warning",
"(",
"\"There are misformatted rows %i:%s\"",
",",
"line_counter",
",",
"line",
")",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Added %d G2P associations\"",
",",
"assoc_count",
")"
]
| This will process the morbidmap file to get the links between
omim genes and diseases. Here, we create anonymous nodes for some
variant loci that are variants of the gene that causes the disease.
Triples created:
<some_anonymous_variant_locus>
is_allele_of
<omim_gene_id>
<some_anonymous_variant_locus> causes condition <omim_disease_id>
<assoc> hasSubject <some_anonymous_variant_locus>
<assoc> hasObject <omim_disease_id>
<assoc> hasPredicate <causes condition>
<assoc> DC:evidence <eco_id>
:param limit:
:return: | [
"This",
"will",
"process",
"the",
"morbidmap",
"file",
"to",
"get",
"the",
"links",
"between",
"omim",
"genes",
"and",
"diseases",
".",
"Here",
"we",
"create",
"anonymous",
"nodes",
"for",
"some",
"variant",
"loci",
"that",
"are",
"variants",
"of",
"the",
"gene",
"that",
"causes",
"the",
"disease",
".",
"Triples",
"created",
":",
"<some_anonymous_variant_locus",
">",
"is_allele_of",
"<omim_gene_id",
">",
"<some_anonymous_variant_locus",
">",
"causes",
"condition",
"<omim_disease_id",
">",
"<assoc",
">",
"hasSubject",
"<some_anonymous_variant_locus",
">",
"<assoc",
">",
"hasObject",
"<omim_disease_id",
">",
"<assoc",
">",
"hasPredicate",
"<causes",
"condition",
">",
"<assoc",
">",
"DC",
":",
"evidence",
"<eco_id",
">",
":",
"param",
"limit",
":",
":",
"return",
":"
]
| python | train |
tcalmant/ipopo | pelix/ipopo/handlers/properties.py | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/handlers/properties.py#L169-L186 | def get_methods_names(public_properties):
"""
Generates the names of the fields where to inject the getter and setter
methods
:param public_properties: If True, returns the names of public property
accessors, else of hidden property ones
:return: getter and a setter field names
"""
if public_properties:
prefix = ipopo_constants.IPOPO_PROPERTY_PREFIX
else:
prefix = ipopo_constants.IPOPO_HIDDEN_PROPERTY_PREFIX
return (
"{0}{1}".format(prefix, ipopo_constants.IPOPO_GETTER_SUFFIX),
"{0}{1}".format(prefix, ipopo_constants.IPOPO_SETTER_SUFFIX),
) | [
"def",
"get_methods_names",
"(",
"public_properties",
")",
":",
"if",
"public_properties",
":",
"prefix",
"=",
"ipopo_constants",
".",
"IPOPO_PROPERTY_PREFIX",
"else",
":",
"prefix",
"=",
"ipopo_constants",
".",
"IPOPO_HIDDEN_PROPERTY_PREFIX",
"return",
"(",
"\"{0}{1}\"",
".",
"format",
"(",
"prefix",
",",
"ipopo_constants",
".",
"IPOPO_GETTER_SUFFIX",
")",
",",
"\"{0}{1}\"",
".",
"format",
"(",
"prefix",
",",
"ipopo_constants",
".",
"IPOPO_SETTER_SUFFIX",
")",
",",
")"
]
| Generates the names of the fields where to inject the getter and setter
methods
:param public_properties: If True, returns the names of public property
accessors, else of hidden property ones
:return: getter and a setter field names | [
"Generates",
"the",
"names",
"of",
"the",
"fields",
"where",
"to",
"inject",
"the",
"getter",
"and",
"setter",
"methods"
]
| python | train |
geophysics-ubonn/reda | lib/reda/containers/sEIT.py | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L207-L223 | def compute_K_analytical(self, spacing):
"""Assuming an equal electrode spacing, compute the K-factor over a
homogeneous half-space.
For more complex grids, please refer to the module:
reda.utils.geometric_factors
Parameters
----------
spacing: float
Electrode spacing
"""
assert isinstance(spacing, Number)
K = geometric_factors.compute_K_analytical(self.data, spacing)
self.data = geometric_factors.apply_K(self.data, K)
fix_sign_with_K(self.data) | [
"def",
"compute_K_analytical",
"(",
"self",
",",
"spacing",
")",
":",
"assert",
"isinstance",
"(",
"spacing",
",",
"Number",
")",
"K",
"=",
"geometric_factors",
".",
"compute_K_analytical",
"(",
"self",
".",
"data",
",",
"spacing",
")",
"self",
".",
"data",
"=",
"geometric_factors",
".",
"apply_K",
"(",
"self",
".",
"data",
",",
"K",
")",
"fix_sign_with_K",
"(",
"self",
".",
"data",
")"
]
| Assuming an equal electrode spacing, compute the K-factor over a
homogeneous half-space.
For more complex grids, please refer to the module:
reda.utils.geometric_factors
Parameters
----------
spacing: float
Electrode spacing | [
"Assuming",
"an",
"equal",
"electrode",
"spacing",
"compute",
"the",
"K",
"-",
"factor",
"over",
"a",
"homogeneous",
"half",
"-",
"space",
"."
]
| python | train |
twisted/mantissa | xmantissa/scrolltable.py | https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/scrolltable.py#L281-L297 | def resort(self, columnName):
"""
Re-sort the table.
@param columnName: the name of the column to sort by. This is a string
because it is passed from the browser.
"""
csc = self.currentSortColumn
newSortColumn = self.columns[columnName]
if newSortColumn is None:
raise Unsortable('column %r has no sort attribute' % (columnName,))
if csc is newSortColumn:
self.isAscending = not self.isAscending
else:
self.currentSortColumn = newSortColumn
self.isAscending = True
return self.isAscending | [
"def",
"resort",
"(",
"self",
",",
"columnName",
")",
":",
"csc",
"=",
"self",
".",
"currentSortColumn",
"newSortColumn",
"=",
"self",
".",
"columns",
"[",
"columnName",
"]",
"if",
"newSortColumn",
"is",
"None",
":",
"raise",
"Unsortable",
"(",
"'column %r has no sort attribute'",
"%",
"(",
"columnName",
",",
")",
")",
"if",
"csc",
"is",
"newSortColumn",
":",
"self",
".",
"isAscending",
"=",
"not",
"self",
".",
"isAscending",
"else",
":",
"self",
".",
"currentSortColumn",
"=",
"newSortColumn",
"self",
".",
"isAscending",
"=",
"True",
"return",
"self",
".",
"isAscending"
]
| Re-sort the table.
@param columnName: the name of the column to sort by. This is a string
because it is passed from the browser. | [
"Re",
"-",
"sort",
"the",
"table",
"."
]
| python | train |
bioidiap/gridtk | gridtk/sge.py | https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/sge.py#L92-L112 | def submit(self, command_line, name = None, array = None, dependencies = [], exec_dir = None, log_dir = "logs", dry_run = False, verbosity = 0, stop_on_failure = False, **kwargs):
"""Submits a job that will be executed in the grid."""
# add job to database
self.lock()
job = add_job(self.session, command_line, name, dependencies, array, exec_dir=exec_dir, log_dir=log_dir, stop_on_failure=stop_on_failure, context=self.context, **kwargs)
logger.info("Added job '%s' to the database." % job)
if dry_run:
print("Would have added the Job")
print(job)
print("to the database to be executed in the grid with options:", str(kwargs))
self.session.delete(job)
logger.info("Deleted job '%s' from the database due to dry-run option" % job)
job_id = None
else:
job_id = self._submit_to_grid(job, name, array, dependencies, log_dir, verbosity, **kwargs)
self.session.commit()
self.unlock()
return job_id | [
"def",
"submit",
"(",
"self",
",",
"command_line",
",",
"name",
"=",
"None",
",",
"array",
"=",
"None",
",",
"dependencies",
"=",
"[",
"]",
",",
"exec_dir",
"=",
"None",
",",
"log_dir",
"=",
"\"logs\"",
",",
"dry_run",
"=",
"False",
",",
"verbosity",
"=",
"0",
",",
"stop_on_failure",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# add job to database",
"self",
".",
"lock",
"(",
")",
"job",
"=",
"add_job",
"(",
"self",
".",
"session",
",",
"command_line",
",",
"name",
",",
"dependencies",
",",
"array",
",",
"exec_dir",
"=",
"exec_dir",
",",
"log_dir",
"=",
"log_dir",
",",
"stop_on_failure",
"=",
"stop_on_failure",
",",
"context",
"=",
"self",
".",
"context",
",",
"*",
"*",
"kwargs",
")",
"logger",
".",
"info",
"(",
"\"Added job '%s' to the database.\"",
"%",
"job",
")",
"if",
"dry_run",
":",
"print",
"(",
"\"Would have added the Job\"",
")",
"print",
"(",
"job",
")",
"print",
"(",
"\"to the database to be executed in the grid with options:\"",
",",
"str",
"(",
"kwargs",
")",
")",
"self",
".",
"session",
".",
"delete",
"(",
"job",
")",
"logger",
".",
"info",
"(",
"\"Deleted job '%s' from the database due to dry-run option\"",
"%",
"job",
")",
"job_id",
"=",
"None",
"else",
":",
"job_id",
"=",
"self",
".",
"_submit_to_grid",
"(",
"job",
",",
"name",
",",
"array",
",",
"dependencies",
",",
"log_dir",
",",
"verbosity",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"session",
".",
"commit",
"(",
")",
"self",
".",
"unlock",
"(",
")",
"return",
"job_id"
]
| Submits a job that will be executed in the grid. | [
"Submits",
"a",
"job",
"that",
"will",
"be",
"executed",
"in",
"the",
"grid",
"."
]
| python | train |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxanalysis.py | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxanalysis.py#L51-L68 | def describe(self, fields=None, **kwargs):
"""
:param fields: dict where the keys are field names that should
be returned, and values should be set to True (by default,
all fields are returned)
:type fields: dict
:returns: Description of the analysis
:rtype: dict
Returns a hash with key-value pairs containing information
about the analysis
"""
describe_input = {}
if fields is not None:
describe_input['fields'] = fields
self._desc = dxpy.api.analysis_describe(self._dxid, describe_input, **kwargs)
return self._desc | [
"def",
"describe",
"(",
"self",
",",
"fields",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"describe_input",
"=",
"{",
"}",
"if",
"fields",
"is",
"not",
"None",
":",
"describe_input",
"[",
"'fields'",
"]",
"=",
"fields",
"self",
".",
"_desc",
"=",
"dxpy",
".",
"api",
".",
"analysis_describe",
"(",
"self",
".",
"_dxid",
",",
"describe_input",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_desc"
]
| :param fields: dict where the keys are field names that should
be returned, and values should be set to True (by default,
all fields are returned)
:type fields: dict
:returns: Description of the analysis
:rtype: dict
Returns a hash with key-value pairs containing information
about the analysis | [
":",
"param",
"fields",
":",
"dict",
"where",
"the",
"keys",
"are",
"field",
"names",
"that",
"should",
"be",
"returned",
"and",
"values",
"should",
"be",
"set",
"to",
"True",
"(",
"by",
"default",
"all",
"fields",
"are",
"returned",
")",
":",
"type",
"fields",
":",
"dict",
":",
"returns",
":",
"Description",
"of",
"the",
"analysis",
":",
"rtype",
":",
"dict"
]
| python | train |
ybrs/single-beat | singlebeat/beat.py | https://github.com/ybrs/single-beat/blob/d036b62d2531710dfd806e9dc2a8d67c77616082/singlebeat/beat.py#L348-L353 | def cli_command_resume(self, msg):
"""\
sets state to waiting - so we resume spawning children
"""
if self.state == State.PAUSED:
self.state = State.WAITING | [
"def",
"cli_command_resume",
"(",
"self",
",",
"msg",
")",
":",
"if",
"self",
".",
"state",
"==",
"State",
".",
"PAUSED",
":",
"self",
".",
"state",
"=",
"State",
".",
"WAITING"
]
| \
sets state to waiting - so we resume spawning children | [
"\\",
"sets",
"state",
"to",
"waiting",
"-",
"so",
"we",
"resume",
"spawning",
"children"
]
| python | test |
trp07/messages | messages/telegram.py | https://github.com/trp07/messages/blob/7789ebc960335a59ea5d319fceed3dd349023648/messages/telegram.py#L125-L132 | def get_chat_id(self, username):
"""Lookup chat_id of username if chat_id is unknown via API call."""
if username is not None:
chats = requests.get(self.base_url + "/getUpdates").json()
user = username.split("@")[-1]
for chat in chats["result"]:
if chat["message"]["from"]["username"] == user:
return chat["message"]["from"]["id"] | [
"def",
"get_chat_id",
"(",
"self",
",",
"username",
")",
":",
"if",
"username",
"is",
"not",
"None",
":",
"chats",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"base_url",
"+",
"\"/getUpdates\"",
")",
".",
"json",
"(",
")",
"user",
"=",
"username",
".",
"split",
"(",
"\"@\"",
")",
"[",
"-",
"1",
"]",
"for",
"chat",
"in",
"chats",
"[",
"\"result\"",
"]",
":",
"if",
"chat",
"[",
"\"message\"",
"]",
"[",
"\"from\"",
"]",
"[",
"\"username\"",
"]",
"==",
"user",
":",
"return",
"chat",
"[",
"\"message\"",
"]",
"[",
"\"from\"",
"]",
"[",
"\"id\"",
"]"
]
| Lookup chat_id of username if chat_id is unknown via API call. | [
"Lookup",
"chat_id",
"of",
"username",
"if",
"chat_id",
"is",
"unknown",
"via",
"API",
"call",
"."
]
| python | test |
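A sketch of the lookup `get_chat_id` performs on the `getUpdates` payload; the response below is a fabricated example of the shape the Bot API returns:

chats = {
    "result": [
        {"message": {"from": {"username": "alice", "id": 111}}},
        {"message": {"from": {"username": "bob", "id": 222}}},
    ]
}

user = "@bob".split("@")[-1]   # strip the leading '@', as in the record above
chat_id = next(
    chat["message"]["from"]["id"]
    for chat in chats["result"]
    if chat["message"]["from"]["username"] == user
)
assert chat_id == 222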
diffeo/rejester | rejester/run.py | https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/run.py#L620-L645 | def do_clear(self, args):
'''remove work units from a work spec'''
# Which units?
work_spec_name = self._get_work_spec_name(args)
units = args.unit or None
# What to do?
count = 0
if args.status is None:
all = units is None
count += self.task_master.del_work_units(work_spec_name, work_unit_keys=units, all=all)
elif args.status == 'available':
count += self.task_master.del_work_units(
work_spec_name, work_unit_keys=units, state=self.task_master.AVAILABLE)
elif args.status == 'pending':
count += self.task_master.del_work_units(
work_spec_name, work_unit_keys=units, state=self.task_master.PENDING)
elif args.status == 'blocked':
count += self.task_master.del_work_units(
work_spec_name, work_unit_keys=units, state=self.task_master.BLOCKED)
elif args.status == 'finished':
count += self.task_master.del_work_units(
work_spec_name, work_unit_keys=units, state=self.task_master.FINISHED)
elif args.status == 'failed':
count += self.task_master.del_work_units(
work_spec_name, work_unit_keys=units, state=self.task_master.FAILED)
self.stdout.write('Removed {0} work units.\n'.format(count)) | [
"def",
"do_clear",
"(",
"self",
",",
"args",
")",
":",
"# Which units?",
"work_spec_name",
"=",
"self",
".",
"_get_work_spec_name",
"(",
"args",
")",
"units",
"=",
"args",
".",
"unit",
"or",
"None",
"# What to do?",
"count",
"=",
"0",
"if",
"args",
".",
"status",
"is",
"None",
":",
"all",
"=",
"units",
"is",
"None",
"count",
"+=",
"self",
".",
"task_master",
".",
"del_work_units",
"(",
"work_spec_name",
",",
"work_unit_keys",
"=",
"units",
",",
"all",
"=",
"all",
")",
"elif",
"args",
".",
"status",
"==",
"'available'",
":",
"count",
"+=",
"self",
".",
"task_master",
".",
"del_work_units",
"(",
"work_spec_name",
",",
"work_unit_keys",
"=",
"units",
",",
"state",
"=",
"self",
".",
"task_master",
".",
"AVAILABLE",
")",
"elif",
"args",
".",
"status",
"==",
"'pending'",
":",
"count",
"+=",
"self",
".",
"task_master",
".",
"del_work_units",
"(",
"work_spec_name",
",",
"work_unit_keys",
"=",
"units",
",",
"state",
"=",
"self",
".",
"task_master",
".",
"PENDING",
")",
"elif",
"args",
".",
"status",
"==",
"'blocked'",
":",
"count",
"+=",
"self",
".",
"task_master",
".",
"del_work_units",
"(",
"work_spec_name",
",",
"work_unit_keys",
"=",
"units",
",",
"state",
"=",
"self",
".",
"task_master",
".",
"BLOCKED",
")",
"elif",
"args",
".",
"status",
"==",
"'finished'",
":",
"count",
"+=",
"self",
".",
"task_master",
".",
"del_work_units",
"(",
"work_spec_name",
",",
"work_unit_keys",
"=",
"units",
",",
"state",
"=",
"self",
".",
"task_master",
".",
"FINISHED",
")",
"elif",
"args",
".",
"status",
"==",
"'failed'",
":",
"count",
"+=",
"self",
".",
"task_master",
".",
"del_work_units",
"(",
"work_spec_name",
",",
"work_unit_keys",
"=",
"units",
",",
"state",
"=",
"self",
".",
"task_master",
".",
"FAILED",
")",
"self",
".",
"stdout",
".",
"write",
"(",
"'Removed {0} work units.\\n'",
".",
"format",
"(",
"count",
")",
")"
]
| remove work units from a work spec | [
"remove",
"work",
"units",
"from",
"a",
"work",
"spec"
]
| python | train |
codelv/enaml-native-cli | enamlnativecli/main.py | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1557-L1567 | def on_message(self, handler, msg):
""" In remote debugging mode this simply acts as a forwarding
proxy for the two clients.
"""
if self.remote_debugging:
#: Forward to other clients
for h in self.handlers:
if h != handler:
h.write_message(msg, True)
else:
print(msg) | [
"def",
"on_message",
"(",
"self",
",",
"handler",
",",
"msg",
")",
":",
"if",
"self",
".",
"remote_debugging",
":",
"#: Forward to other clients",
"for",
"h",
"in",
"self",
".",
"handlers",
":",
"if",
"h",
"!=",
"handler",
":",
"h",
".",
"write_message",
"(",
"msg",
",",
"True",
")",
"else",
":",
"print",
"(",
"msg",
")"
]
| In remote debugging mode this simply acts as a forwarding
proxy for the two clients. | [
"In",
"remote",
"debugging",
"mode",
"this",
"simply",
"acts",
"as",
"a",
"forwarding",
"proxy",
"for",
"the",
"two",
"clients",
"."
]
| python | train |
keon/algorithms | algorithms/calculator/math_parser.py | https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/calculator/math_parser.py#L77-L99 | def parse(expression):
"""
Return array of parsed tokens in the expression
expression String: Math expression to parse in infix notation
"""
result = []
current = ""
for i in expression:
if i.isdigit() or i == '.':
current += i
else:
if len(current) > 0:
result.append(current)
current = ""
if i in __operators__ or i in __parenthesis__:
result.append(i)
else:
raise Exception("invalid syntax " + i)
if len(current) > 0:
result.append(current)
return result | [
"def",
"parse",
"(",
"expression",
")",
":",
"result",
"=",
"[",
"]",
"current",
"=",
"\"\"",
"for",
"i",
"in",
"expression",
":",
"if",
"i",
".",
"isdigit",
"(",
")",
"or",
"i",
"==",
"'.'",
":",
"current",
"+=",
"i",
"else",
":",
"if",
"len",
"(",
"current",
")",
">",
"0",
":",
"result",
".",
"append",
"(",
"current",
")",
"current",
"=",
"\"\"",
"if",
"i",
"in",
"__operators__",
"or",
"i",
"in",
"__parenthesis__",
":",
"result",
".",
"append",
"(",
"i",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"invalid syntax \"",
"+",
"i",
")",
"if",
"len",
"(",
"current",
")",
">",
"0",
":",
"result",
".",
"append",
"(",
"current",
")",
"return",
"result"
]
| Return array of parsed tokens in the expression
expression String: Math expression to parse in infix notation | [
"Return",
"array",
"of",
"parsed",
"tokens",
"in",
"the",
"expression",
"expression",
"String",
":",
"Math",
"expression",
"to",
"parse",
"in",
"infix",
"notation"
]
| python | train |
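A runnable sketch of the tokenizer above; the contents of `__operators__` and `__parenthesis__` are assumptions (they are defined elsewhere in the module), and the expression is a made-up example:

__operators__ = set("+-*/")    # assumed definition
__parenthesis__ = set("()")    # assumed definition

def parse(expression):
    # Group consecutive digits/dots into numbers, emit operators and
    # parentheses as single tokens, and reject anything else.
    result, current = [], ""
    for ch in expression:
        if ch.isdigit() or ch == ".":
            current += ch
        else:
            if current:
                result.append(current)
                current = ""
            if ch in __operators__ or ch in __parenthesis__:
                result.append(ch)
            else:
                raise Exception("invalid syntax " + ch)
    if current:
        result.append(current)
    return result

print(parse("12.5+(3*4)"))   # ['12.5', '+', '(', '3', '*', '4', ')']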
saltstack/salt | salt/modules/pagerduty_util.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pagerduty_util.py#L124-L196 | def _query(method='GET', profile=None, url=None, path='api/v1',
action=None, api_key=None, service=None, params=None,
data=None, subdomain=None, verify_ssl=True):
'''
Query the PagerDuty API.
This method should be in utils.pagerduty.
'''
if profile:
creds = __salt__['config.option'](profile)
else:
creds = {
'pagerduty.api_key': api_key,
'pagerduty.subdomain': subdomain,
}
if url is None:
url = 'https://{0}.pagerduty.com/{1}/{2}'.format(
creds['pagerduty.subdomain'],
path,
action
)
if params is None:
params = {}
if data is None:
data = {}
headers = {
'Authorization': 'Token token={0}'.format(creds['pagerduty.api_key'])
}
if method != 'GET':
headers['Content-type'] = 'application/json'
result = requests.request(
method,
url,
headers=headers,
params=params,
data=salt.utils.json.dumps(data),
verify=verify_ssl
)
if result.text is None or result.text == '':
return None
result_json = result.json()
# if this query supports pagination, loop and fetch all results, merge them together
if 'total' in result_json and 'offset' in result_json and 'limit' in result_json:
offset = result_json['offset']
limit = result_json['limit']
total = result_json['total']
while offset + limit < total:
offset = offset + limit
limit = 100
data['offset'] = offset
data['limit'] = limit
next_page_results = requests.request(method,
url,
headers=headers,
params=params,
data=data, # Already serialized above, don't do it again
verify=verify_ssl).json()
offset = next_page_results['offset']
limit = next_page_results['limit']
# merge results
for k, v in result_json.items():
if isinstance(v, list):
result_json[k] += next_page_results[k]
return result_json | [
"def",
"_query",
"(",
"method",
"=",
"'GET'",
",",
"profile",
"=",
"None",
",",
"url",
"=",
"None",
",",
"path",
"=",
"'api/v1'",
",",
"action",
"=",
"None",
",",
"api_key",
"=",
"None",
",",
"service",
"=",
"None",
",",
"params",
"=",
"None",
",",
"data",
"=",
"None",
",",
"subdomain",
"=",
"None",
",",
"verify_ssl",
"=",
"True",
")",
":",
"if",
"profile",
":",
"creds",
"=",
"__salt__",
"[",
"'config.option'",
"]",
"(",
"profile",
")",
"else",
":",
"creds",
"=",
"{",
"'pagerduty.api_key'",
":",
"api_key",
",",
"'pagerduty.subdomain'",
":",
"subdomain",
",",
"}",
"if",
"url",
"is",
"None",
":",
"url",
"=",
"'https://{0}.pagerduty.com/{1}/{2}'",
".",
"format",
"(",
"creds",
"[",
"'pagerduty.subdomain'",
"]",
",",
"path",
",",
"action",
")",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"{",
"}",
"if",
"data",
"is",
"None",
":",
"data",
"=",
"{",
"}",
"headers",
"=",
"{",
"'Authorization'",
":",
"'Token token={0}'",
".",
"format",
"(",
"creds",
"[",
"'pagerduty.api_key'",
"]",
")",
"}",
"if",
"method",
"!=",
"'GET'",
":",
"headers",
"[",
"'Content-type'",
"]",
"=",
"'application/json'",
"result",
"=",
"requests",
".",
"request",
"(",
"method",
",",
"url",
",",
"headers",
"=",
"headers",
",",
"params",
"=",
"params",
",",
"data",
"=",
"salt",
".",
"utils",
".",
"json",
".",
"dumps",
"(",
"data",
")",
",",
"verify",
"=",
"verify_ssl",
")",
"if",
"result",
".",
"text",
"is",
"None",
"or",
"result",
".",
"text",
"==",
"''",
":",
"return",
"None",
"result_json",
"=",
"result",
".",
"json",
"(",
")",
"# if this query supports pagination, loop and fetch all results, merge them together",
"if",
"'total'",
"in",
"result_json",
"and",
"'offset'",
"in",
"result_json",
"and",
"'limit'",
"in",
"result_json",
":",
"offset",
"=",
"result_json",
"[",
"'offset'",
"]",
"limit",
"=",
"result_json",
"[",
"'limit'",
"]",
"total",
"=",
"result_json",
"[",
"'total'",
"]",
"while",
"offset",
"+",
"limit",
"<",
"total",
":",
"offset",
"=",
"offset",
"+",
"limit",
"limit",
"=",
"100",
"data",
"[",
"'offset'",
"]",
"=",
"offset",
"data",
"[",
"'limit'",
"]",
"=",
"limit",
"next_page_results",
"=",
"requests",
".",
"request",
"(",
"method",
",",
"url",
",",
"headers",
"=",
"headers",
",",
"params",
"=",
"params",
",",
"data",
"=",
"data",
",",
"# Already serialized above, don't do it again",
"verify",
"=",
"verify_ssl",
")",
".",
"json",
"(",
")",
"offset",
"=",
"next_page_results",
"[",
"'offset'",
"]",
"limit",
"=",
"next_page_results",
"[",
"'limit'",
"]",
"# merge results",
"for",
"k",
",",
"v",
"in",
"result_json",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"result_json",
"[",
"k",
"]",
"+=",
"next_page_results",
"[",
"k",
"]",
"return",
"result_json"
]
| Query the PagerDuty API.
This method should be in utils.pagerduty. | [
"Query",
"the",
"PagerDuty",
"API",
"."
]
| python | train |
O365/python-o365 | O365/utils/token.py | https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/utils/token.py#L192-L209 | def save_token(self):
"""
Saves the token dict in the store
:return bool: Success / Failure
"""
if self.token is None:
raise ValueError('You have to set the "token" first.')
try:
# set token will overwrite previous data
self.doc_ref.set({
self.field_name: self.serializer.dumps(self.token)
})
except Exception as e:
log.error('Token could not be saved: {}'.format(str(e)))
return False
return True | [
"def",
"save_token",
"(",
"self",
")",
":",
"if",
"self",
".",
"token",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'You have to set the \"token\" first.'",
")",
"try",
":",
"# set token will overwrite previous data",
"self",
".",
"doc_ref",
".",
"set",
"(",
"{",
"self",
".",
"field_name",
":",
"self",
".",
"serializer",
".",
"dumps",
"(",
"self",
".",
"token",
")",
"}",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"error",
"(",
"'Token could not be saved: {}'",
".",
"format",
"(",
"str",
"(",
"e",
")",
")",
")",
"return",
"False",
"return",
"True"
]
| Saves the token dict in the store
:return bool: Success / Failure | [
"Saves",
"the",
"token",
"dict",
"in",
"the",
"store",
":",
"return",
"bool",
":",
"Success",
"/",
"Failure"
]
| python | train |
Oneiroe/PySimpleAutomata | PySimpleAutomata/NFA.py | https://github.com/Oneiroe/PySimpleAutomata/blob/0f9f2705fd8ddd5d8118bc31552a640f5d00c359/PySimpleAutomata/NFA.py#L35-L99 | def nfa_intersection(nfa_1: dict, nfa_2: dict) -> dict:
""" Returns a NFA that reads the intersection of the NFAs in
input.
Let :math:`A_1 = (Σ,S_1,S_1^0,ρ_1,F_1)` and :math:`A_2 =(Σ,
S_2,S_2^0,ρ_2,F_2)` be two NFAs.
There is a NFA :math:`A_∧` that runs simultaneously both
:math:`A_1` and :math:`A_2` on the input word,
so :math:`L(A_∧) = L(A_1)∩L(A_2)`.
It is defined as:
:math:`A_∧ = ( Σ , S , S_0 , ρ , F )`
where
• :math:`S = S_1 × S_2`
• :math:`S_0 = S_1^0 × S_2^0`
• :math:`F = F_1 × F_2`
• :math:`((s,t), a, (s_X , t_X)) ∈ ρ` iff :math:`(s, a,s_X )
∈ ρ_1` and :math:`(t, a, t_X ) ∈ ρ_2`
:param dict nfa_1: first input NFA;
:param dict nfa_2: second input NFA;
:return: *(dict)* representing the intersected NFA.
"""
intersection = {
'alphabet': nfa_1['alphabet'].intersection(nfa_2['alphabet']),
'states': set(),
'initial_states': set(),
'accepting_states': set(),
'transitions': dict()
}
for init_1 in nfa_1['initial_states']:
for init_2 in nfa_2['initial_states']:
intersection['initial_states'].add((init_1, init_2))
intersection['states'].update(intersection['initial_states'])
boundary = set()
boundary.update(intersection['initial_states'])
while boundary:
(state_nfa_1, state_nfa_2) = boundary.pop()
if state_nfa_1 in nfa_1['accepting_states'] \
and state_nfa_2 in nfa_2['accepting_states']:
intersection['accepting_states'].add((state_nfa_1, state_nfa_2))
for a in intersection['alphabet']:
if (state_nfa_1, a) not in nfa_1['transitions'] \
or (state_nfa_2, a) not in nfa_2['transitions']:
continue
s1 = nfa_1['transitions'][state_nfa_1, a]
s2 = nfa_2['transitions'][state_nfa_2, a]
for destination_1 in s1:
for destination_2 in s2:
next_state = (destination_1, destination_2)
if next_state not in intersection['states']:
intersection['states'].add(next_state)
boundary.add(next_state)
intersection['transitions'].setdefault(
((state_nfa_1, state_nfa_2), a), set()).add(next_state)
if destination_1 in nfa_1['accepting_states'] \
and destination_2 in nfa_2['accepting_states']:
intersection['accepting_states'].add(next_state)
return intersection | [
"def",
"nfa_intersection",
"(",
"nfa_1",
":",
"dict",
",",
"nfa_2",
":",
"dict",
")",
"->",
"dict",
":",
"intersection",
"=",
"{",
"'alphabet'",
":",
"nfa_1",
"[",
"'alphabet'",
"]",
".",
"intersection",
"(",
"nfa_2",
"[",
"'alphabet'",
"]",
")",
",",
"'states'",
":",
"set",
"(",
")",
",",
"'initial_states'",
":",
"set",
"(",
")",
",",
"'accepting_states'",
":",
"set",
"(",
")",
",",
"'transitions'",
":",
"dict",
"(",
")",
"}",
"for",
"init_1",
"in",
"nfa_1",
"[",
"'initial_states'",
"]",
":",
"for",
"init_2",
"in",
"nfa_2",
"[",
"'initial_states'",
"]",
":",
"intersection",
"[",
"'initial_states'",
"]",
".",
"add",
"(",
"(",
"init_1",
",",
"init_2",
")",
")",
"intersection",
"[",
"'states'",
"]",
".",
"update",
"(",
"intersection",
"[",
"'initial_states'",
"]",
")",
"boundary",
"=",
"set",
"(",
")",
"boundary",
".",
"update",
"(",
"intersection",
"[",
"'initial_states'",
"]",
")",
"while",
"boundary",
":",
"(",
"state_nfa_1",
",",
"state_nfa_2",
")",
"=",
"boundary",
".",
"pop",
"(",
")",
"if",
"state_nfa_1",
"in",
"nfa_1",
"[",
"'accepting_states'",
"]",
"and",
"state_nfa_2",
"in",
"nfa_2",
"[",
"'accepting_states'",
"]",
":",
"intersection",
"[",
"'accepting_states'",
"]",
".",
"add",
"(",
"(",
"state_nfa_1",
",",
"state_nfa_2",
")",
")",
"for",
"a",
"in",
"intersection",
"[",
"'alphabet'",
"]",
":",
"if",
"(",
"state_nfa_1",
",",
"a",
")",
"not",
"in",
"nfa_1",
"[",
"'transitions'",
"]",
"or",
"(",
"state_nfa_2",
",",
"a",
")",
"not",
"in",
"nfa_2",
"[",
"'transitions'",
"]",
":",
"continue",
"s1",
"=",
"nfa_1",
"[",
"'transitions'",
"]",
"[",
"state_nfa_1",
",",
"a",
"]",
"s2",
"=",
"nfa_2",
"[",
"'transitions'",
"]",
"[",
"state_nfa_2",
",",
"a",
"]",
"for",
"destination_1",
"in",
"s1",
":",
"for",
"destination_2",
"in",
"s2",
":",
"next_state",
"=",
"(",
"destination_1",
",",
"destination_2",
")",
"if",
"next_state",
"not",
"in",
"intersection",
"[",
"'states'",
"]",
":",
"intersection",
"[",
"'states'",
"]",
".",
"add",
"(",
"next_state",
")",
"boundary",
".",
"add",
"(",
"next_state",
")",
"intersection",
"[",
"'transitions'",
"]",
".",
"setdefault",
"(",
"(",
"(",
"state_nfa_1",
",",
"state_nfa_2",
")",
",",
"a",
")",
",",
"set",
"(",
")",
")",
".",
"add",
"(",
"next_state",
")",
"if",
"destination_1",
"in",
"nfa_1",
"[",
"'accepting_states'",
"]",
"and",
"destination_2",
"in",
"nfa_2",
"[",
"'accepting_states'",
"]",
":",
"intersection",
"[",
"'accepting_states'",
"]",
".",
"add",
"(",
"next_state",
")",
"return",
"intersection"
]
| Returns a NFA that reads the intersection of the NFAs in
input.
Let :math:`A_1 = (Σ,S_1,S_1^0,ρ_1,F_1)` and :math:`A_2 =(Σ,
S_2,S_2^0,ρ_2,F_2)` be two NFAs.
There is a NFA :math:`A_∧` that runs simultaneously both
:math:`A_1` and :math:`A_2` on the input word,
so :math:`L(A_∧) = L(A_1)∩L(A_2)`.
It is defined as:
:math:`A_∧ = ( Σ , S , S_0 , ρ , F )`
where
• :math:`S = S_1 × S_2`
• :math:`S_0 = S_1^0 × S_2^0`
• :math:`F = F_1 × F_2`
• :math:`((s,t), a, (s_X , t_X)) ∈ ρ` iff :math:`(s, a,s_X )
∈ ρ_1` and :math:`(t, a, t_X ) ∈ ρ_2`
:param dict nfa_1: first input NFA;
:param dict nfa_2: second input NFA;
:return: *(dict)* representing the intersected NFA. | [
"Returns",
"a",
"NFA",
"that",
"reads",
"the",
"intersection",
"of",
"the",
"NFAs",
"in",
"input",
"."
]
| python | train |
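A usage sketch with two toy NFAs in the dict format the function expects; the state names are hypothetical, and `nfa_intersection` is assumed to be importable from `PySimpleAutomata.NFA` as in the record above:

from PySimpleAutomata.NFA import nfa_intersection

nfa_a = {   # accepts 'a' followed by any number of 'b'
    'alphabet': {'a', 'b'},
    'states': {'s0', 's1'},
    'initial_states': {'s0'},
    'accepting_states': {'s1'},
    'transitions': {('s0', 'a'): {'s1'}, ('s1', 'b'): {'s1'}},
}
nfa_b = {   # accepts one or more 'a'
    'alphabet': {'a', 'b'},
    'states': {'t0', 't1'},
    'initial_states': {'t0'},
    'accepting_states': {'t1'},
    'transitions': {('t0', 'a'): {'t1'}, ('t1', 'a'): {'t1'}},
}

both = nfa_intersection(nfa_a, nfa_b)
# both['initial_states']   == {('s0', 't0')}
# both['accepting_states'] == {('s1', 't1')}  -- only the word 'a' is in both languages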
laginha/django-mobileesp | src/django_mobileesp/mdetect.py | https://github.com/laginha/django-mobileesp/blob/91d4babb2343b992970bdb076508d380680c8b7e/src/django_mobileesp/mdetect.py#L977-L1001 | def detectTierRichCss(self):
"""Return detection of any device in the 'Rich CSS' Tier
The quick way to detect for a tier of devices.
This method detects for devices which are likely to be capable
of viewing CSS content optimized for the iPhone,
but may not necessarily support JavaScript.
Excludes all iPhone Tier devices.
"""
#The following devices are explicitly ok.
#Note: 'High' BlackBerry devices ONLY
if not self.detectMobileQuick():
return False
#Exclude iPhone Tier and e-Ink Kindle devices
if self.detectTierIphone() \
or self.detectKindle():
return False
#The following devices are explicitly ok.
#Note: 'High' BlackBerry devices ONLY
#Older Windows 'Mobile' isn't good enough for iPhone Tier.
return self.detectWebkit() \
or self.detectS60OssBrowser() \
or self.detectBlackBerryHigh() \
or self.detectWindowsMobile() \
or UAgentInfo.engineTelecaQ in self.__userAgent | [
"def",
"detectTierRichCss",
"(",
"self",
")",
":",
"#The following devices are explicitly ok.",
"#Note: 'High' BlackBerry devices ONLY",
"if",
"not",
"self",
".",
"detectMobileQuick",
"(",
")",
":",
"return",
"False",
"#Exclude iPhone Tier and e-Ink Kindle devices",
"if",
"self",
".",
"detectTierIphone",
"(",
")",
"or",
"self",
".",
"detectKindle",
"(",
")",
":",
"return",
"False",
"#The following devices are explicitly ok.",
"#Note: 'High' BlackBerry devices ONLY",
"#Older Windows 'Mobile' isn't good enough for iPhone Tier.",
"return",
"self",
".",
"detectWebkit",
"(",
")",
"or",
"self",
".",
"detectS60OssBrowser",
"(",
")",
"or",
"self",
".",
"detectBlackBerryHigh",
"(",
")",
"or",
"self",
".",
"detectWindowsMobile",
"(",
")",
"or",
"UAgentInfo",
".",
"engineTelecaQ",
"in",
"self",
".",
"__userAgent"
]
| Return detection of any device in the 'Rich CSS' Tier
The quick way to detect for a tier of devices.
This method detects for devices which are likely to be capable
of viewing CSS content optimized for the iPhone,
but may not necessarily support JavaScript.
Excludes all iPhone Tier devices. | [
"Return",
"detection",
"of",
"any",
"device",
"in",
"the",
"Rich",
"CSS",
"Tier"
]
| python | train |
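As a usage sketch only: how the tier checks in the record above might drive template selection in a Django view. The import path follows the record's module path, and the assumption that UAgentInfo is constructed from the raw User-Agent and Accept header strings mirrors the MobileESP reference API; both should be verified against the installed package.

    from django_mobileesp.mdetect import UAgentInfo

    def pick_template(request):
        # Hypothetical Django request; headers may be absent, hence the defaults.
        ua = request.META.get('HTTP_USER_AGENT', '')
        accept = request.META.get('HTTP_ACCEPT', '')
        agent = UAgentInfo(ua, accept)
        if agent.detectTierIphone():
            return 'touch.html'       # full WebKit + JavaScript tier
        if agent.detectTierRichCss():
            return 'rich_css.html'    # iPhone-style CSS, JavaScript not guaranteed
        return 'basic.html'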
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L1502-L1590 | def kernels_list(self,
page=1,
page_size=20,
dataset=None,
competition=None,
parent_kernel=None,
search=None,
mine=False,
user=None,
language=None,
kernel_type=None,
output_type=None,
sort_by=None):
""" list kernels based on a set of search criteria
Parameters
==========
page: the page of results to return (default is 1)
page_size: results per page (default is 20)
dataset: if defined, filter to this dataset (default None)
competition: if defined, filter to this competition (default None)
parent_kernel: if defined, filter to those with specified parent
search: a custom search string to pass to the list query
mine: if true, group is specified as "my" to return personal kernels
user: filter results to a specific user
language: the programming language of the kernel
kernel_type: the type of kernel, one of valid_kernel_types (str)
output_type: the output type, one of valid_output_types (str)
sort_by: if defined, sort results by this string (valid_sort_by)
"""
if int(page) <= 0:
raise ValueError('Page number must be >= 1')
page_size = int(page_size)
if page_size <= 0:
raise ValueError('Page size must be >= 1')
if page_size > 100:
page_size = 100
valid_languages = ['all', 'python', 'r', 'sqlite', 'julia']
if language and language not in valid_languages:
raise ValueError('Invalid language specified. Valid options are ' +
str(valid_languages))
valid_kernel_types = ['all', 'script', 'notebook']
if kernel_type and kernel_type not in valid_kernel_types:
raise ValueError(
'Invalid kernel type specified. Valid options are ' +
str(valid_kernel_types))
valid_output_types = ['all', 'visualization', 'data']
if output_type and output_type not in valid_output_types:
raise ValueError(
'Invalid output type specified. Valid options are ' +
str(valid_output_types))
valid_sort_by = [
'hotness', 'commentCount', 'dateCreated', 'dateRun', 'relevance',
'scoreAscending', 'scoreDescending', 'viewCount', 'voteCount'
]
if sort_by and sort_by not in valid_sort_by:
raise ValueError(
'Invalid sort by type specified. Valid options are ' +
str(valid_sort_by))
if sort_by == 'relevance' and search == '':
raise ValueError('Cannot sort by relevance without a search term.')
self.validate_dataset_string(dataset)
self.validate_kernel_string(parent_kernel)
group = 'everyone'
if mine:
group = 'profile'
kernels_list_result = self.process_response(
self.kernels_list_with_http_info(
page=page,
page_size=page_size,
group=group,
user=user or '',
language=language or 'all',
kernel_type=kernel_type or 'all',
output_type=output_type or 'all',
sort_by=sort_by or 'hotness',
dataset=dataset or '',
competition=competition or '',
parent_kernel=parent_kernel or '',
search=search or ''))
return [Kernel(k) for k in kernels_list_result] | [
"def",
"kernels_list",
"(",
"self",
",",
"page",
"=",
"1",
",",
"page_size",
"=",
"20",
",",
"dataset",
"=",
"None",
",",
"competition",
"=",
"None",
",",
"parent_kernel",
"=",
"None",
",",
"search",
"=",
"None",
",",
"mine",
"=",
"False",
",",
"user",
"=",
"None",
",",
"language",
"=",
"None",
",",
"kernel_type",
"=",
"None",
",",
"output_type",
"=",
"None",
",",
"sort_by",
"=",
"None",
")",
":",
"if",
"int",
"(",
"page",
")",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'Page number must be >= 1'",
")",
"page_size",
"=",
"int",
"(",
"page_size",
")",
"if",
"page_size",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'Page size must be >= 1'",
")",
"if",
"page_size",
">",
"100",
":",
"page_size",
"=",
"100",
"valid_languages",
"=",
"[",
"'all'",
",",
"'python'",
",",
"'r'",
",",
"'sqlite'",
",",
"'julia'",
"]",
"if",
"language",
"and",
"language",
"not",
"in",
"valid_languages",
":",
"raise",
"ValueError",
"(",
"'Invalid language specified. Valid options are '",
"+",
"str",
"(",
"valid_languages",
")",
")",
"valid_kernel_types",
"=",
"[",
"'all'",
",",
"'script'",
",",
"'notebook'",
"]",
"if",
"kernel_type",
"and",
"kernel_type",
"not",
"in",
"valid_kernel_types",
":",
"raise",
"ValueError",
"(",
"'Invalid kernel type specified. Valid options are '",
"+",
"str",
"(",
"valid_kernel_types",
")",
")",
"valid_output_types",
"=",
"[",
"'all'",
",",
"'visualization'",
",",
"'data'",
"]",
"if",
"output_type",
"and",
"output_type",
"not",
"in",
"valid_output_types",
":",
"raise",
"ValueError",
"(",
"'Invalid output type specified. Valid options are '",
"+",
"str",
"(",
"valid_output_types",
")",
")",
"valid_sort_by",
"=",
"[",
"'hotness'",
",",
"'commentCount'",
",",
"'dateCreated'",
",",
"'dateRun'",
",",
"'relevance'",
",",
"'scoreAscending'",
",",
"'scoreDescending'",
",",
"'viewCount'",
",",
"'voteCount'",
"]",
"if",
"sort_by",
"and",
"sort_by",
"not",
"in",
"valid_sort_by",
":",
"raise",
"ValueError",
"(",
"'Invalid sort by type specified. Valid options are '",
"+",
"str",
"(",
"valid_sort_by",
")",
")",
"if",
"sort_by",
"==",
"'relevance'",
"and",
"search",
"==",
"''",
":",
"raise",
"ValueError",
"(",
"'Cannot sort by relevance without a search term.'",
")",
"self",
".",
"validate_dataset_string",
"(",
"dataset",
")",
"self",
".",
"validate_kernel_string",
"(",
"parent_kernel",
")",
"group",
"=",
"'everyone'",
"if",
"mine",
":",
"group",
"=",
"'profile'",
"kernels_list_result",
"=",
"self",
".",
"process_response",
"(",
"self",
".",
"kernels_list_with_http_info",
"(",
"page",
"=",
"page",
",",
"page_size",
"=",
"page_size",
",",
"group",
"=",
"group",
",",
"user",
"=",
"user",
"or",
"''",
",",
"language",
"=",
"language",
"or",
"'all'",
",",
"kernel_type",
"=",
"kernel_type",
"or",
"'all'",
",",
"output_type",
"=",
"output_type",
"or",
"'all'",
",",
"sort_by",
"=",
"sort_by",
"or",
"'hotness'",
",",
"dataset",
"=",
"dataset",
"or",
"''",
",",
"competition",
"=",
"competition",
"or",
"''",
",",
"parent_kernel",
"=",
"parent_kernel",
"or",
"''",
",",
"search",
"=",
"search",
"or",
"''",
")",
")",
"return",
"[",
"Kernel",
"(",
"k",
")",
"for",
"k",
"in",
"kernels_list_result",
"]"
]
| list kernels based on a set of search criteria
Parameters
==========
page: the page of results to return (default is 1)
page_size: results per page (default is 20)
dataset: if defined, filter to this dataset (default None)
competition: if defined, filter to this competition (default None)
parent_kernel: if defined, filter to those with specified parent
search: a custom search string to pass to the list query
mine: if true, group is specified as "my" to return personal kernels
user: filter results to a specific user
language: the programming language of the kernel
kernel_type: the type of kernel, one of valid_kernel_types (str)
output_type: the output type, one of valid_output_types (str)
sort_by: if defined, sort results by this string (valid_sort_by) | [
"list",
"kernels",
"based",
"on",
"a",
"set",
"of",
"search",
"criteria"
]
| python | train |
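A hedged usage sketch of the listing call above through the kaggle package's public client; KaggleApi and authenticate() read credentials from ~/.kaggle/kaggle.json, and the exact fields exposed on the returned Kernel objects should be checked against the installed version.

    from kaggle.api.kaggle_api_extended import KaggleApi

    api = KaggleApi()
    api.authenticate()   # credentials from ~/.kaggle/kaggle.json

    # The 20 most-voted Python notebooks matching a search term.
    kernels = api.kernels_list(
        search='titanic',
        language='python',
        kernel_type='notebook',
        sort_by='voteCount',
        page=1,
        page_size=20,
    )
    for kernel in kernels:
        print(kernel)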
limodou/uliweb | uliweb/lib/werkzeug/contrib/cache.py | https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/contrib/cache.py#L73-L88 | def _items(mappingorseq):
"""Wrapper for efficient iteration over mappings represented by dicts
or sequences::
>>> for k, v in _items((i, i*i) for i in xrange(5)):
... assert k*k == v
>>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
... assert k*k == v
"""
if hasattr(mappingorseq, "iteritems"):
return mappingorseq.iteritems()
elif hasattr(mappingorseq, "items"):
return mappingorseq.items()
return mappingorseq | [
"def",
"_items",
"(",
"mappingorseq",
")",
":",
"if",
"hasattr",
"(",
"mappingorseq",
",",
"\"iteritems\"",
")",
":",
"return",
"mappingorseq",
".",
"iteritems",
"(",
")",
"elif",
"hasattr",
"(",
"mappingorseq",
",",
"\"items\"",
")",
":",
"return",
"mappingorseq",
".",
"items",
"(",
")",
"return",
"mappingorseq"
]
| Wrapper for efficient iteration over mappings represented by dicts
or sequences::
>>> for k, v in _items((i, i*i) for i in xrange(5)):
... assert k*k == v
>>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
... assert k*k == v | [
"Wrapper",
"for",
"efficient",
"iteration",
"over",
"mappings",
"represented",
"by",
"dicts",
"or",
"sequences",
"::"
]
| python | train |
nerox8664/pytorch2keras | pytorch2keras/padding_layers.py | https://github.com/nerox8664/pytorch2keras/blob/750eaf747323580e6732d0c5ba9f2f39cb096764/pytorch2keras/padding_layers.py#L9-L52 | def convert_padding(params, w_name, scope_name, inputs, layers, weights, names):
"""
Convert padding layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting padding...')
if params['mode'] == 'constant':
# raise AssertionError('Cannot convert non-constant padding')
if params['value'] != 0.0:
raise AssertionError('Cannot convert non-zero padding')
if names:
tf_name = 'PADD' + random_string(4)
else:
tf_name = w_name + str(random.random())
# Magic ordering
padding_name = tf_name
padding_layer = keras.layers.ZeroPadding2D(
padding=((params['pads'][2], params['pads'][6]), (params['pads'][3], params['pads'][7])),
name=padding_name
)
layers[scope_name] = padding_layer(layers[inputs[0]])
elif params['mode'] == 'reflect':
def target_layer(x, pads=params['pads']):
# x = tf.transpose(x, [0, 2, 3, 1])
layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[6]], [pads[3], pads[7]]], 'REFLECT')
# layer = tf.transpose(layer, [0, 3, 1, 2])
return layer
lambda_layer = keras.layers.Lambda(target_layer)
layers[scope_name] = lambda_layer(layers[inputs[0]]) | [
"def",
"convert_padding",
"(",
"params",
",",
"w_name",
",",
"scope_name",
",",
"inputs",
",",
"layers",
",",
"weights",
",",
"names",
")",
":",
"print",
"(",
"'Converting padding...'",
")",
"if",
"params",
"[",
"'mode'",
"]",
"==",
"'constant'",
":",
"# raise AssertionError('Cannot convert non-constant padding')",
"if",
"params",
"[",
"'value'",
"]",
"!=",
"0.0",
":",
"raise",
"AssertionError",
"(",
"'Cannot convert non-zero padding'",
")",
"if",
"names",
":",
"tf_name",
"=",
"'PADD'",
"+",
"random_string",
"(",
"4",
")",
"else",
":",
"tf_name",
"=",
"w_name",
"+",
"str",
"(",
"random",
".",
"random",
"(",
")",
")",
"# Magic ordering",
"padding_name",
"=",
"tf_name",
"padding_layer",
"=",
"keras",
".",
"layers",
".",
"ZeroPadding2D",
"(",
"padding",
"=",
"(",
"(",
"params",
"[",
"'pads'",
"]",
"[",
"2",
"]",
",",
"params",
"[",
"'pads'",
"]",
"[",
"6",
"]",
")",
",",
"(",
"params",
"[",
"'pads'",
"]",
"[",
"3",
"]",
",",
"params",
"[",
"'pads'",
"]",
"[",
"7",
"]",
")",
")",
",",
"name",
"=",
"padding_name",
")",
"layers",
"[",
"scope_name",
"]",
"=",
"padding_layer",
"(",
"layers",
"[",
"inputs",
"[",
"0",
"]",
"]",
")",
"elif",
"params",
"[",
"'mode'",
"]",
"==",
"'reflect'",
":",
"def",
"target_layer",
"(",
"x",
",",
"pads",
"=",
"params",
"[",
"'pads'",
"]",
")",
":",
"# x = tf.transpose(x, [0, 2, 3, 1])",
"layer",
"=",
"tf",
".",
"pad",
"(",
"x",
",",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
"]",
",",
"[",
"pads",
"[",
"2",
"]",
",",
"pads",
"[",
"6",
"]",
"]",
",",
"[",
"pads",
"[",
"3",
"]",
",",
"pads",
"[",
"7",
"]",
"]",
"]",
",",
"'REFLECT'",
")",
"# layer = tf.transpose(layer, [0, 3, 1, 2])",
"return",
"layer",
"lambda_layer",
"=",
"keras",
".",
"layers",
".",
"Lambda",
"(",
"target_layer",
")",
"layers",
"[",
"scope_name",
"]",
"=",
"lambda_layer",
"(",
"layers",
"[",
"inputs",
"[",
"0",
"]",
"]",
")"
]
| Convert padding layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers | [
"Convert",
"padding",
"layer",
"."
]
| python | valid |
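The constant-padding branch above boils down to picking indices 2/6 (height) and 3/7 (width) out of an 8-element NCHW pads list. A dependency-free sketch of just that index mapping, so the convention can be checked without TensorFlow or Keras installed:

    def pads_to_keras_padding(pads):
        # pads = [batch0, chan0, top, left, batch1, chan1, bottom, right]
        # -> ((top, bottom), (left, right)) as expected by ZeroPadding2D.
        return ((pads[2], pads[6]), (pads[3], pads[7]))

    # Symmetric 1-pixel spatial padding, nothing on the batch/channel axes.
    assert pads_to_keras_padding([0, 0, 1, 1, 0, 0, 1, 1]) == ((1, 1), (1, 1))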
astropy/photutils | photutils/segmentation/properties.py | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L1024-L1036 | def covariance_eigvals(self):
"""
The two eigenvalues of the `covariance` matrix in decreasing
order.
"""
if not np.isnan(np.sum(self.covariance)):
eigvals = np.linalg.eigvals(self.covariance)
if np.any(eigvals < 0): # negative variance
return (np.nan, np.nan) * u.pix**2 # pragma: no cover
return (np.max(eigvals), np.min(eigvals)) * u.pix**2
else:
return (np.nan, np.nan) * u.pix**2 | [
"def",
"covariance_eigvals",
"(",
"self",
")",
":",
"if",
"not",
"np",
".",
"isnan",
"(",
"np",
".",
"sum",
"(",
"self",
".",
"covariance",
")",
")",
":",
"eigvals",
"=",
"np",
".",
"linalg",
".",
"eigvals",
"(",
"self",
".",
"covariance",
")",
"if",
"np",
".",
"any",
"(",
"eigvals",
"<",
"0",
")",
":",
"# negative variance",
"return",
"(",
"np",
".",
"nan",
",",
"np",
".",
"nan",
")",
"*",
"u",
".",
"pix",
"**",
"2",
"# pragma: no cover",
"return",
"(",
"np",
".",
"max",
"(",
"eigvals",
")",
",",
"np",
".",
"min",
"(",
"eigvals",
")",
")",
"*",
"u",
".",
"pix",
"**",
"2",
"else",
":",
"return",
"(",
"np",
".",
"nan",
",",
"np",
".",
"nan",
")",
"*",
"u",
".",
"pix",
"**",
"2"
]
| The two eigenvalues of the `covariance` matrix in decreasing
order. | [
"The",
"two",
"eigenvalues",
"of",
"the",
"covariance",
"matrix",
"in",
"decreasing",
"order",
"."
]
| python | train |
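A small numpy sketch of the ordering convention used above (largest eigenvalue first), on a hand-built 2x2 covariance matrix; the pixel-squared units attached by the record are omitted here.

    import numpy as np

    covariance = np.array([[4.0, 1.0],
                           [1.0, 2.0]])   # toy covariance of an elongated source

    eigvals = np.linalg.eigvals(covariance)
    major, minor = np.max(eigvals), np.min(eigvals)
    print(major, minor)   # approximately 4.414 and 1.586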
spyder-ide/spyder | spyder/utils/workers.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/workers.py#L243-L247 | def _clean_workers(self):
"""Delete periodically workers in workers bag."""
while self._bag_collector:
self._bag_collector.popleft()
self._timer_worker_delete.stop() | [
"def",
"_clean_workers",
"(",
"self",
")",
":",
"while",
"self",
".",
"_bag_collector",
":",
"self",
".",
"_bag_collector",
".",
"popleft",
"(",
")",
"self",
".",
"_timer_worker_delete",
".",
"stop",
"(",
")"
]
| Delete periodically workers in workers bag. | [
"Delete",
"periodically",
"workers",
"in",
"workers",
"bag",
"."
]
| python | train |
AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L12409-L12454 | def spkw02(handle, body, center, inframe, first, last, segid, intlen, n, polydg,
cdata, btime):
"""
Write a type 2 segment to an SPK file.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkw02_c.html
:param handle: Handle of an SPK file open for writing.
:type handle: int
:param body: Body code for ephemeris object.
:type body: int
:param center: Body code for the center of motion of the body.
:type center: int
:param inframe: The reference frame of the states.
:type inframe: str
:param first: First valid time for which states can be computed.
:type first: float
:param last: Last valid time for which states can be computed.
:type last: float
:param segid: Segment identifier.
:type segid: str
:param intlen: Length of time covered by logical record.
:type intlen: float
:param n: Number of coefficient sets.
:type n: int
:param polydg: Chebyshev polynomial degree.
:type polydg: int
:param cdata: Array of Chebyshev coefficients.
:type cdata: Array of floats
:param btime: Begin time of first logical record.
:type btime: float
"""
handle = ctypes.c_int(handle)
body = ctypes.c_int(body)
center = ctypes.c_int(center)
inframe = stypes.stringToCharP(inframe)
first = ctypes.c_double(first)
last = ctypes.c_double(last)
segid = stypes.stringToCharP(segid)
intlen = ctypes.c_double(intlen)
n = ctypes.c_int(n)
polydg = ctypes.c_int(polydg)
cdata = stypes.toDoubleVector(cdata)
btime = ctypes.c_double(btime)
libspice.spkw02_c(handle, body, center, inframe, first, last, segid, intlen,
n, polydg, cdata, btime) | [
"def",
"spkw02",
"(",
"handle",
",",
"body",
",",
"center",
",",
"inframe",
",",
"first",
",",
"last",
",",
"segid",
",",
"intlen",
",",
"n",
",",
"polydg",
",",
"cdata",
",",
"btime",
")",
":",
"handle",
"=",
"ctypes",
".",
"c_int",
"(",
"handle",
")",
"body",
"=",
"ctypes",
".",
"c_int",
"(",
"body",
")",
"center",
"=",
"ctypes",
".",
"c_int",
"(",
"center",
")",
"inframe",
"=",
"stypes",
".",
"stringToCharP",
"(",
"inframe",
")",
"first",
"=",
"ctypes",
".",
"c_double",
"(",
"first",
")",
"last",
"=",
"ctypes",
".",
"c_double",
"(",
"last",
")",
"segid",
"=",
"stypes",
".",
"stringToCharP",
"(",
"segid",
")",
"intlen",
"=",
"ctypes",
".",
"c_double",
"(",
"intlen",
")",
"n",
"=",
"ctypes",
".",
"c_int",
"(",
"n",
")",
"polydg",
"=",
"ctypes",
".",
"c_int",
"(",
"polydg",
")",
"cdata",
"=",
"stypes",
".",
"toDoubleVector",
"(",
"cdata",
")",
"btime",
"=",
"ctypes",
".",
"c_double",
"(",
"btime",
")",
"libspice",
".",
"spkw02_c",
"(",
"handle",
",",
"body",
",",
"center",
",",
"inframe",
",",
"first",
",",
"last",
",",
"segid",
",",
"intlen",
",",
"n",
",",
"polydg",
",",
"cdata",
",",
"btime",
")"
]
| Write a type 2 segment to an SPK file.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkw02_c.html
:param handle: Handle of an SPK file open for writing.
:type handle: int
:param body: Body code for ephemeris object.
:type body: int
:param center: Body code for the center of motion of the body.
:type center: int
:param inframe: The reference frame of the states.
:type inframe: str
:param first: First valid time for which states can be computed.
:type first: float
:param last: Last valid time for which states can be computed.
:type last: float
:param segid: Segment identifier.
:type segid: str
:param intlen: Length of time covered by logical record.
:type intlen: float
:param n: Number of coefficient sets.
:type n: int
:param polydg: Chebyshev polynomial degree.
:type polydg: int
:param cdata: Array of Chebyshev coefficients.
:type cdata: Array of floats
:param btime: Begin time of first logical record.
:type btime: float | [
"Write",
"a",
"type",
"2",
"segment",
"to",
"an",
"SPK",
"file",
"."
]
| python | train |
decentfox/aioh2 | aioh2/protocol.py | https://github.com/decentfox/aioh2/blob/2f9b76161e99e32317083cd2ebd17ce2ed3e41ab/aioh2/protocol.py#L514-L526 | def end_stream(self, stream_id):
"""
Close the given stream locally.
This may block until the underlying transport becomes writable, or
other coroutines release the wlock on this stream.
:param stream_id: Which stream to close.
"""
with (yield from self._get_stream(stream_id).wlock):
yield from self._resumed.wait()
self._conn.end_stream(stream_id)
self._flush() | [
"def",
"end_stream",
"(",
"self",
",",
"stream_id",
")",
":",
"with",
"(",
"yield",
"from",
"self",
".",
"_get_stream",
"(",
"stream_id",
")",
".",
"wlock",
")",
":",
"yield",
"from",
"self",
".",
"_resumed",
".",
"wait",
"(",
")",
"self",
".",
"_conn",
".",
"end_stream",
"(",
"stream_id",
")",
"self",
".",
"_flush",
"(",
")"
]
| Close the given stream locally.
This may block until the underlying transport becomes writable, or
other coroutines release the wlock on this stream.
:param stream_id: Which stream to close. | [
"Close",
"the",
"given",
"stream",
"locally",
"."
]
| python | train |
ratt-ru/PyMORESANE | pymoresane/iuwt.py | https://github.com/ratt-ru/PyMORESANE/blob/b024591ad0bbb69320d08841f28a2c27f62ae1af/pymoresane/iuwt.py#L113-L149 | def ser_iuwt_recomposition(in1, scale_adjust, smoothed_array):
"""
This function calls the a trous algorithm code to recompose the input into a single array. This is the
implementation of the isotropic undecimated wavelet transform recomposition for a single CPU core.
INPUTS:
in1 (no default): Array containing wavelet coefficients.
scale_adjust (no default): Indicates the number of truncated array pages.
smoothed_array (default=None): For a complete inverse transform, this must be the smoothest approximation.
OUTPUTS:
recomposition Array containing the reconstructed image.
"""
wavelet_filter = (1./16)*np.array([1,4,6,4,1]) # Filter-bank for use in the a trous algorithm.
# Determines scale with adjustment and creates a zero array to store the output, unless smoothed_array is given.
max_scale = in1.shape[0] + scale_adjust
if smoothed_array is None:
recomposition = np.zeros([in1.shape[1], in1.shape[2]])
else:
recomposition = smoothed_array
# The following loops call the a trous algorithm code to recompose the input. The first loop assumes that there are
# non-zero wavelet coefficients at scales above scale_adjust, while the second loop completes the recomposition
# on the scales less than scale_adjust.
for i in range(max_scale-1, scale_adjust-1, -1):
recomposition = ser_a_trous(recomposition, wavelet_filter, i) + in1[i-scale_adjust,:,:]
if scale_adjust>0:
for i in range(scale_adjust-1, -1, -1):
recomposition = ser_a_trous(recomposition, wavelet_filter, i)
return recomposition | [
"def",
"ser_iuwt_recomposition",
"(",
"in1",
",",
"scale_adjust",
",",
"smoothed_array",
")",
":",
"wavelet_filter",
"=",
"(",
"1.",
"/",
"16",
")",
"*",
"np",
".",
"array",
"(",
"[",
"1",
",",
"4",
",",
"6",
",",
"4",
",",
"1",
"]",
")",
"# Filter-bank for use in the a trous algorithm.",
"# Determines scale with adjustment and creates a zero array to store the output, unless smoothed_array is given.",
"max_scale",
"=",
"in1",
".",
"shape",
"[",
"0",
"]",
"+",
"scale_adjust",
"if",
"smoothed_array",
"is",
"None",
":",
"recomposition",
"=",
"np",
".",
"zeros",
"(",
"[",
"in1",
".",
"shape",
"[",
"1",
"]",
",",
"in1",
".",
"shape",
"[",
"2",
"]",
"]",
")",
"else",
":",
"recomposition",
"=",
"smoothed_array",
"# The following loops call the a trous algorithm code to recompose the input. The first loop assumes that there are",
"# non-zero wavelet coefficients at scales above scale_adjust, while the second loop completes the recomposition",
"# on the scales less than scale_adjust.",
"for",
"i",
"in",
"range",
"(",
"max_scale",
"-",
"1",
",",
"scale_adjust",
"-",
"1",
",",
"-",
"1",
")",
":",
"recomposition",
"=",
"ser_a_trous",
"(",
"recomposition",
",",
"wavelet_filter",
",",
"i",
")",
"+",
"in1",
"[",
"i",
"-",
"scale_adjust",
",",
":",
",",
":",
"]",
"if",
"scale_adjust",
">",
"0",
":",
"for",
"i",
"in",
"range",
"(",
"scale_adjust",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"recomposition",
"=",
"ser_a_trous",
"(",
"recomposition",
",",
"wavelet_filter",
",",
"i",
")",
"return",
"recomposition"
]
| This function calls the a trous algorithm code to recompose the input into a single array. This is the
implementation of the isotropic undecimated wavelet transform recomposition for a single CPU core.
INPUTS:
in1 (no default): Array containing wavelet coefficients.
scale_adjust (no default): Indicates the number of truncated array pages.
smoothed_array (default=None): For a complete inverse transform, this must be the smoothest approximation.
OUTPUTS:
recomposition Array containing the reconstructed image. | [
"This",
"function",
"calls",
"the",
"a",
"trous",
"algorithm",
"code",
"to",
"recompose",
"the",
"input",
"into",
"a",
"single",
"array",
".",
"This",
"is",
"the",
"implementation",
"of",
"the",
"isotropic",
"undecimated",
"wavelet",
"transform",
"recomposition",
"for",
"a",
"single",
"CPU",
"core",
"."
]
| python | train |
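The record reconstructs by re-applying the ser_a_trous smoothing at every scale during synthesis. The simpler textbook starlet variant below, in which each wavelet plane is the plain difference of successive smoothings, telescopes to an exact reconstruction and is enough to illustrate the B3-spline "a trous" (holes) filter in one dimension; the periodic boundaries via np.roll are a simplification of the record's 2-D boundary handling.

    import numpy as np

    def a_trous_smooth(signal, scale):
        # B3-spline kernel [1, 4, 6, 4, 1]/16 with 2**scale - 1 holes between taps.
        kernel = np.array([1.0, 4.0, 6.0, 4.0, 1.0]) / 16.0
        offsets = np.array([-2, -1, 0, 1, 2]) * (2 ** scale)
        out = np.zeros_like(signal)
        for tap, off in zip(kernel, offsets):
            out += tap * np.roll(signal, off)
        return out

    signal = np.random.default_rng(0).normal(size=64)

    smooth, planes = signal.copy(), []
    for i in range(3):                      # decompose into 3 wavelet planes
        coarser = a_trous_smooth(smooth, i)
        planes.append(smooth - coarser)
        smooth = coarser

    # Recompose: smoothest approximation plus all wavelet planes.
    assert np.allclose(smooth + sum(planes), signal)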
monarch-initiative/dipper | dipper/models/Genotype.py | https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Genotype.py#L144-L161 | def addAffectedLocus(
self, allele_id, gene_id, rel_id=None):
"""
We make the assumption here that if the relationship is not provided,
it is a
GENO:is_allele_of.
Here, the allele should be a variant_locus, not a sequence alteration.
:param allele_id:
:param gene_id:
:param rel_id:
:return:
"""
if rel_id is None:
rel_id = self.globaltt['has_affected_feature']
self.graph.addTriple(allele_id, rel_id, gene_id)
return | [
"def",
"addAffectedLocus",
"(",
"self",
",",
"allele_id",
",",
"gene_id",
",",
"rel_id",
"=",
"None",
")",
":",
"if",
"rel_id",
"is",
"None",
":",
"rel_id",
"=",
"self",
".",
"globaltt",
"[",
"'has_affected_feature'",
"]",
"self",
".",
"graph",
".",
"addTriple",
"(",
"allele_id",
",",
"rel_id",
",",
"gene_id",
")",
"return"
]
| We make the assumption here that if the relationship is not provided,
it is a
GENO:is_allele_of.
Here, the allele should be a variant_locus, not a sequence alteration.
:param allele_id:
:param gene_id:
:param rel_id:
:return: | [
"We",
"make",
"the",
"assumption",
"here",
"that",
"if",
"the",
"relationship",
"is",
"not",
"provided",
"it",
"is",
"a",
"GENO",
":",
"is_allele_of",
"."
]
| python | train |
guykisel/inline-plz | inlineplz/linter_runner.py | https://github.com/guykisel/inline-plz/blob/b5b1744e9156e31f68b519c0d8022feff79888ae/inlineplz/linter_runner.py#L167-L178 | def cleanup():
"""Delete standard installation directories."""
for install_dir in linters.INSTALL_DIRS:
try:
shutil.rmtree(install_dir, ignore_errors=True)
except Exception:
print(
"{0}\nFailed to delete {1}".format(
traceback.format_exc(), install_dir
)
)
sys.stdout.flush() | [
"def",
"cleanup",
"(",
")",
":",
"for",
"install_dir",
"in",
"linters",
".",
"INSTALL_DIRS",
":",
"try",
":",
"shutil",
".",
"rmtree",
"(",
"install_dir",
",",
"ignore_errors",
"=",
"True",
")",
"except",
"Exception",
":",
"print",
"(",
"\"{0}\\nFailed to delete {1}\"",
".",
"format",
"(",
"traceback",
".",
"format_exc",
"(",
")",
",",
"install_dir",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
]
| Delete standard installation directories. | [
"Delete",
"standard",
"installation",
"directories",
"."
]
| python | train |
Becksteinlab/GromacsWrapper | gromacs/fileformats/top.py | https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/fileformats/top.py#L949-L1005 | def assemble_topology(self):
"""Call the various member self._make_* functions to convert the topology object into a string"""
self.logger.debug("starting to assemble topology...")
top = ''
self.logger.debug("making atom/pair/bond/angle/dihedral/improper types")
top += self.toptemplate
top = top.replace('*DEFAULTS*', ''.join( self._make_defaults(self.system)) )
top = top.replace('*ATOMTYPES*', ''.join( self._make_atomtypes(self.system)) )
top = top.replace('*NONBOND_PARAM*', ''.join( self._make_nonbond_param(self.system)) )
top = top.replace('*PAIRTYPES*', ''.join( self._make_pairtypes(self.system)) )
top = top.replace('*BONDTYPES*', ''.join( self._make_bondtypes(self.system)) )
top = top.replace('*CONSTRAINTTYPES*',''.join( self._make_constrainttypes(self.system)))
top = top.replace('*ANGLETYPES*', ''.join( self._make_angletypes(self.system)))
top = top.replace('*DIHEDRALTYPES*', ''.join( self._make_dihedraltypes(self.system)) )
top = top.replace('*IMPROPERTYPES*', ''.join( self._make_impropertypes(self.system)) )
top = top.replace('*CMAPTYPES*', ''.join( self._make_cmaptypes(self.system)) )
for i,(molname,m) in enumerate(self.system.dict_molname_mol.items()):
itp = self.itptemplate
itp = itp.replace('*MOLECULETYPE*', ''.join( self._make_moleculetype(m, molname, m.exclusion_numb)) )
itp = itp.replace('*ATOMS*', ''.join( self._make_atoms(m)) )
itp = itp.replace('*BONDS*', ''.join( self._make_bonds(m)) )
itp = itp.replace('*PAIRS*', ''.join( self._make_pairs(m)) )
itp = itp.replace('*SETTLES*', ''.join( self._make_settles(m)) )
itp = itp.replace('*VIRTUAL_SITES3*',''.join( self._make_virtual_sites3(m)) )
itp = itp.replace('*EXCLUSIONS*', ''.join( self._make_exclusions(m)) )
itp = itp.replace('*ANGLES*', ''.join( self._make_angles(m)) )
itp = itp.replace('*DIHEDRALS*', ''.join( self._make_dihedrals(m)) )
itp = itp.replace('*IMPROPERS*', ''.join( self._make_impropers(m)) )
itp = itp.replace('*CMAPS*', ''.join( self._make_cmaps(m)) )
if not self.multiple_output:
top += itp
else:
outfile = "mol_{0}.itp".format(molname)
top += '#include "mol_{0}.itp" \n'.format( molname )
with open(outfile, "w") as f:
f.writelines([itp])
top += '\n[system] \nConvertedSystem\n\n'
top += '[molecules] \n'
molecules = [("", 0)]
for m in self.system.molecules:
if (molecules[-1][0] != m.name):
molecules.append([m.name, 0])
if molecules[-1][0] == m.name:
molecules[-1][1] += 1
for molname, n in molecules[1:]:
top += '{0:s} {1:d}\n'.format(molname, n)
top += '\n'
with open(self.outfile, 'w') as f:
f.writelines([top]) | [
"def",
"assemble_topology",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"starting to assemble topology...\"",
")",
"top",
"=",
"''",
"self",
".",
"logger",
".",
"debug",
"(",
"\"making atom/pair/bond/angle/dihedral/improper types\"",
")",
"top",
"+=",
"self",
".",
"toptemplate",
"top",
"=",
"top",
".",
"replace",
"(",
"'*DEFAULTS*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_defaults",
"(",
"self",
".",
"system",
")",
")",
")",
"top",
"=",
"top",
".",
"replace",
"(",
"'*ATOMTYPES*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_atomtypes",
"(",
"self",
".",
"system",
")",
")",
")",
"top",
"=",
"top",
".",
"replace",
"(",
"'*NONBOND_PARAM*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_nonbond_param",
"(",
"self",
".",
"system",
")",
")",
")",
"top",
"=",
"top",
".",
"replace",
"(",
"'*PAIRTYPES*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_pairtypes",
"(",
"self",
".",
"system",
")",
")",
")",
"top",
"=",
"top",
".",
"replace",
"(",
"'*BONDTYPES*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_bondtypes",
"(",
"self",
".",
"system",
")",
")",
")",
"top",
"=",
"top",
".",
"replace",
"(",
"'*CONSTRAINTTYPES*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_constrainttypes",
"(",
"self",
".",
"system",
")",
")",
")",
"top",
"=",
"top",
".",
"replace",
"(",
"'*ANGLETYPES*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_angletypes",
"(",
"self",
".",
"system",
")",
")",
")",
"top",
"=",
"top",
".",
"replace",
"(",
"'*DIHEDRALTYPES*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_dihedraltypes",
"(",
"self",
".",
"system",
")",
")",
")",
"top",
"=",
"top",
".",
"replace",
"(",
"'*IMPROPERTYPES*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_impropertypes",
"(",
"self",
".",
"system",
")",
")",
")",
"top",
"=",
"top",
".",
"replace",
"(",
"'*CMAPTYPES*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_cmaptypes",
"(",
"self",
".",
"system",
")",
")",
")",
"for",
"i",
",",
"(",
"molname",
",",
"m",
")",
"in",
"enumerate",
"(",
"self",
".",
"system",
".",
"dict_molname_mol",
".",
"items",
"(",
")",
")",
":",
"itp",
"=",
"self",
".",
"itptemplate",
"itp",
"=",
"itp",
".",
"replace",
"(",
"'*MOLECULETYPE*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_moleculetype",
"(",
"m",
",",
"molname",
",",
"m",
".",
"exclusion_numb",
")",
")",
")",
"itp",
"=",
"itp",
".",
"replace",
"(",
"'*ATOMS*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_atoms",
"(",
"m",
")",
")",
")",
"itp",
"=",
"itp",
".",
"replace",
"(",
"'*BONDS*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_bonds",
"(",
"m",
")",
")",
")",
"itp",
"=",
"itp",
".",
"replace",
"(",
"'*PAIRS*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_pairs",
"(",
"m",
")",
")",
")",
"itp",
"=",
"itp",
".",
"replace",
"(",
"'*SETTLES*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_settles",
"(",
"m",
")",
")",
")",
"itp",
"=",
"itp",
".",
"replace",
"(",
"'*VIRTUAL_SITES3*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_virtual_sites3",
"(",
"m",
")",
")",
")",
"itp",
"=",
"itp",
".",
"replace",
"(",
"'*EXCLUSIONS*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_exclusions",
"(",
"m",
")",
")",
")",
"itp",
"=",
"itp",
".",
"replace",
"(",
"'*ANGLES*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_angles",
"(",
"m",
")",
")",
")",
"itp",
"=",
"itp",
".",
"replace",
"(",
"'*DIHEDRALS*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_dihedrals",
"(",
"m",
")",
")",
")",
"itp",
"=",
"itp",
".",
"replace",
"(",
"'*IMPROPERS*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_impropers",
"(",
"m",
")",
")",
")",
"itp",
"=",
"itp",
".",
"replace",
"(",
"'*CMAPS*'",
",",
"''",
".",
"join",
"(",
"self",
".",
"_make_cmaps",
"(",
"m",
")",
")",
")",
"if",
"not",
"self",
".",
"multiple_output",
":",
"top",
"+=",
"itp",
"else",
":",
"outfile",
"=",
"\"mol_{0}.itp\"",
".",
"format",
"(",
"molname",
")",
"top",
"+=",
"'#include \"mol_{0}.itp\" \\n'",
".",
"format",
"(",
"molname",
")",
"with",
"open",
"(",
"outfile",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"writelines",
"(",
"[",
"itp",
"]",
")",
"top",
"+=",
"'\\n[system] \\nConvertedSystem\\n\\n'",
"top",
"+=",
"'[molecules] \\n'",
"molecules",
"=",
"[",
"(",
"\"\"",
",",
"0",
")",
"]",
"for",
"m",
"in",
"self",
".",
"system",
".",
"molecules",
":",
"if",
"(",
"molecules",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"!=",
"m",
".",
"name",
")",
":",
"molecules",
".",
"append",
"(",
"[",
"m",
".",
"name",
",",
"0",
"]",
")",
"if",
"molecules",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"==",
"m",
".",
"name",
":",
"molecules",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"+=",
"1",
"for",
"molname",
",",
"n",
"in",
"molecules",
"[",
"1",
":",
"]",
":",
"top",
"+=",
"'{0:s} {1:d}\\n'",
".",
"format",
"(",
"molname",
",",
"n",
")",
"top",
"+=",
"'\\n'",
"with",
"open",
"(",
"self",
".",
"outfile",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"writelines",
"(",
"[",
"top",
"]",
")"
]
| Call the various member self._make_* functions to convert the topology object into a string | [
"Call",
"the",
"various",
"member",
"self",
".",
"_make_",
"*",
"functions",
"to",
"convert",
"the",
"topology",
"object",
"into",
"a",
"string"
]
| python | valid |
materialsproject/pymatgen | pymatgen/core/sites.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/sites.py#L477-L486 | def to_unit_cell(self, in_place=False):
"""
Move frac coords to within the unit cell.
"""
frac_coords = np.mod(self.frac_coords, 1)
if in_place:
self.frac_coords = frac_coords
else:
return PeriodicSite(self.species, frac_coords, self.lattice,
properties=self.properties) | [
"def",
"to_unit_cell",
"(",
"self",
",",
"in_place",
"=",
"False",
")",
":",
"frac_coords",
"=",
"np",
".",
"mod",
"(",
"self",
".",
"frac_coords",
",",
"1",
")",
"if",
"in_place",
":",
"self",
".",
"frac_coords",
"=",
"frac_coords",
"else",
":",
"return",
"PeriodicSite",
"(",
"self",
".",
"species",
",",
"frac_coords",
",",
"self",
".",
"lattice",
",",
"properties",
"=",
"self",
".",
"properties",
")"
]
| Move frac coords to within the unit cell. | [
"Move",
"frac",
"coords",
"to",
"within",
"the",
"unit",
"cell",
"cell",
"."
]
| python | train |
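The wrapping step above is just np.mod(frac_coords, 1); a two-line numpy illustration of how out-of-cell fractional coordinates land back in [0, 1):

    import numpy as np

    frac_coords = np.array([1.25, -0.1, 0.5])
    print(np.mod(frac_coords, 1))   # [0.25 0.9  0.5 ]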
funilrys/PyFunceble | PyFunceble/adblock.py | https://github.com/funilrys/PyFunceble/blob/cdf69cbde120199171f7158e1c33635753e6e2f5/PyFunceble/adblock.py#L309-L402 | def _format_decoded(self, to_format, result=None): # pragma: no cover
"""
Format the extracted adblock line before passing it to the system.
:param to_format: The extracted line from the file.
:type to_format: str
:param result: A list of the result of this method.
:type result: list
:return: The list of domains or IP to test.
:rtype: list
"""
if not result:
# The result is not given.
# We set the result as an empty list.
result = []
for data in List(to_format).format():
# We loop through the different lines to format.
if data:
# The currently read line is not empty.
if "^" in data:
# There is an accent in the currently read line.
# We recall this method but with the current result state
# and splited data.
return self._format_decoded(data.split("^"), result)
if "#" in data:
# There is a dash in the currently read line.
# We recall this method but with the current result state
# and splited data.
return self._format_decoded(data.split("#"), result)
if "," in data:
# There is a comma in the currently read line.
# We recall this method but with the current result state
# and splited data.
return self._format_decoded(data.split(","), result)
if "!" in data:
# There is an exclamation mark in the currently read line.
# We recall this method but with the current result state
# and splited data.
return self._format_decoded(data.split("!"), result)
if "|" in data:
# There is a vertical bar in the currently read line.
# We recall this method but with the current result state
# and splited data.
return self._format_decoded(data.split("|"), result)
if data:
# The currently read line is not empty.
data = self._extract_base(data)
if data and (
self.checker.is_domain_valid(data)
or self.checker.is_ip_valid(data)
):
# The extracted base is not empty.
# and
# * The currently read line is a valid domain.
# or
# * The currently read line is a valid IP.
# We append the currently read line to the result.
result.append(data)
elif data:
# * The currently read line is not a valid domain.
# or
# * The currently read line is not a valid IP.
# We try to get the url base.
url_base = self.checker.is_url_valid(data, return_base=True)
if url_base:
# The url_base is not empty or equal to False or None.
# We append the url base to the result.
result.append(url_base)
# We return the result element.
return result | [
"def",
"_format_decoded",
"(",
"self",
",",
"to_format",
",",
"result",
"=",
"None",
")",
":",
"# pragma: no cover",
"if",
"not",
"result",
":",
"# The result is not given.",
"# We set the result as an empty list.",
"result",
"=",
"[",
"]",
"for",
"data",
"in",
"List",
"(",
"to_format",
")",
".",
"format",
"(",
")",
":",
"# We loop through the different lines to format.",
"if",
"data",
":",
"# The currently read line is not empty.",
"if",
"\"^\"",
"in",
"data",
":",
"# There is an accent in the currently read line.",
"# We recall this method but with the current result state",
"# and splited data.",
"return",
"self",
".",
"_format_decoded",
"(",
"data",
".",
"split",
"(",
"\"^\"",
")",
",",
"result",
")",
"if",
"\"#\"",
"in",
"data",
":",
"# There is a dash in the currently read line.",
"# We recall this method but with the current result state",
"# and splited data.",
"return",
"self",
".",
"_format_decoded",
"(",
"data",
".",
"split",
"(",
"\"#\"",
")",
",",
"result",
")",
"if",
"\",\"",
"in",
"data",
":",
"# There is a comma in the currently read line.",
"# We recall this method but with the current result state",
"# and splited data.",
"return",
"self",
".",
"_format_decoded",
"(",
"data",
".",
"split",
"(",
"\",\"",
")",
",",
"result",
")",
"if",
"\"!\"",
"in",
"data",
":",
"# There is an exclamation mark in the currently read line.",
"# We recall this method but with the current result state",
"# and splited data.",
"return",
"self",
".",
"_format_decoded",
"(",
"data",
".",
"split",
"(",
"\"!\"",
")",
",",
"result",
")",
"if",
"\"|\"",
"in",
"data",
":",
"# There is a vertival bar in the currently read line.",
"# We recall this method but with the current result state",
"# and splited data.",
"return",
"self",
".",
"_format_decoded",
"(",
"data",
".",
"split",
"(",
"\"|\"",
")",
",",
"result",
")",
"if",
"data",
":",
"# The currently read line is not empty.",
"data",
"=",
"self",
".",
"_extract_base",
"(",
"data",
")",
"if",
"data",
"and",
"(",
"self",
".",
"checker",
".",
"is_domain_valid",
"(",
"data",
")",
"or",
"self",
".",
"checker",
".",
"is_ip_valid",
"(",
"data",
")",
")",
":",
"# The extraced base is not empty.",
"# and",
"# * The currently read line is a valid domain.",
"# or",
"# * The currently read line is a valid IP.",
"# We append the currently read line to the result.",
"result",
".",
"append",
"(",
"data",
")",
"elif",
"data",
":",
"# * The currently read line is not a valid domain.",
"# or",
"# * The currently read line is not a valid IP.",
"# We try to get the url base.",
"url_base",
"=",
"self",
".",
"checker",
".",
"is_url_valid",
"(",
"data",
",",
"return_base",
"=",
"True",
")",
"if",
"url_base",
":",
"# The url_base is not empty or equal to False or None.",
"# We append the url base to the result.",
"result",
".",
"append",
"(",
"url_base",
")",
"# We return the result element.",
"return",
"result"
]
| Format the exctracted adblock line before passing it to the system.
:param to_format: The extracted line from the file.
:type to_format: str
:param result: A list of the result of this method.
:type result: list
:return: The list of domains or IP to test.
:rtype: list | [
"Format",
"the",
"exctracted",
"adblock",
"line",
"before",
"passing",
"it",
"to",
"the",
"system",
"."
]
| python | test |
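A self-contained sketch of the splitting idea the record applies recursively (break a rule on ^ # , ! | and keep fragments that look like hostnames). The hostname regex here is a deliberate simplification and is not PyFunceble's actual domain/IP/URL validation.

    import re

    SEPARATORS = '^#,!|'
    HOSTNAME = re.compile(r'^(?:[a-z0-9-]+\.)+[a-z]{2,}$', re.IGNORECASE)

    def extract_hosts(rule):
        # Split on the same separators as the record, keep hostname-like parts.
        fragments = re.split('[' + re.escape(SEPARATORS) + ']', rule)
        return [f for f in fragments if HOSTNAME.match(f)]

    print(extract_hosts('||ads.example.com^$third-party'))   # ['ads.example.com']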
dlecocq/nsq-py | nsq/http/__init__.py | https://github.com/dlecocq/nsq-py/blob/3ecacf6ab7719d38031179277113d875554a0c16/nsq/http/__init__.py#L43-L48 | def ok_check(function, *args, **kwargs):
'''Ensure that the response body is OK'''
req = function(*args, **kwargs)
if req.content.lower() != 'ok':
raise ClientException(req.content)
return req.content | [
"def",
"ok_check",
"(",
"function",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"req",
"=",
"function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"req",
".",
"content",
".",
"lower",
"(",
")",
"!=",
"'ok'",
":",
"raise",
"ClientException",
"(",
"req",
".",
"content",
")",
"return",
"req",
".",
"content"
]
| Ensure that the response body is OK | [
"Ensure",
"that",
"the",
"response",
"body",
"is",
"OK"
]
| python | train |
UCL-INGI/INGInious | inginious/common/tags.py | https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/common/tags.py#L58-L67 | def get_type_as_str(self):
""" Return a textual description of the type """
if self.get_type() == 0:
return _("Skill")
elif self.get_type() == 1:
return _("Misconception")
elif self.get_type() == 2:
return _("Category")
else:
return _("Unknown type") | [
"def",
"get_type_as_str",
"(",
"self",
")",
":",
"if",
"self",
".",
"get_type",
"(",
")",
"==",
"0",
":",
"return",
"_",
"(",
"\"Skill\"",
")",
"elif",
"self",
".",
"get_type",
"(",
")",
"==",
"1",
":",
"return",
"_",
"(",
"\"Misconception\"",
")",
"elif",
"self",
".",
"get_type",
"(",
")",
"==",
"2",
":",
"return",
"_",
"(",
"\"Category\"",
")",
"else",
":",
"return",
"_",
"(",
"\"Unknown type\"",
")"
]
| Return a textual description of the type | [
"Return",
"a",
"textual",
"description",
"of",
"the",
"type"
]
| python | train |
dhylands/rshell | rshell/main.py | https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1808-L1815 | def print(self, *args, end='\n', file=None):
"""Convenience function so you don't need to remember to put the \n
at the end of the line.
"""
if file is None:
file = self.stdout
s = ' '.join(str(arg) for arg in args) + end
file.write(s) | [
"def",
"print",
"(",
"self",
",",
"*",
"args",
",",
"end",
"=",
"'\\n'",
",",
"file",
"=",
"None",
")",
":",
"if",
"file",
"is",
"None",
":",
"file",
"=",
"self",
".",
"stdout",
"s",
"=",
"' '",
".",
"join",
"(",
"str",
"(",
"arg",
")",
"for",
"arg",
"in",
"args",
")",
"+",
"end",
"file",
".",
"write",
"(",
"s",
")"
]
| Convenience function so you don't need to remember to put the \n
at the end of the line. | [
"Convenience",
"function",
"so",
"you",
"don",
"t",
"need",
"to",
"remember",
"to",
"put",
"the",
"\\",
"n",
"at",
"the",
"end",
"of",
"the",
"line",
"."
]
| python | train |
wiheto/teneto | teneto/temporalcommunity/integration.py | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/temporalcommunity/integration.py#L5-L45 | def integration(temporalcommunities, staticcommunities):
"""
Calculates the integration coefficient for each node. Measures the average probability
that a node is in the same community as nodes from other systems.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Icoeff : array
integration coefficient for each node
References:
----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec
2;11(12):e1004533.
"""
# make sure the static and temporal communities have the same number of nodes
if staticcommunities.shape[0] != temporalcommunities.shape[0]:
raise ValueError(
'Temporal and static communities have different dimensions')
alleg = allegiance(temporalcommunities)
Icoeff = np.zeros(len(staticcommunities))
# calc integration for each node
for i, statcom in enumerate(staticcommunities):
Icoeff[i] = np.mean(alleg[i, staticcommunities != statcom])
return Icoeff | [
"def",
"integration",
"(",
"temporalcommunities",
",",
"staticcommunities",
")",
":",
"# make sure the static and temporal communities have the same number of nodes",
"if",
"staticcommunities",
".",
"shape",
"[",
"0",
"]",
"!=",
"temporalcommunities",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"'Temporal and static communities have different dimensions'",
")",
"alleg",
"=",
"allegiance",
"(",
"temporalcommunities",
")",
"Icoeff",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"staticcommunities",
")",
")",
"# calc integration for each node",
"for",
"i",
",",
"statcom",
"in",
"enumerate",
"(",
"len",
"(",
"staticcommunities",
")",
")",
":",
"Icoeff",
"[",
"i",
"]",
"=",
"np",
".",
"mean",
"(",
"alleg",
"[",
"i",
",",
"staticcommunities",
"!=",
"statcom",
"]",
")",
"return",
"Icoeff"
]
| Calculates the integration coefficient for each node. Measures the average probability
that a node is in the same community as nodes from other systems.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Icoeff : array
integration coefficient for each node
References:
----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec
2;11(12):e1004533. | [
"Calculates",
"the",
"integration",
"coefficient",
"for",
"each",
"node",
".",
"Measures",
"the",
"average",
"probability",
"that",
"a",
"node",
"is",
"in",
"the",
"same",
"community",
"as",
"nodes",
"from",
"other",
"systems",
"."
]
| python | train |
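A self-contained numpy sketch of the two quantities the record combines, written from the docstring's definition rather than imported from teneto: an allegiance matrix (fraction of time points at which two nodes share a temporal community) and, from it, each node's integration with respect to nodes outside its static community. The allegiance helper below is an assumption about what teneto's allegiance() computes.

    import numpy as np

    def allegiance(temporal_communities):
        # P[i, j] = fraction of time points where nodes i and j share a community.
        same = (temporal_communities[:, None, :] == temporal_communities[None, :, :])
        return same.mean(axis=-1)

    # 4 nodes over 3 time points; static partition: nodes 0-1 vs nodes 2-3.
    temporal = np.array([[0, 0, 1],
                         [0, 0, 0],
                         [1, 1, 1],
                         [1, 1, 0]])
    static = np.array([0, 0, 1, 1])

    P = allegiance(temporal)
    integration_coeff = np.array(
        [P[i, static != static[i]].mean() for i in range(len(static))]
    )
    print(integration_coeff)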
iotile/coretools | transport_plugins/websocket/iotile_transport_websocket/generic/async_client.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/websocket/iotile_transport_websocket/generic/async_client.py#L164-L190 | async def _manage_connection(self):
"""Internal coroutine for managing the client connection."""
try:
while True:
message = await self._con.recv()
try:
unpacked = unpack(message)
except Exception: # pylint:disable=broad-except;This is a background worker
self._logger.exception("Corrupt message received")
continue
if not VALID_SERVER_MESSAGE.matches(unpacked):
self._logger.warning("Dropping invalid message from server: %s", unpacked)
continue
# Don't block until all callbacks have finished since one of
# those callbacks may call self.send_command, which would deadlock
# since it couldn't get the response until it had already finished.
if not await self._manager.process_message(unpacked, wait=False):
self._logger.warning("No handler found for received message, message=%s", unpacked)
except asyncio.CancelledError:
self._logger.info("Closing connection to server due to stop()")
finally:
await self._manager.process_message(dict(type='event', name=self.DISCONNECT_EVENT, payload=None))
await self._con.close() | [
"async",
"def",
"_manage_connection",
"(",
"self",
")",
":",
"try",
":",
"while",
"True",
":",
"message",
"=",
"await",
"self",
".",
"_con",
".",
"recv",
"(",
")",
"try",
":",
"unpacked",
"=",
"unpack",
"(",
"message",
")",
"except",
"Exception",
":",
"# pylint:disable=broad-except;This is a background worker",
"self",
".",
"_logger",
".",
"exception",
"(",
"\"Corrupt message received\"",
")",
"continue",
"if",
"not",
"VALID_SERVER_MESSAGE",
".",
"matches",
"(",
"unpacked",
")",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"\"Dropping invalid message from server: %s\"",
",",
"unpacked",
")",
"continue",
"# Don't block until all callbacks have finished since once of",
"# those callbacks may call self.send_command, which would deadlock",
"# since it couldn't get the response until it had already finished.",
"if",
"not",
"await",
"self",
".",
"_manager",
".",
"process_message",
"(",
"unpacked",
",",
"wait",
"=",
"False",
")",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"\"No handler found for received message, message=%s\"",
",",
"unpacked",
")",
"except",
"asyncio",
".",
"CancelledError",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Closing connection to server due to stop()\"",
")",
"finally",
":",
"await",
"self",
".",
"_manager",
".",
"process_message",
"(",
"dict",
"(",
"type",
"=",
"'event'",
",",
"name",
"=",
"self",
".",
"DISCONNECT_EVENT",
",",
"payload",
"=",
"None",
")",
")",
"await",
"self",
".",
"_con",
".",
"close",
"(",
")"
]
| Internal coroutine for managing the client connection. | [
"Internal",
"coroutine",
"for",
"managing",
"the",
"client",
"connection",
"."
]
| python | train |
Qiskit/qiskit-terra | qiskit/circuit/quantumcircuit.py | https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/circuit/quantumcircuit.py#L481-L536 | def depth(self):
"""Return circuit depth (i.e. length of critical path).
This does not include compiler or simulator directives
such as 'barrier' or 'snapshot'.
Returns:
int: Depth of circuit.
Notes:
The circuit depth and the DAG depth need not be the
same.
"""
# Labels the registers by ints
# and then the qubit position in
# a register is given by reg_int+qubit_num
reg_offset = 0
reg_map = {}
for reg in self.qregs+self.cregs:
reg_map[reg.name] = reg_offset
reg_offset += reg.size
# A list that holds the height of each qubit
# and classical bit.
op_stack = [0]*reg_offset
# Here we are playing a modified version of
# Tetris where we stack gates, but multi-qubit
# gates, or measurements have a block for each
# qubit or cbit that are connected by a virtual
# line so that they all stacked at the same depth.
# Conditional gates act on all cbits in the register
# they are conditioned on.
# We do not consider barriers or snapshots as
# They are transpiler and simulator directives.
# The max stack height is the circuit depth.
for instr, qargs, cargs in self.data:
if instr.name not in ['barrier', 'snapshot']:
levels = []
reg_ints = []
for ind, reg in enumerate(qargs+cargs):
# Add to the stacks of the qubits and
# cbits used in the gate.
reg_ints.append(reg_map[reg[0].name]+reg[1])
levels.append(op_stack[reg_ints[ind]] + 1)
if instr.control:
# Controls operate over all bits in the
# classical register they use.
cint = reg_map[instr.control[0].name]
for off in range(instr.control[0].size):
if cint+off not in reg_ints:
reg_ints.append(cint+off)
levels.append(op_stack[cint+off]+1)
max_level = max(levels)
for ind in reg_ints:
op_stack[ind] = max_level
return max(op_stack) | [
"def",
"depth",
"(",
"self",
")",
":",
"# Labels the registers by ints",
"# and then the qubit position in",
"# a register is given by reg_int+qubit_num",
"reg_offset",
"=",
"0",
"reg_map",
"=",
"{",
"}",
"for",
"reg",
"in",
"self",
".",
"qregs",
"+",
"self",
".",
"cregs",
":",
"reg_map",
"[",
"reg",
".",
"name",
"]",
"=",
"reg_offset",
"reg_offset",
"+=",
"reg",
".",
"size",
"# A list that holds the height of each qubit",
"# and classical bit.",
"op_stack",
"=",
"[",
"0",
"]",
"*",
"reg_offset",
"# Here we are playing a modified version of",
"# Tetris where we stack gates, but multi-qubit",
"# gates, or measurements have a block for each",
"# qubit or cbit that are connected by a virtual",
"# line so that they all stacked at the same depth.",
"# Conditional gates act on all cbits in the register",
"# they are conditioned on.",
"# We do not consider barriers or snapshots as",
"# They are transpiler and simulator directives.",
"# The max stack height is the circuit depth.",
"for",
"instr",
",",
"qargs",
",",
"cargs",
"in",
"self",
".",
"data",
":",
"if",
"instr",
".",
"name",
"not",
"in",
"[",
"'barrier'",
",",
"'snapshot'",
"]",
":",
"levels",
"=",
"[",
"]",
"reg_ints",
"=",
"[",
"]",
"for",
"ind",
",",
"reg",
"in",
"enumerate",
"(",
"qargs",
"+",
"cargs",
")",
":",
"# Add to the stacks of the qubits and",
"# cbits used in the gate.",
"reg_ints",
".",
"append",
"(",
"reg_map",
"[",
"reg",
"[",
"0",
"]",
".",
"name",
"]",
"+",
"reg",
"[",
"1",
"]",
")",
"levels",
".",
"append",
"(",
"op_stack",
"[",
"reg_ints",
"[",
"ind",
"]",
"]",
"+",
"1",
")",
"if",
"instr",
".",
"control",
":",
"# Controls operate over all bits in the",
"# classical register they use.",
"cint",
"=",
"reg_map",
"[",
"instr",
".",
"control",
"[",
"0",
"]",
".",
"name",
"]",
"for",
"off",
"in",
"range",
"(",
"instr",
".",
"control",
"[",
"0",
"]",
".",
"size",
")",
":",
"if",
"cint",
"+",
"off",
"not",
"in",
"reg_ints",
":",
"reg_ints",
".",
"append",
"(",
"cint",
"+",
"off",
")",
"levels",
".",
"append",
"(",
"op_stack",
"[",
"cint",
"+",
"off",
"]",
"+",
"1",
")",
"max_level",
"=",
"max",
"(",
"levels",
")",
"for",
"ind",
"in",
"reg_ints",
":",
"op_stack",
"[",
"ind",
"]",
"=",
"max_level",
"return",
"max",
"(",
"op_stack",
")"
]
| Return circuit depth (i.e. length of critical path).
This does not include compiler or simulator directives
such as 'barrier' or 'snapshot'.
Returns:
int: Depth of circuit.
Notes:
The circuit depth and the DAG depth need not be the
same. | [
"Return",
"circuit",
"depth",
"(",
"i",
".",
"e",
".",
"length",
"of",
"critical",
"path",
")",
".",
"This",
"does",
"not",
"include",
"compiler",
"or",
"simulator",
"directives",
"such",
"as",
"barrier",
"or",
"snapshot",
"."
]
| python | test |
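The per-wire "stacking" idea described in the record's comments, as a dependency-free sketch over (name, wire indices) tuples: every operation lands one level above the tallest stack among the wires it touches. Classical-register conditions, which the record also folds in, are left out here.

    def circuit_depth(ops, n_wires):
        heights = [0] * n_wires
        for name, wires in ops:
            if name in ('barrier', 'snapshot'):   # directives add no depth
                continue
            level = max(heights[w] for w in wires) + 1
            for w in wires:
                heights[w] = level
        return max(heights) if heights else 0

    ops = [('h', [0]), ('cx', [0, 1]), ('x', [2]), ('cx', [1, 2])]
    assert circuit_depth(ops, 3) == 3   # h -> cx(0,1) -> cx(1,2); the x fits at level 1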
twilio/twilio-python | twilio/rest/proxy/v1/service/session/__init__.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/proxy/v1/service/session/__init__.py#L154-L163 | def get(self, sid):
"""
Constructs a SessionContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.proxy.v1.service.session.SessionContext
:rtype: twilio.rest.proxy.v1.service.session.SessionContext
"""
return SessionContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) | [
"def",
"get",
"(",
"self",
",",
"sid",
")",
":",
"return",
"SessionContext",
"(",
"self",
".",
"_version",
",",
"service_sid",
"=",
"self",
".",
"_solution",
"[",
"'service_sid'",
"]",
",",
"sid",
"=",
"sid",
",",
")"
]
| Constructs a SessionContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.proxy.v1.service.session.SessionContext
:rtype: twilio.rest.proxy.v1.service.session.SessionContext | [
"Constructs",
"a",
"SessionContext"
]
| python | train |
ray-project/ray | python/ray/rllib/evaluation/metrics.py | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/evaluation/metrics.py#L45-L53 | def collect_metrics(local_evaluator=None,
remote_evaluators=[],
timeout_seconds=180):
"""Gathers episode metrics from PolicyEvaluator instances."""
episodes, num_dropped = collect_episodes(
local_evaluator, remote_evaluators, timeout_seconds=timeout_seconds)
metrics = summarize_episodes(episodes, episodes, num_dropped)
return metrics | [
"def",
"collect_metrics",
"(",
"local_evaluator",
"=",
"None",
",",
"remote_evaluators",
"=",
"[",
"]",
",",
"timeout_seconds",
"=",
"180",
")",
":",
"episodes",
",",
"num_dropped",
"=",
"collect_episodes",
"(",
"local_evaluator",
",",
"remote_evaluators",
",",
"timeout_seconds",
"=",
"timeout_seconds",
")",
"metrics",
"=",
"summarize_episodes",
"(",
"episodes",
",",
"episodes",
",",
"num_dropped",
")",
"return",
"metrics"
]
| Gathers episode metrics from PolicyEvaluator instances. | [
"Gathers",
"episode",
"metrics",
"from",
"PolicyEvaluator",
"instances",
"."
]
| python | train |
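A hedged call sketch for collect_metrics() above; the evaluator objects are assumed to be PolicyEvaluator instances created elsewhere (the variable names are illustrative, not from the record):
from ray.rllib.evaluation.metrics import collect_metrics

# local_evaluator and remote_evaluators are assumed to already exist,
# e.g. built by an RLlib agent; they are not constructed here.
metrics = collect_metrics(local_evaluator,
                          remote_evaluators=remote_evaluators,
                          timeout_seconds=60)
print(metrics.get("episode_reward_mean"))  # one of the summarized statistics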
lambdamusic/Ontospy | ontospy/extras/shell_lib.py | https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L277-L304 | def _printTaxonomy(self, hrlinetop=True):
"""
print(a local taxonomy for the object)
"""
if not self.currentEntity: # ==> ontology level
return
if hrlinetop:
self._print("----------------")
self._print("TAXONOMY:", "IMPORTANT")
x = self.currentEntity['object']
parents = x.parents()
if not parents:
if self.currentEntity['type'] == 'class':
self._print("owl:Thing")
elif self.currentEntity['type'] == 'property':
self._print("RDF:Property")
elif self.currentEntity['type'] == 'concept':
self._print("SKOS:Concept")
else:
pass
else:
for p in parents:
self._print(p.qname)
self._print("..." + x.qname, "TEXT")
for c in x.children():
self._print("......" + c.qname)
self._print("----------------") | [
"def",
"_printTaxonomy",
"(",
"self",
",",
"hrlinetop",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"currentEntity",
":",
"# ==> ontology level",
"return",
"if",
"hrlinetop",
":",
"self",
".",
"_print",
"(",
"\"----------------\"",
")",
"self",
".",
"_print",
"(",
"\"TAXONOMY:\"",
",",
"\"IMPORTANT\"",
")",
"x",
"=",
"self",
".",
"currentEntity",
"[",
"'object'",
"]",
"parents",
"=",
"x",
".",
"parents",
"(",
")",
"if",
"not",
"parents",
":",
"if",
"self",
".",
"currentEntity",
"[",
"'type'",
"]",
"==",
"'class'",
":",
"self",
".",
"_print",
"(",
"\"owl:Thing\"",
")",
"elif",
"self",
".",
"currentEntity",
"[",
"'type'",
"]",
"==",
"'property'",
":",
"self",
".",
"_print",
"(",
"\"RDF:Property\"",
")",
"elif",
"self",
".",
"currentEntity",
"[",
"'type'",
"]",
"==",
"'concept'",
":",
"self",
".",
"_print",
"(",
"\"SKOS:Concept\"",
")",
"else",
":",
"pass",
"else",
":",
"for",
"p",
"in",
"parents",
":",
"self",
".",
"_print",
"(",
"p",
".",
"qname",
")",
"self",
".",
"_print",
"(",
"\"...\"",
"+",
"x",
".",
"qname",
",",
"\"TEXT\"",
")",
"for",
"c",
"in",
"x",
".",
"children",
"(",
")",
":",
"self",
".",
"_print",
"(",
"\"......\"",
"+",
"c",
".",
"qname",
")",
"self",
".",
"_print",
"(",
"\"----------------\"",
")"
]
| print(a local taxonomy for the object) | [
"print",
"(",
"a",
"local",
"taxonomy",
"for",
"the",
"object",
")"
]
| python | train |
flowersteam/explauto | explauto/sensorimotor_model/forward/lwr.py | https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/forward/lwr.py#L126-L153 | def predict_dims(self, q, dims_x, dims_y, dims_out, sigma=None, k=None):
"""Provide a prediction of q in the output space
@param xq an array of float of length dim_x
@param estimated_sigma if False (default), sigma_sq=self.sigma_sq, else it is estimated from the neighbor distances in self._weights(.)
"""
assert len(q) == len(dims_x) + len(dims_y)
sigma_sq = self.sigma_sq if sigma is None else sigma*sigma
k = k or self.k
dists, index = self.dataset.nn_dims(q[:len(dims_x)], q[len(dims_x):], dims_x, dims_y, k=k)
w = self._weights(dists, index, sigma_sq)
Xq = np.array(np.append([1.0], q), ndmin = 2)
X = np.array([np.append([1.0], self.dataset.get_dims(i, dims_x=dims_x, dims_y=dims_y)) for i in index])
Y = np.array([self.dataset.get_dims(i, dims=dims_out) for i in index])
W = np.diag(w)
WX = np.dot(W, X)
WXT = WX.T
B = np.dot(np.linalg.pinv(np.dot(WXT, WX)),WXT)
self.mat = np.dot(B, np.dot(W, Y))
Yq = np.dot(Xq, self.mat)
return Yq.ravel() | [
"def",
"predict_dims",
"(",
"self",
",",
"q",
",",
"dims_x",
",",
"dims_y",
",",
"dims_out",
",",
"sigma",
"=",
"None",
",",
"k",
"=",
"None",
")",
":",
"assert",
"len",
"(",
"q",
")",
"==",
"len",
"(",
"dims_x",
")",
"+",
"len",
"(",
"dims_y",
")",
"sigma_sq",
"=",
"self",
".",
"sigma_sq",
"if",
"sigma",
"is",
"None",
"else",
"sigma",
"*",
"sigma",
"k",
"=",
"k",
"or",
"self",
".",
"k",
"dists",
",",
"index",
"=",
"self",
".",
"dataset",
".",
"nn_dims",
"(",
"q",
"[",
":",
"len",
"(",
"dims_x",
")",
"]",
",",
"q",
"[",
"len",
"(",
"dims_x",
")",
":",
"]",
",",
"dims_x",
",",
"dims_y",
",",
"k",
"=",
"k",
")",
"w",
"=",
"self",
".",
"_weights",
"(",
"dists",
",",
"index",
",",
"sigma_sq",
")",
"Xq",
"=",
"np",
".",
"array",
"(",
"np",
".",
"append",
"(",
"[",
"1.0",
"]",
",",
"q",
")",
",",
"ndmin",
"=",
"2",
")",
"X",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"append",
"(",
"[",
"1.0",
"]",
",",
"self",
".",
"dataset",
".",
"get_dims",
"(",
"i",
",",
"dims_x",
"=",
"dims_x",
",",
"dims_y",
"=",
"dims_y",
")",
")",
"for",
"i",
"in",
"index",
"]",
")",
"Y",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"dataset",
".",
"get_dims",
"(",
"i",
",",
"dims",
"=",
"dims_out",
")",
"for",
"i",
"in",
"index",
"]",
")",
"W",
"=",
"np",
".",
"diag",
"(",
"w",
")",
"WX",
"=",
"np",
".",
"dot",
"(",
"W",
",",
"X",
")",
"WXT",
"=",
"WX",
".",
"T",
"B",
"=",
"np",
".",
"dot",
"(",
"np",
".",
"linalg",
".",
"pinv",
"(",
"np",
".",
"dot",
"(",
"WXT",
",",
"WX",
")",
")",
",",
"WXT",
")",
"self",
".",
"mat",
"=",
"np",
".",
"dot",
"(",
"B",
",",
"np",
".",
"dot",
"(",
"W",
",",
"Y",
")",
")",
"Yq",
"=",
"np",
".",
"dot",
"(",
"Xq",
",",
"self",
".",
"mat",
")",
"return",
"Yq",
".",
"ravel",
"(",
")"
]
| Provide a prediction of q in the output space
@param xq an array of float of length dim_x
@param estimated_sigma if False (default), sigma_sq=self.sigma_sq, else it is estimated from the neighbor distances in self._weights(.) | [
"Provide",
"a",
"prediction",
"of",
"q",
"in",
"the",
"output",
"space"
]
| python | train |
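The solve inside predict_dims() above is ordinary locally weighted least squares: mat = (XᵀWX)⁻¹XᵀW·Y followed by Yq = Xq·mat. A self-contained numpy sketch of just that step (all data below is random and purely illustrative):
import numpy as np

rng = np.random.RandomState(0)
X = np.hstack([np.ones((20, 1)), rng.rand(20, 3)])   # neighbor inputs with a bias column
Y = rng.rand(20, 2)                                   # neighbor outputs
w = np.exp(-rng.rand(20))                             # Gaussian-style neighbor weights

W = np.diag(w)
WX = W.dot(X)
mat = np.linalg.pinv(WX.T.dot(WX)).dot(WX.T).dot(W).dot(Y)  # same normal-equation solve as the record
xq = np.append([1.0], rng.rand(3))                    # query point with bias term
yq = xq.dot(mat)                                      # locally weighted prediction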
Jaymon/prom | prom/query.py | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1065-L1080 | def value(self):
"""convenience method to just get one value or tuple of values for the query"""
field_vals = None
field_names = self.fields_select.names()
fcount = len(field_names)
if fcount:
d = self._query('get_one')
if d:
field_vals = [d.get(fn, None) for fn in field_names]
if fcount == 1:
field_vals = field_vals[0]
else:
raise ValueError("no select fields were set, so cannot return value")
return field_vals | [
"def",
"value",
"(",
"self",
")",
":",
"field_vals",
"=",
"None",
"field_names",
"=",
"self",
".",
"fields_select",
".",
"names",
"(",
")",
"fcount",
"=",
"len",
"(",
"field_names",
")",
"if",
"fcount",
":",
"d",
"=",
"self",
".",
"_query",
"(",
"'get_one'",
")",
"if",
"d",
":",
"field_vals",
"=",
"[",
"d",
".",
"get",
"(",
"fn",
",",
"None",
")",
"for",
"fn",
"in",
"field_names",
"]",
"if",
"fcount",
"==",
"1",
":",
"field_vals",
"=",
"field_vals",
"[",
"0",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"no select fields were set, so cannot return value\"",
")",
"return",
"field_vals"
]
| convenience method to just get one value or tuple of values for the query | [
"convenience",
"method",
"to",
"just",
"get",
"one",
"value",
"or",
"tuple",
"of",
"values",
"for",
"the",
"query"
]
| python | train |
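A hedged usage sketch for value() above: the Orm subclass, field definitions, and the select_*/is_* builder methods are assumptions about prom's query interface; only the scalar-vs-tuple return behavior comes from the record's docstring.
import prom

class User(prom.Orm):
    table_name = "user"                 # hypothetical model and fields
    email = prom.Field(str, True)
    name = prom.Field(str, True)

email = User.query.select_email().is_id(42).value()                      # one selected field -> scalar
email, name = User.query.select_email().select_name().is_id(42).value()  # several fields -> tuple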