repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition
---|---|---|---|---|---|---|---|---
picklepete/pyicloud
|
pyicloud/base.py
|
https://github.com/picklepete/pyicloud/blob/9bb6d750662ce24c8febc94807ddbdcdf3cadaa2/pyicloud/base.py#L292-L299
|
def devices(self):
""" Return all devices."""
service_root = self.webservices['findme']['url']
return FindMyiPhoneServiceManager(
service_root,
self.session,
self.params
)
|
[
"def",
"devices",
"(",
"self",
")",
":",
"service_root",
"=",
"self",
".",
"webservices",
"[",
"'findme'",
"]",
"[",
"'url'",
"]",
"return",
"FindMyiPhoneServiceManager",
"(",
"service_root",
",",
"self",
".",
"session",
",",
"self",
".",
"params",
")"
] |
Return all devices.
|
[
"Return",
"all",
"devices",
"."
] |
python
|
train
|
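A hedged usage sketch for the sample above; the import path and credentials are placeholders, and in the library `devices` may be wrapped as a property:

from pyicloud import PyiCloudService

api = PyiCloudService('user@example.com', 'password')  # placeholder credentials
manager = api.devices  # or api.devices() if the method is not wrapped as a property
# manager is a FindMyiPhoneServiceManager built from the 'findme' webservice URL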
dopefishh/pympi
|
pympi/Elan.py
|
https://github.com/dopefishh/pympi/blob/79c747cde45b5ba203ed93154d8c123ac9c3ef56/pympi/Elan.py#L1013-L1047
|
def merge_tiers(self, tiers, tiernew=None, gapt=0, sep='_', safe=False):
"""Merge tiers into a new tier and when the gap is lower then the
threshhold glue the annotations together.
:param list tiers: List of tier names.
:param str tiernew: Name for the new tier, if ``None`` the name will be
generated.
:param int gapt: Threshhold for the gaps, if the this is set to 10 it
means that all gaps below 10 are ignored.
:param str sep: Separator for the merged annotations.
:param bool safe: Ignore zero length annotations(when working with
possible malformed data).
:returns: Name of the created tier.
:raises KeyError: If a tier is non existent.
"""
if tiernew is None:
tiernew = u'{}_merged'.format('_'.join(tiers))
self.add_tier(tiernew)
aa = [(sys.maxsize, sys.maxsize, None)] + sorted((
a for t in tiers for a in self.get_annotation_data_for_tier(t)),
reverse=True)
l = None
while aa:
begin, end, value = aa.pop()
if l is None:
l = [begin, end, [value]]
elif begin - l[1] >= gapt:
if not safe or l[1] > l[0]:
self.add_annotation(tiernew, l[0], l[1], sep.join(l[2]))
l = [begin, end, [value]]
else:
if end > l[1]:
l[1] = end
l[2].append(value)
return tiernew
|
[
"def",
"merge_tiers",
"(",
"self",
",",
"tiers",
",",
"tiernew",
"=",
"None",
",",
"gapt",
"=",
"0",
",",
"sep",
"=",
"'_'",
",",
"safe",
"=",
"False",
")",
":",
"if",
"tiernew",
"is",
"None",
":",
"tiernew",
"=",
"u'{}_merged'",
".",
"format",
"(",
"'_'",
".",
"join",
"(",
"tiers",
")",
")",
"self",
".",
"add_tier",
"(",
"tiernew",
")",
"aa",
"=",
"[",
"(",
"sys",
".",
"maxsize",
",",
"sys",
".",
"maxsize",
",",
"None",
")",
"]",
"+",
"sorted",
"(",
"(",
"a",
"for",
"t",
"in",
"tiers",
"for",
"a",
"in",
"self",
".",
"get_annotation_data_for_tier",
"(",
"t",
")",
")",
",",
"reverse",
"=",
"True",
")",
"l",
"=",
"None",
"while",
"aa",
":",
"begin",
",",
"end",
",",
"value",
"=",
"aa",
".",
"pop",
"(",
")",
"if",
"l",
"is",
"None",
":",
"l",
"=",
"[",
"begin",
",",
"end",
",",
"[",
"value",
"]",
"]",
"elif",
"begin",
"-",
"l",
"[",
"1",
"]",
">=",
"gapt",
":",
"if",
"not",
"safe",
"or",
"l",
"[",
"1",
"]",
">",
"l",
"[",
"0",
"]",
":",
"self",
".",
"add_annotation",
"(",
"tiernew",
",",
"l",
"[",
"0",
"]",
",",
"l",
"[",
"1",
"]",
",",
"sep",
".",
"join",
"(",
"l",
"[",
"2",
"]",
")",
")",
"l",
"=",
"[",
"begin",
",",
"end",
",",
"[",
"value",
"]",
"]",
"else",
":",
"if",
"end",
">",
"l",
"[",
"1",
"]",
":",
"l",
"[",
"1",
"]",
"=",
"end",
"l",
"[",
"2",
"]",
".",
"append",
"(",
"value",
")",
"return",
"tiernew"
] |
Merge tiers into a new tier and glue the annotations together when
the gap between them is smaller than the threshold.
:param list tiers: List of tier names.
:param str tiernew: Name for the new tier, if ``None`` the name will be
generated.
:param int gapt: Threshold for the gaps, if this is set to 10 it
means that all gaps below 10 are ignored.
:param str sep: Separator for the merged annotations.
:param bool safe: Ignore zero length annotations (when working with
possibly malformed data).
:returns: Name of the created tier.
:raises KeyError: If a tier is non-existent.
|
[
"Merge",
"tiers",
"into",
"a",
"new",
"tier",
"and",
"when",
"the",
"gap",
"is",
"lower",
"then",
"the",
"threshhold",
"glue",
"the",
"annotations",
"together",
"."
] |
python
|
test
|
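A minimal sketch of calling this method; the file name and tier names are hypothetical:

from pympi import Eaf

eaf = Eaf('recording.eaf')  # hypothetical EAF file containing tiers 'speaker1' and 'speaker2'
new_tier = eaf.merge_tiers(['speaker1', 'speaker2'], gapt=10)  # gaps below 10 are glued
print(new_tier)  # 'speaker1_speaker2_merged' when tiernew is None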
nerdynick/PySQLPool
|
src/PySQLPool/query.py
|
https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/query.py#L161-L194
|
def queryMany(self, query, args):
"""
Executes a series of the same insert statements.
Each tuple in the args list will be applied to the query and executed.
This is the equivalent of MySQLdb.cursor.executemany()
@author: Nick Verbeck
@since: 9/7/2008
"""
self.lastError = None
self.affectedRows = None
self.rowcount = None
self.record = None
cursor = None
try:
try:
self._GetConnection()
self.conn.query = query
#Execute query and store results
cursor = self.conn.getCursor()
self.affectedRows = cursor.executemany(query, args)
self.conn.updateCheckTime()
except Exception, e:
self.lastError = e
finally:
if cursor is not None:
cursor.close()
self._ReturnConnection()
if self.lastError is not None:
raise self.lastError
else:
return self.affectedRows
|
[
"def",
"queryMany",
"(",
"self",
",",
"query",
",",
"args",
")",
":",
"self",
".",
"lastError",
"=",
"None",
"self",
".",
"affectedRows",
"=",
"None",
"self",
".",
"rowcount",
"=",
"None",
"self",
".",
"record",
"=",
"None",
"cursor",
"=",
"None",
"try",
":",
"try",
":",
"self",
".",
"_GetConnection",
"(",
")",
"self",
".",
"conn",
".",
"query",
"=",
"query",
"#Execute query and store results",
"cursor",
"=",
"self",
".",
"conn",
".",
"getCursor",
"(",
")",
"self",
".",
"affectedRows",
"=",
"cursor",
".",
"executemany",
"(",
"query",
",",
"args",
")",
"self",
".",
"conn",
".",
"updateCheckTime",
"(",
")",
"except",
"Exception",
",",
"e",
":",
"self",
".",
"lastError",
"=",
"e",
"finally",
":",
"if",
"cursor",
"is",
"not",
"None",
":",
"cursor",
".",
"close",
"(",
")",
"self",
".",
"_ReturnConnection",
"(",
")",
"if",
"self",
".",
"lastError",
"is",
"not",
"None",
":",
"raise",
"self",
".",
"lastError",
"else",
":",
"return",
"self",
".",
"affectedRows"
] |
Executes a series of the same insert statements.
Each tuple in the args list will be applied to the query and executed.
This is the equivalent of MySQLdb.cursor.executemany()
@author: Nick Verbeck
@since: 9/7/2008
|
[
"Executes",
"a",
"series",
"of",
"the",
"same",
"Insert",
"Statments",
"Each",
"tuple",
"in",
"the",
"args",
"list",
"will",
"be",
"applied",
"to",
"the",
"query",
"and",
"executed",
".",
"This",
"is",
"the",
"equivilant",
"of",
"MySQLDB",
".",
"cursor",
".",
"executemany",
"()"
] |
python
|
train
|
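A hedged sketch of an executemany-style call; the connection factory helpers follow PySQLPool's usual API but are assumptions here (the library is Python 2 only, as the `except Exception, e` syntax shows):

import PySQLPool

connection = PySQLPool.getNewConnection(username='user', password='pass',
                                        host='localhost', db='test')  # assumed factory helper
query = PySQLPool.getNewQuery(connection)
affected = query.queryMany("INSERT INTO log (msg) VALUES (%s)",
                           [('first',), ('second',)])  # one tuple per execution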
Accelize/pycosio
|
pycosio/storage/s3.py
|
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/storage/s3.py#L101-L111
|
def _get_session(self):
"""
S3 Boto3 Session.
Returns:
boto3.session.Session: session
"""
if self._session is None:
self._session = _boto3.session.Session(
**self._storage_parameters.get('session', dict()))
return self._session
|
[
"def",
"_get_session",
"(",
"self",
")",
":",
"if",
"self",
".",
"_session",
"is",
"None",
":",
"self",
".",
"_session",
"=",
"_boto3",
".",
"session",
".",
"Session",
"(",
"*",
"*",
"self",
".",
"_storage_parameters",
".",
"get",
"(",
"'session'",
",",
"dict",
"(",
")",
")",
")",
"return",
"self",
".",
"_session"
] |
S3 Boto3 Session.
Returns:
boto3.session.Session: session
|
[
"S3",
"Boto3",
"Session",
"."
] |
python
|
train
|
happyleavesaoc/python-snapcast
|
snapcast/control/group.py
|
https://github.com/happyleavesaoc/python-snapcast/blob/9b3c483358677327c7fd6d0666bf474c19d87f19/snapcast/control/group.py#L154-L157
|
def callback(self):
"""Run callback."""
if self._callback_func and callable(self._callback_func):
self._callback_func(self)
|
[
"def",
"callback",
"(",
"self",
")",
":",
"if",
"self",
".",
"_callback_func",
"and",
"callable",
"(",
"self",
".",
"_callback_func",
")",
":",
"self",
".",
"_callback_func",
"(",
"self",
")"
] |
Run callback.
|
[
"Run",
"callback",
"."
] |
python
|
train
|
pydata/xarray
|
xarray/core/dataarray.py
|
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/dataarray.py#L1139-L1196
|
def expand_dims(self, dim=None, axis=None, **dim_kwargs):
"""Return a new object with an additional axis (or axes) inserted at
the corresponding position in the array shape.
If dim is already a scalar coordinate, it will be promoted to a 1D
coordinate consisting of a single value.
Parameters
----------
dim : str, sequence of str, dict, or None
Dimensions to include on the new variable.
If provided as str or sequence of str, then dimensions are inserted
with length 1. If provided as a dict, then the keys are the new
dimensions and the values are either integers (giving the length of
the new dimensions) or sequence/ndarray (giving the coordinates of
the new dimensions). **WARNING** for python 3.5, if ``dim`` is
dict-like, then it must be an ``OrderedDict``. This is to ensure
that the order in which the dims are given is maintained.
axis : integer, list (or tuple) of integers, or None
Axis position(s) where new axis is to be inserted (position(s) on
the result array). If a list (or tuple) of integers is passed,
multiple axes are inserted. In this case, dim arguments should be
same length list. If axis=None is passed, all the axes will be
inserted to the start of the result array.
**dim_kwargs : int or sequence/ndarray
The keywords are arbitrary dimensions being inserted and the values
are either the lengths of the new dims (if int is given), or their
coordinates. Note, this is an alternative to passing a dict to the
dim kwarg and will only be used if dim is None. **WARNING** for
python 3.5 ``dim_kwargs`` is not available.
Returns
-------
expanded : same type as caller
This object, but with an additional dimension(s).
"""
if isinstance(dim, int):
raise TypeError('dim should be str or sequence of strs or dict')
elif isinstance(dim, str):
dim = OrderedDict(((dim, 1),))
elif isinstance(dim, (list, tuple)):
if len(dim) != len(set(dim)):
raise ValueError('dims should not contain duplicate values.')
dim = OrderedDict(((d, 1) for d in dim))
# TODO: get rid of the below code block when python 3.5 is no longer
# supported.
python36_plus = sys.version_info[0] == 3 and sys.version_info[1] > 5
not_ordereddict = dim is not None and not isinstance(dim, OrderedDict)
if not python36_plus and not_ordereddict:
raise TypeError("dim must be an OrderedDict for python <3.6")
elif not python36_plus and dim_kwargs:
raise ValueError("dim_kwargs isn't available for python <3.6")
dim_kwargs = OrderedDict(dim_kwargs)
dim = either_dict_or_kwargs(dim, dim_kwargs, 'expand_dims')
ds = self._to_temp_dataset().expand_dims(dim, axis)
return self._from_temp_dataset(ds)
|
[
"def",
"expand_dims",
"(",
"self",
",",
"dim",
"=",
"None",
",",
"axis",
"=",
"None",
",",
"*",
"*",
"dim_kwargs",
")",
":",
"if",
"isinstance",
"(",
"dim",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"'dim should be str or sequence of strs or dict'",
")",
"elif",
"isinstance",
"(",
"dim",
",",
"str",
")",
":",
"dim",
"=",
"OrderedDict",
"(",
"(",
"(",
"dim",
",",
"1",
")",
",",
")",
")",
"elif",
"isinstance",
"(",
"dim",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"if",
"len",
"(",
"dim",
")",
"!=",
"len",
"(",
"set",
"(",
"dim",
")",
")",
":",
"raise",
"ValueError",
"(",
"'dims should not contain duplicate values.'",
")",
"dim",
"=",
"OrderedDict",
"(",
"(",
"(",
"d",
",",
"1",
")",
"for",
"d",
"in",
"dim",
")",
")",
"# TODO: get rid of the below code block when python 3.5 is no longer",
"# supported.",
"python36_plus",
"=",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"3",
"and",
"sys",
".",
"version_info",
"[",
"1",
"]",
">",
"5",
"not_ordereddict",
"=",
"dim",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"dim",
",",
"OrderedDict",
")",
"if",
"not",
"python36_plus",
"and",
"not_ordereddict",
":",
"raise",
"TypeError",
"(",
"\"dim must be an OrderedDict for python <3.6\"",
")",
"elif",
"not",
"python36_plus",
"and",
"dim_kwargs",
":",
"raise",
"ValueError",
"(",
"\"dim_kwargs isn't available for python <3.6\"",
")",
"dim_kwargs",
"=",
"OrderedDict",
"(",
"dim_kwargs",
")",
"dim",
"=",
"either_dict_or_kwargs",
"(",
"dim",
",",
"dim_kwargs",
",",
"'expand_dims'",
")",
"ds",
"=",
"self",
".",
"_to_temp_dataset",
"(",
")",
".",
"expand_dims",
"(",
"dim",
",",
"axis",
")",
"return",
"self",
".",
"_from_temp_dataset",
"(",
"ds",
")"
] |
Return a new object with an additional axis (or axes) inserted at
the corresponding position in the array shape.
If dim is already a scalar coordinate, it will be promoted to a 1D
coordinate consisting of a single value.
Parameters
----------
dim : str, sequence of str, dict, or None
Dimensions to include on the new variable.
If provided as str or sequence of str, then dimensions are inserted
with length 1. If provided as a dict, then the keys are the new
dimensions and the values are either integers (giving the length of
the new dimensions) or sequence/ndarray (giving the coordinates of
the new dimensions). **WARNING** for python 3.5, if ``dim`` is
dict-like, then it must be an ``OrderedDict``. This is to ensure
that the order in which the dims are given is maintained.
axis : integer, list (or tuple) of integers, or None
Axis position(s) where new axis is to be inserted (position(s) on
the result array). If a list (or tuple) of integers is passed,
multiple axes are inserted. In this case, dim arguments should be
same length list. If axis=None is passed, all the axes will be
inserted to the start of the result array.
**dim_kwargs : int or sequence/ndarray
The keywords are arbitrary dimensions being inserted and the values
are either the lengths of the new dims (if int is given), or their
coordinates. Note, this is an alternative to passing a dict to the
dim kwarg and will only be used if dim is None. **WARNING** for
python 3.5 ``dim_kwargs`` is not available.
Returns
-------
expanded : same type as caller
This object, but with an additional dimension(s).
|
[
"Return",
"a",
"new",
"object",
"with",
"an",
"additional",
"axis",
"(",
"or",
"axes",
")",
"inserted",
"at",
"the",
"corresponding",
"position",
"in",
"the",
"array",
"shape",
"."
] |
python
|
train
|
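A short example of the behaviour described in the docstring above:

import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(3), dims='x')
da2 = da.expand_dims('time')        # inserts a new dimension of length 1 at the front
print(da2.dims, da2.shape)          # ('time', 'x') (1, 3)
da3 = da.expand_dims({'time': 2})   # dict form: new dimension of length 2 (OrderedDict on 3.5)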
androguard/androguard
|
androguard/session.py
|
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/session.py#L392-L403
|
def get_nb_strings(self):
"""
Return the total number of strings in all Analysis objects
"""
nb = 0
seen = []
for digest, dx in self.analyzed_vms.items():
if dx in seen:
continue
seen.append(dx)
nb += len(dx.get_strings_analysis())
return nb
|
[
"def",
"get_nb_strings",
"(",
"self",
")",
":",
"nb",
"=",
"0",
"seen",
"=",
"[",
"]",
"for",
"digest",
",",
"dx",
"in",
"self",
".",
"analyzed_vms",
".",
"items",
"(",
")",
":",
"if",
"dx",
"in",
"seen",
":",
"continue",
"seen",
".",
"append",
"(",
"dx",
")",
"nb",
"+=",
"len",
"(",
"dx",
".",
"get_strings_analysis",
"(",
")",
")",
"return",
"nb"
] |
Return the total number of strings in all Analysis objects
|
[
"Return",
"the",
"total",
"number",
"of",
"strings",
"in",
"all",
"Analysis",
"objects"
] |
python
|
train
|
gem/oq-engine
|
openquake/hazardlib/sourceconverter.py
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/sourceconverter.py#L688-L710
|
def convert_multiPointSource(self, node):
"""
Convert the given node into a MultiPointSource object.
:param node: a node with tag multiPointGeometry
:returns: a :class:`openquake.hazardlib.source.MultiPointSource`
"""
geom = node.multiPointGeometry
lons, lats = zip(*split_coords_2d(~geom.posList))
msr = valid.SCALEREL[~node.magScaleRel]()
return source.MultiPointSource(
source_id=node['id'],
name=node['name'],
tectonic_region_type=node.attrib.get('tectonicRegion'),
mfd=self.convert_mfdist(node),
magnitude_scaling_relationship=msr,
rupture_aspect_ratio=~node.ruptAspectRatio,
upper_seismogenic_depth=~geom.upperSeismoDepth,
lower_seismogenic_depth=~geom.lowerSeismoDepth,
nodal_plane_distribution=self.convert_npdist(node),
hypocenter_distribution=self.convert_hpdist(node),
mesh=geo.Mesh(F32(lons), F32(lats)),
temporal_occurrence_model=self.get_tom(node))
|
[
"def",
"convert_multiPointSource",
"(",
"self",
",",
"node",
")",
":",
"geom",
"=",
"node",
".",
"multiPointGeometry",
"lons",
",",
"lats",
"=",
"zip",
"(",
"*",
"split_coords_2d",
"(",
"~",
"geom",
".",
"posList",
")",
")",
"msr",
"=",
"valid",
".",
"SCALEREL",
"[",
"~",
"node",
".",
"magScaleRel",
"]",
"(",
")",
"return",
"source",
".",
"MultiPointSource",
"(",
"source_id",
"=",
"node",
"[",
"'id'",
"]",
",",
"name",
"=",
"node",
"[",
"'name'",
"]",
",",
"tectonic_region_type",
"=",
"node",
".",
"attrib",
".",
"get",
"(",
"'tectonicRegion'",
")",
",",
"mfd",
"=",
"self",
".",
"convert_mfdist",
"(",
"node",
")",
",",
"magnitude_scaling_relationship",
"=",
"msr",
",",
"rupture_aspect_ratio",
"=",
"~",
"node",
".",
"ruptAspectRatio",
",",
"upper_seismogenic_depth",
"=",
"~",
"geom",
".",
"upperSeismoDepth",
",",
"lower_seismogenic_depth",
"=",
"~",
"geom",
".",
"lowerSeismoDepth",
",",
"nodal_plane_distribution",
"=",
"self",
".",
"convert_npdist",
"(",
"node",
")",
",",
"hypocenter_distribution",
"=",
"self",
".",
"convert_hpdist",
"(",
"node",
")",
",",
"mesh",
"=",
"geo",
".",
"Mesh",
"(",
"F32",
"(",
"lons",
")",
",",
"F32",
"(",
"lats",
")",
")",
",",
"temporal_occurrence_model",
"=",
"self",
".",
"get_tom",
"(",
"node",
")",
")"
] |
Convert the given node into a MultiPointSource object.
:param node: a node with tag multiPointGeometry
:returns: a :class:`openquake.hazardlib.source.MultiPointSource`
|
[
"Convert",
"the",
"given",
"node",
"into",
"a",
"MultiPointSource",
"object",
"."
] |
python
|
train
|
quantmind/pulsar
|
pulsar/async/monitor.py
|
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/async/monitor.py#L202-L215
|
def add_monitor(self, actor, monitor_name, **params):
'''Add a new ``monitor``.
:param actor: the :class:`.Actor` that will spawn the monitor.
:param monitor_name: a unique name for the monitor.
:param kwargs: dictionary of key-valued parameters for the monitor.
:return: the :class:`.Monitor` added.
'''
if monitor_name in self.registered:
raise KeyError('Monitor "%s" already available' % monitor_name)
params.update(actor.actorparams())
params['name'] = monitor_name
params['kind'] = 'monitor'
return actor.spawn(**params)
|
[
"def",
"add_monitor",
"(",
"self",
",",
"actor",
",",
"monitor_name",
",",
"*",
"*",
"params",
")",
":",
"if",
"monitor_name",
"in",
"self",
".",
"registered",
":",
"raise",
"KeyError",
"(",
"'Monitor \"%s\" already available'",
"%",
"monitor_name",
")",
"params",
".",
"update",
"(",
"actor",
".",
"actorparams",
"(",
")",
")",
"params",
"[",
"'name'",
"]",
"=",
"monitor_name",
"params",
"[",
"'kind'",
"]",
"=",
"'monitor'",
"return",
"actor",
".",
"spawn",
"(",
"*",
"*",
"params",
")"
] |
Add a new ``monitor``.
:param actor: the :class:`.Actor` that will spawn the monitor.
:param monitor_name: a unique name for the monitor.
:param kwargs: dictionary of key-valued parameters for the monitor.
:return: the :class:`.Monitor` added.
|
[
"Add",
"a",
"new",
"monitor",
"."
] |
python
|
train
|
kensho-technologies/graphql-compiler
|
graphql_compiler/compiler/common.py
|
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/common.py#L122-L152
|
def _compile_graphql_generic(language, lowering_func, query_emitter_func,
schema, graphql_string, type_equivalence_hints, compiler_metadata):
"""Compile the GraphQL input, lowering and emitting the query using the given functions.
Args:
language: string indicating the target language to compile to.
lowering_func: Function to lower the compiler IR into a compatible form for the target
language backend.
query_emitter_func: Function that emits a query in the target language from the lowered IR.
schema: GraphQL schema object describing the schema of the graph to be queried.
graphql_string: the GraphQL query to compile to the target language, as a string.
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
compiler_metadata: optional target specific metadata for usage by the query_emitter_func.
Returns:
a CompilationResult object
"""
ir_and_metadata = graphql_to_ir(
schema, graphql_string, type_equivalence_hints=type_equivalence_hints)
lowered_ir_blocks = lowering_func(
ir_and_metadata.ir_blocks, ir_and_metadata.query_metadata_table,
type_equivalence_hints=type_equivalence_hints)
query = query_emitter_func(lowered_ir_blocks, compiler_metadata)
return CompilationResult(
query=query,
language=language,
output_metadata=ir_and_metadata.output_metadata,
input_metadata=ir_and_metadata.input_metadata)
|
[
"def",
"_compile_graphql_generic",
"(",
"language",
",",
"lowering_func",
",",
"query_emitter_func",
",",
"schema",
",",
"graphql_string",
",",
"type_equivalence_hints",
",",
"compiler_metadata",
")",
":",
"ir_and_metadata",
"=",
"graphql_to_ir",
"(",
"schema",
",",
"graphql_string",
",",
"type_equivalence_hints",
"=",
"type_equivalence_hints",
")",
"lowered_ir_blocks",
"=",
"lowering_func",
"(",
"ir_and_metadata",
".",
"ir_blocks",
",",
"ir_and_metadata",
".",
"query_metadata_table",
",",
"type_equivalence_hints",
"=",
"type_equivalence_hints",
")",
"query",
"=",
"query_emitter_func",
"(",
"lowered_ir_blocks",
",",
"compiler_metadata",
")",
"return",
"CompilationResult",
"(",
"query",
"=",
"query",
",",
"language",
"=",
"language",
",",
"output_metadata",
"=",
"ir_and_metadata",
".",
"output_metadata",
",",
"input_metadata",
"=",
"ir_and_metadata",
".",
"input_metadata",
")"
] |
Compile the GraphQL input, lowering and emitting the query using the given functions.
Args:
language: string indicating the target language to compile to.
lowering_func: Function to lower the compiler IR into a compatible form for the target
language backend.
query_emitter_func: Function that emits a query in the target language from the lowered IR.
schema: GraphQL schema object describing the schema of the graph to be queried.
graphql_string: the GraphQL query to compile to the target language, as a string.
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
compiler_metadata: optional target specific metadata for usage by the query_emitter_func.
Returns:
a CompilationResult object
|
[
"Compile",
"the",
"GraphQL",
"input",
"lowering",
"and",
"emitting",
"the",
"query",
"using",
"the",
"given",
"functions",
"."
] |
python
|
train
|
arve0/fijibin
|
fijibin/__init__.py
|
https://github.com/arve0/fijibin/blob/a3d2e983cb9ff2bcbb56a800084bc3b35cb9292f/fijibin/__init__.py#L56-L120
|
def fetch(force=False):
"""Fetch and extract latest Life-Line version of Fiji is just ImageJ
to *~/.bin*.
Parameters
----------
force : bool
Force overwrite of existing Fiji in *~/.bin*.
"""
try:
# python 2
from urllib2 import urlopen, HTTPError, URLError
except ImportError:
# python 3
from urllib.request import urlopen, HTTPError, URLError
if os.path.isdir(FIJI_ROOT) and not force:
return
elif not os.path.isdir(FIJI_ROOT):
print('Fiji missing in %s' % FIJI_ROOT)
if force:
print('Deleting %s' % FIJI_ROOT)
shutil.rmtree(FIJI_ROOT, ignore_errors=True)
print('Downloading fiji from %s' % URL)
try:
req = urlopen(URL)
try:
size = int(req.info()['content-length'])
except AttributeError:
size = -1
chunk = 512*1024
fp = BytesIO()
i = 0
while 1:
data = req.read(chunk)
if not data:
break
fp.write(data)
if size > 0:
percent = fp.tell() // (size/100)
msg = 'Downloaded %d percent \r' % percent
else:
msg = 'Downloaded %d bytes\r' % fp.tell()
sys.stdout.write(msg)
except (HTTPError, URLError) as e:
print('Error getting fiji: {}'.format(e))
sys.exit(1)
try:
print('\nExtracting zip')
z = ZipFile(fp)
z.extractall(BIN_FOLDER)
# move to Fiji-VERSION.app to easily check if it exists (upon fijibin upgrade)
os.rename(EXTRACT_FOLDER, FIJI_ROOT)
except (BadZipFile, IOError) as e:
print('Error extracting zip: {}'.format(e))
sys.exit(1)
for path in BIN_NAMES.values():
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC)
|
[
"def",
"fetch",
"(",
"force",
"=",
"False",
")",
":",
"try",
":",
"# python 2",
"from",
"urllib2",
"import",
"urlopen",
",",
"HTTPError",
",",
"URLError",
"except",
"ImportError",
":",
"# python 3",
"from",
"urllib",
".",
"request",
"import",
"urlopen",
",",
"HTTPError",
",",
"URLError",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"FIJI_ROOT",
")",
"and",
"not",
"force",
":",
"return",
"elif",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"FIJI_ROOT",
")",
":",
"print",
"(",
"'Fiji missing in %s'",
"%",
"FIJI_ROOT",
")",
"if",
"force",
":",
"print",
"(",
"'Deleting %s'",
"%",
"FIJI_ROOT",
")",
"shutil",
".",
"rmtree",
"(",
"FIJI_ROOT",
",",
"ignore_errors",
"=",
"True",
")",
"print",
"(",
"'Downloading fiji from %s'",
"%",
"URL",
")",
"try",
":",
"req",
"=",
"urlopen",
"(",
"URL",
")",
"try",
":",
"size",
"=",
"int",
"(",
"req",
".",
"info",
"(",
")",
"[",
"'content-length'",
"]",
")",
"except",
"AttributeError",
":",
"size",
"=",
"-",
"1",
"chunk",
"=",
"512",
"*",
"1024",
"fp",
"=",
"BytesIO",
"(",
")",
"i",
"=",
"0",
"while",
"1",
":",
"data",
"=",
"req",
".",
"read",
"(",
"chunk",
")",
"if",
"not",
"data",
":",
"break",
"fp",
".",
"write",
"(",
"data",
")",
"if",
"size",
">",
"0",
":",
"percent",
"=",
"fp",
".",
"tell",
"(",
")",
"//",
"(",
"size",
"/",
"100",
")",
"msg",
"=",
"'Downloaded %d percent \\r'",
"%",
"percent",
"else",
":",
"msg",
"=",
"'Downloaded %d bytes\\r'",
"%",
"fp",
".",
"tell",
"(",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"msg",
")",
"except",
"(",
"HTTPError",
",",
"URLError",
")",
"as",
"e",
":",
"print",
"(",
"'Error getting fiji: {}'",
".",
"format",
"(",
"e",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"try",
":",
"print",
"(",
"'\\nExtracting zip'",
")",
"z",
"=",
"ZipFile",
"(",
"fp",
")",
"z",
".",
"extractall",
"(",
"BIN_FOLDER",
")",
"# move to Fiji-VERSION.app to easily check if it exists (upon fijibin upgrade)",
"os",
".",
"rename",
"(",
"EXTRACT_FOLDER",
",",
"FIJI_ROOT",
")",
"except",
"(",
"BadZipFile",
",",
"IOError",
")",
"as",
"e",
":",
"print",
"(",
"'Error extracting zip: {}'",
".",
"format",
"(",
"e",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"for",
"path",
"in",
"BIN_NAMES",
".",
"values",
"(",
")",
":",
"st",
"=",
"os",
".",
"stat",
"(",
"path",
")",
"os",
".",
"chmod",
"(",
"path",
",",
"st",
".",
"st_mode",
"|",
"stat",
".",
"S_IEXEC",
")"
] |
Fetch and extract the latest Life-Line version of Fiji (Fiji Is Just ImageJ)
to *~/.bin*.
Parameters
----------
force : bool
Force overwrite of existing Fiji in *~/.bin*.
|
[
"Fetch",
"and",
"extract",
"latest",
"Life",
"-",
"Line",
"version",
"of",
"Fiji",
"is",
"just",
"ImageJ",
"to",
"*",
"~",
"/",
".",
"bin",
"*",
"."
] |
python
|
train
|
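Usage is a single module-level call; a brief sketch:

import fijibin

fijibin.fetch()             # no-op when Fiji is already present in ~/.bin
fijibin.fetch(force=True)   # delete any existing copy, re-download and re-extract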
echinopsii/net.echinopsii.ariane.community.cli.python3
|
ariane_clip3/directory.py
|
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/directory.py#L567-L584
|
def get_routing_areas():
"""
:return: all routing areas
"""
LOGGER.debug("RoutingAreaService.get_routing_areas")
args = {'http_operation': 'GET', 'operation_path': ''}
response = RoutingAreaService.requester.call(args)
ret = None
if response.rc == 0:
ret = []
for routing_area in response.response_content['routingAreas']:
ret.append(RoutingArea.json_2_routing_area(routing_area))
elif response.rc != 404:
err_msg = 'RoutingAreaService.get_routing_areas - Problem while getting routing areas. ' \
'Reason: ' + str(response.response_content) + '-' + str(response.error_message) + \
" (" + str(response.rc) + ")"
LOGGER.warning(err_msg)
return ret
|
[
"def",
"get_routing_areas",
"(",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"RoutingAreaService.get_routing_areas\"",
")",
"args",
"=",
"{",
"'http_operation'",
":",
"'GET'",
",",
"'operation_path'",
":",
"''",
"}",
"response",
"=",
"RoutingAreaService",
".",
"requester",
".",
"call",
"(",
"args",
")",
"ret",
"=",
"None",
"if",
"response",
".",
"rc",
"==",
"0",
":",
"ret",
"=",
"[",
"]",
"for",
"routing_area",
"in",
"response",
".",
"response_content",
"[",
"'routingAreas'",
"]",
":",
"ret",
".",
"append",
"(",
"RoutingArea",
".",
"json_2_routing_area",
"(",
"routing_area",
")",
")",
"elif",
"response",
".",
"rc",
"!=",
"404",
":",
"err_msg",
"=",
"'RoutingAreaService.get_routing_areas - Problem while getting routing areas. '",
"'Reason: '",
"+",
"str",
"(",
"response",
".",
"response_content",
")",
"+",
"'-'",
"+",
"str",
"(",
"response",
".",
"error_message",
")",
"+",
"\" (\"",
"+",
"str",
"(",
"response",
".",
"rc",
")",
"+",
"\")\"",
"LOGGER",
".",
"warning",
"(",
"err_msg",
")",
"return",
"ret"
] |
:return: all routing areas
|
[
":",
"return",
":",
"all",
"routing",
"areas"
] |
python
|
train
|
MSchnei/pyprf_feature
|
pyprf_feature/analysis/pyprf_sim.py
|
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/pyprf_sim.py#L35-L209
|
def pyprf_sim(strPrior, strStmApr, lgcNoise=False, lgcRtnNrl=True,
lstRat=None, lgcTest=False):
"""
Simulate pRF response given pRF parameters and stimulus apertures.
Parameters
----------
strPrior : str
Absolute file path of config file used for pRF fitting.
strStmApr : str
Absolute file path to stimulus aperture used in in-silico experiment.
lgcNoise : boolean
Should noise be added to the simulated pRF time course. By default, no
noise is added.
lgcRtnNrl : boolean
Should neural time course, unconvolved with hrf, be returned as well?
lstRat : None or list
Ratio of size of center to size of suppressive surround.
lgcTest : boolean
Whether this is a test (pytest). If yes, the absolute path of the pyprf
library will be prepended to config file paths.
Notes
-----
[1] This function does not return any arguments but, instead, saves nii
files to disk.
[2] strStmApr should be a path to a npy file that contains a 3D numpy
array. This array consists of binary images in boolean array form that
represent the stimulus aperture. Images are stacked along the last axis.
"""
# %% Load configuration settings that were used for fitting
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strPrior, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# If suppressive surround flag is on, make sure to retrieve results from
# that fitting
if lstRat is not None:
cfg.strPathOut = cfg.strPathOut + '_supsur'
# %% Load previous pRF fitting results
# Derive paths to the x, y, sigma winner parameters from pyprf_feature
lstWnrPrm = [cfg.strPathOut + '_x_pos.nii.gz',
cfg.strPathOut + '_y_pos.nii.gz',
cfg.strPathOut + '_SD.nii.gz']
# Check if fitting has been performed, i.e. whether parameter files exist
# Throw error message if they do not exist.
errorMsg = 'Files that should have resulted from fitting do not exist. \
\nPlease perform pRF fitting first, calling e.g.: \
\npyprf_feature -config /path/to/my_config_file.csv'
assert os.path.isfile(lstWnrPrm[0]), errorMsg
assert os.path.isfile(lstWnrPrm[1]), errorMsg
assert os.path.isfile(lstWnrPrm[2]), errorMsg
# Load the x, y, sigma winner parameters from pyprf_feature
aryIntGssPrm = load_res_prm(lstWnrPrm,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Also load suppresive surround params if suppressive surround flag was on
if lstRat is not None:
# Load beta parameters estimates, aka weights, this is later needed to
# scale responses of the center wrt to the surround
lstPathBeta = [cfg.strPathOut + '_Betas.nii.gz']
aryBetas = load_res_prm(lstPathBeta,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Load ratio of prf sizes
lstPathRat = [cfg.strPathOut + '_Ratios.nii.gz']
aryRat = load_res_prm(lstPathRat, lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Some voxels were excluded because they did not have sufficient mean
# and/or variance - exclude their initial parameters, too
# Get inclusion mask and nii header
aryLgcMsk, aryLgcVar, hdrMsk, aryAff, _, tplNiiShp = prep_func(
cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=100.)
# Apply inclusion mask
aryIntGssPrm = aryIntGssPrm[aryLgcVar, :]
if lstRat is not None:
aryBetas = aryBetas[aryLgcVar, :]
aryRat = aryRat[aryLgcVar]
# %% Load stimulus aperture and create model responses to stimuli
# Load stimulus aperture
aryStmApr = np.load(strStmApr)
# Which dimensions does the representation have in pixel space?
tplStmApr = aryStmApr.shape[:2]
# Convert winner parameters from degrees of visual angle to pixel
vecIntX, vecIntY, vecIntSd = rmp_deg_pixel_xys(aryIntGssPrm[:, 0],
aryIntGssPrm[:, 1],
aryIntGssPrm[:, 2],
tplStmApr,
cfg.varExtXmin,
cfg.varExtXmax,
cfg.varExtYmin,
cfg.varExtYmax)
aryIntGssPrmPxl = np.column_stack((vecIntX, vecIntY, vecIntSd))
# Create 2D Gauss model responses to spatial conditions.
print('---Create 2D Gauss model responses to spatial conditions')
aryMdlRsp = crt_mdl_rsp(aryStmApr, tplStmApr, aryIntGssPrmPxl, cfg.varPar)
# If supsur flag was provided, also create responses with supsur params
# and combine positive center response with negative surround response
if lstRat is not None:
aryIntGssPrmPxlSur = np.copy(aryIntGssPrmPxl)
# Adjust pRF sizes using the ratio of pRF sizes
aryIntGssPrmPxlSur[:, 2] = np.multiply(aryIntGssPrmPxlSur[:, 2],
aryRat)
aryMdlRspSur = crt_mdl_rsp(aryStmApr, tplStmApr, aryIntGssPrmPxlSur,
cfg.varPar)
# Now the responses of the center and the surround need to be combined
# in a meaningful way. One way this could be done is to take the ratio
# of gain parameters that were found when fitting (i.e. betas)
varGainRat = np.divide(aryBetas[:, 0], aryBetas[:, 1])
aryMdlRsp = np.subtract(aryMdlRsp,
np.multiply(varGainRat, aryMdlRspSur))
# %% Convolve time courses with hrf function
# First temporally upsample the model response
aryMdlRspUps = np.repeat(aryMdlRsp, cfg.varTmpOvsmpl, axis=-1)
# Convolve with hrf function
arySimRsp = crt_prf_tc(aryMdlRspUps, aryMdlRsp.shape[-1], cfg.varTr,
cfg.varTmpOvsmpl, 1, tplStmApr, cfg.varPar)
# Squeeze simulated response. This step is necessary because crt_prf_tc is,
# in principle, capable of convolving with derivatives of the canonical function
if arySimRsp.shape[1] > 1:
print('***WARNING: pyprf_sim expects 1 hrf function, currently***')
arySimRsp = np.squeeze(arySimRsp)
# Save memory by deleting upsampled time course
del(aryMdlRspUps)
# %% Add auto-correlated noise
if lgcNoise:
print('***Adding noise feature not yet implemented***')
# %% Export simulated prf, and if desired neural, time courses as nii
# List with name suffices of output images:
lstNiiNames = ['_SimPrfTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export beta parameter as a single 4D nii file
print('---Save simulated pRF time courses')
export_nii(arySimRsp, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
if lgcRtnNrl:
# List with name suffices of output images:
lstNiiNames = ['_SimNrlTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export beta parameter as a single 4D nii file
print('---Save simulated neural time courses')
export_nii(aryMdlRsp, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
|
[
"def",
"pyprf_sim",
"(",
"strPrior",
",",
"strStmApr",
",",
"lgcNoise",
"=",
"False",
",",
"lgcRtnNrl",
"=",
"True",
",",
"lstRat",
"=",
"None",
",",
"lgcTest",
"=",
"False",
")",
":",
"# %% Load configuration settings that were used for fitting",
"# Load config parameters from csv file into dictionary:",
"dicCnfg",
"=",
"load_config",
"(",
"strPrior",
",",
"lgcTest",
"=",
"lgcTest",
")",
"# Load config parameters from dictionary into namespace:",
"cfg",
"=",
"cls_set_config",
"(",
"dicCnfg",
")",
"# If suppressive surround flag is on, make sure to retrieve results from",
"# that fitting",
"if",
"lstRat",
"is",
"not",
"None",
":",
"cfg",
".",
"strPathOut",
"=",
"cfg",
".",
"strPathOut",
"+",
"'_supsur'",
"# %% Load previous pRF fitting results",
"# Derive paths to the x, y, sigma winner parameters from pyprf_feature",
"lstWnrPrm",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"'_x_pos.nii.gz'",
",",
"cfg",
".",
"strPathOut",
"+",
"'_y_pos.nii.gz'",
",",
"cfg",
".",
"strPathOut",
"+",
"'_SD.nii.gz'",
"]",
"# Check if fitting has been performed, i.e. whether parameter files exist",
"# Throw error message if they do not exist.",
"errorMsg",
"=",
"'Files that should have resulted from fitting do not exist. \\\n \\nPlease perform pRF fitting first, calling e.g.: \\\n \\npyprf_feature -config /path/to/my_config_file.csv'",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"lstWnrPrm",
"[",
"0",
"]",
")",
",",
"errorMsg",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"lstWnrPrm",
"[",
"1",
"]",
")",
",",
"errorMsg",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"lstWnrPrm",
"[",
"2",
"]",
")",
",",
"errorMsg",
"# Load the x, y, sigma winner parameters from pyprf_feature",
"aryIntGssPrm",
"=",
"load_res_prm",
"(",
"lstWnrPrm",
",",
"lstFlsMsk",
"=",
"[",
"cfg",
".",
"strPathNiiMask",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"# Also load suppresive surround params if suppressive surround flag was on",
"if",
"lstRat",
"is",
"not",
"None",
":",
"# Load beta parameters estimates, aka weights, this is later needed to",
"# scale responses of the center wrt to the surround",
"lstPathBeta",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"'_Betas.nii.gz'",
"]",
"aryBetas",
"=",
"load_res_prm",
"(",
"lstPathBeta",
",",
"lstFlsMsk",
"=",
"[",
"cfg",
".",
"strPathNiiMask",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"# Load ratio of prf sizes",
"lstPathRat",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"'_Ratios.nii.gz'",
"]",
"aryRat",
"=",
"load_res_prm",
"(",
"lstPathRat",
",",
"lstFlsMsk",
"=",
"[",
"cfg",
".",
"strPathNiiMask",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"# Some voxels were excluded because they did not have sufficient mean",
"# and/or variance - exclude their initial parameters, too",
"# Get inclusion mask and nii header",
"aryLgcMsk",
",",
"aryLgcVar",
",",
"hdrMsk",
",",
"aryAff",
",",
"_",
",",
"tplNiiShp",
"=",
"prep_func",
"(",
"cfg",
".",
"strPathNiiMask",
",",
"cfg",
".",
"lstPathNiiFunc",
",",
"varAvgThr",
"=",
"100.",
")",
"# Apply inclusion mask",
"aryIntGssPrm",
"=",
"aryIntGssPrm",
"[",
"aryLgcVar",
",",
":",
"]",
"if",
"lstRat",
"is",
"not",
"None",
":",
"aryBetas",
"=",
"aryBetas",
"[",
"aryLgcVar",
",",
":",
"]",
"aryRat",
"=",
"aryRat",
"[",
"aryLgcVar",
"]",
"# %% Load stimulus aperture and create model responses to stimuli",
"# Load stimulus aperture",
"aryStmApr",
"=",
"np",
".",
"load",
"(",
"strStmApr",
")",
"# Which dimensions does the representation have in pixel space?",
"tplStmApr",
"=",
"aryStmApr",
".",
"shape",
"[",
":",
"2",
"]",
"# Convert winner parameters from degrees of visual angle to pixel",
"vecIntX",
",",
"vecIntY",
",",
"vecIntSd",
"=",
"rmp_deg_pixel_xys",
"(",
"aryIntGssPrm",
"[",
":",
",",
"0",
"]",
",",
"aryIntGssPrm",
"[",
":",
",",
"1",
"]",
",",
"aryIntGssPrm",
"[",
":",
",",
"2",
"]",
",",
"tplStmApr",
",",
"cfg",
".",
"varExtXmin",
",",
"cfg",
".",
"varExtXmax",
",",
"cfg",
".",
"varExtYmin",
",",
"cfg",
".",
"varExtYmax",
")",
"aryIntGssPrmPxl",
"=",
"np",
".",
"column_stack",
"(",
"(",
"vecIntX",
",",
"vecIntY",
",",
"vecIntSd",
")",
")",
"# Create 2D Gauss model responses to spatial conditions.",
"print",
"(",
"'---Create 2D Gauss model responses to spatial conditions'",
")",
"aryMdlRsp",
"=",
"crt_mdl_rsp",
"(",
"aryStmApr",
",",
"tplStmApr",
",",
"aryIntGssPrmPxl",
",",
"cfg",
".",
"varPar",
")",
"# If supsur flag was provided, also create responses with supsur params",
"# and combine positive center response with negative surround response",
"if",
"lstRat",
"is",
"not",
"None",
":",
"aryIntGssPrmPxlSur",
"=",
"np",
".",
"copy",
"(",
"aryIntGssPrmPxl",
")",
"# Adjust pRF sizes using the ratio of pRF sizes",
"aryIntGssPrmPxlSur",
"[",
":",
",",
"2",
"]",
"=",
"np",
".",
"multiply",
"(",
"aryIntGssPrmPxlSur",
"[",
":",
",",
"2",
"]",
",",
"aryRat",
")",
"aryMdlRspSur",
"=",
"crt_mdl_rsp",
"(",
"aryStmApr",
",",
"tplStmApr",
",",
"aryIntGssPrmPxlSur",
",",
"cfg",
".",
"varPar",
")",
"# Now the responses of the center and the surround need to be combined",
"# in a meaningful way. One way this could be done is to take the ratio",
"# of gain parameters that were found when fitting (i.e. betas)",
"varGainRat",
"=",
"np",
".",
"divide",
"(",
"aryBetas",
"[",
":",
",",
"0",
"]",
",",
"aryBetas",
"[",
":",
",",
"1",
"]",
")",
"aryMdlRsp",
"=",
"np",
".",
"subtract",
"(",
"aryMdlRsp",
",",
"np",
".",
"multiply",
"(",
"varGainRat",
",",
"aryMdlRspSur",
")",
")",
"# %% Convolve time courses with hrf function",
"# First temporally upsamle the model response",
"aryMdlRspUps",
"=",
"np",
".",
"repeat",
"(",
"aryMdlRsp",
",",
"cfg",
".",
"varTmpOvsmpl",
",",
"axis",
"=",
"-",
"1",
")",
"# Convolve with hrf function",
"arySimRsp",
"=",
"crt_prf_tc",
"(",
"aryMdlRspUps",
",",
"aryMdlRsp",
".",
"shape",
"[",
"-",
"1",
"]",
",",
"cfg",
".",
"varTr",
",",
"cfg",
".",
"varTmpOvsmpl",
",",
"1",
",",
"tplStmApr",
",",
"cfg",
".",
"varPar",
")",
"# Squeeze simulated reponse. This step is necessary because crt_prf_tc is,",
"# in principle, capable of convolving with deriavtes of canonical function",
"if",
"arySimRsp",
".",
"shape",
"[",
"1",
"]",
">",
"1",
":",
"print",
"(",
"'***WARNING: pyprf_sim expects 1 hrf function, currently***'",
")",
"arySimRsp",
"=",
"np",
".",
"squeeze",
"(",
"arySimRsp",
")",
"# Save memory by deleting upsampled time course",
"del",
"(",
"aryMdlRspUps",
")",
"# %% Add auto-correlated noise",
"if",
"lgcNoise",
":",
"print",
"(",
"'***Adding noise feature not yet implemented***'",
")",
"# %% Export simulated prf, and if desired neural, time courses as nii",
"# List with name suffices of output images:",
"lstNiiNames",
"=",
"[",
"'_SimPrfTc'",
"]",
"# Create full path names from nii file names and output path",
"lstNiiNames",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"strNii",
"+",
"'.nii.gz'",
"for",
"strNii",
"in",
"lstNiiNames",
"]",
"# export beta parameter as a single 4D nii file",
"print",
"(",
"'---Save simulated pRF time courses'",
")",
"export_nii",
"(",
"arySimRsp",
",",
"lstNiiNames",
",",
"aryLgcMsk",
",",
"aryLgcVar",
",",
"tplNiiShp",
",",
"aryAff",
",",
"hdrMsk",
",",
"outFormat",
"=",
"'4D'",
")",
"print",
"(",
"'------Done.'",
")",
"if",
"lgcRtnNrl",
":",
"# List with name suffices of output images:",
"lstNiiNames",
"=",
"[",
"'_SimNrlTc'",
"]",
"# Create full path names from nii file names and output path",
"lstNiiNames",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"strNii",
"+",
"'.nii.gz'",
"for",
"strNii",
"in",
"lstNiiNames",
"]",
"# export beta parameter as a single 4D nii file",
"print",
"(",
"'---Save simulated neural time courses'",
")",
"export_nii",
"(",
"aryMdlRsp",
",",
"lstNiiNames",
",",
"aryLgcMsk",
",",
"aryLgcVar",
",",
"tplNiiShp",
",",
"aryAff",
",",
"hdrMsk",
",",
"outFormat",
"=",
"'4D'",
")",
"print",
"(",
"'------Done.'",
")"
] |
Simulate pRF response given pRF parameters and stimulus apertures.
Parameters
----------
strPrior : str
Absolute file path of config file used for pRF fitting.
strStmApr : str
Absolute file path to stimulus aperture used in in-silico experiment.
lgcNoise : boolean
Should noise be added to the simulated pRF time course. By default, no
noise is added.
lgcRtnNrl : boolean
Should neural time course, unconvolved with hrf, be returned as well?
lstRat : None or list
Ratio of size of center to size of suppressive surround.
lgcTest : boolean
Whether this is a test (pytest). If yes, the absolute path of the pyprf
library will be prepended to config file paths.
Notes
-----
[1] This function does not return any arguments but, instead, saves nii
files to disk.
[2] strStmApr should be a path to a npy file that contains a 3D numpy
array. This array consists of binary images in boolean array form that
represent the stimulus aperture. Images are stacked along the last axis.
|
[
"Simulate",
"pRF",
"response",
"given",
"pRF",
"parameters",
"and",
"stimulus",
"apertures",
"."
] |
python
|
train
|
peepall/FancyLogger
|
FancyLogger/processing/__init__.py
|
https://github.com/peepall/FancyLogger/blob/7f13f1397e76ed768fb6b6358194118831fafc6d/FancyLogger/processing/__init__.py#L225-L283
|
def run(self):
"""
The main loop for the logger process. Will receive remote processes' orders one by one and wait for the next one.
Then return from this method when the main application calls for exit, which is a regular command.
"""
# Initialize the file logger
self.log = getLogger()
# Deserialize configuration
self.set_config_command = dill.loads(self.set_config_command)
self.set_configuration(self.set_config_command)
for handler in self.file_handlers:
if isinstance(handler, StreamHandler)\
and (handler.stream == sys.stdout or handler.stream == sys.stderr):
self.critical(LogMessageCommand(text='Cannot use logging.StreamHandler with \'sys.stdout\' nor '
'\'sys.stderr\' because those are reserved by the logger process',
level=logging.CRITICAL))
continue
self.log.addHandler(hdlr=handler)
self.log.setLevel(self.console_level)
while True:
o = dill.loads(self.queue.get())
if isinstance(o, LogMessageCommand):
if o.level == logging.DEBUG:
self.debug(command=o)
elif o.level == logging.INFO:
self.info(command=o)
elif o.level == logging.WARNING:
self.warning(command=o)
elif o.level == logging.ERROR:
self.error(command=o)
elif o.level == logging.CRITICAL:
self.critical(command=o)
elif isinstance(o, UpdateProgressCommand):
self.update(command=o)
elif isinstance(o, NewTaskCommand):
self.set_task(command=o)
elif isinstance(o, FlushCommand):
self.flush()
elif isinstance(o, StacktraceCommand):
self.throw(command=o)
elif isinstance(o, SetConfigurationCommand):
self.set_configuration(command=o)
elif isinstance(o, ExitCommand):
return
elif isinstance(o, SetLevelCommand):
self.set_level(command=o)
|
[
"def",
"run",
"(",
"self",
")",
":",
"# Initialize the file logger",
"self",
".",
"log",
"=",
"getLogger",
"(",
")",
"# Deserialize configuration",
"self",
".",
"set_config_command",
"=",
"dill",
".",
"loads",
"(",
"self",
".",
"set_config_command",
")",
"self",
".",
"set_configuration",
"(",
"self",
".",
"set_config_command",
")",
"for",
"handler",
"in",
"self",
".",
"file_handlers",
":",
"if",
"isinstance",
"(",
"handler",
",",
"StreamHandler",
")",
"and",
"(",
"handler",
".",
"stream",
"==",
"sys",
".",
"stdout",
"or",
"handler",
".",
"stream",
"==",
"sys",
".",
"stderr",
")",
":",
"self",
".",
"critical",
"(",
"LogMessageCommand",
"(",
"text",
"=",
"'Cannot use logging.StreamHandler with \\'sys.stdout\\' nor '",
"'\\'sys.stderr\\' because those are reserved by the logger process'",
",",
"level",
"=",
"logging",
".",
"CRITICAL",
")",
")",
"continue",
"self",
".",
"log",
".",
"addHandler",
"(",
"hdlr",
"=",
"handler",
")",
"self",
".",
"log",
".",
"setLevel",
"(",
"self",
".",
"console_level",
")",
"while",
"True",
":",
"o",
"=",
"dill",
".",
"loads",
"(",
"self",
".",
"queue",
".",
"get",
"(",
")",
")",
"if",
"isinstance",
"(",
"o",
",",
"LogMessageCommand",
")",
":",
"if",
"o",
".",
"level",
"==",
"logging",
".",
"DEBUG",
":",
"self",
".",
"debug",
"(",
"command",
"=",
"o",
")",
"elif",
"o",
".",
"level",
"==",
"logging",
".",
"INFO",
":",
"self",
".",
"info",
"(",
"command",
"=",
"o",
")",
"elif",
"o",
".",
"level",
"==",
"logging",
".",
"WARNING",
":",
"self",
".",
"warning",
"(",
"command",
"=",
"o",
")",
"elif",
"o",
".",
"level",
"==",
"logging",
".",
"ERROR",
":",
"self",
".",
"error",
"(",
"command",
"=",
"o",
")",
"elif",
"o",
".",
"level",
"==",
"logging",
".",
"CRITICAL",
":",
"self",
".",
"critical",
"(",
"command",
"=",
"o",
")",
"elif",
"isinstance",
"(",
"o",
",",
"UpdateProgressCommand",
")",
":",
"self",
".",
"update",
"(",
"command",
"=",
"o",
")",
"elif",
"isinstance",
"(",
"o",
",",
"NewTaskCommand",
")",
":",
"self",
".",
"set_task",
"(",
"command",
"=",
"o",
")",
"elif",
"isinstance",
"(",
"o",
",",
"FlushCommand",
")",
":",
"self",
".",
"flush",
"(",
")",
"elif",
"isinstance",
"(",
"o",
",",
"StacktraceCommand",
")",
":",
"self",
".",
"throw",
"(",
"command",
"=",
"o",
")",
"elif",
"isinstance",
"(",
"o",
",",
"SetConfigurationCommand",
")",
":",
"self",
".",
"set_configuration",
"(",
"command",
"=",
"o",
")",
"elif",
"isinstance",
"(",
"o",
",",
"ExitCommand",
")",
":",
"return",
"elif",
"isinstance",
"(",
"o",
",",
"SetLevelCommand",
")",
":",
"self",
".",
"set_level",
"(",
"command",
"=",
"o",
")"
] |
The main loop for the logger process. Will receive remote processes' orders one by one and wait for the next one.
Then return from this method when the main application calls for exit, which is a regular command.
|
[
"The",
"main",
"loop",
"for",
"the",
"logger",
"process",
".",
"Will",
"receive",
"remote",
"processes",
"orders",
"one",
"by",
"one",
"and",
"wait",
"for",
"the",
"next",
"one",
".",
"Then",
"return",
"from",
"this",
"method",
"when",
"the",
"main",
"application",
"calls",
"for",
"exit",
"which",
"is",
"a",
"regular",
"command",
"."
] |
python
|
train
|
tanghaibao/jcvi
|
jcvi/utils/brewer2mpl.py
|
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/brewer2mpl.py#L161-L174
|
def get_mpl_colormap(self, **kwargs):
"""
A color map that can be used in matplotlib plots. Requires matplotlib
to be importable. Keyword arguments are passed to
`matplotlib.colors.LinearSegmentedColormap.from_list`.
"""
if not HAVE_MPL: # pragma: no cover
raise RuntimeError('matplotlib not available.')
cmap = LinearSegmentedColormap.from_list(self.name,
self.mpl_colors, **kwargs)
return cmap
|
[
"def",
"get_mpl_colormap",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"HAVE_MPL",
":",
"# pragma: no cover",
"raise",
"RuntimeError",
"(",
"'matplotlib not available.'",
")",
"cmap",
"=",
"LinearSegmentedColormap",
".",
"from_list",
"(",
"self",
".",
"name",
",",
"self",
".",
"mpl_colors",
",",
"*",
"*",
"kwargs",
")",
"return",
"cmap"
] |
A color map that can be used in matplotlib plots. Requires matplotlib
to be importable. Keyword arguments are passed to
`matplotlib.colors.LinearSegmentedColormap.from_list`.
|
[
"A",
"color",
"map",
"that",
"can",
"be",
"used",
"in",
"matplotlib",
"plots",
".",
"Requires",
"matplotlib",
"to",
"be",
"importable",
".",
"Keyword",
"arguments",
"are",
"passed",
"to",
"matplotlib",
".",
"colors",
".",
"LinearSegmentedColormap",
".",
"from_list",
"."
] |
python
|
train
|
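A sketch of how the returned colormap is typically used; the `get_map` accessor is assumed from the brewer2mpl API that this module vendors:

import matplotlib.pyplot as plt
import numpy as np
from jcvi.utils import brewer2mpl

bmap = brewer2mpl.get_map('RdBu', 'diverging', 9)  # assumed accessor returning a palette object
cmap = bmap.get_mpl_colormap(N=256, gamma=1.0)     # kwargs are forwarded to from_list
plt.imshow(np.random.rand(8, 8), cmap=cmap)
plt.colorbar()
plt.show()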
NoviceLive/intellicoder
|
intellicoder/intellisense/formatters.py
|
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/formatters.py#L75-L86
|
def format_names(raw):
"""Format a string representing the names contained in the files.
"""
if raw:
raw = [
'{}:\n{}'.format(
header.lower(), ' '.join(func[0] for func in funcs)
)
for header, funcs in raw
]
return '\n'.join(raw)
return ''
|
[
"def",
"format_names",
"(",
"raw",
")",
":",
"if",
"raw",
":",
"raw",
"=",
"[",
"'{}:\\n{}'",
".",
"format",
"(",
"header",
".",
"lower",
"(",
")",
",",
"' '",
".",
"join",
"(",
"func",
"[",
"0",
"]",
"for",
"func",
"in",
"funcs",
")",
")",
"for",
"header",
",",
"funcs",
"in",
"raw",
"]",
"return",
"'\\n'",
".",
"join",
"(",
"raw",
")",
"return",
"''"
] |
Format a string representing the names contained in the files.
|
[
"Format",
"a",
"string",
"representing",
"the",
"names",
"contained",
"in",
"the",
"files",
"."
] |
python
|
train
|
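A small illustration of the input shape this expects; the header and function names are made up:

raw = [('KERNEL32', [('CreateFileA',), ('ReadFile',)]),
       ('USER32', [('MessageBoxA',)])]
print(format_names(raw))
# kernel32:
# CreateFileA ReadFile   (functions joined with the separator used in the source)
# user32:
# MessageBoxA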
JarryShaw/PyPCAPKit
|
src/const/arp/hardware.py
|
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/const/arp/hardware.py#L55-L61
|
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return Hardware(key)
if key not in Hardware._member_map_:
extend_enum(Hardware, key, default)
return Hardware[key]
|
[
"def",
"get",
"(",
"key",
",",
"default",
"=",
"-",
"1",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"int",
")",
":",
"return",
"Hardware",
"(",
"key",
")",
"if",
"key",
"not",
"in",
"Hardware",
".",
"_member_map_",
":",
"extend_enum",
"(",
"Hardware",
",",
"key",
",",
"default",
")",
"return",
"Hardware",
"[",
"key",
"]"
] |
Backport support for original codes.
|
[
"Backport",
"support",
"for",
"original",
"codes",
"."
] |
python
|
train
|
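A hedged sketch of both lookup paths; the enum member name for value 1 is an assumption:

print(get(1))                # integer keys resolve directly, e.g. <Hardware.Ethernet: 1>
hw = get('Hypothetical_HW')  # unknown names are added to the enum via extend_enum
print(hw.value)              # -1, the default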
rstoneback/pysatMagVect
|
pysatMagVect/_core.py
|
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L253-L277
|
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
"""Projects vector in ecef onto different basis, with components also expressed in ECEF
Parameters
----------
x : float or array-like
ECEF-X component of vector
y : float or array-like
ECEF-Y component of vector
z : float or array-like
ECEF-Z component of vector
xx : float or array-like
ECEF-X component of the x unit vector of new basis
xy : float or array-like
ECEF-Y component of the x unit vector of new basis
xz : float or array-like
ECEF-Z component of the x unit vector of new basis
"""
out_x = x*xx + y*xy + z*xz
out_y = x*yx + y*yy + z*yz
out_z = x*zx + y*zy + z*zz
return out_x, out_y, out_z
|
[
"def",
"project_ecef_vector_onto_basis",
"(",
"x",
",",
"y",
",",
"z",
",",
"xx",
",",
"xy",
",",
"xz",
",",
"yx",
",",
"yy",
",",
"yz",
",",
"zx",
",",
"zy",
",",
"zz",
")",
":",
"out_x",
"=",
"x",
"*",
"xx",
"+",
"y",
"*",
"xy",
"+",
"z",
"*",
"xz",
"out_y",
"=",
"x",
"*",
"yx",
"+",
"y",
"*",
"yy",
"+",
"z",
"*",
"yz",
"out_z",
"=",
"x",
"*",
"zx",
"+",
"y",
"*",
"zy",
"+",
"z",
"*",
"zz",
"return",
"out_x",
",",
"out_y",
",",
"out_z"
] |
Projects vector in ecef onto different basis, with components also expressed in ECEF
Parameters
----------
x : float or array-like
ECEF-X component of vector
y : float or array-like
ECEF-Y component of vector
z : float or array-like
ECEF-Z component of vector
xx : float or array-like
ECEF-X component of the x unit vector of new basis
xy : float or array-like
ECEF-Y component of the x unit vector of new basis
xz : float or array-like
ECEF-Z component of the x unit vector of new basis
|
[
"Projects",
"vector",
"in",
"ecef",
"onto",
"different",
"basis",
"with",
"components",
"also",
"expressed",
"in",
"ECEF",
"Parameters",
"----------",
"x",
":",
"float",
"or",
"array",
"-",
"like",
"ECEF",
"-",
"X",
"component",
"of",
"vector",
"y",
":",
"float",
"or",
"array",
"-",
"like",
"ECEF",
"-",
"Y",
"component",
"of",
"vector",
"z",
":",
"float",
"or",
"array",
"-",
"like",
"ECEF",
"-",
"Z",
"component",
"of",
"vector",
"xx",
":",
"float",
"or",
"array",
"-",
"like",
"ECEF",
"-",
"X",
"component",
"of",
"the",
"x",
"unit",
"vector",
"of",
"new",
"basis",
"xy",
":",
"float",
"or",
"array",
"-",
"like",
"ECEF",
"-",
"Y",
"component",
"of",
"the",
"x",
"unit",
"vector",
"of",
"new",
"basis",
"xz",
":",
"float",
"or",
"array",
"-",
"like",
"ECEF",
"-",
"Z",
"component",
"of",
"the",
"x",
"unit",
"vector",
"of",
"new",
"basis"
] |
python
|
train
|
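Projecting onto the identity basis should return the vector unchanged, which makes a quick sanity check:

out = project_ecef_vector_onto_basis(1.0, 2.0, 3.0,
                                     1, 0, 0,   # x unit vector of the new basis
                                     0, 1, 0,   # y unit vector
                                     0, 0, 1)   # z unit vector
print(out)   # (1.0, 2.0, 3.0)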
apache/incubator-superset
|
superset/utils/core.py
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L81-L89
|
def flasher(msg, severity=None):
"""Flask's flash if available, logging call if not"""
try:
flash(msg, severity)
except RuntimeError:
if severity == 'danger':
logging.error(msg)
else:
logging.info(msg)
|
[
"def",
"flasher",
"(",
"msg",
",",
"severity",
"=",
"None",
")",
":",
"try",
":",
"flash",
"(",
"msg",
",",
"severity",
")",
"except",
"RuntimeError",
":",
"if",
"severity",
"==",
"'danger'",
":",
"logging",
".",
"error",
"(",
"msg",
")",
"else",
":",
"logging",
".",
"info",
"(",
"msg",
")"
] |
Flask's flash if available, logging call if not
|
[
"Flask",
"s",
"flash",
"if",
"available",
"logging",
"call",
"if",
"not"
] |
python
|
train
|
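Outside of a Flask request context flash() raises RuntimeError, which is exactly what the fallback catches; for example:

flasher('Query saved')                    # inside a request: shows a flash message
flasher('Backend unreachable', 'danger')  # outside a request: falls back to logging.error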
pybel/pybel-tools
|
src/pybel_tools/summary/node_properties.py
|
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/summary/node_properties.py#L92-L101
|
def get_causal_sink_nodes(graph: BELGraph, func) -> Set[BaseEntity]:
"""Returns a set of all ABUNDANCE nodes that have an causal out-degree of 0.
This likely means that the knowledge assembly is incomplete, or there is a curation error.
"""
return {
node
for node in graph
if node.function == func and is_causal_sink(graph, node)
}
|
[
"def",
"get_causal_sink_nodes",
"(",
"graph",
":",
"BELGraph",
",",
"func",
")",
"->",
"Set",
"[",
"BaseEntity",
"]",
":",
"return",
"{",
"node",
"for",
"node",
"in",
"graph",
"if",
"node",
".",
"function",
"==",
"func",
"and",
"is_causal_sink",
"(",
"graph",
",",
"node",
")",
"}"
] |
Returns a set of all ABUNDANCE nodes that have a causal out-degree of 0.
This likely means that the knowledge assembly is incomplete, or there is a curation error.
|
[
"Returns",
"a",
"set",
"of",
"all",
"ABUNDANCE",
"nodes",
"that",
"have",
"an",
"causal",
"out",
"-",
"degree",
"of",
"0",
"."
] |
python
|
valid
|
ic-labs/django-icekit
|
icekit/admin_tools/mixins.py
|
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/admin_tools/mixins.py#L36-L44
|
def formfield_for_foreignkey(self, db_field, *args, **kwargs):
"""
Update queryset for ``layout`` field.
"""
formfield = super(FluentLayoutsMixin, self).formfield_for_foreignkey(
db_field, *args, **kwargs)
if db_field.name == 'layout':
formfield.queryset = formfield.queryset.for_model(self.model)
return formfield
|
[
"def",
"formfield_for_foreignkey",
"(",
"self",
",",
"db_field",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"formfield",
"=",
"super",
"(",
"FluentLayoutsMixin",
",",
"self",
")",
".",
"formfield_for_foreignkey",
"(",
"db_field",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"db_field",
".",
"name",
"==",
"'layout'",
":",
"formfield",
".",
"queryset",
"=",
"formfield",
".",
"queryset",
".",
"for_model",
"(",
"self",
".",
"model",
")",
"return",
"formfield"
] |
Update queryset for ``layout`` field.
|
[
"Update",
"queryset",
"for",
"layout",
"field",
"."
] |
python
|
train
|
cocaine/cocaine-tools
|
cocaine/tools/dispatch.py
|
https://github.com/cocaine/cocaine-tools/blob/d8834f8e04ca42817d5f4e368d471484d4b3419f/cocaine/tools/dispatch.py#L823-L833
|
def app_pause(name, **kwargs):
"""
Stop application.
This command is alias for ```cocaine-tool app stop```.
"""
ctx = Context(**kwargs)
ctx.execute_action('app:stop', **{
'node': ctx.repo.create_secure_service('node'),
'name': name,
})
|
[
"def",
"app_pause",
"(",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"ctx",
"=",
"Context",
"(",
"*",
"*",
"kwargs",
")",
"ctx",
".",
"execute_action",
"(",
"'app:stop'",
",",
"*",
"*",
"{",
"'node'",
":",
"ctx",
".",
"repo",
".",
"create_secure_service",
"(",
"'node'",
")",
",",
"'name'",
":",
"name",
",",
"}",
")"
] |
Stop application.
This command is alias for ```cocaine-tool app stop```.
|
[
"Stop",
"application",
"."
] |
python
|
train
|
ttinies/sc2players
|
sc2players/playerRecord.py
|
https://github.com/ttinies/sc2players/blob/fd9b37c268bf1005d9ef73a25e65ed97c8b7895f/sc2players/playerRecord.py#L235-L247
|
def recentMatches(self, **criteria):
"""identify all recent matches for player given optional, additional criteria"""
if not self.matches: return [] # no match history
try: # maxMatches is a specially handled parameter (not true criteria)
maxMatches = criteria["maxMatches"]
del criteria["maxMatches"]
    except KeyError:
maxMatches = c.RECENT_MATCHES
    allMatches = self.matchSubset(**criteria)
    matchTimes = [(m.endTime, m) for m in allMatches]
    selMatches = sorted(matchTimes, reverse=True)[:maxMatches] # slice off X most recent matches
retMatches = [m for endTime,m in selMatches] # extract matches only
return retMatches
|
[
"def",
"recentMatches",
"(",
"self",
",",
"*",
"*",
"criteria",
")",
":",
"if",
"not",
"self",
".",
"matches",
":",
"return",
"[",
"]",
"# no match history",
"try",
":",
"# maxMatches is a specially handled parameter (not true criteria)",
"maxMatches",
"=",
"criteria",
"[",
"\"maxMatches\"",
"]",
"del",
"criteria",
"[",
"\"maxMatches\"",
"]",
"except",
"AttributeError",
":",
"maxMatches",
"=",
"c",
".",
"RECENT_MATCHES",
"alLMatches",
"=",
"self",
".",
"matchSubset",
"(",
"*",
"*",
"criteria",
")",
"matchTimes",
"=",
"[",
"(",
"m",
".",
"endTime",
",",
"m",
")",
"for",
"m",
"in",
"matches",
"]",
"selMatches",
"=",
"sorted",
"(",
"matchTimes",
")",
"[",
":",
"maxMatches",
"]",
"# slice off X most recet matches",
"retMatches",
"=",
"[",
"m",
"for",
"endTime",
",",
"m",
"in",
"selMatches",
"]",
"# extract matches only",
"return",
"retMatches"
] |
identify all recent matches for player given optional, additional criteria
|
[
"identify",
"all",
"recent",
"matches",
"for",
"player",
"given",
"optional",
"additional",
"criteria"
] |
python
|
train
|
shidenggui/easytrader
|
easytrader/xqtrader.py
|
https://github.com/shidenggui/easytrader/blob/e5ae4daeda4ea125763a95b280dd694c7f68257d/easytrader/xqtrader.py#L233-L271
|
def get_entrust(self):
"""
        Get entrust orders (currently returns the results of the last 20 rebalancings).
        Operation amounts are all simulated at 1 lot each.
:return:
"""
xq_entrust_list = self._get_xq_history()
entrust_list = []
replace_none = lambda s: s or 0
for xq_entrusts in xq_entrust_list:
status = xq_entrusts["status"] # 调仓状态
if status == "pending":
status = "已报"
elif status in ["canceled", "failed"]:
status = "废单"
else:
status = "已成"
for entrust in xq_entrusts["rebalancing_histories"]:
price = entrust["price"]
entrust_list.append(
{
"entrust_no": entrust["id"],
"entrust_bs": u"买入"
if entrust["target_weight"]
> replace_none(entrust["prev_weight"])
else u"卖出",
"report_time": self._time_strftime(
entrust["updated_at"]
),
"entrust_status": status,
"stock_code": entrust["stock_symbol"],
"stock_name": entrust["stock_name"],
"business_amount": 100,
"business_price": price,
"entrust_amount": 100,
"entrust_price": price,
}
)
return entrust_list
|
[
"def",
"get_entrust",
"(",
"self",
")",
":",
"xq_entrust_list",
"=",
"self",
".",
"_get_xq_history",
"(",
")",
"entrust_list",
"=",
"[",
"]",
"replace_none",
"=",
"lambda",
"s",
":",
"s",
"or",
"0",
"for",
"xq_entrusts",
"in",
"xq_entrust_list",
":",
"status",
"=",
"xq_entrusts",
"[",
"\"status\"",
"]",
"# 调仓状态",
"if",
"status",
"==",
"\"pending\"",
":",
"status",
"=",
"\"已报\"",
"elif",
"status",
"in",
"[",
"\"canceled\"",
",",
"\"failed\"",
"]",
":",
"status",
"=",
"\"废单\"",
"else",
":",
"status",
"=",
"\"已成\"",
"for",
"entrust",
"in",
"xq_entrusts",
"[",
"\"rebalancing_histories\"",
"]",
":",
"price",
"=",
"entrust",
"[",
"\"price\"",
"]",
"entrust_list",
".",
"append",
"(",
"{",
"\"entrust_no\"",
":",
"entrust",
"[",
"\"id\"",
"]",
",",
"\"entrust_bs\"",
":",
"u\"买入\"",
"if",
"entrust",
"[",
"\"target_weight\"",
"]",
">",
"replace_none",
"(",
"entrust",
"[",
"\"prev_weight\"",
"]",
")",
"else",
"u\"卖出\",",
"",
"\"report_time\"",
":",
"self",
".",
"_time_strftime",
"(",
"entrust",
"[",
"\"updated_at\"",
"]",
")",
",",
"\"entrust_status\"",
":",
"status",
",",
"\"stock_code\"",
":",
"entrust",
"[",
"\"stock_symbol\"",
"]",
",",
"\"stock_name\"",
":",
"entrust",
"[",
"\"stock_name\"",
"]",
",",
"\"business_amount\"",
":",
"100",
",",
"\"business_price\"",
":",
"price",
",",
"\"entrust_amount\"",
":",
"100",
",",
"\"entrust_price\"",
":",
"price",
",",
"}",
")",
"return",
"entrust_list"
] |
Get entrust orders (currently returns the results of the last 20 rebalancings).
Operation amounts are all simulated at 1 lot each.
:return:
|
[
"获取委托单",
"(",
"目前返回20次调仓的结果",
")",
"操作数量都按1手模拟换算的",
":",
"return",
":"
] |
python
|
train
|
wbond/asn1crypto
|
asn1crypto/x509.py
|
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/x509.py#L2748-L2762
|
def valid_ips(self):
"""
:return:
A list of unicode strings of valid IP addresses for the certificate
"""
if self._valid_ips is None:
self._valid_ips = []
if self.subject_alt_name_value:
for general_name in self.subject_alt_name_value:
if general_name.name == 'ip_address':
self._valid_ips.append(general_name.native)
return self._valid_ips
|
[
"def",
"valid_ips",
"(",
"self",
")",
":",
"if",
"self",
".",
"_valid_ips",
"is",
"None",
":",
"self",
".",
"_valid_ips",
"=",
"[",
"]",
"if",
"self",
".",
"subject_alt_name_value",
":",
"for",
"general_name",
"in",
"self",
".",
"subject_alt_name_value",
":",
"if",
"general_name",
".",
"name",
"==",
"'ip_address'",
":",
"self",
".",
"_valid_ips",
".",
"append",
"(",
"general_name",
".",
"native",
")",
"return",
"self",
".",
"_valid_ips"
] |
:return:
A list of unicode strings of valid IP addresses for the certificate
|
[
":",
"return",
":",
"A",
"list",
"of",
"unicode",
"strings",
"of",
"valid",
"IP",
"addresses",
"for",
"the",
"certificate"
] |
python
|
train
|
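A hedged usage sketch: asn1crypto certificates are loaded from DER bytes with Certificate.load, after which the lazily-built property above can be read (the file name here is hypothetical):

from asn1crypto import x509

with open('server.der', 'rb') as f:  # hypothetical DER-encoded certificate
    cert = x509.Certificate.load(f.read())

# First access walks the subjectAltName entries; later calls hit the cache.
print(cert.valid_ips)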
Frzk/Ellis
|
ellis/filter.py
|
https://github.com/Frzk/Ellis/blob/39ce8987cbc503354cf1f45927344186a8b18363/ellis/filter.py#L72-L85
|
def replace_tags(cls, raw_filter):
"""
Searches for known tags in the given string and replaces them with the
corresponding regular expression.
    *raw_filter* is an (optionally tagged) regular expression.
    Returns the regular expression with known tags replaced by the
corresponding regular expression.
"""
for k, v in iter(cls.known_tags.items()):
raw_filter = raw_filter.replace(k, v)
return raw_filter
|
[
"def",
"replace_tags",
"(",
"cls",
",",
"raw_filter",
")",
":",
"for",
"k",
",",
"v",
"in",
"iter",
"(",
"cls",
".",
"known_tags",
".",
"items",
"(",
")",
")",
":",
"raw_filter",
"=",
"raw_filter",
".",
"replace",
"(",
"k",
",",
"v",
")",
"return",
"raw_filter"
] |
Searches for known tags in the given string and replaces them with the
corresponding regular expression.
*raw_filter* is an (optionally tagged) regular expression.
Returns the regular expression with known tags replaced by the
corresponding regular expression.
|
[
"Searches",
"for",
"known",
"tags",
"in",
"the",
"given",
"string",
"and",
"replaces",
"them",
"with",
"the",
"corresponding",
"regular",
"expression",
"."
] |
python
|
train
|
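For illustration, a minimal stand-in class showing how a known_tags mapping drives the substitution; the tag names and patterns below are assumptions, not Ellis's real table:

class Filter(object):
    # Hypothetical tag table; Ellis's actual known_tags differs.
    known_tags = {
        '__IP__': r'(?:\d{1,3}\.){3}\d{1,3}',
        '__USER__': r'[a-z_][a-z0-9_-]*',
    }

    @classmethod
    def replace_tags(cls, raw_filter):
        for k, v in iter(cls.known_tags.items()):
            raw_filter = raw_filter.replace(k, v)
        return raw_filter

print(Filter.replace_tags('Failed login for __USER__ from __IP__'))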
SMTG-UCL/sumo
|
sumo/electronic_structure/optics.py
|
https://github.com/SMTG-UCL/sumo/blob/47aec6bbfa033a624435a65bd4edabd18bfb437f/sumo/electronic_structure/optics.py#L60-L201
|
def calculate_dielectric_properties(dielectric, properties,
average=True):
r"""Calculate optical properties from the dielectric function
Supported properties:
Absorption
~~~~~~~~~~
The unit of alpha is :math:`\mathrm{cm}^{-1}`.
Refractive index :math:`n` has real and imaginary parts:
.. math::
        n = [(e^\prime + ie^{\prime\prime}) / e_0]^{1/2}
= n^\prime + in^{\prime\prime}
Relationship between :math:`a` and imaginary :math:`n^{\prime\prime}`:
.. math::
a = 4 \pi n^{\prime\prime} / \lambda
Where:
.. math:: \lambda = hc/E
Args:
        dielectric (tuple): The high-frequency dielectric data, following
the same format as :obj:`pymatgen.io.vasp.Vasprun.dielectric`.
This is a :obj:`tuple` containing the energy, the real part of the
dielectric tensor, and the imaginary part of the tensor, as a
:obj:`list` of :obj:`floats`. E.g.::
(
[energies],
[[real_xx, real_yy, real_zz, real_xy, real_yz, real_xz]],
[[imag_xx, imag_yy, imag_zz, imag_xy, imag_yz, imag_xz]]
)
properties (set):
The set of properties to return. Intermediate properties will be
calculated as needed. Accepted values: 'eps_real', 'eps_im',
'absorption', 'loss', 'n_real', 'n_imag'
average (:obj:`bool`, optional): Average the dielectric response across
the xx, yy, zz directions and calculate properties with scalar
maths. Defaults to ``True``. If False, solve dielectric matrix to
obtain directional properties, returning xx, yy, zz components.
This may be significantly slower!
Returns:
:obj:`tuple` of :obj:`list` of :obj:`float`: The optical absorption in
:math:`\mathrm{cm}^{-1}`. If ``average`` is ``True``, the data will be
returned as::
([energies], [property]).
If ``average`` is ``False``, the data will be returned as::
([energies], [property_xx, property_yy, property_zz]).
"""
results = {}
def _update_results(keys_vals):
"""Update results dict with selected properties only"""
results.update({prop: (energies, data)
for prop, data in keys_vals.items()
if (prop in properties)})
return results
energies = np.array(dielectric[0])
real_eps = np.array(dielectric[1])
imag_eps = np.array(dielectric[2])
if average:
real_eps = np.average(real_eps[:, :3], axis=1)
imag_eps = np.average(imag_eps[:, :3], axis=1)
results = _update_results({'eps_real': real_eps,
'eps_imag': imag_eps})
eps = real_eps + 1j * imag_eps
if 'loss' in properties:
loss = -np.imag(1/eps)
_update_results({'loss': loss})
if properties.intersection({'n_real', 'n_imag', 'absorption'}):
n = np.sqrt(eps)
_update_results({'n_real': n.real,
'n_imag': n.imag})
if 'absorption' in properties:
alpha = n.imag * energies * 4 * np.pi / 1.23984212E-4
_update_results({'absorption': alpha})
else:
# Work with eps as complex numbers in 9-column 'flattened' matrix
# First interpret 6-column data as symmetric matrix
# Input form xx yy zz xy yz xz
# Indices 0 1 2 3 4 5
n_rows = real_eps.shape[0]
eps = real_eps + 1j * imag_eps
eps = np.array([eps[:, 0], eps[:, 3], eps[:, 5],
eps[:, 3], eps[:, 1], eps[:, 4],
eps[:, 5], eps[:, 4], eps[:, 2]]).T
_update_results(
{'eps_real': eps.real[:, [0, 4, 8]],
'eps_imag': eps.imag[:, [0, 4, 8]]})
# Invert epsilon to obtain energy-loss function
if 'loss' in properties:
def matrix_loss_func(eps_row):
eps_matrix = eps_row.reshape(3, 3)
return -np.linalg.inv(eps_matrix).imag.flatten()
loss = np.array([matrix_loss_func(row) for row in eps])
_update_results({'loss': loss[:, [0, 4, 8]]})
if properties.intersection({'n_real', 'n_imag', 'absorption'}):
def matrix_n(eps_row):
eps_matrix = eps_row.reshape(3, 3)
eigenvals, v = np.linalg.eig(eps_matrix)
d = np.diag(eigenvals)
n = v @ np.sqrt(d) @ np.linalg.inv(v) # Py3.5 matrix mult @ =D
return n.flatten()
n = np.array([matrix_n(row) for row in eps])
_update_results({'n_real': n.real[:, [0, 4, 8]],
'n_imag': n.imag[:, [0, 4, 8]]})
if 'absorption' in properties:
alpha = (n.imag * energies.reshape(n_rows, 1) *
4 * np.pi / 1.23984212E-4)
_update_results({'absorption': alpha[:, [0, 4, 8]]})
return results
|
[
"def",
"calculate_dielectric_properties",
"(",
"dielectric",
",",
"properties",
",",
"average",
"=",
"True",
")",
":",
"results",
"=",
"{",
"}",
"def",
"_update_results",
"(",
"keys_vals",
")",
":",
"\"\"\"Update results dict with selected properties only\"\"\"",
"results",
".",
"update",
"(",
"{",
"prop",
":",
"(",
"energies",
",",
"data",
")",
"for",
"prop",
",",
"data",
"in",
"keys_vals",
".",
"items",
"(",
")",
"if",
"(",
"prop",
"in",
"properties",
")",
"}",
")",
"return",
"results",
"energies",
"=",
"np",
".",
"array",
"(",
"dielectric",
"[",
"0",
"]",
")",
"real_eps",
"=",
"np",
".",
"array",
"(",
"dielectric",
"[",
"1",
"]",
")",
"imag_eps",
"=",
"np",
".",
"array",
"(",
"dielectric",
"[",
"2",
"]",
")",
"if",
"average",
":",
"real_eps",
"=",
"np",
".",
"average",
"(",
"real_eps",
"[",
":",
",",
":",
"3",
"]",
",",
"axis",
"=",
"1",
")",
"imag_eps",
"=",
"np",
".",
"average",
"(",
"imag_eps",
"[",
":",
",",
":",
"3",
"]",
",",
"axis",
"=",
"1",
")",
"results",
"=",
"_update_results",
"(",
"{",
"'eps_real'",
":",
"real_eps",
",",
"'eps_imag'",
":",
"imag_eps",
"}",
")",
"eps",
"=",
"real_eps",
"+",
"1j",
"*",
"imag_eps",
"if",
"'loss'",
"in",
"properties",
":",
"loss",
"=",
"-",
"np",
".",
"imag",
"(",
"1",
"/",
"eps",
")",
"_update_results",
"(",
"{",
"'loss'",
":",
"loss",
"}",
")",
"if",
"properties",
".",
"intersection",
"(",
"{",
"'n_real'",
",",
"'n_imag'",
",",
"'absorption'",
"}",
")",
":",
"n",
"=",
"np",
".",
"sqrt",
"(",
"eps",
")",
"_update_results",
"(",
"{",
"'n_real'",
":",
"n",
".",
"real",
",",
"'n_imag'",
":",
"n",
".",
"imag",
"}",
")",
"if",
"'absorption'",
"in",
"properties",
":",
"alpha",
"=",
"n",
".",
"imag",
"*",
"energies",
"*",
"4",
"*",
"np",
".",
"pi",
"/",
"1.23984212E-4",
"_update_results",
"(",
"{",
"'absorption'",
":",
"alpha",
"}",
")",
"else",
":",
"# Work with eps as complex numbers in 9-column 'flattened' matrix",
"# First interpret 6-column data as symmetric matrix",
"# Input form xx yy zz xy yz xz",
"# Indices 0 1 2 3 4 5",
"n_rows",
"=",
"real_eps",
".",
"shape",
"[",
"0",
"]",
"eps",
"=",
"real_eps",
"+",
"1j",
"*",
"imag_eps",
"eps",
"=",
"np",
".",
"array",
"(",
"[",
"eps",
"[",
":",
",",
"0",
"]",
",",
"eps",
"[",
":",
",",
"3",
"]",
",",
"eps",
"[",
":",
",",
"5",
"]",
",",
"eps",
"[",
":",
",",
"3",
"]",
",",
"eps",
"[",
":",
",",
"1",
"]",
",",
"eps",
"[",
":",
",",
"4",
"]",
",",
"eps",
"[",
":",
",",
"5",
"]",
",",
"eps",
"[",
":",
",",
"4",
"]",
",",
"eps",
"[",
":",
",",
"2",
"]",
"]",
")",
".",
"T",
"_update_results",
"(",
"{",
"'eps_real'",
":",
"eps",
".",
"real",
"[",
":",
",",
"[",
"0",
",",
"4",
",",
"8",
"]",
"]",
",",
"'eps_imag'",
":",
"eps",
".",
"imag",
"[",
":",
",",
"[",
"0",
",",
"4",
",",
"8",
"]",
"]",
"}",
")",
"# Invert epsilon to obtain energy-loss function",
"if",
"'loss'",
"in",
"properties",
":",
"def",
"matrix_loss_func",
"(",
"eps_row",
")",
":",
"eps_matrix",
"=",
"eps_row",
".",
"reshape",
"(",
"3",
",",
"3",
")",
"return",
"-",
"np",
".",
"linalg",
".",
"inv",
"(",
"eps_matrix",
")",
".",
"imag",
".",
"flatten",
"(",
")",
"loss",
"=",
"np",
".",
"array",
"(",
"[",
"matrix_loss_func",
"(",
"row",
")",
"for",
"row",
"in",
"eps",
"]",
")",
"_update_results",
"(",
"{",
"'loss'",
":",
"loss",
"[",
":",
",",
"[",
"0",
",",
"4",
",",
"8",
"]",
"]",
"}",
")",
"if",
"properties",
".",
"intersection",
"(",
"{",
"'n_real'",
",",
"'n_imag'",
",",
"'absorption'",
"}",
")",
":",
"def",
"matrix_n",
"(",
"eps_row",
")",
":",
"eps_matrix",
"=",
"eps_row",
".",
"reshape",
"(",
"3",
",",
"3",
")",
"eigenvals",
",",
"v",
"=",
"np",
".",
"linalg",
".",
"eig",
"(",
"eps_matrix",
")",
"d",
"=",
"np",
".",
"diag",
"(",
"eigenvals",
")",
"n",
"=",
"v",
"@",
"np",
".",
"sqrt",
"(",
"d",
")",
"@",
"np",
".",
"linalg",
".",
"inv",
"(",
"v",
")",
"# Py3.5 matrix mult @ =D",
"return",
"n",
".",
"flatten",
"(",
")",
"n",
"=",
"np",
".",
"array",
"(",
"[",
"matrix_n",
"(",
"row",
")",
"for",
"row",
"in",
"eps",
"]",
")",
"_update_results",
"(",
"{",
"'n_real'",
":",
"n",
".",
"real",
"[",
":",
",",
"[",
"0",
",",
"4",
",",
"8",
"]",
"]",
",",
"'n_imag'",
":",
"n",
".",
"imag",
"[",
":",
",",
"[",
"0",
",",
"4",
",",
"8",
"]",
"]",
"}",
")",
"if",
"'absorption'",
"in",
"properties",
":",
"alpha",
"=",
"(",
"n",
".",
"imag",
"*",
"energies",
".",
"reshape",
"(",
"n_rows",
",",
"1",
")",
"*",
"4",
"*",
"np",
".",
"pi",
"/",
"1.23984212E-4",
")",
"_update_results",
"(",
"{",
"'absorption'",
":",
"alpha",
"[",
":",
",",
"[",
"0",
",",
"4",
",",
"8",
"]",
"]",
"}",
")",
"return",
"results"
] |
r"""Calculate optical properties from the dielectric function
Supported properties:
Absorption
~~~~~~~~~~
The unit of alpha is :math:`\mathrm{cm}^{-1}`.
Refractive index :math:`n` has real and imaginary parts:
.. math::
n = [(e^\prime + ie^{\prime\prime}) / e_0]^{1/2}
= n^\prime + in^{\prime\prime}
Relationship between :math:`a` and imaginary :math:`n^{\prime\prime}`:
.. math::
a = 4 \pi n^{\prime\prime} / \lambda
Where:
.. math:: \lambda = hc/E
Args:
dielectric (tuple): The high-frequency dielectric data, following
the same format as :obj:`pymatgen.io.vasp.Vasprun.dielectric`.
This is a :obj:`tuple` containing the energy, the real part of the
dielectric tensor, and the imaginary part of the tensor, as a
:obj:`list` of :obj:`floats`. E.g.::
(
[energies],
[[real_xx, real_yy, real_zz, real_xy, real_yz, real_xz]],
[[imag_xx, imag_yy, imag_zz, imag_xy, imag_yz, imag_xz]]
)
properties (set):
The set of properties to return. Intermediate properties will be
calculated as needed. Accepted values: 'eps_real', 'eps_imag',
'absorption', 'loss', 'n_real', 'n_imag'
average (:obj:`bool`, optional): Average the dielectric response across
the xx, yy, zz directions and calculate properties with scalar
maths. Defaults to ``True``. If False, solve dielectric matrix to
obtain directional properties, returning xx, yy, zz components.
This may be significantly slower!
Returns:
:obj:`tuple` of :obj:`list` of :obj:`float`: The optical absorption in
:math:`\mathrm{cm}^{-1}`. If ``average`` is ``True``, the data will be
returned as::
([energies], [property]).
If ``average`` is ``False``, the data will be returned as::
([energies], [property_xx, property_yy, property_zz]).
|
[
"r",
"Calculate",
"optical",
"properties",
"from",
"the",
"dielectric",
"function"
] |
python
|
train
|
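A hedged usage sketch: the dielectric tuple is typically taken from a pymatgen Vasprun object, whose .dielectric attribute matches the documented format (the file name is hypothetical and must come from a calculation with optical output):

from pymatgen.io.vasp import Vasprun
from sumo.electronic_structure.optics import calculate_dielectric_properties

vr = Vasprun('vasprun.xml')  # hypothetical VASP output with dielectric data
results = calculate_dielectric_properties(vr.dielectric,
                                          {'absorption', 'loss'},
                                          average=True)
energies, absorption = results['absorption']  # absorption in cm^-1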
saltstack/salt
|
salt/modules/sysrc.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/sysrc.py#L77-L123
|
def set_(name, value, **kwargs):
'''
Set system rc configuration variables
CLI Example:
.. code-block:: bash
salt '*' sysrc.set name=sshd_flags value="-p 2222"
'''
cmd = 'sysrc -v'
if 'file' in kwargs:
cmd += ' -f '+kwargs['file']
if 'jail' in kwargs:
cmd += ' -j '+kwargs['jail']
# This is here because the YAML parser likes to convert the string literals
# YES, NO, Yes, No, True, False, etc. to boolean types. However, in this case,
# we will check to see if that happened and replace it with "YES" or "NO" because
# those items are accepted in sysrc.
if type(value) == bool:
if value:
value = "YES"
else:
value = "NO"
# This is here for the same reason, except for numbers
if type(value) == int:
value = str(value)
cmd += ' '+name+"=\""+value+"\""
sysrcs = __salt__['cmd.run'](cmd)
ret = {}
for sysrc in sysrcs.split("\n"):
rcfile = sysrc.split(': ')[0]
var = sysrc.split(': ')[1]
oldval = sysrc.split(': ')[2].strip().split("->")[0]
newval = sysrc.split(': ')[2].strip().split("->")[1]
if rcfile not in ret:
ret[rcfile] = {}
ret[rcfile][var] = newval
return ret
|
[
"def",
"set_",
"(",
"name",
",",
"value",
",",
"*",
"*",
"kwargs",
")",
":",
"cmd",
"=",
"'sysrc -v'",
"if",
"'file'",
"in",
"kwargs",
":",
"cmd",
"+=",
"' -f '",
"+",
"kwargs",
"[",
"'file'",
"]",
"if",
"'jail'",
"in",
"kwargs",
":",
"cmd",
"+=",
"' -j '",
"+",
"kwargs",
"[",
"'jail'",
"]",
"# This is here because the YAML parser likes to convert the string literals",
"# YES, NO, Yes, No, True, False, etc. to boolean types. However, in this case,",
"# we will check to see if that happened and replace it with \"YES\" or \"NO\" because",
"# those items are accepted in sysrc.",
"if",
"type",
"(",
"value",
")",
"==",
"bool",
":",
"if",
"value",
":",
"value",
"=",
"\"YES\"",
"else",
":",
"value",
"=",
"\"NO\"",
"# This is here for the same reason, except for numbers",
"if",
"type",
"(",
"value",
")",
"==",
"int",
":",
"value",
"=",
"str",
"(",
"value",
")",
"cmd",
"+=",
"' '",
"+",
"name",
"+",
"\"=\\\"\"",
"+",
"value",
"+",
"\"\\\"\"",
"sysrcs",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
")",
"ret",
"=",
"{",
"}",
"for",
"sysrc",
"in",
"sysrcs",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"rcfile",
"=",
"sysrc",
".",
"split",
"(",
"': '",
")",
"[",
"0",
"]",
"var",
"=",
"sysrc",
".",
"split",
"(",
"': '",
")",
"[",
"1",
"]",
"oldval",
"=",
"sysrc",
".",
"split",
"(",
"': '",
")",
"[",
"2",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"->\"",
")",
"[",
"0",
"]",
"newval",
"=",
"sysrc",
".",
"split",
"(",
"': '",
")",
"[",
"2",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"->\"",
")",
"[",
"1",
"]",
"if",
"rcfile",
"not",
"in",
"ret",
":",
"ret",
"[",
"rcfile",
"]",
"=",
"{",
"}",
"ret",
"[",
"rcfile",
"]",
"[",
"var",
"]",
"=",
"newval",
"return",
"ret"
] |
Set system rc configuration variables
CLI Example:
.. code-block:: bash
salt '*' sysrc.set name=sshd_flags value="-p 2222"
|
[
"Set",
"system",
"rc",
"configuration",
"variables"
] |
python
|
train
|
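The return-value construction above can be exercised in isolation: each line of sysrc -v output has the shape 'rcfile: var: old -> new'. A small sketch of the same parsing over a made-up output line:

sample = '/etc/rc.conf: sshd_flags:  -> -p 2222'  # made-up sysrc -v output
ret = {}
for line in sample.split("\n"):
    rcfile = line.split(': ')[0]
    var = line.split(': ')[1]
    newval = line.split(': ')[2].strip().split("->")[1]
    ret.setdefault(rcfile, {})[var] = newval
print(ret)  # {'/etc/rc.conf': {'sshd_flags': ' -p 2222'}}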
DataBiosphere/toil
|
src/toil/lib/iterables.py
|
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/iterables.py#L25-L32
|
def flatten( iterables ):
""" Flatten an iterable, except for string elements. """
for it in iterables:
if isinstance(it, str):
yield it
else:
for element in it:
yield element
|
[
"def",
"flatten",
"(",
"iterables",
")",
":",
"for",
"it",
"in",
"iterables",
":",
"if",
"isinstance",
"(",
"it",
",",
"str",
")",
":",
"yield",
"it",
"else",
":",
"for",
"element",
"in",
"it",
":",
"yield",
"element"
] |
Flatten an iterable, except for string elements.
|
[
"Flatten",
"an",
"iterable",
"except",
"for",
"string",
"elements",
"."
] |
python
|
train
|
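Usage is straightforward; strings pass through whole instead of being split into characters. A self-contained check (generator repeated from above):

def flatten(iterables):
    for it in iterables:
        if isinstance(it, str):
            yield it
        else:
            for element in it:
                yield element

# Strings survive intact; other iterables are expanded one level.
assert list(flatten([[1, 2], 'abc', (3,)])) == [1, 2, 'abc', 3]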
mdiener/grace
|
grace/py27/slimit/parser.py
|
https://github.com/mdiener/grace/blob/2dab13a2cf636da5da989904c5885166fc94d36d/grace/py27/slimit/parser.py#L290-L298
|
def p_object_literal(self, p):
"""object_literal : LBRACE RBRACE
| LBRACE property_list RBRACE
| LBRACE property_list COMMA RBRACE
"""
if len(p) == 3:
p[0] = ast.Object()
else:
p[0] = ast.Object(properties=p[2])
|
[
"def",
"p_object_literal",
"(",
"self",
",",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"3",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"Object",
"(",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"Object",
"(",
"properties",
"=",
"p",
"[",
"2",
"]",
")"
] |
object_literal : LBRACE RBRACE
| LBRACE property_list RBRACE
| LBRACE property_list COMMA RBRACE
|
[
"object_literal",
":",
"LBRACE",
"RBRACE",
"|",
"LBRACE",
"property_list",
"RBRACE",
"|",
"LBRACE",
"property_list",
"COMMA",
"RBRACE"
] |
python
|
train
|
HPENetworking/PYHPEIMC
|
archived/pyhpimc.py
|
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/archived/pyhpimc.py#L217-L240
|
def get_device_access_interfaces(devId):
"""Function takes devId as input to RESTFUL call to HP IMC platform
:param devId: requires deviceID as the only input parameter
:return: list of dictionaries containing interfaces configured as access ports
"""
# checks to see if the imc credentials are already available
if auth is None or url is None:
set_imc_creds()
global r
get_access_interface_vlan_url = "/imcrs/vlan/access?devId=" + str(devId) + "&start=1&size=500&total=false"
f_url = url + get_access_interface_vlan_url
payload = None
    # issue the GET request to the IMC REST interface
r = requests.get(f_url, auth=auth, headers=headers)
# r.status_code
if r.status_code == 200:
dev_access_interfaces = (json.loads(r.text))
if len(dev_access_interfaces) == 2:
return dev_access_interfaces['accessIf']
else:
dev_access_interfaces['accessIf'] = ["No access inteface"]
return dev_access_interfaces['accessIf']
else:
print("get_device_access_interfaces: An Error has occured")
|
[
"def",
"get_device_access_interfaces",
"(",
"devId",
")",
":",
"# checks to see if the imc credentials are already available",
"if",
"auth",
"is",
"None",
"or",
"url",
"is",
"None",
":",
"set_imc_creds",
"(",
")",
"global",
"r",
"get_access_interface_vlan_url",
"=",
"\"/imcrs/vlan/access?devId=\"",
"+",
"str",
"(",
"devId",
")",
"+",
"\"&start=1&size=500&total=false\"",
"f_url",
"=",
"url",
"+",
"get_access_interface_vlan_url",
"payload",
"=",
"None",
"# creates the URL using the payload variable as the contents",
"r",
"=",
"requests",
".",
"get",
"(",
"f_url",
",",
"auth",
"=",
"auth",
",",
"headers",
"=",
"headers",
")",
"# r.status_code",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"dev_access_interfaces",
"=",
"(",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
")",
"if",
"len",
"(",
"dev_access_interfaces",
")",
"==",
"2",
":",
"return",
"dev_access_interfaces",
"[",
"'accessIf'",
"]",
"else",
":",
"dev_access_interfaces",
"[",
"'accessIf'",
"]",
"=",
"[",
"\"No access inteface\"",
"]",
"return",
"dev_access_interfaces",
"[",
"'accessIf'",
"]",
"else",
":",
"print",
"(",
"\"get_device_access_interfaces: An Error has occured\"",
")"
] |
Function takes devId as input to RESTFUL call to HP IMC platform
:param devId: requires deviceID as the only input parameter
:return: list of dictionaries containing interfaces configured as access ports
|
[
"Function",
"takes",
"devId",
"as",
"input",
"to",
"RESTFUL",
"call",
"to",
"HP",
"IMC",
"platform",
":",
"param",
"devId",
":",
"requires",
"deviceID",
"as",
"the",
"only",
"input",
"parameter",
":",
"return",
":",
"list",
"of",
"dictionaries",
"containing",
"interfaces",
"configured",
"as",
"access",
"ports"
] |
python
|
train
|
huge-success/sanic
|
sanic/response.py
|
https://github.com/huge-success/sanic/blob/6a4a3f617fdbe1d3ee8bdc9d1b12ad2d0b34acdd/sanic/response.py#L389-L415
|
def stream(
streaming_fn,
status=200,
headers=None,
content_type="text/plain; charset=utf-8",
):
"""Accepts an coroutine `streaming_fn` which can be used to
write chunks to a streaming response. Returns a `StreamingHTTPResponse`.
Example usage::
@app.route("/")
async def index(request):
async def streaming_fn(response):
await response.write('foo')
await response.write('bar')
return stream(streaming_fn, content_type='text/plain')
:param streaming_fn: A coroutine accepts a response and
writes content to that response.
    :param content_type: Specific content type.
:param headers: Custom Headers.
"""
return StreamingHTTPResponse(
streaming_fn, headers=headers, content_type=content_type, status=status
)
|
[
"def",
"stream",
"(",
"streaming_fn",
",",
"status",
"=",
"200",
",",
"headers",
"=",
"None",
",",
"content_type",
"=",
"\"text/plain; charset=utf-8\"",
",",
")",
":",
"return",
"StreamingHTTPResponse",
"(",
"streaming_fn",
",",
"headers",
"=",
"headers",
",",
"content_type",
"=",
"content_type",
",",
"status",
"=",
"status",
")"
] |
Accepts a coroutine `streaming_fn` which can be used to
write chunks to a streaming response. Returns a `StreamingHTTPResponse`.
Example usage::
@app.route("/")
async def index(request):
async def streaming_fn(response):
await response.write('foo')
await response.write('bar')
return stream(streaming_fn, content_type='text/plain')
:param streaming_fn: A coroutine accepts a response and
writes content to that response.
:param content_type: Specific content type.
:param headers: Custom Headers.
|
[
"Accepts",
"an",
"coroutine",
"streaming_fn",
"which",
"can",
"be",
"used",
"to",
"write",
"chunks",
"to",
"a",
"streaming",
"response",
".",
"Returns",
"a",
"StreamingHTTPResponse",
"."
] |
python
|
train
|
oxalorg/ghPublish
|
ghPublish/ghPublish.py
|
https://github.com/oxalorg/ghPublish/blob/aa3ec8fd2187efd99cffc1a5a76eda4ff2a3b636/ghPublish/ghPublish.py#L44-L67
|
def publish_post(self):
"""
If it's a new file, add it.
Else, update it.
"""
payload = {'content': self.content_base64.decode('utf-8')}
sha_blob = self.get_sha_blob()
if sha_blob:
commit_msg = 'ghPublish UPDATE: {}'.format(self.title)
payload.update(sha=sha_blob)
payload.update(message=commit_msg)
else:
commit_msg = 'ghPublish ADD: {}'.format(self.title)
payload.update(message=commit_msg)
r = requests.put(self.api_url,
auth=self.get_auth_details(),
data=json.dumps(payload))
try:
url = r.json()['content']['html_url']
return r.status_code, url
except KeyError:
return r.status_code, None
|
[
"def",
"publish_post",
"(",
"self",
")",
":",
"payload",
"=",
"{",
"'content'",
":",
"self",
".",
"content_base64",
".",
"decode",
"(",
"'utf-8'",
")",
"}",
"sha_blob",
"=",
"self",
".",
"get_sha_blob",
"(",
")",
"if",
"sha_blob",
":",
"commit_msg",
"=",
"'ghPublish UPDATE: {}'",
".",
"format",
"(",
"self",
".",
"title",
")",
"payload",
".",
"update",
"(",
"sha",
"=",
"sha_blob",
")",
"payload",
".",
"update",
"(",
"message",
"=",
"commit_msg",
")",
"else",
":",
"commit_msg",
"=",
"'ghPublish ADD: {}'",
".",
"format",
"(",
"self",
".",
"title",
")",
"payload",
".",
"update",
"(",
"message",
"=",
"commit_msg",
")",
"r",
"=",
"requests",
".",
"put",
"(",
"self",
".",
"api_url",
",",
"auth",
"=",
"self",
".",
"get_auth_details",
"(",
")",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"payload",
")",
")",
"try",
":",
"url",
"=",
"r",
".",
"json",
"(",
")",
"[",
"'content'",
"]",
"[",
"'html_url'",
"]",
"return",
"r",
".",
"status_code",
",",
"url",
"except",
"KeyError",
":",
"return",
"r",
".",
"status_code",
",",
"None"
] |
If it's a new file, add it.
Else, update it.
|
[
"If",
"it",
"s",
"a",
"new",
"file",
"add",
"it",
".",
"Else",
"update",
"it",
"."
] |
python
|
train
|
theno/fabsetup
|
fabsetup/fabfile/setup/nvm.py
|
https://github.com/theno/fabsetup/blob/ced728abff93551ba5677e63bc1bdc0ef5ca5777/fabsetup/fabfile/setup/nvm.py#L37-L47
|
def enable_nvm():
'''add to ~/.bashrc: Export of $NVM env variable and load nvm command.'''
bash_snippet = '~/.bashrc_nvm'
install_file_legacy(path=bash_snippet)
prefix = flo('if [ -f {bash_snippet} ]; ')
enabler = flo('if [ -f {bash_snippet} ]; then source {bash_snippet}; fi')
if env.host == 'localhost':
uncomment_or_update_or_append_line(filename='~/.bashrc', prefix=prefix,
new_line=enabler)
else:
print(cyan('\nappend to ~/.bashrc:\n\n ') + enabler)
|
[
"def",
"enable_nvm",
"(",
")",
":",
"bash_snippet",
"=",
"'~/.bashrc_nvm'",
"install_file_legacy",
"(",
"path",
"=",
"bash_snippet",
")",
"prefix",
"=",
"flo",
"(",
"'if [ -f {bash_snippet} ]; '",
")",
"enabler",
"=",
"flo",
"(",
"'if [ -f {bash_snippet} ]; then source {bash_snippet}; fi'",
")",
"if",
"env",
".",
"host",
"==",
"'localhost'",
":",
"uncomment_or_update_or_append_line",
"(",
"filename",
"=",
"'~/.bashrc'",
",",
"prefix",
"=",
"prefix",
",",
"new_line",
"=",
"enabler",
")",
"else",
":",
"print",
"(",
"cyan",
"(",
"'\\nappend to ~/.bashrc:\\n\\n '",
")",
"+",
"enabler",
")"
] |
add to ~/.bashrc: Export of $NVM env variable and load nvm command.
|
[
"add",
"to",
"~",
"/",
".",
"bashrc",
":",
"Export",
"of",
"$NVM",
"env",
"variable",
"and",
"load",
"nvm",
"command",
"."
] |
python
|
train
|
streamlink/streamlink
|
src/streamlink/plugin/api/support_plugin.py
|
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/plugin/api/support_plugin.py#L9-L30
|
def load_support_plugin(name):
"""Loads a plugin from the same directory as the calling plugin.
The path used is extracted from the last call in module scope,
therefore this must be called only from module level in the
originating plugin or the correct plugin path will not be found.
"""
# Get the path of the caller module
stack = list(filter(lambda f: f[3] == "<module>", inspect.stack()))
prev_frame = stack[0]
path = os.path.dirname(prev_frame[1])
# Major hack. If we are frozen by bbfreeze the stack trace will
# contain relative paths. We therefore use the __file__ variable
# in this module to correct it.
if not os.path.isabs(path):
prefix = os.path.normpath(__file__ + "../../../../../")
path = os.path.join(prefix, path)
return load_module(name, path)
|
[
"def",
"load_support_plugin",
"(",
"name",
")",
":",
"# Get the path of the caller module",
"stack",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"f",
":",
"f",
"[",
"3",
"]",
"==",
"\"<module>\"",
",",
"inspect",
".",
"stack",
"(",
")",
")",
")",
"prev_frame",
"=",
"stack",
"[",
"0",
"]",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"prev_frame",
"[",
"1",
"]",
")",
"# Major hack. If we are frozen by bbfreeze the stack trace will",
"# contain relative paths. We therefore use the __file__ variable",
"# in this module to correct it.",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
":",
"prefix",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"__file__",
"+",
"\"../../../../../\"",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"path",
")",
"return",
"load_module",
"(",
"name",
",",
"path",
")"
] |
Loads a plugin from the same directory as the calling plugin.
The path used is extracted from the last call in module scope,
therefore this must be called only from module level in the
originating plugin or the correct plugin path will not be found.
|
[
"Loads",
"a",
"plugin",
"from",
"the",
"same",
"directory",
"as",
"the",
"calling",
"plugin",
"."
] |
python
|
test
|
acorg/dark-matter
|
dark/reads.py
|
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/reads.py#L535-L553
|
def toDict(self):
"""
Get information about this read in a dictionary.
@return: A C{dict} with keys/values for the attributes of self.
"""
if six.PY3:
result = super().toDict()
else:
result = AARead.toDict(self)
result.update({
'start': self.start,
'stop': self.stop,
'openLeft': self.openLeft,
'openRight': self.openRight,
})
return result
|
[
"def",
"toDict",
"(",
"self",
")",
":",
"if",
"six",
".",
"PY3",
":",
"result",
"=",
"super",
"(",
")",
".",
"toDict",
"(",
")",
"else",
":",
"result",
"=",
"AARead",
".",
"toDict",
"(",
"self",
")",
"result",
".",
"update",
"(",
"{",
"'start'",
":",
"self",
".",
"start",
",",
"'stop'",
":",
"self",
".",
"stop",
",",
"'openLeft'",
":",
"self",
".",
"openLeft",
",",
"'openRight'",
":",
"self",
".",
"openRight",
",",
"}",
")",
"return",
"result"
] |
Get information about this read in a dictionary.
@return: A C{dict} with keys/values for the attributes of self.
|
[
"Get",
"information",
"about",
"this",
"read",
"in",
"a",
"dictionary",
"."
] |
python
|
train
|
googleapis/google-cloud-python
|
websecurityscanner/google/cloud/websecurityscanner_v1alpha/gapic/web_security_scanner_client.py
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/websecurityscanner/google/cloud/websecurityscanner_v1alpha/gapic/web_security_scanner_client.py#L87-L95
|
def finding_path(cls, project, scan_config, scan_run, finding):
"""Return a fully-qualified finding string."""
return google.api_core.path_template.expand(
"projects/{project}/scanConfigs/{scan_config}/scanRuns/{scan_run}/findings/{finding}",
project=project,
scan_config=scan_config,
scan_run=scan_run,
finding=finding,
)
|
[
"def",
"finding_path",
"(",
"cls",
",",
"project",
",",
"scan_config",
",",
"scan_run",
",",
"finding",
")",
":",
"return",
"google",
".",
"api_core",
".",
"path_template",
".",
"expand",
"(",
"\"projects/{project}/scanConfigs/{scan_config}/scanRuns/{scan_run}/findings/{finding}\"",
",",
"project",
"=",
"project",
",",
"scan_config",
"=",
"scan_config",
",",
"scan_run",
"=",
"scan_run",
",",
"finding",
"=",
"finding",
",",
")"
] |
Return a fully-qualified finding string.
|
[
"Return",
"a",
"fully",
"-",
"qualified",
"finding",
"string",
"."
] |
python
|
train
|
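For a simple template like this one, the expansion reduces to str.format; a sketch with hypothetical identifiers:

template = ("projects/{project}/scanConfigs/{scan_config}"
            "/scanRuns/{scan_run}/findings/{finding}")
print(template.format(project='my-project', scan_config='cfg1',
                      scan_run='run1', finding='42'))
# projects/my-project/scanConfigs/cfg1/scanRuns/run1/findings/42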
proycon/pynlpl
|
pynlpl/formats/folia.py
|
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L514-L540
|
def parse_datetime(s): #source: http://stackoverflow.com/questions/2211362/how-to-parse-xsddatetime-format
"""Returns (datetime, tz offset in minutes) or (None, None)."""
m = re.match(r""" ^
(?P<year>-?[0-9]{4}) - (?P<month>[0-9]{2}) - (?P<day>[0-9]{2})
T (?P<hour>[0-9]{2}) : (?P<minute>[0-9]{2}) : (?P<second>[0-9]{2})
(?P<microsecond>\.[0-9]{1,6})?
(?P<tz>
Z | (?P<tz_hr>[-+][0-9]{2}) : (?P<tz_min>[0-9]{2})
)?
$ """, s, re.X)
if m is not None:
values = m.groupdict()
#if values["tz"] in ("Z", None):
# tz = 0
#else:
# tz = int(values["tz_hr"]) * 60 + int(values["tz_min"])
if values["microsecond"] is None:
values["microsecond"] = 0
else:
values["microsecond"] = values["microsecond"][1:]
values["microsecond"] += "0" * (6 - len(values["microsecond"]))
values = dict((k, int(v)) for k, v in values.items() if not k.startswith("tz"))
try:
return datetime(**values) # , tz
except ValueError:
pass
return None
|
[
"def",
"parse_datetime",
"(",
"s",
")",
":",
"#source: http://stackoverflow.com/questions/2211362/how-to-parse-xsddatetime-format",
"m",
"=",
"re",
".",
"match",
"(",
"r\"\"\" ^\n (?P<year>-?[0-9]{4}) - (?P<month>[0-9]{2}) - (?P<day>[0-9]{2})\n T (?P<hour>[0-9]{2}) : (?P<minute>[0-9]{2}) : (?P<second>[0-9]{2})\n (?P<microsecond>\\.[0-9]{1,6})?\n (?P<tz>\n Z | (?P<tz_hr>[-+][0-9]{2}) : (?P<tz_min>[0-9]{2})\n )?\n $ \"\"\"",
",",
"s",
",",
"re",
".",
"X",
")",
"if",
"m",
"is",
"not",
"None",
":",
"values",
"=",
"m",
".",
"groupdict",
"(",
")",
"#if values[\"tz\"] in (\"Z\", None):",
"# tz = 0",
"#else:",
"# tz = int(values[\"tz_hr\"]) * 60 + int(values[\"tz_min\"])",
"if",
"values",
"[",
"\"microsecond\"",
"]",
"is",
"None",
":",
"values",
"[",
"\"microsecond\"",
"]",
"=",
"0",
"else",
":",
"values",
"[",
"\"microsecond\"",
"]",
"=",
"values",
"[",
"\"microsecond\"",
"]",
"[",
"1",
":",
"]",
"values",
"[",
"\"microsecond\"",
"]",
"+=",
"\"0\"",
"*",
"(",
"6",
"-",
"len",
"(",
"values",
"[",
"\"microsecond\"",
"]",
")",
")",
"values",
"=",
"dict",
"(",
"(",
"k",
",",
"int",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"values",
".",
"items",
"(",
")",
"if",
"not",
"k",
".",
"startswith",
"(",
"\"tz\"",
")",
")",
"try",
":",
"return",
"datetime",
"(",
"*",
"*",
"values",
")",
"# , tz",
"except",
"ValueError",
":",
"pass",
"return",
"None"
] |
Returns a datetime instance or None (timezone offset is parsed but discarded).
|
[
"Returns",
"(",
"datetime",
"tz",
"offset",
"in",
"minutes",
")",
"or",
"(",
"None",
"None",
")",
"."
] |
python
|
train
|
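A quick usage check with xsd:dateTime strings (any timezone suffix is matched but then discarded):

from pynlpl.formats.folia import parse_datetime

print(parse_datetime('2017-03-01T12:30:45.5Z'))  # 2017-03-01 12:30:45.500000
print(parse_datetime('not a date'))              # None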
SeabornGames/RequestClient
|
seaborn/request_client/connection_endpoint.py
|
https://github.com/SeabornGames/RequestClient/blob/21aeb951ddfdb6ee453ad0edc896ff224e06425d/seaborn/request_client/connection_endpoint.py#L57-L68
|
def reinstantiate_endpoints(self, endpoint=None):
"""
This will re-instantiate the endpoints with the connection this time
:param endpoint: Endpoint object to instantiate the sub endpoint in.
:return: None
"""
endpoint = endpoint or self
for k, v in endpoint.__class__.__dict__.items():
if isinstance(v, Endpoint):
setattr(endpoint, k, v.__class__(self))
elif inspect.isclass(v) and issubclass(v, Endpoint):
setattr(endpoint, k, v(self))
|
[
"def",
"reinstantiate_endpoints",
"(",
"self",
",",
"endpoint",
"=",
"None",
")",
":",
"endpoint",
"=",
"endpoint",
"or",
"self",
"for",
"k",
",",
"v",
"in",
"endpoint",
".",
"__class__",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"Endpoint",
")",
":",
"setattr",
"(",
"endpoint",
",",
"k",
",",
"v",
".",
"__class__",
"(",
"self",
")",
")",
"elif",
"inspect",
".",
"isclass",
"(",
"v",
")",
"and",
"issubclass",
"(",
"v",
",",
"Endpoint",
")",
":",
"setattr",
"(",
"endpoint",
",",
"k",
",",
"v",
"(",
"self",
")",
")"
] |
This will re-instantiate the endpoints with the connection this time
:param endpoint: Endpoint object to instantiate the sub endpoint in.
:return: None
|
[
"This",
"will",
"re",
"-",
"instantiate",
"the",
"endpoints",
"with",
"the",
"connection",
"this",
"time",
":",
"param",
"endpoint",
":",
"Endpoint",
"object",
"to",
"instantiate",
"the",
"sub",
"endpoint",
"in",
".",
":",
"return",
":",
"None"
] |
python
|
train
|
DataDog/integrations-core
|
datadog_checks_base/datadog_checks/base/utils/containers.py
|
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_base/datadog_checks/base/utils/containers.py#L7-L21
|
def freeze(o):
"""
Freezes any mutable object including dictionaries and lists for hashing.
Accepts nested dictionaries.
"""
if isinstance(o, (tuple, list)):
return tuple(sorted(freeze(e) for e in o))
if isinstance(o, dict):
return tuple(sorted((k, freeze(v)) for k, v in iteritems(o)))
if isinstance(o, (set, frozenset)):
return tuple(sorted(freeze(e) for e in o))
return o
|
[
"def",
"freeze",
"(",
"o",
")",
":",
"if",
"isinstance",
"(",
"o",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"return",
"tuple",
"(",
"sorted",
"(",
"freeze",
"(",
"e",
")",
"for",
"e",
"in",
"o",
")",
")",
"if",
"isinstance",
"(",
"o",
",",
"dict",
")",
":",
"return",
"tuple",
"(",
"sorted",
"(",
"(",
"k",
",",
"freeze",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"o",
")",
")",
")",
"if",
"isinstance",
"(",
"o",
",",
"(",
"set",
",",
"frozenset",
")",
")",
":",
"return",
"tuple",
"(",
"sorted",
"(",
"freeze",
"(",
"e",
")",
"for",
"e",
"in",
"o",
")",
")",
"return",
"o"
] |
Freezes any mutable object including dictionaries and lists for hashing.
Accepts nested dictionaries.
|
[
"Freezes",
"any",
"mutable",
"object",
"including",
"dictionaries",
"and",
"lists",
"for",
"hashing",
".",
"Accepts",
"nested",
"dictionaries",
"."
] |
python
|
train
|
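Because every container is converted to a sorted tuple, the result is hashable and insensitive to ordering, which is the point when using configs as dict keys or cache keys:

from datadog_checks.base.utils.containers import freeze

a = freeze({'tags': ['env:prod', 'role:db'], 'port': 5432})
b = freeze({'port': 5432, 'tags': ['role:db', 'env:prod']})
assert a == b and hash(a) == hash(b)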
Stranger6667/browserstacker
|
browserstacker/screenshots.py
|
https://github.com/Stranger6667/browserstacker/blob/1c98870c3f112bb8b59b864896fd0752bd397c9e/browserstacker/screenshots.py#L106-L116
|
def generate(self, url, browsers=None, orientation=None, mac_res=None, win_res=None,
quality=None, local=None, wait_time=None, callback_url=None):
"""
Generates screenshots for a URL.
"""
if isinstance(browsers, dict):
browsers = [browsers]
if browsers is None:
browsers = [self.default_browser]
data = dict((key, value) for key, value in locals().items() if value is not None and key != 'self')
return self.execute('POST', '/screenshots', json=data)
|
[
"def",
"generate",
"(",
"self",
",",
"url",
",",
"browsers",
"=",
"None",
",",
"orientation",
"=",
"None",
",",
"mac_res",
"=",
"None",
",",
"win_res",
"=",
"None",
",",
"quality",
"=",
"None",
",",
"local",
"=",
"None",
",",
"wait_time",
"=",
"None",
",",
"callback_url",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"browsers",
",",
"dict",
")",
":",
"browsers",
"=",
"[",
"browsers",
"]",
"if",
"browsers",
"is",
"None",
":",
"browsers",
"=",
"[",
"self",
".",
"default_browser",
"]",
"data",
"=",
"dict",
"(",
"(",
"key",
",",
"value",
")",
"for",
"key",
",",
"value",
"in",
"locals",
"(",
")",
".",
"items",
"(",
")",
"if",
"value",
"is",
"not",
"None",
"and",
"key",
"!=",
"'self'",
")",
"return",
"self",
".",
"execute",
"(",
"'POST'",
",",
"'/screenshots'",
",",
"json",
"=",
"data",
")"
] |
Generates screenshots for a URL.
|
[
"Generates",
"screenshots",
"for",
"a",
"URL",
"."
] |
python
|
train
|
ellmetha/django-machina
|
machina/apps/forum_conversation/views.py
|
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_conversation/views.py#L565-L583
|
def get_success_url(self):
""" Returns the URL to redirect the user to upon valid form processing. """
if not self.forum_post.approved:
return reverse(
'forum:forum',
kwargs={
'slug': self.forum_post.topic.forum.slug,
'pk': self.forum_post.topic.forum.pk,
},
)
return reverse(
'forum_conversation:topic',
kwargs={
'forum_slug': self.forum_post.topic.forum.slug,
'forum_pk': self.forum_post.topic.forum.pk,
'slug': self.forum_post.topic.slug,
'pk': self.forum_post.topic.pk,
},
)
|
[
"def",
"get_success_url",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"forum_post",
".",
"approved",
":",
"return",
"reverse",
"(",
"'forum:forum'",
",",
"kwargs",
"=",
"{",
"'slug'",
":",
"self",
".",
"forum_post",
".",
"topic",
".",
"forum",
".",
"slug",
",",
"'pk'",
":",
"self",
".",
"forum_post",
".",
"topic",
".",
"forum",
".",
"pk",
",",
"}",
",",
")",
"return",
"reverse",
"(",
"'forum_conversation:topic'",
",",
"kwargs",
"=",
"{",
"'forum_slug'",
":",
"self",
".",
"forum_post",
".",
"topic",
".",
"forum",
".",
"slug",
",",
"'forum_pk'",
":",
"self",
".",
"forum_post",
".",
"topic",
".",
"forum",
".",
"pk",
",",
"'slug'",
":",
"self",
".",
"forum_post",
".",
"topic",
".",
"slug",
",",
"'pk'",
":",
"self",
".",
"forum_post",
".",
"topic",
".",
"pk",
",",
"}",
",",
")"
] |
Returns the URL to redirect the user to upon valid form processing.
|
[
"Returns",
"the",
"URL",
"to",
"redirect",
"the",
"user",
"to",
"upon",
"valid",
"form",
"processing",
"."
] |
python
|
train
|
Robpol86/libnl
|
libnl/attr.py
|
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/attr.py#L569-L583
|
def nla_put_string(msg, attrtype, value):
"""Add string attribute to Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L674
Positional arguments:
msg -- Netlink message (nl_msg class instance).
attrtype -- attribute type (integer).
value -- bytes() or bytearray() value (e.g. 'Test'.encode('ascii')).
Returns:
0 on success or a negative error code.
"""
data = bytearray(value) + bytearray(b'\0')
return nla_put(msg, attrtype, len(data), data)
|
[
"def",
"nla_put_string",
"(",
"msg",
",",
"attrtype",
",",
"value",
")",
":",
"data",
"=",
"bytearray",
"(",
"value",
")",
"+",
"bytearray",
"(",
"b'\\0'",
")",
"return",
"nla_put",
"(",
"msg",
",",
"attrtype",
",",
"len",
"(",
"data",
")",
",",
"data",
")"
] |
Add string attribute to Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L674
Positional arguments:
msg -- Netlink message (nl_msg class instance).
attrtype -- attribute type (integer).
value -- bytes() or bytearray() value (e.g. 'Test'.encode('ascii')).
Returns:
0 on success or a negative error code.
|
[
"Add",
"string",
"attribute",
"to",
"Netlink",
"message",
"."
] |
python
|
train
|
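A sketch under the assumption that a message was allocated with this library's nlmsg_alloc; note that the helper itself appends the trailing NUL byte:

from libnl.attr import nla_put_string
from libnl.msg import nlmsg_alloc

IFLA_IFNAME = 3  # attribute type constant from <linux/if_link.h>

msg = nlmsg_alloc()
ret = nla_put_string(msg, IFLA_IFNAME, b'eth0')  # payload becomes b'eth0\0'
assert ret == 0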
ml4ai/delphi
|
delphi/GrFN/networks.py
|
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/GrFN/networks.py#L428-L446
|
def from_fortran_src(cls, fortran_src: str, dir: str = "."):
""" Create a GroundedFunctionNetwork instance from a string with raw
Fortran code.
Args:
fortran_src: A string with Fortran source code.
dir: (Optional) - the directory in which the temporary Fortran file
will be created (make sure you have write permission!) Defaults to
the current directory.
Returns:
A GroundedFunctionNetwork instance
"""
import tempfile
fp = tempfile.NamedTemporaryFile('w+t', delete=False, dir=dir)
fp.writelines(fortran_src)
fp.close()
G = cls.from_fortran_file(fp.name, dir)
os.remove(fp.name)
return G
|
[
"def",
"from_fortran_src",
"(",
"cls",
",",
"fortran_src",
":",
"str",
",",
"dir",
":",
"str",
"=",
"\".\"",
")",
":",
"import",
"tempfile",
"fp",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"'w+t'",
",",
"delete",
"=",
"False",
",",
"dir",
"=",
"dir",
")",
"fp",
".",
"writelines",
"(",
"fortran_src",
")",
"fp",
".",
"close",
"(",
")",
"G",
"=",
"cls",
".",
"from_fortran_file",
"(",
"fp",
".",
"name",
",",
"dir",
")",
"os",
".",
"remove",
"(",
"fp",
".",
"name",
")",
"return",
"G"
] |
Create a GroundedFunctionNetwork instance from a string with raw
Fortran code.
Args:
fortran_src: A string with Fortran source code.
dir: (Optional) - the directory in which the temporary Fortran file
will be created (make sure you have write permission!) Defaults to
the current directory.
Returns:
A GroundedFunctionNetwork instance
|
[
"Create",
"a",
"GroundedFunctionNetwork",
"instance",
"from",
"a",
"string",
"with",
"raw",
"Fortran",
"code",
"."
] |
python
|
train
|
dhylands/rshell
|
rshell/main.py
|
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L436-L460
|
def parse_pattern(s):
"""Parse a string such as 'foo/bar/*.py'
Assumes is_pattern(s) has been called and returned True
1. directory to process
2. pattern to match"""
if '{' in s:
return None, None # Unsupported by fnmatch
if s and s[0] == '~':
s = os.path.expanduser(s)
parts = s.split('/')
absolute = len(parts) > 1 and not parts[0]
if parts[-1] == '': # # Outcome of trailing /
parts = parts[:-1] # discard
if len(parts) == 0:
directory = ''
pattern = ''
else:
directory = '/'.join(parts[:-1])
pattern = parts[-1]
if not is_pattern(directory): # Check for e.g. /abc/*/def
if is_pattern(pattern):
if not directory:
directory = '/' if absolute else '.'
return directory, pattern
return None, None
|
[
"def",
"parse_pattern",
"(",
"s",
")",
":",
"if",
"'{'",
"in",
"s",
":",
"return",
"None",
",",
"None",
"# Unsupported by fnmatch",
"if",
"s",
"and",
"s",
"[",
"0",
"]",
"==",
"'~'",
":",
"s",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"s",
")",
"parts",
"=",
"s",
".",
"split",
"(",
"'/'",
")",
"absolute",
"=",
"len",
"(",
"parts",
")",
">",
"1",
"and",
"not",
"parts",
"[",
"0",
"]",
"if",
"parts",
"[",
"-",
"1",
"]",
"==",
"''",
":",
"# # Outcome of trailing /",
"parts",
"=",
"parts",
"[",
":",
"-",
"1",
"]",
"# discard",
"if",
"len",
"(",
"parts",
")",
"==",
"0",
":",
"directory",
"=",
"''",
"pattern",
"=",
"''",
"else",
":",
"directory",
"=",
"'/'",
".",
"join",
"(",
"parts",
"[",
":",
"-",
"1",
"]",
")",
"pattern",
"=",
"parts",
"[",
"-",
"1",
"]",
"if",
"not",
"is_pattern",
"(",
"directory",
")",
":",
"# Check for e.g. /abc/*/def",
"if",
"is_pattern",
"(",
"pattern",
")",
":",
"if",
"not",
"directory",
":",
"directory",
"=",
"'/'",
"if",
"absolute",
"else",
"'.'",
"return",
"directory",
",",
"pattern",
"return",
"None",
",",
"None"
] |
Parse a string such as 'foo/bar/*.py'
Assumes is_pattern(s) has been called and returned True
1. directory to process
2. pattern to match
|
[
"Parse",
"a",
"string",
"such",
"as",
"foo",
"/",
"bar",
"/",
"*",
".",
"py",
"Assumes",
"is_pattern",
"(",
"s",
")",
"has",
"been",
"called",
"and",
"returned",
"True",
"1",
".",
"directory",
"to",
"process",
"2",
".",
"pattern",
"to",
"match"
] |
python
|
train
|
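Behaviour on a few inputs (this assumes rshell's is_pattern helper, which tests for glob metacharacters):

from rshell.main import parse_pattern

print(parse_pattern('foo/bar/*.py'))  # ('foo/bar', '*.py')
print(parse_pattern('*.py'))          # ('.', '*.py')
print(parse_pattern('/abc/*/def'))    # (None, None) - pattern mid-path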
XuShaohua/bcloud
|
bcloud/MimeProvider.py
|
https://github.com/XuShaohua/bcloud/blob/4b54e0fdccf2b3013285fef05c97354cfa31697b/bcloud/MimeProvider.py#L29-L37
|
def get_mime(self, path, isdir):
    '''Guess the file type from its file extension.'''
if isdir:
file_type = FOLDER
else:
file_type = mimetypes.guess_type(path)[0]
if not file_type:
file_type = UNKNOWN
return file_type
|
[
"def",
"get_mime",
"(",
"self",
",",
"path",
",",
"isdir",
")",
":",
"if",
"isdir",
":",
"file_type",
"=",
"FOLDER",
"else",
":",
"file_type",
"=",
"mimetypes",
".",
"guess_type",
"(",
"path",
")",
"[",
"0",
"]",
"if",
"not",
"file_type",
":",
"file_type",
"=",
"UNKNOWN",
"return",
"file_type"
] |
Guess the file type from its file extension.
|
[
"猜测文件类型",
"根据它的文件扩展名"
] |
python
|
train
|
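The guess comes straight from the standard library; a minimal sketch of the same logic without the class (the FOLDER/UNKNOWN constants are stand-ins, not bcloud's real values):

import mimetypes

FOLDER = 'folder'                      # stand-in constant
UNKNOWN = 'application/octet-stream'   # stand-in constant

def guess_mime(path, isdir):
    if isdir:
        return FOLDER
    return mimetypes.guess_type(path)[0] or UNKNOWN

print(guess_mime('photo.jpg', False))  # image/jpeg
print(guess_mime('Documents', True))   # folder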
citronneur/rdpy
|
rdpy/protocol/rfb/rfb.py
|
https://github.com/citronneur/rdpy/blob/4109b7a6fe2abf3ddbaed54e29d2f31e63ed97f6/rdpy/protocol/rfb/rfb.py#L247-L254
|
def readProtocolVersion(self, data):
"""
Read protocol version
@param data: Stream may contain protocol version string (ProtocolVersion)
"""
data.readType(self._version)
if not self._version.value in [ProtocolVersion.RFB003003, ProtocolVersion.RFB003007, ProtocolVersion.RFB003008]:
self._version.value = ProtocolVersion.UNKNOWN
|
[
"def",
"readProtocolVersion",
"(",
"self",
",",
"data",
")",
":",
"data",
".",
"readType",
"(",
"self",
".",
"_version",
")",
"if",
"not",
"self",
".",
"_version",
".",
"value",
"in",
"[",
"ProtocolVersion",
".",
"RFB003003",
",",
"ProtocolVersion",
".",
"RFB003007",
",",
"ProtocolVersion",
".",
"RFB003008",
"]",
":",
"self",
".",
"_version",
".",
"value",
"=",
"ProtocolVersion",
".",
"UNKNOWN"
] |
Read protocol version
@param data: Stream may contain protocol version string (ProtocolVersion)
|
[
"Read",
"protocol",
"version"
] |
python
|
train
|
peo3/cgroup-utils
|
cgutils/cgroup.py
|
https://github.com/peo3/cgroup-utils/blob/fd7e99f438ce334bac5669fba0d08a6502fd7a82/cgutils/cgroup.py#L722-L737
|
def apply_filters(self, filters):
"""
    Applies the specified filters. The filters are used to reduce the control groups
    which are accessed by the get_configs, get_stats, and get_defaults methods.
"""
_configs = self.configs
_stats = self.stats
self.configs = {}
self.stats = {}
for f in filters:
if f in _configs:
self.configs[f] = _configs[f]
elif f in _stats:
self.stats[f] = _stats[f]
else:
raise NoSuchControlFileError("%s for %s" % (f, self.subsystem.name))
|
[
"def",
"apply_filters",
"(",
"self",
",",
"filters",
")",
":",
"_configs",
"=",
"self",
".",
"configs",
"_stats",
"=",
"self",
".",
"stats",
"self",
".",
"configs",
"=",
"{",
"}",
"self",
".",
"stats",
"=",
"{",
"}",
"for",
"f",
"in",
"filters",
":",
"if",
"f",
"in",
"_configs",
":",
"self",
".",
"configs",
"[",
"f",
"]",
"=",
"_configs",
"[",
"f",
"]",
"elif",
"f",
"in",
"_stats",
":",
"self",
".",
"stats",
"[",
"f",
"]",
"=",
"_stats",
"[",
"f",
"]",
"else",
":",
"raise",
"NoSuchControlFileError",
"(",
"\"%s for %s\"",
"%",
"(",
"f",
",",
"self",
".",
"subsystem",
".",
"name",
")",
")"
] |
Applies the specified filters. The filters are used to reduce the control groups
which are accessed by the get_configs, get_stats, and get_defaults methods.
|
[
"It",
"applies",
"a",
"specified",
"filters",
".",
"The",
"filters",
"are",
"used",
"to",
"reduce",
"the",
"control",
"groups",
"which",
"are",
"accessed",
"by",
"get_confgs",
"get_stats",
"and",
"get_defaults",
"methods",
"."
] |
python
|
train
|
secdev/scapy
|
scapy/layers/l2.py
|
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/l2.py#L595-L608
|
def arpcachepoison(target, victim, interval=60):
"""Poison target's cache with (your MAC,victim's IP) couple
arpcachepoison(target, victim, [interval=60]) -> None
"""
tmac = getmacbyip(target)
p = Ether(dst=tmac) / ARP(op="who-has", psrc=victim, pdst=target)
try:
while True:
sendp(p, iface_hint=target)
if conf.verb > 1:
os.write(1, b".")
time.sleep(interval)
except KeyboardInterrupt:
pass
|
[
"def",
"arpcachepoison",
"(",
"target",
",",
"victim",
",",
"interval",
"=",
"60",
")",
":",
"tmac",
"=",
"getmacbyip",
"(",
"target",
")",
"p",
"=",
"Ether",
"(",
"dst",
"=",
"tmac",
")",
"/",
"ARP",
"(",
"op",
"=",
"\"who-has\"",
",",
"psrc",
"=",
"victim",
",",
"pdst",
"=",
"target",
")",
"try",
":",
"while",
"True",
":",
"sendp",
"(",
"p",
",",
"iface_hint",
"=",
"target",
")",
"if",
"conf",
".",
"verb",
">",
"1",
":",
"os",
".",
"write",
"(",
"1",
",",
"b\".\"",
")",
"time",
".",
"sleep",
"(",
"interval",
")",
"except",
"KeyboardInterrupt",
":",
"pass"
] |
Poison target's cache with (your MAC,victim's IP) couple
arpcachepoison(target, victim, [interval=60]) -> None
|
[
"Poison",
"target",
"s",
"cache",
"with",
"(",
"your",
"MAC",
"victim",
"s",
"IP",
")",
"couple",
"arpcachepoison",
"(",
"target",
"victim",
"[",
"interval",
"=",
"60",
"]",
")",
"-",
">",
"None"
] |
python
|
train
|
shoebot/shoebot
|
lib/photobot/__init__.py
|
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/photobot/__init__.py#L393-L410
|
def copy(self):
"""Returns a copy of the layer.
This is different from the duplicate() method,
which duplicates the layer as a new layer on the canvas.
The copy() method returns a copy of the layer
that can be added to a different canvas.
"""
layer = Layer(None, self.img.copy(), self.x, self.y, self.name)
layer.w = self.w
layer.h = self.h
layer.alpha = self.alpha
layer.blend = self.blend
return layer
|
[
"def",
"copy",
"(",
"self",
")",
":",
"layer",
"=",
"Layer",
"(",
"None",
",",
"self",
".",
"img",
".",
"copy",
"(",
")",
",",
"self",
".",
"x",
",",
"self",
".",
"y",
",",
"self",
".",
"name",
")",
"layer",
".",
"w",
"=",
"self",
".",
"w",
"layer",
".",
"h",
"=",
"self",
".",
"h",
"layer",
".",
"alpha",
"=",
"self",
".",
"alpha",
"layer",
".",
"blend",
"=",
"self",
".",
"blend",
"return",
"layer"
] |
Returns a copy of the layer.
This is different from the duplicate() method,
which duplicates the layer as a new layer on the canvas.
The copy() method returns a copy of the layer
that can be added to a different canvas.
|
[
"Returns",
"a",
"copy",
"of",
"the",
"layer",
".",
"This",
"is",
"different",
"from",
"the",
"duplicate",
"()",
"method",
"which",
"duplicates",
"the",
"layer",
"as",
"a",
"new",
"layer",
"on",
"the",
"canvas",
".",
"The",
"copy",
"()",
"method",
"returns",
"a",
"copy",
"of",
"the",
"layer",
"that",
"can",
"be",
"added",
"to",
"a",
"different",
"canvas",
"."
] |
python
|
valid
|
luckydonald/pytgbot
|
code_generation/output/pytgbot/api_types/receivable/media.py
|
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/output/pytgbot/api_types/receivable/media.py#L1076-L1092
|
def to_array(self):
"""
Serializes this Voice to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(Voice, self).to_array()
array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str
array['duration'] = int(self.duration) # type int
if self.mime_type is not None:
array['mime_type'] = u(self.mime_type) # py2: type unicode, py3: type str
if self.file_size is not None:
array['file_size'] = int(self.file_size) # type int
return array
|
[
"def",
"to_array",
"(",
"self",
")",
":",
"array",
"=",
"super",
"(",
"Voice",
",",
"self",
")",
".",
"to_array",
"(",
")",
"array",
"[",
"'file_id'",
"]",
"=",
"u",
"(",
"self",
".",
"file_id",
")",
"# py2: type unicode, py3: type str",
"array",
"[",
"'duration'",
"]",
"=",
"int",
"(",
"self",
".",
"duration",
")",
"# type int",
"if",
"self",
".",
"mime_type",
"is",
"not",
"None",
":",
"array",
"[",
"'mime_type'",
"]",
"=",
"u",
"(",
"self",
".",
"mime_type",
")",
"# py2: type unicode, py3: type str",
"if",
"self",
".",
"file_size",
"is",
"not",
"None",
":",
"array",
"[",
"'file_size'",
"]",
"=",
"int",
"(",
"self",
".",
"file_size",
")",
"# type int",
"return",
"array"
] |
Serializes this Voice to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
|
[
"Serializes",
"this",
"Voice",
"to",
"a",
"dictionary",
"."
] |
python
|
train
|
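The optional-field serialization pattern from to_array above, reduced to a self-contained sketch (the class here is a hypothetical stand-in, not the pytgbot type):

class Voice(object):
    def __init__(self, file_id, duration, mime_type=None, file_size=None):
        self.file_id = file_id
        self.duration = duration
        self.mime_type = mime_type
        self.file_size = file_size

    def to_array(self):
        # Required fields are always serialized ...
        array = {"file_id": str(self.file_id), "duration": int(self.duration)}
        # ... optional fields only when they are set.
        if self.mime_type is not None:
            array["mime_type"] = str(self.mime_type)
        if self.file_size is not None:
            array["file_size"] = int(self.file_size)
        return array

Voice("abc", 3).to_array()  # -> {'file_id': 'abc', 'duration': 3}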
ioos/compliance-checker
|
compliance_checker/base.py
|
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/base.py#L339-L357
|
def check_has(priority=BaseCheck.HIGH, gname=None):
"""Decorator to wrap a function to check if a dataset has given attributes.
:param function func: function to wrap"""
def _inner(func):
def _dec(s, ds):
attr_process = kvp_convert(func(s, ds))
ret_val = []
# could potentially run tests in parallel if we eliminated side
# effects on `ret_val`
for kvp in attr_process.items():
# function mutates ret_val
attr_check(kvp, ds, priority, ret_val, gname)
return ret_val
return wraps(func)(_dec)
return _inner
|
[
"def",
"check_has",
"(",
"priority",
"=",
"BaseCheck",
".",
"HIGH",
",",
"gname",
"=",
"None",
")",
":",
"def",
"_inner",
"(",
"func",
")",
":",
"def",
"_dec",
"(",
"s",
",",
"ds",
")",
":",
"attr_process",
"=",
"kvp_convert",
"(",
"func",
"(",
"s",
",",
"ds",
")",
")",
"ret_val",
"=",
"[",
"]",
"# could potentially run tests in parallel if we eliminated side",
"# effects on `ret_val`",
"for",
"kvp",
"in",
"attr_process",
".",
"items",
"(",
")",
":",
"# function mutates ret_val",
"attr_check",
"(",
"kvp",
",",
"ds",
",",
"priority",
",",
"ret_val",
",",
"gname",
")",
"return",
"ret_val",
"return",
"wraps",
"(",
"func",
")",
"(",
"_dec",
")",
"return",
"_inner"
] |
Decorator to wrap a function to check if a dataset has given attributes.
:param function func: function to wrap
|
[
"Decorator",
"to",
"wrap",
"a",
"function",
"to",
"check",
"if",
"a",
"dataset",
"has",
"given",
"attributes",
".",
":",
"param",
"function",
"func",
":",
"function",
"to",
"wrap"
] |
python
|
train
|
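A stripped-down sketch of the decorator shape used by check_has above; kvp_convert and attr_check are replaced by a plain dict() call and a hasattr probe, so this shows only the wrapping, not the real compliance checks:

from functools import wraps

def check_has(priority=3):
    def _inner(func):
        @wraps(func)                 # preserve the wrapped checker's name/docstring
        def _dec(self, ds):
            ret_val = []
            for attr, expected in dict(func(self, ds)).items():
                ret_val.append((attr, hasattr(ds, attr), priority))
            return ret_val
        return _dec
    return _inner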
Archived-Object/ligament
|
ligament/helpers.py
|
https://github.com/Archived-Object/ligament/blob/ff3d78130522676a20dc64086dc8a27b197cc20f/ligament/helpers.py#L142-L203
|
def indent_text(*strs, **kwargs):
""" indents text according to an operater string and a global indentation
level. returns a tuple of all passed args, indented according to the
operator string
indent: [defaults to +0]
The operator string, of the form
++n : increments the global indentation level by n and indents
+n : indents with the global indentation level + n
--n : decrements the global indentation level by n
-n : indents with the global indentation level - n
==n : sets the global indentation level to exactly n and indents
=n : indents with an indentation level of exactly n
"""
# python 2.7 workaround
    indent = kwargs["indent"] if "indent" in kwargs else "+0"
autobreak = kwargs.get("autobreak", False)
char_limit = kwargs.get("char_limit", 80)
split_char = kwargs.get("split_char", " ")
strs = list(strs)
if autobreak:
for index, s in enumerate(strs):
if len(s) > char_limit:
strs[index] = []
spl = s.split(split_char)
result = []
collect = ""
for current_block in spl:
if len(current_block) + len(collect) > char_limit:
strs[index].append(collect[:-1] + "\n")
collect = " "
collect += current_block + split_char
strs[index].append(collect + "\n")
strs = flatten_list(strs)
global lasting_indent
if indent.startswith("++"):
lasting_indent = lasting_indent + int(indent[2:])
cur_indent = lasting_indent
elif indent.startswith("+"):
cur_indent = lasting_indent + int(indent[1:])
elif indent.startswith("--"):
lasting_indent = lasting_indent - int(indent[2:])
cur_indent = lasting_indent
elif indent.startswith("-"):
cur_indent = lasting_indent - int(indent[1:])
elif indent.startswith("=="):
lasting_indent = int(indent[2:])
cur_indent = lasting_indent
elif indent.startswith("="):
lasting_indent = int(indent[1:])
cur_indent = int(indent[1:])
else:
raise Exception(
"indent command format '%s' unrecognized (see the docstring)")
# mutate indentation level if needed
return tuple([" " * cur_indent] + [elem.replace("\n", "\n" + " " * cur_indent)
for elem in strs])
|
[
"def",
"indent_text",
"(",
"*",
"strs",
",",
"*",
"*",
"kwargs",
")",
":",
"# python 2.7 workaround",
"indent",
"=",
"kwargs",
"[",
"\"indent\"",
"]",
"if",
"\"indent\"",
"in",
"kwargs",
"else",
"\"+0\"",
"autobreak",
"=",
"kwargs",
".",
"get",
"(",
"\"autobreak\"",
",",
"False",
")",
"char_limit",
"=",
"kwargs",
".",
"get",
"(",
"\"char_limit\"",
",",
"80",
")",
"split_char",
"=",
"kwargs",
".",
"get",
"(",
"\"split_char\"",
",",
"\" \"",
")",
"strs",
"=",
"list",
"(",
"strs",
")",
"if",
"autobreak",
":",
"for",
"index",
",",
"s",
"in",
"enumerate",
"(",
"strs",
")",
":",
"if",
"len",
"(",
"s",
")",
">",
"char_limit",
":",
"strs",
"[",
"index",
"]",
"=",
"[",
"]",
"spl",
"=",
"s",
".",
"split",
"(",
"split_char",
")",
"result",
"=",
"[",
"]",
"collect",
"=",
"\"\"",
"for",
"current_block",
"in",
"spl",
":",
"if",
"len",
"(",
"current_block",
")",
"+",
"len",
"(",
"collect",
")",
">",
"char_limit",
":",
"strs",
"[",
"index",
"]",
".",
"append",
"(",
"collect",
"[",
":",
"-",
"1",
"]",
"+",
"\"\\n\"",
")",
"collect",
"=",
"\" \"",
"collect",
"+=",
"current_block",
"+",
"split_char",
"strs",
"[",
"index",
"]",
".",
"append",
"(",
"collect",
"+",
"\"\\n\"",
")",
"strs",
"=",
"flatten_list",
"(",
"strs",
")",
"global",
"lasting_indent",
"if",
"indent",
".",
"startswith",
"(",
"\"++\"",
")",
":",
"lasting_indent",
"=",
"lasting_indent",
"+",
"int",
"(",
"indent",
"[",
"2",
":",
"]",
")",
"cur_indent",
"=",
"lasting_indent",
"elif",
"indent",
".",
"startswith",
"(",
"\"+\"",
")",
":",
"cur_indent",
"=",
"lasting_indent",
"+",
"int",
"(",
"indent",
"[",
"1",
":",
"]",
")",
"elif",
"indent",
".",
"startswith",
"(",
"\"--\"",
")",
":",
"lasting_indent",
"=",
"lasting_indent",
"-",
"int",
"(",
"indent",
"[",
"2",
":",
"]",
")",
"cur_indent",
"=",
"lasting_indent",
"elif",
"indent",
".",
"startswith",
"(",
"\"-\"",
")",
":",
"cur_indent",
"=",
"lasting_indent",
"-",
"int",
"(",
"indent",
"[",
"1",
":",
"]",
")",
"elif",
"indent",
".",
"startswith",
"(",
"\"==\"",
")",
":",
"lasting_indent",
"=",
"int",
"(",
"indent",
"[",
"2",
":",
"]",
")",
"cur_indent",
"=",
"lasting_indent",
"elif",
"indent",
".",
"startswith",
"(",
"\"=\"",
")",
":",
"lasting_indent",
"=",
"int",
"(",
"indent",
"[",
"1",
":",
"]",
")",
"cur_indent",
"=",
"int",
"(",
"indent",
"[",
"1",
":",
"]",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"indent command format '%s' unrecognized (see the docstring)\"",
")",
"# mutate indentation level if needed",
"return",
"tuple",
"(",
"[",
"\" \"",
"*",
"cur_indent",
"]",
"+",
"[",
"elem",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\\n\"",
"+",
"\" \"",
"*",
"cur_indent",
")",
"for",
"elem",
"in",
"strs",
"]",
")"
] |
indents text according to an operator string and a global indentation
level. returns a tuple of all passed args, indented according to the
operator string
indent: [defaults to +0]
The operator string, of the form
++n : increments the global indentation level by n and indents
+n : indents with the global indentation level + n
--n : decrements the global indentation level by n
-n : indents with the global indentation level - n
==n : sets the global indentation level to exactly n and indents
=n : indents with an indentation level of exactly n
|
[
"indents",
"text",
"according",
"to",
"an",
"operater",
"string",
"and",
"a",
"global",
"indentation",
"level",
".",
"returns",
"a",
"tuple",
"of",
"all",
"passed",
"args",
"indented",
"according",
"to",
"the",
"operator",
"string"
] |
python
|
train
|
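Worked examples of the operator strings, assuming the module-level lasting_indent starts at 0; the first element of the returned tuple is the indent prefix itself:

indent_text("hello", indent="++4")  # ("    ", "hello"); global level is now 4
indent_text("world", indent="+2")   # indents by 4 + 2 = 6; global level stays 4
indent_text("reset", indent="==0")  # indents by 0; global level reset to 0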
mcocdawc/chemcoord
|
src/chemcoord/cartesian_coordinates/xyz_functions.py
|
https://github.com/mcocdawc/chemcoord/blob/95561ce387c142227c38fb14a1d182179aef8f5f/src/chemcoord/cartesian_coordinates/xyz_functions.py#L220-L237
|
def allclose(a, b, align=False, rtol=1.e-5, atol=1.e-8):
"""Compare two molecules for numerical equality.
Args:
a (Cartesian):
b (Cartesian):
align (bool): a and b are
prealigned along their principal axes of inertia and moved to their
barycenters before comparing.
rtol (float): Relative tolerance for the numerical equality comparison
look into :func:`numpy.allclose` for further explanation.
        atol (float): Absolute tolerance for the numerical equality comparison
look into :func:`numpy.allclose` for further explanation.
Returns:
bool:
"""
return np.alltrue(isclose(a, b, align=align, rtol=rtol, atol=atol))
|
[
"def",
"allclose",
"(",
"a",
",",
"b",
",",
"align",
"=",
"False",
",",
"rtol",
"=",
"1.e-5",
",",
"atol",
"=",
"1.e-8",
")",
":",
"return",
"np",
".",
"alltrue",
"(",
"isclose",
"(",
"a",
",",
"b",
",",
"align",
"=",
"align",
",",
"rtol",
"=",
"rtol",
",",
"atol",
"=",
"atol",
")",
")"
] |
Compare two molecules for numerical equality.
Args:
a (Cartesian):
b (Cartesian):
align (bool): a and b are
prealigned along their principal axes of inertia and moved to their
barycenters before comparing.
rtol (float): Relative tolerance for the numerical equality comparison
look into :func:`numpy.allclose` for further explanation.
atol (float): Absolute tolerance for the numerical equality comparison
look into :func:`numpy.allclose` for further explanation.
Returns:
bool:
|
[
"Compare",
"two",
"molecules",
"for",
"numerical",
"equality",
"."
] |
python
|
train
|
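allclose defers to chemcoord's own isclose, but the tolerance semantics mirror numpy.allclose; a quick illustration of those semantics:

import numpy as np

a = np.array([1.0, 2.0])
b = np.array([1.0 + 1e-9, 2.0])
np.allclose(a, b, rtol=1e-5, atol=1e-8)  # True: the difference is within atol
np.allclose(a, a + 1.0, rtol=1e-5)       # False: 1.0 exceeds both tolerances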
mozilla/mozdownload
|
mozdownload/timezones.py
|
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/timezones.py#L36-L41
|
def first_sunday(self, year, month):
"""Get the first sunday of a month."""
date = datetime(year, month, 1, 0)
days_until_sunday = 6 - date.weekday()
return date + timedelta(days=days_until_sunday)
|
[
"def",
"first_sunday",
"(",
"self",
",",
"year",
",",
"month",
")",
":",
"date",
"=",
"datetime",
"(",
"year",
",",
"month",
",",
"1",
",",
"0",
")",
"days_until_sunday",
"=",
"6",
"-",
"date",
".",
"weekday",
"(",
")",
"return",
"date",
"+",
"timedelta",
"(",
"days",
"=",
"days_until_sunday",
")"
] |
Get the first sunday of a month.
|
[
"Get",
"the",
"first",
"sunday",
"of",
"a",
"month",
"."
] |
python
|
train
|
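A worked example of the weekday arithmetic (Monday is weekday 0, Sunday is 6):

from datetime import datetime, timedelta

date = datetime(2024, 10, 1)               # a Tuesday: date.weekday() == 1
date + timedelta(days=6 - date.weekday())  # datetime(2024, 10, 6), a Sunday
# When the 1st is itself a Sunday (weekday 6), zero days are added.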
johntfoster/bspline
|
bspline/bspline.py
|
https://github.com/johntfoster/bspline/blob/366085a665da6fe907258eafcc8032c58a0601e0/bspline/bspline.py#L135-L158
|
def plot(self):
"""Plot basis functions over full range of knots.
Convenience function. Requires matplotlib.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
from sys import stderr
print("ERROR: matplotlib.pyplot not found, matplotlib must be installed to use this function", file=stderr)
raise
x_min = np.min(self.knot_vector)
x_max = np.max(self.knot_vector)
x = np.linspace(x_min, x_max, num=1000)
N = np.array([self(i) for i in x]).T
for n in N:
plt.plot(x,n)
return plt.show()
|
[
"def",
"plot",
"(",
"self",
")",
":",
"try",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"except",
"ImportError",
":",
"from",
"sys",
"import",
"stderr",
"print",
"(",
"\"ERROR: matplotlib.pyplot not found, matplotlib must be installed to use this function\"",
",",
"file",
"=",
"stderr",
")",
"raise",
"x_min",
"=",
"np",
".",
"min",
"(",
"self",
".",
"knot_vector",
")",
"x_max",
"=",
"np",
".",
"max",
"(",
"self",
".",
"knot_vector",
")",
"x",
"=",
"np",
".",
"linspace",
"(",
"x_min",
",",
"x_max",
",",
"num",
"=",
"1000",
")",
"N",
"=",
"np",
".",
"array",
"(",
"[",
"self",
"(",
"i",
")",
"for",
"i",
"in",
"x",
"]",
")",
".",
"T",
"for",
"n",
"in",
"N",
":",
"plt",
".",
"plot",
"(",
"x",
",",
"n",
")",
"return",
"plt",
".",
"show",
"(",
")"
] |
Plot basis functions over full range of knots.
Convenience function. Requires matplotlib.
|
[
"Plot",
"basis",
"functions",
"over",
"full",
"range",
"of",
"knots",
"."
] |
python
|
train
|
ramrod-project/database-brain
|
schema/brain/environment.py
|
https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/environment.py#L50-L58
|
def log_env_gte(desired):
"""
Boolean check if the current environment LOGLEVEL is
at least as verbose as a desired LOGLEVEL
:param desired: <str> one of 9 keys in <brain.environment.stage>
:return: <bool>
"""
return LOGLEVELS.get(check_log_env()) >= LOGLEVELS.get(desired, LOGLEVELS[TEST])
|
[
"def",
"log_env_gte",
"(",
"desired",
")",
":",
"return",
"LOGLEVELS",
".",
"get",
"(",
"check_log_env",
"(",
")",
")",
">=",
"LOGLEVELS",
".",
"get",
"(",
"desired",
",",
"LOGLEVELS",
"[",
"TEST",
"]",
")"
] |
Boolean check if the current environment LOGLEVEL is
at least as verbose as a desired LOGLEVEL
:param desired: <str> one of 9 keys in <brain.environment.stage>
:return: <bool>
|
[
"Boolean",
"check",
"if",
"the",
"current",
"environment",
"LOGLEVEL",
"is",
"at",
"least",
"as",
"verbose",
"as",
"a",
"desired",
"LOGLEVEL"
] |
python
|
train
|
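A self-contained sketch of the dict-based level comparison; the level names and numbers are hypothetical, not brain's real table:

LOGLEVELS = {"DEBUG": 10, "TEST": 20, "PROD": 50}  # hypothetical mapping

def log_env_gte(current, desired):
    # Unknown desired levels fall back to TEST, mirroring the code above.
    return LOGLEVELS.get(current, 0) >= LOGLEVELS.get(desired, LOGLEVELS["TEST"])

log_env_gte("PROD", "DEBUG")  # True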
waqasbhatti/astrobase
|
astrobase/hatsurveys/hplc.py
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hplc.py#L207-L230
|
def lcdict_to_pickle(lcdict, outfile=None):
'''This just writes the lcdict to a pickle.
If outfile is None, then will try to get the name from the
    lcdict['objectid'] and write to <objectid>-hplc.pkl. If that fails, will
    write to a file named hplc.pkl.
'''
if not outfile and lcdict['objectid']:
outfile = '%s-hplc.pkl' % lcdict['objectid']
elif not outfile and not lcdict['objectid']:
outfile = 'hplc.pkl'
with open(outfile,'wb') as outfd:
pickle.dump(lcdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)
if os.path.exists(outfile):
LOGINFO('lcdict for object: %s -> %s OK' % (lcdict['objectid'],
outfile))
return outfile
else:
LOGERROR('could not make a pickle for this lcdict!')
return None
|
[
"def",
"lcdict_to_pickle",
"(",
"lcdict",
",",
"outfile",
"=",
"None",
")",
":",
"if",
"not",
"outfile",
"and",
"lcdict",
"[",
"'objectid'",
"]",
":",
"outfile",
"=",
"'%s-hplc.pkl'",
"%",
"lcdict",
"[",
"'objectid'",
"]",
"elif",
"not",
"outfile",
"and",
"not",
"lcdict",
"[",
"'objectid'",
"]",
":",
"outfile",
"=",
"'hplc.pkl'",
"with",
"open",
"(",
"outfile",
",",
"'wb'",
")",
"as",
"outfd",
":",
"pickle",
".",
"dump",
"(",
"lcdict",
",",
"outfd",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"outfile",
")",
":",
"LOGINFO",
"(",
"'lcdict for object: %s -> %s OK'",
"%",
"(",
"lcdict",
"[",
"'objectid'",
"]",
",",
"outfile",
")",
")",
"return",
"outfile",
"else",
":",
"LOGERROR",
"(",
"'could not make a pickle for this lcdict!'",
")",
"return",
"None"
] |
This just writes the lcdict to a pickle.
If outfile is None, then will try to get the name from the
lcdict['objectid'] and write to <objectid>-hplc.pkl. If that fails, will
write to a file named hplc.pkl.
|
[
"This",
"just",
"writes",
"the",
"lcdict",
"to",
"a",
"pickle",
"."
] |
python
|
valid
|
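A round-trip sketch of the pickling pattern above; the object id and magnitudes are made-up placeholders:

import pickle

lcdict = {"objectid": "HAT-000-0000000", "mags": [10.1, 10.2]}
outfile = "%s-hplc.pkl" % lcdict["objectid"]
with open(outfile, "wb") as outfd:
    pickle.dump(lcdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)
with open(outfile, "rb") as infd:
    assert pickle.load(infd) == lcdict   # the pickle restores the same dict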
monarch-initiative/dipper
|
dipper/sources/Decipher.py
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Decipher.py#L228-L277
|
def make_allele_by_consequence(self, consequence, gene_id, gene_symbol):
"""
Given a "consequence" label that describes a variation type,
create an anonymous variant of the specified gene as an instance of
that consequence type.
:param consequence:
:param gene_id:
:param gene_symbol:
:return: allele_id
"""
allele_id = None
# Loss of function : Nonsense, frame-shifting indel,
# essential splice site mutation, whole gene deletion or any other
# mutation where functional analysis demonstrates clear reduction
# or loss of function
# All missense/in frame : Where all the mutations described in the data
# source are either missense or in frame deletions and there is no
# evidence favoring either loss-of-function, activating or
# dominant negative effect
# Dominant negative : Mutation within one allele of a gene that creates
# a significantly greater deleterious effect on gene product
# function than a monoallelic loss of function mutation
# Activating : Mutation, usually missense that results in
# a constitutive functional activation of the gene product
# Increased gene dosage : Copy number variation that increases
# the functional dosage of the gene
# Cis-regulatory or promotor mutation : Mutation in cis-regulatory
# elements that lies outwith the known transcription unit and
# promotor of the controlled gene
# Uncertain : Where the exact nature of the mutation is unclear or
# not recorded
type_id = self.resolve(consequence, mandatory=False)
if type_id == consequence:
LOG.warning("Consequence type unmapped: %s", str(consequence))
type_id = self.globaltt['sequence_variant']
# make the allele
allele_id = ''.join((gene_id, type_id))
allele_id = re.sub(r':', '', allele_id)
allele_id = '_:'+allele_id # make this a BNode
allele_label = ' '.join((consequence, 'allele in', gene_symbol))
self.model.addIndividualToGraph(allele_id, allele_label, type_id)
self.geno.addAlleleOfGene(allele_id, gene_id)
return allele_id
|
[
"def",
"make_allele_by_consequence",
"(",
"self",
",",
"consequence",
",",
"gene_id",
",",
"gene_symbol",
")",
":",
"allele_id",
"=",
"None",
"# Loss of function : Nonsense, frame-shifting indel,",
"# essential splice site mutation, whole gene deletion or any other",
"# mutation where functional analysis demonstrates clear reduction",
"# or loss of function",
"# All missense/in frame : Where all the mutations described in the data",
"# source are either missense or in frame deletions and there is no",
"# evidence favoring either loss-of-function, activating or",
"# dominant negative effect",
"# Dominant negative : Mutation within one allele of a gene that creates",
"# a significantly greater deleterious effect on gene product",
"# function than a monoallelic loss of function mutation",
"# Activating : Mutation, usually missense that results in",
"# a constitutive functional activation of the gene product",
"# Increased gene dosage : Copy number variation that increases",
"# the functional dosage of the gene",
"# Cis-regulatory or promotor mutation : Mutation in cis-regulatory",
"# elements that lies outwith the known transcription unit and",
"# promotor of the controlled gene",
"# Uncertain : Where the exact nature of the mutation is unclear or",
"# not recorded",
"type_id",
"=",
"self",
".",
"resolve",
"(",
"consequence",
",",
"mandatory",
"=",
"False",
")",
"if",
"type_id",
"==",
"consequence",
":",
"LOG",
".",
"warning",
"(",
"\"Consequence type unmapped: %s\"",
",",
"str",
"(",
"consequence",
")",
")",
"type_id",
"=",
"self",
".",
"globaltt",
"[",
"'sequence_variant'",
"]",
"# make the allele",
"allele_id",
"=",
"''",
".",
"join",
"(",
"(",
"gene_id",
",",
"type_id",
")",
")",
"allele_id",
"=",
"re",
".",
"sub",
"(",
"r':'",
",",
"''",
",",
"allele_id",
")",
"allele_id",
"=",
"'_:'",
"+",
"allele_id",
"# make this a BNode",
"allele_label",
"=",
"' '",
".",
"join",
"(",
"(",
"consequence",
",",
"'allele in'",
",",
"gene_symbol",
")",
")",
"self",
".",
"model",
".",
"addIndividualToGraph",
"(",
"allele_id",
",",
"allele_label",
",",
"type_id",
")",
"self",
".",
"geno",
".",
"addAlleleOfGene",
"(",
"allele_id",
",",
"gene_id",
")",
"return",
"allele_id"
] |
Given a "consequence" label that describes a variation type,
create an anonymous variant of the specified gene as an instance of
that consequence type.
:param consequence:
:param gene_id:
:param gene_symbol:
:return: allele_id
|
[
"Given",
"a",
"consequence",
"label",
"that",
"describes",
"a",
"variation",
"type",
"create",
"an",
"anonymous",
"variant",
"of",
"the",
"specified",
"gene",
"as",
"an",
"instance",
"of",
"that",
"consequence",
"type",
"."
] |
python
|
train
|
sarugaku/requirementslib
|
src/requirementslib/models/markers.py
|
https://github.com/sarugaku/requirementslib/blob/de78a01e8abc1fc47155516a96008d97035e8063/src/requirementslib/models/markers.py#L310-L338
|
def _strip_marker_elem(elem_name, elements):
"""Remove the supplied element from the marker.
This is not a comprehensive implementation, but relies on an important
characteristic of metadata generation: The element's operand is always
associated with an "and" operator. This means that we can simply remove the
operand and the "and" operator associated with it.
"""
extra_indexes = []
preceding_operators = ["and"] if elem_name == "extra" else ["and", "or"]
for i, element in enumerate(elements):
if isinstance(element, list):
cancelled = _strip_marker_elem(elem_name, element)
if cancelled:
extra_indexes.append(i)
elif isinstance(element, tuple) and element[0].value == elem_name:
extra_indexes.append(i)
for i in reversed(extra_indexes):
del elements[i]
if i > 0 and elements[i - 1] in preceding_operators:
# Remove the "and" before it.
del elements[i - 1]
elif elements:
# This shouldn't ever happen, but is included for completeness.
# If there is not an "and" before this element, try to remove the
# operator after it.
del elements[0]
return not elements
|
[
"def",
"_strip_marker_elem",
"(",
"elem_name",
",",
"elements",
")",
":",
"extra_indexes",
"=",
"[",
"]",
"preceding_operators",
"=",
"[",
"\"and\"",
"]",
"if",
"elem_name",
"==",
"\"extra\"",
"else",
"[",
"\"and\"",
",",
"\"or\"",
"]",
"for",
"i",
",",
"element",
"in",
"enumerate",
"(",
"elements",
")",
":",
"if",
"isinstance",
"(",
"element",
",",
"list",
")",
":",
"cancelled",
"=",
"_strip_marker_elem",
"(",
"elem_name",
",",
"element",
")",
"if",
"cancelled",
":",
"extra_indexes",
".",
"append",
"(",
"i",
")",
"elif",
"isinstance",
"(",
"element",
",",
"tuple",
")",
"and",
"element",
"[",
"0",
"]",
".",
"value",
"==",
"elem_name",
":",
"extra_indexes",
".",
"append",
"(",
"i",
")",
"for",
"i",
"in",
"reversed",
"(",
"extra_indexes",
")",
":",
"del",
"elements",
"[",
"i",
"]",
"if",
"i",
">",
"0",
"and",
"elements",
"[",
"i",
"-",
"1",
"]",
"in",
"preceding_operators",
":",
"# Remove the \"and\" before it.",
"del",
"elements",
"[",
"i",
"-",
"1",
"]",
"elif",
"elements",
":",
"# This shouldn't ever happen, but is included for completeness.",
"# If there is not an \"and\" before this element, try to remove the",
"# operator after it.",
"del",
"elements",
"[",
"0",
"]",
"return",
"not",
"elements"
] |
Remove the supplied element from the marker.
This is not a comprehensive implementation, but relies on an important
characteristic of metadata generation: The element's operand is always
associated with an "and" operator. This means that we can simply remove the
operand and the "and" operator associated with it.
|
[
"Remove",
"the",
"supplied",
"element",
"from",
"the",
"marker",
"."
] |
python
|
train
|
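What the stripping does to a flattened marker, using a namedtuple as a stand-in for packaging's Variable node (the real element types differ):

from collections import namedtuple

Var = namedtuple("Var", "value")

# Flattened form of: python_version >= "3.6" and extra == "tests"
elements = [(Var("python_version"), ">=", "3.6"), "and", (Var("extra"), "==", "tests")]
_strip_marker_elem("extra", elements)
# elements is now [(Var(value='python_version'), '>=', '3.6')]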
LordSputnik/mutagen
|
mutagen/id3.py
|
https://github.com/LordSputnik/mutagen/blob/38e62c8dc35c72b16554f5dbe7c0fde91acc3411/mutagen/id3.py#L191-L199
|
def delall(self, key):
"""Delete all tags of a given kind; see getall."""
if key in self:
del(self[key])
else:
key = key + ":"
for k in self.keys():
if k.startswith(key):
del(self[k])
|
[
"def",
"delall",
"(",
"self",
",",
"key",
")",
":",
"if",
"key",
"in",
"self",
":",
"del",
"(",
"self",
"[",
"key",
"]",
")",
"else",
":",
"key",
"=",
"key",
"+",
"\":\"",
"for",
"k",
"in",
"self",
".",
"keys",
"(",
")",
":",
"if",
"k",
".",
"startswith",
"(",
"key",
")",
":",
"del",
"(",
"self",
"[",
"k",
"]",
")"
] |
Delete all tags of a given kind; see getall.
|
[
"Delete",
"all",
"tags",
"of",
"a",
"given",
"kind",
";",
"see",
"getall",
"."
] |
python
|
test
|
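A hedged usage sketch; 'song.mp3' is a placeholder path:

from mutagen.id3 import ID3

tags = ID3("song.mp3")
tags.delall("COMM")  # exact key, or every "COMM:..." variant via the prefix branch
tags.save()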
pytroll/satpy
|
satpy/writers/scmi.py
|
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/writers/scmi.py#L915-L926
|
def _group_by_area(self, datasets):
"""Group datasets by their area."""
def _area_id(area_def):
return area_def.name + str(area_def.area_extent) + str(area_def.shape)
# get all of the datasets stored by area
area_datasets = {}
for x in datasets:
area_id = _area_id(x.attrs['area'])
area, ds_list = area_datasets.setdefault(area_id, (x.attrs['area'], []))
ds_list.append(x)
return area_datasets
|
[
"def",
"_group_by_area",
"(",
"self",
",",
"datasets",
")",
":",
"def",
"_area_id",
"(",
"area_def",
")",
":",
"return",
"area_def",
".",
"name",
"+",
"str",
"(",
"area_def",
".",
"area_extent",
")",
"+",
"str",
"(",
"area_def",
".",
"shape",
")",
"# get all of the datasets stored by area",
"area_datasets",
"=",
"{",
"}",
"for",
"x",
"in",
"datasets",
":",
"area_id",
"=",
"_area_id",
"(",
"x",
".",
"attrs",
"[",
"'area'",
"]",
")",
"area",
",",
"ds_list",
"=",
"area_datasets",
".",
"setdefault",
"(",
"area_id",
",",
"(",
"x",
".",
"attrs",
"[",
"'area'",
"]",
",",
"[",
"]",
")",
")",
"ds_list",
".",
"append",
"(",
"x",
")",
"return",
"area_datasets"
] |
Group datasets by their area.
|
[
"Group",
"datasets",
"by",
"their",
"area",
"."
] |
python
|
train
|
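The setdefault grouping idiom in isolation, over hypothetical records keyed by an 'area' string:

records = [{"area": "east", "v": 1}, {"area": "west", "v": 2}, {"area": "east", "v": 3}]

groups = {}
for rec in records:
    # setdefault returns the existing (key, list) pair, or installs a fresh one.
    area, bucket = groups.setdefault(rec["area"], (rec["area"], []))
    bucket.append(rec)
# groups["east"][1] now holds both "east" records.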
JarryShaw/PyPCAPKit
|
src/const/ipv6/option.py
|
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/const/ipv6/option.py#L39-L45
|
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return Option(key)
if key not in Option._member_map_:
extend_enum(Option, key, default)
return Option[key]
|
[
"def",
"get",
"(",
"key",
",",
"default",
"=",
"-",
"1",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"int",
")",
":",
"return",
"Option",
"(",
"key",
")",
"if",
"key",
"not",
"in",
"Option",
".",
"_member_map_",
":",
"extend_enum",
"(",
"Option",
",",
"key",
",",
"default",
")",
"return",
"Option",
"[",
"key",
"]"
] |
Backport support for original codes.
|
[
"Backport",
"support",
"for",
"original",
"codes",
"."
] |
python
|
train
|
ConsenSys/mythril-classic
|
mythril/disassembler/asm.py
|
https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/disassembler/asm.py#L51-L60
|
def get_opcode_from_name(operation_name: str) -> int:
"""Get an op code based on its name.
:param operation_name:
:return:
"""
for op_code, value in opcodes.items():
if operation_name == value[0]:
return op_code
raise RuntimeError("Unknown opcode")
|
[
"def",
"get_opcode_from_name",
"(",
"operation_name",
":",
"str",
")",
"->",
"int",
":",
"for",
"op_code",
",",
"value",
"in",
"opcodes",
".",
"items",
"(",
")",
":",
"if",
"operation_name",
"==",
"value",
"[",
"0",
"]",
":",
"return",
"op_code",
"raise",
"RuntimeError",
"(",
"\"Unknown opcode\"",
")"
] |
Get an op code based on its name.
:param operation_name:
:return:
|
[
"Get",
"an",
"op",
"code",
"based",
"on",
"its",
"name",
"."
] |
python
|
train
|
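The reverse lookup in miniature, over a two-entry stand-in for the real opcode table:

opcodes = {0x00: ("STOP", 0, 0, 0), 0x01: ("ADD", 2, 1, 3)}  # hypothetical slice

def get_opcode_from_name(operation_name):
    for op_code, value in opcodes.items():
        if operation_name == value[0]:   # names are stored first in each tuple
            return op_code
    raise RuntimeError("Unknown opcode")

get_opcode_from_name("ADD")  # -> 1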
projecthamster/hamster
|
src/hamster/lib/layout.py
|
https://github.com/projecthamster/hamster/blob/ca5254eff53172796ddafc72226c394ed1858245/src/hamster/lib/layout.py#L321-L332
|
def enabled(self):
"""whether the user is allowed to interact with the
        widget. Item is enabled only if all its parent elements are"""
enabled = self._enabled
if not enabled:
return False
if self.parent and isinstance(self.parent, Widget):
if self.parent.enabled == False:
return False
return True
|
[
"def",
"enabled",
"(",
"self",
")",
":",
"enabled",
"=",
"self",
".",
"_enabled",
"if",
"not",
"enabled",
":",
"return",
"False",
"if",
"self",
".",
"parent",
"and",
"isinstance",
"(",
"self",
".",
"parent",
",",
"Widget",
")",
":",
"if",
"self",
".",
"parent",
".",
"enabled",
"==",
"False",
":",
"return",
"False",
"return",
"True"
] |
whether the user is allowed to interact with the
widget. Item is enabled only if all its parent elements are
|
[
"whether",
"the",
"user",
"is",
"allowed",
"to",
"interact",
"with",
"the",
"widget",
".",
"Item",
"is",
"enabled",
"only",
"if",
"all",
"it",
"s",
"parent",
"elements",
"are"
] |
python
|
train
|
chaoss/grimoirelab-sortinghat
|
sortinghat/utils.py
|
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/utils.py#L87-L107
|
def str_to_datetime(ts):
"""Format a string to a datetime object.
    This function supports several date formats like YYYY-MM-DD, MM-DD-YYYY
and YY-MM-DD. When the given data is None or an empty string, the function
returns None.
:param ts: string to convert
:returns: a datetime object
    :raises InvalidDateError: when the given string cannot be converted into
a valid date
"""
if not ts:
return None
try:
return dateutil.parser.parse(ts).replace(tzinfo=None)
except Exception:
raise InvalidDateError(date=str(ts))
|
[
"def",
"str_to_datetime",
"(",
"ts",
")",
":",
"if",
"not",
"ts",
":",
"return",
"None",
"try",
":",
"return",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"ts",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"except",
"Exception",
":",
"raise",
"InvalidDateError",
"(",
"date",
"=",
"str",
"(",
"ts",
")",
")"
] |
Format a string to a datetime object.
This function supports several date formats like YYYY-MM-DD, MM-DD-YYYY
and YY-MM-DD. When the given data is None or an empty string, the function
returns None.
:param ts: string to convert
:returns: a datetime object
:raises InvalidDateError: when the given string cannot be converted into
a valid date
|
[
"Format",
"a",
"string",
"to",
"a",
"datetime",
"object",
"."
] |
python
|
train
|
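The dateutil call doing the heavy lifting, shown directly:

import dateutil.parser

dateutil.parser.parse("2019-02-11").replace(tzinfo=None)
# -> datetime.datetime(2019, 2, 11, 0, 0)
dateutil.parser.parse("02-11-2019")  # MM-DD-YYYY parses as well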
SectorLabs/django-localized-fields
|
localized_fields/fields/field.py
|
https://github.com/SectorLabs/django-localized-fields/blob/f0ac0f7f2503317fde5d75ba8481e34db83512bd/localized_fields/fields/field.py#L42-L53
|
def contribute_to_class(self, model, name, **kwargs):
"""Adds this field to the specifed model.
Arguments:
            model:
The model to add the field to.
name:
The name of the field to add.
"""
super(LocalizedField, self).contribute_to_class(model, name, **kwargs)
setattr(model, self.name, self.descriptor_class(self))
|
[
"def",
"contribute_to_class",
"(",
"self",
",",
"model",
",",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"LocalizedField",
",",
"self",
")",
".",
"contribute_to_class",
"(",
"model",
",",
"name",
",",
"*",
"*",
"kwargs",
")",
"setattr",
"(",
"model",
",",
"self",
".",
"name",
",",
"self",
".",
"descriptor_class",
"(",
"self",
")",
")"
] |
Adds this field to the specified model.
Arguments:
model:
The model to add the field to.
name:
The name of the field to add.
|
[
"Adds",
"this",
"field",
"to",
"the",
"specifed",
"model",
"."
] |
python
|
train
|
gccxml/pygccxml
|
pygccxml/declarations/pattern_parser.py
|
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/pattern_parser.py#L32-L46
|
def has_pattern(self, decl_string):
"""
Implementation detail
"""
if self.__begin == "<":
# Cleanup parentheses blocks before checking for the pattern
# See also the args() method (in this file) for more explanations.
decl_string = re.sub("\\s\\(.*?\\)", "", decl_string).strip()
last_part = decl_string.split('::')[-1]
return (
decl_string.find(self.__begin) != -1 and
last_part.find(self.__end) != -1
)
|
[
"def",
"has_pattern",
"(",
"self",
",",
"decl_string",
")",
":",
"if",
"self",
".",
"__begin",
"==",
"\"<\"",
":",
"# Cleanup parentheses blocks before checking for the pattern",
"# See also the args() method (in this file) for more explanations.",
"decl_string",
"=",
"re",
".",
"sub",
"(",
"\"\\\\s\\\\(.*?\\\\)\"",
",",
"\"\"",
",",
"decl_string",
")",
".",
"strip",
"(",
")",
"last_part",
"=",
"decl_string",
".",
"split",
"(",
"'::'",
")",
"[",
"-",
"1",
"]",
"return",
"(",
"decl_string",
".",
"find",
"(",
"self",
".",
"__begin",
")",
"!=",
"-",
"1",
"and",
"last_part",
".",
"find",
"(",
"self",
".",
"__end",
")",
"!=",
"-",
"1",
")"
] |
Implementation detail
|
[
"Implementation",
"detail"
] |
python
|
train
|
IdentityPython/pysaml2
|
src/saml2/entity.py
|
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/entity.py#L807-L864
|
def _parse_request(self, enc_request, request_cls, service, binding):
"""Parse a Request
:param enc_request: The request in its transport format
:param request_cls: The type of requests I expect
:param service:
:param binding: Which binding that was used to transport the message
to this entity.
:return: A request instance
"""
_log_info = logger.info
_log_debug = logger.debug
# The addresses I should receive messages like this on
receiver_addresses = self.config.endpoint(service, binding,
self.entity_type)
if not receiver_addresses and self.entity_type == "idp":
for typ in ["aa", "aq", "pdp"]:
receiver_addresses = self.config.endpoint(service, binding, typ)
if receiver_addresses:
break
_log_debug("receiver addresses: %s", receiver_addresses)
_log_debug("Binding: %s", binding)
try:
timeslack = self.config.accepted_time_diff
if not timeslack:
timeslack = 0
except AttributeError:
timeslack = 0
_request = request_cls(self.sec, receiver_addresses,
self.config.attribute_converters,
timeslack=timeslack)
xmlstr = self.unravel(enc_request, binding, request_cls.msgtype)
must = self.config.getattr("want_authn_requests_signed", "idp")
only_valid_cert = self.config.getattr(
"want_authn_requests_only_with_valid_cert", "idp")
if only_valid_cert is None:
only_valid_cert = False
if only_valid_cert:
must = True
_request = _request.loads(xmlstr, binding, origdoc=enc_request,
must=must, only_valid_cert=only_valid_cert)
_log_debug("Loaded request")
if _request:
_request = _request.verify()
_log_debug("Verified request")
if not _request:
return None
else:
return _request
|
[
"def",
"_parse_request",
"(",
"self",
",",
"enc_request",
",",
"request_cls",
",",
"service",
",",
"binding",
")",
":",
"_log_info",
"=",
"logger",
".",
"info",
"_log_debug",
"=",
"logger",
".",
"debug",
"# The addresses I should receive messages like this on",
"receiver_addresses",
"=",
"self",
".",
"config",
".",
"endpoint",
"(",
"service",
",",
"binding",
",",
"self",
".",
"entity_type",
")",
"if",
"not",
"receiver_addresses",
"and",
"self",
".",
"entity_type",
"==",
"\"idp\"",
":",
"for",
"typ",
"in",
"[",
"\"aa\"",
",",
"\"aq\"",
",",
"\"pdp\"",
"]",
":",
"receiver_addresses",
"=",
"self",
".",
"config",
".",
"endpoint",
"(",
"service",
",",
"binding",
",",
"typ",
")",
"if",
"receiver_addresses",
":",
"break",
"_log_debug",
"(",
"\"receiver addresses: %s\"",
",",
"receiver_addresses",
")",
"_log_debug",
"(",
"\"Binding: %s\"",
",",
"binding",
")",
"try",
":",
"timeslack",
"=",
"self",
".",
"config",
".",
"accepted_time_diff",
"if",
"not",
"timeslack",
":",
"timeslack",
"=",
"0",
"except",
"AttributeError",
":",
"timeslack",
"=",
"0",
"_request",
"=",
"request_cls",
"(",
"self",
".",
"sec",
",",
"receiver_addresses",
",",
"self",
".",
"config",
".",
"attribute_converters",
",",
"timeslack",
"=",
"timeslack",
")",
"xmlstr",
"=",
"self",
".",
"unravel",
"(",
"enc_request",
",",
"binding",
",",
"request_cls",
".",
"msgtype",
")",
"must",
"=",
"self",
".",
"config",
".",
"getattr",
"(",
"\"want_authn_requests_signed\"",
",",
"\"idp\"",
")",
"only_valid_cert",
"=",
"self",
".",
"config",
".",
"getattr",
"(",
"\"want_authn_requests_only_with_valid_cert\"",
",",
"\"idp\"",
")",
"if",
"only_valid_cert",
"is",
"None",
":",
"only_valid_cert",
"=",
"False",
"if",
"only_valid_cert",
":",
"must",
"=",
"True",
"_request",
"=",
"_request",
".",
"loads",
"(",
"xmlstr",
",",
"binding",
",",
"origdoc",
"=",
"enc_request",
",",
"must",
"=",
"must",
",",
"only_valid_cert",
"=",
"only_valid_cert",
")",
"_log_debug",
"(",
"\"Loaded request\"",
")",
"if",
"_request",
":",
"_request",
"=",
"_request",
".",
"verify",
"(",
")",
"_log_debug",
"(",
"\"Verified request\"",
")",
"if",
"not",
"_request",
":",
"return",
"None",
"else",
":",
"return",
"_request"
] |
Parse a Request
:param enc_request: The request in its transport format
:param request_cls: The type of requests I expect
:param service:
:param binding: Which binding that was used to transport the message
to this entity.
:return: A request instance
|
[
"Parse",
"a",
"Request"
] |
python
|
train
|
kdeldycke/maildir-deduplicate
|
setup.py
|
https://github.com/kdeldycke/maildir-deduplicate/blob/f1c6ff25b80c6c1a4dc2dc7a65b34d808b0b7733/setup.py#L114-L124
|
def long_description():
""" Collates project README and latest changes. """
changes = latest_changes()
changes[0] = "`Changes for v{}".format(changes[0][1:])
changes[1] = '-' * len(changes[0])
return "\n\n\n".join([
read_file('README.rst'),
'\n'.join(changes),
"`Full changelog <https://{}.readthedocs.io/en/develop/changelog.html"
"#changelog>`_.".format(PACKAGE_NAME),
])
|
[
"def",
"long_description",
"(",
")",
":",
"changes",
"=",
"latest_changes",
"(",
")",
"changes",
"[",
"0",
"]",
"=",
"\"`Changes for v{}\"",
".",
"format",
"(",
"changes",
"[",
"0",
"]",
"[",
"1",
":",
"]",
")",
"changes",
"[",
"1",
"]",
"=",
"'-'",
"*",
"len",
"(",
"changes",
"[",
"0",
"]",
")",
"return",
"\"\\n\\n\\n\"",
".",
"join",
"(",
"[",
"read_file",
"(",
"'README.rst'",
")",
",",
"'\\n'",
".",
"join",
"(",
"changes",
")",
",",
"\"`Full changelog <https://{}.readthedocs.io/en/develop/changelog.html\"",
"\"#changelog>`_.\"",
".",
"format",
"(",
"PACKAGE_NAME",
")",
",",
"]",
")"
] |
Collates project README and latest changes.
|
[
"Collates",
"project",
"README",
"and",
"latest",
"changes",
"."
] |
python
|
train
|
pymc-devs/pymc
|
pymc/Matplot.py
|
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Matplot.py#L717-L786
|
def discrepancy_plot(
data, name='discrepancy', report_p=True, format='png', suffix='-gof',
path='./', fontmap=None, verbose=1):
'''
Generate goodness-of-fit deviate scatter plot.
:Arguments:
data: list
List (or list of lists for vector-valued variables) of discrepancy values, output
            from the `pymc.diagnostics.discrepancy` function.
name: string
The name of the plot.
report_p: bool
Flag for annotating the p-value to the plot.
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix (defaults to "-gof").
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot.
'''
if verbose > 0:
print_('Plotting', name + suffix)
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
# Generate new scatter plot
figure()
try:
x, y = transpose(data)
except ValueError:
x, y = data
scatter(x, y)
# Plot x=y line
lo = nmin(ravel(data))
hi = nmax(ravel(data))
datarange = hi - lo
lo -= 0.1 * datarange
hi += 0.1 * datarange
pyplot((lo, hi), (lo, hi))
# Plot options
xlabel('Observed deviates', fontsize='x-small')
ylabel('Simulated deviates', fontsize='x-small')
if report_p:
# Put p-value in legend
count = sum(s > o for o, s in zip(x, y))
text(lo + 0.1 * datarange, hi - 0.1 * datarange,
'p=%.3f' % (count / len(x)), horizontalalignment='center',
fontsize=10)
# Save to file
if not os.path.exists(path):
os.mkdir(path)
if not path.endswith('/'):
path += '/'
savefig("%s%s%s.%s" % (path, name, suffix, format))
|
[
"def",
"discrepancy_plot",
"(",
"data",
",",
"name",
"=",
"'discrepancy'",
",",
"report_p",
"=",
"True",
",",
"format",
"=",
"'png'",
",",
"suffix",
"=",
"'-gof'",
",",
"path",
"=",
"'./'",
",",
"fontmap",
"=",
"None",
",",
"verbose",
"=",
"1",
")",
":",
"if",
"verbose",
">",
"0",
":",
"print_",
"(",
"'Plotting'",
",",
"name",
"+",
"suffix",
")",
"if",
"fontmap",
"is",
"None",
":",
"fontmap",
"=",
"{",
"1",
":",
"10",
",",
"2",
":",
"8",
",",
"3",
":",
"6",
",",
"4",
":",
"5",
",",
"5",
":",
"4",
"}",
"# Generate new scatter plot",
"figure",
"(",
")",
"try",
":",
"x",
",",
"y",
"=",
"transpose",
"(",
"data",
")",
"except",
"ValueError",
":",
"x",
",",
"y",
"=",
"data",
"scatter",
"(",
"x",
",",
"y",
")",
"# Plot x=y line",
"lo",
"=",
"nmin",
"(",
"ravel",
"(",
"data",
")",
")",
"hi",
"=",
"nmax",
"(",
"ravel",
"(",
"data",
")",
")",
"datarange",
"=",
"hi",
"-",
"lo",
"lo",
"-=",
"0.1",
"*",
"datarange",
"hi",
"+=",
"0.1",
"*",
"datarange",
"pyplot",
"(",
"(",
"lo",
",",
"hi",
")",
",",
"(",
"lo",
",",
"hi",
")",
")",
"# Plot options",
"xlabel",
"(",
"'Observed deviates'",
",",
"fontsize",
"=",
"'x-small'",
")",
"ylabel",
"(",
"'Simulated deviates'",
",",
"fontsize",
"=",
"'x-small'",
")",
"if",
"report_p",
":",
"# Put p-value in legend",
"count",
"=",
"sum",
"(",
"s",
">",
"o",
"for",
"o",
",",
"s",
"in",
"zip",
"(",
"x",
",",
"y",
")",
")",
"text",
"(",
"lo",
"+",
"0.1",
"*",
"datarange",
",",
"hi",
"-",
"0.1",
"*",
"datarange",
",",
"'p=%.3f'",
"%",
"(",
"count",
"/",
"len",
"(",
"x",
")",
")",
",",
"horizontalalignment",
"=",
"'center'",
",",
"fontsize",
"=",
"10",
")",
"# Save to file",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"mkdir",
"(",
"path",
")",
"if",
"not",
"path",
".",
"endswith",
"(",
"'/'",
")",
":",
"path",
"+=",
"'/'",
"savefig",
"(",
"\"%s%s%s.%s\"",
"%",
"(",
"path",
",",
"name",
",",
"suffix",
",",
"format",
")",
")"
] |
Generate goodness-of-fit deviate scatter plot.
:Arguments:
data: list
List (or list of lists for vector-valued variables) of discrepancy values, output
from the `pymc.diagnostics.discrepancy` function.
name: string
The name of the plot.
report_p: bool
Flag for annotating the p-value to the plot.
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix (defaults to "-gof").
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot.
|
[
"Generate",
"goodness",
"-",
"of",
"-",
"fit",
"deviate",
"scatter",
"plot",
".",
":",
"Arguments",
":",
"data",
":",
"list",
"List",
"(",
"or",
"list",
"of",
"lists",
"for",
"vector",
"-",
"valued",
"variables",
")",
"of",
"discrepancy",
"values",
"output",
"from",
"the",
"pymc",
".",
"diagnostics",
".",
"discrepancy",
"function",
"."
] |
python
|
train
|
astrocatalogs/astrocats
|
astrocats/catalog/catalog.py
|
https://github.com/astrocatalogs/astrocats/blob/11abc3131c6366ecd23964369e55ff264add7805/astrocats/catalog/catalog.py#L221-L288
|
def import_data(self):
"""Run all of the import tasks.
This is executed by the 'scripts.main.py' when the module is run as an
executable. This can also be run as a method, in which case default
        arguments are loaded, but can be overridden using `**kwargs`.
"""
tasks_list = self.load_task_list()
warnings.filterwarnings(
'ignore', r'Warning: converting a masked element to nan.')
# FIX
warnings.filterwarnings('ignore', category=DeprecationWarning)
# Delete all old (previously constructed) output files
if self.args.delete_old:
self.log.warning("Deleting all old entry files.")
self.delete_old_entry_files()
# In update mode, load all entry stubs.
if self.args.load_stubs or self.args.update:
self.load_stubs()
if self.args.travis:
self.log.warning("Running in `travis` mode.")
prev_priority = 0
prev_task_name = ''
# for task, task_obj in tasks_list.items():
for task_name, task_obj in tasks_list.items():
if not task_obj.active:
continue
self.log.warning("Task: '{}'".format(task_name))
nice_name = task_obj.nice_name
mod_name = task_obj.module
func_name = task_obj.function
priority = task_obj.priority
# Make sure things are running in the correct order
if priority < prev_priority and priority > 0:
raise RuntimeError("Priority for '{}': '{}', less than prev,"
"'{}': '{}'.\n{}"
.format(task_name, priority, prev_task_name,
prev_priority, task_obj))
self.log.debug("\t{}, {}, {}, {}".format(nice_name, priority,
mod_name, func_name))
mod = importlib.import_module('.' + mod_name, package='astrocats')
self.current_task = task_obj
getattr(mod, func_name)(self)
num_events, num_stubs = self.count()
self.log.warning("Task finished. Events: {}, Stubs: {}".format(
num_events, num_stubs))
self.journal_entries()
num_events, num_stubs = self.count()
self.log.warning("Journal finished. Events: {}, Stubs: {}".format(
num_events, num_stubs))
prev_priority = priority
prev_task_name = task_name
process = psutil.Process(os.getpid())
memory = process.memory_info().rss
self.log.warning('Memory used (MBs): '
'{:,}'.format(memory / 1024. / 1024.))
return
|
[
"def",
"import_data",
"(",
"self",
")",
":",
"tasks_list",
"=",
"self",
".",
"load_task_list",
"(",
")",
"warnings",
".",
"filterwarnings",
"(",
"'ignore'",
",",
"r'Warning: converting a masked element to nan.'",
")",
"# FIX",
"warnings",
".",
"filterwarnings",
"(",
"'ignore'",
",",
"category",
"=",
"DeprecationWarning",
")",
"# Delete all old (previously constructed) output files",
"if",
"self",
".",
"args",
".",
"delete_old",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Deleting all old entry files.\"",
")",
"self",
".",
"delete_old_entry_files",
"(",
")",
"# In update mode, load all entry stubs.",
"if",
"self",
".",
"args",
".",
"load_stubs",
"or",
"self",
".",
"args",
".",
"update",
":",
"self",
".",
"load_stubs",
"(",
")",
"if",
"self",
".",
"args",
".",
"travis",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Running in `travis` mode.\"",
")",
"prev_priority",
"=",
"0",
"prev_task_name",
"=",
"''",
"# for task, task_obj in tasks_list.items():",
"for",
"task_name",
",",
"task_obj",
"in",
"tasks_list",
".",
"items",
"(",
")",
":",
"if",
"not",
"task_obj",
".",
"active",
":",
"continue",
"self",
".",
"log",
".",
"warning",
"(",
"\"Task: '{}'\"",
".",
"format",
"(",
"task_name",
")",
")",
"nice_name",
"=",
"task_obj",
".",
"nice_name",
"mod_name",
"=",
"task_obj",
".",
"module",
"func_name",
"=",
"task_obj",
".",
"function",
"priority",
"=",
"task_obj",
".",
"priority",
"# Make sure things are running in the correct order",
"if",
"priority",
"<",
"prev_priority",
"and",
"priority",
">",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"Priority for '{}': '{}', less than prev,\"",
"\"'{}': '{}'.\\n{}\"",
".",
"format",
"(",
"task_name",
",",
"priority",
",",
"prev_task_name",
",",
"prev_priority",
",",
"task_obj",
")",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"\\t{}, {}, {}, {}\"",
".",
"format",
"(",
"nice_name",
",",
"priority",
",",
"mod_name",
",",
"func_name",
")",
")",
"mod",
"=",
"importlib",
".",
"import_module",
"(",
"'.'",
"+",
"mod_name",
",",
"package",
"=",
"'astrocats'",
")",
"self",
".",
"current_task",
"=",
"task_obj",
"getattr",
"(",
"mod",
",",
"func_name",
")",
"(",
"self",
")",
"num_events",
",",
"num_stubs",
"=",
"self",
".",
"count",
"(",
")",
"self",
".",
"log",
".",
"warning",
"(",
"\"Task finished. Events: {}, Stubs: {}\"",
".",
"format",
"(",
"num_events",
",",
"num_stubs",
")",
")",
"self",
".",
"journal_entries",
"(",
")",
"num_events",
",",
"num_stubs",
"=",
"self",
".",
"count",
"(",
")",
"self",
".",
"log",
".",
"warning",
"(",
"\"Journal finished. Events: {}, Stubs: {}\"",
".",
"format",
"(",
"num_events",
",",
"num_stubs",
")",
")",
"prev_priority",
"=",
"priority",
"prev_task_name",
"=",
"task_name",
"process",
"=",
"psutil",
".",
"Process",
"(",
"os",
".",
"getpid",
"(",
")",
")",
"memory",
"=",
"process",
".",
"memory_info",
"(",
")",
".",
"rss",
"self",
".",
"log",
".",
"warning",
"(",
"'Memory used (MBs): '",
"'{:,}'",
".",
"format",
"(",
"memory",
"/",
"1024.",
"/",
"1024.",
")",
")",
"return"
] |
Run all of the import tasks.
This is executed by the 'scripts.main.py' when the module is run as an
executable. This can also be run as a method, in which case default
arguments are loaded, but can be overridden using `**kwargs`.
|
[
"Run",
"all",
"of",
"the",
"import",
"tasks",
"."
] |
python
|
train
|
zetaops/zengine
|
zengine/views/channel_management.py
|
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/views/channel_management.py#L220-L239
|
def move_chosen_subscribers(self):
"""
        After the splitting operation, only the chosen subscribers
        are moved to the new channel or an existing channel.
"""
from_channel = Channel.objects.get(self.current.task_data['chosen_channels'][0])
to_channel = Channel.objects.get(self.current.task_data['target_channel_key'])
with BlockSave(Subscriber, query_dict={'channel_id': to_channel.key}):
for subscriber in Subscriber.objects.filter(
key__in=self.current.task_data['chosen_subscribers']):
subscriber.channel = to_channel
subscriber.save()
if self.current.task_data['new_channel']:
self.copy_and_move_messages(from_channel, to_channel)
self.current.task_data[
'msg'] = _(u"Chosen subscribers and messages of them migrated from '%s' channel to "
u"'%s' channel successfully.") % (from_channel.name, to_channel.name)
|
[
"def",
"move_chosen_subscribers",
"(",
"self",
")",
":",
"from_channel",
"=",
"Channel",
".",
"objects",
".",
"get",
"(",
"self",
".",
"current",
".",
"task_data",
"[",
"'chosen_channels'",
"]",
"[",
"0",
"]",
")",
"to_channel",
"=",
"Channel",
".",
"objects",
".",
"get",
"(",
"self",
".",
"current",
".",
"task_data",
"[",
"'target_channel_key'",
"]",
")",
"with",
"BlockSave",
"(",
"Subscriber",
",",
"query_dict",
"=",
"{",
"'channel_id'",
":",
"to_channel",
".",
"key",
"}",
")",
":",
"for",
"subscriber",
"in",
"Subscriber",
".",
"objects",
".",
"filter",
"(",
"key__in",
"=",
"self",
".",
"current",
".",
"task_data",
"[",
"'chosen_subscribers'",
"]",
")",
":",
"subscriber",
".",
"channel",
"=",
"to_channel",
"subscriber",
".",
"save",
"(",
")",
"if",
"self",
".",
"current",
".",
"task_data",
"[",
"'new_channel'",
"]",
":",
"self",
".",
"copy_and_move_messages",
"(",
"from_channel",
",",
"to_channel",
")",
"self",
".",
"current",
".",
"task_data",
"[",
"'msg'",
"]",
"=",
"_",
"(",
"u\"Chosen subscribers and messages of them migrated from '%s' channel to \"",
"u\"'%s' channel successfully.\"",
")",
"%",
"(",
"from_channel",
".",
"name",
",",
"to_channel",
".",
"name",
")"
] |
After the splitting operation, only the chosen subscribers
are moved to the new channel or an existing channel.
|
[
"After",
"splitting",
"operation",
"only",
"chosen",
"subscribers",
"are",
"moved",
"to",
"new",
"channel",
"or",
"existing",
"channel",
"."
] |
python
|
train
|
RedFantom/ttkwidgets
|
ttkwidgets/timeline.py
|
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/timeline.py#L732-L735
|
def _set_scroll_v(self, *args):
"""Scroll both categories Canvas and scrolling container"""
self._canvas_categories.yview(*args)
self._canvas_scroll.yview(*args)
|
[
"def",
"_set_scroll_v",
"(",
"self",
",",
"*",
"args",
")",
":",
"self",
".",
"_canvas_categories",
".",
"yview",
"(",
"*",
"args",
")",
"self",
".",
"_canvas_scroll",
".",
"yview",
"(",
"*",
"args",
")"
] |
Scroll both categories Canvas and scrolling container
|
[
"Scroll",
"both",
"categories",
"Canvas",
"and",
"scrolling",
"container"
] |
python
|
train
|
happyleavesaoc/python-snapcast
|
snapcast/control/server.py
|
https://github.com/happyleavesaoc/python-snapcast/blob/9b3c483358677327c7fd6d0666bf474c19d87f19/snapcast/control/server.py#L239-L241
|
def _on_group_mute(self, data):
"""Handle group mute."""
self._groups.get(data.get('id')).update_mute(data)
|
[
"def",
"_on_group_mute",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"_groups",
".",
"get",
"(",
"data",
".",
"get",
"(",
"'id'",
")",
")",
".",
"update_mute",
"(",
"data",
")"
] |
Handle group mute.
|
[
"Handle",
"group",
"mute",
"."
] |
python
|
train
|
MSchnei/pyprf_feature
|
pyprf_feature/analysis/old/pRF_hrfutils.py
|
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_hrfutils.py#L32-L94
|
def spm_hrf_compat(t,
peak_delay=6,
under_delay=16,
peak_disp=1,
under_disp=1,
p_u_ratio=6,
normalize=True,
):
""" SPM HRF function from sum of two gamma PDFs
    This function is designed to be partially compatible with SPM's `spm_hrf.m`
    function.
    The SPM HRF is a *peak* gamma PDF (with location `peak_delay` and
dispersion `peak_disp`), minus an *undershoot* gamma PDF (with location
`under_delay` and dispersion `under_disp`, and divided by the `p_u_ratio`).
Parameters
----------
t : array-like
vector of times at which to sample HRF
peak_delay : float, optional
delay of peak
peak_disp : float, optional
width (dispersion) of peak
under_delay : float, optional
delay of undershoot
under_disp : float, optional
width (dispersion) of undershoot
p_u_ratio : float, optional
peak to undershoot ratio. Undershoot divided by this value before
subtracting from peak.
normalize : {True, False}, optional
        If True, divide HRF values by their maximum before returning, so the
        peak equals 1.
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
See ``spm_hrf.m`` in the SPM distribution.
"""
if len([v for v in [peak_delay, peak_disp, under_delay, under_disp]
if v <= 0]):
raise ValueError("delays and dispersions must be > 0")
# gamma.pdf only defined for t > 0
hrf = np.zeros(t.shape, dtype=np.float)
pos_t = t[t > 0]
peak = sps.gamma.pdf(pos_t,
peak_delay / peak_disp,
loc=0,
scale=peak_disp)
undershoot = sps.gamma.pdf(pos_t,
under_delay / under_disp,
loc=0,
scale=under_disp)
hrf[t > 0] = peak - undershoot / p_u_ratio
if not normalize:
return hrf
return hrf / np.max(hrf)
|
[
"def",
"spm_hrf_compat",
"(",
"t",
",",
"peak_delay",
"=",
"6",
",",
"under_delay",
"=",
"16",
",",
"peak_disp",
"=",
"1",
",",
"under_disp",
"=",
"1",
",",
"p_u_ratio",
"=",
"6",
",",
"normalize",
"=",
"True",
",",
")",
":",
"if",
"len",
"(",
"[",
"v",
"for",
"v",
"in",
"[",
"peak_delay",
",",
"peak_disp",
",",
"under_delay",
",",
"under_disp",
"]",
"if",
"v",
"<=",
"0",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"delays and dispersions must be > 0\"",
")",
"# gamma.pdf only defined for t > 0",
"hrf",
"=",
"np",
".",
"zeros",
"(",
"t",
".",
"shape",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"pos_t",
"=",
"t",
"[",
"t",
">",
"0",
"]",
"peak",
"=",
"sps",
".",
"gamma",
".",
"pdf",
"(",
"pos_t",
",",
"peak_delay",
"/",
"peak_disp",
",",
"loc",
"=",
"0",
",",
"scale",
"=",
"peak_disp",
")",
"undershoot",
"=",
"sps",
".",
"gamma",
".",
"pdf",
"(",
"pos_t",
",",
"under_delay",
"/",
"under_disp",
",",
"loc",
"=",
"0",
",",
"scale",
"=",
"under_disp",
")",
"hrf",
"[",
"t",
">",
"0",
"]",
"=",
"peak",
"-",
"undershoot",
"/",
"p_u_ratio",
"if",
"not",
"normalize",
":",
"return",
"hrf",
"return",
"hrf",
"/",
"np",
".",
"max",
"(",
"hrf",
")"
] |
SPM HRF function from sum of two gamma PDFs
This function is designed to be partially compatible with SPM's `spm_hrf.m`
function.
The SPM HRF is a *peak* gamma PDF (with location `peak_delay` and
dispersion `peak_disp`), minus an *undershoot* gamma PDF (with location
`under_delay` and dispersion `under_disp`, and divided by the `p_u_ratio`).
Parameters
----------
t : array-like
vector of times at which to sample HRF
peak_delay : float, optional
delay of peak
peak_disp : float, optional
width (dispersion) of peak
under_delay : float, optional
delay of undershoot
under_disp : float, optional
width (dispersion) of undershoot
p_u_ratio : float, optional
peak to undershoot ratio. Undershoot divided by this value before
subtracting from peak.
normalize : {True, False}, optional
If True, divide HRF values by their maximum before returning, so the
peak equals 1.
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
See ``spm_hrf.m`` in the SPM distribution.
|
[
"SPM",
"HRF",
"function",
"from",
"sum",
"of",
"two",
"gamma",
"PDFs"
] |
python
|
train
|
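A usage sketch for the double-gamma HRF above; the sampling grid is arbitrary, and note that the np.float alias in the code requires an older numpy:

import numpy as np

t = np.arange(0, 32, 0.1)            # seconds
hrf = spm_hrf_compat(t)              # peak near 5-6 s, undershoot near 16 s
assert np.isclose(hrf.max(), 1.0)    # normalize=True scales the peak to 1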
PmagPy/PmagPy
|
pmagpy/pmag.py
|
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L8933-L8981
|
def parse_site(sample, convention, Z):
"""
parse the site name from the sample name using the specified convention
"""
convention = str(convention)
site = sample # default is that site = sample
#
#
# Sample is final letter on site designation eg: TG001a (used by SIO lab
# in San Diego)
if convention == "1":
return sample[:-1] # peel off terminal character
#
# Site-Sample format eg: BG94-1 (used by PGL lab in Beijing)
#
if convention == "2":
parts = sample.strip('-').split('-')
return parts[0]
#
# Sample is XXXX.YY where XXX is site and YY is sample
#
if convention == "3":
parts = sample.split('.')
return parts[0]
#
    # Sample is XXXXYYY where XXX is site designation and YYY is Z long integer
#
if convention == "4":
k = int(Z) - 1
return sample[0:-k] # peel off Z characters from site
if convention == "5": # sample == site
return sample
if convention == "6": # should be names in orient.txt
print("-W- Finding names in orient.txt is not currently supported")
if convention == "7": # peel off Z characters for site
k = int(Z)
return sample[0:k]
if convention == "8": # peel off Z characters for site
return ""
if convention == "9": # peel off Z characters for site
return sample
print("Error in site parsing routine")
return
|
[
"def",
"parse_site",
"(",
"sample",
",",
"convention",
",",
"Z",
")",
":",
"convention",
"=",
"str",
"(",
"convention",
")",
"site",
"=",
"sample",
"# default is that site = sample",
"#",
"#",
"# Sample is final letter on site designation eg: TG001a (used by SIO lab",
"# in San Diego)",
"if",
"convention",
"==",
"\"1\"",
":",
"return",
"sample",
"[",
":",
"-",
"1",
"]",
"# peel off terminal character",
"#",
"# Site-Sample format eg: BG94-1 (used by PGL lab in Beijing)",
"#",
"if",
"convention",
"==",
"\"2\"",
":",
"parts",
"=",
"sample",
".",
"strip",
"(",
"'-'",
")",
".",
"split",
"(",
"'-'",
")",
"return",
"parts",
"[",
"0",
"]",
"#",
"# Sample is XXXX.YY where XXX is site and YY is sample",
"#",
"if",
"convention",
"==",
"\"3\"",
":",
"parts",
"=",
"sample",
".",
"split",
"(",
"'.'",
")",
"return",
"parts",
"[",
"0",
"]",
"#",
"# Sample is XXXXYYY where XXX is site desgnation and YYY is Z long integer",
"#",
"if",
"convention",
"==",
"\"4\"",
":",
"k",
"=",
"int",
"(",
"Z",
")",
"-",
"1",
"return",
"sample",
"[",
"0",
":",
"-",
"k",
"]",
"# peel off Z characters from site",
"if",
"convention",
"==",
"\"5\"",
":",
"# sample == site",
"return",
"sample",
"if",
"convention",
"==",
"\"6\"",
":",
"# should be names in orient.txt",
"print",
"(",
"\"-W- Finding names in orient.txt is not currently supported\"",
")",
"if",
"convention",
"==",
"\"7\"",
":",
"# peel off Z characters for site",
"k",
"=",
"int",
"(",
"Z",
")",
"return",
"sample",
"[",
"0",
":",
"k",
"]",
"if",
"convention",
"==",
"\"8\"",
":",
"# peel off Z characters for site",
"return",
"\"\"",
"if",
"convention",
"==",
"\"9\"",
":",
"# peel off Z characters for site",
"return",
"sample",
"print",
"(",
"\"Error in site parsing routine\"",
")",
"return"
] |
parse the site name from the sample name using the specified convention
|
[
"parse",
"the",
"site",
"name",
"from",
"the",
"sample",
"name",
"using",
"the",
"specified",
"convention"
] |
python
|
train
|
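Usage sketch for the `parse_site` entry above — illustrative calls, one per convention, with invented sample names (assumes `pmagpy` is installed):

from pmagpy import pmag

pmag.parse_site('TG001a', '1', 0)   # -> 'TG001' (strip terminal character)
pmag.parse_site('BG94-1', '2', 0)   # -> 'BG94'  (text before the dash)
pmag.parse_site('MW12.07', '3', 0)  # -> 'MW12'  (text before the dot)
pmag.parse_site('ABC123', '7', 3)   # -> 'ABC'   (first Z=3 characters)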
SuperCowPowers/workbench
|
workbench/server/els_indexer.py
|
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/server/els_indexer.py#L52-L73
|
def search(self, index_name, query):
"""Search the given index_name with the given ELS query.
Args:
index_name: Name of the Index
query: The string to be searched.
Returns:
List of results.
Raises:
RuntimeError: When the search query fails.
"""
try:
results = self.els_search.search(index=index_name, body=query)
return results
        except Exception as error:
            error_str = 'Query failed: %s\n' % str(error)
            error_str += '\nIs there a dynamic script in the query?, see www.elasticsearch.org'
            print(error_str)
raise RuntimeError(error_str)
|
[
"def",
"search",
"(",
"self",
",",
"index_name",
",",
"query",
")",
":",
"try",
":",
"results",
"=",
"self",
".",
"els_search",
".",
"search",
"(",
"index",
"=",
"index_name",
",",
"body",
"=",
"query",
")",
"return",
"results",
"except",
"Exception",
",",
"error",
":",
"error_str",
"=",
"'Query failed: %s\\n'",
"%",
"str",
"(",
"error",
")",
"error_str",
"+=",
"'\\nIs there a dynamic script in the query?, see www.elasticsearch.org'",
"print",
"error_str",
"raise",
"RuntimeError",
"(",
"error_str",
")"
] |
Search the given index_name with the given ELS query.
Args:
index_name: Name of the Index
query: The string to be searched.
Returns:
List of results.
Raises:
RuntimeError: When the search query fails.
|
[
"Search",
"the",
"given",
"index_name",
"with",
"the",
"given",
"ELS",
"query",
"."
] |
python
|
train
|
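Usage sketch for the `search` entry above — a hedged call against a hypothetical `indexer` instance wired to a live Elasticsearch cluster; the index name and query body are illustrative and follow the standard query DSL:

query = {'query': {'match': {'filename': 'bad.exe'}}}
try:
    results = indexer.search('workbench_samples', query)
    print(results['hits']['total'])
except RuntimeError as error:
    print('search failed: %s' % error)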
usc-isi-i2/etk
|
etk/extraction.py
|
https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extraction.py#L24-L46
|
def get_string(self, joiner: str = " ") -> str:
"""
Args:
joiner(str): if the value of an extractable is not a string, join the elements
using this string to separate them.
Returns: the value of the segment as a string, using a default method to convert
objects to strings.
"""
if not self._value:
return ""
elif isinstance(self._value, list):
return self.list2str(self._value, joiner)
elif isinstance(self._value, dict):
return self.dict2str(self._value, joiner)
elif isinstance(self.value, numbers.Number):
return str(self.value)
        elif isinstance(self._value, datetime.datetime):
            return self._value.isoformat()
        elif isinstance(self._value, datetime.date):
            return self._value.strftime("%Y-%m-%d")
else:
return self._value
|
[
"def",
"get_string",
"(",
"self",
",",
"joiner",
":",
"str",
"=",
"\" \"",
")",
"->",
"str",
":",
"if",
"not",
"self",
".",
"_value",
":",
"return",
"\"\"",
"elif",
"isinstance",
"(",
"self",
".",
"_value",
",",
"list",
")",
":",
"return",
"self",
".",
"list2str",
"(",
"self",
".",
"_value",
",",
"joiner",
")",
"elif",
"isinstance",
"(",
"self",
".",
"_value",
",",
"dict",
")",
":",
"return",
"self",
".",
"dict2str",
"(",
"self",
".",
"_value",
",",
"joiner",
")",
"elif",
"isinstance",
"(",
"self",
".",
"value",
",",
"numbers",
".",
"Number",
")",
":",
"return",
"str",
"(",
"self",
".",
"value",
")",
"elif",
"isinstance",
"(",
"self",
".",
"_value",
",",
"datetime",
".",
"date",
")",
":",
"return",
"self",
".",
"_value",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
"elif",
"isinstance",
"(",
"self",
".",
"_value",
",",
"datetime",
".",
"datetime",
")",
":",
"return",
"self",
".",
"_value",
".",
"isoformat",
"(",
")",
"else",
":",
"return",
"self",
".",
"_value"
] |
Args:
joiner(str): if the value of an extractable is not a string, join the elements
using this string to separate them.
Returns: the value of the segment as a string, using a default method to convert
objects to strings.
|
[
"Args",
":",
"joiner",
"(",
"str",
")",
":",
"if",
"the",
"value",
"of",
"an",
"extractable",
"is",
"not",
"a",
"string",
"join",
"the",
"elements",
"using",
"this",
"string",
"to",
"separate",
"them",
"."
] |
python
|
train
|
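Usage sketch for the `get_string` entry above — the construction and direct `_value` assignment are only for illustration of the type dispatch:

import datetime

seg = Extractable()                 # hypothetical construction
seg._value = ['news', 'article']
print(seg.get_string(joiner=', '))  # 'news, article' via list2str
seg._value = datetime.datetime(2019, 1, 2, 3, 4)
print(seg.get_string())             # '2019-01-02T03:04:00' via isoformat()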
reingart/gui2py
|
gui/controls/listview.py
|
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/controls/listview.py#L83-L89
|
def DeleteItem(self, item):
"Remove the item from the list and unset the related data"
wx_data = self.GetItemData(item)
py_data = self._py_data_map[wx_data]
del self._py_data_map[wx_data]
del self._wx_data_map[py_data]
wx.ListCtrl.DeleteItem(self, item)
|
[
"def",
"DeleteItem",
"(",
"self",
",",
"item",
")",
":",
"wx_data",
"=",
"self",
".",
"GetItemData",
"(",
"item",
")",
"py_data",
"=",
"self",
".",
"_py_data_map",
"[",
"wx_data",
"]",
"del",
"self",
".",
"_py_data_map",
"[",
"wx_data",
"]",
"del",
"self",
".",
"_wx_data_map",
"[",
"py_data",
"]",
"wx",
".",
"ListCtrl",
".",
"DeleteItem",
"(",
"self",
",",
"item",
")"
] |
Remove the item from the list and unset the related data
|
[
"Remove",
"the",
"item",
"from",
"the",
"list",
"and",
"unset",
"the",
"related",
"data"
] |
python
|
test
|
postlund/pyatv
|
pyatv/convert.py
|
https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/convert.py#L6-L17
|
def media_kind(kind):
"""Convert iTunes media kind to API representation."""
if kind in [1]:
return const.MEDIA_TYPE_UNKNOWN
if kind in [3, 7, 11, 12, 13, 18, 32]:
return const.MEDIA_TYPE_VIDEO
if kind in [2, 4, 10, 14, 17, 21, 36]:
return const.MEDIA_TYPE_MUSIC
if kind in [8, 64]:
return const.MEDIA_TYPE_TV
raise exceptions.UnknownMediaKind('Unknown media kind: ' + str(kind))
|
[
"def",
"media_kind",
"(",
"kind",
")",
":",
"if",
"kind",
"in",
"[",
"1",
"]",
":",
"return",
"const",
".",
"MEDIA_TYPE_UNKNOWN",
"if",
"kind",
"in",
"[",
"3",
",",
"7",
",",
"11",
",",
"12",
",",
"13",
",",
"18",
",",
"32",
"]",
":",
"return",
"const",
".",
"MEDIA_TYPE_VIDEO",
"if",
"kind",
"in",
"[",
"2",
",",
"4",
",",
"10",
",",
"14",
",",
"17",
",",
"21",
",",
"36",
"]",
":",
"return",
"const",
".",
"MEDIA_TYPE_MUSIC",
"if",
"kind",
"in",
"[",
"8",
",",
"64",
"]",
":",
"return",
"const",
".",
"MEDIA_TYPE_TV",
"raise",
"exceptions",
".",
"UnknownMediaKind",
"(",
"'Unknown media kind: '",
"+",
"str",
"(",
"kind",
")",
")"
] |
Convert iTunes media kind to API representation.
|
[
"Convert",
"iTunes",
"media",
"kind",
"to",
"API",
"representation",
"."
] |
python
|
train
|
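Usage sketch for the `media_kind` entry above (assumes `pyatv` is installed):

from pyatv import const, convert, exceptions

assert convert.media_kind(3) == const.MEDIA_TYPE_VIDEO
assert convert.media_kind(2) == const.MEDIA_TYPE_MUSIC
try:
    convert.media_kind(99)                  # not in any known bucket
except exceptions.UnknownMediaKind as ex:
    print(ex)                               # Unknown media kind: 99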
mikedh/trimesh
|
trimesh/curvature.py
|
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/curvature.py#L78-L118
|
def discrete_mean_curvature_measure(mesh, points, radius):
"""
Return the discrete mean curvature measure of a sphere centered
at a point as detailed in 'Restricted Delaunay triangulations and normal
cycle', Cohen-Steiner and Morvan.
Parameters
----------
points : (n,3) float, list of points in space
radius : float, the sphere radius
Returns
--------
mean_curvature: (n,) float, discrete mean curvature measure.
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
# axis aligned bounds
bounds = np.column_stack((points - radius,
points + radius))
# line segments that intersect axis aligned bounding box
candidates = [list(mesh.face_adjacency_tree.intersection(b))
for b in bounds]
mean_curv = np.empty(len(points))
for i, (x, x_candidates) in enumerate(zip(points, candidates)):
endpoints = mesh.vertices[mesh.face_adjacency_edges[x_candidates]]
lengths = line_ball_intersection(
endpoints[:, 0],
endpoints[:, 1],
center=x,
radius=radius)
angles = mesh.face_adjacency_angles[x_candidates]
signs = np.where(mesh.face_adjacency_convex[x_candidates], 1, -1)
mean_curv[i] = (lengths * angles * signs).sum() / 2
return mean_curv
|
[
"def",
"discrete_mean_curvature_measure",
"(",
"mesh",
",",
"points",
",",
"radius",
")",
":",
"points",
"=",
"np",
".",
"asanyarray",
"(",
"points",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"if",
"not",
"util",
".",
"is_shape",
"(",
"points",
",",
"(",
"-",
"1",
",",
"3",
")",
")",
":",
"raise",
"ValueError",
"(",
"'points must be (n,3)!'",
")",
"# axis aligned bounds",
"bounds",
"=",
"np",
".",
"column_stack",
"(",
"(",
"points",
"-",
"radius",
",",
"points",
"+",
"radius",
")",
")",
"# line segments that intersect axis aligned bounding box",
"candidates",
"=",
"[",
"list",
"(",
"mesh",
".",
"face_adjacency_tree",
".",
"intersection",
"(",
"b",
")",
")",
"for",
"b",
"in",
"bounds",
"]",
"mean_curv",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"points",
")",
")",
"for",
"i",
",",
"(",
"x",
",",
"x_candidates",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"points",
",",
"candidates",
")",
")",
":",
"endpoints",
"=",
"mesh",
".",
"vertices",
"[",
"mesh",
".",
"face_adjacency_edges",
"[",
"x_candidates",
"]",
"]",
"lengths",
"=",
"line_ball_intersection",
"(",
"endpoints",
"[",
":",
",",
"0",
"]",
",",
"endpoints",
"[",
":",
",",
"1",
"]",
",",
"center",
"=",
"x",
",",
"radius",
"=",
"radius",
")",
"angles",
"=",
"mesh",
".",
"face_adjacency_angles",
"[",
"x_candidates",
"]",
"signs",
"=",
"np",
".",
"where",
"(",
"mesh",
".",
"face_adjacency_convex",
"[",
"x_candidates",
"]",
",",
"1",
",",
"-",
"1",
")",
"mean_curv",
"[",
"i",
"]",
"=",
"(",
"lengths",
"*",
"angles",
"*",
"signs",
")",
".",
"sum",
"(",
")",
"/",
"2",
"return",
"mean_curv"
] |
Return the discrete mean curvature measure of a sphere centered
at a point as detailed in 'Restricted Delaunay triangulations and normal
cycle', Cohen-Steiner and Morvan.
Parameters
----------
points : (n,3) float, list of points in space
radius : float, the sphere radius
Returns
--------
mean_curvature: (n,) float, discrete mean curvature measure.
|
[
"Return",
"the",
"discrete",
"mean",
"curvature",
"measure",
"of",
"a",
"sphere",
"centered",
"at",
"a",
"point",
"as",
"detailed",
"in",
"Restricted",
"Delaunay",
"triangulations",
"and",
"normal",
"cycle",
"Cohen",
"-",
"Steiner",
"and",
"Morvan",
"."
] |
python
|
train
|
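Sanity-check sketch for the `discrete_mean_curvature_measure` entry above — for a unit sphere, the measure over a ball containing the whole mesh should approach the analytic integral of mean curvature, 4*pi*r (assumes `trimesh` is installed; agreement is loose on a coarse mesh):

import numpy as np
import trimesh
from trimesh.curvature import discrete_mean_curvature_measure

sphere = trimesh.creation.icosphere(subdivisions=3, radius=1.0)
measure = discrete_mean_curvature_measure(sphere, [[0.0, 0.0, 0.0]], 2.0)
print(measure[0], 4 * np.pi)   # the two values should be close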
ambitioninc/rabbitmq-admin
|
rabbitmq_admin/base.py
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/base.py#L75-L85
|
def _put(self, *args, **kwargs):
"""
A wrapper for putting things. It will also json encode your 'data' parameter
        :returns: None. A failed put raises ``requests.HTTPError`` via
            ``raise_for_status``.
"""
if 'data' in kwargs:
kwargs['data'] = json.dumps(kwargs['data'])
response = requests.put(*args, **kwargs)
response.raise_for_status()
|
[
"def",
"_put",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'data'",
"in",
"kwargs",
":",
"kwargs",
"[",
"'data'",
"]",
"=",
"json",
".",
"dumps",
"(",
"kwargs",
"[",
"'data'",
"]",
")",
"response",
"=",
"requests",
".",
"put",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"response",
".",
"raise_for_status",
"(",
")"
] |
A wrapper for putting things. It will also json encode your 'data' parameter
:returns: None. A failed put raises ``requests.HTTPError`` via
    ``raise_for_status``.
|
[
"A",
"wrapper",
"for",
"putting",
"things",
".",
"It",
"will",
"also",
"json",
"encode",
"your",
"data",
"parameter"
] |
python
|
train
|
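Usage sketch for the `_put` entry above — how a method of the same class might call the helper; URL, payload, and auth are illustrative, and extra keyword arguments flow through to `requests.put`:

self._put(
    'http://localhost:15672/api/users/guest',
    data={'password': 'guest', 'tags': 'administrator'},  # JSON-encoded by _put
    auth=('guest', 'guest'),
)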
yunojuno-archive/django-inbound-email
|
inbound_email/backends/sendgrid.py
|
https://github.com/yunojuno-archive/django-inbound-email/blob/c0c1186fc2ced56b43d6b223e73cd5e8700dfc48/inbound_email/backends/sendgrid.py#L78-L146
|
def parse(self, request):
"""Parse incoming request and return an email instance.
Args:
request: an HttpRequest object, containing the forwarded email, as
per the SendGrid specification for inbound emails.
Returns:
an EmailMultiAlternatives instance, containing the parsed contents
of the inbound email.
TODO: non-UTF8 charset handling.
    TODO: handle headers.
"""
assert isinstance(request, HttpRequest), "Invalid request type: %s" % type(request)
try:
# from_email should never be a list (unless we change our API)
from_email = self._get_addresses([_decode_POST_value(request, 'from')])[0]
# ...but all these can and will be a list
to_email = self._get_addresses([_decode_POST_value(request, 'to')])
cc = self._get_addresses([_decode_POST_value(request, 'cc', default='')])
bcc = self._get_addresses([_decode_POST_value(request, 'bcc', default='')])
subject = _decode_POST_value(request, 'subject')
text = _decode_POST_value(request, 'text', default='')
html = _decode_POST_value(request, 'html', default='')
except IndexError as ex:
raise RequestParseError(
"Inbound request lacks a valid from address: %s." % request.get('from')
)
except MultiValueDictKeyError as ex:
raise RequestParseError("Inbound request is missing required value: %s." % ex)
if "@" not in from_email:
# Light sanity check for potential issues related to taking just the
# first element of the 'from' address list
raise RequestParseError("Could not get a valid from address out of: %s." % request)
email = EmailMultiAlternatives(
subject=subject,
body=text,
from_email=from_email,
to=to_email,
cc=cc,
bcc=bcc,
)
if html is not None and len(html) > 0:
email.attach_alternative(html, "text/html")
# TODO: this won't cope with big files - should really read in in chunks
for n, f in list(request.FILES.items()):
if f.size > self.max_file_size:
logger.debug(
"File attachment %s is too large to process (%sB)",
f.name,
f.size
)
raise AttachmentTooLargeError(
email=email,
filename=f.name,
size=f.size
)
else:
email.attach(f.name, f.read(), f.content_type)
return email
|
[
"def",
"parse",
"(",
"self",
",",
"request",
")",
":",
"assert",
"isinstance",
"(",
"request",
",",
"HttpRequest",
")",
",",
"\"Invalid request type: %s\"",
"%",
"type",
"(",
"request",
")",
"try",
":",
"# from_email should never be a list (unless we change our API)",
"from_email",
"=",
"self",
".",
"_get_addresses",
"(",
"[",
"_decode_POST_value",
"(",
"request",
",",
"'from'",
")",
"]",
")",
"[",
"0",
"]",
"# ...but all these can and will be a list",
"to_email",
"=",
"self",
".",
"_get_addresses",
"(",
"[",
"_decode_POST_value",
"(",
"request",
",",
"'to'",
")",
"]",
")",
"cc",
"=",
"self",
".",
"_get_addresses",
"(",
"[",
"_decode_POST_value",
"(",
"request",
",",
"'cc'",
",",
"default",
"=",
"''",
")",
"]",
")",
"bcc",
"=",
"self",
".",
"_get_addresses",
"(",
"[",
"_decode_POST_value",
"(",
"request",
",",
"'bcc'",
",",
"default",
"=",
"''",
")",
"]",
")",
"subject",
"=",
"_decode_POST_value",
"(",
"request",
",",
"'subject'",
")",
"text",
"=",
"_decode_POST_value",
"(",
"request",
",",
"'text'",
",",
"default",
"=",
"''",
")",
"html",
"=",
"_decode_POST_value",
"(",
"request",
",",
"'html'",
",",
"default",
"=",
"''",
")",
"except",
"IndexError",
"as",
"ex",
":",
"raise",
"RequestParseError",
"(",
"\"Inbound request lacks a valid from address: %s.\"",
"%",
"request",
".",
"get",
"(",
"'from'",
")",
")",
"except",
"MultiValueDictKeyError",
"as",
"ex",
":",
"raise",
"RequestParseError",
"(",
"\"Inbound request is missing required value: %s.\"",
"%",
"ex",
")",
"if",
"\"@\"",
"not",
"in",
"from_email",
":",
"# Light sanity check for potential issues related to taking just the",
"# first element of the 'from' address list",
"raise",
"RequestParseError",
"(",
"\"Could not get a valid from address out of: %s.\"",
"%",
"request",
")",
"email",
"=",
"EmailMultiAlternatives",
"(",
"subject",
"=",
"subject",
",",
"body",
"=",
"text",
",",
"from_email",
"=",
"from_email",
",",
"to",
"=",
"to_email",
",",
"cc",
"=",
"cc",
",",
"bcc",
"=",
"bcc",
",",
")",
"if",
"html",
"is",
"not",
"None",
"and",
"len",
"(",
"html",
")",
">",
"0",
":",
"email",
".",
"attach_alternative",
"(",
"html",
",",
"\"text/html\"",
")",
"# TODO: this won't cope with big files - should really read in in chunks",
"for",
"n",
",",
"f",
"in",
"list",
"(",
"request",
".",
"FILES",
".",
"items",
"(",
")",
")",
":",
"if",
"f",
".",
"size",
">",
"self",
".",
"max_file_size",
":",
"logger",
".",
"debug",
"(",
"\"File attachment %s is too large to process (%sB)\"",
",",
"f",
".",
"name",
",",
"f",
".",
"size",
")",
"raise",
"AttachmentTooLargeError",
"(",
"email",
"=",
"email",
",",
"filename",
"=",
"f",
".",
"name",
",",
"size",
"=",
"f",
".",
"size",
")",
"else",
":",
"email",
".",
"attach",
"(",
"f",
".",
"name",
",",
"f",
".",
"read",
"(",
")",
",",
"f",
".",
"content_type",
")",
"return",
"email"
] |
Parse incoming request and return an email instance.
Args:
request: an HttpRequest object, containing the forwarded email, as
per the SendGrid specification for inbound emails.
Returns:
an EmailMultiAlternatives instance, containing the parsed contents
of the inbound email.
TODO: non-UTF8 charset handling.
TODO: handle headers.
|
[
"Parse",
"incoming",
"request",
"and",
"return",
"an",
"email",
"instance",
"."
] |
python
|
train
|
edx/i18n-tools
|
i18n/generate.py
|
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/generate.py#L138-L150
|
def validate_files(directory, files_to_merge):
"""
Asserts that the given files exist.
files_to_merge is a list of file names (no directories).
directory is the directory (a path object from path.py) in which the files should appear.
raises an Exception if any of the files are not in dir.
"""
for file_path in files_to_merge:
pathname = directory.joinpath(file_path)
if not pathname.exists():
raise Exception("I18N: Cannot generate because file not found: {0}".format(pathname))
# clean sources
clean_pofile(pathname)
|
[
"def",
"validate_files",
"(",
"directory",
",",
"files_to_merge",
")",
":",
"for",
"file_path",
"in",
"files_to_merge",
":",
"pathname",
"=",
"directory",
".",
"joinpath",
"(",
"file_path",
")",
"if",
"not",
"pathname",
".",
"exists",
"(",
")",
":",
"raise",
"Exception",
"(",
"\"I18N: Cannot generate because file not found: {0}\"",
".",
"format",
"(",
"pathname",
")",
")",
"# clean sources",
"clean_pofile",
"(",
"pathname",
")"
] |
Asserts that the given files exist.
files_to_merge is a list of file names (no directories).
directory is the directory (a path object from path.py) in which the files should appear.
raises an Exception if any of the files are not in dir.
|
[
"Asserts",
"that",
"the",
"given",
"files",
"exist",
".",
"files_to_merge",
"is",
"a",
"list",
"of",
"file",
"names",
"(",
"no",
"directories",
")",
".",
"directory",
"is",
"the",
"directory",
"(",
"a",
"path",
"object",
"from",
"path",
".",
"py",
")",
"in",
"which",
"the",
"files",
"should",
"appear",
".",
"raises",
"an",
"Exception",
"if",
"any",
"of",
"the",
"files",
"are",
"not",
"in",
"dir",
"."
] |
python
|
train
|
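Usage sketch for the `validate_files` entry above — depending on the installed `path.py` version the directory class may be `path` rather than `Path`; the directory and file names are illustrative:

from path import Path
from i18n.generate import validate_files

locale_dir = Path('conf/locale/fr/LC_MESSAGES')
validate_files(locale_dir, ['django.po', 'djangojs.po'])  # raises if either is missing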
ssalentin/plip
|
plip/modules/preparation.py
|
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/preparation.py#L1442-L1444
|
def extract_bs(self, cutoff, ligcentroid, resis):
"""Return list of ids from residues belonging to the binding site"""
return [obres.GetIdx() for obres in resis if self.res_belongs_to_bs(obres, cutoff, ligcentroid)]
|
[
"def",
"extract_bs",
"(",
"self",
",",
"cutoff",
",",
"ligcentroid",
",",
"resis",
")",
":",
"return",
"[",
"obres",
".",
"GetIdx",
"(",
")",
"for",
"obres",
"in",
"resis",
"if",
"self",
".",
"res_belongs_to_bs",
"(",
"obres",
",",
"cutoff",
",",
"ligcentroid",
")",
"]"
] |
Return list of ids from residues belonging to the binding site
|
[
"Return",
"list",
"of",
"ids",
"from",
"residues",
"belonging",
"to",
"the",
"binding",
"site"
] |
python
|
train
|
svinota/mdns
|
mdns/zeroconf.py
|
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1868-L1900
|
def check_service(self, info):
"""Checks the network for a unique service name, modifying the
ServiceInfo passed in if it is not unique."""
now = current_time_millis()
next_time = now
i = 0
while i < 3:
for record in self.cache.entries_with_name(info.type):
if record.type == _TYPE_PTR and \
not record.is_expired(now) and \
record.alias == info.name:
if (info.name.find('.') < 0):
info.name = info.name + ".[" + \
info.address + \
":" + info.port + \
"]." + info.type
self.check_service(info)
return
raise NonUniqueNameException
if now < next_time:
self.wait(next_time - now)
now = current_time_millis()
continue
out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
self.debug = out
out.add_question(
DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN))
out.add_authorative_answer(
DNSPointer(info.type,
_TYPE_PTR, _CLASS_IN, info.ttl, info.name))
self.send(out)
i += 1
next_time += _CHECK_TIME
|
[
"def",
"check_service",
"(",
"self",
",",
"info",
")",
":",
"now",
"=",
"current_time_millis",
"(",
")",
"next_time",
"=",
"now",
"i",
"=",
"0",
"while",
"i",
"<",
"3",
":",
"for",
"record",
"in",
"self",
".",
"cache",
".",
"entries_with_name",
"(",
"info",
".",
"type",
")",
":",
"if",
"record",
".",
"type",
"==",
"_TYPE_PTR",
"and",
"not",
"record",
".",
"is_expired",
"(",
"now",
")",
"and",
"record",
".",
"alias",
"==",
"info",
".",
"name",
":",
"if",
"(",
"info",
".",
"name",
".",
"find",
"(",
"'.'",
")",
"<",
"0",
")",
":",
"info",
".",
"name",
"=",
"info",
".",
"name",
"+",
"\".[\"",
"+",
"info",
".",
"address",
"+",
"\":\"",
"+",
"info",
".",
"port",
"+",
"\"].\"",
"+",
"info",
".",
"type",
"self",
".",
"check_service",
"(",
"info",
")",
"return",
"raise",
"NonUniqueNameException",
"if",
"now",
"<",
"next_time",
":",
"self",
".",
"wait",
"(",
"next_time",
"-",
"now",
")",
"now",
"=",
"current_time_millis",
"(",
")",
"continue",
"out",
"=",
"DNSOutgoing",
"(",
"_FLAGS_QR_QUERY",
"|",
"_FLAGS_AA",
")",
"self",
".",
"debug",
"=",
"out",
"out",
".",
"add_question",
"(",
"DNSQuestion",
"(",
"info",
".",
"type",
",",
"_TYPE_PTR",
",",
"_CLASS_IN",
")",
")",
"out",
".",
"add_authorative_answer",
"(",
"DNSPointer",
"(",
"info",
".",
"type",
",",
"_TYPE_PTR",
",",
"_CLASS_IN",
",",
"info",
".",
"ttl",
",",
"info",
".",
"name",
")",
")",
"self",
".",
"send",
"(",
"out",
")",
"i",
"+=",
"1",
"next_time",
"+=",
"_CHECK_TIME"
] |
Checks the network for a unique service name, modifying the
ServiceInfo passed in if it is not unique.
|
[
"Checks",
"the",
"network",
"for",
"a",
"unique",
"service",
"name",
"modifying",
"the",
"ServiceInfo",
"passed",
"in",
"if",
"it",
"is",
"not",
"unique",
"."
] |
python
|
train
|
poppy-project/pypot
|
pypot/vrep/remoteApiBindings/vrep.py
|
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L1254-L1290
|
def simxQuery(clientID, signalName, signalValue, retSignalName, timeOutInMs):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
retSignalLength = ct.c_int();
retSignalValue = ct.POINTER(ct.c_ubyte)()
sigV=signalValue
if sys.version_info[0] == 3:
if type(signalName) is str:
signalName=signalName.encode('utf-8')
if type(retSignalName) is str:
retSignalName=retSignalName.encode('utf-8')
if type(signalValue) is bytearray:
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
if type(signalValue) is str:
signalValue=signalValue.encode('utf-8')
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
else:
if type(signalValue) is bytearray:
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
if type(signalValue) is str:
signalValue=bytearray(signalValue)
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
sigV=ct.cast(sigV,ct.POINTER(ct.c_ubyte)) # IronPython needs this
ret = c_Query(clientID, signalName, sigV, len(signalValue), retSignalName, ct.byref(retSignalValue), ct.byref(retSignalLength), timeOutInMs)
a = bytearray()
if ret == 0:
for i in range(retSignalLength.value):
a.append(retSignalValue[i])
if sys.version_info[0] != 3:
a=str(a)
return ret, a
|
[
"def",
"simxQuery",
"(",
"clientID",
",",
"signalName",
",",
"signalValue",
",",
"retSignalName",
",",
"timeOutInMs",
")",
":",
"retSignalLength",
"=",
"ct",
".",
"c_int",
"(",
")",
"retSignalValue",
"=",
"ct",
".",
"POINTER",
"(",
"ct",
".",
"c_ubyte",
")",
"(",
")",
"sigV",
"=",
"signalValue",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"3",
":",
"if",
"type",
"(",
"signalName",
")",
"is",
"str",
":",
"signalName",
"=",
"signalName",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"type",
"(",
"retSignalName",
")",
"is",
"str",
":",
"retSignalName",
"=",
"retSignalName",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"type",
"(",
"signalValue",
")",
"is",
"bytearray",
":",
"sigV",
"=",
"(",
"ct",
".",
"c_ubyte",
"*",
"len",
"(",
"signalValue",
")",
")",
"(",
"*",
"signalValue",
")",
"if",
"type",
"(",
"signalValue",
")",
"is",
"str",
":",
"signalValue",
"=",
"signalValue",
".",
"encode",
"(",
"'utf-8'",
")",
"sigV",
"=",
"(",
"ct",
".",
"c_ubyte",
"*",
"len",
"(",
"signalValue",
")",
")",
"(",
"*",
"signalValue",
")",
"else",
":",
"if",
"type",
"(",
"signalValue",
")",
"is",
"bytearray",
":",
"sigV",
"=",
"(",
"ct",
".",
"c_ubyte",
"*",
"len",
"(",
"signalValue",
")",
")",
"(",
"*",
"signalValue",
")",
"if",
"type",
"(",
"signalValue",
")",
"is",
"str",
":",
"signalValue",
"=",
"bytearray",
"(",
"signalValue",
")",
"sigV",
"=",
"(",
"ct",
".",
"c_ubyte",
"*",
"len",
"(",
"signalValue",
")",
")",
"(",
"*",
"signalValue",
")",
"sigV",
"=",
"ct",
".",
"cast",
"(",
"sigV",
",",
"ct",
".",
"POINTER",
"(",
"ct",
".",
"c_ubyte",
")",
")",
"# IronPython needs this",
"ret",
"=",
"c_Query",
"(",
"clientID",
",",
"signalName",
",",
"sigV",
",",
"len",
"(",
"signalValue",
")",
",",
"retSignalName",
",",
"ct",
".",
"byref",
"(",
"retSignalValue",
")",
",",
"ct",
".",
"byref",
"(",
"retSignalLength",
")",
",",
"timeOutInMs",
")",
"a",
"=",
"bytearray",
"(",
")",
"if",
"ret",
"==",
"0",
":",
"for",
"i",
"in",
"range",
"(",
"retSignalLength",
".",
"value",
")",
":",
"a",
".",
"append",
"(",
"retSignalValue",
"[",
"i",
"]",
")",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"!=",
"3",
":",
"a",
"=",
"str",
"(",
"a",
")",
"return",
"ret",
",",
"a"
] |
Please have a look at the function description/documentation in the V-REP user manual
|
[
"Please",
"have",
"a",
"look",
"at",
"the",
"function",
"description",
"/",
"documentation",
"in",
"the",
"V",
"-",
"REP",
"user",
"manual"
] |
python
|
train
|
santosjorge/cufflinks
|
cufflinks/date_tools.py
|
https://github.com/santosjorge/cufflinks/blob/ca1cbf93998dc793d0b1f8ac30fe1f2bd105f63a/cufflinks/date_tools.py#L9-L18
|
def getDateFromToday(delta,strfmt='%Y%m%d'):
""" Returns a string that represents a date n numbers of days from today.
Parameters:
-----------
delta : int
number of days
strfmt : string
format in which the date will be represented
"""
return (dt.date.today() + dt.timedelta(delta)).strftime(strfmt)
|
[
"def",
"getDateFromToday",
"(",
"delta",
",",
"strfmt",
"=",
"'%Y%m%d'",
")",
":",
"return",
"(",
"dt",
".",
"date",
".",
"today",
"(",
")",
"+",
"dt",
".",
"timedelta",
"(",
"delta",
")",
")",
".",
"strftime",
"(",
"strfmt",
")"
] |
Returns a string that represents a date n days from today.
Parameters:
-----------
delta : int
number of days
strfmt : string
format in which the date will be represented
|
[
"Returns",
"a",
"string",
"that",
"represents",
"a",
"date",
"n",
"numbers",
"of",
"days",
"from",
"today",
".",
"Parameters",
":",
"-----------",
"delta",
":",
"int",
"number",
"of",
"days",
"strfmt",
":",
"string",
"format",
"in",
"which",
"the",
"date",
"will",
"be",
"represented"
] |
python
|
train
|
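Usage sketch for the `getDateFromToday` entry above (outputs depend on the current date):

from cufflinks.date_tools import getDateFromToday

print(getDateFromToday(0))                # today, e.g. '20190301'
print(getDateFromToday(-7))               # one week ago, default '%Y%m%d'
print(getDateFromToday(30, '%Y-%m-%d'))   # thirty days ahead, ISO-style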
jonathf/chaospy
|
chaospy/descriptives/sensitivity/total.py
|
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/descriptives/sensitivity/total.py#L8-L47
|
def Sens_t(poly, dist, **kws):
"""
Variance-based decomposition
AKA Sobol' indices
Total effect sensitivity index
Args:
poly (Poly):
        Polynomial to find total order Sobol indices on.
dist (Dist):
The distributions of the input used in ``poly``.
Returns:
(numpy.ndarray) :
        Total order sensitivity indices for each parameter in ``poly``,
with shape ``(len(dist),) + poly.shape``.
Examples:
>>> x, y = chaospy.variable(2)
>>> poly = chaospy.Poly([1, x, y, 10*x*y])
>>> dist = chaospy.Iid(chaospy.Uniform(0, 1), 2)
>>> indices = chaospy.Sens_t(poly, dist)
>>> print(indices)
[[0. 1. 0. 0.57142857]
[0. 0. 1. 0.57142857]]
"""
dim = len(dist)
if poly.dim < dim:
poly = chaospy.poly.setdim(poly, len(dist))
zero = [1]*dim
out = numpy.zeros((dim,) + poly.shape, dtype=float)
V = Var(poly, dist, **kws)
for i in range(dim):
zero[i] = 0
out[i] = ((V-Var(E_cond(poly, zero, dist, **kws), dist, **kws)) /
(V+(V == 0))**(V!=0))
zero[i] = 1
return out
|
[
"def",
"Sens_t",
"(",
"poly",
",",
"dist",
",",
"*",
"*",
"kws",
")",
":",
"dim",
"=",
"len",
"(",
"dist",
")",
"if",
"poly",
".",
"dim",
"<",
"dim",
":",
"poly",
"=",
"chaospy",
".",
"poly",
".",
"setdim",
"(",
"poly",
",",
"len",
"(",
"dist",
")",
")",
"zero",
"=",
"[",
"1",
"]",
"*",
"dim",
"out",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"dim",
",",
")",
"+",
"poly",
".",
"shape",
",",
"dtype",
"=",
"float",
")",
"V",
"=",
"Var",
"(",
"poly",
",",
"dist",
",",
"*",
"*",
"kws",
")",
"for",
"i",
"in",
"range",
"(",
"dim",
")",
":",
"zero",
"[",
"i",
"]",
"=",
"0",
"out",
"[",
"i",
"]",
"=",
"(",
"(",
"V",
"-",
"Var",
"(",
"E_cond",
"(",
"poly",
",",
"zero",
",",
"dist",
",",
"*",
"*",
"kws",
")",
",",
"dist",
",",
"*",
"*",
"kws",
")",
")",
"/",
"(",
"V",
"+",
"(",
"V",
"==",
"0",
")",
")",
"**",
"(",
"V",
"!=",
"0",
")",
")",
"zero",
"[",
"i",
"]",
"=",
"1",
"return",
"out"
] |
Variance-based decomposition
AKA Sobol' indices
Total effect sensitivity index
Args:
poly (Poly):
Polynomial to find total order Sobol indices on.
dist (Dist):
The distributions of the input used in ``poly``.
Returns:
(numpy.ndarray) :
Total order sensitivity indices for each parameter in ``poly``,
with shape ``(len(dist),) + poly.shape``.
Examples:
>>> x, y = chaospy.variable(2)
>>> poly = chaospy.Poly([1, x, y, 10*x*y])
>>> dist = chaospy.Iid(chaospy.Uniform(0, 1), 2)
>>> indices = chaospy.Sens_t(poly, dist)
>>> print(indices)
[[0. 1. 0. 0.57142857]
[0. 0. 1. 0.57142857]]
|
[
"Variance",
"-",
"based",
"decomposition",
"AKA",
"Sobol",
"indices"
] |
python
|
train
|
opendatateam/udata
|
udata/harvest/commands.py
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L129-L132
|
def unschedule(identifier):
'''Unschedule a periodical harvest job'''
source = actions.unschedule(identifier)
log.info('Unscheduled harvest source "%s"', source.name)
|
[
"def",
"unschedule",
"(",
"identifier",
")",
":",
"source",
"=",
"actions",
".",
"unschedule",
"(",
"identifier",
")",
"log",
".",
"info",
"(",
"'Unscheduled harvest source \"%s\"'",
",",
"source",
".",
"name",
")"
] |
Unschedule a periodical harvest job
|
[
"Unschedule",
"a",
"periodical",
"harvest",
"job"
] |
python
|
train
|
amcat/nlpipe
|
nlpipe/modules/corenlp.py
|
https://github.com/amcat/nlpipe/blob/e9dcf0214d5dc6ba3900b8d7359909e1e33f1ce7/nlpipe/modules/corenlp.py#L163-L205
|
def corenlp2naf(xml_bytes, annotators):
"""
    Call CoreNLP on the text and return a Naf object
"""
naf = KafNafParser(type="NAF")
try:
doc = Document(xml_bytes)
except:
log.exception("Error on parsing xml")
raise
terms = {} # (xml_sentid, xml_tokenid) : term
for sent in doc.sentences:
for t in sent.tokens:
wf = naf.create_wf(t.word, sent.id, t.character_offset_begin)
term = naf.create_term(t.lemma, POSMAP[t.pos], t.pos, [wf])
terms[sent.id, t.id] = term
if t.ner not in (None, 'O'):
naf.create_entity(t.ner, [term.get_id()])
if sent.collapsed_ccprocessed_dependencies:
dependencies = True
for dep in sent.collapsed_ccprocessed_dependencies.links:
if dep.type != 'root':
child = terms[sent.id, dep.dependent.idx]
parent = terms[sent.id, dep.governor.idx]
comment = "{t}({o}, {s})".format(s=child.get_lemma(), t=dep.type, o=parent.get_lemma())
naf.create_dependency(child.get_id(), parent.get_id(), dep.type, comment=comment)
if doc.coreferences:
for coref in doc.coreferences:
cterms = set()
for m in coref.mentions:
cterms |= {terms[m.sentence.id, t.id].get_id() for t in m.tokens}
naf.create_coreference("term", cterms)
for annotator in annotators:
if annotator in LAYERMAP:
naf.create_linguistic_processor(LAYERMAP[annotator], "CoreNLP {annotator}".format(**locals()),
get_corenlp_version())
s = BytesIO()
naf.dump(s)
return s.getvalue()
|
[
"def",
"corenlp2naf",
"(",
"xml_bytes",
",",
"annotators",
")",
":",
"naf",
"=",
"KafNafParser",
"(",
"type",
"=",
"\"NAF\"",
")",
"try",
":",
"doc",
"=",
"Document",
"(",
"xml_bytes",
")",
"except",
":",
"log",
".",
"exception",
"(",
"\"Error on parsing xml\"",
")",
"raise",
"terms",
"=",
"{",
"}",
"# (xml_sentid, xml_tokenid) : term",
"for",
"sent",
"in",
"doc",
".",
"sentences",
":",
"for",
"t",
"in",
"sent",
".",
"tokens",
":",
"wf",
"=",
"naf",
".",
"create_wf",
"(",
"t",
".",
"word",
",",
"sent",
".",
"id",
",",
"t",
".",
"character_offset_begin",
")",
"term",
"=",
"naf",
".",
"create_term",
"(",
"t",
".",
"lemma",
",",
"POSMAP",
"[",
"t",
".",
"pos",
"]",
",",
"t",
".",
"pos",
",",
"[",
"wf",
"]",
")",
"terms",
"[",
"sent",
".",
"id",
",",
"t",
".",
"id",
"]",
"=",
"term",
"if",
"t",
".",
"ner",
"not",
"in",
"(",
"None",
",",
"'O'",
")",
":",
"naf",
".",
"create_entity",
"(",
"t",
".",
"ner",
",",
"[",
"term",
".",
"get_id",
"(",
")",
"]",
")",
"if",
"sent",
".",
"collapsed_ccprocessed_dependencies",
":",
"dependencies",
"=",
"True",
"for",
"dep",
"in",
"sent",
".",
"collapsed_ccprocessed_dependencies",
".",
"links",
":",
"if",
"dep",
".",
"type",
"!=",
"'root'",
":",
"child",
"=",
"terms",
"[",
"sent",
".",
"id",
",",
"dep",
".",
"dependent",
".",
"idx",
"]",
"parent",
"=",
"terms",
"[",
"sent",
".",
"id",
",",
"dep",
".",
"governor",
".",
"idx",
"]",
"comment",
"=",
"\"{t}({o}, {s})\"",
".",
"format",
"(",
"s",
"=",
"child",
".",
"get_lemma",
"(",
")",
",",
"t",
"=",
"dep",
".",
"type",
",",
"o",
"=",
"parent",
".",
"get_lemma",
"(",
")",
")",
"naf",
".",
"create_dependency",
"(",
"child",
".",
"get_id",
"(",
")",
",",
"parent",
".",
"get_id",
"(",
")",
",",
"dep",
".",
"type",
",",
"comment",
"=",
"comment",
")",
"if",
"doc",
".",
"coreferences",
":",
"for",
"coref",
"in",
"doc",
".",
"coreferences",
":",
"cterms",
"=",
"set",
"(",
")",
"for",
"m",
"in",
"coref",
".",
"mentions",
":",
"cterms",
"|=",
"{",
"terms",
"[",
"m",
".",
"sentence",
".",
"id",
",",
"t",
".",
"id",
"]",
".",
"get_id",
"(",
")",
"for",
"t",
"in",
"m",
".",
"tokens",
"}",
"naf",
".",
"create_coreference",
"(",
"\"term\"",
",",
"cterms",
")",
"for",
"annotator",
"in",
"annotators",
":",
"if",
"annotator",
"in",
"LAYERMAP",
":",
"naf",
".",
"create_linguistic_processor",
"(",
"LAYERMAP",
"[",
"annotator",
"]",
",",
"\"CoreNLP {annotator}\"",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
",",
"get_corenlp_version",
"(",
")",
")",
"s",
"=",
"BytesIO",
"(",
")",
"naf",
".",
"dump",
"(",
"s",
")",
"return",
"s",
".",
"getvalue",
"(",
")"
] |
Call CoreNLP on the text and return a Naf object
|
[
"Call",
"from",
"on",
"the",
"text",
"and",
"return",
"a",
"Naf",
"object"
] |
python
|
train
|
agoragames/haigha
|
haigha/channel_pool.py
|
https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/channel_pool.py#L74-L80
|
def _process_queue(self):
'''
        If there are any messages in the queue, process one of them.
'''
if len(self._queue):
args, kwargs = self._queue.popleft()
self.publish(*args, **kwargs)
|
[
"def",
"_process_queue",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"_queue",
")",
":",
"args",
",",
"kwargs",
"=",
"self",
".",
"_queue",
".",
"popleft",
"(",
")",
"self",
".",
"publish",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
If there are any messages in the queue, process one of them.
|
[
"If",
"there",
"are",
"any",
"message",
"in",
"the",
"queue",
"process",
"one",
"of",
"them",
"."
] |
python
|
train
|
pypa/pipenv
|
pipenv/utils.py
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/utils.py#L1127-L1149
|
def convert_deps_to_pip(deps, project=None, r=True, include_index=True):
""""Converts a Pipfile-formatted dependency to a pip-formatted one."""
from .vendor.requirementslib.models.requirements import Requirement
dependencies = []
for dep_name, dep in deps.items():
if project:
project.clear_pipfile_cache()
indexes = getattr(project, "pipfile_sources", []) if project is not None else []
new_dep = Requirement.from_pipfile(dep_name, dep)
if new_dep.index:
include_index = True
req = new_dep.as_line(sources=indexes if include_index else None).strip()
dependencies.append(req)
if not r:
return dependencies
# Write requirements.txt to tmp directory.
from .vendor.vistir.path import create_tracked_tempfile
f = create_tracked_tempfile(suffix="-requirements.txt", delete=False)
f.write("\n".join(dependencies).encode("utf-8"))
f.close()
return f.name
|
[
"def",
"convert_deps_to_pip",
"(",
"deps",
",",
"project",
"=",
"None",
",",
"r",
"=",
"True",
",",
"include_index",
"=",
"True",
")",
":",
"from",
".",
"vendor",
".",
"requirementslib",
".",
"models",
".",
"requirements",
"import",
"Requirement",
"dependencies",
"=",
"[",
"]",
"for",
"dep_name",
",",
"dep",
"in",
"deps",
".",
"items",
"(",
")",
":",
"if",
"project",
":",
"project",
".",
"clear_pipfile_cache",
"(",
")",
"indexes",
"=",
"getattr",
"(",
"project",
",",
"\"pipfile_sources\"",
",",
"[",
"]",
")",
"if",
"project",
"is",
"not",
"None",
"else",
"[",
"]",
"new_dep",
"=",
"Requirement",
".",
"from_pipfile",
"(",
"dep_name",
",",
"dep",
")",
"if",
"new_dep",
".",
"index",
":",
"include_index",
"=",
"True",
"req",
"=",
"new_dep",
".",
"as_line",
"(",
"sources",
"=",
"indexes",
"if",
"include_index",
"else",
"None",
")",
".",
"strip",
"(",
")",
"dependencies",
".",
"append",
"(",
"req",
")",
"if",
"not",
"r",
":",
"return",
"dependencies",
"# Write requirements.txt to tmp directory.",
"from",
".",
"vendor",
".",
"vistir",
".",
"path",
"import",
"create_tracked_tempfile",
"f",
"=",
"create_tracked_tempfile",
"(",
"suffix",
"=",
"\"-requirements.txt\"",
",",
"delete",
"=",
"False",
")",
"f",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"dependencies",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"f",
".",
"close",
"(",
")",
"return",
"f",
".",
"name"
] |
Converts a Pipfile-formatted dependency to a pip-formatted one.
|
[
"Converts",
"a",
"Pipfile",
"-",
"formatted",
"dependency",
"to",
"a",
"pip",
"-",
"formatted",
"one",
"."
] |
python
|
train
|
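Usage sketch for the `convert_deps_to_pip` entry above — Pipfile-style values are either a version specifier string or an options dict; `r=False` returns the pip-style lines instead of writing a temporary requirements file (the dependency names are illustrative):

deps = {
    'requests': '*',
    'records': '>0.5.0',
    'django': {'git': 'https://github.com/django/django.git', 'ref': '1.11.4'},
}
lines = convert_deps_to_pip(deps, r=False)  # e.g. ['requests', 'records>0.5.0', ...]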
andreikop/qutepart
|
qutepart/__init__.py
|
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/__init__.py#L1319-L1327
|
def _onShortcutScroll(self, down):
"""Ctrl+Up/Down pressed, scroll viewport
"""
value = self.verticalScrollBar().value()
if down:
value += 1
else:
value -= 1
self.verticalScrollBar().setValue(value)
|
[
"def",
"_onShortcutScroll",
"(",
"self",
",",
"down",
")",
":",
"value",
"=",
"self",
".",
"verticalScrollBar",
"(",
")",
".",
"value",
"(",
")",
"if",
"down",
":",
"value",
"+=",
"1",
"else",
":",
"value",
"-=",
"1",
"self",
".",
"verticalScrollBar",
"(",
")",
".",
"setValue",
"(",
"value",
")"
] |
Ctrl+Up/Down pressed, scroll viewport
|
[
"Ctrl",
"+",
"Up",
"/",
"Down",
"pressed",
"scroll",
"viewport"
] |
python
|
train
|
phoebe-project/phoebe2
|
phoebe/parameters/parameters.py
|
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/parameters.py#L3265-L3284
|
def uniquetwig(self, ps=None):
"""
see also :meth:`twig`
Determine the shortest (more-or-less) twig which will point
to this single Parameter in a given parent :class:`ParameterSet`
:parameter ps: :class:`ParameterSet` in which the returned
uniquetwig will point to this Parameter. If not provided
or None this will default to the parent :class:`phoebe.frontend.bundle.Bundle`,
if available.
:return: uniquetwig
:rtype: str
"""
if ps is None:
ps = self._bundle
if ps is None:
return self.twig
return ps._uniquetwig(self.twig)
|
[
"def",
"uniquetwig",
"(",
"self",
",",
"ps",
"=",
"None",
")",
":",
"if",
"ps",
"is",
"None",
":",
"ps",
"=",
"self",
".",
"_bundle",
"if",
"ps",
"is",
"None",
":",
"return",
"self",
".",
"twig",
"return",
"ps",
".",
"_uniquetwig",
"(",
"self",
".",
"twig",
")"
] |
see also :meth:`twig`
Determine the shortest (more-or-less) twig which will point
to this single Parameter in a given parent :class:`ParameterSet`
:parameter ps: :class:`ParameterSet` in which the returned
uniquetwig will point to this Parameter. If not provided
or None this will default to the parent :class:`phoebe.frontend.bundle.Bundle`,
if available.
:return: uniquetwig
:rtype: str
|
[
"see",
"also",
":",
"meth",
":",
"twig"
] |
python
|
train
|
mayfield/shellish
|
shellish/command/command.py
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L210-L222
|
def parent(self, parent):
""" Copy context from the parent into this instance as well as
        adjusting our depth value to indicate where we exist in a command
tree. """
self._parent = parent
if parent:
pctx = dict((x, getattr(parent, x)) for x in parent.context_keys)
self.inject_context(pctx)
self.depth = parent.depth + 1
for command in self.subcommands.values():
command.parent = self # bump.
else:
self.depth = 0
|
[
"def",
"parent",
"(",
"self",
",",
"parent",
")",
":",
"self",
".",
"_parent",
"=",
"parent",
"if",
"parent",
":",
"pctx",
"=",
"dict",
"(",
"(",
"x",
",",
"getattr",
"(",
"parent",
",",
"x",
")",
")",
"for",
"x",
"in",
"parent",
".",
"context_keys",
")",
"self",
".",
"inject_context",
"(",
"pctx",
")",
"self",
".",
"depth",
"=",
"parent",
".",
"depth",
"+",
"1",
"for",
"command",
"in",
"self",
".",
"subcommands",
".",
"values",
"(",
")",
":",
"command",
".",
"parent",
"=",
"self",
"# bump.",
"else",
":",
"self",
".",
"depth",
"=",
"0"
] |
Copy context from the parent into this instance as well as
adjusting our depth value to indicate where we exist in a command
tree.
|
[
"Copy",
"context",
"from",
"the",
"parent",
"into",
"this",
"instance",
"as",
"well",
"as",
"adjusting",
"or",
"depth",
"value",
"to",
"indicate",
"where",
"we",
"exist",
"in",
"a",
"command",
"tree",
"."
] |
python
|
train
|
saltstack/salt
|
salt/modules/netmiko_mod.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netmiko_mod.py#L319-L348
|
def multi_call(*methods, **kwargs):
'''
Invoke multiple Netmiko methods at once, and return their output, as list.
methods
A list of dictionaries with the following keys:
- ``name``: the name of the Netmiko method to be executed.
- ``args``: list of arguments to be sent to the Netmiko method.
- ``kwargs``: dictionary of arguments to be sent to the Netmiko method.
kwargs
Key-value dictionary with the connection details (when not running
under a Proxy Minion).
'''
kwargs = clean_kwargs(**kwargs)
if 'netmiko.conn' in __proxy__:
conn = __proxy__['netmiko.conn']()
else:
conn, kwargs = _prepare_connection(**kwargs)
ret = []
for method in methods:
# Explicit unpacking
method_name = method['name']
method_args = method.get('args', [])
        method_kwargs = method.get('kwargs', {})
ret.append(getattr(conn, method_name)(*method_args, **method_kwargs))
if 'netmiko.conn' not in __proxy__:
conn.disconnect()
return ret
|
[
"def",
"multi_call",
"(",
"*",
"methods",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"=",
"clean_kwargs",
"(",
"*",
"*",
"kwargs",
")",
"if",
"'netmiko.conn'",
"in",
"__proxy__",
":",
"conn",
"=",
"__proxy__",
"[",
"'netmiko.conn'",
"]",
"(",
")",
"else",
":",
"conn",
",",
"kwargs",
"=",
"_prepare_connection",
"(",
"*",
"*",
"kwargs",
")",
"ret",
"=",
"[",
"]",
"for",
"method",
"in",
"methods",
":",
"# Explicit unpacking",
"method_name",
"=",
"method",
"[",
"'name'",
"]",
"method_args",
"=",
"method",
".",
"get",
"(",
"'args'",
",",
"[",
"]",
")",
"method_kwargs",
"=",
"method",
".",
"get",
"(",
"'kwargs'",
",",
"[",
"]",
")",
"ret",
".",
"append",
"(",
"getattr",
"(",
"conn",
",",
"method_name",
")",
"(",
"*",
"method_args",
",",
"*",
"*",
"method_kwargs",
")",
")",
"if",
"'netmiko.conn'",
"not",
"in",
"__proxy__",
":",
"conn",
".",
"disconnect",
"(",
")",
"return",
"ret"
] |
Invoke multiple Netmiko methods at once, and return their output, as list.
methods
A list of dictionaries with the following keys:
- ``name``: the name of the Netmiko method to be executed.
- ``args``: list of arguments to be sent to the Netmiko method.
- ``kwargs``: dictionary of arguments to be sent to the Netmiko method.
kwargs
Key-value dictionary with the connection details (when not running
under a Proxy Minion).
|
[
"Invoke",
"multiple",
"Netmiko",
"methods",
"at",
"once",
"and",
"return",
"their",
"output",
"as",
"list",
"."
] |
python
|
train
|
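Usage sketch for the `multi_call` entry above — the device details are illustrative; the method names (`send_command`, `send_config_set`) are standard Netmiko calls:

ret = multi_call(
    {'name': 'send_command',
     'args': ['show version']},
    {'name': 'send_config_set',
     'kwargs': {'config_commands': ['hostname sw01-test']}},
    device_type='cisco_ios',
    host='sw01.example.net',
    username='admin',
    password='s3kr3t',
)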
couchbase/couchbase-python-client
|
couchbase_version.py
|
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase_version.py#L84-L105
|
def get_version():
"""
Returns the version from the generated version file without actually
loading it (and thus trying to load the extension module).
"""
if not os.path.exists(VERSION_FILE):
raise VersionNotFound(VERSION_FILE + " does not exist")
fp = open(VERSION_FILE, "r")
vline = None
for x in fp.readlines():
x = x.rstrip()
if not x:
continue
if not x.startswith("__version__"):
continue
vline = x.split('=')[1]
break
if not vline:
raise VersionNotFound("version file present but has no contents")
return vline.strip().rstrip().replace("'", '')
|
[
"def",
"get_version",
"(",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"VERSION_FILE",
")",
":",
"raise",
"VersionNotFound",
"(",
"VERSION_FILE",
"+",
"\" does not exist\"",
")",
"fp",
"=",
"open",
"(",
"VERSION_FILE",
",",
"\"r\"",
")",
"vline",
"=",
"None",
"for",
"x",
"in",
"fp",
".",
"readlines",
"(",
")",
":",
"x",
"=",
"x",
".",
"rstrip",
"(",
")",
"if",
"not",
"x",
":",
"continue",
"if",
"not",
"x",
".",
"startswith",
"(",
"\"__version__\"",
")",
":",
"continue",
"vline",
"=",
"x",
".",
"split",
"(",
"'='",
")",
"[",
"1",
"]",
"break",
"if",
"not",
"vline",
":",
"raise",
"VersionNotFound",
"(",
"\"version file present but has no contents\"",
")",
"return",
"vline",
".",
"strip",
"(",
")",
".",
"rstrip",
"(",
")",
".",
"replace",
"(",
"\"'\"",
",",
"''",
")"
] |
Returns the version from the generated version file without actually
loading it (and thus trying to load the extension module).
|
[
"Returns",
"the",
"version",
"from",
"the",
"generated",
"version",
"file",
"without",
"actually",
"loading",
"it",
"(",
"and",
"thus",
"trying",
"to",
"load",
"the",
"extension",
"module",
")",
"."
] |
python
|
train
|
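Usage sketch for the `get_version` entry above — the version string is illustrative:

# VERSION_FILE is scanned for a line of the form:
#     __version__ = '2.5.12'
try:
    print(get_version())          # -> '2.5.12'
except VersionNotFound as ex:
    print('generate the version file first:', ex)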
kragniz/python-etcd3
|
etcd3/utils.py
|
https://github.com/kragniz/python-etcd3/blob/0adb14840d4a6011a2023a13f07e247e4c336a80/etcd3/utils.py#L20-L30
|
def lease_to_id(lease):
"""Figure out if the argument is a Lease object, or the lease ID."""
lease_id = 0
if hasattr(lease, 'id'):
lease_id = lease.id
else:
try:
lease_id = int(lease)
except TypeError:
pass
return lease_id
|
[
"def",
"lease_to_id",
"(",
"lease",
")",
":",
"lease_id",
"=",
"0",
"if",
"hasattr",
"(",
"lease",
",",
"'id'",
")",
":",
"lease_id",
"=",
"lease",
".",
"id",
"else",
":",
"try",
":",
"lease_id",
"=",
"int",
"(",
"lease",
")",
"except",
"TypeError",
":",
"pass",
"return",
"lease_id"
] |
Figure out if the argument is a Lease object, or the lease ID.
|
[
"Figure",
"out",
"if",
"the",
"argument",
"is",
"a",
"Lease",
"object",
"or",
"the",
"lease",
"ID",
"."
] |
python
|
train
|
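Usage sketch for the `lease_to_id` entry above — both accepted argument shapes; the Lease construction is illustrative:

from etcd3 import utils

utils.lease_to_id(7587847878816766826)  # plain int lease ID is passed through
# With a Lease object its .id attribute is used instead, e.g.:
#   lease = client.lease(ttl=10)        # 'client' is a hypothetical etcd3 client
#   utils.lease_to_id(lease)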