repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (1 class) | partition (3 classes) |
---|---|---|---|---|---|---|---|---|
pystorm/pystorm | pystorm/component.py | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/component.py#L406-L478 | def emit(
self,
tup,
tup_id=None,
stream=None,
anchors=None,
direct_task=None,
need_task_ids=False,
):
"""Emit a new Tuple to a stream.
:param tup: the Tuple payload to send to Storm, should contain only
JSON-serializable data.
:type tup: :class:`list` or :class:`pystorm.component.Tuple`
:param tup_id: the ID for the Tuple. If omitted by a
:class:`pystorm.spout.Spout`, this emit will be
unreliable.
:type tup_id: str
:param stream: the ID of the stream to emit this Tuple to. Specify
``None`` to emit to default stream.
:type stream: str
:param anchors: IDs the Tuples (or
:class:`pystorm.component.Tuple` instances)
which the emitted Tuples should be anchored to. This is
only passed by :class:`pystorm.bolt.Bolt`.
:type anchors: list
:param direct_task: the task to send the Tuple to.
:type direct_task: int
:param need_task_ids: indicate whether or not you'd like the task IDs
the Tuple was emitted (default: ``False``).
:type need_task_ids: bool
:returns: ``None``, unless ``need_task_ids=True``, in which case it will
be a ``list`` of task IDs that the Tuple was sent to if. Note
that when specifying direct_task, this will be equal to
``[direct_task]``.
"""
if not isinstance(tup, (list, tuple)):
raise TypeError(
"All Tuples must be either lists or tuples, "
"received {!r} instead.".format(type(tup))
)
msg = {"command": "emit", "tuple": tup}
downstream_task_ids = None
if anchors is not None:
msg["anchors"] = anchors
if tup_id is not None:
msg["id"] = tup_id
if stream is not None:
msg["stream"] = stream
if direct_task is not None:
msg["task"] = direct_task
if need_task_ids:
downstream_task_ids = [direct_task]
if not need_task_ids:
# only need to send on False, Storm's default is True
msg["need_task_ids"] = need_task_ids
if need_task_ids and direct_task is None:
# Use both locks so we ensure send_message and read_task_ids are for
# same emit
with self._reader_lock, self._writer_lock:
self.send_message(msg)
downstream_task_ids = self.read_task_ids()
# No locks necessary in simple case because serializer will acquire
# write lock itself
else:
self.send_message(msg)
return downstream_task_ids | [
"def",
"emit",
"(",
"self",
",",
"tup",
",",
"tup_id",
"=",
"None",
",",
"stream",
"=",
"None",
",",
"anchors",
"=",
"None",
",",
"direct_task",
"=",
"None",
",",
"need_task_ids",
"=",
"False",
",",
")",
":",
"if",
"not",
"isinstance",
"(",
"tup",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"All Tuples must be either lists or tuples, \"",
"\"received {!r} instead.\"",
".",
"format",
"(",
"type",
"(",
"tup",
")",
")",
")",
"msg",
"=",
"{",
"\"command\"",
":",
"\"emit\"",
",",
"\"tuple\"",
":",
"tup",
"}",
"downstream_task_ids",
"=",
"None",
"if",
"anchors",
"is",
"not",
"None",
":",
"msg",
"[",
"\"anchors\"",
"]",
"=",
"anchors",
"if",
"tup_id",
"is",
"not",
"None",
":",
"msg",
"[",
"\"id\"",
"]",
"=",
"tup_id",
"if",
"stream",
"is",
"not",
"None",
":",
"msg",
"[",
"\"stream\"",
"]",
"=",
"stream",
"if",
"direct_task",
"is",
"not",
"None",
":",
"msg",
"[",
"\"task\"",
"]",
"=",
"direct_task",
"if",
"need_task_ids",
":",
"downstream_task_ids",
"=",
"[",
"direct_task",
"]",
"if",
"not",
"need_task_ids",
":",
"# only need to send on False, Storm's default is True",
"msg",
"[",
"\"need_task_ids\"",
"]",
"=",
"need_task_ids",
"if",
"need_task_ids",
"and",
"direct_task",
"is",
"None",
":",
"# Use both locks so we ensure send_message and read_task_ids are for",
"# same emit",
"with",
"self",
".",
"_reader_lock",
",",
"self",
".",
"_writer_lock",
":",
"self",
".",
"send_message",
"(",
"msg",
")",
"downstream_task_ids",
"=",
"self",
".",
"read_task_ids",
"(",
")",
"# No locks necessary in simple case because serializer will acquire",
"# write lock itself",
"else",
":",
"self",
".",
"send_message",
"(",
"msg",
")",
"return",
"downstream_task_ids"
] | Emit a new Tuple to a stream.
:param tup: the Tuple payload to send to Storm, should contain only
JSON-serializable data.
:type tup: :class:`list` or :class:`pystorm.component.Tuple`
:param tup_id: the ID for the Tuple. If omitted by a
:class:`pystorm.spout.Spout`, this emit will be
unreliable.
:type tup_id: str
:param stream: the ID of the stream to emit this Tuple to. Specify
``None`` to emit to default stream.
:type stream: str
:param anchors: IDs the Tuples (or
:class:`pystorm.component.Tuple` instances)
which the emitted Tuples should be anchored to. This is
only passed by :class:`pystorm.bolt.Bolt`.
:type anchors: list
:param direct_task: the task to send the Tuple to.
:type direct_task: int
:param need_task_ids: indicate whether or not you'd like the task IDs
the Tuple was emitted (default: ``False``).
:type need_task_ids: bool
:returns: ``None``, unless ``need_task_ids=True``, in which case it will
be a ``list`` of task IDs that the Tuple was sent to if. Note
that when specifying direct_task, this will be equal to
``[direct_task]``. | [
"Emit",
"a",
"new",
"Tuple",
"to",
"a",
"stream",
"."
] | python | train |
diffeo/rejester | rejester/_task_master.py | https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_task_master.py#L869-L878 | def num_pending(self, work_spec_name):
'''Get the number of pending work units for some work spec.
These are work units that some worker is currently working on
(hopefully; it could include work units assigned to workers that
died and that have not yet expired).
'''
return self.registry.len(WORK_UNITS_ + work_spec_name,
priority_min=time.time()) | [
"def",
"num_pending",
"(",
"self",
",",
"work_spec_name",
")",
":",
"return",
"self",
".",
"registry",
".",
"len",
"(",
"WORK_UNITS_",
"+",
"work_spec_name",
",",
"priority_min",
"=",
"time",
".",
"time",
"(",
")",
")"
] | Get the number of pending work units for some work spec.
These are work units that some worker is currently working on
(hopefully; it could include work units assigned to workers that
died and that have not yet expired). | [
"Get",
"the",
"number",
"of",
"pending",
"work",
"units",
"for",
"some",
"work",
"spec",
"."
] | python | train |
SuperCowPowers/workbench | workbench/workers/yara_sigs.py | https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/workers/yara_sigs.py#L9-L21 | def get_rules_from_disk():
''' Recursively traverse the yara/rules directory for rules '''
# Try to find the yara rules directory relative to the worker
my_dir = os.path.dirname(os.path.realpath(__file__))
yara_rule_path = os.path.join(my_dir, 'yara/rules')
if not os.path.exists(yara_rule_path):
raise RuntimeError('yara could not find yara rules directory under: %s' % my_dir)
# Okay load in all the rules under the yara rule path
rules = yara.load_rules(rules_rootpath=yara_rule_path, fast_match=True)
return rules | [
"def",
"get_rules_from_disk",
"(",
")",
":",
"# Try to find the yara rules directory relative to the worker",
"my_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"yara_rule_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"my_dir",
",",
"'yara/rules'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"yara_rule_path",
")",
":",
"raise",
"RuntimeError",
"(",
"'yara could not find yara rules directory under: %s'",
"%",
"my_dir",
")",
"# Okay load in all the rules under the yara rule path",
"rules",
"=",
"yara",
".",
"load_rules",
"(",
"rules_rootpath",
"=",
"yara_rule_path",
",",
"fast_match",
"=",
"True",
")",
"return",
"rules"
] | Recursively traverse the yara/rules directory for rules | [
"Recursively",
"traverse",
"the",
"yara",
"/",
"rules",
"directory",
"for",
"rules"
] | python | train |
mikedh/trimesh | trimesh/collision.py | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/collision.py#L373-L439 | def in_collision_other(self, other_manager,
return_names=False, return_data=False):
"""
Check if any object from this manager collides with any object
from another manager.
Parameters
-------------------
other_manager : CollisionManager
Another collision manager object
return_names : bool
If true, a set is returned containing the names
of all pairs of objects in collision.
return_data : bool
If true, a list of ContactData is returned as well
Returns
-------------
is_collision : bool
True if a collision occurred between any pair of objects
and False otherwise
names : set of 2-tup
The set of pairwise collisions. Each tuple
contains two names (first from this manager,
second from the other_manager) indicating
that the two corresponding objects are in collision.
contacts : list of ContactData
All contacts detected
"""
cdata = fcl.CollisionData()
if return_names or return_data:
cdata = fcl.CollisionData(
request=fcl.CollisionRequest(
num_max_contacts=100000,
enable_contact=True))
self._manager.collide(other_manager._manager,
cdata,
fcl.defaultCollisionCallback)
result = cdata.result.is_collision
objs_in_collision = set()
contact_data = []
if return_names or return_data:
for contact in cdata.result.contacts:
reverse = False
names = (self._extract_name(contact.o1),
other_manager._extract_name(contact.o2))
if names[0] is None:
names = (self._extract_name(contact.o2),
other_manager._extract_name(contact.o1))
reverse = True
if return_names:
objs_in_collision.add(names)
if return_data:
if reverse:
names = reversed(names)
contact_data.append(ContactData(names, contact))
if return_names and return_data:
return result, objs_in_collision, contact_data
elif return_names:
return result, objs_in_collision
elif return_data:
return result, contact_data
else:
return result | [
"def",
"in_collision_other",
"(",
"self",
",",
"other_manager",
",",
"return_names",
"=",
"False",
",",
"return_data",
"=",
"False",
")",
":",
"cdata",
"=",
"fcl",
".",
"CollisionData",
"(",
")",
"if",
"return_names",
"or",
"return_data",
":",
"cdata",
"=",
"fcl",
".",
"CollisionData",
"(",
"request",
"=",
"fcl",
".",
"CollisionRequest",
"(",
"num_max_contacts",
"=",
"100000",
",",
"enable_contact",
"=",
"True",
")",
")",
"self",
".",
"_manager",
".",
"collide",
"(",
"other_manager",
".",
"_manager",
",",
"cdata",
",",
"fcl",
".",
"defaultCollisionCallback",
")",
"result",
"=",
"cdata",
".",
"result",
".",
"is_collision",
"objs_in_collision",
"=",
"set",
"(",
")",
"contact_data",
"=",
"[",
"]",
"if",
"return_names",
"or",
"return_data",
":",
"for",
"contact",
"in",
"cdata",
".",
"result",
".",
"contacts",
":",
"reverse",
"=",
"False",
"names",
"=",
"(",
"self",
".",
"_extract_name",
"(",
"contact",
".",
"o1",
")",
",",
"other_manager",
".",
"_extract_name",
"(",
"contact",
".",
"o2",
")",
")",
"if",
"names",
"[",
"0",
"]",
"is",
"None",
":",
"names",
"=",
"(",
"self",
".",
"_extract_name",
"(",
"contact",
".",
"o2",
")",
",",
"other_manager",
".",
"_extract_name",
"(",
"contact",
".",
"o1",
")",
")",
"reverse",
"=",
"True",
"if",
"return_names",
":",
"objs_in_collision",
".",
"add",
"(",
"names",
")",
"if",
"return_data",
":",
"if",
"reverse",
":",
"names",
"=",
"reversed",
"(",
"names",
")",
"contact_data",
".",
"append",
"(",
"ContactData",
"(",
"names",
",",
"contact",
")",
")",
"if",
"return_names",
"and",
"return_data",
":",
"return",
"result",
",",
"objs_in_collision",
",",
"contact_data",
"elif",
"return_names",
":",
"return",
"result",
",",
"objs_in_collision",
"elif",
"return_data",
":",
"return",
"result",
",",
"contact_data",
"else",
":",
"return",
"result"
] | Check if any object from this manager collides with any object
from another manager.
Parameters
-------------------
other_manager : CollisionManager
Another collision manager object
return_names : bool
If true, a set is returned containing the names
of all pairs of objects in collision.
return_data : bool
If true, a list of ContactData is returned as well
Returns
-------------
is_collision : bool
True if a collision occurred between any pair of objects
and False otherwise
names : set of 2-tup
The set of pairwise collisions. Each tuple
contains two names (first from this manager,
second from the other_manager) indicating
that the two corresponding objects are in collision.
contacts : list of ContactData
All contacts detected | [
"Check",
"if",
"any",
"object",
"from",
"this",
"manager",
"collides",
"with",
"any",
"object",
"from",
"another",
"manager",
"."
] | python | train |
saltstack/salt | salt/fileclient.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileclient.py#L870-L886 | def file_list(self, saltenv='base', prefix=''):
'''
Return a list of files in the given environment
with optional relative prefix path to limit directory traversal
'''
ret = []
prefix = prefix.strip('/')
for path in self.opts['pillar_roots'].get(saltenv, []):
for root, dirs, files in salt.utils.path.os_walk(
os.path.join(path, prefix), followlinks=True
):
# Don't walk any directories that match file_ignore_regex or glob
dirs[:] = [d for d in dirs if not salt.fileserver.is_file_ignored(self.opts, d)]
for fname in files:
relpath = os.path.relpath(os.path.join(root, fname), path)
ret.append(salt.utils.data.decode(relpath))
return ret | [
"def",
"file_list",
"(",
"self",
",",
"saltenv",
"=",
"'base'",
",",
"prefix",
"=",
"''",
")",
":",
"ret",
"=",
"[",
"]",
"prefix",
"=",
"prefix",
".",
"strip",
"(",
"'/'",
")",
"for",
"path",
"in",
"self",
".",
"opts",
"[",
"'pillar_roots'",
"]",
".",
"get",
"(",
"saltenv",
",",
"[",
"]",
")",
":",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"salt",
".",
"utils",
".",
"path",
".",
"os_walk",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"prefix",
")",
",",
"followlinks",
"=",
"True",
")",
":",
"# Don't walk any directories that match file_ignore_regex or glob",
"dirs",
"[",
":",
"]",
"=",
"[",
"d",
"for",
"d",
"in",
"dirs",
"if",
"not",
"salt",
".",
"fileserver",
".",
"is_file_ignored",
"(",
"self",
".",
"opts",
",",
"d",
")",
"]",
"for",
"fname",
"in",
"files",
":",
"relpath",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"fname",
")",
",",
"path",
")",
"ret",
".",
"append",
"(",
"salt",
".",
"utils",
".",
"data",
".",
"decode",
"(",
"relpath",
")",
")",
"return",
"ret"
] | Return a list of files in the given environment
with optional relative prefix path to limit directory traversal | [
"Return",
"a",
"list",
"of",
"files",
"in",
"the",
"given",
"environment",
"with",
"optional",
"relative",
"prefix",
"path",
"to",
"limit",
"directory",
"traversal"
] | python | train |
wilson-eft/wilson | wilson/classes.py | https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/classes.py#L281-L308 | def plotdata(self, key, part='re', scale='log', steps=50):
"""Return a tuple of arrays x, y that can be fed to plt.plot,
where x is the scale in GeV and y is the parameter of interest.
Parameters:
- key: dicionary key of the parameter to be plotted (e.g. a WCxf
coefficient name or a SM parameter like 'g')
- part: plot the real part 're' (default) or the imaginary part 'im'
- scale: 'log'; make the x steps logarithmically distributed; for
'linear', linearly distributed
- steps: steps in x to take (default: 50)
"""
if scale == 'log':
x = np.logspace(log(self.scale_min),
log(self.scale_max),
steps,
base=e)
elif scale == 'linear':
x = np.linspace(self.scale_min,
self.scale_max,
steps)
y = self.fun(x)
y = np.array([d[key] for d in y])
if part == 're':
return x, y.real
elif part == 'im':
return x, y.imag | [
"def",
"plotdata",
"(",
"self",
",",
"key",
",",
"part",
"=",
"'re'",
",",
"scale",
"=",
"'log'",
",",
"steps",
"=",
"50",
")",
":",
"if",
"scale",
"==",
"'log'",
":",
"x",
"=",
"np",
".",
"logspace",
"(",
"log",
"(",
"self",
".",
"scale_min",
")",
",",
"log",
"(",
"self",
".",
"scale_max",
")",
",",
"steps",
",",
"base",
"=",
"e",
")",
"elif",
"scale",
"==",
"'linear'",
":",
"x",
"=",
"np",
".",
"linspace",
"(",
"self",
".",
"scale_min",
",",
"self",
".",
"scale_max",
",",
"steps",
")",
"y",
"=",
"self",
".",
"fun",
"(",
"x",
")",
"y",
"=",
"np",
".",
"array",
"(",
"[",
"d",
"[",
"key",
"]",
"for",
"d",
"in",
"y",
"]",
")",
"if",
"part",
"==",
"'re'",
":",
"return",
"x",
",",
"y",
".",
"real",
"elif",
"part",
"==",
"'im'",
":",
"return",
"x",
",",
"y",
".",
"imag"
] | Return a tuple of arrays x, y that can be fed to plt.plot,
where x is the scale in GeV and y is the parameter of interest.
Parameters:
- key: dicionary key of the parameter to be plotted (e.g. a WCxf
coefficient name or a SM parameter like 'g')
- part: plot the real part 're' (default) or the imaginary part 'im'
- scale: 'log'; make the x steps logarithmically distributed; for
'linear', linearly distributed
- steps: steps in x to take (default: 50) | [
"Return",
"a",
"tuple",
"of",
"arrays",
"x",
"y",
"that",
"can",
"be",
"fed",
"to",
"plt",
".",
"plot",
"where",
"x",
"is",
"the",
"scale",
"in",
"GeV",
"and",
"y",
"is",
"the",
"parameter",
"of",
"interest",
"."
] | python | train |
ryanvarley/ExoData | exodata/equations.py | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/equations.py#L768-L787 | def ratioTerminatorToStar(H_p, R_p, R_s): # TODO add into planet class
r"""Calculates the ratio of the terminator to the star assuming 5 scale
heights large. If you dont know all of the input try
:py:func:`calcRatioTerminatorToStar`
.. math::
\Delta F = \frac{10 H R_p + 25 H^2}{R_\star^2}
Where :math:`\Delta F` is the ration of the terminator to the star,
H scale height planet atmosphere, :math:`R_p` radius of the planet,
:math:`R_s` radius of the star
:param H_p:
:param R_p:
:param R_s:
:return: ratio of the terminator to the star
"""
deltaF = ((10 * H_p * R_p) + (25 * H_p**2)) / (R_s**2)
return deltaF.simplified | [
"def",
"ratioTerminatorToStar",
"(",
"H_p",
",",
"R_p",
",",
"R_s",
")",
":",
"# TODO add into planet class",
"deltaF",
"=",
"(",
"(",
"10",
"*",
"H_p",
"*",
"R_p",
")",
"+",
"(",
"25",
"*",
"H_p",
"**",
"2",
")",
")",
"/",
"(",
"R_s",
"**",
"2",
")",
"return",
"deltaF",
".",
"simplified"
] | r"""Calculates the ratio of the terminator to the star assuming 5 scale
heights large. If you dont know all of the input try
:py:func:`calcRatioTerminatorToStar`
.. math::
\Delta F = \frac{10 H R_p + 25 H^2}{R_\star^2}
Where :math:`\Delta F` is the ration of the terminator to the star,
H scale height planet atmosphere, :math:`R_p` radius of the planet,
:math:`R_s` radius of the star
:param H_p:
:param R_p:
:param R_s:
:return: ratio of the terminator to the star | [
"r",
"Calculates",
"the",
"ratio",
"of",
"the",
"terminator",
"to",
"the",
"star",
"assuming",
"5",
"scale",
"heights",
"large",
".",
"If",
"you",
"dont",
"know",
"all",
"of",
"the",
"input",
"try",
":",
"py",
":",
"func",
":",
"calcRatioTerminatorToStar"
] | python | train |
senaite/senaite.core | bika/lims/browser/worksheet/views/add_duplicate.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/worksheet/views/add_duplicate.py#L142-L153 | def get_container_mapping(self):
"""Returns a mapping of container -> postition
"""
layout = self.context.getLayout()
container_mapping = {}
for slot in layout:
if slot["type"] != "a":
continue
position = slot["position"]
container_uid = slot["container_uid"]
container_mapping[container_uid] = position
return container_mapping | [
"def",
"get_container_mapping",
"(",
"self",
")",
":",
"layout",
"=",
"self",
".",
"context",
".",
"getLayout",
"(",
")",
"container_mapping",
"=",
"{",
"}",
"for",
"slot",
"in",
"layout",
":",
"if",
"slot",
"[",
"\"type\"",
"]",
"!=",
"\"a\"",
":",
"continue",
"position",
"=",
"slot",
"[",
"\"position\"",
"]",
"container_uid",
"=",
"slot",
"[",
"\"container_uid\"",
"]",
"container_mapping",
"[",
"container_uid",
"]",
"=",
"position",
"return",
"container_mapping"
] | Returns a mapping of container -> postition | [
"Returns",
"a",
"mapping",
"of",
"container",
"-",
">",
"postition"
] | python | train |
icometrix/dicom2nifti | dicom2nifti/common.py | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L333-L343 | def get_is_value(tag):
"""
Getters for data that also work with implicit transfersyntax
:param tag: the tag to read
"""
# data is int formatted as string so convert te string first and cast to int
if tag.VR == 'OB' or tag.VR == 'UN':
value = int(tag.value.decode("ascii").replace(" ", ""))
return value
return int(tag.value) | [
"def",
"get_is_value",
"(",
"tag",
")",
":",
"# data is int formatted as string so convert te string first and cast to int",
"if",
"tag",
".",
"VR",
"==",
"'OB'",
"or",
"tag",
".",
"VR",
"==",
"'UN'",
":",
"value",
"=",
"int",
"(",
"tag",
".",
"value",
".",
"decode",
"(",
"\"ascii\"",
")",
".",
"replace",
"(",
"\" \"",
",",
"\"\"",
")",
")",
"return",
"value",
"return",
"int",
"(",
"tag",
".",
"value",
")"
] | Getters for data that also work with implicit transfersyntax
:param tag: the tag to read | [
"Getters",
"for",
"data",
"that",
"also",
"work",
"with",
"implicit",
"transfersyntax"
] | python | train |
mdsol/rwslib | rwslib/extras/rwscmd/data_scrambler.py | https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/extras/rwscmd/data_scrambler.py#L106-L124 | def scramble_value(self, value):
"""Duck-type value and scramble appropriately"""
try:
type, format = typeof_rave_data(value)
if type == 'float':
i, f = value.split('.')
return self.scramble_float(len(value) - 1, len(f))
elif type == 'int':
return self.scramble_int(len(value))
elif type == 'date':
return self.scramble_date(value, format)
elif type == 'time':
return self.scramble_time(format)
elif type == 'string':
return self.scramble_string(len(value))
else:
return value
except:
return "" | [
"def",
"scramble_value",
"(",
"self",
",",
"value",
")",
":",
"try",
":",
"type",
",",
"format",
"=",
"typeof_rave_data",
"(",
"value",
")",
"if",
"type",
"==",
"'float'",
":",
"i",
",",
"f",
"=",
"value",
".",
"split",
"(",
"'.'",
")",
"return",
"self",
".",
"scramble_float",
"(",
"len",
"(",
"value",
")",
"-",
"1",
",",
"len",
"(",
"f",
")",
")",
"elif",
"type",
"==",
"'int'",
":",
"return",
"self",
".",
"scramble_int",
"(",
"len",
"(",
"value",
")",
")",
"elif",
"type",
"==",
"'date'",
":",
"return",
"self",
".",
"scramble_date",
"(",
"value",
",",
"format",
")",
"elif",
"type",
"==",
"'time'",
":",
"return",
"self",
".",
"scramble_time",
"(",
"format",
")",
"elif",
"type",
"==",
"'string'",
":",
"return",
"self",
".",
"scramble_string",
"(",
"len",
"(",
"value",
")",
")",
"else",
":",
"return",
"value",
"except",
":",
"return",
"\"\""
] | Duck-type value and scramble appropriately | [
"Duck",
"-",
"type",
"value",
"and",
"scramble",
"appropriately"
] | python | train |
mosesschwartz/scrypture | scrypture/webapi.py | https://github.com/mosesschwartz/scrypture/blob/d51eb0c9835a5122a655078268185ce8ab9ec86a/scrypture/webapi.py#L40-L46 | def radio_field(*args, **kwargs):
'''
Get a password
'''
radio_field = wtforms.RadioField(*args, **kwargs)
radio_field.input_type = 'radio_field'
return radio_field | [
"def",
"radio_field",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"radio_field",
"=",
"wtforms",
".",
"RadioField",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"radio_field",
".",
"input_type",
"=",
"'radio_field'",
"return",
"radio_field"
] | Get a password | [
"Get",
"a",
"password"
] | python | train |
ming060/robotframework-uiautomatorlibrary | uiautomatorlibrary/Mobile.py | https://github.com/ming060/robotframework-uiautomatorlibrary/blob/b70202b6a8aa68b4efd9d029c2845407fb33451a/uiautomatorlibrary/Mobile.py#L772-L776 | def set_text(self, input_text, *args, **selectors):
"""
Set *input_text* to the UI object with *selectors*
"""
self.device(**selectors).set_text(input_text) | [
"def",
"set_text",
"(",
"self",
",",
"input_text",
",",
"*",
"args",
",",
"*",
"*",
"selectors",
")",
":",
"self",
".",
"device",
"(",
"*",
"*",
"selectors",
")",
".",
"set_text",
"(",
"input_text",
")"
] | Set *input_text* to the UI object with *selectors* | [
"Set",
"*",
"input_text",
"*",
"to",
"the",
"UI",
"object",
"with",
"*",
"selectors",
"*"
] | python | train |
mdiener/grace | grace/py27/slimit/parser.py | https://github.com/mdiener/grace/blob/2dab13a2cf636da5da989904c5885166fc94d36d/grace/py27/slimit/parser.py#L311-L324 | def p_property_assignment(self, p):
"""property_assignment \
: property_name COLON assignment_expr
| GETPROP property_name LPAREN RPAREN LBRACE function_body RBRACE
| SETPROP property_name LPAREN formal_parameter_list RPAREN \
LBRACE function_body RBRACE
"""
if len(p) == 4:
p[0] = ast.Assign(left=p[1], op=p[2], right=p[3])
elif len(p) == 8:
p[0] = ast.GetPropAssign(prop_name=p[2], elements=p[6])
else:
p[0] = ast.SetPropAssign(
prop_name=p[2], parameters=p[4], elements=p[7]) | [
"def",
"p_property_assignment",
"(",
"self",
",",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"4",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"Assign",
"(",
"left",
"=",
"p",
"[",
"1",
"]",
",",
"op",
"=",
"p",
"[",
"2",
"]",
",",
"right",
"=",
"p",
"[",
"3",
"]",
")",
"elif",
"len",
"(",
"p",
")",
"==",
"8",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"GetPropAssign",
"(",
"prop_name",
"=",
"p",
"[",
"2",
"]",
",",
"elements",
"=",
"p",
"[",
"6",
"]",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"SetPropAssign",
"(",
"prop_name",
"=",
"p",
"[",
"2",
"]",
",",
"parameters",
"=",
"p",
"[",
"4",
"]",
",",
"elements",
"=",
"p",
"[",
"7",
"]",
")"
] | property_assignment \
: property_name COLON assignment_expr
| GETPROP property_name LPAREN RPAREN LBRACE function_body RBRACE
| SETPROP property_name LPAREN formal_parameter_list RPAREN \
LBRACE function_body RBRACE | [
"property_assignment",
"\\",
":",
"property_name",
"COLON",
"assignment_expr",
"|",
"GETPROP",
"property_name",
"LPAREN",
"RPAREN",
"LBRACE",
"function_body",
"RBRACE",
"|",
"SETPROP",
"property_name",
"LPAREN",
"formal_parameter_list",
"RPAREN",
"\\",
"LBRACE",
"function_body",
"RBRACE"
] | python | train |
project-ncl/pnc-cli | pnc_cli/swagger_client/apis/productreleases_api.py | https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/swagger_client/apis/productreleases_api.py#L504-L527 | def get_all_support_level(self, **kwargs):
"""
Gets all Product Releases Support Level
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_support_level(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: SupportLevelPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_support_level_with_http_info(**kwargs)
else:
(data) = self.get_all_support_level_with_http_info(**kwargs)
return data | [
"def",
"get_all_support_level",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'callback'",
")",
":",
"return",
"self",
".",
"get_all_support_level_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"get_all_support_level_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | Gets all Product Releases Support Level
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_support_level(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: SupportLevelPage
If the method is called asynchronously,
returns the request thread. | [
"Gets",
"all",
"Product",
"Releases",
"Support",
"Level",
"This",
"method",
"makes",
"a",
"synchronous",
"HTTP",
"request",
"by",
"default",
".",
"To",
"make",
"an",
"asynchronous",
"HTTP",
"request",
"please",
"define",
"a",
"callback",
"function",
"to",
"be",
"invoked",
"when",
"receiving",
"the",
"response",
".",
">>>",
"def",
"callback_function",
"(",
"response",
")",
":",
">>>",
"pprint",
"(",
"response",
")",
">>>",
">>>",
"thread",
"=",
"api",
".",
"get_all_support_level",
"(",
"callback",
"=",
"callback_function",
")"
] | python | train |
GNS3/gns3-server | gns3server/controller/compute.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/compute.py#L604-L614 | def forward(self, method, type, path, data=None):
"""
Forward a call to the emulator on compute
"""
try:
action = "/{}/{}".format(type, path)
res = yield from self.http_query(method, action, data=data, timeout=None)
except aiohttp.ServerDisconnectedError:
log.error("Connection lost to %s during %s %s", self._id, method, action)
raise aiohttp.web.HTTPGatewayTimeout()
return res.json | [
"def",
"forward",
"(",
"self",
",",
"method",
",",
"type",
",",
"path",
",",
"data",
"=",
"None",
")",
":",
"try",
":",
"action",
"=",
"\"/{}/{}\"",
".",
"format",
"(",
"type",
",",
"path",
")",
"res",
"=",
"yield",
"from",
"self",
".",
"http_query",
"(",
"method",
",",
"action",
",",
"data",
"=",
"data",
",",
"timeout",
"=",
"None",
")",
"except",
"aiohttp",
".",
"ServerDisconnectedError",
":",
"log",
".",
"error",
"(",
"\"Connection lost to %s during %s %s\"",
",",
"self",
".",
"_id",
",",
"method",
",",
"action",
")",
"raise",
"aiohttp",
".",
"web",
".",
"HTTPGatewayTimeout",
"(",
")",
"return",
"res",
".",
"json"
] | Forward a call to the emulator on compute | [
"Forward",
"a",
"call",
"to",
"the",
"emulator",
"on",
"compute"
] | python | train |
OCHA-DAP/hdx-python-utilities | src/hdx/utilities/compare.py | https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/compare.py#L24-L38 | def assert_files_same(path1, path2):
# type: (str, str) -> None
"""Asserts that two files are the same and returns delta using
-, ?, + format if not
Args:
path1 (str): Path to first file
path2 (str): Path to second file
Returns:
None
"""
difflines = compare_files(path1, path2)
assert len(difflines) == 0, ''.join(['\n'] + difflines) | [
"def",
"assert_files_same",
"(",
"path1",
",",
"path2",
")",
":",
"# type: (str, str) -> None",
"difflines",
"=",
"compare_files",
"(",
"path1",
",",
"path2",
")",
"assert",
"len",
"(",
"difflines",
")",
"==",
"0",
",",
"''",
".",
"join",
"(",
"[",
"'\\n'",
"]",
"+",
"difflines",
")"
] | Asserts that two files are the same and returns delta using
-, ?, + format if not
Args:
path1 (str): Path to first file
path2 (str): Path to second file
Returns:
None | [
"Asserts",
"that",
"two",
"files",
"are",
"the",
"same",
"and",
"returns",
"delta",
"using",
"-",
"?",
"+",
"format",
"if",
"not"
] | python | train |
Esri/ArcREST | src/arcrest/common/geometry.py | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/common/geometry.py#L563-L582 | def asDictionary(self):
""" returns the envelope as a dictionary """
template = {
"xmin" : self._xmin,
"ymin" : self._ymin,
"xmax" : self._xmax,
"ymax" : self._ymax,
"spatialReference" : self.spatialReference
}
if self._zmax is not None and \
self._zmin is not None:
template['zmin'] = self._zmin
template['zmax'] = self._zmax
if self._mmin is not None and \
self._mmax is not None:
template['mmax'] = self._mmax
template['mmin'] = self._mmin
return template | [
"def",
"asDictionary",
"(",
"self",
")",
":",
"template",
"=",
"{",
"\"xmin\"",
":",
"self",
".",
"_xmin",
",",
"\"ymin\"",
":",
"self",
".",
"_ymin",
",",
"\"xmax\"",
":",
"self",
".",
"_xmax",
",",
"\"ymax\"",
":",
"self",
".",
"_ymax",
",",
"\"spatialReference\"",
":",
"self",
".",
"spatialReference",
"}",
"if",
"self",
".",
"_zmax",
"is",
"not",
"None",
"and",
"self",
".",
"_zmin",
"is",
"not",
"None",
":",
"template",
"[",
"'zmin'",
"]",
"=",
"self",
".",
"_zmin",
"template",
"[",
"'zmax'",
"]",
"=",
"self",
".",
"_zmax",
"if",
"self",
".",
"_mmin",
"is",
"not",
"None",
"and",
"self",
".",
"_mmax",
"is",
"not",
"None",
":",
"template",
"[",
"'mmax'",
"]",
"=",
"self",
".",
"_mmax",
"template",
"[",
"'mmin'",
"]",
"=",
"self",
".",
"_mmin",
"return",
"template"
] | returns the envelope as a dictionary | [
"returns",
"the",
"envelope",
"as",
"a",
"dictionary"
] | python | train |
apache/incubator-heron | heron/tools/admin/src/python/standalone.py | https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/admin/src/python/standalone.py#L840-L858 | def get_hostname(ip_addr, cl_args):
'''
get host name of remote host
'''
if is_self(ip_addr):
return get_self_hostname()
cmd = "hostname"
ssh_cmd = ssh_remote_execute(cmd, ip_addr, cl_args)
pid = subprocess.Popen(ssh_cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return_code = pid.wait()
output = pid.communicate()
if return_code != 0:
Log.error("Failed to get hostname for remote host %s with output:\n%s" % (ip_addr, output))
sys.exit(-1)
return output[0].strip("\n") | [
"def",
"get_hostname",
"(",
"ip_addr",
",",
"cl_args",
")",
":",
"if",
"is_self",
"(",
"ip_addr",
")",
":",
"return",
"get_self_hostname",
"(",
")",
"cmd",
"=",
"\"hostname\"",
"ssh_cmd",
"=",
"ssh_remote_execute",
"(",
"cmd",
",",
"ip_addr",
",",
"cl_args",
")",
"pid",
"=",
"subprocess",
".",
"Popen",
"(",
"ssh_cmd",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"return_code",
"=",
"pid",
".",
"wait",
"(",
")",
"output",
"=",
"pid",
".",
"communicate",
"(",
")",
"if",
"return_code",
"!=",
"0",
":",
"Log",
".",
"error",
"(",
"\"Failed to get hostname for remote host %s with output:\\n%s\"",
"%",
"(",
"ip_addr",
",",
"output",
")",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")",
"return",
"output",
"[",
"0",
"]",
".",
"strip",
"(",
"\"\\n\"",
")"
] | get host name of remote host | [
"get",
"host",
"name",
"of",
"remote",
"host"
] | python | valid |
Contraz/demosys-py | demosys/resources/base.py | https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/resources/base.py#L121-L129 | def load_pool(self):
"""
Loads all the data files using the configured finders.
"""
for meta in self._resources:
resource = self.load(meta)
yield meta, resource
self._resources = [] | [
"def",
"load_pool",
"(",
"self",
")",
":",
"for",
"meta",
"in",
"self",
".",
"_resources",
":",
"resource",
"=",
"self",
".",
"load",
"(",
"meta",
")",
"yield",
"meta",
",",
"resource",
"self",
".",
"_resources",
"=",
"[",
"]"
] | Loads all the data files using the configured finders. | [
"Loads",
"all",
"the",
"data",
"files",
"using",
"the",
"configured",
"finders",
"."
] | python | valid |
bitcraft/pyscroll | pyscroll/group.py | https://github.com/bitcraft/pyscroll/blob/b41c1016dfefd0e2d83a14a2ce40d7ad298c5b0f/pyscroll/group.py#L34-L56 | def draw(self, surface):
""" Draw all sprites and map onto the surface
:param surface: pygame surface to draw to
:type surface: pygame.surface.Surface
"""
ox, oy = self._map_layer.get_center_offset()
new_surfaces = list()
spritedict = self.spritedict
gl = self.get_layer_of_sprite
new_surfaces_append = new_surfaces.append
for spr in self.sprites():
new_rect = spr.rect.move(ox, oy)
try:
new_surfaces_append((spr.image, new_rect, gl(spr), spr.blendmode))
except AttributeError: # generally should only fail when no blendmode available
new_surfaces_append((spr.image, new_rect, gl(spr)))
spritedict[spr] = new_rect
self.lostsprites = []
return self._map_layer.draw(surface, surface.get_rect(), new_surfaces) | [
"def",
"draw",
"(",
"self",
",",
"surface",
")",
":",
"ox",
",",
"oy",
"=",
"self",
".",
"_map_layer",
".",
"get_center_offset",
"(",
")",
"new_surfaces",
"=",
"list",
"(",
")",
"spritedict",
"=",
"self",
".",
"spritedict",
"gl",
"=",
"self",
".",
"get_layer_of_sprite",
"new_surfaces_append",
"=",
"new_surfaces",
".",
"append",
"for",
"spr",
"in",
"self",
".",
"sprites",
"(",
")",
":",
"new_rect",
"=",
"spr",
".",
"rect",
".",
"move",
"(",
"ox",
",",
"oy",
")",
"try",
":",
"new_surfaces_append",
"(",
"(",
"spr",
".",
"image",
",",
"new_rect",
",",
"gl",
"(",
"spr",
")",
",",
"spr",
".",
"blendmode",
")",
")",
"except",
"AttributeError",
":",
"# generally should only fail when no blendmode available",
"new_surfaces_append",
"(",
"(",
"spr",
".",
"image",
",",
"new_rect",
",",
"gl",
"(",
"spr",
")",
")",
")",
"spritedict",
"[",
"spr",
"]",
"=",
"new_rect",
"self",
".",
"lostsprites",
"=",
"[",
"]",
"return",
"self",
".",
"_map_layer",
".",
"draw",
"(",
"surface",
",",
"surface",
".",
"get_rect",
"(",
")",
",",
"new_surfaces",
")"
] | Draw all sprites and map onto the surface
:param surface: pygame surface to draw to
:type surface: pygame.surface.Surface | [
"Draw",
"all",
"sprites",
"and",
"map",
"onto",
"the",
"surface"
] | python | train |
LIVVkit/LIVVkit | livvkit/components/performance.py | https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/performance.py#L150-L174 | def _summarize_result(result, config):
""" Trim out some data to return for the index page """
timing_var = config['scaling_var']
summary = LIVVDict()
for size, res in result.items():
proc_counts = []
bench_times = []
model_times = []
for proc, data in res.items():
proc_counts.append(int(proc[1:]))
try:
bench_times.append(data['bench'][timing_var]['mean'])
except KeyError:
pass
try:
model_times.append(data['model'][timing_var]['mean'])
except KeyError:
pass
if model_times != [] and bench_times != []:
time_diff = np.mean(model_times)/np.mean(bench_times)
else:
time_diff = 'NA'
summary[size]['Proc. Counts'] = ", ".join([str(x) for x in sorted(proc_counts)])
summary[size]['Mean Time Diff (% of benchmark)'] = time_diff
return summary | [
"def",
"_summarize_result",
"(",
"result",
",",
"config",
")",
":",
"timing_var",
"=",
"config",
"[",
"'scaling_var'",
"]",
"summary",
"=",
"LIVVDict",
"(",
")",
"for",
"size",
",",
"res",
"in",
"result",
".",
"items",
"(",
")",
":",
"proc_counts",
"=",
"[",
"]",
"bench_times",
"=",
"[",
"]",
"model_times",
"=",
"[",
"]",
"for",
"proc",
",",
"data",
"in",
"res",
".",
"items",
"(",
")",
":",
"proc_counts",
".",
"append",
"(",
"int",
"(",
"proc",
"[",
"1",
":",
"]",
")",
")",
"try",
":",
"bench_times",
".",
"append",
"(",
"data",
"[",
"'bench'",
"]",
"[",
"timing_var",
"]",
"[",
"'mean'",
"]",
")",
"except",
"KeyError",
":",
"pass",
"try",
":",
"model_times",
".",
"append",
"(",
"data",
"[",
"'model'",
"]",
"[",
"timing_var",
"]",
"[",
"'mean'",
"]",
")",
"except",
"KeyError",
":",
"pass",
"if",
"model_times",
"!=",
"[",
"]",
"and",
"bench_times",
"!=",
"[",
"]",
":",
"time_diff",
"=",
"np",
".",
"mean",
"(",
"model_times",
")",
"/",
"np",
".",
"mean",
"(",
"bench_times",
")",
"else",
":",
"time_diff",
"=",
"'NA'",
"summary",
"[",
"size",
"]",
"[",
"'Proc. Counts'",
"]",
"=",
"\", \"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"sorted",
"(",
"proc_counts",
")",
"]",
")",
"summary",
"[",
"size",
"]",
"[",
"'Mean Time Diff (% of benchmark)'",
"]",
"=",
"time_diff",
"return",
"summary"
] | Trim out some data to return for the index page | [
"Trim",
"out",
"some",
"data",
"to",
"return",
"for",
"the",
"index",
"page"
] | python | train |
NateFerrero/oauth2lib | oauth2lib/provider.py | https://github.com/NateFerrero/oauth2lib/blob/d161b010f8a596826050a09e5e94d59443cc12d9/oauth2lib/provider.py#L573-L586 | def get_authorization(self):
"""Get authorization object representing status of authentication."""
auth = self.authorization_class()
header = self.get_authorization_header()
if not header or not header.split:
return auth
header = header.split()
if len(header) > 1 and header[0] == 'Bearer':
auth.is_oauth = True
access_token = header[1]
self.validate_access_token(access_token, auth)
if not auth.is_valid:
auth.error = 'access_denied'
return auth | [
"def",
"get_authorization",
"(",
"self",
")",
":",
"auth",
"=",
"self",
".",
"authorization_class",
"(",
")",
"header",
"=",
"self",
".",
"get_authorization_header",
"(",
")",
"if",
"not",
"header",
"or",
"not",
"header",
".",
"split",
":",
"return",
"auth",
"header",
"=",
"header",
".",
"split",
"(",
")",
"if",
"len",
"(",
"header",
")",
">",
"1",
"and",
"header",
"[",
"0",
"]",
"==",
"'Bearer'",
":",
"auth",
".",
"is_oauth",
"=",
"True",
"access_token",
"=",
"header",
"[",
"1",
"]",
"self",
".",
"validate_access_token",
"(",
"access_token",
",",
"auth",
")",
"if",
"not",
"auth",
".",
"is_valid",
":",
"auth",
".",
"error",
"=",
"'access_denied'",
"return",
"auth"
] | Get authorization object representing status of authentication. | [
"Get",
"authorization",
"object",
"representing",
"status",
"of",
"authentication",
"."
] | python | test |
trombastic/PyScada | pyscada/utils/scheduler.py | https://github.com/trombastic/PyScada/blob/c5fc348a25f0df1340336f694ee9bc1aea62516a/pyscada/utils/scheduler.py#L142-L207 | def demonize(self):
"""
do the double fork magic
"""
# check if a process is already running
if access(self.pid_file_name, F_OK):
# read the pid file
pid = self.read_pid()
try:
kill(pid, 0) # check if process is running
self.stderr.write("process is already running\n")
return False
except OSError as e:
if e.errno == errno.ESRCH:
# process is dead
self.delete_pid(force_del=True)
else:
self.stderr.write("demonize failed, something went wrong: %d (%s)\n" % (e.errno, e.strerror))
return False
try:
pid = fork()
if pid > 0:
# Exit from the first parent
timeout = time() + 60
while self.read_pid() is None:
self.stderr.write("waiting for pid..\n")
sleep(0.5)
if time() > timeout:
break
self.stderr.write("pid is %d\n" % self.read_pid())
sys.exit(0)
except OSError as e:
self.stderr.write("demonize failed in 1. Fork: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment
# os.chdir("/")
setsid()
umask(0)
# Do the Second fork
try:
pid = fork()
if pid > 0:
# Exit from the second parent
sys.exit(0)
except OSError as e:
self.stderr.write("demonize failed in 2. Fork: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Redirect standard file descriptors
# sys.stdout.flush()
# sys.stderr.flush()
# si = file(self.stdin, 'r')
# so = file(self.stdout, 'a+')
# se = file(self.stderr, 'a+',
# os.dup2(si.fileno(), sys.stdin.fileno())
# os.dup2(so.fileno(), sys.stdout.fileno())
# os.dup2(se.fileno(), sys.stderr.fileno())
# Write the PID file
#atexit.register(self.delete_pid)
self.write_pid()
return True | [
"def",
"demonize",
"(",
"self",
")",
":",
"# check if a process is already running",
"if",
"access",
"(",
"self",
".",
"pid_file_name",
",",
"F_OK",
")",
":",
"# read the pid file",
"pid",
"=",
"self",
".",
"read_pid",
"(",
")",
"try",
":",
"kill",
"(",
"pid",
",",
"0",
")",
"# check if process is running",
"self",
".",
"stderr",
".",
"write",
"(",
"\"process is already running\\n\"",
")",
"return",
"False",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"ESRCH",
":",
"# process is dead",
"self",
".",
"delete_pid",
"(",
"force_del",
"=",
"True",
")",
"else",
":",
"self",
".",
"stderr",
".",
"write",
"(",
"\"demonize failed, something went wrong: %d (%s)\\n\"",
"%",
"(",
"e",
".",
"errno",
",",
"e",
".",
"strerror",
")",
")",
"return",
"False",
"try",
":",
"pid",
"=",
"fork",
"(",
")",
"if",
"pid",
">",
"0",
":",
"# Exit from the first parent",
"timeout",
"=",
"time",
"(",
")",
"+",
"60",
"while",
"self",
".",
"read_pid",
"(",
")",
"is",
"None",
":",
"self",
".",
"stderr",
".",
"write",
"(",
"\"waiting for pid..\\n\"",
")",
"sleep",
"(",
"0.5",
")",
"if",
"time",
"(",
")",
">",
"timeout",
":",
"break",
"self",
".",
"stderr",
".",
"write",
"(",
"\"pid is %d\\n\"",
"%",
"self",
".",
"read_pid",
"(",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"except",
"OSError",
"as",
"e",
":",
"self",
".",
"stderr",
".",
"write",
"(",
"\"demonize failed in 1. Fork: %d (%s)\\n\"",
"%",
"(",
"e",
".",
"errno",
",",
"e",
".",
"strerror",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# Decouple from parent environment",
"# os.chdir(\"/\")",
"setsid",
"(",
")",
"umask",
"(",
"0",
")",
"# Do the Second fork",
"try",
":",
"pid",
"=",
"fork",
"(",
")",
"if",
"pid",
">",
"0",
":",
"# Exit from the second parent",
"sys",
".",
"exit",
"(",
"0",
")",
"except",
"OSError",
"as",
"e",
":",
"self",
".",
"stderr",
".",
"write",
"(",
"\"demonize failed in 2. Fork: %d (%s)\\n\"",
"%",
"(",
"e",
".",
"errno",
",",
"e",
".",
"strerror",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# Redirect standard file descriptors",
"# sys.stdout.flush()",
"# sys.stderr.flush()",
"# si = file(self.stdin, 'r')",
"# so = file(self.stdout, 'a+')",
"# se = file(self.stderr, 'a+',",
"# os.dup2(si.fileno(), sys.stdin.fileno())",
"# os.dup2(so.fileno(), sys.stdout.fileno())",
"# os.dup2(se.fileno(), sys.stderr.fileno())",
"# Write the PID file",
"#atexit.register(self.delete_pid)",
"self",
".",
"write_pid",
"(",
")",
"return",
"True"
] | do the double fork magic | [
"do",
"the",
"double",
"fork",
"magic"
] | python | train |
Kozea/pygal | pygal/graph/histogram.py | https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/histogram.py#L64-L88 | def _bar(self, serie, parent, x0, x1, y, i, zero, secondary=False):
"""Internal bar drawing function"""
x, y = self.view((x0, y))
x1, _ = self.view((x1, y))
width = x1 - x
height = self.view.y(zero) - y
series_margin = width * self._series_margin
x += series_margin
width -= 2 * series_margin
r = serie.rounded_bars * 1 if serie.rounded_bars else 0
alter(
self.svg.transposable_node(
parent,
'rect',
x=x,
y=y,
rx=r,
ry=r,
width=width,
height=height,
class_='rect reactive tooltip-trigger'
), serie.metadata.get(i)
)
return x, y, width, height | [
"def",
"_bar",
"(",
"self",
",",
"serie",
",",
"parent",
",",
"x0",
",",
"x1",
",",
"y",
",",
"i",
",",
"zero",
",",
"secondary",
"=",
"False",
")",
":",
"x",
",",
"y",
"=",
"self",
".",
"view",
"(",
"(",
"x0",
",",
"y",
")",
")",
"x1",
",",
"_",
"=",
"self",
".",
"view",
"(",
"(",
"x1",
",",
"y",
")",
")",
"width",
"=",
"x1",
"-",
"x",
"height",
"=",
"self",
".",
"view",
".",
"y",
"(",
"zero",
")",
"-",
"y",
"series_margin",
"=",
"width",
"*",
"self",
".",
"_series_margin",
"x",
"+=",
"series_margin",
"width",
"-=",
"2",
"*",
"series_margin",
"r",
"=",
"serie",
".",
"rounded_bars",
"*",
"1",
"if",
"serie",
".",
"rounded_bars",
"else",
"0",
"alter",
"(",
"self",
".",
"svg",
".",
"transposable_node",
"(",
"parent",
",",
"'rect'",
",",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"rx",
"=",
"r",
",",
"ry",
"=",
"r",
",",
"width",
"=",
"width",
",",
"height",
"=",
"height",
",",
"class_",
"=",
"'rect reactive tooltip-trigger'",
")",
",",
"serie",
".",
"metadata",
".",
"get",
"(",
"i",
")",
")",
"return",
"x",
",",
"y",
",",
"width",
",",
"height"
] | Internal bar drawing function | [
"Internal",
"bar",
"drawing",
"function"
] | python | train |
lancekrogers/edgerdb | edgerdb/helper_functions.py | https://github.com/lancekrogers/edgerdb/blob/ed6f37af40f95588db94ba27a5a27d73da59e485/edgerdb/helper_functions.py#L165-L183 | def retrieve_document(file_path, directory='sec_filings'):
'''
This function takes a file path beginning with edgar and stores the form in a directory.
The default directory is sec_filings but can be changed through a keyword argument.
'''
ftp = FTP('ftp.sec.gov', timeout=None)
ftp.login()
name = file_path.replace('/', '_')
if not os.path.exists(directory):
os.makedirs(directory)
with tempfile.TemporaryFile() as temp:
ftp.retrbinary('RETR %s' % file_path, temp.write)
temp.seek(0)
with open('{}/{}'.format(directory, name), 'w+') as f:
f.write(temp.read().decode("utf-8"))
f.closed
records = temp
retry = False
ftp.close() | [
"def",
"retrieve_document",
"(",
"file_path",
",",
"directory",
"=",
"'sec_filings'",
")",
":",
"ftp",
"=",
"FTP",
"(",
"'ftp.sec.gov'",
",",
"timeout",
"=",
"None",
")",
"ftp",
".",
"login",
"(",
")",
"name",
"=",
"file_path",
".",
"replace",
"(",
"'/'",
",",
"'_'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"directory",
")",
":",
"os",
".",
"makedirs",
"(",
"directory",
")",
"with",
"tempfile",
".",
"TemporaryFile",
"(",
")",
"as",
"temp",
":",
"ftp",
".",
"retrbinary",
"(",
"'RETR %s'",
"%",
"file_path",
",",
"temp",
".",
"write",
")",
"temp",
".",
"seek",
"(",
"0",
")",
"with",
"open",
"(",
"'{}/{}'",
".",
"format",
"(",
"directory",
",",
"name",
")",
",",
"'w+'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"temp",
".",
"read",
"(",
")",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"f",
".",
"closed",
"records",
"=",
"temp",
"retry",
"=",
"False",
"ftp",
".",
"close",
"(",
")"
] | This function takes a file path beginning with edgar and stores the form in a directory.
The default directory is sec_filings but can be changed through a keyword argument. | [
"This",
"function",
"takes",
"a",
"file",
"path",
"beginning",
"with",
"edgar",
"and",
"stores",
"the",
"form",
"in",
"a",
"directory",
".",
"The",
"default",
"directory",
"is",
"sec_filings",
"but",
"can",
"be",
"changed",
"through",
"a",
"keyword",
"argument",
"."
] | python | valid |
saltstack/salt | salt/modules/bridge.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bridge.py#L103-L109 | def _linux_brdel(br):
'''
Internal, deletes the bridge
'''
brctl = _tool_path('brctl')
return __salt__['cmd.run']('{0} delbr {1}'.format(brctl, br),
python_shell=False) | [
"def",
"_linux_brdel",
"(",
"br",
")",
":",
"brctl",
"=",
"_tool_path",
"(",
"'brctl'",
")",
"return",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'{0} delbr {1}'",
".",
"format",
"(",
"brctl",
",",
"br",
")",
",",
"python_shell",
"=",
"False",
")"
] | Internal, deletes the bridge | [
"Internal",
"deletes",
"the",
"bridge"
] | python | train |
firecat53/urlscan | urlscan/urlchoose.py | https://github.com/firecat53/urlscan/blob/2d10807d01167873733da3b478c784f8fa21bbc0/urlscan/urlchoose.py#L357-L371 | def _digits(self):
""" 0-9 """
self.number += self.key
try:
if self.compact is False:
self.top.body.focus_position = \
self.items.index(self.items_com[max(int(self.number) - 1, 0)])
else:
self.top.body.focus_position = \
self.items.index(self.items[max(int(self.number) - 1, 0)])
except IndexError:
self.number = self.number[:-1]
self.top.keypress(self.size, "") # Trick urwid into redisplaying the cursor
if self.number:
self._footer_start_thread("Selection: {}".format(self.number), 1) | [
"def",
"_digits",
"(",
"self",
")",
":",
"self",
".",
"number",
"+=",
"self",
".",
"key",
"try",
":",
"if",
"self",
".",
"compact",
"is",
"False",
":",
"self",
".",
"top",
".",
"body",
".",
"focus_position",
"=",
"self",
".",
"items",
".",
"index",
"(",
"self",
".",
"items_com",
"[",
"max",
"(",
"int",
"(",
"self",
".",
"number",
")",
"-",
"1",
",",
"0",
")",
"]",
")",
"else",
":",
"self",
".",
"top",
".",
"body",
".",
"focus_position",
"=",
"self",
".",
"items",
".",
"index",
"(",
"self",
".",
"items",
"[",
"max",
"(",
"int",
"(",
"self",
".",
"number",
")",
"-",
"1",
",",
"0",
")",
"]",
")",
"except",
"IndexError",
":",
"self",
".",
"number",
"=",
"self",
".",
"number",
"[",
":",
"-",
"1",
"]",
"self",
".",
"top",
".",
"keypress",
"(",
"self",
".",
"size",
",",
"\"\"",
")",
"# Trick urwid into redisplaying the cursor",
"if",
"self",
".",
"number",
":",
"self",
".",
"_footer_start_thread",
"(",
"\"Selection: {}\"",
".",
"format",
"(",
"self",
".",
"number",
")",
",",
"1",
")"
] | 0-9 | [
"0",
"-",
"9"
] | python | train |
MacHu-GWU/rolex-project | rolex/math.py | https://github.com/MacHu-GWU/rolex-project/blob/a1111b410ed04b4b6eddd81df110fa2dacfa6537/rolex/math.py#L135-L165 | def add_years(datetime_like_object, n, return_date=False):
"""
Returns a time that n years after a time.
:param datetimestr: a datetime object or a datetime str
:param n: number of years, value can be negative
:param return_date: returns a date object instead of datetime
**中文文档**
返回给定日期N年之后的时间。
"""
a_datetime = parser.parse_datetime(datetime_like_object)
# try assign year, month, day
try:
a_datetime = datetime(
a_datetime.year + n, a_datetime.month, a_datetime.day,
a_datetime.hour, a_datetime.minute, a_datetime.second,
a_datetime.microsecond, tzinfo=a_datetime.tzinfo,
)
except ValueError: # Must be xxxx-02-29
a_datetime = datetime(
a_datetime.year + n, 2, 28,
a_datetime.hour, a_datetime.minute,
a_datetime.second, a_datetime.microsecond)
if return_date: # pragma: no cover
return a_datetime.date()
else:
return a_datetime | [
"def",
"add_years",
"(",
"datetime_like_object",
",",
"n",
",",
"return_date",
"=",
"False",
")",
":",
"a_datetime",
"=",
"parser",
".",
"parse_datetime",
"(",
"datetime_like_object",
")",
"# try assign year, month, day",
"try",
":",
"a_datetime",
"=",
"datetime",
"(",
"a_datetime",
".",
"year",
"+",
"n",
",",
"a_datetime",
".",
"month",
",",
"a_datetime",
".",
"day",
",",
"a_datetime",
".",
"hour",
",",
"a_datetime",
".",
"minute",
",",
"a_datetime",
".",
"second",
",",
"a_datetime",
".",
"microsecond",
",",
"tzinfo",
"=",
"a_datetime",
".",
"tzinfo",
",",
")",
"except",
"ValueError",
":",
"# Must be xxxx-02-29",
"a_datetime",
"=",
"datetime",
"(",
"a_datetime",
".",
"year",
"+",
"n",
",",
"2",
",",
"28",
",",
"a_datetime",
".",
"hour",
",",
"a_datetime",
".",
"minute",
",",
"a_datetime",
".",
"second",
",",
"a_datetime",
".",
"microsecond",
")",
"if",
"return_date",
":",
"# pragma: no cover",
"return",
"a_datetime",
".",
"date",
"(",
")",
"else",
":",
"return",
"a_datetime"
] | Returns a time that n years after a time.
:param datetimestr: a datetime object or a datetime str
:param n: number of years, value can be negative
:param return_date: returns a date object instead of datetime
**中文文档**
返回给定日期N年之后的时间。 | [
"Returns",
"a",
"time",
"that",
"n",
"years",
"after",
"a",
"time",
"."
] | python | train |
vertexproject/synapse | synapse/lib/task.py | https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/task.py#L130-L141 | async def executor(func, *args, **kwargs):
'''
Execute a function in an executor thread.
Args:
todo ((func,args,kwargs)): A todo tuple.
'''
def syncfunc():
return func(*args, **kwargs)
loop = asyncio.get_running_loop()
return await loop.run_in_executor(None, syncfunc) | [
"async",
"def",
"executor",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"syncfunc",
"(",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"loop",
"=",
"asyncio",
".",
"get_running_loop",
"(",
")",
"return",
"await",
"loop",
".",
"run_in_executor",
"(",
"None",
",",
"syncfunc",
")"
] | Execute a function in an executor thread.
Args:
todo ((func,args,kwargs)): A todo tuple. | [
"Execute",
"a",
"function",
"in",
"an",
"executor",
"thread",
"."
] | python | train |
manns/pyspread | pyspread/src/gui/_gui_interfaces.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_gui_interfaces.py#L189-L208 | def get_print_setup(self, print_data):
"""Opens print setup dialog and returns print_data"""
psd = wx.PageSetupDialogData(print_data)
# psd.EnablePrinter(False)
psd.CalculatePaperSizeFromId()
dlg = wx.PageSetupDialog(self.main_window, psd)
dlg.ShowModal()
# this makes a copy of the wx.PrintData instead of just saving
# a reference to the one inside the PrintDialogData that will
# be destroyed when the dialog is destroyed
data = dlg.GetPageSetupData()
new_print_data = wx.PrintData(data.GetPrintData())
new_print_data.PaperId = data.PaperId
new_print_data.PaperSize = data.PaperSize
dlg.Destroy()
return new_print_data | [
"def",
"get_print_setup",
"(",
"self",
",",
"print_data",
")",
":",
"psd",
"=",
"wx",
".",
"PageSetupDialogData",
"(",
"print_data",
")",
"# psd.EnablePrinter(False)",
"psd",
".",
"CalculatePaperSizeFromId",
"(",
")",
"dlg",
"=",
"wx",
".",
"PageSetupDialog",
"(",
"self",
".",
"main_window",
",",
"psd",
")",
"dlg",
".",
"ShowModal",
"(",
")",
"# this makes a copy of the wx.PrintData instead of just saving",
"# a reference to the one inside the PrintDialogData that will",
"# be destroyed when the dialog is destroyed",
"data",
"=",
"dlg",
".",
"GetPageSetupData",
"(",
")",
"new_print_data",
"=",
"wx",
".",
"PrintData",
"(",
"data",
".",
"GetPrintData",
"(",
")",
")",
"new_print_data",
".",
"PaperId",
"=",
"data",
".",
"PaperId",
"new_print_data",
".",
"PaperSize",
"=",
"data",
".",
"PaperSize",
"dlg",
".",
"Destroy",
"(",
")",
"return",
"new_print_data"
] | Opens print setup dialog and returns print_data | [
"Opens",
"print",
"setup",
"dialog",
"and",
"returns",
"print_data"
] | python | train |
Nukesor/pueue | pueue/daemon/daemon.py | https://github.com/Nukesor/pueue/blob/f1d276360454d4dd2738658a13df1e20caa4b926/pueue/daemon/daemon.py#L462-L486 | def stash(self, payload):
"""Stash the specified processes."""
succeeded = []
failed = []
for key in payload['keys']:
if self.queue.get(key) is not None:
if self.queue[key]['status'] == 'queued':
self.queue[key]['status'] = 'stashed'
succeeded.append(str(key))
else:
failed.append(str(key))
else:
failed.append(str(key))
message = ''
if len(succeeded) > 0:
message += 'Stashed entries: {}.'.format(', '.join(succeeded))
status = 'success'
if len(failed) > 0:
message += '\nNo queued entry for keys: {}'.format(', '.join(failed))
status = 'error'
answer = {'message': message.strip(), 'status': status}
return answer | [
"def",
"stash",
"(",
"self",
",",
"payload",
")",
":",
"succeeded",
"=",
"[",
"]",
"failed",
"=",
"[",
"]",
"for",
"key",
"in",
"payload",
"[",
"'keys'",
"]",
":",
"if",
"self",
".",
"queue",
".",
"get",
"(",
"key",
")",
"is",
"not",
"None",
":",
"if",
"self",
".",
"queue",
"[",
"key",
"]",
"[",
"'status'",
"]",
"==",
"'queued'",
":",
"self",
".",
"queue",
"[",
"key",
"]",
"[",
"'status'",
"]",
"=",
"'stashed'",
"succeeded",
".",
"append",
"(",
"str",
"(",
"key",
")",
")",
"else",
":",
"failed",
".",
"append",
"(",
"str",
"(",
"key",
")",
")",
"else",
":",
"failed",
".",
"append",
"(",
"str",
"(",
"key",
")",
")",
"message",
"=",
"''",
"if",
"len",
"(",
"succeeded",
")",
">",
"0",
":",
"message",
"+=",
"'Stashed entries: {}.'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"succeeded",
")",
")",
"status",
"=",
"'success'",
"if",
"len",
"(",
"failed",
")",
">",
"0",
":",
"message",
"+=",
"'\\nNo queued entry for keys: {}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"failed",
")",
")",
"status",
"=",
"'error'",
"answer",
"=",
"{",
"'message'",
":",
"message",
".",
"strip",
"(",
")",
",",
"'status'",
":",
"status",
"}",
"return",
"answer"
] | Stash the specified processes. | [
"Stash",
"the",
"specified",
"processes",
"."
] | python | train |
LettError/ufoProcessor | Lib/ufoProcessor/__init__.py | https://github.com/LettError/ufoProcessor/blob/7c63e1c8aba2f2ef9b12edb6560aa6c58024a89a/Lib/ufoProcessor/__init__.py#L831-L848 | def _instantiateFont(self, path):
""" Return a instance of a font object with all the given subclasses"""
try:
return self.fontClass(path,
layerClass=self.layerClass,
libClass=self.libClass,
kerningClass=self.kerningClass,
groupsClass=self.groupsClass,
infoClass=self.infoClass,
featuresClass=self.featuresClass,
glyphClass=self.glyphClass,
glyphContourClass=self.glyphContourClass,
glyphPointClass=self.glyphPointClass,
glyphComponentClass=self.glyphComponentClass,
glyphAnchorClass=self.glyphAnchorClass)
except TypeError:
# if our fontClass doesnt support all the additional classes
return self.fontClass(path) | [
"def",
"_instantiateFont",
"(",
"self",
",",
"path",
")",
":",
"try",
":",
"return",
"self",
".",
"fontClass",
"(",
"path",
",",
"layerClass",
"=",
"self",
".",
"layerClass",
",",
"libClass",
"=",
"self",
".",
"libClass",
",",
"kerningClass",
"=",
"self",
".",
"kerningClass",
",",
"groupsClass",
"=",
"self",
".",
"groupsClass",
",",
"infoClass",
"=",
"self",
".",
"infoClass",
",",
"featuresClass",
"=",
"self",
".",
"featuresClass",
",",
"glyphClass",
"=",
"self",
".",
"glyphClass",
",",
"glyphContourClass",
"=",
"self",
".",
"glyphContourClass",
",",
"glyphPointClass",
"=",
"self",
".",
"glyphPointClass",
",",
"glyphComponentClass",
"=",
"self",
".",
"glyphComponentClass",
",",
"glyphAnchorClass",
"=",
"self",
".",
"glyphAnchorClass",
")",
"except",
"TypeError",
":",
"# if our fontClass doesnt support all the additional classes",
"return",
"self",
".",
"fontClass",
"(",
"path",
")"
] | Return an instance of a font object with all the given subclasses | [
"Return",
"a",
"instance",
"of",
"a",
"font",
"object",
"with",
"all",
"the",
"given",
"subclasses"
] | python | train |
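The try/except TypeError fallback above is a general way to prefer a richer constructor while still supporting classes that do not accept the extra keyword hooks. A small illustrative sketch follows; the `SimpleFont` class and the hook names are made up and are not part of ufoProcessor.

def instantiate(font_class, path, **subclass_hooks):
    # Try the constructor that accepts customization hooks first;
    # fall back to the plain signature if the class rejects them.
    try:
        return font_class(path, **subclass_hooks)
    except TypeError:
        return font_class(path)

class SimpleFont:
    def __init__(self, path):
        self.path = path

font = instantiate(SimpleFont, "Example-Regular.ufo", glyphClass=object)
print(font.path)  # Example-Regular.ufo -- reached via the TypeError fallback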
moluwole/Bast | bast/validator/rules.py | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/validator/rules.py#L159-L170 | def run(self, value):
""" Determines if value character length equal self.length.
Keyword arguments:
value str -- the value of the associated field to compare
"""
if self.pass_ and not value.strip():
return True
if len((value.strip() if self.strip else value)) != self.length:
self.error = self.error.format(value, self.length)
return False
return True | [
"def",
"run",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"pass_",
"and",
"not",
"value",
".",
"strip",
"(",
")",
":",
"return",
"True",
"if",
"len",
"(",
"(",
"value",
".",
"strip",
"(",
")",
"if",
"self",
".",
"strip",
"else",
"value",
")",
")",
"!=",
"self",
".",
"length",
":",
"self",
".",
"error",
"=",
"self",
".",
"error",
".",
"format",
"(",
"value",
",",
"self",
".",
"length",
")",
"return",
"False",
"return",
"True"
] | Determines if value character length equals self.length.
Keyword arguments:
value str -- the value of the associated field to compare | [
"Determines",
"if",
"value",
"character",
"length",
"equal",
"self",
".",
"length",
".",
"Keyword",
"arguments",
":",
"value",
"str",
"--",
"the",
"value",
"of",
"the",
"associated",
"field",
"to",
"compare"
] | python | train |
peri-source/peri | peri/runner.py | https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/runner.py#L35-L96 | def locate_spheres(image, feature_rad, dofilter=False, order=(3 ,3, 3),
trim_edge=True, **kwargs):
"""
Get an initial featuring of sphere positions in an image.
Parameters
-----------
image : :class:`peri.util.Image` object
Image object which defines the image file as well as the region.
feature_rad : float
Radius of objects to find, in pixels. This is a featuring radius
and not a real radius, so a better value is frequently smaller
than the real radius (half the actual radius is good). If ``use_tp``
is True, then the twice ``feature_rad`` is passed as trackpy's
``diameter`` keyword.
dofilter : boolean, optional
Whether to remove the background before featuring. Doing so can
often greatly increase the success of initial featuring and
decrease later optimization time. Filtering functions by fitting
the image to a low-order polynomial and featuring the residuals.
In doing so, this will change the mean intensity of the featured
image and hence the good value of ``minmass`` will change when
``dofilter`` is True. Default is False.
order : 3-element tuple, optional
If `dofilter`, the 2+1D Leg Poly approximation to the background
illumination field. Default is (3,3,3).
Other Parameters
----------------
invert : boolean, optional
Whether to invert the image for featuring. Set to True if the
image is dark particles on a bright background. Default is True
minmass : Float or None, optional
The minimum mass/masscut of a particle. Default is None, which
calculates internally.
use_tp : Bool, optional
Whether or not to use trackpy. Default is False, since trackpy
cuts out particles at the edge.
Returns
--------
positions : np.ndarray [N,3]
Positions of the particles in order (z,y,x) in image pixel units.
Notes
-----
Optionally filters the image by fitting the image I(x,y,z) to a
polynomial, then subtracts this fitted intensity variation and uses
centroid methods to find the particles.
"""
# We just want a smoothed field model of the image so that the residuals
# are simply the particles without other complications
m = models.SmoothFieldModel()
I = ilms.LegendrePoly2P1D(order=order, constval=image.get_image().mean())
s = states.ImageState(image, [I], pad=0, mdl=m)
if dofilter:
opt.do_levmarq(s, s.params)
pos = addsub.feature_guess(s, feature_rad, trim_edge=trim_edge, **kwargs)[0]
return pos | [
"def",
"locate_spheres",
"(",
"image",
",",
"feature_rad",
",",
"dofilter",
"=",
"False",
",",
"order",
"=",
"(",
"3",
",",
"3",
",",
"3",
")",
",",
"trim_edge",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"# We just want a smoothed field model of the image so that the residuals",
"# are simply the particles without other complications",
"m",
"=",
"models",
".",
"SmoothFieldModel",
"(",
")",
"I",
"=",
"ilms",
".",
"LegendrePoly2P1D",
"(",
"order",
"=",
"order",
",",
"constval",
"=",
"image",
".",
"get_image",
"(",
")",
".",
"mean",
"(",
")",
")",
"s",
"=",
"states",
".",
"ImageState",
"(",
"image",
",",
"[",
"I",
"]",
",",
"pad",
"=",
"0",
",",
"mdl",
"=",
"m",
")",
"if",
"dofilter",
":",
"opt",
".",
"do_levmarq",
"(",
"s",
",",
"s",
".",
"params",
")",
"pos",
"=",
"addsub",
".",
"feature_guess",
"(",
"s",
",",
"feature_rad",
",",
"trim_edge",
"=",
"trim_edge",
",",
"*",
"*",
"kwargs",
")",
"[",
"0",
"]",
"return",
"pos"
] | Get an initial featuring of sphere positions in an image.
Parameters
-----------
image : :class:`peri.util.Image` object
Image object which defines the image file as well as the region.
feature_rad : float
Radius of objects to find, in pixels. This is a featuring radius
and not a real radius, so a better value is frequently smaller
than the real radius (half the actual radius is good). If ``use_tp``
is True, then the twice ``feature_rad`` is passed as trackpy's
``diameter`` keyword.
dofilter : boolean, optional
Whether to remove the background before featuring. Doing so can
often greatly increase the success of initial featuring and
decrease later optimization time. Filtering functions by fitting
the image to a low-order polynomial and featuring the residuals.
In doing so, this will change the mean intensity of the featured
image and hence the good value of ``minmass`` will change when
``dofilter`` is True. Default is False.
order : 3-element tuple, optional
If `dofilter`, the 2+1D Leg Poly approximation to the background
illumination field. Default is (3,3,3).
Other Parameters
----------------
invert : boolean, optional
Whether to invert the image for featuring. Set to True if the
image is dark particles on a bright background. Default is True
minmass : Float or None, optional
The minimum mass/masscut of a particle. Default is None, which
calculates internally.
use_tp : Bool, optional
Whether or not to use trackpy. Default is False, since trackpy
cuts out particles at the edge.
Returns
--------
positions : np.ndarray [N,3]
Positions of the particles in order (z,y,x) in image pixel units.
Notes
-----
Optionally filters the image by fitting the image I(x,y,z) to a
polynomial, then subtracts this fitted intensity variation and uses
centroid methods to find the particles. | [
"Get",
"an",
"initial",
"featuring",
"of",
"sphere",
"positions",
"in",
"an",
"image",
"."
] | python | valid |
google/grr | grr/server/grr_response_server/gui/http_api.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/http_api.py#L370-L396 | def _BuildStreamingResponse(self, binary_stream, method_name=None):
"""Builds HTTPResponse object for streaming."""
precondition.AssertType(method_name, Text)
# We get a first chunk of the output stream. This way the likelihood
# of catching an exception that may happen during response generation
# is much higher.
content = binary_stream.GenerateContent()
try:
peek = content.next()
stream = itertools.chain([peek], content)
except StopIteration:
stream = []
response = werkzeug_wrappers.Response(
response=stream,
content_type="binary/octet-stream",
direct_passthrough=True)
response.headers["Content-Disposition"] = ((
"attachment; filename=%s" % binary_stream.filename).encode("utf-8"))
if method_name:
response.headers["X-API-Method"] = method_name.encode("utf-8")
if binary_stream.content_length:
response.content_length = binary_stream.content_length
return response | [
"def",
"_BuildStreamingResponse",
"(",
"self",
",",
"binary_stream",
",",
"method_name",
"=",
"None",
")",
":",
"precondition",
".",
"AssertType",
"(",
"method_name",
",",
"Text",
")",
"# We get a first chunk of the output stream. This way the likelihood",
"# of catching an exception that may happen during response generation",
"# is much higher.",
"content",
"=",
"binary_stream",
".",
"GenerateContent",
"(",
")",
"try",
":",
"peek",
"=",
"content",
".",
"next",
"(",
")",
"stream",
"=",
"itertools",
".",
"chain",
"(",
"[",
"peek",
"]",
",",
"content",
")",
"except",
"StopIteration",
":",
"stream",
"=",
"[",
"]",
"response",
"=",
"werkzeug_wrappers",
".",
"Response",
"(",
"response",
"=",
"stream",
",",
"content_type",
"=",
"\"binary/octet-stream\"",
",",
"direct_passthrough",
"=",
"True",
")",
"response",
".",
"headers",
"[",
"\"Content-Disposition\"",
"]",
"=",
"(",
"(",
"\"attachment; filename=%s\"",
"%",
"binary_stream",
".",
"filename",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"if",
"method_name",
":",
"response",
".",
"headers",
"[",
"\"X-API-Method\"",
"]",
"=",
"method_name",
".",
"encode",
"(",
"\"utf-8\"",
")",
"if",
"binary_stream",
".",
"content_length",
":",
"response",
".",
"content_length",
"=",
"binary_stream",
".",
"content_length",
"return",
"response"
] | Builds HTTPResponse object for streaming. | [
"Builds",
"HTTPResponse",
"object",
"for",
"streaming",
"."
] | python | train |
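Pulling the first chunk eagerly and re-chaining it, as `_BuildStreamingResponse` does, makes generation errors surface before any response headers go out. Here is a framework-agnostic sketch of that peek-and-rechain idiom; the names are illustrative and not GRR's API.

import itertools

def peek_and_rechain(chunks):
    # Force the first chunk now so exceptions raised while producing it
    # happen here, then rebuild an iterator that still yields everything.
    iterator = iter(chunks)
    try:
        first = next(iterator)
    except StopIteration:
        return iter(())
    return itertools.chain([first], iterator)

def generate():
    yield b"col_a,col_b\n"
    yield b"1,2\n"

stream = peek_and_rechain(generate())
print(b"".join(stream))  # b'col_a,col_b\n1,2\n'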
wavefrontHQ/python-client | wavefront_api_client/api/event_api.py | https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/event_api.py#L626-L646 | def get_event_tags(self, id, **kwargs): # noqa: E501
"""Get all tags associated with a specific event # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_event_tags(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerTagsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_event_tags_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_event_tags_with_http_info(id, **kwargs) # noqa: E501
return data | [
"def",
"get_event_tags",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"get_event_tags_with_http_info",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"get_event_tags_with_http_info",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | Get all tags associated with a specific event # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_event_tags(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerTagsResponse
If the method is called asynchronously,
returns the request thread. | [
"Get",
"all",
"tags",
"associated",
"with",
"a",
"specific",
"event",
"#",
"noqa",
":",
"E501"
] | python | train |
rbarrois/confutils | confutils/configfile.py | https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L397-L402 | def handle_line(self, line):
"""Read one line."""
if line.kind == ConfigLine.KIND_HEADER:
self.enter_block(line.header)
else:
self.insert_line(line) | [
"def",
"handle_line",
"(",
"self",
",",
"line",
")",
":",
"if",
"line",
".",
"kind",
"==",
"ConfigLine",
".",
"KIND_HEADER",
":",
"self",
".",
"enter_block",
"(",
"line",
".",
"header",
")",
"else",
":",
"self",
".",
"insert_line",
"(",
"line",
")"
] | Read one line. | [
"Read",
"one",
"line",
"."
] | python | train |
DLR-RM/RAFCON | source/rafcon/gui/helpers/state.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/helpers/state.py#L147-L230 | def create_new_state_from_state_with_type(source_state, target_state_class):
"""The function duplicates/transforms a state to a new state type. If the source state type and the new state
type both are ContainerStates the new state will have no transitions to force the user to explicitly re-order
the logical flow according the paradigm of the new state type.
:param source_state: previous/original state that is to transform into a new state type (target_state_class)
:param target_state_class: the final state class type
:return:
"""
current_state_is_container = isinstance(source_state, ContainerState)
new_state_is_container = issubclass(target_state_class, ContainerState)
if current_state_is_container and new_state_is_container: # TRANSFORM from CONTAINER- TO CONTAINER-STATE
# by default all transitions are left out if the new and original state are container states
# -> because switch from Barrier, Preemptive or Hierarchy has always different rules
state_transitions = {}
state_start_state_id = None
logger.info("Type change from %s to %s" % (type(source_state).__name__, target_state_class.__name__))
# decider state is removed because it is unique for BarrierConcurrencyState
if isinstance(source_state, BarrierConcurrencyState):
source_state.remove_state(UNIQUE_DECIDER_STATE_ID, force=True)
assert UNIQUE_DECIDER_STATE_ID not in source_state.states
# separate state-elements from source state
data_flows = dict(source_state.data_flows)
source_state.data_flows = {}
input_data_ports = dict(source_state.input_data_ports)
output_data_ports = dict(source_state.output_data_ports)
scoped_variables = dict(source_state.scoped_variables)
income = source_state.income
outcomes = dict(source_state.outcomes)
source_state.input_data_ports = {}
source_state.output_data_ports = {}
source_state.scoped_variables = {}
source_state.transitions = {} # before remove of outcomes related transitions should be gone
source_state.income = Income()
source_state.outcomes = {}
states = dict(source_state.states)
# TODO check why next line can not be performed
# source_state.states = {}
new_state = target_state_class(name=source_state.name, state_id=source_state.state_id,
input_data_ports=input_data_ports,
output_data_ports=output_data_ports,
scoped_variables=scoped_variables,
income=income,
outcomes=outcomes,
transitions=state_transitions,
data_flows=data_flows,
states=states,
start_state_id=state_start_state_id)
else: # TRANSFORM from EXECUTION- TO CONTAINER-STATE or FROM CONTAINER- TO EXECUTION-STATE
# in case the new state is an execution state remove of child states (for observable notifications)
if current_state_is_container and issubclass(target_state_class, ExecutionState):
if isinstance(source_state, BarrierConcurrencyState):
source_state.remove_state(UNIQUE_DECIDER_STATE_ID, force=True)
assert UNIQUE_DECIDER_STATE_ID not in source_state.states
for state_id in list(source_state.states.keys()):
source_state.remove_state(state_id)
# separate state-elements from source state
input_data_ports = dict(source_state.input_data_ports)
output_data_ports = dict(source_state.output_data_ports)
income = source_state.income
outcomes = dict(source_state.outcomes)
source_state.input_data_ports = {}
source_state.output_data_ports = {}
source_state.income = Income()
source_state.outcomes = {}
new_state = target_state_class(name=source_state.name, state_id=source_state.state_id,
input_data_ports=input_data_ports,
output_data_ports=output_data_ports,
income=income, outcomes=outcomes)
if source_state.description is not None and len(source_state.description) > 0:
new_state.description = source_state.description
new_state.semantic_data = Vividict(source_state.semantic_data)
return new_state | [
"def",
"create_new_state_from_state_with_type",
"(",
"source_state",
",",
"target_state_class",
")",
":",
"current_state_is_container",
"=",
"isinstance",
"(",
"source_state",
",",
"ContainerState",
")",
"new_state_is_container",
"=",
"issubclass",
"(",
"target_state_class",
",",
"ContainerState",
")",
"if",
"current_state_is_container",
"and",
"new_state_is_container",
":",
"# TRANSFORM from CONTAINER- TO CONTAINER-STATE",
"# by default all transitions are left out if the new and original state are container states",
"# -> because switch from Barrier, Preemptive or Hierarchy has always different rules",
"state_transitions",
"=",
"{",
"}",
"state_start_state_id",
"=",
"None",
"logger",
".",
"info",
"(",
"\"Type change from %s to %s\"",
"%",
"(",
"type",
"(",
"source_state",
")",
".",
"__name__",
",",
"target_state_class",
".",
"__name__",
")",
")",
"# decider state is removed because it is unique for BarrierConcurrencyState",
"if",
"isinstance",
"(",
"source_state",
",",
"BarrierConcurrencyState",
")",
":",
"source_state",
".",
"remove_state",
"(",
"UNIQUE_DECIDER_STATE_ID",
",",
"force",
"=",
"True",
")",
"assert",
"UNIQUE_DECIDER_STATE_ID",
"not",
"in",
"source_state",
".",
"states",
"# separate state-elements from source state",
"data_flows",
"=",
"dict",
"(",
"source_state",
".",
"data_flows",
")",
"source_state",
".",
"data_flows",
"=",
"{",
"}",
"input_data_ports",
"=",
"dict",
"(",
"source_state",
".",
"input_data_ports",
")",
"output_data_ports",
"=",
"dict",
"(",
"source_state",
".",
"output_data_ports",
")",
"scoped_variables",
"=",
"dict",
"(",
"source_state",
".",
"scoped_variables",
")",
"income",
"=",
"source_state",
".",
"income",
"outcomes",
"=",
"dict",
"(",
"source_state",
".",
"outcomes",
")",
"source_state",
".",
"input_data_ports",
"=",
"{",
"}",
"source_state",
".",
"output_data_ports",
"=",
"{",
"}",
"source_state",
".",
"scoped_variables",
"=",
"{",
"}",
"source_state",
".",
"transitions",
"=",
"{",
"}",
"# before remove of outcomes related transitions should be gone",
"source_state",
".",
"income",
"=",
"Income",
"(",
")",
"source_state",
".",
"outcomes",
"=",
"{",
"}",
"states",
"=",
"dict",
"(",
"source_state",
".",
"states",
")",
"# TODO check why next line can not be performed",
"# source_state.states = {}",
"new_state",
"=",
"target_state_class",
"(",
"name",
"=",
"source_state",
".",
"name",
",",
"state_id",
"=",
"source_state",
".",
"state_id",
",",
"input_data_ports",
"=",
"input_data_ports",
",",
"output_data_ports",
"=",
"output_data_ports",
",",
"scoped_variables",
"=",
"scoped_variables",
",",
"income",
"=",
"income",
",",
"outcomes",
"=",
"outcomes",
",",
"transitions",
"=",
"state_transitions",
",",
"data_flows",
"=",
"data_flows",
",",
"states",
"=",
"states",
",",
"start_state_id",
"=",
"state_start_state_id",
")",
"else",
":",
"# TRANSFORM from EXECUTION- TO CONTAINER-STATE or FROM CONTAINER- TO EXECUTION-STATE",
"# in case the new state is an execution state remove of child states (for observable notifications)",
"if",
"current_state_is_container",
"and",
"issubclass",
"(",
"target_state_class",
",",
"ExecutionState",
")",
":",
"if",
"isinstance",
"(",
"source_state",
",",
"BarrierConcurrencyState",
")",
":",
"source_state",
".",
"remove_state",
"(",
"UNIQUE_DECIDER_STATE_ID",
",",
"force",
"=",
"True",
")",
"assert",
"UNIQUE_DECIDER_STATE_ID",
"not",
"in",
"source_state",
".",
"states",
"for",
"state_id",
"in",
"list",
"(",
"source_state",
".",
"states",
".",
"keys",
"(",
")",
")",
":",
"source_state",
".",
"remove_state",
"(",
"state_id",
")",
"# separate state-elements from source state",
"input_data_ports",
"=",
"dict",
"(",
"source_state",
".",
"input_data_ports",
")",
"output_data_ports",
"=",
"dict",
"(",
"source_state",
".",
"output_data_ports",
")",
"income",
"=",
"source_state",
".",
"income",
"outcomes",
"=",
"dict",
"(",
"source_state",
".",
"outcomes",
")",
"source_state",
".",
"input_data_ports",
"=",
"{",
"}",
"source_state",
".",
"output_data_ports",
"=",
"{",
"}",
"source_state",
".",
"income",
"=",
"Income",
"(",
")",
"source_state",
".",
"outcomes",
"=",
"{",
"}",
"new_state",
"=",
"target_state_class",
"(",
"name",
"=",
"source_state",
".",
"name",
",",
"state_id",
"=",
"source_state",
".",
"state_id",
",",
"input_data_ports",
"=",
"input_data_ports",
",",
"output_data_ports",
"=",
"output_data_ports",
",",
"income",
"=",
"income",
",",
"outcomes",
"=",
"outcomes",
")",
"if",
"source_state",
".",
"description",
"is",
"not",
"None",
"and",
"len",
"(",
"source_state",
".",
"description",
")",
">",
"0",
":",
"new_state",
".",
"description",
"=",
"source_state",
".",
"description",
"new_state",
".",
"semantic_data",
"=",
"Vividict",
"(",
"source_state",
".",
"semantic_data",
")",
"return",
"new_state"
] | The function duplicates/transforms a state to a new state type. If the source state type and the new state
type both are ContainerStates the new state will have no transitions to force the user to explicitly re-order
the logical flow according the paradigm of the new state type.
:param source_state: previous/original state that is to transform into a new state type (target_state_class)
:param target_state_class: the final state class type
:return: | [
"The",
"function",
"duplicates",
"/",
"transforms",
"a",
"state",
"to",
"a",
"new",
"state",
"type",
".",
"If",
"the",
"source",
"state",
"type",
"and",
"the",
"new",
"state",
"type",
"both",
"are",
"ContainerStates",
"the",
"new",
"state",
"will",
"have",
"not",
"transitions",
"to",
"force",
"the",
"user",
"to",
"explicitly",
"re",
"-",
"order",
"the",
"logical",
"flow",
"according",
"the",
"paradigm",
"of",
"the",
"new",
"state",
"type",
"."
] | python | train |
pantsbuild/pants | build-support/bin/check_header_helper.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/build-support/bin/check_header_helper.py#L71-L82 | def check_dir(directory, newly_created_files):
"""Returns list of files that fail the check."""
header_parse_failures = []
for root, dirs, files in os.walk(directory):
for f in files:
if f.endswith('.py') and os.path.basename(f) != '__init__.py':
filename = os.path.join(root, f)
try:
check_header(filename, filename in newly_created_files)
except HeaderCheckFailure as e:
header_parse_failures.append(e.message)
return header_parse_failures | [
"def",
"check_dir",
"(",
"directory",
",",
"newly_created_files",
")",
":",
"header_parse_failures",
"=",
"[",
"]",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"directory",
")",
":",
"for",
"f",
"in",
"files",
":",
"if",
"f",
".",
"endswith",
"(",
"'.py'",
")",
"and",
"os",
".",
"path",
".",
"basename",
"(",
"f",
")",
"!=",
"'__init__.py'",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"f",
")",
"try",
":",
"check_header",
"(",
"filename",
",",
"filename",
"in",
"newly_created_files",
")",
"except",
"HeaderCheckFailure",
"as",
"e",
":",
"header_parse_failures",
".",
"append",
"(",
"e",
".",
"message",
")",
"return",
"header_parse_failures"
] | Returns list of files that fail the check. | [
"Returns",
"list",
"of",
"files",
"that",
"fail",
"the",
"check",
"."
] | python | train |
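The walker above only inspects `.py` files (skipping `__init__.py`) and collects failure messages instead of stopping at the first one. Below is a stripped-down, standalone version of that pattern, assuming a simple "first line must match" rule rather than Pants' real header check.

import os

def files_missing_header(root, expected_prefix="# Copyright"):
    # Walk the tree, skipping non-Python files, and gather every offender
    # so all failures can be reported in a single pass.
    failures = []
    for dirpath, _dirs, filenames in os.walk(root):
        for name in filenames:
            if not name.endswith(".py") or name == "__init__.py":
                continue
            path = os.path.join(dirpath, name)
            with open(path, encoding="utf-8", errors="replace") as handle:
                if not handle.readline().startswith(expected_prefix):
                    failures.append(path)
    return failures

print(files_missing_header("."))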
dmerejkowsky/replacer | replacer.py | https://github.com/dmerejkowsky/replacer/blob/8dc16f297d0ff3a6ee2fa3c0d77789a6859b0f6a/replacer.py#L110-L133 | def walk_files(args, root, directory, action):
"""
Recursively go down the subdirectories of the directory,
calling the action on each file
"""
for entry in os.listdir(directory):
if is_hidden(args, entry):
continue
if is_excluded_directory(args, entry):
continue
if is_in_default_excludes(entry):
continue
if not is_included(args, entry):
continue
if is_excluded(args, entry, directory):
continue
entry = os.path.join(directory, entry)
if os.path.isdir(entry):
walk_files(args, root, entry, action)
if os.path.isfile(entry):
if is_binary(entry):
continue
action(entry) | [
"def",
"walk_files",
"(",
"args",
",",
"root",
",",
"directory",
",",
"action",
")",
":",
"for",
"entry",
"in",
"os",
".",
"listdir",
"(",
"directory",
")",
":",
"if",
"is_hidden",
"(",
"args",
",",
"entry",
")",
":",
"continue",
"if",
"is_excluded_directory",
"(",
"args",
",",
"entry",
")",
":",
"continue",
"if",
"is_in_default_excludes",
"(",
"entry",
")",
":",
"continue",
"if",
"not",
"is_included",
"(",
"args",
",",
"entry",
")",
":",
"continue",
"if",
"is_excluded",
"(",
"args",
",",
"entry",
",",
"directory",
")",
":",
"continue",
"entry",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"entry",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"entry",
")",
":",
"walk_files",
"(",
"args",
",",
"root",
",",
"entry",
",",
"action",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"entry",
")",
":",
"if",
"is_binary",
"(",
"entry",
")",
":",
"continue",
"action",
"(",
"entry",
")"
] | Recursively go down the subdirectories of the directory,
calling the action on each file | [
"Recusively",
"go",
"do",
"the",
"subdirectories",
"of",
"the",
"directory",
"calling",
"the",
"action",
"on",
"each",
"file"
] | python | train |
odlgroup/odl | odl/space/npy_tensors.py | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/npy_tensors.py#L2279-L2298 | def dist(self, x1, x2):
"""Return the weighted distance between ``x1`` and ``x2``.
Parameters
----------
x1, x2 : `NumpyTensor`
Tensors whose mutual distance is calculated.
Returns
-------
dist : float
The distance between the tensors.
"""
if self.exponent == 2.0:
return float(np.sqrt(self.const) * _norm_default(x1 - x2))
elif self.exponent == float('inf'):
return float(self.const * _pnorm_default(x1 - x2, self.exponent))
else:
return float((self.const ** (1 / self.exponent) *
_pnorm_default(x1 - x2, self.exponent))) | [
"def",
"dist",
"(",
"self",
",",
"x1",
",",
"x2",
")",
":",
"if",
"self",
".",
"exponent",
"==",
"2.0",
":",
"return",
"float",
"(",
"np",
".",
"sqrt",
"(",
"self",
".",
"const",
")",
"*",
"_norm_default",
"(",
"x1",
"-",
"x2",
")",
")",
"elif",
"self",
".",
"exponent",
"==",
"float",
"(",
"'inf'",
")",
":",
"return",
"float",
"(",
"self",
".",
"const",
"*",
"_pnorm_default",
"(",
"x1",
"-",
"x2",
",",
"self",
".",
"exponent",
")",
")",
"else",
":",
"return",
"float",
"(",
"(",
"self",
".",
"const",
"**",
"(",
"1",
"/",
"self",
".",
"exponent",
")",
"*",
"_pnorm_default",
"(",
"x1",
"-",
"x2",
",",
"self",
".",
"exponent",
")",
")",
")"
] | Return the weighted distance between ``x1`` and ``x2``.
Parameters
----------
x1, x2 : `NumpyTensor`
Tensors whose mutual distance is calculated.
Returns
-------
dist : float
The distance between the tensors. | [
"Return",
"the",
"weighted",
"distance",
"between",
"x1",
"and",
"x2",
"."
] | python | train |
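The three branches above apply a constant weight c as sqrt(c) * ||d||_2 for p = 2, c * ||d||_inf for p = inf, and c**(1/p) * ||d||_p otherwise. A small NumPy check of those formulas, independent of ODL (the helper name is invented):

import numpy as np

def weighted_dist(x1, x2, const, exponent=2.0):
    # Constant-weighted p-norm distance, mirroring the branches above.
    d = np.asarray(x1, dtype=float) - np.asarray(x2, dtype=float)
    if exponent == 2.0:
        return float(np.sqrt(const) * np.linalg.norm(d))
    if exponent == float("inf"):
        return float(const * np.max(np.abs(d)))
    return float(const ** (1.0 / exponent) *
                 np.sum(np.abs(d) ** exponent) ** (1.0 / exponent))

print(weighted_dist([3.0, 4.0], [0.0, 0.0], const=4.0))                          # 10.0 = sqrt(4) * 5
print(weighted_dist([3.0, 4.0], [0.0, 0.0], const=4.0, exponent=float("inf")))   # 16.0 = 4 * max(|d|)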
inveniosoftware/invenio-access | invenio_access/alembic/2069a982633b_add_on_delete_cascade_constraint.py | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/alembic/2069a982633b_add_on_delete_cascade_constraint.py#L38-L53 | def downgrade():
"""Downgrade database."""
op.drop_constraint(op.f('fk_access_actionsusers_user_id_accounts_user'),
'access_actionsusers', type_='foreignkey')
op.drop_index(op.f('ix_access_actionsusers_user_id'),
table_name='access_actionsusers')
op.create_foreign_key(u'fk_access_actionsusers_user_id_accounts_user',
'access_actionsusers', 'accounts_user', ['user_id'],
['id'])
op.drop_constraint(op.f('fk_access_actionsroles_role_id_accounts_role'),
'access_actionsroles', type_='foreignkey')
op.drop_index(op.f('ix_access_actionsroles_role_id'),
table_name='access_actionsroles')
op.create_foreign_key(u'fk_access_actionsroles_role_id_accounts_role',
'access_actionsroles', 'accounts_role', ['role_id'],
['id']) | [
"def",
"downgrade",
"(",
")",
":",
"op",
".",
"drop_constraint",
"(",
"op",
".",
"f",
"(",
"'fk_access_actionsusers_user_id_accounts_user'",
")",
",",
"'access_actionsusers'",
",",
"type_",
"=",
"'foreignkey'",
")",
"op",
".",
"drop_index",
"(",
"op",
".",
"f",
"(",
"'ix_access_actionsusers_user_id'",
")",
",",
"table_name",
"=",
"'access_actionsusers'",
")",
"op",
".",
"create_foreign_key",
"(",
"u'fk_access_actionsusers_user_id_accounts_user'",
",",
"'access_actionsusers'",
",",
"'accounts_user'",
",",
"[",
"'user_id'",
"]",
",",
"[",
"'id'",
"]",
")",
"op",
".",
"drop_constraint",
"(",
"op",
".",
"f",
"(",
"'fk_access_actionsroles_role_id_accounts_role'",
")",
",",
"'access_actionsroles'",
",",
"type_",
"=",
"'foreignkey'",
")",
"op",
".",
"drop_index",
"(",
"op",
".",
"f",
"(",
"'ix_access_actionsroles_role_id'",
")",
",",
"table_name",
"=",
"'access_actionsroles'",
")",
"op",
".",
"create_foreign_key",
"(",
"u'fk_access_actionsroles_role_id_accounts_role'",
",",
"'access_actionsroles'",
",",
"'accounts_role'",
",",
"[",
"'role_id'",
"]",
",",
"[",
"'id'",
"]",
")"
] | Downgrade database. | [
"Downgrade",
"database",
"."
] | python | train |
msmbuilder/msmbuilder | msmbuilder/msm/bayesmsm.py | https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/msm/bayesmsm.py#L307-L327 | def all_timescales_(self):
"""Implied relaxation timescales each sample in the ensemble
Returns
-------
timescales : array-like, shape = (n_samples, n_timescales,)
The longest implied relaxation timescales of the each sample in
the ensemble of transition matrices, expressed in units of
time-step between indices in the source data supplied
to ``fit()``.
References
----------
.. [1] Prinz, Jan-Hendrik, et al. "Markov models of molecular kinetics:
Generation and validation." J. Chem. Phys. 134.17 (2011): 174105.
"""
us, lvs, rvs = self._get_eigensystem()
# make sure to leave off equilibrium distribution
timescales = - self.lag_time / np.log(us[:, 1:])
return timescales | [
"def",
"all_timescales_",
"(",
"self",
")",
":",
"us",
",",
"lvs",
",",
"rvs",
"=",
"self",
".",
"_get_eigensystem",
"(",
")",
"# make sure to leave off equilibrium distribution",
"timescales",
"=",
"-",
"self",
".",
"lag_time",
"/",
"np",
".",
"log",
"(",
"us",
"[",
":",
",",
"1",
":",
"]",
")",
"return",
"timescales"
] | Implied relaxation timescales each sample in the ensemble
Returns
-------
timescales : array-like, shape = (n_samples, n_timescales,)
The longest implied relaxation timescales of the each sample in
the ensemble of transition matrices, expressed in units of
time-step between indices in the source data supplied
to ``fit()``.
References
----------
.. [1] Prinz, Jan-Hendrik, et al. "Markov models of molecular kinetics:
Generation and validation." J. Chem. Phys. 134.17 (2011): 174105. | [
"Implied",
"relaxation",
"timescales",
"each",
"sample",
"in",
"the",
"ensemble"
] | python | train |
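The property above converts transition-matrix eigenvalues into implied timescales via t_i = -lag_time / ln(lambda_i), dropping the stationary eigenvalue lambda_0 = 1. A tiny numeric illustration of that formula (the example spectrum is made up):

import numpy as np

lag_time = 10                              # steps between frames used to fit the model
eigenvalues = np.array([1.0, 0.95, 0.60])  # example transition-matrix spectrum

# Skip the stationary eigenvalue (index 0) exactly as the property does.
timescales = -lag_time / np.log(eigenvalues[1:])
print(timescales)   # approximately [194.96, 19.58] steps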
pgjones/quart | quart/app.py | https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/app.py#L1123-L1143 | def after_serving(self, func: Callable) -> Callable:
"""Add a after serving function.
This will allow the function provided to be called once after
anything is served (after last byte is sent).
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.after_serving
def func():
...
Arguments:
func: The function itself.
"""
handler = ensure_coroutine(func)
self.after_serving_funcs.append(handler)
return func | [
"def",
"after_serving",
"(",
"self",
",",
"func",
":",
"Callable",
")",
"->",
"Callable",
":",
"handler",
"=",
"ensure_coroutine",
"(",
"func",
")",
"self",
".",
"after_serving_funcs",
".",
"append",
"(",
"handler",
")",
"return",
"func"
] | Add an after serving function.
This will allow the function provided to be called once after
anything is served (after last byte is sent).
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.after_serving
def func():
...
Arguments:
func: The function itself. | [
"Add",
"a",
"after",
"serving",
"function",
"."
] | python | train |
hyperledger/indy-plenum | plenum/server/replica.py | https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L555-L568 | def generateName(nodeName: str, instId: int):
"""
Create and return the name for a replica using its nodeName and
instanceId.
Ex: Alpha:1
"""
if isinstance(nodeName, str):
# Because sometimes it is bytes (why?)
if ":" in nodeName:
# Because in some cases (for requested messages) it
# already has ':'. This should be fixed.
return nodeName
return "{}:{}".format(nodeName, instId) | [
"def",
"generateName",
"(",
"nodeName",
":",
"str",
",",
"instId",
":",
"int",
")",
":",
"if",
"isinstance",
"(",
"nodeName",
",",
"str",
")",
":",
"# Because sometimes it is bytes (why?)",
"if",
"\":\"",
"in",
"nodeName",
":",
"# Because in some cases (for requested messages) it",
"# already has ':'. This should be fixed.",
"return",
"nodeName",
"return",
"\"{}:{}\"",
".",
"format",
"(",
"nodeName",
",",
"instId",
")"
] | Create and return the name for a replica using its nodeName and
instanceId.
Ex: Alpha:1 | [
"Create",
"and",
"return",
"the",
"name",
"for",
"a",
"replica",
"using",
"its",
"nodeName",
"and",
"instanceId",
".",
"Ex",
":",
"Alpha",
":",
"1"
] | python | train |
bcbio/bcbio-nextgen | bcbio/provenance/do.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/do.py#L60-L72 | def _normalize_cmd_args(cmd):
"""Normalize subprocess arguments to handle list commands, string and pipes.
Piped commands set pipefail and require use of bash to help with debugging
intermediate errors.
"""
if isinstance(cmd, six.string_types):
# check for standard or anonymous named pipes
if cmd.find(" | ") > 0 or cmd.find(">(") or cmd.find("<("):
return "set -o pipefail; " + cmd, True, find_bash()
else:
return cmd, True, None
else:
return [str(x) for x in cmd], False, None | [
"def",
"_normalize_cmd_args",
"(",
"cmd",
")",
":",
"if",
"isinstance",
"(",
"cmd",
",",
"six",
".",
"string_types",
")",
":",
"# check for standard or anonymous named pipes",
"if",
"cmd",
".",
"find",
"(",
"\" | \"",
")",
">",
"0",
"or",
"cmd",
".",
"find",
"(",
"\">(\"",
")",
"or",
"cmd",
".",
"find",
"(",
"\"<(\"",
")",
":",
"return",
"\"set -o pipefail; \"",
"+",
"cmd",
",",
"True",
",",
"find_bash",
"(",
")",
"else",
":",
"return",
"cmd",
",",
"True",
",",
"None",
"else",
":",
"return",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"cmd",
"]",
",",
"False",
",",
"None"
] | Normalize subprocess arguments to handle list commands, string and pipes.
Piped commands set pipefail and require use of bash to help with debugging
intermediate errors. | [
"Normalize",
"subprocess",
"arguments",
"to",
"handle",
"list",
"commands",
"string",
"and",
"pipes",
".",
"Piped",
"commands",
"set",
"pipefail",
"and",
"require",
"use",
"of",
"bash",
"to",
"help",
"with",
"debugging",
"intermediate",
"errors",
"."
] | python | train |
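A standalone sketch of the normalization rule above: list commands go straight to exec, while string commands that use pipes or process substitution are routed through bash with pipefail. This sketch uses substring membership tests, which sidesteps the fact that `str.find` returns -1 (a truthy value) when nothing is found; it is an illustration, not the bcbio implementation.

def normalize_cmd(cmd):
    # Returns (command, needs_shell, executable) in the spirit of the helper above.
    if isinstance(cmd, str):
        uses_pipes = " | " in cmd or ">(" in cmd or "<(" in cmd
        if uses_pipes:
            # pipefail makes a failure anywhere in the pipeline fail the whole command.
            return "set -o pipefail; " + cmd, True, "/bin/bash"
        return cmd, True, None
    return [str(x) for x in cmd], False, None

print(normalize_cmd("sort reads.txt | uniq -c"))
print(normalize_cmd(["samtools", "view", "-b", "input.sam"]))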
OCR-D/core | ocrd/ocrd/cli/workspace.py | https://github.com/OCR-D/core/blob/57e68c578526cb955fd2e368207f5386c459d91d/ocrd/ocrd/cli/workspace.py#L276-L282 | def workspace_backup_list(ctx):
"""
List backups
"""
backup_manager = WorkspaceBackupManager(Workspace(ctx.resolver, directory=ctx.directory, mets_basename=ctx.mets_basename, automatic_backup=ctx.automatic_backup))
for b in backup_manager.list():
print(b) | [
"def",
"workspace_backup_list",
"(",
"ctx",
")",
":",
"backup_manager",
"=",
"WorkspaceBackupManager",
"(",
"Workspace",
"(",
"ctx",
".",
"resolver",
",",
"directory",
"=",
"ctx",
".",
"directory",
",",
"mets_basename",
"=",
"ctx",
".",
"mets_basename",
",",
"automatic_backup",
"=",
"ctx",
".",
"automatic_backup",
")",
")",
"for",
"b",
"in",
"backup_manager",
".",
"list",
"(",
")",
":",
"print",
"(",
"b",
")"
] | List backups | [
"List",
"backups"
] | python | train |
nerdvegas/rez | src/build_utils/virtualenv/virtualenv.py | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/build_utils/virtualenv/virtualenv.py#L618-L624 | def get_environ_vars(self, prefix='VIRTUALENV_'):
"""
Returns a generator with all environmental vars with prefix VIRTUALENV
"""
for key, val in os.environ.items():
if key.startswith(prefix):
yield (key.replace(prefix, '').lower(), val) | [
"def",
"get_environ_vars",
"(",
"self",
",",
"prefix",
"=",
"'VIRTUALENV_'",
")",
":",
"for",
"key",
",",
"val",
"in",
"os",
".",
"environ",
".",
"items",
"(",
")",
":",
"if",
"key",
".",
"startswith",
"(",
"prefix",
")",
":",
"yield",
"(",
"key",
".",
"replace",
"(",
"prefix",
",",
"''",
")",
".",
"lower",
"(",
")",
",",
"val",
")"
] | Returns a generator with all environmental vars with prefix VIRTUALENV | [
"Returns",
"a",
"generator",
"with",
"all",
"environmental",
"vars",
"with",
"prefix",
"VIRTUALENV"
] | python | train |
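The generator above maps environment variables such as VIRTUALENV_ALWAYS_COPY to lowercase option names. A self-contained variant for experimentation follows; it slices off the prefix rather than calling `replace`, so a prefix substring appearing later in the name is left alone.

import os

def environ_vars(prefix="VIRTUALENV_"):
    # Yield (option_name, value) pairs for every variable carrying the prefix.
    for key, value in os.environ.items():
        if key.startswith(prefix):
            yield key[len(prefix):].lower(), value

os.environ["VIRTUALENV_ALWAYS_COPY"] = "1"   # seed one so the demo prints something
print(dict(environ_vars()))                  # e.g. {'always_copy': '1'}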
marrow/web.db | web/ext/db.py | https://github.com/marrow/web.db/blob/c755fbff7028a5edc223d6a631b8421858274fc4/web/ext/db.py#L52-L57 | def _handle_event(self, event, *args, **kw):
"""Broadcast an event to the database connections registered."""
for engine in self.engines.values():
if hasattr(engine, event):
getattr(engine, event)(*args, **kw) | [
"def",
"_handle_event",
"(",
"self",
",",
"event",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"for",
"engine",
"in",
"self",
".",
"engines",
".",
"values",
"(",
")",
":",
"if",
"hasattr",
"(",
"engine",
",",
"event",
")",
":",
"getattr",
"(",
"engine",
",",
"event",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")"
] | Broadcast an event to the database connections registered. | [
"Broadcast",
"an",
"event",
"to",
"the",
"database",
"connections",
"registered",
"."
] | python | test |
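The broadcast loop above forwards lifecycle events only to engines that define a matching method. The same duck-typed dispatch, reduced to a runnable toy (the engine classes and event name are invented, not part of web.db):

class AuditEngine:
    def on_commit(self, record):
        print("audited", record)

class CacheEngine:
    pass  # defines no on_commit handler, so it is skipped

engines = {"audit": AuditEngine(), "cache": CacheEngine()}

def handle_event(event, *args, **kw):
    # Call engine.<event>(...) on every engine that actually implements it.
    for engine in engines.values():
        handler = getattr(engine, event, None)
        if handler is not None:
            handler(*args, **kw)

handle_event("on_commit", {"id": 42})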
markovmodel/PyEMMA | pyemma/plots/plots2d.py | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/plots/plots2d.py#L63-L108 | def scatter_contour(
x, y, z, ncontours=50, colorbar=True, fig=None,
ax=None, cmap=None, outfile=None):
"""Contour plot on scattered data (x,y,z) and
plots the positions of the points (x,y) on top.
Parameters
----------
x : ndarray(T)
x-coordinates
y : ndarray(T)
y-coordinates
z : ndarray(T)
z-coordinates
ncontours : int, optional, default=50
number of contour levels
fig : matplotlib Figure object, optional, default=None
the figure to plot into. When set to None the default
Figure object will be used
ax : matplotlib Axes object, optional, default=None
the axes to plot to. When set to None the default Axes
object will be used.
cmap : matplotlib colormap, optional, default=None
the color map to use. None will use pylab.cm.jet.
outfile : str, optional, default=None
output file to write the figure to. When not given,
the plot will be displayed
Returns
-------
ax : Axes object containing the plot
"""
_warn(
'scatter_contour is deprected; use plot_contour instead'
' and manually add a scatter plot on top.',
DeprecationWarning)
ax = contour(
x, y, z, ncontours=ncontours, colorbar=colorbar,
fig=fig, ax=ax, cmap=cmap)
# scatter points
ax.scatter(x , y, marker='o', c='b', s=5)
# show or save
if outfile is not None:
ax.get_figure().savefig(outfile)
return ax | [
"def",
"scatter_contour",
"(",
"x",
",",
"y",
",",
"z",
",",
"ncontours",
"=",
"50",
",",
"colorbar",
"=",
"True",
",",
"fig",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"cmap",
"=",
"None",
",",
"outfile",
"=",
"None",
")",
":",
"_warn",
"(",
"'scatter_contour is deprected; use plot_contour instead'",
"' and manually add a scatter plot on top.'",
",",
"DeprecationWarning",
")",
"ax",
"=",
"contour",
"(",
"x",
",",
"y",
",",
"z",
",",
"ncontours",
"=",
"ncontours",
",",
"colorbar",
"=",
"colorbar",
",",
"fig",
"=",
"fig",
",",
"ax",
"=",
"ax",
",",
"cmap",
"=",
"cmap",
")",
"# scatter points",
"ax",
".",
"scatter",
"(",
"x",
",",
"y",
",",
"marker",
"=",
"'o'",
",",
"c",
"=",
"'b'",
",",
"s",
"=",
"5",
")",
"# show or save",
"if",
"outfile",
"is",
"not",
"None",
":",
"ax",
".",
"get_figure",
"(",
")",
".",
"savefig",
"(",
"outfile",
")",
"return",
"ax"
] | Contour plot on scattered data (x,y,z) and
plots the positions of the points (x,y) on top.
Parameters
----------
x : ndarray(T)
x-coordinates
y : ndarray(T)
y-coordinates
z : ndarray(T)
z-coordinates
ncontours : int, optional, default=50
number of contour levels
fig : matplotlib Figure object, optional, default=None
the figure to plot into. When set to None the default
Figure object will be used
ax : matplotlib Axes object, optional, default=None
the axes to plot to. When set to None the default Axes
object will be used.
cmap : matplotlib colormap, optional, default=None
the color map to use. None will use pylab.cm.jet.
outfile : str, optional, default=None
output file to write the figure to. When not given,
the plot will be displayed
Returns
-------
ax : Axes object containing the plot | [
"Contour",
"plot",
"on",
"scattered",
"data",
"(",
"x",
"y",
"z",
")",
"and",
"plots",
"the",
"positions",
"of",
"the",
"points",
"(",
"x",
"y",
")",
"on",
"top",
"."
] | python | train |
NikolayDachev/jadm | lib/paramiko-1.14.1/paramiko/message.py | https://github.com/NikolayDachev/jadm/blob/12bb550445edfcd87506f7cba7a6a35d413c5511/lib/paramiko-1.14.1/paramiko/message.py#L279-L288 | def add_string(self, s):
"""
Add a string to the stream.
:param str s: string to add
"""
s = asbytes(s)
self.add_size(len(s))
self.packet.write(s)
return self | [
"def",
"add_string",
"(",
"self",
",",
"s",
")",
":",
"s",
"=",
"asbytes",
"(",
"s",
")",
"self",
".",
"add_size",
"(",
"len",
"(",
"s",
")",
")",
"self",
".",
"packet",
".",
"write",
"(",
"s",
")",
"return",
"self"
] | Add a string to the stream.
:param str s: string to add | [
"Add",
"a",
"string",
"to",
"the",
"stream",
".",
":",
"param",
"str",
"s",
":",
"string",
"to",
"add"
] | python | train |
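`add_string` writes a length-prefixed string: `add_size` emits a 4-byte big-endian length, followed by the raw bytes, which is the SSH wire format for strings. A stdlib-only sketch of that framing, separate from Paramiko's Message class:

import struct

def pack_ssh_string(data):
    # uint32 big-endian length prefix, then the bytes themselves.
    if isinstance(data, str):
        data = data.encode("utf-8")
    return struct.pack(">I", len(data)) + data

def unpack_ssh_string(buffer, offset=0):
    (length,) = struct.unpack_from(">I", buffer, offset)
    start = offset + 4
    return buffer[start:start + length], start + length

packet = pack_ssh_string("ssh-rsa")
print(packet)                     # b'\x00\x00\x00\x07ssh-rsa'
print(unpack_ssh_string(packet))  # (b'ssh-rsa', 11)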
alvinwan/TexSoup | TexSoup/reader.py | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/reader.py#L72-L85 | def tokenize(text):
r"""Generator for LaTeX tokens on text, ignoring comments.
:param Union[str,iterator,Buffer] text: LaTeX to process
>>> print(*tokenize(r'\textbf{Do play \textit{nice}.}'))
\textbf { Do play \textit { nice } . }
>>> print(*tokenize(r'\begin{tabular} 0 & 1 \\ 2 & 0 \end{tabular}'))
\begin { tabular } 0 & 1 \\ 2 & 0 \end { tabular }
"""
current_token = next_token(text)
while current_token is not None:
yield current_token
current_token = next_token(text) | [
"def",
"tokenize",
"(",
"text",
")",
":",
"current_token",
"=",
"next_token",
"(",
"text",
")",
"while",
"current_token",
"is",
"not",
"None",
":",
"yield",
"current_token",
"current_token",
"=",
"next_token",
"(",
"text",
")"
] | r"""Generator for LaTeX tokens on text, ignoring comments.
:param Union[str,iterator,Buffer] text: LaTeX to process
>>> print(*tokenize(r'\textbf{Do play \textit{nice}.}'))
\textbf { Do play \textit { nice } . }
>>> print(*tokenize(r'\begin{tabular} 0 & 1 \\ 2 & 0 \end{tabular}'))
\begin { tabular } 0 & 1 \\ 2 & 0 \end { tabular } | [
"r",
"Generator",
"for",
"LaTeX",
"tokens",
"on",
"text",
"ignoring",
"comments",
"."
] | python | train |
istresearch/scrapy-cluster | utils/scutils/stats_collector.py | https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/stats_collector.py#L70-L87 | def get_rolling_time_window(self, redis_conn=None, host='localhost',
port=6379, key='rolling_time_window_counter',
cycle_time=5, window=SECONDS_1_HOUR):
'''
Generate a new RollingTimeWindow
Useful for collecting data about the number of hits in the past X seconds
@param redis_conn: A premade redis connection (overrides host and port)
@param host: the redis host
@param port: the redis port
@param key: the key for your stats collection
@param cycle_time: how often to check for expiring counts
@param window: the number of seconds behind now() to keep data for
'''
counter = RollingTimeWindow(key=key, cycle_time=cycle_time,
window=window)
counter.setup(redis_conn=redis_conn, host=host, port=port)
return counter | [
"def",
"get_rolling_time_window",
"(",
"self",
",",
"redis_conn",
"=",
"None",
",",
"host",
"=",
"'localhost'",
",",
"port",
"=",
"6379",
",",
"key",
"=",
"'rolling_time_window_counter'",
",",
"cycle_time",
"=",
"5",
",",
"window",
"=",
"SECONDS_1_HOUR",
")",
":",
"counter",
"=",
"RollingTimeWindow",
"(",
"key",
"=",
"key",
",",
"cycle_time",
"=",
"cycle_time",
",",
"window",
"=",
"window",
")",
"counter",
".",
"setup",
"(",
"redis_conn",
"=",
"redis_conn",
",",
"host",
"=",
"host",
",",
"port",
"=",
"port",
")",
"return",
"counter"
] | Generate a new RollingTimeWindow
Useful for collecting data about the number of hits in the past X seconds
@param redis_conn: A premade redis connection (overrides host and port)
@param host: the redis host
@param port: the redis port
@param key: the key for your stats collection
@param cycle_time: how often to check for expiring counts
@param window: the number of seconds behind now() to keep data for | [
"Generate",
"a",
"new",
"RollingTimeWindow",
"Useful",
"for",
"collect",
"data",
"about",
"the",
"number",
"of",
"hits",
"in",
"the",
"past",
"X",
"seconds"
] | python | train |
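The factory above wires up a Redis-backed rolling window. To make the semantics concrete, here is a purely in-memory stand-in that keeps one timestamp per hit and expires anything older than the window; it illustrates the idea only and is not the scutils implementation.

import time
from collections import deque

class RollingTimeWindow:
    def __init__(self, window=3600):
        self.window = window
        self.hits = deque()

    def increment(self, now=None):
        # Record one hit at the given (or current) timestamp.
        self.hits.append(time.time() if now is None else now)

    def value(self, now=None):
        # Drop hits that have aged past the window, then count what remains.
        now = time.time() if now is None else now
        while self.hits and self.hits[0] < now - self.window:
            self.hits.popleft()
        return len(self.hits)

w = RollingTimeWindow(window=5)
w.increment(now=100.0); w.increment(now=103.0); w.increment(now=104.5)
print(w.value(now=106.0))   # 2 -- the hit at t=100 has aged out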
ARMmbed/icetea | icetea_lib/tools/file/FileUtils.py | https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/tools/file/FileUtils.py#L55-L76 | def remove_file(filename, path=None):
"""
Remove file filename from path.
:param filename: Name of file to remove
:param path: Path where file is located
:return: True if successfull
:raises OSError if chdir or remove fails.
"""
cwd = os.getcwd()
try:
if path:
os.chdir(path)
except OSError:
raise
try:
os.remove(filename)
os.chdir(cwd)
return True
except OSError:
os.chdir(cwd)
raise | [
"def",
"remove_file",
"(",
"filename",
",",
"path",
"=",
"None",
")",
":",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"try",
":",
"if",
"path",
":",
"os",
".",
"chdir",
"(",
"path",
")",
"except",
"OSError",
":",
"raise",
"try",
":",
"os",
".",
"remove",
"(",
"filename",
")",
"os",
".",
"chdir",
"(",
"cwd",
")",
"return",
"True",
"except",
"OSError",
":",
"os",
".",
"chdir",
"(",
"cwd",
")",
"raise"
] | Remove file filename from path.
:param filename: Name of file to remove
:param path: Path where file is located
:return: True if successfull
:raises OSError if chdir or remove fails. | [
"Remove",
"file",
"filename",
"from",
"path",
"."
] | python | train |
KnowledgeLinks/rdfframework | rdfframework/rdfclass/rdfproperty.py | https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/rdfclass/rdfproperty.py#L588-L599 | def unique_append(self, value):
""" function for only appending unique items to a list.
#! consider the possibility of item using this to a set
"""
if value not in self:
try:
super(self.__class__, self).append(Uri(value))
except AttributeError as err:
if isinstance(value, MODULE.rdfclass.RdfClassBase):
super(self.__class__, self).append(value)
else:
raise err | [
"def",
"unique_append",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"not",
"in",
"self",
":",
"try",
":",
"super",
"(",
"self",
".",
"__class__",
",",
"self",
")",
".",
"append",
"(",
"Uri",
"(",
"value",
")",
")",
"except",
"AttributeError",
"as",
"err",
":",
"if",
"isinstance",
"(",
"value",
",",
"MODULE",
".",
"rdfclass",
".",
"RdfClassBase",
")",
":",
"super",
"(",
"self",
".",
"__class__",
",",
"self",
")",
".",
"append",
"(",
"value",
")",
"else",
":",
"raise",
"err"
] | function for only appending unique items to a list.
#! consider the possibility of item using this to a set | [
"function",
"for",
"only",
"appending",
"unique",
"items",
"to",
"a",
"list",
".",
"#!",
"consider",
"the",
"possibility",
"of",
"item",
"using",
"this",
"to",
"a",
"set"
] | python | train |
rollbar/pyrollbar | rollbar/__init__.py | https://github.com/rollbar/pyrollbar/blob/33ef2e723a33d09dd6302f978f4a3908be95b9d2/rollbar/__init__.py#L436-L479 | def send_payload(payload, access_token):
"""
Sends a payload object, (the result of calling _build_payload() + _serialize_payload()).
Uses the configured handler from SETTINGS['handler']
Available handlers:
- 'blocking': calls _send_payload() (which makes an HTTP request) immediately, blocks on it
- 'thread': starts a single-use thread that will call _send_payload(). returns immediately.
- 'agent': writes to a log file to be processed by rollbar-agent
- 'tornado': calls _send_payload_tornado() (which makes an async HTTP request using tornado's AsyncHTTPClient)
- 'gae': calls _send_payload_appengine() (which makes a blocking call to Google App Engine)
- 'twisted': calls _send_payload_twisted() (which makes an async HTTP request using Twisted and Treq)
"""
payload = events.on_payload(payload)
if payload is False:
return
payload_str = _serialize_payload(payload)
handler = SETTINGS.get('handler')
if handler == 'blocking':
_send_payload(payload_str, access_token)
elif handler == 'agent':
agent_log.error(payload_str)
elif handler == 'tornado':
if TornadoAsyncHTTPClient is None:
log.error('Unable to find tornado')
return
_send_payload_tornado(payload_str, access_token)
elif handler == 'gae':
if AppEngineFetch is None:
log.error('Unable to find AppEngine URLFetch module')
return
_send_payload_appengine(payload_str, access_token)
elif handler == 'twisted':
if treq is None:
log.error('Unable to find Treq')
return
_send_payload_twisted(payload_str, access_token)
else:
# default to 'thread'
thread = threading.Thread(target=_send_payload, args=(payload_str, access_token))
_threads.put(thread)
thread.start() | [
"def",
"send_payload",
"(",
"payload",
",",
"access_token",
")",
":",
"payload",
"=",
"events",
".",
"on_payload",
"(",
"payload",
")",
"if",
"payload",
"is",
"False",
":",
"return",
"payload_str",
"=",
"_serialize_payload",
"(",
"payload",
")",
"handler",
"=",
"SETTINGS",
".",
"get",
"(",
"'handler'",
")",
"if",
"handler",
"==",
"'blocking'",
":",
"_send_payload",
"(",
"payload_str",
",",
"access_token",
")",
"elif",
"handler",
"==",
"'agent'",
":",
"agent_log",
".",
"error",
"(",
"payload_str",
")",
"elif",
"handler",
"==",
"'tornado'",
":",
"if",
"TornadoAsyncHTTPClient",
"is",
"None",
":",
"log",
".",
"error",
"(",
"'Unable to find tornado'",
")",
"return",
"_send_payload_tornado",
"(",
"payload_str",
",",
"access_token",
")",
"elif",
"handler",
"==",
"'gae'",
":",
"if",
"AppEngineFetch",
"is",
"None",
":",
"log",
".",
"error",
"(",
"'Unable to find AppEngine URLFetch module'",
")",
"return",
"_send_payload_appengine",
"(",
"payload_str",
",",
"access_token",
")",
"elif",
"handler",
"==",
"'twisted'",
":",
"if",
"treq",
"is",
"None",
":",
"log",
".",
"error",
"(",
"'Unable to find Treq'",
")",
"return",
"_send_payload_twisted",
"(",
"payload_str",
",",
"access_token",
")",
"else",
":",
"# default to 'thread'",
"thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"_send_payload",
",",
"args",
"=",
"(",
"payload_str",
",",
"access_token",
")",
")",
"_threads",
".",
"put",
"(",
"thread",
")",
"thread",
".",
"start",
"(",
")"
] | Sends a payload object, (the result of calling _build_payload() + _serialize_payload()).
Uses the configured handler from SETTINGS['handler']
Available handlers:
- 'blocking': calls _send_payload() (which makes an HTTP request) immediately, blocks on it
- 'thread': starts a single-use thread that will call _send_payload(). returns immediately.
- 'agent': writes to a log file to be processed by rollbar-agent
- 'tornado': calls _send_payload_tornado() (which makes an async HTTP request using tornado's AsyncHTTPClient)
- 'gae': calls _send_payload_appengine() (which makes a blocking call to Google App Engine)
- 'twisted': calls _send_payload_twisted() (which makes an async HTTP request using Twisted and Treq) | [
"Sends",
"a",
"payload",
"object",
"(",
"the",
"result",
"of",
"calling",
"_build_payload",
"()",
"+",
"_serialize_payload",
"()",
")",
".",
"Uses",
"the",
"configured",
"handler",
"from",
"SETTINGS",
"[",
"handler",
"]"
] | python | test |
singnet/snet-cli | snet_cli/utils_ipfs.py | https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/utils_ipfs.py#L35-L63 | def get_from_ipfs_and_checkhash(ipfs_client, ipfs_hash_base58, validate=True):
"""
Get file from ipfs
We must check the hash becasue we cannot believe that ipfs_client wasn't been compromise
"""
if validate:
from snet_cli.resources.proto.unixfs_pb2 import Data
from snet_cli.resources.proto.merckledag_pb2 import MerkleNode
# No nice Python library to parse ipfs blocks, so do it ourselves.
block_data = ipfs_client.block_get(ipfs_hash_base58)
mn = MerkleNode()
mn.ParseFromString(block_data)
unixfs_data = Data()
unixfs_data.ParseFromString(mn.Data)
assert unixfs_data.Type == unixfs_data.DataType.Value('File'), "IPFS hash must be a file"
data = unixfs_data.Data
# multihash has a badly registered base58 codec, overwrite it...
multihash.CodecReg.register('base58', base58.b58encode, base58.b58decode)
# create a multihash object from our ipfs hash
mh = multihash.decode(ipfs_hash_base58.encode('ascii'), 'base58')
# Convenience method lets us directly use a multihash to verify data
if not mh.verify(block_data):
raise Exception("IPFS hash mismatch with data")
else:
data = ipfs_client.cat(ipfs_hash_base58)
return data | [
"def",
"get_from_ipfs_and_checkhash",
"(",
"ipfs_client",
",",
"ipfs_hash_base58",
",",
"validate",
"=",
"True",
")",
":",
"if",
"validate",
":",
"from",
"snet_cli",
".",
"resources",
".",
"proto",
".",
"unixfs_pb2",
"import",
"Data",
"from",
"snet_cli",
".",
"resources",
".",
"proto",
".",
"merckledag_pb2",
"import",
"MerkleNode",
"# No nice Python library to parse ipfs blocks, so do it ourselves.",
"block_data",
"=",
"ipfs_client",
".",
"block_get",
"(",
"ipfs_hash_base58",
")",
"mn",
"=",
"MerkleNode",
"(",
")",
"mn",
".",
"ParseFromString",
"(",
"block_data",
")",
"unixfs_data",
"=",
"Data",
"(",
")",
"unixfs_data",
".",
"ParseFromString",
"(",
"mn",
".",
"Data",
")",
"assert",
"unixfs_data",
".",
"Type",
"==",
"unixfs_data",
".",
"DataType",
".",
"Value",
"(",
"'File'",
")",
",",
"\"IPFS hash must be a file\"",
"data",
"=",
"unixfs_data",
".",
"Data",
"# multihash has a badly registered base58 codec, overwrite it...",
"multihash",
".",
"CodecReg",
".",
"register",
"(",
"'base58'",
",",
"base58",
".",
"b58encode",
",",
"base58",
".",
"b58decode",
")",
"# create a multihash object from our ipfs hash",
"mh",
"=",
"multihash",
".",
"decode",
"(",
"ipfs_hash_base58",
".",
"encode",
"(",
"'ascii'",
")",
",",
"'base58'",
")",
"# Convenience method lets us directly use a multihash to verify data",
"if",
"not",
"mh",
".",
"verify",
"(",
"block_data",
")",
":",
"raise",
"Exception",
"(",
"\"IPFS hash mismatch with data\"",
")",
"else",
":",
"data",
"=",
"ipfs_client",
".",
"cat",
"(",
"ipfs_hash_base58",
")",
"return",
"data"
] | Get file from ipfs
We must check the hash because we cannot assume that ipfs_client hasn't been compromised | [
"Get",
"file",
"from",
"ipfs",
"We",
"must",
"check",
"the",
"hash",
"becasue",
"we",
"cannot",
"believe",
"that",
"ipfs_client",
"wasn",
"t",
"been",
"compromise"
] | python | train |
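For the record above, the multihash check amounts to recomputing sha2-256 over the fetched block and comparing it with the digest embedded in the base58 hash. A hand-rolled sketch of that check, assuming a v0 ("Qm...") sha2-256 IPFS hash and the base58 package already used above:

import hashlib
import base58

def block_matches_hash(ipfs_hash_base58, block_data):
    # a v0 CID decodes to the multihash bytes: 0x12 (sha2-256), 0x20 (32-byte length), digest
    raw = base58.b58decode(ipfs_hash_base58)
    if raw[:2] != b'\x12\x20':
        raise ValueError('not a sha2-256 multihash')
    return raw[2:] == hashlib.sha256(block_data).digest()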
AnthonyBloomer/daftlistings | daftlistings/daft.py | https://github.com/AnthonyBloomer/daftlistings/blob/f6c1b52425bc740f443b5efe6632a4bf18ee997f/daftlistings/daft.py#L64-L69 | def set_max_lease(self, max_lease):
"""
Set the maximum lease period in months.
:param max_lease: int
"""
self._query_params += str(QueryParam.MAX_LEASE) + str(max_lease) | [
"def",
"set_max_lease",
"(",
"self",
",",
"max_lease",
")",
":",
"self",
".",
"_query_params",
"+=",
"str",
"(",
"QueryParam",
".",
"MAX_LEASE",
")",
"+",
"str",
"(",
"max_lease",
")"
] | Set the maximum lease period in months.
:param max_lease: int | [
"Set",
"the",
"maximum",
"lease",
"period",
"in",
"months",
".",
":",
"param",
"max_lease",
":",
"int"
] | python | train |
tanghaibao/goatools | goatools/grouper/wrxlsx.py | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/wrxlsx.py#L41-L47 | def wr_xlsx_gos(self, fout_xlsx, **kws_usr):
"""Write an Excel spreadsheet with user GO ids, grouped under broader GO terms."""
# Keyword arguments: control content
desc2nts = self.sortobj.get_desc2nts(**kws_usr)
# Keyword arguments: control xlsx format
self.wr_xlsx_nts(fout_xlsx, desc2nts, **kws_usr)
return desc2nts | [
"def",
"wr_xlsx_gos",
"(",
"self",
",",
"fout_xlsx",
",",
"*",
"*",
"kws_usr",
")",
":",
"# Keyword arguments: control content",
"desc2nts",
"=",
"self",
".",
"sortobj",
".",
"get_desc2nts",
"(",
"*",
"*",
"kws_usr",
")",
"# Keyword arguments: control xlsx format",
"self",
".",
"wr_xlsx_nts",
"(",
"fout_xlsx",
",",
"desc2nts",
",",
"*",
"*",
"kws_usr",
")",
"return",
"desc2nts"
] | Write an Excel spreadsheet with user GO ids, grouped under broader GO terms. | [
"Write",
"an",
"Excel",
"spreadsheet",
"with",
"user",
"GO",
"ids",
"grouped",
"under",
"broader",
"GO",
"terms",
"."
] | python | train |
inveniosoftware/invenio-formatter | invenio_formatter/filters/datetime.py | https://github.com/inveniosoftware/invenio-formatter/blob/aa25f36742e809f05e116b52e8255cdb362e5642/invenio_formatter/filters/datetime.py#L16-L26 | def from_isodate(value, strict=False):
"""Convert an ISO formatted date into a Date object.
:param value: The ISO formatted date.
:param strict: If value is ``None``, then if strict is ``True`` it returns
the Date object of today, otherwise it returns ``None``.
(Default: ``False``)
:returns: The Date object or ``None``.
"""
if value or strict:
return arrow.get(value).date() | [
"def",
"from_isodate",
"(",
"value",
",",
"strict",
"=",
"False",
")",
":",
"if",
"value",
"or",
"strict",
":",
"return",
"arrow",
".",
"get",
"(",
"value",
")",
".",
"date",
"(",
")"
] | Convert an ISO formatted date into a Date object.
:param value: The ISO formatted date.
:param strict: If value is ``None``, then if strict is ``True`` it returns
the Date object of today, otherwise it returns ``None``.
(Default: ``False``)
:returns: The Date object or ``None``. | [
"Convert",
"an",
"ISO",
"formatted",
"date",
"into",
"a",
"Date",
"object",
"."
] | python | train |
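A short usage sketch of the filter above (the strict branch is only described in a comment because it depends on how the installed arrow version treats an empty value):

import datetime
import arrow  # same dependency the filter uses

def from_isodate(value, strict=False):
    if value or strict:
        return arrow.get(value).date()

assert from_isodate('2018-03-17') == datetime.date(2018, 3, 17)
assert from_isodate(None) is None   # lenient mode: empty values pass through
# with strict=True an empty value is still handed to arrow, which the filter
# relies on to resolve to today's date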
horazont/aioxmpp | aioxmpp/ibb/service.py | https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/ibb/service.py#L232-L250 | def write(self, data):
"""
Send `data` over the IBB. If `data` is larger than the block size
it is chunked and sent in chunks.
Chunks from one call of :meth:`write` will always be sent in
series.
"""
if self.is_closing():
return
self._write_buffer += data
if len(self._write_buffer) >= self._output_buffer_limit_high:
self._protocol.pause_writing()
if self._write_buffer:
self._can_write.set() | [
"def",
"write",
"(",
"self",
",",
"data",
")",
":",
"if",
"self",
".",
"is_closing",
"(",
")",
":",
"return",
"self",
".",
"_write_buffer",
"+=",
"data",
"if",
"len",
"(",
"self",
".",
"_write_buffer",
")",
">=",
"self",
".",
"_output_buffer_limit_high",
":",
"self",
".",
"_protocol",
".",
"pause_writing",
"(",
")",
"if",
"self",
".",
"_write_buffer",
":",
"self",
".",
"_can_write",
".",
"set",
"(",
")"
] | Send `data` over the IBB. If `data` is larger than the block size
it is chunked and sent in chunks.
Chunks from one call of :meth:`write` will always be sent in
series. | [
"Send",
"data",
"over",
"the",
"IBB",
".",
"If",
"data",
"is",
"larger",
"than",
"the",
"block",
"size",
"is",
"is",
"chunked",
"and",
"sent",
"in",
"chunks",
"."
] | python | train |
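The write() method above follows a common flow-control shape: append to a buffer, signal the producer to pause once a high-water mark is reached, and wake the task that drains the buffer. A library-agnostic sketch of that shape (names are illustrative, not aioxmpp's API):

import threading

class BufferedWriter:
    def __init__(self, high_water=4096):
        self._buffer = bytearray()
        self._high_water = high_water
        self._can_write = threading.Event()   # wakes the background sender
        self.paused = False

    def write(self, data):
        self._buffer += data
        if len(self._buffer) >= self._high_water:
            self.paused = True                # producer should back off until drained
        if self._buffer:
            self._can_write.set()             # there is data to flush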
eddiejessup/spatious | spatious/vector.py | https://github.com/eddiejessup/spatious/blob/b7ae91bec029e85a45a7f303ee184076433723cd/spatious/vector.py#L343-L347 | def smallest_signed_angle(source, target):
"""Find the smallest angle going from angle `source` to angle `target`."""
dth = target - source
dth = (dth + np.pi) % (2.0 * np.pi) - np.pi
return dth | [
"def",
"smallest_signed_angle",
"(",
"source",
",",
"target",
")",
":",
"dth",
"=",
"target",
"-",
"source",
"dth",
"=",
"(",
"dth",
"+",
"np",
".",
"pi",
")",
"%",
"(",
"2.0",
"*",
"np",
".",
"pi",
")",
"-",
"np",
".",
"pi",
"return",
"dth"
] | Find the smallest angle going from angle `source` to angle `target`. | [
"Find",
"the",
"smallest",
"angle",
"going",
"from",
"angle",
"source",
"to",
"angle",
"target",
"."
] | python | train |
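The wrapped difference above can be sanity-checked numerically; two worked cases:

import numpy as np

def smallest_signed_angle(source, target):
    dth = target - source
    return (dth + np.pi) % (2.0 * np.pi) - np.pi

# going from 3.0 rad to -3.0 rad the short way is about +0.283 rad,
# not the raw difference of -6.0 rad
assert np.isclose(smallest_signed_angle(3.0, -3.0), 2.0 * np.pi - 6.0)
assert np.isclose(smallest_signed_angle(0.0, np.pi / 2), np.pi / 2)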
mithro/python-datetime-tz | datetime_tz/__init__.py | https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L855-L875 | def _wrap_method(name):
"""Wrap a method.
Patch a method which might return a datetime.datetime to return a
datetime_tz.datetime_tz instead.
Args:
name: The name of the method to patch
"""
method = getattr(datetime.datetime, name)
# Have to give the second argument as method has no __module__ option.
@functools.wraps(method, ("__name__", "__doc__"), ())
def wrapper(self, *args, **kw):
r = method(self, *args, **kw)
if isinstance(r, datetime.datetime) and not isinstance(r, type(self)):
r = type(self)(r)
return r
setattr(datetime_tz, name, wrapper) | [
"def",
"_wrap_method",
"(",
"name",
")",
":",
"method",
"=",
"getattr",
"(",
"datetime",
".",
"datetime",
",",
"name",
")",
"# Have to give the second argument as method has no __module__ option.",
"@",
"functools",
".",
"wraps",
"(",
"method",
",",
"(",
"\"__name__\"",
",",
"\"__doc__\"",
")",
",",
"(",
")",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"r",
"=",
"method",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"if",
"isinstance",
"(",
"r",
",",
"datetime",
".",
"datetime",
")",
"and",
"not",
"isinstance",
"(",
"r",
",",
"type",
"(",
"self",
")",
")",
":",
"r",
"=",
"type",
"(",
"self",
")",
"(",
"r",
")",
"return",
"r",
"setattr",
"(",
"datetime_tz",
",",
"name",
",",
"wrapper",
")"
] | Wrap a method.
Patch a method which might return a datetime.datetime to return a
datetime_tz.datetime_tz instead.
Args:
name: The name of the method to patch | [
"Wrap",
"a",
"method",
"."
] | python | train |
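A self-contained sketch of the same patching idea, applied to one method of a toy subclass. The subclass name is a placeholder; the real datetime_tz constructor accepts a plain datetime, which is why the record can simply call type(self)(r), while this sketch rebuilds the value field by field:

import datetime
import functools

class LocalDateTime(datetime.datetime):
    """Stand-in for datetime_tz.datetime_tz in this sketch."""

def _wrap_method(cls, name):
    method = getattr(datetime.datetime, name)
    @functools.wraps(method, ('__name__', '__doc__'), ())
    def wrapper(self, *args, **kw):
        r = method(self, *args, **kw)
        if isinstance(r, datetime.datetime) and not isinstance(r, type(self)):
            # coerce plain datetimes back into the subclass
            r = type(self)(r.year, r.month, r.day, r.hour, r.minute,
                           r.second, r.microsecond, r.tzinfo)
        return r
    setattr(cls, name, wrapper)

_wrap_method(LocalDateTime, 'replace')
d = LocalDateTime(2020, 1, 1, 12, 0)
assert isinstance(d.replace(hour=8), LocalDateTime)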
mfcovington/pubmed-lookup | pubmed_lookup/pubmed_lookup.py | https://github.com/mfcovington/pubmed-lookup/blob/b0aa2945b354f0945db73da22dd15ea628212da8/pubmed_lookup/pubmed_lookup.py#L60-L90 | def cite(self, max_authors=5):
"""
Return string with a citation for the record, formatted as:
'{authors} ({year}). {title} {journal} {volume}({issue}): {pages}.'
"""
citation_data = {
'title': self.title,
'authors': self.authors_et_al(max_authors),
'year': self.year,
'journal': self.journal,
'volume': self.volume,
'issue': self.issue,
'pages': self.pages,
}
citation = "{authors} ({year}). {title} {journal}".format(
**citation_data)
if self.volume and self.issue and self.pages:
citation += " {volume}({issue}): {pages}.".format(**citation_data)
elif self.volume and self.issue:
citation += " {volume}({issue}).".format(**citation_data)
elif self.volume and self.pages:
citation += " {volume}: {pages}.".format(**citation_data)
elif self.volume:
citation += " {volume}.".format(**citation_data)
elif self.pages:
citation += " {pages}.".format(**citation_data)
else:
citation += "."
return citation | [
"def",
"cite",
"(",
"self",
",",
"max_authors",
"=",
"5",
")",
":",
"citation_data",
"=",
"{",
"'title'",
":",
"self",
".",
"title",
",",
"'authors'",
":",
"self",
".",
"authors_et_al",
"(",
"max_authors",
")",
",",
"'year'",
":",
"self",
".",
"year",
",",
"'journal'",
":",
"self",
".",
"journal",
",",
"'volume'",
":",
"self",
".",
"volume",
",",
"'issue'",
":",
"self",
".",
"issue",
",",
"'pages'",
":",
"self",
".",
"pages",
",",
"}",
"citation",
"=",
"\"{authors} ({year}). {title} {journal}\"",
".",
"format",
"(",
"*",
"*",
"citation_data",
")",
"if",
"self",
".",
"volume",
"and",
"self",
".",
"issue",
"and",
"self",
".",
"pages",
":",
"citation",
"+=",
"\" {volume}({issue}): {pages}.\"",
".",
"format",
"(",
"*",
"*",
"citation_data",
")",
"elif",
"self",
".",
"volume",
"and",
"self",
".",
"issue",
":",
"citation",
"+=",
"\" {volume}({issue}).\"",
".",
"format",
"(",
"*",
"*",
"citation_data",
")",
"elif",
"self",
".",
"volume",
"and",
"self",
".",
"pages",
":",
"citation",
"+=",
"\" {volume}: {pages}.\"",
".",
"format",
"(",
"*",
"*",
"citation_data",
")",
"elif",
"self",
".",
"volume",
":",
"citation",
"+=",
"\" {volume}.\"",
".",
"format",
"(",
"*",
"*",
"citation_data",
")",
"elif",
"self",
".",
"pages",
":",
"citation",
"+=",
"\" {pages}.\"",
".",
"format",
"(",
"*",
"*",
"citation_data",
")",
"else",
":",
"citation",
"+=",
"\".\"",
"return",
"citation"
] | Return string with a citation for the record, formatted as:
'{authors} ({year}). {title} {journal} {volume}({issue}): {pages}.' | [
"Return",
"string",
"with",
"a",
"citation",
"for",
"the",
"record",
"formatted",
"as",
":",
"{",
"authors",
"}",
"(",
"{",
"year",
"}",
")",
".",
"{",
"title",
"}",
"{",
"journal",
"}",
"{",
"volume",
"}",
"(",
"{",
"issue",
"}",
")",
":",
"{",
"pages",
"}",
"."
] | python | train |
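The formatting fallbacks above are plain str.format calls; with made-up placeholder values the fully-populated branch produces:

citation_data = {
    'title': 'An example title.',
    'authors': 'Smith J, Jones A',
    'year': 2015,
    'journal': 'J Example Res',
    'volume': 12,
    'issue': 3,
    'pages': '45-67',
}
citation = "{authors} ({year}). {title} {journal}".format(**citation_data)
citation += " {volume}({issue}): {pages}.".format(**citation_data)
assert citation == "Smith J, Jones A (2015). An example title. J Example Res 12(3): 45-67."

Dropping 'issue' and 'pages' from the data would instead route through the volume-only branch and end the citation with " 12.".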
edublancas/sklearn-evaluation | sklearn_evaluation/util.py | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/util.py#L37-L51 | def _group_by(data, criteria):
"""
Group objects in data using a function or a key
"""
if isinstance(criteria, str):
criteria_str = criteria
def criteria(x):
return x[criteria_str]
res = defaultdict(list)
for element in data:
key = criteria(element)
res[key].append(element)
return res | [
"def",
"_group_by",
"(",
"data",
",",
"criteria",
")",
":",
"if",
"isinstance",
"(",
"criteria",
",",
"str",
")",
":",
"criteria_str",
"=",
"criteria",
"def",
"criteria",
"(",
"x",
")",
":",
"return",
"x",
"[",
"criteria_str",
"]",
"res",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"element",
"in",
"data",
":",
"key",
"=",
"criteria",
"(",
"element",
")",
"res",
"[",
"key",
"]",
".",
"append",
"(",
"element",
")",
"return",
"res"
] | Group objects in data using a function or a key | [
"Group",
"objects",
"in",
"data",
"using",
"a",
"function",
"or",
"a",
"key"
] | python | train |
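A quick usage sketch of the helper above, grouping the same rows first by a key name and then by an arbitrary callable (the row dicts are made-up examples):

from collections import defaultdict

def _group_by(data, criteria):
    if isinstance(criteria, str):
        key_name = criteria
        criteria = lambda x: x[key_name]
    res = defaultdict(list)
    for element in data:
        res[criteria(element)].append(element)
    return res

rows = [{'model': 'svm', 'acc': 0.90},
        {'model': 'svm', 'acc': 0.80},
        {'model': 'rf', 'acc': 0.85}]
assert len(_group_by(rows, 'model')['svm']) == 2      # group on a dict key
bands = _group_by(rows, lambda r: r['acc'] >= 0.85)   # or on any callable
assert sorted(bands.keys()) == [False, True]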
tensorflow/tensorboard | tensorboard/plugins/profile/profile_plugin.py | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/profile/profile_plugin.py#L351-L409 | def data_impl(self, request):
"""Retrieves and processes the tool data for a run and a host.
Args:
request: XMLHttpRequest
Returns:
A string that can be served to the frontend tool or None if tool,
run or host is invalid.
"""
run = request.args.get('run')
tool = request.args.get('tag')
host = request.args.get('host')
run_dir = self._run_dir(run)
# Profile plugin "run" is the last component of run dir.
profile_run = os.path.basename(run_dir)
if tool not in TOOLS:
return None
self.start_grpc_stub_if_necessary()
if tool == 'trace_viewer@' and self.stub is not None:
from tensorflow.contrib.tpu.profiler import tpu_profiler_analysis_pb2
grpc_request = tpu_profiler_analysis_pb2.ProfileSessionDataRequest()
grpc_request.repository_root = run_dir
grpc_request.session_id = profile_run[:-1]
grpc_request.tool_name = 'trace_viewer'
# Remove the trailing dot if present
grpc_request.host_name = host.rstrip('.')
grpc_request.parameters['resolution'] = request.args.get('resolution')
if request.args.get('start_time_ms') is not None:
grpc_request.parameters['start_time_ms'] = request.args.get(
'start_time_ms')
if request.args.get('end_time_ms') is not None:
grpc_request.parameters['end_time_ms'] = request.args.get('end_time_ms')
grpc_response = self.stub.GetSessionToolData(grpc_request)
return grpc_response.output
if tool not in TOOLS:
return None
tool_name = str(host) + TOOLS[tool]
asset_path = os.path.join(run_dir, tool_name)
raw_data = None
try:
with tf.io.gfile.GFile(asset_path, 'rb') as f:
raw_data = f.read()
except tf.errors.NotFoundError:
logger.warn('Asset path %s not found', asset_path)
except tf.errors.OpError as e:
logger.warn("Couldn't read asset path: %s, OpError %s", asset_path, e)
if raw_data is None:
return None
if tool == 'trace_viewer':
return process_raw_trace(raw_data)
if tool in _RAW_DATA_TOOLS:
return raw_data
return None | [
"def",
"data_impl",
"(",
"self",
",",
"request",
")",
":",
"run",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'run'",
")",
"tool",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'tag'",
")",
"host",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'host'",
")",
"run_dir",
"=",
"self",
".",
"_run_dir",
"(",
"run",
")",
"# Profile plugin \"run\" is the last component of run dir.",
"profile_run",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"run_dir",
")",
"if",
"tool",
"not",
"in",
"TOOLS",
":",
"return",
"None",
"self",
".",
"start_grpc_stub_if_necessary",
"(",
")",
"if",
"tool",
"==",
"'trace_viewer@'",
"and",
"self",
".",
"stub",
"is",
"not",
"None",
":",
"from",
"tensorflow",
".",
"contrib",
".",
"tpu",
".",
"profiler",
"import",
"tpu_profiler_analysis_pb2",
"grpc_request",
"=",
"tpu_profiler_analysis_pb2",
".",
"ProfileSessionDataRequest",
"(",
")",
"grpc_request",
".",
"repository_root",
"=",
"run_dir",
"grpc_request",
".",
"session_id",
"=",
"profile_run",
"[",
":",
"-",
"1",
"]",
"grpc_request",
".",
"tool_name",
"=",
"'trace_viewer'",
"# Remove the trailing dot if present",
"grpc_request",
".",
"host_name",
"=",
"host",
".",
"rstrip",
"(",
"'.'",
")",
"grpc_request",
".",
"parameters",
"[",
"'resolution'",
"]",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'resolution'",
")",
"if",
"request",
".",
"args",
".",
"get",
"(",
"'start_time_ms'",
")",
"is",
"not",
"None",
":",
"grpc_request",
".",
"parameters",
"[",
"'start_time_ms'",
"]",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'start_time_ms'",
")",
"if",
"request",
".",
"args",
".",
"get",
"(",
"'end_time_ms'",
")",
"is",
"not",
"None",
":",
"grpc_request",
".",
"parameters",
"[",
"'end_time_ms'",
"]",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'end_time_ms'",
")",
"grpc_response",
"=",
"self",
".",
"stub",
".",
"GetSessionToolData",
"(",
"grpc_request",
")",
"return",
"grpc_response",
".",
"output",
"if",
"tool",
"not",
"in",
"TOOLS",
":",
"return",
"None",
"tool_name",
"=",
"str",
"(",
"host",
")",
"+",
"TOOLS",
"[",
"tool",
"]",
"asset_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"run_dir",
",",
"tool_name",
")",
"raw_data",
"=",
"None",
"try",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"asset_path",
",",
"'rb'",
")",
"as",
"f",
":",
"raw_data",
"=",
"f",
".",
"read",
"(",
")",
"except",
"tf",
".",
"errors",
".",
"NotFoundError",
":",
"logger",
".",
"warn",
"(",
"'Asset path %s not found'",
",",
"asset_path",
")",
"except",
"tf",
".",
"errors",
".",
"OpError",
"as",
"e",
":",
"logger",
".",
"warn",
"(",
"\"Couldn't read asset path: %s, OpError %s\"",
",",
"asset_path",
",",
"e",
")",
"if",
"raw_data",
"is",
"None",
":",
"return",
"None",
"if",
"tool",
"==",
"'trace_viewer'",
":",
"return",
"process_raw_trace",
"(",
"raw_data",
")",
"if",
"tool",
"in",
"_RAW_DATA_TOOLS",
":",
"return",
"raw_data",
"return",
"None"
] | Retrieves and processes the tool data for a run and a host.
Args:
request: XMLHttpRequest
Returns:
A string that can be served to the frontend tool or None if tool,
run or host is invalid. | [
"Retrieves",
"and",
"processes",
"the",
"tool",
"data",
"for",
"a",
"run",
"and",
"a",
"host",
"."
] | python | train |
datastore/datastore | datastore/core/key.py | https://github.com/datastore/datastore/blob/7ccf0cd4748001d3dbf5e6dda369b0f63e0269d3/datastore/core/key.py#L147-L157 | def isAncestorOf(self, other):
'''Returns whether this Key is an ancestor of `other`.
>>> john = Key('/Comedy/MontyPython/Actor:JohnCleese')
>>> Key('/Comedy').isAncestorOf(john)
True
'''
if isinstance(other, Key):
return other._string.startswith(self._string + '/')
raise TypeError('%s is not of type %s' % (other, Key)) | [
"def",
"isAncestorOf",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"Key",
")",
":",
"return",
"other",
".",
"_string",
".",
"startswith",
"(",
"self",
".",
"_string",
"+",
"'/'",
")",
"raise",
"TypeError",
"(",
"'%s is not of type %s'",
"%",
"(",
"other",
",",
"Key",
")",
")"
] | Returns whether this Key is an ancestor of `other`.
>>> john = Key('/Comedy/MontyPython/Actor:JohnCleese')
>>> Key('/Comedy').isAncestorOf(john)
True | [
"Returns",
"whether",
"this",
"Key",
"is",
"an",
"ancestor",
"of",
"other",
"."
] | python | train |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/logs.py | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/logs.py#L38-L82 | def config_logging(no_log_file, log_to, log_level, silent, verbosity):
"""
Configures and generates a Logger object, 'openaccess_epub' based on common
parameters used for console interface script execution in OpenAccess_EPUB.
These parameters are:
no_log_file
Boolean. Disables logging to file. If set to True, log_to and
log_level become irrelevant.
log_to
A string name indicating a file path for logging.
log_level
Logging level, one of: 'debug', 'info', 'warning', 'error', 'critical'
silent
Boolean
verbosity
Console logging level, one of: 'debug', 'info', 'warning', 'error',
'critical
This method currently only configures a console StreamHandler with a
message-only Formatter.
"""
log_level = get_level(log_level)
console_level = get_level(verbosity)
#We want to configure our openaccess_epub as the parent log
log = logging.getLogger('openaccess_epub')
log.setLevel(logging.DEBUG) # Don't filter at the log level
standard = logging.Formatter(STANDARD_FORMAT)
message_only = logging.Formatter(MESSAGE_ONLY_FORMAT)
#Only add FileHandler IF it's allowed AND we have a name for it
if not no_log_file and log_to is not None:
fh = logging.FileHandler(filename=log_to)
fh.setLevel(log_level)
fh.setFormatter(standard)
log.addHandler(fh)
#Add on the console StreamHandler at verbosity level if silent not set
if not silent:
sh_echo = logging.StreamHandler(sys.stdout)
sh_echo.setLevel(console_level)
sh_echo.setFormatter(message_only)
log.addHandler(sh_echo) | [
"def",
"config_logging",
"(",
"no_log_file",
",",
"log_to",
",",
"log_level",
",",
"silent",
",",
"verbosity",
")",
":",
"log_level",
"=",
"get_level",
"(",
"log_level",
")",
"console_level",
"=",
"get_level",
"(",
"verbosity",
")",
"#We want to configure our openaccess_epub as the parent log",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"'openaccess_epub'",
")",
"log",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"# Don't filter at the log level",
"standard",
"=",
"logging",
".",
"Formatter",
"(",
"STANDARD_FORMAT",
")",
"message_only",
"=",
"logging",
".",
"Formatter",
"(",
"MESSAGE_ONLY_FORMAT",
")",
"#Only add FileHandler IF it's allowed AND we have a name for it",
"if",
"not",
"no_log_file",
"and",
"log_to",
"is",
"not",
"None",
":",
"fh",
"=",
"logging",
".",
"FileHandler",
"(",
"filename",
"=",
"log_to",
")",
"fh",
".",
"setLevel",
"(",
"log_level",
")",
"fh",
".",
"setFormatter",
"(",
"standard",
")",
"log",
".",
"addHandler",
"(",
"fh",
")",
"#Add on the console StreamHandler at verbosity level if silent not set",
"if",
"not",
"silent",
":",
"sh_echo",
"=",
"logging",
".",
"StreamHandler",
"(",
"sys",
".",
"stdout",
")",
"sh_echo",
".",
"setLevel",
"(",
"console_level",
")",
"sh_echo",
".",
"setFormatter",
"(",
"message_only",
")",
"log",
".",
"addHandler",
"(",
"sh_echo",
")"
] | Configures and generates a Logger object, 'openaccess_epub' based on common
parameters used for console interface script execution in OpenAccess_EPUB.
These parameters are:
no_log_file
Boolean. Disables logging to file. If set to True, log_to and
log_level become irrelevant.
log_to
A string name indicating a file path for logging.
log_level
Logging level, one of: 'debug', 'info', 'warning', 'error', 'critical'
silent
Boolean
verbosity
Console logging level, one of: 'debug', 'info', 'warning', 'error',
'critical
This method currently only configures a console StreamHandler with a
message-only Formatter. | [
"Configures",
"and",
"generates",
"a",
"Logger",
"object",
"openaccess_epub",
"based",
"on",
"common",
"parameters",
"used",
"for",
"console",
"interface",
"script",
"execution",
"in",
"OpenAccess_EPUB",
"."
] | python | train |
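The handler wiring above is plain stdlib logging; a trimmed sketch of the same setup, with hard-coded format strings standing in for the module's STANDARD_FORMAT and MESSAGE_ONLY_FORMAT constants:

import logging
import sys

def build_logger(log_path=None, file_level=logging.DEBUG, console_level=logging.INFO):
    log = logging.getLogger('openaccess_epub')
    log.setLevel(logging.DEBUG)                    # filter at the handlers, not the logger
    if log_path is not None:
        fh = logging.FileHandler(filename=log_path)
        fh.setLevel(file_level)
        fh.setFormatter(logging.Formatter('%(name)s [%(levelname)s] %(message)s'))
        log.addHandler(fh)
    sh = logging.StreamHandler(sys.stdout)
    sh.setLevel(console_level)
    sh.setFormatter(logging.Formatter('%(message)s'))  # console shows the message only
    log.addHandler(sh)
    return log

log = build_logger()
log.info('visible on the console')
log.debug('suppressed: below the console handler level')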
mbedmicro/pyOCD | pyocd/gdbserver/context_facade.py | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/gdbserver/context_facade.py#L60-L76 | def get_register_context(self):
"""
return hexadecimal dump of registers as expected by GDB
"""
logging.debug("GDB getting register context")
resp = b''
reg_num_list = [reg.reg_num for reg in self._register_list]
vals = self._context.read_core_registers_raw(reg_num_list)
#print("Vals: %s" % vals)
for reg, regValue in zip(self._register_list, vals):
if reg.bitsize == 64:
resp += six.b(conversion.u64_to_hex16le(regValue))
else:
resp += six.b(conversion.u32_to_hex8le(regValue))
logging.debug("GDB reg: %s = 0x%X", reg.name, regValue)
return resp | [
"def",
"get_register_context",
"(",
"self",
")",
":",
"logging",
".",
"debug",
"(",
"\"GDB getting register context\"",
")",
"resp",
"=",
"b''",
"reg_num_list",
"=",
"[",
"reg",
".",
"reg_num",
"for",
"reg",
"in",
"self",
".",
"_register_list",
"]",
"vals",
"=",
"self",
".",
"_context",
".",
"read_core_registers_raw",
"(",
"reg_num_list",
")",
"#print(\"Vals: %s\" % vals)",
"for",
"reg",
",",
"regValue",
"in",
"zip",
"(",
"self",
".",
"_register_list",
",",
"vals",
")",
":",
"if",
"reg",
".",
"bitsize",
"==",
"64",
":",
"resp",
"+=",
"six",
".",
"b",
"(",
"conversion",
".",
"u64_to_hex16le",
"(",
"regValue",
")",
")",
"else",
":",
"resp",
"+=",
"six",
".",
"b",
"(",
"conversion",
".",
"u32_to_hex8le",
"(",
"regValue",
")",
")",
"logging",
".",
"debug",
"(",
"\"GDB reg: %s = 0x%X\"",
",",
"reg",
".",
"name",
",",
"regValue",
")",
"return",
"resp"
] | return hexadecimal dump of registers as expected by GDB | [
"return",
"hexadecimal",
"dump",
"of",
"registers",
"as",
"expected",
"by",
"GDB"
] | python | train |
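The conversion helpers referenced above boil down to little-endian hex encoding of register values, which is the byte order GDB expects in a register dump. An equivalent standalone sketch (the helper names mirror the record, but this is not pyOCD's code):

import struct

def u32_to_hex8le(value):
    return struct.pack('<I', value).hex()   # 8 hex chars, least-significant byte first

def u64_to_hex16le(value):
    return struct.pack('<Q', value).hex()

assert u32_to_hex8le(0x20001000) == '00100020'
assert u64_to_hex16le(0x1122334455667788) == '8877665544332211'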
yamins81/tabular | tabular/web.py | https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/web.py#L22-L318 | def tabular2html(fname=None, X=None, fin=None, title=None, printheader=False,
split=True, usecss=None, writecss=None, SERVERNAME=None,
SERVER_FROM_CURDIR='../', ROWS_PER_PAGE=1000,
returnstring = False, **kwargs):
"""
Creates an html representation of tabular data, either from a tabarray or
an external file (including ``.hsv``, ``.csv``, ``.tsv``). If no data is
directly provided by passing a tabarray to `X`, then a tabarray is
constructed using :func:`tabular.tabarray.tabarray.__new__`.
**Parameters**
**fname** : string
Path to the "main" HTML file to be created. This file path
must end in ``.html``.
Note that this function will create additional files (e.g.
a ``.css`` file and multiple linked ``.html`` files for
large datasets) linked to this HTML file, inside of the
directory containing `fname`.
**X** : tabarray
If `X` is `None`, then one of `fin`, `array`, `records`,
`columns`, `SV`, `HSV`, or `HSVlist` must not be `None`.
**fin** : string
File path to a source of tabular data, which will be
loaded using the tabarray constructor. The load method
will be inferred from the file extension and whether or not
there is a headerkey in the first line of the file.
**title** : string
Description to be used in the <title> tag of the created
html file.
**printheader** : boolean
If `printheader = True`, will print out a "header" and
"footer" (both in the body of the HTML documents) on every
page. The header contains information about the name of
the input file and the number of rows, as well as what the
current page number is (if split between multiple
documents) and links to any other pages. The footer
contains the same page number and links.
**split** : boolean
If `split = False`, will not split to multiple HTML pages,
regardless of value of `ROWS_PER_PAGE`.
**usecss** : False or None or string
If usecss is False, no link to a cssfile is included in the
page header, and no cssfile is written out. If it is a
string, that string is assumed to be a path and is linked
to as the CSS file. If it is None, then consideration of
the `writecss` variable is made.
**writecss** : boolean
If `usecss` is not `None`, then if `writecss` is not
`False`: the default css sheet is generated and written to
a file whose name is either generated by default (if
writecss is None) else given by writecss itself, and linked
to in the file header
**SERVERNAME** : string
Server name. For example, this could be the ServerName
of a VirtualHost on your local machine, assuming that
`fname` describes a path on the server.
**SERVER_FROM_CURDIR** : string
Root path of server relative to the current directory.
Assumed to be '../'.
**ROWS_PER_PAGE** : positive integer or 'all'
This sets the number of records displayed per .html page
(if the tabular file has more than ROWS_PER_PAGE rows,
it will be split into multiple sections on several .html
pages (default = 1000).
If the value is 'all' then the page is not split (e.g. it
is as if split = False)
**See also:** the kwargs arguments must be valid keyword arguments
for :func:`tabular.tabarray.tabarray.__new__`, the tabarray
constructor, see documentation for descriptions.
"""
# Must write to an HTML file.
assert returnstring or fname.endswith( '.html' ), 'fname must end in ".html".'
if X is None:
if fin is not None:
if fin.lstrip('/').endswith('.hsv'):
kwargs['HSVfile'] = fin
elif fin.endswith('.tsv') or fin.endswith('.csv'):
kwargs['SVfile'] = fin
elif fin.endswith(('.npy','.npz')):
kwargs['binary'] = fin
else:
assert False, ('This algorithm is being forced to determine '
'the proper file type for web representation '
'from file\'s path (e.g. by looking at '
'extension) since the type is not given '
'explicitly by use of a keyword argument, but '
'is having problems deducing the intended file '
'type from the path (e.g., because the '
'extension is not one of those this algorithm '
'recognizes).')
else:
assert any([l in kwargs.keys() and kwargs[l] != None
for l in ['SVfile','binary','HSVfile']]), \
('Either a tabarray is given, or file path "fin" is '
'given, or one of "HSV", "binary", or "SV" keyword '
'arguments are given.')
X = tb.tabarray(**kwargs)
names = X.dtype.names
try:
RowColors = X['__color__']
except:
if '__color__' in names:
cspot = names.index('__color__')
RowColors = [r[cspot] for r in X]
else:
RowColors = [''] * len(X)
try:
coloring = X.coloring
except:
coloring = {}
Num_Records = len(X)
Num_Cols = len(names)
ColorStyles = CSSColoring(names, coloring)
HdrNts = HeaderNotations(names, coloring)
# If I specify usecss and it is not false, it must be a string and I want
# to put that file name in the link and not write anything out.
# If I specify writecss I want it to write out file to that name and use it
# in the link.
# If usecss = false, writecss is false and nothing is put in the link.
# If usecss is not specified, then ...
if usecss != None:
if isinstance(usecss, str):
cssfile = usecss
CSSLINK = ('<link rel="stylesheet" type="text/css" href="' + '/' +
cssfile[len(SERVER_FROM_CURDIR):] + '"</link>')
else:
assert usecss == False
CSSLINK = ''
else:
if writecss == False or returnstring:
CSSLINK = ''
else:
if not isinstance(writecss,str):
cssfile = fname[:-5] + '.css'
else:
cssfile = writecss
WriteOutCSS(ColorStyles[1],cssfile)
CSSLINK = ('<link rel="stylesheet" type="text/css" href="' + '/' +
cssfile[len(SERVER_FROM_CURDIR):] + '"</link>')
if returnstring:
split = False
if not split or ROWS_PER_PAGE == 'all':
ROWS_PER_PAGE = Num_Records + 1
numSections = int(Num_Records / ROWS_PER_PAGE) + 1
# section2file(i) returns the name of the .html file corresponding to
# section number i.
section2file = (lambda sectionNum: fname if sectionNum == 0
else splitext(fname)[0] + str(sectionNum) + splitext(fname)[1])
if title is None:
if not fin is None:
title = fin
else:
title = 'Title Not Given'
for section in range(numSections):
sectionfname = section2file(section)
fromRow = section * ROWS_PER_PAGE # Start record # for this section.
toRow = min( fromRow + ROWS_PER_PAGE, Num_Records) # End record #.
if printheader and not returnstring:
prefix = '/' + DirName(fname[len(SERVER_FROM_CURDIR):]) + '/'
else:
prefix = ''
# Open the output file for the section to fileobject 'f'.
if not returnstring:
f = open(sectionfname,'w')
else:
f = tempfile.TemporaryFile('w+b')
# Write out file header.
if not returnstring:
f.write('<html><META HTTP-EQUIV="Content-Type" '
'CONTENT="text/html; charset=utf-8" /><head><title>' +
title + '</title>' + CSSLINK + '</head><body>\n' )
if printheader:
f.write('<p>Tabular File (page ' + str(section + 1) + ' of ' +
str(numSections) + ', rows ' + str(fromRow + 1) +
' - ' + str(toRow) + '): ' + title + '</p>\n')
f.write('<p>page ')
if section > 0:
f.write(' <a href="' + prefix +
basename(section2file(section - 1)) + '">prev</a> ')
if section < numSections - 1:
f.write(' <a href="' + prefix +
basename(section2file(section + 1)) + '">next</a> ')
for page in range(numSections):
f.write((' <a href="' + prefix +
basename(section2file(page)) + '">' + str(page + 1) +
'</a>') if page != section else ' ' + str(page + 1))
f.write( '</p>' )
# Write out table with number of cols.
f.write('<table border="1" cellspacing="0" cellpadding="4">\n')
f.write('<col span="' + str(Num_Cols) + '" align="center">\n')
# Write out table header line.
f.write('<thead>')
if len(HdrNts) > 0:
for h in HdrNts:
f.write(h + '\n')
f.write('<tr align="center">')
for name in names:
f.write('<th class="' + ColorStyles[0][name] + '">' +
cgi.escape(name) + '</th>')
f.write('</tr>')
f.write('</thead>\n')
# Write out each record in the section.
f.write( '<tbody>\n' )
if (len(names) > 1) or (fin != None and fin.endswith('.csv')):
for row in range( fromRow, toRow ):
colorst = (' style="background-color:' + RowColors[row] +
'" ' if RowColors[row] != '' else '')
f.write('<tr align="center">')
for (i, val) in enumerate(X[ row ]):
#f.write('<td>' + cgi.escape(str(val)) + '</td>')
f.write('<td ' + colorst + ' class="' +
ColorStyles[0][names[i]] + '">' + str(val).replace('\n','<br/>') +
'</td>')
f.write('</tr>\n')
else:
for row in range(fromRow, toRow):
f.write('<tr align="center">')
#f.write('<td>' + cgi.escape(str(X[row])) + '</td>')
f.write('<td>' + str(X[row]).replace('\n','<br/>') + '</td>')
f.write('</tr>\n')
f.write('</tbody>\n')
f.write( '</table>' )
# Write out hyperlinks to other sections.
if printheader:
f.write('<p>page ')
if section > 0:
f.write(' <a href="' + prefix +
basename(section2file(section - 1)) + '">prev</a> ')
if section < numSections - 1:
f.write(' <a href="' + prefix +
basename(section2file(section + 1)) + '">next</a> ')
for page in range(numSections):
f.write((' <a href="' + prefix +
basename(section2file(page)) + '">' +
str(page + 1) + '</a>') if page != section
else ' ' + str(page + 1))
f.write('</p>')
# End file.
if not returnstring:
f.write('</body></html>\n')
if returnstring:
f.seek(0)
s = f.read()
f.close()
return s
else:
f.close() | [
"def",
"tabular2html",
"(",
"fname",
"=",
"None",
",",
"X",
"=",
"None",
",",
"fin",
"=",
"None",
",",
"title",
"=",
"None",
",",
"printheader",
"=",
"False",
",",
"split",
"=",
"True",
",",
"usecss",
"=",
"None",
",",
"writecss",
"=",
"None",
",",
"SERVERNAME",
"=",
"None",
",",
"SERVER_FROM_CURDIR",
"=",
"'../'",
",",
"ROWS_PER_PAGE",
"=",
"1000",
",",
"returnstring",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# Must write to an HTML file.",
"assert",
"returnstring",
"or",
"fname",
".",
"endswith",
"(",
"'.html'",
")",
",",
"'fname must end in \".html\".'",
"if",
"X",
"is",
"None",
":",
"if",
"fin",
"is",
"not",
"None",
":",
"if",
"fin",
".",
"lstrip",
"(",
"'/'",
")",
".",
"endswith",
"(",
"'.hsv'",
")",
":",
"kwargs",
"[",
"'HSVfile'",
"]",
"=",
"fin",
"elif",
"fin",
".",
"endswith",
"(",
"'.tsv'",
")",
"or",
"fin",
".",
"endswith",
"(",
"'.csv'",
")",
":",
"kwargs",
"[",
"'SVfile'",
"]",
"=",
"fin",
"elif",
"fin",
".",
"endswith",
"(",
"(",
"'.npy'",
",",
"'.npz'",
")",
")",
":",
"kwargs",
"[",
"'binary'",
"]",
"=",
"fin",
"else",
":",
"assert",
"False",
",",
"(",
"'This algorithm is being forced to determine '",
"'the proper file type for web representation '",
"'from file\\'s path (e.g. by looking at '",
"'extension) since the type is not given '",
"'explicitly by use of a keyword argument, but '",
"'is having problems deducing the intended file '",
"'type from the path (e.g., because the '",
"'extension is not one of those this algorithm '",
"'recognizes).'",
")",
"else",
":",
"assert",
"any",
"(",
"[",
"l",
"in",
"kwargs",
".",
"keys",
"(",
")",
"and",
"kwargs",
"[",
"l",
"]",
"!=",
"None",
"for",
"l",
"in",
"[",
"'SVfile'",
",",
"'binary'",
",",
"'HSVfile'",
"]",
"]",
")",
",",
"(",
"'Either a tabarray is given, or file path \"fin\" is '",
"'given, or one of \"HSV\", \"binary\", or \"SV\" keyword '",
"'arguments are given.'",
")",
"X",
"=",
"tb",
".",
"tabarray",
"(",
"*",
"*",
"kwargs",
")",
"names",
"=",
"X",
".",
"dtype",
".",
"names",
"try",
":",
"RowColors",
"=",
"X",
"[",
"'__color__'",
"]",
"except",
":",
"if",
"'__color__'",
"in",
"names",
":",
"cspot",
"=",
"names",
".",
"index",
"(",
"'__color__'",
")",
"RowColors",
"=",
"[",
"r",
"[",
"cspot",
"]",
"for",
"r",
"in",
"X",
"]",
"else",
":",
"RowColors",
"=",
"[",
"''",
"]",
"*",
"len",
"(",
"X",
")",
"try",
":",
"coloring",
"=",
"X",
".",
"coloring",
"except",
":",
"coloring",
"=",
"{",
"}",
"Num_Records",
"=",
"len",
"(",
"X",
")",
"Num_Cols",
"=",
"len",
"(",
"names",
")",
"ColorStyles",
"=",
"CSSColoring",
"(",
"names",
",",
"coloring",
")",
"HdrNts",
"=",
"HeaderNotations",
"(",
"names",
",",
"coloring",
")",
"# If I specify usecss and it is not false, it must be a string and I want ",
"# to put that file name in the link and not write anything out.",
"# If I specify writecss I want it to write out file to that name and use it ",
"# in the link.",
"# If usecss = false, writecss is false and nothing is put in the link.",
"# If usecss is not specified, then ...",
"if",
"usecss",
"!=",
"None",
":",
"if",
"isinstance",
"(",
"usecss",
",",
"str",
")",
":",
"cssfile",
"=",
"usecss",
"CSSLINK",
"=",
"(",
"'<link rel=\"stylesheet\" type=\"text/css\" href=\"'",
"+",
"'/'",
"+",
"cssfile",
"[",
"len",
"(",
"SERVER_FROM_CURDIR",
")",
":",
"]",
"+",
"'\"</link>'",
")",
"else",
":",
"assert",
"usecss",
"==",
"False",
"CSSLINK",
"=",
"''",
"else",
":",
"if",
"writecss",
"==",
"False",
"or",
"returnstring",
":",
"CSSLINK",
"=",
"''",
"else",
":",
"if",
"not",
"isinstance",
"(",
"writecss",
",",
"str",
")",
":",
"cssfile",
"=",
"fname",
"[",
":",
"-",
"5",
"]",
"+",
"'.css'",
"else",
":",
"cssfile",
"=",
"writecss",
"WriteOutCSS",
"(",
"ColorStyles",
"[",
"1",
"]",
",",
"cssfile",
")",
"CSSLINK",
"=",
"(",
"'<link rel=\"stylesheet\" type=\"text/css\" href=\"'",
"+",
"'/'",
"+",
"cssfile",
"[",
"len",
"(",
"SERVER_FROM_CURDIR",
")",
":",
"]",
"+",
"'\"</link>'",
")",
"if",
"returnstring",
":",
"split",
"=",
"False",
"if",
"not",
"split",
"or",
"ROWS_PER_PAGE",
"==",
"'all'",
":",
"ROWS_PER_PAGE",
"=",
"Num_Records",
"+",
"1",
"numSections",
"=",
"int",
"(",
"Num_Records",
"/",
"ROWS_PER_PAGE",
")",
"+",
"1",
"# section2file(i) returns the name of the .html file corresponding to ",
"# section number i.",
"section2file",
"=",
"(",
"lambda",
"sectionNum",
":",
"fname",
"if",
"sectionNum",
"==",
"0",
"else",
"splitext",
"(",
"fname",
")",
"[",
"0",
"]",
"+",
"str",
"(",
"sectionNum",
")",
"+",
"splitext",
"(",
"fname",
")",
"[",
"1",
"]",
")",
"if",
"title",
"is",
"None",
":",
"if",
"not",
"fin",
"is",
"None",
":",
"title",
"=",
"fin",
"else",
":",
"title",
"=",
"'Title Not Given'",
"for",
"section",
"in",
"range",
"(",
"numSections",
")",
":",
"sectionfname",
"=",
"section2file",
"(",
"section",
")",
"fromRow",
"=",
"section",
"*",
"ROWS_PER_PAGE",
"# Start record # for this section.",
"toRow",
"=",
"min",
"(",
"fromRow",
"+",
"ROWS_PER_PAGE",
",",
"Num_Records",
")",
"# End record #.",
"if",
"printheader",
"and",
"not",
"returnstring",
":",
"prefix",
"=",
"'/'",
"+",
"DirName",
"(",
"fname",
"[",
"len",
"(",
"SERVER_FROM_CURDIR",
")",
":",
"]",
")",
"+",
"'/'",
"else",
":",
"prefix",
"=",
"''",
"# Open the output file for the section to fileobject 'f'.",
"if",
"not",
"returnstring",
":",
"f",
"=",
"open",
"(",
"sectionfname",
",",
"'w'",
")",
"else",
":",
"f",
"=",
"tempfile",
".",
"TemporaryFile",
"(",
"'w+b'",
")",
"# Write out file header.",
"if",
"not",
"returnstring",
":",
"f",
".",
"write",
"(",
"'<html><META HTTP-EQUIV=\"Content-Type\" '",
"'CONTENT=\"text/html; charset=utf-8\" /><head><title>'",
"+",
"title",
"+",
"'</title>'",
"+",
"CSSLINK",
"+",
"'</head><body>\\n'",
")",
"if",
"printheader",
":",
"f",
".",
"write",
"(",
"'<p>Tabular File (page '",
"+",
"str",
"(",
"section",
"+",
"1",
")",
"+",
"' of '",
"+",
"str",
"(",
"numSections",
")",
"+",
"', rows '",
"+",
"str",
"(",
"fromRow",
"+",
"1",
")",
"+",
"' - '",
"+",
"str",
"(",
"toRow",
")",
"+",
"'): '",
"+",
"title",
"+",
"'</p>\\n'",
")",
"f",
".",
"write",
"(",
"'<p>page '",
")",
"if",
"section",
">",
"0",
":",
"f",
".",
"write",
"(",
"' <a href=\"'",
"+",
"prefix",
"+",
"basename",
"(",
"section2file",
"(",
"section",
"-",
"1",
")",
")",
"+",
"'\">prev</a> '",
")",
"if",
"section",
"<",
"numSections",
"-",
"1",
":",
"f",
".",
"write",
"(",
"' <a href=\"'",
"+",
"prefix",
"+",
"basename",
"(",
"section2file",
"(",
"section",
"+",
"1",
")",
")",
"+",
"'\">next</a> '",
")",
"for",
"page",
"in",
"range",
"(",
"numSections",
")",
":",
"f",
".",
"write",
"(",
"(",
"' <a href=\"'",
"+",
"prefix",
"+",
"basename",
"(",
"section2file",
"(",
"page",
")",
")",
"+",
"'\">'",
"+",
"str",
"(",
"page",
"+",
"1",
")",
"+",
"'</a>'",
")",
"if",
"page",
"!=",
"section",
"else",
"' '",
"+",
"str",
"(",
"page",
"+",
"1",
")",
")",
"f",
".",
"write",
"(",
"'</p>'",
")",
"# Write out table with number of cols.",
"f",
".",
"write",
"(",
"'<table border=\"1\" cellspacing=\"0\" cellpadding=\"4\">\\n'",
")",
"f",
".",
"write",
"(",
"'<col span=\"'",
"+",
"str",
"(",
"Num_Cols",
")",
"+",
"'\" align=\"center\">\\n'",
")",
"# Write out table header line.",
"f",
".",
"write",
"(",
"'<thead>'",
")",
"if",
"len",
"(",
"HdrNts",
")",
">",
"0",
":",
"for",
"h",
"in",
"HdrNts",
":",
"f",
".",
"write",
"(",
"h",
"+",
"'\\n'",
")",
"f",
".",
"write",
"(",
"'<tr align=\"center\">'",
")",
"for",
"name",
"in",
"names",
":",
"f",
".",
"write",
"(",
"'<th class=\"'",
"+",
"ColorStyles",
"[",
"0",
"]",
"[",
"name",
"]",
"+",
"'\">'",
"+",
"cgi",
".",
"escape",
"(",
"name",
")",
"+",
"'</th>'",
")",
"f",
".",
"write",
"(",
"'</tr>'",
")",
"f",
".",
"write",
"(",
"'</thead>\\n'",
")",
"# Write out each record in the section.",
"f",
".",
"write",
"(",
"'<tbody>\\n'",
")",
"if",
"(",
"len",
"(",
"names",
")",
">",
"1",
")",
"or",
"(",
"fin",
"!=",
"None",
"and",
"fin",
".",
"endswith",
"(",
"'.csv'",
")",
")",
":",
"for",
"row",
"in",
"range",
"(",
"fromRow",
",",
"toRow",
")",
":",
"colorst",
"=",
"(",
"' style=\"background-color:'",
"+",
"RowColors",
"[",
"row",
"]",
"+",
"'\" '",
"if",
"RowColors",
"[",
"row",
"]",
"!=",
"''",
"else",
"''",
")",
"f",
".",
"write",
"(",
"'<tr align=\"center\">'",
")",
"for",
"(",
"i",
",",
"val",
")",
"in",
"enumerate",
"(",
"X",
"[",
"row",
"]",
")",
":",
"#f.write('<td>' + cgi.escape(str(val)) + '</td>')",
"f",
".",
"write",
"(",
"'<td '",
"+",
"colorst",
"+",
"' class=\"'",
"+",
"ColorStyles",
"[",
"0",
"]",
"[",
"names",
"[",
"i",
"]",
"]",
"+",
"'\">'",
"+",
"str",
"(",
"val",
")",
".",
"replace",
"(",
"'\\n'",
",",
"'<br/>'",
")",
"+",
"'</td>'",
")",
"f",
".",
"write",
"(",
"'</tr>\\n'",
")",
"else",
":",
"for",
"row",
"in",
"range",
"(",
"fromRow",
",",
"toRow",
")",
":",
"f",
".",
"write",
"(",
"'<tr align=\"center\">'",
")",
"#f.write('<td>' + cgi.escape(str(X[row])) + '</td>')",
"f",
".",
"write",
"(",
"'<td>'",
"+",
"str",
"(",
"X",
"[",
"row",
"]",
")",
".",
"replace",
"(",
"'\\n'",
",",
"'<br/>'",
")",
"+",
"'</td>'",
")",
"f",
".",
"write",
"(",
"'</tr>\\n'",
")",
"f",
".",
"write",
"(",
"'</tbody>\\n'",
")",
"f",
".",
"write",
"(",
"'</table>'",
")",
"# Write out hyperlinks to other sections.",
"if",
"printheader",
":",
"f",
".",
"write",
"(",
"'<p>page '",
")",
"if",
"section",
">",
"0",
":",
"f",
".",
"write",
"(",
"' <a href=\"'",
"+",
"prefix",
"+",
"basename",
"(",
"section2file",
"(",
"section",
"-",
"1",
")",
")",
"+",
"'\">prev</a> '",
")",
"if",
"section",
"<",
"numSections",
"-",
"1",
":",
"f",
".",
"write",
"(",
"' <a href=\"'",
"+",
"prefix",
"+",
"basename",
"(",
"section2file",
"(",
"section",
"+",
"1",
")",
")",
"+",
"'\">next</a> '",
")",
"for",
"page",
"in",
"range",
"(",
"numSections",
")",
":",
"f",
".",
"write",
"(",
"(",
"' <a href=\"'",
"+",
"prefix",
"+",
"basename",
"(",
"section2file",
"(",
"page",
")",
")",
"+",
"'\">'",
"+",
"str",
"(",
"page",
"+",
"1",
")",
"+",
"'</a>'",
")",
"if",
"page",
"!=",
"section",
"else",
"' '",
"+",
"str",
"(",
"page",
"+",
"1",
")",
")",
"f",
".",
"write",
"(",
"'</p>'",
")",
"# End file.",
"if",
"not",
"returnstring",
":",
"f",
".",
"write",
"(",
"'</body></html>\\n'",
")",
"if",
"returnstring",
":",
"f",
".",
"seek",
"(",
"0",
")",
"s",
"=",
"f",
".",
"read",
"(",
")",
"f",
".",
"close",
"(",
")",
"return",
"s",
"else",
":",
"f",
".",
"close",
"(",
")"
] | Creates an html representation of tabular data, either from a tabarray or
an externa file (`including ``.hsv``, ``.csv``, ``.tsv``). If no data is
directly provided by passing a tabarray to `X`, then a tabarray is
constructed using :func:`tabular.tabarray.tabarray.__new__`.
**Parameters**
**fname** : string
Path to the "main" HTML file to be created. This file path
must end in ``.html``.
Note that this function will create additional files (e.g.
a ``.css`` file and multiple linked ``.html`` files for
large datasets) linked to this HTML file, inside of the
directory containing `fname`.
**X** : tabarray
If `X` is `None`, then one of `fin`, `array`, `records`,
`columns`, `SV`, `HSV`, or `HSVlist` must not be `None`.
**fin** : string
File path to to a source of tabular dat, which will be
loaded using the tabarray constructor. The load method
will be inferred from the file extension and whether or not
there is a headerkey in the first line of the file.
**title** : string
Description to be used in the <title> tag of the created
html file.
**printheader** : boolean
If `printheader = True`, will print out a "header" and
"footer" (both in the body of the HTML documents) on every
page. The header contains information about the name of
the input file and the number of rows, as well what the
current page number is (if split between multiple
documents) and links to any other pages. The footer
contains the same page number and links.
**split** : boolean
If `split = False`, will not split to multiple HTML pages,
regardless of value of `ROWS_PER_PAGE`.
**usecss** : False or None or string
If usecss is False, no link to a cssfile is included in the
page header, and no cssfile is written out. If is a
string, that string is assumed to be a path and is linked
to as the CSS file. If it is None, then consideration of
the `writecss` variable is made.
**writecss** : boolean
If `usecss` is not `None`, then if `writecss` is not
`False`: the default css sheet is generated and written to
a file whose name is either generated by default (if
writecss is None) else given by writecss itself, and linked
to in the file header
**SERVERNAME** : string
Server name. For example, this could be the ServerName
of a VirtualHost on your local machine, assuming that
`fname` describes a path on the server.
**SERVER_FROM_CURDIR** : string
Root path of server relative to the current directory.
Assumed to be '../'.
**ROWS_PER_PAGE** : positive integer or 'all'
This sets the number of records displayed per .html page
(if the tabular file has more than ROWS_PER_PAGE rows,
it will be split into multiple sections on several .html
pages (default = 1000).
If the value is 'all' then the page is not split (e.g. it
is as if split = False)
**See also:** the kwargs arguments must be valid keyword arguments
for :func:`tabular.tabarray.tabarray.__new__`, the tabarray
constructor, see documentation for descriptions. | [
"Creates",
"an",
"html",
"representation",
"of",
"tabular",
"data",
"either",
"from",
"a",
"tabarray",
"or",
"an",
"externa",
"file",
"(",
"including",
".",
"hsv",
".",
"csv",
".",
"tsv",
")",
".",
"If",
"no",
"data",
"is",
"directly",
"provided",
"by",
"passing",
"a",
"tabarray",
"to",
"X",
"then",
"a",
"tabarray",
"is",
"constructed",
"using",
":",
"func",
":",
"tabular",
".",
"tabarray",
".",
"tabarray",
".",
"__new__",
"."
] | python | train |
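The paging scheme described above (ROWS_PER_PAGE records per page, page 0 keeping the original file name and later pages getting a numeric suffix) can be checked in isolation:

from os.path import splitext

def section_filenames(fname, num_records, rows_per_page=1000):
    num_sections = int(num_records / rows_per_page) + 1
    root, ext = splitext(fname)
    return [fname if i == 0 else root + str(i) + ext for i in range(num_sections)]

assert section_filenames('out.html', 2500) == ['out.html', 'out1.html', 'out2.html']
assert section_filenames('out.html', 10) == ['out.html']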
MinchinWeb/minchin.releaser | minchin/releaser/util.py | https://github.com/MinchinWeb/minchin.releaser/blob/cfc7f40ac4852b46db98aa1bb8fcaf138a6cdef4/minchin/releaser/util.py#L35-L61 | def check_existence(to_check, name, config_key=None, relative_to=None,
allow_undefined=False, allow_not_existing=False,
base_key='releaser'):
"""Determine whether a file or folder actually exists."""
if allow_undefined and (to_check is None or to_check.lower() == 'none'):
print("{: <14} -> {}UNDEFINED{}".format(name, WARNING_COLOR,
RESET_COLOR))
return
else:
if config_key is None:
config_key = "{}.{}".format(base_key, name)
my_check = Path(to_check).resolve()
if my_check.exists() and relative_to is not None:
printed_path = str(my_check.relative_to(relative_to))
if printed_path != '.':
printed_path = '.' + os.sep + printed_path
else:
printed_path = str(my_check)
if my_check.exists() or allow_not_existing:
print("{: <14} -> {}".format(name, printed_path))
return
else:
raise FileNotFoundError("[{}ERROR{}] '{}', as given, doesn't "
"exist. For configuration key '{}', was "
"given: {}".format(ERROR_COLOR, RESET_COLOR,
name, config_key,
to_check)) | [
"def",
"check_existence",
"(",
"to_check",
",",
"name",
",",
"config_key",
"=",
"None",
",",
"relative_to",
"=",
"None",
",",
"allow_undefined",
"=",
"False",
",",
"allow_not_existing",
"=",
"False",
",",
"base_key",
"=",
"'releaser'",
")",
":",
"if",
"allow_undefined",
"and",
"(",
"to_check",
"is",
"None",
"or",
"to_check",
".",
"lower",
"(",
")",
"==",
"'none'",
")",
":",
"print",
"(",
"\"{: <14} -> {}UNDEFINED{}\"",
".",
"format",
"(",
"name",
",",
"WARNING_COLOR",
",",
"RESET_COLOR",
")",
")",
"return",
"else",
":",
"if",
"config_key",
"is",
"None",
":",
"config_key",
"=",
"\"{}.{}\"",
".",
"format",
"(",
"base_key",
",",
"name",
")",
"my_check",
"=",
"Path",
"(",
"to_check",
")",
".",
"resolve",
"(",
")",
"if",
"my_check",
".",
"exists",
"(",
")",
"and",
"relative_to",
"is",
"not",
"None",
":",
"printed_path",
"=",
"str",
"(",
"my_check",
".",
"relative_to",
"(",
"relative_to",
")",
")",
"if",
"printed_path",
"!=",
"'.'",
":",
"printed_path",
"=",
"'.'",
"+",
"os",
".",
"sep",
"+",
"printed_path",
"else",
":",
"printed_path",
"=",
"str",
"(",
"my_check",
")",
"if",
"my_check",
".",
"exists",
"(",
")",
"or",
"allow_not_existing",
":",
"print",
"(",
"\"{: <14} -> {}\"",
".",
"format",
"(",
"name",
",",
"printed_path",
")",
")",
"return",
"else",
":",
"raise",
"FileNotFoundError",
"(",
"\"[{}ERROR{}] '{}', as given, doesn't \"",
"\"exist. For configuration key '{}', was \"",
"\"given: {}\"",
".",
"format",
"(",
"ERROR_COLOR",
",",
"RESET_COLOR",
",",
"name",
",",
"config_key",
",",
"to_check",
")",
")"
] | Determine whether a file or folder actually exists. | [
"Determine",
"whether",
"a",
"file",
"or",
"folder",
"actually",
"exists",
"."
] | python | train |
bigchaindb/bigchaindb | bigchaindb/common/transaction.py | https://github.com/bigchaindb/bigchaindb/blob/835fdfcf598918f76139e3b88ee33dd157acaaa7/bigchaindb/common/transaction.py#L161-L185 | def _fulfillment_to_details(fulfillment):
"""Encode a fulfillment as a details dictionary
Args:
fulfillment: Crypto-conditions Fulfillment object
"""
if fulfillment.type_name == 'ed25519-sha-256':
return {
'type': 'ed25519-sha-256',
'public_key': base58.b58encode(fulfillment.public_key).decode(),
}
if fulfillment.type_name == 'threshold-sha-256':
subconditions = [
_fulfillment_to_details(cond['body'])
for cond in fulfillment.subconditions
]
return {
'type': 'threshold-sha-256',
'threshold': fulfillment.threshold,
'subconditions': subconditions,
}
raise UnsupportedTypeError(fulfillment.type_name) | [
"def",
"_fulfillment_to_details",
"(",
"fulfillment",
")",
":",
"if",
"fulfillment",
".",
"type_name",
"==",
"'ed25519-sha-256'",
":",
"return",
"{",
"'type'",
":",
"'ed25519-sha-256'",
",",
"'public_key'",
":",
"base58",
".",
"b58encode",
"(",
"fulfillment",
".",
"public_key",
")",
".",
"decode",
"(",
")",
",",
"}",
"if",
"fulfillment",
".",
"type_name",
"==",
"'threshold-sha-256'",
":",
"subconditions",
"=",
"[",
"_fulfillment_to_details",
"(",
"cond",
"[",
"'body'",
"]",
")",
"for",
"cond",
"in",
"fulfillment",
".",
"subconditions",
"]",
"return",
"{",
"'type'",
":",
"'threshold-sha-256'",
",",
"'threshold'",
":",
"fulfillment",
".",
"threshold",
",",
"'subconditions'",
":",
"subconditions",
",",
"}",
"raise",
"UnsupportedTypeError",
"(",
"fulfillment",
".",
"type_name",
")"
] | Encode a fulfillment as a details dictionary
Args:
fulfillment: Crypto-conditions Fulfillment object | [
"Encode",
"a",
"fulfillment",
"as",
"a",
"details",
"dictionary"
] | python | train |
Metatab/metapack | metapack/cli/metaaws.py | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/metaaws.py#L349-L375 | def bucket_policy_to_dict(policy):
"""Produce a dictionary of read, write permissions for an existing bucket policy document"""
import json
if not isinstance(policy, dict):
policy = json.loads(policy)
statements = {s['Sid']: s for s in policy['Statement']}
d = {}
for rw in ('Read', 'Write'):
for prefix in TOP_LEVEL_DIRS:
sid = rw.title() + prefix.title()
if sid in statements:
if isinstance(statements[sid]['Principal']['AWS'], list):
for principal in statements[sid]['Principal']['AWS']:
user_name = principal.split('/').pop()
d[(user_name, prefix)] = rw[0]
else:
user_name = statements[sid]['Principal']['AWS'].split('/').pop()
d[(user_name, prefix)] = rw[0]
return d | [
"def",
"bucket_policy_to_dict",
"(",
"policy",
")",
":",
"import",
"json",
"if",
"not",
"isinstance",
"(",
"policy",
",",
"dict",
")",
":",
"policy",
"=",
"json",
".",
"loads",
"(",
"policy",
")",
"statements",
"=",
"{",
"s",
"[",
"'Sid'",
"]",
":",
"s",
"for",
"s",
"in",
"policy",
"[",
"'Statement'",
"]",
"}",
"d",
"=",
"{",
"}",
"for",
"rw",
"in",
"(",
"'Read'",
",",
"'Write'",
")",
":",
"for",
"prefix",
"in",
"TOP_LEVEL_DIRS",
":",
"sid",
"=",
"rw",
".",
"title",
"(",
")",
"+",
"prefix",
".",
"title",
"(",
")",
"if",
"sid",
"in",
"statements",
":",
"if",
"isinstance",
"(",
"statements",
"[",
"sid",
"]",
"[",
"'Principal'",
"]",
"[",
"'AWS'",
"]",
",",
"list",
")",
":",
"for",
"principal",
"in",
"statements",
"[",
"sid",
"]",
"[",
"'Principal'",
"]",
"[",
"'AWS'",
"]",
":",
"user_name",
"=",
"principal",
".",
"split",
"(",
"'/'",
")",
".",
"pop",
"(",
")",
"d",
"[",
"(",
"user_name",
",",
"prefix",
")",
"]",
"=",
"rw",
"[",
"0",
"]",
"else",
":",
"user_name",
"=",
"statements",
"[",
"sid",
"]",
"[",
"'Principal'",
"]",
"[",
"'AWS'",
"]",
".",
"split",
"(",
"'/'",
")",
".",
"pop",
"(",
")",
"d",
"[",
"(",
"user_name",
",",
"prefix",
")",
"]",
"=",
"rw",
"[",
"0",
"]",
"return",
"d"
] | Produce a dictionary of read, write permissions for an existing bucket policy document | [
"Produce",
"a",
"dictionary",
"of",
"read",
"write",
"permissions",
"for",
"an",
"existing",
"bucket",
"policy",
"document"
] | python | train |
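The parsing in the record above keys statements by Sid and maps each principal's user name to 'R' or 'W'. A trimmed, self-contained walk-through with a made-up policy (the account id and user names are placeholders, the statements are cut down to the fields the parser reads, and 'source' stands in for one of the TOP_LEVEL_DIRS prefixes):

policy = {
    'Statement': [
        {'Sid': 'ReadSource',
         'Principal': {'AWS': ['arn:aws:iam::123456789012:user/alice',
                               'arn:aws:iam::123456789012:user/bob']}},
        {'Sid': 'WriteSource',
         'Principal': {'AWS': 'arn:aws:iam::123456789012:user/alice'}},
    ]
}
statements = {s['Sid']: s for s in policy['Statement']}
d = {}
for rw in ('Read', 'Write'):
    sid = rw + 'Source'
    if sid not in statements:
        continue
    principals = statements[sid]['Principal']['AWS']
    if not isinstance(principals, list):
        principals = [principals]
    for principal in principals:
        d[(principal.split('/').pop(), 'source')] = rw[0]
assert d == {('alice', 'source'): 'W', ('bob', 'source'): 'R'}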
etal/biocma | biocma/cma.py | https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L374-L389 | def consensus2block(record, level=0, name=None):
"""Convert a Biopython SeqRecord to a esbglib.cma block.
Ungapping is handled here.
"""
cons_ungap = str(record.seq).replace('-', '').replace('.', '').upper()
record.seq = cons_ungap
return dict(
level=level, #record.annotations.get('level', 0),
one=1,
name=name or record.id,
params='go=10000,gx=2000,pn=1000.0,lf=0,rf=0',
query_length=len(cons_ungap),
query_chars='*'*len(cons_ungap),
sequences=[seqrecord2sequence(record, len(cons_ungap), 1)]
) | [
"def",
"consensus2block",
"(",
"record",
",",
"level",
"=",
"0",
",",
"name",
"=",
"None",
")",
":",
"cons_ungap",
"=",
"str",
"(",
"record",
".",
"seq",
")",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
".",
"replace",
"(",
"'.'",
",",
"''",
")",
".",
"upper",
"(",
")",
"record",
".",
"seq",
"=",
"cons_ungap",
"return",
"dict",
"(",
"level",
"=",
"level",
",",
"#record.annotations.get('level', 0),",
"one",
"=",
"1",
",",
"name",
"=",
"name",
"or",
"record",
".",
"id",
",",
"params",
"=",
"'go=10000,gx=2000,pn=1000.0,lf=0,rf=0'",
",",
"query_length",
"=",
"len",
"(",
"cons_ungap",
")",
",",
"query_chars",
"=",
"'*'",
"*",
"len",
"(",
"cons_ungap",
")",
",",
"sequences",
"=",
"[",
"seqrecord2sequence",
"(",
"record",
",",
"len",
"(",
"cons_ungap",
")",
",",
"1",
")",
"]",
")"
] | Convert a Biopython SeqRecord to an esbglib.cma block.
Ungapping is handled here. | [
"Convert",
"a",
"Biopython",
"SeqRecord",
"to",
"a",
"esbglib",
".",
"cma",
"block",
"."
] | python | train |
cloudmesh-cmd3/cmd3 | cmd3/plugins/info.py | https://github.com/cloudmesh-cmd3/cmd3/blob/92e33c96032fd3921f159198a0e57917c4dc34ed/cmd3/plugins/info.py#L22-L42 | def do_info(self, arg, arguments):
"""
::
Usage:
info [--all]
Options:
--all -a more extensive information
Prints some internal information about the shell
"""
if arguments["--all"]:
Console.ok(70 * "-")
Console.ok('DIR')
Console.ok(70 * "-")
for element in dir(self):
Console.ok(str(element))
Console.ok(70 * "-")
self.print_info() | [
"def",
"do_info",
"(",
"self",
",",
"arg",
",",
"arguments",
")",
":",
"if",
"arguments",
"[",
"\"--all\"",
"]",
":",
"Console",
".",
"ok",
"(",
"70",
"*",
"\"-\"",
")",
"Console",
".",
"ok",
"(",
"'DIR'",
")",
"Console",
".",
"ok",
"(",
"70",
"*",
"\"-\"",
")",
"for",
"element",
"in",
"dir",
"(",
"self",
")",
":",
"Console",
".",
"ok",
"(",
"str",
"(",
"element",
")",
")",
"Console",
".",
"ok",
"(",
"70",
"*",
"\"-\"",
")",
"self",
".",
"print_info",
"(",
")"
] | ::
Usage:
info [--all]
Options:
--all -a more extensive information
Prints some internal information about the shell | [
"::"
] | python | train |
shoebot/shoebot | shoebot/sbio/shell.py | https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/sbio/shell.py#L325-L332 | def do_EOF(self, line):
"""
Exit shell and shoebot
Alias for exit.
"""
print(self.response_prompt, file=self.stdout)
return self.do_exit(line) | [
"def",
"do_EOF",
"(",
"self",
",",
"line",
")",
":",
"print",
"(",
"self",
".",
"response_prompt",
",",
"file",
"=",
"self",
".",
"stdout",
")",
"return",
"self",
".",
"do_exit",
"(",
"line",
")"
] | Exit shell and shoebot
Alias for exit. | [
"Exit",
"shell",
"and",
"shoebot"
] | python | valid |
yyuu/botornado | boto/s3/bucket.py | https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/s3/bucket.py#L1289-L1352 | def initiate_multipart_upload(self, key_name, headers=None,
reduced_redundancy=False,
metadata=None, encrypt_key=False):
"""
Start a multipart upload operation.
:type key_name: string
:param key_name: The name of the key that will ultimately result from
this multipart upload operation. This will be exactly
as the key appears in the bucket after the upload
process has been completed.
:type headers: dict
:param headers: Additional HTTP headers to send and store with the
resulting key in S3.
:type reduced_redundancy: boolean
:param reduced_redundancy: In multipart uploads, the storage class is
specified when initiating the upload,
not when uploading individual parts. So
if you want the resulting key to use the
reduced redundancy storage class set this
flag when you initiate the upload.
:type metadata: dict
:param metadata: Any metadata that you would like to set on the key
that results from the multipart upload.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and
will be stored in an encrypted form while
at rest in S3.
"""
query_args = 'uploads'
provider = self.connection.provider
if headers is None:
headers = {}
if reduced_redundancy:
storage_class_header = provider.storage_class_header
if storage_class_header:
headers[storage_class_header] = 'REDUCED_REDUNDANCY'
# TODO: what if the provider doesn't support reduced redundancy?
# (see boto.s3.key.Key.set_contents_from_file)
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
if metadata is None:
metadata = {}
headers = boto.utils.merge_meta(headers, metadata,
self.connection.provider)
response = self.connection.make_request('POST', self.name, key_name,
query_args=query_args,
headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 200:
resp = MultiPartUpload(self)
h = handler.XmlHandler(resp, self)
xml.sax.parseString(body, h)
return resp
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body) | [
"def",
"initiate_multipart_upload",
"(",
"self",
",",
"key_name",
",",
"headers",
"=",
"None",
",",
"reduced_redundancy",
"=",
"False",
",",
"metadata",
"=",
"None",
",",
"encrypt_key",
"=",
"False",
")",
":",
"query_args",
"=",
"'uploads'",
"provider",
"=",
"self",
".",
"connection",
".",
"provider",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"{",
"}",
"if",
"reduced_redundancy",
":",
"storage_class_header",
"=",
"provider",
".",
"storage_class_header",
"if",
"storage_class_header",
":",
"headers",
"[",
"storage_class_header",
"]",
"=",
"'REDUCED_REDUNDANCY'",
"# TODO: what if the provider doesn't support reduced redundancy?",
"# (see boto.s3.key.Key.set_contents_from_file)",
"if",
"encrypt_key",
":",
"headers",
"[",
"provider",
".",
"server_side_encryption_header",
"]",
"=",
"'AES256'",
"if",
"metadata",
"is",
"None",
":",
"metadata",
"=",
"{",
"}",
"headers",
"=",
"boto",
".",
"utils",
".",
"merge_meta",
"(",
"headers",
",",
"metadata",
",",
"self",
".",
"connection",
".",
"provider",
")",
"response",
"=",
"self",
".",
"connection",
".",
"make_request",
"(",
"'POST'",
",",
"self",
".",
"name",
",",
"key_name",
",",
"query_args",
"=",
"query_args",
",",
"headers",
"=",
"headers",
")",
"body",
"=",
"response",
".",
"read",
"(",
")",
"boto",
".",
"log",
".",
"debug",
"(",
"body",
")",
"if",
"response",
".",
"status",
"==",
"200",
":",
"resp",
"=",
"MultiPartUpload",
"(",
"self",
")",
"h",
"=",
"handler",
".",
"XmlHandler",
"(",
"resp",
",",
"self",
")",
"xml",
".",
"sax",
".",
"parseString",
"(",
"body",
",",
"h",
")",
"return",
"resp",
"else",
":",
"raise",
"self",
".",
"connection",
".",
"provider",
".",
"storage_response_error",
"(",
"response",
".",
"status",
",",
"response",
".",
"reason",
",",
"body",
")"
] | Start a multipart upload operation.
:type key_name: string
:param key_name: The name of the key that will ultimately result from
this multipart upload operation. This will be exactly
as the key appears in the bucket after the upload
process has been completed.
:type headers: dict
:param headers: Additional HTTP headers to send and store with the
resulting key in S3.
:type reduced_redundancy: boolean
:param reduced_redundancy: In multipart uploads, the storage class is
specified when initiating the upload,
not when uploading individual parts. So
if you want the resulting key to use the
reduced redundancy storage class set this
flag when you initiate the upload.
:type metadata: dict
:param metadata: Any metadata that you would like to set on the key
that results from the multipart upload.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and
will be stored in an encrypted form while
at rest in S3. | [
"Start",
"a",
"multipart",
"upload",
"operation",
"."
] | python | train |
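The parameter list above reads more easily next to a concrete call. The sketch below is illustrative only: the bucket name, key name and local file path are invented, and upload_part_from_file / complete_upload are the companion methods on the MultiPartUpload object that boto returns.

import boto

conn = boto.connect_s3()
bucket = conn.get_bucket('example-backup-bucket')        # hypothetical bucket

# Storage class, metadata and server-side encryption are all fixed here,
# at initiation time, rather than on the individual parts.
mp = bucket.initiate_multipart_upload('nightly/archive.tar.gz',
                                      reduced_redundancy=True,
                                      metadata={'origin': 'nightly-job'},
                                      encrypt_key=True)
with open('/tmp/archive.tar.gz', 'rb') as fp:            # hypothetical local file
    mp.upload_part_from_file(fp, part_num=1)
mp.complete_upload()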
SheffieldML/GPy | GPy/likelihoods/bernoulli.py | https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/likelihoods/bernoulli.py#L251-L258 | def predictive_quantiles(self, mu, var, quantiles, Y_metadata=None):
"""
Get the "quantiles" of the binary labels (Bernoulli draws). all the
quantiles must be either 0 or 1, since those are the only values the
draw can take!
"""
p = self.predictive_mean(mu, var)
return [np.asarray(p>(q/100.), dtype=np.int32) for q in quantiles] | [
"def",
"predictive_quantiles",
"(",
"self",
",",
"mu",
",",
"var",
",",
"quantiles",
",",
"Y_metadata",
"=",
"None",
")",
":",
"p",
"=",
"self",
".",
"predictive_mean",
"(",
"mu",
",",
"var",
")",
"return",
"[",
"np",
".",
"asarray",
"(",
"p",
">",
"(",
"q",
"/",
"100.",
")",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"for",
"q",
"in",
"quantiles",
"]"
] | Get the "quantiles" of the binary labels (Bernoulli draws). all the
quantiles must be either 0 or 1, since those are the only values the
draw can take! | [
"Get",
"the",
"quantiles",
"of",
"the",
"binary",
"labels",
"(",
"Bernoulli",
"draws",
")",
".",
"all",
"the",
"quantiles",
"must",
"be",
"either",
"0",
"or",
"1",
"since",
"those",
"are",
"the",
"only",
"values",
"the",
"draw",
"can",
"take!"
] | python | train |
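Because every draw from a Bernoulli is 0 or 1, the "quantiles" above reduce to thresholding the predictive mean at q/100. A stand-alone numpy sketch of that thresholding, with made-up predictive means, shows the effect:

import numpy as np

p = np.array([[0.05], [0.40], [0.93]])     # assumed predictive means for 3 inputs
quantiles = (2.5, 50.0, 97.5)
out = [np.asarray(p > (q / 100.), dtype=np.int32) for q in quantiles]
# out[0] -> [[1], [1], [1]]   every mean exceeds 0.025
# out[1] -> [[0], [0], [1]]   only the last mean exceeds 0.5
# out[2] -> [[0], [0], [0]]   no mean exceeds 0.975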
obilaniu/Nauka | src/nauka/exp/experiment.py | https://github.com/obilaniu/Nauka/blob/1492a4f9d204a868c1a8a1d327bd108490b856b4/src/nauka/exp/experiment.py#L163-L199 | def purge (self,
strategy = "klogn",
keep = None,
deleteNonSnapshots = False,
**kwargs):
"""Purge snapshot directory of snapshots according to some strategy,
preserving however a given "keep" list or set of snapshot numbers.
Available strategies are:
"lastk": Keep last k snapshots (Default: k=10)
"klogn": Keep every snapshot in the last k, 2k snapshots in
the last k**2, 3k snapshots in the last k**3, ...
(Default: k=4. k must be > 1).
Returns `self`."""
assert(isinstance(keep, (list, set)) or keep is None)
keep = set(keep or [])
if self.haveSnapshots:
if strategy == "lastk":
keep.update(self.strategyLastK(self.latestSnapshotNum, **kwargs))
elif strategy == "klogn":
keep.update(self.strategyKLogN(self.latestSnapshotNum, **kwargs))
else:
raise ValueError("Unknown purge strategy "+str(None)+"!")
keep.update(["latest", str(self.latestSnapshotNum)])
keep = set(map(str, keep))
snaps, nonSnaps = self.listSnapshotDir(self.snapDir)
dirEntriesToDelete = set()
dirEntriesToDelete.update(snaps)
dirEntriesToDelete.update(nonSnaps if deleteNonSnapshots else set())
dirEntriesToDelete.difference_update(keep)
for dirEntry in dirEntriesToDelete:
self.rmR(os.path.join(self.snapDir, dirEntry))
return self | [
"def",
"purge",
"(",
"self",
",",
"strategy",
"=",
"\"klogn\"",
",",
"keep",
"=",
"None",
",",
"deleteNonSnapshots",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"(",
"isinstance",
"(",
"keep",
",",
"(",
"list",
",",
"set",
")",
")",
"or",
"keep",
"is",
"None",
")",
"keep",
"=",
"set",
"(",
"keep",
"or",
"[",
"]",
")",
"if",
"self",
".",
"haveSnapshots",
":",
"if",
"strategy",
"==",
"\"lastk\"",
":",
"keep",
".",
"update",
"(",
"self",
".",
"strategyLastK",
"(",
"self",
".",
"latestSnapshotNum",
",",
"*",
"*",
"kwargs",
")",
")",
"elif",
"strategy",
"==",
"\"klogn\"",
":",
"keep",
".",
"update",
"(",
"self",
".",
"strategyKLogN",
"(",
"self",
".",
"latestSnapshotNum",
",",
"*",
"*",
"kwargs",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown purge strategy \"",
"+",
"str",
"(",
"None",
")",
"+",
"\"!\"",
")",
"keep",
".",
"update",
"(",
"[",
"\"latest\"",
",",
"str",
"(",
"self",
".",
"latestSnapshotNum",
")",
"]",
")",
"keep",
"=",
"set",
"(",
"map",
"(",
"str",
",",
"keep",
")",
")",
"snaps",
",",
"nonSnaps",
"=",
"self",
".",
"listSnapshotDir",
"(",
"self",
".",
"snapDir",
")",
"dirEntriesToDelete",
"=",
"set",
"(",
")",
"dirEntriesToDelete",
".",
"update",
"(",
"snaps",
")",
"dirEntriesToDelete",
".",
"update",
"(",
"nonSnaps",
"if",
"deleteNonSnapshots",
"else",
"set",
"(",
")",
")",
"dirEntriesToDelete",
".",
"difference_update",
"(",
"keep",
")",
"for",
"dirEntry",
"in",
"dirEntriesToDelete",
":",
"self",
".",
"rmR",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"snapDir",
",",
"dirEntry",
")",
")",
"return",
"self"
] | Purge snapshot directory of snapshots according to some strategy,
preserving however a given "keep" list or set of snapshot numbers.
Available strategies are:
"lastk": Keep last k snapshots (Default: k=10)
"klogn": Keep every snapshot in the last k, 2k snapshots in
the last k**2, 3k snapshots in the last k**3, ...
(Default: k=4. k must be > 1).
Returns `self`. | [
"Purge",
"snapshot",
"directory",
"of",
"snapshots",
"according",
"to",
"some",
"strategy",
"preserving",
"however",
"a",
"given",
"keep",
"list",
"or",
"set",
"of",
"snapshot",
"numbers",
".",
"Available",
"strategies",
"are",
":",
"lastk",
":",
"Keep",
"last",
"k",
"snapshots",
"(",
"Default",
":",
"k",
"=",
"10",
")",
"klogn",
":",
"Keep",
"every",
"snapshot",
"in",
"the",
"last",
"k",
"2k",
"snapshots",
"in",
"the",
"last",
"k",
"**",
"2",
"3k",
"snapshots",
"in",
"the",
"last",
"k",
"**",
"3",
"...",
"(",
"Default",
":",
"k",
"=",
"4",
".",
"k",
"must",
"be",
">",
"1",
")",
".",
"Returns",
"self",
"."
] | python | train |
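A few hypothetical call sites may make the strategy/keep/deleteNonSnapshots interplay above more tangible; the experiment object, the snapshot numbers and the k= keyword (forwarded through **kwargs to the strategy helper) are assumptions for the example.

exp.purge()                                   # default "klogn" thinning
exp.purge(strategy="lastk", keep={7, 12})     # keep the last k plus snapshots 7 and 12
exp.purge(strategy="lastk", k=5,              # assumed strategy kwarg
          deleteNonSnapshots=True)            # also remove stray files in snapDir
# "latest" and the latest snapshot number are always preserved.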
kalefranz/auxlib | auxlib/configuration.py | https://github.com/kalefranz/auxlib/blob/6ff2d6b57d128d0b9ed8f01ad83572e938da064f/auxlib/configuration.py#L146-L150 | def unset_env(self, key):
"""Removes an environment variable using the prepended app_name convention with `key`."""
os.environ.pop(make_env_key(self.appname, key), None)
self._registered_env_keys.discard(key)
self._clear_memoization() | [
"def",
"unset_env",
"(",
"self",
",",
"key",
")",
":",
"os",
".",
"environ",
".",
"pop",
"(",
"make_env_key",
"(",
"self",
".",
"appname",
",",
"key",
")",
",",
"None",
")",
"self",
".",
"_registered_env_keys",
".",
"discard",
"(",
"key",
")",
"self",
".",
"_clear_memoization",
"(",
")"
] | Removes an environment variable using the prepended app_name convention with `key`. | [
"Removes",
"an",
"environment",
"variable",
"using",
"the",
"prepended",
"app_name",
"convention",
"with",
"key",
"."
] | python | train |
Trax-air/swagger-parser | swagger_parser/swagger_parser.py | https://github.com/Trax-air/swagger-parser/blob/d97f962a417e76320c59c33dcb223e4373e516d5/swagger_parser/swagger_parser.py#L278-L315 | def _definition_from_example(example):
"""Generates a swagger definition json from a given example
Works only for simple types in the dict
Args:
example: The example for which we want a definition
Type is DICT
Returns:
A dict that is the swagger definition json
"""
assert isinstance(example, dict)
def _has_simple_type(value):
accepted = (str, int, float, bool)
return isinstance(value, accepted)
definition = {
'type': 'object',
'properties': {},
}
for key, value in example.items():
if not _has_simple_type(value):
raise Exception("Not implemented yet")
ret_value = None
if isinstance(value, str):
ret_value = {'type': 'string'}
elif isinstance(value, int):
ret_value = {'type': 'integer', 'format': 'int64'}
elif isinstance(value, float):
ret_value = {'type': 'number', 'format': 'double'}
elif isinstance(value, bool):
ret_value = {'type': 'boolean'}
else:
raise Exception("Not implemented yet")
definition['properties'][key] = ret_value
return definition | [
"def",
"_definition_from_example",
"(",
"example",
")",
":",
"assert",
"isinstance",
"(",
"example",
",",
"dict",
")",
"def",
"_has_simple_type",
"(",
"value",
")",
":",
"accepted",
"=",
"(",
"str",
",",
"int",
",",
"float",
",",
"bool",
")",
"return",
"isinstance",
"(",
"value",
",",
"accepted",
")",
"definition",
"=",
"{",
"'type'",
":",
"'object'",
",",
"'properties'",
":",
"{",
"}",
",",
"}",
"for",
"key",
",",
"value",
"in",
"example",
".",
"items",
"(",
")",
":",
"if",
"not",
"_has_simple_type",
"(",
"value",
")",
":",
"raise",
"Exception",
"(",
"\"Not implemented yet\"",
")",
"ret_value",
"=",
"None",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"ret_value",
"=",
"{",
"'type'",
":",
"'string'",
"}",
"elif",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"ret_value",
"=",
"{",
"'type'",
":",
"'integer'",
",",
"'format'",
":",
"'int64'",
"}",
"elif",
"isinstance",
"(",
"value",
",",
"float",
")",
":",
"ret_value",
"=",
"{",
"'type'",
":",
"'number'",
",",
"'format'",
":",
"'double'",
"}",
"elif",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"ret_value",
"=",
"{",
"'type'",
":",
"'boolean'",
"}",
"else",
":",
"raise",
"Exception",
"(",
"\"Not implemented yet\"",
")",
"definition",
"[",
"'properties'",
"]",
"[",
"key",
"]",
"=",
"ret_value",
"return",
"definition"
] | Generates a swagger definition json from a given example
Works only for simple types in the dict
Args:
example: The example for which we want a definition
Type is DICT
Returns:
A dict that is the swagger definition json | [
"Generates",
"a",
"swagger",
"definition",
"json",
"from",
"a",
"given",
"example",
"Works",
"only",
"for",
"simple",
"types",
"in",
"the",
"dict"
] | python | train |
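A worked example, using a made-up record, follows directly from the isinstance branches above. Note that a bool value would actually land in the integer branch, because bool is a subclass of int in Python and the int check runs first.

example = {'name': 'rex', 'age': 3, 'weight': 14.5}
definition = _definition_from_example(example)
# definition == {
#     'type': 'object',
#     'properties': {
#         'name':   {'type': 'string'},
#         'age':    {'type': 'integer', 'format': 'int64'},
#         'weight': {'type': 'number', 'format': 'double'},
#     },
# }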
TkTech/Jawa | jawa/fields.py | https://github.com/TkTech/Jawa/blob/94c8424e699029ac33fbc0e866fff0ecb2742289/jawa/fields.py#L98-L102 | def find_and_remove(self, f: Callable):
"""
Removes any and all fields for which `f(field)` returns `True`.
"""
self._table = [fld for fld in self._table if not f(fld)] | [
"def",
"find_and_remove",
"(",
"self",
",",
"f",
":",
"Callable",
")",
":",
"self",
".",
"_table",
"=",
"[",
"fld",
"for",
"fld",
"in",
"self",
".",
"_table",
"if",
"not",
"f",
"(",
"fld",
")",
"]"
] | Removes any and all fields for which `f(field)` returns `True`. | [
"Removes",
"any",
"and",
"all",
"fields",
"for",
"which",
"f",
"(",
"field",
")",
"returns",
"True",
"."
] | python | train |
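A one-line illustration of the predicate-based removal above; `cf` stands for an already-loaded ClassFile, and the fld.name.value access is an assumption made for the sketch.

cf.fields.find_and_remove(lambda fld: fld.name.value == 'debugFlag')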
msmbuilder/msmbuilder | msmbuilder/msm/msm.py | https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/msm/msm.py#L212-L281 | def eigtransform(self, sequences, right=True, mode='clip'):
r"""Transform a list of sequences by projecting the sequences onto
the first `n_timescales` dynamical eigenvectors.
Parameters
----------
sequences : list of array-like
List of sequences, or a single sequence. Each sequence should be a
1D iterable of state labels. Labels can be integers, strings, or
other orderable objects.
right : bool
Which eigenvectors to map onto. Both the left (:math:`\Phi`) and
the right (:math:`\Psi`) eigenvectors of the transition matrix are
commonly used, and differ in their normalization. The two sets of
eigenvectors are related by the stationary distribution ::
\Phi_i(x) = \Psi_i(x) * \mu(x)
In the MSM literature, the right vectors (default here) are
approximations to the transfer operator eigenfunctions, whereas
the left eigenfunction are approximations to the propagator
eigenfunctions. For more details, refer to reference [1].
mode : {'clip', 'fill'}
Method by which to treat labels in `sequences` which do not have
a corresponding index. This can be due, for example, to the ergodic
trimming step.
``clip``
Unmapped labels are removed during transform. If they occur
at the beginning or end of a sequence, the resulting transformed
sequence will be shorted. If they occur in the middle of a
sequence, that sequence will be broken into two (or more)
sequences. (Default)
``fill``
Unmapped labels will be replaced with NaN, to signal missing
data. [The use of NaN to signal missing data is not fantastic,
but it's consistent with current behavior of the ``pandas``
library.]
Returns
-------
transformed : list of 2d arrays
Each element of transformed is an array of shape ``(n_samples,
n_timescales)`` containing the transformed data.
References
----------
.. [1] Prinz, Jan-Hendrik, et al. "Markov models of molecular kinetics:
Generation and validation." J. Chem. Phys. 134.17 (2011): 174105.
"""
result = []
for y in self.transform(sequences, mode=mode):
if right:
op = self.right_eigenvectors_[:, 1:]
else:
op = self.left_eigenvectors_[:, 1:]
is_finite = np.isfinite(y)
if not np.all(is_finite):
value = np.empty((y.shape[0], op.shape[1]))
value[is_finite, :] = np.take(op, y[is_finite].astype(np.int), axis=0)
value[~is_finite, :] = np.nan
else:
value = np.take(op, y, axis=0)
result.append(value)
return result | [
"def",
"eigtransform",
"(",
"self",
",",
"sequences",
",",
"right",
"=",
"True",
",",
"mode",
"=",
"'clip'",
")",
":",
"result",
"=",
"[",
"]",
"for",
"y",
"in",
"self",
".",
"transform",
"(",
"sequences",
",",
"mode",
"=",
"mode",
")",
":",
"if",
"right",
":",
"op",
"=",
"self",
".",
"right_eigenvectors_",
"[",
":",
",",
"1",
":",
"]",
"else",
":",
"op",
"=",
"self",
".",
"left_eigenvectors_",
"[",
":",
",",
"1",
":",
"]",
"is_finite",
"=",
"np",
".",
"isfinite",
"(",
"y",
")",
"if",
"not",
"np",
".",
"all",
"(",
"is_finite",
")",
":",
"value",
"=",
"np",
".",
"empty",
"(",
"(",
"y",
".",
"shape",
"[",
"0",
"]",
",",
"op",
".",
"shape",
"[",
"1",
"]",
")",
")",
"value",
"[",
"is_finite",
",",
":",
"]",
"=",
"np",
".",
"take",
"(",
"op",
",",
"y",
"[",
"is_finite",
"]",
".",
"astype",
"(",
"np",
".",
"int",
")",
",",
"axis",
"=",
"0",
")",
"value",
"[",
"~",
"is_finite",
",",
":",
"]",
"=",
"np",
".",
"nan",
"else",
":",
"value",
"=",
"np",
".",
"take",
"(",
"op",
",",
"y",
",",
"axis",
"=",
"0",
")",
"result",
".",
"append",
"(",
"value",
")",
"return",
"result"
] | r"""Transform a list of sequences by projecting the sequences onto
the first `n_timescales` dynamical eigenvectors.
Parameters
----------
sequences : list of array-like
List of sequences, or a single sequence. Each sequence should be a
1D iterable of state labels. Labels can be integers, strings, or
other orderable objects.
right : bool
Which eigenvectors to map onto. Both the left (:math:`\Phi`) and
the right (:math:`\Psi`) eigenvectors of the transition matrix are
commonly used, and differ in their normalization. The two sets of
eigenvectors are related by the stationary distribution ::
\Phi_i(x) = \Psi_i(x) * \mu(x)
In the MSM literature, the right vectors (default here) are
approximations to the transfer operator eigenfunctions, whereas
the left eigenfunction are approximations to the propagator
eigenfunctions. For more details, refer to reference [1].
mode : {'clip', 'fill'}
Method by which to treat labels in `sequences` which do not have
a corresponding index. This can be due, for example, to the ergodic
trimming step.
``clip``
Unmapped labels are removed during transform. If they occur
at the beginning or end of a sequence, the resulting transformed
sequence will be shorted. If they occur in the middle of a
sequence, that sequence will be broken into two (or more)
sequences. (Default)
``fill``
Unmapped labels will be replaced with NaN, to signal missing
data. [The use of NaN to signal missing data is not fantastic,
but it's consistent with current behavior of the ``pandas``
library.]
Returns
-------
transformed : list of 2d arrays
Each element of transformed is an array of shape ``(n_samples,
n_timescales)`` containing the transformed data.
References
----------
.. [1] Prinz, Jan-Hendrik, et al. "Markov models of molecular kinetics:
Generation and validation." J. Chem. Phys. 134.17 (2011): 174105. | [
"r",
"Transform",
"a",
"list",
"of",
"sequences",
"by",
"projecting",
"the",
"sequences",
"onto",
"the",
"first",
"n_timescales",
"dynamical",
"eigenvectors",
"."
] | python | train |
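A hypothetical usage sketch of eigtransform on a fitted model; the state sequences are invented and the MarkovStateModel constructor/fit API is assumed from the surrounding msmbuilder package rather than shown here.

import numpy as np
from msmbuilder.msm import MarkovStateModel

sequences = [np.array([0, 0, 1, 2, 1, 0]), np.array([2, 2, 1, 0])]
msm = MarkovStateModel(lag_time=1, n_timescales=2)
msm.fit(sequences)

psi = msm.eigtransform(sequences, right=True, mode='clip')   # transfer-operator view
phi = msm.eigtransform(sequences, right=False, mode='fill')  # propagator view
# each returned entry has shape (n_samples, n_timescales); 'fill' marks
# unmapped states with NaN instead of dropping them.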
chromy/essence | src/essence/world.py | https://github.com/chromy/essence/blob/6cd18821ec91edf022619d9f0c0878f38c22a763/src/essence/world.py#L56-L72 | def add_component(self, entity, component):
"""Add component to entity.
Long-hand for :func:`essence.Entity.add`.
:param entity: entity to associate
:type entity: :class:`essence.Entity`
:param component: component to add to the entity
:type component: :class:`essence.Component`"""
component_type = type(component)
relation = self._get_relation(component_type)
if entity in relation:
# PYTHON2.6: Numbers required in format string.
msg = "Component {0} can't be added to entity {1} since it already has a component of type {2}.".format(component, entity, component_type)
raise DuplicateComponentError(msg)
relation[entity] = component
self._entities_with(component_type).add(entity) | [
"def",
"add_component",
"(",
"self",
",",
"entity",
",",
"component",
")",
":",
"component_type",
"=",
"type",
"(",
"component",
")",
"relation",
"=",
"self",
".",
"_get_relation",
"(",
"component_type",
")",
"if",
"entity",
"in",
"relation",
":",
"# PYTHON2.6: Numbers required in format string.",
"msg",
"=",
"\"Component {0} can't be added to entity {1} since it already has a component of type {2}.\"",
".",
"format",
"(",
"component",
",",
"entity",
",",
"component_type",
")",
"raise",
"DuplicateComponentError",
"(",
"msg",
")",
"relation",
"[",
"entity",
"]",
"=",
"component",
"self",
".",
"_entities_with",
"(",
"component_type",
")",
".",
"add",
"(",
"entity",
")"
] | Add component to entity.
Long-hand for :func:`essence.Entity.add`.
:param entity: entity to associate
:type entity: :class:`essence.Entity`
:param component: component to add to the entity
:type component: :class:`essence.Component` | [
"Add",
"component",
"to",
"entity",
"."
] | python | train |
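An illustrative call sequence for add_component; the World/entity creation calls and the Position component are assumptions made for the example.

world = World()
player = world.create_entity()            # assumed entity-creation API
world.add_component(player, Position(x=0, y=0))
# a second Position on the same entity raises DuplicateComponentError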
AndresMWeber/Nomenclate | nomenclate/core/configurator.py | https://github.com/AndresMWeber/Nomenclate/blob/e6d6fc28beac042bad588e56fbe77531d2de6b6f/nomenclate/core/configurator.py#L160-L174 | def _get_path_entry_from_string(self, query_string, first_found=True, full_path=False):
""" Parses a string to form a list of strings that represents a possible config entry header
:param query_string: str, query string we are looking for
:param first_found: bool, return first found entry or entire list
:param full_path: bool, whether to return each entry with their corresponding config entry path
:return: (Generator((list, str, dict, OrderedDict)), config entries that match the query string
:raises: exceptions.ResourceNotFoundError
"""
iter_matches = gen_dict_key_matches(query_string, self.config_file_contents, full_path=full_path)
try:
return next(iter_matches) if first_found else iter_matches
except (StopIteration, TypeError):
raise errors.ResourceNotFoundError('Could not find search string %s in the config file contents %s' %
(query_string, self.config_file_contents)) | [
"def",
"_get_path_entry_from_string",
"(",
"self",
",",
"query_string",
",",
"first_found",
"=",
"True",
",",
"full_path",
"=",
"False",
")",
":",
"iter_matches",
"=",
"gen_dict_key_matches",
"(",
"query_string",
",",
"self",
".",
"config_file_contents",
",",
"full_path",
"=",
"full_path",
")",
"try",
":",
"return",
"next",
"(",
"iter_matches",
")",
"if",
"first_found",
"else",
"iter_matches",
"except",
"(",
"StopIteration",
",",
"TypeError",
")",
":",
"raise",
"errors",
".",
"ResourceNotFoundError",
"(",
"'Could not find search string %s in the config file contents %s'",
"%",
"(",
"query_string",
",",
"self",
".",
"config_file_contents",
")",
")"
] | Parses a string to form a list of strings that represents a possible config entry header
:param query_string: str, query string we are looking for
:param first_found: bool, return first found entry or entire list
:param full_path: bool, whether to return each entry with their corresponding config entry path
:return: (Generator((list, str, dict, OrderedDict)), config entries that match the query string
:raises: exceptions.ResourceNotFoundError | [
"Parses",
"a",
"string",
"to",
"form",
"a",
"list",
"of",
"strings",
"that",
"represents",
"a",
"possible",
"config",
"entry",
"header"
] | python | train |
ultrabug/py3status | py3status/composite.py | https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/composite.py#L58-L72 | def append(self, item):
"""
Add an item to the Composite. Item can be a Composite, list etc
"""
if isinstance(item, Composite):
self._content += item.get_content()
elif isinstance(item, list):
self._content += item
elif isinstance(item, dict):
self._content.append(item)
elif isinstance(item, basestring):
self._content.append({"full_text": item})
else:
msg = "{!r} not suitable to append to Composite"
raise Exception(msg.format(item)) | [
"def",
"append",
"(",
"self",
",",
"item",
")",
":",
"if",
"isinstance",
"(",
"item",
",",
"Composite",
")",
":",
"self",
".",
"_content",
"+=",
"item",
".",
"get_content",
"(",
")",
"elif",
"isinstance",
"(",
"item",
",",
"list",
")",
":",
"self",
".",
"_content",
"+=",
"item",
"elif",
"isinstance",
"(",
"item",
",",
"dict",
")",
":",
"self",
".",
"_content",
".",
"append",
"(",
"item",
")",
"elif",
"isinstance",
"(",
"item",
",",
"basestring",
")",
":",
"self",
".",
"_content",
".",
"append",
"(",
"{",
"\"full_text\"",
":",
"item",
"}",
")",
"else",
":",
"msg",
"=",
"\"{!r} not suitable to append to Composite\"",
"raise",
"Exception",
"(",
"msg",
".",
"format",
"(",
"item",
")",
")"
] | Add an item to the Composite. Item can be a Composite, list etc | [
"Add",
"an",
"item",
"to",
"the",
"Composite",
".",
"Item",
"can",
"be",
"a",
"Composite",
"list",
"etc"
] | python | train |
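The four accepted input shapes map onto the isinstance branches above; a no-argument Composite() constructor is assumed for the sketch.

c = Composite()
c.append("plain text")                                   # str -> {'full_text': ...}
c.append({"full_text": "block", "color": "#00FF00"})     # single dict
c.append([{"full_text": "a"}, {"full_text": "b"}])       # list of dicts

other = Composite()
other.append({"full_text": "nested"})
c.append(other)                                          # merges the other composite
# c.append(42) raises: "42 not suitable to append to Composite"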
chaoss/grimoirelab-perceval | perceval/backends/core/groupsio.py | https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/groupsio.py#L259-L268 | def _pre_init(self):
"""Initialize mailing lists directory path"""
if not self.parsed_args.mboxes_path:
base_path = os.path.expanduser('~/.perceval/mailinglists/')
dirpath = os.path.join(base_path, GROUPSIO_URL, 'g', self.parsed_args.group_name)
else:
dirpath = self.parsed_args.mboxes_path
setattr(self.parsed_args, 'dirpath', dirpath) | [
"def",
"_pre_init",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"parsed_args",
".",
"mboxes_path",
":",
"base_path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~/.perceval/mailinglists/'",
")",
"dirpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_path",
",",
"GROUPSIO_URL",
",",
"'g'",
",",
"self",
".",
"parsed_args",
".",
"group_name",
")",
"else",
":",
"dirpath",
"=",
"self",
".",
"parsed_args",
".",
"mboxes_path",
"setattr",
"(",
"self",
".",
"parsed_args",
",",
"'dirpath'",
",",
"dirpath",
")"
] | Initialize mailing lists directory path | [
"Initialize",
"mailing",
"lists",
"directory",
"path"
] | python | test |
mar10/wsgidav | wsgidav/dav_provider.py | https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/dav_provider.py#L582-L716 | def get_property_value(self, name):
"""Return the value of a property.
name:
the property name in Clark notation.
return value:
may have different types, depending on the status:
- string or unicode: for standard property values.
- lxml.etree.Element: for complex values.
If the property is not available, a DAVError is raised.
This default implementation handles ``{DAV:}lockdiscovery`` and
``{DAV:}supportedlock`` using the associated lock manager.
All other *live* properties (i.e. name starts with ``{DAV:}``) are
delegated to the self.xxx() getters.
Finally, other properties are considered *dead*, and are handled by
the associated property manager.
"""
refUrl = self.get_ref_url()
# lock properties
lm = self.provider.lock_manager
if lm and name == "{DAV:}lockdiscovery":
# TODO: we return HTTP_NOT_FOUND if no lockmanager is present.
# Correct?
activelocklist = lm.get_url_lock_list(refUrl)
lockdiscoveryEL = etree.Element(name)
for lock in activelocklist:
activelockEL = etree.SubElement(lockdiscoveryEL, "{DAV:}activelock")
locktypeEL = etree.SubElement(activelockEL, "{DAV:}locktype")
# Note: make sure `{DAV:}` is not handled as format tag:
etree.SubElement(locktypeEL, "{}{}".format("{DAV:}", lock["type"]))
lockscopeEL = etree.SubElement(activelockEL, "{DAV:}lockscope")
# Note: make sure `{DAV:}` is not handled as format tag:
etree.SubElement(lockscopeEL, "{}{}".format("{DAV:}", lock["scope"]))
etree.SubElement(activelockEL, "{DAV:}depth").text = lock["depth"]
if lock["owner"]:
# lock["owner"] is an XML string
# owner may be empty (#64)
ownerEL = xml_tools.string_to_xml(lock["owner"])
activelockEL.append(ownerEL)
timeout = lock["timeout"]
if timeout < 0:
timeout = "Infinite"
else:
# The time remaining on the lock
expire = lock["expire"]
timeout = "Second-" + str(int(expire - time.time()))
etree.SubElement(activelockEL, "{DAV:}timeout").text = timeout
locktokenEL = etree.SubElement(activelockEL, "{DAV:}locktoken")
etree.SubElement(locktokenEL, "{DAV:}href").text = lock["token"]
# TODO: this is ugly:
# res.get_property_value("{DAV:}lockdiscovery")
#
# lockRoot = self.get_href(self.provider.ref_url_to_path(lock["root"]))
lockPath = self.provider.ref_url_to_path(lock["root"])
lockRes = self.provider.get_resource_inst(lockPath, self.environ)
# FIXME: test for None
lockHref = lockRes.get_href()
lockrootEL = etree.SubElement(activelockEL, "{DAV:}lockroot")
etree.SubElement(lockrootEL, "{DAV:}href").text = lockHref
return lockdiscoveryEL
elif lm and name == "{DAV:}supportedlock":
# TODO: we return HTTP_NOT_FOUND if no lockmanager is present. Correct?
# TODO: the lockmanager should decide about it's features
supportedlockEL = etree.Element(name)
lockentryEL = etree.SubElement(supportedlockEL, "{DAV:}lockentry")
lockscopeEL = etree.SubElement(lockentryEL, "{DAV:}lockscope")
etree.SubElement(lockscopeEL, "{DAV:}exclusive")
locktypeEL = etree.SubElement(lockentryEL, "{DAV:}locktype")
etree.SubElement(locktypeEL, "{DAV:}write")
lockentryEL = etree.SubElement(supportedlockEL, "{DAV:}lockentry")
lockscopeEL = etree.SubElement(lockentryEL, "{DAV:}lockscope")
etree.SubElement(lockscopeEL, "{DAV:}shared")
locktypeEL = etree.SubElement(lockentryEL, "{DAV:}locktype")
etree.SubElement(locktypeEL, "{DAV:}write")
return supportedlockEL
elif name.startswith("{DAV:}"):
# Standard live property (raises HTTP_NOT_FOUND if not supported)
if name == "{DAV:}creationdate" and self.get_creation_date() is not None:
# Note: uses RFC3339 format (ISO 8601)
return util.get_rfc3339_time(self.get_creation_date())
elif name == "{DAV:}getcontenttype" and self.get_content_type() is not None:
return self.get_content_type()
elif name == "{DAV:}resourcetype":
if self.is_collection:
resourcetypeEL = etree.Element(name)
etree.SubElement(resourcetypeEL, "{DAV:}collection")
return resourcetypeEL
return ""
elif (
name == "{DAV:}getlastmodified" and self.get_last_modified() is not None
):
# Note: uses RFC1123 format
return util.get_rfc1123_time(self.get_last_modified())
elif (
name == "{DAV:}getcontentlength"
and self.get_content_length() is not None
):
# Note: must be a numeric string
return str(self.get_content_length())
elif name == "{DAV:}getetag" and self.get_etag() is not None:
return self.get_etag()
elif name == "{DAV:}displayname" and self.get_display_name() is not None:
return self.get_display_name()
# Unsupported, no persistence available, or property not found
raise DAVError(HTTP_NOT_FOUND)
# Dead property
pm = self.provider.prop_manager
if pm:
value = pm.get_property(refUrl, name, self.environ)
if value is not None:
return xml_tools.string_to_xml(value)
# No persistence available, or property not found
raise DAVError(HTTP_NOT_FOUND) | [
"def",
"get_property_value",
"(",
"self",
",",
"name",
")",
":",
"refUrl",
"=",
"self",
".",
"get_ref_url",
"(",
")",
"# lock properties",
"lm",
"=",
"self",
".",
"provider",
".",
"lock_manager",
"if",
"lm",
"and",
"name",
"==",
"\"{DAV:}lockdiscovery\"",
":",
"# TODO: we return HTTP_NOT_FOUND if no lockmanager is present.",
"# Correct?",
"activelocklist",
"=",
"lm",
".",
"get_url_lock_list",
"(",
"refUrl",
")",
"lockdiscoveryEL",
"=",
"etree",
".",
"Element",
"(",
"name",
")",
"for",
"lock",
"in",
"activelocklist",
":",
"activelockEL",
"=",
"etree",
".",
"SubElement",
"(",
"lockdiscoveryEL",
",",
"\"{DAV:}activelock\"",
")",
"locktypeEL",
"=",
"etree",
".",
"SubElement",
"(",
"activelockEL",
",",
"\"{DAV:}locktype\"",
")",
"# Note: make sure `{DAV:}` is not handled as format tag:",
"etree",
".",
"SubElement",
"(",
"locktypeEL",
",",
"\"{}{}\"",
".",
"format",
"(",
"\"{DAV:}\"",
",",
"lock",
"[",
"\"type\"",
"]",
")",
")",
"lockscopeEL",
"=",
"etree",
".",
"SubElement",
"(",
"activelockEL",
",",
"\"{DAV:}lockscope\"",
")",
"# Note: make sure `{DAV:}` is not handled as format tag:",
"etree",
".",
"SubElement",
"(",
"lockscopeEL",
",",
"\"{}{}\"",
".",
"format",
"(",
"\"{DAV:}\"",
",",
"lock",
"[",
"\"scope\"",
"]",
")",
")",
"etree",
".",
"SubElement",
"(",
"activelockEL",
",",
"\"{DAV:}depth\"",
")",
".",
"text",
"=",
"lock",
"[",
"\"depth\"",
"]",
"if",
"lock",
"[",
"\"owner\"",
"]",
":",
"# lock[\"owner\"] is an XML string",
"# owner may be empty (#64)",
"ownerEL",
"=",
"xml_tools",
".",
"string_to_xml",
"(",
"lock",
"[",
"\"owner\"",
"]",
")",
"activelockEL",
".",
"append",
"(",
"ownerEL",
")",
"timeout",
"=",
"lock",
"[",
"\"timeout\"",
"]",
"if",
"timeout",
"<",
"0",
":",
"timeout",
"=",
"\"Infinite\"",
"else",
":",
"# The time remaining on the lock",
"expire",
"=",
"lock",
"[",
"\"expire\"",
"]",
"timeout",
"=",
"\"Second-\"",
"+",
"str",
"(",
"int",
"(",
"expire",
"-",
"time",
".",
"time",
"(",
")",
")",
")",
"etree",
".",
"SubElement",
"(",
"activelockEL",
",",
"\"{DAV:}timeout\"",
")",
".",
"text",
"=",
"timeout",
"locktokenEL",
"=",
"etree",
".",
"SubElement",
"(",
"activelockEL",
",",
"\"{DAV:}locktoken\"",
")",
"etree",
".",
"SubElement",
"(",
"locktokenEL",
",",
"\"{DAV:}href\"",
")",
".",
"text",
"=",
"lock",
"[",
"\"token\"",
"]",
"# TODO: this is ugly:",
"# res.get_property_value(\"{DAV:}lockdiscovery\")",
"#",
"# lockRoot = self.get_href(self.provider.ref_url_to_path(lock[\"root\"]))",
"lockPath",
"=",
"self",
".",
"provider",
".",
"ref_url_to_path",
"(",
"lock",
"[",
"\"root\"",
"]",
")",
"lockRes",
"=",
"self",
".",
"provider",
".",
"get_resource_inst",
"(",
"lockPath",
",",
"self",
".",
"environ",
")",
"# FIXME: test for None",
"lockHref",
"=",
"lockRes",
".",
"get_href",
"(",
")",
"lockrootEL",
"=",
"etree",
".",
"SubElement",
"(",
"activelockEL",
",",
"\"{DAV:}lockroot\"",
")",
"etree",
".",
"SubElement",
"(",
"lockrootEL",
",",
"\"{DAV:}href\"",
")",
".",
"text",
"=",
"lockHref",
"return",
"lockdiscoveryEL",
"elif",
"lm",
"and",
"name",
"==",
"\"{DAV:}supportedlock\"",
":",
"# TODO: we return HTTP_NOT_FOUND if no lockmanager is present. Correct?",
"# TODO: the lockmanager should decide about it's features",
"supportedlockEL",
"=",
"etree",
".",
"Element",
"(",
"name",
")",
"lockentryEL",
"=",
"etree",
".",
"SubElement",
"(",
"supportedlockEL",
",",
"\"{DAV:}lockentry\"",
")",
"lockscopeEL",
"=",
"etree",
".",
"SubElement",
"(",
"lockentryEL",
",",
"\"{DAV:}lockscope\"",
")",
"etree",
".",
"SubElement",
"(",
"lockscopeEL",
",",
"\"{DAV:}exclusive\"",
")",
"locktypeEL",
"=",
"etree",
".",
"SubElement",
"(",
"lockentryEL",
",",
"\"{DAV:}locktype\"",
")",
"etree",
".",
"SubElement",
"(",
"locktypeEL",
",",
"\"{DAV:}write\"",
")",
"lockentryEL",
"=",
"etree",
".",
"SubElement",
"(",
"supportedlockEL",
",",
"\"{DAV:}lockentry\"",
")",
"lockscopeEL",
"=",
"etree",
".",
"SubElement",
"(",
"lockentryEL",
",",
"\"{DAV:}lockscope\"",
")",
"etree",
".",
"SubElement",
"(",
"lockscopeEL",
",",
"\"{DAV:}shared\"",
")",
"locktypeEL",
"=",
"etree",
".",
"SubElement",
"(",
"lockentryEL",
",",
"\"{DAV:}locktype\"",
")",
"etree",
".",
"SubElement",
"(",
"locktypeEL",
",",
"\"{DAV:}write\"",
")",
"return",
"supportedlockEL",
"elif",
"name",
".",
"startswith",
"(",
"\"{DAV:}\"",
")",
":",
"# Standard live property (raises HTTP_NOT_FOUND if not supported)",
"if",
"name",
"==",
"\"{DAV:}creationdate\"",
"and",
"self",
".",
"get_creation_date",
"(",
")",
"is",
"not",
"None",
":",
"# Note: uses RFC3339 format (ISO 8601)",
"return",
"util",
".",
"get_rfc3339_time",
"(",
"self",
".",
"get_creation_date",
"(",
")",
")",
"elif",
"name",
"==",
"\"{DAV:}getcontenttype\"",
"and",
"self",
".",
"get_content_type",
"(",
")",
"is",
"not",
"None",
":",
"return",
"self",
".",
"get_content_type",
"(",
")",
"elif",
"name",
"==",
"\"{DAV:}resourcetype\"",
":",
"if",
"self",
".",
"is_collection",
":",
"resourcetypeEL",
"=",
"etree",
".",
"Element",
"(",
"name",
")",
"etree",
".",
"SubElement",
"(",
"resourcetypeEL",
",",
"\"{DAV:}collection\"",
")",
"return",
"resourcetypeEL",
"return",
"\"\"",
"elif",
"(",
"name",
"==",
"\"{DAV:}getlastmodified\"",
"and",
"self",
".",
"get_last_modified",
"(",
")",
"is",
"not",
"None",
")",
":",
"# Note: uses RFC1123 format",
"return",
"util",
".",
"get_rfc1123_time",
"(",
"self",
".",
"get_last_modified",
"(",
")",
")",
"elif",
"(",
"name",
"==",
"\"{DAV:}getcontentlength\"",
"and",
"self",
".",
"get_content_length",
"(",
")",
"is",
"not",
"None",
")",
":",
"# Note: must be a numeric string",
"return",
"str",
"(",
"self",
".",
"get_content_length",
"(",
")",
")",
"elif",
"name",
"==",
"\"{DAV:}getetag\"",
"and",
"self",
".",
"get_etag",
"(",
")",
"is",
"not",
"None",
":",
"return",
"self",
".",
"get_etag",
"(",
")",
"elif",
"name",
"==",
"\"{DAV:}displayname\"",
"and",
"self",
".",
"get_display_name",
"(",
")",
"is",
"not",
"None",
":",
"return",
"self",
".",
"get_display_name",
"(",
")",
"# Unsupported, no persistence available, or property not found",
"raise",
"DAVError",
"(",
"HTTP_NOT_FOUND",
")",
"# Dead property",
"pm",
"=",
"self",
".",
"provider",
".",
"prop_manager",
"if",
"pm",
":",
"value",
"=",
"pm",
".",
"get_property",
"(",
"refUrl",
",",
"name",
",",
"self",
".",
"environ",
")",
"if",
"value",
"is",
"not",
"None",
":",
"return",
"xml_tools",
".",
"string_to_xml",
"(",
"value",
")",
"# No persistence available, or property not found",
"raise",
"DAVError",
"(",
"HTTP_NOT_FOUND",
")"
] | Return the value of a property.
name:
the property name in Clark notation.
return value:
may have different types, depending on the status:
- string or unicode: for standard property values.
- lxml.etree.Element: for complex values.
If the property is not available, a DAVError is raised.
This default implementation handles ``{DAV:}lockdiscovery`` and
``{DAV:}supportedlock`` using the associated lock manager.
All other *live* properties (i.e. name starts with ``{DAV:}``) are
delegated to the self.xxx() getters.
Finally, other properties are considered *dead*, and are handled by
the associated property manager. | [
"Return",
"the",
"value",
"of",
"a",
"property",
"."
] | python | valid |
singularityhub/singularity-cli | spython/image/cmd/create.py | https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/image/cmd/create.py#L11-L34 | def create(self,image_path, size=1024, sudo=False):
'''create will create a new image
Parameters
==========
image_path: full path to image
size: image size in MiB, default is 1024 MiB
filesystem: supported file systems ext3/ext4 (ext[2/3]: default ext3)
'''
from spython.utils import check_install
check_install()
cmd = self.init_command('image.create')
cmd = cmd + ['--size', str(size), image_path ]
output = self.run_command(cmd,sudo=sudo)
self.println(output)
if not os.path.exists(image_path):
bot.exit("Could not create image %s" %image_path)
return image_path | [
"def",
"create",
"(",
"self",
",",
"image_path",
",",
"size",
"=",
"1024",
",",
"sudo",
"=",
"False",
")",
":",
"from",
"spython",
".",
"utils",
"import",
"check_install",
"check_install",
"(",
")",
"cmd",
"=",
"self",
".",
"init_command",
"(",
"'image.create'",
")",
"cmd",
"=",
"cmd",
"+",
"[",
"'--size'",
",",
"str",
"(",
"size",
")",
",",
"image_path",
"]",
"output",
"=",
"self",
".",
"run_command",
"(",
"cmd",
",",
"sudo",
"=",
"sudo",
")",
"self",
".",
"println",
"(",
"output",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"image_path",
")",
":",
"bot",
".",
"exit",
"(",
"\"Could not create image %s\"",
"%",
"image_path",
")",
"return",
"image_path"
] | create will create a new image
Parameters
==========
image_path: full path to image
size: image size in MiB, default is 1024 MiB
filesystem: supported file systems ext3/ext4 (ext[2/3]: default ext3) | [
"create",
"will",
"create",
"a",
"a",
"new",
"image"
] | python | train |
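A hypothetical call through spython's client object; the import path and the image.create binding are assumptions here, mirroring `singularity image.create --size 2048 /tmp/base.img` on Singularity 2.x.

from spython.main import Client

Client.image.create('/tmp/base.img', size=2048)   # returns the image path on success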
sethmlarson/virtualbox-python | virtualbox/library.py | https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L14781-L14794 | def delete_guest_property(self, name):
"""Deletes an entry from the machine's guest property store.
in name of type str
The name of the property to delete.
raises :class:`VBoxErrorInvalidVmState`
Machine session is not open.
"""
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
self._call("deleteGuestProperty",
in_p=[name]) | [
"def",
"delete_guest_property",
"(",
"self",
",",
"name",
")",
":",
"if",
"not",
"isinstance",
"(",
"name",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"\"name can only be an instance of type basestring\"",
")",
"self",
".",
"_call",
"(",
"\"deleteGuestProperty\"",
",",
"in_p",
"=",
"[",
"name",
"]",
")"
] | Deletes an entry from the machine's guest property store.
in name of type str
The name of the property to delete.
raises :class:`VBoxErrorInvalidVmState`
Machine session is not open. | [
"Deletes",
"an",
"entry",
"from",
"the",
"machine",
"s",
"guest",
"property",
"store",
"."
] | python | train |
pandas-dev/pandas | pandas/core/groupby/groupby.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1984-L2088 | def _get_cythonized_result(self, how, grouper, aggregate=False,
cython_dtype=None, needs_values=False,
needs_mask=False, needs_ngroups=False,
result_is_index=False,
pre_processing=None, post_processing=None,
**kwargs):
"""
Get result for Cythonized functions.
Parameters
----------
how : str, Cythonized function name to be called
grouper : Grouper object containing pertinent group info
aggregate : bool, default False
Whether the result should be aggregated to match the number of
groups
cython_dtype : default None
Type of the array that will be modified by the Cython call. If
`None`, the type will be inferred from the values of each slice
needs_values : bool, default False
Whether the values should be a part of the Cython call
signature
needs_mask : bool, default False
Whether boolean mask needs to be part of the Cython call
signature
needs_ngroups : bool, default False
Whether number of groups is part of the Cython call signature
result_is_index : bool, default False
Whether the result of the Cython operation is an index of
values to be retrieved, instead of the actual values themselves
pre_processing : function, default None
Function to be applied to `values` prior to passing to Cython.
Function should return a tuple where the first element is the
values to be passed to Cython and the second element is an optional
type which the values should be converted to after being returned
by the Cython operation. Raises if `needs_values` is False.
post_processing : function, default None
Function to be applied to result of Cython function. Should accept
an array of values as the first argument and type inferences as its
second argument, i.e. the signature should be
(ndarray, Type).
**kwargs : dict
Extra arguments to be passed back to Cython funcs
Returns
-------
`Series` or `DataFrame` with filled values
"""
if result_is_index and aggregate:
raise ValueError("'result_is_index' and 'aggregate' cannot both "
"be True!")
if post_processing:
if not callable(pre_processing):
raise ValueError("'post_processing' must be a callable!")
if pre_processing:
if not callable(pre_processing):
raise ValueError("'pre_processing' must be a callable!")
if not needs_values:
raise ValueError("Cannot use 'pre_processing' without "
"specifying 'needs_values'!")
labels, _, ngroups = grouper.group_info
output = collections.OrderedDict()
base_func = getattr(libgroupby, how)
for name, obj in self._iterate_slices():
if aggregate:
result_sz = ngroups
else:
result_sz = len(obj.values)
if not cython_dtype:
cython_dtype = obj.values.dtype
result = np.zeros(result_sz, dtype=cython_dtype)
func = partial(base_func, result, labels)
inferences = None
if needs_values:
vals = obj.values
if pre_processing:
vals, inferences = pre_processing(vals)
func = partial(func, vals)
if needs_mask:
mask = isna(obj.values).view(np.uint8)
func = partial(func, mask)
if needs_ngroups:
func = partial(func, ngroups)
func(**kwargs) # Call func to modify indexer values in place
if result_is_index:
result = algorithms.take_nd(obj.values, result)
if post_processing:
result = post_processing(result, inferences)
output[name] = result
if aggregate:
return self._wrap_aggregated_output(output)
else:
return self._wrap_transformed_output(output) | [
"def",
"_get_cythonized_result",
"(",
"self",
",",
"how",
",",
"grouper",
",",
"aggregate",
"=",
"False",
",",
"cython_dtype",
"=",
"None",
",",
"needs_values",
"=",
"False",
",",
"needs_mask",
"=",
"False",
",",
"needs_ngroups",
"=",
"False",
",",
"result_is_index",
"=",
"False",
",",
"pre_processing",
"=",
"None",
",",
"post_processing",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"result_is_index",
"and",
"aggregate",
":",
"raise",
"ValueError",
"(",
"\"'result_is_index' and 'aggregate' cannot both \"",
"\"be True!\"",
")",
"if",
"post_processing",
":",
"if",
"not",
"callable",
"(",
"pre_processing",
")",
":",
"raise",
"ValueError",
"(",
"\"'post_processing' must be a callable!\"",
")",
"if",
"pre_processing",
":",
"if",
"not",
"callable",
"(",
"pre_processing",
")",
":",
"raise",
"ValueError",
"(",
"\"'pre_processing' must be a callable!\"",
")",
"if",
"not",
"needs_values",
":",
"raise",
"ValueError",
"(",
"\"Cannot use 'pre_processing' without \"",
"\"specifying 'needs_values'!\"",
")",
"labels",
",",
"_",
",",
"ngroups",
"=",
"grouper",
".",
"group_info",
"output",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"base_func",
"=",
"getattr",
"(",
"libgroupby",
",",
"how",
")",
"for",
"name",
",",
"obj",
"in",
"self",
".",
"_iterate_slices",
"(",
")",
":",
"if",
"aggregate",
":",
"result_sz",
"=",
"ngroups",
"else",
":",
"result_sz",
"=",
"len",
"(",
"obj",
".",
"values",
")",
"if",
"not",
"cython_dtype",
":",
"cython_dtype",
"=",
"obj",
".",
"values",
".",
"dtype",
"result",
"=",
"np",
".",
"zeros",
"(",
"result_sz",
",",
"dtype",
"=",
"cython_dtype",
")",
"func",
"=",
"partial",
"(",
"base_func",
",",
"result",
",",
"labels",
")",
"inferences",
"=",
"None",
"if",
"needs_values",
":",
"vals",
"=",
"obj",
".",
"values",
"if",
"pre_processing",
":",
"vals",
",",
"inferences",
"=",
"pre_processing",
"(",
"vals",
")",
"func",
"=",
"partial",
"(",
"func",
",",
"vals",
")",
"if",
"needs_mask",
":",
"mask",
"=",
"isna",
"(",
"obj",
".",
"values",
")",
".",
"view",
"(",
"np",
".",
"uint8",
")",
"func",
"=",
"partial",
"(",
"func",
",",
"mask",
")",
"if",
"needs_ngroups",
":",
"func",
"=",
"partial",
"(",
"func",
",",
"ngroups",
")",
"func",
"(",
"*",
"*",
"kwargs",
")",
"# Call func to modify indexer values in place",
"if",
"result_is_index",
":",
"result",
"=",
"algorithms",
".",
"take_nd",
"(",
"obj",
".",
"values",
",",
"result",
")",
"if",
"post_processing",
":",
"result",
"=",
"post_processing",
"(",
"result",
",",
"inferences",
")",
"output",
"[",
"name",
"]",
"=",
"result",
"if",
"aggregate",
":",
"return",
"self",
".",
"_wrap_aggregated_output",
"(",
"output",
")",
"else",
":",
"return",
"self",
".",
"_wrap_transformed_output",
"(",
"output",
")"
] | Get result for Cythonized functions.
Parameters
----------
how : str, Cythonized function name to be called
grouper : Grouper object containing pertinent group info
aggregate : bool, default False
Whether the result should be aggregated to match the number of
groups
cython_dtype : default None
Type of the array that will be modified by the Cython call. If
`None`, the type will be inferred from the values of each slice
needs_values : bool, default False
Whether the values should be a part of the Cython call
signature
needs_mask : bool, default False
Whether boolean mask needs to be part of the Cython call
signature
needs_ngroups : bool, default False
Whether number of groups is part of the Cython call signature
result_is_index : bool, default False
Whether the result of the Cython operation is an index of
values to be retrieved, instead of the actual values themselves
pre_processing : function, default None
Function to be applied to `values` prior to passing to Cython.
Function should return a tuple where the first element is the
values to be passed to Cython and the second element is an optional
type which the values should be converted to after being returned
by the Cython operation. Raises if `needs_values` is False.
post_processing : function, default None
Function to be applied to result of Cython function. Should accept
an array of values as the first argument and type inferences as its
second argument, i.e. the signature should be
(ndarray, Type).
**kwargs : dict
Extra arguments to be passed back to Cython funcs
Returns
-------
`Series` or `DataFrame` with filled values | [
"Get",
"result",
"for",
"Cythonized",
"functions",
"."
] | python | train |
saltstack/salt | salt/modules/salt_version.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/salt_version.py#L151-L170 | def _check_release_cmp(name):
'''
Helper function to compare release codename versions to the minion's current
Salt version.
If release codename isn't found, the function returns None. Otherwise, it
returns the results of the version comparison as documented by the
``version_cmp`` function in ``salt.utils.versions.py``.
'''
map_version = get_release_number(name)
if map_version is None:
log.info('Release codename %s was not found.', name)
return None
current_version = six.text_type(salt.version.SaltStackVersion(
*salt.version.__version_info__))
current_version = current_version.rsplit('.', 1)[0]
version_cmp = salt.utils.versions.version_cmp(map_version, current_version)
return version_cmp | [
"def",
"_check_release_cmp",
"(",
"name",
")",
":",
"map_version",
"=",
"get_release_number",
"(",
"name",
")",
"if",
"map_version",
"is",
"None",
":",
"log",
".",
"info",
"(",
"'Release codename %s was not found.'",
",",
"name",
")",
"return",
"None",
"current_version",
"=",
"six",
".",
"text_type",
"(",
"salt",
".",
"version",
".",
"SaltStackVersion",
"(",
"*",
"salt",
".",
"version",
".",
"__version_info__",
")",
")",
"current_version",
"=",
"current_version",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
"version_cmp",
"=",
"salt",
".",
"utils",
".",
"versions",
".",
"version_cmp",
"(",
"map_version",
",",
"current_version",
")",
"return",
"version_cmp"
] | Helper function to compare release codename versions to the minion's current
Salt version.
If release codename isn't found, the function returns None. Otherwise, it
returns the results of the version comparison as documented by the
``version_cmp`` function in ``salt.utils.versions.py``. | [
"Helper",
"function",
"to",
"compare",
"release",
"codename",
"versions",
"to",
"the",
"minion",
"s",
"current",
"Salt",
"version",
"."
] | python | train |
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_lag.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_lag.py#L327-L338 | def get_port_channel_detail_output_has_more(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_channel_detail = ET.Element("get_port_channel_detail")
config = get_port_channel_detail
output = ET.SubElement(get_port_channel_detail, "output")
has_more = ET.SubElement(output, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"get_port_channel_detail_output_has_more",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_port_channel_detail",
"=",
"ET",
".",
"Element",
"(",
"\"get_port_channel_detail\"",
")",
"config",
"=",
"get_port_channel_detail",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"get_port_channel_detail",
",",
"\"output\"",
")",
"has_more",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"has-more\"",
")",
"has_more",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'has_more'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
callowayproject/Transmogrify | transmogrify/geometry.py | https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/geometry.py#L130-L156 | def translate(self, val1, val2=None):
"""
Move to new (x + dx, y + dy).
accepts Point, (x, y), [x, y], int, float
"""
error = "Point.translate only accepts a Point, a tuple, a list, ints or floats."
if val1 and val2:
if isinstance(val1, (int, float)) and isinstance(val2, (int, float)):
self.x += val1
self.y += val2
else:
raise ValueError(error)
elif val1 and val2 is None:
if isinstance(val1, (tuple, list)):
self.x += val1[0]
self.y += val1[1]
elif isinstance(val1, (int, float)):
self.x += val1
self.y += val1
elif isinstance(val1, Point):
self.x += val1.x
self.y += val1.y
else:
raise ValueError(error)
else:
raise ValueError(error) | [
"def",
"translate",
"(",
"self",
",",
"val1",
",",
"val2",
"=",
"None",
")",
":",
"error",
"=",
"\"Point.translate only accepts a Point, a tuple, a list, ints or floats.\"",
"if",
"val1",
"and",
"val2",
":",
"if",
"isinstance",
"(",
"val1",
",",
"(",
"int",
",",
"float",
")",
")",
"and",
"isinstance",
"(",
"val2",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"self",
".",
"x",
"+=",
"val1",
"self",
".",
"y",
"+=",
"val2",
"else",
":",
"raise",
"ValueError",
"(",
"error",
")",
"elif",
"val1",
"and",
"val2",
"is",
"None",
":",
"if",
"isinstance",
"(",
"val1",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"self",
".",
"x",
"+=",
"val1",
"[",
"0",
"]",
"self",
".",
"y",
"+=",
"val1",
"[",
"1",
"]",
"elif",
"isinstance",
"(",
"val1",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"self",
".",
"x",
"+=",
"val1",
"self",
".",
"y",
"+=",
"val1",
"elif",
"isinstance",
"(",
"val1",
",",
"Point",
")",
":",
"self",
".",
"x",
"+=",
"val1",
".",
"x",
"self",
".",
"y",
"+=",
"val1",
".",
"y",
"else",
":",
"raise",
"ValueError",
"(",
"error",
")",
"else",
":",
"raise",
"ValueError",
"(",
"error",
")"
] | Move to new (x + dx, y + dy).
accepts Point, (x, y), [x, y], int, float | [
"Move",
"to",
"new",
"(",
"x",
"+",
"dx",
"y",
"+",
"dy",
")",
"."
] | python | train |
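The branches above accept several argument shapes; a Point(x, y) constructor is assumed for the illustration. Note that zero offsets fall through to the error branch because the checks rely on truthiness rather than `is not None`.

p = Point(10, 20)
p.translate(5, 5)            # two numbers            -> (15, 25)
p.translate((1, 2))          # tuple or list          -> (16, 27)
p.translate(3)               # one number, both axes  -> (19, 30)
p.translate(Point(-4, -5))   # another Point          -> (15, 25)
# p.translate("x") raises ValueError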
HydraChain/hydrachain | hydrachain/native_contracts.py | https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/native_contracts.py#L215-L219 | def abi_encode_args(method, args):
"encode args for method: method_id|data"
assert issubclass(method.im_class, NativeABIContract), method.im_class
m_abi = method.im_class._get_method_abi(method)
return zpad(encode_int(m_abi['id']), 4) + abi.encode_abi(m_abi['arg_types'], args) | [
"def",
"abi_encode_args",
"(",
"method",
",",
"args",
")",
":",
"assert",
"issubclass",
"(",
"method",
".",
"im_class",
",",
"NativeABIContract",
")",
",",
"method",
".",
"im_class",
"m_abi",
"=",
"method",
".",
"im_class",
".",
"_get_method_abi",
"(",
"method",
")",
"return",
"zpad",
"(",
"encode_int",
"(",
"m_abi",
"[",
"'id'",
"]",
")",
",",
"4",
")",
"+",
"abi",
".",
"encode_abi",
"(",
"m_abi",
"[",
"'arg_types'",
"]",
",",
"args",
")"
] | encode args for method: method_id|data | [
"encode",
"args",
"for",
"method",
":",
"method_id|data"
] | python | test |
ambitioninc/django-entity | entity/sync.py | https://github.com/ambitioninc/django-entity/blob/ebc61f34313c52f4ef5819eb1da25b2ad837e80c/entity/sync.py#L59-L91 | def defer_entity_syncing(wrapped, instance, args, kwargs):
"""
A decorator that can be used to defer the syncing of entities until after the method has been run
This is being introduced to help avoid deadlocks in the meantime as we attempt to better understand
why they are happening
"""
# Defer entity syncing while we run our method
sync_entities.defer = True
# Run the method
try:
return wrapped(*args, **kwargs)
# After we run the method disable the deferred syncing
# and sync all the entities that have been buffered to be synced
finally:
# Enable entity syncing again
sync_entities.defer = False
# Get the models that need to be synced
model_objs = list(sync_entities.buffer.values())
# If none is in the model objects we need to sync all
if None in sync_entities.buffer:
model_objs = list()
# Sync the entities that were deferred if any
if len(sync_entities.buffer):
sync_entities(*model_objs)
# Clear the buffer
sync_entities.buffer = {} | [
"def",
"defer_entity_syncing",
"(",
"wrapped",
",",
"instance",
",",
"args",
",",
"kwargs",
")",
":",
"# Defer entity syncing while we run our method",
"sync_entities",
".",
"defer",
"=",
"True",
"# Run the method",
"try",
":",
"return",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# After we run the method disable the deferred syncing",
"# and sync all the entities that have been buffered to be synced",
"finally",
":",
"# Enable entity syncing again",
"sync_entities",
".",
"defer",
"=",
"False",
"# Get the models that need to be synced",
"model_objs",
"=",
"list",
"(",
"sync_entities",
".",
"buffer",
".",
"values",
"(",
")",
")",
"# If none is in the model objects we need to sync all",
"if",
"None",
"in",
"sync_entities",
".",
"buffer",
":",
"model_objs",
"=",
"list",
"(",
")",
"# Sync the entities that were deferred if any",
"if",
"len",
"(",
"sync_entities",
".",
"buffer",
")",
":",
"sync_entities",
"(",
"*",
"model_objs",
")",
"# Clear the buffer",
"sync_entities",
".",
"buffer",
"=",
"{",
"}"
] | A decorator that can be used to defer the syncing of entities until after the method has been run
This is being introduced to help avoid deadlocks in the meantime as we attempt to better understand
why they are happening | [
"A",
"decorator",
"that",
"can",
"be",
"used",
"to",
"defer",
"the",
"syncing",
"of",
"entities",
"until",
"after",
"the",
"method",
"has",
"been",
"run",
"This",
"is",
"being",
"introduced",
"to",
"help",
"avoid",
"deadlocks",
"in",
"the",
"meantime",
"as",
"we",
"attempt",
"to",
"better",
"understand",
"why",
"they",
"are",
"happening"
] | python | train |
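The (wrapped, instance, args, kwargs) signature in the record above suggests the function is exposed through a wrapt-style decorator. A usage sketch under that assumption, where each save() would normally trigger an entity sync:

from entity.sync import defer_entity_syncing  # assumed to be importable as a decorator

@defer_entity_syncing
def bulk_import(accounts):
    # saves below are buffered; sync_entities runs once when the function returns
    for account in accounts:
        account.save()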
i3visio/entify | entify/lib/patterns/dni.py | https://github.com/i3visio/entify/blob/51c5b89cebee3a39d44d0918e2798739361f337c/entify/lib/patterns/dni.py#L42-L71 | def isValidExp(self, exp):
'''
Method to verify if a given expression is correct just in case the used regular expression needs additional processing to verify this fact.$
This method will be overwritten when necessary.
:param exp: Expression to verify.
:return: True | False
'''
# order of the letters depending on which is the mod of the number
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
order = ['T', 'R', 'W', 'A', 'G', 'M', 'Y', 'F', 'P', 'D', 'X', 'B', 'N', 'J', 'Z', 'S', 'Q', 'V', 'H', 'L', 'C', 'K', 'E', 'T']
#print exp
l = exp[len(exp)-1]
try:
# verifying if it is an 8-length number
number = int(exp[0:7])
except:
try:
# verifying if it is a 7-length number
number = int(exp[0:6])
except:
# not a valid number
pass
if l == order[number%23]:
return True
else:
return False | [
"def",
"isValidExp",
"(",
"self",
",",
"exp",
")",
":",
"# order of the letters depending on which is the mod of the number",
"# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23",
"order",
"=",
"[",
"'T'",
",",
"'R'",
",",
"'W'",
",",
"'A'",
",",
"'G'",
",",
"'M'",
",",
"'Y'",
",",
"'F'",
",",
"'P'",
",",
"'D'",
",",
"'X'",
",",
"'B'",
",",
"'N'",
",",
"'J'",
",",
"'Z'",
",",
"'S'",
",",
"'Q'",
",",
"'V'",
",",
"'H'",
",",
"'L'",
",",
"'C'",
",",
"'K'",
",",
"'E'",
",",
"'T'",
"]",
"#print exp",
"l",
"=",
"exp",
"[",
"len",
"(",
"exp",
")",
"-",
"1",
"]",
"try",
":",
"# verifying if it is an 8-length number",
"number",
"=",
"int",
"(",
"exp",
"[",
"0",
":",
"7",
"]",
")",
"except",
":",
"try",
":",
"# verifying if it is a 7-length number",
"number",
"=",
"int",
"(",
"exp",
"[",
"0",
":",
"6",
"]",
")",
"except",
":",
"# not a valid number",
"pass",
"if",
"l",
"==",
"order",
"[",
"number",
"%",
"23",
"]",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | Method to verify if a given expression is correct just in case the used regular expression needs additional processing to verify this fact.$
This method will be overwritten when necessary.
:param exp: Expression to verify.
:return: True | False | [
"Method",
"to",
"verify",
"if",
"a",
"given",
"expression",
"is",
"correct",
"just",
"in",
"case",
"the",
"used",
"regular",
"expression",
"needs",
"additional",
"processing",
"to",
"verify",
"this",
"fact",
".",
"$",
"This",
"method",
"will",
"be",
"overwritten",
"when",
"necessary",
"."
] | python | train |
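isValidExp in the record above applies the Spanish DNI control-letter rule: the trailing letter must match a fixed 23-letter table indexed by the numeric part modulo 23. A standalone sketch of the same rule, independent of the entify pattern class and computed over the full digit string:

ORDER = "TRWAGMYFPDXBNJZSQVHLCKE"  # 23 control letters, indexed by number % 23

def dni_letter_is_valid(dni):
    digits, letter = dni[:-1], dni[-1].upper()
    return digits.isdigit() and ORDER[int(digits) % 23] == letter

print(dni_letter_is_valid("12345678Z"))  # True: 12345678 % 23 == 14 -> 'Z'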
Falkonry/falkonry-python-client | falkonryclient/service/falkonry.py | https://github.com/Falkonry/falkonry-python-client/blob/0aeb2b00293ee94944f1634e9667401b03da29c1/falkonryclient/service/falkonry.py#L37-L45 | def get_datastreams(self):
"""
To get list of Datastream
"""
datastreams = []
response = self.http.get('/Datastream')
for datastream in response:
datastreams.append(Schemas.Datastream(datastream=datastream))
return datastreams | [
"def",
"get_datastreams",
"(",
"self",
")",
":",
"datastreams",
"=",
"[",
"]",
"response",
"=",
"self",
".",
"http",
".",
"get",
"(",
"'/Datastream'",
")",
"for",
"datastream",
"in",
"response",
":",
"datastreams",
".",
"append",
"(",
"Schemas",
".",
"Datastream",
"(",
"datastream",
"=",
"datastream",
")",
")",
"return",
"datastreams"
] | To get list of Datastream | [
"To",
"get",
"list",
"of",
"Datastream"
] | python | train |
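A usage sketch for the listing call in the record above. The import path and client constructor are assumptions based on the falkonryclient package name in the record, and the host and token are placeholders:

from falkonryclient import client as Falkonry  # assumed import path

falkonry = Falkonry('https://example.falkonry.ai', 'auth-token')  # placeholder host and token
for ds in falkonry.get_datastreams():
    print(ds)  # each entry is a Schemas.Datastream wrapper built from one response item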
markovmodel/PyEMMA | pyemma/coordinates/data/util/traj_info_backends.py | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/data/util/traj_info_backends.py#L259-L288 | def _update_time_stamp(self, hash_value):
""" timestamps are being stored distributed over several lru databases.
The timestamp is a time.time() snapshot (float), which are seconds since epoch."""
db_name = self._database_from_key(hash_value)
if not db_name:
db_name=':memory:'
def _update():
import sqlite3
try:
with sqlite3.connect(db_name, timeout=self.lru_timeout) as conn:
""" last_read is a result of time.time()"""
conn.execute('CREATE TABLE IF NOT EXISTS usage '
'(hash VARCHAR(32), last_read FLOAT)')
conn.commit()
cur = conn.execute('select * from usage where hash=?', (hash_value,))
row = cur.fetchone()
if not row:
conn.execute("insert into usage(hash, last_read) values(?, ?)", (hash_value, time.time()))
else:
conn.execute("update usage set last_read=? where hash=?", (time.time(), hash_value))
conn.commit()
except sqlite3.OperationalError:
# if there are many jobs to write to same database at same time, the timeout could be hit
logger.debug('could not update LRU info for db %s', db_name)
# this could lead to another (rare) race condition during cleaning...
#import threading
#threading.Thread(target=_update).start()
_update() | [
"def",
"_update_time_stamp",
"(",
"self",
",",
"hash_value",
")",
":",
"db_name",
"=",
"self",
".",
"_database_from_key",
"(",
"hash_value",
")",
"if",
"not",
"db_name",
":",
"db_name",
"=",
"':memory:'",
"def",
"_update",
"(",
")",
":",
"import",
"sqlite3",
"try",
":",
"with",
"sqlite3",
".",
"connect",
"(",
"db_name",
",",
"timeout",
"=",
"self",
".",
"lru_timeout",
")",
"as",
"conn",
":",
"\"\"\" last_read is a result of time.time()\"\"\"",
"conn",
".",
"execute",
"(",
"'CREATE TABLE IF NOT EXISTS usage '",
"'(hash VARCHAR(32), last_read FLOAT)'",
")",
"conn",
".",
"commit",
"(",
")",
"cur",
"=",
"conn",
".",
"execute",
"(",
"'select * from usage where hash=?'",
",",
"(",
"hash_value",
",",
")",
")",
"row",
"=",
"cur",
".",
"fetchone",
"(",
")",
"if",
"not",
"row",
":",
"conn",
".",
"execute",
"(",
"\"insert into usage(hash, last_read) values(?, ?)\"",
",",
"(",
"hash_value",
",",
"time",
".",
"time",
"(",
")",
")",
")",
"else",
":",
"conn",
".",
"execute",
"(",
"\"update usage set last_read=? where hash=?\"",
",",
"(",
"time",
".",
"time",
"(",
")",
",",
"hash_value",
")",
")",
"conn",
".",
"commit",
"(",
")",
"except",
"sqlite3",
".",
"OperationalError",
":",
"# if there are many jobs to write to same database at same time, the timeout could be hit",
"logger",
".",
"debug",
"(",
"'could not update LRU info for db %s'",
",",
"db_name",
")",
"# this could lead to another (rare) race condition during cleaning...",
"#import threading",
"#threading.Thread(target=_update).start()",
"_update",
"(",
")"
] | timestamps are being stored distributed over several lru databases.
The timestamp is a time.time() snapshot (float), which are seconds since epoch. | [
"timestamps",
"are",
"being",
"stored",
"distributed",
"over",
"several",
"lru",
"databases",
".",
"The",
"timestamp",
"is",
"a",
"time",
".",
"time",
"()",
"snapshot",
"(",
"float",
")",
"which",
"are",
"seconds",
"since",
"epoch",
"."
] | python | train |
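_update_time_stamp in the record above records a per-hash last-read timestamp in a small SQLite table, creating the table on first use and inserting or updating a row. A standalone sketch of the same upsert pattern outside the PyEMMA class; the database name and timeout are arbitrary:

import sqlite3
import time

def touch_usage(db_name, hash_value, timeout=5.0):
    # the connection context manager commits on success, so no explicit commit is needed
    with sqlite3.connect(db_name, timeout=timeout) as conn:
        conn.execute('CREATE TABLE IF NOT EXISTS usage '
                     '(hash VARCHAR(32), last_read FLOAT)')
        row = conn.execute('SELECT * FROM usage WHERE hash=?', (hash_value,)).fetchone()
        if row is None:
            conn.execute('INSERT INTO usage(hash, last_read) VALUES (?, ?)',
                         (hash_value, time.time()))
        else:
            conn.execute('UPDATE usage SET last_read=? WHERE hash=?',
                         (time.time(), hash_value))

touch_usage(':memory:', 'abc123')  # in-memory database just for illustration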
exa-analytics/exa | exa/core/editor.py | https://github.com/exa-analytics/exa/blob/40fb3c22b531d460dbc51e603de75b856cc28f0d/exa/core/editor.py#L435-L452 | def lines_from_file(path, as_interned=False, encoding=None):
"""
Create a list of file lines from a given filepath.
Args:
path (str): File path
as_interned (bool): List of "interned" strings (default False)
Returns:
strings (list): File line list
"""
lines = None
with io.open(path, encoding=encoding) as f:
if as_interned:
lines = [sys.intern(line) for line in f.read().splitlines()]
else:
lines = f.read().splitlines()
return lines | [
"def",
"lines_from_file",
"(",
"path",
",",
"as_interned",
"=",
"False",
",",
"encoding",
"=",
"None",
")",
":",
"lines",
"=",
"None",
"with",
"io",
".",
"open",
"(",
"path",
",",
"encoding",
"=",
"encoding",
")",
"as",
"f",
":",
"if",
"as_interned",
":",
"lines",
"=",
"[",
"sys",
".",
"intern",
"(",
"line",
")",
"for",
"line",
"in",
"f",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"]",
"else",
":",
"lines",
"=",
"f",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"return",
"lines"
] | Create a list of file lines from a given filepath.
Args:
path (str): File path
as_interned (bool): List of "interned" strings (default False)
Returns:
strings (list): File line list | [
"Create",
"a",
"list",
"of",
"file",
"lines",
"from",
"a",
"given",
"filepath",
"."
] | python | train |
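A usage sketch for the helper in the record above, assuming it is importable from exa.core.editor as the record path indicates; the file name is a placeholder:

from exa.core.editor import lines_from_file  # assumed import location

lines = lines_from_file('input.txt', as_interned=True, encoding='utf-8')
print(len(lines), lines[:3])  # line count and the first few entries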