repo (stringlengths 7-54) | path (stringlengths 4-192) | url (stringlengths 87-284) | code (stringlengths 78-104k) | code_tokens (sequence) | docstring (stringlengths 1-46.9k) | docstring_tokens (sequence) | language (stringclasses, 1 value) | partition (stringclasses, 3 values) |
---|---|---|---|---|---|---|---|---|
gagneurlab/concise | concise/hyopt.py | https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L166-L172 | def count_by_state_unsynced(self, arg):
"""Extends the original object in order to inject checking
for stalled jobs and killing them if they are running for too long
"""
if self.kill_timeout is not None:
self.delete_running(self.kill_timeout)
return super(CMongoTrials, self).count_by_state_unsynced(arg) | [
"def",
"count_by_state_unsynced",
"(",
"self",
",",
"arg",
")",
":",
"if",
"self",
".",
"kill_timeout",
"is",
"not",
"None",
":",
"self",
".",
"delete_running",
"(",
"self",
".",
"kill_timeout",
")",
"return",
"super",
"(",
"CMongoTrials",
",",
"self",
")",
".",
"count_by_state_unsynced",
"(",
"arg",
")"
] | Extends the original object in order to inject checking
for stalled jobs and killing them if they are running for too long | [
"Extends",
"the",
"original",
"object",
"in",
"order",
"to",
"inject",
"checking",
"for",
"stalled",
"jobs",
"and",
"killing",
"them",
"if",
"they",
"are",
"running",
"for",
"too",
"long"
] | python | train |
materialsproject/pymatgen | pymatgen/io/feff/outputs.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/feff/outputs.py#L378-L386 | def material_formula(self):
"""
Returns chemical formula of material from feff.inp file
"""
try:
form = self.header.formula
except IndexError:
form = 'No formula provided'
return "".join(map(str, form)) | [
"def",
"material_formula",
"(",
"self",
")",
":",
"try",
":",
"form",
"=",
"self",
".",
"header",
".",
"formula",
"except",
"IndexError",
":",
"form",
"=",
"'No formula provided'",
"return",
"\"\"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"form",
")",
")"
] | Returns chemical formula of material from feff.inp file | [
"Returns",
"chemical",
"formula",
"of",
"material",
"from",
"feff",
".",
"inp",
"file"
] | python | train |
StagPython/StagPy | stagpy/processing.py | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/processing.py#L241-L255 | def energy_prof(step):
"""Energy flux.
This computation takes sphericity into account if necessary.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the energy flux and the radial position
at which it is evaluated.
"""
diff, rad = diffs_prof(step)
adv, _ = advts_prof(step)
return (diff + np.append(adv, 0)), rad | [
"def",
"energy_prof",
"(",
"step",
")",
":",
"diff",
",",
"rad",
"=",
"diffs_prof",
"(",
"step",
")",
"adv",
",",
"_",
"=",
"advts_prof",
"(",
"step",
")",
"return",
"(",
"diff",
"+",
"np",
".",
"append",
"(",
"adv",
",",
"0",
")",
")",
",",
"rad"
] | Energy flux.
This computation takes sphericity into account if necessary.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the energy flux and the radial position
at which it is evaluated. | [
"Energy",
"flux",
"."
] | python | train |
pinax/pinax-messages | pinax/messages/models.py | https://github.com/pinax/pinax-messages/blob/8403bf95ee9b36cbe8ea0bb712e5ef75ba898746/pinax/messages/models.py#L98-L111 | def new_message(cls, from_user, to_users, subject, content):
"""
Create a new Message and Thread.
Mark thread as unread for all recipients, and
mark thread as read and deleted from inbox by creator.
"""
thread = Thread.objects.create(subject=subject)
for user in to_users:
thread.userthread_set.create(user=user, deleted=False, unread=True)
thread.userthread_set.create(user=from_user, deleted=True, unread=False)
msg = cls.objects.create(thread=thread, sender=from_user, content=content)
message_sent.send(sender=cls, message=msg, thread=thread, reply=False)
return msg | [
"def",
"new_message",
"(",
"cls",
",",
"from_user",
",",
"to_users",
",",
"subject",
",",
"content",
")",
":",
"thread",
"=",
"Thread",
".",
"objects",
".",
"create",
"(",
"subject",
"=",
"subject",
")",
"for",
"user",
"in",
"to_users",
":",
"thread",
".",
"userthread_set",
".",
"create",
"(",
"user",
"=",
"user",
",",
"deleted",
"=",
"False",
",",
"unread",
"=",
"True",
")",
"thread",
".",
"userthread_set",
".",
"create",
"(",
"user",
"=",
"from_user",
",",
"deleted",
"=",
"True",
",",
"unread",
"=",
"False",
")",
"msg",
"=",
"cls",
".",
"objects",
".",
"create",
"(",
"thread",
"=",
"thread",
",",
"sender",
"=",
"from_user",
",",
"content",
"=",
"content",
")",
"message_sent",
".",
"send",
"(",
"sender",
"=",
"cls",
",",
"message",
"=",
"msg",
",",
"thread",
"=",
"thread",
",",
"reply",
"=",
"False",
")",
"return",
"msg"
] | Create a new Message and Thread.
Mark thread as unread for all recipients, and
mark thread as read and deleted from inbox by creator. | [
"Create",
"a",
"new",
"Message",
"and",
"Thread",
"."
] | python | train |
gamechanger/schemer | schemer/validators.py | https://github.com/gamechanger/schemer/blob/1d1dd7da433d3b84ce5a80ded5a84ab4a65825ee/schemer/validators.py#L108-L129 | def is_email():
"""
Validates that a fields value is a valid email address.
"""
email = (
ur'(?!^\.)' # No dot at start
ur'(?!.*\.@)' # No dot before at sign
ur'(?!.*@\.)' # No dot after at sign
ur'(?!.*\.$)' # No dot at the end
ur'(?!.*\.\.)' # No double dots anywhere
ur'^\S+' # Starts with one or more non-whitespace characters
ur'@' # Contains an at sign
ur'\S+$' # Ends with one or more non-whitespace characters
)
regex = re.compile(email, re.IGNORECASE | re.UNICODE)
def validate(value):
if not regex.match(value):
return e("{} is not a valid email address", value)
return validate | [
"def",
"is_email",
"(",
")",
":",
"email",
"=",
"(",
"ur'(?!^\\.)'",
"# No dot at start",
"ur'(?!.*\\.@)'",
"# No dot before at sign",
"ur'(?!.*@\\.)'",
"# No dot after at sign",
"ur'(?!.*\\.$)'",
"# No dot at the end",
"ur'(?!.*\\.\\.)'",
"# No double dots anywhere",
"ur'^\\S+'",
"# Starts with one or more non-whitespace characters",
"ur'@'",
"# Contains an at sign",
"ur'\\S+$'",
"# Ends with one or more non-whitespace characters",
")",
"regex",
"=",
"re",
".",
"compile",
"(",
"email",
",",
"re",
".",
"IGNORECASE",
"|",
"re",
".",
"UNICODE",
")",
"def",
"validate",
"(",
"value",
")",
":",
"if",
"not",
"regex",
".",
"match",
"(",
"value",
")",
":",
"return",
"e",
"(",
"\"{} is not a valid email address\"",
",",
"value",
")",
"return",
"validate"
] | Validates that a fields value is a valid email address. | [
"Validates",
"that",
"a",
"fields",
"value",
"is",
"a",
"valid",
"email",
"address",
"."
] | python | train |
ubccr/pinky | pinky/canonicalization/traverse.py | https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/canonicalization/traverse.py#L122-L163 | def draw(molecule, TraversalType=SmilesTraversal):
"""(molecule)->canonical representation of a molecule
Well, it's only canonical if the atom symorders are
canonical, otherwise it's arbitrary.
atoms must have a symorder attribute
bonds must have a equiv_class attribute"""
result = []
atoms = allAtoms = molecule.atoms
visitedAtoms = {}
#
# Traverse all components of the graph to form
# the output string
while atoms:
atom = _get_lowest_symorder(atoms)
visitedAtoms[atom] = 1
visitedBonds = {}
nextTraverse = TraversalType()
atomsUsed, bondsUsed = [], []
_traverse(atom, nextTraverse, None,
visitedAtoms, visitedBonds,
atomsUsed, bondsUsed, TraversalType)
atoms = []
for atom in allAtoms:
if not visitedAtoms.has_key(atom):
atoms.append(atom)
assert nextTraverse.atoms == atomsUsed
assert nextTraverse.bonds == bondsUsed, "%s %s"%(
nextTraverse.bonds, bondsUsed)
result.append((str(nextTraverse),
atomsUsed, bondsUsed))
result.sort()
fragments = []
for r in result:
fragments.append(r[0])
return ".".join(fragments), result | [
"def",
"draw",
"(",
"molecule",
",",
"TraversalType",
"=",
"SmilesTraversal",
")",
":",
"result",
"=",
"[",
"]",
"atoms",
"=",
"allAtoms",
"=",
"molecule",
".",
"atoms",
"visitedAtoms",
"=",
"{",
"}",
"#",
"# Traverse all components of the graph to form",
"# the output string",
"while",
"atoms",
":",
"atom",
"=",
"_get_lowest_symorder",
"(",
"atoms",
")",
"visitedAtoms",
"[",
"atom",
"]",
"=",
"1",
"visitedBonds",
"=",
"{",
"}",
"nextTraverse",
"=",
"TraversalType",
"(",
")",
"atomsUsed",
",",
"bondsUsed",
"=",
"[",
"]",
",",
"[",
"]",
"_traverse",
"(",
"atom",
",",
"nextTraverse",
",",
"None",
",",
"visitedAtoms",
",",
"visitedBonds",
",",
"atomsUsed",
",",
"bondsUsed",
",",
"TraversalType",
")",
"atoms",
"=",
"[",
"]",
"for",
"atom",
"in",
"allAtoms",
":",
"if",
"not",
"visitedAtoms",
".",
"has_key",
"(",
"atom",
")",
":",
"atoms",
".",
"append",
"(",
"atom",
")",
"assert",
"nextTraverse",
".",
"atoms",
"==",
"atomsUsed",
"assert",
"nextTraverse",
".",
"bonds",
"==",
"bondsUsed",
",",
"\"%s %s\"",
"%",
"(",
"nextTraverse",
".",
"bonds",
",",
"bondsUsed",
")",
"result",
".",
"append",
"(",
"(",
"str",
"(",
"nextTraverse",
")",
",",
"atomsUsed",
",",
"bondsUsed",
")",
")",
"result",
".",
"sort",
"(",
")",
"fragments",
"=",
"[",
"]",
"for",
"r",
"in",
"result",
":",
"fragments",
".",
"append",
"(",
"r",
"[",
"0",
"]",
")",
"return",
"\".\"",
".",
"join",
"(",
"fragments",
")",
",",
"result"
] | (molecule)->canonical representation of a molecule
Well, it's only canonical if the atom symorders are
canonical, otherwise it's arbitrary.
atoms must have a symorder attribute
bonds must have a equiv_class attribute | [
"(",
"molecule",
")",
"-",
">",
"canonical",
"representation",
"of",
"a",
"molecule",
"Well",
"it",
"s",
"only",
"canonical",
"if",
"the",
"atom",
"symorders",
"are",
"canonical",
"otherwise",
"it",
"s",
"arbitrary",
"."
] | python | train |
cga-harvard/Hypermap-Registry | hypermap/aggregator/views.py | https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/views.py#L45-L58 | def serialize_checks(check_set):
"""
Serialize a check_set for raphael
"""
check_set_list = []
for check in check_set.all()[:25]:
check_set_list.append(
{
'datetime': check.checked_datetime.isoformat(),
'value': check.response_time,
'success': 1 if check.success else 0
}
)
return check_set_list | [
"def",
"serialize_checks",
"(",
"check_set",
")",
":",
"check_set_list",
"=",
"[",
"]",
"for",
"check",
"in",
"check_set",
".",
"all",
"(",
")",
"[",
":",
"25",
"]",
":",
"check_set_list",
".",
"append",
"(",
"{",
"'datetime'",
":",
"check",
".",
"checked_datetime",
".",
"isoformat",
"(",
")",
",",
"'value'",
":",
"check",
".",
"response_time",
",",
"'success'",
":",
"1",
"if",
"check",
".",
"success",
"else",
"0",
"}",
")",
"return",
"check_set_list"
] | Serialize a check_set for raphael | [
"Serialize",
"a",
"check_set",
"for",
"raphael"
] | python | train |
draios/python-sdc-client | sdcclient/_secure.py | https://github.com/draios/python-sdc-client/blob/47f83415842048778939b90944f64386a3bcb205/sdcclient/_secure.py#L428-L462 | def get_policy_events_id_range(self, id, from_sec, to_sec, sampling=None, aggregations=None, scope_filter=None, event_filter=None):
'''**Description**
Fetch all policy events with id that occurred in the time range [from_sec:to_sec]. This method is used in conjunction
with :func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide paginated access to policy events.
**Arguments**
- id: the id of the policy events to fetch.
- from_sec: the start of the timerange for which to get events
- end_sec: the end of the timerange for which to get events
- sampling: sample all policy events using *sampling* interval.
- scope_filter: this is a SysdigMonitor-like filter (e.g 'container.image=ubuntu'). When provided, events are filtered by their scope, so only a subset will be returned (e.g. 'container.image=ubuntu' will provide only events that have happened on an ubuntu container).
- event_filter: this is a SysdigMonitor-like filter (e.g. policyEvent.policyId=3). When provided, events are filtered by some of their properties. Currently the supported set of filters is policyEvent.all(which can be used just with matches, policyEvent.policyId, policyEvent.id, policyEvent.severity, policyEvent.ruleTye, policyEvent.ruleSubtype.
- aggregations: When present it specifies how to aggregate events (sampling does not need to be specified, because when it's present it automatically means events will be aggregated). This field can either be a list of scope metrics or a list of policyEvents fields but (currently) not a mix of the two. When policy events fields are specified, only these can be used= severity, agentId, containerId, policyId, ruleType.
**Success Return Value**
An array containing:
- A context object that should be passed to later calls to get_more_policy_events.
- An array of policy events, in JSON format. See :func:`~sdcclient.SdSecureClient.get_more_policy_events`
for details on the contents of policy events.
**Example**
`examples/get_secure_policy_events.py <https://github.com/draios/python-sdc-client/blob/master/examples/get_secure_policy_events.py>`_
'''
options = {"id": id,
"from": int(from_sec) * 1000000,
"to": int(to_sec) * 1000000,
"offset": 0,
"limit": 1000,
"sampling": sampling,
"aggregations": aggregations,
"scopeFilter": scope_filter,
"eventFilter": event_filter}
ctx = {k: v for k, v in options.items() if v is not None}
return self._get_policy_events_int(ctx) | [
"def",
"get_policy_events_id_range",
"(",
"self",
",",
"id",
",",
"from_sec",
",",
"to_sec",
",",
"sampling",
"=",
"None",
",",
"aggregations",
"=",
"None",
",",
"scope_filter",
"=",
"None",
",",
"event_filter",
"=",
"None",
")",
":",
"options",
"=",
"{",
"\"id\"",
":",
"id",
",",
"\"from\"",
":",
"int",
"(",
"from_sec",
")",
"*",
"1000000",
",",
"\"to\"",
":",
"int",
"(",
"to_sec",
")",
"*",
"1000000",
",",
"\"offset\"",
":",
"0",
",",
"\"limit\"",
":",
"1000",
",",
"\"sampling\"",
":",
"sampling",
",",
"\"aggregations\"",
":",
"aggregations",
",",
"\"scopeFilter\"",
":",
"scope_filter",
",",
"\"eventFilter\"",
":",
"event_filter",
"}",
"ctx",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"options",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"None",
"}",
"return",
"self",
".",
"_get_policy_events_int",
"(",
"ctx",
")"
] | **Description**
Fetch all policy events with id that occurred in the time range [from_sec:to_sec]. This method is used in conjunction
with :func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide paginated access to policy events.
**Arguments**
- id: the id of the policy events to fetch.
- from_sec: the start of the timerange for which to get events
- end_sec: the end of the timerange for which to get events
- sampling: sample all policy events using *sampling* interval.
- scope_filter: this is a SysdigMonitor-like filter (e.g 'container.image=ubuntu'). When provided, events are filtered by their scope, so only a subset will be returned (e.g. 'container.image=ubuntu' will provide only events that have happened on an ubuntu container).
- event_filter: this is a SysdigMonitor-like filter (e.g. policyEvent.policyId=3). When provided, events are filtered by some of their properties. Currently the supported set of filters is policyEvent.all(which can be used just with matches, policyEvent.policyId, policyEvent.id, policyEvent.severity, policyEvent.ruleTye, policyEvent.ruleSubtype.
- aggregations: When present it specifies how to aggregate events (sampling does not need to be specified, because when it's present it automatically means events will be aggregated). This field can either be a list of scope metrics or a list of policyEvents fields but (currently) not a mix of the two. When policy events fields are specified, only these can be used= severity, agentId, containerId, policyId, ruleType.
**Success Return Value**
An array containing:
- A context object that should be passed to later calls to get_more_policy_events.
- An array of policy events, in JSON format. See :func:`~sdcclient.SdSecureClient.get_more_policy_events`
for details on the contents of policy events.
**Example**
`examples/get_secure_policy_events.py <https://github.com/draios/python-sdc-client/blob/master/examples/get_secure_policy_events.py>`_ | [
"**",
"Description",
"**",
"Fetch",
"all",
"policy",
"events",
"with",
"id",
"that",
"occurred",
"in",
"the",
"time",
"range",
"[",
"from_sec",
":",
"to_sec",
"]",
".",
"This",
"method",
"is",
"used",
"in",
"conjunction",
"with",
":",
"func",
":",
"~sdcclient",
".",
"SdSecureClient",
".",
"get_more_policy_events",
"to",
"provide",
"paginated",
"access",
"to",
"policy",
"events",
"."
] | python | test |
sergiocorreia/panflute | panflute/autofilter.py | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/autofilter.py#L39-L112 | def stdio(filters=None, search_dirs=None, data_dir=True, sys_path=True, panfl_=False, input_stream=None, output_stream=None):
"""
Reads JSON from stdin and second CLI argument:
``sys.argv[1]``. Dumps JSON doc to the stdout.
:param filters: Union[List[str], None]
if None then read from metadata
:param search_dirs: Union[List[str], None]
if None then read from metadata
:param data_dir: bool
:param sys_path: bool
:param panfl_: bool
:param input_stream: io.StringIO or None
for debug purpose
:param output_stream: io.StringIO or None
for debug purpose
:return: None
"""
doc = load(input_stream)
# meta = doc.metadata # Local variable 'meta' value is not used
verbose = doc.get_metadata('panflute-verbose', False)
if search_dirs is None:
# metadata 'panflute-path' can be a list, a string, or missing
# `search_dirs` should be a list of str
search_dirs = doc.get_metadata('panflute-path', [])
if type(search_dirs) != list:
search_dirs = [search_dirs]
if '--data-dir' in search_dirs:
data_dir = True
if '--no-sys-path' in search_dirs:
sys_path = False
search_dirs = [dir_ for dir_ in search_dirs
if dir_ not in ('--data-dir', '--no-sys-path')]
if verbose:
debug('panflute: data_dir={} sys_path={}'.format(data_dir, sys_path))
search_dirs = [p.normpath(p.expanduser(p.expandvars(dir_))) for dir_ in search_dirs]
if not panfl_:
# default panflute behaviour:
search_dirs.append('.')
if data_dir:
search_dirs.append(get_filter_dir())
if sys_path:
search_dirs += sys.path
else:
# panfl/pandoctools behaviour:
if data_dir:
search_dirs.append(get_filter_dir())
if sys_path:
search_dirs += reduced_sys_path
# Display message (tests that everything is working ok)
msg = doc.get_metadata('panflute-echo', False)
if msg:
debug(msg)
if filters is None:
# metadata 'panflute-filters' can be a list, a string, or missing
# `filters` should be a list of str
filters = doc.get_metadata('panflute-filters', [])
if type(filters) != list:
filters = [filters]
if filters:
if verbose:
msg = "panflute: will run the following filters:"
debug(msg, ' '.join(filters))
doc = autorun_filters(filters, doc, search_dirs, verbose)
elif verbose:
debug("panflute: no filters were provided")
dump(doc, output_stream) | [
"def",
"stdio",
"(",
"filters",
"=",
"None",
",",
"search_dirs",
"=",
"None",
",",
"data_dir",
"=",
"True",
",",
"sys_path",
"=",
"True",
",",
"panfl_",
"=",
"False",
",",
"input_stream",
"=",
"None",
",",
"output_stream",
"=",
"None",
")",
":",
"doc",
"=",
"load",
"(",
"input_stream",
")",
"# meta = doc.metadata # Local variable 'meta' value is not used",
"verbose",
"=",
"doc",
".",
"get_metadata",
"(",
"'panflute-verbose'",
",",
"False",
")",
"if",
"search_dirs",
"is",
"None",
":",
"# metadata 'panflute-path' can be a list, a string, or missing",
"# `search_dirs` should be a list of str",
"search_dirs",
"=",
"doc",
".",
"get_metadata",
"(",
"'panflute-path'",
",",
"[",
"]",
")",
"if",
"type",
"(",
"search_dirs",
")",
"!=",
"list",
":",
"search_dirs",
"=",
"[",
"search_dirs",
"]",
"if",
"'--data-dir'",
"in",
"search_dirs",
":",
"data_dir",
"=",
"True",
"if",
"'--no-sys-path'",
"in",
"search_dirs",
":",
"sys_path",
"=",
"False",
"search_dirs",
"=",
"[",
"dir_",
"for",
"dir_",
"in",
"search_dirs",
"if",
"dir_",
"not",
"in",
"(",
"'--data-dir'",
",",
"'--no-sys-path'",
")",
"]",
"if",
"verbose",
":",
"debug",
"(",
"'panflute: data_dir={} sys_path={}'",
".",
"format",
"(",
"data_dir",
",",
"sys_path",
")",
")",
"search_dirs",
"=",
"[",
"p",
".",
"normpath",
"(",
"p",
".",
"expanduser",
"(",
"p",
".",
"expandvars",
"(",
"dir_",
")",
")",
")",
"for",
"dir_",
"in",
"search_dirs",
"]",
"if",
"not",
"panfl_",
":",
"# default panflute behaviour:",
"search_dirs",
".",
"append",
"(",
"'.'",
")",
"if",
"data_dir",
":",
"search_dirs",
".",
"append",
"(",
"get_filter_dir",
"(",
")",
")",
"if",
"sys_path",
":",
"search_dirs",
"+=",
"sys",
".",
"path",
"else",
":",
"# panfl/pandoctools behaviour:",
"if",
"data_dir",
":",
"search_dirs",
".",
"append",
"(",
"get_filter_dir",
"(",
")",
")",
"if",
"sys_path",
":",
"search_dirs",
"+=",
"reduced_sys_path",
"# Display message (tests that everything is working ok)",
"msg",
"=",
"doc",
".",
"get_metadata",
"(",
"'panflute-echo'",
",",
"False",
")",
"if",
"msg",
":",
"debug",
"(",
"msg",
")",
"if",
"filters",
"is",
"None",
":",
"# metadata 'panflute-filters' can be a list, a string, or missing",
"# `filters` should be a list of str",
"filters",
"=",
"doc",
".",
"get_metadata",
"(",
"'panflute-filters'",
",",
"[",
"]",
")",
"if",
"type",
"(",
"filters",
")",
"!=",
"list",
":",
"filters",
"=",
"[",
"filters",
"]",
"if",
"filters",
":",
"if",
"verbose",
":",
"msg",
"=",
"\"panflute: will run the following filters:\"",
"debug",
"(",
"msg",
",",
"' '",
".",
"join",
"(",
"filters",
")",
")",
"doc",
"=",
"autorun_filters",
"(",
"filters",
",",
"doc",
",",
"search_dirs",
",",
"verbose",
")",
"elif",
"verbose",
":",
"debug",
"(",
"\"panflute: no filters were provided\"",
")",
"dump",
"(",
"doc",
",",
"output_stream",
")"
] | Reads JSON from stdin and second CLI argument:
``sys.argv[1]``. Dumps JSON doc to the stdout.
:param filters: Union[List[str], None]
if None then read from metadata
:param search_dirs: Union[List[str], None]
if None then read from metadata
:param data_dir: bool
:param sys_path: bool
:param panfl_: bool
:param input_stream: io.StringIO or None
for debug purpose
:param output_stream: io.StringIO or None
for debug purpose
:return: None | [
"Reads",
"JSON",
"from",
"stdin",
"and",
"second",
"CLI",
"argument",
":",
"sys",
".",
"argv",
"[",
"1",
"]",
".",
"Dumps",
"JSON",
"doc",
"to",
"the",
"stdout",
"."
] | python | train |
mseclab/PyJFuzz | pyjfuzz/core/pjf_mutators.py | https://github.com/mseclab/PyJFuzz/blob/f777067076f62c9ab74ffea6e90fd54402b7a1b4/pyjfuzz/core/pjf_mutators.py#L259-L267 | def safe_unicode(self, buf):
"""
Safely return an unicode encoded string
"""
tmp = ""
buf = "".join(b for b in buf)
for character in buf:
tmp += character
return tmp | [
"def",
"safe_unicode",
"(",
"self",
",",
"buf",
")",
":",
"tmp",
"=",
"\"\"",
"buf",
"=",
"\"\"",
".",
"join",
"(",
"b",
"for",
"b",
"in",
"buf",
")",
"for",
"character",
"in",
"buf",
":",
"tmp",
"+=",
"character",
"return",
"tmp"
] | Safely return an unicode encoded string | [
"Safely",
"return",
"an",
"unicode",
"encoded",
"string"
] | python | test |
xapple/plumbing | plumbing/ec2.py | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/ec2.py#L70-L75 | def rename(self, name):
"""Set the name of the machine."""
self.ec2.create_tags(Resources = [self.instance_id],
Tags = [{'Key': 'Name',
'Value': name}])
self.refresh_info() | [
"def",
"rename",
"(",
"self",
",",
"name",
")",
":",
"self",
".",
"ec2",
".",
"create_tags",
"(",
"Resources",
"=",
"[",
"self",
".",
"instance_id",
"]",
",",
"Tags",
"=",
"[",
"{",
"'Key'",
":",
"'Name'",
",",
"'Value'",
":",
"name",
"}",
"]",
")",
"self",
".",
"refresh_info",
"(",
")"
] | Set the name of the machine. | [
"Set",
"the",
"name",
"of",
"the",
"machine",
"."
] | python | train |
acutesoftware/AIKIF | scripts/examples/game_of_life_console.py | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/game_of_life_console.py#L79-L86 | def print_there(x, y, text):
""""
allows display of a game of life on a console via
resetting cursor position to a set point - looks 'ok'
for testing but not production quality.
"""
sys.stdout.write("\x1b7\x1b[%d;%df%s\x1b8" % (x, y, text))
sys.stdout.flush() | [
"def",
"print_there",
"(",
"x",
",",
"y",
",",
"text",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\x1b7\\x1b[%d;%df%s\\x1b8\"",
"%",
"(",
"x",
",",
"y",
",",
"text",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] | allows display of a game of life on a console via
resetting cursor position to a set point - looks 'ok'
for testing but not production quality. | [
"allows",
"display",
"of",
"a",
"game",
"of",
"life",
"on",
"a",
"console",
"via",
"resetting",
"cursor",
"position",
"to",
"a",
"set",
"point",
"-",
"looks",
"ok",
"for",
"testing",
"but",
"not",
"production",
"quality",
"."
] | python | train |
postlund/pyatv | pyatv/dmap/__init__.py | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/dmap/__init__.py#L116-L125 | async def left(self):
"""Press key left."""
await self._send_commands(
self._move('Down', 0, 75, 100),
self._move('Move', 1, 70, 100),
self._move('Move', 3, 65, 100),
self._move('Move', 4, 60, 100),
self._move('Move', 5, 55, 100),
self._move('Move', 6, 50, 100),
self._move('Up', 7, 50, 100)) | [
"async",
"def",
"left",
"(",
"self",
")",
":",
"await",
"self",
".",
"_send_commands",
"(",
"self",
".",
"_move",
"(",
"'Down'",
",",
"0",
",",
"75",
",",
"100",
")",
",",
"self",
".",
"_move",
"(",
"'Move'",
",",
"1",
",",
"70",
",",
"100",
")",
",",
"self",
".",
"_move",
"(",
"'Move'",
",",
"3",
",",
"65",
",",
"100",
")",
",",
"self",
".",
"_move",
"(",
"'Move'",
",",
"4",
",",
"60",
",",
"100",
")",
",",
"self",
".",
"_move",
"(",
"'Move'",
",",
"5",
",",
"55",
",",
"100",
")",
",",
"self",
".",
"_move",
"(",
"'Move'",
",",
"6",
",",
"50",
",",
"100",
")",
",",
"self",
".",
"_move",
"(",
"'Up'",
",",
"7",
",",
"50",
",",
"100",
")",
")"
] | Press key left. | [
"Press",
"key",
"left",
"."
] | python | train |
MatterMiners/cobald | cobald/monitor/format_line.py | https://github.com/MatterMiners/cobald/blob/264138de4382d1c9b53fabcbc6660e10b33a914d/cobald/monitor/format_line.py#L8-L26 | def line_protocol(name, tags: dict = None, fields: dict = None, timestamp: float = None) -> str:
"""
Format a report as per InfluxDB line protocol
:param name: name of the report
:param tags: tags identifying the specific report
:param fields: measurements of the report
:param timestamp: when the measurement was taken, in **seconds** since the epoch
"""
output_str = name
if tags:
output_str += ','
output_str += ','.join('%s=%s' % (key, value) for key, value in sorted(tags.items()))
output_str += ' '
output_str += ','.join(('%s=%r' % (key, value)).replace("'", '"') for key, value in sorted(fields.items()))
if timestamp is not None:
# line protocol requires nanosecond precision, python uses seconds
output_str += ' %d' % (timestamp * 1E9)
return output_str + '\n' | [
"def",
"line_protocol",
"(",
"name",
",",
"tags",
":",
"dict",
"=",
"None",
",",
"fields",
":",
"dict",
"=",
"None",
",",
"timestamp",
":",
"float",
"=",
"None",
")",
"->",
"str",
":",
"output_str",
"=",
"name",
"if",
"tags",
":",
"output_str",
"+=",
"','",
"output_str",
"+=",
"','",
".",
"join",
"(",
"'%s=%s'",
"%",
"(",
"key",
",",
"value",
")",
"for",
"key",
",",
"value",
"in",
"sorted",
"(",
"tags",
".",
"items",
"(",
")",
")",
")",
"output_str",
"+=",
"' '",
"output_str",
"+=",
"','",
".",
"join",
"(",
"(",
"'%s=%r'",
"%",
"(",
"key",
",",
"value",
")",
")",
".",
"replace",
"(",
"\"'\"",
",",
"'\"'",
")",
"for",
"key",
",",
"value",
"in",
"sorted",
"(",
"fields",
".",
"items",
"(",
")",
")",
")",
"if",
"timestamp",
"is",
"not",
"None",
":",
"# line protocol requires nanosecond precision, python uses seconds",
"output_str",
"+=",
"' %d'",
"%",
"(",
"timestamp",
"*",
"1E9",
")",
"return",
"output_str",
"+",
"'\\n'"
] | Format a report as per InfluxDB line protocol
:param name: name of the report
:param tags: tags identifying the specific report
:param fields: measurements of the report
:param timestamp: when the measurement was taken, in **seconds** since the epoch | [
"Format",
"a",
"report",
"as",
"per",
"InfluxDB",
"line",
"protocol"
] | python | train |
joytunes/JTLocalize | localization_flow/jtlocalize/core/add_genstrings_comments_to_file.py | https://github.com/joytunes/JTLocalize/blob/87864dc60114e0e61c768d057c6eddfadff3f40a/localization_flow/jtlocalize/core/add_genstrings_comments_to_file.py#L26-L57 | def add_genstrings_comments_to_file(localization_file, genstrings_err):
""" Adds the comments produced by the genstrings script for duplicate keys.
Args:
localization_file (str): The path to the strings file.
"""
errors_to_log = [line for line in genstrings_err.splitlines() if "used with multiple comments" not in line]
if len(errors_to_log) > 0:
logging.warning("genstrings warnings:\n%s", "\n".join(errors_to_log))
loc_file = open_strings_file(localization_file, "a")
regex_matches = re.findall(r'Warning: Key "(.*?)" used with multiple comments ("[^"]*" (& "[^"]*")+)',
genstrings_err)
logging.info("Adding multiple comments from genstrings output")
for regex_match in regex_matches:
if len(regex_match) == 3:
key = regex_match[0]
comments = [comment.strip()[1:-1] for comment in regex_match[1].split("&")]
logging.info("Found key with %d comments: %s", len(comments), key)
loc_key = LocalizationEntry(comments, key, key)
loc_file.write(unicode(loc_key))
loc_file.write(u"\n")
loc_file.close() | [
"def",
"add_genstrings_comments_to_file",
"(",
"localization_file",
",",
"genstrings_err",
")",
":",
"errors_to_log",
"=",
"[",
"line",
"for",
"line",
"in",
"genstrings_err",
".",
"splitlines",
"(",
")",
"if",
"\"used with multiple comments\"",
"not",
"in",
"line",
"]",
"if",
"len",
"(",
"errors_to_log",
")",
">",
"0",
":",
"logging",
".",
"warning",
"(",
"\"genstrings warnings:\\n%s\"",
",",
"\"\\n\"",
".",
"join",
"(",
"errors_to_log",
")",
")",
"loc_file",
"=",
"open_strings_file",
"(",
"localization_file",
",",
"\"a\"",
")",
"regex_matches",
"=",
"re",
".",
"findall",
"(",
"r'Warning: Key \"(.*?)\" used with multiple comments (\"[^\"]*\" (& \"[^\"]*\")+)'",
",",
"genstrings_err",
")",
"logging",
".",
"info",
"(",
"\"Adding multiple comments from genstrings output\"",
")",
"for",
"regex_match",
"in",
"regex_matches",
":",
"if",
"len",
"(",
"regex_match",
")",
"==",
"3",
":",
"key",
"=",
"regex_match",
"[",
"0",
"]",
"comments",
"=",
"[",
"comment",
".",
"strip",
"(",
")",
"[",
"1",
":",
"-",
"1",
"]",
"for",
"comment",
"in",
"regex_match",
"[",
"1",
"]",
".",
"split",
"(",
"\"&\"",
")",
"]",
"logging",
".",
"info",
"(",
"\"Found key with %d comments: %s\"",
",",
"len",
"(",
"comments",
")",
",",
"key",
")",
"loc_key",
"=",
"LocalizationEntry",
"(",
"comments",
",",
"key",
",",
"key",
")",
"loc_file",
".",
"write",
"(",
"unicode",
"(",
"loc_key",
")",
")",
"loc_file",
".",
"write",
"(",
"u\"\\n\"",
")",
"loc_file",
".",
"close",
"(",
")"
] | Adds the comments produced by the genstrings script for duplicate keys.
Args:
localization_file (str): The path to the strings file. | [
"Adds",
"the",
"comments",
"produced",
"by",
"the",
"genstrings",
"script",
"for",
"duplicate",
"keys",
"."
] | python | train |
horazont/aioxmpp | aioxmpp/security_layer.py | https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/security_layer.py#L1224-L1253 | def security_layer(tls_provider, sasl_providers):
"""
.. deprecated:: 0.6
Replaced by :class:`SecurityLayer`.
Return a configured :class:`SecurityLayer`. `tls_provider` must be a
:class:`STARTTLSProvider`.
The return value can be passed to the constructor of
:class:`~.node.Client`.
Some very basic checking on the input is also performed.
"""
sasl_providers = tuple(sasl_providers)
if not sasl_providers:
raise ValueError("At least one SASL provider must be given.")
for sasl_provider in sasl_providers:
sasl_provider.execute # check that sasl_provider has execute method
result = SecurityLayer(
tls_provider.ssl_context_factory,
tls_provider.certificate_verifier_factory,
tls_provider.tls_required,
sasl_providers
)
return result | [
"def",
"security_layer",
"(",
"tls_provider",
",",
"sasl_providers",
")",
":",
"sasl_providers",
"=",
"tuple",
"(",
"sasl_providers",
")",
"if",
"not",
"sasl_providers",
":",
"raise",
"ValueError",
"(",
"\"At least one SASL provider must be given.\"",
")",
"for",
"sasl_provider",
"in",
"sasl_providers",
":",
"sasl_provider",
".",
"execute",
"# check that sasl_provider has execute method",
"result",
"=",
"SecurityLayer",
"(",
"tls_provider",
".",
"ssl_context_factory",
",",
"tls_provider",
".",
"certificate_verifier_factory",
",",
"tls_provider",
".",
"tls_required",
",",
"sasl_providers",
")",
"return",
"result"
] | .. deprecated:: 0.6
Replaced by :class:`SecurityLayer`.
Return a configured :class:`SecurityLayer`. `tls_provider` must be a
:class:`STARTTLSProvider`.
The return value can be passed to the constructor of
:class:`~.node.Client`.
Some very basic checking on the input is also performed. | [
"..",
"deprecated",
"::",
"0",
".",
"6"
] | python | train |
vbwagner/ctypescrypto | ctypescrypto/digest.py | https://github.com/vbwagner/ctypescrypto/blob/33c32904cf5e04901f87f90e2499634b8feecd3e/ctypescrypto/digest.py#L64-L69 | def name(self):
""" Returns name of the digest """
if not hasattr(self, 'digest_name'):
self.digest_name = Oid(libcrypto.EVP_MD_type(self.digest)
).longname()
return self.digest_name | [
"def",
"name",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'digest_name'",
")",
":",
"self",
".",
"digest_name",
"=",
"Oid",
"(",
"libcrypto",
".",
"EVP_MD_type",
"(",
"self",
".",
"digest",
")",
")",
".",
"longname",
"(",
")",
"return",
"self",
".",
"digest_name"
] | Returns name of the digest | [
"Returns",
"name",
"of",
"the",
"digest"
] | python | train |
romanz/trezor-agent | libagent/gpg/protocol.py | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/protocol.py#L68-L71 | def subpackets(*items):
"""Serialize several GPG subpackets."""
prefixed = [subpacket_prefix_len(item) for item in items]
return util.prefix_len('>H', b''.join(prefixed)) | [
"def",
"subpackets",
"(",
"*",
"items",
")",
":",
"prefixed",
"=",
"[",
"subpacket_prefix_len",
"(",
"item",
")",
"for",
"item",
"in",
"items",
"]",
"return",
"util",
".",
"prefix_len",
"(",
"'>H'",
",",
"b''",
".",
"join",
"(",
"prefixed",
")",
")"
] | Serialize several GPG subpackets. | [
"Serialize",
"several",
"GPG",
"subpackets",
"."
] | python | train |
data61/clkhash | clkhash/cli.py | https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/cli.py#L182-L202 | def create(server, name, project, apikey, output, threshold, verbose):
"""Create a new run on an entity matching server.
See entity matching service documentation for details on threshold.
Returns details for the created run.
"""
if verbose:
log("Entity Matching Server: {}".format(server))
if threshold is None:
raise ValueError("Please provide a threshold")
# Create a new run
try:
response = run_create(server, project, apikey, threshold, name)
except ServiceError as e:
log("Unexpected response with status {}".format(e.status_code))
log(e.text)
else:
json.dump(response, output) | [
"def",
"create",
"(",
"server",
",",
"name",
",",
"project",
",",
"apikey",
",",
"output",
",",
"threshold",
",",
"verbose",
")",
":",
"if",
"verbose",
":",
"log",
"(",
"\"Entity Matching Server: {}\"",
".",
"format",
"(",
"server",
")",
")",
"if",
"threshold",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Please provide a threshold\"",
")",
"# Create a new run",
"try",
":",
"response",
"=",
"run_create",
"(",
"server",
",",
"project",
",",
"apikey",
",",
"threshold",
",",
"name",
")",
"except",
"ServiceError",
"as",
"e",
":",
"log",
"(",
"\"Unexpected response with status {}\"",
".",
"format",
"(",
"e",
".",
"status_code",
")",
")",
"log",
"(",
"e",
".",
"text",
")",
"else",
":",
"json",
".",
"dump",
"(",
"response",
",",
"output",
")"
] | Create a new run on an entity matching server.
See entity matching service documentation for details on threshold.
Returns details for the created run. | [
"Create",
"a",
"new",
"run",
"on",
"an",
"entity",
"matching",
"server",
"."
] | python | train |
tgsmith61591/pmdarima | pmdarima/preprocessing/exog/fourier.py | https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/preprocessing/exog/fourier.py#L120-L173 | def transform(self, y, exogenous=None, n_periods=0, **_):
"""Create Fourier term features
When an ARIMA is fit with an exogenous array, it must be forecasted
with one also. Since at ``predict`` time in a pipeline we won't have
``y`` (and we may not yet have an ``exog`` array), we have to know how
far into the future for which to compute Fourier terms (hence
``n_periods``).
This method will compute the Fourier features for a given frequency and
``k`` term. Note that the ``y`` values are not used to compute these,
so this does not pose a risk of data leakage.
Parameters
----------
y : array-like or None, shape=(n_samples,)
The endogenous (time-series) array. This is unused and technically
optional for the Fourier terms, since it uses the pre-computed
``n`` to calculate the seasonal Fourier terms.
exogenous : array-like or None, shape=(n_samples, n_features), optional
The exogenous array of additional covariates. If specified, the
Fourier terms will be column-bound on the right side of the matrix.
Otherwise, the Fourier terms will be returned as the new exogenous
array.
n_periods : int, optional (default=0)
The number of periods in the future to forecast. If ``n_periods``
is 0, will compute the Fourier features for the training set.
``n_periods`` corresponds to the number of samples that will be
returned.
"""
check_is_fitted(self, "p_")
_, exog = self._check_y_exog(y, exogenous, null_allowed=True)
if n_periods and exog is not None:
if n_periods != exog.shape[0]:
raise ValueError("If n_periods and exog are specified, "
"n_periods must match dims of exogenous")
times = np.arange(self.n_ + n_periods, dtype=np.float64) + 1
X_fourier = _fourier_terms(self.p_, times)
# Maybe trim if we're in predict mode... in that case, we only keep the
# last n_periods rows in the matrix we've created
if n_periods:
X_fourier = X_fourier[-n_periods:, :]
if exog is None:
exog = X_fourier
else:
exog = np.hstack([exog, X_fourier])
return y, exog | [
"def",
"transform",
"(",
"self",
",",
"y",
",",
"exogenous",
"=",
"None",
",",
"n_periods",
"=",
"0",
",",
"*",
"*",
"_",
")",
":",
"check_is_fitted",
"(",
"self",
",",
"\"p_\"",
")",
"_",
",",
"exog",
"=",
"self",
".",
"_check_y_exog",
"(",
"y",
",",
"exogenous",
",",
"null_allowed",
"=",
"True",
")",
"if",
"n_periods",
"and",
"exog",
"is",
"not",
"None",
":",
"if",
"n_periods",
"!=",
"exog",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"\"If n_periods and exog are specified, \"",
"\"n_periods must match dims of exogenous\"",
")",
"times",
"=",
"np",
".",
"arange",
"(",
"self",
".",
"n_",
"+",
"n_periods",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"+",
"1",
"X_fourier",
"=",
"_fourier_terms",
"(",
"self",
".",
"p_",
",",
"times",
")",
"# Maybe trim if we're in predict mode... in that case, we only keep the",
"# last n_periods rows in the matrix we've created",
"if",
"n_periods",
":",
"X_fourier",
"=",
"X_fourier",
"[",
"-",
"n_periods",
":",
",",
":",
"]",
"if",
"exog",
"is",
"None",
":",
"exog",
"=",
"X_fourier",
"else",
":",
"exog",
"=",
"np",
".",
"hstack",
"(",
"[",
"exog",
",",
"X_fourier",
"]",
")",
"return",
"y",
",",
"exog"
] | Create Fourier term features
When an ARIMA is fit with an exogenous array, it must be forecasted
with one also. Since at ``predict`` time in a pipeline we won't have
``y`` (and we may not yet have an ``exog`` array), we have to know how
far into the future for which to compute Fourier terms (hence
``n_periods``).
This method will compute the Fourier features for a given frequency and
``k`` term. Note that the ``y`` values are not used to compute these,
so this does not pose a risk of data leakage.
Parameters
----------
y : array-like or None, shape=(n_samples,)
The endogenous (time-series) array. This is unused and technically
optional for the Fourier terms, since it uses the pre-computed
``n`` to calculate the seasonal Fourier terms.
exogenous : array-like or None, shape=(n_samples, n_features), optional
The exogenous array of additional covariates. If specified, the
Fourier terms will be column-bound on the right side of the matrix.
Otherwise, the Fourier terms will be returned as the new exogenous
array.
n_periods : int, optional (default=0)
The number of periods in the future to forecast. If ``n_periods``
is 0, will compute the Fourier features for the training set.
``n_periods`` corresponds to the number of samples that will be
returned. | [
"Create",
"Fourier",
"term",
"features"
] | python | train |
google/grr | grr/server/grr_response_server/keyword_index.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/keyword_index.py#L83-L92 | def AddKeywordsForName(self, name, keywords):
"""Associates keywords with name.
Records that keywords are associated with name.
Args:
name: A name which should be associated with some keywords.
keywords: A collection of keywords to associate with name.
"""
data_store.DB.IndexAddKeywordsForName(self.urn, name, keywords) | [
"def",
"AddKeywordsForName",
"(",
"self",
",",
"name",
",",
"keywords",
")",
":",
"data_store",
".",
"DB",
".",
"IndexAddKeywordsForName",
"(",
"self",
".",
"urn",
",",
"name",
",",
"keywords",
")"
] | Associates keywords with name.
Records that keywords are associated with name.
Args:
name: A name which should be associated with some keywords.
keywords: A collection of keywords to associate with name. | [
"Associates",
"keywords",
"with",
"name",
"."
] | python | train |
zeromake/aiko | aiko/application.py | https://github.com/zeromake/aiko/blob/53b246fa88652466a9e38ac3d1a99a6198195b0f/aiko/application.py#L199-L250 | def _middleware_call(
self,
middlewares: Iterator[MIDDLEWARE_TYPE],
ctx: Context,
next_call: NEXT_CALL_TYPE,
) -> TypeGenerator[Any, None, None]:
"""
从迭代器中取出一个中间件,执行
"""
middleware = None
try:
middleware = next(middlewares)
except StopIteration:
return
# next_call = self._next_middleware(middlewares, ctx)
if asyncio.iscoroutinefunction(middleware):
temp = cast(Any, middleware(ctx, next_call))
body = yield from temp
else:
body = middleware(ctx, next_call)
if body is not None:
if isinstance(body, Generator):
gen_obj = body
body = None
# 处理同步方法使用 yield 来调用异步
while True:
try:
gen = next(gen_obj)
temp = yield from handle_async_gen(gen, gen_obj)
except StopIteration:
break
if temp is not None:
body = temp
# elif asyncio.iscoroutine(body):
# # 处理中间件返回了一个 coroutine 对象需要 await
# try:
# body = yield from body
# except Exception:
# pass
if isinstance(body, tuple):
flag = True
for item in body:
if flag:
ctx.response.body = item
flag = False
elif isinstance(item, int):
ctx.response.status = item
elif isinstance(item, dict):
ctx.response.headers.update(item)
# 中间件返回的结果如果不为空设置到 body
elif body is not None:
ctx.response.body = body | [
"def",
"_middleware_call",
"(",
"self",
",",
"middlewares",
":",
"Iterator",
"[",
"MIDDLEWARE_TYPE",
"]",
",",
"ctx",
":",
"Context",
",",
"next_call",
":",
"NEXT_CALL_TYPE",
",",
")",
"->",
"TypeGenerator",
"[",
"Any",
",",
"None",
",",
"None",
"]",
":",
"middleware",
"=",
"None",
"try",
":",
"middleware",
"=",
"next",
"(",
"middlewares",
")",
"except",
"StopIteration",
":",
"return",
"# next_call = self._next_middleware(middlewares, ctx)",
"if",
"asyncio",
".",
"iscoroutinefunction",
"(",
"middleware",
")",
":",
"temp",
"=",
"cast",
"(",
"Any",
",",
"middleware",
"(",
"ctx",
",",
"next_call",
")",
")",
"body",
"=",
"yield",
"from",
"temp",
"else",
":",
"body",
"=",
"middleware",
"(",
"ctx",
",",
"next_call",
")",
"if",
"body",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"body",
",",
"Generator",
")",
":",
"gen_obj",
"=",
"body",
"body",
"=",
"None",
"# 处理同步方法使用 yield 来调用异步",
"while",
"True",
":",
"try",
":",
"gen",
"=",
"next",
"(",
"gen_obj",
")",
"temp",
"=",
"yield",
"from",
"handle_async_gen",
"(",
"gen",
",",
"gen_obj",
")",
"except",
"StopIteration",
":",
"break",
"if",
"temp",
"is",
"not",
"None",
":",
"body",
"=",
"temp",
"# elif asyncio.iscoroutine(body):",
"# # 处理中间件返回了一个 coroutine 对象需要 await",
"# try:",
"# body = yield from body",
"# except Exception:",
"# pass",
"if",
"isinstance",
"(",
"body",
",",
"tuple",
")",
":",
"flag",
"=",
"True",
"for",
"item",
"in",
"body",
":",
"if",
"flag",
":",
"ctx",
".",
"response",
".",
"body",
"=",
"item",
"flag",
"=",
"False",
"elif",
"isinstance",
"(",
"item",
",",
"int",
")",
":",
"ctx",
".",
"response",
".",
"status",
"=",
"item",
"elif",
"isinstance",
"(",
"item",
",",
"dict",
")",
":",
"ctx",
".",
"response",
".",
"headers",
".",
"update",
"(",
"item",
")",
"# 中间件返回的结果如果不为空设置到 body",
"elif",
"body",
"is",
"not",
"None",
":",
"ctx",
".",
"response",
".",
"body",
"=",
"body"
] | 从迭代器中取出一个中间件,执行 | [
"从迭代器中取出一个中间件,执行"
] | python | train |
RiotGames/cloud-inquisitor | plugins/public/cinq-auditor-iam/cinq_auditor_iam/__init__.py | https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/plugins/public/cinq-auditor-iam/cinq_auditor_iam/__init__.py#L368-L394 | def get_roles(client):
"""Returns a list of all the roles for an account. Returns a list containing all the roles for the account.
Args:
client (:obj:`boto3.session.Session`): A boto3 Session object
Returns:
:obj:`list` of `dict`
"""
done = False
marker = None
roles = []
while not done:
if marker:
response = client.list_roles(Marker=marker)
else:
response = client.list_roles()
roles += response['Roles']
if response['IsTruncated']:
marker = response['Marker']
else:
done = True
return roles | [
"def",
"get_roles",
"(",
"client",
")",
":",
"done",
"=",
"False",
"marker",
"=",
"None",
"roles",
"=",
"[",
"]",
"while",
"not",
"done",
":",
"if",
"marker",
":",
"response",
"=",
"client",
".",
"list_roles",
"(",
"Marker",
"=",
"marker",
")",
"else",
":",
"response",
"=",
"client",
".",
"list_roles",
"(",
")",
"roles",
"+=",
"response",
"[",
"'Roles'",
"]",
"if",
"response",
"[",
"'IsTruncated'",
"]",
":",
"marker",
"=",
"response",
"[",
"'Marker'",
"]",
"else",
":",
"done",
"=",
"True",
"return",
"roles"
] | Returns a list of all the roles for an account. Returns a list containing all the roles for the account.
Args:
client (:obj:`boto3.session.Session`): A boto3 Session object
Returns:
:obj:`list` of `dict` | [
"Returns",
"a",
"list",
"of",
"all",
"the",
"roles",
"for",
"an",
"account",
".",
"Returns",
"a",
"list",
"containing",
"all",
"the",
"roles",
"for",
"the",
"account",
"."
] | python | train |
wummel/linkchecker | linkcheck/checker/urlbase.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/urlbase.py#L526-L535 | def add_size_info (self):
"""Set size of URL content (if any)..
Should be overridden in subclasses."""
maxbytes = self.aggregate.config["maxfilesizedownload"]
if self.size > maxbytes:
self.add_warning(
_("Content size %(size)s is larger than %(maxbytes)s.") %
dict(size=strformat.strsize(self.size),
maxbytes=strformat.strsize(maxbytes)),
tag=WARN_URL_CONTENT_SIZE_TOO_LARGE) | [
"def",
"add_size_info",
"(",
"self",
")",
":",
"maxbytes",
"=",
"self",
".",
"aggregate",
".",
"config",
"[",
"\"maxfilesizedownload\"",
"]",
"if",
"self",
".",
"size",
">",
"maxbytes",
":",
"self",
".",
"add_warning",
"(",
"_",
"(",
"\"Content size %(size)s is larger than %(maxbytes)s.\"",
")",
"%",
"dict",
"(",
"size",
"=",
"strformat",
".",
"strsize",
"(",
"self",
".",
"size",
")",
",",
"maxbytes",
"=",
"strformat",
".",
"strsize",
"(",
"maxbytes",
")",
")",
",",
"tag",
"=",
"WARN_URL_CONTENT_SIZE_TOO_LARGE",
")"
] | Set size of URL content (if any)..
Should be overridden in subclasses. | [
"Set",
"size",
"of",
"URL",
"content",
"(",
"if",
"any",
")",
"..",
"Should",
"be",
"overridden",
"in",
"subclasses",
"."
] | python | train |
gc3-uzh-ch/elasticluster | elasticluster/providers/gce.py | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/gce.py#L180-L199 | def _connect(self):
"""Connects to the cloud web services. If this is the first
authentication, a web browser will be started to authenticate
against google and provide access to elasticluster.
:return: A Resource object with methods for interacting with the
service.
"""
# ensure only one thread runs the authentication process, if needed
with GoogleCloudProvider.__gce_lock:
# check for existing connection
if not self._gce:
version = pkg_resources.get_distribution("elasticluster").version
http = googleapiclient.http.set_user_agent(httplib2.Http(), "elasticluster/%s" % version)
credentials = self._get_credentials()
self._auth_http = credentials.authorize(http)
self._gce = build(GCE_API_NAME, GCE_API_VERSION, http=http)
return self._gce | [
"def",
"_connect",
"(",
"self",
")",
":",
"# ensure only one thread runs the authentication process, if needed",
"with",
"GoogleCloudProvider",
".",
"__gce_lock",
":",
"# check for existing connection",
"if",
"not",
"self",
".",
"_gce",
":",
"version",
"=",
"pkg_resources",
".",
"get_distribution",
"(",
"\"elasticluster\"",
")",
".",
"version",
"http",
"=",
"googleapiclient",
".",
"http",
".",
"set_user_agent",
"(",
"httplib2",
".",
"Http",
"(",
")",
",",
"\"elasticluster/%s\"",
"%",
"version",
")",
"credentials",
"=",
"self",
".",
"_get_credentials",
"(",
")",
"self",
".",
"_auth_http",
"=",
"credentials",
".",
"authorize",
"(",
"http",
")",
"self",
".",
"_gce",
"=",
"build",
"(",
"GCE_API_NAME",
",",
"GCE_API_VERSION",
",",
"http",
"=",
"http",
")",
"return",
"self",
".",
"_gce"
] | Connects to the cloud web services. If this is the first
authentication, a web browser will be started to authenticate
against google and provide access to elasticluster.
:return: A Resource object with methods for interacting with the
service. | [
"Connects",
"to",
"the",
"cloud",
"web",
"services",
".",
"If",
"this",
"is",
"the",
"first",
"authentication",
"a",
"web",
"browser",
"will",
"be",
"started",
"to",
"authenticate",
"against",
"google",
"and",
"provide",
"access",
"to",
"elasticluster",
"."
] | python | train |
dustin/twitty-twister | twittytwister/twitter.py | https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L1168-L1186 | def _state_error(self, reason):
"""
The connection attempt resulted in an error.
Attempt a reconnect with a back-off algorithm.
"""
log.err(reason)
def matchException(failure):
for errorState, backOff in self.backOffs.iteritems():
if 'errorTypes' not in backOff:
continue
if failure.check(*backOff['errorTypes']):
return errorState
return 'other'
errorState = matchException(reason)
self._reconnect(errorState) | [
"def",
"_state_error",
"(",
"self",
",",
"reason",
")",
":",
"log",
".",
"err",
"(",
"reason",
")",
"def",
"matchException",
"(",
"failure",
")",
":",
"for",
"errorState",
",",
"backOff",
"in",
"self",
".",
"backOffs",
".",
"iteritems",
"(",
")",
":",
"if",
"'errorTypes'",
"not",
"in",
"backOff",
":",
"continue",
"if",
"failure",
".",
"check",
"(",
"*",
"backOff",
"[",
"'errorTypes'",
"]",
")",
":",
"return",
"errorState",
"return",
"'other'",
"errorState",
"=",
"matchException",
"(",
"reason",
")",
"self",
".",
"_reconnect",
"(",
"errorState",
")"
] | The connection attempt resulted in an error.
Attempt a reconnect with a back-off algorithm. | [
"The",
"connection",
"attempt",
"resulted",
"in",
"an",
"error",
"."
] | python | train |
SpikeInterface/spiketoolkit | spiketoolkit/sorters/launcher.py | https://github.com/SpikeInterface/spiketoolkit/blob/f7c054383d1ebca640966b057c087fa187955d13/spiketoolkit/sorters/launcher.py#L181-L206 | def collect_results(working_folder):
"""
Collect results in a working_folder.
The output is nested dict[rec_name][sorter_name] of SortingExtrator.
"""
results = {}
working_folder = Path(working_folder)
output_folders = working_folder/'output_folders'
for rec_name in os.listdir(output_folders):
if not os.path.isdir(output_folders / rec_name):
continue
# print(rec_name)
results[rec_name] = {}
for sorter_name in os.listdir(output_folders / rec_name):
# print(' ', sorter_name)
output_folder = output_folders / rec_name / sorter_name
#~ print(output_folder)
if not os.path.isdir(output_folder):
continue
SorterClass = sorter_dict[sorter_name]
results[rec_name][sorter_name] = SorterClass.get_result_from_folder(output_folder)
return results | [
"def",
"collect_results",
"(",
"working_folder",
")",
":",
"results",
"=",
"{",
"}",
"working_folder",
"=",
"Path",
"(",
"working_folder",
")",
"output_folders",
"=",
"working_folder",
"/",
"'output_folders'",
"for",
"rec_name",
"in",
"os",
".",
"listdir",
"(",
"output_folders",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"output_folders",
"/",
"rec_name",
")",
":",
"continue",
"# print(rec_name)",
"results",
"[",
"rec_name",
"]",
"=",
"{",
"}",
"for",
"sorter_name",
"in",
"os",
".",
"listdir",
"(",
"output_folders",
"/",
"rec_name",
")",
":",
"# print(' ', sorter_name)",
"output_folder",
"=",
"output_folders",
"/",
"rec_name",
"/",
"sorter_name",
"#~ print(output_folder)",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"output_folder",
")",
":",
"continue",
"SorterClass",
"=",
"sorter_dict",
"[",
"sorter_name",
"]",
"results",
"[",
"rec_name",
"]",
"[",
"sorter_name",
"]",
"=",
"SorterClass",
".",
"get_result_from_folder",
"(",
"output_folder",
")",
"return",
"results"
] | Collect results in a working_folder.
The output is nested dict[rec_name][sorter_name] of SortingExtrator. | [
"Collect",
"results",
"in",
"a",
"working_folder",
"."
] | python | train |
blockstack/blockstack-files | blockstack_file/blockstack_file.py | https://github.com/blockstack/blockstack-files/blob/8d88cc48bdf8ed57f17d4bba860e972bde321921/blockstack_file/blockstack_file.py#L652-L866 | def main():
"""
Entry point for the CLI interface
"""
argparser = argparse.ArgumentParser(
description='Blockstack-file version {}'.format(__version__))
subparsers = argparser.add_subparsers(
dest='action', help='The file command to take [get/put/delete]')
parser = subparsers.add_parser(
'init',
help='Initialize this host to start sending and receiving files')
parser.add_argument(
'--config', action='store',
help='path to the config file to use (default is %s)' % CONFIG_PATH)
parser.add_argument(
'--blockchain_id', action='store',
help='the recipient blockchain ID to use'),
parser.add_argument(
'--hostname', action='store',
help='the recipient hostname to use')
parser = subparsers.add_parser(
'reset',
help='Reset this host\'s key')
parser.add_argument(
'--config', action='store',
help='path to the config file to use (default is %s)' % CONFIG_PATH)
parser.add_argument(
'--blockchain_id', action='store',
help='the recipient blockchain ID to use'),
parser.add_argument(
'--hostname', action='store',
help='the recipient hostname to use')
parser = subparsers.add_parser(
'get',
help='Get a file')
parser.add_argument(
'--config', action='store',
help='path to the config file to use (default is %s)' % CONFIG_PATH)
parser.add_argument(
'--blockchain_id', action='store',
help='the recipient blockchain ID to use'),
parser.add_argument(
'--hostname', action='store',
help='the recipient hostname to use')
parser.add_argument(
'--passphrase', action='store',
help='decryption passphrase')
parser.add_argument(
'--wallet', action='store',
help='path to your Blockstack wallet')
parser.add_argument(
'sender_blockchain_id', action='store',
help='the sender\'s blockchain ID')
parser.add_argument(
'data_name', action='store',
help='Public name of the file to fetch')
parser.add_argument(
'output_path', action='store', nargs='?',
help='[optional] destination path to save the file; defaults to stdout')
parser = subparsers.add_parser(
'put',
help='Share a file')
parser.add_argument(
'--config', action='store',
help='path to the config file to use (default is %s)' % CONFIG_PATH)
parser.add_argument(
'--blockchain_id', action='store',
help='the sender blockchain ID to use'),
parser.add_argument(
'--hostname', action='store',
help='the sender hostname to use')
parser.add_argument(
'--passphrase', action='store',
help='encryption passphrase')
parser.add_argument(
'--wallet', action='store',
help='path to your Blockstack wallet')
parser.add_argument(
'input_path', action='store',
help='Path to the file to share')
parser.add_argument(
'data_name', action='store',
help='Public name of the file to store')
# recipients come afterwards
parser = subparsers.add_parser(
'delete',
help='Delete a shared file')
parser.add_argument(
'--config', action='store',
help='path to the config file to use (default is %s)' % CONFIG_PATH)
parser.add_argument(
'--blockchain_id', action='store',
help='the sender blockchain ID to use'),
parser.add_argument(
'--hostname', action='store',
help='the sender hostname to use')
parser.add_argument(
'--wallet', action='store',
help='path to your Blockstack wallet')
parser.add_argument(
'data_name', action='store',
help='Public name of the file to delete')
args, unparsed = argparser.parse_known_args()
# load up config
config_path = args.config
if config_path is None:
config_path = CONFIG_PATH
conf = get_config( config_path )
config_dir = os.path.dirname(config_path)
blockchain_id = getattr(args, "blockchain_id", None)
hostname = getattr(args, "hostname", None)
passphrase = getattr(args, "passphrase", None)
data_name = getattr(args, "data_name", None)
wallet_path = getattr(args, "wallet", None)
if blockchain_id is None:
blockchain_id = conf['blockchain_id']
if hostname is None:
hostname = conf['hostname']
if wallet_path is None:
wallet_path = conf['wallet']
if wallet_path is None and config_dir is not None:
wallet_path = os.path.join(config_dir, blockstack_client.config.WALLET_FILENAME)
# load wallet
if wallet_path is not None and os.path.exists( wallet_path ):
# load from disk
log.debug("Load wallet from %s" % wallet_path)
wallet = blockstack_client.load_wallet( config_dir=config_dir, wallet_path=wallet_path, include_private=True )
if 'error' in wallet:
print >> sys.stderr, json.dumps(wallet, sort_keys=True, indent=4 )
sys.exit(1)
else:
wallet = wallet['wallet']
else:
# load from RPC
log.debug("Load wallet from RPC")
wallet = blockstack_client.dump_wallet(config_path=config_path)
if 'error' in wallet:
print >> sys.stderr, json.dumps(wallet, sort_keys=True, indent=4)
sys.exit(1)
log.debug("Process %s" % args.action)
if args.action in ['init', 'reset']:
# (re)key
res = file_key_regenerate( blockchain_id, hostname, config_path=config_path, wallet_keys=wallet )
if 'error' in res:
print >> sys.stderr, json.dumps(res, sort_keys=True, indent=4 )
sys.exit(1)
if args.action == 'get':
# get a file
sender_blockchain_id = args.sender_blockchain_id
output_path = args.output_path
tmp = False
if output_path is None:
fd, path = tempfile.mkstemp( prefix='blockstack-file-', dir=config_dir )
os.close(fd)
output_path = path
tmp = True
res = file_get( blockchain_id, hostname, sender_blockchain_id, data_name, output_path, passphrase=passphrase, config_path=config_path, wallet_keys=wallet )
if 'error' in res:
print >> sys.stderr, json.dumps(res, sort_keys=True, indent=4 )
sys.exit(1)
if tmp:
# print to stdout
with open(output_path, "r") as f:
while True:
buf = f.read(65536)
if len(buf) == 0:
break
sys.stdout.write(buf)
os.unlink(output_path)
elif args.action == 'put':
# put a file
recipients = unparsed
input_path = args.input_path
res = file_put( blockchain_id, hostname, recipients, data_name, input_path, passphrase=passphrase, config_path=config_path, wallet_keys=wallet )
if 'error' in res:
print >> sys.stderr, json.dumps(res, sort_keys=True, indent=4 )
sys.exit(1)
elif args.action == 'delete':
# delete a file
res = file_delete( blockchain_id, data_name, config_path=config_path, wallet_keys=wallet )
if 'error' in res:
print >> sys.stderr, json.dumps(res, sort_keys=True, indent=4 )
sys.exit(1)
print >> sys.stderr, json.dumps({'status': True}, sort_keys=True, indent=4 )
sys.exit(0) | [
"def",
"main",
"(",
")",
":",
"argparser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Blockstack-file version {}'",
".",
"format",
"(",
"__version__",
")",
")",
"subparsers",
"=",
"argparser",
".",
"add_subparsers",
"(",
"dest",
"=",
"'action'",
",",
"help",
"=",
"'The file command to take [get/put/delete]'",
")",
"parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"'init'",
",",
"help",
"=",
"'Initialize this host to start sending and receiving files'",
")",
"parser",
".",
"add_argument",
"(",
"'--config'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'path to the config file to use (default is %s)'",
"%",
"CONFIG_PATH",
")",
"parser",
".",
"add_argument",
"(",
"'--blockchain_id'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'the recipient blockchain ID to use'",
")",
",",
"parser",
".",
"add_argument",
"(",
"'--hostname'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'the recipient hostname to use'",
")",
"parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"'reset'",
",",
"help",
"=",
"'Reset this host\\'s key'",
")",
"parser",
".",
"add_argument",
"(",
"'--config'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'path to the config file to use (default is %s)'",
"%",
"CONFIG_PATH",
")",
"parser",
".",
"add_argument",
"(",
"'--blockchain_id'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'the recipient blockchain ID to use'",
")",
",",
"parser",
".",
"add_argument",
"(",
"'--hostname'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'the recipient hostname to use'",
")",
"parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"'get'",
",",
"help",
"=",
"'Get a file'",
")",
"parser",
".",
"add_argument",
"(",
"'--config'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'path to the config file to use (default is %s)'",
"%",
"CONFIG_PATH",
")",
"parser",
".",
"add_argument",
"(",
"'--blockchain_id'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'the recipient blockchain ID to use'",
")",
",",
"parser",
".",
"add_argument",
"(",
"'--hostname'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'the recipient hostname to use'",
")",
"parser",
".",
"add_argument",
"(",
"'--passphrase'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'decryption passphrase'",
")",
"parser",
".",
"add_argument",
"(",
"'--wallet'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'path to your Blockstack wallet'",
")",
"parser",
".",
"add_argument",
"(",
"'sender_blockchain_id'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'the sender\\'s blockchain ID'",
")",
"parser",
".",
"add_argument",
"(",
"'data_name'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'Public name of the file to fetch'",
")",
"parser",
".",
"add_argument",
"(",
"'output_path'",
",",
"action",
"=",
"'store'",
",",
"nargs",
"=",
"'?'",
",",
"help",
"=",
"'[optional] destination path to save the file; defaults to stdout'",
")",
"parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"'put'",
",",
"help",
"=",
"'Share a file'",
")",
"parser",
".",
"add_argument",
"(",
"'--config'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'path to the config file to use (default is %s)'",
"%",
"CONFIG_PATH",
")",
"parser",
".",
"add_argument",
"(",
"'--blockchain_id'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'the sender blockchain ID to use'",
")",
",",
"parser",
".",
"add_argument",
"(",
"'--hostname'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'the sender hostname to use'",
")",
"parser",
".",
"add_argument",
"(",
"'--passphrase'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'encryption passphrase'",
")",
"parser",
".",
"add_argument",
"(",
"'--wallet'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'path to your Blockstack wallet'",
")",
"parser",
".",
"add_argument",
"(",
"'input_path'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'Path to the file to share'",
")",
"parser",
".",
"add_argument",
"(",
"'data_name'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'Public name of the file to store'",
")",
"# recipients come afterwards",
"parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"'delete'",
",",
"help",
"=",
"'Delete a shared file'",
")",
"parser",
".",
"add_argument",
"(",
"'--config'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'path to the config file to use (default is %s)'",
"%",
"CONFIG_PATH",
")",
"parser",
".",
"add_argument",
"(",
"'--blockchain_id'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'the sender blockchain ID to use'",
")",
",",
"parser",
".",
"add_argument",
"(",
"'--hostname'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'the sender hostname to use'",
")",
"parser",
".",
"add_argument",
"(",
"'--wallet'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'path to your Blockstack wallet'",
")",
"parser",
".",
"add_argument",
"(",
"'data_name'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'Public name of the file to delete'",
")",
"args",
",",
"unparsed",
"=",
"argparser",
".",
"parse_known_args",
"(",
")",
"# load up config",
"config_path",
"=",
"args",
".",
"config",
"if",
"config_path",
"is",
"None",
":",
"config_path",
"=",
"CONFIG_PATH",
"conf",
"=",
"get_config",
"(",
"config_path",
")",
"config_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"config_path",
")",
"blockchain_id",
"=",
"getattr",
"(",
"args",
",",
"\"blockchain_id\"",
",",
"None",
")",
"hostname",
"=",
"getattr",
"(",
"args",
",",
"\"hostname\"",
",",
"None",
")",
"passphrase",
"=",
"getattr",
"(",
"args",
",",
"\"passphrase\"",
",",
"None",
")",
"data_name",
"=",
"getattr",
"(",
"args",
",",
"\"data_name\"",
",",
"None",
")",
"wallet_path",
"=",
"getattr",
"(",
"args",
",",
"\"wallet\"",
",",
"None",
")",
"if",
"blockchain_id",
"is",
"None",
":",
"blockchain_id",
"=",
"conf",
"[",
"'blockchain_id'",
"]",
"if",
"hostname",
"is",
"None",
":",
"hostname",
"=",
"conf",
"[",
"'hostname'",
"]",
"if",
"wallet_path",
"is",
"None",
":",
"wallet_path",
"=",
"conf",
"[",
"'wallet'",
"]",
"if",
"wallet_path",
"is",
"None",
"and",
"config_dir",
"is",
"not",
"None",
":",
"wallet_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"config_dir",
",",
"blockstack_client",
".",
"config",
".",
"WALLET_FILENAME",
")",
"# load wallet ",
"if",
"wallet_path",
"is",
"not",
"None",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"wallet_path",
")",
":",
"# load from disk",
"log",
".",
"debug",
"(",
"\"Load wallet from %s\"",
"%",
"wallet_path",
")",
"wallet",
"=",
"blockstack_client",
".",
"load_wallet",
"(",
"config_dir",
"=",
"config_dir",
",",
"wallet_path",
"=",
"wallet_path",
",",
"include_private",
"=",
"True",
")",
"if",
"'error'",
"in",
"wallet",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"json",
".",
"dumps",
"(",
"wallet",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"wallet",
"=",
"wallet",
"[",
"'wallet'",
"]",
"else",
":",
"# load from RPC",
"log",
".",
"debug",
"(",
"\"Load wallet from RPC\"",
")",
"wallet",
"=",
"blockstack_client",
".",
"dump_wallet",
"(",
"config_path",
"=",
"config_path",
")",
"if",
"'error'",
"in",
"wallet",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"json",
".",
"dumps",
"(",
"wallet",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"log",
".",
"debug",
"(",
"\"Process %s\"",
"%",
"args",
".",
"action",
")",
"if",
"args",
".",
"action",
"in",
"[",
"'init'",
",",
"'reset'",
"]",
":",
"# (re)key",
"res",
"=",
"file_key_regenerate",
"(",
"blockchain_id",
",",
"hostname",
",",
"config_path",
"=",
"config_path",
",",
"wallet_keys",
"=",
"wallet",
")",
"if",
"'error'",
"in",
"res",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"json",
".",
"dumps",
"(",
"res",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"args",
".",
"action",
"==",
"'get'",
":",
"# get a file",
"sender_blockchain_id",
"=",
"args",
".",
"sender_blockchain_id",
"output_path",
"=",
"args",
".",
"output_path",
"tmp",
"=",
"False",
"if",
"output_path",
"is",
"None",
":",
"fd",
",",
"path",
"=",
"tempfile",
".",
"mkstemp",
"(",
"prefix",
"=",
"'blockstack-file-'",
",",
"dir",
"=",
"config_dir",
")",
"os",
".",
"close",
"(",
"fd",
")",
"output_path",
"=",
"path",
"tmp",
"=",
"True",
"res",
"=",
"file_get",
"(",
"blockchain_id",
",",
"hostname",
",",
"sender_blockchain_id",
",",
"data_name",
",",
"output_path",
",",
"passphrase",
"=",
"passphrase",
",",
"config_path",
"=",
"config_path",
",",
"wallet_keys",
"=",
"wallet",
")",
"if",
"'error'",
"in",
"res",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"json",
".",
"dumps",
"(",
"res",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"tmp",
":",
"# print to stdout ",
"with",
"open",
"(",
"output_path",
",",
"\"r\"",
")",
"as",
"f",
":",
"while",
"True",
":",
"buf",
"=",
"f",
".",
"read",
"(",
"65536",
")",
"if",
"len",
"(",
"buf",
")",
"==",
"0",
":",
"break",
"sys",
".",
"stdout",
".",
"write",
"(",
"buf",
")",
"os",
".",
"unlink",
"(",
"output_path",
")",
"elif",
"args",
".",
"action",
"==",
"'put'",
":",
"# put a file",
"recipients",
"=",
"unparsed",
"input_path",
"=",
"args",
".",
"input_path",
"res",
"=",
"file_put",
"(",
"blockchain_id",
",",
"hostname",
",",
"recipients",
",",
"data_name",
",",
"input_path",
",",
"passphrase",
"=",
"passphrase",
",",
"config_path",
"=",
"config_path",
",",
"wallet_keys",
"=",
"wallet",
")",
"if",
"'error'",
"in",
"res",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"json",
".",
"dumps",
"(",
"res",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"elif",
"args",
".",
"action",
"==",
"'delete'",
":",
"# delete a file",
"res",
"=",
"file_delete",
"(",
"blockchain_id",
",",
"data_name",
",",
"config_path",
"=",
"config_path",
",",
"wallet_keys",
"=",
"wallet",
")",
"if",
"'error'",
"in",
"res",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"json",
".",
"dumps",
"(",
"res",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"print",
">>",
"sys",
".",
"stderr",
",",
"json",
".",
"dumps",
"(",
"{",
"'status'",
":",
"True",
"}",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] | Entry point for the CLI interface | [
"Entry",
"point",
"for",
"the",
"CLI",
"interface"
] | python | train |
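The parser above wires up init/reset/get/put/delete subcommands. A hedged sketch of driving that entry point programmatically; the argv values are invented placeholders, and the call to main() is left commented because it needs a configured wallet and registered blockchain ID:

```python
import sys

# Invented example arguments for the 'put' subcommand defined above.
sys.argv = [
    "blockstack-file", "put",
    "--blockchain_id", "alice.id", "--hostname", "laptop",
    "notes.txt", "shared-notes",
]
# main()  # would parse action='put', input_path='notes.txt', data_name='shared-notes'
```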
rameshg87/pyremotevbox | pyremotevbox/ZSI/TCtimes.py | https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/TCtimes.py#L79-L135 | def _dict_to_tuple(d):
'''Convert a dictionary to a time tuple. Depends on key values in the
regexp pattern!
'''
# TODO: Adding a ms field to struct_time tuples is problematic
# since they don't have this field. Should use datetime
# which has a microseconds field, else no ms.. When mapping struct_time
# to gDateTime the last 3 fields are irrelevant, here using dummy values to make
# everything happy.
#
retval = _niltime[:]
for k,i in ( ('Y', 0), ('M', 1), ('D', 2), ('h', 3), ('m', 4), ):
v = d.get(k)
if v: retval[i] = int(v)
v = d.get('s')
if v:
msec,sec = _modf(float(v))
retval[6],retval[5] = int(round(msec*1000)), int(sec)
v = d.get('tz')
if v and v != 'Z':
h,m = map(int, v.split(':'))
# check for time zone offset, if within the same timezone,
# ignore offset specific calculations
offset=_localtimezone().utcoffset(_datetime.now())
local_offset_hour = offset.seconds/3600
local_offset_min = (offset.seconds%3600)%60
if local_offset_hour > 12:
local_offset_hour -= 24
if local_offset_hour != h or local_offset_min != m:
if h<0:
#TODO: why is this set to server
#foff = _fixedoffset(-((abs(h)*60+m)),"server")
foff = _fixedoffset(-((abs(h)*60+m)))
else:
#TODO: why is this set to server
#foff = _fixedoffset((abs(h)*60+m),"server")
foff = _fixedoffset((abs(h)*60+m))
dt = _datetime(retval[0],retval[1],retval[2],retval[3],retval[4],
retval[5],0,foff)
# update dict with calculated timezone
localdt=dt.astimezone(_localtimezone())
retval[0] = localdt.year
retval[1] = localdt.month
retval[2] = localdt.day
retval[3] = localdt.hour
retval[4] = localdt.minute
retval[5] = localdt.second
if d.get('neg', 0):
retval[0:5] = map(operator.__neg__, retval[0:5])
return tuple(retval) | [
"def",
"_dict_to_tuple",
"(",
"d",
")",
":",
"# TODO: Adding a ms field to struct_time tuples is problematic ",
"# since they don't have this field. Should use datetime",
"# which has a microseconds field, else no ms.. When mapping struct_time ",
"# to gDateTime the last 3 fields are irrelevant, here using dummy values to make",
"# everything happy.",
"# ",
"retval",
"=",
"_niltime",
"[",
":",
"]",
"for",
"k",
",",
"i",
"in",
"(",
"(",
"'Y'",
",",
"0",
")",
",",
"(",
"'M'",
",",
"1",
")",
",",
"(",
"'D'",
",",
"2",
")",
",",
"(",
"'h'",
",",
"3",
")",
",",
"(",
"'m'",
",",
"4",
")",
",",
")",
":",
"v",
"=",
"d",
".",
"get",
"(",
"k",
")",
"if",
"v",
":",
"retval",
"[",
"i",
"]",
"=",
"int",
"(",
"v",
")",
"v",
"=",
"d",
".",
"get",
"(",
"'s'",
")",
"if",
"v",
":",
"msec",
",",
"sec",
"=",
"_modf",
"(",
"float",
"(",
"v",
")",
")",
"retval",
"[",
"6",
"]",
",",
"retval",
"[",
"5",
"]",
"=",
"int",
"(",
"round",
"(",
"msec",
"*",
"1000",
")",
")",
",",
"int",
"(",
"sec",
")",
"v",
"=",
"d",
".",
"get",
"(",
"'tz'",
")",
"if",
"v",
"and",
"v",
"!=",
"'Z'",
":",
"h",
",",
"m",
"=",
"map",
"(",
"int",
",",
"v",
".",
"split",
"(",
"':'",
")",
")",
"# check for time zone offset, if within the same timezone, ",
"# ignore offset specific calculations",
"offset",
"=",
"_localtimezone",
"(",
")",
".",
"utcoffset",
"(",
"_datetime",
".",
"now",
"(",
")",
")",
"local_offset_hour",
"=",
"offset",
".",
"seconds",
"/",
"3600",
"local_offset_min",
"=",
"(",
"offset",
".",
"seconds",
"%",
"3600",
")",
"%",
"60",
"if",
"local_offset_hour",
">",
"12",
":",
"local_offset_hour",
"-=",
"24",
"if",
"local_offset_hour",
"!=",
"h",
"or",
"local_offset_min",
"!=",
"m",
":",
"if",
"h",
"<",
"0",
":",
"#TODO: why is this set to server",
"#foff = _fixedoffset(-((abs(h)*60+m)),\"server\")",
"foff",
"=",
"_fixedoffset",
"(",
"-",
"(",
"(",
"abs",
"(",
"h",
")",
"*",
"60",
"+",
"m",
")",
")",
")",
"else",
":",
"#TODO: why is this set to server",
"#foff = _fixedoffset((abs(h)*60+m),\"server\")",
"foff",
"=",
"_fixedoffset",
"(",
"(",
"abs",
"(",
"h",
")",
"*",
"60",
"+",
"m",
")",
")",
"dt",
"=",
"_datetime",
"(",
"retval",
"[",
"0",
"]",
",",
"retval",
"[",
"1",
"]",
",",
"retval",
"[",
"2",
"]",
",",
"retval",
"[",
"3",
"]",
",",
"retval",
"[",
"4",
"]",
",",
"retval",
"[",
"5",
"]",
",",
"0",
",",
"foff",
")",
"# update dict with calculated timezone",
"localdt",
"=",
"dt",
".",
"astimezone",
"(",
"_localtimezone",
"(",
")",
")",
"retval",
"[",
"0",
"]",
"=",
"localdt",
".",
"year",
"retval",
"[",
"1",
"]",
"=",
"localdt",
".",
"month",
"retval",
"[",
"2",
"]",
"=",
"localdt",
".",
"day",
"retval",
"[",
"3",
"]",
"=",
"localdt",
".",
"hour",
"retval",
"[",
"4",
"]",
"=",
"localdt",
".",
"minute",
"retval",
"[",
"5",
"]",
"=",
"localdt",
".",
"second",
"if",
"d",
".",
"get",
"(",
"'neg'",
",",
"0",
")",
":",
"retval",
"[",
"0",
":",
"5",
"]",
"=",
"map",
"(",
"operator",
".",
"__neg__",
",",
"retval",
"[",
"0",
":",
"5",
"]",
")",
"return",
"tuple",
"(",
"retval",
")"
] | Convert a dictionary to a time tuple. Depends on key values in the
regexp pattern! | [
"Convert",
"a",
"dictionary",
"to",
"a",
"time",
"tuple",
".",
"Depends",
"on",
"key",
"values",
"in",
"the",
"regexp",
"pattern!"
] | python | train |
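A small worked example of the seconds handling in the function above: `math.modf` splits the fractional part off the parsed seconds string, and the two parts fill the millisecond and whole-second slots of the time tuple. This sketches only that step, not the whole conversion:

```python
from math import modf

msec, sec = modf(float("45.5"))           # seconds value taken from the parsed dict
print(int(round(msec * 1000)), int(sec))  # -> 500 45
```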
scanny/python-pptx | pptx/oxml/chart/chart.py | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/chart/chart.py#L220-L229 | def next_order(self):
"""
Return the next available `c:ser/c:order` value within the scope of
this chart, the maximum order value found on existing series,
incremented by one.
"""
order_vals = [s.order.val for s in self.sers]
if not order_vals:
return 0
return max(order_vals)+1 | [
"def",
"next_order",
"(",
"self",
")",
":",
"order_vals",
"=",
"[",
"s",
".",
"order",
".",
"val",
"for",
"s",
"in",
"self",
".",
"sers",
"]",
"if",
"not",
"order_vals",
":",
"return",
"0",
"return",
"max",
"(",
"order_vals",
")",
"+",
"1"
] | Return the next available `c:ser/c:order` value within the scope of
this chart, the maximum order value found on existing series,
incremented by one. | [
"Return",
"the",
"next",
"available",
"c",
":",
"ser",
"/",
"c",
":",
"order",
"value",
"within",
"the",
"scope",
"of",
"this",
"chart",
"the",
"maximum",
"order",
"value",
"found",
"on",
"existing",
"series",
"incremented",
"by",
"one",
"."
] | python | train |
MagicStack/asyncpg | asyncpg/cursor.py | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/cursor.py#L241-L260 | async def forward(self, n, *, timeout=None) -> int:
r"""Skip over the next *n* rows.
:param float timeout: Optional timeout value in seconds.
:return: A number of rows actually skipped over (<= *n*).
"""
self._check_ready()
if n <= 0:
raise exceptions.InterfaceError('n must be greater than zero')
protocol = self._connection._protocol
status = await protocol.query('MOVE FORWARD {:d} {}'.format(
n, self._portal_name), timeout)
advanced = int(status.split()[1])
if advanced < n:
self._exhausted = True
return advanced | [
"async",
"def",
"forward",
"(",
"self",
",",
"n",
",",
"*",
",",
"timeout",
"=",
"None",
")",
"->",
"int",
":",
"self",
".",
"_check_ready",
"(",
")",
"if",
"n",
"<=",
"0",
":",
"raise",
"exceptions",
".",
"InterfaceError",
"(",
"'n must be greater than zero'",
")",
"protocol",
"=",
"self",
".",
"_connection",
".",
"_protocol",
"status",
"=",
"await",
"protocol",
".",
"query",
"(",
"'MOVE FORWARD {:d} {}'",
".",
"format",
"(",
"n",
",",
"self",
".",
"_portal_name",
")",
",",
"timeout",
")",
"advanced",
"=",
"int",
"(",
"status",
".",
"split",
"(",
")",
"[",
"1",
"]",
")",
"if",
"advanced",
"<",
"n",
":",
"self",
".",
"_exhausted",
"=",
"True",
"return",
"advanced"
] | r"""Skip over the next *n* rows.
:param float timeout: Optional timeout value in seconds.
:return: A number of rows actually skipped over (<= *n*). | [
"r",
"Skip",
"over",
"the",
"next",
"*",
"n",
"*",
"rows",
"."
] | python | train |
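A hedged usage sketch for the coroutine above. Only the `forward(n)` call reflects the documented signature; the DSN, query, and surrounding connection handling are assumptions, and a running PostgreSQL server would be required:

```python
import asyncio
import asyncpg

async def skip_some_rows():
    conn = await asyncpg.connect("postgresql://localhost/test")  # assumed DSN
    async with conn.transaction():
        cur = await conn.cursor("SELECT generate_series(1, 1000)")
        skipped = await cur.forward(100)   # skip over the first 100 rows
        row = await cur.fetchrow()         # the 101st row
        print(skipped, row)
    await conn.close()

# asyncio.run(skip_some_rows())
```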
jfilter/deep-plots | deep_plots/wrangle.py | https://github.com/jfilter/deep-plots/blob/8b0af5c1e44336068c2f8c883ffa158bbb34ba5e/deep_plots/wrangle.py#L27-L37 | def from_keras_log(csv_path, output_dir_path, **kwargs):
"""Plot accuracy and loss from a Keras CSV log.
Args:
csv_path: The path to the CSV log with the actual data.
output_dir_path: The path to the directory where the resulting plots
should end up.
"""
# automatically get separator by using Python's CSV parser
data = pd.read_csv(csv_path, sep=None, engine='python')
_from_keras_log_format(data, output_dir_path=output_dir_path, **kwargs) | [
"def",
"from_keras_log",
"(",
"csv_path",
",",
"output_dir_path",
",",
"*",
"*",
"kwargs",
")",
":",
"# automatically get seperator by using Python's CSV parser",
"data",
"=",
"pd",
".",
"read_csv",
"(",
"csv_path",
",",
"sep",
"=",
"None",
",",
"engine",
"=",
"'python'",
")",
"_from_keras_log_format",
"(",
"data",
",",
"output_dir_path",
"=",
"output_dir_path",
",",
"*",
"*",
"kwargs",
")"
] | Plot accuracy and loss from a Keras CSV log.
Args:
csv_path: The path to the CSV log with the actual data.
output_dir_path: The path to the directory where the resulting plots
should end up. | [
"Plot",
"accuracy",
"and",
"loss",
"from",
"a",
"Keras",
"CSV",
"log",
"."
] | python | train |
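A hedged usage sketch of the documented signature above; the import path mirrors the file path in this record, and the CSV and output directory names are placeholders (the CSV would normally come from Keras' CSVLogger callback):

```python
from deep_plots.wrangle import from_keras_log  # module path taken from this record

from_keras_log("training_log.csv", "plots/")   # placeholder file and directory names
```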
slightlynybbled/tk_tools | tk_tools/canvas.py | https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/canvas.py#L597-L615 | def to_yellow(self, on: bool=False):
"""
Change the LED to yellow (on or off)
:param on: True or False
:return: None
"""
self._on = on
if on:
self._load_new(led_yellow_on)
if self._toggle_on_click:
self._canvas.bind('<Button-1>',
lambda x: self.to_yellow(False))
else:
self._load_new(led_yellow)
if self._toggle_on_click:
self._canvas.bind('<Button-1>',
lambda x: self.to_yellow(True)) | [
"def",
"to_yellow",
"(",
"self",
",",
"on",
":",
"bool",
"=",
"False",
")",
":",
"self",
".",
"_on",
"=",
"on",
"if",
"on",
":",
"self",
".",
"_load_new",
"(",
"led_yellow_on",
")",
"if",
"self",
".",
"_toggle_on_click",
":",
"self",
".",
"_canvas",
".",
"bind",
"(",
"'<Button-1>'",
",",
"lambda",
"x",
":",
"self",
".",
"to_yellow",
"(",
"False",
")",
")",
"else",
":",
"self",
".",
"_load_new",
"(",
"led_yellow",
")",
"if",
"self",
".",
"_toggle_on_click",
":",
"self",
".",
"_canvas",
".",
"bind",
"(",
"'<Button-1>'",
",",
"lambda",
"x",
":",
"self",
".",
"to_yellow",
"(",
"True",
")",
")"
] | Change the LED to yellow (on or off)
:param on: True or False
:return: None | [
"Change",
"the",
"LED",
"to",
"yellow",
"(",
"on",
"or",
"off",
")",
":",
"param",
"on",
":",
"True",
"or",
"False",
":",
"return",
":",
"None"
] | python | train |
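A hedged sketch of driving the method above from a small Tk app; the `Led` constructor arguments are assumptions, only `to_yellow()` comes from the record:

```python
import tkinter as tk
import tk_tools

root = tk.Tk()
led = tk_tools.Led(root, size=50)  # assumed constructor arguments
led.grid()
led.to_yellow(on=True)             # light the LED yellow
# root.mainloop()
```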
cltl/KafNafParserPy | KafNafParserPy/feature_extractor/constituency.py | https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/feature_extractor/constituency.py#L137-L173 | def get_path_from_to(self,from_tid, to_tid):
"""
This function returns the path (in terms of phrase types) from one term to another
@type from_tid: string
@param from_tid: one term id
@type to_tid: string
@param to_tid: another term id
@rtype: list
@return: the path, list of phrase types
"""
shortest_subsumer = self.get_least_common_subsumer(from_tid, to_tid)
#print 'From:',self.naf.get_term(from_tid).get_lemma()
#print 'To:',self.naf.get_term(to_tid).get_lemma()
termid_from = self.terminal_for_term.get(from_tid)
termid_to = self.terminal_for_term.get(to_tid)
path_from = self.paths_for_terminal[termid_from][0]
path_to = self.paths_for_terminal[termid_to][0]
if shortest_subsumer is None:
return None
complete_path = []
for node in path_from:
complete_path.append(node)
if node == shortest_subsumer: break
begin=False
for node in path_to[-1::-1]:
if begin:
complete_path.append(node)
if node==shortest_subsumer:
begin=True
labels = [self.label_for_nonter[nonter] for nonter in complete_path]
return labels | [
"def",
"get_path_from_to",
"(",
"self",
",",
"from_tid",
",",
"to_tid",
")",
":",
"shortest_subsumer",
"=",
"self",
".",
"get_least_common_subsumer",
"(",
"from_tid",
",",
"to_tid",
")",
"#print 'From:',self.naf.get_term(from_tid).get_lemma()",
"#print 'To:',self.naf.get_term(to_tid).get_lemma()",
"termid_from",
"=",
"self",
".",
"terminal_for_term",
".",
"get",
"(",
"from_tid",
")",
"termid_to",
"=",
"self",
".",
"terminal_for_term",
".",
"get",
"(",
"to_tid",
")",
"path_from",
"=",
"self",
".",
"paths_for_terminal",
"[",
"termid_from",
"]",
"[",
"0",
"]",
"path_to",
"=",
"self",
".",
"paths_for_terminal",
"[",
"termid_to",
"]",
"[",
"0",
"]",
"if",
"shortest_subsumer",
"is",
"None",
":",
"return",
"None",
"complete_path",
"=",
"[",
"]",
"for",
"node",
"in",
"path_from",
":",
"complete_path",
".",
"append",
"(",
"node",
")",
"if",
"node",
"==",
"shortest_subsumer",
":",
"break",
"begin",
"=",
"False",
"for",
"node",
"in",
"path_to",
"[",
"-",
"1",
":",
":",
"-",
"1",
"]",
":",
"if",
"begin",
":",
"complete_path",
".",
"append",
"(",
"node",
")",
"if",
"node",
"==",
"shortest_subsumer",
":",
"begin",
"=",
"True",
"labels",
"=",
"[",
"self",
".",
"label_for_nonter",
"[",
"nonter",
"]",
"for",
"nonter",
"in",
"complete_path",
"]",
"return",
"labels"
] | This function returns the path (in terms of phrase types) from one term to another
@type from_tid: string
@param from_tid: one term id
@type to_tid: string
@param to_tid: another term id
@rtype: list
@return: the path, list of phrase types | [
"This",
"function",
"returns",
"the",
"path",
"(",
"in",
"terms",
"of",
"phrase",
"types",
")",
"from",
"one",
"term",
"to",
"another"
] | python | train |
OLC-Bioinformatics/sipprverse | genesippr_validation.py | https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/genesippr_validation.py#L47-L89 | def sequence_prep(self):
"""
Create metadata objects for all PacBio assembly FASTA files in the sequencepath.
Create individual subdirectories for each sample.
Relative symlink the original FASTA file to the appropriate subdirectory
"""
# Create a sorted list of all the FASTA files in the sequence path
strains = sorted(glob(os.path.join(self.fastapath, '*.fa*'.format(self.fastapath))))
for sample in strains:
# Create the object
metadata = MetadataObject()
# Set the sample name to be the file name of the sequence by removing the path and file extension
sample_name = os.path.splitext(os.path.basename(sample))[0]
if sample_name in self.strainset:
# Extract the OLNID from the dictionary using the SEQID
samplename = self.straindict[sample_name]
# samplename = sample_name
# Set and create the output directory
outputdir = os.path.join(self.path, samplename)
make_path(outputdir)
# Set the name of the JSON file
json_metadata = os.path.join(outputdir, '{name}.json'.format(name=samplename))
if not os.path.isfile(json_metadata):
# Create the name and output directory attributes
metadata.name = samplename
metadata.seqid = sample_name
metadata.outputdir = outputdir
metadata.jsonfile = json_metadata
# Set the name of the FASTA file to use in the analyses
metadata.bestassemblyfile = os.path.join(metadata.outputdir,
'{name}.fasta'.format(name=metadata.name))
# Symlink the original file to the output directory
relative_symlink(sample, outputdir, '{sn}.fasta'.format(sn=metadata.name))
# Associate the corresponding FASTQ files with the assembly
metadata.fastqfiles = sorted(glob(os.path.join(self.fastqpath,
'{name}*.gz'.format(name=metadata.name))))
metadata.forward_fastq, metadata.reverse_fastq = metadata.fastqfiles
# Write the object to file
self.write_json(metadata)
else:
metadata = self.read_json(json_metadata)
# Add the metadata object to the list of objects
self.metadata.append(metadata) | [
"def",
"sequence_prep",
"(",
"self",
")",
":",
"# Create a sorted list of all the FASTA files in the sequence path",
"strains",
"=",
"sorted",
"(",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"fastapath",
",",
"'*.fa*'",
".",
"format",
"(",
"self",
".",
"fastapath",
")",
")",
")",
")",
"for",
"sample",
"in",
"strains",
":",
"# Create the object",
"metadata",
"=",
"MetadataObject",
"(",
")",
"# Set the sample name to be the file name of the sequence by removing the path and file extension",
"sample_name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"sample",
")",
")",
"[",
"0",
"]",
"if",
"sample_name",
"in",
"self",
".",
"strainset",
":",
"# Extract the OLNID from the dictionary using the SEQID",
"samplename",
"=",
"self",
".",
"straindict",
"[",
"sample_name",
"]",
"# samplename = sample_name",
"# Set and create the output directory",
"outputdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"samplename",
")",
"make_path",
"(",
"outputdir",
")",
"# Set the name of the JSON file",
"json_metadata",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outputdir",
",",
"'{name}.json'",
".",
"format",
"(",
"name",
"=",
"samplename",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"json_metadata",
")",
":",
"# Create the name and output directory attributes",
"metadata",
".",
"name",
"=",
"samplename",
"metadata",
".",
"seqid",
"=",
"sample_name",
"metadata",
".",
"outputdir",
"=",
"outputdir",
"metadata",
".",
"jsonfile",
"=",
"json_metadata",
"# Set the name of the FASTA file to use in the analyses",
"metadata",
".",
"bestassemblyfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"metadata",
".",
"outputdir",
",",
"'{name}.fasta'",
".",
"format",
"(",
"name",
"=",
"metadata",
".",
"name",
")",
")",
"# Symlink the original file to the output directory",
"relative_symlink",
"(",
"sample",
",",
"outputdir",
",",
"'{sn}.fasta'",
".",
"format",
"(",
"sn",
"=",
"metadata",
".",
"name",
")",
")",
"# Associate the corresponding FASTQ files with the assembly",
"metadata",
".",
"fastqfiles",
"=",
"sorted",
"(",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"fastqpath",
",",
"'{name}*.gz'",
".",
"format",
"(",
"name",
"=",
"metadata",
".",
"name",
")",
")",
")",
")",
"metadata",
".",
"forward_fastq",
",",
"metadata",
".",
"reverse_fastq",
"=",
"metadata",
".",
"fastqfiles",
"# Write the object to file",
"self",
".",
"write_json",
"(",
"metadata",
")",
"else",
":",
"metadata",
"=",
"self",
".",
"read_json",
"(",
"json_metadata",
")",
"# Add the metadata object to the list of objects",
"self",
".",
"metadata",
".",
"append",
"(",
"metadata",
")"
] | Create metadata objects for all PacBio assembly FASTA files in the sequencepath.
Create individual subdirectories for each sample.
Relative symlink the original FASTA file to the appropriate subdirectory | [
"Create",
"metadata",
"objects",
"for",
"all",
"PacBio",
"assembly",
"FASTA",
"files",
"in",
"the",
"sequencepath",
".",
"Create",
"individual",
"subdirectories",
"for",
"each",
"sample",
".",
"Relative",
"symlink",
"the",
"original",
"FASTA",
"file",
"to",
"the",
"appropriate",
"subdirectory"
] | python | train |
lalinsky/python-phoenixdb | phoenixdb/connection.py | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/connection.py#L75-L91 | def close(self):
"""Closes the connection.
No further operations are allowed, either on the connection or any
of its cursors, once the connection is closed.
If the connection is used in a ``with`` statement, this method will
be automatically called at the end of the ``with`` block.
"""
if self._closed:
raise ProgrammingError('the connection is already closed')
for cursor_ref in self._cursors:
cursor = cursor_ref()
if cursor is not None and not cursor._closed:
cursor.close()
self._client.close_connection(self._id)
self._client.close()
self._closed = True | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_closed",
":",
"raise",
"ProgrammingError",
"(",
"'the connection is already closed'",
")",
"for",
"cursor_ref",
"in",
"self",
".",
"_cursors",
":",
"cursor",
"=",
"cursor_ref",
"(",
")",
"if",
"cursor",
"is",
"not",
"None",
"and",
"not",
"cursor",
".",
"_closed",
":",
"cursor",
".",
"close",
"(",
")",
"self",
".",
"_client",
".",
"close_connection",
"(",
"self",
".",
"_id",
")",
"self",
".",
"_client",
".",
"close",
"(",
")",
"self",
".",
"_closed",
"=",
"True"
] | Closes the connection.
No further operations are allowed, either on the connection or any
of its cursors, once the connection is closed.
If the connection is used in a ``with`` statement, this method will
be automatically called at the end of the ``with`` block. | [
"Closes",
"the",
"connection",
".",
"No",
"further",
"operations",
"are",
"allowed",
"either",
"on",
"the",
"connection",
"or",
"any",
"of",
"its",
"cursors",
"once",
"the",
"connection",
"is",
"closed",
"."
] | python | train |
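A hedged sketch of the ``with``-statement behaviour the docstring mentions: leaving the block calls `close()`, which also closes any open cursors. The connection URL and options are placeholders:

```python
import phoenixdb

with phoenixdb.connect("http://localhost:8765/", autocommit=True) as conn:
    cursor = conn.cursor()
    # ... execute queries with `cursor` here ...
# on exiting the block, conn.close() has run and the cursor is closed too
```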
ajyoon/blur | blur/markov/graph.py | https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/markov/graph.py#L400-L521 | def from_string(cls,
source,
distance_weights=None,
merge_same_words=False,
group_marker_opening='<<',
group_marker_closing='>>'):
"""
Read a string and derive a ``Graph`` from it.
Words and punctuation marks are made into nodes.
Punctuation marks are split into separate nodes unless they fall
between other non-punctuation marks. ``'hello, world'`` is split
into ``'hello'``, ``','``, and ``'world'``, while ``'who's there?'``
is split into ``"who's"``, ``'there'``, and ``'?'``.
To group arbitrary characters together into a single node
(e.g. to make ``'hello, world!'``), surround the
text in question with ``group_marker_opening`` and
``group_marker_closing``. With the default value, this
would look like ``'<<hello, world!>>'``. It is recommended that
the group markers not appear anywhere in the source text where they
aren't meant to act as such to prevent unexpected behavior.
The exact regex for extracting nodes is defined by: ::
expression = r'{0}(.+){1}|([^\w\s]+)\B|([\S]+\b)'.format(
''.join('\\' + c for c in group_marker_opening),
''.join('\\' + c for c in group_marker_closing)
)
Args:
source (str): the string to derive the graph from
distance_weights (dict): dict of relative indices corresponding
with word weights. For example, if a dict entry is ``1: 1000``
this means that every word is linked to the word which follows
it with a weight of 1000. ``-4: 350`` would mean that every
word is linked to the 4th word behind it with a weight of 350.
A key of ``0`` refers to the weight words get
pointing to themselves. Keys pointing beyond the edge of the
word list will wrap around the list.
The default value for ``distance_weights`` is ``{1: 1}``.
This means that each word gets equal weight to whatever
word follows it. Consequently, if this default value is
used and ``merge_same_words`` is ``False``, the resulting
graph behavior will simply move linearly through the
source, wrapping at the end to the beginning.
merge_same_words (bool): if nodes which have the same value should
be merged or not.
group_marker_opening (str): The string used to mark the beginning
of word groups.
group_marker_closing (str): The string used to mark the end
of word groups. It is strongly recommended that this be
different than ``group_marker_opening`` to prevent unexpected
behavior with the regex pattern.
Returns: Graph
Example:
>>> graph = Graph.from_string('i have nothing to say and '
... 'i am saying it and that is poetry.')
>>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP
'using chance algorithmic in algorithmic art easier blur'
"""
if distance_weights is None:
distance_weights = {1: 1}
# Convert distance_weights to a sorted list of tuples
# To make output node list order more predictable
sorted_weights_list = sorted(distance_weights.items(),
key=lambda i: i[0])
# regex that matches:
# * Anything surrounded by
# group_marker_opening and group_marker_closing,
# * Groups of punctuation marks followed by whitespace
# * Any continuous group of non-whitespace characters
# followed by whitespace
expression = r'{0}(.+){1}|([^\w\s]+)\B|([\S]+\b)'.format(
''.join('\\' + c for c in group_marker_opening),
''.join('\\' + c for c in group_marker_closing)
)
matches = re.findall(expression, source)
# Un-tuple matches since we are only using groups to strip brackets
# Is there a better way to do this?
words = [next(t for t in match if t) for match in matches]
if merge_same_words:
# Ensure a 1:1 correspondence between words and nodes,
# and that all links point to these nodes as well
# Create nodes for every unique word
temp_node_list = []
for word in words:
if word not in (n.value for n in temp_node_list):
temp_node_list.append(Node(word))
# Loop through words, attaching links to nodes which correspond
# to the current word. Ensure links also point to valid
# corresponding nodes in the node list.
for i, word in enumerate(words):
matching_node = next(
(n for n in temp_node_list if n.value == word))
for key, weight in sorted_weights_list:
# Wrap the index of edge items
wrapped_index = (key + i) % len(words)
target_word = words[wrapped_index]
matching_target_node = next(
(n for n in temp_node_list
if n.value == target_word))
matching_node.add_link(matching_target_node, weight)
else:
# Create one node for every (not necessarily unique) word.
temp_node_list = [Node(word) for word in words]
for i, node in enumerate(temp_node_list):
for key, weight in sorted_weights_list:
# Wrap the index of edge items
wrapped_index = (key + i) % len(temp_node_list)
node.add_link(temp_node_list[wrapped_index], weight)
graph = cls()
graph.add_nodes(temp_node_list)
return graph | [
"def",
"from_string",
"(",
"cls",
",",
"source",
",",
"distance_weights",
"=",
"None",
",",
"merge_same_words",
"=",
"False",
",",
"group_marker_opening",
"=",
"'<<'",
",",
"group_marker_closing",
"=",
"'>>'",
")",
":",
"if",
"distance_weights",
"is",
"None",
":",
"distance_weights",
"=",
"{",
"1",
":",
"1",
"}",
"# Convert distance_weights to a sorted list of tuples",
"# To make output node list order more predictable",
"sorted_weights_list",
"=",
"sorted",
"(",
"distance_weights",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"i",
":",
"i",
"[",
"0",
"]",
")",
"# regex that matches:",
"# * Anything surrounded by",
"# group_marker_opening and group_marker_closing,",
"# * Groups of punctuation marks followed by whitespace",
"# * Any continuous group of non-whitespace characters",
"# followed by whitespace",
"expression",
"=",
"r'{0}(.+){1}|([^\\w\\s]+)\\B|([\\S]+\\b)'",
".",
"format",
"(",
"''",
".",
"join",
"(",
"'\\\\'",
"+",
"c",
"for",
"c",
"in",
"group_marker_opening",
")",
",",
"''",
".",
"join",
"(",
"'\\\\'",
"+",
"c",
"for",
"c",
"in",
"group_marker_closing",
")",
")",
"matches",
"=",
"re",
".",
"findall",
"(",
"expression",
",",
"source",
")",
"# Un-tuple matches since we are only using groups to strip brackets",
"# Is there a better way to do this?",
"words",
"=",
"[",
"next",
"(",
"t",
"for",
"t",
"in",
"match",
"if",
"t",
")",
"for",
"match",
"in",
"matches",
"]",
"if",
"merge_same_words",
":",
"# Ensure a 1:1 correspondence between words and nodes,",
"# and that all links point to these nodes as well",
"# Create nodes for every unique word",
"temp_node_list",
"=",
"[",
"]",
"for",
"word",
"in",
"words",
":",
"if",
"word",
"not",
"in",
"(",
"n",
".",
"value",
"for",
"n",
"in",
"temp_node_list",
")",
":",
"temp_node_list",
".",
"append",
"(",
"Node",
"(",
"word",
")",
")",
"# Loop through words, attaching links to nodes which correspond",
"# to the current word. Ensure links also point to valid",
"# corresponding nodes in the node list.",
"for",
"i",
",",
"word",
"in",
"enumerate",
"(",
"words",
")",
":",
"matching_node",
"=",
"next",
"(",
"(",
"n",
"for",
"n",
"in",
"temp_node_list",
"if",
"n",
".",
"value",
"==",
"word",
")",
")",
"for",
"key",
",",
"weight",
"in",
"sorted_weights_list",
":",
"# Wrap the index of edge items",
"wrapped_index",
"=",
"(",
"key",
"+",
"i",
")",
"%",
"len",
"(",
"words",
")",
"target_word",
"=",
"words",
"[",
"wrapped_index",
"]",
"matching_target_node",
"=",
"next",
"(",
"(",
"n",
"for",
"n",
"in",
"temp_node_list",
"if",
"n",
".",
"value",
"==",
"target_word",
")",
")",
"matching_node",
".",
"add_link",
"(",
"matching_target_node",
",",
"weight",
")",
"else",
":",
"# Create one node for every (not necessarily unique) word.",
"temp_node_list",
"=",
"[",
"Node",
"(",
"word",
")",
"for",
"word",
"in",
"words",
"]",
"for",
"i",
",",
"node",
"in",
"enumerate",
"(",
"temp_node_list",
")",
":",
"for",
"key",
",",
"weight",
"in",
"sorted_weights_list",
":",
"# Wrap the index of edge items",
"wrapped_index",
"=",
"(",
"key",
"+",
"i",
")",
"%",
"len",
"(",
"temp_node_list",
")",
"node",
".",
"add_link",
"(",
"temp_node_list",
"[",
"wrapped_index",
"]",
",",
"weight",
")",
"graph",
"=",
"cls",
"(",
")",
"graph",
".",
"add_nodes",
"(",
"temp_node_list",
")",
"return",
"graph"
] | Read a string and derive a ``Graph`` from it.
Words and punctuation marks are made into nodes.
Punctuation marks are split into separate nodes unless they fall
between other non-punctuation marks. ``'hello, world'`` is split
into ``'hello'``, ``','``, and ``'world'``, while ``'who's there?'``
is split into ``"who's"``, ``'there'``, and ``'?'``.
To group arbitrary characters together into a single node
(e.g. to make ``'hello, world!'``), surround the
text in question with ``group_marker_opening`` and
``group_marker_closing``. With the default value, this
would look like ``'<<hello, world!>>'``. It is recommended that
the group markers not appear anywhere in the source text where they
aren't meant to act as such to prevent unexpected behavior.
The exact regex for extracting nodes is defined by: ::
expression = r'{0}(.+){1}|([^\w\s]+)\B|([\S]+\b)'.format(
''.join('\\' + c for c in group_marker_opening),
''.join('\\' + c for c in group_marker_closing)
)
Args:
source (str): the string to derive the graph from
distance_weights (dict): dict of relative indices corresponding
with word weights. For example, if a dict entry is ``1: 1000``
this means that every word is linked to the word which follows
it with a weight of 1000. ``-4: 350`` would mean that every
word is linked to the 4th word behind it with a weight of 350.
A key of ``0`` refers to the weight words get
pointing to themselves. Keys pointing beyond the edge of the
word list will wrap around the list.
The default value for ``distance_weights`` is ``{1: 1}``.
This means that each word gets equal weight to whatever
word follows it. Consequently, if this default value is
used and ``merge_same_words`` is ``False``, the resulting
graph behavior will simply move linearly through the
source, wrapping at the end to the beginning.
merge_same_words (bool): if nodes which have the same value should
be merged or not.
group_marker_opening (str): The string used to mark the beginning
of word groups.
group_marker_closing (str): The string used to mark the end
of word groups. It is strongly recommended that this be
different than ``group_marker_opening`` to prevent unexpected
behavior with the regex pattern.
Returns: Graph
Example:
>>> graph = Graph.from_string('i have nothing to say and '
... 'i am saying it and that is poetry.')
>>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP
'using chance algorithmic in algorithmic art easier blur' | [
"Read",
"a",
"string",
"and",
"derive",
"of",
"Graph",
"from",
"it",
"."
] | python | train |
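A short sketch exercising the `distance_weights` and `merge_same_words` parameters explained above; the import path follows this record's file path and the weight values are arbitrary illustration choices:

```python
from blur.markov.graph import Graph

graph = Graph.from_string(
    "the cat sat on the mat",
    distance_weights={1: 10, -1: 1},  # strongly favour the following word
    merge_same_words=True,            # one node per unique word
)
print(' '.join(graph.pick().value for _ in range(6)))
```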
celiao/tmdbsimple | tmdbsimple/search.py | https://github.com/celiao/tmdbsimple/blob/ff17893110c99771d6398a62c35d36dd9735f4b9/tmdbsimple/search.py#L149-L164 | def keyword(self, **kwargs):
"""
Search for keywords by name.
Args:
query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_path('keyword')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | [
"def",
"keyword",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"path",
"=",
"self",
".",
"_get_path",
"(",
"'keyword'",
")",
"response",
"=",
"self",
".",
"_GET",
"(",
"path",
",",
"kwargs",
")",
"self",
".",
"_set_attrs_to_values",
"(",
"response",
")",
"return",
"response"
] | Search for keywords by name.
Args:
query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
A dict representation of the JSON returned from the API. | [
"Search",
"for",
"keywords",
"by",
"name",
"."
] | python | test |
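A hedged usage sketch of the documented method; the API key is a placeholder, and the 'id'/'name' fields on the result items are an assumption about the TMDb response rather than something stated in the record:

```python
import tmdbsimple as tmdb

tmdb.API_KEY = "YOUR_TMDB_API_KEY"   # placeholder

search = tmdb.Search()
response = search.keyword(query="time travel", page=1)
for item in response.get("results", []):
    print(item.get("id"), item.get("name"))
```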
PonteIneptique/flask-github-proxy | flask_github_proxy/__init__.py | https://github.com/PonteIneptique/flask-github-proxy/blob/f0a60639342f7c0834360dc12a099bfc3a06d939/flask_github_proxy/__init__.py#L210-L240 | def put(self, file):
""" Create a new file on github
:param file: File to create
:return: File or self.ProxyError
"""
input_ = {
"message": file.logs,
"author": file.author.dict(),
"content": file.base64,
"branch": file.branch
}
uri = "{api}/repos/{origin}/contents/{path}".format(
api=self.github_api_url,
origin=self.origin,
path=file.path
)
data = self.request("PUT", uri, data=input_)
if data.status_code == 201:
file.pushed = True
return file
else:
decoded_data = json.loads(data.content.decode("utf-8"))
return self.ProxyError(
data.status_code, (decoded_data, "message"),
step="put", context={
"uri": uri,
"params": input_
}
) | [
"def",
"put",
"(",
"self",
",",
"file",
")",
":",
"input_",
"=",
"{",
"\"message\"",
":",
"file",
".",
"logs",
",",
"\"author\"",
":",
"file",
".",
"author",
".",
"dict",
"(",
")",
",",
"\"content\"",
":",
"file",
".",
"base64",
",",
"\"branch\"",
":",
"file",
".",
"branch",
"}",
"uri",
"=",
"\"{api}/repos/{origin}/contents/{path}\"",
".",
"format",
"(",
"api",
"=",
"self",
".",
"github_api_url",
",",
"origin",
"=",
"self",
".",
"origin",
",",
"path",
"=",
"file",
".",
"path",
")",
"data",
"=",
"self",
".",
"request",
"(",
"\"PUT\"",
",",
"uri",
",",
"data",
"=",
"input_",
")",
"if",
"data",
".",
"status_code",
"==",
"201",
":",
"file",
".",
"pushed",
"=",
"True",
"return",
"file",
"else",
":",
"decoded_data",
"=",
"json",
".",
"loads",
"(",
"data",
".",
"content",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"return",
"self",
".",
"ProxyError",
"(",
"data",
".",
"status_code",
",",
"(",
"decoded_data",
",",
"\"message\"",
")",
",",
"step",
"=",
"\"put\"",
",",
"context",
"=",
"{",
"\"uri\"",
":",
"uri",
",",
"\"params\"",
":",
"input_",
"}",
")"
] | Create a new file on github
:param file: File to create
:return: File or self.ProxyError | [
"Create",
"a",
"new",
"file",
"on",
"github"
] | python | train |
feliphebueno/Rinzler | rinzler/core/route_mapping.py | https://github.com/feliphebueno/Rinzler/blob/7f6d5445b5662cba2e8938bb82c7f3ef94e5ded8/rinzler/core/route_mapping.py#L16-L22 | def get(self, route: str(), callback: object()):
"""
Binds a GET route with the given callback
:rtype: object
"""
self.__set_route('get', {route: callback})
return RouteMapping | [
"def",
"get",
"(",
"self",
",",
"route",
":",
"str",
"(",
")",
",",
"callback",
":",
"object",
"(",
")",
")",
":",
"self",
".",
"__set_route",
"(",
"'get'",
",",
"{",
"route",
":",
"callback",
"}",
")",
"return",
"RouteMapping"
] | Binds a GET route with the given callback
:rtype: object | [
"Binds",
"a",
"GET",
"route",
"with",
"the",
"given",
"callback",
":",
"rtype",
":",
"object"
] | python | train |
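A hedged sketch of binding a route with the mapper above; the import path follows this record's file path, while the no-argument constructor and the callback signature are assumptions:

```python
from rinzler.core.route_mapping import RouteMapping

router = RouteMapping()                               # assumed constructor
router.get("ping/", lambda *args, **kwargs: "pong")   # callback signature is assumed
```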
projecthamster/hamster | src/hamster/lib/graphics.py | https://github.com/projecthamster/hamster/blob/ca5254eff53172796ddafc72226c394ed1858245/src/hamster/lib/graphics.py#L2024-L2037 | def all_mouse_sprites(self):
"""Returns flat list of the sprite tree for simplified iteration"""
def all_recursive(sprites):
if not sprites:
return
for sprite in sprites:
if sprite.visible:
yield sprite
for child in all_recursive(sprite.get_mouse_sprites()):
yield child
return all_recursive(self.get_mouse_sprites()) | [
"def",
"all_mouse_sprites",
"(",
"self",
")",
":",
"def",
"all_recursive",
"(",
"sprites",
")",
":",
"if",
"not",
"sprites",
":",
"return",
"for",
"sprite",
"in",
"sprites",
":",
"if",
"sprite",
".",
"visible",
":",
"yield",
"sprite",
"for",
"child",
"in",
"all_recursive",
"(",
"sprite",
".",
"get_mouse_sprites",
"(",
")",
")",
":",
"yield",
"child",
"return",
"all_recursive",
"(",
"self",
".",
"get_mouse_sprites",
"(",
")",
")"
] | Returns flat list of the sprite tree for simplified iteration | [
"Returns",
"flat",
"list",
"of",
"the",
"sprite",
"tree",
"for",
"simplified",
"iteration"
] | python | train |
sorgerlab/indra | indra/sources/biopax/pathway_commons_client.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/pathway_commons_client.py#L123-L153 | def owl_to_model(fname):
"""Return a BioPAX model object from an OWL file.
Parameters
----------
fname : str
The name of the OWL file containing the model.
Returns
-------
biopax_model : org.biopax.paxtools.model.Model
A BioPAX model object (java object).
"""
io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler')
io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3)
try:
file_is = autoclass('java.io.FileInputStream')(fname)
except JavaException:
logger.error('Could not open data file %s' % fname)
return
try:
biopax_model = io.convertFromOWL(file_is)
except JavaException as e:
logger.error('Could not convert data file %s to BioPax model' % fname)
logger.error(e)
return
file_is.close()
return biopax_model | [
"def",
"owl_to_model",
"(",
"fname",
")",
":",
"io_class",
"=",
"autoclass",
"(",
"'org.biopax.paxtools.io.SimpleIOHandler'",
")",
"io",
"=",
"io_class",
"(",
"autoclass",
"(",
"'org.biopax.paxtools.model.BioPAXLevel'",
")",
".",
"L3",
")",
"try",
":",
"file_is",
"=",
"autoclass",
"(",
"'java.io.FileInputStream'",
")",
"(",
"fname",
")",
"except",
"JavaException",
":",
"logger",
".",
"error",
"(",
"'Could not open data file %s'",
"%",
"fname",
")",
"return",
"try",
":",
"biopax_model",
"=",
"io",
".",
"convertFromOWL",
"(",
"file_is",
")",
"except",
"JavaException",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Could not convert data file %s to BioPax model'",
"%",
"fname",
")",
"logger",
".",
"error",
"(",
"e",
")",
"return",
"file_is",
".",
"close",
"(",
")",
"return",
"biopax_model"
] | Return a BioPAX model object from an OWL file.
Parameters
----------
fname : str
The name of the OWL file containing the model.
Returns
-------
biopax_model : org.biopax.paxtools.model.Model
A BioPAX model object (java object). | [
"Return",
"a",
"BioPAX",
"model",
"object",
"from",
"an",
"OWL",
"file",
"."
] | python | train |
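A hedged usage sketch for the reader above; the import path follows this record, the OWL filename is a placeholder, and a working JVM/paxtools setup is assumed since the function goes through autoclass:

```python
from indra.sources.biopax import pathway_commons_client as pcc

model = pcc.owl_to_model("pathway.owl")   # placeholder filename
if model is not None:
    print("Loaded BioPAX model:", model)
```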
oscarbranson/latools | latools/D_obj.py | https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L653-L702 | def filter_gradient_threshold(self, analyte, win, threshold, recalc=True):
"""
Apply gradient threshold filter.
Generates threshold filters for the given analytes above and below
the specified threshold.
Two filters are created with prefixes '_above' and '_below'.
'_above' keeps all the data above the threshold.
'_below' keeps all the data below the threshold.
i.e. to select data below the threshold value, you should turn the
'_above' filter off.
Parameters
----------
analyte : str
Description of `analyte`.
threshold : float
Description of `threshold`.
win : int
Window used to calculate gradients (n points)
recalc : bool
Whether or not to re-calculate the gradients.
Returns
-------
None
"""
params = locals()
del(params['self'])
# calculate absolute gradient
if recalc or not self.grads_calced:
self.grads = calc_grads(self.Time, self.focus,
[analyte], win)
self.grads_calced = True
below, above = filters.threshold(abs(self.grads[analyte]), threshold)
setn = self.filt.maxset + 1
self.filt.add(analyte + '_gthresh_below',
below,
'Keep gradient below {:.3e} '.format(threshold) + analyte,
params, setn=setn)
self.filt.add(analyte + '_gthresh_above',
above,
'Keep gradient above {:.3e} '.format(threshold) + analyte,
params, setn=setn) | [
"def",
"filter_gradient_threshold",
"(",
"self",
",",
"analyte",
",",
"win",
",",
"threshold",
",",
"recalc",
"=",
"True",
")",
":",
"params",
"=",
"locals",
"(",
")",
"del",
"(",
"params",
"[",
"'self'",
"]",
")",
"# calculate absolute gradient",
"if",
"recalc",
"or",
"not",
"self",
".",
"grads_calced",
":",
"self",
".",
"grads",
"=",
"calc_grads",
"(",
"self",
".",
"Time",
",",
"self",
".",
"focus",
",",
"[",
"analyte",
"]",
",",
"win",
")",
"self",
".",
"grads_calced",
"=",
"True",
"below",
",",
"above",
"=",
"filters",
".",
"threshold",
"(",
"abs",
"(",
"self",
".",
"grads",
"[",
"analyte",
"]",
")",
",",
"threshold",
")",
"setn",
"=",
"self",
".",
"filt",
".",
"maxset",
"+",
"1",
"self",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_gthresh_below'",
",",
"below",
",",
"'Keep gradient below {:.3e} '",
".",
"format",
"(",
"threshold",
")",
"+",
"analyte",
",",
"params",
",",
"setn",
"=",
"setn",
")",
"self",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_gthresh_above'",
",",
"above",
",",
"'Keep gradient above {:.3e} '",
".",
"format",
"(",
"threshold",
")",
"+",
"analyte",
",",
"params",
",",
"setn",
"=",
"setn",
")"
] | Apply gradient threshold filter.
Generates threshold filters for the given analytes above and below
the specified threshold.
Two filters are created with prefixes '_above' and '_below'.
'_above' keeps all the data above the threshold.
'_below' keeps all the data below the threshold.
i.e. to select data below the threshold value, you should turn the
'_above' filter off.
Parameters
----------
analyte : str
Description of `analyte`.
threshold : float
Description of `threshold`.
win : int
Window used to calculate gradients (n points)
recalc : bool
Whether or not to re-calculate the gradients.
Returns
-------
None | [
"Apply",
"gradient",
"threshold",
"filter",
"."
] | python | test |
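The record above creates paired '_gthresh_below' / '_gthresh_above' filters from the absolute gradient. A small runnable sketch of that below/above split on plain numbers; the exact boundary convention used by latools' `filters.threshold` is an assumption here:

```python
import numpy as np

grads = np.array([2e-5, 8e-5, 3e-4, 1e-3])
threshold = 1e-4
below = np.abs(grads) < threshold    # kept by the '_gthresh_below' filter
above = np.abs(grads) >= threshold   # kept by the '_gthresh_above' filter
print(below)   # [ True  True False False]
print(above)   # [False False  True  True]
```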
ejeschke/ginga | ginga/ImageView.py | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/ImageView.py#L1263-L1296 | def redraw_now(self, whence=0):
"""Redraw the displayed image.
Parameters
----------
whence
See :meth:`get_rgb_object`.
"""
try:
time_start = time.time()
self.redraw_data(whence=whence)
# finally update the window drawable from the offscreen surface
self.update_image()
time_done = time.time()
time_delta = time_start - self.time_last_redraw
time_elapsed = time_done - time_start
self.time_last_redraw = time_done
self.logger.debug(
"widget '%s' redraw (whence=%d) delta=%.4f elapsed=%.4f sec" % (
self.name, whence, time_delta, time_elapsed))
except Exception as e:
self.logger.error("Error redrawing image: %s" % (str(e)))
try:
# log traceback, if possible
(type, value, tb) = sys.exc_info()
tb_str = "".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception:
tb_str = "Traceback information unavailable."
self.logger.error(tb_str) | [
"def",
"redraw_now",
"(",
"self",
",",
"whence",
"=",
"0",
")",
":",
"try",
":",
"time_start",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"redraw_data",
"(",
"whence",
"=",
"whence",
")",
"# finally update the window drawable from the offscreen surface",
"self",
".",
"update_image",
"(",
")",
"time_done",
"=",
"time",
".",
"time",
"(",
")",
"time_delta",
"=",
"time_start",
"-",
"self",
".",
"time_last_redraw",
"time_elapsed",
"=",
"time_done",
"-",
"time_start",
"self",
".",
"time_last_redraw",
"=",
"time_done",
"self",
".",
"logger",
".",
"debug",
"(",
"\"widget '%s' redraw (whence=%d) delta=%.4f elapsed=%.4f sec\"",
"%",
"(",
"self",
".",
"name",
",",
"whence",
",",
"time_delta",
",",
"time_elapsed",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"Error redrawing image: %s\"",
"%",
"(",
"str",
"(",
"e",
")",
")",
")",
"try",
":",
"# log traceback, if possible",
"(",
"type",
",",
"value",
",",
"tb",
")",
"=",
"sys",
".",
"exc_info",
"(",
")",
"tb_str",
"=",
"\"\"",
".",
"join",
"(",
"traceback",
".",
"format_tb",
"(",
"tb",
")",
")",
"self",
".",
"logger",
".",
"error",
"(",
"\"Traceback:\\n%s\"",
"%",
"(",
"tb_str",
")",
")",
"except",
"Exception",
":",
"tb_str",
"=",
"\"Traceback information unavailable.\"",
"self",
".",
"logger",
".",
"error",
"(",
"tb_str",
")"
] | Redraw the displayed image.
Parameters
----------
whence
See :meth:`get_rgb_object`. | [
"Redraw",
"the",
"displayed",
"image",
"."
] | python | train |
sdispater/orator | orator/schema/grammars/grammar.py | https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/schema/grammars/grammar.py#L18-L41 | def compile_rename_column(self, blueprint, command, connection):
"""
Compile a rename column command.
:param blueprint: The blueprint
:type blueprint: Blueprint
:param command: The command
:type command: Fluent
:param connection: The connection
:type connection: orator.connections.Connection
:rtype: list
"""
schema = connection.get_schema_manager()
table = self.get_table_prefix() + blueprint.get_table()
column = connection.get_column(table, command.from_)
table_diff = self._get_renamed_diff(blueprint, command, column, schema)
return schema.get_database_platform().get_alter_table_sql(table_diff) | [
"def",
"compile_rename_column",
"(",
"self",
",",
"blueprint",
",",
"command",
",",
"connection",
")",
":",
"schema",
"=",
"connection",
".",
"get_schema_manager",
"(",
")",
"table",
"=",
"self",
".",
"get_table_prefix",
"(",
")",
"+",
"blueprint",
".",
"get_table",
"(",
")",
"column",
"=",
"connection",
".",
"get_column",
"(",
"table",
",",
"command",
".",
"from_",
")",
"table_diff",
"=",
"self",
".",
"_get_renamed_diff",
"(",
"blueprint",
",",
"command",
",",
"column",
",",
"schema",
")",
"return",
"schema",
".",
"get_database_platform",
"(",
")",
".",
"get_alter_table_sql",
"(",
"table_diff",
")"
] | Compile a rename column command.
:param blueprint: The blueprint
:type blueprint: Blueprint
:param command: The command
:type command: Fluent
:param connection: The connection
:type connection: orator.connections.Connection
:rtype: list | [
"Compile",
"a",
"rename",
"column",
"command",
"."
] | python | train |
odlgroup/odl | odl/operator/oputils.py | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/oputils.py#L24-L121 | def matrix_representation(op):
"""Return a matrix representation of a linear operator.
Parameters
----------
op : `Operator`
The linear operator of which one wants a matrix representation.
If the domain or range is a `ProductSpace`, it must be a power-space.
Returns
-------
matrix : `numpy.ndarray`
The matrix representation of the operator.
The shape will be ``op.domain.shape + op.range.shape`` and the dtype
is the promoted (greatest) dtype of the domain and range.
Examples
--------
Approximate a matrix on its own:
>>> mat = np.array([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9]])
>>> op = odl.MatrixOperator(mat)
>>> matrix_representation(op)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
It also works with `ProductSpace`'s and higher dimensional `TensorSpace`'s.
In this case, the returned "matrix" will also be higher dimensional:
>>> space = odl.uniform_discr([0, 0], [2, 2], (2, 2))
>>> grad = odl.Gradient(space)
>>> tensor = odl.matrix_representation(grad)
>>> tensor.shape == (2, 2, 2, 2, 2)
True
Since the "matrix" is now higher dimensional, we need to use e.g.
`numpy.tensordot` if we want to compute with the matrix representation:
>>> x = space.element(lambda x: x[0] ** 2 + 2 * x[1] ** 2)
>>> grad(x)
ProductSpace(uniform_discr([ 0., 0.], [ 2., 2.], (2, 2)), 2).element([
<BLANKLINE>
[[ 2. , 2. ],
[-2.75, -6.75]],
<BLANKLINE>
[[ 4. , -4.75],
[ 4. , -6.75]]
])
>>> np.tensordot(tensor, x, axes=grad.domain.ndim)
array([[[ 2. , 2. ],
[-2.75, -6.75]],
<BLANKLINE>
[[ 4. , -4.75],
[ 4. , -6.75]]])
Notes
----------
The algorithm works by letting the operator act on all unit vectors, and
stacking the output as a matrix.
"""
if not op.is_linear:
raise ValueError('the operator is not linear')
if not (isinstance(op.domain, TensorSpace) or
(isinstance(op.domain, ProductSpace) and
op.domain.is_power_space and
all(isinstance(spc, TensorSpace) for spc in op.domain))):
raise TypeError('operator domain {!r} is neither `TensorSpace` '
'nor `ProductSpace` with only equal `TensorSpace` '
'components'.format(op.domain))
if not (isinstance(op.range, TensorSpace) or
(isinstance(op.range, ProductSpace) and
op.range.is_power_space and
all(isinstance(spc, TensorSpace) for spc in op.range))):
raise TypeError('operator range {!r} is neither `TensorSpace` '
'nor `ProductSpace` with only equal `TensorSpace` '
'components'.format(op.range))
# Generate the matrix
dtype = np.promote_types(op.domain.dtype, op.range.dtype)
matrix = np.zeros(op.range.shape + op.domain.shape, dtype=dtype)
tmp_ran = op.range.element() # Store for reuse in loop
tmp_dom = op.domain.zero() # Store for reuse in loop
for j in nd_iterator(op.domain.shape):
tmp_dom[j] = 1.0
op(tmp_dom, out=tmp_ran)
matrix[(Ellipsis,) + j] = tmp_ran.asarray()
tmp_dom[j] = 0.0
return matrix | [
"def",
"matrix_representation",
"(",
"op",
")",
":",
"if",
"not",
"op",
".",
"is_linear",
":",
"raise",
"ValueError",
"(",
"'the operator is not linear'",
")",
"if",
"not",
"(",
"isinstance",
"(",
"op",
".",
"domain",
",",
"TensorSpace",
")",
"or",
"(",
"isinstance",
"(",
"op",
".",
"domain",
",",
"ProductSpace",
")",
"and",
"op",
".",
"domain",
".",
"is_power_space",
"and",
"all",
"(",
"isinstance",
"(",
"spc",
",",
"TensorSpace",
")",
"for",
"spc",
"in",
"op",
".",
"domain",
")",
")",
")",
":",
"raise",
"TypeError",
"(",
"'operator domain {!r} is neither `TensorSpace` '",
"'nor `ProductSpace` with only equal `TensorSpace` '",
"'components'",
".",
"format",
"(",
"op",
".",
"domain",
")",
")",
"if",
"not",
"(",
"isinstance",
"(",
"op",
".",
"range",
",",
"TensorSpace",
")",
"or",
"(",
"isinstance",
"(",
"op",
".",
"range",
",",
"ProductSpace",
")",
"and",
"op",
".",
"range",
".",
"is_power_space",
"and",
"all",
"(",
"isinstance",
"(",
"spc",
",",
"TensorSpace",
")",
"for",
"spc",
"in",
"op",
".",
"range",
")",
")",
")",
":",
"raise",
"TypeError",
"(",
"'operator range {!r} is neither `TensorSpace` '",
"'nor `ProductSpace` with only equal `TensorSpace` '",
"'components'",
".",
"format",
"(",
"op",
".",
"range",
")",
")",
"# Generate the matrix",
"dtype",
"=",
"np",
".",
"promote_types",
"(",
"op",
".",
"domain",
".",
"dtype",
",",
"op",
".",
"range",
".",
"dtype",
")",
"matrix",
"=",
"np",
".",
"zeros",
"(",
"op",
".",
"range",
".",
"shape",
"+",
"op",
".",
"domain",
".",
"shape",
",",
"dtype",
"=",
"dtype",
")",
"tmp_ran",
"=",
"op",
".",
"range",
".",
"element",
"(",
")",
"# Store for reuse in loop",
"tmp_dom",
"=",
"op",
".",
"domain",
".",
"zero",
"(",
")",
"# Store for reuse in loop",
"for",
"j",
"in",
"nd_iterator",
"(",
"op",
".",
"domain",
".",
"shape",
")",
":",
"tmp_dom",
"[",
"j",
"]",
"=",
"1.0",
"op",
"(",
"tmp_dom",
",",
"out",
"=",
"tmp_ran",
")",
"matrix",
"[",
"(",
"Ellipsis",
",",
")",
"+",
"j",
"]",
"=",
"tmp_ran",
".",
"asarray",
"(",
")",
"tmp_dom",
"[",
"j",
"]",
"=",
"0.0",
"return",
"matrix"
] | Return a matrix representation of a linear operator.
Parameters
----------
op : `Operator`
The linear operator of which one wants a matrix representation.
If the domain or range is a `ProductSpace`, it must be a power-space.
Returns
-------
matrix : `numpy.ndarray`
The matrix representation of the operator.
The shape will be ``op.domain.shape + op.range.shape`` and the dtype
is the promoted (greatest) dtype of the domain and range.
Examples
--------
Approximate a matrix on its own:
>>> mat = np.array([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9]])
>>> op = odl.MatrixOperator(mat)
>>> matrix_representation(op)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
It also works with `ProductSpace`'s and higher dimensional `TensorSpace`'s.
In this case, the returned "matrix" will also be higher dimensional:
>>> space = odl.uniform_discr([0, 0], [2, 2], (2, 2))
>>> grad = odl.Gradient(space)
>>> tensor = odl.matrix_representation(grad)
>>> tensor.shape == (2, 2, 2, 2, 2)
True
Since the "matrix" is now higher dimensional, we need to use e.g.
`numpy.tensordot` if we want to compute with the matrix representation:
>>> x = space.element(lambda x: x[0] ** 2 + 2 * x[1] ** 2)
>>> grad(x)
ProductSpace(uniform_discr([ 0., 0.], [ 2., 2.], (2, 2)), 2).element([
<BLANKLINE>
[[ 2. , 2. ],
[-2.75, -6.75]],
<BLANKLINE>
[[ 4. , -4.75],
[ 4. , -6.75]]
])
>>> np.tensordot(tensor, x, axes=grad.domain.ndim)
array([[[ 2. , 2. ],
[-2.75, -6.75]],
<BLANKLINE>
[[ 4. , -4.75],
[ 4. , -6.75]]])
Notes
----------
The algorithm works by letting the operator act on all unit vectors, and
stacking the output as a matrix. | [
"Return",
"a",
"matrix",
"representation",
"of",
"a",
"linear",
"operator",
"."
] | python | train |
Qiskit/qiskit-terra | qiskit/qasm/node/idlist.py | https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/qasm/node/idlist.py#L27-L30 | def qasm(self, prec=15):
"""Return the corresponding OPENQASM string."""
return ",".join([self.children[j].qasm(prec)
for j in range(self.size())]) | [
"def",
"qasm",
"(",
"self",
",",
"prec",
"=",
"15",
")",
":",
"return",
"\",\"",
".",
"join",
"(",
"[",
"self",
".",
"children",
"[",
"j",
"]",
".",
"qasm",
"(",
"prec",
")",
"for",
"j",
"in",
"range",
"(",
"self",
".",
"size",
"(",
")",
")",
"]",
")"
] | Return the corresponding OPENQASM string. | [
"Return",
"the",
"corresponding",
"OPENQASM",
"string",
"."
] | python | test |
portfors-lab/sparkle | sparkle/gui/stim/stimulusview.py | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/stimulusview.py#L178-L193 | def visualRectRC(self, row, column):
"""The rectangle for the bounds of the item at *row*, *column*
:param row: row of the item
:type row: int
:param column: column of the item
:type column: int
:returns: :qtdoc:`QRect` -- rectangle of the borders of the item
"""
rect = self._rects[row][column]
if rect.isValid():
return QtCore.QRect(rect.x() - self.horizontalScrollBar().value(),
rect.y() - self.verticalScrollBar().value(),
rect.width(), rect.height())
else:
return rect | [
"def",
"visualRectRC",
"(",
"self",
",",
"row",
",",
"column",
")",
":",
"rect",
"=",
"self",
".",
"_rects",
"[",
"row",
"]",
"[",
"column",
"]",
"if",
"rect",
".",
"isValid",
"(",
")",
":",
"return",
"QtCore",
".",
"QRect",
"(",
"rect",
".",
"x",
"(",
")",
"-",
"self",
".",
"horizontalScrollBar",
"(",
")",
".",
"value",
"(",
")",
",",
"rect",
".",
"y",
"(",
")",
"-",
"self",
".",
"verticalScrollBar",
"(",
")",
".",
"value",
"(",
")",
",",
"rect",
".",
"width",
"(",
")",
",",
"rect",
".",
"height",
"(",
")",
")",
"else",
":",
"return",
"rect"
] | The rectangle for the bounds of the item at *row*, *column*
:param row: row of the item
:type row: int
:param column: column of the item
:type column: int
:returns: :qtdoc:`QRect` -- rectangle of the borders of the item | [
"The",
"rectangle",
"for",
"the",
"bounds",
"of",
"the",
"item",
"at",
"*",
"row",
"*",
"*",
"column",
"*"
] | python | train |
bububa/pyTOP | pyTOP/packages/requests/sessions.py | https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/packages/requests/sessions.py#L229-L237 | def post(self, url, data=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
:param **kwargs: Optional arguments that ``request`` takes.
"""
return self.request('post', url, data=data, **kwargs) | [
"def",
"post",
"(",
"self",
",",
"url",
",",
"data",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"request",
"(",
"'post'",
",",
"url",
",",
"data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")"
] | Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
:param **kwargs: Optional arguments that ``request`` takes. | [
"Sends",
"a",
"POST",
"request",
".",
"Returns",
":",
"class",
":",
"Response",
"object",
"."
] | python | train |
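A minimal calling sketch for the post() wrapper above, assuming the defining class is the module's requests-style Session; the URL and payload are placeholders.

    s = Session()                                          # assumed class name
    r = s.post('https://httpbin.org/post', data={'key': 'value'})
    print(r.status_code)                                   # delegates to self.request('post', ...)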
Britefury/batchup | batchup/data_source.py | https://github.com/Britefury/batchup/blob/3fc2304e629f813c05f9e7a85a18acef3581a536/batchup/data_source.py#L1800-L1849 | def coerce_data_source(x):
"""
Helper function to coerce an object into a data source, selecting the
appropriate data source class for the given object. If `x` is already
a data source it is returned as is.
Parameters
----------
x: any
The object to coerce. If `x` is a data source, it is returned as is.
If it is a list or tuple of array-like objects they will be wrapped
in an `ArrayDataSource` that will be returned. If `x` is an iterator
it will be wrapped in an `IteratorDataSource`. If it is a callable
it will be wrapped in a `CallableDataSource`.
Returns
-------
`x` coerced into a data source
Raises
------
`TypeError` if `x` is not a data source, a list or tuple of array-like
objects, an iterator or a callable.
"""
if isinstance(x, AbstractDataSource):
return x
elif isinstance(x, (list, tuple)):
# Sequence of array-likes
items = []
for item in x:
if _is_array_like(item):
items.append(item)
else:
raise TypeError(
'Cannot convert x to a data source; x is a sequence and '
'one of the elements is not an array-like object, rather '
'a {}'.format(type(item)))
if len(items) == 0:
raise ValueError('Cannot convert x to a data source; x is an '
'empty sequence')
return ArrayDataSource(items)
elif isinstance(x, collections.Iterator):
return IteratorDataSource(x)
elif callable(x):
return CallableDataSource(x)
else:
raise TypeError('Cannot convert x to a data source; can only handle '
'iterators, callables, non-empty sequences of '
'array-like objects; cannot '
'handle {}'.format(type(x))) | [
"def",
"coerce_data_source",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"AbstractDataSource",
")",
":",
"return",
"x",
"elif",
"isinstance",
"(",
"x",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"# Sequence of array-likes",
"items",
"=",
"[",
"]",
"for",
"item",
"in",
"x",
":",
"if",
"_is_array_like",
"(",
"item",
")",
":",
"items",
".",
"append",
"(",
"item",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Cannot convert x to a data source; x is a sequence and '",
"'one of the elements is not an array-like object, rather '",
"'a {}'",
".",
"format",
"(",
"type",
"(",
"item",
")",
")",
")",
"if",
"len",
"(",
"items",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'Cannot convert x to a data source; x is an '",
"'empty sequence'",
")",
"return",
"ArrayDataSource",
"(",
"items",
")",
"elif",
"isinstance",
"(",
"x",
",",
"collections",
".",
"Iterator",
")",
":",
"return",
"IteratorDataSource",
"(",
"x",
")",
"elif",
"callable",
"(",
"x",
")",
":",
"return",
"CallableDataSource",
"(",
"x",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Cannot convert x to a data source; can only handle '",
"'iterators, callables, non-empty sequences of '",
"'array-like objects; cannot '",
"'handle {}'",
".",
"format",
"(",
"type",
"(",
"x",
")",
")",
")"
] | Helper function to coerce an object into a data source, selecting the
appropriate data source class for the given object. If `x` is already
a data source it is returned as is.
Parameters
----------
x: any
The object to coerce. If `x` is a data source, it is returned as is.
If it is a list or tuple of array-like objects they will be wrapped
in an `ArrayDataSource` that will be returned. If `x` is an iterator
it will be wrapped in an `IteratorDataSource`. If it is a callable
it will be wrapped in a `CallableDataSource`.
Returns
-------
`x` coerced into a data source
Raises
------
`TypeError` if `x` is not a data source, a list or tuple of array-like
objects, an iterator or a callable. | [
"Helper",
"function",
"to",
"coerce",
"an",
"object",
"into",
"a",
"data",
"source",
"selecting",
"the",
"appropriate",
"data",
"source",
"class",
"for",
"the",
"given",
"object",
".",
"If",
"x",
"is",
"already",
"a",
"data",
"source",
"it",
"is",
"returned",
"as",
"is",
"."
] | python | train |
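A short sketch of the dispatch behaviour documented above; ArrayDataSource, IteratorDataSource and CallableDataSource are the classes named in the same batchup module, and the arrays are arbitrary.

    import numpy as np

    X = np.arange(10).reshape(5, 2)
    y = np.arange(5)

    ds_arrays = coerce_data_source([X, y])                       # sequence of array-likes -> ArrayDataSource
    ds_iter   = coerce_data_source(iter([(X, y)]))               # iterator -> IteratorDataSource
    ds_call   = coerce_data_source(lambda **kw: iter([(X, y)]))  # callable -> CallableDataSource
    # coerce_data_source(3.14)                                   # would raise TypeError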
ssato/python-anyconfig | src/anyconfig/backend/base.py | https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/backend/base.py#L555-L574 | def load_with_fn(load_fn, content_or_strm, container, allow_primitives=False,
**options):
"""
Load data from given string or stream 'content_or_strm'.
:param load_fn: Callable to load data
:param content_or_strm: data content or a stream that provides it
:param container: callable to make a container object
:param allow_primitives:
True if the parser.load* may return objects of primitive data types
other than mapping types, such as the JSON parser
:param options: keyword options passed to 'load_fn'
:return: container object holding data
"""
ret = load_fn(content_or_strm, **options)
if anyconfig.utils.is_dict_like(ret):
return container() if (ret is None or not ret) else container(ret)
return ret if allow_primitives else container(ret) | [
"def",
"load_with_fn",
"(",
"load_fn",
",",
"content_or_strm",
",",
"container",
",",
"allow_primitives",
"=",
"False",
",",
"*",
"*",
"options",
")",
":",
"ret",
"=",
"load_fn",
"(",
"content_or_strm",
",",
"*",
"*",
"options",
")",
"if",
"anyconfig",
".",
"utils",
".",
"is_dict_like",
"(",
"ret",
")",
":",
"return",
"container",
"(",
")",
"if",
"(",
"ret",
"is",
"None",
"or",
"not",
"ret",
")",
"else",
"container",
"(",
"ret",
")",
"return",
"ret",
"if",
"allow_primitives",
"else",
"container",
"(",
"ret",
")"
] | Load data from given string or stream 'content_or_strm'.
:param load_fn: Callable to load data
:param content_or_strm: data content or a stream that provides it
:param container: callable to make a container object
:param allow_primitives:
True if the parser.load* may return objects of primitive data types
other than mapping types, such as the JSON parser
:param options: keyword options passed to 'load_fn'
:return: container object holding data | [
"Load",
"data",
"from",
"given",
"string",
"or",
"stream",
"content_or_strm",
"."
] | python | train |
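Illustrative call of the helper above with json.loads as the loader; allow_primitives controls whether a non-mapping result is wrapped in the container.

    import json
    from collections import OrderedDict

    load_with_fn(json.loads, '{"a": 1, "b": 2}', OrderedDict)
    # -> OrderedDict([('a', 1), ('b', 2)])  (mapping results are wrapped in the container)

    load_with_fn(json.loads, '42', OrderedDict, allow_primitives=True)
    # -> 42  (primitives pass through only when allow_primitives=True)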
rene-aguirre/pywinusb | pywinusb/hid/wnd_hook_mixin.py | https://github.com/rene-aguirre/pywinusb/blob/954c4b2105d9f01cb0c50e24500bb747d4ecdc43/pywinusb/hid/wnd_hook_mixin.py#L73-L78 | def hook_wnd_proc(self):
"""Attach to OS Window message handler"""
self.__local_wnd_proc_wrapped = WndProcType(self.local_wnd_proc)
self.__old_wnd_proc = SetWindowLong(self.__local_win_handle,
GWL_WNDPROC,
self.__local_wnd_proc_wrapped) | [
"def",
"hook_wnd_proc",
"(",
"self",
")",
":",
"self",
".",
"__local_wnd_proc_wrapped",
"=",
"WndProcType",
"(",
"self",
".",
"local_wnd_proc",
")",
"self",
".",
"__old_wnd_proc",
"=",
"SetWindowLong",
"(",
"self",
".",
"__local_win_handle",
",",
"GWL_WNDPROC",
",",
"self",
".",
"__local_wnd_proc_wrapped",
")"
] | Attach to OS Window message handler | [
"Attach",
"to",
"OS",
"Window",
"message",
"handler"
] | python | train |
bokeh/bokeh | bokeh/server/callbacks.py | https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/server/callbacks.py#L171-L173 | def _copy_with_changed_callback(self, new_callback):
''' Dev API used to wrap the callback with decorators. '''
return TimeoutCallback(self._document, new_callback, self._timeout, self._id) | [
"def",
"_copy_with_changed_callback",
"(",
"self",
",",
"new_callback",
")",
":",
"return",
"TimeoutCallback",
"(",
"self",
".",
"_document",
",",
"new_callback",
",",
"self",
".",
"_timeout",
",",
"self",
".",
"_id",
")"
] | Dev API used to wrap the callback with decorators. | [
"Dev",
"API",
"used",
"to",
"wrap",
"the",
"callback",
"with",
"decorators",
"."
] | python | train |
log2timeline/dfvfs | dfvfs/file_io/tsk_partition_file_io.py | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/tsk_partition_file_io.py#L29-L78 | def _Open(self, path_spec=None, mode='rb'):
"""Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not path_spec:
raise ValueError('Missing path specification.')
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
self._file_system = resolver.Resolver.OpenFileSystem(
path_spec, resolver_context=self._resolver_context)
tsk_volume = self._file_system.GetTSKVolume()
tsk_vs, _ = tsk_partition.GetTSKVsPartByPathSpec(tsk_volume, path_spec)
if tsk_vs is None:
raise errors.PathSpecError(
'Unable to retrieve TSK volume system part from path '
'specification.')
range_offset = tsk_partition.TSKVsPartGetStartSector(tsk_vs)
range_size = tsk_partition.TSKVsPartGetNumberOfSectors(tsk_vs)
if range_offset is None or range_size is None:
raise errors.PathSpecError(
'Unable to retrieve TSK volume system part data range from path '
'specification.')
bytes_per_sector = tsk_partition.TSKVolumeGetBytesPerSector(tsk_volume)
range_offset *= bytes_per_sector
range_size *= bytes_per_sector
self.SetRange(range_offset, range_size)
self._file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
self._file_object_set_in_init = True
# pylint: disable=protected-access
super(TSKPartitionFile, self)._Open(path_spec=path_spec, mode=mode) | [
"def",
"_Open",
"(",
"self",
",",
"path_spec",
"=",
"None",
",",
"mode",
"=",
"'rb'",
")",
":",
"if",
"not",
"path_spec",
":",
"raise",
"ValueError",
"(",
"'Missing path specification.'",
")",
"if",
"not",
"path_spec",
".",
"HasParent",
"(",
")",
":",
"raise",
"errors",
".",
"PathSpecError",
"(",
"'Unsupported path specification without parent.'",
")",
"self",
".",
"_file_system",
"=",
"resolver",
".",
"Resolver",
".",
"OpenFileSystem",
"(",
"path_spec",
",",
"resolver_context",
"=",
"self",
".",
"_resolver_context",
")",
"tsk_volume",
"=",
"self",
".",
"_file_system",
".",
"GetTSKVolume",
"(",
")",
"tsk_vs",
",",
"_",
"=",
"tsk_partition",
".",
"GetTSKVsPartByPathSpec",
"(",
"tsk_volume",
",",
"path_spec",
")",
"if",
"tsk_vs",
"is",
"None",
":",
"raise",
"errors",
".",
"PathSpecError",
"(",
"'Unable to retrieve TSK volume system part from path '",
"'specification.'",
")",
"range_offset",
"=",
"tsk_partition",
".",
"TSKVsPartGetStartSector",
"(",
"tsk_vs",
")",
"range_size",
"=",
"tsk_partition",
".",
"TSKVsPartGetNumberOfSectors",
"(",
"tsk_vs",
")",
"if",
"range_offset",
"is",
"None",
"or",
"range_size",
"is",
"None",
":",
"raise",
"errors",
".",
"PathSpecError",
"(",
"'Unable to retrieve TSK volume system part data range from path '",
"'specification.'",
")",
"bytes_per_sector",
"=",
"tsk_partition",
".",
"TSKVolumeGetBytesPerSector",
"(",
"tsk_volume",
")",
"range_offset",
"*=",
"bytes_per_sector",
"range_size",
"*=",
"bytes_per_sector",
"self",
".",
"SetRange",
"(",
"range_offset",
",",
"range_size",
")",
"self",
".",
"_file_object",
"=",
"resolver",
".",
"Resolver",
".",
"OpenFileObject",
"(",
"path_spec",
".",
"parent",
",",
"resolver_context",
"=",
"self",
".",
"_resolver_context",
")",
"self",
".",
"_file_object_set_in_init",
"=",
"True",
"# pylint: disable=protected-access",
"super",
"(",
"TSKPartitionFile",
",",
"self",
")",
".",
"_Open",
"(",
"path_spec",
"=",
"path_spec",
",",
"mode",
"=",
"mode",
")"
] | Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid. | [
"Opens",
"the",
"file",
"-",
"like",
"object",
"defined",
"by",
"path",
"specification",
"."
] | python | train |
CalebBell/fluids | fluids/two_phase.py | https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/two_phase.py#L1935-L2080 | def Kim_Mudawar(m, x, rhol, rhog, mul, mug, sigma, D, L=1):
r'''Calculates two-phase pressure drop with the Kim and Mudawar (2012)
correlation as in [1]_, also presented in [2]_.
.. math::
\Delta P = \Delta P_{l} \phi_{l}^2
.. math::
\phi_l^2 = 1 + \frac{C}{X} + \frac{1}{X^2}
.. math::
X^2 = \frac{\Delta P_l}{\Delta P_g}
For turbulent liquid, turbulent gas:
.. math::
C = 0.39Re_{lo}^{0.03} Su_{go}^{0.10}\left(\frac{\rho_l}{\rho_g}
\right)^{0.35}
For turbulent liquid, laminar gas:
.. math::
C = 8.7\times 10^{-4} Re_{lo}^{0.17} Su_{go}^{0.50}\left(\frac{\rho_l}
{\rho_g}\right)^{0.14}
For laminar liquid, turbulent gas:
.. math::
C = 0.0015 Re_{lo}^{0.59} Su_{go}^{0.19}\left(\frac{\rho_l}{\rho_g}
\right)^{0.36}
For laminar liquid, laminar gas:
.. math::
C = 3.5\times 10^{-5} Re_{lo}^{0.44} Su_{go}^{0.50}\left(\frac{\rho_l}
{\rho_g}\right)^{0.48}
This model has its own friction factor calculations, to be consistent with
its Reynolds number transition. As their model was regressed with these
equations, more error is obtained when using any other friction factor
calculation. The laminar equation 64/Re is used up to Re=2000, then the
Blasius equation with a coefficient of 0.316, and above Re = 20000,
.. math::
f_d = \frac{0.184}{Re^{0.2}}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
x : float
Quality of fluid, [-]
rhol : float
Liquid density, [kg/m^3]
rhog : float
Gas density, [kg/m^3]
mul : float
Viscosity of liquid, [Pa*s]
mug : float
Viscosity of gas, [Pa*s]
sigma : float
Surface tension, [N/m]
D : float
Diameter of pipe, [m]
L : float, optional
Length of pipe, [m]
Returns
-------
dP : float
Pressure drop of the two-phase flow, [Pa]
Notes
-----
The critical Reynolds number in this model is 2000, with a Reynolds number
definition using actual liquid and gas flows. This model also requires
liquid-only Reynolds number to be calculated.
No attempt to incorporate roughness into the model was made in [1]_.
The model was developed with hydraulic diameter from 0.0695 to 6.22 mm,
mass velocities 4 to 8528 kg/m^2/s, flow qualities from 0 to 1, reduced
pressures from 0.0052 to 0.91, superficial liquid Reynolds numbers up to
79202, superficial gas Reynolds numbers up to 253810, liquid-only Reynolds
numbers up to 89798, 7115 data points from 36 sources and working fluids
air, CO2, N2, water, ethanol, R12, R22, R134a, R236ea, R245fa, R404A, R407C,
propane, methane, and ammonia.
Examples
--------
>>> Kim_Mudawar(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6, mug=14E-6,
... sigma=0.0487, D=0.05, L=1)
840.4137796786074
References
----------
.. [1] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting
Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/
Micro-Channel Flows." International Journal of Heat and Mass Transfer
55, no. 11-12 (May 2012): 3246-61.
doi:10.1016/j.ijheatmasstransfer.2012.02.047.
.. [2] Kim, Sung-Min, and Issam Mudawar. "Review of Databases and
Predictive Methods for Pressure Drop in Adiabatic, Condensing and
Boiling Mini/Micro-Channel Flows." International Journal of Heat and
Mass Transfer 77 (October 2014): 74-97.
doi:10.1016/j.ijheatmasstransfer.2014.04.035.
'''
def friction_factor(Re):
if Re < 2000:
return 64./Re
elif Re < 20000:
return 0.316*Re**-0.25
else:
return 0.184*Re**-0.2
# Actual Liquid flow
v_l = m*(1-x)/rhol/(pi/4*D**2)
Re_l = Reynolds(V=v_l, rho=rhol, mu=mul, D=D)
fd_l = friction_factor(Re=Re_l)
dP_l = fd_l*L/D*(0.5*rhol*v_l**2)
# Actual gas flow
v_g = m*x/rhog/(pi/4*D**2)
Re_g = Reynolds(V=v_g, rho=rhog, mu=mug, D=D)
fd_g = friction_factor(Re=Re_g)
dP_g = fd_g*L/D*(0.5*rhog*v_g**2)
# Liquid-only flow
v_lo = m/rhol/(pi/4*D**2)
Re_lo = Reynolds(V=v_lo, rho=rhol, mu=mul, D=D)
Su = Suratman(L=D, rho=rhog, mu=mug, sigma=sigma)
X = (dP_l/dP_g)**0.5
Re_c = 2000 # Transition Reynolds number
if Re_l < Re_c and Re_g < Re_c:
C = 3.5E-5*Re_lo**0.44*Su**0.5*(rhol/rhog)**0.48
elif Re_l < Re_c and Re_g >= Re_c:
C = 0.0015*Re_lo**0.59*Su**0.19*(rhol/rhog)**0.36
elif Re_l >= Re_c and Re_g < Re_c:
C = 8.7E-4*Re_lo**0.17*Su**0.5*(rhol/rhog)**0.14
else: # Turbulent case
C = 0.39*Re_lo**0.03*Su**0.10*(rhol/rhog)**0.35
phi_l2 = 1 + C/X + 1./X**2
return dP_l*phi_l2 | [
"def",
"Kim_Mudawar",
"(",
"m",
",",
"x",
",",
"rhol",
",",
"rhog",
",",
"mul",
",",
"mug",
",",
"sigma",
",",
"D",
",",
"L",
"=",
"1",
")",
":",
"def",
"friction_factor",
"(",
"Re",
")",
":",
"if",
"Re",
"<",
"2000",
":",
"return",
"64.",
"/",
"Re",
"elif",
"Re",
"<",
"20000",
":",
"return",
"0.316",
"*",
"Re",
"**",
"-",
"0.25",
"else",
":",
"return",
"0.184",
"*",
"Re",
"**",
"-",
"0.2",
"# Actual Liquid flow",
"v_l",
"=",
"m",
"*",
"(",
"1",
"-",
"x",
")",
"/",
"rhol",
"/",
"(",
"pi",
"/",
"4",
"*",
"D",
"**",
"2",
")",
"Re_l",
"=",
"Reynolds",
"(",
"V",
"=",
"v_l",
",",
"rho",
"=",
"rhol",
",",
"mu",
"=",
"mul",
",",
"D",
"=",
"D",
")",
"fd_l",
"=",
"friction_factor",
"(",
"Re",
"=",
"Re_l",
")",
"dP_l",
"=",
"fd_l",
"*",
"L",
"/",
"D",
"*",
"(",
"0.5",
"*",
"rhol",
"*",
"v_l",
"**",
"2",
")",
"# Actual gas flow",
"v_g",
"=",
"m",
"*",
"x",
"/",
"rhog",
"/",
"(",
"pi",
"/",
"4",
"*",
"D",
"**",
"2",
")",
"Re_g",
"=",
"Reynolds",
"(",
"V",
"=",
"v_g",
",",
"rho",
"=",
"rhog",
",",
"mu",
"=",
"mug",
",",
"D",
"=",
"D",
")",
"fd_g",
"=",
"friction_factor",
"(",
"Re",
"=",
"Re_g",
")",
"dP_g",
"=",
"fd_g",
"*",
"L",
"/",
"D",
"*",
"(",
"0.5",
"*",
"rhog",
"*",
"v_g",
"**",
"2",
")",
"# Liquid-only flow",
"v_lo",
"=",
"m",
"/",
"rhol",
"/",
"(",
"pi",
"/",
"4",
"*",
"D",
"**",
"2",
")",
"Re_lo",
"=",
"Reynolds",
"(",
"V",
"=",
"v_lo",
",",
"rho",
"=",
"rhol",
",",
"mu",
"=",
"mul",
",",
"D",
"=",
"D",
")",
"Su",
"=",
"Suratman",
"(",
"L",
"=",
"D",
",",
"rho",
"=",
"rhog",
",",
"mu",
"=",
"mug",
",",
"sigma",
"=",
"sigma",
")",
"X",
"=",
"(",
"dP_l",
"/",
"dP_g",
")",
"**",
"0.5",
"Re_c",
"=",
"2000",
"# Transition Reynolds number",
"if",
"Re_l",
"<",
"Re_c",
"and",
"Re_g",
"<",
"Re_c",
":",
"C",
"=",
"3.5E-5",
"*",
"Re_lo",
"**",
"0.44",
"*",
"Su",
"**",
"0.5",
"*",
"(",
"rhol",
"/",
"rhog",
")",
"**",
"0.48",
"elif",
"Re_l",
"<",
"Re_c",
"and",
"Re_g",
">=",
"Re_c",
":",
"C",
"=",
"0.0015",
"*",
"Re_lo",
"**",
"0.59",
"*",
"Su",
"**",
"0.19",
"*",
"(",
"rhol",
"/",
"rhog",
")",
"**",
"0.36",
"elif",
"Re_l",
">=",
"Re_c",
"and",
"Re_g",
"<",
"Re_c",
":",
"C",
"=",
"8.7E-4",
"*",
"Re_lo",
"**",
"0.17",
"*",
"Su",
"**",
"0.5",
"*",
"(",
"rhol",
"/",
"rhog",
")",
"**",
"0.14",
"else",
":",
"# Turbulent case",
"C",
"=",
"0.39",
"*",
"Re_lo",
"**",
"0.03",
"*",
"Su",
"**",
"0.10",
"*",
"(",
"rhol",
"/",
"rhog",
")",
"**",
"0.35",
"phi_l2",
"=",
"1",
"+",
"C",
"/",
"X",
"+",
"1.",
"/",
"X",
"**",
"2",
"return",
"dP_l",
"*",
"phi_l2"
] | r'''Calculates two-phase pressure drop with the Kim and Mudawar (2012)
correlation as in [1]_, also presented in [2]_.
.. math::
\Delta P = \Delta P_{l} \phi_{l}^2
.. math::
\phi_l^2 = 1 + \frac{C}{X} + \frac{1}{X^2}
.. math::
X^2 = \frac{\Delta P_l}{\Delta P_g}
For turbulent liquid, turbulent gas:
.. math::
C = 0.39Re_{lo}^{0.03} Su_{go}^{0.10}\left(\frac{\rho_l}{\rho_g}
\right)^{0.35}
For turbulent liquid, laminar gas:
.. math::
C = 8.7\times 10^{-4} Re_{lo}^{0.17} Su_{go}^{0.50}\left(\frac{\rho_l}
{\rho_g}\right)^{0.14}
For laminar liquid, turbulent gas:
.. math::
C = 0.0015 Re_{lo}^{0.59} Su_{go}^{0.19}\left(\frac{\rho_l}{\rho_g}
\right)^{0.36}
For laminar liquid, laminar gas:
.. math::
C = 3.5\times 10^{-5} Re_{lo}^{0.44} Su_{go}^{0.50}\left(\frac{\rho_l}
{\rho_g}\right)^{0.48}
This model has its own friction factor calculations, to be consistent with
its Reynolds number transition. As their model was regressed with these
equations, more error is obtained when using any other friction factor
calculation. The laminar equation 64/Re is used up to Re=2000, then the
Blasius equation with a coefficient of 0.316, and above Re = 20000,
.. math::
f_d = \frac{0.184}{Re^{0.2}}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
x : float
Quality of fluid, [-]
rhol : float
Liquid density, [kg/m^3]
rhog : float
Gas density, [kg/m^3]
mul : float
Viscosity of liquid, [Pa*s]
mug : float
Viscosity of gas, [Pa*s]
sigma : float
Surface tension, [N/m]
D : float
Diameter of pipe, [m]
L : float, optional
Length of pipe, [m]
Returns
-------
dP : float
Pressure drop of the two-phase flow, [Pa]
Notes
-----
The critical Reynolds number in this model is 2000, with a Reynolds number
definition using actual liquid and gas flows. This model also requires
liquid-only Reynolds number to be calculated.
No attempt to incorporate roughness into the model was made in [1]_.
The model was developed with hydraulic diameter from 0.0695 to 6.22 mm,
mass velocities 4 to 8528 kg/m^2/s, flow qualities from 0 to 1, reduced
pressures from 0.0052 to 0.91, superficial liquid Reynolds numbers up to
79202, superficial gas Reynolds numbers up to 253810, liquid-only Reynolds
numbers up to 89798, 7115 data points from 36 sources and working fluids
air, CO2, N2, water, ethanol, R12, R22, R134a, R236ea, R245fa, R404A, R407C,
propane, methane, and ammonia.
Examples
--------
>>> Kim_Mudawar(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6, mug=14E-6,
... sigma=0.0487, D=0.05, L=1)
840.4137796786074
References
----------
.. [1] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting
Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/
Micro-Channel Flows." International Journal of Heat and Mass Transfer
55, no. 11-12 (May 2012): 3246-61.
doi:10.1016/j.ijheatmasstransfer.2012.02.047.
.. [2] Kim, Sung-Min, and Issam Mudawar. "Review of Databases and
Predictive Methods for Pressure Drop in Adiabatic, Condensing and
Boiling Mini/Micro-Channel Flows." International Journal of Heat and
Mass Transfer 77 (October 2014): 74-97.
doi:10.1016/j.ijheatmasstransfer.2014.04.035. | [
"r",
"Calculates",
"two",
"-",
"phase",
"pressure",
"drop",
"with",
"the",
"Kim",
"and",
"Mudawar",
"(",
"2012",
")",
"correlation",
"as",
"in",
"[",
"1",
"]",
"_",
"also",
"presented",
"in",
"[",
"2",
"]",
"_",
"."
] | python | train |
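The correlation above can be swept over quality to show its behaviour; the fluid state is the one used in the docstring example, so the x = 0.1 point reproduces the documented value.

    for x in (0.05, 0.1, 0.5, 0.9):
        dP = Kim_Mudawar(m=0.6, x=x, rhol=915., rhog=2.67, mul=180E-6, mug=14E-6,
                         sigma=0.0487, D=0.05, L=1)
        print('x = %.2f  ->  dP = %.1f Pa' % (x, dP))
    # x = 0.1 gives ~840.4 Pa, matching the docstring's doctest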
bspaans/python-mingus | mingus/midi/midi_track.py | https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/midi_track.py#L203-L210 | def set_deltatime(self, delta_time):
"""Set the delta_time.
Can be an integer or a variable length byte.
"""
if type(delta_time) == int:
delta_time = self.int_to_varbyte(delta_time)
self.delta_time = delta_time | [
"def",
"set_deltatime",
"(",
"self",
",",
"delta_time",
")",
":",
"if",
"type",
"(",
"delta_time",
")",
"==",
"int",
":",
"delta_time",
"=",
"self",
".",
"int_to_varbyte",
"(",
"delta_time",
")",
"self",
".",
"delta_time",
"=",
"delta_time"
] | Set the delta_time.
Can be an integer or a variable length byte. | [
"Set",
"the",
"delta_time",
"."
] | python | train |
cloud-custodian/cloud-custodian | tools/c7n_gcp/c7n_gcp/actions/cscc.py | https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_gcp/c7n_gcp/actions/cscc.py#L137-L140 | def get_name(self, r):
"""Given an arbitrary resource attempt to resolve back to a qualified name."""
namer = ResourceNameAdapters[self.manager.resource_type.service]
return namer(r) | [
"def",
"get_name",
"(",
"self",
",",
"r",
")",
":",
"namer",
"=",
"ResourceNameAdapters",
"[",
"self",
".",
"manager",
".",
"resource_type",
".",
"service",
"]",
"return",
"namer",
"(",
"r",
")"
] | Given an arbitrary resource attempt to resolve back to a qualified name. | [
"Given",
"an",
"arbitrary",
"resource",
"attempt",
"to",
"resolve",
"back",
"to",
"a",
"qualified",
"name",
"."
] | python | train |
PmagPy/PmagPy | pmagpy/contribution_builder.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L2229-L2249 | def get_singular_and_plural_dtype(self, dtype):
"""
Parameters
----------
dtype : str
MagIC table type (specimens, samples, contribution, etc.)
Returns
---------
name : str
singular name for MagIC table ('specimen' for specimens table, etc.)
dtype : str
plural dtype for MagIC table ('specimens' for specimens table, etc.)
"""
dtype = dtype.strip()
if dtype.endswith('s'):
return dtype[:-1], dtype
elif dtype == 'criteria':
return 'table_column', 'criteria'
elif dtype == 'contribution':
return 'doi', 'contribution' | [
"def",
"get_singular_and_plural_dtype",
"(",
"self",
",",
"dtype",
")",
":",
"dtype",
"=",
"dtype",
".",
"strip",
"(",
")",
"if",
"dtype",
".",
"endswith",
"(",
"'s'",
")",
":",
"return",
"dtype",
"[",
":",
"-",
"1",
"]",
",",
"dtype",
"elif",
"dtype",
"==",
"'criteria'",
":",
"return",
"'table_column'",
",",
"'criteria'",
"elif",
"dtype",
"==",
"'contribution'",
":",
"return",
"'doi'",
",",
"'contribution'"
] | Parameters
----------
dtype : str
MagIC table type (specimens, samples, contribution, etc.)
Returns
---------
name : str
singular name for MagIC table ('specimen' for specimens table, etc.)
dtype : str
plural dtype for MagIC table ('specimens' for specimens table, etc.) | [
"Parameters",
"----------",
"dtype",
":",
"str",
"MagIC",
"table",
"type",
"(",
"specimens",
"samples",
"contribution",
"etc",
".",
")"
] | python | train |
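The branch logic above maps directly to these return values; `con` is assumed to be an instance of the Contribution class that defines the method.

    con.get_singular_and_plural_dtype('specimens')     # ('specimen', 'specimens')
    con.get_singular_and_plural_dtype('criteria')      # ('table_column', 'criteria')
    con.get_singular_and_plural_dtype('contribution')  # ('doi', 'contribution')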
indietyp/django-automated-logging | automated_logging/signals/__init__.py | https://github.com/indietyp/django-automated-logging/blob/095dfc6df62dca45f7db4516bc35e52085d0a01c/automated_logging/signals/__init__.py#L23-L33 | def validate_instance(instance):
"""Validating if the instance should be logged, or is excluded"""
excludes = settings.AUTOMATED_LOGGING['exclude']['model']
for excluded in excludes:
if (excluded in [instance._meta.app_label.lower(),
instance.__class__.__name__.lower()] or
instance.__module__.lower().startswith(excluded)):
return False
return True | [
"def",
"validate_instance",
"(",
"instance",
")",
":",
"excludes",
"=",
"settings",
".",
"AUTOMATED_LOGGING",
"[",
"'exclude'",
"]",
"[",
"'model'",
"]",
"for",
"excluded",
"in",
"excludes",
":",
"if",
"(",
"excluded",
"in",
"[",
"instance",
".",
"_meta",
".",
"app_label",
".",
"lower",
"(",
")",
",",
"instance",
".",
"__class__",
".",
"__name__",
".",
"lower",
"(",
")",
"]",
"or",
"instance",
".",
"__module__",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"excluded",
")",
")",
":",
"return",
"False",
"return",
"True"
] | Validating if the instance should be logged, or is excluded | [
"Validating",
"if",
"the",
"instance",
"should",
"be",
"logged",
"or",
"is",
"excluded"
] | python | train |
operasoftware/twisted-apns | apns/listenable.py | https://github.com/operasoftware/twisted-apns/blob/c7bd460100067e0c96c440ac0f5516485ac7313f/apns/listenable.py#L35-L43 | def dispatchEvent(self, event, *args):
"""
Fire all callbacks assigned to a particular event. To be called by
derivative classes.
:param *args: Additional arguments to be passed to the callback
function.
"""
for callback in self.listeners[event]:
yield callback(event, self, *args) | [
"def",
"dispatchEvent",
"(",
"self",
",",
"event",
",",
"*",
"args",
")",
":",
"for",
"callback",
"in",
"self",
".",
"listeners",
"[",
"event",
"]",
":",
"yield",
"callback",
"(",
"event",
",",
"self",
",",
"*",
"args",
")"
] | Fire all callbacks assigned to a particular event. To be called by
derivative classes.
:param *args: Additional arguments to be passed to the callback
function. | [
"Fire",
"all",
"callbacks",
"assigned",
"to",
"a",
"particular",
"event",
".",
"To",
"be",
"called",
"by",
"derivative",
"classes",
".",
":",
"param",
"*",
"args",
":",
"Additional",
"arguments",
"to",
"be",
"passed",
"to",
"the",
"callback",
"function",
"."
] | python | train |
arne-cl/discoursegraphs | src/discoursegraphs/readwrite/generic.py | https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/generic.py#L69-L86 | def layerset2list(discoursegraph):
"""
typecasts all `layers` sets to lists to make the graph
exportable (e.g. into the `geoff` format).
Parameters
----------
discoursegraph : DiscourseDocumentGraph
"""
for node_id in discoursegraph:
discoursegraph.node[node_id]['layers'] = \
list(discoursegraph.node[node_id]['layers'])
for (from_id, to_id) in discoursegraph.edges_iter():
# there might be multiple edges between 2 nodes
edge_dict = discoursegraph.edge[from_id][to_id]
for edge_id in edge_dict:
edge_dict[edge_id]['layers'] = \
list(edge_dict[edge_id]['layers']) | [
"def",
"layerset2list",
"(",
"discoursegraph",
")",
":",
"for",
"node_id",
"in",
"discoursegraph",
":",
"discoursegraph",
".",
"node",
"[",
"node_id",
"]",
"[",
"'layers'",
"]",
"=",
"list",
"(",
"discoursegraph",
".",
"node",
"[",
"node_id",
"]",
"[",
"'layers'",
"]",
")",
"for",
"(",
"from_id",
",",
"to_id",
")",
"in",
"discoursegraph",
".",
"edges_iter",
"(",
")",
":",
"# there might be multiple edges between 2 nodes",
"edge_dict",
"=",
"discoursegraph",
".",
"edge",
"[",
"from_id",
"]",
"[",
"to_id",
"]",
"for",
"edge_id",
"in",
"edge_dict",
":",
"edge_dict",
"[",
"edge_id",
"]",
"[",
"'layers'",
"]",
"=",
"list",
"(",
"edge_dict",
"[",
"edge_id",
"]",
"[",
"'layers'",
"]",
")"
] | typecasts all `layers` sets to lists to make the graph
exportable (e.g. into the `geoff` format).
Parameters
----------
discoursegraph : DiscourseDocumentGraph | [
"typecasts",
"all",
"layers",
"sets",
"to",
"lists",
"to",
"make",
"the",
"graph",
"exportable",
"(",
"e",
".",
"g",
".",
"into",
"the",
"geoff",
"format",
")",
"."
] | python | train |
marcinmiklitz/pywindow | pywindow/molecular.py | https://github.com/marcinmiklitz/pywindow/blob/e5264812157224f22a691741ca2e0aefdc9bd2eb/pywindow/molecular.py#L963-L1009 | def decipher_atom_keys(self, forcefield='DLF', dict_key='atom_ids'):
"""
Decipher force field atom ids.
This takes all values in :attr:`MolecularSystem.system['atom_ids']`
that match force field type criteria and creates
:attr:`MolecularSystem.system['elements']` with the corresponding
periodic table of elements equivalents.
If a forcefield is not supported by this method, the
:func:`MolecularSystem.swap_atom_keys()` can be used instead.
DLF stands for DL_F notation.
See: C. W. Yong, Descriptions and Implementations of DL_F Notation: A
Natural Chemical Expression System of Atom Types for Molecular
Simulations, J. Chem. Inf. Model., 2016, 56, 1405–1409.
Parameters
----------
forcefield : :class:`str`
The forcefield used to decipher atom ids. Allowed (not case
sensitive): 'OPLS', 'OPLS2005', 'OPLSAA', 'OPLS3', 'DLF', 'DL_F'.
(default='DLF')
dict_key : :class:`str`
The :attr:`MolecularSystem.system` dictionary key to the array
containing the force field atom ids. (default='atom_ids')
Returns
-------
None : :class:`NoneType`
"""
# In case there is no 'atom_ids' key we try 'elements'. This is for
# XYZ and MOL files mostly. But, we keep the dict_key keyword for
# someone who would want to decipher 'elements' even if 'atom_ids' key
# is present in the system's dictionary.
if 'atom_ids' not in self.system.keys():
dict_key = 'elements'
# I do it on temporary object so that it only finishes when successful
temp = deepcopy(self.system[dict_key])
for element in range(len(temp)):
temp[element] = "{0}".format(
decipher_atom_key(
temp[element], forcefield=forcefield))
self.system['elements'] = temp | [
"def",
"decipher_atom_keys",
"(",
"self",
",",
"forcefield",
"=",
"'DLF'",
",",
"dict_key",
"=",
"'atom_ids'",
")",
":",
"# In case there is no 'atom_ids' key we try 'elements'. This is for",
"# XYZ and MOL files mostly. But, we keep the dict_key keyword for",
"# someone who would want to decipher 'elements' even if 'atom_ids' key",
"# is present in the system's dictionary.",
"if",
"'atom_ids'",
"not",
"in",
"self",
".",
"system",
".",
"keys",
"(",
")",
":",
"dict_key",
"=",
"'elements'",
"# I do it on temporary object so that it only finishes when successful",
"temp",
"=",
"deepcopy",
"(",
"self",
".",
"system",
"[",
"dict_key",
"]",
")",
"for",
"element",
"in",
"range",
"(",
"len",
"(",
"temp",
")",
")",
":",
"temp",
"[",
"element",
"]",
"=",
"\"{0}\"",
".",
"format",
"(",
"decipher_atom_key",
"(",
"temp",
"[",
"element",
"]",
",",
"forcefield",
"=",
"forcefield",
")",
")",
"self",
".",
"system",
"[",
"'elements'",
"]",
"=",
"temp"
] | Decipher force field atom ids.
This takes all values in :attr:`MolecularSystem.system['atom_ids']`
that match force field type criteria and creates
:attr:`MolecularSystem.system['elements']` with the corresponding
periodic table of elements equivalents.
If a forcefield is not supported by this method, the
:func:`MolecularSystem.swap_atom_keys()` can be used instead.
DLF stands for DL_F notation.
See: C. W. Yong, Descriptions and Implementations of DL_F Notation: A
Natural Chemical Expression System of Atom Types for Molecular
Simulations, J. Chem. Inf. Model., 2016, 56, 1405–1409.
Parameters
----------
forcefield : :class:`str`
The forcefield used to decipher atom ids. Allowed (not case
sensitive): 'OPLS', 'OPLS2005', 'OPLSAA', 'OPLS3', 'DLF', 'DL_F'.
(default='DLF')
dict_key : :class:`str`
The :attr:`MolecularSystem.system` dictionary key to the array
containing the force field atom ids. (default='atom_ids')
Returns
-------
None : :class:`NoneType` | [
"Decipher",
"force",
"field",
"atom",
"ids",
"."
] | python | train |
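A hedged usage sketch: the input file name and its OPLS atom typing are assumptions, and load_file is assumed to be the pywindow loader for building a MolecularSystem.

    import pywindow as pw

    molsys = pw.MolecularSystem.load_file('cage.pdb')   # 'cage.pdb' is a placeholder
    molsys.decipher_atom_keys(forcefield='OPLS')        # fills molsys.system['elements']
    print(molsys.system['elements'][:5])                # periodic-table symbols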
ArchiveTeam/wpull | wpull/protocol/http/web.py | https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/protocol/http/web.py#L114-L131 | def download(self, file: Optional[IO[bytes]]=None,
duration_timeout: Optional[float]=None):
'''Download content.
Args:
file: An optional file object for the document contents.
duration_timeout: Maximum time in seconds within which the
entire file must be read.
Returns:
Response: An instance of :class:`.http.request.Response`.
See :meth:`WebClient.session` for proper usage of this function.
Coroutine.
'''
yield from \
self._current_session.download(file, duration_timeout=duration_timeout) | [
"def",
"download",
"(",
"self",
",",
"file",
":",
"Optional",
"[",
"IO",
"[",
"bytes",
"]",
"]",
"=",
"None",
",",
"duration_timeout",
":",
"Optional",
"[",
"float",
"]",
"=",
"None",
")",
":",
"yield",
"from",
"self",
".",
"_current_session",
".",
"download",
"(",
"file",
",",
"duration_timeout",
"=",
"duration_timeout",
")"
] | Download content.
Args:
file: An optional file object for the document contents.
duration_timeout: Maximum time in seconds within which the
entire file must be read.
Returns:
Response: An instance of :class:`.http.request.Response`.
See :meth:`WebClient.session` for proper usage of this function.
Coroutine. | [
"Download",
"content",
"."
] | python | train |
mlperf/training | translation/tensorflow/transformer/data_download.py | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/data_download.py#L210-L214 | def txt_line_iterator(path):
"""Iterate through lines of file."""
with tf.gfile.Open(path) as f:
for line in f:
yield line.strip() | [
"def",
"txt_line_iterator",
"(",
"path",
")",
":",
"with",
"tf",
".",
"gfile",
".",
"Open",
"(",
"path",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"yield",
"line",
".",
"strip",
"(",
")"
] | Iterate through lines of file. | [
"Iterate",
"through",
"lines",
"of",
"file",
"."
] | python | train |
wummel/linkchecker | linkcheck/parser/__init__.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/parser/__init__.py#L60-L64 | def parse_chromium (url_data):
"""Parse a Chromium or Google Chrome bookmark file."""
from ..bookmarks.chromium import parse_bookmark_data
for url, name in parse_bookmark_data(url_data.get_content()):
url_data.add_url(url, name=name) | [
"def",
"parse_chromium",
"(",
"url_data",
")",
":",
"from",
".",
".",
"bookmarks",
".",
"chromium",
"import",
"parse_bookmark_data",
"for",
"url",
",",
"name",
"in",
"parse_bookmark_data",
"(",
"url_data",
".",
"get_content",
"(",
")",
")",
":",
"url_data",
".",
"add_url",
"(",
"url",
",",
"name",
"=",
"name",
")"
] | Parse a Chromium or Google Chrome bookmark file. | [
"Parse",
"a",
"Chromium",
"or",
"Google",
"Chrome",
"bookmark",
"file",
"."
] | python | train |
h2non/paco | paco/observer.py | https://github.com/h2non/paco/blob/1e5ef4df317e7cbbcefdf67d8dee28ce90538f3d/paco/observer.py#L23-L42 | def observe(self, event, fn):
"""
Arguments:
event (str): event to subscribe.
fn (function|coroutinefunction): function to trigger.
Raises:
TypeError: if fn argument is not valid
"""
iscoroutine = asyncio.iscoroutinefunction(fn)
if not iscoroutine and not isfunction(fn):
raise TypeError('paco: fn param must be a callable '
'object or coroutine function')
observers = self._pool.get(event)
if not observers:
observers = self._pool[event] = []
# Register the observer
observers.append(fn if iscoroutine else coroutine_wrapper(fn)) | [
"def",
"observe",
"(",
"self",
",",
"event",
",",
"fn",
")",
":",
"iscoroutine",
"=",
"asyncio",
".",
"iscoroutinefunction",
"(",
"fn",
")",
"if",
"not",
"iscoroutine",
"and",
"not",
"isfunction",
"(",
"fn",
")",
":",
"raise",
"TypeError",
"(",
"'paco: fn param must be a callable '",
"'object or coroutine function'",
")",
"observers",
"=",
"self",
".",
"_pool",
".",
"get",
"(",
"event",
")",
"if",
"not",
"observers",
":",
"observers",
"=",
"self",
".",
"_pool",
"[",
"event",
"]",
"=",
"[",
"]",
"# Register the observer",
"observers",
".",
"append",
"(",
"fn",
"if",
"iscoroutine",
"else",
"coroutine_wrapper",
"(",
"fn",
")",
")"
] | Arguments:
event (str): event to subscribe.
fn (function|coroutinefunction): function to trigger.
Raises:
TypeError: if fn argument is not valid | [
"Arguments",
":",
"event",
"(",
"str",
")",
":",
"event",
"to",
"subscribe",
".",
"fn",
"(",
"function|coroutinefunction",
")",
":",
"function",
"to",
"trigger",
"."
] | python | train |
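Registration sketch for observe() above; the Observer class name is an assumption for the class defining the method. Plain functions are wrapped so they can be awaited, coroutine functions are stored as-is, and anything else raises TypeError.

    from paco.observer import Observer   # assumed class name

    observer = Observer()

    def sync_listener(*args, **kw):         # plain function: wrapped internally
        print('sync listener fired')

    async def async_listener(*args, **kw):  # coroutine function: stored as-is
        print('async listener fired')

    observer.observe('saved', sync_listener)
    observer.observe('saved', async_listener)
    # observer.observe('saved', 42)         # would raise TypeError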
django-extensions/django-extensions | django_extensions/management/commands/sqldsn.py | https://github.com/django-extensions/django-extensions/blob/7e0bef97ea6cb7f9eea5e2528e3a985a83a7b9b8/django_extensions/management/commands/sqldsn.py#L100-L141 | def _postgresql(self, dbhost, dbport, dbname, dbuser, dbpass, dsn_style=None): # noqa
"""PostgreSQL psycopg2 driver accepts two syntaxes
Plus a string for .pgpass file
"""
dsn = []
if dsn_style is None or dsn_style == 'all' or dsn_style == 'keyvalue':
dsnstr = "host='{0}' dbname='{2}' user='{3}' password='{4}'"
if dbport is not None:
dsnstr += " port='{1}'"
dsn.append(dsnstr.format(dbhost,
dbport,
dbname,
dbuser,
dbpass,))
if dsn_style == 'all' or dsn_style == 'kwargs':
dsnstr = "host='{0}', database='{2}', user='{3}', password='{4}'"
if dbport is not None:
dsnstr += ", port='{1}'"
dsn.append(dsnstr.format(dbhost,
dbport,
dbname,
dbuser,
dbpass))
if dsn_style == 'all' or dsn_style == 'uri':
dsnstr = "postgresql://{user}:{password}@{host}/{name}"
dsn.append(dsnstr.format(
host="{host}:{port}".format(host=dbhost, port=dbport) if dbport else dbhost, # noqa
name=dbname, user=dbuser, password=dbpass))
if dsn_style == 'all' or dsn_style == 'pgpass':
dsn.append(':'.join(map(str, filter(
None, [dbhost, dbport, dbname, dbuser, dbpass]))))
return dsn | [
"def",
"_postgresql",
"(",
"self",
",",
"dbhost",
",",
"dbport",
",",
"dbname",
",",
"dbuser",
",",
"dbpass",
",",
"dsn_style",
"=",
"None",
")",
":",
"# noqa",
"dsn",
"=",
"[",
"]",
"if",
"dsn_style",
"is",
"None",
"or",
"dsn_style",
"==",
"'all'",
"or",
"dsn_style",
"==",
"'keyvalue'",
":",
"dsnstr",
"=",
"\"host='{0}' dbname='{2}' user='{3}' password='{4}'\"",
"if",
"dbport",
"is",
"not",
"None",
":",
"dsnstr",
"+=",
"\" port='{1}'\"",
"dsn",
".",
"append",
"(",
"dsnstr",
".",
"format",
"(",
"dbhost",
",",
"dbport",
",",
"dbname",
",",
"dbuser",
",",
"dbpass",
",",
")",
")",
"if",
"dsn_style",
"==",
"'all'",
"or",
"dsn_style",
"==",
"'kwargs'",
":",
"dsnstr",
"=",
"\"host='{0}', database='{2}', user='{3}', password='{4}'\"",
"if",
"dbport",
"is",
"not",
"None",
":",
"dsnstr",
"+=",
"\", port='{1}'\"",
"dsn",
".",
"append",
"(",
"dsnstr",
".",
"format",
"(",
"dbhost",
",",
"dbport",
",",
"dbname",
",",
"dbuser",
",",
"dbpass",
")",
")",
"if",
"dsn_style",
"==",
"'all'",
"or",
"dsn_style",
"==",
"'uri'",
":",
"dsnstr",
"=",
"\"postgresql://{user}:{password}@{host}/{name}\"",
"dsn",
".",
"append",
"(",
"dsnstr",
".",
"format",
"(",
"host",
"=",
"\"{host}:{port}\"",
".",
"format",
"(",
"host",
"=",
"dbhost",
",",
"port",
"=",
"dbport",
")",
"if",
"dbport",
"else",
"dbhost",
",",
"# noqa",
"name",
"=",
"dbname",
",",
"user",
"=",
"dbuser",
",",
"password",
"=",
"dbpass",
")",
")",
"if",
"dsn_style",
"==",
"'all'",
"or",
"dsn_style",
"==",
"'pgpass'",
":",
"dsn",
".",
"append",
"(",
"':'",
".",
"join",
"(",
"map",
"(",
"str",
",",
"filter",
"(",
"None",
",",
"[",
"dbhost",
",",
"dbport",
",",
"dbname",
",",
"dbuser",
",",
"dbpass",
"]",
")",
")",
")",
")",
"return",
"dsn"
] | PostgreSQL psycopg2 driver accepts two syntaxes
Plus a string for .pgpass file | [
"PostgreSQL",
"psycopg2",
"driver",
"accepts",
"two",
"syntaxes"
] | python | train |
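Given the formatting branches above, one call with dsn_style='all' yields all four DSN flavours; `cmd` stands for an instance of the management command class that defines the method, and the credentials are placeholders.

    for dsn in cmd._postgresql('localhost', 5432, 'mydb', 'me', 'secret', dsn_style='all'):
        print(dsn)
    # host='localhost' dbname='mydb' user='me' password='secret' port='5432'
    # host='localhost', database='mydb', user='me', password='secret', port='5432'
    # postgresql://me:secret@localhost:5432/mydb
    # localhost:5432:mydb:me:secret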
portfors-lab/sparkle | sparkle/gui/controlwindow.py | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/controlwindow.py#L399-L415 | def closeEvent(self, event):
"""Closes listening threads and saves GUI data for later use.
Re-implemented from :qtdoc:`QWidget`
"""
self.acqmodel.stop_listening() # close listener threads
self.saveInputs(self.inputsFilename)
# save GUI size
settings = QtCore.QSettings("audiolab")
settings.setValue("geometry", self.saveGeometry())
settings.setValue("windowState", self.saveState())
logger = logging.getLogger('main')
logger.info('All user settings saved')
self.garbage_timer.stop()
gc.enable() | [
"def",
"closeEvent",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"acqmodel",
".",
"stop_listening",
"(",
")",
"# close listener threads",
"self",
".",
"saveInputs",
"(",
"self",
".",
"inputsFilename",
")",
"# save GUI size",
"settings",
"=",
"QtCore",
".",
"QSettings",
"(",
"\"audiolab\"",
")",
"settings",
".",
"setValue",
"(",
"\"geometry\"",
",",
"self",
".",
"saveGeometry",
"(",
")",
")",
"settings",
".",
"setValue",
"(",
"\"windowState\"",
",",
"self",
".",
"saveState",
"(",
")",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'main'",
")",
"logger",
".",
"info",
"(",
"'All user settings saved'",
")",
"self",
".",
"garbage_timer",
".",
"stop",
"(",
")",
"gc",
".",
"enable",
"(",
")"
] | Closes listening threads and saves GUI data for later use.
Re-implemented from :qtdoc:`QWidget` | [
"Closes",
"listening",
"threads",
"and",
"saves",
"GUI",
"data",
"for",
"later",
"use",
"."
] | python | train |
pyviz/holoviews | holoviews/core/util.py | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/util.py#L1299-L1314 | def is_cyclic(graph):
"""
Return True if the directed graph g has a cycle. The directed graph
should be represented as a dictionary mapping of edges for each node.
"""
path = set()
def visit(vertex):
path.add(vertex)
for neighbour in graph.get(vertex, ()):
if neighbour in path or visit(neighbour):
return True
path.remove(vertex)
return False
return any(visit(v) for v in graph) | [
"def",
"is_cyclic",
"(",
"graph",
")",
":",
"path",
"=",
"set",
"(",
")",
"def",
"visit",
"(",
"vertex",
")",
":",
"path",
".",
"add",
"(",
"vertex",
")",
"for",
"neighbour",
"in",
"graph",
".",
"get",
"(",
"vertex",
",",
"(",
")",
")",
":",
"if",
"neighbour",
"in",
"path",
"or",
"visit",
"(",
"neighbour",
")",
":",
"return",
"True",
"path",
".",
"remove",
"(",
"vertex",
")",
"return",
"False",
"return",
"any",
"(",
"visit",
"(",
"v",
")",
"for",
"v",
"in",
"graph",
")"
] | Return True if the directed graph g has a cycle. The directed graph
should be represented as a dictionary mapping of edges for each node. | [
"Return",
"True",
"if",
"the",
"directed",
"graph",
"g",
"has",
"a",
"cycle",
".",
"The",
"directed",
"graph",
"should",
"be",
"represented",
"as",
"a",
"dictionary",
"mapping",
"of",
"edges",
"for",
"each",
"node",
"."
] | python | train |
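The helper above treats a graph as a plain dict of successor collections, so it can be exercised standalone:

    dag  = {'a': ('b', 'c'), 'b': ('d',), 'c': ('d',), 'd': ()}
    ring = {'a': ('b',), 'b': ('c',), 'c': ('a',)}

    print(is_cyclic(dag))    # False: no directed cycle
    print(is_cyclic(ring))   # True:  a -> b -> c -> a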
slarse/pdfebc-core | pdfebc_core/email_utils.py | https://github.com/slarse/pdfebc-core/blob/fc40857bc42365b7434714333e37d7a3487603a0/pdfebc_core/email_utils.py#L53-L65 | def _attach_files(filepaths, email_):
"""Take a list of filepaths and attach the files to a MIMEMultipart.
Args:
filepaths (list(str)): A list of filepaths.
email_ (email.MIMEMultipart): A MIMEMultipart email_.
"""
for filepath in filepaths:
base = os.path.basename(filepath)
with open(filepath, "rb") as file:
part = MIMEApplication(file.read(), Name=base)
part["Content-Disposition"] = 'attachment; filename="%s"' % base
email_.attach(part) | [
"def",
"_attach_files",
"(",
"filepaths",
",",
"email_",
")",
":",
"for",
"filepath",
"in",
"filepaths",
":",
"base",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filepath",
")",
"with",
"open",
"(",
"filepath",
",",
"\"rb\"",
")",
"as",
"file",
":",
"part",
"=",
"MIMEApplication",
"(",
"file",
".",
"read",
"(",
")",
",",
"Name",
"=",
"base",
")",
"part",
"[",
"\"Content-Disposition\"",
"]",
"=",
"'attachment; filename=\"%s\"'",
"%",
"base",
"email_",
".",
"attach",
"(",
"part",
")"
] | Take a list of filepaths and attach the files to a MIMEMultipart.
Args:
filepaths (list(str)): A list of filepaths.
email_ (email.MIMEMultipart): A MIMEMultipart email_. | [
"Take",
"a",
"list",
"of",
"filepaths",
"and",
"attach",
"the",
"files",
"to",
"a",
"MIMEMultipart",
"."
] | python | train |
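An illustrative usage sketch for the _attach_files helper above; the message construction uses the standard-library email package, and the file paths are hypothetical (the files must exist on disk for the open() calls inside the helper to succeed):
from email.mime.multipart import MIMEMultipart

message = MIMEMultipart()
message["Subject"] = "Compressed PDFs"
# Attach two example files to the message in place.
_attach_files(["/tmp/a.pdf", "/tmp/b.pdf"], message)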
saltstack/salt | salt/state.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/state.py#L1346-L1353 | def verify_chunks(self, chunks):
'''
Verify the chunks in a list of low data structures
'''
err = []
for chunk in chunks:
err.extend(self.verify_data(chunk))
return err | [
"def",
"verify_chunks",
"(",
"self",
",",
"chunks",
")",
":",
"err",
"=",
"[",
"]",
"for",
"chunk",
"in",
"chunks",
":",
"err",
".",
"extend",
"(",
"self",
".",
"verify_data",
"(",
"chunk",
")",
")",
"return",
"err"
] | Verify the chunks in a list of low data structures | [
"Verify",
"the",
"chunks",
"in",
"a",
"list",
"of",
"low",
"data",
"structures"
] | python | train |
auth0/auth0-python | auth0/v3/authentication/social.py | https://github.com/auth0/auth0-python/blob/34adad3f342226aaaa6071387fa405ab840e5c02/auth0/v3/authentication/social.py#L12-L40 | def login(self, client_id, access_token, connection, scope='openid'):
"""Login using a social provider's access token
Given the social provider's access_token and the connection specified,
it will do the authentication on the provider and return a dict with
the access_token and id_token. Currently, this endpoint only works for
Facebook, Google, Twitter and Weibo.
Args:
client_id (str): application's client id.
access_token (str): social provider's access_token.
connection (str): connection type (e.g: 'facebook')
Returns:
A dict with 'access_token' and 'id_token' keys.
"""
return self.post(
'https://{}/oauth/access_token'.format(self.domain),
data={
'client_id': client_id,
'access_token': access_token,
'connection': connection,
'scope': scope,
},
headers={'Content-Type': 'application/json'}
) | [
"def",
"login",
"(",
"self",
",",
"client_id",
",",
"access_token",
",",
"connection",
",",
"scope",
"=",
"'openid'",
")",
":",
"return",
"self",
".",
"post",
"(",
"'https://{}/oauth/access_token'",
".",
"format",
"(",
"self",
".",
"domain",
")",
",",
"data",
"=",
"{",
"'client_id'",
":",
"client_id",
",",
"'access_token'",
":",
"access_token",
",",
"'connection'",
":",
"connection",
",",
"'scope'",
":",
"scope",
",",
"}",
",",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json'",
"}",
")"
] | Login using a social provider's access token
Given the social provider's access_token and the connection specified,
it will do the authentication on the provider and return a dict with
the access_token and id_token. Currently, this endpoint only works for
Facebook, Google, Twitter and Weibo.
Args:
client_id (str): application's client id.
access_token (str): social provider's access_token.
connection (str): connection type (e.g: 'facebook')
Returns:
A dict with 'access_token' and 'id_token' keys. | [
"Login",
"using",
"a",
"social",
"provider",
"s",
"access",
"token"
] | python | train |
google/dotty | efilter/transforms/normalize.py | https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/normalize.py#L80-L112 | def normalize(expr):
"""Pass through n-ary expressions, and eliminate empty branches.
Variadic and binary expressions recursively visit all their children.
If all children are eliminated then the parent expression is also
eliminated:
(& [removed] [removed]) => [removed]
If only one child is left, it is promoted to replace the parent node:
(& True) => True
"""
children = []
for child in expr.children:
branch = normalize(child)
if branch is None:
continue
if type(branch) is type(expr):
children.extend(branch.children)
else:
children.append(branch)
if len(children) == 0:
return None
if len(children) == 1:
return children[0]
return type(expr)(*children, start=children[0].start,
end=children[-1].end) | [
"def",
"normalize",
"(",
"expr",
")",
":",
"children",
"=",
"[",
"]",
"for",
"child",
"in",
"expr",
".",
"children",
":",
"branch",
"=",
"normalize",
"(",
"child",
")",
"if",
"branch",
"is",
"None",
":",
"continue",
"if",
"type",
"(",
"branch",
")",
"is",
"type",
"(",
"expr",
")",
":",
"children",
".",
"extend",
"(",
"branch",
".",
"children",
")",
"else",
":",
"children",
".",
"append",
"(",
"branch",
")",
"if",
"len",
"(",
"children",
")",
"==",
"0",
":",
"return",
"None",
"if",
"len",
"(",
"children",
")",
"==",
"1",
":",
"return",
"children",
"[",
"0",
"]",
"return",
"type",
"(",
"expr",
")",
"(",
"*",
"children",
",",
"start",
"=",
"children",
"[",
"0",
"]",
".",
"start",
",",
"end",
"=",
"children",
"[",
"-",
"1",
"]",
".",
"end",
")"
] | Pass through n-ary expressions, and eliminate empty branches.
Variadic and binary expressions recursively visit all their children.
If all children are eliminated then the parent expression is also
eliminated:
(& [removed] [removed]) => [removed]
If only one child is left, it is promoted to replace the parent node:
(& True) => True | [
"Pass",
"through",
"n",
"-",
"ary",
"expressions",
"and",
"eliminate",
"empty",
"branches",
"."
] | python | train |
juju/charm-helpers | charmhelpers/contrib/openstack/ip.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/ip.py#L64-L79 | def canonical_url(configs, endpoint_type=PUBLIC):
"""Returns the correct HTTP URL to this host given the state of HTTPS
configuration, hacluster and charm configuration.
:param configs: OSTemplateRenderer config templating object to inspect
for a complete https context.
:param endpoint_type: str endpoint type to resolve.
:param returns: str base URL for services on the current service unit.
"""
scheme = _get_scheme(configs)
address = resolve_address(endpoint_type)
if is_ipv6(address):
address = "[{}]".format(address)
return '%s://%s' % (scheme, address) | [
"def",
"canonical_url",
"(",
"configs",
",",
"endpoint_type",
"=",
"PUBLIC",
")",
":",
"scheme",
"=",
"_get_scheme",
"(",
"configs",
")",
"address",
"=",
"resolve_address",
"(",
"endpoint_type",
")",
"if",
"is_ipv6",
"(",
"address",
")",
":",
"address",
"=",
"\"[{}]\"",
".",
"format",
"(",
"address",
")",
"return",
"'%s://%s'",
"%",
"(",
"scheme",
",",
"address",
")"
] | Returns the correct HTTP URL to this host given the state of HTTPS
configuration, hacluster and charm configuration.
:param configs: OSTemplateRenderer config templating object to inspect
for a complete https context.
:param endpoint_type: str endpoint type to resolve.
:param returns: str base URL for services on the current service unit. | [
"Returns",
"the",
"correct",
"HTTP",
"URL",
"to",
"this",
"host",
"given",
"the",
"state",
"of",
"HTTPS",
"configuration",
"hacluster",
"and",
"charm",
"configuration",
"."
] | python | train |
postlund/pyatv | pyatv/dmap/__init__.py | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/dmap/__init__.py#L70-L73 | def controlprompt_cmd(self, cmd):
"""Perform a "controlpromptentry" command."""
data = tags.string_tag('cmbe', cmd) + tags.uint8_tag('cmcc', 0)
return self.daap.post(_CTRL_PROMPT_CMD, data=data) | [
"def",
"controlprompt_cmd",
"(",
"self",
",",
"cmd",
")",
":",
"data",
"=",
"tags",
".",
"string_tag",
"(",
"'cmbe'",
",",
"cmd",
")",
"+",
"tags",
".",
"uint8_tag",
"(",
"'cmcc'",
",",
"0",
")",
"return",
"self",
".",
"daap",
".",
"post",
"(",
"_CTRL_PROMPT_CMD",
",",
"data",
"=",
"data",
")"
] | Perform a "controlpromptentry" command. | [
"Perform",
"a",
"controlpromptentry",
"command",
"."
] | python | train |
DLR-RM/RAFCON | source/rafcon/core/library_manager.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/library_manager.py#L213-L310 | def get_os_path_to_library(self, library_path, library_name, allow_user_interaction=True):
"""Find library_os_path of library
This function retrieves the file system library_os_path of a library specified by a library_path and a
library_name. In case the library does not exist any more at its original location, the user has to specify
an alternative location.
:param str library_path: The library_path of the library, that must be relative and within a library_root_path
given in the config.yaml by LIBRARY_PATHS
:param str library_name: The name of the library
:param bool allow_user_interaction: Whether the user may be asked to specify library location
:return: library_os_path within filesystem, library_path, library_name
:rtype: str, str, str
:raises rafcon.core.custom_exceptions.LibraryNotFoundException: if the library cannot be found
"""
original_path_and_name = os.path.join(library_path, library_name)
library_path_root = library_path.split(os.sep)[0]
if library_path.split(os.sep) and \
(library_path.startswith(os.sep) or library_path.endswith(os.sep) or os.sep + os.sep in library_path):
raise LibraryNotFoundException("A library_path is not considered to start or end with {2} or to have two "
"path separators {2}{2} in a row like '{0}' with library name {1}"
"".format(library_path, library_name, os.sep))
if not self._library_root_paths:
raise LibraryNotFoundException("There are no libraries registered")
# skip already skipped states
if original_path_and_name in self._skipped_states or library_path_root in self._skipped_library_roots:
# if an already skipped state shall be loaded again, directly raise the exception to jump over this state
raise LibraryNotFoundException("Library '{0}' not found in subfolder {1}".format(library_name,
library_path))
# replace already replaced states
if original_path_and_name in self._replaced_libraries:
new_library_os_path = self._replaced_libraries[original_path_and_name][0]
new_library_path = self._replaced_libraries[original_path_and_name][1]
# only show debug message if a state is automatically replaced by the appropriate library state
# chosen by the user before
if not self._replaced_libraries[original_path_and_name][2]:
logger.debug("The library with library_path \"{0}\" and name \"{1}\" "
"is automatically replaced by the library "
"with file system library_os_path \"{2}\" and library_path \"{3}\""
"".format(library_path, library_name, new_library_os_path, new_library_path))
return new_library_os_path, new_library_path, library_name
# a boolean to indicate if a state was regularly found or by the help of the user
regularly_found = True
library_os_path = self._get_library_os_path_from_library_dict_tree(library_path, library_name)
while library_os_path is None: # until the library is found or the user aborts
regularly_found = False
new_library_os_path = None
if allow_user_interaction:
notice = "Cannot find library '{0}' in library_path '{1}' in any of the library root paths. " \
"Please check your library root paths configuration in config.yaml " \
"LIBRARY_PATHS and environment variable RAFCON_LIBRARY_PATH. " \
"If your library_path is correct and the library was moved, please " \
"select the new root/library_os_path folder of the library which should be situated within a "\
"loaded library_root_path. If not, please abort.".format(library_name, library_path)
interface.show_notice_func(notice)
new_library_os_path = interface.open_folder_func("Select root folder for library name '{0}'"
"".format(original_path_and_name))
if new_library_os_path is None:
# User clicked cancel => cancel library search
# If the library root path is existent (e.g. "generic") and only the specific library state is not (
# e.g. "generic/wait", then the state is added to the skipped states.
# If the library root path is not existing, we ignore the whole library, preventing the user from
# endless dialogs for each missing library state.
if library_path_root not in self.libraries:
self._skipped_library_roots.append(library_path_root)
else:
self._skipped_states.append(original_path_and_name)
raise LibraryNotFoundException("Library '{0}' not found in sub-folder {1}".format(library_name,
library_path))
if not os.path.exists(new_library_os_path):
logger.error('Specified library_os_path does not exist')
continue
# check if valid library_path and library_name can be created
library_path, library_name = self.get_library_path_and_name_for_os_path(new_library_os_path)
if library_path is None:
logger.error("Specified library_os_path not within loaded library_root_path list or your config.yaml "
"file LIBRARY_PATHS or in the list of paths in environment variable RAFCON_LIBRARY_PATH")
continue # Allow the user to change the directory
# verification if library is also in library tree
library_os_path = self._get_library_os_path_from_library_dict_tree(library_path, library_name)
if library_os_path is not None:
assert library_os_path == new_library_os_path
# save the replacement in order that for a future occurrence the correct library_os_path can be used,
# without asking the user for the correct library_os_path
self._replaced_libraries[original_path_and_name] = (library_os_path, library_path, regularly_found)
return library_os_path, library_path, library_name | [
"def",
"get_os_path_to_library",
"(",
"self",
",",
"library_path",
",",
"library_name",
",",
"allow_user_interaction",
"=",
"True",
")",
":",
"original_path_and_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"library_path",
",",
"library_name",
")",
"library_path_root",
"=",
"library_path",
".",
"split",
"(",
"os",
".",
"sep",
")",
"[",
"0",
"]",
"if",
"library_path",
".",
"split",
"(",
"os",
".",
"sep",
")",
"and",
"(",
"library_path",
".",
"startswith",
"(",
"os",
".",
"sep",
")",
"or",
"library_path",
".",
"endswith",
"(",
"os",
".",
"sep",
")",
"or",
"os",
".",
"sep",
"+",
"os",
".",
"sep",
"in",
"library_path",
")",
":",
"raise",
"LibraryNotFoundException",
"(",
"\"A library_path is not considered to start or end with {2} or to have two \"",
"\"path separators {2}{2} in a row like '{0}' with library name {1}\"",
"\"\"",
".",
"format",
"(",
"library_path",
",",
"library_name",
",",
"os",
".",
"sep",
")",
")",
"if",
"not",
"self",
".",
"_library_root_paths",
":",
"raise",
"LibraryNotFoundException",
"(",
"\"There are no libraries registered\"",
")",
"# skip already skipped states",
"if",
"original_path_and_name",
"in",
"self",
".",
"_skipped_states",
"or",
"library_path_root",
"in",
"self",
".",
"_skipped_library_roots",
":",
"# if an already skipped state shall be loaded again, directly raise the exception to jump over this state",
"raise",
"LibraryNotFoundException",
"(",
"\"Library '{0}' not found in subfolder {1}\"",
".",
"format",
"(",
"library_name",
",",
"library_path",
")",
")",
"# replace already replaced states",
"if",
"original_path_and_name",
"in",
"self",
".",
"_replaced_libraries",
":",
"new_library_os_path",
"=",
"self",
".",
"_replaced_libraries",
"[",
"original_path_and_name",
"]",
"[",
"0",
"]",
"new_library_path",
"=",
"self",
".",
"_replaced_libraries",
"[",
"original_path_and_name",
"]",
"[",
"1",
"]",
"# only show debug message if a state is automatically replaced by the appropriate library state",
"# chosen by the user before",
"if",
"not",
"self",
".",
"_replaced_libraries",
"[",
"original_path_and_name",
"]",
"[",
"2",
"]",
":",
"logger",
".",
"debug",
"(",
"\"The library with library_path \\\"{0}\\\" and name \\\"{1}\\\" \"",
"\"is automatically replaced by the library \"",
"\"with file system library_os_path \\\"{2}\\\" and library_path \\\"{3}\\\"\"",
"\"\"",
".",
"format",
"(",
"library_path",
",",
"library_name",
",",
"new_library_os_path",
",",
"new_library_path",
")",
")",
"return",
"new_library_os_path",
",",
"new_library_path",
",",
"library_name",
"# a boolean to indicate if a state was regularly found or by the help of the user",
"regularly_found",
"=",
"True",
"library_os_path",
"=",
"self",
".",
"_get_library_os_path_from_library_dict_tree",
"(",
"library_path",
",",
"library_name",
")",
"while",
"library_os_path",
"is",
"None",
":",
"# until the library is found or the user aborts",
"regularly_found",
"=",
"False",
"new_library_os_path",
"=",
"None",
"if",
"allow_user_interaction",
":",
"notice",
"=",
"\"Cannot find library '{0}' in library_path '{1}' in any of the library root paths. \"",
"\"Please check your library root paths configuration in config.yaml \"",
"\"LIBRARY_PATHS and environment variable RAFCON_LIBRARY_PATH. \"",
"\"If your library_path is correct and the library was moved, please \"",
"\"select the new root/library_os_path folder of the library which should be situated within a \"",
"\"loaded library_root_path. If not, please abort.\"",
".",
"format",
"(",
"library_name",
",",
"library_path",
")",
"interface",
".",
"show_notice_func",
"(",
"notice",
")",
"new_library_os_path",
"=",
"interface",
".",
"open_folder_func",
"(",
"\"Select root folder for library name '{0}'\"",
"\"\"",
".",
"format",
"(",
"original_path_and_name",
")",
")",
"if",
"new_library_os_path",
"is",
"None",
":",
"# User clicked cancel => cancel library search",
"# If the library root path is existent (e.g. \"generic\") and only the specific library state is not (",
"# e.g. \"generic/wait\", then the state is added to the skipped states.",
"# If the library root path is not existing, we ignore the whole library, preventing the user from",
"# endless dialogs for each missing library state.",
"if",
"library_path_root",
"not",
"in",
"self",
".",
"libraries",
":",
"self",
".",
"_skipped_library_roots",
".",
"append",
"(",
"library_path_root",
")",
"else",
":",
"self",
".",
"_skipped_states",
".",
"append",
"(",
"original_path_and_name",
")",
"raise",
"LibraryNotFoundException",
"(",
"\"Library '{0}' not found in sub-folder {1}\"",
".",
"format",
"(",
"library_name",
",",
"library_path",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"new_library_os_path",
")",
":",
"logger",
".",
"error",
"(",
"'Specified library_os_path does not exist'",
")",
"continue",
"# check if valid library_path and library_name can be created",
"library_path",
",",
"library_name",
"=",
"self",
".",
"get_library_path_and_name_for_os_path",
"(",
"new_library_os_path",
")",
"if",
"library_path",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"\"Specified library_os_path not within loaded library_root_path list or your config.yaml \"",
"\"file LIBRARY_PATHS or in the list of paths in environment variable RAFCON_LIBRARY_PATH\"",
")",
"continue",
"# Allow the user to change the directory",
"# verification if library is also in library tree",
"library_os_path",
"=",
"self",
".",
"_get_library_os_path_from_library_dict_tree",
"(",
"library_path",
",",
"library_name",
")",
"if",
"library_os_path",
"is",
"not",
"None",
":",
"assert",
"library_os_path",
"==",
"new_library_os_path",
"# save the replacement in order that for a future occurrence the correct library_os_path can be used,",
"# without asking the user for the correct library_os_path",
"self",
".",
"_replaced_libraries",
"[",
"original_path_and_name",
"]",
"=",
"(",
"library_os_path",
",",
"library_path",
",",
"regularly_found",
")",
"return",
"library_os_path",
",",
"library_path",
",",
"library_name"
] | Find library_os_path of library
This function retrieves the file system library_os_path of a library specified by a library_path and a
library_name. In case the library does not exist any more at its original location, the user has to specify
an alternative location.
:param str library_path: The library_path of the library, that must be relative and within a library_root_path
given in the config.yaml by LIBRARY_PATHS
:param str library_name: The name of the library
:param bool allow_user_interaction: Whether the user may be asked to specify library location
:return: library_os_path within filesystem, library_path, library_name
:rtype: str, str, str
:raises rafcon.core.custom_exceptions.LibraryNotFoundException: if the library cannot be found | [
"Find",
"library_os_path",
"of",
"library"
] | python | train |
minhhoit/yacms | yacms/pages/templatetags/pages_tags.py | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/pages/templatetags/pages_tags.py#L170-L203 | def set_page_permissions(context, token):
"""
Assigns a permissions dict to the given page instance, combining
Django's permission for the page's model and a permission check
against the instance itself calling the page's ``can_add``,
``can_change`` and ``can_delete`` custom methods.
Used within the change list for pages, to implement permission
checks for the navigation tree.
"""
page = context[token.split_contents()[1]]
model = page.get_content_model()
try:
opts = model._meta
except AttributeError:
if model is None:
error = _("Could not load the model for the following page, "
"was it removed?")
obj = page
else:
# A missing inner Meta class usually means the Page model
# hasn't been directly subclassed.
error = _("An error occured with the following class. Does "
"it subclass Page directly?")
obj = model.__class__.__name__
raise ImproperlyConfigured(error + " '%s'" % obj)
perm_name = opts.app_label + ".%s_" + opts.object_name.lower()
request = context["request"]
setattr(page, "perms", {})
for perm_type in ("add", "change", "delete"):
perm = request.user.has_perm(perm_name % perm_type)
perm = perm and getattr(model, "can_%s" % perm_type)(request)
page.perms[perm_type] = perm
return "" | [
"def",
"set_page_permissions",
"(",
"context",
",",
"token",
")",
":",
"page",
"=",
"context",
"[",
"token",
".",
"split_contents",
"(",
")",
"[",
"1",
"]",
"]",
"model",
"=",
"page",
".",
"get_content_model",
"(",
")",
"try",
":",
"opts",
"=",
"model",
".",
"_meta",
"except",
"AttributeError",
":",
"if",
"model",
"is",
"None",
":",
"error",
"=",
"_",
"(",
"\"Could not load the model for the following page, \"",
"\"was it removed?\"",
")",
"obj",
"=",
"page",
"else",
":",
"# A missing inner Meta class usually means the Page model",
"# hasn't been directly subclassed.",
"error",
"=",
"_",
"(",
"\"An error occured with the following class. Does \"",
"\"it subclass Page directly?\"",
")",
"obj",
"=",
"model",
".",
"__class__",
".",
"__name__",
"raise",
"ImproperlyConfigured",
"(",
"error",
"+",
"\" '%s'\"",
"%",
"obj",
")",
"perm_name",
"=",
"opts",
".",
"app_label",
"+",
"\".%s_\"",
"+",
"opts",
".",
"object_name",
".",
"lower",
"(",
")",
"request",
"=",
"context",
"[",
"\"request\"",
"]",
"setattr",
"(",
"page",
",",
"\"perms\"",
",",
"{",
"}",
")",
"for",
"perm_type",
"in",
"(",
"\"add\"",
",",
"\"change\"",
",",
"\"delete\"",
")",
":",
"perm",
"=",
"request",
".",
"user",
".",
"has_perm",
"(",
"perm_name",
"%",
"perm_type",
")",
"perm",
"=",
"perm",
"and",
"getattr",
"(",
"model",
",",
"\"can_%s\"",
"%",
"perm_type",
")",
"(",
"request",
")",
"page",
".",
"perms",
"[",
"perm_type",
"]",
"=",
"perm",
"return",
"\"\""
] | Assigns a permissions dict to the given page instance, combining
Django's permission for the page's model and a permission check
against the instance itself calling the page's ``can_add``,
``can_change`` and ``can_delete`` custom methods.
Used within the change list for pages, to implement permission
checks for the navigation tree. | [
"Assigns",
"a",
"permissions",
"dict",
"to",
"the",
"given",
"page",
"instance",
"combining",
"Django",
"s",
"permission",
"for",
"the",
"page",
"s",
"model",
"and",
"a",
"permission",
"check",
"against",
"the",
"instance",
"itself",
"calling",
"the",
"page",
"s",
"can_add",
"can_change",
"and",
"can_delete",
"custom",
"methods",
"."
] | python | train |
pyslackers/slack-sansio | slack/sansio.py | https://github.com/pyslackers/slack-sansio/blob/068ddd6480c6d2f9bf14fa4db498c9fe1017f4ab/slack/sansio.py#L203-L219 | def decode_response(status: int, headers: MutableMapping, body: bytes) -> dict:
"""
Decode incoming response
Args:
status: Response status
headers: Response headers
body: Response body
Returns:
Response data
"""
data = decode_body(headers, body)
raise_for_status(status, headers, data)
raise_for_api_error(headers, data)
return data | [
"def",
"decode_response",
"(",
"status",
":",
"int",
",",
"headers",
":",
"MutableMapping",
",",
"body",
":",
"bytes",
")",
"->",
"dict",
":",
"data",
"=",
"decode_body",
"(",
"headers",
",",
"body",
")",
"raise_for_status",
"(",
"status",
",",
"headers",
",",
"data",
")",
"raise_for_api_error",
"(",
"headers",
",",
"data",
")",
"return",
"data"
] | Decode incoming response
Args:
status: Response status
headers: Response headers
body: Response body
Returns:
Response data | [
"Decode",
"incoming",
"response"
] | python | train |
kimvais/ike | ike/util/external.py | https://github.com/kimvais/ike/blob/4a5622c878a43a3d3cc19c54aa7cc7be29318eae/ike/util/external.py#L13-L25 | def run_setkey(input):
"""
Runs a script through the 'setkey' command that is a user space interface for PFKEY.
:param input: setkey configuration file contents.
"""
SETKEY_BINARY = '/usr/sbin/setkey'
fd, filename = tempfile.mkstemp('w')
f = os.fdopen(fd, 'w')
f.write(input)
f.close()
output = subprocess.check_output(['sudo', SETKEY_BINARY, '-f', filename])
os.remove(filename)
return output.decode('utf-8') | [
"def",
"run_setkey",
"(",
"input",
")",
":",
"SETKEY_BINARY",
"=",
"'/usr/sbin/setkey'",
"fd",
",",
"filename",
"=",
"tempfile",
".",
"mkstemp",
"(",
"'w'",
")",
"f",
"=",
"os",
".",
"fdopen",
"(",
"fd",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"input",
")",
"f",
".",
"close",
"(",
")",
"output",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'sudo'",
",",
"SETKEY_BINARY",
",",
"'-f'",
",",
"filename",
"]",
")",
"os",
".",
"remove",
"(",
"filename",
")",
"return",
"output",
".",
"decode",
"(",
"'utf-8'",
")"
] | Runs a script through the 'setkey' command that is a user space interface for PFKEY.
:param input: setkey configuration file contents. | [
"Runs",
"a",
"script",
"through",
"the",
"setkey",
"command",
"that",
"is",
"a",
"user",
"space",
"insterface",
"for",
"PFKEY",
".",
":",
"param",
"input",
":",
"setkey",
"configuration",
"file",
"contents",
"."
] | python | train |
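An illustrative call sketch for the run_setkey helper above, assuming the setkey tool and passwordless sudo are available; the one-line policy shown ('spdflush;') is only an example directive:
# Flush the security policy database via a minimal setkey script.
print(run_setkey("spdflush;\n"))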
proycon/pynlpl | pynlpl/formats/folia.py | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L4521-L4539 | def append(self, child, *args, **kwargs):
"""See :meth:`AbstractElement.append`"""
#if no set is associated with the layer yet, we learn it from span annotation elements that are added
if self.set is False or self.set is None:
if inspect.isclass(child):
if issubclass(child,AbstractSpanAnnotation):
if 'set' in kwargs:
self.set = kwargs['set']
elif isinstance(child, AbstractSpanAnnotation):
if child.set:
self.set = child.set
elif isinstance(child, Correction):
#descend into corrections to find the proper set for this layer (derived from span annotation elements)
for e in itertools.chain( child.new(), child.original(), child.suggestions() ):
if isinstance(e, AbstractSpanAnnotation) and e.set:
self.set = e.set
break
return super(AbstractAnnotationLayer, self).append(child, *args, **kwargs) | [
"def",
"append",
"(",
"self",
",",
"child",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"#if no set is associated with the layer yet, we learn it from span annotation elements that are added",
"if",
"self",
".",
"set",
"is",
"False",
"or",
"self",
".",
"set",
"is",
"None",
":",
"if",
"inspect",
".",
"isclass",
"(",
"child",
")",
":",
"if",
"issubclass",
"(",
"child",
",",
"AbstractSpanAnnotation",
")",
":",
"if",
"'set'",
"in",
"kwargs",
":",
"self",
".",
"set",
"=",
"kwargs",
"[",
"'set'",
"]",
"elif",
"isinstance",
"(",
"child",
",",
"AbstractSpanAnnotation",
")",
":",
"if",
"child",
".",
"set",
":",
"self",
".",
"set",
"=",
"child",
".",
"set",
"elif",
"isinstance",
"(",
"child",
",",
"Correction",
")",
":",
"#descend into corrections to find the proper set for this layer (derived from span annotation elements)",
"for",
"e",
"in",
"itertools",
".",
"chain",
"(",
"child",
".",
"new",
"(",
")",
",",
"child",
".",
"original",
"(",
")",
",",
"child",
".",
"suggestions",
"(",
")",
")",
":",
"if",
"isinstance",
"(",
"e",
",",
"AbstractSpanAnnotation",
")",
"and",
"e",
".",
"set",
":",
"self",
".",
"set",
"=",
"e",
".",
"set",
"break",
"return",
"super",
"(",
"AbstractAnnotationLayer",
",",
"self",
")",
".",
"append",
"(",
"child",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | See :meth:`AbstractElement.append` | [
"See",
":",
"meth",
":",
"AbstractElement",
".",
"append"
] | python | train |
ArduPilot/MAVProxy | MAVProxy/modules/lib/MacOS/backend_wx.py | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/MacOS/backend_wx.py#L816-L825 | def draw(self, drawDC=None):
"""
Render the figure using RendererWx instance renderer, or using a
previously defined renderer if none is specified.
"""
DEBUG_MSG("draw()", 1, self)
self.renderer = RendererWx(self.bitmap, self.figure.dpi)
self.figure.draw(self.renderer)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC) | [
"def",
"draw",
"(",
"self",
",",
"drawDC",
"=",
"None",
")",
":",
"DEBUG_MSG",
"(",
"\"draw()\"",
",",
"1",
",",
"self",
")",
"self",
".",
"renderer",
"=",
"RendererWx",
"(",
"self",
".",
"bitmap",
",",
"self",
".",
"figure",
".",
"dpi",
")",
"self",
".",
"figure",
".",
"draw",
"(",
"self",
".",
"renderer",
")",
"self",
".",
"_isDrawn",
"=",
"True",
"self",
".",
"gui_repaint",
"(",
"drawDC",
"=",
"drawDC",
")"
] | Render the figure using RendererWx instance renderer, or using a
previously defined renderer if none is specified. | [
"Render",
"the",
"figure",
"using",
"RendererWx",
"instance",
"renderer",
"or",
"using",
"a",
"previously",
"defined",
"renderer",
"if",
"none",
"is",
"specified",
"."
] | python | train |
tensorlayer/tensorlayer | tensorlayer/prepro.py | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L2230-L2253 | def find_contours(x, level=0.8, fully_connected='low', positive_orientation='low'):
"""Find iso-valued contours in a 2D array for a given level value, returns list of (n, 2)-ndarrays
see `skimage.measure.find_contours <http://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.find_contours>`__.
Parameters
------------
x : 2D ndarray of double.
Input data in which to find contours.
level : float
Value along which to find contours in the array.
fully_connected : str
Either `low` or `high`. Indicates whether array elements below the given level value are to be considered fully-connected (and hence elements above the value will only be face connected), or vice-versa. (See notes below for details.)
positive_orientation : str
Either `low` or `high`. Indicates whether the output contours will produce positively-oriented polygons around islands of low- or high-valued elements. If `low` then contours will wind counter-clockwise around elements below the iso-value. Alternately, this means that low-valued elements are always on the left of the contour.
Returns
--------
list of (n,2)-ndarrays
Each contour is an ndarray of shape (n, 2), consisting of n (row, column) coordinates along the contour.
"""
return skimage.measure.find_contours(
x, level, fully_connected=fully_connected, positive_orientation=positive_orientation
) | [
"def",
"find_contours",
"(",
"x",
",",
"level",
"=",
"0.8",
",",
"fully_connected",
"=",
"'low'",
",",
"positive_orientation",
"=",
"'low'",
")",
":",
"return",
"skimage",
".",
"measure",
".",
"find_contours",
"(",
"x",
",",
"level",
",",
"fully_connected",
"=",
"fully_connected",
",",
"positive_orientation",
"=",
"positive_orientation",
")"
] | Find iso-valued contours in a 2D array for a given level value, returns list of (n, 2)-ndarrays
see `skimage.measure.find_contours <http://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.find_contours>`__.
Parameters
------------
x : 2D ndarray of double.
Input data in which to find contours.
level : float
Value along which to find contours in the array.
fully_connected : str
Either `low` or `high`. Indicates whether array elements below the given level value are to be considered fully-connected (and hence elements above the value will only be face connected), or vice-versa. (See notes below for details.)
positive_orientation : str
Either `low` or `high`. Indicates whether the output contours will produce positively-oriented polygons around islands of low- or high-valued elements. If `low` then contours will wind counter-clockwise around elements below the iso-value. Alternately, this means that low-valued elements are always on the left of the contour.
Returns
--------
list of (n,2)-ndarrays
Each contour is an ndarray of shape (n, 2), consisting of n (row, column) coordinates along the contour. | [
"Find",
"iso",
"-",
"valued",
"contours",
"in",
"a",
"2D",
"array",
"for",
"a",
"given",
"level",
"value",
"returns",
"list",
"of",
"(",
"n",
"2",
")",
"-",
"ndarrays",
"see",
"skimage",
".",
"measure",
".",
"find_contours",
"<http",
":",
"//",
"scikit",
"-",
"image",
".",
"org",
"/",
"docs",
"/",
"dev",
"/",
"api",
"/",
"skimage",
".",
"measure",
".",
"html#skimage",
".",
"measure",
".",
"find_contours",
">",
"__",
"."
] | python | valid |
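An illustrative call sketch for the find_contours wrapper above on a synthetic array (mirroring the scikit-image example it delegates to); level 0.8 matches the wrapper's default:
import numpy as np

x, y = np.ogrid[-np.pi:np.pi:100j, -np.pi:np.pi:100j]
img = np.sin(np.exp(np.sin(x) ** 3 + np.cos(y) ** 2))
contours = find_contours(img, 0.8)   # list of (n, 2) arrays of (row, col) points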
syndbg/demonoid-api | demonoid/parser.py | https://github.com/syndbg/demonoid-api/blob/518aa389ac91b5243b92fc19923103f31041a61e/demonoid/parser.py#L124-L151 | def parse_second_row(row, url):
"""
Static method that parses a given table row element by using helper methods `Parser.parse_category_subcategory_and_or_quality`,
`Parser.parse_torrent_link` and scrapping torrent's category, subcategory, quality, language, user, user url, torrent link, size,
comments, times completed, seeders and leechers. Used specifically with a torrent's second table row.
:param lxml.HtmlElement row: row to parse
:param urls.Url url_instance: Url used to combine base url's with scrapped links from tr
:return: scrapped category, subcategory, quality, language, user, user url, torrent link, size, comments, times completed,
seeders and leechers
:rtype: list
"""
tags = row.findall('./td')
category, subcategory, quality, language = Parser.parse_torrent_properties(tags[0])
user_info = tags[1].find('./a')
user = user_info.text_content()
user_url = url.combine(user_info.get('href'))
# Two urls - one is spam, second is torrent url.
# Don't combine it with BASE_URL, since it's an absolute url.
torrent_link = Parser.parse_torrent_link(tags[2])
size = tags[3].text # as 10.5 GB
comments = tags[4].text
times_completed = tags[5].text
seeders = tags[6].text
leechers = tags[7].text
return [category, subcategory, quality, language, user, user_url, torrent_link,
size, comments, times_completed, seeders, leechers] | [
"def",
"parse_second_row",
"(",
"row",
",",
"url",
")",
":",
"tags",
"=",
"row",
".",
"findall",
"(",
"'./td'",
")",
"category",
",",
"subcategory",
",",
"quality",
",",
"language",
"=",
"Parser",
".",
"parse_torrent_properties",
"(",
"tags",
"[",
"0",
"]",
")",
"user_info",
"=",
"tags",
"[",
"1",
"]",
".",
"find",
"(",
"'./a'",
")",
"user",
"=",
"user_info",
".",
"text_content",
"(",
")",
"user_url",
"=",
"url",
".",
"combine",
"(",
"user_info",
".",
"get",
"(",
"'href'",
")",
")",
"# Two urls - one is spam, second is torrent url.",
"# Don't combine it with BASE_URL, since it's an absolute url.",
"torrent_link",
"=",
"Parser",
".",
"parse_torrent_link",
"(",
"tags",
"[",
"2",
"]",
")",
"size",
"=",
"tags",
"[",
"3",
"]",
".",
"text",
"# as 10.5 GB",
"comments",
"=",
"tags",
"[",
"4",
"]",
".",
"text",
"times_completed",
"=",
"tags",
"[",
"5",
"]",
".",
"text",
"seeders",
"=",
"tags",
"[",
"6",
"]",
".",
"text",
"leechers",
"=",
"tags",
"[",
"7",
"]",
".",
"text",
"return",
"[",
"category",
",",
"subcategory",
",",
"quality",
",",
"language",
",",
"user",
",",
"user_url",
",",
"torrent_link",
",",
"size",
",",
"comments",
",",
"times_completed",
",",
"seeders",
",",
"leechers",
"]"
] | Static method that parses a given table row element by using helper methods `Parser.parse_category_subcategory_and_or_quality`,
`Parser.parse_torrent_link` and scrapping torrent's category, subcategory, quality, language, user, user url, torrent link, size,
comments, times completed, seeders and leechers. Used specifically with a torrent's second table row.
:param lxml.HtmlElement row: row to parse
:param urls.Url url_instance: Url used to combine base url's with scrapped links from tr
:return: scrapped category, subcategory, quality, language, user, user url, torrent link, size, comments, times completed,
seeders and leechers
:rtype: list | [
"Static",
"method",
"that",
"parses",
"a",
"given",
"table",
"row",
"element",
"by",
"using",
"helper",
"methods",
"Parser",
".",
"parse_category_subcategory_and_or_quality",
"Parser",
".",
"parse_torrent_link",
"and",
"scrapping",
"torrent",
"s",
"category",
"subcategory",
"quality",
"language",
"user",
"user",
"url",
"torrent",
"link",
"size",
"comments",
"times",
"completed",
"seeders",
"and",
"leechers",
".",
"Used",
"specifically",
"with",
"a",
"torrent",
"s",
"second",
"table",
"row",
"."
] | python | train |
PythonCharmers/python-future | docs/3rd-party-py3k-compat-code/pandas_py3k.py | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/docs/3rd-party-py3k-compat-code/pandas_py3k.py#L505-L515 | def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abracadabra').most_common(3)
[('a', 5), ('r', 2), ('b', 2)]
'''
if n is None:
return sorted(iteritems(self), key=itemgetter(1), reverse=True)
return nlargest(n, iteritems(self), key=itemgetter(1)) | [
"def",
"most_common",
"(",
"self",
",",
"n",
"=",
"None",
")",
":",
"if",
"n",
"is",
"None",
":",
"return",
"sorted",
"(",
"iteritems",
"(",
"self",
")",
",",
"key",
"=",
"itemgetter",
"(",
"1",
")",
",",
"reverse",
"=",
"True",
")",
"return",
"nlargest",
"(",
"n",
",",
"iteritems",
"(",
"self",
")",
",",
"key",
"=",
"itemgetter",
"(",
"1",
")",
")"
] | List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abracadabra').most_common(3)
[('a', 5), ('r', 2), ('b', 2)] | [
"List",
"the",
"n",
"most",
"common",
"elements",
"and",
"their",
"counts",
"from",
"the",
"most",
"common",
"to",
"the",
"least",
".",
"If",
"n",
"is",
"None",
"then",
"list",
"all",
"element",
"counts",
"."
] | python | train |
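An illustrative note for the most_common backport above: the built-in collections.Counter exposes the same behaviour, so a quick check can be run against it:
from collections import Counter

print(Counter('abracadabra').most_common(3))
# e.g. [('a', 5), ('b', 2), ('r', 2)]; ordering among equal counts may vary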
jepegit/cellpy | cellpy/parameters/prmreader.py | https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/parameters/prmreader.py#L56-L72 | def _pack_prms():
"""if you introduce new 'save-able' parameter dictionaries, then you have
to include them here"""
config_dict = {
"Paths": prms.Paths.to_dict(),
"FileNames": prms.FileNames.to_dict(),
"Db": prms.Db.to_dict(),
"DbCols": prms.DbCols.to_dict(),
"DataSet": prms.DataSet.to_dict(),
"Reader": prms.Reader.to_dict(),
"Instruments": prms.Instruments.to_dict(),
# "excel_db_cols": prms.excel_db_cols.to_dict(),
# "excel_db_filename_cols": prms.excel_db_filename_cols.to_dict(),
"Batch": prms.Batch.to_dict(),
}
return config_dict | [
"def",
"_pack_prms",
"(",
")",
":",
"config_dict",
"=",
"{",
"\"Paths\"",
":",
"prms",
".",
"Paths",
".",
"to_dict",
"(",
")",
",",
"\"FileNames\"",
":",
"prms",
".",
"FileNames",
".",
"to_dict",
"(",
")",
",",
"\"Db\"",
":",
"prms",
".",
"Db",
".",
"to_dict",
"(",
")",
",",
"\"DbCols\"",
":",
"prms",
".",
"DbCols",
".",
"to_dict",
"(",
")",
",",
"\"DataSet\"",
":",
"prms",
".",
"DataSet",
".",
"to_dict",
"(",
")",
",",
"\"Reader\"",
":",
"prms",
".",
"Reader",
".",
"to_dict",
"(",
")",
",",
"\"Instruments\"",
":",
"prms",
".",
"Instruments",
".",
"to_dict",
"(",
")",
",",
"# \"excel_db_cols\": prms.excel_db_cols.to_dict(),",
"# \"excel_db_filename_cols\": prms.excel_db_filename_cols.to_dict(),",
"\"Batch\"",
":",
"prms",
".",
"Batch",
".",
"to_dict",
"(",
")",
",",
"}",
"return",
"config_dict"
] | if you introduce new 'save-able' parameter dictionaries, then you have
to include them here | [
"if",
"you",
"introduce",
"new",
"save",
"-",
"able",
"parameter",
"dictionaries",
"then",
"you",
"have",
"to",
"include",
"them",
"here"
] | python | train |
SmartTeleMax/iktomi | iktomi/web/url.py | https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/web/url.py#L181-L190 | def qs_add(self, *args, **kwargs):
'''Add value to QuerySet MultiDict'''
query = self.query.copy()
if args:
mdict = MultiDict(args[0])
for k, v in mdict.items():
query.add(k, v)
for k, v in kwargs.items():
query.add(k, v)
return self._copy(query=query) | [
"def",
"qs_add",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"query",
"=",
"self",
".",
"query",
".",
"copy",
"(",
")",
"if",
"args",
":",
"mdict",
"=",
"MultiDict",
"(",
"args",
"[",
"0",
"]",
")",
"for",
"k",
",",
"v",
"in",
"mdict",
".",
"items",
"(",
")",
":",
"query",
".",
"add",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"query",
".",
"add",
"(",
"k",
",",
"v",
")",
"return",
"self",
".",
"_copy",
"(",
"query",
"=",
"query",
")"
] | Add value to QuerySet MultiDict | [
"Add",
"value",
"to",
"QuerySet",
"MultiDict"
] | python | train |
KelSolaar/Umbra | umbra/managers/actions_manager.py | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/managers/actions_manager.py#L134-L145 | def root_namespace(self, value):
"""
Setter for **self.__root_namespace** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"root_namespace", value)
self.__root_namespace = value | [
"def",
"root_namespace",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"assert",
"type",
"(",
"value",
")",
"is",
"unicode",
",",
"\"'{0}' attribute: '{1}' type is not 'unicode'!\"",
".",
"format",
"(",
"\"root_namespace\"",
",",
"value",
")",
"self",
".",
"__root_namespace",
"=",
"value"
] | Setter for **self.__root_namespace** attribute.
:param value: Attribute value.
:type value: unicode | [
"Setter",
"for",
"**",
"self",
".",
"__root_namespace",
"**",
"attribute",
"."
] | python | train |
python-security/pyt | pyt/cfg/stmt_visitor_helper.py | https://github.com/python-security/pyt/blob/efc0cfb716e40e0c8df4098f1cc8cf43723cd31f/pyt/cfg/stmt_visitor_helper.py#L61-L73 | def connect_nodes(nodes):
"""Connect the nodes in a list linearly."""
for n, next_node in zip(nodes, nodes[1:]):
if isinstance(n, ControlFlowNode):
_connect_control_flow_node(n, next_node)
elif isinstance(next_node, ControlFlowNode):
n.connect(next_node.test)
elif isinstance(next_node, RestoreNode):
continue
elif CALL_IDENTIFIER in next_node.label:
continue
else:
n.connect(next_node) | [
"def",
"connect_nodes",
"(",
"nodes",
")",
":",
"for",
"n",
",",
"next_node",
"in",
"zip",
"(",
"nodes",
",",
"nodes",
"[",
"1",
":",
"]",
")",
":",
"if",
"isinstance",
"(",
"n",
",",
"ControlFlowNode",
")",
":",
"_connect_control_flow_node",
"(",
"n",
",",
"next_node",
")",
"elif",
"isinstance",
"(",
"next_node",
",",
"ControlFlowNode",
")",
":",
"n",
".",
"connect",
"(",
"next_node",
".",
"test",
")",
"elif",
"isinstance",
"(",
"next_node",
",",
"RestoreNode",
")",
":",
"continue",
"elif",
"CALL_IDENTIFIER",
"in",
"next_node",
".",
"label",
":",
"continue",
"else",
":",
"n",
".",
"connect",
"(",
"next_node",
")"
] | Connect the nodes in a list linearly. | [
"Connect",
"the",
"nodes",
"in",
"a",
"list",
"linearly",
"."
] | python | train |
calston/tensor | tensor/utils.py | https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/utils.py#L329-L342 | def expire(self, age):
"""Expire any items in the cache older than `age` seconds"""
now = time.time()
cache = self._acquire_cache()
expired = [k for k, v in cache.items() if (now - v[0]) > age]
for k in expired:
if k in cache:
del cache[k]
if k in self.store:
del self.store[k]
self._write_cache(cache) | [
"def",
"expire",
"(",
"self",
",",
"age",
")",
":",
"now",
"=",
"time",
".",
"time",
"(",
")",
"cache",
"=",
"self",
".",
"_acquire_cache",
"(",
")",
"expired",
"=",
"[",
"k",
"for",
"k",
",",
"v",
"in",
"cache",
".",
"items",
"(",
")",
"if",
"(",
"now",
"-",
"v",
"[",
"0",
"]",
")",
">",
"age",
"]",
"for",
"k",
"in",
"expired",
":",
"if",
"k",
"in",
"cache",
":",
"del",
"cache",
"[",
"k",
"]",
"if",
"k",
"in",
"self",
".",
"store",
":",
"del",
"self",
".",
"store",
"[",
"k",
"]",
"self",
".",
"_write_cache",
"(",
"cache",
")"
] | Expire any items in the cache older than `age` seconds | [
"Expire",
"any",
"items",
"in",
"the",
"cache",
"older",
"than",
"age",
"seconds"
] | python | test |
alejandroautalan/pygubu | pygubudesigner/main.py | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/main.py#L514-L520 | def load_file(self, filename):
"""Load xml into treeview"""
self.tree_editor.load_file(filename)
self.project_name.configure(text=filename)
self.currentfile = filename
self.is_changed = False | [
"def",
"load_file",
"(",
"self",
",",
"filename",
")",
":",
"self",
".",
"tree_editor",
".",
"load_file",
"(",
"filename",
")",
"self",
".",
"project_name",
".",
"configure",
"(",
"text",
"=",
"filename",
")",
"self",
".",
"currentfile",
"=",
"filename",
"self",
".",
"is_changed",
"=",
"False"
] | Load xml into treeview | [
"Load",
"xml",
"into",
"treeview"
] | python | train |
Autodesk/cryptorito | cryptorito/__init__.py | https://github.com/Autodesk/cryptorito/blob/277fc7cc42c31c5bc37e26d8bf5a2ac746a6ea85/cryptorito/__init__.py#L282-L289 | def fingerprint_from_file(filename):
"""Extract a fingerprint from a GPG public key file"""
cmd = flatten([gnupg_bin(), gnupg_home(), filename])
outp = stderr_output(cmd).split('\n')
if not outp[0].startswith('pub'):
raise CryptoritoError('probably an invalid gpg key')
return outp[1].strip() | [
"def",
"fingerprint_from_file",
"(",
"filename",
")",
":",
"cmd",
"=",
"flatten",
"(",
"[",
"gnupg_bin",
"(",
")",
",",
"gnupg_home",
"(",
")",
",",
"filename",
"]",
")",
"outp",
"=",
"stderr_output",
"(",
"cmd",
")",
".",
"split",
"(",
"'\\n'",
")",
"if",
"not",
"outp",
"[",
"0",
"]",
".",
"startswith",
"(",
"'pub'",
")",
":",
"raise",
"CryptoritoError",
"(",
"'probably an invalid gpg key'",
")",
"return",
"outp",
"[",
"1",
"]",
".",
"strip",
"(",
")"
] | Extract a fingerprint from a GPG public key file | [
"Extract",
"a",
"fingerprint",
"from",
"a",
"GPG",
"public",
"key",
"file"
] | python | train |
dls-controls/annotypes | annotypes/_fake_typing.py | https://github.com/dls-controls/annotypes/blob/31ab68a0367bb70ebd9898e8b9fa9405423465bd/annotypes/_fake_typing.py#L143-L154 | def _next_in_mro(cls):
"""Helper for Generic.__new__.
Returns the class after the last occurrence of Generic or
Generic[...] in cls.__mro__.
"""
next_in_mro = object
# Look for the last occurrence of Generic or Generic[...].
for i, c in enumerate(cls.__mro__[:-1]):
if isinstance(c, GenericMeta) and _gorg(c) is Generic:
next_in_mro = cls.__mro__[i + 1]
return next_in_mro | [
"def",
"_next_in_mro",
"(",
"cls",
")",
":",
"next_in_mro",
"=",
"object",
"# Look for the last occurrence of Generic or Generic[...].",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"cls",
".",
"__mro__",
"[",
":",
"-",
"1",
"]",
")",
":",
"if",
"isinstance",
"(",
"c",
",",
"GenericMeta",
")",
"and",
"_gorg",
"(",
"c",
")",
"is",
"Generic",
":",
"next_in_mro",
"=",
"cls",
".",
"__mro__",
"[",
"i",
"+",
"1",
"]",
"return",
"next_in_mro"
] | Helper for Generic.__new__.
Returns the class after the last occurrence of Generic or
Generic[...] in cls.__mro__. | [
"Helper",
"for",
"Generic",
".",
"__new__",
"."
] | python | train |
wonambi-python/wonambi | wonambi/ioeeg/fieldtrip.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/fieldtrip.py#L31-L86 | def return_hdr(self):
"""Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header
Notes
-----
It only reads hdf5 matlab files and the VARiable needs to be called
'data'
h5py is necessary for this function
"""
# fieldtrip does not have this information
orig = dict()
subj_id = str()
start_time = datetime.fromordinal(1) # fake
try:
ft_data = loadmat(self.filename, struct_as_record=True,
squeeze_me=True)
if VAR not in ft_data:
raise KeyError('Save the FieldTrip variable as ''{}'''
''.format(VAR))
ft_data = ft_data[VAR]
s_freq = ft_data['fsample'].astype('float64').item()
n_samples = ft_data['trial'].item().shape[1]
chan_name = list(ft_data['label'].item())
except NotImplementedError:
with File(self.filename) as f:
if VAR not in f.keys():
raise KeyError('Save the FieldTrip variable as ''{}'''
''.format(VAR))
s_freq = int(f[VAR]['fsample'].value.squeeze())
chan_name = read_hdf5_chan_name(f, f[VAR]['label'])
n_samples = int(around(f[f[VAR]['trial'][0].item()].shape[0]))
return subj_id, start_time, s_freq, chan_name, n_samples, orig | [
"def",
"return_hdr",
"(",
"self",
")",
":",
"# fieldtrip does not have this information",
"orig",
"=",
"dict",
"(",
")",
"subj_id",
"=",
"str",
"(",
")",
"start_time",
"=",
"datetime",
".",
"fromordinal",
"(",
"1",
")",
"# fake",
"try",
":",
"ft_data",
"=",
"loadmat",
"(",
"self",
".",
"filename",
",",
"struct_as_record",
"=",
"True",
",",
"squeeze_me",
"=",
"True",
")",
"if",
"VAR",
"not",
"in",
"ft_data",
":",
"raise",
"KeyError",
"(",
"'Save the FieldTrip variable as '",
"'{}'",
"''",
"''",
".",
"format",
"(",
"VAR",
")",
")",
"ft_data",
"=",
"ft_data",
"[",
"VAR",
"]",
"s_freq",
"=",
"ft_data",
"[",
"'fsample'",
"]",
".",
"astype",
"(",
"'float64'",
")",
".",
"item",
"(",
")",
"n_samples",
"=",
"ft_data",
"[",
"'trial'",
"]",
".",
"item",
"(",
")",
".",
"shape",
"[",
"1",
"]",
"chan_name",
"=",
"list",
"(",
"ft_data",
"[",
"'label'",
"]",
".",
"item",
"(",
")",
")",
"except",
"NotImplementedError",
":",
"with",
"File",
"(",
"self",
".",
"filename",
")",
"as",
"f",
":",
"if",
"VAR",
"not",
"in",
"f",
".",
"keys",
"(",
")",
":",
"raise",
"KeyError",
"(",
"'Save the FieldTrip variable as '",
"'{}'",
"''",
"''",
".",
"format",
"(",
"VAR",
")",
")",
"s_freq",
"=",
"int",
"(",
"f",
"[",
"VAR",
"]",
"[",
"'fsample'",
"]",
".",
"value",
".",
"squeeze",
"(",
")",
")",
"chan_name",
"=",
"read_hdf5_chan_name",
"(",
"f",
",",
"f",
"[",
"VAR",
"]",
"[",
"'label'",
"]",
")",
"n_samples",
"=",
"int",
"(",
"around",
"(",
"f",
"[",
"f",
"[",
"VAR",
"]",
"[",
"'trial'",
"]",
"[",
"0",
"]",
".",
"item",
"(",
")",
"]",
".",
"shape",
"[",
"0",
"]",
")",
")",
"return",
"subj_id",
",",
"start_time",
",",
"s_freq",
",",
"chan_name",
",",
"n_samples",
",",
"orig"
] | Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header
Notes
-----
It only reads hdf5 matlab files and the VARiable needs to be called
'data'
h5py is necessary for this function | [
"Return",
"the",
"header",
"for",
"further",
"use",
"."
] | python | train |
rocky/python-uncompyle6 | uncompyle6/verify.py | https://github.com/rocky/python-uncompyle6/blob/c5d7944e657f0ad05a0e2edd34e1acb27001abc0/uncompyle6/verify.py#L150-L362 | def cmp_code_objects(version, is_pypy, code_obj1, code_obj2, verify,
name=''):
"""
Compare two code-objects.
This is the main part of this module.
"""
# print code_obj1, type(code_obj2)
assert iscode(code_obj1), \
"cmp_code_object first object type is %s, not code" % type(code_obj1)
assert iscode(code_obj2), \
"cmp_code_object second object type is %s, not code" % type(code_obj2)
# print dir(code_obj1)
if isinstance(code_obj1, object):
# new style classes (Python 2.2)
# assume _both_ code objects to be new style classes
assert dir(code_obj1) == dir(code_obj2)
else:
# old style classes
assert dir(code_obj1) == code_obj1.__members__
assert dir(code_obj2) == code_obj2.__members__
assert code_obj1.__members__ == code_obj2.__members__
if name == '__main__':
name = code_obj1.co_name
else:
name = '%s.%s' % (name, code_obj1.co_name)
if name == '.?': name = '__main__'
if isinstance(code_obj1, object) and code_equal(code_obj1, code_obj2):
# use the new style code-classes' __cmp__ method, which
# should be faster and more sophisticated
# if this compare fails, we use the old routine to
# find out what exactly is not equal
# if this compare succeeds, simply return
# return
pass
if isinstance(code_obj1, object):
members = [x for x in dir(code_obj1) if x.startswith('co_')]
else:
members = dir(code_obj1)
members.sort() # ; members.reverse()
tokens1 = None
for member in members:
if member in __IGNORE_CODE_MEMBERS__ or verify != 'verify':
pass
elif member == 'co_code':
if verify != 'strong':
continue
scanner = get_scanner(version, is_pypy, show_asm=False)
global JUMP_OPS
JUMP_OPS = list(scan.JUMP_OPS) + ['JUMP_BACK']
# use changed Token class
# We (re)set this here to save exception handling,
# which would get confusing.
scanner.setTokenClass(Token)
try:
# ingest both code-objects
tokens1, customize = scanner.ingest(code_obj1)
del customize # save memory
tokens2, customize = scanner.ingest(code_obj2)
del customize # save memory
finally:
scanner.resetTokenClass() # restore Token class
targets1 = dis.findlabels(code_obj1.co_code)
tokens1 = [t for t in tokens1 if t.kind != 'COME_FROM']
tokens2 = [t for t in tokens2 if t.kind != 'COME_FROM']
i1 = 0; i2 = 0
offset_map = {}; check_jumps = {}
while i1 < len(tokens1):
if i2 >= len(tokens2):
if len(tokens1) == len(tokens2) + 2 \
and tokens1[-1].kind == 'RETURN_VALUE' \
and tokens1[-2].kind == 'LOAD_CONST' \
and tokens1[-2].pattr is None \
and tokens1[-3].kind == 'RETURN_VALUE':
break
else:
raise CmpErrorCodeLen(name, tokens1, tokens2)
offset_map[tokens1[i1].offset] = tokens2[i2].offset
for idx1, idx2, offset2 in check_jumps.get(tokens1[i1].offset, []):
if offset2 != tokens2[i2].offset:
raise CmpErrorCode(name, tokens1[idx1].offset, tokens1[idx1],
tokens2[idx2], tokens1, tokens2)
if tokens1[i1].kind != tokens2[i2].kind:
if tokens1[i1].kind == 'LOAD_CONST' == tokens2[i2].kind:
i = 1
while tokens1[i1+i].kind == 'LOAD_CONST':
i += 1
if tokens1[i1+i].kind.startswith(('BUILD_TUPLE', 'BUILD_LIST')) \
and i == int(tokens1[i1+i].kind.split('_')[-1]):
t = tuple([ elem.pattr for elem in tokens1[i1:i1+i] ])
if t != tokens2[i2].pattr:
raise CmpErrorCode(name, tokens1[i1].offset, tokens1[i1],
tokens2[i2], tokens1, tokens2)
i1 += i + 1
i2 += 1
continue
elif i == 2 and tokens1[i1+i].kind == 'ROT_TWO' and tokens2[i2+1].kind == 'UNPACK_SEQUENCE_2':
i1 += 3
i2 += 2
continue
elif i == 2 and tokens1[i1+i].kind in BIN_OP_FUNCS:
f = BIN_OP_FUNCS[tokens1[i1+i].kind]
if f(tokens1[i1].pattr, tokens1[i1+1].pattr) == tokens2[i2].pattr:
i1 += 3
i2 += 1
continue
elif tokens1[i1].kind == 'UNARY_NOT':
if tokens2[i2].kind == 'POP_JUMP_IF_TRUE':
if tokens1[i1+1].kind == 'POP_JUMP_IF_FALSE':
i1 += 2
i2 += 1
continue
elif tokens2[i2].kind == 'POP_JUMP_IF_FALSE':
if tokens1[i1+1].kind == 'POP_JUMP_IF_TRUE':
i1 += 2
i2 += 1
continue
elif tokens1[i1].kind in ('JUMP_FORWARD', 'JUMP_BACK') \
and tokens1[i1-1].kind == 'RETURN_VALUE' \
and tokens2[i2-1].kind in ('RETURN_VALUE', 'RETURN_END_IF') \
and int(tokens1[i1].offset) not in targets1:
i1 += 1
continue
elif tokens1[i1].kind == 'JUMP_BACK' and tokens2[i2].kind == 'CONTINUE':
# FIXME: should make sure that offset is inside loop, not outside of it
i1 += 2
i2 += 2
continue
elif tokens1[i1].kind == 'JUMP_FORWARD' and tokens2[i2].kind == 'JUMP_BACK' \
and tokens1[i1+1].kind == 'JUMP_BACK' and tokens2[i2+1].kind == 'JUMP_BACK' \
and int(tokens1[i1].pattr) == int(tokens1[i1].offset) + 3:
if int(tokens1[i1].pattr) == int(tokens1[i1+1].offset):
i1 += 2
i2 += 2
continue
elif tokens1[i1].kind == 'LOAD_NAME' and tokens2[i2].kind == 'LOAD_CONST' \
and tokens1[i1].pattr == 'None' and tokens2[i2].pattr is None:
pass
elif tokens1[i1].kind == 'LOAD_GLOBAL' and tokens2[i2].kind == 'LOAD_NAME' \
and tokens1[i1].pattr == tokens2[i2].pattr:
pass
elif tokens1[i1].kind == 'LOAD_ASSERT' and tokens2[i2].kind == 'LOAD_NAME' \
and tokens1[i1].pattr == tokens2[i2].pattr:
pass
elif (tokens1[i1].kind == 'RETURN_VALUE' and
tokens2[i2].kind == 'RETURN_END_IF'):
pass
elif (tokens1[i1].kind == 'BUILD_TUPLE_0' and
tokens2[i2].pattr == ()):
pass
else:
raise CmpErrorCode(name, tokens1[i1].offset, tokens1[i1],
tokens2[i2], tokens1, tokens2)
elif tokens1[i1].kind in JUMP_OPS and tokens1[i1].pattr != tokens2[i2].pattr:
if tokens1[i1].kind == 'JUMP_BACK':
dest1 = int(tokens1[i1].pattr)
dest2 = int(tokens2[i2].pattr)
if offset_map[dest1] != dest2:
raise CmpErrorCode(name, tokens1[i1].offset, tokens1[i1],
tokens2[i2], tokens1, tokens2)
else:
# import pdb; pdb.set_trace()
try:
dest1 = int(tokens1[i1].pattr)
if dest1 in check_jumps:
check_jumps[dest1].append((i1, i2, dest2))
else:
check_jumps[dest1] = [(i1, i2, dest2)]
except:
pass
i1 += 1
i2 += 1
del tokens1, tokens2 # save memory
elif member == 'co_consts':
# partial optimization can make the co_consts look different,
# so we'll just compare the code consts
codes1 = ( c for c in code_obj1.co_consts if hasattr(c, 'co_consts') )
codes2 = ( c for c in code_obj2.co_consts if hasattr(c, 'co_consts') )
for c1, c2 in zip(codes1, codes2):
cmp_code_objects(version, is_pypy, c1, c2, verify,
name=name)
elif member == 'co_flags':
flags1 = code_obj1.co_flags
flags2 = code_obj2.co_flags
if is_pypy:
# For PYPY for now we don't care about PYPY_SOURCE_IS_UTF8:
flags2 &= ~0x0100 # PYPY_SOURCE_IS_UTF8
# We also don't care about COROUTINE or GENERATOR for now
flags1 &= ~0x000000a0
flags2 &= ~0x000000a0
if flags1 != flags2:
raise CmpErrorMember(name, 'co_flags',
pretty_flags(flags1),
pretty_flags(flags2))
else:
# all other members must be equal
if getattr(code_obj1, member) != getattr(code_obj2, member):
raise CmpErrorMember(name, member,
getattr(code_obj1, member),
getattr(code_obj2, member)) | [
"def",
"cmp_code_objects",
"(",
"version",
",",
"is_pypy",
",",
"code_obj1",
",",
"code_obj2",
",",
"verify",
",",
"name",
"=",
"''",
")",
":",
"# print code_obj1, type(code_obj2)",
"assert",
"iscode",
"(",
"code_obj1",
")",
",",
"\"cmp_code_object first object type is %s, not code\"",
"%",
"type",
"(",
"code_obj1",
")",
"assert",
"iscode",
"(",
"code_obj2",
")",
",",
"\"cmp_code_object second object type is %s, not code\"",
"%",
"type",
"(",
"code_obj2",
")",
"# print dir(code_obj1)",
"if",
"isinstance",
"(",
"code_obj1",
",",
"object",
")",
":",
"# new style classes (Python 2.2)",
"# assume _both_ code objects to be new stle classes",
"assert",
"dir",
"(",
"code_obj1",
")",
"==",
"dir",
"(",
"code_obj2",
")",
"else",
":",
"# old style classes",
"assert",
"dir",
"(",
"code_obj1",
")",
"==",
"code_obj1",
".",
"__members__",
"assert",
"dir",
"(",
"code_obj2",
")",
"==",
"code_obj2",
".",
"__members__",
"assert",
"code_obj1",
".",
"__members__",
"==",
"code_obj2",
".",
"__members__",
"if",
"name",
"==",
"'__main__'",
":",
"name",
"=",
"code_obj1",
".",
"co_name",
"else",
":",
"name",
"=",
"'%s.%s'",
"%",
"(",
"name",
",",
"code_obj1",
".",
"co_name",
")",
"if",
"name",
"==",
"'.?'",
":",
"name",
"=",
"'__main__'",
"if",
"isinstance",
"(",
"code_obj1",
",",
"object",
")",
"and",
"code_equal",
"(",
"code_obj1",
",",
"code_obj2",
")",
":",
"# use the new style code-classes' __cmp__ method, which",
"# should be faster and more sophisticated",
"# if this compare fails, we use the old routine to",
"# find out, what exactly is nor equal",
"# if this compare succeds, simply return",
"# return",
"pass",
"if",
"isinstance",
"(",
"code_obj1",
",",
"object",
")",
":",
"members",
"=",
"[",
"x",
"for",
"x",
"in",
"dir",
"(",
"code_obj1",
")",
"if",
"x",
".",
"startswith",
"(",
"'co_'",
")",
"]",
"else",
":",
"members",
"=",
"dir",
"(",
"code_obj1",
")",
"members",
".",
"sort",
"(",
")",
"# ; members.reverse()",
"tokens1",
"=",
"None",
"for",
"member",
"in",
"members",
":",
"if",
"member",
"in",
"__IGNORE_CODE_MEMBERS__",
"or",
"verify",
"!=",
"'verify'",
":",
"pass",
"elif",
"member",
"==",
"'co_code'",
":",
"if",
"verify",
"!=",
"'strong'",
":",
"continue",
"scanner",
"=",
"get_scanner",
"(",
"version",
",",
"is_pypy",
",",
"show_asm",
"=",
"False",
")",
"global",
"JUMP_OPS",
"JUMP_OPS",
"=",
"list",
"(",
"scan",
".",
"JUMP_OPS",
")",
"+",
"[",
"'JUMP_BACK'",
"]",
"# use changed Token class",
"# We (re)set this here to save exception handling,",
"# which would get confusing.",
"scanner",
".",
"setTokenClass",
"(",
"Token",
")",
"try",
":",
"# ingest both code-objects",
"tokens1",
",",
"customize",
"=",
"scanner",
".",
"ingest",
"(",
"code_obj1",
")",
"del",
"customize",
"# save memory",
"tokens2",
",",
"customize",
"=",
"scanner",
".",
"ingest",
"(",
"code_obj2",
")",
"del",
"customize",
"# save memory",
"finally",
":",
"scanner",
".",
"resetTokenClass",
"(",
")",
"# restore Token class",
"targets1",
"=",
"dis",
".",
"findlabels",
"(",
"code_obj1",
".",
"co_code",
")",
"tokens1",
"=",
"[",
"t",
"for",
"t",
"in",
"tokens1",
"if",
"t",
".",
"kind",
"!=",
"'COME_FROM'",
"]",
"tokens2",
"=",
"[",
"t",
"for",
"t",
"in",
"tokens2",
"if",
"t",
".",
"kind",
"!=",
"'COME_FROM'",
"]",
"i1",
"=",
"0",
"i2",
"=",
"0",
"offset_map",
"=",
"{",
"}",
"check_jumps",
"=",
"{",
"}",
"while",
"i1",
"<",
"len",
"(",
"tokens1",
")",
":",
"if",
"i2",
">=",
"len",
"(",
"tokens2",
")",
":",
"if",
"len",
"(",
"tokens1",
")",
"==",
"len",
"(",
"tokens2",
")",
"+",
"2",
"and",
"tokens1",
"[",
"-",
"1",
"]",
".",
"kind",
"==",
"'RETURN_VALUE'",
"and",
"tokens1",
"[",
"-",
"2",
"]",
".",
"kind",
"==",
"'LOAD_CONST'",
"and",
"tokens1",
"[",
"-",
"2",
"]",
".",
"pattr",
"is",
"None",
"and",
"tokens1",
"[",
"-",
"3",
"]",
".",
"kind",
"==",
"'RETURN_VALUE'",
":",
"break",
"else",
":",
"raise",
"CmpErrorCodeLen",
"(",
"name",
",",
"tokens1",
",",
"tokens2",
")",
"offset_map",
"[",
"tokens1",
"[",
"i1",
"]",
".",
"offset",
"]",
"=",
"tokens2",
"[",
"i2",
"]",
".",
"offset",
"for",
"idx1",
",",
"idx2",
",",
"offset2",
"in",
"check_jumps",
".",
"get",
"(",
"tokens1",
"[",
"i1",
"]",
".",
"offset",
",",
"[",
"]",
")",
":",
"if",
"offset2",
"!=",
"tokens2",
"[",
"i2",
"]",
".",
"offset",
":",
"raise",
"CmpErrorCode",
"(",
"name",
",",
"tokens1",
"[",
"idx1",
"]",
".",
"offset",
",",
"tokens1",
"[",
"idx1",
"]",
",",
"tokens2",
"[",
"idx2",
"]",
",",
"tokens1",
",",
"tokens2",
")",
"if",
"tokens1",
"[",
"i1",
"]",
".",
"kind",
"!=",
"tokens2",
"[",
"i2",
"]",
".",
"kind",
":",
"if",
"tokens1",
"[",
"i1",
"]",
".",
"kind",
"==",
"'LOAD_CONST'",
"==",
"tokens2",
"[",
"i2",
"]",
".",
"kind",
":",
"i",
"=",
"1",
"while",
"tokens1",
"[",
"i1",
"+",
"i",
"]",
".",
"kind",
"==",
"'LOAD_CONST'",
":",
"i",
"+=",
"1",
"if",
"tokens1",
"[",
"i1",
"+",
"i",
"]",
".",
"kind",
".",
"startswith",
"(",
"(",
"'BUILD_TUPLE'",
",",
"'BUILD_LIST'",
")",
")",
"and",
"i",
"==",
"int",
"(",
"tokens1",
"[",
"i1",
"+",
"i",
"]",
".",
"kind",
".",
"split",
"(",
"'_'",
")",
"[",
"-",
"1",
"]",
")",
":",
"t",
"=",
"tuple",
"(",
"[",
"elem",
".",
"pattr",
"for",
"elem",
"in",
"tokens1",
"[",
"i1",
":",
"i1",
"+",
"i",
"]",
"]",
")",
"if",
"t",
"!=",
"tokens2",
"[",
"i2",
"]",
".",
"pattr",
":",
"raise",
"CmpErrorCode",
"(",
"name",
",",
"tokens1",
"[",
"i1",
"]",
".",
"offset",
",",
"tokens1",
"[",
"i1",
"]",
",",
"tokens2",
"[",
"i2",
"]",
",",
"tokens1",
",",
"tokens2",
")",
"i1",
"+=",
"i",
"+",
"1",
"i2",
"+=",
"1",
"continue",
"elif",
"i",
"==",
"2",
"and",
"tokens1",
"[",
"i1",
"+",
"i",
"]",
".",
"kind",
"==",
"'ROT_TWO'",
"and",
"tokens2",
"[",
"i2",
"+",
"1",
"]",
".",
"kind",
"==",
"'UNPACK_SEQUENCE_2'",
":",
"i1",
"+=",
"3",
"i2",
"+=",
"2",
"continue",
"elif",
"i",
"==",
"2",
"and",
"tokens1",
"[",
"i1",
"+",
"i",
"]",
".",
"kind",
"in",
"BIN_OP_FUNCS",
":",
"f",
"=",
"BIN_OP_FUNCS",
"[",
"tokens1",
"[",
"i1",
"+",
"i",
"]",
".",
"kind",
"]",
"if",
"f",
"(",
"tokens1",
"[",
"i1",
"]",
".",
"pattr",
",",
"tokens1",
"[",
"i1",
"+",
"1",
"]",
".",
"pattr",
")",
"==",
"tokens2",
"[",
"i2",
"]",
".",
"pattr",
":",
"i1",
"+=",
"3",
"i2",
"+=",
"1",
"continue",
"elif",
"tokens1",
"[",
"i1",
"]",
".",
"kind",
"==",
"'UNARY_NOT'",
":",
"if",
"tokens2",
"[",
"i2",
"]",
".",
"kind",
"==",
"'POP_JUMP_IF_TRUE'",
":",
"if",
"tokens1",
"[",
"i1",
"+",
"1",
"]",
".",
"kind",
"==",
"'POP_JUMP_IF_FALSE'",
":",
"i1",
"+=",
"2",
"i2",
"+=",
"1",
"continue",
"elif",
"tokens2",
"[",
"i2",
"]",
".",
"kind",
"==",
"'POP_JUMP_IF_FALSE'",
":",
"if",
"tokens1",
"[",
"i1",
"+",
"1",
"]",
".",
"kind",
"==",
"'POP_JUMP_IF_TRUE'",
":",
"i1",
"+=",
"2",
"i2",
"+=",
"1",
"continue",
"elif",
"tokens1",
"[",
"i1",
"]",
".",
"kind",
"in",
"(",
"'JUMP_FORWARD'",
",",
"'JUMP_BACK'",
")",
"and",
"tokens1",
"[",
"i1",
"-",
"1",
"]",
".",
"kind",
"==",
"'RETURN_VALUE'",
"and",
"tokens2",
"[",
"i2",
"-",
"1",
"]",
".",
"kind",
"in",
"(",
"'RETURN_VALUE'",
",",
"'RETURN_END_IF'",
")",
"and",
"int",
"(",
"tokens1",
"[",
"i1",
"]",
".",
"offset",
")",
"not",
"in",
"targets1",
":",
"i1",
"+=",
"1",
"continue",
"elif",
"tokens1",
"[",
"i1",
"]",
".",
"kind",
"==",
"'JUMP_BACK'",
"and",
"tokens2",
"[",
"i2",
"]",
".",
"kind",
"==",
"'CONTINUE'",
":",
"# FIXME: should make sure that offset is inside loop, not outside of it",
"i1",
"+=",
"2",
"i2",
"+=",
"2",
"continue",
"elif",
"tokens1",
"[",
"i1",
"]",
".",
"kind",
"==",
"'JUMP_FORWARD'",
"and",
"tokens2",
"[",
"i2",
"]",
".",
"kind",
"==",
"'JUMP_BACK'",
"and",
"tokens1",
"[",
"i1",
"+",
"1",
"]",
".",
"kind",
"==",
"'JUMP_BACK'",
"and",
"tokens2",
"[",
"i2",
"+",
"1",
"]",
".",
"kind",
"==",
"'JUMP_BACK'",
"and",
"int",
"(",
"tokens1",
"[",
"i1",
"]",
".",
"pattr",
")",
"==",
"int",
"(",
"tokens1",
"[",
"i1",
"]",
".",
"offset",
")",
"+",
"3",
":",
"if",
"int",
"(",
"tokens1",
"[",
"i1",
"]",
".",
"pattr",
")",
"==",
"int",
"(",
"tokens1",
"[",
"i1",
"+",
"1",
"]",
".",
"offset",
")",
":",
"i1",
"+=",
"2",
"i2",
"+=",
"2",
"continue",
"elif",
"tokens1",
"[",
"i1",
"]",
".",
"kind",
"==",
"'LOAD_NAME'",
"and",
"tokens2",
"[",
"i2",
"]",
".",
"kind",
"==",
"'LOAD_CONST'",
"and",
"tokens1",
"[",
"i1",
"]",
".",
"pattr",
"==",
"'None'",
"and",
"tokens2",
"[",
"i2",
"]",
".",
"pattr",
"is",
"None",
":",
"pass",
"elif",
"tokens1",
"[",
"i1",
"]",
".",
"kind",
"==",
"'LOAD_GLOBAL'",
"and",
"tokens2",
"[",
"i2",
"]",
".",
"kind",
"==",
"'LOAD_NAME'",
"and",
"tokens1",
"[",
"i1",
"]",
".",
"pattr",
"==",
"tokens2",
"[",
"i2",
"]",
".",
"pattr",
":",
"pass",
"elif",
"tokens1",
"[",
"i1",
"]",
".",
"kind",
"==",
"'LOAD_ASSERT'",
"and",
"tokens2",
"[",
"i2",
"]",
".",
"kind",
"==",
"'LOAD_NAME'",
"and",
"tokens1",
"[",
"i1",
"]",
".",
"pattr",
"==",
"tokens2",
"[",
"i2",
"]",
".",
"pattr",
":",
"pass",
"elif",
"(",
"tokens1",
"[",
"i1",
"]",
".",
"kind",
"==",
"'RETURN_VALUE'",
"and",
"tokens2",
"[",
"i2",
"]",
".",
"kind",
"==",
"'RETURN_END_IF'",
")",
":",
"pass",
"elif",
"(",
"tokens1",
"[",
"i1",
"]",
".",
"kind",
"==",
"'BUILD_TUPLE_0'",
"and",
"tokens2",
"[",
"i2",
"]",
".",
"pattr",
"==",
"(",
")",
")",
":",
"pass",
"else",
":",
"raise",
"CmpErrorCode",
"(",
"name",
",",
"tokens1",
"[",
"i1",
"]",
".",
"offset",
",",
"tokens1",
"[",
"i1",
"]",
",",
"tokens2",
"[",
"i2",
"]",
",",
"tokens1",
",",
"tokens2",
")",
"elif",
"tokens1",
"[",
"i1",
"]",
".",
"kind",
"in",
"JUMP_OPS",
"and",
"tokens1",
"[",
"i1",
"]",
".",
"pattr",
"!=",
"tokens2",
"[",
"i2",
"]",
".",
"pattr",
":",
"if",
"tokens1",
"[",
"i1",
"]",
".",
"kind",
"==",
"'JUMP_BACK'",
":",
"dest1",
"=",
"int",
"(",
"tokens1",
"[",
"i1",
"]",
".",
"pattr",
")",
"dest2",
"=",
"int",
"(",
"tokens2",
"[",
"i2",
"]",
".",
"pattr",
")",
"if",
"offset_map",
"[",
"dest1",
"]",
"!=",
"dest2",
":",
"raise",
"CmpErrorCode",
"(",
"name",
",",
"tokens1",
"[",
"i1",
"]",
".",
"offset",
",",
"tokens1",
"[",
"i1",
"]",
",",
"tokens2",
"[",
"i2",
"]",
",",
"tokens1",
",",
"tokens2",
")",
"else",
":",
"# import pdb; pdb.set_trace()",
"try",
":",
"dest1",
"=",
"int",
"(",
"tokens1",
"[",
"i1",
"]",
".",
"pattr",
")",
"if",
"dest1",
"in",
"check_jumps",
":",
"check_jumps",
"[",
"dest1",
"]",
".",
"append",
"(",
"(",
"i1",
",",
"i2",
",",
"dest2",
")",
")",
"else",
":",
"check_jumps",
"[",
"dest1",
"]",
"=",
"[",
"(",
"i1",
",",
"i2",
",",
"dest2",
")",
"]",
"except",
":",
"pass",
"i1",
"+=",
"1",
"i2",
"+=",
"1",
"del",
"tokens1",
",",
"tokens2",
"# save memory",
"elif",
"member",
"==",
"'co_consts'",
":",
"# partial optimization can make the co_consts look different,",
"# so we'll just compare the code consts",
"codes1",
"=",
"(",
"c",
"for",
"c",
"in",
"code_obj1",
".",
"co_consts",
"if",
"hasattr",
"(",
"c",
",",
"'co_consts'",
")",
")",
"codes2",
"=",
"(",
"c",
"for",
"c",
"in",
"code_obj2",
".",
"co_consts",
"if",
"hasattr",
"(",
"c",
",",
"'co_consts'",
")",
")",
"for",
"c1",
",",
"c2",
"in",
"zip",
"(",
"codes1",
",",
"codes2",
")",
":",
"cmp_code_objects",
"(",
"version",
",",
"is_pypy",
",",
"c1",
",",
"c2",
",",
"verify",
",",
"name",
"=",
"name",
")",
"elif",
"member",
"==",
"'co_flags'",
":",
"flags1",
"=",
"code_obj1",
".",
"co_flags",
"flags2",
"=",
"code_obj2",
".",
"co_flags",
"if",
"is_pypy",
":",
"# For PYPY for now we don't care about PYPY_SOURCE_IS_UTF8:",
"flags2",
"&=",
"~",
"0x0100",
"# PYPY_SOURCE_IS_UTF8",
"# We also don't care about COROUTINE or GENERATOR for now",
"flags1",
"&=",
"~",
"0x000000a0",
"flags2",
"&=",
"~",
"0x000000a0",
"if",
"flags1",
"!=",
"flags2",
":",
"raise",
"CmpErrorMember",
"(",
"name",
",",
"'co_flags'",
",",
"pretty_flags",
"(",
"flags1",
")",
",",
"pretty_flags",
"(",
"flags2",
")",
")",
"else",
":",
"# all other members must be equal",
"if",
"getattr",
"(",
"code_obj1",
",",
"member",
")",
"!=",
"getattr",
"(",
"code_obj2",
",",
"member",
")",
":",
"raise",
"CmpErrorMember",
"(",
"name",
",",
"member",
",",
"getattr",
"(",
"code_obj1",
",",
"member",
")",
",",
"getattr",
"(",
"code_obj2",
",",
"member",
")",
")"
] | Compare two code-objects.
This is the main part of this module. | [
"Compare",
"two",
"code",
"-",
"objects",
"."
] | python | train |
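The record above captures a code-object verifier. As a minimal, self-contained illustration of the underlying technique, comparing the `co_*` members of two code objects compiled from equivalent source, the following sketch uses only the standard library; it does not call `cmp_code_objects` itself, whose defining module is not shown in this record.

```python
# Illustration of the comparison idea only; not the verifier above.
src_a = "x = 1\ny = x + 2\n"
src_b = "x = 1\ny = x + 2\n"
code_a = compile(src_a, "<a>", "exec")
code_b = compile(src_b, "<b>", "exec")

# Compare a few co_* members, similar to the verifier's member loop.
for member in ("co_argcount", "co_names", "co_varnames", "co_consts"):
    v_a, v_b = getattr(code_a, member), getattr(code_b, member)
    status = "equal" if v_a == v_b else "DIFFERS: %r vs %r" % (v_a, v_b)
    print(member, status)
```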
AguaClara/aguaclara | aguaclara/core/physchem.py | https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/core/physchem.py#L414-L420 | def num_orifices(FlowPlant, RatioVCOrifice, HeadLossOrifice, DiamOrifice):
"""Return the number of orifices."""
#Inputs do not need to be checked here because they are checked by
#functions this function calls.
return np.ceil(area_orifice(HeadLossOrifice, RatioVCOrifice,
FlowPlant).magnitude
/ area_circle(DiamOrifice).magnitude) | [
"def",
"num_orifices",
"(",
"FlowPlant",
",",
"RatioVCOrifice",
",",
"HeadLossOrifice",
",",
"DiamOrifice",
")",
":",
"#Inputs do not need to be checked here because they are checked by",
"#functions this function calls.",
"return",
"np",
".",
"ceil",
"(",
"area_orifice",
"(",
"HeadLossOrifice",
",",
"RatioVCOrifice",
",",
"FlowPlant",
")",
".",
"magnitude",
"/",
"area_circle",
"(",
"DiamOrifice",
")",
".",
"magnitude",
")"
] | Return the number of orifices. | [
"Return",
"the",
"number",
"of",
"orifices",
"."
] | python | train |
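A hypothetical call of the function above, assuming the `aguaclara` package and its pint unit registry are installed; the registry import path and all numeric values are illustrative assumptions, not taken from the record.

```python
from aguaclara.core import physchem as pc
from aguaclara.core.units import u  # pint unit registry; exact import path assumed

n = pc.num_orifices(
    FlowPlant=0.01 * u.m ** 3 / u.s,   # assumed plant flow rate
    RatioVCOrifice=0.63,               # assumed vena contracta coefficient
    HeadLossOrifice=0.2 * u.m,         # assumed head loss across the orifice
    DiamOrifice=0.025 * u.m,           # assumed orifice diameter
)
print(n)  # number of orifices, rounded up by np.ceil
```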
ronaldguillen/wave | wave/fields.py | https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/fields.py#L1511-L1522 | def to_internal_value(self, data):
"""
Dicts of native values <- Dicts of primitive datatypes.
"""
if html.is_html_input(data):
data = html.parse_html_dict(data)
if not isinstance(data, dict):
self.fail('not_a_dict', input_type=type(data).__name__)
return {
six.text_type(key): self.child.run_validation(value)
for key, value in data.items()
} | [
"def",
"to_internal_value",
"(",
"self",
",",
"data",
")",
":",
"if",
"html",
".",
"is_html_input",
"(",
"data",
")",
":",
"data",
"=",
"html",
".",
"parse_html_dict",
"(",
"data",
")",
"if",
"not",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"self",
".",
"fail",
"(",
"'not_a_dict'",
",",
"input_type",
"=",
"type",
"(",
"data",
")",
".",
"__name__",
")",
"return",
"{",
"six",
".",
"text_type",
"(",
"key",
")",
":",
"self",
".",
"child",
".",
"run_validation",
"(",
"value",
")",
"for",
"key",
",",
"value",
"in",
"data",
".",
"items",
"(",
")",
"}"
] | Dicts of native values <- Dicts of primitive datatypes. | [
"Dicts",
"of",
"native",
"values",
"<",
"-",
"Dicts",
"of",
"primitive",
"datatypes",
"."
] | python | train |
databio/pypiper | pypiper/utils.py | https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/utils.py#L149-L193 | def checkpoint_filepath(checkpoint, pm):
"""
Create filepath for indicated checkpoint.
:param str | pypiper.Stage checkpoint: Pipeline phase/stage or one's name
:param pypiper.PipelineManager | pypiper.Pipeline pm: manager of a pipeline
instance, relevant for output folder path.
:return str: standardized checkpoint name for file, plus extension
:raise ValueError: if the checkpoint is given as absolute path that does
not point within pipeline output folder
"""
# Handle case in which checkpoint is given not just as a string, but
# as a checkpoint-like filename. Don't worry about absolute path status
# of a potential filename input, or whether it's in the pipeline's
# output folder. That's handled upstream. While this isn't a protected
# function, there's no real reason to call this from outside the package.
if isinstance(checkpoint, str):
if os.path.isabs(checkpoint):
if is_in_file_tree(checkpoint, pm.outfolder):
return checkpoint
else:
raise ValueError(
"Absolute checkpoint path '{}' is not in pipeline output "
"folder '{}'".format(checkpoint, pm.outfolder))
_, ext = os.path.splitext(checkpoint)
if ext == CHECKPOINT_EXTENSION:
return pipeline_filepath(pm, filename=checkpoint)
# Allow Pipeline as pm type without importing Pipeline.
try:
pm = pm.manager
except AttributeError:
pass
# We want the checkpoint filename itself to become a suffix, with a
# delimiter intervening between the pipeline name and the checkpoint
# name + extension. This is to handle the case in which a single, e.g.,
# sample's output folder is the destination for output from multiple
# pipelines, and we thus want to be able to distinguish between
# checkpoint files from different pipelines for that sample that may
# well define one or more stages with the same name (e.g., trim_reads,
# align_reads, etc.)
chkpt_name = checkpoint_filename(checkpoint, pipeline_name=pm.name)
return pipeline_filepath(pm, filename=chkpt_name) | [
"def",
"checkpoint_filepath",
"(",
"checkpoint",
",",
"pm",
")",
":",
"# Handle case in which checkpoint is given not just as a string, but",
"# as a checkpoint-like filename. Don't worry about absolute path status",
"# of a potential filename input, or whether it's in the pipeline's",
"# output folder. That's handled upstream. While this isn't a protected",
"# function, there's no real reason to call this from outside the package.",
"if",
"isinstance",
"(",
"checkpoint",
",",
"str",
")",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"checkpoint",
")",
":",
"if",
"is_in_file_tree",
"(",
"checkpoint",
",",
"pm",
".",
"outfolder",
")",
":",
"return",
"checkpoint",
"else",
":",
"raise",
"ValueError",
"(",
"\"Absolute checkpoint path '{}' is not in pipeline output \"",
"\"folder '{}'\"",
".",
"format",
"(",
"checkpoint",
",",
"pm",
".",
"outfolder",
")",
")",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"checkpoint",
")",
"if",
"ext",
"==",
"CHECKPOINT_EXTENSION",
":",
"return",
"pipeline_filepath",
"(",
"pm",
",",
"filename",
"=",
"checkpoint",
")",
"# Allow Pipeline as pm type without importing Pipeline.",
"try",
":",
"pm",
"=",
"pm",
".",
"manager",
"except",
"AttributeError",
":",
"pass",
"# We want the checkpoint filename itself to become a suffix, with a",
"# delimiter intervening between the pipeline name and the checkpoint",
"# name + extension. This is to handle the case in which a single, e.g.,",
"# sample's output folder is the destination for output from multiple",
"# pipelines, and we thus want to be able to distinguish between",
"# checkpoint files from different pipelines for that sample that may",
"# well define one or more stages with the same name (e.g., trim_reads,",
"# align_reads, etc.)",
"chkpt_name",
"=",
"checkpoint_filename",
"(",
"checkpoint",
",",
"pipeline_name",
"=",
"pm",
".",
"name",
")",
"return",
"pipeline_filepath",
"(",
"pm",
",",
"filename",
"=",
"chkpt_name",
")"
] | Create filepath for indicated checkpoint.
:param str | pypiper.Stage checkpoint: Pipeline phase/stage or one's name
:param pypiper.PipelineManager | pypiper.Pipeline pm: manager of a pipeline
instance, relevant for output folder path.
:return str: standardized checkpoint name for file, plus extension
:raise ValueError: if the checkpoint is given as absolute path that does
not point within pipeline output folder | [
"Create",
"filepath",
"for",
"indicated",
"checkpoint",
"."
] | python | train |
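A hypothetical usage sketch for the function above. It assumes `pypiper` is installed and that `checkpoint_filepath` is importable from `pypiper.utils`; the pipeline name, output folder, and stage name are invented for illustration, and constructing a `PipelineManager` has side effects (it creates the output folder and starts a log).

```python
import pypiper
from pypiper.utils import checkpoint_filepath  # import location assumed

pm = pypiper.PipelineManager(name="demo_pipeline", outfolder="pipeline_output/")
path = checkpoint_filepath("align_reads", pm)
print(path)
# Expected to resolve to a file such as
# pipeline_output/demo_pipeline<delimiter>align_reads + CHECKPOINT_EXTENSION,
# where the delimiter and extension come from pypiper's constants.
pm.stop_pipeline()
```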
aws/aws-iot-device-sdk-python | AWSIoTPythonSDK/core/protocol/paho/client.py | https://github.com/aws/aws-iot-device-sdk-python/blob/f0aa2ce34b21dd2e44f4fb7e1d058656aaf2fc62/AWSIoTPythonSDK/core/protocol/paho/client.py#L1399-L1411 | def loop_stop(self, force=False):
"""This is part of the threaded client interface. Call this once to
stop the network thread previously created with loop_start(). This call
will block until the network thread finishes.
The force parameter is currently ignored.
"""
if self._thread is None:
return MQTT_ERR_INVAL
self._thread_terminate = True
self._thread.join()
self._thread = None | [
"def",
"loop_stop",
"(",
"self",
",",
"force",
"=",
"False",
")",
":",
"if",
"self",
".",
"_thread",
"is",
"None",
":",
"return",
"MQTT_ERR_INVAL",
"self",
".",
"_thread_terminate",
"=",
"True",
"self",
".",
"_thread",
".",
"join",
"(",
")",
"self",
".",
"_thread",
"=",
"None"
] | This is part of the threaded client interface. Call this once to
stop the network thread previously created with loop_start(). This call
will block until the network thread finishes.
The force parameter is currently ignored. | [
"This",
"is",
"part",
"of",
"the",
"threaded",
"client",
"interface",
".",
"Call",
"this",
"once",
"to",
"stop",
"the",
"network",
"thread",
"previously",
"created",
"with",
"loop_start",
"()",
".",
"This",
"call",
"will",
"block",
"until",
"the",
"network",
"thread",
"finishes",
"."
] | python | train |
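The `loop_stop` method above belongs to a vendored copy of the paho MQTT client. The threaded-loop pattern it supports looks like the following sketch, shown here with the standalone `paho-mqtt` package (1.x constructor API); the broker address and topic are placeholders.

```python
import time
import paho.mqtt.client as mqtt

client = mqtt.Client()
client.connect("broker.example.com", 1883)  # placeholder broker
client.loop_start()                  # spawn the background network thread
client.publish("demo/topic", "hello")
time.sleep(1)                        # give the thread time to flush the message
client.loop_stop()                   # joins the network thread before returning
client.disconnect()
```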
wmayner/pyphi | pyphi/convert.py | https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/convert.py#L176-L190 | def to_multidimensional(tpm):
"""Reshape a state-by-node TPM to the multidimensional form.
See documentation for the |Network| object for more information on TPM
formats.
"""
# Cast to np.array.
tpm = np.array(tpm)
# Get the number of nodes.
N = tpm.shape[-1]
# Reshape. We use Fortran ordering here so that the rows use the
# little-endian convention (least-significant bits correspond to low-index
# nodes). Note that this does not change the actual memory layout (C- or
# Fortran-contiguous), so there is no performance loss.
return tpm.reshape([2] * N + [N], order="F").astype(float) | [
"def",
"to_multidimensional",
"(",
"tpm",
")",
":",
"# Cast to np.array.",
"tpm",
"=",
"np",
".",
"array",
"(",
"tpm",
")",
"# Get the number of nodes.",
"N",
"=",
"tpm",
".",
"shape",
"[",
"-",
"1",
"]",
"# Reshape. We use Fortran ordering here so that the rows use the",
"# little-endian convention (least-significant bits correspond to low-index",
"# nodes). Note that this does not change the actual memory layout (C- or",
"# Fortran-contiguous), so there is no performance loss.",
"return",
"tpm",
".",
"reshape",
"(",
"[",
"2",
"]",
"*",
"N",
"+",
"[",
"N",
"]",
",",
"order",
"=",
"\"F\"",
")",
".",
"astype",
"(",
"float",
")"
] | Reshape a state-by-node TPM to the multidimensional form.
See documentation for the |Network| object for more information on TPM
formats. | [
"Reshape",
"a",
"state",
"-",
"by",
"-",
"node",
"TPM",
"to",
"the",
"multidimensional",
"form",
"."
] | python | train |
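A small sketch of the reshaping above, assuming `pyphi` is installed: a two-node state-by-node TPM of shape (4, 2) becomes a (2, 2, 2) array whose leading axes index the current state of each node, with little-endian row order in the flat form.

```python
import numpy as np
from pyphi.convert import to_multidimensional

# State-by-node TPM for 2 nodes; rows are states (0,0), (1,0), (0,1), (1,1)
# in little-endian order (node 0 is the least-significant bit).
tpm = np.array([
    [0.0, 0.0],
    [0.0, 1.0],
    [1.0, 0.0],
    [1.0, 1.0],
])

md = to_multidimensional(tpm)
print(md.shape)                        # (2, 2, 2)
print(np.allclose(md[1, 0], tpm[1]))   # True: row for state (node0=1, node1=0)
```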
hobson/pug-invest | pug/invest/sandbox/sim.py | https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/sandbox/sim.py#L678-L701 | def clipping_params(ts, capacity=100):
"""Start and end index that clips the price/value of a time series the most
Assumes that the integrated maximum includes the peak (instantaneous maximum).
Arguments:
ts (TimeSeries): Time series to attempt to clip to as low a max value as possible
capacity (float): Total "funds" or "energy" available for clipping (integrated area under time series)
Returns:
2-tuple: Timestamp of the start and end of the period of the maximum clipped integrated increase
"""
ts_sorted = ts.order(ascending=False)
i, t0, t1, integral = 1, None, None, 0
while integral <= capacity and i+1 < len(ts):
i += 1
t0_within_capacity = t0
t1_within_capacity = t1
t0 = min(ts_sorted.index[:i])
t1 = max(ts_sorted.index[:i])
integral = integrated_change(ts[t0:t1])
print i, t0, ts[t0], t1, ts[t1], integral
if t0_within_capacity and t1_within_capacity:
return t0_within_capacity, t1_within_capacity | [
"def",
"clipping_params",
"(",
"ts",
",",
"capacity",
"=",
"100",
")",
":",
"ts_sorted",
"=",
"ts",
".",
"order",
"(",
"ascending",
"=",
"False",
")",
"i",
",",
"t0",
",",
"t1",
",",
"integral",
"=",
"1",
",",
"None",
",",
"None",
",",
"0",
"while",
"integral",
"<=",
"capacity",
"and",
"i",
"+",
"1",
"<",
"len",
"(",
"ts",
")",
":",
"i",
"+=",
"1",
"t0_within_capacity",
"=",
"t0",
"t1_within_capacity",
"=",
"t1",
"t0",
"=",
"min",
"(",
"ts_sorted",
".",
"index",
"[",
":",
"i",
"]",
")",
"t1",
"=",
"max",
"(",
"ts_sorted",
".",
"index",
"[",
":",
"i",
"]",
")",
"integral",
"=",
"integrated_change",
"(",
"ts",
"[",
"t0",
":",
"t1",
"]",
")",
"print",
"i",
",",
"t0",
",",
"ts",
"[",
"t0",
"]",
",",
"t1",
",",
"ts",
"[",
"t1",
"]",
",",
"integral",
"if",
"t0_within_capacity",
"and",
"t1_within_capacity",
":",
"return",
"t0_within_capacity",
",",
"t1_within_capacity"
] | Start and end index that clips the price/value of a time series the most
Assumes that the integrated maximum includes the peak (instantaneous maximum).
Arguments:
ts (TimeSeries): Time series to attempt to clip to as low a max value as possible
capacity (float): Total "funds" or "energy" available for clipping (integrated area under time series)
Returns:
2-tuple: Timestamp of the start and end of the period of the maximum clipped integrated increase | [
"Start",
"and",
"end",
"index",
"that",
"clips",
"the",
"price",
"/",
"value",
"of",
"a",
"time",
"series",
"the",
"most"
] | python | train |