repo (string, lengths 7-54) | path (string, lengths 4-192) | url (string, lengths 87-284) | code (string, lengths 78-104k) | code_tokens (sequence) | docstring (string, lengths 1-46.9k) | docstring_tokens (sequence) | language (string, 1 class) | partition (string, 3 classes)
---|---|---|---|---|---|---|---|---|
rameshg87/pyremotevbox | pyremotevbox/ZSI/wstools/WSDLTools.py | https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/WSDLTools.py#L1108-L1117 | def getAddressBinding(self):
"""A convenience method to obtain the extension element used
as the address binding for the port."""
for item in self.extensions:
if isinstance(item, SoapAddressBinding) or \
isinstance(item, HttpAddressBinding):
return item
raise WSDLError(
'No address binding found in port.'
) | [
"def",
"getAddressBinding",
"(",
"self",
")",
":",
"for",
"item",
"in",
"self",
".",
"extensions",
":",
"if",
"isinstance",
"(",
"item",
",",
"SoapAddressBinding",
")",
"or",
"isinstance",
"(",
"item",
",",
"HttpAddressBinding",
")",
":",
"return",
"item",
"raise",
"WSDLError",
"(",
"'No address binding found in port.'",
")"
] | A convenience method to obtain the extension element used
as the address binding for the port. | [
"A",
"convenience",
"method",
"to",
"obtain",
"the",
"extension",
"element",
"used",
"as",
"the",
"address",
"binding",
"for",
"the",
"port",
"."
] | python | train |
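Each record in this listing pairs a repository name, file path, and source URL with a Python function's code, its docstring, and token sequences for both, plus the language and partition labels described in the header. The sketch below is one possible way to read such records back into those fields; it is not the dataset's official loader, it assumes the records are stored as JSON Lines with keys matching the column names, and the filename is hypothetical.

```python
import json

# Minimal sketch, assuming one JSON object per line with keys matching the
# column header above: "repo", "path", "url", "code", "code_tokens",
# "docstring", "docstring_tokens", "language", "partition".
# The filename is hypothetical.
def iter_records(path="code_docstring_pairs.jsonl"):
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            if line.strip():
                yield json.loads(line)

# Example: gather (code, docstring) pairs from the training partition only.
train_pairs = [
    (rec["code"], rec["docstring"])
    for rec in iter_records()
    if rec.get("partition") == "train"
]
```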
OrkoHunter/keep | keep/cli.py | https://github.com/OrkoHunter/keep/blob/2253c60b4024c902115ae0472227059caee4a5eb/keep/cli.py#L22-L25 | def vlog(self, msg, *args):
"""Logs a message to stderr only if verbose is enabled."""
if self.verbose:
self.log(msg, *args) | [
"def",
"vlog",
"(",
"self",
",",
"msg",
",",
"*",
"args",
")",
":",
"if",
"self",
".",
"verbose",
":",
"self",
".",
"log",
"(",
"msg",
",",
"*",
"args",
")"
] | Logs a message to stderr only if verbose is enabled. | [
"Logs",
"a",
"message",
"to",
"stderr",
"only",
"if",
"verbose",
"is",
"enabled",
"."
] | python | train |
Duke-GCB/DukeDSClient | ddsc/core/projectuploader.py | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/projectuploader.py#L371-L380 | def after_run(self, remote_file_data):
"""
Save uuid of file to our LocalFile
:param remote_file_data: dict: DukeDS file data
"""
if self.file_upload_post_processor:
self.file_upload_post_processor.run(self.settings.data_service, remote_file_data)
remote_file_id = remote_file_data['id']
self.settings.watcher.transferring_item(self.local_file)
self.local_file.set_remote_id_after_send(remote_file_id) | [
"def",
"after_run",
"(",
"self",
",",
"remote_file_data",
")",
":",
"if",
"self",
".",
"file_upload_post_processor",
":",
"self",
".",
"file_upload_post_processor",
".",
"run",
"(",
"self",
".",
"settings",
".",
"data_service",
",",
"remote_file_data",
")",
"remote_file_id",
"=",
"remote_file_data",
"[",
"'id'",
"]",
"self",
".",
"settings",
".",
"watcher",
".",
"transferring_item",
"(",
"self",
".",
"local_file",
")",
"self",
".",
"local_file",
".",
"set_remote_id_after_send",
"(",
"remote_file_id",
")"
] | Save uuid of file to our LocalFile
:param remote_file_data: dict: DukeDS file data | [
"Save",
"uuid",
"of",
"file",
"to",
"our",
"LocalFile",
":",
"param",
"remote_file_data",
":",
"dict",
":",
"DukeDS",
"file",
"data"
] | python | train |
django-treebeard/django-treebeard | treebeard/mp_tree.py | https://github.com/django-treebeard/django-treebeard/blob/8042ee939cb45394909237da447f8925e3cc6aa3/treebeard/mp_tree.py#L1039-L1042 | def get_root(self):
""":returns: the root node for the current node object."""
return get_result_class(self.__class__).objects.get(
path=self.path[0:self.steplen]) | [
"def",
"get_root",
"(",
"self",
")",
":",
"return",
"get_result_class",
"(",
"self",
".",
"__class__",
")",
".",
"objects",
".",
"get",
"(",
"path",
"=",
"self",
".",
"path",
"[",
"0",
":",
"self",
".",
"steplen",
"]",
")"
] | :returns: the root node for the current node object. | [
":",
"returns",
":",
"the",
"root",
"node",
"for",
"the",
"current",
"node",
"object",
"."
] | python | train |
nkmathew/yasi-sexp-indenter | yasi.py | https://github.com/nkmathew/yasi-sexp-indenter/blob/6ec2a4675e79606c555bcb67494a0ba994b05805/yasi.py#L579-L615 | def add_keywords(opts):
""" add_keywords(dialect : str) -> [str, str]
Takens a lisp dialect name and returns a list of keywords that increase
indentation by two spaces and those that can be one-armed like 'if'
"""
dialect = opts.dialect
keywords = collections.defaultdict(int)
two_spacers = []
two_armed = IF_LIKE
local_binders = []
if dialect == 'lisp': # Lisp
two_spacers = LISP_KEYWORDS
two_armed += ['multiple-value-bind', 'destructuring-bind', 'do', 'do*']
local_binders += ['flet', 'macrolet', 'labels']
elif dialect == 'scheme': # Scheme
two_spacers = SCHEME_KEYWORDS
two_armed += ['with-slots', 'do', 'do*']
local_binders += []
elif dialect == 'clojure': # Clojure
two_spacers = CLOJURE_KEYWORDS
two_armed += []
local_binders += ['letfn']
elif dialect == 'newlisp': # newLISP
two_spacers = NEWLISP_KEYWORDS
two_armed += []
local_binders += []
elif dialect == 'all':
two_spacers = LISP_KEYWORDS + SCHEME_KEYWORDS + CLOJURE_KEYWORDS + \
NEWLISP_KEYWORDS
keywords = assign_indent_numbers(two_spacers, KEYWORD1, keywords)
keywords = assign_indent_numbers(two_armed, KEYWORD2, keywords)
keywords = assign_indent_numbers(local_binders, KEYWORD4, keywords)
if opts.read_rc:
rc_keywords = parse_rc_json()
keywords.update(rc_keywords[dialect])
return keywords | [
"def",
"add_keywords",
"(",
"opts",
")",
":",
"dialect",
"=",
"opts",
".",
"dialect",
"keywords",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"two_spacers",
"=",
"[",
"]",
"two_armed",
"=",
"IF_LIKE",
"local_binders",
"=",
"[",
"]",
"if",
"dialect",
"==",
"'lisp'",
":",
"# Lisp",
"two_spacers",
"=",
"LISP_KEYWORDS",
"two_armed",
"+=",
"[",
"'multiple-value-bind'",
",",
"'destructuring-bind'",
",",
"'do'",
",",
"'do*'",
"]",
"local_binders",
"+=",
"[",
"'flet'",
",",
"'macrolet'",
",",
"'labels'",
"]",
"elif",
"dialect",
"==",
"'scheme'",
":",
"# Scheme",
"two_spacers",
"=",
"SCHEME_KEYWORDS",
"two_armed",
"+=",
"[",
"'with-slots'",
",",
"'do'",
",",
"'do*'",
"]",
"local_binders",
"+=",
"[",
"]",
"elif",
"dialect",
"==",
"'clojure'",
":",
"# Clojure",
"two_spacers",
"=",
"CLOJURE_KEYWORDS",
"two_armed",
"+=",
"[",
"]",
"local_binders",
"+=",
"[",
"'letfn'",
"]",
"elif",
"dialect",
"==",
"'newlisp'",
":",
"# newLISP",
"two_spacers",
"=",
"NEWLISP_KEYWORDS",
"two_armed",
"+=",
"[",
"]",
"local_binders",
"+=",
"[",
"]",
"elif",
"dialect",
"==",
"'all'",
":",
"two_spacers",
"=",
"LISP_KEYWORDS",
"+",
"SCHEME_KEYWORDS",
"+",
"CLOJURE_KEYWORDS",
"+",
"NEWLISP_KEYWORDS",
"keywords",
"=",
"assign_indent_numbers",
"(",
"two_spacers",
",",
"KEYWORD1",
",",
"keywords",
")",
"keywords",
"=",
"assign_indent_numbers",
"(",
"two_armed",
",",
"KEYWORD2",
",",
"keywords",
")",
"keywords",
"=",
"assign_indent_numbers",
"(",
"local_binders",
",",
"KEYWORD4",
",",
"keywords",
")",
"if",
"opts",
".",
"read_rc",
":",
"rc_keywords",
"=",
"parse_rc_json",
"(",
")",
"keywords",
".",
"update",
"(",
"rc_keywords",
"[",
"dialect",
"]",
")",
"return",
"keywords"
] | add_keywords(dialect : str) -> [str, str]
Takens a lisp dialect name and returns a list of keywords that increase
indentation by two spaces and those that can be one-armed like 'if' | [
"add_keywords",
"(",
"dialect",
":",
"str",
")",
"-",
">",
"[",
"str",
"str",
"]"
] | python | train |
saltstack/salt | salt/utils/schema.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schema.py#L1481-L1493 | def _add_missing_schema_attributes(self):
'''
Adds any missed schema attributes to the _attributes list
The attributes can be class attributes and they won't be
included in the _attributes list automatically
'''
for attr in [attr for attr in dir(self) if not attr.startswith('__')]:
attr_val = getattr(self, attr)
if isinstance(getattr(self, attr), SchemaItem) and \
attr not in self._attributes:
self._attributes.append(attr) | [
"def",
"_add_missing_schema_attributes",
"(",
"self",
")",
":",
"for",
"attr",
"in",
"[",
"attr",
"for",
"attr",
"in",
"dir",
"(",
"self",
")",
"if",
"not",
"attr",
".",
"startswith",
"(",
"'__'",
")",
"]",
":",
"attr_val",
"=",
"getattr",
"(",
"self",
",",
"attr",
")",
"if",
"isinstance",
"(",
"getattr",
"(",
"self",
",",
"attr",
")",
",",
"SchemaItem",
")",
"and",
"attr",
"not",
"in",
"self",
".",
"_attributes",
":",
"self",
".",
"_attributes",
".",
"append",
"(",
"attr",
")"
] | Adds any missed schema attributes to the _attributes list
The attributes can be class attributes and they won't be
included in the _attributes list automatically | [
"Adds",
"any",
"missed",
"schema",
"attributes",
"to",
"the",
"_attributes",
"list"
] | python | train |
aio-libs/aiohttp-debugtoolbar | aiohttp_debugtoolbar/tbtools/tbtools.py | https://github.com/aio-libs/aiohttp-debugtoolbar/blob/a1c3fb2b487bcaaf23eb71ee4c9c3cfc9cb94322/aiohttp_debugtoolbar/tbtools/tbtools.py#L148-L153 | def log(self, logfile=None):
"""Log the ASCII traceback into a file object."""
if logfile is None:
logfile = sys.stderr
tb = self.plaintext.encode('utf-8', 'replace').rstrip() + '\n'
logfile.write(tb) | [
"def",
"log",
"(",
"self",
",",
"logfile",
"=",
"None",
")",
":",
"if",
"logfile",
"is",
"None",
":",
"logfile",
"=",
"sys",
".",
"stderr",
"tb",
"=",
"self",
".",
"plaintext",
".",
"encode",
"(",
"'utf-8'",
",",
"'replace'",
")",
".",
"rstrip",
"(",
")",
"+",
"'\\n'",
"logfile",
".",
"write",
"(",
"tb",
")"
] | Log the ASCII traceback into a file object. | [
"Log",
"the",
"ASCII",
"traceback",
"into",
"a",
"file",
"object",
"."
] | python | train |
twilio/twilio-python | twilio/rest/autopilot/v1/assistant/field_type/__init__.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/autopilot/v1/assistant/field_type/__init__.py#L296-L309 | def field_values(self):
"""
Access the field_values
:returns: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueList
:rtype: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueList
"""
if self._field_values is None:
self._field_values = FieldValueList(
self._version,
assistant_sid=self._solution['assistant_sid'],
field_type_sid=self._solution['sid'],
)
return self._field_values | [
"def",
"field_values",
"(",
"self",
")",
":",
"if",
"self",
".",
"_field_values",
"is",
"None",
":",
"self",
".",
"_field_values",
"=",
"FieldValueList",
"(",
"self",
".",
"_version",
",",
"assistant_sid",
"=",
"self",
".",
"_solution",
"[",
"'assistant_sid'",
"]",
",",
"field_type_sid",
"=",
"self",
".",
"_solution",
"[",
"'sid'",
"]",
",",
")",
"return",
"self",
".",
"_field_values"
] | Access the field_values
:returns: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueList
:rtype: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueList | [
"Access",
"the",
"field_values"
] | python | train |
Jajcus/pyxmpp2 | pyxmpp2/ext/disco.py | https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/disco.py#L735-L752 | def set_identities(self,identities):
"""Set identities in the disco#info object.
Remove all existing identities from `self`.
:Parameters:
- `identities`: list of identities or identity properties
(jid,node,category,type,name).
:Types:
- `identities`: sequence of `DiscoIdentity` or sequence of sequences
"""
for identity in self.identities:
identity.remove()
for identity in identities:
try:
self.add_identity(identity.name,identity.category,identity.type)
except AttributeError:
self.add_identity(*identity) | [
"def",
"set_identities",
"(",
"self",
",",
"identities",
")",
":",
"for",
"identity",
"in",
"self",
".",
"identities",
":",
"identity",
".",
"remove",
"(",
")",
"for",
"identity",
"in",
"identities",
":",
"try",
":",
"self",
".",
"add_identity",
"(",
"identity",
".",
"name",
",",
"identity",
".",
"category",
",",
"identity",
".",
"type",
")",
"except",
"AttributeError",
":",
"self",
".",
"add_identity",
"(",
"*",
"identity",
")"
] | Set identities in the disco#info object.
Remove all existing identities from `self`.
:Parameters:
- `identities`: list of identities or identity properties
(jid,node,category,type,name).
:Types:
- `identities`: sequence of `DiscoIdentity` or sequence of sequences | [
"Set",
"identities",
"in",
"the",
"disco#info",
"object",
"."
] | python | valid |
tensorlayer/tensorlayer | tensorlayer/layers/convolution/deformable_conv.py | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/convolution/deformable_conv.py#L247-L296 | def _tf_batch_map_offsets(self, inputs, offsets, grid_offset):
"""Batch map offsets into input
Parameters
------------
inputs : ``tf.Tensor``
shape = (b, h, w, c)
offsets: ``tf.Tensor``
shape = (b, h, w, 2*n)
grid_offset: `tf.Tensor``
Offset grids shape = (h, w, n, 2)
Returns
-------
``tf.Tensor``
A Tensor with the shape as (b, h, w, c)
"""
input_shape = inputs.get_shape()
batch_size = tf.shape(inputs)[0]
kernel_n = int(int(offsets.get_shape()[3]) / 2)
input_h = input_shape[1]
input_w = input_shape[2]
channel = input_shape[3]
# inputs (b, h, w, c) --> (b*c, h, w)
inputs = self._to_bc_h_w(inputs, input_shape)
# offsets (b, h, w, 2*n) --> (b, h, w, n, 2)
offsets = tf.reshape(offsets, (batch_size, input_h, input_w, kernel_n, 2))
# offsets (b, h, w, n, 2) --> (b*c, h, w, n, 2)
# offsets = tf.tile(offsets, [channel, 1, 1, 1, 1])
coords = tf.expand_dims(grid_offset, 0) # grid_offset --> (1, h, w, n, 2)
coords = tf.tile(coords, [batch_size, 1, 1, 1, 1]) + offsets # grid_offset --> (b, h, w, n, 2)
# clip out of bound
coords = tf.stack(
[
tf.clip_by_value(coords[:, :, :, :, 0], 0.0, tf.cast(input_h - 1, 'float32')),
tf.clip_by_value(coords[:, :, :, :, 1], 0.0, tf.cast(input_w - 1, 'float32'))
], axis=-1
)
coords = tf.tile(coords, [channel, 1, 1, 1, 1])
mapped_vals = self._tf_batch_map_coordinates(inputs, coords)
# (b*c, h, w, n) --> (b, h, w, n, c)
mapped_vals = self._to_b_h_w_n_c(mapped_vals, [batch_size, input_h, input_w, kernel_n, channel])
return mapped_vals | [
"def",
"_tf_batch_map_offsets",
"(",
"self",
",",
"inputs",
",",
"offsets",
",",
"grid_offset",
")",
":",
"input_shape",
"=",
"inputs",
".",
"get_shape",
"(",
")",
"batch_size",
"=",
"tf",
".",
"shape",
"(",
"inputs",
")",
"[",
"0",
"]",
"kernel_n",
"=",
"int",
"(",
"int",
"(",
"offsets",
".",
"get_shape",
"(",
")",
"[",
"3",
"]",
")",
"/",
"2",
")",
"input_h",
"=",
"input_shape",
"[",
"1",
"]",
"input_w",
"=",
"input_shape",
"[",
"2",
"]",
"channel",
"=",
"input_shape",
"[",
"3",
"]",
"# inputs (b, h, w, c) --> (b*c, h, w)",
"inputs",
"=",
"self",
".",
"_to_bc_h_w",
"(",
"inputs",
",",
"input_shape",
")",
"# offsets (b, h, w, 2*n) --> (b, h, w, n, 2)",
"offsets",
"=",
"tf",
".",
"reshape",
"(",
"offsets",
",",
"(",
"batch_size",
",",
"input_h",
",",
"input_w",
",",
"kernel_n",
",",
"2",
")",
")",
"# offsets (b, h, w, n, 2) --> (b*c, h, w, n, 2)",
"# offsets = tf.tile(offsets, [channel, 1, 1, 1, 1])",
"coords",
"=",
"tf",
".",
"expand_dims",
"(",
"grid_offset",
",",
"0",
")",
"# grid_offset --> (1, h, w, n, 2)",
"coords",
"=",
"tf",
".",
"tile",
"(",
"coords",
",",
"[",
"batch_size",
",",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
")",
"+",
"offsets",
"# grid_offset --> (b, h, w, n, 2)",
"# clip out of bound",
"coords",
"=",
"tf",
".",
"stack",
"(",
"[",
"tf",
".",
"clip_by_value",
"(",
"coords",
"[",
":",
",",
":",
",",
":",
",",
":",
",",
"0",
"]",
",",
"0.0",
",",
"tf",
".",
"cast",
"(",
"input_h",
"-",
"1",
",",
"'float32'",
")",
")",
",",
"tf",
".",
"clip_by_value",
"(",
"coords",
"[",
":",
",",
":",
",",
":",
",",
":",
",",
"1",
"]",
",",
"0.0",
",",
"tf",
".",
"cast",
"(",
"input_w",
"-",
"1",
",",
"'float32'",
")",
")",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"coords",
"=",
"tf",
".",
"tile",
"(",
"coords",
",",
"[",
"channel",
",",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
")",
"mapped_vals",
"=",
"self",
".",
"_tf_batch_map_coordinates",
"(",
"inputs",
",",
"coords",
")",
"# (b*c, h, w, n) --> (b, h, w, n, c)",
"mapped_vals",
"=",
"self",
".",
"_to_b_h_w_n_c",
"(",
"mapped_vals",
",",
"[",
"batch_size",
",",
"input_h",
",",
"input_w",
",",
"kernel_n",
",",
"channel",
"]",
")",
"return",
"mapped_vals"
] | Batch map offsets into input
Parameters
------------
inputs : ``tf.Tensor``
shape = (b, h, w, c)
offsets: ``tf.Tensor``
shape = (b, h, w, 2*n)
grid_offset: `tf.Tensor``
Offset grids shape = (h, w, n, 2)
Returns
-------
``tf.Tensor``
A Tensor with the shape as (b, h, w, c) | [
"Batch",
"map",
"offsets",
"into",
"input"
] | python | valid |
getpelican/pelican-plugins | events/events.py | https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/events/events.py#L80-L103 | def parse_article(generator, metadata):
"""Collect articles metadata to be used for building the event calendar
:returns: None
"""
if 'event-start' not in metadata:
return
dtstart = parse_tstamp(metadata, 'event-start')
if 'event-end' in metadata:
dtend = parse_tstamp(metadata, 'event-end')
elif 'event-duration' in metadata:
dtdelta = parse_timedelta(metadata)
dtend = dtstart + dtdelta
else:
msg = "Either 'event-end' or 'event-duration' must be" + \
" speciefied in the event named '%s'" % metadata['title']
log.error(msg)
raise ValueError(msg)
events.append(Event(dtstart, dtend, metadata)) | [
"def",
"parse_article",
"(",
"generator",
",",
"metadata",
")",
":",
"if",
"'event-start'",
"not",
"in",
"metadata",
":",
"return",
"dtstart",
"=",
"parse_tstamp",
"(",
"metadata",
",",
"'event-start'",
")",
"if",
"'event-end'",
"in",
"metadata",
":",
"dtend",
"=",
"parse_tstamp",
"(",
"metadata",
",",
"'event-end'",
")",
"elif",
"'event-duration'",
"in",
"metadata",
":",
"dtdelta",
"=",
"parse_timedelta",
"(",
"metadata",
")",
"dtend",
"=",
"dtstart",
"+",
"dtdelta",
"else",
":",
"msg",
"=",
"\"Either 'event-end' or 'event-duration' must be\"",
"+",
"\" speciefied in the event named '%s'\"",
"%",
"metadata",
"[",
"'title'",
"]",
"log",
".",
"error",
"(",
"msg",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"events",
".",
"append",
"(",
"Event",
"(",
"dtstart",
",",
"dtend",
",",
"metadata",
")",
")"
] | Collect articles metadata to be used for building the event calendar
:returns: None | [
"Collect",
"articles",
"metadata",
"to",
"be",
"used",
"for",
"building",
"the",
"event",
"calendar"
] | python | train |
aws/aws-xray-sdk-python | aws_xray_sdk/core/recorder.py | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/recorder.py#L460-L473 | def _send_segment(self):
"""
Send the current segment to X-Ray daemon if it is present and
sampled, then clean up context storage.
The emitter will handle failures.
"""
segment = self.current_segment()
if not segment:
return
if segment.sampled:
self.emitter.send_entity(segment)
self.clear_trace_entities() | [
"def",
"_send_segment",
"(",
"self",
")",
":",
"segment",
"=",
"self",
".",
"current_segment",
"(",
")",
"if",
"not",
"segment",
":",
"return",
"if",
"segment",
".",
"sampled",
":",
"self",
".",
"emitter",
".",
"send_entity",
"(",
"segment",
")",
"self",
".",
"clear_trace_entities",
"(",
")"
] | Send the current segment to X-Ray daemon if it is present and
sampled, then clean up context storage.
The emitter will handle failures. | [
"Send",
"the",
"current",
"segment",
"to",
"X",
"-",
"Ray",
"daemon",
"if",
"it",
"is",
"present",
"and",
"sampled",
"then",
"clean",
"up",
"context",
"storage",
".",
"The",
"emitter",
"will",
"handle",
"failures",
"."
] | python | train |
hyperledger/sawtooth-core | cli/sawtooth_cli/admin_command/keygen.py | https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/cli/sawtooth_cli/admin_command/keygen.py#L65-L133 | def do_keygen(args):
"""Executes the key generation operation, given the parsed arguments.
Args:
args (:obj:`Namespace`): The parsed args.
"""
if args.key_name is not None:
key_name = args.key_name
else:
key_name = 'validator'
key_dir = get_key_dir()
if not os.path.exists(key_dir):
raise CliException("Key directory does not exist: {}".format(key_dir))
priv_filename = os.path.join(key_dir, key_name + '.priv')
pub_filename = os.path.join(key_dir, key_name + '.pub')
if not args.force:
file_exists = False
for filename in [priv_filename, pub_filename]:
if os.path.exists(filename):
file_exists = True
print('file exists: {}'.format(filename), file=sys.stderr)
if file_exists:
raise CliException(
'files exist, rerun with --force to overwrite existing files')
context = create_context('secp256k1')
private_key = context.new_random_private_key()
public_key = context.get_public_key(private_key)
try:
priv_exists = os.path.exists(priv_filename)
with open(priv_filename, 'w') as priv_fd:
if not args.quiet:
if priv_exists:
print('overwriting file: {}'.format(priv_filename))
else:
print('writing file: {}'.format(priv_filename))
priv_fd.write(private_key.as_hex())
priv_fd.write('\n')
# Get the uid and gid of the key directory
keydir_info = os.stat(key_dir)
keydir_gid = keydir_info.st_gid
keydir_uid = keydir_info.st_uid
# Set user and group on keys to the user/group of the key directory
os.chown(priv_filename, keydir_uid, keydir_gid)
# Set the private key u+rw g+r
os.chmod(priv_filename, 0o640)
pub_exists = os.path.exists(pub_filename)
with open(pub_filename, 'w') as pub_fd:
if not args.quiet:
if pub_exists:
print('overwriting file: {}'.format(pub_filename))
else:
print('writing file: {}'.format(pub_filename))
pub_fd.write(public_key.as_hex())
pub_fd.write('\n')
# Set user and group on keys to the user/group of the key directory
os.chown(pub_filename, keydir_uid, keydir_gid)
# Set the public key u+rw g+r o+r
os.chmod(pub_filename, 0o644)
except IOError as ioe:
raise CliException('IOError: {}'.format(str(ioe))) | [
"def",
"do_keygen",
"(",
"args",
")",
":",
"if",
"args",
".",
"key_name",
"is",
"not",
"None",
":",
"key_name",
"=",
"args",
".",
"key_name",
"else",
":",
"key_name",
"=",
"'validator'",
"key_dir",
"=",
"get_key_dir",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"key_dir",
")",
":",
"raise",
"CliException",
"(",
"\"Key directory does not exist: {}\"",
".",
"format",
"(",
"key_dir",
")",
")",
"priv_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"key_dir",
",",
"key_name",
"+",
"'.priv'",
")",
"pub_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"key_dir",
",",
"key_name",
"+",
"'.pub'",
")",
"if",
"not",
"args",
".",
"force",
":",
"file_exists",
"=",
"False",
"for",
"filename",
"in",
"[",
"priv_filename",
",",
"pub_filename",
"]",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"file_exists",
"=",
"True",
"print",
"(",
"'file exists: {}'",
".",
"format",
"(",
"filename",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"if",
"file_exists",
":",
"raise",
"CliException",
"(",
"'files exist, rerun with --force to overwrite existing files'",
")",
"context",
"=",
"create_context",
"(",
"'secp256k1'",
")",
"private_key",
"=",
"context",
".",
"new_random_private_key",
"(",
")",
"public_key",
"=",
"context",
".",
"get_public_key",
"(",
"private_key",
")",
"try",
":",
"priv_exists",
"=",
"os",
".",
"path",
".",
"exists",
"(",
"priv_filename",
")",
"with",
"open",
"(",
"priv_filename",
",",
"'w'",
")",
"as",
"priv_fd",
":",
"if",
"not",
"args",
".",
"quiet",
":",
"if",
"priv_exists",
":",
"print",
"(",
"'overwriting file: {}'",
".",
"format",
"(",
"priv_filename",
")",
")",
"else",
":",
"print",
"(",
"'writing file: {}'",
".",
"format",
"(",
"priv_filename",
")",
")",
"priv_fd",
".",
"write",
"(",
"private_key",
".",
"as_hex",
"(",
")",
")",
"priv_fd",
".",
"write",
"(",
"'\\n'",
")",
"# Get the uid and gid of the key directory",
"keydir_info",
"=",
"os",
".",
"stat",
"(",
"key_dir",
")",
"keydir_gid",
"=",
"keydir_info",
".",
"st_gid",
"keydir_uid",
"=",
"keydir_info",
".",
"st_uid",
"# Set user and group on keys to the user/group of the key directory",
"os",
".",
"chown",
"(",
"priv_filename",
",",
"keydir_uid",
",",
"keydir_gid",
")",
"# Set the private key u+rw g+r",
"os",
".",
"chmod",
"(",
"priv_filename",
",",
"0o640",
")",
"pub_exists",
"=",
"os",
".",
"path",
".",
"exists",
"(",
"pub_filename",
")",
"with",
"open",
"(",
"pub_filename",
",",
"'w'",
")",
"as",
"pub_fd",
":",
"if",
"not",
"args",
".",
"quiet",
":",
"if",
"pub_exists",
":",
"print",
"(",
"'overwriting file: {}'",
".",
"format",
"(",
"pub_filename",
")",
")",
"else",
":",
"print",
"(",
"'writing file: {}'",
".",
"format",
"(",
"pub_filename",
")",
")",
"pub_fd",
".",
"write",
"(",
"public_key",
".",
"as_hex",
"(",
")",
")",
"pub_fd",
".",
"write",
"(",
"'\\n'",
")",
"# Set user and group on keys to the user/group of the key directory",
"os",
".",
"chown",
"(",
"pub_filename",
",",
"keydir_uid",
",",
"keydir_gid",
")",
"# Set the public key u+rw g+r o+r",
"os",
".",
"chmod",
"(",
"pub_filename",
",",
"0o644",
")",
"except",
"IOError",
"as",
"ioe",
":",
"raise",
"CliException",
"(",
"'IOError: {}'",
".",
"format",
"(",
"str",
"(",
"ioe",
")",
")",
")"
] | Executes the key generation operation, given the parsed arguments.
Args:
args (:obj:`Namespace`): The parsed args. | [
"Executes",
"the",
"key",
"generation",
"operation",
"given",
"the",
"parsed",
"arguments",
"."
] | python | train |
NuGrid/NuGridPy | nugridpy/data_plot.py | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/data_plot.py#L1937-L2625 | def abu_flux_chart(self, cycle, ilabel=True, imlabel=True,
imagic=False, boxstable=True, lbound=(-12,0),
plotaxis=[0,0,0,0], which_flux=None, prange=None,
profile='charged', show=True):
'''
Plots an abundance and flux chart
Parameters
----------
cycle : string, integer or list
The cycle we are looking in. If it is a list of cycles,
this method will then do a plot for each of these cycles
and save them all to a file.
ilabel : boolean, optional
Elemental labels off/on. The default is True.
imlabel : boolean, optional
Label for isotopic masses off/on. The default is True.
imagic : boolean, optional
Turn lines for magic numbers off/on. The default is False.
boxstable : boolean, optional
Plot the black boxes around the stable elements. The
defaults is True.
lbound : tuple, optional
Boundaries for colour spectrum ploted. The default is
(-12,0).
plotaxis : list, optional
Set axis limit. If [0, 0, 0, 0] the complete range in (N,Z)
will be plotted. It equates to [xMin, xMax, Ymin, Ymax].
The default is [0, 0, 0, 0].
which_flux : integer, optional
Set to 0 for nucleosynthesis flux plot. Set to 1 for
energy flux plot. Setting wich_flux to 0 is equivelent to
setting it to 0. The default is None.
prange : integer, optional
Range of fluxes to be considered, if prange is None then
the plot range is set to 8. The default is None.
profile : string, optional
'charged' is ideal setting to show charged particle
reactions flow. 'neutron' is ideal setting for neutron
captures flows. The default is 'charged'.
show : boolean, optional
Boolean of if the plot should be displayed. Useful with
saving multiple plots using abu_chartMulti. The default is
True.
'''
#######################################################################
#### plot options
# Set axis limit: If default [0,0,0,0] the complete range in (N,Z) will
# be plotted, i.e. all isotopes, else specify the limits in
# plotaxis = [xmin,xmax,ymin,ymax]
#######################################################################
# read data file
#inpfile = cycle
#ff = fdic.ff(inpfile)
# with the flux implementation I am not using mass range for now.
# It may be introduced eventually.
mass_range = None
if str(cycle.__class__)=="<type 'list'>":
self.abu_chartMulti(cycle, mass_range,ilabel,imlabel,imlabel_fontsize,imagic,boxstable,\
lbound,plotaxis)
return
plotType=self._classTest()
#if mass_range!=None and mass_range[0]>mass_range[1]:
#print 'Please input a proper mass range'
#print 'Returning None'
#return None
if plotType=='se':
cycle=self.se.findCycle(cycle)
nin=zeros(len(self.se.A))
zin=zeros(len(self.se.Z))
for i in range(len(nin)):
nin[i]=self.se.A[i]
zin[i]=self.se.Z[i]
for i in range(len(nin)):
nin[i]=nin[i]-zin[i]
yin=self.get(cycle, 'iso_massf')
isom=self.se.isomeric_states
masses = self.se.get(cycle,'mass')
if mass_range != None:
masses = self.se.get(cycle,'mass')
masses.sort()
if mass_range != None:
tmpyps=[]
masses = self.se.get(cycle,'mass')
masses = self.se.get(cycle,'mass')
masses.sort()
for i in range(len(masses)):
if (masses[i] >mass_range[0] and masses[i]<mass_range[1]) or\
(masses[i]==mass_range[0] or masses[i]==mass_range[1]):
tmpyps.append(yin[i])
yin=tmpyps
tmp=zeros(len(yin[0]))
for i in range(len(yin)):
for j in range(len(yin[i])):
tmp[j]+=yin[i][j]
tmp=old_div(tmp,len(yin))
yin=tmp
elif plotType=='PPN':
ain=self.get('A',cycle)
zin=self.get('Z',cycle)
nin=ain-zin
yin=self.get('ABUNDANCE_MF',cycle)
isom=self.get('ISOM',cycle)
if mass_range != None:
tmpA=[]
tmpZ=[]
tmpIsom=[]
tmpyps=[]
for i in range(len(nin)):
if (ain[i] >mass_range[0] and ain[i]<mass_range[1])\
or (ain[i]==mass_range[0] or ain[i]==mass_range[1]):
tmpA.append(nin[i])
tmpZ.append(zin[i])
tmpIsom.append(isom[i])
tmpyps.append(yin[i])
zin=tmpZ
nin=tmpA
yin=tmpyps
isom=tmpIsom
else:
print('This method, abu_chart, is not supported by this class')
print('Returning None')
return None
# in case we call from ipython -pylab, turn interactive on at end again
turnoff=False
if not show:
try:
ioff()
turnoff=True
except NameError:
turnoff=False
nnmax = int(max(nin))+1
nzmax = int(max(zin))+1
nnmax_plot = nnmax
nzmax_plot = nzmax
nzycheck = zeros([nnmax,nzmax,3])
nzycheck_plot = zeros([nnmax,nzmax,3])
for i in range(len(nin)):
if isom[i]==1:
ni = int(nin[i])
zi = int(zin[i])
nzycheck[ni,zi,0] = 1
nzycheck[ni,zi,1] = yin[i]
nzycheck_plot[ni,zi,0] = 1
#######################################################################
# elemental names: elname(i) is the name of element with Z=i
elname=self.elements_names
#### create plot
## define axis and plot style (colormap, size, fontsize etc.)
if plotaxis==[0,0,0,0]:
xdim=10
ydim=6
else:
dx = plotaxis[1]-plotaxis[0]
dy = plotaxis[3]-plotaxis[2]
ydim = 6
xdim = ydim*dx/dy
params = {'axes.labelsize': 15,
'text.fontsize': 12,
'legend.fontsize': 15,
'xtick.labelsize': 15,
'ytick.labelsize': 15,
'text.usetex': True}
#pl.rcParams.update(params) #May cause Error, someting to do with tex
#fig=pl.figure(figsize=(xdim,ydim),dpi=100)
fig=pl.figure()
if profile == 'charged':
ax1 = fig.add_subplot(1, 2, 1)
elif profile == 'neutron':
ax1 = fig.add_subplot(2, 1, 1)
#axx = 0.10
#axy = 0.10
#axw = 0.85
#axh = 0.8
#ax1=pl.axes([axx,axy,axw,axh])
# Tick marks
xminorlocator = MultipleLocator(1)
xmajorlocator = MultipleLocator(5)
ax1.xaxis.set_major_locator(xmajorlocator)
ax1.xaxis.set_minor_locator(xminorlocator)
yminorlocator = MultipleLocator(1)
ymajorlocator = MultipleLocator(5)
ax1.yaxis.set_major_locator(ymajorlocator)
ax1.yaxis.set_minor_locator(yminorlocator)
# color map choice for abundances
#cmapa = cm.jet
cmapa = cm.summer
# color map choice for arrows
cmapr = cm.summer
# if a value is below the lower limit its set to white
cmapa.set_under(color='w')
cmapr.set_under(color='w')
# set value range for abundance colors (log10(Y))
norma = colors.Normalize(vmin=lbound[0],vmax=lbound[1])
# set x- and y-axis scale aspect ratio to 1
#ax1.set_aspect('equal')
#print time,temp and density on top
temp = ' '#'%8.3e' %ff['temp']
time = ' '#'%8.3e' %ff['time']
dens = ' '#'%8.3e' %ff['dens']
#May cause Error, someting to do with tex
'''
#box1 = TextArea("t : " + time + " s~~/~~T$_{9}$ : " + temp + "~~/~~$\\rho_{b}$ : " \
# + dens + ' g/cm$^{3}$', textprops=dict(color="k"))
anchored_box = AnchoredOffsetbox(loc=3,
child=box1, pad=0.,
frameon=False,
bbox_to_anchor=(0., 1.02),
bbox_transform=ax.transAxes,
borderpad=0.,
)
ax.add_artist(anchored_box)
'''
## Colour bar plotted
patches = []
color = []
for i in range(nzmax):
for j in range(nnmax):
if nzycheck[j,i,0]==1:
xy = j-0.5,i-0.5
rect = Rectangle(xy,1,1,)
# abundance
yab = nzycheck[j,i,1]
if yab == 0:
yab=1e-99
col =log10(yab)
patches.append(rect)
color.append(col)
p = PatchCollection(patches, cmap=cmapa, norm=norma)
p.set_array(array(color))
p.set_zorder(1)
ax1.add_collection(p)
cb = pl.colorbar(p)
# colorbar label
if profile == 'neutron':
cb.set_label('log$_{10}$(X)')
# plot file name
graphname = 'abundance-flux-chart'+str(cycle)
# Add black frames for stable isotopes
if boxstable:
for i in range(len(self.stable_el)):
if i == 0:
continue
tmp = self.stable_el[i]
try:
zz= self.elements_names.index(tmp[0]) #charge
except:
continue
for j in range(len(tmp)):
if j == 0:
continue
nn = int(tmp[j]) #atomic mass
nn=nn-zz
xy = nn-0.5,zz-0.5
rect = Rectangle(xy,1,1,ec='k',fc='None',fill='False',lw=4.)
rect.set_zorder(2)
ax1.add_patch(rect)
# decide which array to take for label positions
iarr = 0
# plot element labels
if ilabel:
for z in range(nzmax):
try:
nmin = min(argwhere(nzycheck[:,z,iarr]))[0]-1
nmax = max(argwhere(nzycheck[:,z,iarr]))[0]+1
ax1.text(nmin,z,elname[z],horizontalalignment='center',verticalalignment='center',\
fontsize='small',clip_on=True)
ax1.text(nmax,z,elname[z],horizontalalignment='center',verticalalignment='center',\
fontsize='small',clip_on=True)
except ValueError:
continue
# plot mass numbers
if imlabel:
for z in range(nzmax):
for n in range(nnmax):
a = z+n
if nzycheck[n,z,iarr]==1:
ax1.text(n,z,a,horizontalalignment='center',verticalalignment='center',\
fontsize='x-small',clip_on=True)
# plot lines at magic numbers
if imagic:
ixymagic=[2, 8, 20, 28, 50, 82, 126]
nmagic = len(ixymagic)
for magic in ixymagic:
if magic<=nzmax:
try:
xnmin = min(argwhere(nzycheck[:,magic,iarr]))[0]
xnmax = max(argwhere(nzycheck[:,magic,iarr]))[0]
line = ax1.plot([xnmin,xnmax],[magic,magic],lw=3.,color='r',ls='-')
except ValueError:
dummy=0
if magic<=nnmax:
try:
yzmin = min(argwhere(nzycheck[magic,:,iarr]))[0]
yzmax = max(argwhere(nzycheck[magic,:,iarr]))[0]
line = ax1.plot([magic,magic],[yzmin,yzmax],lw=3.,color='r',ls='-')
except ValueError:
dummy=0
# set axis limits
if plotaxis==[0,0,0,0]:
xmax=max(nin)
ymax=max(zin)
ax1.axis([-0.5,xmax+0.5,-0.5,ymax+0.5])
else:
ax1.axis(plotaxis)
# set x- and y-axis label
ax1.set_ylabel('Proton number')
if profile == 'charged':
ax1.set_xlabel('Neutron number')
#pl.title('Isotopic Chart for cycle '+str(int(cycle)))
#
# here below I read data from the flux_*****.DAT file.
#
file_name = 'flux_'+str(cycle).zfill(5)+'.DAT'
print(file_name)
f = open(file_name)
lines = f.readline()
lines = f.readlines()
f.close()
print_max_flux_in_plot = False
# color map choice for fluxes
#cmapa = cm.jet
cmapa = cm.autumn
# color map choice for arrows
cmapr = cm.autumn
# starting point of arrow
coord_x_1 = []
coord_y_1 = []
# ending point of arrow (option 1)
coord_x_2 = []
coord_y_2 = []
# ending point of arrow (option 2)
coord_x_3 = []
coord_y_3 = []
# fluxes
flux_read = []
flux_log10 = []
if which_flux == None or which_flux == 0:
print('chart for nucleosynthesis fluxes [dYi/dt]')
line_to_read = 9
elif which_flux == 1:
print('chart for energy fluxes')
line_to_read = 10
elif which_flux > 1:
print("you have only option 0 or 1, not larger than 1")
single_line = []
for i in range(len(lines)):
single_line.append(lines[i].split())
coord_y_1.append(int(single_line[i][1]))
coord_x_1.append(int(single_line[i][2])-coord_y_1[i])
coord_y_2.append(int(single_line[i][5]))
coord_x_2.append(int(single_line[i][6])-coord_y_2[i])
coord_y_3.append(int(single_line[i][7]))
coord_x_3.append(int(single_line[i][8])-coord_y_3[i])
try:
flux_read.append(float(single_line[i][line_to_read]))
except ValueError: # this is done to avoid format issues like 3.13725-181...
flux_read.append(1.0E-99)
flux_log10.append(log10(flux_read[i]+1.0e-99))
print(file_name,' read!')
# I need to select smaller sample, with only fluxes inside plotaxis.
if plotaxis!=[0,0,0,0]:
coord_y_1_small=[]
coord_x_1_small=[]
coord_y_2_small=[]
coord_x_2_small=[]
coord_y_3_small=[]
coord_x_3_small=[]
flux_log10_small = []
for i in range(len(flux_log10)):
I_am_in = 0
if coord_y_1[i] > plotaxis[2] and coord_y_1[i] < plotaxis[3] and coord_x_1[i] > plotaxis[0] and coord_x_1[i] < plotaxis[1]:
I_am_in = 1
coord_y_1_small.append(int(coord_y_1[i]))
coord_x_1_small.append(int(coord_x_1[i]))
coord_y_2_small.append(int(coord_y_2[i]))
coord_x_2_small.append(int(coord_x_2[i]))
coord_y_3_small.append(int(coord_y_3[i]))
coord_x_3_small.append(int(coord_x_3[i]))
flux_log10_small.append(flux_log10[i])
if coord_y_3[i] > plotaxis[2] and coord_y_3[i] < plotaxis[3] and coord_x_3[i] > plotaxis[0] and coord_x_3[i] < plotaxis[1] and I_am_in == 0:
I_am_in = 1
coord_y_1_small.append(int(coord_y_1[i]))
coord_x_1_small.append(int(coord_x_1[i]))
coord_y_2_small.append(int(coord_y_2[i]))
coord_x_2_small.append(int(coord_x_2[i]))
coord_y_3_small.append(int(coord_y_3[i]))
coord_x_3_small.append(int(coord_x_3[i]))
flux_log10_small.append(flux_log10[i])
# elemental labels off/on [0/1]
ilabel = 1
# label for isotopic masses off/on [0/1]
imlabel = 1
# turn lines for magic numbers off/on [0/1]
imagic = 0
# flow is plotted over "prange" dex. If flow < maxflow-prange it is not plotted
if prange == None:
print('plot range given by default')
prange = 8.
#############################################
#print flux_log10_small
# we should scale prange on plot_axis range, not on max_flux!
max_flux = max(flux_log10)
ind_max_flux = flux_log10.index(max_flux)
if plotaxis!=[0,0,0,0]:
max_flux_small = max(flux_log10_small)
if plotaxis==[0,0,0,0]:
nzmax = int(max(max(coord_y_1),max(coord_y_2),max(coord_y_3)))+1
nnmax = int(max(max(coord_x_1),max(coord_x_2),max(coord_x_3)))+1
coord_x_1_small = coord_x_1
coord_x_2_small = coord_x_2
coord_x_3_small = coord_x_3
coord_y_1_small = coord_y_1
coord_y_2_small = coord_y_2
coord_y_3_small = coord_y_3
flux_log10_small= flux_log10
max_flux_small = max_flux
else:
nzmax = int(max(max(coord_y_1_small),max(coord_y_2_small),max(coord_y_3_small)))+1
nnmax = int(max(max(coord_x_1_small),max(coord_x_2_small),max(coord_x_3_small)))+1
for i in range(nzmax):
for j in range(nnmax):
if nzycheck[j,i,0]==1:
xy = j-0.5,i-0.5
rect = Rectangle(xy,1,1,)
patches.append(rect)
nzycheck = zeros([nnmax_plot,nzmax,3])
coord_x_out = zeros(len(coord_x_2_small), dtype='int')
coord_y_out = zeros(len(coord_y_2_small),dtype='int')
for i in range(len(flux_log10_small)):
nzycheck[coord_x_1_small[i],coord_y_1_small[i],0] = 1
nzycheck[coord_x_1_small[i],coord_y_1_small[i],1] = flux_log10_small[i]
if coord_x_2_small[i] >= coord_x_3_small[i]:
coord_x_out[i] = coord_x_2_small[i]
coord_y_out[i] = coord_y_2_small[i]
nzycheck[coord_x_out[i],coord_y_out[i],0] = 1
nzycheck[coord_x_out[i],coord_y_out[i],1] = flux_log10_small[i]
elif coord_x_2_small[i] < coord_x_3_small[i]:
coord_x_out[i] = coord_x_3_small[i]
coord_y_out[i] = coord_y_3_small[i]
nzycheck[coord_x_out[i],coord_y_out[i],0] = 1
nzycheck[coord_x_out[i],coord_y_out[i],1] = flux_log10_small[i]
if flux_log10_small[i]>max_flux_small-prange:
nzycheck[coord_x_1_small[i],coord_y_1_small[i],2] = 1
nzycheck[coord_x_out[i],coord_y_out[i],2] = 1
#### create plot
if profile == 'charged':
ax2 = fig.add_subplot(1, 2, 2)
elif profile == 'neutron':
ax2 = fig.add_subplot(2, 1, 2)
# Tick marks
xminorlocator = MultipleLocator(1)
xmajorlocator = MultipleLocator(5)
ax2.xaxis.set_major_locator(xmajorlocator)
ax2.xaxis.set_minor_locator(xminorlocator)
yminorlocator = MultipleLocator(1)
ymajorlocator = MultipleLocator(5)
ax2.yaxis.set_major_locator(ymajorlocator)
ax2.yaxis.set_minor_locator(yminorlocator)
## define axis and plot style (colormap, size, fontsize etc.)
if plotaxis==[0,0,0,0]:
xdim=10
ydim=6
else:
dx = plotaxis[1]-plotaxis[0]
dy = plotaxis[3]-plotaxis[2]
ydim = 6
xdim = ydim*dx/dy
format = 'pdf'
# set x- and y-axis scale aspect ratio to 1
#ax2.set_aspect('equal')
# Add black frames for stable isotopes
# Add black frames for stable isotopes
if boxstable:
for i in range(len(self.stable_el)):
if i == 0:
continue
tmp = self.stable_el[i]
try:
zz= self.elements_names.index(tmp[0]) #charge
except:
continue
for j in range(len(tmp)):
if j == 0:
continue
nn = int(tmp[j]) #atomic mass
nn=nn-zz
xy = nn-0.5,zz-0.5
rect = Rectangle(xy,1,1,ec='k',fc='None',fill='False',lw=4.)
rect.set_zorder(2)
ax2.add_patch(rect)
apatches = []
acolor = []
m = old_div(0.8,prange)
vmax=ceil(max(flux_log10_small))
vmin=max(flux_log10_small)-prange
b=-vmin*m+0.1
normr = colors.Normalize(vmin=vmin,vmax=vmax)
ymax=0.
xmax=0.
for i in range(len(flux_log10_small)):
x = coord_x_1_small[i]
y = coord_y_1_small[i]
dx = coord_x_out[i]-coord_x_1_small[i]
dy = coord_y_out[i]-coord_y_1_small[i]
if flux_log10_small[i]>=vmin:
arrowwidth = flux_log10_small[i]*m+b
arrow = Arrow(x,y,dx,dy, width=arrowwidth)
if xmax<x:
xmax=x
if ymax<y:
ymax=y
acol = flux_log10_small[i]
apatches.append(arrow)
acolor.append(acol)
xy = x-0.5,y-0.5
rect = Rectangle(xy,1,1,ec='k',fc='None',fill='False',lw=1.)
patches.append(rect)
xy = x+dx-0.5,y+dy-0.5
rect = Rectangle(xy,1,1,ec='k',fc='None',fill='False',lw=1.)
patches.append(rect)
p = PatchCollection(patches,norm=0,facecolor='w')
p.set_zorder(1)
ax2.add_collection(p)
a = PatchCollection(apatches, cmap=cmapr, norm=normr)
a.set_array(array(acolor))
a.set_zorder(3)
ax2.add_collection(a)
cb = pl.colorbar(a)
# colorbar label
cb.set_label('log$_{10}$($x$)')
if profile == 'neutron':
cb.set_label('log$_{10}$(f)')
# decide which array to take for label positions
iarr = 2
# plot element labels
for z in range(nzmax):
try:
nmin = min(argwhere(nzycheck_plot[:,z,iarr-2]))[0]-1
nmax = max(argwhere(nzycheck_plot[:,z,iarr-2]))[0]+1
ax2.text(nmin,z,elname[z],horizontalalignment='center',verticalalignment='center',fontsize='small',clip_on=True)
ax2.text(nmax,z,elname[z],horizontalalignment='center',verticalalignment='center',fontsize='small',clip_on=True)
except ValueError:
continue
# plot mass numbers
if imlabel:
for z in range(nzmax):
for n in range(nnmax_plot):
a = z+n
if nzycheck_plot[n,z,iarr-2]==1:
ax2.text(n,z,a,horizontalalignment='center',verticalalignment='center',fontsize='x-small',clip_on=True)
# plot lines at magic numbers
if imagic==1:
ixymagic=[2, 8, 20, 28, 50, 82, 126]
nmagic = len(ixymagic)
for magic in ixymagic:
if magic<=nzmax:
try:
xnmin = min(argwhere(nzycheck[:,magic,iarr-2]))[0]
xnmax = max(argwhere(nzycheck[:,magic,iarr-2]))[0]
line = ax2.plot([xnmin,xnmax],[magic,magic],lw=3.,color='r',ls='-')
except ValueError:
dummy=0
if magic<=nnmax:
try:
yzmin = min(argwhere(nzycheck[magic,:,iarr-2]))[0]
yzmax = max(argwhere(nzycheck[magic,:,iarr-2]))[0]
line = ax2.plot([magic,magic],[yzmin,yzmax],lw=3.,color='r',ls='-')
except ValueError:
dummy=0
# set axis limits
if plotaxis==[0,0,0,0]:
ax2.axis([-0.5,xmax+0.5,-0.5,ymax+0.5])
else:
ax2.axis(plotaxis)
# set x- and y-axis label
ax2.set_xlabel('Neutron number')
if profile == 'neutron':
ax2.set_ylabel('Proton number')
if which_flux == None or which_flux == 0:
max_flux_label="max flux = "+str('{0:.4f}'.format(max_flux))
elif which_flux == 1:
max_flux_label="max energy flux = "+str('{0:.4f}'.format(max_flux))
if print_max_flux_in_plot:
ax2.text(plotaxis[1]-1.8,plotaxis[2]+0.1,max_flux_label,fontsize=10.)
#fig.savefig(graphname)
print(graphname,'is done')
if show:
pl.show()
if turnoff:
ion()
return | [
"def",
"abu_flux_chart",
"(",
"self",
",",
"cycle",
",",
"ilabel",
"=",
"True",
",",
"imlabel",
"=",
"True",
",",
"imagic",
"=",
"False",
",",
"boxstable",
"=",
"True",
",",
"lbound",
"=",
"(",
"-",
"12",
",",
"0",
")",
",",
"plotaxis",
"=",
"[",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
",",
"which_flux",
"=",
"None",
",",
"prange",
"=",
"None",
",",
"profile",
"=",
"'charged'",
",",
"show",
"=",
"True",
")",
":",
"#######################################################################",
"#### plot options",
"# Set axis limit: If default [0,0,0,0] the complete range in (N,Z) will",
"# be plotted, i.e. all isotopes, else specify the limits in",
"# plotaxis = [xmin,xmax,ymin,ymax]",
"#######################################################################",
"# read data file",
"#inpfile = cycle",
"#ff = fdic.ff(inpfile)",
"# with the flux implementation I am not using mass range for now.",
"# It may be introduced eventually.",
"mass_range",
"=",
"None",
"if",
"str",
"(",
"cycle",
".",
"__class__",
")",
"==",
"\"<type 'list'>\"",
":",
"self",
".",
"abu_chartMulti",
"(",
"cycle",
",",
"mass_range",
",",
"ilabel",
",",
"imlabel",
",",
"imlabel_fontsize",
",",
"imagic",
",",
"boxstable",
",",
"lbound",
",",
"plotaxis",
")",
"return",
"plotType",
"=",
"self",
".",
"_classTest",
"(",
")",
"#if mass_range!=None and mass_range[0]>mass_range[1]:",
"#print 'Please input a proper mass range'",
"#print 'Returning None'",
"#return None",
"if",
"plotType",
"==",
"'se'",
":",
"cycle",
"=",
"self",
".",
"se",
".",
"findCycle",
"(",
"cycle",
")",
"nin",
"=",
"zeros",
"(",
"len",
"(",
"self",
".",
"se",
".",
"A",
")",
")",
"zin",
"=",
"zeros",
"(",
"len",
"(",
"self",
".",
"se",
".",
"Z",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"nin",
")",
")",
":",
"nin",
"[",
"i",
"]",
"=",
"self",
".",
"se",
".",
"A",
"[",
"i",
"]",
"zin",
"[",
"i",
"]",
"=",
"self",
".",
"se",
".",
"Z",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"nin",
")",
")",
":",
"nin",
"[",
"i",
"]",
"=",
"nin",
"[",
"i",
"]",
"-",
"zin",
"[",
"i",
"]",
"yin",
"=",
"self",
".",
"get",
"(",
"cycle",
",",
"'iso_massf'",
")",
"isom",
"=",
"self",
".",
"se",
".",
"isomeric_states",
"masses",
"=",
"self",
".",
"se",
".",
"get",
"(",
"cycle",
",",
"'mass'",
")",
"if",
"mass_range",
"!=",
"None",
":",
"masses",
"=",
"self",
".",
"se",
".",
"get",
"(",
"cycle",
",",
"'mass'",
")",
"masses",
".",
"sort",
"(",
")",
"if",
"mass_range",
"!=",
"None",
":",
"tmpyps",
"=",
"[",
"]",
"masses",
"=",
"self",
".",
"se",
".",
"get",
"(",
"cycle",
",",
"'mass'",
")",
"masses",
"=",
"self",
".",
"se",
".",
"get",
"(",
"cycle",
",",
"'mass'",
")",
"masses",
".",
"sort",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"masses",
")",
")",
":",
"if",
"(",
"masses",
"[",
"i",
"]",
">",
"mass_range",
"[",
"0",
"]",
"and",
"masses",
"[",
"i",
"]",
"<",
"mass_range",
"[",
"1",
"]",
")",
"or",
"(",
"masses",
"[",
"i",
"]",
"==",
"mass_range",
"[",
"0",
"]",
"or",
"masses",
"[",
"i",
"]",
"==",
"mass_range",
"[",
"1",
"]",
")",
":",
"tmpyps",
".",
"append",
"(",
"yin",
"[",
"i",
"]",
")",
"yin",
"=",
"tmpyps",
"tmp",
"=",
"zeros",
"(",
"len",
"(",
"yin",
"[",
"0",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"yin",
")",
")",
":",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"yin",
"[",
"i",
"]",
")",
")",
":",
"tmp",
"[",
"j",
"]",
"+=",
"yin",
"[",
"i",
"]",
"[",
"j",
"]",
"tmp",
"=",
"old_div",
"(",
"tmp",
",",
"len",
"(",
"yin",
")",
")",
"yin",
"=",
"tmp",
"elif",
"plotType",
"==",
"'PPN'",
":",
"ain",
"=",
"self",
".",
"get",
"(",
"'A'",
",",
"cycle",
")",
"zin",
"=",
"self",
".",
"get",
"(",
"'Z'",
",",
"cycle",
")",
"nin",
"=",
"ain",
"-",
"zin",
"yin",
"=",
"self",
".",
"get",
"(",
"'ABUNDANCE_MF'",
",",
"cycle",
")",
"isom",
"=",
"self",
".",
"get",
"(",
"'ISOM'",
",",
"cycle",
")",
"if",
"mass_range",
"!=",
"None",
":",
"tmpA",
"=",
"[",
"]",
"tmpZ",
"=",
"[",
"]",
"tmpIsom",
"=",
"[",
"]",
"tmpyps",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"nin",
")",
")",
":",
"if",
"(",
"ain",
"[",
"i",
"]",
">",
"mass_range",
"[",
"0",
"]",
"and",
"ain",
"[",
"i",
"]",
"<",
"mass_range",
"[",
"1",
"]",
")",
"or",
"(",
"ain",
"[",
"i",
"]",
"==",
"mass_range",
"[",
"0",
"]",
"or",
"ain",
"[",
"i",
"]",
"==",
"mass_range",
"[",
"1",
"]",
")",
":",
"tmpA",
".",
"append",
"(",
"nin",
"[",
"i",
"]",
")",
"tmpZ",
".",
"append",
"(",
"zin",
"[",
"i",
"]",
")",
"tmpIsom",
".",
"append",
"(",
"isom",
"[",
"i",
"]",
")",
"tmpyps",
".",
"append",
"(",
"yin",
"[",
"i",
"]",
")",
"zin",
"=",
"tmpZ",
"nin",
"=",
"tmpA",
"yin",
"=",
"tmpyps",
"isom",
"=",
"tmpIsom",
"else",
":",
"print",
"(",
"'This method, abu_chart, is not supported by this class'",
")",
"print",
"(",
"'Returning None'",
")",
"return",
"None",
"# in case we call from ipython -pylab, turn interactive on at end again",
"turnoff",
"=",
"False",
"if",
"not",
"show",
":",
"try",
":",
"ioff",
"(",
")",
"turnoff",
"=",
"True",
"except",
"NameError",
":",
"turnoff",
"=",
"False",
"nnmax",
"=",
"int",
"(",
"max",
"(",
"nin",
")",
")",
"+",
"1",
"nzmax",
"=",
"int",
"(",
"max",
"(",
"zin",
")",
")",
"+",
"1",
"nnmax_plot",
"=",
"nnmax",
"nzmax_plot",
"=",
"nzmax",
"nzycheck",
"=",
"zeros",
"(",
"[",
"nnmax",
",",
"nzmax",
",",
"3",
"]",
")",
"nzycheck_plot",
"=",
"zeros",
"(",
"[",
"nnmax",
",",
"nzmax",
",",
"3",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"nin",
")",
")",
":",
"if",
"isom",
"[",
"i",
"]",
"==",
"1",
":",
"ni",
"=",
"int",
"(",
"nin",
"[",
"i",
"]",
")",
"zi",
"=",
"int",
"(",
"zin",
"[",
"i",
"]",
")",
"nzycheck",
"[",
"ni",
",",
"zi",
",",
"0",
"]",
"=",
"1",
"nzycheck",
"[",
"ni",
",",
"zi",
",",
"1",
"]",
"=",
"yin",
"[",
"i",
"]",
"nzycheck_plot",
"[",
"ni",
",",
"zi",
",",
"0",
"]",
"=",
"1",
"#######################################################################",
"# elemental names: elname(i) is the name of element with Z=i",
"elname",
"=",
"self",
".",
"elements_names",
"#### create plot",
"## define axis and plot style (colormap, size, fontsize etc.)",
"if",
"plotaxis",
"==",
"[",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
":",
"xdim",
"=",
"10",
"ydim",
"=",
"6",
"else",
":",
"dx",
"=",
"plotaxis",
"[",
"1",
"]",
"-",
"plotaxis",
"[",
"0",
"]",
"dy",
"=",
"plotaxis",
"[",
"3",
"]",
"-",
"plotaxis",
"[",
"2",
"]",
"ydim",
"=",
"6",
"xdim",
"=",
"ydim",
"*",
"dx",
"/",
"dy",
"params",
"=",
"{",
"'axes.labelsize'",
":",
"15",
",",
"'text.fontsize'",
":",
"12",
",",
"'legend.fontsize'",
":",
"15",
",",
"'xtick.labelsize'",
":",
"15",
",",
"'ytick.labelsize'",
":",
"15",
",",
"'text.usetex'",
":",
"True",
"}",
"#pl.rcParams.update(params) #May cause Error, someting to do with tex",
"#fig=pl.figure(figsize=(xdim,ydim),dpi=100)",
"fig",
"=",
"pl",
".",
"figure",
"(",
")",
"if",
"profile",
"==",
"'charged'",
":",
"ax1",
"=",
"fig",
".",
"add_subplot",
"(",
"1",
",",
"2",
",",
"1",
")",
"elif",
"profile",
"==",
"'neutron'",
":",
"ax1",
"=",
"fig",
".",
"add_subplot",
"(",
"2",
",",
"1",
",",
"1",
")",
"#axx = 0.10",
"#axy = 0.10",
"#axw = 0.85",
"#axh = 0.8",
"#ax1=pl.axes([axx,axy,axw,axh])",
"# Tick marks",
"xminorlocator",
"=",
"MultipleLocator",
"(",
"1",
")",
"xmajorlocator",
"=",
"MultipleLocator",
"(",
"5",
")",
"ax1",
".",
"xaxis",
".",
"set_major_locator",
"(",
"xmajorlocator",
")",
"ax1",
".",
"xaxis",
".",
"set_minor_locator",
"(",
"xminorlocator",
")",
"yminorlocator",
"=",
"MultipleLocator",
"(",
"1",
")",
"ymajorlocator",
"=",
"MultipleLocator",
"(",
"5",
")",
"ax1",
".",
"yaxis",
".",
"set_major_locator",
"(",
"ymajorlocator",
")",
"ax1",
".",
"yaxis",
".",
"set_minor_locator",
"(",
"yminorlocator",
")",
"# color map choice for abundances",
"#cmapa = cm.jet",
"cmapa",
"=",
"cm",
".",
"summer",
"# color map choice for arrows",
"cmapr",
"=",
"cm",
".",
"summer",
"# if a value is below the lower limit its set to white",
"cmapa",
".",
"set_under",
"(",
"color",
"=",
"'w'",
")",
"cmapr",
".",
"set_under",
"(",
"color",
"=",
"'w'",
")",
"# set value range for abundance colors (log10(Y))",
"norma",
"=",
"colors",
".",
"Normalize",
"(",
"vmin",
"=",
"lbound",
"[",
"0",
"]",
",",
"vmax",
"=",
"lbound",
"[",
"1",
"]",
")",
"# set x- and y-axis scale aspect ratio to 1",
"#ax1.set_aspect('equal')",
"#print time,temp and density on top",
"temp",
"=",
"' '",
"#'%8.3e' %ff['temp']",
"time",
"=",
"' '",
"#'%8.3e' %ff['time']",
"dens",
"=",
"' '",
"#'%8.3e' %ff['dens']",
"#May cause Error, someting to do with tex",
"'''\n #box1 = TextArea(\"t : \" + time + \" s~~/~~T$_{9}$ : \" + temp + \"~~/~~$\\\\rho_{b}$ : \" \\\n # + dens + ' g/cm$^{3}$', textprops=dict(color=\"k\"))\n anchored_box = AnchoredOffsetbox(loc=3,\n child=box1, pad=0.,\n frameon=False,\n bbox_to_anchor=(0., 1.02),\n bbox_transform=ax.transAxes,\n borderpad=0.,\n )\n ax.add_artist(anchored_box)\n '''",
"## Colour bar plotted",
"patches",
"=",
"[",
"]",
"color",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"nzmax",
")",
":",
"for",
"j",
"in",
"range",
"(",
"nnmax",
")",
":",
"if",
"nzycheck",
"[",
"j",
",",
"i",
",",
"0",
"]",
"==",
"1",
":",
"xy",
"=",
"j",
"-",
"0.5",
",",
"i",
"-",
"0.5",
"rect",
"=",
"Rectangle",
"(",
"xy",
",",
"1",
",",
"1",
",",
")",
"# abundance",
"yab",
"=",
"nzycheck",
"[",
"j",
",",
"i",
",",
"1",
"]",
"if",
"yab",
"==",
"0",
":",
"yab",
"=",
"1e-99",
"col",
"=",
"log10",
"(",
"yab",
")",
"patches",
".",
"append",
"(",
"rect",
")",
"color",
".",
"append",
"(",
"col",
")",
"p",
"=",
"PatchCollection",
"(",
"patches",
",",
"cmap",
"=",
"cmapa",
",",
"norm",
"=",
"norma",
")",
"p",
".",
"set_array",
"(",
"array",
"(",
"color",
")",
")",
"p",
".",
"set_zorder",
"(",
"1",
")",
"ax1",
".",
"add_collection",
"(",
"p",
")",
"cb",
"=",
"pl",
".",
"colorbar",
"(",
"p",
")",
"# colorbar label",
"if",
"profile",
"==",
"'neutron'",
":",
"cb",
".",
"set_label",
"(",
"'log$_{10}$(X)'",
")",
"# plot file name",
"graphname",
"=",
"'abundance-flux-chart'",
"+",
"str",
"(",
"cycle",
")",
"# Add black frames for stable isotopes",
"if",
"boxstable",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"stable_el",
")",
")",
":",
"if",
"i",
"==",
"0",
":",
"continue",
"tmp",
"=",
"self",
".",
"stable_el",
"[",
"i",
"]",
"try",
":",
"zz",
"=",
"self",
".",
"elements_names",
".",
"index",
"(",
"tmp",
"[",
"0",
"]",
")",
"#charge",
"except",
":",
"continue",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"tmp",
")",
")",
":",
"if",
"j",
"==",
"0",
":",
"continue",
"nn",
"=",
"int",
"(",
"tmp",
"[",
"j",
"]",
")",
"#atomic mass",
"nn",
"=",
"nn",
"-",
"zz",
"xy",
"=",
"nn",
"-",
"0.5",
",",
"zz",
"-",
"0.5",
"rect",
"=",
"Rectangle",
"(",
"xy",
",",
"1",
",",
"1",
",",
"ec",
"=",
"'k'",
",",
"fc",
"=",
"'None'",
",",
"fill",
"=",
"'False'",
",",
"lw",
"=",
"4.",
")",
"rect",
".",
"set_zorder",
"(",
"2",
")",
"ax1",
".",
"add_patch",
"(",
"rect",
")",
"# decide which array to take for label positions",
"iarr",
"=",
"0",
"# plot element labels",
"if",
"ilabel",
":",
"for",
"z",
"in",
"range",
"(",
"nzmax",
")",
":",
"try",
":",
"nmin",
"=",
"min",
"(",
"argwhere",
"(",
"nzycheck",
"[",
":",
",",
"z",
",",
"iarr",
"]",
")",
")",
"[",
"0",
"]",
"-",
"1",
"nmax",
"=",
"max",
"(",
"argwhere",
"(",
"nzycheck",
"[",
":",
",",
"z",
",",
"iarr",
"]",
")",
")",
"[",
"0",
"]",
"+",
"1",
"ax1",
".",
"text",
"(",
"nmin",
",",
"z",
",",
"elname",
"[",
"z",
"]",
",",
"horizontalalignment",
"=",
"'center'",
",",
"verticalalignment",
"=",
"'center'",
",",
"fontsize",
"=",
"'small'",
",",
"clip_on",
"=",
"True",
")",
"ax1",
".",
"text",
"(",
"nmax",
",",
"z",
",",
"elname",
"[",
"z",
"]",
",",
"horizontalalignment",
"=",
"'center'",
",",
"verticalalignment",
"=",
"'center'",
",",
"fontsize",
"=",
"'small'",
",",
"clip_on",
"=",
"True",
")",
"except",
"ValueError",
":",
"continue",
"# plot mass numbers",
"if",
"imlabel",
":",
"for",
"z",
"in",
"range",
"(",
"nzmax",
")",
":",
"for",
"n",
"in",
"range",
"(",
"nnmax",
")",
":",
"a",
"=",
"z",
"+",
"n",
"if",
"nzycheck",
"[",
"n",
",",
"z",
",",
"iarr",
"]",
"==",
"1",
":",
"ax1",
".",
"text",
"(",
"n",
",",
"z",
",",
"a",
",",
"horizontalalignment",
"=",
"'center'",
",",
"verticalalignment",
"=",
"'center'",
",",
"fontsize",
"=",
"'x-small'",
",",
"clip_on",
"=",
"True",
")",
"# plot lines at magic numbers",
"if",
"imagic",
":",
"ixymagic",
"=",
"[",
"2",
",",
"8",
",",
"20",
",",
"28",
",",
"50",
",",
"82",
",",
"126",
"]",
"nmagic",
"=",
"len",
"(",
"ixymagic",
")",
"for",
"magic",
"in",
"ixymagic",
":",
"if",
"magic",
"<=",
"nzmax",
":",
"try",
":",
"xnmin",
"=",
"min",
"(",
"argwhere",
"(",
"nzycheck",
"[",
":",
",",
"magic",
",",
"iarr",
"]",
")",
")",
"[",
"0",
"]",
"xnmax",
"=",
"max",
"(",
"argwhere",
"(",
"nzycheck",
"[",
":",
",",
"magic",
",",
"iarr",
"]",
")",
")",
"[",
"0",
"]",
"line",
"=",
"ax1",
".",
"plot",
"(",
"[",
"xnmin",
",",
"xnmax",
"]",
",",
"[",
"magic",
",",
"magic",
"]",
",",
"lw",
"=",
"3.",
",",
"color",
"=",
"'r'",
",",
"ls",
"=",
"'-'",
")",
"except",
"ValueError",
":",
"dummy",
"=",
"0",
"if",
"magic",
"<=",
"nnmax",
":",
"try",
":",
"yzmin",
"=",
"min",
"(",
"argwhere",
"(",
"nzycheck",
"[",
"magic",
",",
":",
",",
"iarr",
"]",
")",
")",
"[",
"0",
"]",
"yzmax",
"=",
"max",
"(",
"argwhere",
"(",
"nzycheck",
"[",
"magic",
",",
":",
",",
"iarr",
"]",
")",
")",
"[",
"0",
"]",
"line",
"=",
"ax1",
".",
"plot",
"(",
"[",
"magic",
",",
"magic",
"]",
",",
"[",
"yzmin",
",",
"yzmax",
"]",
",",
"lw",
"=",
"3.",
",",
"color",
"=",
"'r'",
",",
"ls",
"=",
"'-'",
")",
"except",
"ValueError",
":",
"dummy",
"=",
"0",
"# set axis limits",
"if",
"plotaxis",
"==",
"[",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
":",
"xmax",
"=",
"max",
"(",
"nin",
")",
"ymax",
"=",
"max",
"(",
"zin",
")",
"ax1",
".",
"axis",
"(",
"[",
"-",
"0.5",
",",
"xmax",
"+",
"0.5",
",",
"-",
"0.5",
",",
"ymax",
"+",
"0.5",
"]",
")",
"else",
":",
"ax1",
".",
"axis",
"(",
"plotaxis",
")",
"# set x- and y-axis label",
"ax1",
".",
"set_ylabel",
"(",
"'Proton number'",
")",
"if",
"profile",
"==",
"'charged'",
":",
"ax1",
".",
"set_xlabel",
"(",
"'Neutron number'",
")",
"#pl.title('Isotopic Chart for cycle '+str(int(cycle)))",
"#",
"# here below I read data from the flux_*****.DAT file.",
"#",
"file_name",
"=",
"'flux_'",
"+",
"str",
"(",
"cycle",
")",
".",
"zfill",
"(",
"5",
")",
"+",
"'.DAT'",
"print",
"(",
"file_name",
")",
"f",
"=",
"open",
"(",
"file_name",
")",
"lines",
"=",
"f",
".",
"readline",
"(",
")",
"lines",
"=",
"f",
".",
"readlines",
"(",
")",
"f",
".",
"close",
"(",
")",
"print_max_flux_in_plot",
"=",
"False",
"# color map choice for fluxes",
"#cmapa = cm.jet",
"cmapa",
"=",
"cm",
".",
"autumn",
"# color map choice for arrows",
"cmapr",
"=",
"cm",
".",
"autumn",
"# starting point of arrow",
"coord_x_1",
"=",
"[",
"]",
"coord_y_1",
"=",
"[",
"]",
"# ending point of arrow (option 1)",
"coord_x_2",
"=",
"[",
"]",
"coord_y_2",
"=",
"[",
"]",
"# ending point of arrow (option 2)",
"coord_x_3",
"=",
"[",
"]",
"coord_y_3",
"=",
"[",
"]",
"# fluxes",
"flux_read",
"=",
"[",
"]",
"flux_log10",
"=",
"[",
"]",
"if",
"which_flux",
"==",
"None",
"or",
"which_flux",
"==",
"0",
":",
"print",
"(",
"'chart for nucleosynthesis fluxes [dYi/dt]'",
")",
"line_to_read",
"=",
"9",
"elif",
"which_flux",
"==",
"1",
":",
"print",
"(",
"'chart for energy fluxes'",
")",
"line_to_read",
"=",
"10",
"elif",
"which_flux",
">",
"1",
":",
"print",
"(",
"\"you have only option 0 or 1, not larger than 1\"",
")",
"single_line",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"lines",
")",
")",
":",
"single_line",
".",
"append",
"(",
"lines",
"[",
"i",
"]",
".",
"split",
"(",
")",
")",
"coord_y_1",
".",
"append",
"(",
"int",
"(",
"single_line",
"[",
"i",
"]",
"[",
"1",
"]",
")",
")",
"coord_x_1",
".",
"append",
"(",
"int",
"(",
"single_line",
"[",
"i",
"]",
"[",
"2",
"]",
")",
"-",
"coord_y_1",
"[",
"i",
"]",
")",
"coord_y_2",
".",
"append",
"(",
"int",
"(",
"single_line",
"[",
"i",
"]",
"[",
"5",
"]",
")",
")",
"coord_x_2",
".",
"append",
"(",
"int",
"(",
"single_line",
"[",
"i",
"]",
"[",
"6",
"]",
")",
"-",
"coord_y_2",
"[",
"i",
"]",
")",
"coord_y_3",
".",
"append",
"(",
"int",
"(",
"single_line",
"[",
"i",
"]",
"[",
"7",
"]",
")",
")",
"coord_x_3",
".",
"append",
"(",
"int",
"(",
"single_line",
"[",
"i",
"]",
"[",
"8",
"]",
")",
"-",
"coord_y_3",
"[",
"i",
"]",
")",
"try",
":",
"flux_read",
".",
"append",
"(",
"float",
"(",
"single_line",
"[",
"i",
"]",
"[",
"line_to_read",
"]",
")",
")",
"except",
"ValueError",
":",
"# this is done to avoid format issues like 3.13725-181...",
"flux_read",
".",
"append",
"(",
"1.0E-99",
")",
"flux_log10",
".",
"append",
"(",
"log10",
"(",
"flux_read",
"[",
"i",
"]",
"+",
"1.0e-99",
")",
")",
"print",
"(",
"file_name",
",",
"' read!'",
")",
"# I need to select smaller sample, with only fluxes inside plotaxis.",
"if",
"plotaxis",
"!=",
"[",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
":",
"coord_y_1_small",
"=",
"[",
"]",
"coord_x_1_small",
"=",
"[",
"]",
"coord_y_2_small",
"=",
"[",
"]",
"coord_x_2_small",
"=",
"[",
"]",
"coord_y_3_small",
"=",
"[",
"]",
"coord_x_3_small",
"=",
"[",
"]",
"flux_log10_small",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"flux_log10",
")",
")",
":",
"I_am_in",
"=",
"0",
"if",
"coord_y_1",
"[",
"i",
"]",
">",
"plotaxis",
"[",
"2",
"]",
"and",
"coord_y_1",
"[",
"i",
"]",
"<",
"plotaxis",
"[",
"3",
"]",
"and",
"coord_x_1",
"[",
"i",
"]",
">",
"plotaxis",
"[",
"0",
"]",
"and",
"coord_x_1",
"[",
"i",
"]",
"<",
"plotaxis",
"[",
"1",
"]",
":",
"I_am_in",
"=",
"1",
"coord_y_1_small",
".",
"append",
"(",
"int",
"(",
"coord_y_1",
"[",
"i",
"]",
")",
")",
"coord_x_1_small",
".",
"append",
"(",
"int",
"(",
"coord_x_1",
"[",
"i",
"]",
")",
")",
"coord_y_2_small",
".",
"append",
"(",
"int",
"(",
"coord_y_2",
"[",
"i",
"]",
")",
")",
"coord_x_2_small",
".",
"append",
"(",
"int",
"(",
"coord_x_2",
"[",
"i",
"]",
")",
")",
"coord_y_3_small",
".",
"append",
"(",
"int",
"(",
"coord_y_3",
"[",
"i",
"]",
")",
")",
"coord_x_3_small",
".",
"append",
"(",
"int",
"(",
"coord_x_3",
"[",
"i",
"]",
")",
")",
"flux_log10_small",
".",
"append",
"(",
"flux_log10",
"[",
"i",
"]",
")",
"if",
"coord_y_3",
"[",
"i",
"]",
">",
"plotaxis",
"[",
"2",
"]",
"and",
"coord_y_3",
"[",
"i",
"]",
"<",
"plotaxis",
"[",
"3",
"]",
"and",
"coord_x_3",
"[",
"i",
"]",
">",
"plotaxis",
"[",
"0",
"]",
"and",
"coord_x_3",
"[",
"i",
"]",
"<",
"plotaxis",
"[",
"1",
"]",
"and",
"I_am_in",
"==",
"0",
":",
"I_am_in",
"=",
"1",
"coord_y_1_small",
".",
"append",
"(",
"int",
"(",
"coord_y_1",
"[",
"i",
"]",
")",
")",
"coord_x_1_small",
".",
"append",
"(",
"int",
"(",
"coord_x_1",
"[",
"i",
"]",
")",
")",
"coord_y_2_small",
".",
"append",
"(",
"int",
"(",
"coord_y_2",
"[",
"i",
"]",
")",
")",
"coord_x_2_small",
".",
"append",
"(",
"int",
"(",
"coord_x_2",
"[",
"i",
"]",
")",
")",
"coord_y_3_small",
".",
"append",
"(",
"int",
"(",
"coord_y_3",
"[",
"i",
"]",
")",
")",
"coord_x_3_small",
".",
"append",
"(",
"int",
"(",
"coord_x_3",
"[",
"i",
"]",
")",
")",
"flux_log10_small",
".",
"append",
"(",
"flux_log10",
"[",
"i",
"]",
")",
"# elemental labels off/on [0/1]",
"ilabel",
"=",
"1",
"# label for isotopic masses off/on [0/1]",
"imlabel",
"=",
"1",
"# turn lines for magic numbers off/on [0/1]",
"imagic",
"=",
"0",
"# flow is plotted over \"prange\" dex. If flow < maxflow-prange it is not plotted",
"if",
"prange",
"==",
"None",
":",
"print",
"(",
"'plot range given by default'",
")",
"prange",
"=",
"8.",
"#############################################",
"#print flux_log10_small",
"# we should scale prange on plot_axis range, not on max_flux!",
"max_flux",
"=",
"max",
"(",
"flux_log10",
")",
"ind_max_flux",
"=",
"flux_log10",
".",
"index",
"(",
"max_flux",
")",
"if",
"plotaxis",
"!=",
"[",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
":",
"max_flux_small",
"=",
"max",
"(",
"flux_log10_small",
")",
"if",
"plotaxis",
"==",
"[",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
":",
"nzmax",
"=",
"int",
"(",
"max",
"(",
"max",
"(",
"coord_y_1",
")",
",",
"max",
"(",
"coord_y_2",
")",
",",
"max",
"(",
"coord_y_3",
")",
")",
")",
"+",
"1",
"nnmax",
"=",
"int",
"(",
"max",
"(",
"max",
"(",
"coord_x_1",
")",
",",
"max",
"(",
"coord_x_2",
")",
",",
"max",
"(",
"coord_x_3",
")",
")",
")",
"+",
"1",
"coord_x_1_small",
"=",
"coord_x_1",
"coord_x_2_small",
"=",
"coord_x_2",
"coord_x_3_small",
"=",
"coord_x_3",
"coord_y_1_small",
"=",
"coord_y_1",
"coord_y_2_small",
"=",
"coord_y_2",
"coord_y_3_small",
"=",
"coord_y_3",
"flux_log10_small",
"=",
"flux_log10",
"max_flux_small",
"=",
"max_flux",
"else",
":",
"nzmax",
"=",
"int",
"(",
"max",
"(",
"max",
"(",
"coord_y_1_small",
")",
",",
"max",
"(",
"coord_y_2_small",
")",
",",
"max",
"(",
"coord_y_3_small",
")",
")",
")",
"+",
"1",
"nnmax",
"=",
"int",
"(",
"max",
"(",
"max",
"(",
"coord_x_1_small",
")",
",",
"max",
"(",
"coord_x_2_small",
")",
",",
"max",
"(",
"coord_x_3_small",
")",
")",
")",
"+",
"1",
"for",
"i",
"in",
"range",
"(",
"nzmax",
")",
":",
"for",
"j",
"in",
"range",
"(",
"nnmax",
")",
":",
"if",
"nzycheck",
"[",
"j",
",",
"i",
",",
"0",
"]",
"==",
"1",
":",
"xy",
"=",
"j",
"-",
"0.5",
",",
"i",
"-",
"0.5",
"rect",
"=",
"Rectangle",
"(",
"xy",
",",
"1",
",",
"1",
",",
")",
"patches",
".",
"append",
"(",
"rect",
")",
"nzycheck",
"=",
"zeros",
"(",
"[",
"nnmax_plot",
",",
"nzmax",
",",
"3",
"]",
")",
"coord_x_out",
"=",
"zeros",
"(",
"len",
"(",
"coord_x_2_small",
")",
",",
"dtype",
"=",
"'int'",
")",
"coord_y_out",
"=",
"zeros",
"(",
"len",
"(",
"coord_y_2_small",
")",
",",
"dtype",
"=",
"'int'",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"flux_log10_small",
")",
")",
":",
"nzycheck",
"[",
"coord_x_1_small",
"[",
"i",
"]",
",",
"coord_y_1_small",
"[",
"i",
"]",
",",
"0",
"]",
"=",
"1",
"nzycheck",
"[",
"coord_x_1_small",
"[",
"i",
"]",
",",
"coord_y_1_small",
"[",
"i",
"]",
",",
"1",
"]",
"=",
"flux_log10_small",
"[",
"i",
"]",
"if",
"coord_x_2_small",
"[",
"i",
"]",
">=",
"coord_x_3_small",
"[",
"i",
"]",
":",
"coord_x_out",
"[",
"i",
"]",
"=",
"coord_x_2_small",
"[",
"i",
"]",
"coord_y_out",
"[",
"i",
"]",
"=",
"coord_y_2_small",
"[",
"i",
"]",
"nzycheck",
"[",
"coord_x_out",
"[",
"i",
"]",
",",
"coord_y_out",
"[",
"i",
"]",
",",
"0",
"]",
"=",
"1",
"nzycheck",
"[",
"coord_x_out",
"[",
"i",
"]",
",",
"coord_y_out",
"[",
"i",
"]",
",",
"1",
"]",
"=",
"flux_log10_small",
"[",
"i",
"]",
"elif",
"coord_x_2_small",
"[",
"i",
"]",
"<",
"coord_x_3_small",
"[",
"i",
"]",
":",
"coord_x_out",
"[",
"i",
"]",
"=",
"coord_x_3_small",
"[",
"i",
"]",
"coord_y_out",
"[",
"i",
"]",
"=",
"coord_y_3_small",
"[",
"i",
"]",
"nzycheck",
"[",
"coord_x_out",
"[",
"i",
"]",
",",
"coord_y_out",
"[",
"i",
"]",
",",
"0",
"]",
"=",
"1",
"nzycheck",
"[",
"coord_x_out",
"[",
"i",
"]",
",",
"coord_y_out",
"[",
"i",
"]",
",",
"1",
"]",
"=",
"flux_log10_small",
"[",
"i",
"]",
"if",
"flux_log10_small",
"[",
"i",
"]",
">",
"max_flux_small",
"-",
"prange",
":",
"nzycheck",
"[",
"coord_x_1_small",
"[",
"i",
"]",
",",
"coord_y_1_small",
"[",
"i",
"]",
",",
"2",
"]",
"=",
"1",
"nzycheck",
"[",
"coord_x_out",
"[",
"i",
"]",
",",
"coord_y_out",
"[",
"i",
"]",
",",
"2",
"]",
"=",
"1",
"#### create plot",
"if",
"profile",
"==",
"'charged'",
":",
"ax2",
"=",
"fig",
".",
"add_subplot",
"(",
"1",
",",
"2",
",",
"2",
")",
"elif",
"profile",
"==",
"'neutron'",
":",
"ax2",
"=",
"fig",
".",
"add_subplot",
"(",
"2",
",",
"1",
",",
"2",
")",
"# Tick marks",
"xminorlocator",
"=",
"MultipleLocator",
"(",
"1",
")",
"xmajorlocator",
"=",
"MultipleLocator",
"(",
"5",
")",
"ax2",
".",
"xaxis",
".",
"set_major_locator",
"(",
"xmajorlocator",
")",
"ax2",
".",
"xaxis",
".",
"set_minor_locator",
"(",
"xminorlocator",
")",
"yminorlocator",
"=",
"MultipleLocator",
"(",
"1",
")",
"ymajorlocator",
"=",
"MultipleLocator",
"(",
"5",
")",
"ax2",
".",
"yaxis",
".",
"set_major_locator",
"(",
"ymajorlocator",
")",
"ax2",
".",
"yaxis",
".",
"set_minor_locator",
"(",
"yminorlocator",
")",
"## define axis and plot style (colormap, size, fontsize etc.)",
"if",
"plotaxis",
"==",
"[",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
":",
"xdim",
"=",
"10",
"ydim",
"=",
"6",
"else",
":",
"dx",
"=",
"plotaxis",
"[",
"1",
"]",
"-",
"plotaxis",
"[",
"0",
"]",
"dy",
"=",
"plotaxis",
"[",
"3",
"]",
"-",
"plotaxis",
"[",
"2",
"]",
"ydim",
"=",
"6",
"xdim",
"=",
"ydim",
"*",
"dx",
"/",
"dy",
"format",
"=",
"'pdf'",
"# set x- and y-axis scale aspect ratio to 1",
"#ax2.set_aspect('equal')",
"# Add black frames for stable isotopes",
"# Add black frames for stable isotopes",
"if",
"boxstable",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"stable_el",
")",
")",
":",
"if",
"i",
"==",
"0",
":",
"continue",
"tmp",
"=",
"self",
".",
"stable_el",
"[",
"i",
"]",
"try",
":",
"zz",
"=",
"self",
".",
"elements_names",
".",
"index",
"(",
"tmp",
"[",
"0",
"]",
")",
"#charge",
"except",
":",
"continue",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"tmp",
")",
")",
":",
"if",
"j",
"==",
"0",
":",
"continue",
"nn",
"=",
"int",
"(",
"tmp",
"[",
"j",
"]",
")",
"#atomic mass",
"nn",
"=",
"nn",
"-",
"zz",
"xy",
"=",
"nn",
"-",
"0.5",
",",
"zz",
"-",
"0.5",
"rect",
"=",
"Rectangle",
"(",
"xy",
",",
"1",
",",
"1",
",",
"ec",
"=",
"'k'",
",",
"fc",
"=",
"'None'",
",",
"fill",
"=",
"'False'",
",",
"lw",
"=",
"4.",
")",
"rect",
".",
"set_zorder",
"(",
"2",
")",
"ax2",
".",
"add_patch",
"(",
"rect",
")",
"apatches",
"=",
"[",
"]",
"acolor",
"=",
"[",
"]",
"m",
"=",
"old_div",
"(",
"0.8",
",",
"prange",
")",
"vmax",
"=",
"ceil",
"(",
"max",
"(",
"flux_log10_small",
")",
")",
"vmin",
"=",
"max",
"(",
"flux_log10_small",
")",
"-",
"prange",
"b",
"=",
"-",
"vmin",
"*",
"m",
"+",
"0.1",
"normr",
"=",
"colors",
".",
"Normalize",
"(",
"vmin",
"=",
"vmin",
",",
"vmax",
"=",
"vmax",
")",
"ymax",
"=",
"0.",
"xmax",
"=",
"0.",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"flux_log10_small",
")",
")",
":",
"x",
"=",
"coord_x_1_small",
"[",
"i",
"]",
"y",
"=",
"coord_y_1_small",
"[",
"i",
"]",
"dx",
"=",
"coord_x_out",
"[",
"i",
"]",
"-",
"coord_x_1_small",
"[",
"i",
"]",
"dy",
"=",
"coord_y_out",
"[",
"i",
"]",
"-",
"coord_y_1_small",
"[",
"i",
"]",
"if",
"flux_log10_small",
"[",
"i",
"]",
">=",
"vmin",
":",
"arrowwidth",
"=",
"flux_log10_small",
"[",
"i",
"]",
"*",
"m",
"+",
"b",
"arrow",
"=",
"Arrow",
"(",
"x",
",",
"y",
",",
"dx",
",",
"dy",
",",
"width",
"=",
"arrowwidth",
")",
"if",
"xmax",
"<",
"x",
":",
"xmax",
"=",
"x",
"if",
"ymax",
"<",
"y",
":",
"ymax",
"=",
"y",
"acol",
"=",
"flux_log10_small",
"[",
"i",
"]",
"apatches",
".",
"append",
"(",
"arrow",
")",
"acolor",
".",
"append",
"(",
"acol",
")",
"xy",
"=",
"x",
"-",
"0.5",
",",
"y",
"-",
"0.5",
"rect",
"=",
"Rectangle",
"(",
"xy",
",",
"1",
",",
"1",
",",
"ec",
"=",
"'k'",
",",
"fc",
"=",
"'None'",
",",
"fill",
"=",
"'False'",
",",
"lw",
"=",
"1.",
")",
"patches",
".",
"append",
"(",
"rect",
")",
"xy",
"=",
"x",
"+",
"dx",
"-",
"0.5",
",",
"y",
"+",
"dy",
"-",
"0.5",
"rect",
"=",
"Rectangle",
"(",
"xy",
",",
"1",
",",
"1",
",",
"ec",
"=",
"'k'",
",",
"fc",
"=",
"'None'",
",",
"fill",
"=",
"'False'",
",",
"lw",
"=",
"1.",
")",
"patches",
".",
"append",
"(",
"rect",
")",
"p",
"=",
"PatchCollection",
"(",
"patches",
",",
"norm",
"=",
"0",
",",
"facecolor",
"=",
"'w'",
")",
"p",
".",
"set_zorder",
"(",
"1",
")",
"ax2",
".",
"add_collection",
"(",
"p",
")",
"a",
"=",
"PatchCollection",
"(",
"apatches",
",",
"cmap",
"=",
"cmapr",
",",
"norm",
"=",
"normr",
")",
"a",
".",
"set_array",
"(",
"array",
"(",
"acolor",
")",
")",
"a",
".",
"set_zorder",
"(",
"3",
")",
"ax2",
".",
"add_collection",
"(",
"a",
")",
"cb",
"=",
"pl",
".",
"colorbar",
"(",
"a",
")",
"# colorbar label",
"cb",
".",
"set_label",
"(",
"'log$_{10}$($x$)'",
")",
"if",
"profile",
"==",
"'neutron'",
":",
"cb",
".",
"set_label",
"(",
"'log$_{10}$(f)'",
")",
"# decide which array to take for label positions",
"iarr",
"=",
"2",
"# plot element labels",
"for",
"z",
"in",
"range",
"(",
"nzmax",
")",
":",
"try",
":",
"nmin",
"=",
"min",
"(",
"argwhere",
"(",
"nzycheck_plot",
"[",
":",
",",
"z",
",",
"iarr",
"-",
"2",
"]",
")",
")",
"[",
"0",
"]",
"-",
"1",
"nmax",
"=",
"max",
"(",
"argwhere",
"(",
"nzycheck_plot",
"[",
":",
",",
"z",
",",
"iarr",
"-",
"2",
"]",
")",
")",
"[",
"0",
"]",
"+",
"1",
"ax2",
".",
"text",
"(",
"nmin",
",",
"z",
",",
"elname",
"[",
"z",
"]",
",",
"horizontalalignment",
"=",
"'center'",
",",
"verticalalignment",
"=",
"'center'",
",",
"fontsize",
"=",
"'small'",
",",
"clip_on",
"=",
"True",
")",
"ax2",
".",
"text",
"(",
"nmax",
",",
"z",
",",
"elname",
"[",
"z",
"]",
",",
"horizontalalignment",
"=",
"'center'",
",",
"verticalalignment",
"=",
"'center'",
",",
"fontsize",
"=",
"'small'",
",",
"clip_on",
"=",
"True",
")",
"except",
"ValueError",
":",
"continue",
"# plot mass numbers",
"if",
"imlabel",
":",
"for",
"z",
"in",
"range",
"(",
"nzmax",
")",
":",
"for",
"n",
"in",
"range",
"(",
"nnmax_plot",
")",
":",
"a",
"=",
"z",
"+",
"n",
"if",
"nzycheck_plot",
"[",
"n",
",",
"z",
",",
"iarr",
"-",
"2",
"]",
"==",
"1",
":",
"ax2",
".",
"text",
"(",
"n",
",",
"z",
",",
"a",
",",
"horizontalalignment",
"=",
"'center'",
",",
"verticalalignment",
"=",
"'center'",
",",
"fontsize",
"=",
"'x-small'",
",",
"clip_on",
"=",
"True",
")",
"# plot lines at magic numbers",
"if",
"imagic",
"==",
"1",
":",
"ixymagic",
"=",
"[",
"2",
",",
"8",
",",
"20",
",",
"28",
",",
"50",
",",
"82",
",",
"126",
"]",
"nmagic",
"=",
"len",
"(",
"ixymagic",
")",
"for",
"magic",
"in",
"ixymagic",
":",
"if",
"magic",
"<=",
"nzmax",
":",
"try",
":",
"xnmin",
"=",
"min",
"(",
"argwhere",
"(",
"nzycheck",
"[",
":",
",",
"magic",
",",
"iarr",
"-",
"2",
"]",
")",
")",
"[",
"0",
"]",
"xnmax",
"=",
"max",
"(",
"argwhere",
"(",
"nzycheck",
"[",
":",
",",
"magic",
",",
"iarr",
"-",
"2",
"]",
")",
")",
"[",
"0",
"]",
"line",
"=",
"ax2",
".",
"plot",
"(",
"[",
"xnmin",
",",
"xnmax",
"]",
",",
"[",
"magic",
",",
"magic",
"]",
",",
"lw",
"=",
"3.",
",",
"color",
"=",
"'r'",
",",
"ls",
"=",
"'-'",
")",
"except",
"ValueError",
":",
"dummy",
"=",
"0",
"if",
"magic",
"<=",
"nnmax",
":",
"try",
":",
"yzmin",
"=",
"min",
"(",
"argwhere",
"(",
"nzycheck",
"[",
"magic",
",",
":",
",",
"iarr",
"-",
"2",
"]",
")",
")",
"[",
"0",
"]",
"yzmax",
"=",
"max",
"(",
"argwhere",
"(",
"nzycheck",
"[",
"magic",
",",
":",
",",
"iarr",
"-",
"2",
"]",
")",
")",
"[",
"0",
"]",
"line",
"=",
"ax2",
".",
"plot",
"(",
"[",
"magic",
",",
"magic",
"]",
",",
"[",
"yzmin",
",",
"yzmax",
"]",
",",
"lw",
"=",
"3.",
",",
"color",
"=",
"'r'",
",",
"ls",
"=",
"'-'",
")",
"except",
"ValueError",
":",
"dummy",
"=",
"0",
"# set axis limits",
"if",
"plotaxis",
"==",
"[",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
":",
"ax2",
".",
"axis",
"(",
"[",
"-",
"0.5",
",",
"xmax",
"+",
"0.5",
",",
"-",
"0.5",
",",
"ymax",
"+",
"0.5",
"]",
")",
"else",
":",
"ax2",
".",
"axis",
"(",
"plotaxis",
")",
"# set x- and y-axis label",
"ax2",
".",
"set_xlabel",
"(",
"'Neutron number'",
")",
"if",
"profile",
"==",
"'neutron'",
":",
"ax2",
".",
"set_ylabel",
"(",
"'Proton number'",
")",
"if",
"which_flux",
"==",
"None",
"or",
"which_flux",
"==",
"0",
":",
"max_flux_label",
"=",
"\"max flux = \"",
"+",
"str",
"(",
"'{0:.4f}'",
".",
"format",
"(",
"max_flux",
")",
")",
"elif",
"which_flux",
"==",
"1",
":",
"max_flux_label",
"=",
"\"max energy flux = \"",
"+",
"str",
"(",
"'{0:.4f}'",
".",
"format",
"(",
"max_flux",
")",
")",
"if",
"print_max_flux_in_plot",
":",
"ax2",
".",
"text",
"(",
"plotaxis",
"[",
"1",
"]",
"-",
"1.8",
",",
"plotaxis",
"[",
"2",
"]",
"+",
"0.1",
",",
"max_flux_label",
",",
"fontsize",
"=",
"10.",
")",
"#fig.savefig(graphname)",
"print",
"(",
"graphname",
",",
"'is done'",
")",
"if",
"show",
":",
"pl",
".",
"show",
"(",
")",
"if",
"turnoff",
":",
"ion",
"(",
")",
"return"
] | Plots an abundance and flux chart
Parameters
----------
cycle : string, integer or list
The cycle we are looking in. If it is a list of cycles,
this method will then do a plot for each of these cycles
and save them all to a file.
ilabel : boolean, optional
Elemental labels off/on. The default is True.
imlabel : boolean, optional
Label for isotopic masses off/on. The default is True.
imagic : boolean, optional
Turn lines for magic numbers off/on. The default is False.
boxstable : boolean, optional
Plot the black boxes around the stable elements. The
default is True.
lbound : tuple, optional
Boundaries for colour spectrum plotted. The default is
(-12,0).
plotaxis : list, optional
Set axis limits. If [0, 0, 0, 0] the complete range in (N,Z)
will be plotted. It equates to [xMin, xMax, yMin, yMax].
The default is [0, 0, 0, 0].
which_flux : integer, optional
Set to 0 for a nucleosynthesis flux plot. Set to 1 for an
energy flux plot. Setting which_flux to None is equivalent
to setting it to 0. The default is None.
prange : integer, optional
Range of fluxes to be considered. If prange is None, then
the plot range is set to 8. The default is None.
profile : string, optional
'charged' is the ideal setting to show charged-particle
reaction flows. 'neutron' is the ideal setting for
neutron-capture flows. The default is 'charged'.
show : boolean, optional
Whether the plot should be displayed. Useful when
saving multiple plots using abu_chartMulti. The default is
True. | [
"Plots",
"an",
"abundance",
"and",
"flux",
"chart"
] | python | train |
apache/airflow | airflow/hooks/druid_hook.py | https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/druid_hook.py#L127-L139 | def get_conn(self):
"""
Establish a connection to druid broker.
"""
conn = self.get_connection(self.druid_broker_conn_id)
druid_broker_conn = connect(
host=conn.host,
port=conn.port,
path=conn.extra_dejson.get('endpoint', '/druid/v2/sql'),
scheme=conn.extra_dejson.get('schema', 'http')
)
self.log.info('Get the connection to druid broker on %s', conn.host)
return druid_broker_conn | [
"def",
"get_conn",
"(",
"self",
")",
":",
"conn",
"=",
"self",
".",
"get_connection",
"(",
"self",
".",
"druid_broker_conn_id",
")",
"druid_broker_conn",
"=",
"connect",
"(",
"host",
"=",
"conn",
".",
"host",
",",
"port",
"=",
"conn",
".",
"port",
",",
"path",
"=",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'endpoint'",
",",
"'/druid/v2/sql'",
")",
",",
"scheme",
"=",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'schema'",
",",
"'http'",
")",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Get the connection to druid broker on %s'",
",",
"conn",
".",
"host",
")",
"return",
"druid_broker_conn"
] | Establish a connection to druid broker. | [
"Establish",
"a",
"connection",
"to",
"druid",
"broker",
"."
] | python | test |
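A minimal usage sketch for the broker hook above, assuming the surrounding class is Airflow's DruidDbApiHook and that a connection with id 'druid_broker_default' exists in the Airflow metadata; the connection id and the queried datasource name are illustrative, not part of this record.

from airflow.hooks.druid_hook import DruidDbApiHook

hook = DruidDbApiHook(druid_broker_conn_id='druid_broker_default')
conn = hook.get_conn()                               # DB-API connection to the Druid broker
cursor = conn.cursor()
cursor.execute('SELECT COUNT(*) FROM wikipedia')     # datasource name is illustrative
print(cursor.fetchall())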
beathan/django-akamai | django_akamai/purge.py | https://github.com/beathan/django-akamai/blob/00cab2dd5fab3745742721185e75a55a5c26fe7e/django_akamai/purge.py#L149-L167 | def add(self, urls):
"""
Add the provided urls to this purge request
The urls argument can be a single string, a list of strings, a queryset
or model instance. Models must implement `get_absolute_url()`.
"""
if isinstance(urls, (list, tuple)):
self.urls.extend(urls)
elif isinstance(urls, basestring):
self.urls.append(urls)
elif isinstance(urls, QuerySet):
for obj in urls:
self.urls.append(obj.get_absolute_url())
elif hasattr(urls, 'get_absolute_url'):
self.urls.append(urls.get_absolute_url())
else:
raise TypeError("Don't know how to handle %r" % urls) | [
"def",
"add",
"(",
"self",
",",
"urls",
")",
":",
"if",
"isinstance",
"(",
"urls",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"self",
".",
"urls",
".",
"extend",
"(",
"urls",
")",
"elif",
"isinstance",
"(",
"urls",
",",
"basestring",
")",
":",
"self",
".",
"urls",
".",
"append",
"(",
"urls",
")",
"elif",
"isinstance",
"(",
"urls",
",",
"QuerySet",
")",
":",
"for",
"obj",
"in",
"urls",
":",
"self",
".",
"urls",
".",
"append",
"(",
"obj",
".",
"get_absolute_url",
"(",
")",
")",
"elif",
"hasattr",
"(",
"urls",
",",
"'get_absolute_url'",
")",
":",
"self",
".",
"urls",
".",
"append",
"(",
"urls",
".",
"get_absolute_url",
"(",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Don't know how to handle %r\"",
"%",
"urls",
")"
] | Add the provided urls to this purge request
The urls argument can be a single string, a list of strings, a queryset
or model instance. Models must implement `get_absolute_url()`. | [
"Add",
"the",
"provided",
"urls",
"to",
"this",
"purge",
"request"
] | python | train |
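A short sketch of the input types accepted by add(), assuming the containing class is PurgeRequest from django_akamai.purge and that Akamai credentials are supplied via Django settings; NewsItem is a hypothetical model implementing get_absolute_url().

from django_akamai.purge import PurgeRequest   # class name assumed from the module

pr = PurgeRequest()
pr.add('http://example.com/some/page/')                        # a single URL string
pr.add(['http://example.com/a/', 'http://example.com/b/'])     # a list of URLs
# Querysets and model instances also work when get_absolute_url() is defined:
# pr.add(NewsItem.objects.filter(published=True))
# pr.add(news_item)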
numenta/nupic | src/nupic/encoders/logarithm.py | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/logarithm.py#L261-L289 | def closenessScores(self, expValues, actValues, fractional=True):
"""
See the function description in base.py
"""
# Compute the percent error in log space
if expValues[0] > 0:
expValue = math.log10(expValues[0])
else:
expValue = self.minScaledValue
if actValues [0] > 0:
actValue = math.log10(actValues[0])
else:
actValue = self.minScaledValue
if fractional:
err = abs(expValue - actValue)
pctErr = err / (self.maxScaledValue - self.minScaledValue)
pctErr = min(1.0, pctErr)
closeness = 1.0 - pctErr
else:
err = abs(expValue - actValue)
closeness = err
#print "log::", "expValue:", expValues[0], "actValue:", actValues[0], \
# "closeness", closeness
#import pdb; pdb.set_trace()
return numpy.array([closeness]) | [
"def",
"closenessScores",
"(",
"self",
",",
"expValues",
",",
"actValues",
",",
"fractional",
"=",
"True",
")",
":",
"# Compute the percent error in log space",
"if",
"expValues",
"[",
"0",
"]",
">",
"0",
":",
"expValue",
"=",
"math",
".",
"log10",
"(",
"expValues",
"[",
"0",
"]",
")",
"else",
":",
"expValue",
"=",
"self",
".",
"minScaledValue",
"if",
"actValues",
"[",
"0",
"]",
">",
"0",
":",
"actValue",
"=",
"math",
".",
"log10",
"(",
"actValues",
"[",
"0",
"]",
")",
"else",
":",
"actValue",
"=",
"self",
".",
"minScaledValue",
"if",
"fractional",
":",
"err",
"=",
"abs",
"(",
"expValue",
"-",
"actValue",
")",
"pctErr",
"=",
"err",
"/",
"(",
"self",
".",
"maxScaledValue",
"-",
"self",
".",
"minScaledValue",
")",
"pctErr",
"=",
"min",
"(",
"1.0",
",",
"pctErr",
")",
"closeness",
"=",
"1.0",
"-",
"pctErr",
"else",
":",
"err",
"=",
"abs",
"(",
"expValue",
"-",
"actValue",
")",
"closeness",
"=",
"err",
"#print \"log::\", \"expValue:\", expValues[0], \"actValue:\", actValues[0], \\",
"# \"closeness\", closeness",
"#import pdb; pdb.set_trace()",
"return",
"numpy",
".",
"array",
"(",
"[",
"closeness",
"]",
")"
] | See the function description in base.py | [
"See",
"the",
"function",
"description",
"in",
"base",
".",
"py"
] | python | valid |
xolox/python-update-dotdee | update_dotdee/__init__.py | https://github.com/xolox/python-update-dotdee/blob/04d5836f0d217e32778745b533beeb8159d80c32/update_dotdee/__init__.py#L478-L492 | def inject_documentation(**options):
"""
Generate configuration documentation in reStructuredText_ syntax.
:param options: Any keyword arguments are passed on to the
:class:`ConfigLoader` initializer.
This methods injects the generated documentation into the output generated
by cog_.
.. _cog: https://pypi.python.org/pypi/cogapp
"""
import cog
loader = ConfigLoader(**options)
cog.out("\n" + loader.documentation + "\n\n") | [
"def",
"inject_documentation",
"(",
"*",
"*",
"options",
")",
":",
"import",
"cog",
"loader",
"=",
"ConfigLoader",
"(",
"*",
"*",
"options",
")",
"cog",
".",
"out",
"(",
"\"\\n\"",
"+",
"loader",
".",
"documentation",
"+",
"\"\\n\\n\"",
")"
] | Generate configuration documentation in reStructuredText_ syntax.
:param options: Any keyword arguments are passed on to the
:class:`ConfigLoader` initializer.
This methods injects the generated documentation into the output generated
by cog_.
.. _cog: https://pypi.python.org/pypi/cogapp | [
"Generate",
"configuration",
"documentation",
"in",
"reStructuredText_",
"syntax",
"."
] | python | train |
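inject_documentation() is intended to run inside a cog block embedded in a reStructuredText file rather than as a plain import; the sketch below shows such a block as comments, and the program_name keyword is an assumption about what ConfigLoader accepts.

# Inside README.rst (or any reST file processed with `cog -r README.rst`):
#
#   .. [[[cog
#      from update_dotdee import inject_documentation
#      inject_documentation(program_name='my-program')
#      ]]]
#   .. [[[end]]]
#
# Running cog replaces the block with the generated configuration documentation.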
tensorflow/datasets | tensorflow_datasets/core/utils/tqdm_utils.py | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tqdm_utils.py#L120-L124 | def update(self, n=1):
"""Increment current value."""
with self._lock:
self._pbar.update(n)
self.refresh() | [
"def",
"update",
"(",
"self",
",",
"n",
"=",
"1",
")",
":",
"with",
"self",
".",
"_lock",
":",
"self",
".",
"_pbar",
".",
"update",
"(",
"n",
")",
"self",
".",
"refresh",
"(",
")"
] | Increment current value. | [
"Increment",
"current",
"value",
"."
] | python | train |
bykof/billomapy | billomapy/billomapy.py | https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L2046-L2055 | def get_offers_per_page(self, per_page=1000, page=1, params=None):
"""
Get offers per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
"""
return self._get_resource_per_page(resource=OFFERS, per_page=per_page, page=page, params=params) | [
"def",
"get_offers_per_page",
"(",
"self",
",",
"per_page",
"=",
"1000",
",",
"page",
"=",
"1",
",",
"params",
"=",
"None",
")",
":",
"return",
"self",
".",
"_get_resource_per_page",
"(",
"resource",
"=",
"OFFERS",
",",
"per_page",
"=",
"per_page",
",",
"page",
"=",
"page",
",",
"params",
"=",
"params",
")"
] | Get offers per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list | [
"Get",
"offers",
"per",
"page"
] | python | train |
maweigert/gputools | gputools/fft/fftshift.py | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/fft/fftshift.py#L27-L80 | def fftshift(arr_obj, axes = None, res_g = None, return_buffer = False):
"""
gpu version of fftshift for numpy arrays or OCLArrays
Parameters
----------
arr_obj: numpy array or OCLArray (float32/complex64)
the array to be fftshifted
axes: list or None
the axes over which to shift (like np.fft.fftshift)
if None, all axes are taken
res_g:
if given, fills it with the result (has to be same shape and dtype as arr_obj)
else internally creates a new one
Returns
-------
if return_buffer, returns the result as (well :) OCLArray
else returns the result as numpy array
"""
if axes is None:
axes = list(range(arr_obj.ndim))
if isinstance(arr_obj, OCLArray):
if not arr_obj.dtype.type in DTYPE_KERNEL_NAMES:
raise NotImplementedError("only works for float32 or complex64")
elif isinstance(arr_obj, np.ndarray):
if np.iscomplexobj(arr_obj):
arr_obj = OCLArray.from_array(arr_obj.astype(np.complex64,copy = False))
else:
arr_obj = OCLArray.from_array(arr_obj.astype(np.float32,copy = False))
else:
raise ValueError("unknown type (%s)"%(type(arr_obj)))
if not np.all([arr_obj.shape[a]%2==0 for a in axes]):
raise NotImplementedError("only works on axes of even dimensions")
if res_g is None:
res_g = OCLArray.empty_like(arr_obj)
# iterate over all axes
# FIXME: this is still rather inefficient
in_g = arr_obj
for ax in axes:
_fftshift_single(in_g, res_g, ax)
in_g = res_g
if return_buffer:
return res_g
else:
return res_g.get() | [
"def",
"fftshift",
"(",
"arr_obj",
",",
"axes",
"=",
"None",
",",
"res_g",
"=",
"None",
",",
"return_buffer",
"=",
"False",
")",
":",
"if",
"axes",
"is",
"None",
":",
"axes",
"=",
"list",
"(",
"range",
"(",
"arr_obj",
".",
"ndim",
")",
")",
"if",
"isinstance",
"(",
"arr_obj",
",",
"OCLArray",
")",
":",
"if",
"not",
"arr_obj",
".",
"dtype",
".",
"type",
"in",
"DTYPE_KERNEL_NAMES",
":",
"raise",
"NotImplementedError",
"(",
"\"only works for float32 or complex64\"",
")",
"elif",
"isinstance",
"(",
"arr_obj",
",",
"np",
".",
"ndarray",
")",
":",
"if",
"np",
".",
"iscomplexobj",
"(",
"arr_obj",
")",
":",
"arr_obj",
"=",
"OCLArray",
".",
"from_array",
"(",
"arr_obj",
".",
"astype",
"(",
"np",
".",
"complex64",
",",
"copy",
"=",
"False",
")",
")",
"else",
":",
"arr_obj",
"=",
"OCLArray",
".",
"from_array",
"(",
"arr_obj",
".",
"astype",
"(",
"np",
".",
"float32",
",",
"copy",
"=",
"False",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"unknown type (%s)\"",
"%",
"(",
"type",
"(",
"arr_obj",
")",
")",
")",
"if",
"not",
"np",
".",
"all",
"(",
"[",
"arr_obj",
".",
"shape",
"[",
"a",
"]",
"%",
"2",
"==",
"0",
"for",
"a",
"in",
"axes",
"]",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"only works on axes of even dimensions\"",
")",
"if",
"res_g",
"is",
"None",
":",
"res_g",
"=",
"OCLArray",
".",
"empty_like",
"(",
"arr_obj",
")",
"# iterate over all axes",
"# FIXME: this is still rather inefficient",
"in_g",
"=",
"arr_obj",
"for",
"ax",
"in",
"axes",
":",
"_fftshift_single",
"(",
"in_g",
",",
"res_g",
",",
"ax",
")",
"in_g",
"=",
"res_g",
"if",
"return_buffer",
":",
"return",
"res_g",
"else",
":",
"return",
"res_g",
".",
"get",
"(",
")"
] | gpu version of fftshift for numpy arrays or OCLArrays
Parameters
----------
arr_obj: numpy array or OCLArray (float32/complex64)
the array to be fftshifted
axes: list or None
the axes over which to shift (like np.fft.fftshift)
if None, all axes are taken
res_g:
if given, fills it with the result (has to be same shape and dtype as arr_obj)
else internally creates a new one
Returns
-------
if return_buffer, returns the result as (well :) OCLArray
else returns the result as numpy array | [
"gpu",
"version",
"of",
"fftshift",
"for",
"numpy",
"arrays",
"or",
"OCLArrays"
] | python | train |
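A small usage sketch, assuming an OpenCL context is available and that the function is importable from the path shown in this record; for even-sized axes the result should agree with numpy.fft.fftshift.

import numpy as np
from gputools.fft.fftshift import fftshift   # import path taken from the record

x = np.random.rand(64, 64).astype(np.float32)     # even dimensions are required
y_gpu = fftshift(x)                                # returns a numpy array by default
print(np.allclose(y_gpu, np.fft.fftshift(x)))      # expected: True

# Shift only along axis 0 and keep the result on the device:
res_g = fftshift(x, axes=[0], return_buffer=True)  # an OCLArray; use res_g.get() for numpy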
gwastro/pycbc | pycbc/strain/recalibrate.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/strain/recalibrate.py#L411-L428 | def tf_from_file(cls, path, delimiter=" "):
"""Convert the contents of a file with the columns
[freq, real(h), imag(h)] to a numpy.array with columns
[freq, real(h)+j*imag(h)].
Parameters
----------
path : string
delimiter : {" ", string}
Return
------
numpy.array
"""
data = np.loadtxt(path, delimiter=delimiter)
freq = data[:, 0]
h = data[:, 1] + 1.0j * data[:, 2]
return np.array([freq, h]).transpose() | [
"def",
"tf_from_file",
"(",
"cls",
",",
"path",
",",
"delimiter",
"=",
"\" \"",
")",
":",
"data",
"=",
"np",
".",
"loadtxt",
"(",
"path",
",",
"delimiter",
"=",
"delimiter",
")",
"freq",
"=",
"data",
"[",
":",
",",
"0",
"]",
"h",
"=",
"data",
"[",
":",
",",
"1",
"]",
"+",
"1.0j",
"*",
"data",
"[",
":",
",",
"2",
"]",
"return",
"np",
".",
"array",
"(",
"[",
"freq",
",",
"h",
"]",
")",
".",
"transpose",
"(",
")"
] | Convert the contents of a file with the columns
[freq, real(h), imag(h)] to a numpy.array with columns
[freq, real(h)+j*imag(h)].
Parameters
----------
path : string
delimiter : {" ", string}
Return
------
numpy.array | [
"Convert",
"the",
"contents",
"of",
"a",
"file",
"with",
"the",
"columns",
"[",
"freq",
"real",
"(",
"h",
")",
"imag",
"(",
"h",
")",
"]",
"to",
"a",
"numpy",
".",
"array",
"with",
"columns",
"[",
"freq",
"real",
"(",
"h",
")",
"+",
"j",
"*",
"imag",
"(",
"h",
")",
"]",
"."
] | python | train |
caseyjlaw/activegit | activegit/activegit.py | https://github.com/caseyjlaw/activegit/blob/2b4a0ee0fecf13345b5257130ba98b48f46e1098/activegit/activegit.py#L105-L122 | def set_version(self, version, force=True):
""" Sets the version name for the current state of repo """
if version in self.versions:
self._version = version
if 'working' in self.repo.branch().stdout:
if force:
logger.info('Found working branch. Removing...')
cmd = self.repo.checkout('master')
cmd = self.repo.branch('working', d=True)
else:
logger.info('Found working branch from previous session. Use force=True to remove it and start anew.')
return
stdout = self.repo.checkout(version, b='working').stdout # active version set in 'working' branch
logger.info('Version {0} set'.format(version))
else:
raise AttributeError('Version {0} not found'.format(version)) | [
"def",
"set_version",
"(",
"self",
",",
"version",
",",
"force",
"=",
"True",
")",
":",
"if",
"version",
"in",
"self",
".",
"versions",
":",
"self",
".",
"_version",
"=",
"version",
"if",
"'working'",
"in",
"self",
".",
"repo",
".",
"branch",
"(",
")",
".",
"stdout",
":",
"if",
"force",
":",
"logger",
".",
"info",
"(",
"'Found working branch. Removing...'",
")",
"cmd",
"=",
"self",
".",
"repo",
".",
"checkout",
"(",
"'master'",
")",
"cmd",
"=",
"self",
".",
"repo",
".",
"branch",
"(",
"'working'",
",",
"d",
"=",
"True",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'Found working branch from previous session. Use force=True to remove it and start anew.'",
")",
"return",
"stdout",
"=",
"self",
".",
"repo",
".",
"checkout",
"(",
"version",
",",
"b",
"=",
"'working'",
")",
".",
"stdout",
"# active version set in 'working' branch",
"logger",
".",
"info",
"(",
"'Version {0} set'",
".",
"format",
"(",
"version",
")",
")",
"else",
":",
"raise",
"AttributeError",
"(",
"'Version {0} not found'",
".",
"format",
"(",
"version",
")",
")"
] | Sets the version name for the current state of repo | [
"Sets",
"the",
"version",
"name",
"for",
"the",
"current",
"state",
"of",
"repo"
] | python | train |
square/pylink | pylink/jlink.py | https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L4267-L4282 | def trace_set_buffer_capacity(self, size):
"""Sets the capacity for the trace buffer.
Args:
self (JLink): the ``JLink`` instance.
size (int): the new capacity for the trace buffer.
Returns:
``None``
"""
cmd = enums.JLinkTraceCommand.SET_CAPACITY
data = ctypes.c_uint32(size)
res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))
if (res == 1):
raise errors.JLinkException('Failed to set trace buffer size.')
return None | [
"def",
"trace_set_buffer_capacity",
"(",
"self",
",",
"size",
")",
":",
"cmd",
"=",
"enums",
".",
"JLinkTraceCommand",
".",
"SET_CAPACITY",
"data",
"=",
"ctypes",
".",
"c_uint32",
"(",
"size",
")",
"res",
"=",
"self",
".",
"_dll",
".",
"JLINKARM_TRACE_Control",
"(",
"cmd",
",",
"ctypes",
".",
"byref",
"(",
"data",
")",
")",
"if",
"(",
"res",
"==",
"1",
")",
":",
"raise",
"errors",
".",
"JLinkException",
"(",
"'Failed to set trace buffer size.'",
")",
"return",
"None"
] | Sets the capacity for the trace buffer.
Args:
self (JLink): the ``JLink`` instance.
size (int): the new capacity for the trace buffer.
Returns:
``None`` | [
"Sets",
"the",
"capacity",
"for",
"the",
"trace",
"buffer",
"."
] | python | train |
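A brief usage sketch assuming a J-Link probe is attached and pylink is importable; the target device name and buffer size are illustrative.

import pylink

jlink = pylink.JLink()
jlink.open()                            # open the first attached J-Link
jlink.connect('STM32F407VE')            # device name is an assumption
jlink.trace_set_buffer_capacity(4096)   # raises JLinkException on failure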
fitodic/centerline | centerline/utils.py | https://github.com/fitodic/centerline/blob/f27e7b1ecb77bd4da40093ab44754cbd3ec9f58b/centerline/utils.py#L34-L63 | def get_ogr_driver(filepath):
"""
Get the OGR driver from the provided file extension.
Args:
file_extension (str): file extension
Returns:
osgeo.ogr.Driver
Raises:
ValueError: no driver is found
"""
filename, file_extension = os.path.splitext(filepath)
EXTENSION = file_extension[1:]
ogr_driver_count = ogr.GetDriverCount()
for idx in range(ogr_driver_count):
driver = ogr.GetDriver(idx)
driver_extension = driver.GetMetadataItem(str('DMD_EXTENSION')) or ''
driver_extensions = driver.GetMetadataItem(str('DMD_EXTENSIONS')) or ''
if EXTENSION == driver_extension or EXTENSION in driver_extensions:
return driver
else:
msg = 'No driver found for the following file extension: {}'.format(
EXTENSION)
raise ValueError(msg) | [
"def",
"get_ogr_driver",
"(",
"filepath",
")",
":",
"filename",
",",
"file_extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filepath",
")",
"EXTENSION",
"=",
"file_extension",
"[",
"1",
":",
"]",
"ogr_driver_count",
"=",
"ogr",
".",
"GetDriverCount",
"(",
")",
"for",
"idx",
"in",
"range",
"(",
"ogr_driver_count",
")",
":",
"driver",
"=",
"ogr",
".",
"GetDriver",
"(",
"idx",
")",
"driver_extension",
"=",
"driver",
".",
"GetMetadataItem",
"(",
"str",
"(",
"'DMD_EXTENSION'",
")",
")",
"or",
"''",
"driver_extensions",
"=",
"driver",
".",
"GetMetadataItem",
"(",
"str",
"(",
"'DMD_EXTENSIONS'",
")",
")",
"or",
"''",
"if",
"EXTENSION",
"==",
"driver_extension",
"or",
"EXTENSION",
"in",
"driver_extensions",
":",
"return",
"driver",
"else",
":",
"msg",
"=",
"'No driver found for the following file extension: {}'",
".",
"format",
"(",
"EXTENSION",
")",
"raise",
"ValueError",
"(",
"msg",
")"
] | Get the OGR driver from the provided file extension.
Args:
file_extension (str): file extension
Returns:
osgeo.ogr.Driver
Raises:
ValueError: no driver is found | [
"Get",
"the",
"OGR",
"driver",
"from",
"the",
"provided",
"file",
"extension",
"."
] | python | train |
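A small sketch of the extension-based lookup, assuming GDAL/OGR is installed and the helper is importable from centerline.utils; the file names are illustrative.

from centerline.utils import get_ogr_driver   # module path taken from the record

driver = get_ogr_driver('roads.shp')   # matched via the driver's DMD_EXTENSION(S) metadata
print(driver.GetName())                # typically 'ESRI Shapefile'
# get_ogr_driver('roads.xyz')          # an unregistered extension raises ValueError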
pybel/pybel | src/pybel/dsl/namespaces.py | https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/dsl/namespaces.py#L13-L15 | def chebi(name=None, identifier=None) -> Abundance:
"""Build a ChEBI abundance node."""
return Abundance(namespace='CHEBI', name=name, identifier=identifier) | [
"def",
"chebi",
"(",
"name",
"=",
"None",
",",
"identifier",
"=",
"None",
")",
"->",
"Abundance",
":",
"return",
"Abundance",
"(",
"namespace",
"=",
"'CHEBI'",
",",
"name",
"=",
"name",
",",
"identifier",
"=",
"identifier",
")"
] | Build a ChEBI abundance node. | [
"Build",
"a",
"ChEBI",
"abundance",
"node",
"."
] | python | train |
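A one-line usage sketch; the import path follows the record's file path, and the ChEBI identifier for water (15377) is believed correct but worth verifying against ChEBI.

from pybel.dsl.namespaces import chebi

water = chebi(name='water', identifier='15377')   # CHEBI:15377
print(water)   # an Abundance node in the CHEBI namespace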
brandon-rhodes/python-jplephem | jplephem/ephem.py | https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/ephem.py#L179-L196 | def velocity_from_bundle(self, bundle):
"""[DEPRECATED] Return velocity, given the `coefficient_bundle()` return value."""
coefficients, days_per_set, T, twot1 = bundle
coefficient_count = coefficients.shape[2]
# Chebyshev derivative:
dT = np.empty_like(T)
dT[0] = 0.0
dT[1] = 1.0
dT[2] = twot1 + twot1
for i in range(3, coefficient_count):
dT[i] = twot1 * dT[i-1] - dT[i-2] + T[i-1] + T[i-1]
dT *= 2.0
dT /= days_per_set
return (dT.T * coefficients).sum(axis=2) | [
"def",
"velocity_from_bundle",
"(",
"self",
",",
"bundle",
")",
":",
"coefficients",
",",
"days_per_set",
",",
"T",
",",
"twot1",
"=",
"bundle",
"coefficient_count",
"=",
"coefficients",
".",
"shape",
"[",
"2",
"]",
"# Chebyshev derivative:",
"dT",
"=",
"np",
".",
"empty_like",
"(",
"T",
")",
"dT",
"[",
"0",
"]",
"=",
"0.0",
"dT",
"[",
"1",
"]",
"=",
"1.0",
"dT",
"[",
"2",
"]",
"=",
"twot1",
"+",
"twot1",
"for",
"i",
"in",
"range",
"(",
"3",
",",
"coefficient_count",
")",
":",
"dT",
"[",
"i",
"]",
"=",
"twot1",
"*",
"dT",
"[",
"i",
"-",
"1",
"]",
"-",
"dT",
"[",
"i",
"-",
"2",
"]",
"+",
"T",
"[",
"i",
"-",
"1",
"]",
"+",
"T",
"[",
"i",
"-",
"1",
"]",
"dT",
"*=",
"2.0",
"dT",
"/=",
"days_per_set",
"return",
"(",
"dT",
".",
"T",
"*",
"coefficients",
")",
".",
"sum",
"(",
"axis",
"=",
"2",
")"
] | [DEPRECATED] Return velocity, given the `coefficient_bundle()` return value. | [
"[",
"DEPRECATED",
"]",
"Return",
"velocity",
"given",
"the",
"coefficient_bundle",
"()",
"return",
"value",
"."
] | python | test |
johnnoone/json-spec | src/jsonspec/operations/__init__.py | https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/operations/__init__.py#L49-L64 | def replace(doc, pointer, value):
"""Replace element from sequence, member from mapping.
:param doc: the document base
:param pointer: the path to search in
:param value: the new value
:return: the new object
.. note::
This operation is functionally identical to a "remove" operation for
a value, followed immediately by an "add" operation at the same
location with the replacement value.
"""
return Target(doc).replace(pointer, value).document | [
"def",
"replace",
"(",
"doc",
",",
"pointer",
",",
"value",
")",
":",
"return",
"Target",
"(",
"doc",
")",
".",
"replace",
"(",
"pointer",
",",
"value",
")",
".",
"document"
] | Replace element from sequence, member from mapping.
:param doc: the document base
:param pointer: the path to search in
:param value: the new value
:return: the new object
.. note::
This operation is functionally identical to a "remove" operation for
a value, followed immediately by an "add" operation at the same
location with the replacement value. | [
"Replace",
"element",
"from",
"sequence",
"member",
"from",
"mapping",
"."
] | python | train |
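A short sketch of the JSON Pointer semantics of the helper above, imported from jsonspec.operations as defined in this module.

from jsonspec.operations import replace

doc = {'foo': {'bar': 'baz'}, 'count': 1}
new_doc = replace(doc, '/foo/bar', 'qux')
print(new_doc)   # {'foo': {'bar': 'qux'}, 'count': 1}
# doc itself should be left unchanged (the helper returns a new document), and
# pointing at a member that does not exist raises an error, consistent with the
# 'remove then add' description in the docstring.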
readbeyond/aeneas | aeneas/tools/convert_syncmap.py | https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/tools/convert_syncmap.py#L81-L161 | def perform_command(self):
"""
Perform command and return the appropriate exit code.
:rtype: int
"""
if len(self.actual_arguments) < 2:
return self.print_help()
input_file_path = self.actual_arguments[0]
output_file_path = self.actual_arguments[1]
output_html = self.has_option(u"--output-html")
if not self.check_input_file(input_file_path):
return self.ERROR_EXIT_CODE
input_sm_format = self.has_option_with_value(u"--input-format")
if input_sm_format is None:
input_sm_format = gf.file_extension(input_file_path)
if not self.check_format(input_sm_format):
return self.ERROR_EXIT_CODE
if not self.check_output_file(output_file_path):
return self.ERROR_EXIT_CODE
if output_html:
if len(self.actual_arguments) < 3:
return self.print_help()
audio_file_path = self.actual_arguments[2]
if not self.check_input_file(audio_file_path):
return self.ERROR_EXIT_CODE
else:
output_sm_format = self.has_option_with_value(u"--output-format")
if output_sm_format is None:
output_sm_format = gf.file_extension(output_file_path)
if not self.check_format(output_sm_format):
return self.ERROR_EXIT_CODE
# TODO add a way to specify a text file for input formats like SMIL
# that do not carry the source text
language = self.has_option_with_value(u"--language")
audio_ref = self.has_option_with_value(u"--audio-ref")
page_ref = self.has_option_with_value(u"--page-ref")
parameters = {
gc.PPN_SYNCMAP_LANGUAGE: language,
gc.PPN_TASK_OS_FILE_SMIL_AUDIO_REF: audio_ref,
gc.PPN_TASK_OS_FILE_SMIL_PAGE_REF: page_ref
}
try:
self.print_info(u"Reading sync map in '%s' format from file '%s'" % (input_sm_format, input_file_path))
self.print_info(u"Reading sync map...")
syncmap = SyncMap(logger=self.logger)
syncmap.read(input_sm_format, input_file_path, parameters)
self.print_info(u"Reading sync map... done")
self.print_info(u"Read %d sync map fragments" % (len(syncmap)))
except Exception as exc:
self.print_error(u"An unexpected error occurred while reading the input sync map:")
self.print_error(u"%s" % (exc))
return self.ERROR_EXIT_CODE
if output_html:
try:
self.print_info(u"Writing HTML file...")
syncmap.output_html_for_tuning(audio_file_path, output_file_path, parameters)
self.print_info(u"Writing HTML file... done")
self.print_success(u"Created HTML file '%s'" % (output_file_path))
return self.NO_ERROR_EXIT_CODE
except Exception as exc:
self.print_error(u"An unexpected error occurred while writing the output HTML file:")
self.print_error(u"%s" % (exc))
else:
try:
self.print_info(u"Writing sync map...")
syncmap.write(output_sm_format, output_file_path, parameters)
self.print_info(u"Writing sync map... done")
self.print_success(u"Created '%s' sync map file '%s'" % (output_sm_format, output_file_path))
return self.NO_ERROR_EXIT_CODE
except Exception as exc:
self.print_error(u"An unexpected error occurred while writing the output sync map:")
self.print_error(u"%s" % (exc))
return self.ERROR_EXIT_CODE | [
"def",
"perform_command",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"actual_arguments",
")",
"<",
"2",
":",
"return",
"self",
".",
"print_help",
"(",
")",
"input_file_path",
"=",
"self",
".",
"actual_arguments",
"[",
"0",
"]",
"output_file_path",
"=",
"self",
".",
"actual_arguments",
"[",
"1",
"]",
"output_html",
"=",
"self",
".",
"has_option",
"(",
"u\"--output-html\"",
")",
"if",
"not",
"self",
".",
"check_input_file",
"(",
"input_file_path",
")",
":",
"return",
"self",
".",
"ERROR_EXIT_CODE",
"input_sm_format",
"=",
"self",
".",
"has_option_with_value",
"(",
"u\"--input-format\"",
")",
"if",
"input_sm_format",
"is",
"None",
":",
"input_sm_format",
"=",
"gf",
".",
"file_extension",
"(",
"input_file_path",
")",
"if",
"not",
"self",
".",
"check_format",
"(",
"input_sm_format",
")",
":",
"return",
"self",
".",
"ERROR_EXIT_CODE",
"if",
"not",
"self",
".",
"check_output_file",
"(",
"output_file_path",
")",
":",
"return",
"self",
".",
"ERROR_EXIT_CODE",
"if",
"output_html",
":",
"if",
"len",
"(",
"self",
".",
"actual_arguments",
")",
"<",
"3",
":",
"return",
"self",
".",
"print_help",
"(",
")",
"audio_file_path",
"=",
"self",
".",
"actual_arguments",
"[",
"2",
"]",
"if",
"not",
"self",
".",
"check_input_file",
"(",
"audio_file_path",
")",
":",
"return",
"self",
".",
"ERROR_EXIT_CODE",
"else",
":",
"output_sm_format",
"=",
"self",
".",
"has_option_with_value",
"(",
"u\"--output-format\"",
")",
"if",
"output_sm_format",
"is",
"None",
":",
"output_sm_format",
"=",
"gf",
".",
"file_extension",
"(",
"output_file_path",
")",
"if",
"not",
"self",
".",
"check_format",
"(",
"output_sm_format",
")",
":",
"return",
"self",
".",
"ERROR_EXIT_CODE",
"# TODO add a way to specify a text file for input formats like SMIL",
"# that do not carry the source text",
"language",
"=",
"self",
".",
"has_option_with_value",
"(",
"u\"--language\"",
")",
"audio_ref",
"=",
"self",
".",
"has_option_with_value",
"(",
"u\"--audio-ref\"",
")",
"page_ref",
"=",
"self",
".",
"has_option_with_value",
"(",
"u\"--page-ref\"",
")",
"parameters",
"=",
"{",
"gc",
".",
"PPN_SYNCMAP_LANGUAGE",
":",
"language",
",",
"gc",
".",
"PPN_TASK_OS_FILE_SMIL_AUDIO_REF",
":",
"audio_ref",
",",
"gc",
".",
"PPN_TASK_OS_FILE_SMIL_PAGE_REF",
":",
"page_ref",
"}",
"try",
":",
"self",
".",
"print_info",
"(",
"u\"Reading sync map in '%s' format from file '%s'\"",
"%",
"(",
"input_sm_format",
",",
"input_file_path",
")",
")",
"self",
".",
"print_info",
"(",
"u\"Reading sync map...\"",
")",
"syncmap",
"=",
"SyncMap",
"(",
"logger",
"=",
"self",
".",
"logger",
")",
"syncmap",
".",
"read",
"(",
"input_sm_format",
",",
"input_file_path",
",",
"parameters",
")",
"self",
".",
"print_info",
"(",
"u\"Reading sync map... done\"",
")",
"self",
".",
"print_info",
"(",
"u\"Read %d sync map fragments\"",
"%",
"(",
"len",
"(",
"syncmap",
")",
")",
")",
"except",
"Exception",
"as",
"exc",
":",
"self",
".",
"print_error",
"(",
"u\"An unexpected error occurred while reading the input sync map:\"",
")",
"self",
".",
"print_error",
"(",
"u\"%s\"",
"%",
"(",
"exc",
")",
")",
"return",
"self",
".",
"ERROR_EXIT_CODE",
"if",
"output_html",
":",
"try",
":",
"self",
".",
"print_info",
"(",
"u\"Writing HTML file...\"",
")",
"syncmap",
".",
"output_html_for_tuning",
"(",
"audio_file_path",
",",
"output_file_path",
",",
"parameters",
")",
"self",
".",
"print_info",
"(",
"u\"Writing HTML file... done\"",
")",
"self",
".",
"print_success",
"(",
"u\"Created HTML file '%s'\"",
"%",
"(",
"output_file_path",
")",
")",
"return",
"self",
".",
"NO_ERROR_EXIT_CODE",
"except",
"Exception",
"as",
"exc",
":",
"self",
".",
"print_error",
"(",
"u\"An unexpected error occurred while writing the output HTML file:\"",
")",
"self",
".",
"print_error",
"(",
"u\"%s\"",
"%",
"(",
"exc",
")",
")",
"else",
":",
"try",
":",
"self",
".",
"print_info",
"(",
"u\"Writing sync map...\"",
")",
"syncmap",
".",
"write",
"(",
"output_sm_format",
",",
"output_file_path",
",",
"parameters",
")",
"self",
".",
"print_info",
"(",
"u\"Writing sync map... done\"",
")",
"self",
".",
"print_success",
"(",
"u\"Created '%s' sync map file '%s'\"",
"%",
"(",
"output_sm_format",
",",
"output_file_path",
")",
")",
"return",
"self",
".",
"NO_ERROR_EXIT_CODE",
"except",
"Exception",
"as",
"exc",
":",
"self",
".",
"print_error",
"(",
"u\"An unexpected error occurred while writing the output sync map:\"",
")",
"self",
".",
"print_error",
"(",
"u\"%s\"",
"%",
"(",
"exc",
")",
")",
"return",
"self",
".",
"ERROR_EXIT_CODE"
] | Perform command and return the appropriate exit code.
:rtype: int | [
"Perform",
"command",
"and",
"return",
"the",
"appropriate",
"exit",
"code",
"."
] | python | train |
MillionIntegrals/vel | vel/optimizers/rmsprop_tf.py | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/optimizers/rmsprop_tf.py#L50-L110 | def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('RMSprop does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# ANOTHER LINE I'VE CHANGED
state['square_avg'] = torch.ones_like(p.data)
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p.data)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p.data)
square_avg = state['square_avg']
alpha = group['alpha']
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad)
if group['centered']:
grad_avg = state['grad_avg']
grad_avg.mul_(alpha).add_(1 - alpha, grad)
# THIS LINE IS EVERYTHING THAT I CHANGED IN THIS OPTIMIZER
# avg = square_avg.addcmul(-1, grad_avg, grad_avg).sqrt().add_(group['eps'])
avg = square_avg.addcmul(-1, grad_avg, grad_avg).add(group['eps']).sqrt()
else:
# THIS LINE IS EVERYTHING THAT I CHANGED IN THIS OPTIMIZER
# avg = square_avg.sqrt().add_(group['eps'])
avg = square_avg.add(group['eps']).sqrt()
if group['momentum'] > 0:
buf = state['momentum_buffer']
buf.mul_(group['momentum']).addcdiv_(grad, avg)
p.data.add_(-group['lr'], buf)
else:
p.data.addcdiv_(-group['lr'], grad, avg)
return loss | [
"def",
"step",
"(",
"self",
",",
"closure",
"=",
"None",
")",
":",
"loss",
"=",
"None",
"if",
"closure",
"is",
"not",
"None",
":",
"loss",
"=",
"closure",
"(",
")",
"for",
"group",
"in",
"self",
".",
"param_groups",
":",
"for",
"p",
"in",
"group",
"[",
"'params'",
"]",
":",
"if",
"p",
".",
"grad",
"is",
"None",
":",
"continue",
"grad",
"=",
"p",
".",
"grad",
".",
"data",
"if",
"grad",
".",
"is_sparse",
":",
"raise",
"RuntimeError",
"(",
"'RMSprop does not support sparse gradients'",
")",
"state",
"=",
"self",
".",
"state",
"[",
"p",
"]",
"# State initialization",
"if",
"len",
"(",
"state",
")",
"==",
"0",
":",
"state",
"[",
"'step'",
"]",
"=",
"0",
"# ANOTHER LINE I'VE CHANGED",
"state",
"[",
"'square_avg'",
"]",
"=",
"torch",
".",
"ones_like",
"(",
"p",
".",
"data",
")",
"if",
"group",
"[",
"'momentum'",
"]",
">",
"0",
":",
"state",
"[",
"'momentum_buffer'",
"]",
"=",
"torch",
".",
"zeros_like",
"(",
"p",
".",
"data",
")",
"if",
"group",
"[",
"'centered'",
"]",
":",
"state",
"[",
"'grad_avg'",
"]",
"=",
"torch",
".",
"zeros_like",
"(",
"p",
".",
"data",
")",
"square_avg",
"=",
"state",
"[",
"'square_avg'",
"]",
"alpha",
"=",
"group",
"[",
"'alpha'",
"]",
"state",
"[",
"'step'",
"]",
"+=",
"1",
"if",
"group",
"[",
"'weight_decay'",
"]",
"!=",
"0",
":",
"grad",
"=",
"grad",
".",
"add",
"(",
"group",
"[",
"'weight_decay'",
"]",
",",
"p",
".",
"data",
")",
"square_avg",
".",
"mul_",
"(",
"alpha",
")",
".",
"addcmul_",
"(",
"1",
"-",
"alpha",
",",
"grad",
",",
"grad",
")",
"if",
"group",
"[",
"'centered'",
"]",
":",
"grad_avg",
"=",
"state",
"[",
"'grad_avg'",
"]",
"grad_avg",
".",
"mul_",
"(",
"alpha",
")",
".",
"add_",
"(",
"1",
"-",
"alpha",
",",
"grad",
")",
"# THIS LINE IS EVERYTHING THAT I CHANGED IN THIS OPTIMIZER",
"# avg = square_avg.addcmul(-1, grad_avg, grad_avg).sqrt().add_(group['eps'])",
"avg",
"=",
"square_avg",
".",
"addcmul",
"(",
"-",
"1",
",",
"grad_avg",
",",
"grad_avg",
")",
".",
"add",
"(",
"group",
"[",
"'eps'",
"]",
")",
".",
"sqrt",
"(",
")",
"else",
":",
"# THIS LINE IS EVERYTHING THAT I CHANGED IN THIS OPTIMIZER",
"# avg = square_avg.sqrt().add_(group['eps'])",
"avg",
"=",
"square_avg",
".",
"add",
"(",
"group",
"[",
"'eps'",
"]",
")",
".",
"sqrt",
"(",
")",
"if",
"group",
"[",
"'momentum'",
"]",
">",
"0",
":",
"buf",
"=",
"state",
"[",
"'momentum_buffer'",
"]",
"buf",
".",
"mul_",
"(",
"group",
"[",
"'momentum'",
"]",
")",
".",
"addcdiv_",
"(",
"grad",
",",
"avg",
")",
"p",
".",
"data",
".",
"add_",
"(",
"-",
"group",
"[",
"'lr'",
"]",
",",
"buf",
")",
"else",
":",
"p",
".",
"data",
".",
"addcdiv_",
"(",
"-",
"group",
"[",
"'lr'",
"]",
",",
"grad",
",",
"avg",
")",
"return",
"loss"
] | Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss. | [
"Performs",
"a",
"single",
"optimization",
"step",
"."
] | python | train |
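A standard PyTorch training-step sketch for this optimizer; the class name RMSpropTF is assumed from the module name and the constructor keywords mirror torch.optim.RMSprop.

import torch
from vel.optimizers.rmsprop_tf import RMSpropTF   # class name assumed from the module

model = torch.nn.Linear(10, 2)
optimizer = RMSpropTF(model.parameters(), lr=1e-3, alpha=0.99, eps=1e-8)

x = torch.randn(4, 10)
y = torch.randn(4, 2)

optimizer.zero_grad()
loss = torch.nn.functional.mse_loss(model(x), y)
loss.backward()
optimizer.step()   # optimizer.step(closure) re-evaluates the loss inside step()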
HIPS/autograd | autograd/differential_operators.py | https://github.com/HIPS/autograd/blob/e3b525302529d7490769d5c0bcfc7457e24e3b3e/autograd/differential_operators.py#L48-L61 | def jacobian(fun, x):
"""
Returns a function which computes the Jacobian of `fun` with respect to
positional argument number `argnum`, which must be a scalar or array. Unlike
`grad` it is not restricted to scalar-output functions, but also it cannot
take derivatives with respect to some argument types (like lists or dicts).
If the input to `fun` has shape (in1, in2, ...) and the output has shape
(out1, out2, ...) then the Jacobian has shape (out1, out2, ..., in1, in2, ...).
"""
vjp, ans = _make_vjp(fun, x)
ans_vspace = vspace(ans)
jacobian_shape = ans_vspace.shape + vspace(x).shape
grads = map(vjp, ans_vspace.standard_basis())
return np.reshape(np.stack(grads), jacobian_shape) | [
"def",
"jacobian",
"(",
"fun",
",",
"x",
")",
":",
"vjp",
",",
"ans",
"=",
"_make_vjp",
"(",
"fun",
",",
"x",
")",
"ans_vspace",
"=",
"vspace",
"(",
"ans",
")",
"jacobian_shape",
"=",
"ans_vspace",
".",
"shape",
"+",
"vspace",
"(",
"x",
")",
".",
"shape",
"grads",
"=",
"map",
"(",
"vjp",
",",
"ans_vspace",
".",
"standard_basis",
"(",
")",
")",
"return",
"np",
".",
"reshape",
"(",
"np",
".",
"stack",
"(",
"grads",
")",
",",
"jacobian_shape",
")"
] | Returns a function which computes the Jacobian of `fun` with respect to
positional argument number `argnum`, which must be a scalar or array. Unlike
`grad` it is not restricted to scalar-output functions, but also it cannot
take derivatives with respect to some argument types (like lists or dicts).
If the input to `fun` has shape (in1, in2, ...) and the output has shape
(out1, out2, ...) then the Jacobian has shape (out1, out2, ..., in1, in2, ...). | [
"Returns",
"a",
"function",
"which",
"computes",
"the",
"Jacobian",
"of",
"fun",
"with",
"respect",
"to",
"positional",
"argument",
"number",
"argnum",
"which",
"must",
"be",
"a",
"scalar",
"or",
"array",
".",
"Unlike",
"grad",
"it",
"is",
"not",
"restricted",
"to",
"scalar",
"-",
"output",
"functions",
"but",
"also",
"it",
"cannot",
"take",
"derivatives",
"with",
"respect",
"to",
"some",
"argument",
"types",
"(",
"like",
"lists",
"or",
"dicts",
")",
".",
"If",
"the",
"input",
"to",
"fun",
"has",
"shape",
"(",
"in1",
"in2",
"...",
")",
"and",
"the",
"output",
"has",
"shape",
"(",
"out1",
"out2",
"...",
")",
"then",
"the",
"Jacobian",
"has",
"shape",
"(",
"out1",
"out2",
"...",
"in1",
"in2",
"...",
")",
"."
] | python | train |
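A small numeric sketch matching the shape convention in the docstring; note that the released autograd package exposes this in curried form (jacobian(f)(x)), so the import below uses the public entry point rather than the two-argument internal signature shown here.

import autograd.numpy as np
from autograd import jacobian   # public entry point wrapping the function above

def f(x):
    return np.sin(x) * x        # output shape (3,) for input shape (3,)

x = np.array([1.0, 2.0, 0.5])
J = jacobian(f)(x)
print(J.shape)                  # (3, 3): output shape followed by input shape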
openstax/cnx-archive | cnxarchive/search.py | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/search.py#L388-L396 | def _upper(val_list):
    """
    :param val_list: a list of strings
    :return: a list of upper-cased strings
    """
    res = []
    for ele in val_list:
        res.append(ele.upper())
    return res
 | :param val_list: a list of strings :return: a list of upper-cased strings | python | train
jason-weirather/py-seq-tools | seqtools/structure/__init__.py | https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/structure/__init__.py#L239-L249 | def get_sequence(self, ref):
    """get a sequence given a reference"""
    strand = '+'
    if not self._options.direction:
        sys.stderr.write("WARNING: no strand information for the transcript\n")
    if self._options.direction: strand = self._options.direction
    seq = ''
    for e in [x.range for x in self.exons]:
        seq += str(ref[e.chr][e.start-1:e.end])
    if strand == '-': seq = rc(seq)
    return Sequence(seq.upper())
 | get a sequence given a reference | python | train
saltstack/salt | salt/cloud/clouds/azurearm.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L392-L467 | def avail_images(call=None):
    '''
    Return a dict of all available images on the provider
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )
    compconn = get_conn(client_type='compute')
    region = get_location()
    publishers = []
    ret = {}

    def _get_publisher_images(publisher):
        '''
        Get all images from a specific publisher
        '''
        data = {}
        try:
            offers = compconn.virtual_machine_images.list_offers(
                location=region,
                publisher_name=publisher,
            )
            for offer_obj in offers:
                offer = offer_obj.as_dict()
                skus = compconn.virtual_machine_images.list_skus(
                    location=region,
                    publisher_name=publisher,
                    offer=offer['name'],
                )
                for sku_obj in skus:
                    sku = sku_obj.as_dict()
                    results = compconn.virtual_machine_images.list(
                        location=region,
                        publisher_name=publisher,
                        offer=offer['name'],
                        skus=sku['name'],
                    )
                    for version_obj in results:
                        version = version_obj.as_dict()
                        name = '|'.join((
                            publisher,
                            offer['name'],
                            sku['name'],
                            version['name'],
                        ))
                        data[name] = {
                            'publisher': publisher,
                            'offer': offer['name'],
                            'sku': sku['name'],
                            'version': version['name'],
                        }
        except CloudError as exc:
            __utils__['azurearm.log_cloud_error']('compute', exc.message)
            data = {publisher: exc.message}
        return data

    try:
        publishers_query = compconn.virtual_machine_images.list_publishers(
            location=region
        )
        for publisher_obj in publishers_query:
            publisher = publisher_obj.as_dict()
            publishers.append(publisher['name'])
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('compute', exc.message)
    pool = ThreadPool(cpu_count() * 6)
    results = pool.map_async(_get_publisher_images, publishers)
    results.wait()
    ret = {k: v for result in results.get() for k, v in six.iteritems(result)}
    return ret
 | Return a dict of all available images on the provider | python | train
codelv/enaml-native | src/enamlnative/android/android_location.py | https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_location.py#L61-L105 | def start(cls, callback, provider='gps', min_time=1000, min_distance=0):
    """ Convenience method that checks and requests permission if necessary
    and if successful calls the callback with a populated `Location`
    instance on updates.
    Note you must have the permissions in your manifest or requests
    will be denied immediately.
    """
    app = AndroidApplication.instance()
    f = app.create_future()

    def on_success(lm):
        #: When we have finally have permission
        lm.onLocationChanged.connect(callback)
        #: Save a reference to our listener
        #: because we may want to stop updates
        listener = LocationManager.LocationListener(lm)
        lm.listeners.append(listener)
        lm.requestLocationUpdates(provider, min_time, min_distance,
                                  listener)
        app.set_future_result(f, True)

    def on_perm_request_result(allowed):
        #: When our permission request is accepted or decliend.
        if allowed:
            LocationManager.get().then(on_success)
        else:
            #: Access denied
            app.set_future_result(f, False)

    def on_perm_check(allowed):
        if allowed:
            LocationManager.get().then(on_success)
        else:
            LocationManager.request_permission(
                fine=provider == 'gps').then(on_perm_request_result)

    #: Check permission
    LocationManager.check_permission(
        fine=provider == 'gps').then(on_perm_check)
    return f
 | Convenience method that checks and requests permission if necessary and if successful calls the callback with a populated `Location` instance on updates. Note you must have the permissions in your manifest or requests will be denied immediately. | python | train
AkihikoITOH/capybara | capybara/virtualenv/lib/python2.7/site-packages/flask/app.py | https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/flask/app.py#L1441-L1461 | def dispatch_request(self):
    """Does the request dispatching.  Matches the URL and returns the
    return value of the view or error handler.  This does not have to
    be a response object.  In order to convert the return value to a
    proper response object, call :func:`make_response`.
    .. versionchanged:: 0.7
       This no longer does the exception handling, this code was
       moved to the new :meth:`full_dispatch_request`.
    """
    req = _request_ctx_stack.top.request
    if req.routing_exception is not None:
        self.raise_routing_exception(req)
    rule = req.url_rule
    # if we provide automatic options for this URL and the
    # request came with the OPTIONS method, reply automatically
    if getattr(rule, 'provide_automatic_options', False) \
            and req.method == 'OPTIONS':
        return self.make_default_options_response()
    # otherwise dispatch to the handler for that endpoint
    return self.view_functions[rule.endpoint](**req.view_args)
 | Does the request dispatching. Matches the URL and returns the return value of the view or error handler. This does not have to be a response object. In order to convert the return value to a proper response object, call :func:`make_response`. .. versionchanged:: 0.7 This no longer does the exception handling, this code was moved to the new :meth:`full_dispatch_request`. | python | test
ga4gh/ga4gh-server | ga4gh/server/gff3.py | https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/gff3.py#L200-L205 | def _recSortKey(r):
    """
    Sort order for Features, by genomic coordinate,
    disambiguated by feature type (alphabetically).
    """
    return r.seqname, r.start, -r.end, r.type
 | Sort order for Features, by genomic coordinate, disambiguated by feature type (alphabetically). | python | train
jepegit/cellpy | cellpy/readers/cellreader.py | https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/readers/cellreader.py#L391-L428 | def check_file_ids(self, rawfiles, cellpyfile):
    """Check the stats for the files (raw-data and cellpy hdf5).
    This function checks if the hdf5 file and the res-files have the same
    timestamps etc to find out if we need to bother to load .res -files.
    Args:
        cellpyfile (str): filename of the cellpy hdf5-file.
        rawfiles (list of str): name(s) of raw-data file(s).
    Returns:
        False if the raw files are newer than the cellpy hdf5-file
        (update needed).
        If return_res is True it also returns list of raw-file_names as
        second argument.
    """
    txt = "checking file ids - using '%s'" % self.filestatuschecker
    self.logger.info(txt)
    ids_cellpy_file = self._check_cellpy_file(cellpyfile)
    self.logger.debug(f"cellpyfile ids: {ids_cellpy_file}")
    if not ids_cellpy_file:
        # self.logger.debug("hdf5 file does not exist - needs updating")
        return False
    ids_raw = self._check_raw(rawfiles)
    similar = self._compare_ids(ids_raw, ids_cellpy_file)
    if not similar:
        # self.logger.debug("hdf5 file needs updating")
        return False
    else:
        # self.logger.debug("hdf5 file is updated")
        return True
 | Check the stats for the files (raw-data and cellpy hdf5). This function checks if the hdf5 file and the res-files have the same timestamps etc to find out if we need to bother to load .res -files. Args: cellpyfile (str): filename of the cellpy hdf5-file. rawfiles (list of str): name(s) of raw-data file(s). Returns: False if the raw files are newer than the cellpy hdf5-file (update needed). If return_res is True it also returns list of raw-file_names as second argument. | python | train
pyQode/pyqode.core | pyqode/core/widgets/splittable_tab_widget.py | https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/splittable_tab_widget.py#L201-L213 | def close_right(self):
    """
    Closes every editors tabs on the left of the current one.
    """
    current_widget = self.widget(self.tab_under_menu())
    index = self.indexOf(current_widget)
    if self._try_close_dirty_tabs(tab_range=range(index + 1, self.count())):
        while True:
            widget = self.widget(self.count() - 1)
            if widget != current_widget:
                self.remove_tab(self.count() - 1)
            else:
                break
 | Closes every editors tabs on the left of the current one. | python | train
SBRG/ssbio | ssbio/utils.py | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L752-L785 | def input_list_parser(infile_list):
    """Always return a list of files with varying input.
    >>> input_list_parser(['/path/to/folder/'])
    ['/path/to/folder/file1.txt', '/path/to/folder/file2.txt', '/path/to/folder/file3.txt']
    >>> input_list_parser(['/path/to/file.txt'])
    ['/path/to/file.txt']
    >>> input_list_parser(['file1.txt'])
    ['file1.txt']
    Args:
        infile_list: List of arguments
    Returns:
        list: Standardized list of files
    """
    final_list_of_files = []
    for x in infile_list:
        # If the input is a folder
        if op.isdir(x):
            os.chdir(x)
            final_list_of_files.extend(glob.glob('*'))
        # If the input is a file
        if op.isfile(x):
            final_list_of_files.append(x)
    return final_list_of_files
 | Always return a list of files with varying input. >>> input_list_parser(['/path/to/folder/']) ['/path/to/folder/file1.txt', '/path/to/folder/file2.txt', '/path/to/folder/file3.txt'] >>> input_list_parser(['/path/to/file.txt']) ['/path/to/file.txt'] >>> input_list_parser(['file1.txt']) ['file1.txt'] Args: infile_list: List of arguments Returns: list: Standardized list of files | python | train
inveniosoftware/invenio-github | invenio_github/handlers.py | https://github.com/inveniosoftware/invenio-github/blob/ec42fd6a06079310dcbe2c46d9fd79d5197bbe26/invenio_github/handlers.py#L62-L103 | def disconnect(remote):
    """Disconnect callback handler for GitHub."""
    # User must be authenticated
    if not current_user.is_authenticated:
        return current_app.login_manager.unauthorized()
    external_method = 'github'
    external_ids = [i.id for i in current_user.external_identifiers
                    if i.method == external_method]
    if external_ids:
        oauth_unlink_external_id(dict(id=external_ids[0],
                                      method=external_method))
    user_id = int(current_user.get_id())
    token = RemoteToken.get(user_id, remote.consumer_key)
    if token:
        extra_data = token.remote_account.extra_data
        # Delete the token that we issued for GitHub to deliver webhooks
        webhook_token_id = extra_data.get('tokens', {}).get('webhook')
        ProviderToken.query.filter_by(id=webhook_token_id).delete()
        # Disable GitHub webhooks from our side
        db_repos = Repository.query.filter_by(user_id=user_id).all()
        # Keep repositories with hooks to pass to the celery task later on
        repos_with_hooks = [(r.github_id, r.hook) for r in db_repos if r.hook]
        for repo in db_repos:
            try:
                Repository.disable(user_id=user_id,
                                   github_id=repo.github_id,
                                   name=repo.name)
            except NoResultFound:
                # If the repository doesn't exist, no action is necessary
                pass
        db.session.commit()
        # Send Celery task for webhooks removal and token revocation
        disconnect_github.delay(token.access_token, repos_with_hooks)
        # Delete the RemoteAccount (along with the associated RemoteToken)
        token.remote_account.delete()
    return redirect(url_for('invenio_oauthclient_settings.index'))
 | Disconnect callback handler for GitHub. | python | train
mitsei/dlkit | dlkit/records/assessment/basic/simple_records.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/basic/simple_records.py#L482-L494 | def set_max_string_length(self, length=None):
    """stub"""
    if self.get_max_string_length_metadata().is_read_only():
        raise NoAccess()
    if not self.my_osid_object_form._is_valid_cardinal(
            length,
            self.get_max_string_length_metadata()):
        raise InvalidArgument()
    if self.my_osid_object_form.min_string_length is not None and \
            length < self.my_osid_object_form.min_string_length + 1:
        raise InvalidArgument()
    self.my_osid_object_form._my_map['maxStringLength'] = length
    self._max_string_length = length
 | stub | python | train
nickmckay/LiPD-utilities | Python/lipd/excel.py | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/excel.py#L1377-L1389 | def compile_authors(cell):
    """
    Split the string of author names into the BibJSON format.
    :param str cell: Data from author cell
    :return: (list of dicts) Author names
    """
    logger_excel.info("enter compile_authors")
    author_lst = []
    s = cell.split(';')
    for w in s:
        author_lst.append(w.lstrip())
    logger_excel.info("exit compile_authors")
    return author_lst
 | Split the string of author names into the BibJSON format. :param str cell: Data from author cell :return: (list of dicts) Author names | python | train
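A quick illustration of the behaviour described in the row above (the input string is hypothetical, and the import path `lipd.excel` is assumed from the row's file path):
from lipd.excel import compile_authors   # assumed import path for this row

names = compile_authors("Smith, J.; Doe, A.; Roe, R.")
print(names)   # ['Smith, J.', 'Doe, A.', 'Roe, R.'] - split on ';', each piece left-stripped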
mrjoes/sockjs-tornado | sockjs/tornado/session.py | https://github.com/mrjoes/sockjs-tornado/blob/bd3a99b407f1181f054b3b1730f438dde375ca1c/sockjs/tornado/session.py#L255-L292 | def set_handler(self, handler, start_heartbeat=True):
    """Set active handler for the session
    `handler`
        Associate active Tornado handler with the session
    `start_heartbeat`
        Should session start heartbeat immediately
    """
    # Check if session already has associated handler
    if self.handler is not None:
        handler.send_pack(proto.disconnect(2010, "Another connection still open"))
        return False
    if self._verify_ip and self.conn_info is not None:
        # If IP address doesn't match - refuse connection
        if handler.request.remote_ip != self.conn_info.ip:
            LOG.error('Attempted to attach to session %s (%s) from different IP (%s)' % (
                self.session_id,
                self.conn_info.ip,
                handler.request.remote_ip
            ))
            handler.send_pack(proto.disconnect(2010, "Attempted to connect to session from different IP"))
            return False
    if (self.state == CLOSING or self.state == CLOSED) and not self.send_queue:
        handler.send_pack(proto.disconnect(*self.get_close_reason()))
        return False
    # Associate handler and promote session
    super(Session, self).set_handler(handler)
    self.promote()
    if start_heartbeat:
        self.start_heartbeat()
    return True
 | Set active handler for the session. `handler` Associate active Tornado handler with the session. `start_heartbeat` Should session start heartbeat immediately | python | train
pylast/pylast | src/pylast/__init__.py | https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1395-L1412 | def get_top_tags(self, limit=None):
    """Returns a list of the most frequently used Tags on this object."""
    doc = self._request(self.ws_prefix + ".getTopTags", True)
    elements = doc.getElementsByTagName("tag")
    seq = []
    for element in elements:
        tag_name = _extract(element, "name")
        tagcount = _extract(element, "count")
        seq.append(TopItem(Tag(tag_name, self.network), tagcount))
    if limit:
        seq = seq[:limit]
    return seq
 | Returns a list of the most frequently used Tags on this object. | python | train
photo/openphoto-python | trovebox/objects/photo.py | https://github.com/photo/openphoto-python/blob/209a1da27c8d8c88dbcf4ea6c6f57031ea1bc44b/trovebox/objects/photo.py#L10-L20 | def delete(self, **kwds):
    """
    Endpoint: /photo/<id>/delete.json
    Deletes this photo.
    Returns True if successful.
    Raises a TroveboxError if not.
    """
    result = self._client.photo.delete(self, **kwds)
    self._delete_fields()
    return result
 | Endpoint: /photo/<id>/delete.json Deletes this photo. Returns True if successful. Raises a TroveboxError if not. | python | train
suds-community/suds | suds/umx/core.py | https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/umx/core.py#L130-L153 | def append_children(self, content):
    """
    Append child nodes into L{Content.data}
    @param content: The current content being unmarshalled.
    @type content: L{Content}
    """
    for child in content.node:
        cont = Content(child)
        cval = self.append(cont)
        key = reserved.get(child.name, child.name)
        if key in content.data:
            v = getattr(content.data, key)
            if isinstance(v, list):
                v.append(cval)
            else:
                setattr(content.data, key, [v, cval])
            continue
        if self.multi_occurrence(cont):
            if cval is None:
                setattr(content.data, key, [])
            else:
                setattr(content.data, key, [cval,])
        else:
            setattr(content.data, key, cval)
 | Append child nodes into L{Content.data} @param content: The current content being unmarshalled. @type content: L{Content} | python | train
Tanganelli/CoAPthon3 | coapthon/resources/resource.py | https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/resources/resource.py#L330-L342 | def resource_type(self):
    """
    Get the CoRE Link Format rt attribute of the resource.
    :return: the CoRE Link Format rt attribute
    """
    value = "rt="
    lst = self._attributes.get("rt")
    if lst is None:
        value = ""
    else:
        value += "\"" + str(lst) + "\""
    return value
 | Get the CoRE Link Format rt attribute of the resource. :return: the CoRE Link Format rt attribute | python | train
gem/oq-engine | openquake/hazardlib/gsim/si_midorikawa_1999.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/si_midorikawa_1999.py#L96-L109 | def _get_min_distance_to_volcanic_front(lons, lats):
    """
    Compute and return minimum distance between volcanic front and points
    specified by 'lon' and 'lat'.
    Distance is negative if point is located east of the volcanic front,
    positive otherwise.
    The method uses the same approach as :meth:`_get_min_distance_to_sub_trench`
    but final distance is returned without taking the absolute value.
    """
    vf = _construct_surface(VOLCANIC_FRONT_LONS, VOLCANIC_FRONT_LATS, 0., 10.)
    sites = Mesh(lons, lats, None)
    return vf.get_rx_distance(sites)
 | Compute and return minimum distance between volcanic front and points specified by 'lon' and 'lat'. Distance is negative if point is located east of the volcanic front, positive otherwise. The method uses the same approach as :meth:`_get_min_distance_to_sub_trench` but final distance is returned without taking the absolute value. | python | train
pantsbuild/pants | src/python/pants/base/mustache.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/base/mustache.py#L88-L98 | def _get_template_text_from_package(self, template_name):
    """Load the named template embedded in our package."""
    if self._package_name is None:
        raise self.MustacheError('No package specified for template loading.')
    path = os.path.join('templates', template_name + '.mustache')
    template_text = pkgutil.get_data(self._package_name, path)
    if template_text is None:
        raise self.MustacheError(
            'could not find template {} in package {}'.format(path, self._package_name))
    return template_text.decode('utf8')
 | Load the named template embedded in our package. | python | train
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/squaremap/squaremap.py | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/squaremap/squaremap.py#L460-L462 | def children_sum(self, children, node):
    """Calculate children's total sum"""
    return sum([self.value(value, node) for value in children])
 | Calculate children's total sum | python | train
lappis-unb/salic-ml | src/salicml/metrics/finance/common_items_ratio.py | https://github.com/lappis-unb/salic-ml/blob/1b3ebc4f8067740999897ccffd9892dc94482a93/src/salicml/metrics/finance/common_items_ratio.py#L110-L118 | def get_project_items(pronac):
    """
    Returns all items from a project.
    """
    df = data.all_items
    return (
        df[df['PRONAC'] == pronac]
        .drop(columns=['PRONAC', 'idSegmento'])
    )
 | Returns all items from a project. | python | train
SamLau95/nbinteract | nbinteract/exporters.py | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/exporters.py#L236-L249 | def _wait_for_save(nb_name, timeout=5):
    """Waits for nb_name to update, waiting up to TIMEOUT seconds.
    Returns True if a save was detected, and False otherwise.
    """
    modification_time = os.path.getmtime(nb_name)
    start_time = time.time()
    while time.time() < start_time + timeout:
        if (
            os.path.getmtime(nb_name) > modification_time
            and os.path.getsize(nb_name) > 0
        ):
            return True
        time.sleep(0.2)
    return False
 | Waits for nb_name to update, waiting up to TIMEOUT seconds. Returns True if a save was detected, and False otherwise. | python | train
horazont/aioxmpp | aioxmpp/stream.py | https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/stream.py#L1195-L1211 | def flush_incoming(self):
    """
    Flush all incoming queues to the respective processing methods. The
    handlers are called as usual, thus it may require at least one
    iteration through the asyncio event loop before effects can be seen.
    The incoming queues are empty after a call to this method.
    It is legal (but pretty useless) to call this method while the stream
    is :attr:`running`.
    """
    while True:
        try:
            stanza_obj = self._incoming_queue.get_nowait()
        except asyncio.QueueEmpty:
            break
        self._process_incoming(None, stanza_obj)
 | Flush all incoming queues to the respective processing methods. The handlers are called as usual, thus it may require at least one iteration through the asyncio event loop before effects can be seen. The incoming queues are empty after a call to this method. It is legal (but pretty useless) to call this method while the stream is :attr:`running`. | python | train
titusjan/argos | argos/utils/cls.py | https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/utils/cls.py#L350-L370 | def array_has_real_numbers(array):
    """ Uses the dtype kind of the numpy array to determine if it represents real numbers.
    That is, the array kind should be one of: i u f
    Possible dtype.kind values.
    b  boolean
    i  signed integer
    u  unsigned integer
    f  floating-point
    c  complex floating-point
    m  timedelta
    M  datetime
    O  object
    S  (byte-)string
    U  Unicode
    V  void
    """
    kind = array.dtype.kind
    assert kind in 'biufcmMOSUV', "Unexpected array kind: {}".format(kind)
    return kind in 'iuf'
 | Uses the dtype kind of the numpy array to determine if it represents real numbers. That is, the array kind should be one of: i u f. Possible dtype.kind values: b boolean, i signed integer, u unsigned integer, f floating-point, c complex floating-point, m timedelta, M datetime, O object, S (byte-)string, U Unicode, V void | python | train
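A small standalone check of the dtype-kind logic in the row above; it re-creates the helper with the same body rather than importing argos, so only numpy is needed:
import numpy as np

def array_has_real_numbers(array):          # same logic as the row above
    kind = array.dtype.kind
    assert kind in 'biufcmMOSUV', "Unexpected array kind: {}".format(kind)
    return kind in 'iuf'

print(array_has_real_numbers(np.arange(3)))             # True  (kind 'i', signed integer)
print(array_has_real_numbers(np.array([1.5, 2.0])))     # True  (kind 'f', floating-point)
print(array_has_real_numbers(np.array([True, False])))  # False (kind 'b', boolean)
print(array_has_real_numbers(np.array([1 + 2j])))       # False (kind 'c', complex)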
gwastro/pycbc-glue | pycbc_glue/ligolw/utils/segments.py | https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/utils/segments.py#L112-L121 | def sort(self, *args):
    """
    Sort the internal segment lists.  The optional args are
    passed to the .sort() method of the segment lists.  This
    can be used to control the sort order by providing an
    alternate comparison function.  The default is to sort by
    start time with ties broken by end time.
    """
    self.valid.sort(*args)
    self.active.sort(*args)
 | Sort the internal segment lists. The optional args are passed to the .sort() method of the segment lists. This can be used to control the sort order by providing an alternate comparison function. The default is to sort by start time with ties broken by end time. | python | train
boundlessgeo/lib-qgis-commons | qgiscommons2/layers.py | https://github.com/boundlessgeo/lib-qgis-commons/blob/d25d13803db08c18632b55d12036e332f006d9ac/qgiscommons2/layers.py#L246-L261 | def loadLayerNoCrsDialog(filename, name=None, provider=None):
    '''
    Tries to load a layer from the given file
    Same as the loadLayer method, but it does not ask for CRS, regardless of current
    configuration in QGIS settings
    '''
    settings = QSettings()
    prjSetting = settings.value('/Projections/defaultBehaviour')
    settings.setValue('/Projections/defaultBehaviour', '')
    # QGIS3:
    prjSetting3 = settings.value('/Projections/defaultBehavior')
    settings.setValue('/Projections/defaultBehavior', '')
    layer = loadLayer(filename, name, provider)
    settings.setValue('/Projections/defaultBehaviour', prjSetting)
    settings.setValue('/Projections/defaultBehavior', prjSetting3)
    return layer
 | Tries to load a layer from the given file. Same as the loadLayer method, but it does not ask for CRS, regardless of current configuration in QGIS settings | python | train
pypa/pipenv | pipenv/vendor/distlib/database.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/database.py#L617-L630 | def read_exports(self):
    """
    Read exports data from a file in .ini format.
    :return: A dictionary of exports, mapping an export category to a list
             of :class:`ExportEntry` instances describing the individual
             export entries.
    """
    result = {}
    r = self.get_distinfo_resource(EXPORTS_FILENAME)
    if r:
        with contextlib.closing(r.as_stream()) as stream:
            result = read_exports(stream)
    return result
 | Read exports data from a file in .ini format. :return: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries. | python | train
luiscberrocal/pyjavaprops | pyjavaprops/javaproperties.py | https://github.com/luiscberrocal/pyjavaprops/blob/7d0327b1758b3d907af657e0df3b0618776ac46d/pyjavaprops/javaproperties.py#L285-L291 | def list(self, out=sys.stdout):
    """ Prints a listing of the properties to the
    stream 'out' which defaults to the standard output """
    out.write('-- listing properties --\n')
    for key, value in self._properties.items():
        out.write(''.join((key, '=', value, '\n')))
 | Prints a listing of the properties to the stream 'out' which defaults to the standard output | python | train
applegrew/django-select2 | django_select2/forms.py | https://github.com/applegrew/django-select2/blob/2bb6f3a9740a368e486e1ea01ff553d2d1954241/django_select2/forms.py#L350-L368 | def set_to_cache(self):
    """
    Add widget's attributes to Django's cache.
    Split the QuerySet, to not pickle the result set.
    """
    queryset = self.get_queryset()
    cache.set(self._get_cache_key(), {
        'queryset':
            [
                queryset.none(),
                queryset.query,
            ],
        'cls': self.__class__,
        'search_fields': tuple(self.search_fields),
        'max_results': int(self.max_results),
        'url': str(self.get_url()),
        'dependent_fields': dict(self.dependent_fields),
    })
 | Add widget's attributes to Django's cache. Split the QuerySet, to not pickle the result set. | python | train
UCL-INGI/INGInious | inginious/frontend/pages/api/courses.py | https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/api/courses.py#L16-L67 | def API_GET(self, courseid=None):  # pylint: disable=arguments-differ
    """
    List courses available to the connected client. Returns a dict in the form

        {
            "courseid1":
            {
                "name": "Name of the course",   #the name of the course
                "require_password": False,      #indicates if this course requires a password or not
                "is_registered": False,         #indicates if the user is registered to this course or not
                "tasks":                        #only appears if is_registered is True
                {
                    "taskid1": "name of task1",
                    "taskid2": "name of task2"
                    #...
                },
                "grade": 0.0                    #the current grade in the course. Only appears if is_registered is True
            }
            #...
        }

    If you use the endpoint /api/v0/courses/the_course_id, this dict will contain one entry or the page will return 404 Not Found.
    """
    output = []
    if courseid is None:
        courses = self.course_factory.get_all_courses()
    else:
        try:
            courses = {courseid: self.course_factory.get_course(courseid)}
        except:
            raise APINotFound("Course not found")
    username = self.user_manager.session_username()
    user_info = self.database.users.find_one({"username": username})
    for courseid, course in courses.items():
        if self.user_manager.course_is_open_to_user(course, username, False) or course.is_registration_possible(user_info):
            data = {
                "id": courseid,
                "name": course.get_name(self.user_manager.session_language()),
                "require_password": course.is_password_needed_for_registration(),
                "is_registered": self.user_manager.course_is_open_to_user(course, username, False)
            }
            if self.user_manager.course_is_open_to_user(course, username, False):
                data["tasks"] = {taskid: task.get_name(self.user_manager.session_language()) for taskid, task in course.get_tasks().items()}
                data["grade"] = self.user_manager.get_course_cache(username, course)["grade"]
            output.append(data)
    return 200, output
 | List courses available to the connected client. Returns a dict in the form :: { "courseid1": { "name": "Name of the course", "require_password": False, "is_registered": False, "tasks": { "taskid1": "name of task1", "taskid2": "name of task2" }, "grade": 0.0 } } If you use the endpoint /api/v0/courses/the_course_id, this dict will contain one entry or the page will return 404 Not Found. | python | train
bcbio/bcbio-nextgen | bcbio/variation/joint.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L196-L207 | def _fix_orig_vcf_refs(data):
"""Supply references to initial variantcalls if run in addition to batching.
"""
variantcaller = tz.get_in(("config", "algorithm", "variantcaller"), data)
if variantcaller:
data["vrn_file_orig"] = data["vrn_file"]
for i, sub in enumerate(data.get("group_orig", [])):
sub_vrn = sub.pop("vrn_file", None)
if sub_vrn:
sub["vrn_file_orig"] = sub_vrn
data["group_orig"][i] = sub
return data | [
"def",
"_fix_orig_vcf_refs",
"(",
"data",
")",
":",
"variantcaller",
"=",
"tz",
".",
"get_in",
"(",
"(",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"variantcaller\"",
")",
",",
"data",
")",
"if",
"variantcaller",
":",
"data",
"[",
"\"vrn_file_orig\"",
"]",
"=",
"data",
"[",
"\"vrn_file\"",
"]",
"for",
"i",
",",
"sub",
"in",
"enumerate",
"(",
"data",
".",
"get",
"(",
"\"group_orig\"",
",",
"[",
"]",
")",
")",
":",
"sub_vrn",
"=",
"sub",
".",
"pop",
"(",
"\"vrn_file\"",
",",
"None",
")",
"if",
"sub_vrn",
":",
"sub",
"[",
"\"vrn_file_orig\"",
"]",
"=",
"sub_vrn",
"data",
"[",
"\"group_orig\"",
"]",
"[",
"i",
"]",
"=",
"sub",
"return",
"data"
] | Supply references to initial variantcalls if run in addition to batching. | [
"Supply",
"references",
"to",
"initial",
"variantcalls",
"if",
"run",
"in",
"addition",
"to",
"batching",
"."
] | python | train |
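The helper above reads a nested key path with toolz's get_in (imported as tz in bcbio). A minimal standalone sketch of that access pattern; the sample dict is made up for illustration.

# Standalone sketch of the nested-lookup pattern used above.
from toolz import get_in  # same helper the record imports as "tz"

data = {"config": {"algorithm": {"variantcaller": "gatk-haplotype"}}}  # made-up sample

# get_in returns None (or a supplied default) instead of raising when a key is missing.
print(get_in(("config", "algorithm", "variantcaller"), data))   # -> "gatk-haplotype"
print(get_in(("config", "algorithm", "svcaller"), data))        # -> None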
openstax/cnx-epub | cnxepub/epub.py | https://github.com/openstax/cnx-epub/blob/f648a309eff551b0a68a115a98ddf7858149a2ea/cnxepub/epub.py#L379-L413 | def to_file(package, directory):
"""Write the package to the given ``directory``.
Returns the OPF filename.
"""
opf_filepath = os.path.join(directory, package.name)
# Create the directory structure
for name in ('contents', 'resources',):
path = os.path.join(directory, name)
if not os.path.exists(path):
os.mkdir(path)
# Write the items to the filesystem
locations = {} # Used when rendering
for item in package:
if item.media_type == 'application/xhtml+xml':
base = os.path.join(directory, 'contents')
else:
base = os.path.join(directory, 'resources')
filename = item.name
filepath = os.path.join(base, filename)
locations[item] = os.path.relpath(filepath, directory)
with open(filepath, 'wb') as item_file:
item_file.write(item.data.read())
# Write the OPF
template = jinja2.Template(OPF_TEMPLATE,
trim_blocks=True, lstrip_blocks=True)
with open(opf_filepath, 'wb') as opf_file:
opf = template.render(package=package, locations=locations)
if not isinstance(opf, bytes):
opf = opf.encode('utf-8')
opf_file.write(opf)
return opf_filepath | [
"def",
"to_file",
"(",
"package",
",",
"directory",
")",
":",
"opf_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"package",
".",
"name",
")",
"# Create the directory structure",
"for",
"name",
"in",
"(",
"'contents'",
",",
"'resources'",
",",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"name",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"mkdir",
"(",
"path",
")",
"# Write the items to the filesystem",
"locations",
"=",
"{",
"}",
"# Used when rendering",
"for",
"item",
"in",
"package",
":",
"if",
"item",
".",
"media_type",
"==",
"'application/xhtml+xml'",
":",
"base",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"'contents'",
")",
"else",
":",
"base",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"'resources'",
")",
"filename",
"=",
"item",
".",
"name",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base",
",",
"filename",
")",
"locations",
"[",
"item",
"]",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"filepath",
",",
"directory",
")",
"with",
"open",
"(",
"filepath",
",",
"'wb'",
")",
"as",
"item_file",
":",
"item_file",
".",
"write",
"(",
"item",
".",
"data",
".",
"read",
"(",
")",
")",
"# Write the OPF",
"template",
"=",
"jinja2",
".",
"Template",
"(",
"OPF_TEMPLATE",
",",
"trim_blocks",
"=",
"True",
",",
"lstrip_blocks",
"=",
"True",
")",
"with",
"open",
"(",
"opf_filepath",
",",
"'wb'",
")",
"as",
"opf_file",
":",
"opf",
"=",
"template",
".",
"render",
"(",
"package",
"=",
"package",
",",
"locations",
"=",
"locations",
")",
"if",
"not",
"isinstance",
"(",
"opf",
",",
"bytes",
")",
":",
"opf",
"=",
"opf",
".",
"encode",
"(",
"'utf-8'",
")",
"opf_file",
".",
"write",
"(",
"opf",
")",
"return",
"opf_filepath"
] | Write the package to the given ``directory``.
Returns the OPF filename. | [
"Write",
"the",
"package",
"to",
"the",
"given",
"directory",
".",
"Returns",
"the",
"OPF",
"filename",
"."
] | python | train |
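The writer above renders the OPF file through a jinja2 Template with trim_blocks and lstrip_blocks. A minimal rendering sketch follows; the template string and values here are placeholders, not the real OPF_TEMPLATE.

# Minimal jinja2 rendering sketch mirroring the OPF step above.
import jinja2

TEMPLATE = "package: {{ package }}\n{% for item, path in locations.items() %}{{ item }} -> {{ path }}\n{% endfor %}"  # placeholder template

template = jinja2.Template(TEMPLATE, trim_blocks=True, lstrip_blocks=True)
rendered = template.render(package="example.opf",
                           locations={"chapter1.xhtml": "contents/chapter1.xhtml"})
print(rendered)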
Rockhopper-Technologies/enlighten | enlighten/_win_terminal.py | https://github.com/Rockhopper-Technologies/enlighten/blob/857855f940e6c1bb84d0be849b999a18fff5bf5a/enlighten/_win_terminal.py#L163-L172 | def color(self, code):
"""
When color is given as a number, apply that color to the content
While this is designed to support 256 color terminals, Windows will approximate
this with 16 colors
"""
def func(content=''):
return self._apply_color(u'38;5;%d' % code, content)
return func | [
"def",
"color",
"(",
"self",
",",
"code",
")",
":",
"def",
"func",
"(",
"content",
"=",
"''",
")",
":",
"return",
"self",
".",
"_apply_color",
"(",
"u'38;5;%d'",
"%",
"code",
",",
"content",
")",
"return",
"func"
] | When color is given as a number, apply that color to the content
While this is designed to support 256 color terminals, Windows will approximate
this with 16 colors | [
"When",
"color",
"is",
"given",
"as",
"a",
"number",
"apply",
"that",
"color",
"to",
"the",
"content",
"While",
"this",
"is",
"designed",
"to",
"support",
"256",
"color",
"terminals",
"Windows",
"will",
"approximate",
"this",
"with",
"16",
"colors"
] | python | train |
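The method above wraps content in a 256-colour SGR escape of the form 38;5;N. A plain-ANSI sketch of that escape, independent of the enlighten terminal class; the colour only renders on terminals that support the xterm 256-colour palette.

# Plain ANSI sketch of the 256-colour escape built by the method above.
def color256(code, content=""):
    # CSI 38;5;<n> m selects foreground colour n from the 256-colour palette.
    return "\x1b[38;5;{}m{}\x1b[0m".format(code, content)

print(color256(196, "error text"))   # 196 is a bright red in the xterm palette
print(color256(34, "ok text"))       # 34 is a green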
materialsproject/pymatgen | pymatgen/symmetry/analyzer.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/symmetry/analyzer.py#L1471-L1508 | def cluster_sites(mol, tol, give_only_index=False):
"""
Cluster sites based on distance and species type.
Args:
mol (Molecule): Molecule **with origin at center of mass**.
tol (float): Tolerance to use.
Returns:
(origin_site, clustered_sites): origin_site is a site at the center
of mass (None if there are no origin atoms). clustered_sites is a
dict of {(avg_dist, species_and_occu): [list of sites]}
"""
# Cluster works for dim > 2 data. We just add a dummy 0 for second
# coordinate.
dists = [[np.linalg.norm(site.coords), 0] for site in mol]
import scipy.cluster as spcluster
f = spcluster.hierarchy.fclusterdata(dists, tol, criterion='distance')
clustered_dists = defaultdict(list)
for i, site in enumerate(mol):
clustered_dists[f[i]].append(dists[i])
avg_dist = {label: np.mean(val) for label, val in clustered_dists.items()}
clustered_sites = defaultdict(list)
origin_site = None
for i, site in enumerate(mol):
if avg_dist[f[i]] < tol:
if give_only_index:
origin_site = i
else:
origin_site = site
else:
if give_only_index:
clustered_sites[
(avg_dist[f[i]], site.species)].append(i)
else:
clustered_sites[
(avg_dist[f[i]], site.species)].append(site)
return origin_site, clustered_sites | [
"def",
"cluster_sites",
"(",
"mol",
",",
"tol",
",",
"give_only_index",
"=",
"False",
")",
":",
"# Cluster works for dim > 2 data. We just add a dummy 0 for second",
"# coordinate.",
"dists",
"=",
"[",
"[",
"np",
".",
"linalg",
".",
"norm",
"(",
"site",
".",
"coords",
")",
",",
"0",
"]",
"for",
"site",
"in",
"mol",
"]",
"import",
"scipy",
".",
"cluster",
"as",
"spcluster",
"f",
"=",
"spcluster",
".",
"hierarchy",
".",
"fclusterdata",
"(",
"dists",
",",
"tol",
",",
"criterion",
"=",
"'distance'",
")",
"clustered_dists",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"i",
",",
"site",
"in",
"enumerate",
"(",
"mol",
")",
":",
"clustered_dists",
"[",
"f",
"[",
"i",
"]",
"]",
".",
"append",
"(",
"dists",
"[",
"i",
"]",
")",
"avg_dist",
"=",
"{",
"label",
":",
"np",
".",
"mean",
"(",
"val",
")",
"for",
"label",
",",
"val",
"in",
"clustered_dists",
".",
"items",
"(",
")",
"}",
"clustered_sites",
"=",
"defaultdict",
"(",
"list",
")",
"origin_site",
"=",
"None",
"for",
"i",
",",
"site",
"in",
"enumerate",
"(",
"mol",
")",
":",
"if",
"avg_dist",
"[",
"f",
"[",
"i",
"]",
"]",
"<",
"tol",
":",
"if",
"give_only_index",
":",
"origin_site",
"=",
"i",
"else",
":",
"origin_site",
"=",
"site",
"else",
":",
"if",
"give_only_index",
":",
"clustered_sites",
"[",
"(",
"avg_dist",
"[",
"f",
"[",
"i",
"]",
"]",
",",
"site",
".",
"species",
")",
"]",
".",
"append",
"(",
"i",
")",
"else",
":",
"clustered_sites",
"[",
"(",
"avg_dist",
"[",
"f",
"[",
"i",
"]",
"]",
",",
"site",
".",
"species",
")",
"]",
".",
"append",
"(",
"site",
")",
"return",
"origin_site",
",",
"clustered_sites"
] | Cluster sites based on distance and species type.
Args:
mol (Molecule): Molecule **with origin at center of mass**.
tol (float): Tolerance to use.
Returns:
(origin_site, clustered_sites): origin_site is a site at the center
of mass (None if there are no origin atoms). clustered_sites is a
dict of {(avg_dist, species_and_occu): [list of sites]} | [
"Cluster",
"sites",
"based",
"on",
"distance",
"and",
"species",
"type",
"."
] | python | train |
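cluster_sites above groups sites by radial distance with scipy's fclusterdata, padding a dummy second coordinate. A standalone sketch of that call on invented distances:

# Sketch of the distance clustering used above (sample distances are invented).
import numpy as np
import scipy.cluster as spcluster

dists = np.array([[0.01, 0], [0.02, 0], [1.98, 0], [2.02, 0], [3.50, 0]])
labels = spcluster.hierarchy.fclusterdata(dists, 0.3, criterion='distance')
print(labels)  # e.g. the first two rows share one label, the next two another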
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/summary.py | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/summary.py#L283-L293 | def _subtract(summary, o):
"""Remove object o from the summary by subtracting it's size."""
found = False
row = [_repr(o), 1, _getsizeof(o)]
for r in summary:
if r[0] == row[0]:
(r[1], r[2]) = (r[1] - row[1], r[2] - row[2])
found = True
if not found:
summary.append([row[0], -row[1], -row[2]])
return summary | [
"def",
"_subtract",
"(",
"summary",
",",
"o",
")",
":",
"found",
"=",
"False",
"row",
"=",
"[",
"_repr",
"(",
"o",
")",
",",
"1",
",",
"_getsizeof",
"(",
"o",
")",
"]",
"for",
"r",
"in",
"summary",
":",
"if",
"r",
"[",
"0",
"]",
"==",
"row",
"[",
"0",
"]",
":",
"(",
"r",
"[",
"1",
"]",
",",
"r",
"[",
"2",
"]",
")",
"=",
"(",
"r",
"[",
"1",
"]",
"-",
"row",
"[",
"1",
"]",
",",
"r",
"[",
"2",
"]",
"-",
"row",
"[",
"2",
"]",
")",
"found",
"=",
"True",
"if",
"not",
"found",
":",
"summary",
".",
"append",
"(",
"[",
"row",
"[",
"0",
"]",
",",
"-",
"row",
"[",
"1",
"]",
",",
"-",
"row",
"[",
"2",
"]",
"]",
")",
"return",
"summary"
] | Remove object o from the summary by subtracting its size. | [
"Remove",
"object",
"o",
"from",
"the",
"summary",
"by",
"subtracting",
"it",
"s",
"size",
"."
] | python | train |
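A self-contained illustration of the row arithmetic in _subtract above, using hand-built summary rows of [representation, count, size]; _repr and _getsizeof are module-internal, so the row for the removed object is written out by hand here.

# Standalone illustration of subtracting one object's row from a summary.
summary = [["str", 10, 540], ["int", 4, 112]]
row = ["str", 1, 54]  # hand-built stand-in for [_repr(o), 1, _getsizeof(o)]

found = False
for r in summary:
    if r[0] == row[0]:
        r[1], r[2] = r[1] - row[1], r[2] - row[2]
        found = True
if not found:
    summary.append([row[0], -row[1], -row[2]])

print(summary)  # [['str', 9, 486], ['int', 4, 112]]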
Scoppio/RagnarokEngine3 | RagnarokEngine3/RE3.py | https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/RagnarokEngine3/RE3.py#L3227-L3231 | def update(self, milliseconds):
"""Updates all of the objects in our world."""
self.__sort_up()
for obj in self.__up_objects:
obj.update(milliseconds) | [
"def",
"update",
"(",
"self",
",",
"milliseconds",
")",
":",
"self",
".",
"__sort_up",
"(",
")",
"for",
"obj",
"in",
"self",
".",
"__up_objects",
":",
"obj",
".",
"update",
"(",
"milliseconds",
")"
] | Updates all of the objects in our world. | [
"Updates",
"all",
"of",
"the",
"objects",
"in",
"our",
"world",
"."
] | python | train |
quantopian/zipline | zipline/data/minute_bars.py | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L1098-L1149 | def get_value(self, sid, dt, field):
"""
Retrieve the pricing info for the given sid, dt, and field.
Parameters
----------
sid : int
Asset identifier.
dt : datetime-like
The datetime at which the trade occurred.
field : string
The type of pricing data to retrieve.
('open', 'high', 'low', 'close', 'volume')
Returns
-------
out : float|int
The market data for the given sid, dt, and field coordinates.
For OHLC:
Returns a float if a trade occurred at the given dt.
If no trade occurred, a np.nan is returned.
For volume:
Returns the integer value of the volume.
(A volume of 0 signifies no trades for the given dt.)
"""
if self._last_get_value_dt_value == dt.value:
minute_pos = self._last_get_value_dt_position
else:
try:
minute_pos = self._find_position_of_minute(dt)
except ValueError:
raise NoDataOnDate()
self._last_get_value_dt_value = dt.value
self._last_get_value_dt_position = minute_pos
try:
value = self._open_minute_file(field, sid)[minute_pos]
except IndexError:
value = 0
if value == 0:
if field == 'volume':
return 0
else:
return np.nan
if field != 'volume':
value *= self._ohlc_ratio_inverse_for_sid(sid)
return value | [
"def",
"get_value",
"(",
"self",
",",
"sid",
",",
"dt",
",",
"field",
")",
":",
"if",
"self",
".",
"_last_get_value_dt_value",
"==",
"dt",
".",
"value",
":",
"minute_pos",
"=",
"self",
".",
"_last_get_value_dt_position",
"else",
":",
"try",
":",
"minute_pos",
"=",
"self",
".",
"_find_position_of_minute",
"(",
"dt",
")",
"except",
"ValueError",
":",
"raise",
"NoDataOnDate",
"(",
")",
"self",
".",
"_last_get_value_dt_value",
"=",
"dt",
".",
"value",
"self",
".",
"_last_get_value_dt_position",
"=",
"minute_pos",
"try",
":",
"value",
"=",
"self",
".",
"_open_minute_file",
"(",
"field",
",",
"sid",
")",
"[",
"minute_pos",
"]",
"except",
"IndexError",
":",
"value",
"=",
"0",
"if",
"value",
"==",
"0",
":",
"if",
"field",
"==",
"'volume'",
":",
"return",
"0",
"else",
":",
"return",
"np",
".",
"nan",
"if",
"field",
"!=",
"'volume'",
":",
"value",
"*=",
"self",
".",
"_ohlc_ratio_inverse_for_sid",
"(",
"sid",
")",
"return",
"value"
] | Retrieve the pricing info for the given sid, dt, and field.
Parameters
----------
sid : int
Asset identifier.
dt : datetime-like
The datetime at which the trade occurred.
field : string
The type of pricing data to retrieve.
('open', 'high', 'low', 'close', 'volume')
Returns
-------
out : float|int
The market data for the given sid, dt, and field coordinates.
For OHLC:
Returns a float if a trade occurred at the given dt.
If no trade occurred, a np.nan is returned.
For volume:
Returns the integer value of the volume.
(A volume of 0 signifies no trades for the given dt.) | [
"Retrieve",
"the",
"pricing",
"info",
"for",
"the",
"given",
"sid",
"dt",
"and",
"field",
"."
] | python | train |
digidotcom/python-streamexpect | streamexpect.py | https://github.com/digidotcom/python-streamexpect/blob/9ab894506ffd679b37230e935158ff3b0aa170ab/streamexpect.py#L193-L209 | def search(self, buf):
"""Search the provided buffer for matching text.
Search the provided buffer for matching text. If the *match* is found,
returns a :class:`SequenceMatch` object, otherwise returns ``None``.
:param buf: Buffer to search for a match.
:return: :class:`SequenceMatch` if matched, None if no match was found.
"""
self._check_type(buf)
normalized = unicodedata.normalize(self.FORM, buf)
idx = normalized.find(self._text)
if idx < 0:
return None
start = idx
end = idx + len(self._text)
return SequenceMatch(self, normalized[start:end], start, end) | [
"def",
"search",
"(",
"self",
",",
"buf",
")",
":",
"self",
".",
"_check_type",
"(",
"buf",
")",
"normalized",
"=",
"unicodedata",
".",
"normalize",
"(",
"self",
".",
"FORM",
",",
"buf",
")",
"idx",
"=",
"normalized",
".",
"find",
"(",
"self",
".",
"_text",
")",
"if",
"idx",
"<",
"0",
":",
"return",
"None",
"start",
"=",
"idx",
"end",
"=",
"idx",
"+",
"len",
"(",
"self",
".",
"_text",
")",
"return",
"SequenceMatch",
"(",
"self",
",",
"normalized",
"[",
"start",
":",
"end",
"]",
",",
"start",
",",
"end",
")"
] | Search the provided buffer for matching text.
Search the provided buffer for matching text. If the *match* is found,
returns a :class:`SequenceMatch` object, otherwise returns ``None``.
:param buf: Buffer to search for a match.
:return: :class:`SequenceMatch` if matched, None if no match was found. | [
"Search",
"the",
"provided",
"buffer",
"for",
"matching",
"text",
"."
] | python | train |
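The searcher above normalises the buffer with unicodedata before a plain substring find. A stdlib-only sketch of that normalise-then-find step; the normalisation form and sample strings are assumptions for illustration.

# Normalise-then-find sketch mirroring the search above (stdlib only).
import unicodedata

FORM = "NFC"  # assumed normalisation form; the class constant is not shown in the record

def find_span(buf, text):
    normalized = unicodedata.normalize(FORM, buf)
    idx = normalized.find(text)
    if idx < 0:
        return None
    return idx, idx + len(text)   # (start, end) offsets into the normalised buffer

print(find_span(u"login: admin\r\n", u"admin"))  # -> (7, 12)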
blockstack/virtualchain | virtualchain/lib/indexer.py | https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/indexer.py#L1681-L1745 | def sqlite3_backup(src_path, dest_path):
"""
Back up a sqlite3 database, while ensuring
that no ongoing queries are being executed.
Return True on success
Return False on error.
"""
# find sqlite3
sqlite3_path = sqlite3_find_tool()
if sqlite3_path is None:
log.error("Failed to find sqlite3 tool")
return False
sqlite3_cmd = [sqlite3_path, '{}'.format(src_path), '.backup "{}"'.format(dest_path)]
rc = None
backoff = 1.0
out = None
err = None
try:
while True:
log.debug("{}".format(" ".join(sqlite3_cmd)))
p = subprocess.Popen(sqlite3_cmd, shell=False, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
rc = p.wait()
if rc != 0:
if "database is locked" in out.lower() or "database is locked" in err.lower():
# try again
log.error("Database {} is locked; trying again in {} seconds".format(src_path, backoff))
time.sleep(backoff)
backoff += 2 * backoff + random.random() * random.randint(0, int(backoff))
continue
elif 'is not a database' in out.lower() or 'is not a database' in err.lower():
# not a valid sqlite3 file
log.error("File {} is not a SQLite database".format(src_path))
return False
else:
# some other failure. Try again
log.error('Failed to back up with "{}". Error log follows.\n{}'.format(" ".join(sqlite3_cmd), err))
continue
else:
break
except Exception, e:
log.exception(e)
return False
if not os.WIFEXITED(rc):
# bad exit
# failed for some other reason
log.error("Backup failed: out='{}', err='{}', rc={}".format(out, err, rc))
return False
if os.WEXITSTATUS(rc) != 0:
# bad exit
log.error("Backup failed: out='{}', err='{}', exit={}".format(out, err, os.WEXITSTATUS(rc)))
return False
return True | [
"def",
"sqlite3_backup",
"(",
"src_path",
",",
"dest_path",
")",
":",
"# find sqlite3",
"sqlite3_path",
"=",
"sqlite3_find_tool",
"(",
")",
"if",
"sqlite3_path",
"is",
"None",
":",
"log",
".",
"error",
"(",
"\"Failed to find sqlite3 tool\"",
")",
"return",
"False",
"sqlite3_cmd",
"=",
"[",
"sqlite3_path",
",",
"'{}'",
".",
"format",
"(",
"src_path",
")",
",",
"'.backup \"{}\"'",
".",
"format",
"(",
"dest_path",
")",
"]",
"rc",
"=",
"None",
"backoff",
"=",
"1.0",
"out",
"=",
"None",
"err",
"=",
"None",
"try",
":",
"while",
"True",
":",
"log",
".",
"debug",
"(",
"\"{}\"",
".",
"format",
"(",
"\" \"",
".",
"join",
"(",
"sqlite3_cmd",
")",
")",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"sqlite3_cmd",
",",
"shell",
"=",
"False",
",",
"close_fds",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"out",
",",
"err",
"=",
"p",
".",
"communicate",
"(",
")",
"rc",
"=",
"p",
".",
"wait",
"(",
")",
"if",
"rc",
"!=",
"0",
":",
"if",
"\"database is locked\"",
"in",
"out",
".",
"lower",
"(",
")",
"or",
"\"database is locked\"",
"in",
"err",
".",
"lower",
"(",
")",
":",
"# try again",
"log",
".",
"error",
"(",
"\"Database {} is locked; trying again in {} seconds\"",
".",
"format",
"(",
"src_path",
",",
"backoff",
")",
")",
"time",
".",
"sleep",
"(",
"backoff",
")",
"backoff",
"+=",
"2",
"*",
"backoff",
"+",
"random",
".",
"random",
"(",
")",
"*",
"random",
".",
"randint",
"(",
"0",
",",
"int",
"(",
"backoff",
")",
")",
"continue",
"elif",
"'is not a database'",
"in",
"out",
".",
"lower",
"(",
")",
"or",
"'is not a database'",
"in",
"err",
".",
"lower",
"(",
")",
":",
"# not a valid sqlite3 file",
"log",
".",
"error",
"(",
"\"File {} is not a SQLite database\"",
".",
"format",
"(",
"src_path",
")",
")",
"return",
"False",
"else",
":",
"# some other failure. Try again",
"log",
".",
"error",
"(",
"'Failed to back up with \"{}\". Error log follows.\\n{}'",
".",
"format",
"(",
"\" \"",
".",
"join",
"(",
"sqlite3_cmd",
")",
",",
"err",
")",
")",
"continue",
"else",
":",
"break",
"except",
"Exception",
",",
"e",
":",
"log",
".",
"exception",
"(",
"e",
")",
"return",
"False",
"if",
"not",
"os",
".",
"WIFEXITED",
"(",
"rc",
")",
":",
"# bad exit ",
"# failed for some other reason",
"log",
".",
"error",
"(",
"\"Backup failed: out='{}', err='{}', rc={}\"",
".",
"format",
"(",
"out",
",",
"err",
",",
"rc",
")",
")",
"return",
"False",
"if",
"os",
".",
"WEXITSTATUS",
"(",
"rc",
")",
"!=",
"0",
":",
"# bad exit",
"log",
".",
"error",
"(",
"\"Backup failed: out='{}', err='{}', exit={}\"",
".",
"format",
"(",
"out",
",",
"err",
",",
"os",
".",
"WEXITSTATUS",
"(",
"rc",
")",
")",
")",
"return",
"False",
"return",
"True"
] | Back up a sqlite3 database, while ensuring
that no ongoing queries are being executed.
Return True on success
Return False on error. | [
"Back",
"up",
"a",
"sqlite3",
"database",
"while",
"ensuring",
"that",
"no",
"ongoing",
"queries",
"are",
"being",
"executed",
"."
] | python | train |
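The record above shells out to the sqlite3 command-line tool and retries on lock errors. For comparison, a minimal online-backup sketch using only the Python standard library (Connection.backup, Python 3.7+); this is an alternative approach, not what virtualchain itself does.

# Stdlib-only backup sketch (alternative to shelling out to the sqlite3 CLI).
import sqlite3

def backup_db(src_path, dest_path):
    src = sqlite3.connect(src_path)
    dest = sqlite3.connect(dest_path)
    with dest:
        src.backup(dest)   # online backup; callers still need their own retry policy
    src.close()
    dest.close()
    return True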
pandas-dev/pandas | pandas/core/dtypes/common.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L1980-L2002 | def _validate_date_like_dtype(dtype):
"""
Check whether the dtype is a date-like dtype. Raises an error if invalid.
Parameters
----------
dtype : dtype, type
The dtype to check.
Raises
------
TypeError : The dtype could not be cast to a date-like dtype.
ValueError : The dtype is an illegal date-like dtype (e.g. the
frequency provided is too specific)
"""
try:
typ = np.datetime_data(dtype)[0]
except ValueError as e:
raise TypeError('{error}'.format(error=e))
if typ != 'generic' and typ != 'ns':
msg = '{name!r} is too specific of a frequency, try passing {type!r}'
raise ValueError(msg.format(name=dtype.name, type=dtype.type.__name__)) | [
"def",
"_validate_date_like_dtype",
"(",
"dtype",
")",
":",
"try",
":",
"typ",
"=",
"np",
".",
"datetime_data",
"(",
"dtype",
")",
"[",
"0",
"]",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"TypeError",
"(",
"'{error}'",
".",
"format",
"(",
"error",
"=",
"e",
")",
")",
"if",
"typ",
"!=",
"'generic'",
"and",
"typ",
"!=",
"'ns'",
":",
"msg",
"=",
"'{name!r} is too specific of a frequency, try passing {type!r}'",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
"name",
"=",
"dtype",
".",
"name",
",",
"type",
"=",
"dtype",
".",
"type",
".",
"__name__",
")",
")"
] | Check whether the dtype is a date-like dtype. Raises an error if invalid.
Parameters
----------
dtype : dtype, type
The dtype to check.
Raises
------
TypeError : The dtype could not be cast to a date-like dtype.
ValueError : The dtype is an illegal date-like dtype (e.g. the
frequency provided is too specific) | [
"Check",
"whether",
"the",
"dtype",
"is",
"a",
"date",
"-",
"like",
"dtype",
".",
"Raises",
"an",
"error",
"if",
"invalid",
"."
] | python | train |
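The validator above relies on np.datetime_data, which reports the (unit, count) frequency of a datetime64 dtype. A short illustration of the values it returns for dtypes that pass and fail the check:

# What np.datetime_data reports for a few dtypes (illustration only).
import numpy as np

print(np.datetime_data(np.dtype("datetime64")))        # ('generic', 1) - unspecified unit
print(np.datetime_data(np.dtype("datetime64[ns]")))    # ('ns', 1)     - accepted above
print(np.datetime_data(np.dtype("datetime64[D]")))     # ('D', 1)      - too specific, rejected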
nmdp-bioinformatics/SeqAnn | seqann/blast_cmd.py | https://github.com/nmdp-bioinformatics/SeqAnn/blob/5ce91559b0a4fbe4fb7758e034eb258202632463/seqann/blast_cmd.py#L40-L184 | def blastn(sequences, locus, nseqs, kir=False,
verbose=False, refdata=None, evalue=10):
"""
Gets a list of alleles that are the most similar to the input sequence
:param sequences: The input sequence record.
:type sequences: SeqRecord
:param locus: The gene locus associated with the sequence.
:type locus: ``str``
:param nseqs: The number of similar reference sequences to return.
:type nseqs: ``int``
:param evalue: The evalue to use (default = 10)
:type evalue: ``int``
:param kir: Run with KIR or not
:type kir: ``bool``
:param verbose: Run in verbose mode
:type verbose: ``bool``
:param refdata: An object with reference data
:type refdata: :ref:`ref`
:rtype: :ref:`bl`
Example usage:
>>> from Bio.Seq import Seq
>>> from seqann.blast_cmd import blastn
>>> sequence = Seq('AGAGACTCTCCCGAGGATTTCGTGTACCAGTTTAAGGCCATGTGCTACTTCACC')
>>> blast = blastn(sequence, locus, nseqs)
"""
logger = logging.getLogger("Logger." + __name__)
if not refdata:
refdata = ReferenceData()
file_id = str(randomid())
input_fasta = file_id + ".fasta"
output_xml = file_id + ".xml"
SeqIO.write(sequences, input_fasta, "fasta")
blastn_cline = NcbiblastnCommandline(query=input_fasta,
db=refdata.blastdb,
evalue=evalue,
outfmt=5,
reward=1,
penalty=-3,
gapopen=5,
gapextend=2,
dust='yes',
out=output_xml)
stdout, stderr = blastn_cline()
loc = locus
if not kir:
loc = locus.split("-")[1]
blast_qresult = SearchIO.read(output_xml, 'blast-xml')
# Delete files
cleanup(file_id)
# TODO: Use logging
if len(blast_qresult.hits) == 0:
if verbose:
logger.error("Failed blast! No hits!")
logger.error(stderr)
return Blast(failed=True)
alleles = []
full_sequences = []
load_blast = 70 if nseqs < 70 else nseqs
l = len(blast_qresult.hits) if load_blast > len(blast_qresult.hits) else load_blast
# TODO: update all blast files to have HLA-
if locus in refdata.hla_loci and not kir:
alleles = [blast_qresult[i].id.split("_")[0] for i in range(0, l)
if blast_qresult[i].id.split("*")[0] == locus or "HLA-" + blast_qresult[i].id.split("*")[0] == locus]
alleles = ["HLA-" + a if not has_hla(a) else a for a in alleles]
if kir:
alleles = [blast_qresult[i].id.split("_")[0] for i in range(0, l)
if blast_qresult[i].id.split("*")[0] == locus]
if verbose:
logger.info("Blast alleles: " + ",".join(alleles))
# TODO: sort alleles by number of features they contain and evalue
# Use biosql db if provided
# otherwise use IMGT dat file
final_seqs = []
rmax = refdata.structure_max[locus]
if refdata.server_avail:
db = refdata.server[refdata.dbversion + "_" + loc]
full_sequences = []
for n in alleles:
if n in refdata.hla_names:
try:
seq = db.lookup(name=n)
full_sequences.append(seq)
except:
logger.error("Allele doesnt exist in IMGT BioSQL DB!! "
+ n)
else:
if verbose:
logger.info("Getting sequences from HLA.dat file")
full_sequences = [refdata.hlaref[a] for a in alleles
if a in refdata.hlaref]
for s in full_sequences:
s.name = s.description.split(",")[0]
i = 1
last_seq = []
max_f = 0
added_max = False
full_feats = False
for s in full_sequences:
fs = len([f.type for f in s.features
if not f.type in ['source', 'CDS']])
if i <= nseqs:
final_seqs.append(s)
max_f = fs if fs > max_f else max_f
if i <= nseqs and max_f < rmax:
full_feats = True
if(i >= nseqs and fs == max_f and not added_max):
if len(last_seq) >= 10:
last_seq.insert(3, s)
else:
last_seq.append(s)
added_max = True
if(fs > max_f and len(last_seq) < 10
and i >= nseqs and len(last_seq) < 10):
last_seq.append(s)
i += 1
if full_feats:
for s in last_seq:
final_seqs.append(s)
# Build Blast object
blast_o = Blast(match_seqs=final_seqs, alleles=alleles)
return blast_o | [
"def",
"blastn",
"(",
"sequences",
",",
"locus",
",",
"nseqs",
",",
"kir",
"=",
"False",
",",
"verbose",
"=",
"False",
",",
"refdata",
"=",
"None",
",",
"evalue",
"=",
"10",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"\"Logger.\"",
"+",
"__name__",
")",
"if",
"not",
"refdata",
":",
"refdata",
"=",
"ReferenceData",
"(",
")",
"file_id",
"=",
"str",
"(",
"randomid",
"(",
")",
")",
"input_fasta",
"=",
"file_id",
"+",
"\".fasta\"",
"output_xml",
"=",
"file_id",
"+",
"\".xml\"",
"SeqIO",
".",
"write",
"(",
"sequences",
",",
"input_fasta",
",",
"\"fasta\"",
")",
"blastn_cline",
"=",
"NcbiblastnCommandline",
"(",
"query",
"=",
"input_fasta",
",",
"db",
"=",
"refdata",
".",
"blastdb",
",",
"evalue",
"=",
"evalue",
",",
"outfmt",
"=",
"5",
",",
"reward",
"=",
"1",
",",
"penalty",
"=",
"-",
"3",
",",
"gapopen",
"=",
"5",
",",
"gapextend",
"=",
"2",
",",
"dust",
"=",
"'yes'",
",",
"out",
"=",
"output_xml",
")",
"stdout",
",",
"stderr",
"=",
"blastn_cline",
"(",
")",
"loc",
"=",
"locus",
"if",
"not",
"kir",
":",
"loc",
"=",
"locus",
".",
"split",
"(",
"\"-\"",
")",
"[",
"1",
"]",
"blast_qresult",
"=",
"SearchIO",
".",
"read",
"(",
"output_xml",
",",
"'blast-xml'",
")",
"# Delete files",
"cleanup",
"(",
"file_id",
")",
"# TODO: Use logging",
"if",
"len",
"(",
"blast_qresult",
".",
"hits",
")",
"==",
"0",
":",
"if",
"verbose",
":",
"logger",
".",
"error",
"(",
"\"Failed blast! No hits!\"",
")",
"logger",
".",
"error",
"(",
"stderr",
")",
"return",
"Blast",
"(",
"failed",
"=",
"True",
")",
"alleles",
"=",
"[",
"]",
"full_sequences",
"=",
"[",
"]",
"load_blast",
"=",
"70",
"if",
"nseqs",
"<",
"70",
"else",
"nseqs",
"l",
"=",
"len",
"(",
"blast_qresult",
".",
"hits",
")",
"if",
"load_blast",
">",
"len",
"(",
"blast_qresult",
".",
"hits",
")",
"else",
"load_blast",
"# TODO: update all blast files to have HLA-",
"if",
"locus",
"in",
"refdata",
".",
"hla_loci",
"and",
"not",
"kir",
":",
"alleles",
"=",
"[",
"blast_qresult",
"[",
"i",
"]",
".",
"id",
".",
"split",
"(",
"\"_\"",
")",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"l",
")",
"if",
"blast_qresult",
"[",
"i",
"]",
".",
"id",
".",
"split",
"(",
"\"*\"",
")",
"[",
"0",
"]",
"==",
"locus",
"or",
"\"HLA-\"",
"+",
"blast_qresult",
"[",
"i",
"]",
".",
"id",
".",
"split",
"(",
"\"*\"",
")",
"[",
"0",
"]",
"==",
"locus",
"]",
"alleles",
"=",
"[",
"\"HLA-\"",
"+",
"a",
"if",
"not",
"has_hla",
"(",
"a",
")",
"else",
"a",
"for",
"a",
"in",
"alleles",
"]",
"if",
"kir",
":",
"alleles",
"=",
"[",
"blast_qresult",
"[",
"i",
"]",
".",
"id",
".",
"split",
"(",
"\"_\"",
")",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"l",
")",
"if",
"blast_qresult",
"[",
"i",
"]",
".",
"id",
".",
"split",
"(",
"\"*\"",
")",
"[",
"0",
"]",
"==",
"locus",
"]",
"if",
"verbose",
":",
"logger",
".",
"info",
"(",
"\"Blast alleles: \"",
"+",
"\",\"",
".",
"join",
"(",
"alleles",
")",
")",
"# TODO: sort alleles by number of features they contain and evalue",
"# Use biosql db if provided",
"# otherwise use IMGT dat file",
"final_seqs",
"=",
"[",
"]",
"rmax",
"=",
"refdata",
".",
"structure_max",
"[",
"locus",
"]",
"if",
"refdata",
".",
"server_avail",
":",
"db",
"=",
"refdata",
".",
"server",
"[",
"refdata",
".",
"dbversion",
"+",
"\"_\"",
"+",
"loc",
"]",
"full_sequences",
"=",
"[",
"]",
"for",
"n",
"in",
"alleles",
":",
"if",
"n",
"in",
"refdata",
".",
"hla_names",
":",
"try",
":",
"seq",
"=",
"db",
".",
"lookup",
"(",
"name",
"=",
"n",
")",
"full_sequences",
".",
"append",
"(",
"seq",
")",
"except",
":",
"logger",
".",
"error",
"(",
"\"Allele doesnt exist in IMGT BioSQL DB!! \"",
"+",
"n",
")",
"else",
":",
"if",
"verbose",
":",
"logger",
".",
"info",
"(",
"\"Getting sequences from HLA.dat file\"",
")",
"full_sequences",
"=",
"[",
"refdata",
".",
"hlaref",
"[",
"a",
"]",
"for",
"a",
"in",
"alleles",
"if",
"a",
"in",
"refdata",
".",
"hlaref",
"]",
"for",
"s",
"in",
"full_sequences",
":",
"s",
".",
"name",
"=",
"s",
".",
"description",
".",
"split",
"(",
"\",\"",
")",
"[",
"0",
"]",
"i",
"=",
"1",
"last_seq",
"=",
"[",
"]",
"max_f",
"=",
"0",
"added_max",
"=",
"False",
"full_feats",
"=",
"False",
"for",
"s",
"in",
"full_sequences",
":",
"fs",
"=",
"len",
"(",
"[",
"f",
".",
"type",
"for",
"f",
"in",
"s",
".",
"features",
"if",
"not",
"f",
".",
"type",
"in",
"[",
"'source'",
",",
"'CDS'",
"]",
"]",
")",
"if",
"i",
"<=",
"nseqs",
":",
"final_seqs",
".",
"append",
"(",
"s",
")",
"max_f",
"=",
"fs",
"if",
"fs",
">",
"max_f",
"else",
"max_f",
"if",
"i",
"<=",
"nseqs",
"and",
"max_f",
"<",
"rmax",
":",
"full_feats",
"=",
"True",
"if",
"(",
"i",
">=",
"nseqs",
"and",
"fs",
"==",
"max_f",
"and",
"not",
"added_max",
")",
":",
"if",
"len",
"(",
"last_seq",
")",
">=",
"10",
":",
"last_seq",
".",
"insert",
"(",
"3",
",",
"s",
")",
"else",
":",
"last_seq",
".",
"append",
"(",
"s",
")",
"added_max",
"=",
"True",
"if",
"(",
"fs",
">",
"max_f",
"and",
"len",
"(",
"last_seq",
")",
"<",
"10",
"and",
"i",
">=",
"nseqs",
"and",
"len",
"(",
"last_seq",
")",
"<",
"10",
")",
":",
"last_seq",
".",
"append",
"(",
"s",
")",
"i",
"+=",
"1",
"if",
"full_feats",
":",
"for",
"s",
"in",
"last_seq",
":",
"final_seqs",
".",
"append",
"(",
"s",
")",
"# Build Blast object",
"blast_o",
"=",
"Blast",
"(",
"match_seqs",
"=",
"final_seqs",
",",
"alleles",
"=",
"alleles",
")",
"return",
"blast_o"
] | Gets a list of alleles that are the most similar to the input sequence
:param sequences: The input sequence record.
:type sequences: SeqRecord
:param locus: The gene locus associated with the sequence.
:type locus: ``str``
:param nseqs: The number of similar reference sequences to return.
:type nseqs: ``int``
:param evalue: The evalue to use (default = 10)
:type evalue: ``int``
:param kir: Run with KIR or not
:type kir: ``bool``
:param verbose: Run in verbose mode
:type verbose: ``bool``
:param refdata: An object with reference data
:type refdata: :ref:`ref`
:rtype: :ref:`bl`
Example usage:
>>> from Bio.Seq import Seq
>>> from seqann.blast_cmd import blastn
>>> sequence = Seq('AGAGACTCTCCCGAGGATTTCGTGTACCAGTTTAAGGCCATGTGCTACTTCACC')
>>> blast = blastn(sequence, locus, nseqs) | [
"Gets",
"the",
"a",
"list",
"of",
"alleles",
"that",
"are",
"the",
"most",
"similar",
"to",
"the",
"input",
"sequence"
] | python | train |
FutunnOpen/futuquant | futuquant/common/open_context_base.py | https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/common/open_context_base.py#L242-L286 | def get_global_state(self):
"""
Get the global state.
:return: (ret, data)
ret == RET_OK: data is a dict containing the global state, with the meanings below
ret != RET_OK: data is an error message string
===================== =========== ==============================================================
key value type description
===================== =========== ==============================================================
market_sz str Shenzhen market state, see MarketState
market_us str US market state, see MarketState
market_sh str Shanghai market state, see MarketState
market_hk str Hong Kong market state, see MarketState
market_future str Hong Kong futures market state, see MarketState
server_ver str FutuOpenD version number
trd_logined str '1': logged in to the trade server, '0': not logged in to the trade server
qot_logined str '1': logged in to the quote server, '0': not logged in to the quote server
timestamp str current timestamp of the Futu backend server (seconds)
local_timestamp double current timestamp of the machine running FutuOpenD (
===================== =========== ==============================================================
:example:
.. code:: python
from futuquant import *
quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
print(quote_ctx.get_global_state())
quote_ctx.close()
"""
query_processor = self._get_sync_query_processor(
GlobalStateQuery.pack_req, GlobalStateQuery.unpack_rsp)
kargs = {
'user_id': self.get_login_user_id(),
'conn_id': self.get_sync_conn_id(),
}
ret_code, msg, state_dict = query_processor(**kargs)
if ret_code != RET_OK:
return ret_code, msg
return RET_OK, state_dict | [
"def",
"get_global_state",
"(",
"self",
")",
":",
"query_processor",
"=",
"self",
".",
"_get_sync_query_processor",
"(",
"GlobalStateQuery",
".",
"pack_req",
",",
"GlobalStateQuery",
".",
"unpack_rsp",
")",
"kargs",
"=",
"{",
"'user_id'",
":",
"self",
".",
"get_login_user_id",
"(",
")",
",",
"'conn_id'",
":",
"self",
".",
"get_sync_conn_id",
"(",
")",
",",
"}",
"ret_code",
",",
"msg",
",",
"state_dict",
"=",
"query_processor",
"(",
"*",
"*",
"kargs",
")",
"if",
"ret_code",
"!=",
"RET_OK",
":",
"return",
"ret_code",
",",
"msg",
"return",
"RET_OK",
",",
"state_dict"
] | Get the global state.
:return: (ret, data)
ret == RET_OK: data is a dict containing the global state, with the meanings below
ret != RET_OK: data is an error message string
===================== =========== ==============================================================
key value type description
===================== =========== ==============================================================
market_sz str Shenzhen market state, see MarketState
market_us str US market state, see MarketState
market_sh str Shanghai market state, see MarketState
market_hk str Hong Kong market state, see MarketState
market_future str Hong Kong futures market state, see MarketState
server_ver str FutuOpenD version number
trd_logined str '1': logged in to the trade server, '0': not logged in to the trade server
qot_logined str '1': logged in to the quote server, '0': not logged in to the quote server
timestamp str current timestamp of the Futu backend server (seconds)
local_timestamp double current timestamp of the machine running FutuOpenD (
===================== =========== ==============================================================
:example:
.. code:: python
from futuquant import *
quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
print(quote_ctx.get_global_state())
quote_ctx.close() | [
"获取全局状态"
] | python | train |
hawkular/hawkular-client-python | hawkular/metrics.py | https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L176-L212 | def query_metric_stats(self, metric_type, metric_id=None, start=None, end=None, bucketDuration=None, **query_options):
"""
Query for metric aggregates from the server. This is called buckets in the Hawkular-Metrics documentation.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id or None for tags matching only
:param start: Milliseconds since epoch or datetime instance
:param end: Milliseconds since epoch or datetime instance
:param bucketDuration: The timedelta or duration of buckets. Can be a string representation or a timedelta object
:param query_options: For possible query_options, see the Hawkular-Metrics documentation.
"""
if start is not None:
if type(start) is datetime:
query_options['start'] = datetime_to_time_millis(start)
else:
query_options['start'] = start
if end is not None:
if type(end) is datetime:
query_options['end'] = datetime_to_time_millis(end)
else:
query_options['end'] = end
if bucketDuration is not None:
if type(bucketDuration) is timedelta:
query_options['bucketDuration'] = timedelta_to_duration(bucketDuration)
else:
query_options['bucketDuration'] = bucketDuration
if metric_id is not None:
url = self._get_metrics_stats_url(self._get_metrics_single_url(metric_type, metric_id))
else:
if len(query_options) < 0:
raise HawkularError('Tags are required when querying without metric_id')
url = self._get_metrics_stats_url(self._get_url(metric_type))
return self._get(url, **query_options) | [
"def",
"query_metric_stats",
"(",
"self",
",",
"metric_type",
",",
"metric_id",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"bucketDuration",
"=",
"None",
",",
"*",
"*",
"query_options",
")",
":",
"if",
"start",
"is",
"not",
"None",
":",
"if",
"type",
"(",
"start",
")",
"is",
"datetime",
":",
"query_options",
"[",
"'start'",
"]",
"=",
"datetime_to_time_millis",
"(",
"start",
")",
"else",
":",
"query_options",
"[",
"'start'",
"]",
"=",
"start",
"if",
"end",
"is",
"not",
"None",
":",
"if",
"type",
"(",
"end",
")",
"is",
"datetime",
":",
"query_options",
"[",
"'end'",
"]",
"=",
"datetime_to_time_millis",
"(",
"end",
")",
"else",
":",
"query_options",
"[",
"'end'",
"]",
"=",
"end",
"if",
"bucketDuration",
"is",
"not",
"None",
":",
"if",
"type",
"(",
"bucketDuration",
")",
"is",
"timedelta",
":",
"query_options",
"[",
"'bucketDuration'",
"]",
"=",
"timedelta_to_duration",
"(",
"bucketDuration",
")",
"else",
":",
"query_options",
"[",
"'bucketDuration'",
"]",
"=",
"bucketDuration",
"if",
"metric_id",
"is",
"not",
"None",
":",
"url",
"=",
"self",
".",
"_get_metrics_stats_url",
"(",
"self",
".",
"_get_metrics_single_url",
"(",
"metric_type",
",",
"metric_id",
")",
")",
"else",
":",
"if",
"len",
"(",
"query_options",
")",
"<",
"0",
":",
"raise",
"HawkularError",
"(",
"'Tags are required when querying without metric_id'",
")",
"url",
"=",
"self",
".",
"_get_metrics_stats_url",
"(",
"self",
".",
"_get_url",
"(",
"metric_type",
")",
")",
"return",
"self",
".",
"_get",
"(",
"url",
",",
"*",
"*",
"query_options",
")"
] | Query for metric aggregates from the server. This is called buckets in the Hawkular-Metrics documentation.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id or None for tags matching only
:param start: Milliseconds since epoch or datetime instance
:param end: Milliseconds since epoch or datetime instance
:param bucketDuration: The timedelta or duration of buckets. Can be a string representation or a timedelta object
:param query_options: For possible query_options, see the Hawkular-Metrics documentation. | [
"Query",
"for",
"metric",
"aggregates",
"from",
"the",
"server",
".",
"This",
"is",
"called",
"buckets",
"in",
"the",
"Hawkular",
"-",
"Metrics",
"documentation",
"."
] | python | train |
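query_metric_stats above converts datetime and timedelta arguments before querying. A standalone sketch of equivalent conversions; these helpers are written here for illustration and are not the library's own datetime_to_time_millis / timedelta_to_duration.

# Illustrative conversions for the start/end/bucketDuration arguments above.
from datetime import datetime, timedelta, timezone

def to_millis(dt):
    # Milliseconds since the epoch, as expected for start/end.
    return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1000)

def to_duration(td):
    # One possible whole-second string form; the library's own helper may format differently.
    return "{}s".format(int(td.total_seconds()))

print(to_millis(datetime(2024, 1, 1)))      # 1704067200000
print(to_duration(timedelta(minutes=5)))    # "300s"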
ingolemo/python-lenses | examples/naughts_and_crosses.py | https://github.com/ingolemo/python-lenses/blob/a3a6ed0a31f6674451e542e7380a8aa16e6f8edf/examples/naughts_and_crosses.py#L55-L64 | def winner(self):
'The winner of this board if one exists.'
for potential_win in self._potential_wins():
if potential_win == tuple('XXX'):
return Outcome.win_for_crosses
elif potential_win == tuple('OOO'):
return Outcome.win_for_naughts
if self._count(' ') == 0:
return Outcome.draw
return Outcome.ongoing | [
"def",
"winner",
"(",
"self",
")",
":",
"for",
"potential_win",
"in",
"self",
".",
"_potential_wins",
"(",
")",
":",
"if",
"potential_win",
"==",
"tuple",
"(",
"'XXX'",
")",
":",
"return",
"Outcome",
".",
"win_for_crosses",
"elif",
"potential_win",
"==",
"tuple",
"(",
"'OOO'",
")",
":",
"return",
"Outcome",
".",
"win_for_naughts",
"if",
"self",
".",
"_count",
"(",
"' '",
")",
"==",
"0",
":",
"return",
"Outcome",
".",
"draw",
"return",
"Outcome",
".",
"ongoing"
] | The winner of this board if one exists. | [
"The",
"winner",
"of",
"this",
"board",
"if",
"one",
"exists",
"."
] | python | test |
spyder-ide/spyder | spyder/widgets/arraybuilder.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/arraybuilder.py#L271-L285 | def keyPressEvent(self, event):
"""
Qt override.
"""
QToolTip.hideText()
ctrl = event.modifiers() & Qt.ControlModifier
if event.key() in [Qt.Key_Enter, Qt.Key_Return]:
if ctrl:
self.process_text(array=False)
else:
self.process_text(array=True)
self.accept()
else:
QDialog.keyPressEvent(self, event) | [
"def",
"keyPressEvent",
"(",
"self",
",",
"event",
")",
":",
"QToolTip",
".",
"hideText",
"(",
")",
"ctrl",
"=",
"event",
".",
"modifiers",
"(",
")",
"&",
"Qt",
".",
"ControlModifier",
"if",
"event",
".",
"key",
"(",
")",
"in",
"[",
"Qt",
".",
"Key_Enter",
",",
"Qt",
".",
"Key_Return",
"]",
":",
"if",
"ctrl",
":",
"self",
".",
"process_text",
"(",
"array",
"=",
"False",
")",
"else",
":",
"self",
".",
"process_text",
"(",
"array",
"=",
"True",
")",
"self",
".",
"accept",
"(",
")",
"else",
":",
"QDialog",
".",
"keyPressEvent",
"(",
"self",
",",
"event",
")"
] | Qt override. | [
"Qt",
"override",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/click/termui.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/termui.py#L580-L606 | def pause(info='Press any key to continue ...', err=False):
"""This command stops execution and waits for the user to press any
key to continue. This is similar to the Windows batch "pause"
command. If the program is not run through a terminal, this command
will instead do nothing.
.. versionadded:: 2.0
.. versionadded:: 4.0
Added the `err` parameter.
:param info: the info string to print before pausing.
:param err: if set, the message goes to ``stderr`` instead of
``stdout``, the same as with echo.
"""
if not isatty(sys.stdin) or not isatty(sys.stdout):
return
try:
if info:
echo(info, nl=False, err=err)
try:
getchar()
except (KeyboardInterrupt, EOFError):
pass
finally:
if info:
echo(err=err) | [
"def",
"pause",
"(",
"info",
"=",
"'Press any key to continue ...'",
",",
"err",
"=",
"False",
")",
":",
"if",
"not",
"isatty",
"(",
"sys",
".",
"stdin",
")",
"or",
"not",
"isatty",
"(",
"sys",
".",
"stdout",
")",
":",
"return",
"try",
":",
"if",
"info",
":",
"echo",
"(",
"info",
",",
"nl",
"=",
"False",
",",
"err",
"=",
"err",
")",
"try",
":",
"getchar",
"(",
")",
"except",
"(",
"KeyboardInterrupt",
",",
"EOFError",
")",
":",
"pass",
"finally",
":",
"if",
"info",
":",
"echo",
"(",
"err",
"=",
"err",
")"
] | This command stops execution and waits for the user to press any
key to continue. This is similar to the Windows batch "pause"
command. If the program is not run through a terminal, this command
will instead do nothing.
.. versionadded:: 2.0
.. versionadded:: 4.0
Added the `err` parameter.
:param info: the info string to print before pausing.
:param err: if set, the message goes to ``stderr`` instead of
``stdout``, the same as with echo. | [
"This",
"command",
"stops",
"execution",
"and",
"waits",
"for",
"the",
"user",
"to",
"press",
"any",
"key",
"to",
"continue",
".",
"This",
"is",
"similar",
"to",
"the",
"Windows",
"batch",
"pause",
"command",
".",
"If",
"the",
"program",
"is",
"not",
"run",
"through",
"a",
"terminal",
"this",
"command",
"will",
"instead",
"do",
"nothing",
"."
] | python | train |
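A minimal usage sketch for the pause helper above, which is exposed as click.pause; the surrounding command is invented for illustration.

# Minimal click.pause usage sketch.
import click

@click.command()
def deploy():
    click.echo("About to deploy.")
    # Waits for a keypress; silently does nothing when not attached to a terminal.
    click.pause(info="Press any key to continue ...", err=False)
    click.echo("Done.")

if __name__ == "__main__":
    deploy()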
bitesofcode/projexui | projexui/widgets/xscintillaedit/xscintillaedit.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xscintillaedit/xscintillaedit.py#L311-L361 | def initOptions(self, options):
"""
Initializes the edit with the inputted options data set.
:param options | <XScintillaEditOptions>
"""
self.setAutoIndent( options.value('autoIndent'))
self.setIndentationsUseTabs( options.value('indentationsUseTabs'))
self.setTabIndents( options.value('tabIndents'))
self.setTabWidth( options.value('tabWidth'))
self.setCaretLineVisible( options.value('showCaretLine'))
self.setShowWhitespaces( options.value('showWhitespaces'))
self.setMarginLineNumbers( 0, options.value('showLineNumbers'))
self.setIndentationGuides( options.value('showIndentations'))
self.setEolVisibility( options.value('showEndlines'))
if options.value('showLimitColumn'):
self.setEdgeMode(self.EdgeLine)
self.setEdgeColumn(options.value('limitColumn'))
else:
self.setEdgeMode(self.EdgeNone)
if options.value('showLineWrap'):
self.setWrapMode(self.WrapWord)
else:
self.setWrapMode(self.WrapNone)
# set the autocompletion source
if options.value('autoComplete'):
self.setAutoCompletionSource(QsciScintilla.AcsAll)
else:
self.setAutoCompletionSource(QsciScintilla.AcsNone)
self.setAutoCompletionThreshold(options.value('autoCompleteThreshold'))
# update the font information
font = options.value('documentFont')
font.setPointSize(options.value('documentFontSize'))
self.setFont(font)
# udpate the lexer
lexer = self.lexer()
if lexer:
lexer.setFont(font)
# create the margin font option
mfont = options.value('documentMarginFont')
mfont.setPointSize(font.pointSize() - 2)
self.setMarginsFont(mfont)
self.setMarginWidth(0, QFontMetrics(mfont).width('0000000') + 5) | [
"def",
"initOptions",
"(",
"self",
",",
"options",
")",
":",
"self",
".",
"setAutoIndent",
"(",
"options",
".",
"value",
"(",
"'autoIndent'",
")",
")",
"self",
".",
"setIndentationsUseTabs",
"(",
"options",
".",
"value",
"(",
"'indentationsUseTabs'",
")",
")",
"self",
".",
"setTabIndents",
"(",
"options",
".",
"value",
"(",
"'tabIndents'",
")",
")",
"self",
".",
"setTabWidth",
"(",
"options",
".",
"value",
"(",
"'tabWidth'",
")",
")",
"self",
".",
"setCaretLineVisible",
"(",
"options",
".",
"value",
"(",
"'showCaretLine'",
")",
")",
"self",
".",
"setShowWhitespaces",
"(",
"options",
".",
"value",
"(",
"'showWhitespaces'",
")",
")",
"self",
".",
"setMarginLineNumbers",
"(",
"0",
",",
"options",
".",
"value",
"(",
"'showLineNumbers'",
")",
")",
"self",
".",
"setIndentationGuides",
"(",
"options",
".",
"value",
"(",
"'showIndentations'",
")",
")",
"self",
".",
"setEolVisibility",
"(",
"options",
".",
"value",
"(",
"'showEndlines'",
")",
")",
"if",
"options",
".",
"value",
"(",
"'showLimitColumn'",
")",
":",
"self",
".",
"setEdgeMode",
"(",
"self",
".",
"EdgeLine",
")",
"self",
".",
"setEdgeColumn",
"(",
"options",
".",
"value",
"(",
"'limitColumn'",
")",
")",
"else",
":",
"self",
".",
"setEdgeMode",
"(",
"self",
".",
"EdgeNone",
")",
"if",
"options",
".",
"value",
"(",
"'showLineWrap'",
")",
":",
"self",
".",
"setWrapMode",
"(",
"self",
".",
"WrapWord",
")",
"else",
":",
"self",
".",
"setWrapMode",
"(",
"self",
".",
"WrapNone",
")",
"# set the autocompletion source",
"if",
"options",
".",
"value",
"(",
"'autoComplete'",
")",
":",
"self",
".",
"setAutoCompletionSource",
"(",
"QsciScintilla",
".",
"AcsAll",
")",
"else",
":",
"self",
".",
"setAutoCompletionSource",
"(",
"QsciScintilla",
".",
"AcsNone",
")",
"self",
".",
"setAutoCompletionThreshold",
"(",
"options",
".",
"value",
"(",
"'autoCompleteThreshold'",
")",
")",
"# update the font information\r",
"font",
"=",
"options",
".",
"value",
"(",
"'documentFont'",
")",
"font",
".",
"setPointSize",
"(",
"options",
".",
"value",
"(",
"'documentFontSize'",
")",
")",
"self",
".",
"setFont",
"(",
"font",
")",
"# udpate the lexer\r",
"lexer",
"=",
"self",
".",
"lexer",
"(",
")",
"if",
"lexer",
":",
"lexer",
".",
"setFont",
"(",
"font",
")",
"# create the margin font option",
"mfont",
"=",
"options",
".",
"value",
"(",
"'documentMarginFont'",
")",
"mfont",
".",
"setPointSize",
"(",
"font",
".",
"pointSize",
"(",
")",
"-",
"2",
")",
"self",
".",
"setMarginsFont",
"(",
"mfont",
")",
"self",
".",
"setMarginWidth",
"(",
"0",
",",
"QFontMetrics",
"(",
"mfont",
")",
".",
"width",
"(",
"'0000000'",
")",
"+",
"5",
")"
] | Initializes the edit with the inputted options data set.
:param options | <XScintillaEditOptions> | [
"Initializes",
"the",
"edit",
"with",
"the",
"inputed",
"options",
"data",
"set",
".",
":",
"param",
"options",
"|",
"<XScintillaEditOptions",
">"
] | python | train |
vingd/encrypted-pickle-python | encryptedpickle/encryptedpickle.py | https://github.com/vingd/encrypted-pickle-python/blob/7656233598e02e65971f69e11849a0f288b2b2a5/encryptedpickle/encryptedpickle.py#L362-L383 | def _decode(self, data, algorithm, key=None):
'''Decode data with specific algorithm'''
if algorithm['type'] == 'hmac':
verify_signature = data[-algorithm['hash_size']:]
data = data[:-algorithm['hash_size']]
signature = self._hmac_generate(data, algorithm, key)
if not const_equal(verify_signature, signature):
raise Exception('Invalid signature')
return data
elif algorithm['type'] == 'aes':
return self._aes_decrypt(data, algorithm, key)
elif algorithm['type'] == 'no-serialization':
return data
elif algorithm['type'] == 'json':
return json.loads(data)
elif algorithm['type'] == 'no-compression':
return data
elif algorithm['type'] == 'gzip':
return self._zlib_decompress(data, algorithm)
else:
raise Exception('Algorithm not supported: %s' % algorithm['type']) | [
"def",
"_decode",
"(",
"self",
",",
"data",
",",
"algorithm",
",",
"key",
"=",
"None",
")",
":",
"if",
"algorithm",
"[",
"'type'",
"]",
"==",
"'hmac'",
":",
"verify_signature",
"=",
"data",
"[",
"-",
"algorithm",
"[",
"'hash_size'",
"]",
":",
"]",
"data",
"=",
"data",
"[",
":",
"-",
"algorithm",
"[",
"'hash_size'",
"]",
"]",
"signature",
"=",
"self",
".",
"_hmac_generate",
"(",
"data",
",",
"algorithm",
",",
"key",
")",
"if",
"not",
"const_equal",
"(",
"verify_signature",
",",
"signature",
")",
":",
"raise",
"Exception",
"(",
"'Invalid signature'",
")",
"return",
"data",
"elif",
"algorithm",
"[",
"'type'",
"]",
"==",
"'aes'",
":",
"return",
"self",
".",
"_aes_decrypt",
"(",
"data",
",",
"algorithm",
",",
"key",
")",
"elif",
"algorithm",
"[",
"'type'",
"]",
"==",
"'no-serialization'",
":",
"return",
"data",
"elif",
"algorithm",
"[",
"'type'",
"]",
"==",
"'json'",
":",
"return",
"json",
".",
"loads",
"(",
"data",
")",
"elif",
"algorithm",
"[",
"'type'",
"]",
"==",
"'no-compression'",
":",
"return",
"data",
"elif",
"algorithm",
"[",
"'type'",
"]",
"==",
"'gzip'",
":",
"return",
"self",
".",
"_zlib_decompress",
"(",
"data",
",",
"algorithm",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Algorithm not supported: %s'",
"%",
"algorithm",
"[",
"'type'",
"]",
")"
] | Decode data with specific algorithm | [
"Decode",
"data",
"with",
"specific",
"algorithm"
] | python | valid |
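The 'hmac' branch above strips a trailing signature and compares it in constant time. A stdlib-only sketch of that verify-then-strip pattern; the key, hash choice and payload are made up for illustration.

# Stdlib sketch of the verify-then-strip HMAC pattern used above.
import hashlib
import hmac

KEY = b"example-key"          # made-up key
HASH, HASH_SIZE = hashlib.sha256, 32

payload = b"hello world"
signed = payload + hmac.new(KEY, payload, HASH).digest()

data, signature = signed[:-HASH_SIZE], signed[-HASH_SIZE:]
expected = hmac.new(KEY, data, HASH).digest()
if not hmac.compare_digest(signature, expected):
    raise Exception("Invalid signature")
print(data)  # b'hello world'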
puiterwijk/flask-oidc | flask_oidc/__init__.py | https://github.com/puiterwijk/flask-oidc/blob/7f16e27b926fc12953d6b2ae78a9b9cc9b8d1769/flask_oidc/__init__.py#L692-L744 | def _process_callback(self, statefield):
"""
Exchange the auth code for actual credentials,
then redirect to the originally requested page.
"""
# retrieve session and callback variables
try:
session_csrf_token = session.get('oidc_csrf_token')
state = _json_loads(urlsafe_b64decode(request.args['state'].encode('utf-8')))
csrf_token = state['csrf_token']
code = request.args['code']
except (KeyError, ValueError):
logger.debug("Can't retrieve CSRF token, state, or code",
exc_info=True)
return True, self._oidc_error()
# check callback CSRF token passed to IdP
# against session CSRF token held by user
if csrf_token != session_csrf_token:
logger.debug("CSRF token mismatch")
return True, self._oidc_error()
# make a request to IdP to exchange the auth code for OAuth credentials
flow = self._flow_for_request()
credentials = flow.step2_exchange(code)
id_token = credentials.id_token
if not self._is_id_token_valid(id_token):
logger.debug("Invalid ID token")
if id_token.get('hd') != current_app.config[
'OIDC_GOOGLE_APPS_DOMAIN']:
return True, self._oidc_error(
"You must log in with an account from the {0} domain."
.format(current_app.config['OIDC_GOOGLE_APPS_DOMAIN']),
self.WRONG_GOOGLE_APPS_DOMAIN)
return True, self._oidc_error()
# store credentials by subject
# when Google is the IdP, the subject is their G+ account number
self.credentials_store[id_token['sub']] = credentials.to_json()
# Retrieve the extra statefield data
try:
response = self.extra_data_serializer.loads(state[statefield])
except BadSignature:
logger.error('State field was invalid')
return True, self._oidc_error()
# set a persistent signed cookie containing the ID token
# and redirect to the final destination
self._set_cookie_id_token(id_token)
return False, response | [
"def",
"_process_callback",
"(",
"self",
",",
"statefield",
")",
":",
"# retrieve session and callback variables",
"try",
":",
"session_csrf_token",
"=",
"session",
".",
"get",
"(",
"'oidc_csrf_token'",
")",
"state",
"=",
"_json_loads",
"(",
"urlsafe_b64decode",
"(",
"request",
".",
"args",
"[",
"'state'",
"]",
".",
"encode",
"(",
"'utf-8'",
")",
")",
")",
"csrf_token",
"=",
"state",
"[",
"'csrf_token'",
"]",
"code",
"=",
"request",
".",
"args",
"[",
"'code'",
"]",
"except",
"(",
"KeyError",
",",
"ValueError",
")",
":",
"logger",
".",
"debug",
"(",
"\"Can't retrieve CSRF token, state, or code\"",
",",
"exc_info",
"=",
"True",
")",
"return",
"True",
",",
"self",
".",
"_oidc_error",
"(",
")",
"# check callback CSRF token passed to IdP",
"# against session CSRF token held by user",
"if",
"csrf_token",
"!=",
"session_csrf_token",
":",
"logger",
".",
"debug",
"(",
"\"CSRF token mismatch\"",
")",
"return",
"True",
",",
"self",
".",
"_oidc_error",
"(",
")",
"# make a request to IdP to exchange the auth code for OAuth credentials",
"flow",
"=",
"self",
".",
"_flow_for_request",
"(",
")",
"credentials",
"=",
"flow",
".",
"step2_exchange",
"(",
"code",
")",
"id_token",
"=",
"credentials",
".",
"id_token",
"if",
"not",
"self",
".",
"_is_id_token_valid",
"(",
"id_token",
")",
":",
"logger",
".",
"debug",
"(",
"\"Invalid ID token\"",
")",
"if",
"id_token",
".",
"get",
"(",
"'hd'",
")",
"!=",
"current_app",
".",
"config",
"[",
"'OIDC_GOOGLE_APPS_DOMAIN'",
"]",
":",
"return",
"True",
",",
"self",
".",
"_oidc_error",
"(",
"\"You must log in with an account from the {0} domain.\"",
".",
"format",
"(",
"current_app",
".",
"config",
"[",
"'OIDC_GOOGLE_APPS_DOMAIN'",
"]",
")",
",",
"self",
".",
"WRONG_GOOGLE_APPS_DOMAIN",
")",
"return",
"True",
",",
"self",
".",
"_oidc_error",
"(",
")",
"# store credentials by subject",
"# when Google is the IdP, the subject is their G+ account number",
"self",
".",
"credentials_store",
"[",
"id_token",
"[",
"'sub'",
"]",
"]",
"=",
"credentials",
".",
"to_json",
"(",
")",
"# Retrieve the extra statefield data",
"try",
":",
"response",
"=",
"self",
".",
"extra_data_serializer",
".",
"loads",
"(",
"state",
"[",
"statefield",
"]",
")",
"except",
"BadSignature",
":",
"logger",
".",
"error",
"(",
"'State field was invalid'",
")",
"return",
"True",
",",
"self",
".",
"_oidc_error",
"(",
")",
"# set a persistent signed cookie containing the ID token",
"# and redirect to the final destination",
"self",
".",
"_set_cookie_id_token",
"(",
"id_token",
")",
"return",
"False",
",",
"response"
] | Exchange the auth code for actual credentials,
then redirect to the originally requested page. | [
"Exchange",
"the",
"auth",
"code",
"for",
"actual",
"credentials",
"then",
"redirect",
"to",
"the",
"originally",
"requested",
"page",
"."
] | python | train |
SheffieldML/GPy | GPy/kern/src/independent_outputs.py | https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/kern/src/independent_outputs.py#L9-L35 | def index_to_slices(index):
"""
take a numpy array of integers (index) and return a nested list of slices such that the slices describe the start, stop points for each integer in the index.
e.g.
>>> index = np.asarray([0,0,0,1,1,1,2,2,2])
returns
>>> [[slice(0,3,None)],[slice(3,6,None)],[slice(6,9,None)]]
or, a more complicated example
>>> index = np.asarray([0,0,1,1,0,2,2,2,1,1])
returns
>>> [[slice(0,2,None),slice(4,5,None)],[slice(2,4,None),slice(8,10,None)],[slice(5,8,None)]]
"""
if len(index)==0:
return[]
#contruct the return structure
ind = np.asarray(index,dtype=np.int)
ret = [[] for i in range(ind.max()+1)]
#find the switchpoints
ind_ = np.hstack((ind,ind[0]+ind[-1]+1))
switchpoints = np.nonzero(ind_ - np.roll(ind_,+1))[0]
[ret[ind_i].append(slice(*indexes_i)) for ind_i,indexes_i in zip(ind[switchpoints[:-1]],zip(switchpoints,switchpoints[1:]))]
return ret | [
"def",
"index_to_slices",
"(",
"index",
")",
":",
"if",
"len",
"(",
"index",
")",
"==",
"0",
":",
"return",
"[",
"]",
"#contruct the return structure",
"ind",
"=",
"np",
".",
"asarray",
"(",
"index",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"ret",
"=",
"[",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"ind",
".",
"max",
"(",
")",
"+",
"1",
")",
"]",
"#find the switchpoints",
"ind_",
"=",
"np",
".",
"hstack",
"(",
"(",
"ind",
",",
"ind",
"[",
"0",
"]",
"+",
"ind",
"[",
"-",
"1",
"]",
"+",
"1",
")",
")",
"switchpoints",
"=",
"np",
".",
"nonzero",
"(",
"ind_",
"-",
"np",
".",
"roll",
"(",
"ind_",
",",
"+",
"1",
")",
")",
"[",
"0",
"]",
"[",
"ret",
"[",
"ind_i",
"]",
".",
"append",
"(",
"slice",
"(",
"*",
"indexes_i",
")",
")",
"for",
"ind_i",
",",
"indexes_i",
"in",
"zip",
"(",
"ind",
"[",
"switchpoints",
"[",
":",
"-",
"1",
"]",
"]",
",",
"zip",
"(",
"switchpoints",
",",
"switchpoints",
"[",
"1",
":",
"]",
")",
")",
"]",
"return",
"ret"
] | take a numpy array of integers (index) and return a nested list of slices such that the slices describe the start, stop points for each integer in the index.
e.g.
>>> index = np.asarray([0,0,0,1,1,1,2,2,2])
returns
>>> [[slice(0,3,None)],[slice(3,6,None)],[slice(6,9,None)]]
or, a more complicated example
>>> index = np.asarray([0,0,1,1,0,2,2,2,1,1])
returns
>>> [[slice(0,2,None),slice(4,5,None)],[slice(2,4,None),slice(8,10,None)],[slice(5,8,None)]] | [
"take",
"a",
"numpy",
"array",
"of",
"integers",
"(",
"index",
")",
"and",
"return",
"a",
"nested",
"list",
"of",
"slices",
"such",
"that",
"the",
"slices",
"describe",
"the",
"start",
"stop",
"points",
"for",
"each",
"integer",
"in",
"the",
"index",
"."
] | python | train |
gwpy/gwpy | gwpy/timeseries/core.py | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L382-L498 | def fetch_open_data(cls, ifo, start, end, sample_rate=4096,
tag=None, version=None,
format='hdf5', host=GWOSC_DEFAULT_HOST,
verbose=False, cache=None, **kwargs):
"""Fetch open-access data from the LIGO Open Science Center
Parameters
----------
ifo : `str`
the two-character prefix of the IFO in which you are interested,
e.g. `'L1'`
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS start time of required data, defaults to start of data found;
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, defaults to end of data found;
any input parseable by `~gwpy.time.to_gps` is fine
sample_rate : `float`, optional,
the sample rate of desired data; most data are stored
by LOSC at 4096 Hz, however there may be event-related
data releases with a 16384 Hz rate, default: `4096`
tag : `str`, optional
file tag, e.g. ``'CLN'`` to select cleaned data, or ``'C00'``
for 'raw' calibrated data.
version : `int`, optional
version of files to download, defaults to highest discovered
version
format : `str`, optional
the data format to download and parse, default: ``'h5py'``
- ``'hdf5'``
- ``'gwf'`` - requires |LDAStools.frameCPP|_
host : `str`, optional
HTTP host name of LOSC server to access
verbose : `bool`, optional, default: `False`
print verbose output while fetching data
cache : `bool`, optional
save/read a local copy of the remote URL, default: `False`;
useful if the same remote data are to be accessed multiple times.
Set `GWPY_CACHE=1` in the environment to auto-cache.
**kwargs
any other keyword arguments are passed to the `TimeSeries.read`
method that parses the file that was downloaded
Examples
--------
>>> from gwpy.timeseries import (TimeSeries, StateVector)
>>> print(TimeSeries.fetch_open_data('H1', 1126259446, 1126259478))
TimeSeries([ 2.17704028e-19, 2.08763900e-19, 2.39681183e-19,
..., 3.55365541e-20, 6.33533516e-20,
7.58121195e-20]
unit: Unit(dimensionless),
t0: 1126259446.0 s,
dt: 0.000244140625 s,
name: Strain,
channel: None)
>>> print(StateVector.fetch_open_data('H1', 1126259446, 1126259478))
StateVector([127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127]
unit: Unit(dimensionless),
t0: 1126259446.0 s,
dt: 1.0 s,
name: Data quality,
channel: None,
bits: Bits(0: data present
1: passes cbc CAT1 test
2: passes cbc CAT2 test
3: passes cbc CAT3 test
4: passes burst CAT1 test
5: passes burst CAT2 test
6: passes burst CAT3 test,
channel=None,
epoch=1126259446.0))
For the `StateVector`, the naming of the bits will be
``format``-dependent, because they are recorded differently by LOSC
in different formats.
For events published in O2 and later, LOSC typically provides
multiple data sets containing the original (``'C00'``) and cleaned
(``'CLN'``) data.
To select both data sets and plot a comparison, for example:
>>> orig = TimeSeries.fetch_open_data('H1', 1187008870, 1187008896,
... tag='C00')
>>> cln = TimeSeries.fetch_open_data('H1', 1187008870, 1187008896,
... tag='CLN')
>>> origasd = orig.asd(fftlength=4, overlap=2)
>>> clnasd = cln.asd(fftlength=4, overlap=2)
>>> plot = origasd.plot(label='Un-cleaned')
>>> ax = plot.gca()
>>> ax.plot(clnasd, label='Cleaned')
>>> ax.set_xlim(10, 1400)
>>> ax.set_ylim(1e-24, 1e-20)
>>> ax.legend()
>>> plot.show()
Notes
-----
`StateVector` data are not available in ``txt.gz`` format.
"""
from .io.losc import fetch_losc_data
return fetch_losc_data(ifo, start, end, sample_rate=sample_rate,
tag=tag, version=version, format=format,
verbose=verbose, cache=cache,
host=host, cls=cls, **kwargs) | [
"def",
"fetch_open_data",
"(",
"cls",
",",
"ifo",
",",
"start",
",",
"end",
",",
"sample_rate",
"=",
"4096",
",",
"tag",
"=",
"None",
",",
"version",
"=",
"None",
",",
"format",
"=",
"'hdf5'",
",",
"host",
"=",
"GWOSC_DEFAULT_HOST",
",",
"verbose",
"=",
"False",
",",
"cache",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
"io",
".",
"losc",
"import",
"fetch_losc_data",
"return",
"fetch_losc_data",
"(",
"ifo",
",",
"start",
",",
"end",
",",
"sample_rate",
"=",
"sample_rate",
",",
"tag",
"=",
"tag",
",",
"version",
"=",
"version",
",",
"format",
"=",
"format",
",",
"verbose",
"=",
"verbose",
",",
"cache",
"=",
"cache",
",",
"host",
"=",
"host",
",",
"cls",
"=",
"cls",
",",
"*",
"*",
"kwargs",
")"
] | Fetch open-access data from the LIGO Open Science Center
Parameters
----------
ifo : `str`
the two-character prefix of the IFO in which you are interested,
e.g. `'L1'`
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS start time of required data, defaults to start of data found;
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, defaults to end of data found;
any input parseable by `~gwpy.time.to_gps` is fine
sample_rate : `float`, optional,
the sample rate of desired data; most data are stored
by LOSC at 4096 Hz, however there may be event-related
data releases with a 16384 Hz rate, default: `4096`
tag : `str`, optional
file tag, e.g. ``'CLN'`` to select cleaned data, or ``'C00'``
for 'raw' calibrated data.
version : `int`, optional
version of files to download, defaults to highest discovered
version
format : `str`, optional
the data format to download and parse, default: ``'h5py'``
- ``'hdf5'``
- ``'gwf'`` - requires |LDAStools.frameCPP|_
host : `str`, optional
HTTP host name of LOSC server to access
verbose : `bool`, optional, default: `False`
print verbose output while fetching data
cache : `bool`, optional
save/read a local copy of the remote URL, default: `False`;
useful if the same remote data are to be accessed multiple times.
Set `GWPY_CACHE=1` in the environment to auto-cache.
**kwargs
any other keyword arguments are passed to the `TimeSeries.read`
method that parses the file that was downloaded
Examples
--------
>>> from gwpy.timeseries import (TimeSeries, StateVector)
>>> print(TimeSeries.fetch_open_data('H1', 1126259446, 1126259478))
TimeSeries([ 2.17704028e-19, 2.08763900e-19, 2.39681183e-19,
..., 3.55365541e-20, 6.33533516e-20,
7.58121195e-20]
unit: Unit(dimensionless),
t0: 1126259446.0 s,
dt: 0.000244140625 s,
name: Strain,
channel: None)
>>> print(StateVector.fetch_open_data('H1', 1126259446, 1126259478))
StateVector([127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127]
unit: Unit(dimensionless),
t0: 1126259446.0 s,
dt: 1.0 s,
name: Data quality,
channel: None,
bits: Bits(0: data present
1: passes cbc CAT1 test
2: passes cbc CAT2 test
3: passes cbc CAT3 test
4: passes burst CAT1 test
5: passes burst CAT2 test
6: passes burst CAT3 test,
channel=None,
epoch=1126259446.0))
For the `StateVector`, the naming of the bits will be
``format``-dependent, because they are recorded differently by LOSC
in different formats.
For events published in O2 and later, LOSC typically provides
multiple data sets containing the original (``'C00'``) and cleaned
(``'CLN'``) data.
To select both data sets and plot a comparison, for example:
>>> orig = TimeSeries.fetch_open_data('H1', 1187008870, 1187008896,
... tag='C00')
>>> cln = TimeSeries.fetch_open_data('H1', 1187008870, 1187008896,
... tag='CLN')
>>> origasd = orig.asd(fftlength=4, overlap=2)
>>> clnasd = cln.asd(fftlength=4, overlap=2)
>>> plot = origasd.plot(label='Un-cleaned')
>>> ax = plot.gca()
>>> ax.plot(clnasd, label='Cleaned')
>>> ax.set_xlim(10, 1400)
>>> ax.set_ylim(1e-24, 1e-20)
>>> ax.legend()
>>> plot.show()
Notes
-----
`StateVector` data are not available in ``txt.gz`` format. | [
"Fetch",
"open",
"-",
"access",
"data",
"from",
"the",
"LIGO",
"Open",
"Science",
"Center"
] | python | train |
singularityhub/singularity-cli | spython/main/parse/docker.py | https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/main/parse/docker.py#L315-L325 | def _workdir(self, line):
'''A Docker WORKDIR command simply implies to cd to that location
Parameters
==========
line: the line from the recipe file to parse for WORKDIR
'''
workdir = self._setup('WORKDIR', line)
line = "cd %s" %(''.join(workdir))
self.install.append(line) | [
"def",
"_workdir",
"(",
"self",
",",
"line",
")",
":",
"workdir",
"=",
"self",
".",
"_setup",
"(",
"'WORKDIR'",
",",
"line",
")",
"line",
"=",
"\"cd %s\"",
"%",
"(",
"''",
".",
"join",
"(",
"workdir",
")",
")",
"self",
".",
"install",
".",
"append",
"(",
"line",
")"
] | A Docker WORKDIR command simply implies to cd to that location
Parameters
==========
line: the line from the recipe file to parse for WORKDIR | [
"A",
"Docker",
"WORKDIR",
"command",
"simply",
"implies",
"to",
"cd",
"to",
"that",
"location"
] | python | train |
apache/incubator-mxnet | example/gluon/lipnet/utils/preprocess_data.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/lipnet/utils/preprocess_data.py#L118-L124 | def process_frames_mouth(self, frames):
"""
Preprocess from frames using mouth detector
"""
self.face = np.array(frames)
self.mouth = np.array(frames)
self.set_data(frames) | [
"def",
"process_frames_mouth",
"(",
"self",
",",
"frames",
")",
":",
"self",
".",
"face",
"=",
"np",
".",
"array",
"(",
"frames",
")",
"self",
".",
"mouth",
"=",
"np",
".",
"array",
"(",
"frames",
")",
"self",
".",
"set_data",
"(",
"frames",
")"
] | Preprocess from frames using mouth detector | [
"Preprocess",
"from",
"frames",
"using",
"mouth",
"detector"
] | python | train |
ssato/python-anyconfig | src/anyconfig/processors.py | https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/processors.py#L321-L334 | def findall(self, obj, forced_type=None,
cls=anyconfig.models.processor.Processor):
"""
:param obj:
a file path, file, file-like object, pathlib.Path object or an
'anyconfig.globals.IOInfo' (namedtuple) object
:param forced_type: Forced processor type to find
:param cls: A class object to compare with 'ptype'
:return: A list of instances of processor classes to process 'obj'
:raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
"""
return [p() for p in findall(obj, self.list(),
forced_type=forced_type, cls=cls)] | [
"def",
"findall",
"(",
"self",
",",
"obj",
",",
"forced_type",
"=",
"None",
",",
"cls",
"=",
"anyconfig",
".",
"models",
".",
"processor",
".",
"Processor",
")",
":",
"return",
"[",
"p",
"(",
")",
"for",
"p",
"in",
"findall",
"(",
"obj",
",",
"self",
".",
"list",
"(",
")",
",",
"forced_type",
"=",
"forced_type",
",",
"cls",
"=",
"cls",
")",
"]"
] | :param obj:
a file path, file, file-like object, pathlib.Path object or an
'anyconfig.globals.IOInfo' (namedtuple) object
:param forced_type: Forced processor type to find
:param cls: A class object to compare with 'ptype'
:return: A list of instances of processor classes to process 'obj'
:raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError | [
":",
"param",
"obj",
":",
"a",
"file",
"path",
"file",
"file",
"-",
"like",
"object",
"pathlib",
".",
"Path",
"object",
"or",
"an",
"anyconfig",
".",
"globals",
".",
"IOInfo",
"(",
"namedtuple",
")",
"object",
":",
"param",
"forced_type",
":",
"Forced",
"processor",
"type",
"to",
"find",
":",
"param",
"cls",
":",
"A",
"class",
"object",
"to",
"compare",
"with",
"ptype"
] | python | train |
monarch-initiative/dipper | dipper/sources/UCSCBands.py | https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/UCSCBands.py#L208-L460 | def _get_chrbands(self, limit, taxon):
"""
:param limit:
:return:
"""
if limit is None:
limit = sys.maxsize # practical limit anyway
model = Model(self.graph)
line_counter = 0
myfile = '/'.join((self.rawdir, self.files[taxon]['file']))
LOG.info("Processing Chr bands from FILE: %s", myfile)
geno = Genotype(self.graph)
monochrom = Monochrom(self.graph_type, self.are_bnodes_skized)
# used to hold band definitions for a chr
# in order to compute extent of encompasing bands
mybands = {}
# build the organism's genome from the taxon
genome_label = self.files[taxon]['genome_label']
taxon_id = 'NCBITaxon:' + taxon
# add the taxon as a class. adding the class label elsewhere
model.addClassToGraph(taxon_id, None)
model.addSynonym(taxon_id, genome_label)
geno.addGenome(taxon_id, genome_label)
# add the build and the taxon it's in
build_num = self.files[taxon]['build_num']
build_id = 'UCSC:' + build_num
geno.addReferenceGenome(build_id, build_num, taxon_id)
# process the bands
col = ['scaffold', 'start', 'stop', 'band_num', 'rtype']
with gzip.open(myfile, 'rb') as f:
for line in f:
line_counter += 1
# skip comments
line = line.decode().strip()
if line[0] == '#' or line_counter > limit:
continue
# chr13 4500000 10000000 p12 stalk
row = line.split('\t')
scaffold = row[col.index('scaffold')]
start = row[col.index('start')]
stop = row[col.index('stop')]
band_num = row[col.index('band_num')].strip()
rtype = row[col.index('rtype')]
# NOTE some less-finished genomes have
# placed and unplaced scaffolds
# * Placed scaffolds:
# the scaffolds have been placed within a chromosome.
# * Unlocalized scaffolds:
# although the chromosome within which the scaffold occurs
# is known, the scaffold's position or orientation
# is not known.
# * Unplaced scaffolds:
# it is not known which chromosome the scaffold belongs to
#
# find out if the thing is a full on chromosome, or a scaffold:
# ex: unlocalized scaffold: chr10_KL568008v1_random
# ex: unplaced scaffold: chrUn_AABR07022428v1
placed_scaffold_pattern = r'(chr(?:\d+|X|Y|Z|W|M))'
unlocalized_scaffold_pattern = placed_scaffold_pattern+r'_(\w+)_random'
unplaced_scaffold_pattern = r'chr(Un(?:_\w+)?)'
mch = re.match(placed_scaffold_pattern + r'$', scaffold)
if mch is not None and len(mch.groups()) == 1:
# the chromosome is the first match of the pattern
chrom_num = mch.group(1)
else:
# skip over anything that isn't a placed_scaffold
# at the class level
LOG.info("Found non-placed chromosome %s", scaffold)
chrom_num = None
m_chr_unloc = re.match(unlocalized_scaffold_pattern, scaffold)
m_chr_unplaced = re.match(unplaced_scaffold_pattern, scaffold)
scaffold_num = None
if mch:
pass
elif m_chr_unloc is not None and len(m_chr_unloc.groups()) == 2:
chrom_num = m_chr_unloc.group(1)
scaffold_num = chrom_num + '_' + m_chr_unloc.group(2)
elif m_chr_unplaced is not None and len(m_chr_unplaced.groups()) == 1:
scaffold_num = m_chr_unplaced.group(1)
else:
LOG.error(
"There's a chr pattern that we aren't matching: %s", scaffold)
if chrom_num is not None:
# the chrom class (generic) id
chrom_class_id = makeChromID(chrom_num, taxon, 'CHR')
# first, add the chromosome class (in the taxon)
geno.addChromosomeClass(
chrom_num, taxon_id, self.files[taxon]['genome_label'])
# then, add the chromosome instance (from the given build)
geno.addChromosomeInstance(
chrom_num, build_id, build_num, chrom_class_id)
# add the chr to the hashmap of coordinates for this build
# the chromosome coordinate space is itself
if chrom_num not in mybands.keys():
mybands[chrom_num] = {
'min': 0,
'max': int(stop),
'chr': chrom_num,
'ref': build_id,
'parent': None,
'stain': None,
'type': self.globaltt['chromosome']}
if scaffold_num is not None:
# this will put the coordinates of the scaffold
# in the scaffold-space and make sure that the scaffold
# is part of the correct parent.
# if chrom_num is None,
# then it will attach it to the genome,
# just like a reg chrom
mybands[scaffold_num] = {
'min': start,
'max': stop,
'chr': scaffold_num,
'ref': build_id,
'parent': chrom_num,
'stain': None,
'type': self.globaltt['assembly_component'],
'synonym': scaffold}
parents = list()
if band_num is not None and band_num != '':
# add the specific band
mybands[chrom_num+band_num] = {
'min': start,
'max': stop,
'chr': chrom_num,
'ref': build_id,
'parent': None,
'stain': None,
'type': None}
# add the staining intensity of the band
if re.match(r'g(neg|pos|var)', rtype):
mybands[chrom_num+band_num]['stain'] = self.resolve(rtype)
# get the parent bands, and make them unique
parents = list(
monochrom.make_parent_bands(band_num, set()))
# alphabetical sort will put them in smallest to biggest,
# so we reverse
parents.sort(reverse=True)
# print('parents of',chrom,band,':',parents)
if len(parents) > 0:
mybands[chrom_num + band_num]['parent'] = chrom_num + parents[0]
# loop through the parents and add them to the hash
# add the parents to the graph, in hierarchical order
# TODO PYLINT Consider using enumerate
# instead of iterating with range and len
for i in range(len(parents)):
rti = getChrPartTypeByNotation(parents[i])
pnum = chrom_num+parents[i]
sta = int(start)
sto = int(stop)
if pnum not in mybands.keys():
# add the parental band to the hash
bnd = {
'min': min(sta, sto),
'max': max(sta, sto),
'chr': chrom_num,
'ref': build_id,
'parent': None,
'stain': None,
'type': rti}
mybands[pnum] = bnd
else:
# band already in the hash means it's a grouping band
# need to update the min/max coords
bnd = mybands.get(pnum)
bnd['min'] = min(sta, sto, bnd['min'])
bnd['max'] = max(sta, sto, bnd['max'])
mybands[pnum] = bnd
# also, set the max for the chrom
chrom = mybands.get(chrom_num)
chrom['max'] = max(sta, sto, chrom['max'])
mybands[chrom_num] = chrom
# add the parent relationships to each
if i < len(parents) - 1:
mybands[pnum]['parent'] = chrom_num+parents[i+1]
else:
# add the last one (p or q usually)
# as attached to the chromosome
mybands[pnum]['parent'] = chrom_num
f.close() # end looping through file
# loop through the hash and add the bands to the graph
for bnd in mybands.keys():
myband = mybands.get(bnd)
band_class_id = makeChromID(bnd, taxon, 'CHR')
band_class_label = makeChromLabel(bnd, genome_label)
band_build_id = makeChromID(bnd, build_num, 'MONARCH')
band_build_label = makeChromLabel(bnd, build_num)
# the build-specific chrom
chrom_in_build_id = makeChromID(myband['chr'], build_num, 'MONARCH')
# if it's != part, then add the class
if myband['type'] != self.globaltt['assembly_component']:
model.addClassToGraph(
band_class_id, band_class_label, myband['type'])
bfeature = Feature(
self.graph, band_build_id, band_build_label, band_class_id)
else:
bfeature = Feature(
self.graph, band_build_id, band_build_label, myband['type'])
if 'synonym' in myband:
model.addSynonym(band_build_id, myband['synonym'])
if myband['parent'] is None:
if myband['type'] == self.globaltt['assembly_component']:
# since we likely don't know the chr,
# add it as a part of the build
geno.addParts(band_build_id, build_id)
elif myband['type'] == self.globaltt['assembly_component']:
# geno.addParts(band_build_id, chrom_in_build_id)
parent_chrom_in_build = makeChromID(
myband['parent'], build_num, 'MONARCH')
bfeature.addSubsequenceOfFeature(parent_chrom_in_build)
# add the band as a feature
# (which also instantiates the owl:Individual)
bfeature.addFeatureStartLocation(myband['min'], chrom_in_build_id)
bfeature.addFeatureEndLocation(myband['max'], chrom_in_build_id)
if 'stain' in myband and myband['stain'] is not None:
bfeature.addFeatureProperty(
self.globaltt['has_sequence_attribute'], myband['stain'])
# type the band as a faldo:Region directly (add_region=False)
# bfeature.setNoBNodes(self.nobnodes)
# to come when we merge in ZFIN.py
bfeature.addFeatureToGraph(False)
return | [
"def",
"_get_chrbands",
"(",
"self",
",",
"limit",
",",
"taxon",
")",
":",
"if",
"limit",
"is",
"None",
":",
"limit",
"=",
"sys",
".",
"maxsize",
"# practical limit anyway",
"model",
"=",
"Model",
"(",
"self",
".",
"graph",
")",
"line_counter",
"=",
"0",
"myfile",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"taxon",
"]",
"[",
"'file'",
"]",
")",
")",
"LOG",
".",
"info",
"(",
"\"Processing Chr bands from FILE: %s\"",
",",
"myfile",
")",
"geno",
"=",
"Genotype",
"(",
"self",
".",
"graph",
")",
"monochrom",
"=",
"Monochrom",
"(",
"self",
".",
"graph_type",
",",
"self",
".",
"are_bnodes_skized",
")",
"# used to hold band definitions for a chr",
"# in order to compute extent of encompasing bands",
"mybands",
"=",
"{",
"}",
"# build the organism's genome from the taxon",
"genome_label",
"=",
"self",
".",
"files",
"[",
"taxon",
"]",
"[",
"'genome_label'",
"]",
"taxon_id",
"=",
"'NCBITaxon:'",
"+",
"taxon",
"# add the taxon as a class. adding the class label elsewhere",
"model",
".",
"addClassToGraph",
"(",
"taxon_id",
",",
"None",
")",
"model",
".",
"addSynonym",
"(",
"taxon_id",
",",
"genome_label",
")",
"geno",
".",
"addGenome",
"(",
"taxon_id",
",",
"genome_label",
")",
"# add the build and the taxon it's in",
"build_num",
"=",
"self",
".",
"files",
"[",
"taxon",
"]",
"[",
"'build_num'",
"]",
"build_id",
"=",
"'UCSC:'",
"+",
"build_num",
"geno",
".",
"addReferenceGenome",
"(",
"build_id",
",",
"build_num",
",",
"taxon_id",
")",
"# process the bands",
"col",
"=",
"[",
"'scaffold'",
",",
"'start'",
",",
"'stop'",
",",
"'band_num'",
",",
"'rtype'",
"]",
"with",
"gzip",
".",
"open",
"(",
"myfile",
",",
"'rb'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"line_counter",
"+=",
"1",
"# skip comments",
"line",
"=",
"line",
".",
"decode",
"(",
")",
".",
"strip",
"(",
")",
"if",
"line",
"[",
"0",
"]",
"==",
"'#'",
"or",
"line_counter",
">",
"limit",
":",
"continue",
"# chr13\t4500000\t10000000\tp12\tstalk",
"row",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"scaffold",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'scaffold'",
")",
"]",
"start",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'start'",
")",
"]",
"stop",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'stop'",
")",
"]",
"band_num",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'band_num'",
")",
"]",
".",
"strip",
"(",
")",
"rtype",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'rtype'",
")",
"]",
"# NOTE some less-finished genomes have",
"# placed and unplaced scaffolds",
"# * Placed scaffolds:",
"# the scaffolds have been placed within a chromosome.",
"# * Unlocalized scaffolds:",
"# although the chromosome within which the scaffold occurs",
"# is known, the scaffold's position or orientation",
"# is not known.",
"# * Unplaced scaffolds:",
"# it is not known which chromosome the scaffold belongs to",
"#",
"# find out if the thing is a full on chromosome, or a scaffold:",
"# ex: unlocalized scaffold: chr10_KL568008v1_random",
"# ex: unplaced scaffold: chrUn_AABR07022428v1",
"placed_scaffold_pattern",
"=",
"r'(chr(?:\\d+|X|Y|Z|W|M))'",
"unlocalized_scaffold_pattern",
"=",
"placed_scaffold_pattern",
"+",
"r'_(\\w+)_random'",
"unplaced_scaffold_pattern",
"=",
"r'chr(Un(?:_\\w+)?)'",
"mch",
"=",
"re",
".",
"match",
"(",
"placed_scaffold_pattern",
"+",
"r'$'",
",",
"scaffold",
")",
"if",
"mch",
"is",
"not",
"None",
"and",
"len",
"(",
"mch",
".",
"groups",
"(",
")",
")",
"==",
"1",
":",
"# the chromosome is the first match of the pattern",
"chrom_num",
"=",
"mch",
".",
"group",
"(",
"1",
")",
"else",
":",
"# skip over anything that isn't a placed_scaffold",
"# at the class level",
"LOG",
".",
"info",
"(",
"\"Found non-placed chromosome %s\"",
",",
"scaffold",
")",
"chrom_num",
"=",
"None",
"m_chr_unloc",
"=",
"re",
".",
"match",
"(",
"unlocalized_scaffold_pattern",
",",
"scaffold",
")",
"m_chr_unplaced",
"=",
"re",
".",
"match",
"(",
"unplaced_scaffold_pattern",
",",
"scaffold",
")",
"scaffold_num",
"=",
"None",
"if",
"mch",
":",
"pass",
"elif",
"m_chr_unloc",
"is",
"not",
"None",
"and",
"len",
"(",
"m_chr_unloc",
".",
"groups",
"(",
")",
")",
"==",
"2",
":",
"chrom_num",
"=",
"m_chr_unloc",
".",
"group",
"(",
"1",
")",
"scaffold_num",
"=",
"chrom_num",
"+",
"'_'",
"+",
"m_chr_unloc",
".",
"group",
"(",
"2",
")",
"elif",
"m_chr_unplaced",
"is",
"not",
"None",
"and",
"len",
"(",
"m_chr_unplaced",
".",
"groups",
"(",
")",
")",
"==",
"1",
":",
"scaffold_num",
"=",
"m_chr_unplaced",
".",
"group",
"(",
"1",
")",
"else",
":",
"LOG",
".",
"error",
"(",
"\"There's a chr pattern that we aren't matching: %s\"",
",",
"scaffold",
")",
"if",
"chrom_num",
"is",
"not",
"None",
":",
"# the chrom class (generic) id",
"chrom_class_id",
"=",
"makeChromID",
"(",
"chrom_num",
",",
"taxon",
",",
"'CHR'",
")",
"# first, add the chromosome class (in the taxon)",
"geno",
".",
"addChromosomeClass",
"(",
"chrom_num",
",",
"taxon_id",
",",
"self",
".",
"files",
"[",
"taxon",
"]",
"[",
"'genome_label'",
"]",
")",
"# then, add the chromosome instance (from the given build)",
"geno",
".",
"addChromosomeInstance",
"(",
"chrom_num",
",",
"build_id",
",",
"build_num",
",",
"chrom_class_id",
")",
"# add the chr to the hashmap of coordinates for this build",
"# the chromosome coordinate space is itself",
"if",
"chrom_num",
"not",
"in",
"mybands",
".",
"keys",
"(",
")",
":",
"mybands",
"[",
"chrom_num",
"]",
"=",
"{",
"'min'",
":",
"0",
",",
"'max'",
":",
"int",
"(",
"stop",
")",
",",
"'chr'",
":",
"chrom_num",
",",
"'ref'",
":",
"build_id",
",",
"'parent'",
":",
"None",
",",
"'stain'",
":",
"None",
",",
"'type'",
":",
"self",
".",
"globaltt",
"[",
"'chromosome'",
"]",
"}",
"if",
"scaffold_num",
"is",
"not",
"None",
":",
"# this will put the coordinates of the scaffold",
"# in the scaffold-space and make sure that the scaffold",
"# is part of the correct parent.",
"# if chrom_num is None,",
"# then it will attach it to the genome,",
"# just like a reg chrom",
"mybands",
"[",
"scaffold_num",
"]",
"=",
"{",
"'min'",
":",
"start",
",",
"'max'",
":",
"stop",
",",
"'chr'",
":",
"scaffold_num",
",",
"'ref'",
":",
"build_id",
",",
"'parent'",
":",
"chrom_num",
",",
"'stain'",
":",
"None",
",",
"'type'",
":",
"self",
".",
"globaltt",
"[",
"'assembly_component'",
"]",
",",
"'synonym'",
":",
"scaffold",
"}",
"parents",
"=",
"list",
"(",
")",
"if",
"band_num",
"is",
"not",
"None",
"and",
"band_num",
"!=",
"''",
":",
"# add the specific band",
"mybands",
"[",
"chrom_num",
"+",
"band_num",
"]",
"=",
"{",
"'min'",
":",
"start",
",",
"'max'",
":",
"stop",
",",
"'chr'",
":",
"chrom_num",
",",
"'ref'",
":",
"build_id",
",",
"'parent'",
":",
"None",
",",
"'stain'",
":",
"None",
",",
"'type'",
":",
"None",
"}",
"# add the staining intensity of the band",
"if",
"re",
".",
"match",
"(",
"r'g(neg|pos|var)'",
",",
"rtype",
")",
":",
"mybands",
"[",
"chrom_num",
"+",
"band_num",
"]",
"[",
"'stain'",
"]",
"=",
"self",
".",
"resolve",
"(",
"rtype",
")",
"# get the parent bands, and make them unique",
"parents",
"=",
"list",
"(",
"monochrom",
".",
"make_parent_bands",
"(",
"band_num",
",",
"set",
"(",
")",
")",
")",
"# alphabetical sort will put them in smallest to biggest,",
"# so we reverse",
"parents",
".",
"sort",
"(",
"reverse",
"=",
"True",
")",
"# print('parents of',chrom,band,':',parents)",
"if",
"len",
"(",
"parents",
")",
">",
"0",
":",
"mybands",
"[",
"chrom_num",
"+",
"band_num",
"]",
"[",
"'parent'",
"]",
"=",
"chrom_num",
"+",
"parents",
"[",
"0",
"]",
"# loop through the parents and add them to the hash",
"# add the parents to the graph, in hierarchical order",
"# TODO PYLINT Consider using enumerate",
"# instead of iterating with range and len",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"parents",
")",
")",
":",
"rti",
"=",
"getChrPartTypeByNotation",
"(",
"parents",
"[",
"i",
"]",
")",
"pnum",
"=",
"chrom_num",
"+",
"parents",
"[",
"i",
"]",
"sta",
"=",
"int",
"(",
"start",
")",
"sto",
"=",
"int",
"(",
"stop",
")",
"if",
"pnum",
"not",
"in",
"mybands",
".",
"keys",
"(",
")",
":",
"# add the parental band to the hash",
"bnd",
"=",
"{",
"'min'",
":",
"min",
"(",
"sta",
",",
"sto",
")",
",",
"'max'",
":",
"max",
"(",
"sta",
",",
"sto",
")",
",",
"'chr'",
":",
"chrom_num",
",",
"'ref'",
":",
"build_id",
",",
"'parent'",
":",
"None",
",",
"'stain'",
":",
"None",
",",
"'type'",
":",
"rti",
"}",
"mybands",
"[",
"pnum",
"]",
"=",
"bnd",
"else",
":",
"# band already in the hash means it's a grouping band",
"# need to update the min/max coords",
"bnd",
"=",
"mybands",
".",
"get",
"(",
"pnum",
")",
"bnd",
"[",
"'min'",
"]",
"=",
"min",
"(",
"sta",
",",
"sto",
",",
"bnd",
"[",
"'min'",
"]",
")",
"bnd",
"[",
"'max'",
"]",
"=",
"max",
"(",
"sta",
",",
"sto",
",",
"bnd",
"[",
"'max'",
"]",
")",
"mybands",
"[",
"pnum",
"]",
"=",
"bnd",
"# also, set the max for the chrom",
"chrom",
"=",
"mybands",
".",
"get",
"(",
"chrom_num",
")",
"chrom",
"[",
"'max'",
"]",
"=",
"max",
"(",
"sta",
",",
"sto",
",",
"chrom",
"[",
"'max'",
"]",
")",
"mybands",
"[",
"chrom_num",
"]",
"=",
"chrom",
"# add the parent relationships to each",
"if",
"i",
"<",
"len",
"(",
"parents",
")",
"-",
"1",
":",
"mybands",
"[",
"pnum",
"]",
"[",
"'parent'",
"]",
"=",
"chrom_num",
"+",
"parents",
"[",
"i",
"+",
"1",
"]",
"else",
":",
"# add the last one (p or q usually)",
"# as attached to the chromosome",
"mybands",
"[",
"pnum",
"]",
"[",
"'parent'",
"]",
"=",
"chrom_num",
"f",
".",
"close",
"(",
")",
"# end looping through file",
"# loop through the hash and add the bands to the graph",
"for",
"bnd",
"in",
"mybands",
".",
"keys",
"(",
")",
":",
"myband",
"=",
"mybands",
".",
"get",
"(",
"bnd",
")",
"band_class_id",
"=",
"makeChromID",
"(",
"bnd",
",",
"taxon",
",",
"'CHR'",
")",
"band_class_label",
"=",
"makeChromLabel",
"(",
"bnd",
",",
"genome_label",
")",
"band_build_id",
"=",
"makeChromID",
"(",
"bnd",
",",
"build_num",
",",
"'MONARCH'",
")",
"band_build_label",
"=",
"makeChromLabel",
"(",
"bnd",
",",
"build_num",
")",
"# the build-specific chrom",
"chrom_in_build_id",
"=",
"makeChromID",
"(",
"myband",
"[",
"'chr'",
"]",
",",
"build_num",
",",
"'MONARCH'",
")",
"# if it's != part, then add the class",
"if",
"myband",
"[",
"'type'",
"]",
"!=",
"self",
".",
"globaltt",
"[",
"'assembly_component'",
"]",
":",
"model",
".",
"addClassToGraph",
"(",
"band_class_id",
",",
"band_class_label",
",",
"myband",
"[",
"'type'",
"]",
")",
"bfeature",
"=",
"Feature",
"(",
"self",
".",
"graph",
",",
"band_build_id",
",",
"band_build_label",
",",
"band_class_id",
")",
"else",
":",
"bfeature",
"=",
"Feature",
"(",
"self",
".",
"graph",
",",
"band_build_id",
",",
"band_build_label",
",",
"myband",
"[",
"'type'",
"]",
")",
"if",
"'synonym'",
"in",
"myband",
":",
"model",
".",
"addSynonym",
"(",
"band_build_id",
",",
"myband",
"[",
"'synonym'",
"]",
")",
"if",
"myband",
"[",
"'parent'",
"]",
"is",
"None",
":",
"if",
"myband",
"[",
"'type'",
"]",
"==",
"self",
".",
"globaltt",
"[",
"'assembly_component'",
"]",
":",
"# since we likely don't know the chr,",
"# add it as a part of the build",
"geno",
".",
"addParts",
"(",
"band_build_id",
",",
"build_id",
")",
"elif",
"myband",
"[",
"'type'",
"]",
"==",
"self",
".",
"globaltt",
"[",
"'assembly_component'",
"]",
":",
"# geno.addParts(band_build_id, chrom_in_build_id)",
"parent_chrom_in_build",
"=",
"makeChromID",
"(",
"myband",
"[",
"'parent'",
"]",
",",
"build_num",
",",
"'MONARCH'",
")",
"bfeature",
".",
"addSubsequenceOfFeature",
"(",
"parent_chrom_in_build",
")",
"# add the band as a feature",
"# (which also instantiates the owl:Individual)",
"bfeature",
".",
"addFeatureStartLocation",
"(",
"myband",
"[",
"'min'",
"]",
",",
"chrom_in_build_id",
")",
"bfeature",
".",
"addFeatureEndLocation",
"(",
"myband",
"[",
"'max'",
"]",
",",
"chrom_in_build_id",
")",
"if",
"'stain'",
"in",
"myband",
"and",
"myband",
"[",
"'stain'",
"]",
"is",
"not",
"None",
":",
"bfeature",
".",
"addFeatureProperty",
"(",
"self",
".",
"globaltt",
"[",
"'has_sequence_attribute'",
"]",
",",
"myband",
"[",
"'stain'",
"]",
")",
"# type the band as a faldo:Region directly (add_region=False)",
"# bfeature.setNoBNodes(self.nobnodes)",
"# to come when we merge in ZFIN.py",
"bfeature",
".",
"addFeatureToGraph",
"(",
"False",
")",
"return"
] | :param limit:
:return: | [
":",
"param",
"limit",
":",
":",
"return",
":"
] | python | train |
openstax/cnx-archive | cnxarchive/views/xpath.py | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/xpath.py#L116-L120 | def xpath_page(request, uuid, version):
"""Given a page UUID (and optional version), returns a JSON object of
results, as in xpath_book()"""
xpath_string = request.params.get('q')
return execute_xpath(xpath_string, 'xpath-module', uuid, version) | [
"def",
"xpath_page",
"(",
"request",
",",
"uuid",
",",
"version",
")",
":",
"xpath_string",
"=",
"request",
".",
"params",
".",
"get",
"(",
"'q'",
")",
"return",
"execute_xpath",
"(",
"xpath_string",
",",
"'xpath-module'",
",",
"uuid",
",",
"version",
")"
] | Given a page UUID (and optional version), returns a JSON object of
results, as in xpath_book() | [
"Given",
"a",
"page",
"UUID",
"(",
"and",
"optional",
"version",
")",
"returns",
"a",
"JSON",
"object",
"of",
"results",
"as",
"in",
"xpath_book",
"()"
] | python | train |
eshandas/simple_django_logger | django_project/simple_django_logger/tasks/__init__.py | https://github.com/eshandas/simple_django_logger/blob/a6d15ca1c1ded414ed8fe5cc0c4ca0c20a846582/django_project/simple_django_logger/tasks/__init__.py#L27-L34 | def purge_old_event_logs(delete_before_days=7):
"""
Purges old event logs from the database table
"""
delete_before_date = timezone.now() - timedelta(days=delete_before_days)
logs_deleted = EventLog.objects.filter(
created_on__lte=delete_before_date).delete()
return logs_deleted | [
"def",
"purge_old_event_logs",
"(",
"delete_before_days",
"=",
"7",
")",
":",
"delete_before_date",
"=",
"timezone",
".",
"now",
"(",
")",
"-",
"timedelta",
"(",
"days",
"=",
"delete_before_days",
")",
"logs_deleted",
"=",
"EventLog",
".",
"objects",
".",
"filter",
"(",
"created_on__lte",
"=",
"delete_before_date",
")",
".",
"delete",
"(",
")",
"return",
"logs_deleted"
] | Purges old event logs from the database table | [
"Purges",
"old",
"event",
"logs",
"from",
"the",
"database",
"table"
] | python | train |
brandon-rhodes/python-jplephem | jplephem/spk.py | https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/spk.py#L46-L53 | def close(self):
"""Close this SPK file."""
self.daf.file.close()
for segment in self.segments:
if hasattr(segment, '_data'):
del segment._data
self.daf._array = None
self.daf._map = None | [
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"daf",
".",
"file",
".",
"close",
"(",
")",
"for",
"segment",
"in",
"self",
".",
"segments",
":",
"if",
"hasattr",
"(",
"segment",
",",
"'_data'",
")",
":",
"del",
"segment",
".",
"_data",
"self",
".",
"daf",
".",
"_array",
"=",
"None",
"self",
".",
"daf",
".",
"_map",
"=",
"None"
] | Close this SPK file. | [
"Close",
"this",
"SPK",
"file",
"."
] | python | test |
theolind/pymysensors | mysensors/gateway_serial.py | https://github.com/theolind/pymysensors/blob/a139ab6e2f6b71ebaf37282f69bfd0f7fe6193b6/mysensors/gateway_serial.py#L79-L96 | def _connect(self):
"""Connect to the serial port."""
try:
while True:
_LOGGER.info('Trying to connect to %s', self.port)
try:
yield from serial_asyncio.create_serial_connection(
self.loop, lambda: self.protocol, self.port, self.baud)
return
except serial.SerialException:
_LOGGER.error('Unable to connect to %s', self.port)
_LOGGER.info(
'Waiting %s secs before trying to connect again',
self.reconnect_timeout)
yield from asyncio.sleep(
self.reconnect_timeout, loop=self.loop)
except asyncio.CancelledError:
_LOGGER.debug('Connect attempt to %s cancelled', self.port) | [
"def",
"_connect",
"(",
"self",
")",
":",
"try",
":",
"while",
"True",
":",
"_LOGGER",
".",
"info",
"(",
"'Trying to connect to %s'",
",",
"self",
".",
"port",
")",
"try",
":",
"yield",
"from",
"serial_asyncio",
".",
"create_serial_connection",
"(",
"self",
".",
"loop",
",",
"lambda",
":",
"self",
".",
"protocol",
",",
"self",
".",
"port",
",",
"self",
".",
"baud",
")",
"return",
"except",
"serial",
".",
"SerialException",
":",
"_LOGGER",
".",
"error",
"(",
"'Unable to connect to %s'",
",",
"self",
".",
"port",
")",
"_LOGGER",
".",
"info",
"(",
"'Waiting %s secs before trying to connect again'",
",",
"self",
".",
"reconnect_timeout",
")",
"yield",
"from",
"asyncio",
".",
"sleep",
"(",
"self",
".",
"reconnect_timeout",
",",
"loop",
"=",
"self",
".",
"loop",
")",
"except",
"asyncio",
".",
"CancelledError",
":",
"_LOGGER",
".",
"debug",
"(",
"'Connect attempt to %s cancelled'",
",",
"self",
".",
"port",
")"
] | Connect to the serial port. | [
"Connect",
"to",
"the",
"serial",
"port",
"."
] | python | train |
inspirehep/harvesting-kit | harvestingkit/aps_package.py | https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/aps_package.py#L143-L211 | def _add_references(self, rec):
""" Adds the reference to the record """
for ref in self.document.getElementsByTagName('ref'):
for ref_type, doi, authors, collaboration, journal, volume, page, year,\
label, arxiv, publisher, institution, unstructured_text,\
external_link, report_no, editors in self._get_reference(ref):
subfields = []
if doi:
subfields.append(('a', doi))
for author in authors:
subfields.append(('h', author))
for editor in editors:
subfields.append(('e', editor))
if year:
subfields.append(('y', year))
if unstructured_text:
if page:
subfields.append(('m', unstructured_text + ', ' + page))
else:
subfields.append(('m', unstructured_text))
if collaboration:
subfields.append(('c', collaboration))
if institution:
subfields.append(('m', institution))
if publisher:
subfields.append(('p', publisher))
if arxiv:
subfields.append(('r', arxiv))
if report_no:
subfields.append(('r', report_no))
if external_link:
subfields.append(('u', external_link))
if label:
subfields.append(('o', label))
if ref_type == 'book':
if journal:
subfields.append(('t', journal))
if volume:
subfields.append(('m', volume))
elif page and not unstructured_text:
subfields.append(('m', page))
else:
if volume and page:
subfields.append(('s', journal + "," + volume + "," + page))
elif journal:
subfields.append(('t', journal))
if ref_type:
subfields.append(('d', ref_type))
if not subfields:
#misc-type references
try:
r = ref.getElementsByTagName('mixed-citation')[0]
text = xml_to_text(r)
label = text.split()[0]
text = " ".join(text.split()[1:])
subfields.append(('s', text))
record_add_field(rec, '999', ind1='C', ind2='5', subfields=subfields)
except IndexError:
#references without 'mixed-citation' tag
try:
r = ref.getElementsByTagName('note')[0]
subfields.append(('s', xml_to_text(r)))
record_add_field(rec, '999', ind1='C', ind2='5', subfields=subfields)
except IndexError:
#references without 'note' tag
subfields.append(('s', xml_to_text(ref)))
record_add_field(rec, '999', ind1='C', ind2='5', subfields=subfields)
else:
record_add_field(rec, '999', ind1='C', ind2='5', subfields=subfields) | [
"def",
"_add_references",
"(",
"self",
",",
"rec",
")",
":",
"for",
"ref",
"in",
"self",
".",
"document",
".",
"getElementsByTagName",
"(",
"'ref'",
")",
":",
"for",
"ref_type",
",",
"doi",
",",
"authors",
",",
"collaboration",
",",
"journal",
",",
"volume",
",",
"page",
",",
"year",
",",
"label",
",",
"arxiv",
",",
"publisher",
",",
"institution",
",",
"unstructured_text",
",",
"external_link",
",",
"report_no",
",",
"editors",
"in",
"self",
".",
"_get_reference",
"(",
"ref",
")",
":",
"subfields",
"=",
"[",
"]",
"if",
"doi",
":",
"subfields",
".",
"append",
"(",
"(",
"'a'",
",",
"doi",
")",
")",
"for",
"author",
"in",
"authors",
":",
"subfields",
".",
"append",
"(",
"(",
"'h'",
",",
"author",
")",
")",
"for",
"editor",
"in",
"editors",
":",
"subfields",
".",
"append",
"(",
"(",
"'e'",
",",
"editor",
")",
")",
"if",
"year",
":",
"subfields",
".",
"append",
"(",
"(",
"'y'",
",",
"year",
")",
")",
"if",
"unstructured_text",
":",
"if",
"page",
":",
"subfields",
".",
"append",
"(",
"(",
"'m'",
",",
"unstructured_text",
"+",
"', '",
"+",
"page",
")",
")",
"else",
":",
"subfields",
".",
"append",
"(",
"(",
"'m'",
",",
"unstructured_text",
")",
")",
"if",
"collaboration",
":",
"subfields",
".",
"append",
"(",
"(",
"'c'",
",",
"collaboration",
")",
")",
"if",
"institution",
":",
"subfields",
".",
"append",
"(",
"(",
"'m'",
",",
"institution",
")",
")",
"if",
"publisher",
":",
"subfields",
".",
"append",
"(",
"(",
"'p'",
",",
"publisher",
")",
")",
"if",
"arxiv",
":",
"subfields",
".",
"append",
"(",
"(",
"'r'",
",",
"arxiv",
")",
")",
"if",
"report_no",
":",
"subfields",
".",
"append",
"(",
"(",
"'r'",
",",
"report_no",
")",
")",
"if",
"external_link",
":",
"subfields",
".",
"append",
"(",
"(",
"'u'",
",",
"external_link",
")",
")",
"if",
"label",
":",
"subfields",
".",
"append",
"(",
"(",
"'o'",
",",
"label",
")",
")",
"if",
"ref_type",
"==",
"'book'",
":",
"if",
"journal",
":",
"subfields",
".",
"append",
"(",
"(",
"'t'",
",",
"journal",
")",
")",
"if",
"volume",
":",
"subfields",
".",
"append",
"(",
"(",
"'m'",
",",
"volume",
")",
")",
"elif",
"page",
"and",
"not",
"unstructured_text",
":",
"subfields",
".",
"append",
"(",
"(",
"'m'",
",",
"page",
")",
")",
"else",
":",
"if",
"volume",
"and",
"page",
":",
"subfields",
".",
"append",
"(",
"(",
"'s'",
",",
"journal",
"+",
"\",\"",
"+",
"volume",
"+",
"\",\"",
"+",
"page",
")",
")",
"elif",
"journal",
":",
"subfields",
".",
"append",
"(",
"(",
"'t'",
",",
"journal",
")",
")",
"if",
"ref_type",
":",
"subfields",
".",
"append",
"(",
"(",
"'d'",
",",
"ref_type",
")",
")",
"if",
"not",
"subfields",
":",
"#misc-type references",
"try",
":",
"r",
"=",
"ref",
".",
"getElementsByTagName",
"(",
"'mixed-citation'",
")",
"[",
"0",
"]",
"text",
"=",
"xml_to_text",
"(",
"r",
")",
"label",
"=",
"text",
".",
"split",
"(",
")",
"[",
"0",
"]",
"text",
"=",
"\" \"",
".",
"join",
"(",
"text",
".",
"split",
"(",
")",
"[",
"1",
":",
"]",
")",
"subfields",
".",
"append",
"(",
"(",
"'s'",
",",
"text",
")",
")",
"record_add_field",
"(",
"rec",
",",
"'999'",
",",
"ind1",
"=",
"'C'",
",",
"ind2",
"=",
"'5'",
",",
"subfields",
"=",
"subfields",
")",
"except",
"IndexError",
":",
"#references without 'mixed-citation' tag",
"try",
":",
"r",
"=",
"ref",
".",
"getElementsByTagName",
"(",
"'note'",
")",
"[",
"0",
"]",
"subfields",
".",
"append",
"(",
"(",
"'s'",
",",
"xml_to_text",
"(",
"r",
")",
")",
")",
"record_add_field",
"(",
"rec",
",",
"'999'",
",",
"ind1",
"=",
"'C'",
",",
"ind2",
"=",
"'5'",
",",
"subfields",
"=",
"subfields",
")",
"except",
"IndexError",
":",
"#references without 'note' tag",
"subfields",
".",
"append",
"(",
"(",
"'s'",
",",
"xml_to_text",
"(",
"ref",
")",
")",
")",
"record_add_field",
"(",
"rec",
",",
"'999'",
",",
"ind1",
"=",
"'C'",
",",
"ind2",
"=",
"'5'",
",",
"subfields",
"=",
"subfields",
")",
"else",
":",
"record_add_field",
"(",
"rec",
",",
"'999'",
",",
"ind1",
"=",
"'C'",
",",
"ind2",
"=",
"'5'",
",",
"subfields",
"=",
"subfields",
")"
] | Adds the reference to the record | [
"Adds",
"the",
"reference",
"to",
"the",
"record"
] | python | valid |
pdkit/pdkit | pdkit/utils.py | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/utils.py#L514-L544 | def crossings_nonzero_pos2neg(data):
"""
Find `indices of zero crossings from positive to negative values <http://stackoverflow.com/questions/3843017/efficiently-detect-sign-changes-in-python>`_.
:param data: numpy array of floats
:type data: numpy array of floats
:return crossings: crossing indices to data
:rtype crossings: numpy array of integers
:Examples:
>>> import numpy as np
>>> from mhealthx.signals import crossings_nonzero_pos2neg
>>> data = np.random.random(100)
>>> crossings = crossings_nonzero_pos2neg(data)
"""
import numpy as np
if isinstance(data, np.ndarray):
pass
elif isinstance(data, list):
data = np.asarray(data)
else:
raise IOError('data should be a numpy array')
pos = data > 0
crossings = (pos[:-1] & ~pos[1:]).nonzero()[0]
return crossings | [
"def",
"crossings_nonzero_pos2neg",
"(",
"data",
")",
":",
"import",
"numpy",
"as",
"np",
"if",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
":",
"pass",
"elif",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"data",
"=",
"np",
".",
"asarray",
"(",
"data",
")",
"else",
":",
"raise",
"IOError",
"(",
"'data should be a numpy array'",
")",
"pos",
"=",
"data",
">",
"0",
"crossings",
"=",
"(",
"pos",
"[",
":",
"-",
"1",
"]",
"&",
"~",
"pos",
"[",
"1",
":",
"]",
")",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
"return",
"crossings"
] | Find `indices of zero crossings from positive to negative values <http://stackoverflow.com/questions/3843017/efficiently-detect-sign-changes-in-python>`_.
:param data: numpy array of floats
:type data: numpy array of floats
:return crossings: crossing indices to data
:rtype crossings: numpy array of integers
:Examples:
>>> import numpy as np
>>> from mhealthx.signals import crossings_nonzero_pos2neg
>>> data = np.random.random(100)
>>> crossings = crossings_nonzero_pos2neg(data) | [
"Find",
"indices",
"of",
"zero",
"crossings",
"from",
"positive",
"to",
"negative",
"values",
"<http",
":",
"//",
"stackoverflow",
".",
"com",
"/",
"questions",
"/",
"3843017",
"/",
"efficiently",
"-",
"detect",
"-",
"sign",
"-",
"changes",
"-",
"in",
"-",
"python",
">",
"_",
"."
] | python | train |
google/importlab | importlab/output.py | https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/output.py#L20-L33 | def format_file_node(import_graph, node, indent):
"""Prettyprint nodes based on their provenance."""
f = import_graph.provenance[node]
if isinstance(f, resolve.Direct):
out = '+ ' + f.short_path
elif isinstance(f, resolve.Local):
out = ' ' + f.short_path
elif isinstance(f, resolve.System):
out = ':: ' + f.short_path
elif isinstance(f, resolve.Builtin):
out = '(%s)' % f.module_name
else:
out = '%r' % node
return ' '*indent + out | [
"def",
"format_file_node",
"(",
"import_graph",
",",
"node",
",",
"indent",
")",
":",
"f",
"=",
"import_graph",
".",
"provenance",
"[",
"node",
"]",
"if",
"isinstance",
"(",
"f",
",",
"resolve",
".",
"Direct",
")",
":",
"out",
"=",
"'+ '",
"+",
"f",
".",
"short_path",
"elif",
"isinstance",
"(",
"f",
",",
"resolve",
".",
"Local",
")",
":",
"out",
"=",
"' '",
"+",
"f",
".",
"short_path",
"elif",
"isinstance",
"(",
"f",
",",
"resolve",
".",
"System",
")",
":",
"out",
"=",
"':: '",
"+",
"f",
".",
"short_path",
"elif",
"isinstance",
"(",
"f",
",",
"resolve",
".",
"Builtin",
")",
":",
"out",
"=",
"'(%s)'",
"%",
"f",
".",
"module_name",
"else",
":",
"out",
"=",
"'%r'",
"%",
"node",
"return",
"' '",
"*",
"indent",
"+",
"out"
] | Prettyprint nodes based on their provenance. | [
"Prettyprint",
"nodes",
"based",
"on",
"their",
"provenance",
"."
] | python | train |
markomanninen/abnum | abnum/search.py | https://github.com/markomanninen/abnum/blob/9bfc8f06f34d9a51aab038638f87e2bb5f9f4c99/abnum/search.py#L5-L22 | def find_cumulative_indices(list_of_numbers, search_sum):
"""
find_cumulative_indices([70, 58, 81, 909, 70, 215, 70, 1022, 580, 930, 898], 285) ->
[[4, 5],[5, 6]]
"""
u = 0
y = 0
result = []
for idx, val in enumerate(list_of_numbers):
y += list_of_numbers[idx]
while y >= search_sum:
if y == search_sum:
result.append(range(u, idx+1))
y -= list_of_numbers[u]
u += 1
# if matches are not found, empty string is returned
# for easier cell data handling on pandas dataframe
return result or '' | [
"def",
"find_cumulative_indices",
"(",
"list_of_numbers",
",",
"search_sum",
")",
":",
"u",
"=",
"0",
"y",
"=",
"0",
"result",
"=",
"[",
"]",
"for",
"idx",
",",
"val",
"in",
"enumerate",
"(",
"list_of_numbers",
")",
":",
"y",
"+=",
"list_of_numbers",
"[",
"idx",
"]",
"while",
"y",
">=",
"search_sum",
":",
"if",
"y",
"==",
"search_sum",
":",
"result",
".",
"append",
"(",
"range",
"(",
"u",
",",
"idx",
"+",
"1",
")",
")",
"y",
"-=",
"list_of_numbers",
"[",
"u",
"]",
"u",
"+=",
"1",
"# if matches are not found, empty string is returned",
"# for easier cell data handling on pandas dataframe",
"return",
"result",
"or",
"''"
] | find_cumulative_indices([70, 58, 81, 909, 70, 215, 70, 1022, 580, 930, 898], 285) ->
[[4, 5],[5, 6]] | [
"find_cumulative_indices",
"(",
"[",
"70",
"58",
"81",
"909",
"70",
"215",
"70",
"1022",
"580",
"930",
"898",
"]",
"285",
")",
"-",
">",
"[[",
"4",
"5",
"]",
"[",
"5",
"6",
"]]"
] | python | train |
cuihantao/andes | andes/models/line.py | https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/models/line.py#L131-L152 | def build_y(self):
"""Build transmission line admittance matrix into self.Y"""
if not self.n:
return
self.y1 = mul(self.u, self.g1 + self.b1 * 1j)
self.y2 = mul(self.u, self.g2 + self.b2 * 1j)
self.y12 = div(self.u, self.r + self.x * 1j)
self.m = polar(self.tap, self.phi * deg2rad)
self.m2 = abs(self.m)**2
self.mconj = conj(self.m)
# build self and mutual admittances into Y
self.Y = spmatrix(
div(self.y12 + self.y1, self.m2), self.a1, self.a1,
(self.nb, self.nb), 'z')
self.Y -= spmatrix(
div(self.y12, self.mconj), self.a1, self.a2, (self.nb, self.nb),
'z')
self.Y -= spmatrix(
div(self.y12, self.m), self.a2, self.a1, (self.nb, self.nb), 'z')
self.Y += spmatrix(self.y12 + self.y2, self.a2, self.a2,
(self.nb, self.nb), 'z') | [
"def",
"build_y",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"n",
":",
"return",
"self",
".",
"y1",
"=",
"mul",
"(",
"self",
".",
"u",
",",
"self",
".",
"g1",
"+",
"self",
".",
"b1",
"*",
"1j",
")",
"self",
".",
"y2",
"=",
"mul",
"(",
"self",
".",
"u",
",",
"self",
".",
"g2",
"+",
"self",
".",
"b2",
"*",
"1j",
")",
"self",
".",
"y12",
"=",
"div",
"(",
"self",
".",
"u",
",",
"self",
".",
"r",
"+",
"self",
".",
"x",
"*",
"1j",
")",
"self",
".",
"m",
"=",
"polar",
"(",
"self",
".",
"tap",
",",
"self",
".",
"phi",
"*",
"deg2rad",
")",
"self",
".",
"m2",
"=",
"abs",
"(",
"self",
".",
"m",
")",
"**",
"2",
"self",
".",
"mconj",
"=",
"conj",
"(",
"self",
".",
"m",
")",
"# build self and mutual admittances into Y",
"self",
".",
"Y",
"=",
"spmatrix",
"(",
"div",
"(",
"self",
".",
"y12",
"+",
"self",
".",
"y1",
",",
"self",
".",
"m2",
")",
",",
"self",
".",
"a1",
",",
"self",
".",
"a1",
",",
"(",
"self",
".",
"nb",
",",
"self",
".",
"nb",
")",
",",
"'z'",
")",
"self",
".",
"Y",
"-=",
"spmatrix",
"(",
"div",
"(",
"self",
".",
"y12",
",",
"self",
".",
"mconj",
")",
",",
"self",
".",
"a1",
",",
"self",
".",
"a2",
",",
"(",
"self",
".",
"nb",
",",
"self",
".",
"nb",
")",
",",
"'z'",
")",
"self",
".",
"Y",
"-=",
"spmatrix",
"(",
"div",
"(",
"self",
".",
"y12",
",",
"self",
".",
"m",
")",
",",
"self",
".",
"a2",
",",
"self",
".",
"a1",
",",
"(",
"self",
".",
"nb",
",",
"self",
".",
"nb",
")",
",",
"'z'",
")",
"self",
".",
"Y",
"+=",
"spmatrix",
"(",
"self",
".",
"y12",
"+",
"self",
".",
"y2",
",",
"self",
".",
"a2",
",",
"self",
".",
"a2",
",",
"(",
"self",
".",
"nb",
",",
"self",
".",
"nb",
")",
",",
"'z'",
")"
] | Build transmission line admittance matrix into self.Y | [
"Build",
"transmission",
"line",
"admittance",
"matrix",
"into",
"self",
".",
"Y"
] | python | train |
Nixiware/viper | nx/viper/application.py | https://github.com/Nixiware/viper/blob/fbe6057facd8d46103e9955880dfd99e63b7acb3/nx/viper/application.py#L62-L98 | def _getInterfaces(self):
"""
Load application communication interfaces.
:return: <dict>
"""
interfaces = {}
interfacesPath = os.path.join("application", "interface")
interfaceList = os.listdir(interfacesPath)
for file in interfaceList:
interfaceDirectoryPath = os.path.join(interfacesPath, file)
if not os.path.isdir(interfaceDirectoryPath) or file.startswith("__") or file.startswith("."):
continue
interfaceName = ntpath.basename(interfaceDirectoryPath)
interfacePath = os.path.join(interfaceDirectoryPath, interfaceName) + ".py"
if not os.path.isfile(interfacePath):
continue
# importing interface
interfaceSpec = importlib.util.spec_from_file_location(
interfaceName,
interfacePath
)
interface = importlib.util.module_from_spec(interfaceSpec)
interfaceSpec.loader.exec_module(interface)
# checking if there is an interface in the file
if hasattr(interface, "Service"):
# initializing interface
interfaceInstance = interface.Service(self)
interfaces[interfaceName] = interfaceInstance
return interfaces | [
"def",
"_getInterfaces",
"(",
"self",
")",
":",
"interfaces",
"=",
"{",
"}",
"interfacesPath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"application\"",
",",
"\"interface\"",
")",
"interfaceList",
"=",
"os",
".",
"listdir",
"(",
"interfacesPath",
")",
"for",
"file",
"in",
"interfaceList",
":",
"interfaceDirectoryPath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"interfacesPath",
",",
"file",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"interfaceDirectoryPath",
")",
"or",
"file",
".",
"startswith",
"(",
"\"__\"",
")",
"or",
"file",
".",
"startswith",
"(",
"\".\"",
")",
":",
"continue",
"interfaceName",
"=",
"ntpath",
".",
"basename",
"(",
"interfaceDirectoryPath",
")",
"interfacePath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"interfaceDirectoryPath",
",",
"interfaceName",
")",
"+",
"\".py\"",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"interfacePath",
")",
":",
"continue",
"# importing interface",
"interfaceSpec",
"=",
"importlib",
".",
"util",
".",
"spec_from_file_location",
"(",
"interfaceName",
",",
"interfacePath",
")",
"interface",
"=",
"importlib",
".",
"util",
".",
"module_from_spec",
"(",
"interfaceSpec",
")",
"interfaceSpec",
".",
"loader",
".",
"exec_module",
"(",
"interface",
")",
"# checking if there is an interface in the file",
"if",
"hasattr",
"(",
"interface",
",",
"\"Service\"",
")",
":",
"# initializing interface",
"interfaceInstance",
"=",
"interface",
".",
"Service",
"(",
"self",
")",
"interfaces",
"[",
"interfaceName",
"]",
"=",
"interfaceInstance",
"return",
"interfaces"
] | Load application communication interfaces.
:return: <dict> | [
"Load",
"application",
"communication",
"interfaces",
"."
] | python | train |
enkore/i3pystatus | i3pystatus/sabnzbd.py | https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/sabnzbd.py#L52-L91 | def run(self):
"""Connect to SABnzbd and get the data."""
try:
answer = urlopen(self.url + "&mode=queue").read().decode()
except (HTTPError, URLError) as error:
self.output = {
"full_text": str(error.reason),
"color": "#FF0000"
}
return
answer = json.loads(answer)
# if answer["status"] exists and is False, an error occured
if not answer.get("status", True):
self.output = {
"full_text": answer["error"],
"color": "#FF0000"
}
return
queue = answer["queue"]
self.status = queue["status"]
if self.is_paused():
color = self.color_paused
elif self.is_downloading():
color = self.color_downloading
else:
color = self.color
if self.is_downloading():
full_text = self.format.format(**queue)
else:
full_text = self.format_paused.format(**queue)
self.output = {
"full_text": full_text,
"color": color
} | [
"def",
"run",
"(",
"self",
")",
":",
"try",
":",
"answer",
"=",
"urlopen",
"(",
"self",
".",
"url",
"+",
"\"&mode=queue\"",
")",
".",
"read",
"(",
")",
".",
"decode",
"(",
")",
"except",
"(",
"HTTPError",
",",
"URLError",
")",
"as",
"error",
":",
"self",
".",
"output",
"=",
"{",
"\"full_text\"",
":",
"str",
"(",
"error",
".",
"reason",
")",
",",
"\"color\"",
":",
"\"#FF0000\"",
"}",
"return",
"answer",
"=",
"json",
".",
"loads",
"(",
"answer",
")",
"# if answer[\"status\"] exists and is False, an error occured",
"if",
"not",
"answer",
".",
"get",
"(",
"\"status\"",
",",
"True",
")",
":",
"self",
".",
"output",
"=",
"{",
"\"full_text\"",
":",
"answer",
"[",
"\"error\"",
"]",
",",
"\"color\"",
":",
"\"#FF0000\"",
"}",
"return",
"queue",
"=",
"answer",
"[",
"\"queue\"",
"]",
"self",
".",
"status",
"=",
"queue",
"[",
"\"status\"",
"]",
"if",
"self",
".",
"is_paused",
"(",
")",
":",
"color",
"=",
"self",
".",
"color_paused",
"elif",
"self",
".",
"is_downloading",
"(",
")",
":",
"color",
"=",
"self",
".",
"color_downloading",
"else",
":",
"color",
"=",
"self",
".",
"color",
"if",
"self",
".",
"is_downloading",
"(",
")",
":",
"full_text",
"=",
"self",
".",
"format",
".",
"format",
"(",
"*",
"*",
"queue",
")",
"else",
":",
"full_text",
"=",
"self",
".",
"format_paused",
".",
"format",
"(",
"*",
"*",
"queue",
")",
"self",
".",
"output",
"=",
"{",
"\"full_text\"",
":",
"full_text",
",",
"\"color\"",
":",
"color",
"}"
] | Connect to SABnzbd and get the data. | [
"Connect",
"to",
"SABnzbd",
"and",
"get",
"the",
"data",
"."
] | python | train |
ask/carrot | carrot/messaging.py | https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/messaging.py#L821-L823 | def send(self, message_data, delivery_mode=None):
"""See :meth:`Publisher.send`"""
self.publisher.send(message_data, delivery_mode=delivery_mode) | [
"def",
"send",
"(",
"self",
",",
"message_data",
",",
"delivery_mode",
"=",
"None",
")",
":",
"self",
".",
"publisher",
".",
"send",
"(",
"message_data",
",",
"delivery_mode",
"=",
"delivery_mode",
")"
] | See :meth:`Publisher.send` | [
"See",
":",
"meth",
":",
"Publisher",
".",
"send"
] | python | train |
dj-stripe/dj-stripe | djstripe/models/base.py | https://github.com/dj-stripe/dj-stripe/blob/a5308a3808cd6e2baba49482f7a699f3a8992518/djstripe/models/base.py#L539-L560 | def _stripe_object_to_refunds(cls, target_cls, data, charge):
"""
Retrieves Refunds for a charge
:param target_cls: The target class to instantiate per invoice item.
:type target_cls: ``Refund``
:param data: The data dictionary received from the Stripe API.
:type data: dict
:param charge: The charge object that refunds are for.
:type invoice: ``djstripe.models.Refund``
:return:
"""
refunds = data.get("refunds")
if not refunds:
return []
refund_objs = []
for refund_data in refunds.get("data", []):
item, _ = target_cls._get_or_create_from_stripe_object(refund_data, refetch=False)
refund_objs.append(item)
return refund_objs | [
"def",
"_stripe_object_to_refunds",
"(",
"cls",
",",
"target_cls",
",",
"data",
",",
"charge",
")",
":",
"refunds",
"=",
"data",
".",
"get",
"(",
"\"refunds\"",
")",
"if",
"not",
"refunds",
":",
"return",
"[",
"]",
"refund_objs",
"=",
"[",
"]",
"for",
"refund_data",
"in",
"refunds",
".",
"get",
"(",
"\"data\"",
",",
"[",
"]",
")",
":",
"item",
",",
"_",
"=",
"target_cls",
".",
"_get_or_create_from_stripe_object",
"(",
"refund_data",
",",
"refetch",
"=",
"False",
")",
"refund_objs",
".",
"append",
"(",
"item",
")",
"return",
"refund_objs"
] | Retrieves Refunds for a charge
:param target_cls: The target class to instantiate per invoice item.
:type target_cls: ``Refund``
:param data: The data dictionary received from the Stripe API.
:type data: dict
:param charge: The charge object that refunds are for.
:type invoice: ``djstripe.models.Refund``
:return: | [
"Retrieves",
"Refunds",
"for",
"a",
"charge",
":",
"param",
"target_cls",
":",
"The",
"target",
"class",
"to",
"instantiate",
"per",
"invoice",
"item",
".",
":",
"type",
"target_cls",
":",
"Refund",
":",
"param",
"data",
":",
"The",
"data",
"dictionary",
"received",
"from",
"the",
"Stripe",
"API",
".",
":",
"type",
"data",
":",
"dict",
":",
"param",
"charge",
":",
"The",
"charge",
"object",
"that",
"refunds",
"are",
"for",
".",
":",
"type",
"invoice",
":",
"djstripe",
".",
"models",
".",
"Refund",
":",
"return",
":"
] | python | train |
rockyzhengwu/FoolNLTK | train/data_utils.py | https://github.com/rockyzhengwu/FoolNLTK/blob/f9929edc7b2f1b154b5f9bbd2c0c95203a5bddb3/train/data_utils.py#L80-L94 | def get_char_type(ch):
"""
0, 汉字
1, 英文字母
2. 数字
3. 其他
"""
if re.match(en_p, ch):
return 1
elif re.match("\d+", ch):
return 2
elif re.match(re_han, ch):
return 3
else:
return 4 | [
"def",
"get_char_type",
"(",
"ch",
")",
":",
"if",
"re",
".",
"match",
"(",
"en_p",
",",
"ch",
")",
":",
"return",
"1",
"elif",
"re",
".",
"match",
"(",
"\"\\d+\"",
",",
"ch",
")",
":",
"return",
"2",
"elif",
"re",
".",
"match",
"(",
"re_han",
",",
"ch",
")",
":",
"return",
"3",
"else",
":",
"return",
"4"
] | 0, 汉字
1, 英文字母
2. 数字
3. 其他 | [
"0",
"汉字",
"1",
"英文字母",
"2",
".",
"数字",
"3",
".",
"其他"
] | python | train |