repo | path | url | code | docstring | language | partition
---|---|---|---|---|---|---
tBuLi/symfit | symfit/contrib/interactive_guess/interactive_guess.py | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/contrib/interactive_guess/interactive_guess.py#L249-L257 | def plot_model(self, proj, ax):
"""
Plots the model proposed for the projection proj on ax.
"""
x, y = proj
y_vals = getattr(self.ig._eval_model(), y.name)
x_vals = self.ig._x_points[x]
plot, = ax.plot(x_vals, y_vals, c='red')
    return plot
| Plots the model proposed for the projection proj on ax.
| python | train |
miguelgrinberg/python-socketio | socketio/client.py | https://github.com/miguelgrinberg/python-socketio/blob/c0c1bf8d21e3597389b18938550a0724dd9676b7/socketio/client.py#L288-L317 | def call(self, event, data=None, namespace=None, timeout=60):
"""Emit a custom event to a client and wait for the response.
:param event: The event name. It can be any string. The event names
``'connect'``, ``'message'`` and ``'disconnect'`` are
reserved and should not be used.
:param data: The data to send to the client or clients. Data can be of
type ``str``, ``bytes``, ``list`` or ``dict``. If a
``list`` or ``dict``, the data will be serialized as JSON.
:param namespace: The Socket.IO namespace for the event. If this
argument is omitted the event is emitted to the
default namespace.
:param timeout: The waiting timeout. If the timeout is reached before
the client acknowledges the event, then a
``TimeoutError`` exception is raised.
"""
callback_event = self.eio.create_event()
callback_args = []
def event_callback(*args):
callback_args.append(args)
callback_event.set()
self.emit(event, data=data, namespace=namespace,
callback=event_callback)
if not callback_event.wait(timeout=timeout):
raise exceptions.TimeoutError()
return callback_args[0] if len(callback_args[0]) > 1 \
else callback_args[0][0] if len(callback_args[0]) == 1 \
        else None
| Emit a custom event to a client and wait for the response.
:param event: The event name. It can be any string. The event names
``'connect'``, ``'message'`` and ``'disconnect'`` are
reserved and should not be used.
:param data: The data to send to the client or clients. Data can be of
type ``str``, ``bytes``, ``list`` or ``dict``. If a
``list`` or ``dict``, the data will be serialized as JSON.
:param namespace: The Socket.IO namespace for the event. If this
argument is omitted the event is emitted to the
default namespace.
:param timeout: The waiting timeout. If the timeout is reached before
the client acknowledges the event, then a
``TimeoutError`` exception is raised.
| python | train |
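A minimal usage sketch of `call` — the server URL and the `'sum'` event are assumptions for illustration, not part of the library:

```python
import socketio

sio = socketio.Client()
sio.connect('http://localhost:5000')  # assumed: a server with a 'sum' handler
try:
    # Blocks until the server acknowledges the event or the timeout elapses.
    result = sio.call('sum', {'numbers': [1, 2]}, timeout=10)
    print(result)
except socketio.exceptions.TimeoutError:
    print('no acknowledgement within 10 seconds')
finally:
    sio.disconnect()
```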
echinopsii/net.echinopsii.ariane.community.cli.python3 | ariane_clip3/zeromq/driver.py | https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/zeromq/driver.py#L49-L62 | def call(self, my_args=None):
"""
publish the message in the topic
:param my_args: dict like {msg: 'msg'}
:return: nothing
"""
LOGGER.debug("zeromq.Publisher.call")
if my_args is None:
raise exceptions.ArianeConfError("publisher call arguments")
if 'topic' not in my_args or my_args['topic'] is None or not my_args['topic']:
raise exceptions.ArianeConfError("publisher topic")
if 'msg' not in my_args or my_args['msg'] is None or not my_args['msg']:
raise exceptions.ArianeConfError("publisher call msg")
    self.zmqsocket.send_string("%s %s" % (my_args['topic'], my_args['msg']))
| publish the message in the topic
:param my_args: dict like {msg: 'msg'}
:return: nothing
| python | train |
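A short usage sketch; `publisher` below is an assumed, already-initialised `Publisher` instance from this driver module:

```python
# 'publisher' is an assumption: an initialised zeromq Publisher instance.
# Both keys are required, otherwise exceptions.ArianeConfError is raised.
publisher.call({'topic': 'ariane.test', 'msg': 'hello world'})
```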
google/grr | grr/client/grr_response_client/client_actions/standard.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/standard.py#L203-L229 | def ExecuteCommandFromClient(command):
"""Executes one of the predefined commands.
Args:
command: An `ExecuteRequest` object.
Yields:
`rdf_client_action.ExecuteResponse` objects.
"""
cmd = command.cmd
args = command.args
time_limit = command.time_limit
res = client_utils_common.Execute(cmd, args, time_limit)
(stdout, stderr, status, time_used) = res
# Limit output to 10MB so our response doesn't get too big.
stdout = stdout[:10 * 1024 * 1024]
stderr = stderr[:10 * 1024 * 1024]
yield rdf_client_action.ExecuteResponse(
request=command,
stdout=stdout,
stderr=stderr,
exit_status=status,
# We have to return microseconds.
      time_used=int(1e6 * time_used))
| Executes one of the predefined commands.
Args:
command: An `ExecuteRequest` object.
Yields:
`rdf_client_action.ExecuteResponse` objects.
| python | train |
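Because the action yields responses, callers consume it by iteration — a hedged sketch where `command` is an assumed, already-populated `ExecuteRequest`:

```python
# 'command' is an assumption: an ExecuteRequest with cmd/args/time_limit set.
for response in ExecuteCommandFromClient(command):
    # time_used is reported in microseconds; stdout/stderr are capped at 10MB each
    print(response.exit_status, response.time_used)
```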
nvbn/thefuck | thefuck/types.py | https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/types.py#L58-L66 | def update(self, **kwargs):
"""Returns new command with replaced fields.
:rtype: Command
"""
kwargs.setdefault('script', self.script)
kwargs.setdefault('output', self.output)
    return Command(**kwargs)
| Returns new command with replaced fields.
:rtype: Command
| python | train |
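A small sketch of `update`, assuming `Command` is constructed with `(script, output)` as in this module:

```python
from thefuck.types import Command

cmd = Command('git brnch', "git: 'brnch' is not a git command.")
fixed = cmd.update(script='git branch')
print(fixed.script)  # 'git branch'
print(fixed.output)  # unchanged: carried over by setdefault
```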
tanghaibao/jcvi | jcvi/projects/str.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/str.py#L1276-L1321 | def allelefreq(args):
"""
%prog allelefreq HD,DM1,SCA1,SCA17,FXTAS,FRAXE
Plot the allele frequencies of some STRs.
"""
p = OptionParser(allelefreq.__doc__)
p.add_option("--nopanels", default=False, action="store_true",
help="No panel labels A, B, ...")
p.add_option("--usereport", help="Use allele frequency in report file")
opts, args, iopts = p.set_image_options(args, figsize="9x13")
if len(args) != 1:
sys.exit(not p.print_help())
loci, = args
fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(ncols=2, nrows=3,
figsize=(iopts.w, iopts.h))
plt.tight_layout(pad=4)
if opts.usereport:
treds, df = read_treds(tredsfile=opts.usereport)
else:
treds, df = read_treds()
df = df.set_index(["abbreviation"])
axes = (ax1, ax2, ax3, ax4, ax5, ax6)
loci = loci.split(",")
for ax, locus in zip(axes, loci):
plot_allelefreq(ax, df, locus)
# Delete unused axes
for ax in axes[len(loci):]:
ax.set_axis_off()
root = fig.add_axes([0, 0, 1, 1])
pad = .03
if not opts.nopanels:
panel_labels(root, ((pad / 2, 1 - pad, "A"), (.5 + pad, 1 - pad, "B"),
(pad / 2, 2 / 3. - pad / 2, "C"), (.5 + pad, 2 / 3. - pad / 2, "D"),
(pad / 2, 1 / 3. , "E"), (.5 + pad, 1 / 3. , "F"),
))
normalize_axes(root)
image_name = "allelefreq." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
| %prog allelefreq HD,DM1,SCA1,SCA17,FXTAS,FRAXE
Plot the allele frequencies of some STRs.
| python | train |
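The function is a `%prog`-style entry point, so it takes an argv-like list; a hedged sketch assuming jcvi and its bundled TRED tables are installed:

```python
# Writes allelefreq.<format> (PDF by default) to the working directory.
allelefreq(["HD,DM1,SCA1,SCA17,FXTAS,FRAXE"])
```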
jsvine/spectra | spectra/grapefruit.py | https://github.com/jsvine/spectra/blob/2269a0ae9b5923154b15bd661fb81179608f7ec2/spectra/grapefruit.py#L1204-L1228 | def NewFromHsl(h, s, l, alpha=1.0, wref=_DEFAULT_WREF):
    '''Create a new instance based on the specified HSL values.

    Parameters:
      :h:
        The Hue component value [0...360]
      :s:
        The Saturation component value [0...1]
      :l:
        The Lightness component value [0...1]
      :alpha:
        The color transparency [0...1], default is opaque
      :wref:
        The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> Color.NewFromHsl(30, 1, 0.5)
    (1.0, 0.5, 0.0, 1.0)
    >>> Color.NewFromHsl(30, 1, 0.5, 0.5)
    (1.0, 0.5, 0.0, 0.5)

    '''
    return Color((h, s, l), 'hsl', alpha, wref)
| Create a new instance based on the specified HSL values.
Parameters:
:h:
The Hue component value [0...360]
:s:
The Saturation component value [0...1]
:l:
The Lightness component value [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> Color.NewFromHsl(30, 1, 0.5)
(1.0, 0.5, 0.0, 1.0)
>>> Color.NewFromHsl(30, 1, 0.5, 0.5)
(1.0, 0.5, 0.0, 0.5)
| python | train |
samirelanduk/quickplots | quickplots/charts.py | https://github.com/samirelanduk/quickplots/blob/59f5e6ff367b2c1c24ba7cf1805d03552034c6d8/quickplots/charts.py#L196-L214 | def line(self, *args, **kwargs):
"""Adds a :py:class:`.LineSeries` to the chart.
:param \*data: The data for the series as either (x,y) values or two big\
tuples/lists of x and y values respectively.
:param str name: The name to be associated with the series.
:param str color: The hex colour of the line.
:param str linestyle: The line pattern. See\
`OmniCanvas docs <https://omnicanvas.readthedocs.io/en/latest/api/graph\
ics.html#omnicanvas.graphics.ShapeGraphic.line_style>`_ for acceptable \
values.
:param Number linewidth: The width in pixels of the line.
:raises ValueError: if the size and length of the data doesn't match\
either format."""
if "color" not in kwargs:
kwargs["color"] = self.next_color()
series = LineSeries(*args, **kwargs)
    self.add_series(series)
| Adds a :py:class:`.LineSeries` to the chart.
:param \*data: The data for the series as either (x,y) values or two big\
tuples/lists of x and y values respectively.
:param str name: The name to be associated with the series.
:param str color: The hex colour of the line.
:param str linestyle: The line pattern. See\
`OmniCanvas docs <https://omnicanvas.readthedocs.io/en/latest/api/graph\
ics.html#omnicanvas.graphics.ShapeGraphic.line_style>`_ for acceptable \
values.
:param Number linewidth: The width in pixels of the line.
:raises ValueError: if the size and length of the data doesn't match\
either format.
| python | train |
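A usage sketch; the `AxisChart` constructor and import path here are assumptions about how a chart object is obtained:

```python
from quickplots.charts import AxisChart  # assumed constructor location

chart = AxisChart()
chart.line((0, 0), (1, 1), (2, 4), name="squares", linewidth=2)
# color is auto-assigned via next_color() when not given explicitly
```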
pycontribs/pyrax | pyrax/autoscale.py | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/autoscale.py#L1182-L1187 | def delete_policy(self, scaling_group, policy):
"""
Deletes the specified policy from the scaling group.
"""
return self._manager.delete_policy(scaling_group=scaling_group,
            policy=policy)
| Deletes the specified policy from the scaling group.
| python | train |
canonical-ols/acceptable | acceptable/_build_doubles.py | https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/_build_doubles.py#L154-L162 | def _get_simple_assignments(tree):
"""Get simple assignments from node tree."""
result = {}
for node in ast.walk(tree):
if isinstance(node, ast.Assign):
for target in node.targets:
if isinstance(target, ast.Name):
result[target.id] = node.value
    return result
]
| Get simple assignments from node tree. | [
"Get",
"simple",
"assignments",
"from",
"node",
"tree",
"."
]
| python | train |
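A runnable sketch, assuming the helper is importable from the module in this record's path:

```python
import ast
from acceptable._build_doubles import _get_simple_assignments  # per this record's path

tree = ast.parse("X = 1\nY, Z = 2, 3\nW = 'hi'")
assignments = _get_simple_assignments(tree)
print(sorted(assignments))  # ['W', 'X'] -- tuple targets like (Y, Z) are skipped
```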
HewlettPackard/python-hpOneView | hpOneView/resources/fc_sans/managed_sans.py | https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/fc_sans/managed_sans.py#L141-L154 | def create_issues_report(self, timeout=-1):
"""
Creates an unexpected zoning report for a SAN.
Args:
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
list: A list of FCIssueResponse dict.
"""
uri = "{}/issues/".format(self.data["uri"])
    return self._helper.create_report(uri, timeout)
| Creates an unexpected zoning report for a SAN.
Args:
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
list: A list of FCIssueResponse dict.
| python | train |
jilljenn/tryalgo | tryalgo/polygon.py | https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/polygon.py#L12-L23 | def area(p):
"""Area of a polygone
:param p: list of the points taken in any orientation,
p[0] can differ from p[-1]
:returns: area
:complexity: linear
"""
A = 0
for i in range(len(p)):
A += p[i - 1][0] * p[i][1] - p[i][0] * p[i - 1][1]
    return A / 2.
| Area of a polygon
:param p: list of the points taken in any orientation,
p[0] can differ from p[-1]
:returns: area
:complexity: linear
| python | train |
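A quick check of the shoelace formula above (import path taken from this record):

```python
from tryalgo.polygon import area

square = [(0, 0), (1, 0), (1, 1), (0, 1)]
print(area(square))        # 1.0  (counter-clockwise orientation -> positive)
print(area(square[::-1]))  # -1.0 (clockwise orientation -> negative)
```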
CalebBell/fluids | fluids/nrlmsise00/nrlmsise_00.py | https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/nrlmsise00/nrlmsise_00.py#L932-L1048 | def gtd7(Input, flags, output):
    '''The standard model subroutine (GTD7) always computes the
    ‘‘thermospheric’’ mass density by explicitly summing the masses of
    the species in equilibrium at the thermospheric temperature T(z).
    '''
    mn3 = 5
    zn3 = [32.5, 20.0, 15.0, 10.0, 0.0]
    mn2 = 4
    zn2 = [72.5, 55.0, 45.0, 32.5]
    zmix = 62.5
    soutput = nrlmsise_output()
    tselec(flags);
    #/* Latitude variation of gravity (none for sw[2]=0) */
    xlat = Input.g_lat;
    if (flags.sw[2] == 0): # pragma: no cover
        xlat = 45.0;
    glatf(xlat, gsurf, re);
    xmm = pdm[2][4];
    #/* THERMOSPHERE / MESOSPHERE (above zn2[0]) */
    if (Input.alt > zn2[0]):
        altt = Input.alt;
    else:
        altt = zn2[0];
    tmp = Input.alt;
    Input.alt = altt;
    gts7(Input, flags, soutput);
    altt = Input.alt;
    Input.alt = tmp;
    if (flags.sw[0]): # pragma: no cover #/* metric adjustment */
        dm28m = dm28*1.0E6;
    else:
        dm28m = dm28;
    output.t[0] = soutput.t[0];
    output.t[1] = soutput.t[1];
    if (Input.alt >= zn2[0]):
        for i in range(9):
            output.d[i] = soutput.d[i];
        return
    #/* LOWER MESOSPHERE/UPPER STRATOSPHERE (between zn3[0] and zn2[0])
    #* Temperature at nodes and gradients at end nodes
    #* Inverse temperature a linear function of spherical harmonics
    #*/
    meso_tgn2[0] = meso_tgn1[1];
    meso_tn2[0] = meso_tn1[4];
    meso_tn2[1] = pma[0][0]*pavgm[0]/(1.0-flags.sw[20]*glob7s(pma[0], Input, flags));
    meso_tn2[2] = pma[1][0]*pavgm[1]/(1.0-flags.sw[20]*glob7s(pma[1], Input, flags));
    meso_tn2[3] = pma[2][0]*pavgm[2]/(1.0-flags.sw[20]*flags.sw[22]*glob7s(pma[2], Input, flags));
    meso_tgn2[1] = pavgm[8]*pma[9][0]*(1.0+flags.sw[20]*flags.sw[22]*glob7s(pma[9], Input, flags))*meso_tn2[3]*meso_tn2[3]/(pow((pma[2][0]*pavgm[2]), 2.0));
    meso_tn3[0] = meso_tn2[3];
    if (Input.alt < zn3[0]):
        #/* LOWER STRATOSPHERE AND TROPOSPHERE (below zn3[0])
        #* Temperature at nodes and gradients at end nodes
        #* Inverse temperature a linear function of spherical harmonics
        #*/
        meso_tgn3[0] = meso_tgn2[1];
        meso_tn3[1] = pma[3][0]*pavgm[3]/(1.0-flags.sw[22]*glob7s(pma[3], Input, flags));
        meso_tn3[2] = pma[4][0]*pavgm[4]/(1.0-flags.sw[22]*glob7s(pma[4], Input, flags));
        meso_tn3[3] = pma[5][0]*pavgm[5]/(1.0-flags.sw[22]*glob7s(pma[5], Input, flags));
        meso_tn3[4] = pma[6][0]*pavgm[6]/(1.0-flags.sw[22]*glob7s(pma[6], Input, flags));
        meso_tgn3[1] = pma[7][0]*pavgm[7]*(1.0+flags.sw[22]*glob7s(pma[7], Input, flags)) *meso_tn3[4]*meso_tn3[4]/(pow((pma[6][0]*pavgm[6]), 2.0));
    #/* LINEAR TRANSITION TO FULL MIXING BELOW zn2[0] */
    dmc = 0;
    if (Input.alt > zmix):
        dmc = 1.0 - (zn2[0]-Input.alt)/(zn2[0] - zmix);
    dz28 = soutput.d[2];
    #/**** N2 density ****/
    dmr = soutput.d[2] / dm28m - 1.0;
    tz = [0.0]
    output.d[2] = densm(Input.alt, dm28m, xmm, tz, mn3, zn3, meso_tn3, meso_tgn3, mn2, zn2, meso_tn2, meso_tgn2);
    output.d[2] = output.d[2] * (1.0 + dmr*dmc);
    #/**** HE density ****/
    dmr = soutput.d[0] / (dz28 * pdm[0][1]) - 1.0;
    output.d[0] = output.d[2] * pdm[0][1] * (1.0 + dmr*dmc);
    #/**** O density ****/
    output.d[1] = 0;
    output.d[8] = 0;
    #/**** O2 density ****/
    dmr = soutput.d[3] / (dz28 * pdm[3][1]) - 1.0;
    output.d[3] = output.d[2] * pdm[3][1] * (1.0 + dmr*dmc);
    #/**** AR density ***/
    dmr = soutput.d[4] / (dz28 * pdm[4][1]) - 1.0;
    output.d[4] = output.d[2] * pdm[4][1] * (1.0 + dmr*dmc);
    #/**** Hydrogen density ****/
    output.d[6] = 0;
    #/**** Atomic nitrogen density ****/
    output.d[7] = 0;
    #/**** Total mass density */
    output.d[5] = 1.66E-24 * (4.0 * output.d[0] + 16.0 * output.d[1] + 28.0 * output.d[2] + 32.0 * output.d[3] + 40.0 * output.d[4] + output.d[6] + 14.0 * output.d[7]);
    if (flags.sw[0]): # pragma: no cover
        output.d[5] = output.d[5]/1000;
    #/**** temperature at altitude ****/
    global dd
    dd = densm(Input.alt, 1.0, 0, tz, mn3, zn3, meso_tn3, meso_tgn3, mn2, zn2, meso_tn2, meso_tgn2);
    output.t[1] = tz[0];
    return
| The standard model subroutine (GTD7) always computes the
‘‘thermospheric’’ mass density by explicitly summing the masses of
the species in equilibrium at the thermospheric temperature T(z).
| python | train |
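The total-mass-density line near the end of `gtd7` is an amu-weighted sum over the species number densities; a standalone restatement of just that step:

```python
def total_mass_density(d):
    """Mass density in g/cm^3 from number densities (per cm^3) in the
    output.d ordering: He, O, N2, O2, Ar, total, H, N, anomalous O."""
    # 1.66E-24 g is one atomic mass unit; weights are atomic/molecular masses.
    return 1.66E-24 * (4.0 * d[0] + 16.0 * d[1] + 28.0 * d[2]
                       + 32.0 * d[3] + 40.0 * d[4] + d[6] + 14.0 * d[7])

# Illustrative (made-up) number densities, not actual model output:
print(total_mass_density([1e7, 0.0, 1e12, 1e11, 1e9, 0.0, 1e5, 0.0, 0.0]))
```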
Duke-GCB/DukeDSClient | ddsc/core/d4s2.py | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/d4s2.py#L307-L332 | def _share_project(self, destination, project, to_user, force_send, auth_role='', user_message='',
                   share_users=None):
    """
    Send message to remote service to email/share project with to_user.
    :param destination: str which type of sharing we are doing (SHARE_DESTINATION or DELIVER_DESTINATION)
    :param project: RemoteProject project we are sharing
    :param to_user: RemoteUser user we are sharing with
    :param auth_role: str project role eg 'project_admin' email is customized based on this setting.
    :param user_message: str message to be sent with the share
    :param share_users: [RemoteUser] users to have this project shared with after delivery (delivery only)
    :return: the email address the user should soon receive a message at
    """
    from_user = self.remote_store.get_current_user()
    share_user_ids = None
    if share_users:
        share_user_ids = [share_user.id for share_user in share_users]
    item = D4S2Item(destination=destination,
                    from_user_id=from_user.id,
                    to_user_id=to_user.id,
                    project_id=project.id,
                    project_name=project.name,
                    auth_role=auth_role,
                    user_message=user_message,
                    share_user_ids=share_user_ids)
    item.send(self.api, force_send)
    return to_user.email
| Send message to remote service to email/share project with to_user.
:param destination: str which type of sharing we are doing (SHARE_DESTINATION or DELIVER_DESTINATION)
:param project: RemoteProject project we are sharing
:param to_user: RemoteUser user we are sharing with
:param auth_role: str project role eg 'project_admin' email is customized based on this setting.
:param user_message: str message to be sent with the share
:param share_users: [RemoteUser] users to have this project shared with after delivery (delivery only)
:return: the email address the user should soon receive a message at
| python | train |
rmed/dev-init | dev_init/dev_init.py | https://github.com/rmed/dev-init/blob/afc5da13002e563324c6291dede0bf2e0f58171f/dev_init/dev_init.py#L84-L121 | def new_env(environment):
""" Create a new environment in the configuration and ask the
user for the commands for this specific environment.
"""
if not environment:
print("You need to supply an environment name")
return
parser = read_config()
if environment in parser.sections():
print("Environment '%s' already exists" % environment)
return
print("Please introduce (in order) the commands for '%s'\n" % environment)
print("Press RETURN to end command and RETURN with empty line to finish\n")
commands = []
cmd = ""
while True:
try:
cmd = raw_input("> ")
if not cmd:
break
commands.append(cmd)
except KeyboardInterrupt:
return
parser.add_section(environment)
parser.set(environment, "cmd", "\n".join(commands))
write_config(parser)
print("Added environment '%s'" % environment) | [
"def",
"new_env",
"(",
"environment",
")",
":",
"if",
"not",
"environment",
":",
"print",
"(",
"\"You need to supply an environment name\"",
")",
"return",
"parser",
"=",
"read_config",
"(",
")",
"if",
"environment",
"in",
"parser",
".",
"sections",
"(",
")",
":",
"print",
"(",
"\"Environment '%s' already exists\"",
"%",
"environment",
")",
"return",
"print",
"(",
"\"Please introduce (in order) the commands for '%s'\\n\"",
"%",
"environment",
")",
"print",
"(",
"\"Press RETURN to end command and RETURN with empty line to finish\\n\"",
")",
"commands",
"=",
"[",
"]",
"cmd",
"=",
"\"\"",
"while",
"True",
":",
"try",
":",
"cmd",
"=",
"raw_input",
"(",
"\"> \"",
")",
"if",
"not",
"cmd",
":",
"break",
"commands",
".",
"append",
"(",
"cmd",
")",
"except",
"KeyboardInterrupt",
":",
"return",
"parser",
".",
"add_section",
"(",
"environment",
")",
"parser",
".",
"set",
"(",
"environment",
",",
"\"cmd\"",
",",
"\"\\n\"",
".",
"join",
"(",
"commands",
")",
")",
"write_config",
"(",
"parser",
")",
"print",
"(",
"\"Added environment '%s'\"",
"%",
"environment",
")"
]
| Create a new environment in the configuration and ask the
user for the commands for this specific environment.
| python | train |
rytilahti/python-songpal | songpal/device.py | https://github.com/rytilahti/python-songpal/blob/0443de6b3d960b9067a851d82261ca00e46b4618/songpal/device.py#L383-L389 | async def get_sound_settings(self, target="") -> List[Setting]:
"""Get the current sound settings.
:param str target: settings target, defaults to all.
"""
res = await self.services["audio"]["getSoundSettings"]({"target": target})
    return [Setting.make(**x) for x in res]
| Get the current sound settings.
:param str target: settings target, defaults to all.
| python | train |
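An async usage sketch; `dev` below is an assumed, already-connected songpal `Device`:

```python
import asyncio

async def dump_settings(dev):
    # 'dev' is an assumption: a connected songpal Device instance.
    for setting in await dev.get_sound_settings():
        print(setting)

# asyncio.run(dump_settings(dev))
```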
andrenarchy/krypy | krypy/utils.py | https://github.com/andrenarchy/krypy/blob/4883ec9a61d64ea56489e15c35cc40f0633ab2f1/krypy/utils.py#L648-L675 | def qr(X, ip_B=None, reorthos=1):
"""QR factorization with customizable inner product.
:param X: array with ``shape==(N,k)``
:param ip_B: (optional) inner product, see :py:meth:`inner`.
:param reorthos: (optional) numer of reorthogonalizations. Defaults to
1 (i.e. 2 runs of modified Gram-Schmidt) which should be enough in most
cases (TODO: add reference).
:return: Q, R where :math:`X=QR` with :math:`\\langle Q,Q \\rangle=I_k` and
R upper triangular.
"""
if ip_B is None and X.shape[1] > 0:
return scipy.linalg.qr(X, mode='economic')
else:
(N, k) = X.shape
Q = X.copy()
R = numpy.zeros((k, k), dtype=X.dtype)
for i in range(k):
for reortho in range(reorthos+1):
for j in range(i):
alpha = inner(Q[:, [j]], Q[:, [i]], ip_B=ip_B)[0, 0]
R[j, i] += alpha
Q[:, [i]] -= alpha * Q[:, [j]]
R[i, i] = norm(Q[:, [i]], ip_B=ip_B)
if R[i, i] >= 1e-15:
Q[:, [i]] /= R[i, i]
        return Q, R
| QR factorization with customizable inner product.
:param X: array with ``shape==(N,k)``
:param ip_B: (optional) inner product, see :py:meth:`inner`.
:param reorthos: (optional) number of reorthogonalizations. Defaults to
1 (i.e. 2 runs of modified Gram-Schmidt) which should be enough in most
cases (TODO: add reference).
:return: Q, R where :math:`X=QR` with :math:`\\langle Q,Q \\rangle=I_k` and
R upper triangular.
| python | train |
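A quick numerical check, assuming `qr` is imported from `krypy.utils`:

```python
import numpy
from krypy.utils import qr

X = numpy.random.rand(10, 3)
Q, R = qr(X)  # ip_B=None delegates to scipy.linalg.qr
print(numpy.allclose(Q.T.dot(Q), numpy.eye(3)))  # True: orthonormal columns
print(numpy.allclose(Q.dot(R), X))               # True: X = QR
```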
SmokinCaterpillar/pypet | pypet/storageservice.py | https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/storageservice.py#L2453-L2525 | def _trj_store_trajectory(self, traj, only_init=False, store_data=pypetconstants.STORE_DATA,
max_depth=None):
""" Stores a trajectory to an hdf5 file
Stores all groups, parameters and results
"""
if not only_init:
self._logger.info('Start storing Trajectory `%s`.' % self._trajectory_name)
else:
self._logger.info('Initialising storage or updating meta data of Trajectory `%s`.' %
self._trajectory_name)
store_data = pypetconstants.STORE_NOTHING
# In case we accidentally chose a trajectory name that already exist
# We do not want to mess up the stored trajectory but raise an Error
if not traj._stored and self._trajectory_group is not None:
raise RuntimeError('You want to store a completely new trajectory with name'
' `%s` but this trajectory is already found in file `%s`.'
'Did you try to accidentally overwrite existing data? If '
'you DO want to override existing data, use `overwrite_file=True`.'
'Note that this deletes the whole HDF5 file not just the particular '
'trajectroy therein! ' %
(traj.v_name, self._filename))
# Extract HDF5 settings from the trajectory
self._srvc_check_hdf_properties(traj)
# Store the trajectory for the first time if necessary:
if self._trajectory_group is None:
self._trajectory_group = self._hdf5file.create_group(where='/',
name=self._trajectory_name,
title=self._trajectory_name,
filters=self._all_get_filters())
# Store meta information
self._trj_store_meta_data(traj)
# # Store recursively the config subtree
# self._tree_store_recursively(pypetconstants.LEAF,traj.config,self._trajectory_group)
if store_data in (pypetconstants.STORE_DATA_SKIPPING,
pypetconstants.STORE_DATA,
pypetconstants.OVERWRITE_DATA):
counter = 0
maximum_display_other = 10
name_set = set(['parameters', 'config', 'derived_parameters', 'results'])
for child_name in traj._children:
if child_name in name_set:
self._logger.info('Storing branch `%s`.' % child_name)
else:
if counter < maximum_display_other:
self._logger.info('Storing branch/node `%s`.' % child_name)
elif counter == maximum_display_other:
self._logger.info('To many branches or nodes at root for display. '
'I will not inform you about storing anymore. '
'Branches are stored silently in the background. '
'Do not worry, I will not freeze! Pinky promise!!!')
counter += 1
# Store recursively the elements
self._tree_store_sub_branch(traj, child_name, store_data=store_data,
with_links=True,
recursive=True, max_depth=max_depth,
hdf5_group=self._trajectory_group)
self._logger.info('Finished storing Trajectory `%s`.' % self._trajectory_name)
else:
self._logger.info('Finished init or meta data update for `%s`.' %
self._trajectory_name)
    traj._stored = True
| Stores a trajectory to an hdf5 file
Stores all groups, parameters and results
| python | test |
alecthomas/voluptuous | voluptuous/schema_builder.py | https://github.com/alecthomas/voluptuous/blob/36c8c11e2b7eb402c24866fa558473661ede9403/voluptuous/schema_builder.py#L1256-L1301 | def validate(*a, **kw):
"""Decorator for validating arguments of a function against a given schema.
Set restrictions for arguments:
>>> @validate(arg1=int, arg2=int)
... def foo(arg1, arg2):
... return arg1 * arg2
Set restriction for returned value:
>>> @validate(arg=int, __return__=int)
... def bar(arg1):
... return arg1 * 2
"""
RETURNS_KEY = '__return__'
def validate_schema_decorator(func):
returns_defined = False
returns = None
schema_args_dict = _args_to_dict(func, a)
schema_arguments = _merge_args_with_kwargs(schema_args_dict, kw)
if RETURNS_KEY in schema_arguments:
returns_defined = True
returns = schema_arguments[RETURNS_KEY]
del schema_arguments[RETURNS_KEY]
input_schema = (Schema(schema_arguments, extra=ALLOW_EXTRA)
if len(schema_arguments) != 0 else lambda x: x)
output_schema = Schema(returns) if returns_defined else lambda x: x
@wraps(func)
def func_wrapper(*args, **kwargs):
args_dict = _args_to_dict(func, args)
arguments = _merge_args_with_kwargs(args_dict, kwargs)
validated_arguments = input_schema(arguments)
output = func(**validated_arguments)
return output_schema(output)
return func_wrapper
    return validate_schema_decorator
| Decorator for validating arguments of a function against a given schema.
Set restrictions for arguments:
>>> @validate(arg1=int, arg2=int)
... def foo(arg1, arg2):
... return arg1 * arg2
Set restriction for returned value:
>>> @validate(arg1=int, __return__=int)
... def bar(arg1):
...     return arg1 * 2
| python | train |
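A sketch of the decorator rejecting bad input (the import path is taken from this record; error types come from voluptuous):

```python
from voluptuous.schema_builder import validate
from voluptuous import Invalid

@validate(arg1=int, arg2=int)
def multiply(arg1, arg2):
    return arg1 * arg2

print(multiply(3, 4))  # 12
try:
    multiply('3', 4)   # '3' fails the int schema
except Invalid as exc:
    print('rejected:', exc)
```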
BD2KOnFHIR/fhirtordf | fhirtordf/fhir/fhirmetavoc.py | https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/fhirmetavoc.py#L94-L101 | def is_valid(self, t: URIRef) -> bool:
"""
Raise an exception if 't' is unrecognized
:param t: metadata URI
"""
if not self.has_type(t):
raise TypeError("Unrecognized FHIR type: {}".format(t))
    return True
| Raise an exception if 't' is unrecognized
:param t: metadata URI
| python | train |
log2timeline/dfvfs | dfvfs/file_io/lvm_file_io.py | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/lvm_file_io.py#L95-L110 | def seek(self, offset, whence=os.SEEK_SET):
"""Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
whence (Optional(int)): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed.
"""
if not self._is_open:
raise IOError('Not opened.')
    self._vslvm_logical_volume.seek(offset, whence)
| Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
whence (Optional(int)): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed.
| python | train |
etcher-be/epab | epab/utils/_repo.py | https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/utils/_repo.py#L534-L542 | def create_branch_and_checkout(self, branch_name: str):
"""
Creates a new branch if it doesn't exist
Args:
branch_name: branch name
"""
self.create_branch(branch_name)
    self.checkout(branch_name)
| Creates a new branch if it doesn't exist
Args:
branch_name: branch name
| python | train |
skorch-dev/skorch | skorch/utils.py | https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/skorch/utils.py#L468-L483 | def get_map_location(target_device, fallback_device='cpu'):
"""Determine the location to map loaded data (e.g., weights)
for a given target device (e.g. 'cuda').
"""
map_location = torch.device(target_device)
# The user wants to use CUDA but there is no CUDA device
# available, thus fall back to CPU.
if map_location.type == 'cuda' and not torch.cuda.is_available():
warnings.warn(
'Requested to load data to CUDA but no CUDA devices '
'are available. Loading on device "{}" instead.'.format(
fallback_device,
), DeviceWarning)
map_location = torch.device(fallback_device)
    return map_location
| Determine the location to map loaded data (e.g., weights)
for a given target device (e.g. 'cuda').
| python | train |
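A hedged usage sketch, assuming PyTorch and skorch are installed; torch.load's map_location argument is the usual consumer of this helper, and the weights path below is a placeholder:

import torch
from skorch.utils import get_map_location

map_location = get_map_location('cuda')  # warns and falls back to CPU if CUDA is absent
print(map_location)
# Typical consumer (the path is a placeholder, not part of skorch):
# state_dict = torch.load('weights.pt', map_location=map_location)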
apache/incubator-mxnet | python/mxnet/module/executor_group.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/executor_group.py#L307-L342 | def _collect_arrays(self):
"""Collect internal arrays from executors."""
# convenient data structures
self.data_arrays = [[(self.slices[i], e.arg_dict[name]) for i, e in enumerate(self.execs)]
for name, _ in self.data_shapes]
self.state_arrays = [[e.arg_dict[name] for e in self.execs]
for name in self.state_names]
if self.label_shapes is not None:
self.label_arrays = [[(self.slices[i], e.arg_dict[name])
for i, e in enumerate(self.execs)]
for name, _ in self.label_shapes]
else:
self.label_arrays = None
self.param_arrays = [[exec_.arg_arrays[i] for exec_ in self.execs]
for i, name in enumerate(self.arg_names)
if name in self.param_names]
if self.for_training:
self.grad_arrays = [[exec_.grad_arrays[i] for exec_ in self.execs]
for i, name in enumerate(self.arg_names)
if name in self.param_names]
else:
self.grad_arrays = None
data_names = [x[0] for x in self.data_shapes]
if self.inputs_need_grad:
self.input_grad_arrays = [[exec_.grad_arrays[self.arg_names.index(name)]
for exec_ in self.execs]
for name in data_names if name in self.arg_names]
else:
self.input_grad_arrays = None
self.aux_arrays = [[exec_.aux_arrays[i] for exec_ in self.execs]
for i in range(len(self.aux_names))] | [
"def",
"_collect_arrays",
"(",
"self",
")",
":",
"# convenient data structures",
"self",
".",
"data_arrays",
"=",
"[",
"[",
"(",
"self",
".",
"slices",
"[",
"i",
"]",
",",
"e",
".",
"arg_dict",
"[",
"name",
"]",
")",
"for",
"i",
",",
"e",
"in",
"enumerate",
"(",
"self",
".",
"execs",
")",
"]",
"for",
"name",
",",
"_",
"in",
"self",
".",
"data_shapes",
"]",
"self",
".",
"state_arrays",
"=",
"[",
"[",
"e",
".",
"arg_dict",
"[",
"name",
"]",
"for",
"e",
"in",
"self",
".",
"execs",
"]",
"for",
"name",
"in",
"self",
".",
"state_names",
"]",
"if",
"self",
".",
"label_shapes",
"is",
"not",
"None",
":",
"self",
".",
"label_arrays",
"=",
"[",
"[",
"(",
"self",
".",
"slices",
"[",
"i",
"]",
",",
"e",
".",
"arg_dict",
"[",
"name",
"]",
")",
"for",
"i",
",",
"e",
"in",
"enumerate",
"(",
"self",
".",
"execs",
")",
"]",
"for",
"name",
",",
"_",
"in",
"self",
".",
"label_shapes",
"]",
"else",
":",
"self",
".",
"label_arrays",
"=",
"None",
"self",
".",
"param_arrays",
"=",
"[",
"[",
"exec_",
".",
"arg_arrays",
"[",
"i",
"]",
"for",
"exec_",
"in",
"self",
".",
"execs",
"]",
"for",
"i",
",",
"name",
"in",
"enumerate",
"(",
"self",
".",
"arg_names",
")",
"if",
"name",
"in",
"self",
".",
"param_names",
"]",
"if",
"self",
".",
"for_training",
":",
"self",
".",
"grad_arrays",
"=",
"[",
"[",
"exec_",
".",
"grad_arrays",
"[",
"i",
"]",
"for",
"exec_",
"in",
"self",
".",
"execs",
"]",
"for",
"i",
",",
"name",
"in",
"enumerate",
"(",
"self",
".",
"arg_names",
")",
"if",
"name",
"in",
"self",
".",
"param_names",
"]",
"else",
":",
"self",
".",
"grad_arrays",
"=",
"None",
"data_names",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"self",
".",
"data_shapes",
"]",
"if",
"self",
".",
"inputs_need_grad",
":",
"self",
".",
"input_grad_arrays",
"=",
"[",
"[",
"exec_",
".",
"grad_arrays",
"[",
"self",
".",
"arg_names",
".",
"index",
"(",
"name",
")",
"]",
"for",
"exec_",
"in",
"self",
".",
"execs",
"]",
"for",
"name",
"in",
"data_names",
"if",
"name",
"in",
"self",
".",
"arg_names",
"]",
"else",
":",
"self",
".",
"input_grad_arrays",
"=",
"None",
"self",
".",
"aux_arrays",
"=",
"[",
"[",
"exec_",
".",
"aux_arrays",
"[",
"i",
"]",
"for",
"exec_",
"in",
"self",
".",
"execs",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"aux_names",
")",
")",
"]"
]
| Collect internal arrays from executors. | [
"Collect",
"internal",
"arrays",
"from",
"executors",
"."
]
| python | train |
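The method above pairs each executor with the slice of the global batch it owns. A stripped-down, pure-Python sketch of that slice/executor pairing (names are stand-ins; no MXNet required):

batch_size, num_devices = 8, 2
step = batch_size // num_devices
# One slice of the global batch per executor, as in self.slices.
slices = [slice(i * step, (i + 1) * step) for i in range(num_devices)]
# Stand-ins for per-executor argument arrays keyed by input name.
execs = [{'data': [0.0] * step} for _ in range(num_devices)]
# data_arrays mirrors [(slice, per-executor array), ...] for each input name.
data_arrays = [[(slices[i], e[name]) for i, e in enumerate(execs)]
               for name in ['data']]
print(data_arrays[0][0][0])  # slice(0, 4, None): rows 0-3 belong to executor 0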
saltstack/salt | salt/states/monit.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/monit.py#L34-L65 | def monitor(name):
'''
Get the summary from module monit and try to see if service is
being monitored. If not then monitor the service.
'''
ret = {'result': None,
'name': name,
'comment': '',
'changes': {}
}
result = __salt__['monit.summary'](name)
try:
for key, value in result.items():
if 'Running' in value[name]:
                ret['comment'] = ('{0} is being monitored.').format(name)
ret['result'] = True
else:
if __opts__['test']:
ret['comment'] = 'Service {0} is set to be monitored.'.format(name)
ret['result'] = None
return ret
__salt__['monit.monitor'](name)
ret['comment'] = ('{0} started to be monitored.').format(name)
ret['changes'][name] = 'Running'
ret['result'] = True
break
except KeyError:
ret['comment'] = ('{0} not found in configuration.').format(name)
ret['result'] = False
return ret | [
"def",
"monitor",
"(",
"name",
")",
":",
"ret",
"=",
"{",
"'result'",
":",
"None",
",",
"'name'",
":",
"name",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"result",
"=",
"__salt__",
"[",
"'monit.summary'",
"]",
"(",
"name",
")",
"try",
":",
"for",
"key",
",",
"value",
"in",
"result",
".",
"items",
"(",
")",
":",
"if",
"'Running'",
"in",
"value",
"[",
"name",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'{0} is being being monitored.'",
")",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"else",
":",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Service {0} is set to be monitored.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"ret",
"__salt__",
"[",
"'monit.monitor'",
"]",
"(",
"name",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'{0} started to be monitored.'",
")",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"name",
"]",
"=",
"'Running'",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"break",
"except",
"KeyError",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'{0} not found in configuration.'",
")",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"return",
"ret"
]
| Get the summary from module monit and try to see if service is
being monitored. If not then monitor the service. | [
"Get",
"the",
"summary",
"from",
"module",
"monit",
"and",
"try",
"to",
"see",
"if",
"service",
"is",
"being",
"monitored",
".",
"If",
"not",
"then",
"monitor",
"the",
"service",
"."
]
| python | train |
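Salt state functions communicate through a fixed return dictionary (name, result, comment, changes). A minimal pure-Python sketch of that contract as this state uses it; the summary dict and service name are invented for illustration:

def monitor_sketch(name, summary, test=False):
    # Every Salt state returns name/result/comment/changes.
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
    status = summary.get(name)
    if status is None:
        ret['result'] = False
        ret['comment'] = '{0} not found in configuration.'.format(name)
    elif status == 'Running':
        ret['result'] = True
        ret['comment'] = '{0} is being monitored.'.format(name)
    elif test:
        # Test mode reports the pending change without applying it.
        ret['comment'] = 'Service {0} is set to be monitored.'.format(name)
    else:
        ret['result'] = True
        ret['changes'][name] = 'Running'
        ret['comment'] = '{0} started to be monitored.'.format(name)
    return ret

print(monitor_sketch('nginx', {'nginx': 'Not monitored'}))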
sibirrer/lenstronomy | lenstronomy/LightModel/light_model.py | https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LightModel/light_model.py#L90-L99 | def param_name_list(self):
"""
returns the list of all parameter names
:return: list of list of strings (for each light model separately)
"""
name_list = []
for func in self.func_list:
name_list.append(func.param_names)
return name_list | [
"def",
"param_name_list",
"(",
"self",
")",
":",
"name_list",
"=",
"[",
"]",
"for",
"func",
"in",
"self",
".",
"func_list",
":",
"name_list",
".",
"append",
"(",
"func",
".",
"param_names",
")",
"return",
"name_list"
]
| returns the list of all parameter names
:return: list of list of strings (for each light model separately) | [
"returns",
"the",
"list",
"of",
"all",
"parameter",
"names"
]
| python | train |
yougov/pmxbot | pmxbot/core.py | https://github.com/yougov/pmxbot/blob/5da84a3258a0fd73cb35b60e39769a5d7bfb2ba7/pmxbot/core.py#L583-L591 | def init_config(overrides):
"""
Install the config dict as pmxbot.config, setting overrides,
and return the result.
"""
pmxbot.config = config = ConfigDict()
config.setdefault('bot_nickname', 'pmxbot')
config.update(overrides)
return config | [
"def",
"init_config",
"(",
"overrides",
")",
":",
"pmxbot",
".",
"config",
"=",
"config",
"=",
"ConfigDict",
"(",
")",
"config",
".",
"setdefault",
"(",
"'bot_nickname'",
",",
"'pmxbot'",
")",
"config",
".",
"update",
"(",
"overrides",
")",
"return",
"config"
]
| Install the config dict as pmxbot.config, setting overrides,
and return the result. | [
"Install",
"the",
"config",
"dict",
"as",
"pmxbot",
".",
"config",
"setting",
"overrides",
"and",
"return",
"the",
"result",
"."
]
| python | train |
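Because setdefault() runs before update(), the default survives only when no override is supplied. A plain-dict demonstration of that ordering:

def init_config_sketch(overrides):
    config = {}
    config.setdefault('bot_nickname', 'pmxbot')  # default is installed first...
    config.update(overrides)                     # ...so any override replaces it
    return config

print(init_config_sketch({}))                         # {'bot_nickname': 'pmxbot'}
print(init_config_sketch({'bot_nickname': 'mybot'}))  # {'bot_nickname': 'mybot'}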
wavycloud/pyboto3 | pyboto3/swf.py | https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/swf.py#L1483-L1693 | def list_closed_workflow_executions(domain=None, startTimeFilter=None, closeTimeFilter=None, executionFilter=None, closeStatusFilter=None, typeFilter=None, tagFilter=None, nextPageToken=None, maximumPageSize=None, reverseOrder=None):
"""
Returns a list of closed workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.
Access Control
You can use IAM policies to control this action's access to Amazon SWF resources as follows:
If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows .
See also: AWS API Documentation
:example: response = client.list_closed_workflow_executions(
domain='string',
startTimeFilter={
'oldestDate': datetime(2015, 1, 1),
'latestDate': datetime(2015, 1, 1)
},
closeTimeFilter={
'oldestDate': datetime(2015, 1, 1),
'latestDate': datetime(2015, 1, 1)
},
executionFilter={
'workflowId': 'string'
},
closeStatusFilter={
'status': 'COMPLETED'|'FAILED'|'CANCELED'|'TERMINATED'|'CONTINUED_AS_NEW'|'TIMED_OUT'
},
typeFilter={
'name': 'string',
'version': 'string'
},
tagFilter={
'tag': 'string'
},
nextPageToken='string',
maximumPageSize=123,
reverseOrder=True|False
)
:type domain: string
:param domain: [REQUIRED]
The name of the domain that contains the workflow executions to list.
:type startTimeFilter: dict
:param startTimeFilter: If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times.
Note
startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.
oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return.
latestDate (datetime) --Specifies the latest start or close date and time to return.
:type closeTimeFilter: dict
:param closeTimeFilter: If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times.
Note
startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.
oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return.
latestDate (datetime) --Specifies the latest start or close date and time to return.
:type executionFilter: dict
:param executionFilter: If specified, only workflow executions matching the workflow ID specified in the filter are returned.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
    workflowId (string) -- [REQUIRED]The workflowId that must match the criteria of this filter.
:type closeStatusFilter: dict
:param closeStatusFilter: If specified, only workflow executions that match this close status are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
status (string) -- [REQUIRED]Required. The close status that must match the close status of an execution for it to meet the criteria of this filter.
:type typeFilter: dict
:param typeFilter: If specified, only executions of the type specified in the filter are returned.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
name (string) -- [REQUIRED]Required. Name of the workflow type.
version (string) --Version of the workflow type.
:type tagFilter: dict
:param tagFilter: If specified, only executions that have the matching tag are listed.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
tag (string) -- [REQUIRED]Required. Specifies the tag that must be associated with the execution for it to meet the filter criteria.
:type nextPageToken: string
:param nextPageToken: If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken . Keep all other arguments unchanged.
The configured maximumPageSize determines how many results can be returned in a single call.
:type maximumPageSize: integer
    :param maximumPageSize: The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.
This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.
:type reverseOrder: boolean
:param reverseOrder: When set to true , returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions.
:rtype: dict
:return: {
'executionInfos': [
{
'execution': {
'workflowId': 'string',
'runId': 'string'
},
'workflowType': {
'name': 'string',
'version': 'string'
},
'startTimestamp': datetime(2015, 1, 1),
'closeTimestamp': datetime(2015, 1, 1),
'executionStatus': 'OPEN'|'CLOSED',
'closeStatus': 'COMPLETED'|'FAILED'|'CANCELED'|'TERMINATED'|'CONTINUED_AS_NEW'|'TIMED_OUT',
'parent': {
'workflowId': 'string',
'runId': 'string'
},
'tagList': [
'string',
],
'cancelRequested': True|False
},
],
'nextPageToken': 'string'
}
:returns:
domain (string) -- [REQUIRED]
The name of the domain that contains the workflow executions to list.
startTimeFilter (dict) -- If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times.
Note
startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.
oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return.
latestDate (datetime) --Specifies the latest start or close date and time to return.
closeTimeFilter (dict) -- If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times.
Note
startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.
oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return.
latestDate (datetime) --Specifies the latest start or close date and time to return.
executionFilter (dict) -- If specified, only workflow executions matching the workflow ID specified in the filter are returned.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
    workflowId (string) -- [REQUIRED]The workflowId that must match the criteria of this filter.
closeStatusFilter (dict) -- If specified, only workflow executions that match this close status are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
status (string) -- [REQUIRED]Required. The close status that must match the close status of an execution for it to meet the criteria of this filter.
typeFilter (dict) -- If specified, only executions of the type specified in the filter are returned.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
name (string) -- [REQUIRED]Required. Name of the workflow type.
version (string) --Version of the workflow type.
tagFilter (dict) -- If specified, only executions that have the matching tag are listed.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
tag (string) -- [REQUIRED]Required. Specifies the tag that must be associated with the execution for it to meet the filter criteria.
nextPageToken (string) -- If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken . Keep all other arguments unchanged.
The configured maximumPageSize determines how many results can be returned in a single call.
    maximumPageSize (integer) -- The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.
This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.
reverseOrder (boolean) -- When set to true , returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions.
"""
pass | [
"def",
"list_closed_workflow_executions",
"(",
"domain",
"=",
"None",
",",
"startTimeFilter",
"=",
"None",
",",
"closeTimeFilter",
"=",
"None",
",",
"executionFilter",
"=",
"None",
",",
"closeStatusFilter",
"=",
"None",
",",
"typeFilter",
"=",
"None",
",",
"tagFilter",
"=",
"None",
",",
"nextPageToken",
"=",
"None",
",",
"maximumPageSize",
"=",
"None",
",",
"reverseOrder",
"=",
"None",
")",
":",
"pass"
]
| Returns a list of closed workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.
Access Control
You can use IAM policies to control this action's access to Amazon SWF resources as follows:
If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows .
See also: AWS API Documentation
:example: response = client.list_closed_workflow_executions(
domain='string',
startTimeFilter={
'oldestDate': datetime(2015, 1, 1),
'latestDate': datetime(2015, 1, 1)
},
closeTimeFilter={
'oldestDate': datetime(2015, 1, 1),
'latestDate': datetime(2015, 1, 1)
},
executionFilter={
'workflowId': 'string'
},
closeStatusFilter={
'status': 'COMPLETED'|'FAILED'|'CANCELED'|'TERMINATED'|'CONTINUED_AS_NEW'|'TIMED_OUT'
},
typeFilter={
'name': 'string',
'version': 'string'
},
tagFilter={
'tag': 'string'
},
nextPageToken='string',
maximumPageSize=123,
reverseOrder=True|False
)
:type domain: string
:param domain: [REQUIRED]
The name of the domain that contains the workflow executions to list.
:type startTimeFilter: dict
:param startTimeFilter: If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times.
Note
startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.
oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return.
latestDate (datetime) --Specifies the latest start or close date and time to return.
:type closeTimeFilter: dict
:param closeTimeFilter: If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times.
Note
startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.
oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return.
latestDate (datetime) --Specifies the latest start or close date and time to return.
:type executionFilter: dict
:param executionFilter: If specified, only workflow executions matching the workflow ID specified in the filter are returned.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
    workflowId (string) -- [REQUIRED]The workflowId that must match the criteria of this filter.
:type closeStatusFilter: dict
:param closeStatusFilter: If specified, only workflow executions that match this close status are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
status (string) -- [REQUIRED]Required. The close status that must match the close status of an execution for it to meet the criteria of this filter.
:type typeFilter: dict
:param typeFilter: If specified, only executions of the type specified in the filter are returned.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
name (string) -- [REQUIRED]Required. Name of the workflow type.
version (string) --Version of the workflow type.
:type tagFilter: dict
:param tagFilter: If specified, only executions that have the matching tag are listed.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
tag (string) -- [REQUIRED]Required. Specifies the tag that must be associated with the execution for it to meet the filter criteria.
:type nextPageToken: string
:param nextPageToken: If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken . Keep all other arguments unchanged.
The configured maximumPageSize determines how many results can be returned in a single call.
:type maximumPageSize: integer
    :param maximumPageSize: The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.
This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.
:type reverseOrder: boolean
:param reverseOrder: When set to true , returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions.
:rtype: dict
:return: {
'executionInfos': [
{
'execution': {
'workflowId': 'string',
'runId': 'string'
},
'workflowType': {
'name': 'string',
'version': 'string'
},
'startTimestamp': datetime(2015, 1, 1),
'closeTimestamp': datetime(2015, 1, 1),
'executionStatus': 'OPEN'|'CLOSED',
'closeStatus': 'COMPLETED'|'FAILED'|'CANCELED'|'TERMINATED'|'CONTINUED_AS_NEW'|'TIMED_OUT',
'parent': {
'workflowId': 'string',
'runId': 'string'
},
'tagList': [
'string',
],
'cancelRequested': True|False
},
],
'nextPageToken': 'string'
}
:returns:
domain (string) -- [REQUIRED]
The name of the domain that contains the workflow executions to list.
startTimeFilter (dict) -- If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times.
Note
startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.
oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return.
latestDate (datetime) --Specifies the latest start or close date and time to return.
closeTimeFilter (dict) -- If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times.
Note
startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.
oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return.
latestDate (datetime) --Specifies the latest start or close date and time to return.
executionFilter (dict) -- If specified, only workflow executions matching the workflow ID specified in the filter are returned.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
    workflowId (string) -- [REQUIRED]The workflowId that must match the criteria of this filter.
closeStatusFilter (dict) -- If specified, only workflow executions that match this close status are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
status (string) -- [REQUIRED]Required. The close status that must match the close status of an execution for it to meet the criteria of this filter.
typeFilter (dict) -- If specified, only executions of the type specified in the filter are returned.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
name (string) -- [REQUIRED]Required. Name of the workflow type.
version (string) --Version of the workflow type.
tagFilter (dict) -- If specified, only executions that have the matching tag are listed.
Note
closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.
tag (string) -- [REQUIRED]Required. Specifies the tag that must be associated with the execution for it to meet the filter criteria.
nextPageToken (string) -- If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken . Keep all other arguments unchanged.
The configured maximumPageSize determines how many results can be returned in a single call.
    maximumPageSize (integer) -- The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.
This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.
reverseOrder (boolean) -- When set to true , returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions. | [
"Returns",
"a",
"list",
"of",
"closed",
"workflow",
"executions",
"in",
"the",
"specified",
"domain",
"that",
"meet",
"the",
"filtering",
"criteria",
".",
"The",
"results",
"may",
"be",
"split",
"into",
"multiple",
"pages",
".",
"To",
"retrieve",
"subsequent",
"pages",
"make",
"the",
"call",
"again",
"using",
"the",
"nextPageToken",
"returned",
"by",
"the",
"initial",
"call",
".",
"Access",
"Control",
"You",
"can",
"use",
"IAM",
"policies",
"to",
"control",
"this",
"action",
"s",
"access",
"to",
"Amazon",
"SWF",
"resources",
"as",
"follows",
":",
"If",
"the",
"caller",
"does",
"not",
"have",
"sufficient",
"permissions",
"to",
"invoke",
"the",
"action",
"or",
"the",
"parameter",
"values",
"fall",
"outside",
"the",
"specified",
"constraints",
"the",
"action",
"fails",
".",
"The",
"associated",
"event",
"attribute",
"s",
"cause",
"parameter",
"will",
"be",
"set",
"to",
"OPERATION_NOT_PERMITTED",
".",
"For",
"details",
"and",
"example",
"IAM",
"policies",
"see",
"Using",
"IAM",
"to",
"Manage",
"Access",
"to",
"Amazon",
"SWF",
"Workflows",
".",
"See",
"also",
":",
"AWS",
"API",
"Documentation",
":",
"example",
":",
"response",
"=",
"client",
".",
"list_closed_workflow_executions",
"(",
"domain",
"=",
"string",
"startTimeFilter",
"=",
"{",
"oldestDate",
":",
"datetime",
"(",
"2015",
"1",
"1",
")",
"latestDate",
":",
"datetime",
"(",
"2015",
"1",
"1",
")",
"}",
"closeTimeFilter",
"=",
"{",
"oldestDate",
":",
"datetime",
"(",
"2015",
"1",
"1",
")",
"latestDate",
":",
"datetime",
"(",
"2015",
"1",
"1",
")",
"}",
"executionFilter",
"=",
"{",
"workflowId",
":",
"string",
"}",
"closeStatusFilter",
"=",
"{",
"status",
":",
"COMPLETED",
"|",
"FAILED",
"|",
"CANCELED",
"|",
"TERMINATED",
"|",
"CONTINUED_AS_NEW",
"|",
"TIMED_OUT",
"}",
"typeFilter",
"=",
"{",
"name",
":",
"string",
"version",
":",
"string",
"}",
"tagFilter",
"=",
"{",
"tag",
":",
"string",
"}",
"nextPageToken",
"=",
"string",
"maximumPageSize",
"=",
"123",
"reverseOrder",
"=",
"True|False",
")",
":",
"type",
"domain",
":",
"string",
":",
"param",
"domain",
":",
"[",
"REQUIRED",
"]",
"The",
"name",
"of",
"the",
"domain",
"that",
"contains",
"the",
"workflow",
"executions",
"to",
"list",
"."
]
| python | train |
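The nextPageToken contract described above is normally consumed in a loop. A hedged pagination sketch using boto3 (region, domain, and filter values are placeholders; parameter and response key names follow the SWF API shown above):

import datetime
import boto3

client = boto3.client('swf', region_name='us-east-1')  # region is illustrative
kwargs = {
    'domain': 'example-domain',                                    # placeholder
    'startTimeFilter': {'oldestDate': datetime.datetime(2015, 1, 1)},
    'closeStatusFilter': {'status': 'COMPLETED'},
    'maximumPageSize': 100,
}
executions = []
while True:
    response = client.list_closed_workflow_executions(**kwargs)
    executions.extend(response.get('executionInfos', []))
    token = response.get('nextPageToken')
    if not token:
        break
    kwargs['nextPageToken'] = token  # keep all other arguments unchanged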
binarydud/pyres | pyres/worker.py | https://github.com/binarydud/pyres/blob/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/pyres/worker.py#L121-L153 | def work(self, interval=5):
"""Invoked by ``run`` method. ``work`` listens on a list of queues and sleeps
for ``interval`` time.
``interval`` -- Number of seconds the worker will wait until processing the next job. Default is "5".
Whenever a worker finds a job on the queue it first calls ``reserve`` on
that job to make sure another worker won't run it, then *forks* itself to
work on that job.
"""
self._setproctitle("Starting")
logger.info("starting")
self.startup()
while True:
if self._shutdown:
logger.info('shutdown scheduled')
break
self.register_worker()
job = self.reserve(interval)
if job:
self.fork_worker(job)
else:
if interval == 0:
break
#procline @paused ? "Paused" : "Waiting for #{@queues.join(',')}"
self._setproctitle("Waiting")
#time.sleep(interval)
self.unregister_worker() | [
"def",
"work",
"(",
"self",
",",
"interval",
"=",
"5",
")",
":",
"self",
".",
"_setproctitle",
"(",
"\"Starting\"",
")",
"logger",
".",
"info",
"(",
"\"starting\"",
")",
"self",
".",
"startup",
"(",
")",
"while",
"True",
":",
"if",
"self",
".",
"_shutdown",
":",
"logger",
".",
"info",
"(",
"'shutdown scheduled'",
")",
"break",
"self",
".",
"register_worker",
"(",
")",
"job",
"=",
"self",
".",
"reserve",
"(",
"interval",
")",
"if",
"job",
":",
"self",
".",
"fork_worker",
"(",
"job",
")",
"else",
":",
"if",
"interval",
"==",
"0",
":",
"break",
"#procline @paused ? \"Paused\" : \"Waiting for #{@queues.join(',')}\"",
"self",
".",
"_setproctitle",
"(",
"\"Waiting\"",
")",
"#time.sleep(interval)",
"self",
".",
"unregister_worker",
"(",
")"
]
| Invoked by ``run`` method. ``work`` listens on a list of queues and sleeps
for ``interval`` time.
``interval`` -- Number of seconds the worker will wait until processing the next job. Default is "5".
Whenever a worker finds a job on the queue it first calls ``reserve`` on
that job to make sure another worker won't run it, then *forks* itself to
work on that job. | [
"Invoked",
"by",
"run",
"method",
".",
"work",
"listens",
"on",
"a",
"list",
"of",
"queues",
"and",
"sleeps",
"for",
"interval",
"time",
"."
]
| python | train |
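work() is a standard poll-and-dispatch loop: reserve a job, process it, otherwise sleep, and exit after one empty pass when interval is 0. A dependency-free sketch of that control flow; reserve_job and handle_job are made-up stand-ins for Worker.reserve and fork_worker:

import time

def work_sketch(reserve_job, handle_job, interval=5):
    """Poll for jobs; dispatch each one; exit on an empty pass when interval == 0."""
    while True:
        job = reserve_job()  # stand-in for Worker.reserve(interval)
        if job is not None:
            handle_job(job)  # stand-in for the fork-and-process step
        elif interval == 0:
            break            # non-blocking mode: one pass, then stop
        else:
            time.sleep(interval)

# One queued job, then an empty queue with interval=0 so the loop exits.
queue = ['job-1']
work_sketch(lambda: queue.pop() if queue else None, print, interval=0)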
ActivisionGameScience/assertpy | assertpy/assertpy.py | https://github.com/ActivisionGameScience/assertpy/blob/08d799cdb01f9a25d3e20672efac991c7bc26d79/assertpy/assertpy.py#L278-L292 | def does_not_contain(self, *items):
"""Asserts that val does not contain the given item or items."""
if len(items) == 0:
raise ValueError('one or more args must be given')
elif len(items) == 1:
if items[0] in self.val:
self._err('Expected <%s> to not contain item <%s>, but did.' % (self.val, items[0]))
else:
found = []
for i in items:
if i in self.val:
found.append(i)
if found:
self._err('Expected <%s> to not contain items %s, but did contain %s.' % (self.val, self._fmt_items(items), self._fmt_items(found)))
return self | [
"def",
"does_not_contain",
"(",
"self",
",",
"*",
"items",
")",
":",
"if",
"len",
"(",
"items",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'one or more args must be given'",
")",
"elif",
"len",
"(",
"items",
")",
"==",
"1",
":",
"if",
"items",
"[",
"0",
"]",
"in",
"self",
".",
"val",
":",
"self",
".",
"_err",
"(",
"'Expected <%s> to not contain item <%s>, but did.'",
"%",
"(",
"self",
".",
"val",
",",
"items",
"[",
"0",
"]",
")",
")",
"else",
":",
"found",
"=",
"[",
"]",
"for",
"i",
"in",
"items",
":",
"if",
"i",
"in",
"self",
".",
"val",
":",
"found",
".",
"append",
"(",
"i",
")",
"if",
"found",
":",
"self",
".",
"_err",
"(",
"'Expected <%s> to not contain items %s, but did contain %s.'",
"%",
"(",
"self",
".",
"val",
",",
"self",
".",
"_fmt_items",
"(",
"items",
")",
",",
"self",
".",
"_fmt_items",
"(",
"found",
")",
")",
")",
"return",
"self"
]
| Asserts that val does not contain the given item or items. | [
"Asserts",
"that",
"val",
"does",
"not",
"contain",
"the",
"given",
"item",
"or",
"items",
"."
]
| python | valid |
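Typical use goes through assertpy's public assert_that() entry point; the values below are illustrative:

from assertpy import assert_that

assert_that([1, 2, 3]).does_not_contain(4)       # passes
assert_that('hello').does_not_contain('x', 'z')  # passes; varargs are checked one by one
try:
    assert_that([1, 2, 3]).does_not_contain(2, 5)
except AssertionError as error:
    print(error)  # reports only the items that were actually found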
cuihantao/andes | andes/filters/__init__.py | https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/filters/__init__.py#L32-L85 | def guess(system):
"""
input format guess function. First guess by extension, then test by lines
"""
files = system.files
maybe = []
if files.input_format:
maybe.append(files.input_format)
# first, guess by extension
for key, val in input_formats.items():
if type(val) == list:
for item in val:
if files.ext.strip('.').lower() == item:
maybe.append(key)
else:
if files.ext.strip('.').lower() == val:
maybe.append(key)
# second, guess by lines
true_format = ''
fid = open(files.case, 'r')
for item in maybe:
try:
parser = importlib.import_module('.' + item, __name__)
testlines = getattr(parser, 'testlines')
if testlines(fid):
true_format = item
break
except ImportError:
logger.debug(
'Parser for {:s} format is not found. '
'Format guess will continue.'.
format(item))
fid.close()
if true_format:
logger.debug('Input format guessed as {:s}.'.format(true_format))
else:
logger.error('Unable to determine case format.')
files.input_format = true_format
# guess addfile format
if files.addfile:
_, add_ext = os.path.splitext(files.addfile)
for key, val in input_formats.items():
if type(val) == list:
if add_ext[1:] in val:
files.add_format = key
else:
if add_ext[1:] == val:
files.add_format = key
return true_format | [
"def",
"guess",
"(",
"system",
")",
":",
"files",
"=",
"system",
".",
"files",
"maybe",
"=",
"[",
"]",
"if",
"files",
".",
"input_format",
":",
"maybe",
".",
"append",
"(",
"files",
".",
"input_format",
")",
"# first, guess by extension",
"for",
"key",
",",
"val",
"in",
"input_formats",
".",
"items",
"(",
")",
":",
"if",
"type",
"(",
"val",
")",
"==",
"list",
":",
"for",
"item",
"in",
"val",
":",
"if",
"files",
".",
"ext",
".",
"strip",
"(",
"'.'",
")",
".",
"lower",
"(",
")",
"==",
"item",
":",
"maybe",
".",
"append",
"(",
"key",
")",
"else",
":",
"if",
"files",
".",
"ext",
".",
"strip",
"(",
"'.'",
")",
".",
"lower",
"(",
")",
"==",
"val",
":",
"maybe",
".",
"append",
"(",
"key",
")",
"# second, guess by lines",
"true_format",
"=",
"''",
"fid",
"=",
"open",
"(",
"files",
".",
"case",
",",
"'r'",
")",
"for",
"item",
"in",
"maybe",
":",
"try",
":",
"parser",
"=",
"importlib",
".",
"import_module",
"(",
"'.'",
"+",
"item",
",",
"__name__",
")",
"testlines",
"=",
"getattr",
"(",
"parser",
",",
"'testlines'",
")",
"if",
"testlines",
"(",
"fid",
")",
":",
"true_format",
"=",
"item",
"break",
"except",
"ImportError",
":",
"logger",
".",
"debug",
"(",
"'Parser for {:s} format is not found. '",
"'Format guess will continue.'",
".",
"format",
"(",
"item",
")",
")",
"fid",
".",
"close",
"(",
")",
"if",
"true_format",
":",
"logger",
".",
"debug",
"(",
"'Input format guessed as {:s}.'",
".",
"format",
"(",
"true_format",
")",
")",
"else",
":",
"logger",
".",
"error",
"(",
"'Unable to determine case format.'",
")",
"files",
".",
"input_format",
"=",
"true_format",
"# guess addfile format",
"if",
"files",
".",
"addfile",
":",
"_",
",",
"add_ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"files",
".",
"addfile",
")",
"for",
"key",
",",
"val",
"in",
"input_formats",
".",
"items",
"(",
")",
":",
"if",
"type",
"(",
"val",
")",
"==",
"list",
":",
"if",
"add_ext",
"[",
"1",
":",
"]",
"in",
"val",
":",
"files",
".",
"add_format",
"=",
"key",
"else",
":",
"if",
"add_ext",
"[",
"1",
":",
"]",
"==",
"val",
":",
"files",
".",
"add_format",
"=",
"key",
"return",
"true_format"
]
| input format guess function. First guess by extension, then test by lines | [
"input",
"format",
"guess",
"function",
".",
"First",
"guess",
"by",
"extension",
"then",
"test",
"by",
"lines"
]
| python | train |
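The two-stage guess (extension first, then a content probe) generalizes well beyond ANDES. A self-contained sketch of the same strategy with invented format names and probes:

import os

# Stage 1: extension -> candidate format names (invented for illustration).
EXTENSION_MAP = {'json': ['json'], 'txt': ['keyvalue', 'plain']}
# Stage 2: format name -> probe that inspects the first line of the file.
PROBES = {
    'json': lambda line: line.lstrip().startswith(('{', '[')),
    'keyvalue': lambda line: '=' in line,
    'plain': lambda line: True,
}

def guess_format(path):
    ext = os.path.splitext(path)[1].strip('.').lower()
    for candidate in EXTENSION_MAP.get(ext, []):
        with open(path) as fid:
            if PROBES[candidate](fid.readline()):
                return candidate
    return ''  # mirror the original: empty string when nothing matches

with open('sample.txt', 'w') as fid:  # throwaway file for the demo
    fid.write('key=value\n')
print(guess_format('sample.txt'))  # keyvalue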
titusjan/argos | argos/widgets/mainwindow.py | https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/widgets/mainwindow.py#L105-L115 | def finalize(self):
""" Is called before destruction (when closing).
Can be used to clean-up resources.
"""
logger.debug("Finalizing: {}".format(self))
# Disconnect signals
self.collector.sigContentsChanged.disconnect(self.collectorContentsChanged)
self._configTreeModel.sigItemChanged.disconnect(self.configContentsChanged)
self.sigInspectorChanged.disconnect(self.inspectorSelectionPane.updateFromInspectorRegItem)
self.customContextMenuRequested.disconnect(self.showContextMenu) | [
"def",
"finalize",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"Finalizing: {}\"",
".",
"format",
"(",
"self",
")",
")",
"# Disconnect signals",
"self",
".",
"collector",
".",
"sigContentsChanged",
".",
"disconnect",
"(",
"self",
".",
"collectorContentsChanged",
")",
"self",
".",
"_configTreeModel",
".",
"sigItemChanged",
".",
"disconnect",
"(",
"self",
".",
"configContentsChanged",
")",
"self",
".",
"sigInspectorChanged",
".",
"disconnect",
"(",
"self",
".",
"inspectorSelectionPane",
".",
"updateFromInspectorRegItem",
")",
"self",
".",
"customContextMenuRequested",
".",
"disconnect",
"(",
"self",
".",
"showContextMenu",
")"
]
| Is called before destruction (when closing).
Can be used to clean-up resources. | [
"Is",
"called",
"before",
"destruction",
"(",
"when",
"closing",
")",
".",
"Can",
"be",
"used",
"to",
"clean",
"-",
"up",
"resources",
"."
]
| python | train |
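Disconnecting in finalize() exactly what was connected during setup is a generic teardown discipline. A Qt-free sketch of the paired connect/disconnect bookkeeping; the Signal class is a toy stand-in for Qt signals:

class Signal(object):
    """Toy signal: just enough connect/disconnect to show the pattern."""
    def __init__(self):
        self._slots = []
    def connect(self, slot):
        self._slots.append(slot)
    def disconnect(self, slot):
        self._slots.remove(slot)

class Window(object):
    def __init__(self, signal):
        self._connections = []
        self._connect(signal, self.on_change)
    def _connect(self, signal, slot):
        signal.connect(slot)
        self._connections.append((signal, slot))  # remember for teardown
    def finalize(self):
        # Undo every connection made during setup, mirroring the method above.
        for signal, slot in self._connections:
            signal.disconnect(slot)
        self._connections = []
    def on_change(self):
        pass

sig = Signal()
win = Window(sig)
win.finalize()
assert sig._slots == []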
tanghaibao/jcvi | jcvi/algorithms/graph.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/algorithms/graph.py#L370-L382 | def make_paths(paths, weights=None):
"""
Zip together paths. Called by merge_paths().
"""
npaths = len(paths)
weights = weights or [1] * npaths
assert len(paths) == len(weights)
G = nx.DiGraph()
for path, w in zip(paths, weights):
for a, b in pairwise(path):
update_weight(G, a, b, w)
return G | [
"def",
"make_paths",
"(",
"paths",
",",
"weights",
"=",
"None",
")",
":",
"npaths",
"=",
"len",
"(",
"paths",
")",
"weights",
"=",
"weights",
"or",
"[",
"1",
"]",
"*",
"npaths",
"assert",
"len",
"(",
"paths",
")",
"==",
"len",
"(",
"weights",
")",
"G",
"=",
"nx",
".",
"DiGraph",
"(",
")",
"for",
"path",
",",
"w",
"in",
"zip",
"(",
"paths",
",",
"weights",
")",
":",
"for",
"a",
",",
"b",
"in",
"pairwise",
"(",
"path",
")",
":",
"update_weight",
"(",
"G",
",",
"a",
",",
"b",
",",
"w",
")",
"return",
"G"
]
| Zip together paths. Called by merge_paths(). | [
"Zip",
"together",
"paths",
".",
"Called",
"by",
"merge_paths",
"()",
"."
]
| python | train |
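A runnable sketch of the same weighted merge with pairwise and update_weight written out; update_weight is assumed to accumulate weight on repeated edges, which is consistent with how merged paths are combined here:

import networkx as nx

def pairwise(iterable):
    # Yields (a, b), (b, c), ... over consecutive elements.
    items = list(iterable)
    return zip(items, items[1:])

def update_weight(G, a, b, w):
    # Assumed behavior: accumulate weight when an edge repeats.
    if G.has_edge(a, b):
        G[a][b]['weight'] += w
    else:
        G.add_edge(a, b, weight=w)

def make_paths_sketch(paths, weights=None):
    weights = weights or [1] * len(paths)
    G = nx.DiGraph()
    for path, w in zip(paths, weights):
        for a, b in pairwise(path):
            update_weight(G, a, b, w)
    return G

G = make_paths_sketch([['a', 'b', 'c'], ['a', 'b', 'd']], weights=[2, 1])
print(G['a']['b']['weight'])  # 3: the shared edge accumulates both weights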
datamachine/twx.botapi | twx/botapi/botapi.py | https://github.com/datamachine/twx.botapi/blob/c85184da738169e8f9d6d8e62970540f427c486e/twx/botapi/botapi.py#L4370-L4372 | def leave_chat(self, *args, **kwargs):
"""See :func:`leave_chat_member`"""
return leave_chat(*args, **self._merge_overrides(**kwargs)).run() | [
"def",
"leave_chat",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"leave_chat",
"(",
"*",
"args",
",",
"*",
"*",
"self",
".",
"_merge_overrides",
"(",
"*",
"*",
"kwargs",
")",
")",
".",
"run",
"(",
")"
]
| See :func:`leave_chat` | [
"See",
":",
"func",
":",
"leave_chat_member"
]
| python | train |
hobson/pug-invest | pug/invest/plot.py | https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/plot.py#L277-L291 | def prettify_datetimes(datetimes, format="%b %Y", max_nonempty_strings=None, blank=''):
"""Designed for composing lists of strings suitable for pyplot axis labels
Often the xtick spacing doesn't allow room for 100's of text labels, so this
eliminates every other one, then every other one of those, until they fit.
>>> thin_string_list(['x']*20, 5) # doctring: +NORMALIZE_WHITESPACE
['x', '', '', '', 'x', '', '', '', 'x', '', '', '', 'x', '', '', '', 'x', '', '', '']
"""
# blank some labels to make sure they don't overlap
datetimes = [make_datetime(d) for d in datetimes]
datestrs = [d.strftime("%b %Y") for d in datetimes]
if max_nonempty_strings:
return thin_string_list(datestrs, max_nonempty_strings=max_nonempty_strings, blank=blank)
return datestrs | [
"def",
"prettify_datetimes",
"(",
"datetimes",
",",
"format",
"=",
"\"%b %Y\"",
",",
"max_nonempty_strings",
"=",
"None",
",",
"blank",
"=",
"''",
")",
":",
"# blank some labels to make sure they don't overlap",
"datetimes",
"=",
"[",
"make_datetime",
"(",
"d",
")",
"for",
"d",
"in",
"datetimes",
"]",
"datestrs",
"=",
"[",
"d",
".",
"strftime",
"(",
"\"%b %Y\"",
")",
"for",
"d",
"in",
"datetimes",
"]",
"if",
"max_nonempty_strings",
":",
"return",
"thin_string_list",
"(",
"datestrs",
",",
"max_nonempty_strings",
"=",
"max_nonempty_strings",
",",
"blank",
"=",
"blank",
")",
"return",
"datestrs"
]
| Designed for composing lists of strings suitable for pyplot axis labels
Often the xtick spacing doesn't allow room for 100's of text labels, so this
eliminates every other one, then every other one of those, until they fit.
>>> thin_string_list(['x']*20, 5) # doctring: +NORMALIZE_WHITESPACE
['x', '', '', '', 'x', '', '', '', 'x', '', '', '', 'x', '', '', '', 'x', '', '', ''] | [
"Designed",
"for",
"composing",
"lists",
"of",
"strings",
"suitable",
"for",
"pyplot",
"axis",
"labels"
]
| python | train |
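A self-contained version of the thinning behavior the docstring demonstrates: blank every other label, repeatedly, until at most max_nonempty remain (the helper name is illustrative):

def thin_labels(labels, max_nonempty, blank=''):
    labels = list(labels)
    step = 1
    # Keep doubling the stride until few enough labels remain visible.
    while sum(1 for s in labels if s != blank) > max_nonempty:
        step *= 2
        labels = [s if i % step == 0 else blank for i, s in enumerate(labels)]
    return labels

print(thin_labels(['x'] * 20, 5))
# ['x', '', '', '', 'x', '', '', '', 'x', ...] -- matches the doctest above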
salu133445/pypianoroll | pypianoroll/multitrack.py | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L486-L538 | def merge_tracks(self, track_indices=None, mode='sum', program=0,
is_drum=False, name='merged', remove_merged=False):
"""
Merge pianorolls of the tracks specified by `track_indices`. The merged
track will have program number as given by `program` and drum indicator
as given by `is_drum`. The merged track will be appended at the end of
the track list.
Parameters
----------
track_indices : list
The indices of tracks to be merged. Defaults to all the tracks.
mode : {'sum', 'max', 'any'}
A string that indicates the merging strategy to apply along the
        track axis. Defaults to 'sum'.
- In 'sum' mode, the merged pianoroll is the sum of the collected
pianorolls. Note that for binarized pianorolls, integer summation
is performed.
- In 'max' mode, for each pixel, the maximum value among the
collected pianorolls is assigned to the merged pianoroll.
- In 'any' mode, the value of a pixel in the merged pianoroll is
True if any of the collected pianorolls has nonzero value at that
pixel; False if all the collected pianorolls are inactive
(zero-valued) at that pixel.
program: int
A program number according to General MIDI specification [1].
Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano).
is_drum : bool
        A boolean that indicates whether it is a percussion track.
Defaults to False.
name : str
A name to be assigned to the merged track. Defaults to 'merged'.
remove_merged : bool
True to remove the source tracks from the track list. False to keep
them. Defaults to False.
References
----------
[1] https://www.midi.org/specifications/item/gm-level-1-sound-set
"""
if mode not in ('max', 'sum', 'any'):
raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.")
merged = self[track_indices].get_merged_pianoroll(mode)
merged_track = Track(merged, program, is_drum, name)
self.append_track(merged_track)
if remove_merged:
self.remove_tracks(track_indices) | [
"def",
"merge_tracks",
"(",
"self",
",",
"track_indices",
"=",
"None",
",",
"mode",
"=",
"'sum'",
",",
"program",
"=",
"0",
",",
"is_drum",
"=",
"False",
",",
"name",
"=",
"'merged'",
",",
"remove_merged",
"=",
"False",
")",
":",
"if",
"mode",
"not",
"in",
"(",
"'max'",
",",
"'sum'",
",",
"'any'",
")",
":",
"raise",
"ValueError",
"(",
"\"`mode` must be one of {'max', 'sum', 'any'}.\"",
")",
"merged",
"=",
"self",
"[",
"track_indices",
"]",
".",
"get_merged_pianoroll",
"(",
"mode",
")",
"merged_track",
"=",
"Track",
"(",
"merged",
",",
"program",
",",
"is_drum",
",",
"name",
")",
"self",
".",
"append_track",
"(",
"merged_track",
")",
"if",
"remove_merged",
":",
"self",
".",
"remove_tracks",
"(",
"track_indices",
")"
]
| Merge pianorolls of the tracks specified by `track_indices`. The merged
track will have program number as given by `program` and drum indicator
as given by `is_drum`. The merged track will be appended at the end of
the track list.
Parameters
----------
track_indices : list
The indices of tracks to be merged. Defaults to all the tracks.
mode : {'sum', 'max', 'any'}
A string that indicates the merging strategy to apply along the
        track axis. Defaults to 'sum'.
- In 'sum' mode, the merged pianoroll is the sum of the collected
pianorolls. Note that for binarized pianorolls, integer summation
is performed.
- In 'max' mode, for each pixel, the maximum value among the
collected pianorolls is assigned to the merged pianoroll.
- In 'any' mode, the value of a pixel in the merged pianoroll is
True if any of the collected pianorolls has nonzero value at that
pixel; False if all the collected pianorolls are inactive
(zero-valued) at that pixel.
program: int
A program number according to General MIDI specification [1].
Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano).
is_drum : bool
A boolean number that indicates whether it is a percussion track.
Defaults to False.
name : str
A name to be assigned to the merged track. Defaults to 'merged'.
remove_merged : bool
True to remove the source tracks from the track list. False to keep
them. Defaults to False.
References
----------
[1] https://www.midi.org/specifications/item/gm-level-1-sound-set | [
"Merge",
"pianorolls",
"of",
"the",
"tracks",
"specified",
"by",
"track_indices",
".",
"The",
"merged",
"track",
"will",
"have",
"program",
"number",
"as",
"given",
"by",
"program",
"and",
"drum",
"indicator",
"as",
"given",
"by",
"is_drum",
".",
"The",
"merged",
"track",
"will",
"be",
"appended",
"at",
"the",
"end",
"of",
"the",
"track",
"list",
"."
]
| python | train |
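The three merge modes reduce a stack of pianorolls along the track axis. A NumPy-only demonstration of what each mode computes on toy-sized arrays:

import numpy as np

# Two toy pianorolls stacked on the track axis: (tracks, time steps, pitches).
stacked = np.array([[[0, 60, 0],
                     [0,  0, 0]],
                    [[0, 40, 50],
                     [0,  0, 0]]])

merged_sum = stacked.sum(axis=0)   # 'sum': integer addition per pixel
merged_max = stacked.max(axis=0)   # 'max': loudest track wins per pixel
merged_any = stacked.any(axis=0)   # 'any': boolean activity map

print(merged_sum[0])  # [  0 100  50]
print(merged_max[0])  # [ 0 60 50]
print(merged_any[0])  # [False  True  True]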
senaite/senaite.core | bika/lims/browser/analyses/view.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analyses/view.py#L323-L345 | def is_uncertainty_edition_allowed(self, analysis_brain):
"""Checks if the edition of the uncertainty field is allowed
:param analysis_brain: Brain that represents an analysis
:return: True if the user can edit the result field, otherwise False
"""
# Only allow to edit the uncertainty if result edition is allowed
if not self.is_result_edition_allowed(analysis_brain):
return False
# Get the ananylsis object
obj = api.get_object(analysis_brain)
# Manual setting of uncertainty is not allowed
if not obj.getAllowManualUncertainty():
return False
# Result is a detection limit -> uncertainty setting makes no sense!
if obj.getDetectionLimitOperand() in [LDL, UDL]:
return False
return True | [
"def",
"is_uncertainty_edition_allowed",
"(",
"self",
",",
"analysis_brain",
")",
":",
"# Only allow to edit the uncertainty if result edition is allowed",
"if",
"not",
"self",
".",
"is_result_edition_allowed",
"(",
"analysis_brain",
")",
":",
"return",
"False",
"# Get the ananylsis object",
"obj",
"=",
"api",
".",
"get_object",
"(",
"analysis_brain",
")",
"# Manual setting of uncertainty is not allowed",
"if",
"not",
"obj",
".",
"getAllowManualUncertainty",
"(",
")",
":",
"return",
"False",
"# Result is a detection limit -> uncertainty setting makes no sense!",
"if",
"obj",
".",
"getDetectionLimitOperand",
"(",
")",
"in",
"[",
"LDL",
",",
"UDL",
"]",
":",
"return",
"False",
"return",
"True"
]
| Checks if the edition of the uncertainty field is allowed
:param analysis_brain: Brain that represents an analysis
:return: True if the user can edit the result field, otherwise False | [
"Checks",
"if",
"the",
"edition",
"of",
"the",
"uncertainty",
"field",
"is",
"allowed"
]
| python | train |
facelessuser/backrefs | backrefs/uniprops/__init__.py | https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/uniprops/__init__.py#L271-L282 | def get_numeric_type_property(value, is_bytes=False):
"""Get `NUMERIC TYPE` property."""
obj = unidata.ascii_numeric_type if is_bytes else unidata.unicode_numeric_type
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['numerictype'].get(negated, negated)
else:
value = unidata.unicode_alias['numerictype'].get(value, value)
return obj[value] | [
"def",
"get_numeric_type_property",
"(",
"value",
",",
"is_bytes",
"=",
"False",
")",
":",
"obj",
"=",
"unidata",
".",
"ascii_numeric_type",
"if",
"is_bytes",
"else",
"unidata",
".",
"unicode_numeric_type",
"if",
"value",
".",
"startswith",
"(",
"'^'",
")",
":",
"negated",
"=",
"value",
"[",
"1",
":",
"]",
"value",
"=",
"'^'",
"+",
"unidata",
".",
"unicode_alias",
"[",
"'numerictype'",
"]",
".",
"get",
"(",
"negated",
",",
"negated",
")",
"else",
":",
"value",
"=",
"unidata",
".",
"unicode_alias",
"[",
"'numerictype'",
"]",
".",
"get",
"(",
"value",
",",
"value",
")",
"return",
"obj",
"[",
"value",
"]"
]
| Get `NUMERIC TYPE` property. | [
"Get",
"NUMERIC",
"TYPE",
"property",
"."
]
| python | train |
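The lookup above resolves an alias while preserving a leading '^' that selects the negated table entry. A standalone sketch of that pattern with a made-up alias table:

ALIASES = {'de': 'decimal', 'di': 'digit'}
TABLE = {'decimal': 'DECIMAL-SET', '^decimal': 'INVERSE-DECIMAL-SET'}

def resolve(value, aliases, table):
    if value.startswith('^'):
        negated = value[1:]
        value = '^' + aliases.get(negated, negated)  # normalize, keep the negation
    else:
        value = aliases.get(value, value)
    return table[value]

print(resolve('de', ALIASES, TABLE))   # DECIMAL-SET
print(resolve('^de', ALIASES, TABLE))  # INVERSE-DECIMAL-SET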
carpyncho/feets | feets/datasets/synthetic.py | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/datasets/synthetic.py#L63-L135 | def create_random(magf, magf_params, errf, errf_params,
timef=np.linspace, timef_params=None, size=DEFAULT_SIZE,
id=None, ds_name=DS_NAME, description=DESCRIPTION,
bands=BANDS, metadata=METADATA):
"""Generate a data with any given random function.
Parameters
----------
magf : callable
Function to generate the magnitudes.
magf_params : dict-like
Parameters to feed the `magf` function.
errf : callable
Function to generate the magnitudes.
errf_params : dict-like
Parameters to feed the `errf` function.
timef : callable, (default=numpy.linspace)
Function to generate the times.
timef_params : dict-like or None, (default={"start": 0., "stop": 1.})
Parameters to feed the `timef` callable.
size : int (default=10000)
        Number of observations of the light curves
id : object (default=None)
Id of the created data.
ds_name : str (default="feets-synthetic")
Name of the dataset
description : str (default="Lightcurve created with random numbers")
Description of the data
bands : tuple of strings (default=("B", "V"))
The bands to be created
metadata : dict-like or None (default=None)
The metadata of the created data
Returns
-------
data
        A Data object with random lightcurves.
Examples
--------
.. code-block:: pycon
>>> from numpy import random
>>> create_random(
... magf=random.normal, magf_params={"loc": 0, "scale": 1},
... errf=random.normal, errf_params={"loc": 0, "scale": 0.008})
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
"""
timef_params = (
{"start": 0., "stop": 1.}
if timef_params is None else
timef_params.copy())
timef_params.update(num=size)
magf_params = magf_params.copy()
magf_params.update(size=size)
errf_params = errf_params.copy()
errf_params.update(size=size)
data = {}
for band in bands:
data[band] = {
"time": timef(**timef_params),
"magnitude": magf(**magf_params),
"error": errf(**errf_params)}
return Data(
id=id, ds_name=ds_name, description=description,
bands=bands, metadata=metadata, data=data) | [
"def",
"create_random",
"(",
"magf",
",",
"magf_params",
",",
"errf",
",",
"errf_params",
",",
"timef",
"=",
"np",
".",
"linspace",
",",
"timef_params",
"=",
"None",
",",
"size",
"=",
"DEFAULT_SIZE",
",",
"id",
"=",
"None",
",",
"ds_name",
"=",
"DS_NAME",
",",
"description",
"=",
"DESCRIPTION",
",",
"bands",
"=",
"BANDS",
",",
"metadata",
"=",
"METADATA",
")",
":",
"timef_params",
"=",
"(",
"{",
"\"start\"",
":",
"0.",
",",
"\"stop\"",
":",
"1.",
"}",
"if",
"timef_params",
"is",
"None",
"else",
"timef_params",
".",
"copy",
"(",
")",
")",
"timef_params",
".",
"update",
"(",
"num",
"=",
"size",
")",
"magf_params",
"=",
"magf_params",
".",
"copy",
"(",
")",
"magf_params",
".",
"update",
"(",
"size",
"=",
"size",
")",
"errf_params",
"=",
"errf_params",
".",
"copy",
"(",
")",
"errf_params",
".",
"update",
"(",
"size",
"=",
"size",
")",
"data",
"=",
"{",
"}",
"for",
"band",
"in",
"bands",
":",
"data",
"[",
"band",
"]",
"=",
"{",
"\"time\"",
":",
"timef",
"(",
"*",
"*",
"timef_params",
")",
",",
"\"magnitude\"",
":",
"magf",
"(",
"*",
"*",
"magf_params",
")",
",",
"\"error\"",
":",
"errf",
"(",
"*",
"*",
"errf_params",
")",
"}",
"return",
"Data",
"(",
"id",
"=",
"id",
",",
"ds_name",
"=",
"ds_name",
",",
"description",
"=",
"description",
",",
"bands",
"=",
"bands",
",",
"metadata",
"=",
"metadata",
",",
"data",
"=",
"data",
")"
]
| Generate a data with any given random function.
Parameters
----------
magf : callable
Function to generate the magnitudes.
magf_params : dict-like
Parameters to feed the `magf` function.
errf : callable
        Function to generate the errors.
errf_params : dict-like
Parameters to feed the `errf` function.
timef : callable, (default=numpy.linspace)
Function to generate the times.
timef_params : dict-like or None, (default={"start": 0., "stop": 1.})
Parameters to feed the `timef` callable.
size : int (default=10000)
        Number of observations of the light curves
id : object (default=None)
Id of the created data.
ds_name : str (default="feets-synthetic")
Name of the dataset
description : str (default="Lightcurve created with random numbers")
Description of the data
bands : tuple of strings (default=("B", "V"))
The bands to be created
metadata : dict-like or None (default=None)
The metadata of the created data
Returns
-------
data
        A Data object with random lightcurves.
Examples
--------
.. code-block:: pycon
>>> from numpy import random
>>> create_random(
... magf=random.normal, magf_params={"loc": 0, "scale": 1},
... errf=random.normal, errf_params={"loc": 0, "scale": 0.008})
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V')) | [
"Generate",
"a",
"data",
"with",
"any",
"given",
"random",
"function",
"."
]
| python | train |
hawkular/hawkular-client-python | hawkular/alerts/triggers.py | https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L132-L148 | def get(self, tags=[], trigger_ids=[]):
"""
Get triggers with optional filtering. Querying without parameters returns all the trigger definitions.
:param tags: Fetch triggers with matching tags only. Use * to match all values.
:param trigger_ids: List of triggerIds to fetch
"""
params = {}
if len(tags) > 0:
params['tags'] = ','.join(tags)
if len(trigger_ids) > 0:
params['triggerIds'] = ','.join(trigger_ids)
url = self._service_url('triggers', params=params)
triggers_dict = self._get(url)
return Trigger.list_to_object_list(triggers_dict) | [
"def",
"get",
"(",
"self",
",",
"tags",
"=",
"[",
"]",
",",
"trigger_ids",
"=",
"[",
"]",
")",
":",
"params",
"=",
"{",
"}",
"if",
"len",
"(",
"tags",
")",
">",
"0",
":",
"params",
"[",
"'tags'",
"]",
"=",
"','",
".",
"join",
"(",
"tags",
")",
"if",
"len",
"(",
"trigger_ids",
")",
">",
"0",
":",
"params",
"[",
"'triggerIds'",
"]",
"=",
"','",
".",
"join",
"(",
"trigger_ids",
")",
"url",
"=",
"self",
".",
"_service_url",
"(",
"'triggers'",
",",
"params",
"=",
"params",
")",
"triggers_dict",
"=",
"self",
".",
"_get",
"(",
"url",
")",
"return",
"Trigger",
".",
"list_to_object_list",
"(",
"triggers_dict",
")"
]
| Get triggers with optional filtering. Querying without parameters returns all the trigger definitions.
:param tags: Fetch triggers with matching tags only. Use * to match all values.
:param trigger_ids: List of triggerIds to fetch | [
"Get",
"triggers",
"with",
"optional",
"filtering",
".",
"Querying",
"without",
"parameters",
"returns",
"all",
"the",
"trigger",
"definitions",
"."
]
| python | train |
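The method above builds its query string by comma-joining list arguments before hitting the triggers endpoint. The assembly step in isolation (tag and id values are made up; no Hawkular server involved):

params = {}
tags = ['env:prod', 'team:*']
trigger_ids = ['t1', 't2']
if len(tags) > 0:
    params['tags'] = ','.join(tags)
if len(trigger_ids) > 0:
    params['triggerIds'] = ','.join(trigger_ids)
print(params)  # {'tags': 'env:prod,team:*', 'triggerIds': 't1,t2'}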
yunojuno-archive/django-package-monitor | package_monitor/pypi.py | https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/pypi.py#L85-L93 | def data(self):
"""Fetch latest data from PyPI, and cache for 30s."""
key = cache_key(self.name)
data = cache.get(key)
if data is None:
logger.debug("Updating package info for %s from PyPI.", self.name)
data = requests.get(self.url).json()
cache.set(key, data, PYPI_CACHE_EXPIRY)
return data | [
"def",
"data",
"(",
"self",
")",
":",
"key",
"=",
"cache_key",
"(",
"self",
".",
"name",
")",
"data",
"=",
"cache",
".",
"get",
"(",
"key",
")",
"if",
"data",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"\"Updating package info for %s from PyPI.\"",
",",
"self",
".",
"name",
")",
"data",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"url",
")",
".",
"json",
"(",
")",
"cache",
".",
"set",
"(",
"key",
",",
"data",
",",
"PYPI_CACHE_EXPIRY",
")",
"return",
"data"
]
| Fetch latest data from PyPI, and cache for 30s. | [
"Fetch",
"latest",
"data",
"from",
"PyPI",
"and",
"cache",
"for",
"30s",
"."
]
| python | train |
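The property above is a cache-aside lookup: try the cache, fetch on a miss, store with a TTL. The same pattern as a generic sketch, assuming only an object with Django-style get/set (the function name is illustrative):

def cached_fetch(cache, key, fetch, ttl):
    value = cache.get(key)
    if value is None:
        value = fetch()            # e.g. requests.get(url).json()
        cache.set(key, value, ttl)
    return value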
qzmfranklin/easyshell | easyshell/basic_shell.py | https://github.com/qzmfranklin/easyshell/blob/00c2e90e7767d32e7e127fc8c6875845aa308295/easyshell/basic_shell.py#L151-L178 | def _do_help(self, cmd, args):
"""Display doc strings of the shell and its commands.
"""
print(self.doc_string())
print()
# Create data of the commands table.
data_unsorted = []
cls = self.__class__
for name in dir(cls):
obj = getattr(cls, name)
if iscommand(obj):
cmds = []
for cmd in getcommands(obj):
cmds.append(cmd)
cmd_str = ','.join(sorted(cmds))
doc_str = textwrap.dedent(obj.__doc__).strip() if obj.__doc__ else \
'(no doc string available)'
data_unsorted.append([cmd_str, doc_str])
data_sorted = sorted(data_unsorted, key = lambda x: x[0])
data = [['COMMANDS', 'DOC STRING']] + data_sorted
# Create the commands table.
table_banner = 'List of Available Commands'
table = terminaltables.SingleTable(data, table_banner)
table.inner_row_border = True
table.inner_heading_row_border = True
print(table.table) | [
"def",
"_do_help",
"(",
"self",
",",
"cmd",
",",
"args",
")",
":",
"print",
"(",
"self",
".",
"doc_string",
"(",
")",
")",
"print",
"(",
")",
"# Create data of the commands table.",
"data_unsorted",
"=",
"[",
"]",
"cls",
"=",
"self",
".",
"__class__",
"for",
"name",
"in",
"dir",
"(",
"cls",
")",
":",
"obj",
"=",
"getattr",
"(",
"cls",
",",
"name",
")",
"if",
"iscommand",
"(",
"obj",
")",
":",
"cmds",
"=",
"[",
"]",
"for",
"cmd",
"in",
"getcommands",
"(",
"obj",
")",
":",
"cmds",
".",
"append",
"(",
"cmd",
")",
"cmd_str",
"=",
"','",
".",
"join",
"(",
"sorted",
"(",
"cmds",
")",
")",
"doc_str",
"=",
"textwrap",
".",
"dedent",
"(",
"obj",
".",
"__doc__",
")",
".",
"strip",
"(",
")",
"if",
"obj",
".",
"__doc__",
"else",
"'(no doc string available)'",
"data_unsorted",
".",
"append",
"(",
"[",
"cmd_str",
",",
"doc_str",
"]",
")",
"data_sorted",
"=",
"sorted",
"(",
"data_unsorted",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
"data",
"=",
"[",
"[",
"'COMMANDS'",
",",
"'DOC STRING'",
"]",
"]",
"+",
"data_sorted",
"# Create the commands table.",
"table_banner",
"=",
"'List of Available Commands'",
"table",
"=",
"terminaltables",
".",
"SingleTable",
"(",
"data",
",",
"table_banner",
")",
"table",
".",
"inner_row_border",
"=",
"True",
"table",
".",
"inner_heading_row_border",
"=",
"True",
"print",
"(",
"table",
".",
"table",
")"
]
| Display doc strings of the shell and its commands. | [
"Display",
"doc",
"strings",
"of",
"the",
"shell",
"and",
"its",
"commands",
"."
]
| python | train |
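The rendering in _do_help leans on the terminaltables package; the same calls in a self-contained example:

import terminaltables

data = [
    ['COMMANDS', 'DOC STRING'],
    ['help', 'Display doc strings of the shell and its commands.'],
]
table = terminaltables.SingleTable(data, 'List of Available Commands')
table.inner_row_border = True
table.inner_heading_row_border = True
print(table.table)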
jrfonseca/gprof2dot | gprof2dot.py | https://github.com/jrfonseca/gprof2dot/blob/0500e89f001e555f5eaa32e70793b4875f2f70db/gprof2dot.py#L484-L515 | def integrate(self, outevent, inevent):
"""Propagate function time ratio along the function calls.
Must be called after finding the cycles.
See also:
- http://citeseer.ist.psu.edu/graham82gprof.html
"""
# Sanity checking
assert outevent not in self
for function in compat_itervalues(self.functions):
assert outevent not in function
assert inevent in function
for call in compat_itervalues(function.calls):
assert outevent not in call
if call.callee_id != function.id:
assert call.ratio is not None
# Aggregate the input for each cycle
for cycle in self.cycles:
total = inevent.null()
for function in compat_itervalues(self.functions):
total = inevent.aggregate(total, function[inevent])
self[inevent] = total
# Integrate along the edges
total = inevent.null()
for function in compat_itervalues(self.functions):
total = inevent.aggregate(total, function[inevent])
self._integrate_function(function, outevent, inevent)
self[outevent] = total | [
"def",
"integrate",
"(",
"self",
",",
"outevent",
",",
"inevent",
")",
":",
"# Sanity checking",
"assert",
"outevent",
"not",
"in",
"self",
"for",
"function",
"in",
"compat_itervalues",
"(",
"self",
".",
"functions",
")",
":",
"assert",
"outevent",
"not",
"in",
"function",
"assert",
"inevent",
"in",
"function",
"for",
"call",
"in",
"compat_itervalues",
"(",
"function",
".",
"calls",
")",
":",
"assert",
"outevent",
"not",
"in",
"call",
"if",
"call",
".",
"callee_id",
"!=",
"function",
".",
"id",
":",
"assert",
"call",
".",
"ratio",
"is",
"not",
"None",
"# Aggregate the input for each cycle ",
"for",
"cycle",
"in",
"self",
".",
"cycles",
":",
"total",
"=",
"inevent",
".",
"null",
"(",
")",
"for",
"function",
"in",
"compat_itervalues",
"(",
"self",
".",
"functions",
")",
":",
"total",
"=",
"inevent",
".",
"aggregate",
"(",
"total",
",",
"function",
"[",
"inevent",
"]",
")",
"self",
"[",
"inevent",
"]",
"=",
"total",
"# Integrate along the edges",
"total",
"=",
"inevent",
".",
"null",
"(",
")",
"for",
"function",
"in",
"compat_itervalues",
"(",
"self",
".",
"functions",
")",
":",
"total",
"=",
"inevent",
".",
"aggregate",
"(",
"total",
",",
"function",
"[",
"inevent",
"]",
")",
"self",
".",
"_integrate_function",
"(",
"function",
",",
"outevent",
",",
"inevent",
")",
"self",
"[",
"outevent",
"]",
"=",
"total"
]
| Propagate function time ratio along the function calls.
Must be called after finding the cycles.
See also:
- http://citeseer.ist.psu.edu/graham82gprof.html | [
"Propagate",
"function",
"time",
"ratio",
"along",
"the",
"function",
"calls",
"."
]
| python | train |
myint/cppclean | cpp/symbols.py | https://github.com/myint/cppclean/blob/8a20c943dca1049e87d57137938f92d7735828dc/cpp/symbols.py#L64-L85 | def _lookup_global(self, symbol):
"""Helper for lookup_symbol that only looks up global variables.
Args:
symbol: Symbol
"""
assert symbol.parts
namespace = self.namespaces
if len(symbol.parts) == 1:
# If there is only one part, look in globals.
namespace = self.namespaces[None]
try:
# Try to do a normal, global namespace lookup.
return self._lookup_namespace(symbol, namespace)
except Error as orig_exc:
try:
# The normal lookup can fail if all of the parts aren't
# namespaces. This happens with OuterClass::Inner.
namespace = self.namespaces[None]
return self._lookup_namespace(symbol, namespace)
except Error:
raise orig_exc | [
"def",
"_lookup_global",
"(",
"self",
",",
"symbol",
")",
":",
"assert",
"symbol",
".",
"parts",
"namespace",
"=",
"self",
".",
"namespaces",
"if",
"len",
"(",
"symbol",
".",
"parts",
")",
"==",
"1",
":",
"# If there is only one part, look in globals.",
"namespace",
"=",
"self",
".",
"namespaces",
"[",
"None",
"]",
"try",
":",
"# Try to do a normal, global namespace lookup.",
"return",
"self",
".",
"_lookup_namespace",
"(",
"symbol",
",",
"namespace",
")",
"except",
"Error",
"as",
"orig_exc",
":",
"try",
":",
"# The normal lookup can fail if all of the parts aren't",
"# namespaces. This happens with OuterClass::Inner.",
"namespace",
"=",
"self",
".",
"namespaces",
"[",
"None",
"]",
"return",
"self",
".",
"_lookup_namespace",
"(",
"symbol",
",",
"namespace",
")",
"except",
"Error",
":",
"raise",
"orig_exc"
]
| Helper for lookup_symbol that only looks up global variables.
Args:
symbol: Symbol | [
"Helper",
"for",
"lookup_symbol",
"that",
"only",
"looks",
"up",
"global",
"variables",
"."
]
| python | train |
tensorlayer/tensorlayer | tensorlayer/iterate.py | https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/iterate.py#L15-L72 | def minibatches(inputs=None, targets=None, batch_size=None, allow_dynamic_batch_size=False, shuffle=False):
"""Generate a generator that input a group of example in numpy.array and
their labels, return the examples and labels by the given batch size.
Parameters
----------
inputs : numpy.array
The input features; every row is an example.
targets : numpy.array
The labels of inputs; every row is an example.
batch_size : int
The batch size.
allow_dynamic_batch_size: boolean
Allow the use of the last data batch in case the number of examples is not a multiple of batch_size, this may result in unexpected behaviour if other functions expect a fixed-sized batch-size.
shuffle : boolean
Whether to shuffle the dataset before returning batches.
Examples
--------
>>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']])
>>> y = np.asarray([0,1,2,3,4,5])
>>> for batch in tl.iterate.minibatches(inputs=X, targets=y, batch_size=2, shuffle=False):
>>> print(batch)
(array([['a', 'a'], ['b', 'b']], dtype='<U1'), array([0, 1]))
(array([['c', 'c'], ['d', 'd']], dtype='<U1'), array([2, 3]))
(array([['e', 'e'], ['f', 'f']], dtype='<U1'), array([4, 5]))
Notes
-----
If you have two inputs and one label and want to shuffle them together, e.g. X1 (1000, 100), X2 (1000, 80) and Y (1000, 1), you can stack them together (`np.hstack((X1, X2))`)
into (1000, 180) and feed to ``inputs``. After getting a batch, you can split it back into X1 and X2.
"""
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
# for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
# chulei: handling the case where the number of samples is not a multiple of batch_size, avoiding wasting samples
for start_idx in range(0, len(inputs), batch_size):
end_idx = start_idx + batch_size
if end_idx > len(inputs):
if allow_dynamic_batch_size:
end_idx = len(inputs)
else:
break
if shuffle:
excerpt = indices[start_idx:end_idx]
else:
excerpt = slice(start_idx, end_idx)
if (isinstance(inputs, list) or isinstance(targets, list)) and (shuffle == True):
# zsdonghao: for list indexing when shuffle==True
yield [inputs[i] for i in excerpt], [targets[i] for i in excerpt]
else:
yield inputs[excerpt], targets[excerpt] | [
"def",
"minibatches",
"(",
"inputs",
"=",
"None",
",",
"targets",
"=",
"None",
",",
"batch_size",
"=",
"None",
",",
"allow_dynamic_batch_size",
"=",
"False",
",",
"shuffle",
"=",
"False",
")",
":",
"if",
"len",
"(",
"inputs",
")",
"!=",
"len",
"(",
"targets",
")",
":",
"raise",
"AssertionError",
"(",
"\"The length of inputs and targets should be equal\"",
")",
"if",
"shuffle",
":",
"indices",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"inputs",
")",
")",
"np",
".",
"random",
".",
"shuffle",
"(",
"indices",
")",
"# for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):",
"# chulei: handling the case where the number of samples is not a multiple of batch_size, avoiding wasting samples",
"for",
"start_idx",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"inputs",
")",
",",
"batch_size",
")",
":",
"end_idx",
"=",
"start_idx",
"+",
"batch_size",
"if",
"end_idx",
">",
"len",
"(",
"inputs",
")",
":",
"if",
"allow_dynamic_batch_size",
":",
"end_idx",
"=",
"len",
"(",
"inputs",
")",
"else",
":",
"break",
"if",
"shuffle",
":",
"excerpt",
"=",
"indices",
"[",
"start_idx",
":",
"end_idx",
"]",
"else",
":",
"excerpt",
"=",
"slice",
"(",
"start_idx",
",",
"end_idx",
")",
"if",
"(",
"isinstance",
"(",
"inputs",
",",
"list",
")",
"or",
"isinstance",
"(",
"targets",
",",
"list",
")",
")",
"and",
"(",
"shuffle",
"==",
"True",
")",
":",
"# zsdonghao: for list indexing when shuffle==True",
"yield",
"[",
"inputs",
"[",
"i",
"]",
"for",
"i",
"in",
"excerpt",
"]",
",",
"[",
"targets",
"[",
"i",
"]",
"for",
"i",
"in",
"excerpt",
"]",
"else",
":",
"yield",
"inputs",
"[",
"excerpt",
"]",
",",
"targets",
"[",
"excerpt",
"]"
]
| Generate a generator that inputs a group of examples in numpy.array and
their labels, and returns the examples and labels by the given batch size.
Parameters
----------
inputs : numpy.array
The input features; every row is an example.
targets : numpy.array
The labels of inputs; every row is an example.
batch_size : int
The batch size.
allow_dynamic_batch_size: boolean
Allow the use of the last data batch in case the number of examples is not a multiple of batch_size, this may result in unexpected behaviour if other functions expect a fixed-sized batch-size.
shuffle : boolean
Whether to shuffle the dataset before returning batches.
Examples
--------
>>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']])
>>> y = np.asarray([0,1,2,3,4,5])
>>> for batch in tl.iterate.minibatches(inputs=X, targets=y, batch_size=2, shuffle=False):
>>> print(batch)
(array([['a', 'a'], ['b', 'b']], dtype='<U1'), array([0, 1]))
(array([['c', 'c'], ['d', 'd']], dtype='<U1'), array([2, 3]))
(array([['e', 'e'], ['f', 'f']], dtype='<U1'), array([4, 5]))
Notes
-----
If you have two inputs and one label and want to shuffle them together, e.g. X1 (1000, 100), X2 (1000, 80) and Y (1000, 1), you can stack them together (`np.hstack((X1, X2))`)
into (1000, 180) and feed to ``inputs``. After getting a batch, you can split it back into X1 and X2. | [
"Generate",
"a",
"generator",
"that",
"input",
"a",
"group",
"of",
"example",
"in",
"numpy",
".",
"array",
"and",
"their",
"labels",
"return",
"the",
"examples",
"and",
"labels",
"by",
"the",
"given",
"batch",
"size",
"."
]
| python | valid |
moonso/loqusdb | loqusdb/utils/annotate.py | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/annotate.py#L32-L43 | def annotate_snv(adpter, variant):
"""Annotate an SNV/INDEL variant
Args:
adapter(loqusdb.plugin.adapter)
variant(cyvcf2.Variant)
"""
variant_id = get_variant_id(variant)
variant_obj = adapter.get_variant(variant={'_id':variant_id})
# `annotate_variant` is the assumed helper name; the original line called its own result name.
annotated_variant = annotate_variant(variant, variant_obj)
return annotated_variant | [
"def",
"annotate_snv",
"(",
"adpter",
",",
"variant",
")",
":",
"variant_id",
"=",
"get_variant_id",
"(",
"variant",
")",
"variant_obj",
"=",
"adapter",
".",
"get_variant",
"(",
"variant",
"=",
"{",
"'_id'",
":",
"variant_id",
"}",
")",
"annotated_variant",
"=",
"annotated_variant",
"(",
"variant",
",",
"variant_obj",
")",
"return",
"annotated_variant"
]
| Annotate an SNV/INDEL variant
Args:
adapter(loqusdb.plugin.adapter)
variant(cyvcf2.Variant) | [
"Annotate",
"an",
"SNV",
"/",
"INDEL",
"variant",
"Args",
":",
"adapter",
"(",
"loqusdb",
".",
"plugin",
".",
"adapter",
")",
"variant",
"(",
"cyvcf2",
".",
"Variant",
")"
]
| python | train |
nkmathew/yasi-sexp-indenter | yasi.py | https://github.com/nkmathew/yasi-sexp-indenter/blob/6ec2a4675e79606c555bcb67494a0ba994b05805/yasi.py#L314-L336 | def is_macro_name(func_name, dialect):
""" is_macro_name(func_name : str, dialect : str) -> bool
>>> is_macro_name('yacc:define-parser')
True
Tests if a word is a macro using the language's/dialect's convention,
e.g. macros in Lisp usually start with 'def' and 'with' in Scheme. Saves
the effort of finding all the macros in Lisp/Scheme/Clojure/newLISP and storing
them in a list.
"""
if not func_name:
return False
if dialect == 'lisp':
return re.search('^(macro|def|do|with-)', func_name, re.I)
if dialect == 'scheme':
return re.search('^(call-|def|with-)', func_name)
if dialect == 'clojure':
return re.search('^(def|with)', func_name)
if dialect == 'newlisp':
return re.search('^(macro|def)', func_name)
else:
return False | [
"def",
"is_macro_name",
"(",
"func_name",
",",
"dialect",
")",
":",
"if",
"not",
"func_name",
":",
"return",
"False",
"if",
"dialect",
"==",
"'lisp'",
":",
"return",
"re",
".",
"search",
"(",
"'^(macro|def|do|with-)'",
",",
"func_name",
",",
"re",
".",
"I",
")",
"if",
"dialect",
"==",
"'scheme'",
":",
"return",
"re",
".",
"search",
"(",
"'^(call-|def|with-)'",
",",
"func_name",
")",
"if",
"dialect",
"==",
"'clojure'",
":",
"return",
"re",
".",
"search",
"(",
"'^(def|with)'",
",",
"func_name",
")",
"if",
"dialect",
"==",
"'newlisp'",
":",
"return",
"re",
".",
"search",
"(",
"'^(macro|def)'",
",",
"func_name",
")",
"else",
":",
"return",
"False"
]
| is_macro_name(func_name : str, dialect : str) -> bool
>>> is_macro_name('yacc:define-parser')
True
Tests if a word is a macro using the language's/dialect's convention,
e.g. macros in Lisp usually start with 'def' and 'with' in Scheme. Saves
the effort of finding all the macros in Lisp/Scheme/Clojure/newLISP and storing
them in a list. | [
"is_macro_name",
"(",
"func_name",
":",
"str",
"dialect",
":",
"str",
")",
"-",
">",
"bool"
]
| python | train |
iron-io/iron_core_python | iron_core.py | https://github.com/iron-io/iron_core_python/blob/f09a160a854912efcb75a810702686bc25b74fa8/iron_core.py#L209-L270 | def request(self, url, method, body="", headers={}, retry=True):
"""Execute an HTTP request and return a dict containing the response
and the response status code.
Keyword arguments:
url -- The path to execute the result against, not including the API
version or project ID, with no leading /. Required.
method -- The HTTP method to use. Required.
body -- A string or file object to send as the body of the request.
Defaults to an empty string.
headers -- HTTP Headers to send with the request. Can overwrite the
defaults. Defaults to {}.
retry -- Whether exponential backoff should be employed. Defaults
to True.
"""
if headers:
headers = dict(list(headers.items()) + list(self.headers.items()))
else:
headers = self.headers
if not sys.version_info >= (3,) and headers:
headers = dict((k.encode('ascii') if isinstance(k, unicode) else k,
v.encode('ascii') if isinstance(v, unicode) else v)
for k, v in headers.items())
url = self.base_url + url
if not sys.version_info >= (3,):
if isinstance(url, unicode):
url = url.encode('ascii')
r = self._doRequest(url, method, body, headers)
retry_http_codes = [503, 504]
if r.status_code in retry_http_codes and retry:
tries = 5
delay = .5
backoff = 2
while r.status_code in retry_http_codes and tries > 0:
tries -= 1
time.sleep(delay)
delay *= backoff
r = self._doRequest(url, method, body, headers)
r.raise_for_status()
result = {}
contentType = r.headers["Content-Type"]
if contentType is None:
contentType = "text/plain"
else:
contentType = contentType.split(";")[0]
if contentType.lower() == "application/json":
try:
result["body"] = json.loads(r.text)
except:
result["body"] = r.text
else:
result["body"] = r.text
result["status"] = r.status_code
result["resp"] = r
result["content-type"] = contentType
return result | [
"def",
"request",
"(",
"self",
",",
"url",
",",
"method",
",",
"body",
"=",
"\"\"",
",",
"headers",
"=",
"{",
"}",
",",
"retry",
"=",
"True",
")",
":",
"if",
"headers",
":",
"headers",
"=",
"dict",
"(",
"list",
"(",
"headers",
".",
"items",
"(",
")",
")",
"+",
"list",
"(",
"self",
".",
"headers",
".",
"items",
"(",
")",
")",
")",
"else",
":",
"headers",
"=",
"self",
".",
"headers",
"if",
"not",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
")",
"and",
"headers",
":",
"headers",
"=",
"dict",
"(",
"(",
"k",
".",
"encode",
"(",
"'ascii'",
")",
"if",
"isinstance",
"(",
"k",
",",
"unicode",
")",
"else",
"k",
",",
"v",
".",
"encode",
"(",
"'ascii'",
")",
"if",
"isinstance",
"(",
"v",
",",
"unicode",
")",
"else",
"v",
")",
"for",
"k",
",",
"v",
"in",
"headers",
".",
"items",
"(",
")",
")",
"url",
"=",
"self",
".",
"base_url",
"+",
"url",
"if",
"not",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
")",
":",
"if",
"isinstance",
"(",
"url",
",",
"unicode",
")",
":",
"url",
"=",
"url",
".",
"encode",
"(",
"'ascii'",
")",
"r",
"=",
"self",
".",
"_doRequest",
"(",
"url",
",",
"method",
",",
"body",
",",
"headers",
")",
"retry_http_codes",
"=",
"[",
"503",
",",
"504",
"]",
"if",
"r",
".",
"status_code",
"in",
"retry_http_codes",
"and",
"retry",
":",
"tries",
"=",
"5",
"delay",
"=",
".5",
"backoff",
"=",
"2",
"while",
"r",
".",
"status_code",
"in",
"retry_http_codes",
"and",
"tries",
">",
"0",
":",
"tries",
"-=",
"1",
"time",
".",
"sleep",
"(",
"delay",
")",
"delay",
"*=",
"backoff",
"r",
"=",
"self",
".",
"_doRequest",
"(",
"url",
",",
"method",
",",
"body",
",",
"headers",
")",
"r",
".",
"raise_for_status",
"(",
")",
"result",
"=",
"{",
"}",
"contentType",
"=",
"r",
".",
"headers",
"[",
"\"Content-Type\"",
"]",
"if",
"contentType",
"is",
"None",
":",
"contentType",
"=",
"\"text/plain\"",
"else",
":",
"contentType",
"=",
"contentType",
".",
"split",
"(",
"\";\"",
")",
"[",
"0",
"]",
"if",
"contentType",
".",
"lower",
"(",
")",
"==",
"\"application/json\"",
":",
"try",
":",
"result",
"[",
"\"body\"",
"]",
"=",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
"except",
":",
"result",
"[",
"\"body\"",
"]",
"=",
"r",
".",
"text",
"else",
":",
"result",
"[",
"\"body\"",
"]",
"=",
"r",
".",
"text",
"result",
"[",
"\"status\"",
"]",
"=",
"r",
".",
"status_code",
"result",
"[",
"\"resp\"",
"]",
"=",
"r",
"result",
"[",
"\"content-type\"",
"]",
"=",
"contentType",
"return",
"result"
]
| Execute an HTTP request and return a dict containing the response
and the response status code.
Keyword arguments:
url -- The path to execute the result against, not including the API
version or project ID, with no leading /. Required.
method -- The HTTP method to use. Required.
body -- A string or file object to send as the body of the request.
Defaults to an empty string.
headers -- HTTP Headers to send with the request. Can overwrite the
defaults. Defaults to {}.
retry -- Whether exponential backoff should be employed. Defaults
to True. | [
"Execute",
"an",
"HTTP",
"request",
"and",
"return",
"a",
"dict",
"containing",
"the",
"response",
"and",
"the",
"response",
"status",
"code",
"."
]
| python | train |
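The retry loop inside request is plain exponential backoff. Extracted into a reusable sketch (helper name and defaults are illustrative; do_request is any zero-argument callable returning a response with a status_code):

import time

def retry_with_backoff(do_request, retry_codes=(503, 504), tries=5, delay=0.5, backoff=2):
    # Re-issue the request while it returns a retryable status, doubling the wait each time.
    r = do_request()
    while r.status_code in retry_codes and tries > 0:
        tries -= 1
        time.sleep(delay)
        delay *= backoff
        r = do_request()
    return r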
sixty-north/cosmic-ray | src/cosmic_ray/mutating.py | https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/src/cosmic_ray/mutating.py#L33-L56 | def apply_mutation(module_path, operator, occurrence):
"""Apply a specific mutation to a file on disk.
Args:
module_path: The path to the module to mutate.
operator: The `operator` instance to use.
occurrence: The occurrence of the operator to apply.
Returns: A `(unmutated-code, mutated-code)` tuple. If no mutation was
performed, the `mutated-code` is `None`.
"""
module_ast = get_ast(module_path, python_version=operator.python_version)
original_code = module_ast.get_code()
visitor = MutationVisitor(occurrence, operator)
mutated_ast = visitor.walk(module_ast)
mutated_code = None
if visitor.mutation_applied:
mutated_code = mutated_ast.get_code()
with module_path.open(mode='wt', encoding='utf-8') as handle:
handle.write(mutated_code)
handle.flush()
return original_code, mutated_code | [
"def",
"apply_mutation",
"(",
"module_path",
",",
"operator",
",",
"occurrence",
")",
":",
"module_ast",
"=",
"get_ast",
"(",
"module_path",
",",
"python_version",
"=",
"operator",
".",
"python_version",
")",
"original_code",
"=",
"module_ast",
".",
"get_code",
"(",
")",
"visitor",
"=",
"MutationVisitor",
"(",
"occurrence",
",",
"operator",
")",
"mutated_ast",
"=",
"visitor",
".",
"walk",
"(",
"module_ast",
")",
"mutated_code",
"=",
"None",
"if",
"visitor",
".",
"mutation_applied",
":",
"mutated_code",
"=",
"mutated_ast",
".",
"get_code",
"(",
")",
"with",
"module_path",
".",
"open",
"(",
"mode",
"=",
"'wt'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"mutated_code",
")",
"handle",
".",
"flush",
"(",
")",
"return",
"original_code",
",",
"mutated_code"
]
| Apply a specific mutation to a file on disk.
Args:
module_path: The path to the module to mutate.
operator: The `operator` instance to use.
occurrence: The occurrence of the operator to apply.
Returns: A `(unmutated-code, mutated-code)` tuple. If no mutation was
performed, the `mutated-code` is `None`. | [
"Apply",
"a",
"specific",
"mutation",
"to",
"a",
"file",
"on",
"disk",
"."
]
| python | train |
sbarham/dsrt | build/lib/dsrt/application/Context.py | https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/build/lib/dsrt/application/Context.py#L52-L59 | def build_model(self):
'''Find out the type of model configured and dispatch the request to the appropriate method'''
# NOTE: the 'red'/'hred' values are assumed, inferred from the builder method names.
if self.model_config['model-type'] == 'red':
return self.build_red()
elif self.model_config['model-type'] == 'hred':
return self.build_hred()
else:
raise Error("Unrecognized model type '{}'".format(self.model_config['model-type'])) | [
"def",
"build_model",
"(",
"self",
")",
":",
"if",
"self",
".",
"model_config",
"[",
"'model-type'",
"]",
":",
"return",
"self",
".",
"build_red",
"(",
")",
"elif",
"self",
".",
"model_config",
"[",
"'model-type'",
"]",
":",
"return",
"self",
".",
"buidl_hred",
"(",
")",
"else",
":",
"raise",
"Error",
"(",
"\"Unrecognized model type '{}'\"",
".",
"format",
"(",
"self",
".",
"model_config",
"[",
"'model-type'",
"]",
")",
")"
]
| Find out the type of model configured and dispatch the request to the appropriate method | [
"Find",
"out",
"the",
"type",
"of",
"model",
"configured",
"and",
"dispatch",
"the",
"request",
"to",
"the",
"appropriate",
"method"
]
| python | train |
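With more model types, the if/elif chain in build_model generalizes to a dispatch table; a sketch under the same assumption about the 'red'/'hred' config values (builders passed in to keep it self-contained):

def build_model(config, build_red, build_hred):
    builders = {'red': build_red, 'hred': build_hred}
    model_type = config['model-type']
    if model_type not in builders:
        raise ValueError("Unrecognized model type '{}'".format(model_type))
    return builders[model_type]()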
gbiggs/rtctree | rtctree/manager.py | https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/manager.py#L198-L213 | def delete_component(self, instance_name):
'''Delete a component.
Deletes the component specified by @ref instance_name from the manager.
This will invalidate any objects that are children of this node.
@param instance_name The instance name of the component to delete.
@raises FailedToDeleteComponentError
'''
with self._mutex:
if self._obj.delete_component(instance_name) != RTC.RTC_OK:
raise exceptions.FailedToDeleteComponentError(instance_name)
# The list of child components will have changed now, so it must be
# reparsed.
self._parse_component_children() | [
"def",
"delete_component",
"(",
"self",
",",
"instance_name",
")",
":",
"with",
"self",
".",
"_mutex",
":",
"if",
"self",
".",
"_obj",
".",
"delete_component",
"(",
"instance_name",
")",
"!=",
"RTC",
".",
"RTC_OK",
":",
"raise",
"exceptions",
".",
"FailedToDeleteComponentError",
"(",
"instance_name",
")",
"# The list of child components will have changed now, so it must be",
"# reparsed.",
"self",
".",
"_parse_component_children",
"(",
")"
]
| Delete a component.
Deletes the component specified by @ref instance_name from the manager.
This will invalidate any objects that are children of this node.
@param instance_name The instance name of the component to delete.
@raises FailedToDeleteComponentError | [
"Delete",
"a",
"component",
"."
]
| python | train |
raiden-network/raiden | raiden/ui/cli.py | https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/ui/cli.py#L64-L461 | def options(func):
"""Having the common app options as a decorator facilitates reuse."""
# Until https://github.com/pallets/click/issues/926 is fixed the options need to be re-defined
# for every use
options_ = [
option(
'--datadir',
help='Directory for storing raiden data.',
default=lambda: os.path.join(os.path.expanduser('~'), '.raiden'),
type=click.Path(
exists=False,
dir_okay=True,
file_okay=False,
writable=True,
resolve_path=True,
allow_dash=False,
),
show_default=True,
),
option(
'--config-file',
help='Configuration file (TOML)',
default=os.path.join('${datadir}', 'config.toml'),
type=PathRelativePath(
file_okay=True,
dir_okay=False,
exists=False,
readable=True,
resolve_path=True,
),
show_default=True,
),
option(
'--keystore-path',
help=(
'If you have a non-standard path for the ethereum keystore directory'
' provide it using this argument.'
),
default=None,
type=click.Path(exists=True),
show_default=True,
),
option(
'--address',
help=(
'The ethereum address you would like raiden to use and for which '
'a keystore file exists in your local system.'
),
default=None,
type=ADDRESS_TYPE,
show_default=True,
),
option(
'--password-file',
help='Text file containing the password for the provided account',
default=None,
type=click.File(lazy=True),
show_default=True,
),
option(
'--tokennetwork-registry-contract-address',
help='hex encoded address of the Token Network Registry contract.',
type=ADDRESS_TYPE,
show_default=True,
),
option(
'--secret-registry-contract-address',
help='hex encoded address of the Secret Registry contract.',
type=ADDRESS_TYPE,
show_default=True,
),
option(
'--service-registry-contract-address',
help='hex encoded address of the Service Registry contract.',
type=ADDRESS_TYPE,
),
option(
'--endpoint-registry-contract-address',
help='hex encoded address of the Endpoint Registry contract.',
type=ADDRESS_TYPE,
show_default=True,
),
option(
'--user-deposit-contract-address',
help='hex encoded address of the User Deposit contract.',
type=ADDRESS_TYPE,
),
option(
'--console',
help='Start the interactive raiden console',
is_flag=True,
),
option(
'--transport',
help='Transport system to use. UDP is not recommended',
type=click.Choice(['udp', 'matrix']),
default='matrix',
show_default=True,
),
option(
'--network-id',
help=(
'Specify the network name/id of the Ethereum network to run Raiden on.\n'
'Available networks:\n'
'"mainnet" - network id: 1\n'
'"ropsten" - network id: 3\n'
'"rinkeby" - network id: 4\n'
'"goerli" - network id: 5\n'
'"kovan" - network id: 42\n'
'"<NETWORK_ID>": use the given network id directly\n'
),
type=NetworkChoiceType([
'mainnet',
'ropsten',
'rinkeby',
'goerli',
'kovan',
'<NETWORK_ID>',
]),
default='mainnet',
show_default=True,
),
option(
'--environment-type',
help=(
'Specify the environment (production or development).\n'
'The "production" setting adds some safety measures and is mainly intended '
'for running Raiden on the mainnet.\n'
),
type=EnumChoiceType(Environment),
default=Environment.PRODUCTION.value,
show_default=True,
),
option(
'--accept-disclaimer',
help='Bypass the experimental software disclaimer prompt',
is_flag=True,
),
option(
'--showconfig',
help='Show all configuration values used to control Raiden\'s behavior',
is_flag=True,
),
option_group(
'Ethereum Node Options',
option(
'--sync-check/--no-sync-check',
help='Checks if the ethereum node is synchronized against etherscan.',
default=True,
show_default=True,
),
option(
'--gas-price',
help=(
'Set the gas price for ethereum transactions. If not provided '
'the normal gas price strategy is used.\n'
'Available options:\n'
'"fast" - transactions are usually mined within 60 seconds\n'
'"normal" - transactions are usually mined within 5 minutes\n'
'<GAS_PRICE> - use given gas price\n'
),
type=GasPriceChoiceType(['normal', 'fast']),
default='fast',
show_default=True,
),
option(
'--eth-rpc-endpoint',
help=(
'"host:port" address of ethereum JSON-RPC server.\n'
'Also accepts a protocol prefix (http:// or https://) with optional port'
),
default='http://127.0.0.1:8545', # geth default jsonrpc port
type=str,
show_default=True,
),
),
option_group(
'Raiden Services Options',
option(
'--routing-mode',
help=(
'Specify the routing mode to be used.\n'
'"basic": use local routing\n'
'"pfs": use the path finding service\n'
),
type=EnumChoiceType(RoutingMode),
default=RoutingMode.BASIC.value,
show_default=True,
),
option(
'--pathfinding-service-address',
help=(
'URL to the Raiden path finding service to request paths from.\n'
'Example: https://pfs-ropsten.services-dev.raiden.network\n'
'Can also be given the "auto" value so that raiden chooses a '
'PFS randomly from the service registry contract'
),
default='auto',
type=str,
show_default=True,
),
option(
'--pathfinding-eth-address',
help=(
'Ethereum address to which to pay the fees of the path finding service.\n'
'If the path finding service is chosen from the service registry contract, '
'this option will be ignored. If the path finding service is configured '
'manually, i. e. "--pathfinding-service-address" set to a value other than '
'"auto", this argument must be set to a valid EIP55 address.'
),
type=str,
),
option(
'--pathfinding-max-paths',
help='Set maximum number of paths to be requested from the path finding service.',
default=DEFAULT_PATHFINDING_MAX_PATHS,
type=int,
show_default=True,
),
option(
'--pathfinding-max-fee',
help='Set max fee per request paid to the path finding service.',
default=DEFAULT_PATHFINDING_MAX_FEE,
type=int,
show_default=True,
),
option(
'--pathfinding-iou-timeout',
help='Number of blocks before a new IOU to the path finding service expires.',
default=DEFAULT_PATHFINDING_IOU_TIMEOUT,
type=int,
show_default=True,
),
option(
'--enable-monitoring',
help='Enable broadcasting of balance proofs to the monitoring services.',
is_flag=True,
),
),
option_group(
'UDP Transport Options',
option(
'--listen-address',
help='"host:port" for the raiden service to listen on.',
default='0.0.0.0:{}'.format(INITIAL_PORT),
type=str,
show_default=True,
),
option(
'--max-unresponsive-time',
help=(
'Max time in seconds for which an address can send no packets and '
'still be considered healthy.'
),
default=30,
type=int,
show_default=True,
),
option(
'--send-ping-time',
help=(
'Time in seconds after which if we have received no message from a '
'node we have a connection with, we are going to send a PING message'
),
default=60,
type=int,
show_default=True,
),
option(
'--nat',
help=(
'Manually specify method to use for determining public IP / NAT traversal.\n'
'Available methods:\n'
'"auto" - Try UPnP, then STUN, fallback to none\n'
'"upnp" - Try UPnP, fallback to none\n'
'"stun" - Try STUN, fallback to none\n'
'"none" - Use the local interface address '
'(this will likely cause connectivity issues)\n'
'"ext:<IP>[:<PORT>]" - manually specify the external IP (and optionally port '
'number)'
),
type=NATChoiceType(['auto', 'upnp', 'stun', 'none', 'ext:<IP>[:<PORT>]']),
default='auto',
show_default=True,
option_group='udp_transport',
),
),
option_group(
'Matrix Transport Options',
option(
'--matrix-server',
help=(
'Matrix homeserver to use for communication.\n'
'Valid values:\n'
'"auto" - automatically select a suitable homeserver\n'
'A URL pointing to a Raiden matrix homeserver'
),
default='auto',
type=MatrixServerType(['auto', '<url>']),
show_default=True,
),
),
option_group(
'Logging Options',
option(
'--log-config',
help='Log level configuration.\n'
'Format: [<logger-name-1>]:<level>[,<logger-name-2>:level][,...]',
type=LOG_LEVEL_CONFIG_TYPE,
default=':info',
show_default=True,
),
option(
'--log-file',
help='file path for logging to file',
default=None,
type=str,
show_default=True,
),
option(
'--log-json',
help='Output log lines in JSON format',
is_flag=True,
),
option(
'--disable-debug-logfile',
help=(
'Disable the debug logfile feature. This is independent of '
'the normal logging setup'
),
is_flag=True,
),
),
option_group(
'RPC Options',
option(
'--rpc/--no-rpc',
help='Start with or without the RPC server.',
default=True,
show_default=True,
),
option(
'--rpccorsdomain',
help='Comma separated list of domains to accept cross origin requests.',
default='http://localhost:*/*',
type=str,
show_default=True,
),
option(
'--api-address',
help='"host:port" for the RPC server to listen on.',
default='127.0.0.1:5001',
type=str,
show_default=True,
),
option(
'--web-ui/--no-web-ui',
help=(
'Start with or without the web interface. Requires --rpc. '
'It will be accessible at http://<api-address>. '
),
default=True,
show_default=True,
),
),
option_group(
'Debugging options',
option(
'--unrecoverable-error-should-crash',
help=(
'DO NOT use, unless you know what you are doing. If provided '
'along with a production environment setting then all '
'unrecoverable errors will lead to a crash and not simply get logged.'
),
is_flag=True,
default=False,
),
),
option_group(
'Hash Resolver options',
option(
'--resolver-endpoint',
help=(
'URL of the resolver server that is used to resolve '
'a payment hash to a secret. '
'Accepts a protocol prefix (http:// or https://) with optional port'
),
default=None,
type=str,
show_default=True,
),
),
]
for option_ in reversed(options_):
func = option_(func)
return func | [
"def",
"options",
"(",
"func",
")",
":",
"# Until https://github.com/pallets/click/issues/926 is fixed the options need to be re-defined",
"# for every use",
"options_",
"=",
"[",
"option",
"(",
"'--datadir'",
",",
"help",
"=",
"'Directory for storing raiden data.'",
",",
"default",
"=",
"lambda",
":",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
",",
"'.raiden'",
")",
",",
"type",
"=",
"click",
".",
"Path",
"(",
"exists",
"=",
"False",
",",
"dir_okay",
"=",
"True",
",",
"file_okay",
"=",
"False",
",",
"writable",
"=",
"True",
",",
"resolve_path",
"=",
"True",
",",
"allow_dash",
"=",
"False",
",",
")",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--config-file'",
",",
"help",
"=",
"'Configuration file (TOML)'",
",",
"default",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'${datadir}'",
",",
"'config.toml'",
")",
",",
"type",
"=",
"PathRelativePath",
"(",
"file_okay",
"=",
"True",
",",
"dir_okay",
"=",
"False",
",",
"exists",
"=",
"False",
",",
"readable",
"=",
"True",
",",
"resolve_path",
"=",
"True",
",",
")",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--keystore-path'",
",",
"help",
"=",
"(",
"'If you have a non-standard path for the ethereum keystore directory'",
"' provide it using this argument.'",
")",
",",
"default",
"=",
"None",
",",
"type",
"=",
"click",
".",
"Path",
"(",
"exists",
"=",
"True",
")",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--address'",
",",
"help",
"=",
"(",
"'The ethereum address you would like raiden to use and for which '",
"'a keystore file exists in your local system.'",
")",
",",
"default",
"=",
"None",
",",
"type",
"=",
"ADDRESS_TYPE",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--password-file'",
",",
"help",
"=",
"'Text file containing the password for the provided account'",
",",
"default",
"=",
"None",
",",
"type",
"=",
"click",
".",
"File",
"(",
"lazy",
"=",
"True",
")",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--tokennetwork-registry-contract-address'",
",",
"help",
"=",
"'hex encoded address of the Token Network Registry contract.'",
",",
"type",
"=",
"ADDRESS_TYPE",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--secret-registry-contract-address'",
",",
"help",
"=",
"'hex encoded address of the Secret Registry contract.'",
",",
"type",
"=",
"ADDRESS_TYPE",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--service-registry-contract-address'",
",",
"help",
"=",
"'hex encoded address of the Service Registry contract.'",
",",
"type",
"=",
"ADDRESS_TYPE",
",",
")",
",",
"option",
"(",
"'--endpoint-registry-contract-address'",
",",
"help",
"=",
"'hex encoded address of the Endpoint Registry contract.'",
",",
"type",
"=",
"ADDRESS_TYPE",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--user-deposit-contract-address'",
",",
"help",
"=",
"'hex encoded address of the User Deposit contract.'",
",",
"type",
"=",
"ADDRESS_TYPE",
",",
")",
",",
"option",
"(",
"'--console'",
",",
"help",
"=",
"'Start the interactive raiden console'",
",",
"is_flag",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--transport'",
",",
"help",
"=",
"'Transport system to use. UDP is not recommended'",
",",
"type",
"=",
"click",
".",
"Choice",
"(",
"[",
"'udp'",
",",
"'matrix'",
"]",
")",
",",
"default",
"=",
"'matrix'",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--network-id'",
",",
"help",
"=",
"(",
"'Specify the network name/id of the Ethereum network to run Raiden on.\\n'",
"'Available networks:\\n'",
"'\"mainnet\" - network id: 1\\n'",
"'\"ropsten\" - network id: 3\\n'",
"'\"rinkeby\" - network id: 4\\n'",
"'\"goerli\" - network id: 5\\n'",
"'\"kovan\" - network id: 42\\n'",
"'\"<NETWORK_ID>\": use the given network id directly\\n'",
")",
",",
"type",
"=",
"NetworkChoiceType",
"(",
"[",
"'mainnet'",
",",
"'ropsten'",
",",
"'rinkeby'",
",",
"'goerli'",
",",
"'kovan'",
",",
"'<NETWORK_ID>'",
",",
"]",
")",
",",
"default",
"=",
"'mainnet'",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--environment-type'",
",",
"help",
"=",
"(",
"'Specify the environment (production or development).\\n'",
"'The \"production\" setting adds some safety measures and is mainly intended '",
"'for running Raiden on the mainnet.\\n'",
")",
",",
"type",
"=",
"EnumChoiceType",
"(",
"Environment",
")",
",",
"default",
"=",
"Environment",
".",
"PRODUCTION",
".",
"value",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--accept-disclaimer'",
",",
"help",
"=",
"'Bypass the experimental software disclaimer prompt'",
",",
"is_flag",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--showconfig'",
",",
"help",
"=",
"'Show all configuration values used to control Raiden\\'s behavior'",
",",
"is_flag",
"=",
"True",
",",
")",
",",
"option_group",
"(",
"'Ethereum Node Options'",
",",
"option",
"(",
"'--sync-check/--no-sync-check'",
",",
"help",
"=",
"'Checks if the ethereum node is synchronized against etherscan.'",
",",
"default",
"=",
"True",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--gas-price'",
",",
"help",
"=",
"(",
"'Set the gas price for ethereum transactions. If not provided '",
"'the normal gas price startegy is used.\\n'",
"'Available options:\\n'",
"'\"fast\" - transactions are usually mined within 60 seconds\\n'",
"'\"normal\" - transactions are usually mined within 5 minutes\\n'",
"'<GAS_PRICE> - use given gas price\\n'",
")",
",",
"type",
"=",
"GasPriceChoiceType",
"(",
"[",
"'normal'",
",",
"'fast'",
"]",
")",
",",
"default",
"=",
"'fast'",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--eth-rpc-endpoint'",
",",
"help",
"=",
"(",
"'\"host:port\" address of ethereum JSON-RPC server.\\n'",
"'Also accepts a protocol prefix (http:// or https://) with optional port'",
")",
",",
"default",
"=",
"'http://127.0.0.1:8545'",
",",
"# geth default jsonrpc port",
"type",
"=",
"str",
",",
"show_default",
"=",
"True",
",",
")",
",",
")",
",",
"option_group",
"(",
"'Raiden Services Options'",
",",
"option",
"(",
"'--routing-mode'",
",",
"help",
"=",
"(",
"'Specify the routing mode to be used.\\n'",
"'\"basic\": use local routing\\n'",
"'\"pfs\": use the path finding service\\n'",
")",
",",
"type",
"=",
"EnumChoiceType",
"(",
"RoutingMode",
")",
",",
"default",
"=",
"RoutingMode",
".",
"BASIC",
".",
"value",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--pathfinding-service-address'",
",",
"help",
"=",
"(",
"'URL to the Raiden path finding service to request paths from.\\n'",
"'Example: https://pfs-ropsten.services-dev.raiden.network\\n'",
"'Can also be given the \"auto\" value so that raiden chooses a '",
"'PFS randomly from the service registry contract'",
")",
",",
"default",
"=",
"'auto'",
",",
"type",
"=",
"str",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--pathfinding-eth-address'",
",",
"help",
"=",
"(",
"'Ethereum address to which to pay the fees of the path finding service.\\n'",
"'If the path finding service is chosen from the service registry contract, '",
"'this option will be ignored. If the path finding service is configured '",
"'manually, i. e. \"--pathfinding-service-address\" set to a value other than '",
"'\"auto\", this argument must be set to a valid EIP55 address.'",
")",
",",
"type",
"=",
"str",
",",
")",
",",
"option",
"(",
"'--pathfinding-max-paths'",
",",
"help",
"=",
"'Set maximum number of paths to be requested from the path finding service.'",
",",
"default",
"=",
"DEFAULT_PATHFINDING_MAX_PATHS",
",",
"type",
"=",
"int",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--pathfinding-max-fee'",
",",
"help",
"=",
"'Set max fee per request paid to the path finding service.'",
",",
"default",
"=",
"DEFAULT_PATHFINDING_MAX_FEE",
",",
"type",
"=",
"int",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--pathfinding-iou-timeout'",
",",
"help",
"=",
"'Number of blocks before a new IOU to the path finding service expires.'",
",",
"default",
"=",
"DEFAULT_PATHFINDING_IOU_TIMEOUT",
",",
"type",
"=",
"int",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--enable-monitoring'",
",",
"help",
"=",
"'Enable broadcasting of balance proofs to the monitoring services.'",
",",
"is_flag",
"=",
"True",
",",
")",
",",
")",
",",
"option_group",
"(",
"'UDP Transport Options'",
",",
"option",
"(",
"'--listen-address'",
",",
"help",
"=",
"'\"host:port\" for the raiden service to listen on.'",
",",
"default",
"=",
"'0.0.0.0:{}'",
".",
"format",
"(",
"INITIAL_PORT",
")",
",",
"type",
"=",
"str",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--max-unresponsive-time'",
",",
"help",
"=",
"(",
"'Max time in seconds for which an address can send no packets and '",
"'still be considered healthy.'",
")",
",",
"default",
"=",
"30",
",",
"type",
"=",
"int",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--send-ping-time'",
",",
"help",
"=",
"(",
"'Time in seconds after which if we have received no message from a '",
"'node we have a connection with, we are going to send a PING message'",
")",
",",
"default",
"=",
"60",
",",
"type",
"=",
"int",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--nat'",
",",
"help",
"=",
"(",
"'Manually specify method to use for determining public IP / NAT traversal.\\n'",
"'Available methods:\\n'",
"'\"auto\" - Try UPnP, then STUN, fallback to none\\n'",
"'\"upnp\" - Try UPnP, fallback to none\\n'",
"'\"stun\" - Try STUN, fallback to none\\n'",
"'\"none\" - Use the local interface address '",
"'(this will likely cause connectivity issues)\\n'",
"'\"ext:<IP>[:<PORT>]\" - manually specify the external IP (and optionally port '",
"'number)'",
")",
",",
"type",
"=",
"NATChoiceType",
"(",
"[",
"'auto'",
",",
"'upnp'",
",",
"'stun'",
",",
"'none'",
",",
"'ext:<IP>[:<PORT>]'",
"]",
")",
",",
"default",
"=",
"'auto'",
",",
"show_default",
"=",
"True",
",",
"option_group",
"=",
"'udp_transport'",
",",
")",
",",
")",
",",
"option_group",
"(",
"'Matrix Transport Options'",
",",
"option",
"(",
"'--matrix-server'",
",",
"help",
"=",
"(",
"'Matrix homeserver to use for communication.\\n'",
"'Valid values:\\n'",
"'\"auto\" - automatically select a suitable homeserver\\n'",
"'A URL pointing to a Raiden matrix homeserver'",
")",
",",
"default",
"=",
"'auto'",
",",
"type",
"=",
"MatrixServerType",
"(",
"[",
"'auto'",
",",
"'<url>'",
"]",
")",
",",
"show_default",
"=",
"True",
",",
")",
",",
")",
",",
"option_group",
"(",
"'Logging Options'",
",",
"option",
"(",
"'--log-config'",
",",
"help",
"=",
"'Log level configuration.\\n'",
"'Format: [<logger-name-1>]:<level>[,<logger-name-2>:level][,...]'",
",",
"type",
"=",
"LOG_LEVEL_CONFIG_TYPE",
",",
"default",
"=",
"':info'",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--log-file'",
",",
"help",
"=",
"'file path for logging to file'",
",",
"default",
"=",
"None",
",",
"type",
"=",
"str",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--log-json'",
",",
"help",
"=",
"'Output log lines in JSON format'",
",",
"is_flag",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--disable-debug-logfile'",
",",
"help",
"=",
"(",
"'Disable the debug logfile feature. This is independent of '",
"'the normal logging setup'",
")",
",",
"is_flag",
"=",
"True",
",",
")",
",",
")",
",",
"option_group",
"(",
"'RPC Options'",
",",
"option",
"(",
"'--rpc/--no-rpc'",
",",
"help",
"=",
"'Start with or without the RPC server.'",
",",
"default",
"=",
"True",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--rpccorsdomain'",
",",
"help",
"=",
"'Comma separated list of domains to accept cross origin requests.'",
",",
"default",
"=",
"'http://localhost:*/*'",
",",
"type",
"=",
"str",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--api-address'",
",",
"help",
"=",
"'\"host:port\" for the RPC server to listen on.'",
",",
"default",
"=",
"'127.0.0.1:5001'",
",",
"type",
"=",
"str",
",",
"show_default",
"=",
"True",
",",
")",
",",
"option",
"(",
"'--web-ui/--no-web-ui'",
",",
"help",
"=",
"(",
"'Start with or without the web interface. Requires --rpc. '",
"'It will be accessible at http://<api-address>. '",
")",
",",
"default",
"=",
"True",
",",
"show_default",
"=",
"True",
",",
")",
",",
")",
",",
"option_group",
"(",
"'Debugging options'",
",",
"option",
"(",
"'--unrecoverable-error-should-crash'",
",",
"help",
"=",
"(",
"'DO NOT use, unless you know what you are doing. If provided '",
"'along with a production environment setting then all '",
"'unrecoverable errors will lead to a crash and not simply get logged.'",
")",
",",
"is_flag",
"=",
"True",
",",
"default",
"=",
"False",
",",
")",
",",
")",
",",
"option_group",
"(",
"'Hash Resolver options'",
",",
"option",
"(",
"'--resolver-endpoint'",
",",
"help",
"=",
"(",
"'URL of the resolver server that is used to resolve '",
"'a payment hash to a secret. '",
"'Accepts a protocol prefix (http:// or https://) with optional port'",
")",
",",
"default",
"=",
"None",
",",
"type",
"=",
"str",
",",
"show_default",
"=",
"True",
",",
")",
",",
")",
",",
"]",
"for",
"option_",
"in",
"reversed",
"(",
"options_",
")",
":",
"func",
"=",
"option_",
"(",
"func",
")",
"return",
"func"
]
| Having the common app options as a decorator facilitates reuse. | [
"Having",
"the",
"common",
"app",
"options",
"as",
"a",
"decorator",
"facilitates",
"reuse",
"."
]
| python | train |
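The decorator above exists so one option list can be reused across several click commands. The core of that pattern in a few lines (option names are illustrative):

import click

_common = [
    click.option('--datadir', default='~/.raiden', show_default=True),
    click.option('--log-json', is_flag=True),
]

def options(func):
    # Apply in reverse so --help lists the options in their declared order.
    for option_ in reversed(_common):
        func = option_(func)
    return func

@click.command()
@options
def run(datadir, log_json):
    click.echo(datadir)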
vanheeringen-lab/gimmemotifs | gimmemotifs/scanner.py | https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/scanner.py#L533-L548 | def best_score(self, seqs, scan_rc=True, normalize=False):
"""
give the score of the best match of each motif in each sequence
returns an iterator of lists containing floats
"""
self.set_threshold(threshold=0.0)
if normalize and len(self.meanstd) == 0:
self.set_meanstd()
means = np.array([self.meanstd[m][0] for m in self.motif_ids])
stds = np.array([self.meanstd[m][1] for m in self.motif_ids])
for matches in self.scan(seqs, 1, scan_rc):
scores = np.array([sorted(m, key=lambda x: x[0])[0][0] for m in matches if len(m) > 0])
if normalize:
scores = (scores - means) / stds
yield scores | [
"def",
"best_score",
"(",
"self",
",",
"seqs",
",",
"scan_rc",
"=",
"True",
",",
"normalize",
"=",
"False",
")",
":",
"self",
".",
"set_threshold",
"(",
"threshold",
"=",
"0.0",
")",
"if",
"normalize",
"and",
"len",
"(",
"self",
".",
"meanstd",
")",
"==",
"0",
":",
"self",
".",
"set_meanstd",
"(",
")",
"means",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"meanstd",
"[",
"m",
"]",
"[",
"0",
"]",
"for",
"m",
"in",
"self",
".",
"motif_ids",
"]",
")",
"stds",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"meanstd",
"[",
"m",
"]",
"[",
"1",
"]",
"for",
"m",
"in",
"self",
".",
"motif_ids",
"]",
")",
"for",
"matches",
"in",
"self",
".",
"scan",
"(",
"seqs",
",",
"1",
",",
"scan_rc",
")",
":",
"scores",
"=",
"np",
".",
"array",
"(",
"[",
"sorted",
"(",
"m",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"for",
"m",
"in",
"matches",
"if",
"len",
"(",
"m",
")",
">",
"0",
"]",
")",
"if",
"normalize",
":",
"scores",
"=",
"(",
"scores",
"-",
"means",
")",
"/",
"stds",
"yield",
"scores"
]
| Give the score of the best match of each motif in each sequence.
Returns an iterator of lists containing floats. | [
"give",
"the",
"score",
"of",
"the",
"best",
"match",
"of",
"each",
"motif",
"in",
"each",
"sequence",
"returns",
"an",
"iterator",
"of",
"lists",
"containing",
"floats"
]
| python | train |
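The normalize branch in best_score is a per-motif z-score against precomputed background statistics; the arithmetic in isolation (numbers are made up):

import numpy as np

scores = np.array([8.2, 5.1, 9.7])   # best match score per motif
means = np.array([3.0, 4.0, 5.0])    # per-motif background mean
stds = np.array([2.0, 1.0, 2.5])     # per-motif background std
print((scores - means) / stds)       # approximately [2.6, 1.1, 1.88]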
DLR-RM/RAFCON | source/rafcon/utils/filesystem.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/utils/filesystem.py#L37-L52 | def get_md5_file_hash(filename):
"""Calculates the MD5 hash of a file
:param str filename: The filename (including the path) of the file
:return: Md5 hash of the file
:rtype: str
"""
import hashlib
BLOCKSIZE = 65536
hasher = hashlib.md5()
with open(filename, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return hasher.hexdigest() | [
"def",
"get_md5_file_hash",
"(",
"filename",
")",
":",
"import",
"hashlib",
"BLOCKSIZE",
"=",
"65536",
"hasher",
"=",
"hashlib",
".",
"md5",
"(",
")",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"afile",
":",
"buf",
"=",
"afile",
".",
"read",
"(",
"BLOCKSIZE",
")",
"while",
"len",
"(",
"buf",
")",
">",
"0",
":",
"hasher",
".",
"update",
"(",
"buf",
")",
"buf",
"=",
"afile",
".",
"read",
"(",
"BLOCKSIZE",
")",
"return",
"hasher",
".",
"hexdigest",
"(",
")"
]
| Calculates the MD5 hash of a file
:param str filename: The filename (including the path) of the file
:return: MD5 hash of the file
:rtype: str | [
"Calculates",
"the",
"MD5",
"hash",
"of",
"a",
"file"
]
| python | train |
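A short usage sketch for the chunked hashing above (file names are hypothetical):

h1 = get_md5_file_hash('a.bin')
h2 = get_md5_file_hash('b.bin')
print(h1 == h2)  # True iff the files have identical content (barring MD5 collisions)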
bitesofcode/projexui | projexui/widgets/xviewwidget/xviewprofilemanagermenu.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewprofilemanagermenu.py#L44-L55 | def removeProfile( self ):
"""
Removes the current profile from the system.
"""
manager = self.parent()
prof = manager.currentProfile()
opts = QMessageBox.Yes | QMessageBox.No
question = 'Are you sure you want to remove "%s"?' % prof.name()
answer = QMessageBox.question( self, 'Remove Profile', question, opts)
if ( answer == QMessageBox.Yes ):
manager.removeProfile(prof) | [
"def",
"removeProfile",
"(",
"self",
")",
":",
"manager",
"=",
"self",
".",
"parent",
"(",
")",
"prof",
"=",
"manager",
".",
"currentProfile",
"(",
")",
"opts",
"=",
"QMessageBox",
".",
"Yes",
"|",
"QMessageBox",
".",
"No",
"question",
"=",
"'Are you sure you want to remove \"%s\"?'",
"%",
"prof",
".",
"name",
"(",
")",
"answer",
"=",
"QMessageBox",
".",
"question",
"(",
"self",
",",
"'Remove Profile'",
",",
"question",
",",
"opts",
")",
"if",
"(",
"answer",
"==",
"QMessageBox",
".",
"Yes",
")",
":",
"manager",
".",
"removeProfile",
"(",
"prof",
")"
]
| Removes the current profile from the system. | [
"Removes",
"the",
"current",
"profile",
"from",
"the",
"system",
"."
]
| python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/parallel/client/magics.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/client/magics.py#L346-L352 | def _disable_autopx(self):
"""Disable %autopx by restoring the original InteractiveShell.run_cell.
"""
if self._autopx:
self.shell.run_cell = self._original_run_cell
self._autopx = False
print "%autopx disabled" | [
"def",
"_disable_autopx",
"(",
"self",
")",
":",
"if",
"self",
".",
"_autopx",
":",
"self",
".",
"shell",
".",
"run_cell",
"=",
"self",
".",
"_original_run_cell",
"self",
".",
"_autopx",
"=",
"False",
"print",
"\"%autopx disabled\""
]
| Disable %autopx by restoring the original InteractiveShell.run_cell. | [
"Disable",
"%autopx",
"by",
"restoring",
"the",
"original",
"InteractiveShell",
".",
"run_cell",
"."
]
| python | test |
wright-group/WrightTools | WrightTools/data/_data.py | https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/data/_data.py#L1078-L1171 | def map_variable(
self, variable, points, input_units="same", *, name=None, parent=None, verbose=True
) -> "Data":
"""Map points of an axis to new points using linear interpolation.
Out-of-bounds points are written nan.
Parameters
----------
variable : string
The variable to map onto.
points : array-like or int
If array, the new points. If int, new points will have the same
limits, with int defining the number of evenly spaced points
between.
input_units : str (optional)
The units of the new points. Default is same, which assumes
the new points have the same units as the axis.
name : string (optional)
The name of the new data object. If None, generated from
natural_name. Default is None.
parent : WrightTools.Collection (optional)
Parent of new data object. If None, data is made at root of a
new temporary file.
verbose : bool (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools.Data
New data object.
"""
# get variable index
variable_index = wt_kit.get_index(self.variable_names, variable)
variable = self.variables[variable_index]
# get points
if isinstance(points, int):
points = np.linspace(variable.min(), variable.max(), points)
points = np.array(points)
# points dimensionality
if points.ndim < variable.ndim:
for i, d in enumerate(variable.shape):
if d == 1:
points = np.expand_dims(points, axis=i)
# convert points
if input_units == "same":
pass
else:
points = wt_units.converter(points, input_units, variable.units)
# construct new data object
special = ["name", "axes", "constants", "channel_names", "variable_names"]
kwargs = {k: v for k, v in self.attrs.items() if k not in special}
if name is None:
name = "{0}_{1}_mapped".format(self.natural_name, variable.natural_name)
kwargs["name"] = name
kwargs["parent"] = parent
out = Data(**kwargs)
# mapped variable
values = points
out.create_variable(values=values, **variable.attrs)
# orthogonal variables
for v in self.variables:
if wt_kit.orthogonal(v.shape, variable.shape):
out.create_variable(values=v[:], **v.attrs)
out.transform(*self.axis_expressions)
# interpolate
if self.ndim == 1:
def interpolate(dataset, points):
function = scipy.interpolate.interp1d(variable[:], dataset[:], bounds_error=False)
return function(points)
else:
pts = np.array([a.full.flatten() for a in self.axes]).T
out_pts = np.array([a.full.flatten() for a in out.axes]).T
def interpolate(dataset, points):
values = dataset.full.flatten()
function = scipy.interpolate.LinearNDInterpolator(pts, values, rescale=True)
new = function(out_pts)
new.shape = out.shape
return new
for v in self.variables:
if v.natural_name not in out.variable_names:
out.create_variable(values=interpolate(v, points), **v.attrs)
out.variable_names = self.variable_names # enforce old order
out._variables = None # force regeneration of variables @property
for channel in self.channels:
out.create_channel(values=interpolate(channel, points), **channel.attrs)
# finish
if verbose:
print("data mapped from {0} to {1}".format(self.shape, out.shape))
return out | [
"def",
"map_variable",
"(",
"self",
",",
"variable",
",",
"points",
",",
"input_units",
"=",
"\"same\"",
",",
"*",
",",
"name",
"=",
"None",
",",
"parent",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
"->",
"\"Data\"",
":",
"# get variable index",
"variable_index",
"=",
"wt_kit",
".",
"get_index",
"(",
"self",
".",
"variable_names",
",",
"variable",
")",
"variable",
"=",
"self",
".",
"variables",
"[",
"variable_index",
"]",
"# get points",
"if",
"isinstance",
"(",
"points",
",",
"int",
")",
":",
"points",
"=",
"np",
".",
"linspace",
"(",
"variable",
".",
"min",
"(",
")",
",",
"variable",
".",
"max",
"(",
")",
",",
"points",
")",
"points",
"=",
"np",
".",
"array",
"(",
"points",
")",
"# points dimensionality",
"if",
"points",
".",
"ndim",
"<",
"variable",
".",
"ndim",
":",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"variable",
".",
"shape",
")",
":",
"if",
"d",
"==",
"1",
":",
"points",
"=",
"np",
".",
"expand_dims",
"(",
"points",
",",
"axis",
"=",
"i",
")",
"# convert points",
"if",
"input_units",
"==",
"\"same\"",
":",
"pass",
"else",
":",
"points",
"=",
"wt_units",
".",
"converter",
"(",
"points",
",",
"input_units",
",",
"variable",
".",
"units",
")",
"# construct new data object",
"special",
"=",
"[",
"\"name\"",
",",
"\"axes\"",
",",
"\"constants\"",
",",
"\"channel_names\"",
",",
"\"variable_names\"",
"]",
"kwargs",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"attrs",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"special",
"}",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"\"{0}_{1}_mapped\"",
".",
"format",
"(",
"self",
".",
"natural_name",
",",
"variable",
".",
"natural_name",
")",
"kwargs",
"[",
"\"name\"",
"]",
"=",
"name",
"kwargs",
"[",
"\"parent\"",
"]",
"=",
"parent",
"out",
"=",
"Data",
"(",
"*",
"*",
"kwargs",
")",
"# mapped variable",
"values",
"=",
"points",
"out",
".",
"create_variable",
"(",
"values",
"=",
"values",
",",
"*",
"*",
"variable",
".",
"attrs",
")",
"# orthogonal variables",
"for",
"v",
"in",
"self",
".",
"variables",
":",
"if",
"wt_kit",
".",
"orthogonal",
"(",
"v",
".",
"shape",
",",
"variable",
".",
"shape",
")",
":",
"out",
".",
"create_variable",
"(",
"values",
"=",
"v",
"[",
":",
"]",
",",
"*",
"*",
"v",
".",
"attrs",
")",
"out",
".",
"transform",
"(",
"*",
"self",
".",
"axis_expressions",
")",
"# interpolate",
"if",
"self",
".",
"ndim",
"==",
"1",
":",
"def",
"interpolate",
"(",
"dataset",
",",
"points",
")",
":",
"function",
"=",
"scipy",
".",
"interpolate",
".",
"interp1d",
"(",
"variable",
"[",
":",
"]",
",",
"dataset",
"[",
":",
"]",
",",
"bounds_error",
"=",
"False",
")",
"return",
"function",
"(",
"points",
")",
"else",
":",
"pts",
"=",
"np",
".",
"array",
"(",
"[",
"a",
".",
"full",
".",
"flatten",
"(",
")",
"for",
"a",
"in",
"self",
".",
"axes",
"]",
")",
".",
"T",
"out_pts",
"=",
"np",
".",
"array",
"(",
"[",
"a",
".",
"full",
".",
"flatten",
"(",
")",
"for",
"a",
"in",
"out",
".",
"axes",
"]",
")",
".",
"T",
"def",
"interpolate",
"(",
"dataset",
",",
"points",
")",
":",
"values",
"=",
"dataset",
".",
"full",
".",
"flatten",
"(",
")",
"function",
"=",
"scipy",
".",
"interpolate",
".",
"LinearNDInterpolator",
"(",
"pts",
",",
"values",
",",
"rescale",
"=",
"True",
")",
"new",
"=",
"function",
"(",
"out_pts",
")",
"new",
".",
"shape",
"=",
"out",
".",
"shape",
"return",
"new",
"for",
"v",
"in",
"self",
".",
"variables",
":",
"if",
"v",
".",
"natural_name",
"not",
"in",
"out",
".",
"variable_names",
":",
"out",
".",
"create_variable",
"(",
"values",
"=",
"interpolate",
"(",
"v",
",",
"points",
")",
",",
"*",
"*",
"v",
".",
"attrs",
")",
"out",
".",
"variable_names",
"=",
"self",
".",
"variable_names",
"# enforce old order",
"out",
".",
"_variables",
"=",
"None",
"# force regeneration of variables @property",
"for",
"channel",
"in",
"self",
".",
"channels",
":",
"out",
".",
"create_channel",
"(",
"values",
"=",
"interpolate",
"(",
"channel",
",",
"points",
")",
",",
"*",
"*",
"channel",
".",
"attrs",
")",
"# finish",
"if",
"verbose",
":",
"print",
"(",
"\"data mapped from {0} to {1}\"",
".",
"format",
"(",
"self",
".",
"shape",
",",
"out",
".",
"shape",
")",
")",
"return",
"out"
]
| Map points of an axis to new points using linear interpolation.
Out-of-bounds points are written as nan.
Parameters
----------
variable : string
The variable to map onto.
points : array-like or int
If array, the new points. If int, new points will have the same
limits, with int defining the number of evenly spaced points
between.
input_units : str (optional)
The units of the new points. Default is same, which assumes
the new points have the same units as the axis.
name : string (optional)
The name of the new data object. If None, generated from
natural_name. Default is None.
parent : WrightTools.Collection (optional)
Parent of new data object. If None, data is made at root of a
new temporary file.
verbose : bool (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools.Data
New data object. | [
"Map",
"points",
"of",
"an",
"axis",
"to",
"new",
"points",
"using",
"linear",
"interpolation",
"."
]
| python | train |
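A hedged usage sketch for map_variable above, assuming a WrightTools data object named data that carries a variable called 'w1' (both names are illustrative):

    # Resample onto 51 evenly spaced points along the 'w1' variable.
    mapped = data.map_variable('w1', 51, verbose=False)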
PmagPy/PmagPy | programs/replace_ac_specimens.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/replace_ac_specimens.py#L6-L96 | def main():
"""
NAME
replace_AC_specimens.py
DESCRIPTION
finds anisotropy corrected data and
replaces that specimen with it.
puts in pmag_specimen format file
SYNTAX
replace_AC_specimens.py [command line options]
OPTIONS
-h prints help message and quits
-i allows interactive setting of file names
-fu TFILE uncorrected pmag_specimen format file with thellier interpretations
created by thellier_magic_redo.py
-fc AFILE anisotropy corrected pmag_specimen format file
created by thellier_magic_redo.py
-F FILE pmag_specimens format output file
DEFAULTS
TFILE: thellier_specimens.txt
AFILE: AC_specimens.txt
FILE: TorAC_specimens.txt
"""
dir_path='.'
tspec="thellier_specimens.txt"
aspec="AC_specimens.txt"
ofile="TorAC_specimens.txt"
critfile="pmag_criteria.txt"
ACSamplist,Samplist,sigmin=[],[],10000
GoodSamps,SpecOuts=[],[]
# get arguments from command line
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-fu' in sys.argv:
ind=sys.argv.index('-fu')
tspec=sys.argv[ind+1]
if '-fc' in sys.argv:
ind=sys.argv.index('-fc')
aspec=sys.argv[ind+1]
if '-F' in sys.argv:
ind=sys.argv.index('-F')
ofile=sys.argv[ind+1]
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
# read in pmag_specimens file
tspec=dir_path+'/'+tspec
aspec=dir_path+'/'+aspec
ofile=dir_path+'/'+ofile
    Specs,file_type=pmag.magic_read(tspec)
Speclist=pmag.get_specs(Specs)
ACSpecs,file_type=pmag.magic_read(aspec)
ACspeclist=pmag.get_specs(ACSpecs)
for spec in Specs:
if spec["er_sample_name"] not in Samplist:Samplist.append(spec["er_sample_name"])
for spec in ACSpecs:
if spec["er_sample_name"] not in ACSamplist:ACSamplist.append(spec["er_sample_name"])
#
for samp in Samplist:
useAC,Ints,ACInts,GoodSpecs,AC,UC=0,[],[],[],[],[]
for spec in Specs:
if spec["er_sample_name"].lower()==samp.lower():
UC.append(spec)
if samp in ACSamplist:
for spec in ACSpecs:
if spec["er_sample_name"].lower()==samp.lower():
AC.append(spec)
if len(AC)>0:
AClist=[]
for spec in AC:
SpecOuts.append(spec)
AClist.append(spec['er_specimen_name'])
print('using AC: ',spec['er_specimen_name'],'%7.1f'%(1e6*float(spec['specimen_int'])))
for spec in UC:
if spec['er_specimen_name'] not in AClist:
SpecOuts.append(spec)
# print 'using UC: ',spec['er_specimen_name'],'%7.1f'%(1e6*float(spec['specimen_int']))
else:
for spec in UC:
SpecOuts.append(spec)
# print 'using UC: ',spec['er_specimen_name'],'%7.1f'%(1e6*float(spec['specimen_int']))
SpecOuts,keys=pmag.fillkeys(SpecOuts)
pmag.magic_write(ofile,SpecOuts,'pmag_specimens')
print('thellier data assessed for AC correction put in ', ofile) | [
"def",
"main",
"(",
")",
":",
"dir_path",
"=",
"'.'",
"tspec",
"=",
"\"thellier_specimens.txt\"",
"aspec",
"=",
"\"AC_specimens.txt\"",
"ofile",
"=",
"\"TorAC_specimens.txt\"",
"critfile",
"=",
"\"pmag_criteria.txt\"",
"ACSamplist",
",",
"Samplist",
",",
"sigmin",
"=",
"[",
"]",
",",
"[",
"]",
",",
"10000",
"GoodSamps",
",",
"SpecOuts",
"=",
"[",
"]",
",",
"[",
"]",
"# get arguments from command line",
"if",
"'-h'",
"in",
"sys",
".",
"argv",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"'-fu'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-fu'",
")",
"tspec",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"if",
"'-fc'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-fc'",
")",
"aspec",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"if",
"'-F'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-F'",
")",
"ofile",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"if",
"'-WD'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-WD'",
")",
"dir_path",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"# read in pmag_specimens file",
"tspec",
"=",
"dir_path",
"+",
"'/'",
"+",
"tspec",
"aspec",
"=",
"dir_path",
"+",
"'/'",
"+",
"aspec",
"ofile",
"=",
"dir_path",
"+",
"'/'",
"+",
"ofile",
"Specs",
",",
"file_type",
"=",
"pmag",
".",
"magic_read",
"(",
"tspec",
")",
"Specs",
",",
"file_type",
"=",
"pmag",
".",
"magic_read",
"(",
"tspec",
")",
"Speclist",
"=",
"pmag",
".",
"get_specs",
"(",
"Specs",
")",
"ACSpecs",
",",
"file_type",
"=",
"pmag",
".",
"magic_read",
"(",
"aspec",
")",
"ACspeclist",
"=",
"pmag",
".",
"get_specs",
"(",
"ACSpecs",
")",
"for",
"spec",
"in",
"Specs",
":",
"if",
"spec",
"[",
"\"er_sample_name\"",
"]",
"not",
"in",
"Samplist",
":",
"Samplist",
".",
"append",
"(",
"spec",
"[",
"\"er_sample_name\"",
"]",
")",
"for",
"spec",
"in",
"ACSpecs",
":",
"if",
"spec",
"[",
"\"er_sample_name\"",
"]",
"not",
"in",
"ACSamplist",
":",
"ACSamplist",
".",
"append",
"(",
"spec",
"[",
"\"er_sample_name\"",
"]",
")",
"#",
"for",
"samp",
"in",
"Samplist",
":",
"useAC",
",",
"Ints",
",",
"ACInts",
",",
"GoodSpecs",
",",
"AC",
",",
"UC",
"=",
"0",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"for",
"spec",
"in",
"Specs",
":",
"if",
"spec",
"[",
"\"er_sample_name\"",
"]",
".",
"lower",
"(",
")",
"==",
"samp",
".",
"lower",
"(",
")",
":",
"UC",
".",
"append",
"(",
"spec",
")",
"if",
"samp",
"in",
"ACSamplist",
":",
"for",
"spec",
"in",
"ACSpecs",
":",
"if",
"spec",
"[",
"\"er_sample_name\"",
"]",
".",
"lower",
"(",
")",
"==",
"samp",
".",
"lower",
"(",
")",
":",
"AC",
".",
"append",
"(",
"spec",
")",
"if",
"len",
"(",
"AC",
")",
">",
"0",
":",
"AClist",
"=",
"[",
"]",
"for",
"spec",
"in",
"AC",
":",
"SpecOuts",
".",
"append",
"(",
"spec",
")",
"AClist",
".",
"append",
"(",
"spec",
"[",
"'er_specimen_name'",
"]",
")",
"print",
"(",
"'using AC: '",
",",
"spec",
"[",
"'er_specimen_name'",
"]",
",",
"'%7.1f'",
"%",
"(",
"1e6",
"*",
"float",
"(",
"spec",
"[",
"'specimen_int'",
"]",
")",
")",
")",
"for",
"spec",
"in",
"UC",
":",
"if",
"spec",
"[",
"'er_specimen_name'",
"]",
"not",
"in",
"AClist",
":",
"SpecOuts",
".",
"append",
"(",
"spec",
")",
"# print 'using UC: ',spec['er_specimen_name'],'%7.1f'%(1e6*float(spec['specimen_int']))",
"else",
":",
"for",
"spec",
"in",
"UC",
":",
"SpecOuts",
".",
"append",
"(",
"spec",
")",
"# print 'using UC: ',spec['er_specimen_name'],'%7.1f'%(1e6*float(spec['specimen_int']))",
"SpecOuts",
",",
"keys",
"=",
"pmag",
".",
"fillkeys",
"(",
"SpecOuts",
")",
"pmag",
".",
"magic_write",
"(",
"ofile",
",",
"SpecOuts",
",",
"'pmag_specimens'",
")",
"print",
"(",
"'thellier data assessed for AC correction put in '",
",",
"ofile",
")"
]
| NAME
replace_AC_specimens.py
DESCRIPTION
finds anisotropy corrected data and
replaces that specimen with it.
puts in pmag_specimen format file
SYNTAX
replace_AC_specimens.py [command line options]
OPTIONS
-h prints help message and quits
-i allows interactive setting of file names
-fu TFILE uncorrected pmag_specimen format file with thellier interpretations
created by thellier_magic_redo.py
-fc AFILE anisotropy corrected pmag_specimen format file
created by thellier_magic_redo.py
-F FILE pmag_specimens format output file
DEFAULTS
TFILE: thellier_specimens.txt
AFILE: AC_specimens.txt
FILE: TorAC_specimens.txt | [
"NAME",
"replace_AC_specimens",
".",
"py",
"DESCRIPTION",
"finds",
"anisotropy",
"corrected",
"data",
"and",
"replaces",
"that",
"specimen",
"with",
"it",
".",
"puts",
"in",
"pmag_specimen",
"format",
"file",
"SYNTAX",
"replace_AC_specimens",
".",
"py",
"[",
"command",
"line",
"options",
"]"
]
| python | train |
m32/endesive | endesive/pdf/fpdf/html.py | https://github.com/m32/endesive/blob/973091dc69847fe2df594c80ac9235a8d08460ff/endesive/pdf/fpdf/html.py#L397-L401 | def write_html(self, text, image_map=None):
"Parse HTML and convert it to PDF"
h2p = HTML2FPDF(self, image_map)
text = h2p.unescape(text) # To deal with HTML entities
h2p.feed(text) | [
"def",
"write_html",
"(",
"self",
",",
"text",
",",
"image_map",
"=",
"None",
")",
":",
"h2p",
"=",
"HTML2FPDF",
"(",
"self",
",",
"image_map",
")",
"text",
"=",
"h2p",
".",
"unescape",
"(",
"text",
")",
"# To deal with HTML entities",
"h2p",
".",
"feed",
"(",
"text",
")"
]
| Parse HTML and convert it to PDF | [
"Parse",
"HTML",
"and",
"convert",
"it",
"to",
"PDF"
]
| python | train |
pypa/pipenv | pipenv/utils.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/utils.py#L1505-L1530 | def handle_remove_readonly(func, path, exc):
"""Error handler for shutil.rmtree.
Windows source repo folders are read-only by default, so this error handler
attempts to set them as writeable and then proceed with deletion."""
# Check for read-only attribute
default_warning_message = (
"Unable to remove file due to permissions restriction: {!r}"
)
# split the initial exception out into its type, exception, and traceback
exc_type, exc_exception, exc_tb = exc
if is_readonly_path(path):
# Apply write permission and call original function
set_write_bit(path)
try:
func(path)
except (OSError, IOError) as e:
if e.errno in [errno.EACCES, errno.EPERM]:
warnings.warn(default_warning_message.format(path), ResourceWarning)
return
if exc_exception.errno in [errno.EACCES, errno.EPERM]:
warnings.warn(default_warning_message.format(path), ResourceWarning)
return
raise exc | [
"def",
"handle_remove_readonly",
"(",
"func",
",",
"path",
",",
"exc",
")",
":",
"# Check for read-only attribute",
"default_warning_message",
"=",
"(",
"\"Unable to remove file due to permissions restriction: {!r}\"",
")",
"# split the initial exception out into its type, exception, and traceback",
"exc_type",
",",
"exc_exception",
",",
"exc_tb",
"=",
"exc",
"if",
"is_readonly_path",
"(",
"path",
")",
":",
"# Apply write permission and call original function",
"set_write_bit",
"(",
"path",
")",
"try",
":",
"func",
"(",
"path",
")",
"except",
"(",
"OSError",
",",
"IOError",
")",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"in",
"[",
"errno",
".",
"EACCES",
",",
"errno",
".",
"EPERM",
"]",
":",
"warnings",
".",
"warn",
"(",
"default_warning_message",
".",
"format",
"(",
"path",
")",
",",
"ResourceWarning",
")",
"return",
"if",
"exc_exception",
".",
"errno",
"in",
"[",
"errno",
".",
"EACCES",
",",
"errno",
".",
"EPERM",
"]",
":",
"warnings",
".",
"warn",
"(",
"default_warning_message",
".",
"format",
"(",
"path",
")",
",",
"ResourceWarning",
")",
"return",
"raise",
"exc"
]
| Error handler for shutil.rmtree.
Windows source repo folders are read-only by default, so this error handler
attempts to set them as writeable and then proceed with deletion. | [
"Error",
"handler",
"for",
"shutil",
".",
"rmtree",
"."
]
| python | train |
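handle_remove_readonly above matches the (function, path, excinfo) signature that shutil.rmtree expects for its onerror callback, so a typical call looks like this; the directory name is illustrative:

    import shutil

    # Remove a tree that may contain read-only entries (common on Windows).
    shutil.rmtree('build_dir', onerror=handle_remove_readonly)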
useblocks/groundwork | groundwork/pluginmanager.py | https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/pluginmanager.py#L229-L243 | def get(self, name=None):
"""
Returns the plugin object with the given name.
Or if a name is not given, the complete plugin dictionary is returned.
:param name: Name of a plugin
:return: None, single plugin or dictionary of plugins
"""
if name is None:
return self._plugins
else:
if name not in self._plugins.keys():
return None
else:
return self._plugins[name] | [
"def",
"get",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"return",
"self",
".",
"_plugins",
"else",
":",
"if",
"name",
"not",
"in",
"self",
".",
"_plugins",
".",
"keys",
"(",
")",
":",
"return",
"None",
"else",
":",
"return",
"self",
".",
"_plugins",
"[",
"name",
"]"
]
| Returns the plugin object with the given name.
Or if a name is not given, the complete plugin dictionary is returned.
:param name: Name of a plugin
:return: None, single plugin or dictionary of plugins | [
"Returns",
"the",
"plugin",
"object",
"with",
"the",
"given",
"name",
".",
"Or",
"if",
"a",
"name",
"is",
"not",
"given",
"the",
"complete",
"plugin",
"dictionary",
"is",
"returned",
"."
]
| python | train |
smartmob-project/smartmob-agent | smartmob_agent/__init__.py | https://github.com/smartmob-project/smartmob-agent/blob/4039f577ab7230d135f00df68c611a51e45ddbc7/smartmob_agent/__init__.py#L73-L84 | async def inject_request_id(app, handler):
"""aiohttp middleware: ensures each request has a unique request ID.
See: ``inject_request_id``.
"""
async def trace_request(request):
request['x-request-id'] = \
request.headers.get('x-request-id') or str(uuid.uuid4())
return await handler(request)
return trace_request | [
"async",
"def",
"inject_request_id",
"(",
"app",
",",
"handler",
")",
":",
"async",
"def",
"trace_request",
"(",
"request",
")",
":",
"request",
"[",
"'x-request-id'",
"]",
"=",
"request",
".",
"headers",
".",
"get",
"(",
"'x-request-id'",
")",
"or",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"return",
"await",
"handler",
"(",
"request",
")",
"return",
"trace_request"
]
| aiohttp middleware: ensures each request has a unique request ID.
See: ``inject_request_id``. | [
"aiohttp",
"middleware",
":",
"ensures",
"each",
"request",
"has",
"a",
"unique",
"request",
"ID",
"."
]
| python | train |
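A usage sketch for inject_request_id above, assuming the factory-style (app, handler) middlewares that the aiohttp versions of this era accepted:

    from aiohttp import web

    # Every handler can then read request['x-request-id'].
    app = web.Application(middlewares=[inject_request_id])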
refenv/cijoe | modules/cij/reporter.py | https://github.com/refenv/cijoe/blob/21d7b2ed4ff68e0a1457e7df2db27f6334f1a379/modules/cij/reporter.py#L34-L62 | def tcase_comment(tcase):
"""
Extract testcase comment section / testcase description
@returns the testcase-comment from the tcase["fpath"] as a list of strings
"""
src = open(tcase["fpath"]).read()
if len(src) < 3:
cij.err("rprtr::tcase_comment: invalid src, tcase: %r" % tcase["name"])
return None
ext = os.path.splitext(tcase["fpath"])[-1]
if ext not in [".sh", ".py"]:
cij.err("rprtr::tcase_comment: invalid ext: %r, tcase: %r" % (
ext, tcase["name"]
))
return None
comment = []
for line in src.splitlines()[2:]:
if ext == ".sh" and not line.startswith("#"):
break
elif ext == ".py" and not '"""' in line:
break
comment.append(line)
return comment | [
"def",
"tcase_comment",
"(",
"tcase",
")",
":",
"src",
"=",
"open",
"(",
"tcase",
"[",
"\"fpath\"",
"]",
")",
".",
"read",
"(",
")",
"if",
"len",
"(",
"src",
")",
"<",
"3",
":",
"cij",
".",
"err",
"(",
"\"rprtr::tcase_comment: invalid src, tcase: %r\"",
"%",
"tcase",
"[",
"\"name\"",
"]",
")",
"return",
"None",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"tcase",
"[",
"\"fpath\"",
"]",
")",
"[",
"-",
"1",
"]",
"if",
"ext",
"not",
"in",
"[",
"\".sh\"",
",",
"\".py\"",
"]",
":",
"cij",
".",
"err",
"(",
"\"rprtr::tcase_comment: invalid ext: %r, tcase: %r\"",
"%",
"(",
"ext",
",",
"tcase",
"[",
"\"name\"",
"]",
")",
")",
"return",
"None",
"comment",
"=",
"[",
"]",
"for",
"line",
"in",
"src",
".",
"splitlines",
"(",
")",
"[",
"2",
":",
"]",
":",
"if",
"ext",
"==",
"\".sh\"",
"and",
"not",
"line",
".",
"startswith",
"(",
"\"#\"",
")",
":",
"break",
"elif",
"ext",
"==",
"\".py\"",
"and",
"not",
"'\"\"\"'",
"in",
"line",
":",
"break",
"comment",
".",
"append",
"(",
"line",
")",
"return",
"comment"
]
| Extract testcase comment section / testcase description
@returns the testcase-comment from the tcase["fpath"] as a list of strings | [
"Extract",
"testcase",
"comment",
"section",
"/",
"testcase",
"description"
]
| python | valid |
MrYsLab/PyMata | PyMata/pymata.py | https://github.com/MrYsLab/PyMata/blob/7e0ec34670b5a0d3d6b74bcbe4f3808c845cc429/PyMata/pymata.py#L764-L782 | def set_digital_latch(self, pin, threshold_type, cb=None):
"""
This method "arms" a digital pin for its data to be latched and saved in the latching table
If a callback method is provided, when latching criteria is achieved, the callback function is called
with latching data notification. In that case, the latching table is not updated.
:param pin: Digital pin number
:param threshold_type: DIGITAL_LATCH_HIGH | DIGITAL_LATCH_LOW
:param cb: callback function
:return: True if successful, False if parameter data is invalid
"""
if 0 <= threshold_type <= 1:
self._command_handler.set_digital_latch(pin, threshold_type, cb)
return True
else:
return False | [
"def",
"set_digital_latch",
"(",
"self",
",",
"pin",
",",
"threshold_type",
",",
"cb",
"=",
"None",
")",
":",
"if",
"0",
"<=",
"threshold_type",
"<=",
"1",
":",
"self",
".",
"_command_handler",
".",
"set_digital_latch",
"(",
"pin",
",",
"threshold_type",
",",
"cb",
")",
"return",
"True",
"else",
":",
"return",
"False"
]
| This method "arms" a digital pin for its data to be latched and saved in the latching table
If a callback method is provided, when latching criteria is achieved, the callback function is called
with latching data notification. In that case, the latching table is not updated.
:param pin: Digital pin number
:param threshold_type: DIGITAL_LATCH_HIGH | DIGITAL_LATCH_LOW
:param cb: callback function
:return: True if successful, False if parameter data is invalid | [
"This",
"method",
"arms",
"a",
"digital",
"pin",
"for",
"its",
"data",
"to",
"be",
"latched",
"and",
"saved",
"in",
"the",
"latching",
"table",
"If",
"a",
"callback",
"method",
"is",
"provided",
"when",
"latching",
"criteria",
"is",
"achieved",
"the",
"callback",
"function",
"is",
"called",
"with",
"latching",
"data",
"notification",
".",
"In",
"that",
"case",
"the",
"latching",
"table",
"is",
"not",
"updated",
"."
]
| python | valid |
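A hedged usage sketch for set_digital_latch above; the serial port, pin number, and the PyMata.DIGITAL_LATCH_HIGH constant follow the docstring but are assumptions here:

    from PyMata.pymata import PyMata

    def on_latch(data):
        # Invoked once the latching criteria are met.
        print('latched:', data)

    board = PyMata('/dev/ttyACM0')
    board.set_digital_latch(12, PyMata.DIGITAL_LATCH_HIGH, cb=on_latch)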
uploadcare/pyuploadcare | pyuploadcare/api_resources.py | https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L267-L287 | def create_local_copy(self, effects=None, store=None):
"""Creates a Local File Copy on Uploadcare Storage.
Args:
- effects:
Adds CDN image effects. If ``self.default_effects`` property
is set effects will be combined with default effects.
- store:
If ``store`` option is set to False the copy of your file will
be deleted in 24 hour period after the upload.
Works only if `autostore` is enabled in the project.
"""
effects = self._build_effects(effects)
store = store or ''
data = {
'source': self.cdn_path(effects)
}
if store:
data['store'] = store
return rest_request('POST', 'files/', data=data) | [
"def",
"create_local_copy",
"(",
"self",
",",
"effects",
"=",
"None",
",",
"store",
"=",
"None",
")",
":",
"effects",
"=",
"self",
".",
"_build_effects",
"(",
"effects",
")",
"store",
"=",
"store",
"or",
"''",
"data",
"=",
"{",
"'source'",
":",
"self",
".",
"cdn_path",
"(",
"effects",
")",
"}",
"if",
"store",
":",
"data",
"[",
"'store'",
"]",
"=",
"store",
"return",
"rest_request",
"(",
"'POST'",
",",
"'files/'",
",",
"data",
"=",
"data",
")"
]
| Creates a Local File Copy on Uploadcare Storage.
Args:
- effects:
Adds CDN image effects. If the ``self.default_effects`` property
is set, effects will be combined with the default effects.
- store:
If the ``store`` option is set to False, the copy of your file will
be deleted within a 24-hour period after the upload.
Works only if `autostore` is enabled in the project. | [
"Creates",
"a",
"Local",
"File",
"Copy",
"on",
"Uploadcare",
"Storage",
"."
]
| python | test |
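A minimal sketch of calling create_local_copy above; file_ stands for an existing pyuploadcare File instance and the effect string is illustrative:

    # Ask Uploadcare to store a resized copy of the file.
    copy_info = file_.create_local_copy(effects='resize/200x200/')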
bitesofcode/projexui | projexui/widgets/xorbtreewidget/xorbrecorditem.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbtreewidget/xorbrecorditem.py#L320-L360 | def updateColumnValue(self, column, value, index=None):
"""
Assigns the value for the column of this record to the inputed value.
:param index | <int>
value | <variant>
"""
if index is None:
index = self.treeWidget().column(column.name())
if type(value) == datetime.date:
self.setData(index, Qt.EditRole, wrapVariant(value))
elif type(value) == datetime.time:
self.setData(index, Qt.EditRole, wrapVariant(value))
elif type(value) == datetime.datetime:
self.setData(index, Qt.EditRole, wrapVariant(value))
elif type(value) in (float, int):
if column.enum():
self.setText(index, column.enum().displayText(value))
else:
self.setData(index, Qt.EditRole, wrapVariant(value))
elif value is not None:
self.setText(index, nativestring(value))
else:
self.setText(index, '')
self.setSortData(index, value)
# map default value information
try:
mapper = self.treeWidget().columnMappers().get(column.columnName())
except AttributeError:
mapper = None
if mapper is None:
form = column.stringFormat()
if form:
mapper = form.format
if mapper:
self.setText(index, mapper(value)) | [
"def",
"updateColumnValue",
"(",
"self",
",",
"column",
",",
"value",
",",
"index",
"=",
"None",
")",
":",
"if",
"index",
"is",
"None",
":",
"index",
"=",
"self",
".",
"treeWidget",
"(",
")",
".",
"column",
"(",
"column",
".",
"name",
"(",
")",
")",
"if",
"type",
"(",
"value",
")",
"==",
"datetime",
".",
"date",
":",
"self",
".",
"setData",
"(",
"index",
",",
"Qt",
".",
"EditRole",
",",
"wrapVariant",
"(",
"value",
")",
")",
"elif",
"type",
"(",
"value",
")",
"==",
"datetime",
".",
"time",
":",
"self",
".",
"setData",
"(",
"index",
",",
"Qt",
".",
"EditRole",
",",
"wrapVariant",
"(",
"value",
")",
")",
"elif",
"type",
"(",
"value",
")",
"==",
"datetime",
".",
"datetime",
":",
"self",
".",
"setData",
"(",
"index",
",",
"Qt",
".",
"EditRole",
",",
"wrapVariant",
"(",
"value",
")",
")",
"elif",
"type",
"(",
"value",
")",
"in",
"(",
"float",
",",
"int",
")",
":",
"if",
"column",
".",
"enum",
"(",
")",
":",
"self",
".",
"setText",
"(",
"index",
",",
"column",
".",
"enum",
"(",
")",
".",
"displayText",
"(",
"value",
")",
")",
"else",
":",
"self",
".",
"setData",
"(",
"index",
",",
"Qt",
".",
"EditRole",
",",
"wrapVariant",
"(",
"value",
")",
")",
"elif",
"value",
"is",
"not",
"None",
":",
"self",
".",
"setText",
"(",
"index",
",",
"nativestring",
"(",
"value",
")",
")",
"else",
":",
"self",
".",
"setText",
"(",
"index",
",",
"''",
")",
"self",
".",
"setSortData",
"(",
"index",
",",
"value",
")",
"# map default value information\r",
"try",
":",
"mapper",
"=",
"self",
".",
"treeWidget",
"(",
")",
".",
"columnMappers",
"(",
")",
".",
"get",
"(",
"column",
".",
"columnName",
"(",
")",
")",
"except",
"AttributeError",
":",
"mapper",
"=",
"None",
"if",
"mapper",
"is",
"None",
":",
"form",
"=",
"column",
".",
"stringFormat",
"(",
")",
"if",
"form",
":",
"mapper",
"=",
"form",
".",
"format",
"if",
"mapper",
":",
"self",
".",
"setText",
"(",
"index",
",",
"mapper",
"(",
"value",
")",
")"
]
| Assigns the value for the column of this record to the inputed value.
:param index | <int>
value | <variant> | [
"Assigns",
"the",
"value",
"for",
"the",
"column",
"of",
"this",
"record",
"to",
"the",
"inputed",
"value",
".",
":",
"param",
"index",
"|",
"<int",
">",
"value",
"|",
"<variant",
">"
]
| python | train |
janpipek/physt | physt/util.py | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/util.py#L21-L34 | def find_subclass(base: type, name: str) -> type:
"""Find a named subclass of a base class.
Uses only the class name without namespace.
"""
class_candidates = [klass
for klass in all_subclasses(base)
if klass.__name__ == name
]
if len(class_candidates) == 0:
raise RuntimeError("No \"{0}\" subclass of \"{1}\".".format(base.__name__, name))
elif len(class_candidates) > 1:
raise RuntimeError("Multiple \"{0}\" subclasses of \"{1}\".".format(base.__name__, name))
return class_candidates[0] | [
"def",
"find_subclass",
"(",
"base",
":",
"type",
",",
"name",
":",
"str",
")",
"->",
"type",
":",
"class_candidates",
"=",
"[",
"klass",
"for",
"klass",
"in",
"all_subclasses",
"(",
"base",
")",
"if",
"klass",
".",
"__name__",
"==",
"name",
"]",
"if",
"len",
"(",
"class_candidates",
")",
"==",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"No \\\"{0}\\\" subclass of \\\"{1}\\\".\"",
".",
"format",
"(",
"base",
".",
"__name__",
",",
"name",
")",
")",
"elif",
"len",
"(",
"class_candidates",
")",
">",
"1",
":",
"raise",
"RuntimeError",
"(",
"\"Multiple \\\"{0}\\\" subclasses of \\\"{1}\\\".\"",
".",
"format",
"(",
"base",
".",
"__name__",
",",
"name",
")",
")",
"return",
"class_candidates",
"[",
"0",
"]"
]
| Find a named subclass of a base class.
Uses only the class name without namespace. | [
"Find",
"a",
"named",
"subclass",
"of",
"a",
"base",
"class",
"."
]
| python | train |
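A self-contained sketch of find_subclass above, assuming it is imported together with its all_subclasses helper from physt.util:

    class Base:
        pass

    class Special(Base):
        pass

    # Resolves the class by bare name, without a module path.
    assert find_subclass(Base, 'Special') is Special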
fermiPy/fermipy | fermipy/jobs/target_sim.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/target_sim.py#L97-L109 | def run_analysis(self, argv):
"""Run this analysis"""
args = self._parser.parse_args(argv)
name_keys = dict(target_type=args.ttype,
target_name=args.target,
sim_name=args.sim,
fullpath=True)
orig_dir = NAME_FACTORY.targetdir(**name_keys)
dest_dir = NAME_FACTORY.sim_targetdir(**name_keys)
self.copy_target_dir(orig_dir, dest_dir,
args.roi_baseline, args.extracopy) | [
"def",
"run_analysis",
"(",
"self",
",",
"argv",
")",
":",
"args",
"=",
"self",
".",
"_parser",
".",
"parse_args",
"(",
"argv",
")",
"name_keys",
"=",
"dict",
"(",
"target_type",
"=",
"args",
".",
"ttype",
",",
"target_name",
"=",
"args",
".",
"target",
",",
"sim_name",
"=",
"args",
".",
"sim",
",",
"fullpath",
"=",
"True",
")",
"orig_dir",
"=",
"NAME_FACTORY",
".",
"targetdir",
"(",
"*",
"*",
"name_keys",
")",
"dest_dir",
"=",
"NAME_FACTORY",
".",
"sim_targetdir",
"(",
"*",
"*",
"name_keys",
")",
"self",
".",
"copy_target_dir",
"(",
"orig_dir",
",",
"dest_dir",
",",
"args",
".",
"roi_baseline",
",",
"args",
".",
"extracopy",
")"
]
| Run this analysis | [
"Run",
"this",
"analysis"
]
| python | train |
Skype4Py/Skype4Py | Skype4Py/skype.py | https://github.com/Skype4Py/Skype4Py/blob/c48d83f7034109fe46315d45a066126002c6e0d4/Skype4Py/skype.py#L413-L425 | def Call(self, Id=0):
"""Queries a call object.
:Parameters:
Id : int
Call identifier.
:return: Call object.
:rtype: `call.Call`
"""
o = Call(self, Id)
o.Status # Test if such a call exists.
return o | [
"def",
"Call",
"(",
"self",
",",
"Id",
"=",
"0",
")",
":",
"o",
"=",
"Call",
"(",
"self",
",",
"Id",
")",
"o",
".",
"Status",
"# Test if such a call exists.",
"return",
"o"
]
| Queries a call object.
:Parameters:
Id : int
Call identifier.
:return: Call object.
:rtype: `call.Call` | [
"Queries",
"a",
"call",
"object",
"."
]
| python | train |
mjirik/imtools | imtools/sample_data.py | https://github.com/mjirik/imtools/blob/eb29fa59df0e0684d8334eb3bc5ef36ea46d1d3a/imtools/sample_data.py#L166-L191 | def checksum(path, hashfunc='md5'):
"""
Return checksum given by path. Wildcards can be used in the path. Function is strongly
dependent on checksumdir package by 'cakepietoast'.
:param path:
:param hashfunc:
:return:
"""
import checksumdir
hash_func = checksumdir.HASH_FUNCS.get(hashfunc)
if not hash_func:
raise NotImplementedError('{} not implemented.'.format(hashfunc))
if os.path.isdir(path):
return checksumdir.dirhash(path, hashfunc=hashfunc)
hashvalues = []
path_list = glob.glob(path)
logger.debug("path_list " + str(path_list))
for path in path_list:
if os.path.isfile(path):
hashvalues.append(checksumdir._filehash(path, hashfunc=hash_func))
logger.debug(str(hashvalues))
hash = checksumdir._reduce_hash(hashvalues, hashfunc=hash_func)
return hash | [
"def",
"checksum",
"(",
"path",
",",
"hashfunc",
"=",
"'md5'",
")",
":",
"import",
"checksumdir",
"hash_func",
"=",
"checksumdir",
".",
"HASH_FUNCS",
".",
"get",
"(",
"hashfunc",
")",
"if",
"not",
"hash_func",
":",
"raise",
"NotImplementedError",
"(",
"'{} not implemented.'",
".",
"format",
"(",
"hashfunc",
")",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"return",
"checksumdir",
".",
"dirhash",
"(",
"path",
",",
"hashfunc",
"=",
"hashfunc",
")",
"hashvalues",
"=",
"[",
"]",
"path_list",
"=",
"glob",
".",
"glob",
"(",
"path",
")",
"logger",
".",
"debug",
"(",
"\"path_list \"",
"+",
"str",
"(",
"path_list",
")",
")",
"for",
"path",
"in",
"path_list",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"hashvalues",
".",
"append",
"(",
"checksumdir",
".",
"_filehash",
"(",
"path",
",",
"hashfunc",
"=",
"hash_func",
")",
")",
"logger",
".",
"debug",
"(",
"str",
"(",
"hashvalues",
")",
")",
"hash",
"=",
"checksumdir",
".",
"_reduce_hash",
"(",
"hashvalues",
",",
"hashfunc",
"=",
"hash_func",
")",
"return",
"hash"
]
| Return checksum given by path. Wildcards can be used in the path. Function is strongly
dependent on checksumdir package by 'cakepietoast'.
:param path:
:param hashfunc:
:return: | [
"Return",
"checksum",
"given",
"by",
"path",
".",
"Wildcards",
"can",
"be",
"used",
"in",
"check",
"sum",
".",
"Function",
"is",
"strongly",
"dependent",
"on",
"checksumdir",
"package",
"by",
"cakepietoast",
"."
]
| python | train |
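A usage sketch for checksum above; the glob pattern is illustrative:

    # Hash every file matching the pattern into one combined digest.
    print(checksum('/data/series/*.dcm', hashfunc='md5'))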
quantumlib/Cirq | cirq/circuits/text_diagram_drawer.py | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/text_diagram_drawer.py#L89-L105 | def content_present(self, x: int, y: int) -> bool:
"""Determines if a line or printed text is at the given location."""
# Text?
if (x, y) in self.entries:
return True
# Vertical line?
if any(v.x == x and v.y1 < y < v.y2 for v in self.vertical_lines):
return True
# Horizontal line?
if any(line_y == y and x1 < x < x2
for line_y, x1, x2, _ in self.horizontal_lines):
return True
return False | [
"def",
"content_present",
"(",
"self",
",",
"x",
":",
"int",
",",
"y",
":",
"int",
")",
"->",
"bool",
":",
"# Text?",
"if",
"(",
"x",
",",
"y",
")",
"in",
"self",
".",
"entries",
":",
"return",
"True",
"# Vertical line?",
"if",
"any",
"(",
"v",
".",
"x",
"==",
"x",
"and",
"v",
".",
"y1",
"<",
"y",
"<",
"v",
".",
"y2",
"for",
"v",
"in",
"self",
".",
"vertical_lines",
")",
":",
"return",
"True",
"# Horizontal line?",
"if",
"any",
"(",
"line_y",
"==",
"y",
"and",
"x1",
"<",
"x",
"<",
"x2",
"for",
"line_y",
",",
"x1",
",",
"x2",
",",
"_",
"in",
"self",
".",
"horizontal_lines",
")",
":",
"return",
"True",
"return",
"False"
]
| Determines if a line or printed text is at the given location. | [
"Determines",
"if",
"a",
"line",
"or",
"printed",
"text",
"is",
"at",
"the",
"given",
"location",
"."
]
| python | train |
brocade/pynos | pynos/versions/base/yang/brocade_ras.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/brocade_ras.py#L280-L291 | def bna_config_cmd_output_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
bna_config_cmd = ET.Element("bna_config_cmd")
config = bna_config_cmd
output = ET.SubElement(bna_config_cmd, "output")
status = ET.SubElement(output, "status")
status.text = kwargs.pop('status')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"bna_config_cmd_output_status",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"bna_config_cmd",
"=",
"ET",
".",
"Element",
"(",
"\"bna_config_cmd\"",
")",
"config",
"=",
"bna_config_cmd",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"bna_config_cmd",
",",
"\"output\"",
")",
"status",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"status\"",
")",
"status",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'status'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
]
| Auto Generated Code | [
"Auto",
"Generated",
"Code"
]
| python | train |
sontek/bulby | bulby/color.py | https://github.com/sontek/bulby/blob/a2e741f843ee8e361b50a6079601108bfbe52526/bulby/color.py#L90-L117 | def get_xy_from_hex(hex_value):
'''
Returns X, Y coordinates containing the closest available CIE 1931
based on the hex_value provided.
'''
red, green, blue = struct.unpack('BBB', codecs.decode(hex_value, 'hex'))
r = ((red + 0.055) / (1.0 + 0.055)) ** 2.4 if (red > 0.04045) else (red / 12.92) # pragma: noqa
g = ((green + 0.055) / (1.0 + 0.055)) ** 2.4 if (green > 0.04045) else (green / 12.92) # pragma: noqa
b = ((blue + 0.055) / (1.0 + 0.055)) ** 2.4 if (blue > 0.04045) else (blue / 12.92) # pragma: noqa
X = r * 0.4360747 + g * 0.3850649 + b * 0.0930804
Y = r * 0.2225045 + g * 0.7168786 + b * 0.0406169
Z = r * 0.0139322 + g * 0.0971045 + b * 0.7141733
if X + Y + Z == 0:
cx = cy = 0
else:
cx = X / (X + Y + Z)
cy = Y / (X + Y + Z)
# Check if the given XY value is within the colour reach of our lamps.
xy_point = XYPoint(cx, cy)
is_in_reach = in_lamp_reach(xy_point)
if not is_in_reach:
xy_point = get_closest_point_to_point(xy_point)
return xy_point | [
"def",
"get_xy_from_hex",
"(",
"hex_value",
")",
":",
"red",
",",
"green",
",",
"blue",
"=",
"struct",
".",
"unpack",
"(",
"'BBB'",
",",
"codecs",
".",
"decode",
"(",
"hex_value",
",",
"'hex'",
")",
")",
"r",
"=",
"(",
"(",
"red",
"+",
"0.055",
")",
"/",
"(",
"1.0",
"+",
"0.055",
")",
")",
"**",
"2.4",
"if",
"(",
"red",
">",
"0.04045",
")",
"else",
"(",
"red",
"/",
"12.92",
")",
"# pragma: noqa",
"g",
"=",
"(",
"(",
"green",
"+",
"0.055",
")",
"/",
"(",
"1.0",
"+",
"0.055",
")",
")",
"**",
"2.4",
"if",
"(",
"green",
">",
"0.04045",
")",
"else",
"(",
"green",
"/",
"12.92",
")",
"# pragma: noqa",
"b",
"=",
"(",
"(",
"blue",
"+",
"0.055",
")",
"/",
"(",
"1.0",
"+",
"0.055",
")",
")",
"**",
"2.4",
"if",
"(",
"blue",
">",
"0.04045",
")",
"else",
"(",
"blue",
"/",
"12.92",
")",
"# pragma: noqa",
"X",
"=",
"r",
"*",
"0.4360747",
"+",
"g",
"*",
"0.3850649",
"+",
"b",
"*",
"0.0930804",
"Y",
"=",
"r",
"*",
"0.2225045",
"+",
"g",
"*",
"0.7168786",
"+",
"b",
"*",
"0.0406169",
"Z",
"=",
"r",
"*",
"0.0139322",
"+",
"g",
"*",
"0.0971045",
"+",
"b",
"*",
"0.7141733",
"if",
"X",
"+",
"Y",
"+",
"Z",
"==",
"0",
":",
"cx",
"=",
"cy",
"=",
"0",
"else",
":",
"cx",
"=",
"X",
"/",
"(",
"X",
"+",
"Y",
"+",
"Z",
")",
"cy",
"=",
"Y",
"/",
"(",
"X",
"+",
"Y",
"+",
"Z",
")",
"# Check if the given XY value is within the colourreach of our lamps.",
"xy_point",
"=",
"XYPoint",
"(",
"cx",
",",
"cy",
")",
"is_in_reach",
"=",
"in_lamp_reach",
"(",
"xy_point",
")",
"if",
"not",
"is_in_reach",
":",
"xy_point",
"=",
"get_closest_point_to_point",
"(",
"xy_point",
")",
"return",
"xy_point"
]
| Returns X, Y coordinates containing the closest available CIE 1931
based on the hex_value provided. | [
"Returns",
"X",
"Y",
"coordinates",
"containing",
"the",
"closest",
"avilable",
"CIE",
"1931",
"based",
"on",
"the",
"hex_value",
"provided",
"."
]
| python | train |
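A hedged usage sketch for get_xy_from_hex above; bytes input suits the codecs.decode(hex_value, 'hex') call, and XYPoint is assumed to expose x and y attributes:

    # Pure red as a hex-encoded RGB triple.
    xy = get_xy_from_hex(b'ff0000')
    print(xy.x, xy.y)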
gitpython-developers/GitPython | git/repo/base.py | https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/repo/base.py#L986-L997 | def clone(self, path, progress=None, **kwargs):
"""Create a clone from this repository.
:param path: is the full path of the new repo (traditionally ends with ./<name>.git).
:param progress: See 'git.remote.Remote.push'.
:param kwargs:
* odbt = ObjectDatabase Type, allowing to determine the object database
implementation used by the returned Repo instance
* All remaining keyword arguments are given to the git-clone command
:return: ``git.Repo`` (the newly cloned repo)"""
return self._clone(self.git, self.common_dir, path, type(self.odb), progress, **kwargs) | [
"def",
"clone",
"(",
"self",
",",
"path",
",",
"progress",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_clone",
"(",
"self",
".",
"git",
",",
"self",
".",
"common_dir",
",",
"path",
",",
"type",
"(",
"self",
".",
"odb",
")",
",",
"progress",
",",
"*",
"*",
"kwargs",
")"
]
| Create a clone from this repository.
:param path: is the full path of the new repo (traditionally ends with ./<name>.git).
:param progress: See 'git.remote.Remote.push'.
:param kwargs:
* odbt = ObjectDatabase Type, allowing to determine the object database
implementation used by the returned Repo instance
* All remaining keyword arguments are given to the git-clone command
:return: ``git.Repo`` (the newly cloned repo) | [
"Create",
"a",
"clone",
"from",
"this",
"repository",
"."
]
| python | train |
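A minimal usage sketch for clone above; repo stands for an existing git.Repo instance and the destination path is illustrative:

    cloned = repo.clone('/tmp/myrepo.git')
    print(cloned.git_dir)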
pywbem/pywbem | attic/twisted_client.py | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/attic/twisted_client.py#L178-L249 | def methodcallPayload(self, methodname, obj, namespace, **kwargs):
"""Generate the XML payload for an extrinsic methodcall."""
if isinstance(obj, CIMInstanceName):
path = obj.copy()
path.host = None
path.namespace = None
localpath = cim_xml.LOCALINSTANCEPATH(
cim_xml.LOCALNAMESPACEPATH(
[cim_xml.NAMESPACE(ns)
for ns in namespace.split('/')]),
path.tocimxml())
else:
localpath = cim_xml.LOCALCLASSPATH(
cim_xml.LOCALNAMESPACEPATH(
[cim_xml.NAMESPACE(ns)
for ns in namespace.split('/')]),
obj)
def paramtype(obj):
"""Return a string to be used as the CIMTYPE for a parameter."""
if isinstance(obj, cim_types.CIMType):
return obj.cimtype
elif type(obj) == bool:
return 'boolean'
elif isinstance(obj, six.string_types):
return 'string'
elif isinstance(obj, (datetime, timedelta)):
return 'datetime'
elif isinstance(obj, (CIMClassName, CIMInstanceName)):
return 'reference'
elif isinstance(obj, (CIMClass, CIMInstance)):
return 'string'
elif isinstance(obj, list):
return paramtype(obj[0])
raise TypeError('Unsupported parameter type "%s"' % type(obj))
def paramvalue(obj):
"""Return a cim_xml node to be used as the value for a
parameter."""
if isinstance(obj, (datetime, timedelta)):
obj = CIMDateTime(obj)
if isinstance(obj, (cim_types.CIMType, bool, six.string_types)):
return cim_xml.VALUE(cim_types.atomic_to_cim_xml(obj))
if isinstance(obj, (CIMClassName, CIMInstanceName)):
return cim_xml.VALUE_REFERENCE(obj.tocimxml())
if isinstance(obj, (CIMClass, CIMInstance)):
return cim_xml.VALUE(obj.tocimxml().toxml())
if isinstance(obj, list):
if isinstance(obj[0], (CIMClassName, CIMInstanceName)):
return cim_xml.VALUE_REFARRAY([paramvalue(x) for x in obj])
return cim_xml.VALUE_ARRAY([paramvalue(x) for x in obj])
raise TypeError('Unsupported parameter type "%s"' % type(obj))
param_list = [cim_xml.PARAMVALUE(x[0],
paramvalue(x[1]),
paramtype(x[1]))
for x in kwargs.items()]
payload = cim_xml.CIM(
cim_xml.MESSAGE(
cim_xml.SIMPLEREQ(
cim_xml.METHODCALL(methodname,
localpath,
param_list)),
'1001', '1.0'),
'2.0', '2.0')
return self.xml_header + payload.toxml() | [
"def",
"methodcallPayload",
"(",
"self",
",",
"methodname",
",",
"obj",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"CIMInstanceName",
")",
":",
"path",
"=",
"obj",
".",
"copy",
"(",
")",
"path",
".",
"host",
"=",
"None",
"path",
".",
"namespace",
"=",
"None",
"localpath",
"=",
"cim_xml",
".",
"LOCALINSTANCEPATH",
"(",
"cim_xml",
".",
"LOCALNAMESPACEPATH",
"(",
"[",
"cim_xml",
".",
"NAMESPACE",
"(",
"ns",
")",
"for",
"ns",
"in",
"namespace",
".",
"split",
"(",
"'/'",
")",
"]",
")",
",",
"path",
".",
"tocimxml",
"(",
")",
")",
"else",
":",
"localpath",
"=",
"cim_xml",
".",
"LOCALCLASSPATH",
"(",
"cim_xml",
".",
"LOCALNAMESPACEPATH",
"(",
"[",
"cim_xml",
".",
"NAMESPACE",
"(",
"ns",
")",
"for",
"ns",
"in",
"namespace",
".",
"split",
"(",
"'/'",
")",
"]",
")",
",",
"obj",
")",
"def",
"paramtype",
"(",
"obj",
")",
":",
"\"\"\"Return a string to be used as the CIMTYPE for a parameter.\"\"\"",
"if",
"isinstance",
"(",
"obj",
",",
"cim_types",
".",
"CIMType",
")",
":",
"return",
"obj",
".",
"cimtype",
"elif",
"type",
"(",
"obj",
")",
"==",
"bool",
":",
"return",
"'boolean'",
"elif",
"isinstance",
"(",
"obj",
",",
"six",
".",
"string_types",
")",
":",
"return",
"'string'",
"elif",
"isinstance",
"(",
"obj",
",",
"(",
"datetime",
",",
"timedelta",
")",
")",
":",
"return",
"'datetime'",
"elif",
"isinstance",
"(",
"obj",
",",
"(",
"CIMClassName",
",",
"CIMInstanceName",
")",
")",
":",
"return",
"'reference'",
"elif",
"isinstance",
"(",
"obj",
",",
"(",
"CIMClass",
",",
"CIMInstance",
")",
")",
":",
"return",
"'string'",
"elif",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"return",
"paramtype",
"(",
"obj",
"[",
"0",
"]",
")",
"raise",
"TypeError",
"(",
"'Unsupported parameter type \"%s\"'",
"%",
"type",
"(",
"obj",
")",
")",
"def",
"paramvalue",
"(",
"obj",
")",
":",
"\"\"\"Return a cim_xml node to be used as the value for a\n parameter.\"\"\"",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"datetime",
",",
"timedelta",
")",
")",
":",
"obj",
"=",
"CIMDateTime",
"(",
"obj",
")",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"cim_types",
".",
"CIMType",
",",
"bool",
",",
"six",
".",
"string_types",
")",
")",
":",
"return",
"cim_xml",
".",
"VALUE",
"(",
"cim_types",
".",
"atomic_to_cim_xml",
"(",
"obj",
")",
")",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"CIMClassName",
",",
"CIMInstanceName",
")",
")",
":",
"return",
"cim_xml",
".",
"VALUE_REFERENCE",
"(",
"obj",
".",
"tocimxml",
"(",
")",
")",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"CIMClass",
",",
"CIMInstance",
")",
")",
":",
"return",
"cim_xml",
".",
"VALUE",
"(",
"obj",
".",
"tocimxml",
"(",
")",
".",
"toxml",
"(",
")",
")",
"if",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"if",
"isinstance",
"(",
"obj",
"[",
"0",
"]",
",",
"(",
"CIMClassName",
",",
"CIMInstanceName",
")",
")",
":",
"return",
"cim_xml",
".",
"VALUE_REFARRAY",
"(",
"[",
"paramvalue",
"(",
"x",
")",
"for",
"x",
"in",
"obj",
"]",
")",
"return",
"cim_xml",
".",
"VALUE_ARRAY",
"(",
"[",
"paramvalue",
"(",
"x",
")",
"for",
"x",
"in",
"obj",
"]",
")",
"raise",
"TypeError",
"(",
"'Unsupported parameter type \"%s\"'",
"%",
"type",
"(",
"obj",
")",
")",
"param_list",
"=",
"[",
"cim_xml",
".",
"PARAMVALUE",
"(",
"x",
"[",
"0",
"]",
",",
"paramvalue",
"(",
"x",
"[",
"1",
"]",
")",
",",
"paramtype",
"(",
"x",
"[",
"1",
"]",
")",
")",
"for",
"x",
"in",
"kwargs",
".",
"items",
"(",
")",
"]",
"payload",
"=",
"cim_xml",
".",
"CIM",
"(",
"cim_xml",
".",
"MESSAGE",
"(",
"cim_xml",
".",
"SIMPLEREQ",
"(",
"cim_xml",
".",
"METHODCALL",
"(",
"methodname",
",",
"localpath",
",",
"param_list",
")",
")",
",",
"'1001'",
",",
"'1.0'",
")",
",",
"'2.0'",
",",
"'2.0'",
")",
"return",
"self",
".",
"xml_header",
"+",
"payload",
".",
"toxml",
"(",
")"
]
| Generate the XML payload for an extrinsic methodcall. | [
"Generate",
"the",
"XML",
"payload",
"for",
"an",
"extrinsic",
"methodcall",
"."
]
| python | train |
neo4j/neo4j-python-driver | neo4j/types/temporal.py | https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/temporal.py#L166-L173 | def dehydrate_duration(value):
""" Dehydrator for `duration` values.
:param value:
:type value: Duration
:return:
"""
return Structure(b"E", value.months, value.days, value.seconds, int(1000000000 * value.subseconds)) | [
"def",
"dehydrate_duration",
"(",
"value",
")",
":",
"return",
"Structure",
"(",
"b\"E\"",
",",
"value",
".",
"months",
",",
"value",
".",
"days",
",",
"value",
".",
"seconds",
",",
"int",
"(",
"1000000000",
"*",
"value",
".",
"subseconds",
")",
")"
]
| Dehydrator for `duration` values.
:param value:
:type value: Duration
:return: | [
"Dehydrator",
"for",
"duration",
"values",
"."
]
| python | train |
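A hedged sketch for dehydrate_duration above; importing Duration from neotime is an assumption based on the temporal types this driver generation used:

    from neotime import Duration

    # Packs months, days, seconds and nanoseconds into a Bolt 'E' structure.
    struct = dehydrate_duration(Duration(months=1, days=2, seconds=3))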
apache/incubator-mxnet | example/ssd/evaluate/eval_metric.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/evaluate/eval_metric.py#L86-L195 | def update(self, labels, preds):
"""
Update internal records. This function now only updates internal buffer,
sum_metric and num_inst are updated in _update() function instead when
get() is called to return results.
Params:
----------
labels: mx.nd.array (n * 6) or (n * 5), difficult column is optional
2-d array of ground-truths, n objects(id-xmin-ymin-xmax-ymax-[difficult])
preds: mx.nd.array (m * 6)
2-d array of detections, m objects(id-score-xmin-ymin-xmax-ymax)
"""
def iou(x, ys):
"""
Calculate intersection-over-union overlap
Params:
----------
x : numpy.array
single box [xmin, ymin ,xmax, ymax]
ys : numpy.array
multiple box [[xmin, ymin, xmax, ymax], [...], ]
Returns:
-----------
numpy.array
[iou1, iou2, ...], size == ys.shape[0]
"""
ixmin = np.maximum(ys[:, 0], x[0])
iymin = np.maximum(ys[:, 1], x[1])
ixmax = np.minimum(ys[:, 2], x[2])
iymax = np.minimum(ys[:, 3], x[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
uni = (x[2] - x[0]) * (x[3] - x[1]) + (ys[:, 2] - ys[:, 0]) * \
(ys[:, 3] - ys[:, 1]) - inters
ious = inters / uni
ious[uni < 1e-12] = 0 # in case bad boxes
return ious
# independant execution for each image
for i in range(labels[0].shape[0]):
# get as numpy arrays
label = labels[0][i].asnumpy()
if np.sum(label[:, 0] >= 0) < 1:
continue
pred = preds[self.pred_idx][i].asnumpy()
# calculate for each class
while (pred.shape[0] > 0):
cid = int(pred[0, 0])
indices = np.where(pred[:, 0].astype(int) == cid)[0]
if cid < 0:
pred = np.delete(pred, indices, axis=0)
continue
dets = pred[indices]
pred = np.delete(pred, indices, axis=0)
# sort by score, desceding
dets = dets[dets[:,1].argsort()[::-1]]
records = np.hstack((dets[:, 1][:, np.newaxis], np.zeros((dets.shape[0], 1))))
# ground-truths
label_indices = np.where(label[:, 0].astype(int) == cid)[0]
gts = label[label_indices, :]
label = np.delete(label, label_indices, axis=0)
if gts.size > 0:
found = [False] * gts.shape[0]
for j in range(dets.shape[0]):
# compute overlaps
ious = iou(dets[j, 2:], gts[:, 1:5])
ovargmax = np.argmax(ious)
ovmax = ious[ovargmax]
if ovmax > self.ovp_thresh:
if (not self.use_difficult and
gts.shape[1] >= 6 and
gts[ovargmax, 5] > 0):
pass
else:
if not found[ovargmax]:
records[j, -1] = 1 # tp
found[ovargmax] = True
else:
# duplicate
records[j, -1] = 2 # fp
else:
records[j, -1] = 2 # fp
else:
# no gt, mark all fp
records[:, -1] = 2
# ground truth count
if (not self.use_difficult and gts.shape[1] >= 6):
gt_count = np.sum(gts[:, 5] < 1)
else:
gt_count = gts.shape[0]
# now we push records to buffer
# first column: score, second column: tp/fp
# 0: not set(matched to difficult or something), 1: tp, 2: fp
records = records[np.where(records[:, -1] > 0)[0], :]
if records.size > 0:
self._insert(cid, records, gt_count)
# add missing class if not present in prediction
while (label.shape[0] > 0):
cid = int(label[0, 0])
label_indices = np.where(label[:, 0].astype(int) == cid)[0]
label = np.delete(label, label_indices, axis=0)
if cid < 0:
continue
gt_count = label_indices.size
self._insert(cid, np.array([[0, 0]]), gt_count) | [
"def",
"update",
"(",
"self",
",",
"labels",
",",
"preds",
")",
":",
"def",
"iou",
"(",
"x",
",",
"ys",
")",
":",
"\"\"\"\n Calculate intersection-over-union overlap\n Params:\n ----------\n x : numpy.array\n single box [xmin, ymin ,xmax, ymax]\n ys : numpy.array\n multiple box [[xmin, ymin, xmax, ymax], [...], ]\n Returns:\n -----------\n numpy.array\n [iou1, iou2, ...], size == ys.shape[0]\n \"\"\"",
"ixmin",
"=",
"np",
".",
"maximum",
"(",
"ys",
"[",
":",
",",
"0",
"]",
",",
"x",
"[",
"0",
"]",
")",
"iymin",
"=",
"np",
".",
"maximum",
"(",
"ys",
"[",
":",
",",
"1",
"]",
",",
"x",
"[",
"1",
"]",
")",
"ixmax",
"=",
"np",
".",
"minimum",
"(",
"ys",
"[",
":",
",",
"2",
"]",
",",
"x",
"[",
"2",
"]",
")",
"iymax",
"=",
"np",
".",
"minimum",
"(",
"ys",
"[",
":",
",",
"3",
"]",
",",
"x",
"[",
"3",
"]",
")",
"iw",
"=",
"np",
".",
"maximum",
"(",
"ixmax",
"-",
"ixmin",
",",
"0.",
")",
"ih",
"=",
"np",
".",
"maximum",
"(",
"iymax",
"-",
"iymin",
",",
"0.",
")",
"inters",
"=",
"iw",
"*",
"ih",
"uni",
"=",
"(",
"x",
"[",
"2",
"]",
"-",
"x",
"[",
"0",
"]",
")",
"*",
"(",
"x",
"[",
"3",
"]",
"-",
"x",
"[",
"1",
"]",
")",
"+",
"(",
"ys",
"[",
":",
",",
"2",
"]",
"-",
"ys",
"[",
":",
",",
"0",
"]",
")",
"*",
"(",
"ys",
"[",
":",
",",
"3",
"]",
"-",
"ys",
"[",
":",
",",
"1",
"]",
")",
"-",
"inters",
"ious",
"=",
"inters",
"/",
"uni",
"ious",
"[",
"uni",
"<",
"1e-12",
"]",
"=",
"0",
"# in case bad boxes",
"return",
"ious",
"# independant execution for each image",
"for",
"i",
"in",
"range",
"(",
"labels",
"[",
"0",
"]",
".",
"shape",
"[",
"0",
"]",
")",
":",
"# get as numpy arrays",
"label",
"=",
"labels",
"[",
"0",
"]",
"[",
"i",
"]",
".",
"asnumpy",
"(",
")",
"if",
"np",
".",
"sum",
"(",
"label",
"[",
":",
",",
"0",
"]",
">=",
"0",
")",
"<",
"1",
":",
"continue",
"pred",
"=",
"preds",
"[",
"self",
".",
"pred_idx",
"]",
"[",
"i",
"]",
".",
"asnumpy",
"(",
")",
"# calculate for each class",
"while",
"(",
"pred",
".",
"shape",
"[",
"0",
"]",
">",
"0",
")",
":",
"cid",
"=",
"int",
"(",
"pred",
"[",
"0",
",",
"0",
"]",
")",
"indices",
"=",
"np",
".",
"where",
"(",
"pred",
"[",
":",
",",
"0",
"]",
".",
"astype",
"(",
"int",
")",
"==",
"cid",
")",
"[",
"0",
"]",
"if",
"cid",
"<",
"0",
":",
"pred",
"=",
"np",
".",
"delete",
"(",
"pred",
",",
"indices",
",",
"axis",
"=",
"0",
")",
"continue",
"dets",
"=",
"pred",
"[",
"indices",
"]",
"pred",
"=",
"np",
".",
"delete",
"(",
"pred",
",",
"indices",
",",
"axis",
"=",
"0",
")",
"# sort by score, desceding",
"dets",
"=",
"dets",
"[",
"dets",
"[",
":",
",",
"1",
"]",
".",
"argsort",
"(",
")",
"[",
":",
":",
"-",
"1",
"]",
"]",
"records",
"=",
"np",
".",
"hstack",
"(",
"(",
"dets",
"[",
":",
",",
"1",
"]",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"np",
".",
"zeros",
"(",
"(",
"dets",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
")",
")",
"# ground-truths",
"label_indices",
"=",
"np",
".",
"where",
"(",
"label",
"[",
":",
",",
"0",
"]",
".",
"astype",
"(",
"int",
")",
"==",
"cid",
")",
"[",
"0",
"]",
"gts",
"=",
"label",
"[",
"label_indices",
",",
":",
"]",
"label",
"=",
"np",
".",
"delete",
"(",
"label",
",",
"label_indices",
",",
"axis",
"=",
"0",
")",
"if",
"gts",
".",
"size",
">",
"0",
":",
"found",
"=",
"[",
"False",
"]",
"*",
"gts",
".",
"shape",
"[",
"0",
"]",
"for",
"j",
"in",
"range",
"(",
"dets",
".",
"shape",
"[",
"0",
"]",
")",
":",
"# compute overlaps",
"ious",
"=",
"iou",
"(",
"dets",
"[",
"j",
",",
"2",
":",
"]",
",",
"gts",
"[",
":",
",",
"1",
":",
"5",
"]",
")",
"ovargmax",
"=",
"np",
".",
"argmax",
"(",
"ious",
")",
"ovmax",
"=",
"ious",
"[",
"ovargmax",
"]",
"if",
"ovmax",
">",
"self",
".",
"ovp_thresh",
":",
"if",
"(",
"not",
"self",
".",
"use_difficult",
"and",
"gts",
".",
"shape",
"[",
"1",
"]",
">=",
"6",
"and",
"gts",
"[",
"ovargmax",
",",
"5",
"]",
">",
"0",
")",
":",
"pass",
"else",
":",
"if",
"not",
"found",
"[",
"ovargmax",
"]",
":",
"records",
"[",
"j",
",",
"-",
"1",
"]",
"=",
"1",
"# tp",
"found",
"[",
"ovargmax",
"]",
"=",
"True",
"else",
":",
"# duplicate",
"records",
"[",
"j",
",",
"-",
"1",
"]",
"=",
"2",
"# fp",
"else",
":",
"records",
"[",
"j",
",",
"-",
"1",
"]",
"=",
"2",
"# fp",
"else",
":",
"# no gt, mark all fp",
"records",
"[",
":",
",",
"-",
"1",
"]",
"=",
"2",
"# ground truth count",
"if",
"(",
"not",
"self",
".",
"use_difficult",
"and",
"gts",
".",
"shape",
"[",
"1",
"]",
">=",
"6",
")",
":",
"gt_count",
"=",
"np",
".",
"sum",
"(",
"gts",
"[",
":",
",",
"5",
"]",
"<",
"1",
")",
"else",
":",
"gt_count",
"=",
"gts",
".",
"shape",
"[",
"0",
"]",
"# now we push records to buffer",
"# first column: score, second column: tp/fp",
"# 0: not set(matched to difficult or something), 1: tp, 2: fp",
"records",
"=",
"records",
"[",
"np",
".",
"where",
"(",
"records",
"[",
":",
",",
"-",
"1",
"]",
">",
"0",
")",
"[",
"0",
"]",
",",
":",
"]",
"if",
"records",
".",
"size",
">",
"0",
":",
"self",
".",
"_insert",
"(",
"cid",
",",
"records",
",",
"gt_count",
")",
"# add missing class if not present in prediction",
"while",
"(",
"label",
".",
"shape",
"[",
"0",
"]",
">",
"0",
")",
":",
"cid",
"=",
"int",
"(",
"label",
"[",
"0",
",",
"0",
"]",
")",
"label_indices",
"=",
"np",
".",
"where",
"(",
"label",
"[",
":",
",",
"0",
"]",
".",
"astype",
"(",
"int",
")",
"==",
"cid",
")",
"[",
"0",
"]",
"label",
"=",
"np",
".",
"delete",
"(",
"label",
",",
"label_indices",
",",
"axis",
"=",
"0",
")",
"if",
"cid",
"<",
"0",
":",
"continue",
"gt_count",
"=",
"label_indices",
".",
"size",
"self",
".",
"_insert",
"(",
"cid",
",",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"0",
"]",
"]",
")",
",",
"gt_count",
")"
]
| Update internal records. This function now only updates internal buffer;
sum_metric and num_inst are updated in _update() function instead when
get() is called to return results.
Params:
----------
labels: mx.nd.array (n * 6) or (n * 5), difficult column is optional
2-d array of ground-truths, n objects(id-xmin-ymin-xmax-ymax-[difficult])
preds: mx.nd.array (m * 6)
2-d array of detections, m objects(id-score-xmin-ymin-xmax-ymax) | [
"Update",
"internal",
"records",
".",
"This",
"function",
"now",
"only",
"update",
"internal",
"buffer",
"sum_metric",
"and",
"num_inst",
"are",
"updated",
"in",
"_update",
"()",
"function",
"instead",
"when",
"get",
"()",
"is",
"called",
"to",
"return",
"results",
"."
]
| python | train |
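The token stream above encodes a per-class AP bookkeeping routine; its inner iou helper reduces to the following standalone NumPy sketch (box layout [xmin, ymin, xmax, ymax] as in its docstring; the example boxes are made up):

import numpy as np

def iou(x, ys):
    # Vectorised intersection-over-union of one box against many.
    ixmin = np.maximum(ys[:, 0], x[0])
    iymin = np.maximum(ys[:, 1], x[1])
    ixmax = np.minimum(ys[:, 2], x[2])
    iymax = np.minimum(ys[:, 3], x[3])
    iw = np.maximum(ixmax - ixmin, 0.)
    ih = np.maximum(iymax - iymin, 0.)
    inters = iw * ih
    uni = ((x[2] - x[0]) * (x[3] - x[1])
           + (ys[:, 2] - ys[:, 0]) * (ys[:, 3] - ys[:, 1]) - inters)
    ious = inters / uni
    ious[uni < 1e-12] = 0  # guard against degenerate boxes
    return ious

print(iou(np.array([0., 0., 2., 2.]),
          np.array([[1., 1., 3., 3.], [0., 0., 2., 2.]])))
# [0.14285714 1.        ]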
saltstack/salt | salt/client/ssh/wrapper/state.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/wrapper/state.py#L525-L565 | def request(mods=None,
**kwargs):
'''
.. versionadded:: 2017.7.3
Request that the local admin execute a state run via
`salt-call state.run_request`
All arguments match state.apply
CLI Example:
.. code-block:: bash
salt '*' state.request
salt '*' state.request test
salt '*' state.request test,pkgs
'''
kwargs['test'] = True
ret = apply_(mods, **kwargs)
notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
serial = salt.payload.Serial(__opts__)
req = check_request()
req.update({kwargs.get('name', 'default'): {
'test_run': ret,
'mods': mods,
'kwargs': kwargs
}
})
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
__salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
serial.dump(req, fp_)
except (IOError, OSError):
log.error(
'Unable to write state request file %s. Check permission.',
notify_path
)
return ret | [
"def",
"request",
"(",
"mods",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'test'",
"]",
"=",
"True",
"ret",
"=",
"apply_",
"(",
"mods",
",",
"*",
"*",
"kwargs",
")",
"notify_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"__opts__",
"[",
"'cachedir'",
"]",
",",
"'req_state.p'",
")",
"serial",
"=",
"salt",
".",
"payload",
".",
"Serial",
"(",
"__opts__",
")",
"req",
"=",
"check_request",
"(",
")",
"req",
".",
"update",
"(",
"{",
"kwargs",
".",
"get",
"(",
"'name'",
",",
"'default'",
")",
":",
"{",
"'test_run'",
":",
"ret",
",",
"'mods'",
":",
"mods",
",",
"'kwargs'",
":",
"kwargs",
"}",
"}",
")",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"set_umask",
"(",
"0o077",
")",
":",
"try",
":",
"if",
"salt",
".",
"utils",
".",
"platform",
".",
"is_windows",
"(",
")",
":",
"# Make sure cache file isn't read-only",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'attrib -R \"{0}\"'",
".",
"format",
"(",
"notify_path",
")",
")",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"notify_path",
",",
"'w+b'",
")",
"as",
"fp_",
":",
"serial",
".",
"dump",
"(",
"req",
",",
"fp_",
")",
"except",
"(",
"IOError",
",",
"OSError",
")",
":",
"log",
".",
"error",
"(",
"'Unable to write state request file %s. Check permission.'",
",",
"notify_path",
")",
"return",
"ret"
]
| .. versionadded:: 2017.7.3
Request that the local admin execute a state run via
`salt-call state.run_request`
All arguments match state.apply
CLI Example:
.. code-block:: bash
salt '*' state.request
salt '*' state.request test
salt '*' state.request test,pkgs | [
"..",
"versionadded",
"::",
"2017",
".",
"7",
".",
"3"
]
| python | train |
SKA-ScienceDataProcessor/integration-prototype | sip/execution_control/configuration_db/sip_config_db/utils/generate_sbi_config.py | https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/utils/generate_sbi_config.py#L62-L78 | def generate_version(max_major: int = 1, max_minor: int = 7,
max_patch: int = 15) -> str:
"""Select a random version.
Args:
max_major (int, optional) maximum major version
max_minor (int, optional) maximum minor version
max_patch (int, optional) maximum patch version
Returns:
str, Version String
"""
major = randint(0, max_major)
minor = randint(0, max_minor)
patch = randint(0, max_patch)
return '{:d}.{:d}.{:d}'.format(major, minor, patch) | [
"def",
"generate_version",
"(",
"max_major",
":",
"int",
"=",
"1",
",",
"max_minor",
":",
"int",
"=",
"7",
",",
"max_patch",
":",
"int",
"=",
"15",
")",
"->",
"str",
":",
"major",
"=",
"randint",
"(",
"0",
",",
"max_major",
")",
"minor",
"=",
"randint",
"(",
"0",
",",
"max_minor",
")",
"patch",
"=",
"randint",
"(",
"0",
",",
"max_patch",
")",
"return",
"'{:d}.{:d}.{:d}'",
".",
"format",
"(",
"major",
",",
"minor",
",",
"patch",
")"
]
| Select a random version.
Args:
max_major (int, optional) maximum major version
max_minor (int, optional) maximum minor version
max_patch (int, optional) maximum patch version
Returns:
str, Version String | [
"Select",
"a",
"random",
"version",
"."
]
| python | train |
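Assuming generate_version from the row above is in scope, a quick driver (the printed strings vary with the RNG, so no specific output is claimed):

from random import seed

seed(0)  # pin the RNG so repeated runs of this sketch agree
print(generate_version())             # some 'major.minor.patch' within the default ranges
print(generate_version(max_major=3)) # widen the major component to 0..3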
tamasgal/km3pipe | km3pipe/utils/tohdf5.py | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/tohdf5.py#L46-L73 | def tohdf5(input_files, output_file, n_events, conv_times_to_jte, **kwargs):
"""Convert Any file to HDF5 file"""
if len(input_files) > 1:
cprint(
"Preparing to convert {} files to HDF5.".format(len(input_files))
)
from km3pipe import Pipeline # noqa
from km3pipe.io import GenericPump, HDF5Sink, HDF5MetaData # noqa
for input_file in input_files:
cprint("Converting '{}'...".format(input_file))
if len(input_files) > 1:
output_file = input_file + '.h5'
meta_data = kwargs.copy()
meta_data['origin'] = input_file
pipe = Pipeline()
pipe.attach(HDF5MetaData, data=meta_data)
pipe.attach(GenericPump, filenames=input_file, **kwargs)
pipe.attach(StatusBar, every=250)
if conv_times_to_jte:
from km3modules.mc import MCTimeCorrector
pipe.attach(MCTimeCorrector)
pipe.attach(HDF5Sink, filename=output_file, **kwargs)
pipe.drain(n_events)
cprint("File '{}' was converted.".format(input_file)) | [
"def",
"tohdf5",
"(",
"input_files",
",",
"output_file",
",",
"n_events",
",",
"conv_times_to_jte",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"input_files",
")",
">",
"1",
":",
"cprint",
"(",
"\"Preparing to convert {} files to HDF5.\"",
".",
"format",
"(",
"len",
"(",
"input_files",
")",
")",
")",
"from",
"km3pipe",
"import",
"Pipeline",
"# noqa",
"from",
"km3pipe",
".",
"io",
"import",
"GenericPump",
",",
"HDF5Sink",
",",
"HDF5MetaData",
"# noqa",
"for",
"input_file",
"in",
"input_files",
":",
"cprint",
"(",
"\"Converting '{}'...\"",
".",
"format",
"(",
"input_file",
")",
")",
"if",
"len",
"(",
"input_files",
")",
">",
"1",
":",
"output_file",
"=",
"input_file",
"+",
"'.h5'",
"meta_data",
"=",
"kwargs",
".",
"copy",
"(",
")",
"meta_data",
"[",
"'origin'",
"]",
"=",
"input_file",
"pipe",
"=",
"Pipeline",
"(",
")",
"pipe",
".",
"attach",
"(",
"HDF5MetaData",
",",
"data",
"=",
"meta_data",
")",
"pipe",
".",
"attach",
"(",
"GenericPump",
",",
"filenames",
"=",
"input_file",
",",
"*",
"*",
"kwargs",
")",
"pipe",
".",
"attach",
"(",
"StatusBar",
",",
"every",
"=",
"250",
")",
"if",
"conv_times_to_jte",
":",
"from",
"km3modules",
".",
"mc",
"import",
"MCTimeCorrector",
"pipe",
".",
"attach",
"(",
"MCTimeCorrector",
")",
"pipe",
".",
"attach",
"(",
"HDF5Sink",
",",
"filename",
"=",
"output_file",
",",
"*",
"*",
"kwargs",
")",
"pipe",
".",
"drain",
"(",
"n_events",
")",
"cprint",
"(",
"\"File '{}' was converted.\"",
".",
"format",
"(",
"input_file",
")",
")"
]
| Convert Any file to HDF5 file | [
"Convert",
"Any",
"file",
"to",
"HDF5",
"file"
]
| python | train |
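The tohdf5 converter above follows km3pipe's pump -> module -> sink pattern; a minimal sketch of that pattern, assuming km3pipe is installed (the file names are hypothetical):

from km3pipe import Pipeline
from km3pipe.io import GenericPump, HDF5Sink

pipe = Pipeline()
pipe.attach(GenericPump, filenames='run0001.root')
pipe.attach(HDF5Sink, filename='run0001.root.h5')
pipe.drain(100)  # stop after 100 events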
evhub/coconut | coconut/compiler/compiler.py | https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L1179-L1199 | def classlist_handle(self, original, loc, tokens):
"""Process class inheritance lists."""
if len(tokens) == 0:
if self.target.startswith("3"):
return ""
else:
return "(_coconut.object)"
elif len(tokens) == 1 and len(tokens[0]) == 1:
if "tests" in tokens[0]:
if self.strict and tokens[0][0] == "(object)":
raise self.make_err(CoconutStyleError, "unnecessary inheriting from object (Coconut does this automatically)", original, loc)
return tokens[0][0]
elif "args" in tokens[0]:
if self.target.startswith("3"):
return tokens[0][0]
else:
raise self.make_err(CoconutTargetError, "found Python 3 keyword class definition", original, loc, target="3")
else:
raise CoconutInternalException("invalid inner classlist token", tokens[0])
else:
raise CoconutInternalException("invalid classlist tokens", tokens) | [
"def",
"classlist_handle",
"(",
"self",
",",
"original",
",",
"loc",
",",
"tokens",
")",
":",
"if",
"len",
"(",
"tokens",
")",
"==",
"0",
":",
"if",
"self",
".",
"target",
".",
"startswith",
"(",
"\"3\"",
")",
":",
"return",
"\"\"",
"else",
":",
"return",
"\"(_coconut.object)\"",
"elif",
"len",
"(",
"tokens",
")",
"==",
"1",
"and",
"len",
"(",
"tokens",
"[",
"0",
"]",
")",
"==",
"1",
":",
"if",
"\"tests\"",
"in",
"tokens",
"[",
"0",
"]",
":",
"if",
"self",
".",
"strict",
"and",
"tokens",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"\"(object)\"",
":",
"raise",
"self",
".",
"make_err",
"(",
"CoconutStyleError",
",",
"\"unnecessary inheriting from object (Coconut does this automatically)\"",
",",
"original",
",",
"loc",
")",
"return",
"tokens",
"[",
"0",
"]",
"[",
"0",
"]",
"elif",
"\"args\"",
"in",
"tokens",
"[",
"0",
"]",
":",
"if",
"self",
".",
"target",
".",
"startswith",
"(",
"\"3\"",
")",
":",
"return",
"tokens",
"[",
"0",
"]",
"[",
"0",
"]",
"else",
":",
"raise",
"self",
".",
"make_err",
"(",
"CoconutTargetError",
",",
"\"found Python 3 keyword class definition\"",
",",
"original",
",",
"loc",
",",
"target",
"=",
"\"3\"",
")",
"else",
":",
"raise",
"CoconutInternalException",
"(",
"\"invalid inner classlist token\"",
",",
"tokens",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"CoconutInternalException",
"(",
"\"invalid classlist tokens\"",
",",
"tokens",
")"
]
| Process class inheritance lists. | [
"Process",
"class",
"inheritance",
"lists",
"."
]
| python | train |
prompt-toolkit/pymux | pymux/client/posix.py | https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/client/posix.py#L175-L183 | def _send_packet(self, data):
" Send to server. "
data = json.dumps(data).encode('utf-8')
# Be sure that our socket is blocking, otherwise, the send() call could
# raise `BlockingIOError` if the buffer is full.
self.socket.setblocking(1)
self.socket.send(data + b'\0') | [
"def",
"_send_packet",
"(",
"self",
",",
"data",
")",
":",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"# Be sure that our socket is blocking, otherwise, the send() call could",
"# raise `BlockingIOError` if the buffer is full.",
"self",
".",
"socket",
".",
"setblocking",
"(",
"1",
")",
"self",
".",
"socket",
".",
"send",
"(",
"data",
"+",
"b'\\0'",
")"
]
| Send to server. | [
"Send",
"to",
"server",
"."
]
| python | train |
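_send_packet frames each message as NUL-terminated UTF-8 JSON; a receive loop consistent with that framing could look like this (a sketch under that assumption, not pymux's actual server code):

import json

def recv_packets(sock):
    # Yield decoded JSON packets framed by the b'\0' delimiter
    # that _send_packet appends.
    buffer = b''
    while True:
        chunk = sock.recv(4096)
        if not chunk:
            return  # peer closed the connection
        buffer += chunk
        while b'\0' in buffer:
            packet, _, buffer = buffer.partition(b'\0')
            yield json.loads(packet.decode('utf-8'))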
adamchainz/django-mysql | django_mysql/models/handler.py | https://github.com/adamchainz/django-mysql/blob/967daa4245cf55c9bc5dc018e560f417c528916a/django_mysql/models/handler.py#L116-L150 | def _parse_index_value(self, kwargs):
"""
Parse the HANDLER-supported subset of django's __ expression syntax
"""
if len(kwargs) == 0:
return None, None
elif len(kwargs) > 1:
raise ValueError("You can't pass more than one value expression, "
"you passed {}".format(",".join(kwargs.keys())))
name, value = list(kwargs.items())[0]
if not name.startswith('value'):
raise ValueError("The keyword arg {} is not valid for this "
"function".format(name))
if name == 'value':
return ('=', value)
if not name.startswith('value__'):
raise ValueError("The keyword arg {} is not valid for this "
"function".format(name))
operator = name[name.find('__') + 2:]
try:
return (self._operator_values[operator], value)
except KeyError:
raise ValueError(
"The operator {op} is not valid for index value matching. "
"Valid operators are {valid}"
.format(
op=operator,
valid=",".join(self._operator_values.keys()),
),
) | [
"def",
"_parse_index_value",
"(",
"self",
",",
"kwargs",
")",
":",
"if",
"len",
"(",
"kwargs",
")",
"==",
"0",
":",
"return",
"None",
",",
"None",
"elif",
"len",
"(",
"kwargs",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"You can't pass more than one value expression, \"",
"\"you passed {}\"",
".",
"format",
"(",
"\",\"",
".",
"join",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
")",
")",
"name",
",",
"value",
"=",
"list",
"(",
"kwargs",
".",
"items",
"(",
")",
")",
"[",
"0",
"]",
"if",
"not",
"name",
".",
"startswith",
"(",
"'value'",
")",
":",
"raise",
"ValueError",
"(",
"\"The keyword arg {} is not valid for this \"",
"\"function\"",
".",
"format",
"(",
"name",
")",
")",
"if",
"name",
"==",
"'value'",
":",
"return",
"(",
"'='",
",",
"value",
")",
"if",
"not",
"name",
".",
"startswith",
"(",
"'value__'",
")",
":",
"raise",
"ValueError",
"(",
"\"The keyword arg {} is not valid for this \"",
"\"function\"",
".",
"format",
"(",
"name",
")",
")",
"operator",
"=",
"name",
"[",
"name",
".",
"find",
"(",
"'__'",
")",
"+",
"2",
":",
"]",
"try",
":",
"return",
"(",
"self",
".",
"_operator_values",
"[",
"operator",
"]",
",",
"value",
")",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"The operator {op} is not valid for index value matching. \"",
"\"Valid operators are {valid}\"",
".",
"format",
"(",
"op",
"=",
"operator",
",",
"valid",
"=",
"\",\"",
".",
"join",
"(",
"self",
".",
"_operator_values",
".",
"keys",
"(",
")",
")",
",",
")",
",",
")"
]
| Parse the HANDLER-supported subset of django's __ expression syntax | [
"Parse",
"the",
"HANDLER",
"-",
"supported",
"subset",
"of",
"django",
"s",
"__",
"expression",
"syntax"
]
| python | train |
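The parser above maps Django-style value__<op> keywords onto SQL comparison operators; a simplified standalone version, with an assumed operator table (django-mysql's actual _operator_values may differ):

_operator_values = {'lt': '<', 'lte': '<=', 'exact': '=', 'gt': '>', 'gte': '>='}

def parse_index_value(**kwargs):
    # Parse the single value expression, if any, out of the kwargs.
    if not kwargs:
        return None, None
    if len(kwargs) > 1:
        raise ValueError("You can't pass more than one value expression")
    name, value = list(kwargs.items())[0]
    if name == 'value':
        return '=', value
    if not name.startswith('value__'):
        raise ValueError('The keyword arg {} is not valid'.format(name))
    operator = name[name.find('__') + 2:]
    return _operator_values[operator], value

print(parse_index_value(value=10))       # ('=', 10)
print(parse_index_value(value__lte=10))  # ('<=', 10)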
uw-it-aca/uw-restclients-canvas | uw_canvas/roles.py | https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/roles.py#L50-L55 | def get_role_by_account_sis_id(self, account_sis_id, role_id):
"""
Get information about a single role, for the passed account SIS ID.
"""
return self.get_role(self._sis_id(account_sis_id, sis_field="account"),
role_id) | [
"def",
"get_role_by_account_sis_id",
"(",
"self",
",",
"account_sis_id",
",",
"role_id",
")",
":",
"return",
"self",
".",
"get_role",
"(",
"self",
".",
"_sis_id",
"(",
"account_sis_id",
",",
"sis_field",
"=",
"\"account\"",
")",
",",
"role_id",
")"
]
| Get information about a single role, for the passed account SIS ID. | [
"Get",
"information",
"about",
"a",
"single",
"role",
"for",
"the",
"passed",
"account",
"SIS",
"ID",
"."
]
| python | test |
jeffknupp/sandman2 | sandman2/app.py | https://github.com/jeffknupp/sandman2/blob/1ce21d6f7a6df77fa96fab694b0f9bb8469c166b/sandman2/app.py#L176-L187 | def _register_user_models(user_models, admin=None, schema=None):
"""Register any user-defined models with the API Service.
:param list user_models: A list of user-defined models to include in the
API service
"""
if any([issubclass(cls, AutomapModel) for cls in user_models]):
AutomapModel.prepare( # pylint:disable=maybe-no-member
db.engine, reflect=True, schema=schema)
for user_model in user_models:
register_model(user_model, admin) | [
"def",
"_register_user_models",
"(",
"user_models",
",",
"admin",
"=",
"None",
",",
"schema",
"=",
"None",
")",
":",
"if",
"any",
"(",
"[",
"issubclass",
"(",
"cls",
",",
"AutomapModel",
")",
"for",
"cls",
"in",
"user_models",
"]",
")",
":",
"AutomapModel",
".",
"prepare",
"(",
"# pylint:disable=maybe-no-member",
"db",
".",
"engine",
",",
"reflect",
"=",
"True",
",",
"schema",
"=",
"schema",
")",
"for",
"user_model",
"in",
"user_models",
":",
"register_model",
"(",
"user_model",
",",
"admin",
")"
]
| Register any user-defined models with the API Service.
:param list user_models: A list of user-defined models to include in the
API service | [
"Register",
"any",
"user",
"-",
"defined",
"models",
"with",
"the",
"API",
"Service",
"."
]
| python | train |
pyroscope/pyrocore | src/pyrocore/scripts/pyroadmin.py | https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/src/pyrocore/scripts/pyroadmin.py#L81-L102 | def download_resource(self, download_url, target, guard):
""" Helper to download and install external resources.
"""
download_url = download_url.strip()
if not os.path.isabs(target):
target = os.path.join(config.config_dir, target)
if os.path.exists(os.path.join(target, guard)):
self.LOG.info("Already have '%s' in '%s'..." % (download_url, target))
return
if not os.path.isdir(target):
os.makedirs(target)
self.LOG.info("Downloading '%s' to '%s'..." % (download_url, target))
with closing(urllib2.urlopen(download_url)) as url_handle:
if download_url.endswith(".zip"):
with closing(ZipFile(StringIO(url_handle.read()))) as zip_handle: # pylint: disable=no-member
zip_handle.extractall(target) # pylint: disable=no-member
else:
with open(os.path.join(target, guard), "wb") as file_handle:
shutil.copyfileobj(url_handle, file_handle) | [
"def",
"download_resource",
"(",
"self",
",",
"download_url",
",",
"target",
",",
"guard",
")",
":",
"download_url",
"=",
"download_url",
".",
"strip",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"target",
")",
":",
"target",
"=",
"os",
".",
"path",
".",
"join",
"(",
"config",
".",
"config_dir",
",",
"target",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"target",
",",
"guard",
")",
")",
":",
"self",
".",
"LOG",
".",
"info",
"(",
"\"Already have '%s' in '%s'...\"",
"%",
"(",
"download_url",
",",
"target",
")",
")",
"return",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"target",
")",
":",
"os",
".",
"makedirs",
"(",
"target",
")",
"self",
".",
"LOG",
".",
"info",
"(",
"\"Downloading '%s' to '%s'...\"",
"%",
"(",
"download_url",
",",
"target",
")",
")",
"with",
"closing",
"(",
"urllib2",
".",
"urlopen",
"(",
"download_url",
")",
")",
"as",
"url_handle",
":",
"if",
"download_url",
".",
"endswith",
"(",
"\".zip\"",
")",
":",
"with",
"closing",
"(",
"ZipFile",
"(",
"StringIO",
"(",
"url_handle",
".",
"read",
"(",
")",
")",
")",
")",
"as",
"zip_handle",
":",
"# pylint: disable=no-member",
"zip_handle",
".",
"extractall",
"(",
"target",
")",
"# pylint: disable=no-member",
"else",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"target",
",",
"guard",
")",
",",
"\"wb\"",
")",
"as",
"file_handle",
":",
"shutil",
".",
"copyfileobj",
"(",
"url_handle",
",",
"file_handle",
")"
]
| Helper to download and install external resources. | [
"Helper",
"to",
"download",
"and",
"install",
"external",
"resources",
"."
]
| python | train |
spyder-ide/spyder-kernels | spyder_kernels/utils/dochelpers.py | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/dochelpers.py#L295-L329 | def isdefined(obj, force_import=False, namespace=None):
"""Return True if object is defined in namespace
If namespace is None --> namespace = locals()"""
if namespace is None:
namespace = locals()
attr_list = obj.split('.')
base = attr_list.pop(0)
if len(base) == 0:
return False
if base not in builtins.__dict__ and base not in namespace:
if force_import:
try:
module = __import__(base, globals(), namespace)
if base not in globals():
globals()[base] = module
namespace[base] = module
except Exception:
return False
else:
return False
for attr in attr_list:
try:
attr_not_found = not hasattr(eval(base, namespace), attr)
except (SyntaxError, AttributeError):
return False
if attr_not_found:
if force_import:
try:
__import__(base+'.'+attr, globals(), namespace)
except (ImportError, SyntaxError):
return False
else:
return False
base += '.'+attr
return True | [
"def",
"isdefined",
"(",
"obj",
",",
"force_import",
"=",
"False",
",",
"namespace",
"=",
"None",
")",
":",
"if",
"namespace",
"is",
"None",
":",
"namespace",
"=",
"locals",
"(",
")",
"attr_list",
"=",
"obj",
".",
"split",
"(",
"'.'",
")",
"base",
"=",
"attr_list",
".",
"pop",
"(",
"0",
")",
"if",
"len",
"(",
"base",
")",
"==",
"0",
":",
"return",
"False",
"if",
"base",
"not",
"in",
"builtins",
".",
"__dict__",
"and",
"base",
"not",
"in",
"namespace",
":",
"if",
"force_import",
":",
"try",
":",
"module",
"=",
"__import__",
"(",
"base",
",",
"globals",
"(",
")",
",",
"namespace",
")",
"if",
"base",
"not",
"in",
"globals",
"(",
")",
":",
"globals",
"(",
")",
"[",
"base",
"]",
"=",
"module",
"namespace",
"[",
"base",
"]",
"=",
"module",
"except",
"Exception",
":",
"return",
"False",
"else",
":",
"return",
"False",
"for",
"attr",
"in",
"attr_list",
":",
"try",
":",
"attr_not_found",
"=",
"not",
"hasattr",
"(",
"eval",
"(",
"base",
",",
"namespace",
")",
",",
"attr",
")",
"except",
"(",
"SyntaxError",
",",
"AttributeError",
")",
":",
"return",
"False",
"if",
"attr_not_found",
":",
"if",
"force_import",
":",
"try",
":",
"__import__",
"(",
"base",
"+",
"'.'",
"+",
"attr",
",",
"globals",
"(",
")",
",",
"namespace",
")",
"except",
"(",
"ImportError",
",",
"SyntaxError",
")",
":",
"return",
"False",
"else",
":",
"return",
"False",
"base",
"+=",
"'.'",
"+",
"attr",
"return",
"True"
]
| Return True if object is defined in namespace
If namespace is None --> namespace = locals() | [
"Return",
"True",
"if",
"object",
"is",
"defined",
"in",
"namespace",
"If",
"namespace",
"is",
"None",
"--",
">",
"namespace",
"=",
"locals",
"()"
]
| python | train |
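Assuming isdefined above is in scope, a small driver shows the lookup semantics:

import math

ns = {'math': math, 'x': 1}
print(isdefined('x', namespace=ns))             # True: found directly
print(isdefined('math.pi', namespace=ns))       # True: attribute walk succeeds
print(isdefined('math.tau2', namespace=ns))     # False: missing attribute, no import forced
print(isdefined('numpy', force_import=True,
                namespace=ns))                  # True only if numpy is importable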
tgalal/python-axolotl | axolotl/protocol/senderkeymessage.py | https://github.com/tgalal/python-axolotl/blob/0c681af4b756f556e23a9bf961abfbc6f82800cc/axolotl/protocol/senderkeymessage.py#L90-L98 | def getSignature(self, signatureKey, serialized):
"""
:type signatureKey: ECPrivateKey
:type serialized: bytearray
"""
try:
return Curve.calculateSignature(signatureKey, serialized)
except InvalidKeyException as e:
raise AssertionError(e) | [
"def",
"getSignature",
"(",
"self",
",",
"signatureKey",
",",
"serialized",
")",
":",
"try",
":",
"return",
"Curve",
".",
"calculateSignature",
"(",
"signatureKey",
",",
"serialized",
")",
"except",
"InvalidKeyException",
"as",
"e",
":",
"raise",
"AssertionError",
"(",
"e",
")"
]
| :type signatureKey: ECPrivateKey
:type serialized: bytearray | [
":",
"type",
"signatureKey",
":",
"ECPrivateKey",
":",
"type",
"serialized",
":",
"bytearray"
]
| python | train |
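getSignature wraps Curve.calculateSignature; a round-trip sketch, assuming python-axolotl's Curve key-pair and verification helpers (those names are not confirmed by the row above):

from axolotl.ecc.curve import Curve

key_pair = Curve.generateKeyPair()
message = b'serialized sender key message'
signature = Curve.calculateSignature(key_pair.getPrivateKey(), message)
print(Curve.verifySignature(key_pair.getPublicKey(), message, signature))  # True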
MIR-MU/ntcir-math-density | ntcir_math_density/view.py | https://github.com/MIR-MU/ntcir-math-density/blob/648c74bfc5bd304603ef67da753ff25b65e829ef/ntcir_math_density/view.py#L19-L46 | def plot_estimates(positions, estimates):
"""
Plots density, and probability estimates.
Parameters
----------
positions : iterable of float
Paragraph positions for which densities, and probabilities were estimated.
estimates : six-tuple of (sequence of float)
Estimates of P(relevant), p(position), p(position | relevant), P(position, relevant), and
P(relevant | position).
Returns
-------
matplotlib.figure.Figure
The plotted figure.
"""
x = list(positions)
fig = plt.figure(figsize=(SUBPLOT_WIDTH * len(estimates), FIGURE_HEIGHT))
for i, (title, y) in enumerate(zip(ESTIMATE_TITLES, estimates)):
ax = fig.add_subplot(1, len(estimates), i + 1)
ax.plot(x, y, linewidth=LINE_WIDTH, c=LINE_COLOR)
ax.title.set_text(title)
ax.set_xlim(0, 1)
ax.set_xlabel("position")
ax.set_ylabel("$\\hat P$")
ax.grid()
return fig | [
"def",
"plot_estimates",
"(",
"positions",
",",
"estimates",
")",
":",
"x",
"=",
"list",
"(",
"positions",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"SUBPLOT_WIDTH",
"*",
"len",
"(",
"estimates",
")",
",",
"FIGURE_HEIGHT",
")",
")",
"for",
"i",
",",
"(",
"title",
",",
"y",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"ESTIMATE_TITLES",
",",
"estimates",
")",
")",
":",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"1",
",",
"len",
"(",
"estimates",
")",
",",
"i",
"+",
"1",
")",
"ax",
".",
"plot",
"(",
"x",
",",
"y",
",",
"linewidth",
"=",
"LINE_WIDTH",
",",
"c",
"=",
"LINE_COLOR",
")",
"ax",
".",
"title",
".",
"set_text",
"(",
"title",
")",
"ax",
".",
"set_xlim",
"(",
"0",
",",
"1",
")",
"ax",
".",
"set_xlabel",
"(",
"\"position\"",
")",
"ax",
".",
"set_ylabel",
"(",
"\"$\\\\hat P$\"",
")",
"ax",
".",
"grid",
"(",
")",
"return",
"fig"
]
| Plots density, and probability estimates.
Parameters
----------
positions : iterable of float
Paragraph positions for which densities, and probabilities were estimated.
estimates : six-tuple of (sequence of float)
Estimates of P(relevant), p(position), p(position | relevant), P(position, relevant), and
P(relevant | position).
Returns
-------
matplotlib.figure.Figure
The plotted figure. | [
"Plots",
"density",
"and",
"probability",
"estimates",
"."
]
| python | train |
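A hedged driver for plot_estimates: toy Gaussian bumps stand in for real density estimates, and the import path is inferred from the row's file path:

import numpy as np
from ntcir_math_density.view import plot_estimates

positions = np.linspace(0, 1, 100)
bump = np.exp(-((positions - 0.5) ** 2) / 0.02)
estimates = tuple(bump * scale for scale in (0.2, 1.0, 1.2, 0.3, 0.4, 0.5))
fig = plot_estimates(positions, estimates)
fig.savefig('estimates.png')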
JoelBender/bacpypes | py25/bacpypes/constructeddata.py | https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/constructeddata.py#L1218-L1243 | def dict_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
if _debug: Choice._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)
# make/extend the dictionary of content
if use_dict is None:
use_dict = as_class()
# look for the chosen element
for element in self.choiceElements:
value = getattr(self, element.name, None)
if value is None:
continue
if issubclass(element.klass, Atomic):
mapped_value = value ### ambiguous
elif issubclass(element.klass, AnyAtomic):
mapped_value = value.value ### ambiguous
elif isinstance(value, element.klass):
mapped_value = value.dict_contents(as_class=as_class)
use_dict.__setitem__(element.name, mapped_value)
break
# return what we built/updated
return use_dict | [
"def",
"dict_contents",
"(",
"self",
",",
"use_dict",
"=",
"None",
",",
"as_class",
"=",
"dict",
")",
":",
"if",
"_debug",
":",
"Choice",
".",
"_debug",
"(",
"\"dict_contents use_dict=%r as_class=%r\"",
",",
"use_dict",
",",
"as_class",
")",
"# make/extend the dictionary of content",
"if",
"use_dict",
"is",
"None",
":",
"use_dict",
"=",
"as_class",
"(",
")",
"# look for the chosen element",
"for",
"element",
"in",
"self",
".",
"choiceElements",
":",
"value",
"=",
"getattr",
"(",
"self",
",",
"element",
".",
"name",
",",
"None",
")",
"if",
"value",
"is",
"None",
":",
"continue",
"if",
"issubclass",
"(",
"element",
".",
"klass",
",",
"Atomic",
")",
":",
"mapped_value",
"=",
"value",
"### ambiguous",
"elif",
"issubclass",
"(",
"element",
".",
"klass",
",",
"AnyAtomic",
")",
":",
"mapped_value",
"=",
"value",
".",
"value",
"### ambiguous",
"elif",
"isinstance",
"(",
"value",
",",
"element",
".",
"klass",
")",
":",
"mapped_value",
"=",
"value",
".",
"dict_contents",
"(",
"as_class",
"=",
"as_class",
")",
"use_dict",
".",
"__setitem__",
"(",
"element",
".",
"name",
",",
"mapped_value",
")",
"break",
"# return what we built/updated",
"return",
"use_dict"
]
| Return the contents of an object as a dict. | [
"Return",
"the",
"contents",
"of",
"an",
"object",
"as",
"a",
"dict",
"."
]
| python | train |
ibm-watson-iot/iot-python | tmp/src/things/things.py | https://github.com/ibm-watson-iot/iot-python/blob/195f05adce3fba4ec997017e41e02ebd85c0c4cc/tmp/src/things/things.py#L496-L509 | def getSchemaContent(self, schemaId, draft=False):
"""
Get the content for a schema. Parameters: schemaId (string), draft (boolean). Throws APIException on failure.
"""
if draft:
req = ApiClient.oneSchemaContentUrl % (self.host, "/draft", schemaId)
else:
req = ApiClient.oneSchemaContentUrl % (self.host, "", schemaId)
resp = requests.get(req, auth=self.credentials, verify=self.verify)
if resp.status_code == 200:
self.logger.debug("Schema content retrieved")
else:
raise ibmiotf.APIException(resp.status_code, "HTTP error getting schema content", resp)
return resp.json() | [
"def",
"getSchemaContent",
"(",
"self",
",",
"schemaId",
",",
"draft",
"=",
"False",
")",
":",
"if",
"draft",
":",
"req",
"=",
"ApiClient",
".",
"oneSchemaContentUrl",
"%",
"(",
"self",
".",
"host",
",",
"\"/draft\"",
",",
"schemaId",
")",
"else",
":",
"req",
"=",
"ApiClient",
".",
"oneSchemaContentUrl",
"%",
"(",
"self",
".",
"host",
",",
"\"\"",
",",
"schemaId",
")",
"resp",
"=",
"requests",
".",
"get",
"(",
"req",
",",
"auth",
"=",
"self",
".",
"credentials",
",",
"verify",
"=",
"self",
".",
"verify",
")",
"if",
"resp",
".",
"status_code",
"==",
"200",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Schema content retrieved\"",
")",
"else",
":",
"raise",
"ibmiotf",
".",
"APIException",
"(",
"resp",
".",
"status_code",
",",
"\"HTTP error getting schema content\"",
",",
"resp",
")",
"return",
"resp",
".",
"json",
"(",
")"
]
| Get the content for a schema. Parameters: schemaId (string), draft (boolean). Throws APIException on failure. | [
"Get",
"the",
"content",
"for",
"a",
"schema",
".",
"Parameters",
":",
"schemaId",
"(",
"string",
")",
"draft",
"(",
"boolean",
")",
".",
"Throws",
"APIException",
"on",
"failure",
"."
]
| python | test |
lemieuxl/pyplink | pyplink/pyplink.py | https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L560-L572 | def _grouper(iterable, n, fillvalue=0):
"""Collect data into fixed-length chunks or blocks.
Args:
n (int): The size of the chunk.
fillvalue (int): The fill value.
Returns:
iterator: An iterator over the chunks.
"""
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args) | [
"def",
"_grouper",
"(",
"iterable",
",",
"n",
",",
"fillvalue",
"=",
"0",
")",
":",
"args",
"=",
"[",
"iter",
"(",
"iterable",
")",
"]",
"*",
"n",
"return",
"zip_longest",
"(",
"fillvalue",
"=",
"fillvalue",
",",
"*",
"args",
")"
]
| Collect data into fixed-length chunks or blocks.
Args:
n (int): The size of the chunk.
fillvalue (int): The fill value.
Returns:
iterator: An iterator over the chunks. | [
"Collect",
"data",
"into",
"fixed",
"-",
"length",
"chunks",
"or",
"blocks",
"."
]
| python | train |
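_grouper is the classic itertools chunking recipe; in isolation (Python 3, where zip_longest lives in itertools):

from itertools import zip_longest

def grouper(iterable, n, fillvalue=0):
    # Collect data into fixed-length chunks, padding the last one.
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args)

print(list(grouper(range(7), 3)))
# [(0, 1, 2), (3, 4, 5), (6, 0, 0)]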
awslabs/serverless-application-model | samtranslator/plugins/api/implicit_api_plugin.py | https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/plugins/api/implicit_api_plugin.py#L282-L317 | def _maybe_add_conditions_to_implicit_api_paths(self, template):
"""
Add conditions to implicit API paths if necessary.
Implicit API resource methods are constructed from API events on individual serverless functions within the SAM
template. Since serverless functions can have conditions on them, it's possible to have a case where all methods
under a resource path have conditions on them. If all of these conditions evaluate to false, the entire resource
path should not be defined either. This method checks all resource paths' methods and if all methods under a
given path contain a condition, a composite condition is added to the overall template Conditions section and
that composite condition is added to the resource path.
"""
for api_id, api in template.iterate(SamResourceType.Api.value):
if not api.properties.get('__MANAGE_SWAGGER'):
continue
swagger = api.properties.get("DefinitionBody")
editor = SwaggerEditor(swagger)
for path in editor.iter_on_path():
all_method_conditions = set(
[condition for method, condition in self.api_conditions[api_id][path].items()]
)
at_least_one_method = len(all_method_conditions) > 0
all_methods_contain_conditions = None not in all_method_conditions
if at_least_one_method and all_methods_contain_conditions:
if len(all_method_conditions) == 1:
editor.make_path_conditional(path, all_method_conditions.pop())
else:
path_condition_name = self._path_condition_name(api_id, path)
self._add_combined_condition_to_template(
template.template_dict, path_condition_name, all_method_conditions)
editor.make_path_conditional(path, path_condition_name)
api.properties["DefinitionBody"] = editor.swagger
template.set(api_id, api) | [
"def",
"_maybe_add_conditions_to_implicit_api_paths",
"(",
"self",
",",
"template",
")",
":",
"for",
"api_id",
",",
"api",
"in",
"template",
".",
"iterate",
"(",
"SamResourceType",
".",
"Api",
".",
"value",
")",
":",
"if",
"not",
"api",
".",
"properties",
".",
"get",
"(",
"'__MANAGE_SWAGGER'",
")",
":",
"continue",
"swagger",
"=",
"api",
".",
"properties",
".",
"get",
"(",
"\"DefinitionBody\"",
")",
"editor",
"=",
"SwaggerEditor",
"(",
"swagger",
")",
"for",
"path",
"in",
"editor",
".",
"iter_on_path",
"(",
")",
":",
"all_method_conditions",
"=",
"set",
"(",
"[",
"condition",
"for",
"method",
",",
"condition",
"in",
"self",
".",
"api_conditions",
"[",
"api_id",
"]",
"[",
"path",
"]",
".",
"items",
"(",
")",
"]",
")",
"at_least_one_method",
"=",
"len",
"(",
"all_method_conditions",
")",
">",
"0",
"all_methods_contain_conditions",
"=",
"None",
"not",
"in",
"all_method_conditions",
"if",
"at_least_one_method",
"and",
"all_methods_contain_conditions",
":",
"if",
"len",
"(",
"all_method_conditions",
")",
"==",
"1",
":",
"editor",
".",
"make_path_conditional",
"(",
"path",
",",
"all_method_conditions",
".",
"pop",
"(",
")",
")",
"else",
":",
"path_condition_name",
"=",
"self",
".",
"_path_condition_name",
"(",
"api_id",
",",
"path",
")",
"self",
".",
"_add_combined_condition_to_template",
"(",
"template",
".",
"template_dict",
",",
"path_condition_name",
",",
"all_method_conditions",
")",
"editor",
".",
"make_path_conditional",
"(",
"path",
",",
"path_condition_name",
")",
"api",
".",
"properties",
"[",
"\"DefinitionBody\"",
"]",
"=",
"editor",
".",
"swagger",
"template",
".",
"set",
"(",
"api_id",
",",
"api",
")"
]
| Add conditions to implicit API paths if necessary.
Implicit API resource methods are constructed from API events on individual serverless functions within the SAM
template. Since serverless functions can have conditions on them, it's possible to have a case where all methods
under a resource path have conditions on them. If all of these conditions evaluate to false, the entire resource
path should not be defined either. This method checks all resource paths' methods and if all methods under a
given path contain a condition, a composite condition is added to the overall template Conditions section and
that composite condition is added to the resource path. | [
"Add",
"conditions",
"to",
"implicit",
"API",
"paths",
"if",
"necessary",
"."
]
| python | train |
mitsei/dlkit | dlkit/json_/assessment/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L9468-L9483 | def remove_child_banks(self, bank_id):
"""Removes all children from a bank.
arg: bank_id (osid.id.Id): the ``Id`` of a bank
raise: NotFound - ``bank_id`` is not in hierarchy
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_child_catalogs(catalog_id=bank_id)
return self._hierarchy_session.remove_children(id_=bank_id) | [
"def",
"remove_child_banks",
"(",
"self",
",",
"bank_id",
")",
":",
"# Implemented from template for",
"# osid.resource.BinHierarchyDesignSession.remove_child_bin_template",
"if",
"self",
".",
"_catalog_session",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_catalog_session",
".",
"remove_child_catalogs",
"(",
"catalog_id",
"=",
"bank_id",
")",
"return",
"self",
".",
"_hierarchy_session",
".",
"remove_children",
"(",
"id_",
"=",
"bank_id",
")"
]
| Removes all children from a bank.
arg: bank_id (osid.id.Id): the ``Id`` of a bank
raise: NotFound - ``bank_id`` is not in hierarchy
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.* | [
"Removes",
"all",
"children",
"from",
"a",
"bank",
"."
]
| python | train |
pyrogram/pyrogram | pyrogram/vendor/typing/typing.py | https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/vendor/typing/typing.py#L387-L403 | def _type_repr(obj):
"""Return the repr() of an object, special-casing types (internal helper).
If obj is a type, we return a shorter version than the default
type.__repr__, based on the module and qualified name, which is
typically enough to uniquely identify a type. For everything
else, we fall back on repr(obj).
"""
if isinstance(obj, type) and not isinstance(obj, TypingMeta):
if obj.__module__ == 'builtins':
return _qualname(obj)
return '%s.%s' % (obj.__module__, _qualname(obj))
if obj is ...:
return ('...')
if isinstance(obj, types.FunctionType):
return obj.__name__
return repr(obj) | [
"def",
"_type_repr",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"type",
")",
"and",
"not",
"isinstance",
"(",
"obj",
",",
"TypingMeta",
")",
":",
"if",
"obj",
".",
"__module__",
"==",
"'builtins'",
":",
"return",
"_qualname",
"(",
"obj",
")",
"return",
"'%s.%s'",
"%",
"(",
"obj",
".",
"__module__",
",",
"_qualname",
"(",
"obj",
")",
")",
"if",
"obj",
"is",
"...",
":",
"return",
"(",
"'...'",
")",
"if",
"isinstance",
"(",
"obj",
",",
"types",
".",
"FunctionType",
")",
":",
"return",
"obj",
".",
"__name__",
"return",
"repr",
"(",
"obj",
")"
]
| Return the repr() of an object, special-casing types (internal helper).
If obj is a type, we return a shorter version than the default
type.__repr__, based on the module and qualified name, which is
typically enough to uniquely identify a type. For everything
else, we fall back on repr(obj). | [
"Return",
"the",
"repr",
"()",
"of",
"an",
"object",
"special",
"-",
"casing",
"types",
"(",
"internal",
"helper",
")",
"."
]
| python | train |
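Assuming _type_repr is in scope (it is module-private in the typing backport), the branches behave like this:

import collections

def fn():
    pass

print(_type_repr(collections.OrderedDict))  # 'collections.OrderedDict'
print(_type_repr(int))                      # 'int' -- builtins keep the short name
print(_type_repr(...))                      # '...'
print(_type_repr(fn))                       # 'fn' -- plain functions report __name__
print(_type_repr('x'))                      # "'x'" -- everything else falls back to repr()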
PmagPy/PmagPy | dialogs/demag_dialogs.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/demag_dialogs.py#L182-L208 | def on_change_plot_cursor(self,event):
"""
If mouse is over data point making it selectable change the shape of the cursor
    @param: event -> the wx MouseEvent for that click
"""
if not self.xdata or not self.ydata: return
pos=event.GetPosition()
width, height = self.canvas.get_width_height()
pos[1] = height - pos[1]
xpick_data,ypick_data = pos
xdata_org = self.xdata
ydata_org = self.ydata
data_corrected = self.map.transData.transform(vstack([xdata_org,ydata_org]).T)
xdata,ydata = data_corrected.T
xdata = list(map(float,xdata))
ydata = list(map(float,ydata))
e = 4e0
if self.plot_setting == "Zoom":
self.canvas.SetCursor(wx.Cursor(wx.CURSOR_CROSS))
else:
self.canvas.SetCursor(wx.Cursor(wx.CURSOR_ARROW))
for i,(x,y) in enumerate(zip(xdata,ydata)):
if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
self.canvas.SetCursor(wx.Cursor(wx.CURSOR_HAND))
break
event.Skip() | [
"def",
"on_change_plot_cursor",
"(",
"self",
",",
"event",
")",
":",
"if",
"not",
"self",
".",
"xdata",
"or",
"not",
"self",
".",
"ydata",
":",
"return",
"pos",
"=",
"event",
".",
"GetPosition",
"(",
")",
"width",
",",
"height",
"=",
"self",
".",
"canvas",
".",
"get_width_height",
"(",
")",
"pos",
"[",
"1",
"]",
"=",
"height",
"-",
"pos",
"[",
"1",
"]",
"xpick_data",
",",
"ypick_data",
"=",
"pos",
"xdata_org",
"=",
"self",
".",
"xdata",
"ydata_org",
"=",
"self",
".",
"ydata",
"data_corrected",
"=",
"self",
".",
"map",
".",
"transData",
".",
"transform",
"(",
"vstack",
"(",
"[",
"xdata_org",
",",
"ydata_org",
"]",
")",
".",
"T",
")",
"xdata",
",",
"ydata",
"=",
"data_corrected",
".",
"T",
"xdata",
"=",
"list",
"(",
"map",
"(",
"float",
",",
"xdata",
")",
")",
"ydata",
"=",
"list",
"(",
"map",
"(",
"float",
",",
"ydata",
")",
")",
"e",
"=",
"4e0",
"if",
"self",
".",
"plot_setting",
"==",
"\"Zoom\"",
":",
"self",
".",
"canvas",
".",
"SetCursor",
"(",
"wx",
".",
"Cursor",
"(",
"wx",
".",
"CURSOR_CROSS",
")",
")",
"else",
":",
"self",
".",
"canvas",
".",
"SetCursor",
"(",
"wx",
".",
"Cursor",
"(",
"wx",
".",
"CURSOR_ARROW",
")",
")",
"for",
"i",
",",
"(",
"x",
",",
"y",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"xdata",
",",
"ydata",
")",
")",
":",
"if",
"0",
"<",
"sqrt",
"(",
"(",
"x",
"-",
"xpick_data",
")",
"**",
"2.",
"+",
"(",
"y",
"-",
"ypick_data",
")",
"**",
"2.",
")",
"<",
"e",
":",
"self",
".",
"canvas",
".",
"SetCursor",
"(",
"wx",
".",
"Cursor",
"(",
"wx",
".",
"CURSOR_HAND",
")",
")",
"break",
"event",
".",
"Skip",
"(",
")"
]
| If mouse is over data point making it selectable change the shape of the cursor
@param: event -> the wx MouseEvent for that click | [
"If",
"mouse",
"is",
"over",
"data",
"point",
"making",
"it",
"selectable",
"change",
"the",
"shape",
"of",
"the",
"cursor"
]
| python | train |
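The hover test above is a strict 0 < distance < e check in display pixels; isolated (note that a point exactly under the cursor is excluded, faithful to the loop above):

from math import sqrt

def near(click, point, tolerance=4.0):
    # Euclidean distance in display coordinates, as in the loop above.
    (cx, cy), (px, py) = click, point
    return 0 < sqrt((cx - px) ** 2 + (cy - py) ** 2) < tolerance

print(near((100, 100), (102, 101)))  # True  (distance ~2.24)
print(near((100, 100), (110, 100)))  # False (distance 10.0)
print(near((100, 100), (100, 100)))  # False (zero distance is excluded)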
pandas-dev/pandas | pandas/io/pytables.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1794-L1797 | def write_metadata(self, handler):
""" set the meta data """
if self.metadata is not None:
handler.write_metadata(self.cname, self.metadata) | [
"def",
"write_metadata",
"(",
"self",
",",
"handler",
")",
":",
"if",
"self",
".",
"metadata",
"is",
"not",
"None",
":",
"handler",
".",
"write_metadata",
"(",
"self",
".",
"cname",
",",
"self",
".",
"metadata",
")"
]
| set the meta data | [
"set",
"the",
"meta",
"data"
]
| python | train |
skymill/automated-ebs-snapshots | automated_ebs_snapshots/snapshot_manager.py | https://github.com/skymill/automated-ebs-snapshots/blob/9595bc49d458f6ffb93430722757d2284e878fab/automated_ebs_snapshots/snapshot_manager.py#L27-L40 | def _create_snapshot(volume):
""" Create a new snapshot
:type volume: boto.ec2.volume.Volume
:param volume: Volume to snapshot
:returns: boto.ec2.snapshot.Snapshot -- The new snapshot
"""
logger.info('Creating new snapshot for {}'.format(volume.id))
snapshot = volume.create_snapshot(
description="Automatic snapshot by Automated EBS Snapshots")
logger.info('Created snapshot {} for volume {}'.format(
snapshot.id, volume.id))
return snapshot | [
"def",
"_create_snapshot",
"(",
"volume",
")",
":",
"logger",
".",
"info",
"(",
"'Creating new snapshot for {}'",
".",
"format",
"(",
"volume",
".",
"id",
")",
")",
"snapshot",
"=",
"volume",
".",
"create_snapshot",
"(",
"description",
"=",
"\"Automatic snapshot by Automated EBS Snapshots\"",
")",
"logger",
".",
"info",
"(",
"'Created snapshot {} for volume {}'",
".",
"format",
"(",
"snapshot",
".",
"id",
",",
"volume",
".",
"id",
")",
")",
"return",
"snapshot"
]
| Create a new snapshot
:type volume: boto.ec2.volume.Volume
:param volume: Volume to snapshot
:returns: boto.ec2.snapshot.Snapshot -- The new snapshot | [
"Create",
"a",
"new",
"snapshot"
]
| python | train |
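A hypothetical driver for _create_snapshot using the boto 2 API the module already relies on (region and volume id are placeholders):

import boto.ec2

connection = boto.ec2.connect_to_region('us-east-1')
volume = connection.get_all_volumes(volume_ids=['vol-12345678'])[0]
snapshot = _create_snapshot(volume)
print(snapshot.id)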
workforce-data-initiative/skills-utils | skills_utils/es.py | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/es.py#L48-L56 | def create_index(index_name, index_config, client):
"""Creates an index with a given configuration
Args:
index_name (str): Name of the index you want to create
index_config (dict) configuration for the index
client (Elasticsearch.IndicesClient) the Elasticsearch client
"""
client.create(index=index_name, body=index_config) | [
"def",
"create_index",
"(",
"index_name",
",",
"index_config",
",",
"client",
")",
":",
"client",
".",
"create",
"(",
"index",
"=",
"index_name",
",",
"body",
"=",
"index_config",
")"
]
| Creates an index with a given configuration
Args:
index_name (str): Name of the index you want to create
index_config (dict) configuration for the index
client (Elasticsearch.IndicesClient) the Elasticsearch client | [
"Creates",
"an",
"index",
"with",
"a",
"given",
"configuration"
]
| python | train |
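A usage sketch for create_index with an older elasticsearch-py client (host and mapping are placeholders; newer clients changed the body argument):

from elasticsearch import Elasticsearch

es = Elasticsearch(['localhost:9200'])
index_config = {
    'settings': {'number_of_shards': 1},
    'mappings': {'properties': {'title': {'type': 'text'}}},
}
create_index('job_postings', index_config, es.indices)  # es.indices is the IndicesClient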
jic-dtool/dtoolcore | dtoolcore/utils.py | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/utils.py#L42-L59 | def generous_parse_uri(uri):
"""Return a urlparse.ParseResult object with the results of parsing the
given URI. This has the same properties as the result of parse_uri.
When passed a relative path, it determines the absolute path, sets the
scheme to file, the netloc to localhost and returns a parse of the result.
"""
parse_result = urlparse(uri)
if parse_result.scheme == '':
abspath = os.path.abspath(parse_result.path)
if IS_WINDOWS:
abspath = windows_to_unix_path(abspath)
fixed_uri = "file://{}".format(abspath)
parse_result = urlparse(fixed_uri)
return parse_result | [
"def",
"generous_parse_uri",
"(",
"uri",
")",
":",
"parse_result",
"=",
"urlparse",
"(",
"uri",
")",
"if",
"parse_result",
".",
"scheme",
"==",
"''",
":",
"abspath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"parse_result",
".",
"path",
")",
"if",
"IS_WINDOWS",
":",
"abspath",
"=",
"windows_to_unix_path",
"(",
"abspath",
")",
"fixed_uri",
"=",
"\"file://{}\"",
".",
"format",
"(",
"abspath",
")",
"parse_result",
"=",
"urlparse",
"(",
"fixed_uri",
")",
"return",
"parse_result"
]
| Return a urlparse.ParseResult object with the results of parsing the
given URI. This has the same properties as the result of parse_uri.
When passed a relative path, it determines the absolute path, sets the
scheme to file, the netloc to localhost and returns a parse of the result. | [
"Return",
"a",
"urlparse",
".",
"ParseResult",
"object",
"with",
"the",
"results",
"of",
"parsing",
"the",
"given",
"URI",
".",
"This",
"has",
"the",
"same",
"properties",
"as",
"the",
"result",
"of",
"parse_uri",
"."
]
| python | train |
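generous_parse_uri in action, assuming the function above is in scope (paths are illustrative):

result = generous_parse_uri('data/my_dataset')           # bare relative path
print(result.scheme)                                     # 'file'
print(result.path)                                       # absolute path to data/my_dataset
print(generous_parse_uri('s3://bucket/prefix').scheme)   # 's3' -- qualified URIs pass through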