text (string, lengths 89-104k) | code_tokens (list) | avg_line_len (float64, 7.91-980) | score (float64, 0-630)
---|---|---|---|
def _get_parser(description):
"""Build an ArgumentParser with common arguments for both operations."""
parser = argparse.ArgumentParser(description=description)
parser.add_argument('key', help="Camellia key.")
parser.add_argument('input_file', nargs='*',
help="File(s) to read as input data. If none are "
"provided, assume STDIN.")
parser.add_argument('-o', '--output_file',
help="Output file. If not provided, assume STDOUT.")
parser.add_argument('-l', '--keylen', type=int, default=128,
help="Length of 'key' in bits, must be in one of %s "
"(default 128)." % camcrypt.ACCEPTABLE_KEY_LENGTHS)
parser.add_argument('-H', '--hexkey', action='store_true',
help="Treat 'key' as a hex string rather than binary.")
return parser | [
"def",
"_get_parser",
"(",
"description",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"description",
")",
"parser",
".",
"add_argument",
"(",
"'key'",
",",
"help",
"=",
"\"Camellia key.\"",
")",
"parser",
".",
"add_argument",
"(",
"'input_file'",
",",
"nargs",
"=",
"'*'",
",",
"help",
"=",
"\"File(s) to read as input data. If none are \"",
"\"provided, assume STDIN.\"",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--output_file'",
",",
"help",
"=",
"\"Output file. If not provided, assume STDOUT.\"",
")",
"parser",
".",
"add_argument",
"(",
"'-l'",
",",
"'--keylen'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"128",
",",
"help",
"=",
"\"Length of 'key' in bits, must be in one of %s \"",
"\"(default 128).\"",
"%",
"camcrypt",
".",
"ACCEPTABLE_KEY_LENGTHS",
")",
"parser",
".",
"add_argument",
"(",
"'-H'",
",",
"'--hexkey'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Treat 'key' as a hex string rather than binary.\"",
")",
"return",
"parser"
]
| 55.375 | 22.375 |
def touch(self, mode=0o666, exist_ok=True):
"""
Create this file with the given access mode, if it doesn't exist.
"""
if self._closed:
self._raise_closed()
if exist_ok:
# First try to bump modification time
# Implementation note: GNU touch uses the UTIME_NOW option of
# the utimensat() / futimens() functions.
try:
self._accessor.utime(self, None)
except OSError:
# Avoid exception chaining
pass
else:
return
flags = os.O_CREAT | os.O_WRONLY
if not exist_ok:
flags |= os.O_EXCL
fd = self._raw_open(flags, mode)
os.close(fd) | [
"def",
"touch",
"(",
"self",
",",
"mode",
"=",
"0o666",
",",
"exist_ok",
"=",
"True",
")",
":",
"if",
"self",
".",
"_closed",
":",
"self",
".",
"_raise_closed",
"(",
")",
"if",
"exist_ok",
":",
"# First try to bump modification time",
"# Implementation note: GNU touch uses the UTIME_NOW option of",
"# the utimensat() / futimens() functions.",
"try",
":",
"self",
".",
"_accessor",
".",
"utime",
"(",
"self",
",",
"None",
")",
"except",
"OSError",
":",
"# Avoid exception chaining",
"pass",
"else",
":",
"return",
"flags",
"=",
"os",
".",
"O_CREAT",
"|",
"os",
".",
"O_WRONLY",
"if",
"not",
"exist_ok",
":",
"flags",
"|=",
"os",
".",
"O_EXCL",
"fd",
"=",
"self",
".",
"_raw_open",
"(",
"flags",
",",
"mode",
")",
"os",
".",
"close",
"(",
"fd",
")"
]
| 33.409091 | 13.136364 |
def confd_state_rest_listen_tcp_ip(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
rest = ET.SubElement(confd_state, "rest")
listen = ET.SubElement(rest, "listen")
tcp = ET.SubElement(listen, "tcp")
ip = ET.SubElement(tcp, "ip")
ip.text = kwargs.pop('ip')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"confd_state_rest_listen_tcp_ip",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"confd_state",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"confd-state\"",
",",
"xmlns",
"=",
"\"http://tail-f.com/yang/confd-monitoring\"",
")",
"rest",
"=",
"ET",
".",
"SubElement",
"(",
"confd_state",
",",
"\"rest\"",
")",
"listen",
"=",
"ET",
".",
"SubElement",
"(",
"rest",
",",
"\"listen\"",
")",
"tcp",
"=",
"ET",
".",
"SubElement",
"(",
"listen",
",",
"\"tcp\"",
")",
"ip",
"=",
"ET",
".",
"SubElement",
"(",
"tcp",
",",
"\"ip\"",
")",
"ip",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'ip'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
]
| 40.923077 | 13.307692 |
def _from_timeseries(ts1, ts2, stride, fftlength=None, overlap=None,
window=None, **kwargs):
"""Generate a time-frequency coherence
:class:`~gwpy.spectrogram.Spectrogram` from a pair of
:class:`~gwpy.timeseries.TimeSeries`.
For each `stride`, a PSD :class:`~gwpy.frequencyseries.FrequencySeries`
is generated, with all resulting spectra stacked in time and returned.
"""
# check sampling rates
if ts1.sample_rate.to('Hertz') != ts2.sample_rate.to('Hertz'):
sampling = min(ts1.sample_rate.value, ts2.sample_rate.value)
# resample higher rate series
if ts1.sample_rate.value == sampling:
ts2 = ts2.resample(sampling)
else:
ts1 = ts1.resample(sampling)
else:
sampling = ts1.sample_rate.value
# format FFT parameters
if fftlength is None:
fftlength = stride
if overlap is None:
overlap = 0
nstride = int(stride * sampling)
# get size of spectrogram
nsteps = int(ts1.size // nstride)
nfreqs = int(fftlength * sampling // 2 + 1)
# generate output spectrogram
out = Spectrogram(zeros((nsteps, nfreqs)), epoch=ts1.epoch, dt=stride,
f0=0, df=1/fftlength, copy=True, unit='coherence')
if not nsteps:
return out
# stride through TimeSeries, recording PSDs as columns of spectrogram
for step in range(nsteps):
# find step TimeSeries
idx = nstride * step
idx_end = idx + nstride
stepseries1 = ts1[idx:idx_end]
stepseries2 = ts2[idx:idx_end]
stepcoh = stepseries1.coherence(stepseries2, fftlength=fftlength,
overlap=overlap, window=window,
**kwargs)
out.value[step] = stepcoh.value
return out | [
"def",
"_from_timeseries",
"(",
"ts1",
",",
"ts2",
",",
"stride",
",",
"fftlength",
"=",
"None",
",",
"overlap",
"=",
"None",
",",
"window",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# check sampling rates",
"if",
"ts1",
".",
"sample_rate",
".",
"to",
"(",
"'Hertz'",
")",
"!=",
"ts2",
".",
"sample_rate",
".",
"to",
"(",
"'Hertz'",
")",
":",
"sampling",
"=",
"min",
"(",
"ts1",
".",
"sample_rate",
".",
"value",
",",
"ts2",
".",
"sample_rate",
".",
"value",
")",
"# resample higher rate series",
"if",
"ts1",
".",
"sample_rate",
".",
"value",
"==",
"sampling",
":",
"ts2",
"=",
"ts2",
".",
"resample",
"(",
"sampling",
")",
"else",
":",
"ts1",
"=",
"ts1",
".",
"resample",
"(",
"sampling",
")",
"else",
":",
"sampling",
"=",
"ts1",
".",
"sample_rate",
".",
"value",
"# format FFT parameters",
"if",
"fftlength",
"is",
"None",
":",
"fftlength",
"=",
"stride",
"if",
"overlap",
"is",
"None",
":",
"overlap",
"=",
"0",
"nstride",
"=",
"int",
"(",
"stride",
"*",
"sampling",
")",
"# get size of spectrogram",
"nsteps",
"=",
"int",
"(",
"ts1",
".",
"size",
"//",
"nstride",
")",
"nfreqs",
"=",
"int",
"(",
"fftlength",
"*",
"sampling",
"//",
"2",
"+",
"1",
")",
"# generate output spectrogram",
"out",
"=",
"Spectrogram",
"(",
"zeros",
"(",
"(",
"nsteps",
",",
"nfreqs",
")",
")",
",",
"epoch",
"=",
"ts1",
".",
"epoch",
",",
"dt",
"=",
"stride",
",",
"f0",
"=",
"0",
",",
"df",
"=",
"1",
"/",
"fftlength",
",",
"copy",
"=",
"True",
",",
"unit",
"=",
"'coherence'",
")",
"if",
"not",
"nsteps",
":",
"return",
"out",
"# stride through TimeSeries, recording PSDs as columns of spectrogram",
"for",
"step",
"in",
"range",
"(",
"nsteps",
")",
":",
"# find step TimeSeries",
"idx",
"=",
"nstride",
"*",
"step",
"idx_end",
"=",
"idx",
"+",
"nstride",
"stepseries1",
"=",
"ts1",
"[",
"idx",
":",
"idx_end",
"]",
"stepseries2",
"=",
"ts2",
"[",
"idx",
":",
"idx_end",
"]",
"stepcoh",
"=",
"stepseries1",
".",
"coherence",
"(",
"stepseries2",
",",
"fftlength",
"=",
"fftlength",
",",
"overlap",
"=",
"overlap",
",",
"window",
"=",
"window",
",",
"*",
"*",
"kwargs",
")",
"out",
".",
"value",
"[",
"step",
"]",
"=",
"stepcoh",
".",
"value",
"return",
"out"
]
| 34.423077 | 18.711538 |
def list_installed_genomes(genome_dir=None):
"""
List all available genomes.
Parameters
----------
genome_dir : str
Directory with installed genomes.
Returns
-------
list with genome names
"""
if not genome_dir:
genome_dir = config.get("genome_dir", None)
if not genome_dir:
raise norns.exceptions.ConfigError("Please provide or configure a genome_dir")
return [f for f in os.listdir(genome_dir) if
_is_genome_dir(genome_dir + "/" + f)] | [
"def",
"list_installed_genomes",
"(",
"genome_dir",
"=",
"None",
")",
":",
"if",
"not",
"genome_dir",
":",
"genome_dir",
"=",
"config",
".",
"get",
"(",
"\"genome_dir\"",
",",
"None",
")",
"if",
"not",
"genome_dir",
":",
"raise",
"norns",
".",
"exceptions",
".",
"ConfigError",
"(",
"\"Please provide or configure a genome_dir\"",
")",
"return",
"[",
"f",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"genome_dir",
")",
"if",
"_is_genome_dir",
"(",
"genome_dir",
"+",
"\"/\"",
"+",
"f",
")",
"]"
]
| 25.25 | 19.45 |
def get_libs_dir(self, arch):
'''The libs dir for a given arch.'''
ensure_dir(join(self.libs_dir, arch))
return join(self.libs_dir, arch) | [
"def",
"get_libs_dir",
"(",
"self",
",",
"arch",
")",
":",
"ensure_dir",
"(",
"join",
"(",
"self",
".",
"libs_dir",
",",
"arch",
")",
")",
"return",
"join",
"(",
"self",
".",
"libs_dir",
",",
"arch",
")"
]
| 39.5 | 5 |
def entry_links(self):
""" Given a parsed feed, return the links to its entries, including ones
which disappeared (as a quick-and-dirty way to support deletions)
"""
return {entry['link'] for entry in self.feed.entries if entry and entry.get('link')} | [
"def",
"entry_links",
"(",
"self",
")",
":",
"return",
"{",
"entry",
"[",
"'link'",
"]",
"for",
"entry",
"in",
"self",
".",
"feed",
".",
"entries",
"if",
"entry",
"and",
"entry",
".",
"get",
"(",
"'link'",
")",
"}"
]
| 55.6 | 20.6 |
def set_chat_title(
self,
chat_id: Union[int, str],
title: str
) -> bool:
"""Use this method to change the title of a chat.
Titles can't be changed for private chats.
You must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note:
In regular groups (non-supergroups), this method will only work if the "All Members Are Admins"
setting is off.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
title (``str``):
New chat title, 1-255 characters.
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
``ValueError`` if a chat_id belongs to user.
"""
peer = self.resolve_peer(chat_id)
if isinstance(peer, types.InputPeerChat):
self.send(
functions.messages.EditChatTitle(
chat_id=peer.chat_id,
title=title
)
)
elif isinstance(peer, types.InputPeerChannel):
self.send(
functions.channels.EditTitle(
channel=peer,
title=title
)
)
else:
raise ValueError("The chat_id \"{}\" belongs to a user".format(chat_id))
return True | [
"def",
"set_chat_title",
"(",
"self",
",",
"chat_id",
":",
"Union",
"[",
"int",
",",
"str",
"]",
",",
"title",
":",
"str",
")",
"->",
"bool",
":",
"peer",
"=",
"self",
".",
"resolve_peer",
"(",
"chat_id",
")",
"if",
"isinstance",
"(",
"peer",
",",
"types",
".",
"InputPeerChat",
")",
":",
"self",
".",
"send",
"(",
"functions",
".",
"messages",
".",
"EditChatTitle",
"(",
"chat_id",
"=",
"peer",
".",
"chat_id",
",",
"title",
"=",
"title",
")",
")",
"elif",
"isinstance",
"(",
"peer",
",",
"types",
".",
"InputPeerChannel",
")",
":",
"self",
".",
"send",
"(",
"functions",
".",
"channels",
".",
"EditTitle",
"(",
"channel",
"=",
"peer",
",",
"title",
"=",
"title",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"The chat_id \\\"{}\\\" belongs to a user\"",
".",
"format",
"(",
"chat_id",
")",
")",
"return",
"True"
]
| 31.255319 | 22.659574 |
def add_license(self, contents):
"""Adds the given ``contents`` as a new custom license to the J-Link.
Args:
self (JLink): the ``JLink`` instance
contents: the string contents of the new custom license
Returns:
``True`` if license was added, ``False`` if license already existed.
Raises:
JLinkException: if the write fails.
Note:
J-Link V9 and J-Link ULTRA/PRO V4 have 336 Bytes of memory for
licenses, while older versions of 80 bytes.
"""
buf_size = len(contents)
buf = (ctypes.c_char * (buf_size + 1))(*contents.encode())
res = self._dll.JLINK_EMU_AddLicense(buf)
if res == -1:
raise errors.JLinkException('Unspecified error.')
elif res == -2:
raise errors.JLinkException('Failed to read/write license area.')
elif res == -3:
raise errors.JLinkException('J-Link out of space.')
return (res == 0) | [
"def",
"add_license",
"(",
"self",
",",
"contents",
")",
":",
"buf_size",
"=",
"len",
"(",
"contents",
")",
"buf",
"=",
"(",
"ctypes",
".",
"c_char",
"*",
"(",
"buf_size",
"+",
"1",
")",
")",
"(",
"*",
"contents",
".",
"encode",
"(",
")",
")",
"res",
"=",
"self",
".",
"_dll",
".",
"JLINK_EMU_AddLicense",
"(",
"buf",
")",
"if",
"res",
"==",
"-",
"1",
":",
"raise",
"errors",
".",
"JLinkException",
"(",
"'Unspecified error.'",
")",
"elif",
"res",
"==",
"-",
"2",
":",
"raise",
"errors",
".",
"JLinkException",
"(",
"'Failed to read/write license area.'",
")",
"elif",
"res",
"==",
"-",
"3",
":",
"raise",
"errors",
".",
"JLinkException",
"(",
"'J-Link out of space.'",
")",
"return",
"(",
"res",
"==",
"0",
")"
]
| 32.533333 | 23.4 |
def standardize_polygons_str(data_str):
"""Given a POLYGON string, standardize the coordinates to a 1x1 grid.
Input : data_str (taken from above)
Output: tuple of polygon objects
"""
# find all of the polygons in the letter (for instance an A
# needs to be constructed from 2 polygons)
path_strs = re.findall("\(\(([^\)]+?)\)\)", data_str.strip())
# convert the data into a numpy array
polygons_data = []
for path_str in path_strs:
data = np.array([
tuple(map(float, x.split())) for x in path_str.strip().split(",")])
polygons_data.append(data)
# standardize the coordinates
min_coords = np.vstack(data.min(0) for data in polygons_data).min(0)
max_coords = np.vstack(data.max(0) for data in polygons_data).max(0)
for data in polygons_data:
data[:, ] -= min_coords
data[:, ] /= (max_coords - min_coords)
polygons = []
for data in polygons_data:
polygons.append(load_wkt(
"POLYGON((%s))" % ",".join(" ".join(map(str, x)) for x in data)))
return tuple(polygons) | [
"def",
"standardize_polygons_str",
"(",
"data_str",
")",
":",
"# find all of the polygons in the letter (for instance an A",
"# needs to be constructed from 2 polygons)",
"path_strs",
"=",
"re",
".",
"findall",
"(",
"\"\\(\\(([^\\)]+?)\\)\\)\"",
",",
"data_str",
".",
"strip",
"(",
")",
")",
"# convert the data into a numpy array",
"polygons_data",
"=",
"[",
"]",
"for",
"path_str",
"in",
"path_strs",
":",
"data",
"=",
"np",
".",
"array",
"(",
"[",
"tuple",
"(",
"map",
"(",
"float",
",",
"x",
".",
"split",
"(",
")",
")",
")",
"for",
"x",
"in",
"path_str",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\",\"",
")",
"]",
")",
"polygons_data",
".",
"append",
"(",
"data",
")",
"# standardize the coordinates",
"min_coords",
"=",
"np",
".",
"vstack",
"(",
"data",
".",
"min",
"(",
"0",
")",
"for",
"data",
"in",
"polygons_data",
")",
".",
"min",
"(",
"0",
")",
"max_coords",
"=",
"np",
".",
"vstack",
"(",
"data",
".",
"max",
"(",
"0",
")",
"for",
"data",
"in",
"polygons_data",
")",
".",
"max",
"(",
"0",
")",
"for",
"data",
"in",
"polygons_data",
":",
"data",
"[",
":",
",",
"]",
"-=",
"min_coords",
"data",
"[",
":",
",",
"]",
"/=",
"(",
"max_coords",
"-",
"min_coords",
")",
"polygons",
"=",
"[",
"]",
"for",
"data",
"in",
"polygons_data",
":",
"polygons",
".",
"append",
"(",
"load_wkt",
"(",
"\"POLYGON((%s))\"",
"%",
"\",\"",
".",
"join",
"(",
"\" \"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"x",
")",
")",
"for",
"x",
"in",
"data",
")",
")",
")",
"return",
"tuple",
"(",
"polygons",
")"
]
| 36.758621 | 17.103448 |
def parse_rest_doc(doc):
""" Extract the headers, delimiters, and text from reST-formatted docstrings.
Parameters
----------
doc: Union[str, None]
Returns
-------
Dict[str, Section] """
class Section(object):
def __init__(self, header=None, body=None):
self.header = header # str
self.body = body # str
doc_sections = OrderedDict([('', Section(header=''))])
if not doc:
return doc_sections
doc = cleandoc(doc)
lines = iter(doc.splitlines())
header = ''
body = []
section = Section(header=header)
line = ''
while True:
try:
prev_line = line
line = next(lines)
# section header encountered
if is_delimiter(line) and 0 < len(prev_line) <= len(line):
# prev-prev-line is overline
if len(body) >= 2 and len(body[-2]) == len(line) \
and body[-2][0] == line[0] and is_delimiter(body[-2]):
lim = -2
else:
lim = -1
section.body = "\n".join(body[:lim]).rstrip()
doc_sections.update([(header.strip(), section)])
section = Section(header="\n".join(body[lim:] + [line]))
header = prev_line
body = []
line = ''
else:
body.append(line)
except StopIteration:
section.body = "\n".join(body).rstrip()
doc_sections.update([(header.strip(), section)])
break
return doc_sections | [
"def",
"parse_rest_doc",
"(",
"doc",
")",
":",
"class",
"Section",
"(",
"object",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"header",
"=",
"None",
",",
"body",
"=",
"None",
")",
":",
"self",
".",
"header",
"=",
"header",
"# str",
"self",
".",
"body",
"=",
"body",
"# str",
"doc_sections",
"=",
"OrderedDict",
"(",
"[",
"(",
"''",
",",
"Section",
"(",
"header",
"=",
"''",
")",
")",
"]",
")",
"if",
"not",
"doc",
":",
"return",
"doc_sections",
"doc",
"=",
"cleandoc",
"(",
"doc",
")",
"lines",
"=",
"iter",
"(",
"doc",
".",
"splitlines",
"(",
")",
")",
"header",
"=",
"''",
"body",
"=",
"[",
"]",
"section",
"=",
"Section",
"(",
"header",
"=",
"header",
")",
"line",
"=",
"''",
"while",
"True",
":",
"try",
":",
"prev_line",
"=",
"line",
"line",
"=",
"next",
"(",
"lines",
")",
"# section header encountered",
"if",
"is_delimiter",
"(",
"line",
")",
"and",
"0",
"<",
"len",
"(",
"prev_line",
")",
"<=",
"len",
"(",
"line",
")",
":",
"# prev-prev-line is overline",
"if",
"len",
"(",
"body",
")",
">=",
"2",
"and",
"len",
"(",
"body",
"[",
"-",
"2",
"]",
")",
"==",
"len",
"(",
"line",
")",
"and",
"body",
"[",
"-",
"2",
"]",
"[",
"0",
"]",
"==",
"line",
"[",
"0",
"]",
"and",
"is_delimiter",
"(",
"body",
"[",
"-",
"2",
"]",
")",
":",
"lim",
"=",
"-",
"2",
"else",
":",
"lim",
"=",
"-",
"1",
"section",
".",
"body",
"=",
"\"\\n\"",
".",
"join",
"(",
"body",
"[",
":",
"lim",
"]",
")",
".",
"rstrip",
"(",
")",
"doc_sections",
".",
"update",
"(",
"[",
"(",
"header",
".",
"strip",
"(",
")",
",",
"section",
")",
"]",
")",
"section",
"=",
"Section",
"(",
"header",
"=",
"\"\\n\"",
".",
"join",
"(",
"body",
"[",
"lim",
":",
"]",
"+",
"[",
"line",
"]",
")",
")",
"header",
"=",
"prev_line",
"body",
"=",
"[",
"]",
"line",
"=",
"''",
"else",
":",
"body",
".",
"append",
"(",
"line",
")",
"except",
"StopIteration",
":",
"section",
".",
"body",
"=",
"\"\\n\"",
".",
"join",
"(",
"body",
")",
".",
"rstrip",
"(",
")",
"doc_sections",
".",
"update",
"(",
"[",
"(",
"header",
".",
"strip",
"(",
")",
",",
"section",
")",
"]",
")",
"break",
"return",
"doc_sections"
]
| 29.37037 | 19.907407 |
def getQCAnalyses(self, qctype=None, review_state=None):
"""return the QC analyses performed in the worksheet in which, at
least, one sample of this AR is present.
Depending on qctype value, returns the analyses of:
- 'b': all Blank Reference Samples used in related worksheet/s
- 'c': all Control Reference Samples used in related worksheet/s
- 'd': duplicates only for samples contained in this AR
If qctype==None, returns all type of qc analyses mentioned above
"""
qcanalyses = []
suids = []
ans = self.getAnalyses()
wf = getToolByName(self, 'portal_workflow')
for an in ans:
an = an.getObject()
if an.getServiceUID() not in suids:
suids.append(an.getServiceUID())
def valid_dup(wan):
if wan.portal_type == 'ReferenceAnalysis':
return False
an_state = wf.getInfoFor(wan, 'review_state')
return \
wan.portal_type == 'DuplicateAnalysis' \
and wan.getRequestID() == self.id \
and (review_state is None or an_state in review_state)
def valid_ref(wan):
if wan.portal_type != 'ReferenceAnalysis':
return False
an_state = wf.getInfoFor(wan, 'review_state')
an_reftype = wan.getReferenceType()
return wan.getServiceUID() in suids \
and wan not in qcanalyses \
and (qctype is None or an_reftype == qctype) \
and (review_state is None or an_state in review_state)
for an in ans:
an = an.getObject()
ws = an.getWorksheet()
if not ws:
continue
was = ws.getAnalyses()
for wa in was:
if valid_dup(wa):
qcanalyses.append(wa)
elif valid_ref(wa):
qcanalyses.append(wa)
return qcanalyses | [
"def",
"getQCAnalyses",
"(",
"self",
",",
"qctype",
"=",
"None",
",",
"review_state",
"=",
"None",
")",
":",
"qcanalyses",
"=",
"[",
"]",
"suids",
"=",
"[",
"]",
"ans",
"=",
"self",
".",
"getAnalyses",
"(",
")",
"wf",
"=",
"getToolByName",
"(",
"self",
",",
"'portal_workflow'",
")",
"for",
"an",
"in",
"ans",
":",
"an",
"=",
"an",
".",
"getObject",
"(",
")",
"if",
"an",
".",
"getServiceUID",
"(",
")",
"not",
"in",
"suids",
":",
"suids",
".",
"append",
"(",
"an",
".",
"getServiceUID",
"(",
")",
")",
"def",
"valid_dup",
"(",
"wan",
")",
":",
"if",
"wan",
".",
"portal_type",
"==",
"'ReferenceAnalysis'",
":",
"return",
"False",
"an_state",
"=",
"wf",
".",
"getInfoFor",
"(",
"wan",
",",
"'review_state'",
")",
"return",
"wan",
".",
"portal_type",
"==",
"'DuplicateAnalysis'",
"and",
"wan",
".",
"getRequestID",
"(",
")",
"==",
"self",
".",
"id",
"and",
"(",
"review_state",
"is",
"None",
"or",
"an_state",
"in",
"review_state",
")",
"def",
"valid_ref",
"(",
"wan",
")",
":",
"if",
"wan",
".",
"portal_type",
"!=",
"'ReferenceAnalysis'",
":",
"return",
"False",
"an_state",
"=",
"wf",
".",
"getInfoFor",
"(",
"wan",
",",
"'review_state'",
")",
"an_reftype",
"=",
"wan",
".",
"getReferenceType",
"(",
")",
"return",
"wan",
".",
"getServiceUID",
"(",
")",
"in",
"suids",
"and",
"wan",
"not",
"in",
"qcanalyses",
"and",
"(",
"qctype",
"is",
"None",
"or",
"an_reftype",
"==",
"qctype",
")",
"and",
"(",
"review_state",
"is",
"None",
"or",
"an_state",
"in",
"review_state",
")",
"for",
"an",
"in",
"ans",
":",
"an",
"=",
"an",
".",
"getObject",
"(",
")",
"ws",
"=",
"an",
".",
"getWorksheet",
"(",
")",
"if",
"not",
"ws",
":",
"continue",
"was",
"=",
"ws",
".",
"getAnalyses",
"(",
")",
"for",
"wa",
"in",
"was",
":",
"if",
"valid_dup",
"(",
"wa",
")",
":",
"qcanalyses",
".",
"append",
"(",
"wa",
")",
"elif",
"valid_ref",
"(",
"wa",
")",
":",
"qcanalyses",
".",
"append",
"(",
"wa",
")",
"return",
"qcanalyses"
]
| 37.283019 | 17.509434 |
def create_directory(self, filename):
"""Create a subdirectory in the temporary directory."""
path = os.path.join(self.path, filename)
makedirs(path)
return path | [
"def",
"create_directory",
"(",
"self",
",",
"filename",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"filename",
")",
"makedirs",
"(",
"path",
")",
"return",
"path"
]
| 37.8 | 10 |
def _add_ts2q(self, pore, queue):
"""
Helper method to add throats to the cluster queue
"""
net = self.project.network
elem_type = 'throat'
# Find throats connected to newly invaded pore
Ts = net.find_neighbor_throats(pores=pore)
# Remove already invaded throats from Ts
Ts = Ts[self['throat.invasion_sequence'][Ts] <= 0]
tcp = self['throat.entry_pressure']
if len(Ts) > 0:
self._interface_Ts[Ts] = True
for T in Ts:
data = []
# Pc
if self._bidirectional:
# Get index of pore being invaded next and apply correct
# entry pressure
pmap = net['throat.conns'][T] != pore
pind = list(pmap).index(True)
data.append(tcp[T][pind])
else:
data.append(tcp[T])
# Element Index
data.append(T)
# Element Type (Pore of Throat)
data.append(elem_type)
hq.heappush(queue, data) | [
"def",
"_add_ts2q",
"(",
"self",
",",
"pore",
",",
"queue",
")",
":",
"net",
"=",
"self",
".",
"project",
".",
"network",
"elem_type",
"=",
"'throat'",
"# Find throats connected to newly invaded pore",
"Ts",
"=",
"net",
".",
"find_neighbor_throats",
"(",
"pores",
"=",
"pore",
")",
"# Remove already invaded throats from Ts",
"Ts",
"=",
"Ts",
"[",
"self",
"[",
"'throat.invasion_sequence'",
"]",
"[",
"Ts",
"]",
"<=",
"0",
"]",
"tcp",
"=",
"self",
"[",
"'throat.entry_pressure'",
"]",
"if",
"len",
"(",
"Ts",
")",
">",
"0",
":",
"self",
".",
"_interface_Ts",
"[",
"Ts",
"]",
"=",
"True",
"for",
"T",
"in",
"Ts",
":",
"data",
"=",
"[",
"]",
"# Pc",
"if",
"self",
".",
"_bidirectional",
":",
"# Get index of pore being invaded next and apply correct",
"# entry pressure",
"pmap",
"=",
"net",
"[",
"'throat.conns'",
"]",
"[",
"T",
"]",
"!=",
"pore",
"pind",
"=",
"list",
"(",
"pmap",
")",
".",
"index",
"(",
"True",
")",
"data",
".",
"append",
"(",
"tcp",
"[",
"T",
"]",
"[",
"pind",
"]",
")",
"else",
":",
"data",
".",
"append",
"(",
"tcp",
"[",
"T",
"]",
")",
"# Element Index",
"data",
".",
"append",
"(",
"T",
")",
"# Element Type (Pore of Throat)",
"data",
".",
"append",
"(",
"elem_type",
")",
"hq",
".",
"heappush",
"(",
"queue",
",",
"data",
")"
]
| 38.206897 | 9.793103 |
def chroma(sr, n_fft, n_chroma=12, A440=440.0, ctroct=5.0,
octwidth=2, norm=2, base_c=True, dtype=np.float32):
"""Create a Filterbank matrix to convert STFT to chroma
Parameters
----------
sr : number > 0 [scalar]
audio sampling rate
n_fft : int > 0 [scalar]
number of FFT bins
n_chroma : int > 0 [scalar]
number of chroma bins
A440 : float > 0 [scalar]
Reference frequency for A440
ctroct : float > 0 [scalar]
octwidth : float > 0 or None [scalar]
`ctroct` and `octwidth` specify a dominance window -
a Gaussian weighting centered on `ctroct` (in octs, A0 = 27.5Hz)
and with a gaussian half-width of `octwidth`.
Set `octwidth` to `None` to use a flat weighting.
norm : float > 0 or np.inf
Normalization factor for each filter
base_c : bool
If True, the filter bank will start at 'C'.
If False, the filter bank will start at 'A'.
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
wts : ndarray [shape=(n_chroma, 1 + n_fft / 2)]
Chroma filter matrix
See Also
--------
util.normalize
feature.chroma_stft
Notes
-----
This function caches at level 10.
Examples
--------
Build a simple chroma filter bank
>>> chromafb = librosa.filters.chroma(22050, 4096)
array([[ 1.689e-05, 3.024e-04, ..., 4.639e-17, 5.327e-17],
[ 1.716e-05, 2.652e-04, ..., 2.674e-25, 3.176e-25],
...,
[ 1.578e-05, 3.619e-04, ..., 8.577e-06, 9.205e-06],
[ 1.643e-05, 3.355e-04, ..., 1.474e-10, 1.636e-10]])
Use quarter-tones instead of semitones
>>> librosa.filters.chroma(22050, 4096, n_chroma=24)
array([[ 1.194e-05, 2.138e-04, ..., 6.297e-64, 1.115e-63],
[ 1.206e-05, 2.009e-04, ..., 1.546e-79, 2.929e-79],
...,
[ 1.162e-05, 2.372e-04, ..., 6.417e-38, 9.923e-38],
[ 1.180e-05, 2.260e-04, ..., 4.697e-50, 7.772e-50]])
Equally weight all octaves
>>> librosa.filters.chroma(22050, 4096, octwidth=None)
array([[ 3.036e-01, 2.604e-01, ..., 2.445e-16, 2.809e-16],
[ 3.084e-01, 2.283e-01, ..., 1.409e-24, 1.675e-24],
...,
[ 2.836e-01, 3.116e-01, ..., 4.520e-05, 4.854e-05],
[ 2.953e-01, 2.888e-01, ..., 7.768e-10, 8.629e-10]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(chromafb, x_axis='linear')
>>> plt.ylabel('Chroma filter')
>>> plt.title('Chroma filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
wts = np.zeros((n_chroma, n_fft))
# Get the FFT bins, not counting the DC component
frequencies = np.linspace(0, sr, n_fft, endpoint=False)[1:]
frqbins = n_chroma * hz_to_octs(frequencies, A440)
# make up a value for the 0 Hz bin = 1.5 octaves below bin 1
# (so chroma is 50% rotated from bin 1, and bin width is broad)
frqbins = np.concatenate(([frqbins[0] - 1.5 * n_chroma], frqbins))
binwidthbins = np.concatenate((np.maximum(frqbins[1:] - frqbins[:-1],
1.0), [1]))
D = np.subtract.outer(frqbins, np.arange(0, n_chroma, dtype='d')).T
n_chroma2 = np.round(float(n_chroma) / 2)
# Project into range -n_chroma/2 .. n_chroma/2
# add on fixed offset of 10*n_chroma to ensure all values passed to
# rem are positive
D = np.remainder(D + n_chroma2 + 10*n_chroma, n_chroma) - n_chroma2
# Gaussian bumps - 2*D to make them narrower
wts = np.exp(-0.5 * (2*D / np.tile(binwidthbins, (n_chroma, 1)))**2)
# normalize each column
wts = util.normalize(wts, norm=norm, axis=0)
# Maybe apply scaling for fft bins
if octwidth is not None:
wts *= np.tile(
np.exp(-0.5 * (((frqbins/n_chroma - ctroct)/octwidth)**2)),
(n_chroma, 1))
if base_c:
wts = np.roll(wts, -3, axis=0)
# remove aliasing columns, copy to ensure row-contiguity
return np.ascontiguousarray(wts[:, :int(1 + n_fft/2)], dtype=dtype) | [
"def",
"chroma",
"(",
"sr",
",",
"n_fft",
",",
"n_chroma",
"=",
"12",
",",
"A440",
"=",
"440.0",
",",
"ctroct",
"=",
"5.0",
",",
"octwidth",
"=",
"2",
",",
"norm",
"=",
"2",
",",
"base_c",
"=",
"True",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
":",
"wts",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_chroma",
",",
"n_fft",
")",
")",
"# Get the FFT bins, not counting the DC component",
"frequencies",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"sr",
",",
"n_fft",
",",
"endpoint",
"=",
"False",
")",
"[",
"1",
":",
"]",
"frqbins",
"=",
"n_chroma",
"*",
"hz_to_octs",
"(",
"frequencies",
",",
"A440",
")",
"# make up a value for the 0 Hz bin = 1.5 octaves below bin 1",
"# (so chroma is 50% rotated from bin 1, and bin width is broad)",
"frqbins",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"frqbins",
"[",
"0",
"]",
"-",
"1.5",
"*",
"n_chroma",
"]",
",",
"frqbins",
")",
")",
"binwidthbins",
"=",
"np",
".",
"concatenate",
"(",
"(",
"np",
".",
"maximum",
"(",
"frqbins",
"[",
"1",
":",
"]",
"-",
"frqbins",
"[",
":",
"-",
"1",
"]",
",",
"1.0",
")",
",",
"[",
"1",
"]",
")",
")",
"D",
"=",
"np",
".",
"subtract",
".",
"outer",
"(",
"frqbins",
",",
"np",
".",
"arange",
"(",
"0",
",",
"n_chroma",
",",
"dtype",
"=",
"'d'",
")",
")",
".",
"T",
"n_chroma2",
"=",
"np",
".",
"round",
"(",
"float",
"(",
"n_chroma",
")",
"/",
"2",
")",
"# Project into range -n_chroma/2 .. n_chroma/2",
"# add on fixed offset of 10*n_chroma to ensure all values passed to",
"# rem are positive",
"D",
"=",
"np",
".",
"remainder",
"(",
"D",
"+",
"n_chroma2",
"+",
"10",
"*",
"n_chroma",
",",
"n_chroma",
")",
"-",
"n_chroma2",
"# Gaussian bumps - 2*D to make them narrower",
"wts",
"=",
"np",
".",
"exp",
"(",
"-",
"0.5",
"*",
"(",
"2",
"*",
"D",
"/",
"np",
".",
"tile",
"(",
"binwidthbins",
",",
"(",
"n_chroma",
",",
"1",
")",
")",
")",
"**",
"2",
")",
"# normalize each column",
"wts",
"=",
"util",
".",
"normalize",
"(",
"wts",
",",
"norm",
"=",
"norm",
",",
"axis",
"=",
"0",
")",
"# Maybe apply scaling for fft bins",
"if",
"octwidth",
"is",
"not",
"None",
":",
"wts",
"*=",
"np",
".",
"tile",
"(",
"np",
".",
"exp",
"(",
"-",
"0.5",
"*",
"(",
"(",
"(",
"frqbins",
"/",
"n_chroma",
"-",
"ctroct",
")",
"/",
"octwidth",
")",
"**",
"2",
")",
")",
",",
"(",
"n_chroma",
",",
"1",
")",
")",
"if",
"base_c",
":",
"wts",
"=",
"np",
".",
"roll",
"(",
"wts",
",",
"-",
"3",
",",
"axis",
"=",
"0",
")",
"# remove aliasing columns, copy to ensure row-contiguity",
"return",
"np",
".",
"ascontiguousarray",
"(",
"wts",
"[",
":",
",",
":",
"int",
"(",
"1",
"+",
"n_fft",
"/",
"2",
")",
"]",
",",
"dtype",
"=",
"dtype",
")"
]
| 31.709924 | 23.89313 |
def get_random_xy_color(self):
"""Returns the approximate CIE 1931 x,y coordinates represented by the
supplied hexColor parameter, or of a random color if the parameter
is not passed."""
r = self.color.random_rgb_value()
g = self.color.random_rgb_value()
b = self.color.random_rgb_value()
return self.rgb_to_xy(r, g, b) | [
"def",
"get_random_xy_color",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"color",
".",
"random_rgb_value",
"(",
")",
"g",
"=",
"self",
".",
"color",
".",
"random_rgb_value",
"(",
")",
"b",
"=",
"self",
".",
"color",
".",
"random_rgb_value",
"(",
")",
"return",
"self",
".",
"rgb_to_xy",
"(",
"r",
",",
"g",
",",
"b",
")"
]
| 46 | 6.125 |
def open_upload_stream(self, filename, chunk_size_bytes=None,
metadata=None):
"""Opens a Stream that the application can write the contents of the
file to.
The user must specify the filename, and can choose to add any
additional information in the metadata field of the file document or
modify the chunk size.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
grid_in, file_id = fs.open_upload_stream(
"test_file", chunk_size_bytes=4,
metadata={"contentType": "text/plain"})
grid_in.write("data I want to store!")
grid_in.close() # uploaded on close
Returns an instance of :class:`~gridfs.grid_file.GridIn`.
Raises :exc:`~gridfs.errors.NoFile` if no such version of
that file exists.
Raises :exc:`~ValueError` if `filename` is not a string.
:Parameters:
- `filename`: The name of the file to upload.
      - `chunk_size_bytes` (optional): The number of bytes per chunk of this
file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`.
- `metadata` (optional): User data for the 'metadata' field of the
files collection document. If not provided the metadata field will
be omitted from the files collection document.
"""
validate_string("filename", filename)
opts = {"filename": filename,
"chunk_size": (chunk_size_bytes if chunk_size_bytes
is not None else self._chunk_size_bytes)}
if metadata is not None:
opts["metadata"] = metadata
return GridIn(self._collection, **opts) | [
"def",
"open_upload_stream",
"(",
"self",
",",
"filename",
",",
"chunk_size_bytes",
"=",
"None",
",",
"metadata",
"=",
"None",
")",
":",
"validate_string",
"(",
"\"filename\"",
",",
"filename",
")",
"opts",
"=",
"{",
"\"filename\"",
":",
"filename",
",",
"\"chunk_size\"",
":",
"(",
"chunk_size_bytes",
"if",
"chunk_size_bytes",
"is",
"not",
"None",
"else",
"self",
".",
"_chunk_size_bytes",
")",
"}",
"if",
"metadata",
"is",
"not",
"None",
":",
"opts",
"[",
"\"metadata\"",
"]",
"=",
"metadata",
"return",
"GridIn",
"(",
"self",
".",
"_collection",
",",
"*",
"*",
"opts",
")"
]
| 41.95122 | 20.804878 |
def execute(self, request, session=None, method='post'):
'''
pyTOP.API -- TOPRequest instance
'''
params = {
'app_key' : self.API_KEY,
'v' : self.API_VERSION,
'format' : self.FORMAT,
#'sign_method' : self.SIGN_METHOD,
'partner_id' : self.SDK_VERSON
}
api_params = request.get_api_params()
params['timestamp'] = self._get_timestamp()
params['method'] = request.get_method_name()
if session is not None :
params['session'] = session
params.update(api_params)
params['sign'] = self._sign(params)
#print params
method = method.lower()
if method == 'get':
form_data = urllib.urlencode(params)
rsp = requests.get('%s?%s'%(self.GATEWAY, form_data))
elif method == 'post':
rsp = requests.post(self.GATEWAY, data=params)
rsp = json.loads(rsp.content)
if rsp.has_key('error_response'):
error_code = rsp['error_response']['code']
if 'sub_msg' in rsp['error_response']:
msg = '%s [%s]'%(rsp['error_response']['sub_msg'], rsp['error_response']['msg'])
else:
msg = rsp['error_response']['msg']
raise TOPException(error_code, msg)
else:
#pprint(rsp)
rsp = rsp[request.get_method_name().replace('.','_')[7:] + '_response']
if not rsp: return None
return rsp | [
"def",
"execute",
"(",
"self",
",",
"request",
",",
"session",
"=",
"None",
",",
"method",
"=",
"'post'",
")",
":",
"params",
"=",
"{",
"'app_key'",
":",
"self",
".",
"API_KEY",
",",
"'v'",
":",
"self",
".",
"API_VERSION",
",",
"'format'",
":",
"self",
".",
"FORMAT",
",",
"#'sign_method' : self.SIGN_METHOD,",
"'partner_id'",
":",
"self",
".",
"SDK_VERSON",
"}",
"api_params",
"=",
"request",
".",
"get_api_params",
"(",
")",
"params",
"[",
"'timestamp'",
"]",
"=",
"self",
".",
"_get_timestamp",
"(",
")",
"params",
"[",
"'method'",
"]",
"=",
"request",
".",
"get_method_name",
"(",
")",
"if",
"session",
"is",
"not",
"None",
":",
"params",
"[",
"'session'",
"]",
"=",
"session",
"params",
".",
"update",
"(",
"api_params",
")",
"params",
"[",
"'sign'",
"]",
"=",
"self",
".",
"_sign",
"(",
"params",
")",
"#print params",
"method",
"=",
"method",
".",
"lower",
"(",
")",
"if",
"method",
"==",
"'get'",
":",
"form_data",
"=",
"urllib",
".",
"urlencode",
"(",
"params",
")",
"rsp",
"=",
"requests",
".",
"get",
"(",
"'%s?%s'",
"%",
"(",
"self",
".",
"GATEWAY",
",",
"form_data",
")",
")",
"elif",
"method",
"==",
"'post'",
":",
"rsp",
"=",
"requests",
".",
"post",
"(",
"self",
".",
"GATEWAY",
",",
"data",
"=",
"params",
")",
"rsp",
"=",
"json",
".",
"loads",
"(",
"rsp",
".",
"content",
")",
"if",
"rsp",
".",
"has_key",
"(",
"'error_response'",
")",
":",
"error_code",
"=",
"rsp",
"[",
"'error_response'",
"]",
"[",
"'code'",
"]",
"if",
"'sub_msg'",
"in",
"rsp",
"[",
"'error_response'",
"]",
":",
"msg",
"=",
"'%s [%s]'",
"%",
"(",
"rsp",
"[",
"'error_response'",
"]",
"[",
"'sub_msg'",
"]",
",",
"rsp",
"[",
"'error_response'",
"]",
"[",
"'msg'",
"]",
")",
"else",
":",
"msg",
"=",
"rsp",
"[",
"'error_response'",
"]",
"[",
"'msg'",
"]",
"raise",
"TOPException",
"(",
"error_code",
",",
"msg",
")",
"else",
":",
"#pprint(rsp)",
"rsp",
"=",
"rsp",
"[",
"request",
".",
"get_method_name",
"(",
")",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
"[",
"7",
":",
"]",
"+",
"'_response'",
"]",
"if",
"not",
"rsp",
":",
"return",
"None",
"return",
"rsp"
]
| 39.315789 | 13.789474 |
def _prune_penalty_box(self):
"""Restores clients that have reconnected.
This function should be called first for every public method.
"""
added = False
for client in self.penalty_box.get():
log.info("Client %r is back up.", client)
self.active_clients.append(client)
added = True
if added:
self._sort_clients() | [
"def",
"_prune_penalty_box",
"(",
"self",
")",
":",
"added",
"=",
"False",
"for",
"client",
"in",
"self",
".",
"penalty_box",
".",
"get",
"(",
")",
":",
"log",
".",
"info",
"(",
"\"Client %r is back up.\"",
",",
"client",
")",
"self",
".",
"active_clients",
".",
"append",
"(",
"client",
")",
"added",
"=",
"True",
"if",
"added",
":",
"self",
".",
"_sort_clients",
"(",
")"
]
| 33.083333 | 14.166667 |
def slice_before(predicate, iterable):
"""Returns groups of elements from iterable,
slicing just before predicate(elem) is True
"""
if isinstance(predicate, string_type):
predicate = re.compile(predicate)
if hasattr(predicate, 'match'):
predicate = predicate.match
record = []
for i, item in enumerate(iterable):
if i == 0:
record = [item]
continue
if predicate(item):
yield tuple(record)
record = [item]
else:
record.append(item)
if record:
yield tuple(record) | [
"def",
"slice_before",
"(",
"predicate",
",",
"iterable",
")",
":",
"if",
"isinstance",
"(",
"predicate",
",",
"string_type",
")",
":",
"predicate",
"=",
"re",
".",
"compile",
"(",
"predicate",
")",
"if",
"hasattr",
"(",
"predicate",
",",
"'match'",
")",
":",
"predicate",
"=",
"predicate",
".",
"match",
"record",
"=",
"[",
"]",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"iterable",
")",
":",
"if",
"i",
"==",
"0",
":",
"record",
"=",
"[",
"item",
"]",
"continue",
"if",
"predicate",
"(",
"item",
")",
":",
"yield",
"tuple",
"(",
"record",
")",
"record",
"=",
"[",
"item",
"]",
"else",
":",
"record",
".",
"append",
"(",
"item",
")",
"if",
"record",
":",
"yield",
"tuple",
"(",
"record",
")"
]
| 23.28 | 16.52 |
def read_sql(sql, con, filePath, index_col=None, coerce_float=True,
params=None, parse_dates=None, columns=None, chunksize=None):
"""
Read SQL query or database table into a DataFrameModel.
Provide a filePath argument in addition to the *args/**kwargs from
pandas.read_sql and get a DataFrameModel.
NOTE: The chunksize option is overridden to None always (for now).
Reference:
http://pandas.pydata.org/pandas-docs/version/0.18.1/generated/pandas.read_sql.html
pandas.read_sql(sql, con, index_col=None, coerce_float=True,
params=None, parse_dates=None, columns=None, chunksize=None)
:return: DataFrameModel
"""
# TODO: Decide if chunksize is worth keeping and how to handle?
df = pandas.read_sql(sql, con, index_col, coerce_float,
params, parse_dates, columns, chunksize=None)
return DataFrameModel(df, filePath=filePath) | [
"def",
"read_sql",
"(",
"sql",
",",
"con",
",",
"filePath",
",",
"index_col",
"=",
"None",
",",
"coerce_float",
"=",
"True",
",",
"params",
"=",
"None",
",",
"parse_dates",
"=",
"None",
",",
"columns",
"=",
"None",
",",
"chunksize",
"=",
"None",
")",
":",
"# TODO: Decide if chunksize is worth keeping and how to handle?",
"df",
"=",
"pandas",
".",
"read_sql",
"(",
"sql",
",",
"con",
",",
"index_col",
",",
"coerce_float",
",",
"params",
",",
"parse_dates",
",",
"columns",
",",
"chunksize",
"=",
"None",
")",
"return",
"DataFrameModel",
"(",
"df",
",",
"filePath",
"=",
"filePath",
")"
]
| 39.521739 | 26.652174 |
def default_diff(latest_config, current_config):
"""Determine if two revisions have actually changed."""
# Pop off the fields we don't care about:
pop_no_diff_fields(latest_config, current_config)
diff = DeepDiff(
latest_config,
current_config,
ignore_order=True
)
return diff | [
"def",
"default_diff",
"(",
"latest_config",
",",
"current_config",
")",
":",
"# Pop off the fields we don't care about:",
"pop_no_diff_fields",
"(",
"latest_config",
",",
"current_config",
")",
"diff",
"=",
"DeepDiff",
"(",
"latest_config",
",",
"current_config",
",",
"ignore_order",
"=",
"True",
")",
"return",
"diff"
]
| 28.636364 | 17.818182 |
def publish(self, msg):
'''
Send message to all connected sockets
'''
if not self.streams:
return
pack = salt.transport.frame.frame_msg_ipc(msg, raw_body=True)
for stream in self.streams:
self.io_loop.spawn_callback(self._write, stream, pack) | [
"def",
"publish",
"(",
"self",
",",
"msg",
")",
":",
"if",
"not",
"self",
".",
"streams",
":",
"return",
"pack",
"=",
"salt",
".",
"transport",
".",
"frame",
".",
"frame_msg_ipc",
"(",
"msg",
",",
"raw_body",
"=",
"True",
")",
"for",
"stream",
"in",
"self",
".",
"streams",
":",
"self",
".",
"io_loop",
".",
"spawn_callback",
"(",
"self",
".",
"_write",
",",
"stream",
",",
"pack",
")"
]
| 27.818182 | 23.090909 |
def get_categories(self, languages=None):
"""GetCategories.
[Preview API]
:param str languages:
:rtype: [str]
"""
query_parameters = {}
if languages is not None:
query_parameters['languages'] = self._serialize.query('languages', languages, 'str')
response = self._send(http_method='GET',
location_id='e0a5a71e-3ac3-43a0-ae7d-0bb5c3046a2a',
version='5.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('[str]', self._unwrap_collection(response)) | [
"def",
"get_categories",
"(",
"self",
",",
"languages",
"=",
"None",
")",
":",
"query_parameters",
"=",
"{",
"}",
"if",
"languages",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'languages'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'languages'",
",",
"languages",
",",
"'str'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'e0a5a71e-3ac3-43a0-ae7d-0bb5c3046a2a'",
",",
"version",
"=",
"'5.1-preview.1'",
",",
"query_parameters",
"=",
"query_parameters",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'[str]'",
",",
"self",
".",
"_unwrap_collection",
"(",
"response",
")",
")"
]
| 44.928571 | 17.642857 |
def bbox_transform(ex_rois, gt_rois, box_stds):
"""
compute bounding box regression targets from ex_rois to gt_rois
:param ex_rois: [N, 4]
:param gt_rois: [N, 4]
:return: [N, 4]
"""
assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number'
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * (ex_widths - 1.0)
ex_ctr_y = ex_rois[:, 1] + 0.5 * (ex_heights - 1.0)
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * (gt_widths - 1.0)
gt_ctr_y = gt_rois[:, 1] + 0.5 * (gt_heights - 1.0)
targets_dx = (gt_ctr_x - ex_ctr_x) / (ex_widths + 1e-14) / box_stds[0]
targets_dy = (gt_ctr_y - ex_ctr_y) / (ex_heights + 1e-14) / box_stds[1]
targets_dw = np.log(gt_widths / ex_widths) / box_stds[2]
targets_dh = np.log(gt_heights / ex_heights) / box_stds[3]
targets = np.vstack((targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
return targets | [
"def",
"bbox_transform",
"(",
"ex_rois",
",",
"gt_rois",
",",
"box_stds",
")",
":",
"assert",
"ex_rois",
".",
"shape",
"[",
"0",
"]",
"==",
"gt_rois",
".",
"shape",
"[",
"0",
"]",
",",
"'inconsistent rois number'",
"ex_widths",
"=",
"ex_rois",
"[",
":",
",",
"2",
"]",
"-",
"ex_rois",
"[",
":",
",",
"0",
"]",
"+",
"1.0",
"ex_heights",
"=",
"ex_rois",
"[",
":",
",",
"3",
"]",
"-",
"ex_rois",
"[",
":",
",",
"1",
"]",
"+",
"1.0",
"ex_ctr_x",
"=",
"ex_rois",
"[",
":",
",",
"0",
"]",
"+",
"0.5",
"*",
"(",
"ex_widths",
"-",
"1.0",
")",
"ex_ctr_y",
"=",
"ex_rois",
"[",
":",
",",
"1",
"]",
"+",
"0.5",
"*",
"(",
"ex_heights",
"-",
"1.0",
")",
"gt_widths",
"=",
"gt_rois",
"[",
":",
",",
"2",
"]",
"-",
"gt_rois",
"[",
":",
",",
"0",
"]",
"+",
"1.0",
"gt_heights",
"=",
"gt_rois",
"[",
":",
",",
"3",
"]",
"-",
"gt_rois",
"[",
":",
",",
"1",
"]",
"+",
"1.0",
"gt_ctr_x",
"=",
"gt_rois",
"[",
":",
",",
"0",
"]",
"+",
"0.5",
"*",
"(",
"gt_widths",
"-",
"1.0",
")",
"gt_ctr_y",
"=",
"gt_rois",
"[",
":",
",",
"1",
"]",
"+",
"0.5",
"*",
"(",
"gt_heights",
"-",
"1.0",
")",
"targets_dx",
"=",
"(",
"gt_ctr_x",
"-",
"ex_ctr_x",
")",
"/",
"(",
"ex_widths",
"+",
"1e-14",
")",
"/",
"box_stds",
"[",
"0",
"]",
"targets_dy",
"=",
"(",
"gt_ctr_y",
"-",
"ex_ctr_y",
")",
"/",
"(",
"ex_heights",
"+",
"1e-14",
")",
"/",
"box_stds",
"[",
"1",
"]",
"targets_dw",
"=",
"np",
".",
"log",
"(",
"gt_widths",
"/",
"ex_widths",
")",
"/",
"box_stds",
"[",
"2",
"]",
"targets_dh",
"=",
"np",
".",
"log",
"(",
"gt_heights",
"/",
"ex_heights",
")",
"/",
"box_stds",
"[",
"3",
"]",
"targets",
"=",
"np",
".",
"vstack",
"(",
"(",
"targets_dx",
",",
"targets_dy",
",",
"targets_dw",
",",
"targets_dh",
")",
")",
".",
"transpose",
"(",
")",
"return",
"targets"
]
| 41.230769 | 21.538462 |
def calcELStaeckel(R,vR,vT,z,vz,pot,vc=1.,ro=1.):
"""
NAME:
calcELStaeckel
PURPOSE:
calculate the energy and angular momentum
INPUT:
R - Galactocentric radius (/ro)
vR - radial part of the velocity (/vc)
vT - azimuthal part of the velocity (/vc)
vc - circular velocity
ro - reference radius
OUTPUT:
(E,L)
HISTORY:
2012-11-30 - Written - Bovy (IAS)
"""
return (_evaluatePotentials(pot,R,z)+vR**2./2.+vT**2./2.+vz**2./2.,R*vT) | [
"def",
"calcELStaeckel",
"(",
"R",
",",
"vR",
",",
"vT",
",",
"z",
",",
"vz",
",",
"pot",
",",
"vc",
"=",
"1.",
",",
"ro",
"=",
"1.",
")",
":",
"return",
"(",
"_evaluatePotentials",
"(",
"pot",
",",
"R",
",",
"z",
")",
"+",
"vR",
"**",
"2.",
"/",
"2.",
"+",
"vT",
"**",
"2.",
"/",
"2.",
"+",
"vz",
"**",
"2.",
"/",
"2.",
",",
"R",
"*",
"vT",
")"
]
| 29.388889 | 15.777778 |
def side_effect(self):
"""
Executes ``ansible-playbook`` against the side_effect playbook and
returns None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.side_effect)
pb.execute() | [
"def",
"side_effect",
"(",
"self",
")",
":",
"pb",
"=",
"self",
".",
"_get_ansible_playbook",
"(",
"self",
".",
"playbooks",
".",
"side_effect",
")",
"pb",
".",
"execute",
"(",
")"
]
| 27.444444 | 19.666667 |
def clean(self):
"""
        Make sure that the scope is less than or equal to the previous scope!
"""
data = self.cleaned_data
want_scope = data.get('scope') or 0
refresh_token = data.get('refresh_token')
access_token = getattr(refresh_token, 'access_token', None) if \
refresh_token else \
None
has_scope = access_token.scope if access_token else 0
# Only check if we've actually got a scope in the data
# (read: All fields have been cleaned)
if want_scope is not 0 and not scope.check(want_scope, has_scope):
raise OAuthValidationError({'error': 'invalid_scope'})
return data | [
"def",
"clean",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"cleaned_data",
"want_scope",
"=",
"data",
".",
"get",
"(",
"'scope'",
")",
"or",
"0",
"refresh_token",
"=",
"data",
".",
"get",
"(",
"'refresh_token'",
")",
"access_token",
"=",
"getattr",
"(",
"refresh_token",
",",
"'access_token'",
",",
"None",
")",
"if",
"refresh_token",
"else",
"None",
"has_scope",
"=",
"access_token",
".",
"scope",
"if",
"access_token",
"else",
"0",
"# Only check if we've actually got a scope in the data",
"# (read: All fields have been cleaned)",
"if",
"want_scope",
"is",
"not",
"0",
"and",
"not",
"scope",
".",
"check",
"(",
"want_scope",
",",
"has_scope",
")",
":",
"raise",
"OAuthValidationError",
"(",
"{",
"'error'",
":",
"'invalid_scope'",
"}",
")",
"return",
"data"
]
| 37.888889 | 19.444444 |
def _get_subparser_cell_args(self, subparser_prog):
""" Get cell args of a specified subparser by its prog."""
subparsers = self._get_subparsers()
for subparser in subparsers:
if subparser_prog == subparser.prog:
return subparser._cell_args
return None | [
"def",
"_get_subparser_cell_args",
"(",
"self",
",",
"subparser_prog",
")",
":",
"subparsers",
"=",
"self",
".",
"_get_subparsers",
"(",
")",
"for",
"subparser",
"in",
"subparsers",
":",
"if",
"subparser_prog",
"==",
"subparser",
".",
"prog",
":",
"return",
"subparser",
".",
"_cell_args",
"return",
"None"
]
| 30.666667 | 14.666667 |
def wr_tsv(fout_tsv, tsv_data, **kws):
"""Write a file of tab-separated table data"""
items_str = "items" if "items" not in kws else kws["items"]
if tsv_data:
ifstrm = sys.stdout if fout_tsv is None else open(fout_tsv, 'w')
num_items = prt_tsv(ifstrm, tsv_data, **kws)
if fout_tsv is not None:
sys.stdout.write(" {N:>5} {ITEMS} WROTE: {FOUT}\n".format(
N=num_items, ITEMS=items_str, FOUT=fout_tsv))
ifstrm.close()
else:
sys.stdout.write(" 0 {ITEMS}. NOT WRITING {FOUT}\n".format(
ITEMS=items_str, FOUT=fout_tsv)) | [
"def",
"wr_tsv",
"(",
"fout_tsv",
",",
"tsv_data",
",",
"*",
"*",
"kws",
")",
":",
"items_str",
"=",
"\"items\"",
"if",
"\"items\"",
"not",
"in",
"kws",
"else",
"kws",
"[",
"\"items\"",
"]",
"if",
"tsv_data",
":",
"ifstrm",
"=",
"sys",
".",
"stdout",
"if",
"fout_tsv",
"is",
"None",
"else",
"open",
"(",
"fout_tsv",
",",
"'w'",
")",
"num_items",
"=",
"prt_tsv",
"(",
"ifstrm",
",",
"tsv_data",
",",
"*",
"*",
"kws",
")",
"if",
"fout_tsv",
"is",
"not",
"None",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\" {N:>5} {ITEMS} WROTE: {FOUT}\\n\"",
".",
"format",
"(",
"N",
"=",
"num_items",
",",
"ITEMS",
"=",
"items_str",
",",
"FOUT",
"=",
"fout_tsv",
")",
")",
"ifstrm",
".",
"close",
"(",
")",
"else",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\" 0 {ITEMS}. NOT WRITING {FOUT}\\n\"",
".",
"format",
"(",
"ITEMS",
"=",
"items_str",
",",
"FOUT",
"=",
"fout_tsv",
")",
")"
]
| 46.615385 | 18 |
def load_flickr25k_dataset(tag='sky', path="data", n_threads=50, printable=False):
"""Load Flickr25K dataset.
Returns a list of images by a given tag from Flick25k dataset,
it will download Flickr25k from `the official website <http://press.liacs.nl/mirflickr/mirdownload.html>`__
at the first time you use it.
Parameters
------------
tag : str or None
What images to return.
- If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search <https://www.flickr.com/search/>`__.
- If you want to get all images, set to ``None``.
path : str
The path that the data is downloaded to, defaults is ``data/flickr25k/``.
n_threads : int
The number of thread to read image.
printable : boolean
        Whether to print information when reading images, default is ``False``.
Examples
-----------
Get images with tag of sky
>>> images = tl.files.load_flickr25k_dataset(tag='sky')
Get all images
>>> images = tl.files.load_flickr25k_dataset(tag=None, n_threads=100, printable=True)
"""
path = os.path.join(path, 'flickr25k')
filename = 'mirflickr25k.zip'
url = 'http://press.liacs.nl/mirflickr/mirflickr25k/'
# download dataset
if folder_exists(os.path.join(path, "mirflickr")) is False:
logging.info("[*] Flickr25k is nonexistent in {}".format(path))
maybe_download_and_extract(filename, path, url, extract=True)
del_file(os.path.join(path, filename))
# return images by the given tag.
# 1. image path list
folder_imgs = os.path.join(path, "mirflickr")
path_imgs = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False)
path_imgs.sort(key=natural_keys)
# 2. tag path list
folder_tags = os.path.join(path, "mirflickr", "meta", "tags")
path_tags = load_file_list(path=folder_tags, regx='\\.txt', printable=False)
path_tags.sort(key=natural_keys)
# 3. select images
if tag is None:
logging.info("[Flickr25k] reading all images")
else:
logging.info("[Flickr25k] reading images with tag: {}".format(tag))
images_list = []
for idx, _v in enumerate(path_tags):
tags = read_file(os.path.join(folder_tags, path_tags[idx])).split('\n')
# logging.info(idx+1, tags)
if tag is None or tag in tags:
images_list.append(path_imgs[idx])
images = visualize.read_images(images_list, folder_imgs, n_threads=n_threads, printable=printable)
return images | [
"def",
"load_flickr25k_dataset",
"(",
"tag",
"=",
"'sky'",
",",
"path",
"=",
"\"data\"",
",",
"n_threads",
"=",
"50",
",",
"printable",
"=",
"False",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'flickr25k'",
")",
"filename",
"=",
"'mirflickr25k.zip'",
"url",
"=",
"'http://press.liacs.nl/mirflickr/mirflickr25k/'",
"# download dataset",
"if",
"folder_exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"mirflickr\"",
")",
")",
"is",
"False",
":",
"logging",
".",
"info",
"(",
"\"[*] Flickr25k is nonexistent in {}\"",
".",
"format",
"(",
"path",
")",
")",
"maybe_download_and_extract",
"(",
"filename",
",",
"path",
",",
"url",
",",
"extract",
"=",
"True",
")",
"del_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"filename",
")",
")",
"# return images by the given tag.",
"# 1. image path list",
"folder_imgs",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"mirflickr\"",
")",
"path_imgs",
"=",
"load_file_list",
"(",
"path",
"=",
"folder_imgs",
",",
"regx",
"=",
"'\\\\.jpg'",
",",
"printable",
"=",
"False",
")",
"path_imgs",
".",
"sort",
"(",
"key",
"=",
"natural_keys",
")",
"# 2. tag path list",
"folder_tags",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"mirflickr\"",
",",
"\"meta\"",
",",
"\"tags\"",
")",
"path_tags",
"=",
"load_file_list",
"(",
"path",
"=",
"folder_tags",
",",
"regx",
"=",
"'\\\\.txt'",
",",
"printable",
"=",
"False",
")",
"path_tags",
".",
"sort",
"(",
"key",
"=",
"natural_keys",
")",
"# 3. select images",
"if",
"tag",
"is",
"None",
":",
"logging",
".",
"info",
"(",
"\"[Flickr25k] reading all images\"",
")",
"else",
":",
"logging",
".",
"info",
"(",
"\"[Flickr25k] reading images with tag: {}\"",
".",
"format",
"(",
"tag",
")",
")",
"images_list",
"=",
"[",
"]",
"for",
"idx",
",",
"_v",
"in",
"enumerate",
"(",
"path_tags",
")",
":",
"tags",
"=",
"read_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"folder_tags",
",",
"path_tags",
"[",
"idx",
"]",
")",
")",
".",
"split",
"(",
"'\\n'",
")",
"# logging.info(idx+1, tags)",
"if",
"tag",
"is",
"None",
"or",
"tag",
"in",
"tags",
":",
"images_list",
".",
"append",
"(",
"path_imgs",
"[",
"idx",
"]",
")",
"images",
"=",
"visualize",
".",
"read_images",
"(",
"images_list",
",",
"folder_imgs",
",",
"n_threads",
"=",
"n_threads",
",",
"printable",
"=",
"printable",
")",
"return",
"images"
]
| 36.397059 | 26 |
def pick(rest):
"Pick between a few options"
question = rest.strip()
choices = util.splitem(question)
if len(choices) == 1:
return "I can't pick if you give me only one choice!"
else:
pick = random.choice(choices)
certainty = random.sample(phrases.certainty_opts, 1)[0]
return "%s... %s %s" % (pick, certainty, pick) | [
"def",
"pick",
"(",
"rest",
")",
":",
"question",
"=",
"rest",
".",
"strip",
"(",
")",
"choices",
"=",
"util",
".",
"splitem",
"(",
"question",
")",
"if",
"len",
"(",
"choices",
")",
"==",
"1",
":",
"return",
"\"I can't pick if you give me only one choice!\"",
"else",
":",
"pick",
"=",
"random",
".",
"choice",
"(",
"choices",
")",
"certainty",
"=",
"random",
".",
"sample",
"(",
"phrases",
".",
"certainty_opts",
",",
"1",
")",
"[",
"0",
"]",
"return",
"\"%s... %s %s\"",
"%",
"(",
"pick",
",",
"certainty",
",",
"pick",
")"
]
| 32 | 16 |
def read_all(filename):
"""
Reads the serialized objects from disk. Caller must wrap objects in appropriate Python wrapper classes.
:param filename: the file with the serialized objects
:type filename: str
:return: the list of JB_OBjects
:rtype: list
"""
array = javabridge.static_call(
"Lweka/core/SerializationHelper;", "readAll",
"(Ljava/lang/String;)[Ljava/lang/Object;",
filename)
if array is None:
return None
else:
return javabridge.get_env().get_object_array_elements(array) | [
"def",
"read_all",
"(",
"filename",
")",
":",
"array",
"=",
"javabridge",
".",
"static_call",
"(",
"\"Lweka/core/SerializationHelper;\"",
",",
"\"readAll\"",
",",
"\"(Ljava/lang/String;)[Ljava/lang/Object;\"",
",",
"filename",
")",
"if",
"array",
"is",
"None",
":",
"return",
"None",
"else",
":",
"return",
"javabridge",
".",
"get_env",
"(",
")",
".",
"get_object_array_elements",
"(",
"array",
")"
]
| 32.176471 | 19.823529 |
def set_attribute(attribute, attribute_value, instance_name=None, instance_id=None, region=None, key=None, keyid=None,
profile=None, filters=None):
'''
Set an EC2 instance attribute.
Returns whether the operation succeeded or not.
CLI Example:
.. code-block:: bash
salt myminion boto_ec2.set_attribute sourceDestCheck False instance_name=my_instance
Available attributes:
* instanceType
* kernel
* ramdisk
* userData
* disableApiTermination
* instanceInitiatedShutdownBehavior
* rootDeviceName
* blockDeviceMapping
* productCodes
* sourceDestCheck
* groupSet
* ebsOptimized
* sriovNetSupport
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
attribute_list = ['instanceType', 'kernel', 'ramdisk', 'userData', 'disableApiTermination',
'instanceInitiatedShutdownBehavior', 'rootDeviceName', 'blockDeviceMapping', 'productCodes',
'sourceDestCheck', 'groupSet', 'ebsOptimized', 'sriovNetSupport']
if not any((instance_name, instance_id)):
raise SaltInvocationError('At least one of the following must be specified: instance_name or instance_id.')
if instance_name and instance_id:
raise SaltInvocationError('Both instance_name and instance_id can not be specified in the same command.')
if attribute not in attribute_list:
raise SaltInvocationError('Attribute must be one of: {0}.'.format(attribute_list))
try:
if instance_name:
instances = find_instances(name=instance_name, region=region, key=key, keyid=keyid, profile=profile,
filters=filters)
if len(instances) != 1:
raise CommandExecutionError('Found more than one EC2 instance matching the criteria.')
instance_id = instances[0]
attribute = conn.modify_instance_attribute(instance_id, attribute, attribute_value)
if not attribute:
return False
return attribute
except boto.exception.BotoServerError as exc:
log.error(exc)
return False | [
"def",
"set_attribute",
"(",
"attribute",
",",
"attribute_value",
",",
"instance_name",
"=",
"None",
",",
"instance_id",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"filters",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"attribute_list",
"=",
"[",
"'instanceType'",
",",
"'kernel'",
",",
"'ramdisk'",
",",
"'userData'",
",",
"'disableApiTermination'",
",",
"'instanceInitiatedShutdownBehavior'",
",",
"'rootDeviceName'",
",",
"'blockDeviceMapping'",
",",
"'productCodes'",
",",
"'sourceDestCheck'",
",",
"'groupSet'",
",",
"'ebsOptimized'",
",",
"'sriovNetSupport'",
"]",
"if",
"not",
"any",
"(",
"(",
"instance_name",
",",
"instance_id",
")",
")",
":",
"raise",
"SaltInvocationError",
"(",
"'At least one of the following must be specified: instance_name or instance_id.'",
")",
"if",
"instance_name",
"and",
"instance_id",
":",
"raise",
"SaltInvocationError",
"(",
"'Both instance_name and instance_id can not be specified in the same command.'",
")",
"if",
"attribute",
"not",
"in",
"attribute_list",
":",
"raise",
"SaltInvocationError",
"(",
"'Attribute must be one of: {0}.'",
".",
"format",
"(",
"attribute_list",
")",
")",
"try",
":",
"if",
"instance_name",
":",
"instances",
"=",
"find_instances",
"(",
"name",
"=",
"instance_name",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
",",
"filters",
"=",
"filters",
")",
"if",
"len",
"(",
"instances",
")",
"!=",
"1",
":",
"raise",
"CommandExecutionError",
"(",
"'Found more than one EC2 instance matching the criteria.'",
")",
"instance_id",
"=",
"instances",
"[",
"0",
"]",
"attribute",
"=",
"conn",
".",
"modify_instance_attribute",
"(",
"instance_id",
",",
"attribute",
",",
"attribute_value",
")",
"if",
"not",
"attribute",
":",
"return",
"False",
"return",
"attribute",
"except",
"boto",
".",
"exception",
".",
"BotoServerError",
"as",
"exc",
":",
"log",
".",
"error",
"(",
"exc",
")",
"return",
"False"
]
| 41.711538 | 27.942308 |
def pretty_print_config_to_json(self, configs):
"""JSON string description of a protorpc.remote.Service in a discovery doc.
Args:
configs: Either a single dict or a list of dicts containing the service
configurations to list.
Returns:
string, The directory list document as a JSON string.
"""
descriptor = self.get_directory_list_doc(configs)
return json.dumps(descriptor, sort_keys=True, indent=2,
separators=(',', ': ')) | [
"def",
"pretty_print_config_to_json",
"(",
"self",
",",
"configs",
")",
":",
"descriptor",
"=",
"self",
".",
"get_directory_list_doc",
"(",
"configs",
")",
"return",
"json",
".",
"dumps",
"(",
"descriptor",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"2",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")"
]
| 36.769231 | 19.076923 |
def create(parallel, dirs, config):
"""Create a cluster based on the provided parallel arguments.
Returns an IPython view on the cluster, enabling processing on jobs.
Adds a mincores specification if he have machines with a larger
number of cores to allow jobs to be batched together for shared
memory usage.
"""
profile_dir = utils.safe_makedir(os.path.join(dirs["work"], get_log_dir(config), "ipython"))
has_mincores = any(x.startswith("mincores=") for x in parallel["resources"])
cores = min(_get_common_cores(config["resources"]), parallel["system_cores"])
if cores > 1 and not has_mincores:
adj_cores = max(1, int(math.floor(cores * float(parallel.get("mem_pct", 1.0)))))
# if we have less scheduled cores than per machine, use the scheduled count
if cores > parallel["cores"]:
cores = parallel["cores"]
# if we have less total cores required for the entire process, use that
elif adj_cores > parallel["num_jobs"] * parallel["cores_per_job"]:
cores = parallel["num_jobs"] * parallel["cores_per_job"]
else:
cores = adj_cores
cores = per_machine_target_cores(cores, parallel["num_jobs"])
parallel["resources"].append("mincores=%s" % cores)
return ipython_cluster.cluster_view(parallel["scheduler"].lower(), parallel["queue"],
parallel["num_jobs"], parallel["cores_per_job"],
profile=profile_dir, start_wait=parallel["timeout"],
extra_params={"resources": parallel["resources"],
"mem": parallel["mem"],
"tag": parallel.get("tag"),
"run_local": parallel.get("run_local"),
"local_controller": parallel.get("local_controller")},
retries=parallel.get("retries")) | [
"def",
"create",
"(",
"parallel",
",",
"dirs",
",",
"config",
")",
":",
"profile_dir",
"=",
"utils",
".",
"safe_makedir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirs",
"[",
"\"work\"",
"]",
",",
"get_log_dir",
"(",
"config",
")",
",",
"\"ipython\"",
")",
")",
"has_mincores",
"=",
"any",
"(",
"x",
".",
"startswith",
"(",
"\"mincores=\"",
")",
"for",
"x",
"in",
"parallel",
"[",
"\"resources\"",
"]",
")",
"cores",
"=",
"min",
"(",
"_get_common_cores",
"(",
"config",
"[",
"\"resources\"",
"]",
")",
",",
"parallel",
"[",
"\"system_cores\"",
"]",
")",
"if",
"cores",
">",
"1",
"and",
"not",
"has_mincores",
":",
"adj_cores",
"=",
"max",
"(",
"1",
",",
"int",
"(",
"math",
".",
"floor",
"(",
"cores",
"*",
"float",
"(",
"parallel",
".",
"get",
"(",
"\"mem_pct\"",
",",
"1.0",
")",
")",
")",
")",
")",
"# if we have less scheduled cores than per machine, use the scheduled count",
"if",
"cores",
">",
"parallel",
"[",
"\"cores\"",
"]",
":",
"cores",
"=",
"parallel",
"[",
"\"cores\"",
"]",
"# if we have less total cores required for the entire process, use that",
"elif",
"adj_cores",
">",
"parallel",
"[",
"\"num_jobs\"",
"]",
"*",
"parallel",
"[",
"\"cores_per_job\"",
"]",
":",
"cores",
"=",
"parallel",
"[",
"\"num_jobs\"",
"]",
"*",
"parallel",
"[",
"\"cores_per_job\"",
"]",
"else",
":",
"cores",
"=",
"adj_cores",
"cores",
"=",
"per_machine_target_cores",
"(",
"cores",
",",
"parallel",
"[",
"\"num_jobs\"",
"]",
")",
"parallel",
"[",
"\"resources\"",
"]",
".",
"append",
"(",
"\"mincores=%s\"",
"%",
"cores",
")",
"return",
"ipython_cluster",
".",
"cluster_view",
"(",
"parallel",
"[",
"\"scheduler\"",
"]",
".",
"lower",
"(",
")",
",",
"parallel",
"[",
"\"queue\"",
"]",
",",
"parallel",
"[",
"\"num_jobs\"",
"]",
",",
"parallel",
"[",
"\"cores_per_job\"",
"]",
",",
"profile",
"=",
"profile_dir",
",",
"start_wait",
"=",
"parallel",
"[",
"\"timeout\"",
"]",
",",
"extra_params",
"=",
"{",
"\"resources\"",
":",
"parallel",
"[",
"\"resources\"",
"]",
",",
"\"mem\"",
":",
"parallel",
"[",
"\"mem\"",
"]",
",",
"\"tag\"",
":",
"parallel",
".",
"get",
"(",
"\"tag\"",
")",
",",
"\"run_local\"",
":",
"parallel",
".",
"get",
"(",
"\"run_local\"",
")",
",",
"\"local_controller\"",
":",
"parallel",
".",
"get",
"(",
"\"local_controller\"",
")",
"}",
",",
"retries",
"=",
"parallel",
".",
"get",
"(",
"\"retries\"",
")",
")"
]
| 62.242424 | 31.818182 |
def summoner_names_to_id(summoners):
"""
    Gets a list of summoner names and returns a dictionary mapping each player name to his/her summoner id
:param summoners: a list of player names
:return: a dictionary name -> id
"""
ids = {}
for start, end in _slice(0, len(summoners), 40):
result = get_summoners_by_name(summoners[start:end])
for name, summoner in result.items():
ids[name] = summoner.id
return ids | [
"def",
"summoner_names_to_id",
"(",
"summoners",
")",
":",
"ids",
"=",
"{",
"}",
"for",
"start",
",",
"end",
"in",
"_slice",
"(",
"0",
",",
"len",
"(",
"summoners",
")",
",",
"40",
")",
":",
"result",
"=",
"get_summoners_by_name",
"(",
"summoners",
"[",
"start",
":",
"end",
"]",
")",
"for",
"name",
",",
"summoner",
"in",
"result",
".",
"items",
"(",
")",
":",
"ids",
"[",
"name",
"]",
"=",
"summoner",
".",
"id",
"return",
"ids"
]
| 37.75 | 14.416667 |
def height_water_critical(FlowRate, Width):
"""Return the critical local water depth."""
#Checking input validity
ut.check_range([FlowRate, ">0", "Flow rate"], [Width, ">0", "Width"])
return (FlowRate / (Width * np.sqrt(gravity.magnitude))) ** (2/3) | [
"def",
"height_water_critical",
"(",
"FlowRate",
",",
"Width",
")",
":",
"#Checking input validity",
"ut",
".",
"check_range",
"(",
"[",
"FlowRate",
",",
"\">0\"",
",",
"\"Flow rate\"",
"]",
",",
"[",
"Width",
",",
"\">0\"",
",",
"\"Width\"",
"]",
")",
"return",
"(",
"FlowRate",
"/",
"(",
"Width",
"*",
"np",
".",
"sqrt",
"(",
"gravity",
".",
"magnitude",
")",
")",
")",
"**",
"(",
"2",
"/",
"3",
")"
]
| 52.2 | 15.4 |
def handle_aliases_in_init_files(name, import_alias_mapping):
"""Returns either None or the handled alias.
Used in add_module.
"""
for key, val in import_alias_mapping.items():
# e.g. Foo == Foo
# e.g. Foo.Bar startswith Foo.
if name == val or \
name.startswith(val + '.'):
# Replace val with key in name
# e.g. StarbucksVisitor.Tea -> Eataly.Tea because
# "from .nested_folder import StarbucksVisitor as Eataly"
return name.replace(val, key)
return None | [
"def",
"handle_aliases_in_init_files",
"(",
"name",
",",
"import_alias_mapping",
")",
":",
"for",
"key",
",",
"val",
"in",
"import_alias_mapping",
".",
"items",
"(",
")",
":",
"# e.g. Foo == Foo",
"# e.g. Foo.Bar startswith Foo.",
"if",
"name",
"==",
"val",
"or",
"name",
".",
"startswith",
"(",
"val",
"+",
"'.'",
")",
":",
"# Replace val with key in name",
"# e.g. StarbucksVisitor.Tea -> Eataly.Tea because",
"# \"from .nested_folder import StarbucksVisitor as Eataly\"",
"return",
"name",
".",
"replace",
"(",
"val",
",",
"key",
")",
"return",
"None"
]
| 36.733333 | 13.333333 |
def tofile(self, path='filters'):
r"""Save filter values to ascii-files.
Store the filter base and the filter coefficients in separate files
in the directory `path`; `path` can be a relative or absolute path.
Examples
--------
>>> import empymod
>>> # Load a filter
>>> filt = empymod.filters.wer_201_2018()
>>> # Save it to pure ascii-files
>>> filt.tofile()
>>> # This will save the following three files:
>>> # ./filters/wer_201_2018_base.txt
>>> # ./filters/wer_201_2018_j0.txt
>>> # ./filters/wer_201_2018_j1.txt
"""
# Get name of filter
name = self.savename
# Get absolute path, create if it doesn't exist
path = os.path.abspath(path)
os.makedirs(path, exist_ok=True)
# Save filter base
basefile = os.path.join(path, name + '_base.txt')
with open(basefile, 'w') as f:
self.base.tofile(f, sep="\n")
# Save filter coefficients
for val in ['j0', 'j1', 'sin', 'cos']:
if hasattr(self, val):
attrfile = os.path.join(path, name + '_' + val + '.txt')
with open(attrfile, 'w') as f:
getattr(self, val).tofile(f, sep="\n") | [
"def",
"tofile",
"(",
"self",
",",
"path",
"=",
"'filters'",
")",
":",
"# Get name of filter",
"name",
"=",
"self",
".",
"savename",
"# Get absolute path, create if it doesn't exist",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"os",
".",
"makedirs",
"(",
"path",
",",
"exist_ok",
"=",
"True",
")",
"# Save filter base",
"basefile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"name",
"+",
"'_base.txt'",
")",
"with",
"open",
"(",
"basefile",
",",
"'w'",
")",
"as",
"f",
":",
"self",
".",
"base",
".",
"tofile",
"(",
"f",
",",
"sep",
"=",
"\"\\n\"",
")",
"# Save filter coefficients",
"for",
"val",
"in",
"[",
"'j0'",
",",
"'j1'",
",",
"'sin'",
",",
"'cos'",
"]",
":",
"if",
"hasattr",
"(",
"self",
",",
"val",
")",
":",
"attrfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"name",
"+",
"'_'",
"+",
"val",
"+",
"'.txt'",
")",
"with",
"open",
"(",
"attrfile",
",",
"'w'",
")",
"as",
"f",
":",
"getattr",
"(",
"self",
",",
"val",
")",
".",
"tofile",
"(",
"f",
",",
"sep",
"=",
"\"\\n\"",
")"
]
| 33.526316 | 16.921053 |
def reciprocal_space(space, axes=None, halfcomplex=False, shift=True,
**kwargs):
"""Return the range of the Fourier transform on ``space``.
Parameters
----------
space : `DiscreteLp`
Real space whose reciprocal is calculated. It must be
uniformly discretized.
axes : sequence of ints, optional
Dimensions along which the Fourier transform is taken.
Default: all axes
halfcomplex : bool, optional
        If ``True``, take only the negative frequency part along the last
        axis. For ``False``, use the full frequency space.
This option can only be used if ``space`` is a space of
real-valued functions.
shift : bool or sequence of bools, optional
If ``True``, the reciprocal grid is shifted by half a stride in
the negative direction. With a boolean sequence, this option
is applied separately to each axis.
If a sequence is provided, it must have the same length as
``axes`` if supplied. Note that this must be set to ``True``
in the halved axis in half-complex transforms.
Default: ``True``
impl : string, optional
Implementation back-end for the created space.
Default: ``'numpy'``
exponent : float, optional
Create a space with this exponent. By default, the conjugate
exponent ``q = p / (p - 1)`` of the exponent of ``space`` is
used, where ``q = inf`` for ``p = 1`` and vice versa.
dtype : optional
Complex data type of the created space. By default, the
complex counterpart of ``space.dtype`` is used.
Returns
-------
rspace : `DiscreteLp`
Reciprocal of the input ``space``. If ``halfcomplex=True``, the
upper end of the domain (where the half space ends) is chosen to
coincide with the grid node.
"""
if not isinstance(space, DiscreteLp):
raise TypeError('`space` {!r} is not a `DiscreteLp` instance'
''.format(space))
if axes is None:
axes = tuple(range(space.ndim))
axes = normalized_axes_tuple(axes, space.ndim)
if not all(space.is_uniform_byaxis[axis] for axis in axes):
raise ValueError('`space` is not uniformly discretized in the '
'`axes` of the transform')
if halfcomplex and space.field != RealNumbers():
raise ValueError('`halfcomplex` option can only be used with real '
'spaces')
exponent = kwargs.pop('exponent', None)
if exponent is None:
exponent = conj_exponent(space.exponent)
dtype = kwargs.pop('dtype', None)
if dtype is None:
dtype = complex_dtype(space.dtype)
else:
if not is_complex_floating_dtype(dtype):
raise ValueError('{} is not a complex data type'
''.format(dtype_repr(dtype)))
impl = kwargs.pop('impl', 'numpy')
# Calculate range
recip_grid = reciprocal_grid(space.grid, shift=shift,
halfcomplex=halfcomplex, axes=axes)
# Need to do this for axes of length 1 that are not transformed
non_axes = [i for i in range(space.ndim) if i not in axes]
min_pt = {i: space.min_pt[i] for i in non_axes}
max_pt = {i: space.max_pt[i] for i in non_axes}
# Make a partition with nodes on the boundary in the last transform axis
# if `halfcomplex == True`, otherwise a standard partition.
if halfcomplex:
max_pt[axes[-1]] = recip_grid.max_pt[axes[-1]]
part = uniform_partition_fromgrid(recip_grid, min_pt, max_pt)
# Use convention of adding a hat to represent fourier transform of variable
axis_labels = list(space.axis_labels)
for i in axes:
# Avoid double math
label = axis_labels[i].replace('$', '')
axis_labels[i] = '$\\^{{{}}}$'.format(label)
recip_spc = uniform_discr_frompartition(part, exponent=exponent,
dtype=dtype, impl=impl,
axis_labels=axis_labels)
return recip_spc | [
"def",
"reciprocal_space",
"(",
"space",
",",
"axes",
"=",
"None",
",",
"halfcomplex",
"=",
"False",
",",
"shift",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"space",
",",
"DiscreteLp",
")",
":",
"raise",
"TypeError",
"(",
"'`space` {!r} is not a `DiscreteLp` instance'",
"''",
".",
"format",
"(",
"space",
")",
")",
"if",
"axes",
"is",
"None",
":",
"axes",
"=",
"tuple",
"(",
"range",
"(",
"space",
".",
"ndim",
")",
")",
"axes",
"=",
"normalized_axes_tuple",
"(",
"axes",
",",
"space",
".",
"ndim",
")",
"if",
"not",
"all",
"(",
"space",
".",
"is_uniform_byaxis",
"[",
"axis",
"]",
"for",
"axis",
"in",
"axes",
")",
":",
"raise",
"ValueError",
"(",
"'`space` is not uniformly discretized in the '",
"'`axes` of the transform'",
")",
"if",
"halfcomplex",
"and",
"space",
".",
"field",
"!=",
"RealNumbers",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'`halfcomplex` option can only be used with real '",
"'spaces'",
")",
"exponent",
"=",
"kwargs",
".",
"pop",
"(",
"'exponent'",
",",
"None",
")",
"if",
"exponent",
"is",
"None",
":",
"exponent",
"=",
"conj_exponent",
"(",
"space",
".",
"exponent",
")",
"dtype",
"=",
"kwargs",
".",
"pop",
"(",
"'dtype'",
",",
"None",
")",
"if",
"dtype",
"is",
"None",
":",
"dtype",
"=",
"complex_dtype",
"(",
"space",
".",
"dtype",
")",
"else",
":",
"if",
"not",
"is_complex_floating_dtype",
"(",
"dtype",
")",
":",
"raise",
"ValueError",
"(",
"'{} is not a complex data type'",
"''",
".",
"format",
"(",
"dtype_repr",
"(",
"dtype",
")",
")",
")",
"impl",
"=",
"kwargs",
".",
"pop",
"(",
"'impl'",
",",
"'numpy'",
")",
"# Calculate range",
"recip_grid",
"=",
"reciprocal_grid",
"(",
"space",
".",
"grid",
",",
"shift",
"=",
"shift",
",",
"halfcomplex",
"=",
"halfcomplex",
",",
"axes",
"=",
"axes",
")",
"# Need to do this for axes of length 1 that are not transformed",
"non_axes",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"space",
".",
"ndim",
")",
"if",
"i",
"not",
"in",
"axes",
"]",
"min_pt",
"=",
"{",
"i",
":",
"space",
".",
"min_pt",
"[",
"i",
"]",
"for",
"i",
"in",
"non_axes",
"}",
"max_pt",
"=",
"{",
"i",
":",
"space",
".",
"max_pt",
"[",
"i",
"]",
"for",
"i",
"in",
"non_axes",
"}",
"# Make a partition with nodes on the boundary in the last transform axis",
"# if `halfcomplex == True`, otherwise a standard partition.",
"if",
"halfcomplex",
":",
"max_pt",
"[",
"axes",
"[",
"-",
"1",
"]",
"]",
"=",
"recip_grid",
".",
"max_pt",
"[",
"axes",
"[",
"-",
"1",
"]",
"]",
"part",
"=",
"uniform_partition_fromgrid",
"(",
"recip_grid",
",",
"min_pt",
",",
"max_pt",
")",
"# Use convention of adding a hat to represent fourier transform of variable",
"axis_labels",
"=",
"list",
"(",
"space",
".",
"axis_labels",
")",
"for",
"i",
"in",
"axes",
":",
"# Avoid double math",
"label",
"=",
"axis_labels",
"[",
"i",
"]",
".",
"replace",
"(",
"'$'",
",",
"''",
")",
"axis_labels",
"[",
"i",
"]",
"=",
"'$\\\\^{{{}}}$'",
".",
"format",
"(",
"label",
")",
"recip_spc",
"=",
"uniform_discr_frompartition",
"(",
"part",
",",
"exponent",
"=",
"exponent",
",",
"dtype",
"=",
"dtype",
",",
"impl",
"=",
"impl",
",",
"axis_labels",
"=",
"axis_labels",
")",
"return",
"recip_spc"
]
| 40.13 | 20.7 |
def remove_commented_topic(self, topic_id):
"""
        Delete a topic that was commented on (removes all comments posted by the current user)
        :param topic_id: topic ID
        :return: None
"""
return [self.remove_comment(topic_id, item['id']) for item in self.list_user_comments(topic_id)] | [
"def",
"remove_commented_topic",
"(",
"self",
",",
"topic_id",
")",
":",
"return",
"[",
"self",
".",
"remove_comment",
"(",
"topic_id",
",",
"item",
"[",
"'id'",
"]",
")",
"for",
"item",
"in",
"self",
".",
"list_user_comments",
"(",
"topic_id",
")",
"]"
]
| 31.875 | 17.625 |
def refresh(self) -> None:
"""Prepare the actual |anntools.SeasonalANN| object for calculations.
        Despite all automated refreshings explained in the general
documentation on class |anntools.SeasonalANN|, it is still possible
to destroy the inner consistency of a |anntools.SeasonalANN| instance,
as it stores its |anntools.ANN| objects by reference. This is shown
by the following example:
>>> from hydpy import SeasonalANN, ann
>>> seasonalann = SeasonalANN(None)
>>> seasonalann.simulationstep = '1d'
>>> jan = ann(nmb_inputs=1, nmb_neurons=(1,), nmb_outputs=1,
... weights_input=0.0, weights_output=0.0,
... intercepts_hidden=0.0, intercepts_output=1.0)
>>> seasonalann(_1_1_12=jan)
>>> jan.nmb_inputs, jan.nmb_outputs = 2, 3
>>> jan.nmb_inputs, jan.nmb_outputs
(2, 3)
>>> seasonalann.nmb_inputs, seasonalann.nmb_outputs
(1, 1)
Due to the C level implementation of the mathematical core of
both |anntools.ANN| and |anntools.SeasonalANN| in module |annutils|,
such an inconsistency might result in a program crash without any
informative error message. Whenever you are afraid some
inconsistency might have crept in, and you want to repair it,
call method |anntools.SeasonalANN.refresh| explicitly:
>>> seasonalann.refresh()
>>> jan.nmb_inputs, jan.nmb_outputs
(2, 3)
>>> seasonalann.nmb_inputs, seasonalann.nmb_outputs
(2, 3)
"""
# pylint: disable=unsupported-assignment-operation
if self._do_refresh:
if self.anns:
self.__sann = annutils.SeasonalANN(self.anns)
setattr(self.fastaccess, self.name, self._sann)
self._set_shape((None, self._sann.nmb_anns))
if self._sann.nmb_anns > 1:
self._interp()
else:
self._sann.ratios[:, 0] = 1.
self.verify()
else:
self.__sann = None | [
"def",
"refresh",
"(",
"self",
")",
"->",
"None",
":",
"# pylint: disable=unsupported-assignment-operation",
"if",
"self",
".",
"_do_refresh",
":",
"if",
"self",
".",
"anns",
":",
"self",
".",
"__sann",
"=",
"annutils",
".",
"SeasonalANN",
"(",
"self",
".",
"anns",
")",
"setattr",
"(",
"self",
".",
"fastaccess",
",",
"self",
".",
"name",
",",
"self",
".",
"_sann",
")",
"self",
".",
"_set_shape",
"(",
"(",
"None",
",",
"self",
".",
"_sann",
".",
"nmb_anns",
")",
")",
"if",
"self",
".",
"_sann",
".",
"nmb_anns",
">",
"1",
":",
"self",
".",
"_interp",
"(",
")",
"else",
":",
"self",
".",
"_sann",
".",
"ratios",
"[",
":",
",",
"0",
"]",
"=",
"1.",
"self",
".",
"verify",
"(",
")",
"else",
":",
"self",
".",
"__sann",
"=",
"None"
]
| 43.416667 | 19.416667 |
def meta_model_fit(X_train, y_train, svm_hardness, fit_intercept, number_of_threads, regressor_type="LinearSVR"):
"""
Trains meta-labeler for predicting number of labels for each user.
Based on: Tang, L., Rajan, S., & Narayanan, V. K. (2009, April).
Large scale multi-label classification via metalabeler.
In Proceedings of the 18th international conference on World wide web (pp. 211-220). ACM.
"""
if regressor_type == "LinearSVR":
if X_train.shape[0] > X_train.shape[1]:
dual = False
else:
dual = True
model = LinearSVR(C=svm_hardness, random_state=0, dual=dual,
fit_intercept=fit_intercept)
y_train_meta = y_train.sum(axis=1)
model.fit(X_train, y_train_meta)
else:
print("Invalid regressor type.")
raise RuntimeError
return model | [
"def",
"meta_model_fit",
"(",
"X_train",
",",
"y_train",
",",
"svm_hardness",
",",
"fit_intercept",
",",
"number_of_threads",
",",
"regressor_type",
"=",
"\"LinearSVR\"",
")",
":",
"if",
"regressor_type",
"==",
"\"LinearSVR\"",
":",
"if",
"X_train",
".",
"shape",
"[",
"0",
"]",
">",
"X_train",
".",
"shape",
"[",
"1",
"]",
":",
"dual",
"=",
"False",
"else",
":",
"dual",
"=",
"True",
"model",
"=",
"LinearSVR",
"(",
"C",
"=",
"svm_hardness",
",",
"random_state",
"=",
"0",
",",
"dual",
"=",
"dual",
",",
"fit_intercept",
"=",
"fit_intercept",
")",
"y_train_meta",
"=",
"y_train",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"model",
".",
"fit",
"(",
"X_train",
",",
"y_train_meta",
")",
"else",
":",
"print",
"(",
"\"Invalid regressor type.\"",
")",
"raise",
"RuntimeError",
"return",
"model"
]
| 38.086957 | 22.869565 |
def put(self, vpn_id: int) -> Vpn:
"""
Updates the Vpn Resource with the
name.
"""
vpn = self._get_or_abort(vpn_id)
self.update(vpn)
session.commit()
return vpn | [
"def",
"put",
"(",
"self",
",",
"vpn_id",
":",
"int",
")",
"->",
"Vpn",
":",
"vpn",
"=",
"self",
".",
"_get_or_abort",
"(",
"vpn_id",
")",
"self",
".",
"update",
"(",
"vpn",
")",
"session",
".",
"commit",
"(",
")",
"return",
"vpn"
]
| 24 | 9.777778 |
def start(self, execution_history, backward_execution=False, generate_run_id=True):
""" Starts the execution of the state in a new thread.
:return:
"""
self.execution_history = execution_history
if generate_run_id:
self._run_id = run_id_generator()
self.backward_execution = copy.copy(backward_execution)
self.thread = threading.Thread(target=self.run)
self.thread.start() | [
"def",
"start",
"(",
"self",
",",
"execution_history",
",",
"backward_execution",
"=",
"False",
",",
"generate_run_id",
"=",
"True",
")",
":",
"self",
".",
"execution_history",
"=",
"execution_history",
"if",
"generate_run_id",
":",
"self",
".",
"_run_id",
"=",
"run_id_generator",
"(",
")",
"self",
".",
"backward_execution",
"=",
"copy",
".",
"copy",
"(",
"backward_execution",
")",
"self",
".",
"thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"run",
")",
"self",
".",
"thread",
".",
"start",
"(",
")"
]
| 39.909091 | 16.909091 |
def _parse(self):
"""Get axes and shape from file names."""
if not self.pattern:
raise TiffSequence.ParseError('invalid pattern')
pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
matches = pattern.findall(os.path.split(self.files[0])[-1])
if not matches:
raise TiffSequence.ParseError('pattern does not match file names')
matches = matches[-1]
if len(matches) % 2:
raise TiffSequence.ParseError(
'pattern does not match axis name and index')
axes = ''.join(m for m in matches[::2] if m)
if not axes:
raise TiffSequence.ParseError('pattern does not match file names')
indices = []
for fname in self.files:
fname = os.path.split(fname)[-1]
matches = pattern.findall(fname)[-1]
if axes != ''.join(m for m in matches[::2] if m):
raise ValueError('axes do not match within image sequence')
indices.append([int(m) for m in matches[1::2] if m])
shape = tuple(numpy.max(indices, axis=0))
startindex = tuple(numpy.min(indices, axis=0))
shape = tuple(i-j+1 for i, j in zip(shape, startindex))
if product(shape) != len(self.files):
log.warning(
'TiffSequence: files are missing. Missing data are zeroed')
self.axes = axes.upper()
self.shape = shape
self._indices = indices
self._startindex = startindex | [
"def",
"_parse",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"pattern",
":",
"raise",
"TiffSequence",
".",
"ParseError",
"(",
"'invalid pattern'",
")",
"pattern",
"=",
"re",
".",
"compile",
"(",
"self",
".",
"pattern",
",",
"re",
".",
"IGNORECASE",
"|",
"re",
".",
"VERBOSE",
")",
"matches",
"=",
"pattern",
".",
"findall",
"(",
"os",
".",
"path",
".",
"split",
"(",
"self",
".",
"files",
"[",
"0",
"]",
")",
"[",
"-",
"1",
"]",
")",
"if",
"not",
"matches",
":",
"raise",
"TiffSequence",
".",
"ParseError",
"(",
"'pattern does not match file names'",
")",
"matches",
"=",
"matches",
"[",
"-",
"1",
"]",
"if",
"len",
"(",
"matches",
")",
"%",
"2",
":",
"raise",
"TiffSequence",
".",
"ParseError",
"(",
"'pattern does not match axis name and index'",
")",
"axes",
"=",
"''",
".",
"join",
"(",
"m",
"for",
"m",
"in",
"matches",
"[",
":",
":",
"2",
"]",
"if",
"m",
")",
"if",
"not",
"axes",
":",
"raise",
"TiffSequence",
".",
"ParseError",
"(",
"'pattern does not match file names'",
")",
"indices",
"=",
"[",
"]",
"for",
"fname",
"in",
"self",
".",
"files",
":",
"fname",
"=",
"os",
".",
"path",
".",
"split",
"(",
"fname",
")",
"[",
"-",
"1",
"]",
"matches",
"=",
"pattern",
".",
"findall",
"(",
"fname",
")",
"[",
"-",
"1",
"]",
"if",
"axes",
"!=",
"''",
".",
"join",
"(",
"m",
"for",
"m",
"in",
"matches",
"[",
":",
":",
"2",
"]",
"if",
"m",
")",
":",
"raise",
"ValueError",
"(",
"'axes do not match within image sequence'",
")",
"indices",
".",
"append",
"(",
"[",
"int",
"(",
"m",
")",
"for",
"m",
"in",
"matches",
"[",
"1",
":",
":",
"2",
"]",
"if",
"m",
"]",
")",
"shape",
"=",
"tuple",
"(",
"numpy",
".",
"max",
"(",
"indices",
",",
"axis",
"=",
"0",
")",
")",
"startindex",
"=",
"tuple",
"(",
"numpy",
".",
"min",
"(",
"indices",
",",
"axis",
"=",
"0",
")",
")",
"shape",
"=",
"tuple",
"(",
"i",
"-",
"j",
"+",
"1",
"for",
"i",
",",
"j",
"in",
"zip",
"(",
"shape",
",",
"startindex",
")",
")",
"if",
"product",
"(",
"shape",
")",
"!=",
"len",
"(",
"self",
".",
"files",
")",
":",
"log",
".",
"warning",
"(",
"'TiffSequence: files are missing. Missing data are zeroed'",
")",
"self",
".",
"axes",
"=",
"axes",
".",
"upper",
"(",
")",
"self",
".",
"shape",
"=",
"shape",
"self",
".",
"_indices",
"=",
"indices",
"self",
".",
"_startindex",
"=",
"startindex"
]
| 43.588235 | 18.205882 |
def connect(self):
"""
Starts the mongodb connection. Must be called before anything else
will work.
"""
self.client = MongoClient(self.mongo_uri)
self.db = self.client[self.db_name] | [
"def",
"connect",
"(",
"self",
")",
":",
"self",
".",
"client",
"=",
"MongoClient",
"(",
"self",
".",
"mongo_uri",
")",
"self",
".",
"db",
"=",
"self",
".",
"client",
"[",
"self",
".",
"db_name",
"]"
]
| 32 | 12.857143 |
def filter_code(source, additional_imports=None,
expand_star_imports=False,
remove_all_unused_imports=False,
remove_duplicate_keys=False,
remove_unused_variables=False,
ignore_init_module_imports=False,
):
"""Yield code with unused imports removed."""
imports = SAFE_IMPORTS
if additional_imports:
imports |= frozenset(additional_imports)
del additional_imports
messages = check(source)
if ignore_init_module_imports:
marked_import_line_numbers = frozenset()
else:
marked_import_line_numbers = frozenset(
unused_import_line_numbers(messages))
marked_unused_module = collections.defaultdict(lambda: [])
for line_number, module_name in unused_import_module_name(messages):
marked_unused_module[line_number].append(module_name)
if expand_star_imports and not (
# See explanations in #18.
re.search(r'\b__all__\b', source) or
re.search(r'\bdel\b', source)
):
marked_star_import_line_numbers = frozenset(
star_import_used_line_numbers(messages))
if len(marked_star_import_line_numbers) > 1:
# Auto expanding only possible for single star import
marked_star_import_line_numbers = frozenset()
else:
undefined_names = []
for line_number, undefined_name, _ \
in star_import_usage_undefined_name(messages):
undefined_names.append(undefined_name)
if not undefined_names:
marked_star_import_line_numbers = frozenset()
else:
marked_star_import_line_numbers = frozenset()
if remove_unused_variables:
marked_variable_line_numbers = frozenset(
unused_variable_line_numbers(messages))
else:
marked_variable_line_numbers = frozenset()
if remove_duplicate_keys:
marked_key_line_numbers = frozenset(
duplicate_key_line_numbers(messages, source))
else:
marked_key_line_numbers = frozenset()
line_messages = get_messages_by_line(messages)
sio = io.StringIO(source)
previous_line = ''
for line_number, line in enumerate(sio.readlines(), start=1):
if '#' in line:
yield line
elif line_number in marked_import_line_numbers:
yield filter_unused_import(
line,
unused_module=marked_unused_module[line_number],
remove_all_unused_imports=remove_all_unused_imports,
imports=imports,
previous_line=previous_line)
elif line_number in marked_variable_line_numbers:
yield filter_unused_variable(line)
elif line_number in marked_key_line_numbers:
yield filter_duplicate_key(line, line_messages[line_number],
line_number, marked_key_line_numbers,
source)
elif line_number in marked_star_import_line_numbers:
yield filter_star_import(line, undefined_names)
else:
yield line
previous_line = line | [
"def",
"filter_code",
"(",
"source",
",",
"additional_imports",
"=",
"None",
",",
"expand_star_imports",
"=",
"False",
",",
"remove_all_unused_imports",
"=",
"False",
",",
"remove_duplicate_keys",
"=",
"False",
",",
"remove_unused_variables",
"=",
"False",
",",
"ignore_init_module_imports",
"=",
"False",
",",
")",
":",
"imports",
"=",
"SAFE_IMPORTS",
"if",
"additional_imports",
":",
"imports",
"|=",
"frozenset",
"(",
"additional_imports",
")",
"del",
"additional_imports",
"messages",
"=",
"check",
"(",
"source",
")",
"if",
"ignore_init_module_imports",
":",
"marked_import_line_numbers",
"=",
"frozenset",
"(",
")",
"else",
":",
"marked_import_line_numbers",
"=",
"frozenset",
"(",
"unused_import_line_numbers",
"(",
"messages",
")",
")",
"marked_unused_module",
"=",
"collections",
".",
"defaultdict",
"(",
"lambda",
":",
"[",
"]",
")",
"for",
"line_number",
",",
"module_name",
"in",
"unused_import_module_name",
"(",
"messages",
")",
":",
"marked_unused_module",
"[",
"line_number",
"]",
".",
"append",
"(",
"module_name",
")",
"if",
"expand_star_imports",
"and",
"not",
"(",
"# See explanations in #18.",
"re",
".",
"search",
"(",
"r'\\b__all__\\b'",
",",
"source",
")",
"or",
"re",
".",
"search",
"(",
"r'\\bdel\\b'",
",",
"source",
")",
")",
":",
"marked_star_import_line_numbers",
"=",
"frozenset",
"(",
"star_import_used_line_numbers",
"(",
"messages",
")",
")",
"if",
"len",
"(",
"marked_star_import_line_numbers",
")",
">",
"1",
":",
"# Auto expanding only possible for single star import",
"marked_star_import_line_numbers",
"=",
"frozenset",
"(",
")",
"else",
":",
"undefined_names",
"=",
"[",
"]",
"for",
"line_number",
",",
"undefined_name",
",",
"_",
"in",
"star_import_usage_undefined_name",
"(",
"messages",
")",
":",
"undefined_names",
".",
"append",
"(",
"undefined_name",
")",
"if",
"not",
"undefined_names",
":",
"marked_star_import_line_numbers",
"=",
"frozenset",
"(",
")",
"else",
":",
"marked_star_import_line_numbers",
"=",
"frozenset",
"(",
")",
"if",
"remove_unused_variables",
":",
"marked_variable_line_numbers",
"=",
"frozenset",
"(",
"unused_variable_line_numbers",
"(",
"messages",
")",
")",
"else",
":",
"marked_variable_line_numbers",
"=",
"frozenset",
"(",
")",
"if",
"remove_duplicate_keys",
":",
"marked_key_line_numbers",
"=",
"frozenset",
"(",
"duplicate_key_line_numbers",
"(",
"messages",
",",
"source",
")",
")",
"else",
":",
"marked_key_line_numbers",
"=",
"frozenset",
"(",
")",
"line_messages",
"=",
"get_messages_by_line",
"(",
"messages",
")",
"sio",
"=",
"io",
".",
"StringIO",
"(",
"source",
")",
"previous_line",
"=",
"''",
"for",
"line_number",
",",
"line",
"in",
"enumerate",
"(",
"sio",
".",
"readlines",
"(",
")",
",",
"start",
"=",
"1",
")",
":",
"if",
"'#'",
"in",
"line",
":",
"yield",
"line",
"elif",
"line_number",
"in",
"marked_import_line_numbers",
":",
"yield",
"filter_unused_import",
"(",
"line",
",",
"unused_module",
"=",
"marked_unused_module",
"[",
"line_number",
"]",
",",
"remove_all_unused_imports",
"=",
"remove_all_unused_imports",
",",
"imports",
"=",
"imports",
",",
"previous_line",
"=",
"previous_line",
")",
"elif",
"line_number",
"in",
"marked_variable_line_numbers",
":",
"yield",
"filter_unused_variable",
"(",
"line",
")",
"elif",
"line_number",
"in",
"marked_key_line_numbers",
":",
"yield",
"filter_duplicate_key",
"(",
"line",
",",
"line_messages",
"[",
"line_number",
"]",
",",
"line_number",
",",
"marked_key_line_numbers",
",",
"source",
")",
"elif",
"line_number",
"in",
"marked_star_import_line_numbers",
":",
"yield",
"filter_star_import",
"(",
"line",
",",
"undefined_names",
")",
"else",
":",
"yield",
"line",
"previous_line",
"=",
"line"
]
| 38.109756 | 17.073171 |
def OSPFNeighborState_OSPFNeighborIpAddress(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
OSPFNeighborState = ET.SubElement(config, "OSPFNeighborState", xmlns="http://brocade.com/ns/brocade-notification-stream")
OSPFNeighborIpAddress = ET.SubElement(OSPFNeighborState, "OSPFNeighborIpAddress")
OSPFNeighborIpAddress.text = kwargs.pop('OSPFNeighborIpAddress')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"OSPFNeighborState_OSPFNeighborIpAddress",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"OSPFNeighborState",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"OSPFNeighborState\"",
",",
"xmlns",
"=",
"\"http://brocade.com/ns/brocade-notification-stream\"",
")",
"OSPFNeighborIpAddress",
"=",
"ET",
".",
"SubElement",
"(",
"OSPFNeighborState",
",",
"\"OSPFNeighborIpAddress\"",
")",
"OSPFNeighborIpAddress",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'OSPFNeighborIpAddress'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
]
| 51.6 | 25.9 |
def run(self, callback=None, limit=0):
"""
Start pcap's loop over the interface, calling the given callback for each packet
:param callback: a function receiving (win_pcap, param, header, pkt_data) for each packet intercepted
:param limit: how many packets to capture (A value of -1 or 0 is equivalent to infinity)
"""
if self._handle is None:
raise self.DeviceIsNotOpen()
# Set new callback
self._callback = callback
# Run loop with callback wrapper
wtypes.pcap_loop(self._handle, limit, self._callback_wrapper, None) | [
"def",
"run",
"(",
"self",
",",
"callback",
"=",
"None",
",",
"limit",
"=",
"0",
")",
":",
"if",
"self",
".",
"_handle",
"is",
"None",
":",
"raise",
"self",
".",
"DeviceIsNotOpen",
"(",
")",
"# Set new callback",
"self",
".",
"_callback",
"=",
"callback",
"# Run loop with callback wrapper",
"wtypes",
".",
"pcap_loop",
"(",
"self",
".",
"_handle",
",",
"limit",
",",
"self",
".",
"_callback_wrapper",
",",
"None",
")"
]
| 49.916667 | 19.916667 |
def get_timedelta(self, now=None):
"""
Returns number of seconds that passed since ``self.started``, as float.
None is returned if ``self.started`` was not set yet.
"""
def datetime_to_time(timestamp):
atime = time.mktime(timestamp.timetuple())
atime += timestamp.microsecond / 10.0**6
return atime
if self.started is not None:
now = now or datetime.datetime.now()
started_time = datetime_to_time(self.started)
now_time = datetime_to_time(now)
return now_time - started_time
return None | [
"def",
"get_timedelta",
"(",
"self",
",",
"now",
"=",
"None",
")",
":",
"def",
"datetime_to_time",
"(",
"timestamp",
")",
":",
"atime",
"=",
"time",
".",
"mktime",
"(",
"timestamp",
".",
"timetuple",
"(",
")",
")",
"atime",
"+=",
"timestamp",
".",
"microsecond",
"/",
"10.0",
"**",
"6",
"return",
"atime",
"if",
"self",
".",
"started",
"is",
"not",
"None",
":",
"now",
"=",
"now",
"or",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"started_time",
"=",
"datetime_to_time",
"(",
"self",
".",
"started",
")",
"now_time",
"=",
"datetime_to_time",
"(",
"now",
")",
"return",
"now_time",
"-",
"started_time",
"return",
"None"
]
| 40.8 | 10.933333 |
def table(self):
"""
The |Table| object contained in this graphic frame. Raises
|ValueError| if this graphic frame does not contain a table.
"""
if not self.has_table:
raise ValueError('shape does not contain a table')
tbl = self._element.graphic.graphicData.tbl
return Table(tbl, self) | [
"def",
"table",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"has_table",
":",
"raise",
"ValueError",
"(",
"'shape does not contain a table'",
")",
"tbl",
"=",
"self",
".",
"_element",
".",
"graphic",
".",
"graphicData",
".",
"tbl",
"return",
"Table",
"(",
"tbl",
",",
"self",
")"
]
| 38.444444 | 14.444444 |
def print_paired_paths(nb_file, fmt):
"""Display the paired paths for this notebook"""
notebook = readf(nb_file, fmt)
formats = notebook.metadata.get('jupytext', {}).get('formats')
if formats:
for path, _ in paired_paths(nb_file, fmt, formats):
if path != nb_file:
sys.stdout.write(path + '\n') | [
"def",
"print_paired_paths",
"(",
"nb_file",
",",
"fmt",
")",
":",
"notebook",
"=",
"readf",
"(",
"nb_file",
",",
"fmt",
")",
"formats",
"=",
"notebook",
".",
"metadata",
".",
"get",
"(",
"'jupytext'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'formats'",
")",
"if",
"formats",
":",
"for",
"path",
",",
"_",
"in",
"paired_paths",
"(",
"nb_file",
",",
"fmt",
",",
"formats",
")",
":",
"if",
"path",
"!=",
"nb_file",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"path",
"+",
"'\\n'",
")"
]
| 42.375 | 11.625 |
def GetEntries(self, parser_mediator, data=None, **unused_kwargs):
"""Extract data from Transmission's resume folder files.
This is the main parsing engine for the parser. It determines if
the selected file is the proper file to parse and extracts current
running torrents.
Transmission stores an individual Bencoded file for each active download
in a folder named resume under the user's application data folder.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
data (Optional[dict[str, object]]): bencode data values.
"""
seeding_time = data.get('seeding-time-seconds', None)
event_data = TransmissionEventData()
event_data.destination = data.get('destination', None)
# Convert seconds to minutes.
event_data.seedtime, _ = divmod(seeding_time, 60)
# Create timeline events based on extracted values.
timestamp = data.get('added-date', None)
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = data.get('done-date', None)
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = data.get('activity-date', None)
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data) | [
"def",
"GetEntries",
"(",
"self",
",",
"parser_mediator",
",",
"data",
"=",
"None",
",",
"*",
"*",
"unused_kwargs",
")",
":",
"seeding_time",
"=",
"data",
".",
"get",
"(",
"'seeding-time-seconds'",
",",
"None",
")",
"event_data",
"=",
"TransmissionEventData",
"(",
")",
"event_data",
".",
"destination",
"=",
"data",
".",
"get",
"(",
"'destination'",
",",
"None",
")",
"# Convert seconds to minutes.",
"event_data",
".",
"seedtime",
",",
"_",
"=",
"divmod",
"(",
"seeding_time",
",",
"60",
")",
"# Create timeline events based on extracted values.",
"timestamp",
"=",
"data",
".",
"get",
"(",
"'added-date'",
",",
"None",
")",
"if",
"timestamp",
":",
"date_time",
"=",
"dfdatetime_posix_time",
".",
"PosixTime",
"(",
"timestamp",
"=",
"timestamp",
")",
"event",
"=",
"time_events",
".",
"DateTimeValuesEvent",
"(",
"date_time",
",",
"definitions",
".",
"TIME_DESCRIPTION_ADDED",
")",
"parser_mediator",
".",
"ProduceEventWithEventData",
"(",
"event",
",",
"event_data",
")",
"timestamp",
"=",
"data",
".",
"get",
"(",
"'done-date'",
",",
"None",
")",
"if",
"timestamp",
":",
"date_time",
"=",
"dfdatetime_posix_time",
".",
"PosixTime",
"(",
"timestamp",
"=",
"timestamp",
")",
"event",
"=",
"time_events",
".",
"DateTimeValuesEvent",
"(",
"date_time",
",",
"definitions",
".",
"TIME_DESCRIPTION_FILE_DOWNLOADED",
")",
"parser_mediator",
".",
"ProduceEventWithEventData",
"(",
"event",
",",
"event_data",
")",
"timestamp",
"=",
"data",
".",
"get",
"(",
"'activity-date'",
",",
"None",
")",
"if",
"timestamp",
":",
"date_time",
"=",
"dfdatetime_posix_time",
".",
"PosixTime",
"(",
"timestamp",
"=",
"timestamp",
")",
"event",
"=",
"time_events",
".",
"DateTimeValuesEvent",
"(",
"date_time",
",",
"definitions",
".",
"TIME_DESCRIPTION_LAST_ACCESS",
")",
"parser_mediator",
".",
"ProduceEventWithEventData",
"(",
"event",
",",
"event_data",
")"
]
| 43.139535 | 22.325581 |
def check_node_position(
self, parent_id, position, on_same_branch, db_session=None, *args, **kwargs
):
"""
Checks if node position for given parent is valid, raises exception if
this is not the case
:param parent_id:
:param position:
:param on_same_branch: indicates that we are checking same branch
:param db_session:
:return:
"""
return self.service.check_node_position(
parent_id=parent_id,
position=position,
on_same_branch=on_same_branch,
db_session=db_session,
*args,
**kwargs
) | [
"def",
"check_node_position",
"(",
"self",
",",
"parent_id",
",",
"position",
",",
"on_same_branch",
",",
"db_session",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"service",
".",
"check_node_position",
"(",
"parent_id",
"=",
"parent_id",
",",
"position",
"=",
"position",
",",
"on_same_branch",
"=",
"on_same_branch",
",",
"db_session",
"=",
"db_session",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| 30.380952 | 18.666667 |
def walk(self, *types):
'''
Iterator which visits all suites and suite files,
yielding test cases and keywords
'''
requested = types if len(types) > 0 else [SuiteFile, ResourceFile, SuiteFolder, Testcase, Keyword]
for thing in self.robot_files:
if thing.__class__ in requested:
yield thing
if isinstance(thing, SuiteFolder):
for child in thing.walk():
if child.__class__ in requested:
yield child
else:
for child in thing.walk(*types):
yield child | [
"def",
"walk",
"(",
"self",
",",
"*",
"types",
")",
":",
"requested",
"=",
"types",
"if",
"len",
"(",
"types",
")",
">",
"0",
"else",
"[",
"SuiteFile",
",",
"ResourceFile",
",",
"SuiteFolder",
",",
"Testcase",
",",
"Keyword",
"]",
"for",
"thing",
"in",
"self",
".",
"robot_files",
":",
"if",
"thing",
".",
"__class__",
"in",
"requested",
":",
"yield",
"thing",
"if",
"isinstance",
"(",
"thing",
",",
"SuiteFolder",
")",
":",
"for",
"child",
"in",
"thing",
".",
"walk",
"(",
")",
":",
"if",
"child",
".",
"__class__",
"in",
"requested",
":",
"yield",
"child",
"else",
":",
"for",
"child",
"in",
"thing",
".",
"walk",
"(",
"*",
"types",
")",
":",
"yield",
"child"
]
| 36.941176 | 16.588235 |
def disks(self):
"""Instance depends on the API version:
* 2016-04-30-preview: :class:`DisksOperations<azure.mgmt.compute.v2016_04_30_preview.operations.DisksOperations>`
* 2017-03-30: :class:`DisksOperations<azure.mgmt.compute.v2017_03_30.operations.DisksOperations>`
* 2018-04-01: :class:`DisksOperations<azure.mgmt.compute.v2018_04_01.operations.DisksOperations>`
* 2018-06-01: :class:`DisksOperations<azure.mgmt.compute.v2018_06_01.operations.DisksOperations>`
* 2018-09-30: :class:`DisksOperations<azure.mgmt.compute.v2018_09_30.operations.DisksOperations>`
"""
api_version = self._get_api_version('disks')
if api_version == '2016-04-30-preview':
from .v2016_04_30_preview.operations import DisksOperations as OperationClass
elif api_version == '2017-03-30':
from .v2017_03_30.operations import DisksOperations as OperationClass
elif api_version == '2018-04-01':
from .v2018_04_01.operations import DisksOperations as OperationClass
elif api_version == '2018-06-01':
from .v2018_06_01.operations import DisksOperations as OperationClass
elif api_version == '2018-09-30':
from .v2018_09_30.operations import DisksOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | [
"def",
"disks",
"(",
"self",
")",
":",
"api_version",
"=",
"self",
".",
"_get_api_version",
"(",
"'disks'",
")",
"if",
"api_version",
"==",
"'2016-04-30-preview'",
":",
"from",
".",
"v2016_04_30_preview",
".",
"operations",
"import",
"DisksOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2017-03-30'",
":",
"from",
".",
"v2017_03_30",
".",
"operations",
"import",
"DisksOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2018-04-01'",
":",
"from",
".",
"v2018_04_01",
".",
"operations",
"import",
"DisksOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2018-06-01'",
":",
"from",
".",
"v2018_06_01",
".",
"operations",
"import",
"DisksOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2018-09-30'",
":",
"from",
".",
"v2018_09_30",
".",
"operations",
"import",
"DisksOperations",
"as",
"OperationClass",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"APIVersion {} is not available\"",
".",
"format",
"(",
"api_version",
")",
")",
"return",
"OperationClass",
"(",
"self",
".",
"_client",
",",
"self",
".",
"config",
",",
"Serializer",
"(",
"self",
".",
"_models_dict",
"(",
"api_version",
")",
")",
",",
"Deserializer",
"(",
"self",
".",
"_models_dict",
"(",
"api_version",
")",
")",
")"
]
| 67.652174 | 36.521739 |
def absent(name, tags=None, region=None, key=None, keyid=None, profile=None):
'''
Ensure VPC with passed properties is absent.
name
Name of the VPC.
tags
A list of tags. All tags must match.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
r = __salt__['boto_vpc.get_id'](name=name, tags=tags, region=region,
key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to delete VPC: {0}.'.format(r['error']['message'])
return ret
_id = r.get('id')
if not _id:
ret['comment'] = '{0} VPC does not exist.'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'VPC {0} is set to be removed.'.format(name)
ret['result'] = None
return ret
r = __salt__['boto_vpc.delete'](vpc_name=name, tags=tags,
region=region, key=key,
keyid=keyid, profile=profile)
if not r['deleted']:
ret['result'] = False
ret['comment'] = 'Failed to delete VPC: {0}.'.format(r['error']['message'])
return ret
ret['changes']['old'] = {'vpc': _id}
ret['changes']['new'] = {'vpc': None}
ret['comment'] = 'VPC {0} deleted.'.format(name)
return ret | [
"def",
"absent",
"(",
"name",
",",
"tags",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"r",
"=",
"__salt__",
"[",
"'boto_vpc.get_id'",
"]",
"(",
"name",
"=",
"name",
",",
"tags",
"=",
"tags",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"'error'",
"in",
"r",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to delete VPC: {0}.'",
".",
"format",
"(",
"r",
"[",
"'error'",
"]",
"[",
"'message'",
"]",
")",
"return",
"ret",
"_id",
"=",
"r",
".",
"get",
"(",
"'id'",
")",
"if",
"not",
"_id",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'{0} VPC does not exist.'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'VPC {0} is set to be removed.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"ret",
"r",
"=",
"__salt__",
"[",
"'boto_vpc.delete'",
"]",
"(",
"vpc_name",
"=",
"name",
",",
"tags",
"=",
"tags",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"not",
"r",
"[",
"'deleted'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to delete VPC: {0}.'",
".",
"format",
"(",
"r",
"[",
"'error'",
"]",
"[",
"'message'",
"]",
")",
"return",
"ret",
"ret",
"[",
"'changes'",
"]",
"[",
"'old'",
"]",
"=",
"{",
"'vpc'",
":",
"_id",
"}",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"=",
"{",
"'vpc'",
":",
"None",
"}",
"ret",
"[",
"'comment'",
"]",
"=",
"'VPC {0} deleted.'",
".",
"format",
"(",
"name",
")",
"return",
"ret"
]
| 28.438596 | 24.684211 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'field') and self.field is not None:
_dict['field'] = self.field
if hasattr(self, 'count') and self.count is not None:
_dict['count'] = self.count
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'field'",
")",
"and",
"self",
".",
"field",
"is",
"not",
"None",
":",
"_dict",
"[",
"'field'",
"]",
"=",
"self",
".",
"field",
"if",
"hasattr",
"(",
"self",
",",
"'count'",
")",
"and",
"self",
".",
"count",
"is",
"not",
"None",
":",
"_dict",
"[",
"'count'",
"]",
"=",
"self",
".",
"count",
"return",
"_dict"
]
| 40 | 13.375 |
def fts_match(self, features, segment):
"""Answer question "are `ft_mask`'s features a subset of ft_seg?"
This is like `FeatureTable.match` except that it checks whether a
segment is valid and returns None if it is not.
Args:
features (set): pattern defined as set of (value, feature) tuples
segment (set): segment defined as a set of (value, feature) tuples
Returns:
bool: True iff all features in `ft_mask` are also in `ft_seg`; None
if segment is not valid
"""
features = set(features)
if self.seg_known(segment):
return features <= self.fts(segment)
else:
return None | [
"def",
"fts_match",
"(",
"self",
",",
"features",
",",
"segment",
")",
":",
"features",
"=",
"set",
"(",
"features",
")",
"if",
"self",
".",
"seg_known",
"(",
"segment",
")",
":",
"return",
"features",
"<=",
"self",
".",
"fts",
"(",
"segment",
")",
"else",
":",
"return",
"None"
]
| 37.157895 | 21.052632 |
def init_parser():
"""Initialize option parser."""
usage = "Usage: %(prog)s <option(s)> <file(s)>"
description = " Display information from object <file(s)>.\n"
description += " At least one of the following switches must be given:"
#
# Create an argument parser and an exclusive group.
#
parser = ArgumentParser(
usage=usage, description=description, add_help=False)
group = parser.add_mutually_exclusive_group()
#
# Add objdump parameters.
#
group.add_argument("-a", "--archive-headers",
action=DumpArchieveHeadersAction,
type=FileType("r"), nargs="+",
help="Display archive header information")
group.add_argument("-f", "--file-headers",
action=DumpFileHeadersAction,
type=FileType("r"), nargs="+",
help="Display the contents of the overall file header")
#group.add_argument("-p", "--private-headers", action="store", type=FileType("r"), nargs="+", help="Display object format specific file header contents")
#group.add_argument("-P", "--private=OPT,OPT...", action="store", type=FileType("r"), nargs="+", help="Display object format specific contents")
group.add_argument("-h", "--section-headers",
action=DumpSectionHeadersAction,
type=FileType("r"), nargs="+",
help="Display the contents of the section headers")
#group.add_argument("-x", "--all-headers", action="store", type=FileType("r"), nargs="+", help="Display the contents of all headers")
group.add_argument("-d", "--disassemble",
action=DisassembleSectionAction,
type=FileType("r"), nargs="+",
help="Display assembler contents of executable sections")
group.add_argument("-D", "--disassemble-all",
action=DisassembleSectionsAction,
type=FileType("r"), nargs="+",
help="Display assembler contents of executable sections")
#group.add_argument("-S", "--source", action="store", type=FileType("r"), nargs="+", help="Intermix source code with disassembly")
group.add_argument("-s", "--full-contents",
action=DumpSectionContentAction,
type=FileType("r"), nargs="+",
help="Display the full contents of all sections requested")
#group.add_argument("-g", "--debugging", action="store", type=FileType("r"), nargs="+", help="Display debug information in object file")
#group.add_argument("-e", "--debugging-tags", action="store", type=FileType("r"), nargs="+", help="Display debug information using ctags style")
#group.add_argument("-G", "--stabs", action="store", type=FileType("r"), nargs="+", help="Display (in raw form) any STABS info in the file")
#-W[lLiaprmfFsoRt] or")
#--dwarf[=rawline,=decodedline,=info,=abbrev,=pubnames,=aranges,=macro,=frames,")
# =frames-interp,=str,=loc,=Ranges,=pubtypes,")
# =gdb_index,=trace_info,=trace_abbrev,=trace_aranges]")
# Display DWARF info in the file")
group.add_argument("-t", "--syms",
action=DumpFileSymbols,
type=FileType("r"), nargs="+",
help="Display the contents of the symbol table(s)")
#-T, --dynamic-syms Display the contents of the dynamic symbol table")
#-r, --reloc Display the relocation entries in the file")
#-R, --dynamic-reloc Display the dynamic relocation entries in the file")
group.add_argument("-v", "--version", action="version",
version="%%(prog)s %s (%s)" % (__version__, __description__),
help="Display this program's version number")
group.add_argument("-i", "--info",
action=ListFormatAndArchitecturesInformationAction,
nargs=REMAINDER,
help="List object formats and architectures supported")
group.add_argument("-H", "--help", action="store_true", default=False,
help="Display this information")
return parser | [
"def",
"init_parser",
"(",
")",
":",
"usage",
"=",
"\"Usage: %(prog)s <option(s)> <file(s)>\"",
"description",
"=",
"\" Display information from object <file(s)>.\\n\"",
"description",
"+=",
"\" At least one of the following switches must be given:\"",
"#",
"# Create an argument parser and an exclusive group.",
"#",
"parser",
"=",
"ArgumentParser",
"(",
"usage",
"=",
"usage",
",",
"description",
"=",
"description",
",",
"add_help",
"=",
"False",
")",
"group",
"=",
"parser",
".",
"add_mutually_exclusive_group",
"(",
")",
"#",
"# Add objdump parameters.",
"#",
"group",
".",
"add_argument",
"(",
"\"-a\"",
",",
"\"--archive-headers\"",
",",
"action",
"=",
"DumpArchieveHeadersAction",
",",
"type",
"=",
"FileType",
"(",
"\"r\"",
")",
",",
"nargs",
"=",
"\"+\"",
",",
"help",
"=",
"\"Display archive header information\"",
")",
"group",
".",
"add_argument",
"(",
"\"-f\"",
",",
"\"--file-headers\"",
",",
"action",
"=",
"DumpFileHeadersAction",
",",
"type",
"=",
"FileType",
"(",
"\"r\"",
")",
",",
"nargs",
"=",
"\"+\"",
",",
"help",
"=",
"\"Display the contents of the overall file header\"",
")",
"#group.add_argument(\"-p\", \"--private-headers\", action=\"store\", type=FileType(\"r\"), nargs=\"+\", help=\"Display object format specific file header contents\")",
"#group.add_argument(\"-P\", \"--private=OPT,OPT...\", action=\"store\", type=FileType(\"r\"), nargs=\"+\", help=\"Display object format specific contents\")",
"group",
".",
"add_argument",
"(",
"\"-h\"",
",",
"\"--section-headers\"",
",",
"action",
"=",
"DumpSectionHeadersAction",
",",
"type",
"=",
"FileType",
"(",
"\"r\"",
")",
",",
"nargs",
"=",
"\"+\"",
",",
"help",
"=",
"\"Display the contents of the section headers\"",
")",
"#group.add_argument(\"-x\", \"--all-headers\", action=\"store\", type=FileType(\"r\"), nargs=\"+\", help=\"Display the contents of all headers\")",
"group",
".",
"add_argument",
"(",
"\"-d\"",
",",
"\"--disassemble\"",
",",
"action",
"=",
"DisassembleSectionAction",
",",
"type",
"=",
"FileType",
"(",
"\"r\"",
")",
",",
"nargs",
"=",
"\"+\"",
",",
"help",
"=",
"\"Display assembler contents of executable sections\"",
")",
"group",
".",
"add_argument",
"(",
"\"-D\"",
",",
"\"--disassemble-all\"",
",",
"action",
"=",
"DisassembleSectionsAction",
",",
"type",
"=",
"FileType",
"(",
"\"r\"",
")",
",",
"nargs",
"=",
"\"+\"",
",",
"help",
"=",
"\"Display assembler contents of executable sections\"",
")",
"#group.add_argument(\"-S\", \"--source\", action=\"store\", type=FileType(\"r\"), nargs=\"+\", help=\"Intermix source code with disassembly\")",
"group",
".",
"add_argument",
"(",
"\"-s\"",
",",
"\"--full-contents\"",
",",
"action",
"=",
"DumpSectionContentAction",
",",
"type",
"=",
"FileType",
"(",
"\"r\"",
")",
",",
"nargs",
"=",
"\"+\"",
",",
"help",
"=",
"\"Display the full contents of all sections requested\"",
")",
"#group.add_argument(\"-g\", \"--debugging\", action=\"store\", type=FileType(\"r\"), nargs=\"+\", help=\"Display debug information in object file\")",
"#group.add_argument(\"-e\", \"--debugging-tags\", action=\"store\", type=FileType(\"r\"), nargs=\"+\", help=\"Display debug information using ctags style\")",
"#group.add_argument(\"-G\", \"--stabs\", action=\"store\", type=FileType(\"r\"), nargs=\"+\", help=\"Display (in raw form) any STABS info in the file\")",
"#-W[lLiaprmfFsoRt] or\")",
"#--dwarf[=rawline,=decodedline,=info,=abbrev,=pubnames,=aranges,=macro,=frames,\")",
"# =frames-interp,=str,=loc,=Ranges,=pubtypes,\")",
"# =gdb_index,=trace_info,=trace_abbrev,=trace_aranges]\")",
"# Display DWARF info in the file\")",
"group",
".",
"add_argument",
"(",
"\"-t\"",
",",
"\"--syms\"",
",",
"action",
"=",
"DumpFileSymbols",
",",
"type",
"=",
"FileType",
"(",
"\"r\"",
")",
",",
"nargs",
"=",
"\"+\"",
",",
"help",
"=",
"\"Display the contents of the symbol table(s)\"",
")",
"#-T, --dynamic-syms Display the contents of the dynamic symbol table\")",
"#-r, --reloc Display the relocation entries in the file\")",
"#-R, --dynamic-reloc Display the dynamic relocation entries in the file\")",
"group",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--version\"",
",",
"action",
"=",
"\"version\"",
",",
"version",
"=",
"\"%%(prog)s %s (%s)\"",
"%",
"(",
"__version__",
",",
"__description__",
")",
",",
"help",
"=",
"\"Display this program's version number\"",
")",
"group",
".",
"add_argument",
"(",
"\"-i\"",
",",
"\"--info\"",
",",
"action",
"=",
"ListFormatAndArchitecturesInformationAction",
",",
"nargs",
"=",
"REMAINDER",
",",
"help",
"=",
"\"List object formats and architectures supported\"",
")",
"group",
".",
"add_argument",
"(",
"\"-H\"",
",",
"\"--help\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"Display this information\"",
")",
"return",
"parser"
]
| 43.795455 | 29.011364 |
def condensed_coords_between(pop1, pop2, n):
"""Return indices into a condensed distance matrix for all pairwise
comparisons between two populations.
Parameters
----------
pop1 : array_like, int
Indices of samples or haplotypes within the first population.
pop2 : array_like, int
Indices of samples or haplotypes within the second population.
n : int
Size of the square matrix (length of first or second dimension).
Returns
-------
indices : ndarray, int
"""
return [condensed_coords(i, j, n)
for i, j in itertools.product(sorted(pop1), sorted(pop2))] | [
"def",
"condensed_coords_between",
"(",
"pop1",
",",
"pop2",
",",
"n",
")",
":",
"return",
"[",
"condensed_coords",
"(",
"i",
",",
"j",
",",
"n",
")",
"for",
"i",
",",
"j",
"in",
"itertools",
".",
"product",
"(",
"sorted",
"(",
"pop1",
")",
",",
"sorted",
"(",
"pop2",
")",
")",
"]"
]
| 29.47619 | 22.333333 |
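
A minimal usage sketch of the function above. The `condensed_coords` helper is not shown in this entry, so the version below is an assumed implementation based on the standard scipy-style condensed-matrix index formula; treat it as illustrative, not as the library's own code.

import itertools

def condensed_coords(i, j, n):
    # Assumed helper: map square coordinates (i, j), i != j, of an n x n
    # matrix to the index used by a scipy-style condensed distance matrix.
    i, j = min(i, j), max(i, j)
    return n * i - i * (i + 1) // 2 + (j - i - 1)

def condensed_coords_between(pop1, pop2, n):
    return [condensed_coords(i, j, n)
            for i, j in itertools.product(sorted(pop1), sorted(pop2))]

# All pairwise comparisons between samples {0, 1} and {2, 3} in a 5-sample matrix.
print(condensed_coords_between([0, 1], [2, 3], 5))  # [1, 2, 4, 5]
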
def transform(self, data, allow_timestamps=False):
"""
Transform H2OFrame using a MOJO Pipeline.
:param data: Frame to be transformed.
:param allow_timestamps: Allows datetime columns to be used directly with MOJO pipelines. It is recommended
to parse your datetime columns as Strings when using pipelines because pipelines can interpret certain datetime
        formats in a different way. If your H2OFrame is parsed from a binary file format (e.g. Parquet) instead of CSV
it is safe to turn this option on and use datetime columns directly.
:returns: A new H2OFrame.
"""
assert_is_type(data, H2OFrame)
assert_is_type(allow_timestamps, bool)
return H2OFrame._expr(ExprNode("mojo.pipeline.transform", self.pipeline_id[0], data, allow_timestamps)) | [
"def",
"transform",
"(",
"self",
",",
"data",
",",
"allow_timestamps",
"=",
"False",
")",
":",
"assert_is_type",
"(",
"data",
",",
"H2OFrame",
")",
"assert_is_type",
"(",
"allow_timestamps",
",",
"bool",
")",
"return",
"H2OFrame",
".",
"_expr",
"(",
"ExprNode",
"(",
"\"mojo.pipeline.transform\"",
",",
"self",
".",
"pipeline_id",
"[",
"0",
"]",
",",
"data",
",",
"allow_timestamps",
")",
")"
]
| 54.733333 | 30.466667 |
def convert_fillstyle(inputstyle, mode, inputmode=None):
"""
Convert *inputstyle* to ROOT or matplotlib format.
Output format is determined by *mode* ('root' or 'mpl'). The *inputstyle*
may be a ROOT fill style, a matplotlib hatch style, None, 'none', 'hollow',
or 'solid'.
"""
mode = mode.lower()
if mode not in ('mpl', 'root'):
raise ValueError("`{0}` is not a valid `mode`".format(mode))
if inputmode is None:
try:
# inputstyle is a ROOT linestyle
inputstyle = int(inputstyle)
inputmode = 'root'
except (TypeError, ValueError):
if inputstyle is None:
inputmode = 'mpl'
elif inputstyle in fillstyles_text2root:
inputmode = 'root'
inputstyle = fillstyles_text2root[inputstyle]
elif inputstyle[0] in fillstyles_mpl2root:
inputmode = 'mpl'
else:
raise ValueError(
"`{0}` is not a valid `fillstyle`".format(inputstyle))
if inputmode == 'root':
if mode == 'root':
return inputstyle
if inputstyle in fillstyles_root2mpl:
return fillstyles_root2mpl[inputstyle]
raise ValueError(
"`{0}` is not a valid `fillstyle`".format(inputstyle))
else:
if inputstyle is not None and inputstyle[0] not in fillstyles_mpl2root:
raise ValueError(
"`{0}` is not a valid matplotlib `fillstyle`".format(
inputstyle))
if mode == 'mpl':
return inputstyle
if inputstyle is None:
return fillstyles_mpl2root[inputstyle]
return fillstyles_mpl2root[inputstyle[0]] | [
"def",
"convert_fillstyle",
"(",
"inputstyle",
",",
"mode",
",",
"inputmode",
"=",
"None",
")",
":",
"mode",
"=",
"mode",
".",
"lower",
"(",
")",
"if",
"mode",
"not",
"in",
"(",
"'mpl'",
",",
"'root'",
")",
":",
"raise",
"ValueError",
"(",
"\"`{0}` is not a valid `mode`\"",
".",
"format",
"(",
"mode",
")",
")",
"if",
"inputmode",
"is",
"None",
":",
"try",
":",
"# inputstyle is a ROOT linestyle",
"inputstyle",
"=",
"int",
"(",
"inputstyle",
")",
"inputmode",
"=",
"'root'",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"if",
"inputstyle",
"is",
"None",
":",
"inputmode",
"=",
"'mpl'",
"elif",
"inputstyle",
"in",
"fillstyles_text2root",
":",
"inputmode",
"=",
"'root'",
"inputstyle",
"=",
"fillstyles_text2root",
"[",
"inputstyle",
"]",
"elif",
"inputstyle",
"[",
"0",
"]",
"in",
"fillstyles_mpl2root",
":",
"inputmode",
"=",
"'mpl'",
"else",
":",
"raise",
"ValueError",
"(",
"\"`{0}` is not a valid `fillstyle`\"",
".",
"format",
"(",
"inputstyle",
")",
")",
"if",
"inputmode",
"==",
"'root'",
":",
"if",
"mode",
"==",
"'root'",
":",
"return",
"inputstyle",
"if",
"inputstyle",
"in",
"fillstyles_root2mpl",
":",
"return",
"fillstyles_root2mpl",
"[",
"inputstyle",
"]",
"raise",
"ValueError",
"(",
"\"`{0}` is not a valid `fillstyle`\"",
".",
"format",
"(",
"inputstyle",
")",
")",
"else",
":",
"if",
"inputstyle",
"is",
"not",
"None",
"and",
"inputstyle",
"[",
"0",
"]",
"not",
"in",
"fillstyles_mpl2root",
":",
"raise",
"ValueError",
"(",
"\"`{0}` is not a valid matplotlib `fillstyle`\"",
".",
"format",
"(",
"inputstyle",
")",
")",
"if",
"mode",
"==",
"'mpl'",
":",
"return",
"inputstyle",
"if",
"inputstyle",
"is",
"None",
":",
"return",
"fillstyles_mpl2root",
"[",
"inputstyle",
"]",
"return",
"fillstyles_mpl2root",
"[",
"inputstyle",
"[",
"0",
"]",
"]"
]
| 38.772727 | 15.545455 |
def set_target(self, target, ctx=None):
"""Set target.
:param target: new target to use.
        :param ctx: target ctx if target is a class/instance attribute.
"""
if target is not None:
# check if target is already intercepted
if is_intercepted(target):
# set self interception last target reference
self._interception = target
# and targets, ctx
self.target, self.ctx = get_intercepted(target)
else:
# if not, update target reference with new interception
self.apply_pointcut(target, ctx=ctx) | [
"def",
"set_target",
"(",
"self",
",",
"target",
",",
"ctx",
"=",
"None",
")",
":",
"if",
"target",
"is",
"not",
"None",
":",
"# check if target is already intercepted",
"if",
"is_intercepted",
"(",
"target",
")",
":",
"# set self interception last target reference",
"self",
".",
"_interception",
"=",
"target",
"# and targets, ctx",
"self",
".",
"target",
",",
"self",
".",
"ctx",
"=",
"get_intercepted",
"(",
"target",
")",
"else",
":",
"# if not, update target reference with new interception",
"self",
".",
"apply_pointcut",
"(",
"target",
",",
"ctx",
"=",
"ctx",
")"
]
| 38.411765 | 15.529412 |
def translate_state(self, s):
"""Translate the given state string
"""
if not isinstance(s, basestring):
return s
s = s.capitalize().replace("_", " ")
return t(_(s)) | [
"def",
"translate_state",
"(",
"self",
",",
"s",
")",
":",
"if",
"not",
"isinstance",
"(",
"s",
",",
"basestring",
")",
":",
"return",
"s",
"s",
"=",
"s",
".",
"capitalize",
"(",
")",
".",
"replace",
"(",
"\"_\"",
",",
"\" \"",
")",
"return",
"t",
"(",
"_",
"(",
"s",
")",
")"
]
| 30 | 7.714286 |
def _modify(self, **patch):
"""Override modify to check kwargs before request sent to device."""
if 'state' in patch:
if patch['state'] != 'user-up' and patch['state'] != 'user-down':
msg = "The members resource does not support a modify with " \
"the value of the 'state' attribute as %s. " \
"The accepted values are 'user-up' or " \
"'user-down'" % patch['state']
raise MemberStateModifyUnsupported(msg)
if 'session' in patch:
if patch['session'] != 'user-enabled' and patch['session'] != \
'user-disabled':
msg = "The members resource does not support a modify with " \
"the value of the 'session' attribute as %s. " \
"The accepted values are 'user-enabled' or " \
"'user-disabled'" % patch['session']
raise MemberStateModifyUnsupported(msg)
super(Members, self)._modify(**patch) | [
"def",
"_modify",
"(",
"self",
",",
"*",
"*",
"patch",
")",
":",
"if",
"'state'",
"in",
"patch",
":",
"if",
"patch",
"[",
"'state'",
"]",
"!=",
"'user-up'",
"and",
"patch",
"[",
"'state'",
"]",
"!=",
"'user-down'",
":",
"msg",
"=",
"\"The members resource does not support a modify with \"",
"\"the value of the 'state' attribute as %s. \"",
"\"The accepted values are 'user-up' or \"",
"\"'user-down'\"",
"%",
"patch",
"[",
"'state'",
"]",
"raise",
"MemberStateModifyUnsupported",
"(",
"msg",
")",
"if",
"'session'",
"in",
"patch",
":",
"if",
"patch",
"[",
"'session'",
"]",
"!=",
"'user-enabled'",
"and",
"patch",
"[",
"'session'",
"]",
"!=",
"'user-disabled'",
":",
"msg",
"=",
"\"The members resource does not support a modify with \"",
"\"the value of the 'session' attribute as %s. \"",
"\"The accepted values are 'user-enabled' or \"",
"\"'user-disabled'\"",
"%",
"patch",
"[",
"'session'",
"]",
"raise",
"MemberStateModifyUnsupported",
"(",
"msg",
")",
"super",
"(",
"Members",
",",
"self",
")",
".",
"_modify",
"(",
"*",
"*",
"patch",
")"
]
| 57.722222 | 20.055556 |
def _dev_by_id(self, device_type):
"""! Get a dict, USBID -> device, for a device class
        @param device_type The type of devices to search. For example, "serial"
looks for all serial devices connected to this computer
@return A dict: Device USBID -> device file in /dev
"""
dir = os.path.join("/dev", device_type, "by-id")
if os.path.isdir(dir):
to_ret = dict(
self._hex_ids([os.path.join(dir, f) for f in os.listdir(dir)])
)
return to_ret
else:
logger.error(
"Could not get %s devices by id. "
"This could be because your Linux distribution "
"does not use udev, or does not create /dev/%s/by-id "
"symlinks. Please submit an issue to github.com/"
"armmbed/mbed-ls.",
device_type,
device_type,
)
return {} | [
"def",
"_dev_by_id",
"(",
"self",
",",
"device_type",
")",
":",
"dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"/dev\"",
",",
"device_type",
",",
"\"by-id\"",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"dir",
")",
":",
"to_ret",
"=",
"dict",
"(",
"self",
".",
"_hex_ids",
"(",
"[",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"f",
")",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"dir",
")",
"]",
")",
")",
"return",
"to_ret",
"else",
":",
"logger",
".",
"error",
"(",
"\"Could not get %s devices by id. \"",
"\"This could be because your Linux distribution \"",
"\"does not use udev, or does not create /dev/%s/by-id \"",
"\"symlinks. Please submit an issue to github.com/\"",
"\"armmbed/mbed-ls.\"",
",",
"device_type",
",",
"device_type",
",",
")",
"return",
"{",
"}"
]
| 41.217391 | 18.043478 |
def replace(s, replace):
"""Replace multiple values in a string"""
for r in replace:
s = s.replace(*r)
return s | [
"def",
"replace",
"(",
"s",
",",
"replace",
")",
":",
"for",
"r",
"in",
"replace",
":",
"s",
"=",
"s",
".",
"replace",
"(",
"*",
"r",
")",
"return",
"s"
]
| 25.4 | 15.6 |
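
A minimal usage sketch of the helper above; the sample string and replacement pairs are illustrative only.

def replace(s, replace):
    # Apply each (old, new) pair in turn via str.replace.
    for r in replace:
        s = s.replace(*r)
    return s

print(replace("a-b_c", [("-", "."), ("_", ".")]))  # a.b.c
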
def _make_builder_configs():
"""Make built-in Librispeech BuilderConfigs.
Uses 4 text encodings (plain text, bytes, subwords with 8k vocab, subwords
with 32k vocab) crossed with the data subsets (clean100, clean360, all).
Returns:
`list<tfds.audio.LibrispeechConfig>`
"""
text_encoder_configs = [
None,
tfds.features.text.TextEncoderConfig(
name="bytes", encoder=tfds.features.text.ByteTextEncoder()),
tfds.features.text.TextEncoderConfig(
name="subwords8k",
encoder_cls=tfds.features.text.SubwordTextEncoder,
vocab_size=2**13),
tfds.features.text.TextEncoderConfig(
name="subwords32k",
encoder_cls=tfds.features.text.SubwordTextEncoder,
vocab_size=2**15),
]
version = "0.1.0"
configs = []
for text_encoder_config in text_encoder_configs:
for data in _DATA_OPTIONS:
config = LibrispeechConfig(
version=version, text_encoder_config=text_encoder_config, data=data)
configs.append(config)
return configs | [
"def",
"_make_builder_configs",
"(",
")",
":",
"text_encoder_configs",
"=",
"[",
"None",
",",
"tfds",
".",
"features",
".",
"text",
".",
"TextEncoderConfig",
"(",
"name",
"=",
"\"bytes\"",
",",
"encoder",
"=",
"tfds",
".",
"features",
".",
"text",
".",
"ByteTextEncoder",
"(",
")",
")",
",",
"tfds",
".",
"features",
".",
"text",
".",
"TextEncoderConfig",
"(",
"name",
"=",
"\"subwords8k\"",
",",
"encoder_cls",
"=",
"tfds",
".",
"features",
".",
"text",
".",
"SubwordTextEncoder",
",",
"vocab_size",
"=",
"2",
"**",
"13",
")",
",",
"tfds",
".",
"features",
".",
"text",
".",
"TextEncoderConfig",
"(",
"name",
"=",
"\"subwords32k\"",
",",
"encoder_cls",
"=",
"tfds",
".",
"features",
".",
"text",
".",
"SubwordTextEncoder",
",",
"vocab_size",
"=",
"2",
"**",
"15",
")",
",",
"]",
"version",
"=",
"\"0.1.0\"",
"configs",
"=",
"[",
"]",
"for",
"text_encoder_config",
"in",
"text_encoder_configs",
":",
"for",
"data",
"in",
"_DATA_OPTIONS",
":",
"config",
"=",
"LibrispeechConfig",
"(",
"version",
"=",
"version",
",",
"text_encoder_config",
"=",
"text_encoder_config",
",",
"data",
"=",
"data",
")",
"configs",
".",
"append",
"(",
"config",
")",
"return",
"configs"
]
| 33.966667 | 18.2 |
def stop_workers_async(self):
"""Signal that all workers should stop without waiting."""
self._started = False
for worker in self._workers:
worker.signal_stop() | [
"def",
"stop_workers_async",
"(",
"self",
")",
":",
"self",
".",
"_started",
"=",
"False",
"for",
"worker",
"in",
"self",
".",
"_workers",
":",
"worker",
".",
"signal_stop",
"(",
")"
]
| 32 | 12.333333 |
def on_modified(self, event):
"""Handle a file modified event."""
path = event.src_path
if path not in self.saw:
self.saw.add(path)
self.recompile(path) | [
"def",
"on_modified",
"(",
"self",
",",
"event",
")",
":",
"path",
"=",
"event",
".",
"src_path",
"if",
"path",
"not",
"in",
"self",
".",
"saw",
":",
"self",
".",
"saw",
".",
"add",
"(",
"path",
")",
"self",
".",
"recompile",
"(",
"path",
")"
]
| 32.5 | 8 |
def open(self, path, binary=False):
"""Open file and return a stream."""
if binary:
return open(path, "rb")
return open(path, encoding="utf-8") | [
"def",
"open",
"(",
"self",
",",
"path",
",",
"binary",
"=",
"False",
")",
":",
"if",
"binary",
":",
"return",
"open",
"(",
"path",
",",
"\"rb\"",
")",
"return",
"open",
"(",
"path",
",",
"encoding",
"=",
"\"utf-8\"",
")"
]
| 35 | 7 |
def check_connection (self):
"""
        In case of proxy, delegate to HttpUrl. Else check in this
        order: login, change directory, list the file.
"""
# proxy support (we support only http)
self.set_proxy(self.aggregate.config["proxy"].get(self.scheme))
if self.proxy:
# using a (HTTP) proxy
http = httpurl.HttpUrl(self.base_url,
self.recursion_level,
self.aggregate,
parent_url=self.parent_url,
base_ref=self.base_ref,
line=self.line,
column=self.column,
name=self.name)
http.build_url()
return http.check()
self.login()
self.negotiate_encoding()
self.filename = self.cwd()
self.listfile()
self.files = []
return None | [
"def",
"check_connection",
"(",
"self",
")",
":",
"# proxy support (we support only http)",
"self",
".",
"set_proxy",
"(",
"self",
".",
"aggregate",
".",
"config",
"[",
"\"proxy\"",
"]",
".",
"get",
"(",
"self",
".",
"scheme",
")",
")",
"if",
"self",
".",
"proxy",
":",
"# using a (HTTP) proxy",
"http",
"=",
"httpurl",
".",
"HttpUrl",
"(",
"self",
".",
"base_url",
",",
"self",
".",
"recursion_level",
",",
"self",
".",
"aggregate",
",",
"parent_url",
"=",
"self",
".",
"parent_url",
",",
"base_ref",
"=",
"self",
".",
"base_ref",
",",
"line",
"=",
"self",
".",
"line",
",",
"column",
"=",
"self",
".",
"column",
",",
"name",
"=",
"self",
".",
"name",
")",
"http",
".",
"build_url",
"(",
")",
"return",
"http",
".",
"check",
"(",
")",
"self",
".",
"login",
"(",
")",
"self",
".",
"negotiate_encoding",
"(",
")",
"self",
".",
"filename",
"=",
"self",
".",
"cwd",
"(",
")",
"self",
".",
"listfile",
"(",
")",
"self",
".",
"files",
"=",
"[",
"]",
"return",
"None"
]
| 34.6 | 10.52 |
def get_service_info(service_instance):
'''
Returns information of the vCenter or ESXi host
service_instance
The Service Instance from which to obtain managed object references.
'''
try:
return service_instance.content.about
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg) | [
"def",
"get_service_info",
"(",
"service_instance",
")",
":",
"try",
":",
"return",
"service_instance",
".",
"content",
".",
"about",
"except",
"vim",
".",
"fault",
".",
"NoPermission",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareApiError",
"(",
"'Not enough permissions. Required privilege: '",
"'{0}'",
".",
"format",
"(",
"exc",
".",
"privilegeId",
")",
")",
"except",
"vim",
".",
"fault",
".",
"VimFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareApiError",
"(",
"exc",
".",
"msg",
")",
"except",
"vmodl",
".",
"RuntimeFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareRuntimeError",
"(",
"exc",
".",
"msg",
")"
]
| 35.05 | 15.75 |
def create_pool(batch_service_client, pool_id,
resource_files, publisher, offer, sku,
task_file, vm_size, node_count):
"""Creates a pool of compute nodes with the specified OS settings.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str pool_id: An ID for the new pool.
:param list resource_files: A collection of resource files for the pool's
start task.
:param str publisher: Marketplace image publisher
:param str offer: Marketplace image offer
:param str sku: Marketplace image sku
:param str task_file: A file name of the script
:param str vm_size: A type of vm
:param str node_count: The number of nodes
"""
_log.info('Creating pool [{}]...'.format(pool_id))
# Create a new pool of Linux compute nodes using an Azure Virtual Machines
# Marketplace image. For more information about creating pools of Linux
# nodes, see:
# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
# Specify the commands for the pool's start task. The start task is run
# on each node as it joins the pool, and when it's rebooted or re-imaged.
# We use the start task to prep the node for running our task script.
task_commands = [
# Copy the python_tutorial_task.py script to the "shared" directory
# that all tasks that run on the node have access to. Note that
# we are using the -p flag with cp to preserve the file uid/gid,
# otherwise since this start task is run as an admin, it would not
# be accessible by tasks run as a non-admin user.
'cp -p {} $AZ_BATCH_NODE_SHARED_DIR'.format(os.path.basename(task_file)),
# Install pip
'curl -fSsL https://bootstrap.pypa.io/get-pip.py | python',
# Install the azure-storage module so that the task script can access
# Azure Blob storage, pre-cryptography version
'pip install azure-storage==0.32.0',
# Install E-Cell 4
'pip install https://1028-6348303-gh.circle-artifacts.com/0/root/circle/wheelhouse/ecell-4.1.2-cp27-cp27mu-manylinux1_x86_64.whl']
# Get the node agent SKU and image reference for the virtual machine
# configuration.
# For more information about the virtual machine configuration, see:
# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
sku_to_use, image_ref_to_use = \
select_latest_verified_vm_image_with_node_agent_sku(
batch_service_client, publisher, offer, sku)
user = batchmodels.AutoUserSpecification(
scope=batchmodels.AutoUserScope.pool,
elevation_level=batchmodels.ElevationLevel.admin)
new_pool = batch.models.PoolAddParameter(
id=pool_id,
virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
image_reference=image_ref_to_use,
node_agent_sku_id=sku_to_use),
vm_size=vm_size,
target_dedicated_nodes=0,
target_low_priority_nodes=node_count,
start_task=batch.models.StartTask(
command_line=wrap_commands_in_shell('linux', task_commands),
user_identity=batchmodels.UserIdentity(auto_user=user),
wait_for_success=True,
resource_files=resource_files),
)
try:
batch_service_client.pool.add(new_pool)
except batchmodels.BatchErrorException as err:
print_batch_exception(err)
raise | [
"def",
"create_pool",
"(",
"batch_service_client",
",",
"pool_id",
",",
"resource_files",
",",
"publisher",
",",
"offer",
",",
"sku",
",",
"task_file",
",",
"vm_size",
",",
"node_count",
")",
":",
"_log",
".",
"info",
"(",
"'Creating pool [{}]...'",
".",
"format",
"(",
"pool_id",
")",
")",
"# Create a new pool of Linux compute nodes using an Azure Virtual Machines",
"# Marketplace image. For more information about creating pools of Linux",
"# nodes, see:",
"# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/",
"# Specify the commands for the pool's start task. The start task is run",
"# on each node as it joins the pool, and when it's rebooted or re-imaged.",
"# We use the start task to prep the node for running our task script.",
"task_commands",
"=",
"[",
"# Copy the python_tutorial_task.py script to the \"shared\" directory",
"# that all tasks that run on the node have access to. Note that",
"# we are using the -p flag with cp to preserve the file uid/gid,",
"# otherwise since this start task is run as an admin, it would not",
"# be accessible by tasks run as a non-admin user.",
"'cp -p {} $AZ_BATCH_NODE_SHARED_DIR'",
".",
"format",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"task_file",
")",
")",
",",
"# Install pip",
"'curl -fSsL https://bootstrap.pypa.io/get-pip.py | python'",
",",
"# Install the azure-storage module so that the task script can access",
"# Azure Blob storage, pre-cryptography version",
"'pip install azure-storage==0.32.0'",
",",
"# Install E-Cell 4",
"'pip install https://1028-6348303-gh.circle-artifacts.com/0/root/circle/wheelhouse/ecell-4.1.2-cp27-cp27mu-manylinux1_x86_64.whl'",
"]",
"# Get the node agent SKU and image reference for the virtual machine",
"# configuration.",
"# For more information about the virtual machine configuration, see:",
"# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/",
"sku_to_use",
",",
"image_ref_to_use",
"=",
"select_latest_verified_vm_image_with_node_agent_sku",
"(",
"batch_service_client",
",",
"publisher",
",",
"offer",
",",
"sku",
")",
"user",
"=",
"batchmodels",
".",
"AutoUserSpecification",
"(",
"scope",
"=",
"batchmodels",
".",
"AutoUserScope",
".",
"pool",
",",
"elevation_level",
"=",
"batchmodels",
".",
"ElevationLevel",
".",
"admin",
")",
"new_pool",
"=",
"batch",
".",
"models",
".",
"PoolAddParameter",
"(",
"id",
"=",
"pool_id",
",",
"virtual_machine_configuration",
"=",
"batchmodels",
".",
"VirtualMachineConfiguration",
"(",
"image_reference",
"=",
"image_ref_to_use",
",",
"node_agent_sku_id",
"=",
"sku_to_use",
")",
",",
"vm_size",
"=",
"vm_size",
",",
"target_dedicated_nodes",
"=",
"0",
",",
"target_low_priority_nodes",
"=",
"node_count",
",",
"start_task",
"=",
"batch",
".",
"models",
".",
"StartTask",
"(",
"command_line",
"=",
"wrap_commands_in_shell",
"(",
"'linux'",
",",
"task_commands",
")",
",",
"user_identity",
"=",
"batchmodels",
".",
"UserIdentity",
"(",
"auto_user",
"=",
"user",
")",
",",
"wait_for_success",
"=",
"True",
",",
"resource_files",
"=",
"resource_files",
")",
",",
")",
"try",
":",
"batch_service_client",
".",
"pool",
".",
"add",
"(",
"new_pool",
")",
"except",
"batchmodels",
".",
"BatchErrorException",
"as",
"err",
":",
"print_batch_exception",
"(",
"err",
")",
"raise"
]
| 47.805556 | 21.125 |
def _remove_mapper_from_plotter(plotter, actor, reset_camera):
"""removes this actor's mapper from the given plotter's _scalar_bar_mappers"""
try:
mapper = actor.GetMapper()
except AttributeError:
return
for name in list(plotter._scalar_bar_mappers.keys()):
try:
plotter._scalar_bar_mappers[name].remove(mapper)
except ValueError:
pass
if len(plotter._scalar_bar_mappers[name]) < 1:
slot = plotter._scalar_bar_slot_lookup.pop(name)
plotter._scalar_bar_mappers.pop(name)
plotter._scalar_bar_ranges.pop(name)
plotter.remove_actor(plotter._scalar_bar_actors.pop(name), reset_camera=reset_camera)
plotter._scalar_bar_slots.add(slot)
return | [
"def",
"_remove_mapper_from_plotter",
"(",
"plotter",
",",
"actor",
",",
"reset_camera",
")",
":",
"try",
":",
"mapper",
"=",
"actor",
".",
"GetMapper",
"(",
")",
"except",
"AttributeError",
":",
"return",
"for",
"name",
"in",
"list",
"(",
"plotter",
".",
"_scalar_bar_mappers",
".",
"keys",
"(",
")",
")",
":",
"try",
":",
"plotter",
".",
"_scalar_bar_mappers",
"[",
"name",
"]",
".",
"remove",
"(",
"mapper",
")",
"except",
"ValueError",
":",
"pass",
"if",
"len",
"(",
"plotter",
".",
"_scalar_bar_mappers",
"[",
"name",
"]",
")",
"<",
"1",
":",
"slot",
"=",
"plotter",
".",
"_scalar_bar_slot_lookup",
".",
"pop",
"(",
"name",
")",
"plotter",
".",
"_scalar_bar_mappers",
".",
"pop",
"(",
"name",
")",
"plotter",
".",
"_scalar_bar_ranges",
".",
"pop",
"(",
"name",
")",
"plotter",
".",
"remove_actor",
"(",
"plotter",
".",
"_scalar_bar_actors",
".",
"pop",
"(",
"name",
")",
",",
"reset_camera",
"=",
"reset_camera",
")",
"plotter",
".",
"_scalar_bar_slots",
".",
"add",
"(",
"slot",
")",
"return"
]
| 42.333333 | 19.333333 |
def merge(left, right):
"""
    Deep-merge the dictionary on the left with the one
    on the right.
    Keys that are missing from the left dictionary, or whose
    value there is None, are filled in from the right one.
"""
if isinstance(left, dict) and isinstance(right, dict):
for key, value in right.items():
if key not in left:
left[key] = value
elif left[key] is None:
left[key] = value
else:
left[key] = merge(left[key], value)
return left | [
"def",
"merge",
"(",
"left",
",",
"right",
")",
":",
"if",
"isinstance",
"(",
"left",
",",
"dict",
")",
"and",
"isinstance",
"(",
"right",
",",
"dict",
")",
":",
"for",
"key",
",",
"value",
"in",
"right",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"left",
":",
"left",
"[",
"key",
"]",
"=",
"value",
"elif",
"left",
"[",
"key",
"]",
"is",
"None",
":",
"left",
"[",
"key",
"]",
"=",
"value",
"else",
":",
"left",
"[",
"key",
"]",
"=",
"merge",
"(",
"left",
"[",
"key",
"]",
",",
"value",
")",
"return",
"left"
]
| 30.388889 | 11.833333 |
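
A short usage sketch of the function above; the configuration dictionaries are made-up examples.

def merge(left, right):
    # Recursively fill keys that are missing or None in `left` from `right`.
    if isinstance(left, dict) and isinstance(right, dict):
        for key, value in right.items():
            if key not in left:
                left[key] = value
            elif left[key] is None:
                left[key] = value
            else:
                left[key] = merge(left[key], value)
    return left

defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
config = {"db": {"host": None}, "debug": True}
print(merge(config, defaults))
# {'db': {'host': 'localhost', 'port': 5432}, 'debug': True}
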
def clean(all=False, docs=False, dist=False, extra=None):
"""Clean up build files"""
run('find . -type f -name "*.py[co]" -delete')
run('find . -type d -name "__pycache__" -delete')
patterns = ['build', '*.egg-info/']
if all or docs:
patterns.append('doc/build/*')
if all or dist:
patterns.append('dist')
if extra:
patterns.append(extra)
for pattern in patterns:
run('rm -rf {}'.format(pattern)) | [
"def",
"clean",
"(",
"all",
"=",
"False",
",",
"docs",
"=",
"False",
",",
"dist",
"=",
"False",
",",
"extra",
"=",
"None",
")",
":",
"run",
"(",
"'find . -type f -name \"*.py[co]\" -delete'",
")",
"run",
"(",
"'find . -type d -name \"__pycache__\" -delete'",
")",
"patterns",
"=",
"[",
"'build'",
",",
"'*.egg-info/'",
"]",
"if",
"all",
"or",
"docs",
":",
"patterns",
".",
"append",
"(",
"'doc/build/*'",
")",
"if",
"all",
"or",
"dist",
":",
"patterns",
".",
"append",
"(",
"'dist'",
")",
"if",
"extra",
":",
"patterns",
".",
"append",
"(",
"extra",
")",
"for",
"pattern",
"in",
"patterns",
":",
"run",
"(",
"'rm -rf {}'",
".",
"format",
"(",
"pattern",
")",
")"
]
| 29.8 | 14.866667 |
def toindices(self, mask):
r"""
Convert a boolean mask to a list of pore or throat indices
Parameters
----------
mask : array_like booleans
A boolean array with True at locations where indices are desired.
            The appropriate indices are returned based on the length of mask,
which must be either Np or Nt long.
Returns
-------
A list of pore or throat indices corresponding the locations where
the received mask was True.
See Also
--------
tomask
Notes
-----
This behavior could just as easily be accomplished by using the mask
in ``pn.pores()[mask]`` or ``pn.throats()[mask]``. This method is
just a convenience function and is a complement to ``tomask``.
"""
if sp.amax(mask) > 1:
raise Exception('Received mask is invalid, with values above 1')
mask = sp.array(mask, dtype=bool)
indices = self._parse_indices(mask)
return indices | [
"def",
"toindices",
"(",
"self",
",",
"mask",
")",
":",
"if",
"sp",
".",
"amax",
"(",
"mask",
")",
">",
"1",
":",
"raise",
"Exception",
"(",
"'Received mask is invalid, with values above 1'",
")",
"mask",
"=",
"sp",
".",
"array",
"(",
"mask",
",",
"dtype",
"=",
"bool",
")",
"indices",
"=",
"self",
".",
"_parse_indices",
"(",
"mask",
")",
"return",
"indices"
]
| 32.125 | 23.65625 |
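
A rough standalone equivalent of the method above using plain NumPy, assuming `_parse_indices` ultimately reduces to selecting the positions where the mask is True; this is an assumption for illustration, not the original implementation.

import numpy as np

def toindices(mask):
    mask = np.asarray(mask)
    if mask.max() > 1:
        raise Exception('Received mask is invalid, with values above 1')
    # Positions where the boolean mask is True.
    return np.where(mask.astype(bool))[0]

print(toindices([True, False, True, True]))  # [0 2 3]
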
def result_relpath(self, package_index):
"""Returns the relative path of the result
This method returns the path to the result relative to the
top dir of the working area. This method simply constructs the
path based on the convention and doesn't check if the result
actually exists.
Parameters
----------
package_index :
a package index
Returns
-------
str
the relative path to the result
"""
dirname = 'task_{:05d}'.format(package_index)
# e.g., 'task_00009'
ret = os.path.join('results', dirname, 'result.p.gz')
# e.g., 'results/task_00009/result.p.gz'
return ret | [
"def",
"result_relpath",
"(",
"self",
",",
"package_index",
")",
":",
"dirname",
"=",
"'task_{:05d}'",
".",
"format",
"(",
"package_index",
")",
"# e.g., 'task_00009'",
"ret",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'results'",
",",
"dirname",
",",
"'result.p.gz'",
")",
"# e.g., 'results/task_00009/result.p.gz'",
"return",
"ret"
]
| 26.185185 | 22.666667 |
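
A small sketch showing the path convention described above; the package index is arbitrary.

import os

def result_relpath(package_index):
    dirname = 'task_{:05d}'.format(package_index)  # e.g. 'task_00009'
    return os.path.join('results', dirname, 'result.p.gz')

print(result_relpath(9))  # results/task_00009/result.p.gz (on POSIX paths)
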
def check_arguments(c: typing.Callable,
hints: typing.Mapping[str, typing.Optional[type]],
*args, **kwargs) -> None:
"""Check arguments type, raise :class:`TypeError` if argument type is not
expected type.
:param c: callable object want to check types
:param hints: assumed type of given ``c`` result of
:func:`typing.get_type_hints`
"""
signature = inspect.signature(c)
bound = signature.bind(*args, **kwargs)
for argument_name, value in bound.arguments.items():
try:
type_hint = hints[argument_name]
except KeyError:
continue
actual_type, correct = check_type(value, type_hint)
if not correct:
raise TypeError(
'Incorrect type `{}`, expected `{}` for `{}`'.format(
actual_type, type_hint, argument_name
)
) | [
"def",
"check_arguments",
"(",
"c",
":",
"typing",
".",
"Callable",
",",
"hints",
":",
"typing",
".",
"Mapping",
"[",
"str",
",",
"typing",
".",
"Optional",
"[",
"type",
"]",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"->",
"None",
":",
"signature",
"=",
"inspect",
".",
"signature",
"(",
"c",
")",
"bound",
"=",
"signature",
".",
"bind",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"for",
"argument_name",
",",
"value",
"in",
"bound",
".",
"arguments",
".",
"items",
"(",
")",
":",
"try",
":",
"type_hint",
"=",
"hints",
"[",
"argument_name",
"]",
"except",
"KeyError",
":",
"continue",
"actual_type",
",",
"correct",
"=",
"check_type",
"(",
"value",
",",
"type_hint",
")",
"if",
"not",
"correct",
":",
"raise",
"TypeError",
"(",
"'Incorrect type `{}`, expected `{}` for `{}`'",
".",
"format",
"(",
"actual_type",
",",
"type_hint",
",",
"argument_name",
")",
")"
]
| 36.32 | 16.16 |
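
A small sketch of how the checker is typically wired up with :func:`typing.get_type_hints`. The `check_type` helper is not shown in this entry, so the stand-in below only handles concrete classes via `isinstance`; the real helper presumably understands `typing` constructs as well.

import inspect
import typing

def check_type(value, type_hint):
    # Simplified stand-in: only handles concrete classes.
    return type(value), isinstance(value, type_hint)

def check_arguments(c, hints, *args, **kwargs):
    signature = inspect.signature(c)
    bound = signature.bind(*args, **kwargs)
    for argument_name, value in bound.arguments.items():
        try:
            type_hint = hints[argument_name]
        except KeyError:
            continue
        actual_type, correct = check_type(value, type_hint)
        if not correct:
            raise TypeError(
                'Incorrect type `{}`, expected `{}` for `{}`'.format(
                    actual_type, type_hint, argument_name))

def greet(name: str, times: int) -> str:
    return ', '.join([name] * times)

hints = typing.get_type_hints(greet)
check_arguments(greet, hints, 'hi', 2)         # passes silently
# check_arguments(greet, hints, 'hi', 'oops')  # would raise TypeError
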
def get_summary_data(self, group_name):
""" Get the summary data for an analysis group.
:param group_name: The name of the analysis group to pull summary
data for.
:returns: A dictionary whose keys are analysis steps, and whose
values are dictionaries of key/value pairs for the results of
that step.
"""
self.assert_open()
group = 'Analyses/{}/Summary'.format(group_name)
summary = None
if group in self.handle:
summary = self._parse_attribute_tree(group)
return summary | [
"def",
"get_summary_data",
"(",
"self",
",",
"group_name",
")",
":",
"self",
".",
"assert_open",
"(",
")",
"group",
"=",
"'Analyses/{}/Summary'",
".",
"format",
"(",
"group_name",
")",
"summary",
"=",
"None",
"if",
"group",
"in",
"self",
".",
"handle",
":",
"summary",
"=",
"self",
".",
"_parse_attribute_tree",
"(",
"group",
")",
"return",
"summary"
]
| 39.066667 | 17.066667 |
def getCentreAndSpreadOffsets(spaceShape,
spreadShape,
stepSize=1):
"""
Generates centre offsets and spread offsets for block-mode based training
regimes - star, cross, block.
Parameters:
-----------------------------------------------
spaceShape: The (height, width) of the 2-D space to explore. This
sets the number of center-points.
spreadShape: The shape (height, width) of the area around each center-point
to explore.
stepSize: The step size. How big each step is, in pixels. This controls
*both* the spacing of the center-points within the block and the
points we explore around each center-point
retval: (centreOffsets, spreadOffsets)
"""
from nupic.math.cross import cross
# =====================================================================
# Init data structures
# What is the range on the X and Y offsets of the center points?
shape = spaceShape
# If the shape is (1,1), special case of just 1 center point
if shape[0] == 1 and shape[1] == 1:
centerOffsets = [(0,0)]
else:
xMin = -1 * (shape[1] // 2)
xMax = xMin + shape[1] - 1
xPositions = range(stepSize * xMin, stepSize * xMax + 1, stepSize)
yMin = -1 * (shape[0] // 2)
yMax = yMin + shape[0] - 1
yPositions = range(stepSize * yMin, stepSize * yMax + 1, stepSize)
centerOffsets = list(cross(yPositions, xPositions))
numCenterOffsets = len(centerOffsets)
print "centerOffsets:", centerOffsets
# What is the range on the X and Y offsets of the spread points?
shape = spreadShape
# If the shape is (1,1), special case of no spreading around each center
# point
if shape[0] == 1 and shape[1] == 1:
spreadOffsets = [(0,0)]
else:
xMin = -1 * (shape[1] // 2)
xMax = xMin + shape[1] - 1
xPositions = range(stepSize * xMin, stepSize * xMax + 1, stepSize)
yMin = -1 * (shape[0] // 2)
yMax = yMin + shape[0] - 1
yPositions = range(stepSize * yMin, stepSize * yMax + 1, stepSize)
spreadOffsets = list(cross(yPositions, xPositions))
# Put the (0,0) entry first
spreadOffsets.remove((0,0))
spreadOffsets.insert(0, (0,0))
numSpreadOffsets = len(spreadOffsets)
print "spreadOffsets:", spreadOffsets
return centerOffsets, spreadOffsets | [
"def",
"getCentreAndSpreadOffsets",
"(",
"spaceShape",
",",
"spreadShape",
",",
"stepSize",
"=",
"1",
")",
":",
"from",
"nupic",
".",
"math",
".",
"cross",
"import",
"cross",
"# =====================================================================",
"# Init data structures",
"# What is the range on the X and Y offsets of the center points?",
"shape",
"=",
"spaceShape",
"# If the shape is (1,1), special case of just 1 center point",
"if",
"shape",
"[",
"0",
"]",
"==",
"1",
"and",
"shape",
"[",
"1",
"]",
"==",
"1",
":",
"centerOffsets",
"=",
"[",
"(",
"0",
",",
"0",
")",
"]",
"else",
":",
"xMin",
"=",
"-",
"1",
"*",
"(",
"shape",
"[",
"1",
"]",
"//",
"2",
")",
"xMax",
"=",
"xMin",
"+",
"shape",
"[",
"1",
"]",
"-",
"1",
"xPositions",
"=",
"range",
"(",
"stepSize",
"*",
"xMin",
",",
"stepSize",
"*",
"xMax",
"+",
"1",
",",
"stepSize",
")",
"yMin",
"=",
"-",
"1",
"*",
"(",
"shape",
"[",
"0",
"]",
"//",
"2",
")",
"yMax",
"=",
"yMin",
"+",
"shape",
"[",
"0",
"]",
"-",
"1",
"yPositions",
"=",
"range",
"(",
"stepSize",
"*",
"yMin",
",",
"stepSize",
"*",
"yMax",
"+",
"1",
",",
"stepSize",
")",
"centerOffsets",
"=",
"list",
"(",
"cross",
"(",
"yPositions",
",",
"xPositions",
")",
")",
"numCenterOffsets",
"=",
"len",
"(",
"centerOffsets",
")",
"print",
"\"centerOffsets:\"",
",",
"centerOffsets",
"# What is the range on the X and Y offsets of the spread points?",
"shape",
"=",
"spreadShape",
"# If the shape is (1,1), special case of no spreading around each center",
"# point",
"if",
"shape",
"[",
"0",
"]",
"==",
"1",
"and",
"shape",
"[",
"1",
"]",
"==",
"1",
":",
"spreadOffsets",
"=",
"[",
"(",
"0",
",",
"0",
")",
"]",
"else",
":",
"xMin",
"=",
"-",
"1",
"*",
"(",
"shape",
"[",
"1",
"]",
"//",
"2",
")",
"xMax",
"=",
"xMin",
"+",
"shape",
"[",
"1",
"]",
"-",
"1",
"xPositions",
"=",
"range",
"(",
"stepSize",
"*",
"xMin",
",",
"stepSize",
"*",
"xMax",
"+",
"1",
",",
"stepSize",
")",
"yMin",
"=",
"-",
"1",
"*",
"(",
"shape",
"[",
"0",
"]",
"//",
"2",
")",
"yMax",
"=",
"yMin",
"+",
"shape",
"[",
"0",
"]",
"-",
"1",
"yPositions",
"=",
"range",
"(",
"stepSize",
"*",
"yMin",
",",
"stepSize",
"*",
"yMax",
"+",
"1",
",",
"stepSize",
")",
"spreadOffsets",
"=",
"list",
"(",
"cross",
"(",
"yPositions",
",",
"xPositions",
")",
")",
"# Put the (0,0) entry first",
"spreadOffsets",
".",
"remove",
"(",
"(",
"0",
",",
"0",
")",
")",
"spreadOffsets",
".",
"insert",
"(",
"0",
",",
"(",
"0",
",",
"0",
")",
")",
"numSpreadOffsets",
"=",
"len",
"(",
"spreadOffsets",
")",
"print",
"\"spreadOffsets:\"",
",",
"spreadOffsets",
"return",
"centerOffsets",
",",
"spreadOffsets"
]
| 34.686567 | 20.179104 |
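
A rough Python 3 sketch of the same offset enumeration, using itertools.product in place of nupic's cross helper; it only illustrates how one grid of (y, x) offsets is built, not the full centre/spread pairing.

import itertools

def grid_offsets(shape, step_size=1):
    # Enumerate (y, x) offsets centred on (0, 0) for a (height, width) shape.
    if shape[0] == 1 and shape[1] == 1:
        return [(0, 0)]
    x_min = -(shape[1] // 2)
    x_max = x_min + shape[1] - 1
    y_min = -(shape[0] // 2)
    y_max = y_min + shape[0] - 1
    xs = range(step_size * x_min, step_size * x_max + 1, step_size)
    ys = range(step_size * y_min, step_size * y_max + 1, step_size)
    return list(itertools.product(ys, xs))

print(grid_offsets((3, 3)))
# [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 0), (0, 1), (1, -1), (1, 0), (1, 1)]
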
def Sum(a, axis, keep_dims):
"""
Sum reduction op.
"""
return np.sum(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims), | [
"def",
"Sum",
"(",
"a",
",",
"axis",
",",
"keep_dims",
")",
":",
"return",
"np",
".",
"sum",
"(",
"a",
",",
"axis",
"=",
"axis",
"if",
"not",
"isinstance",
"(",
"axis",
",",
"np",
".",
"ndarray",
")",
"else",
"tuple",
"(",
"axis",
")",
",",
"keepdims",
"=",
"keep_dims",
")",
","
]
| 30.833333 | 12.833333 |
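
A quick illustration of the axis handling above, assuming the op is handed either a plain int or an ndarray of axes; the sample array is arbitrary.

import numpy as np

def Sum(a, axis, keep_dims):
    return np.sum(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
                  keepdims=keep_dims),

a = np.arange(6).reshape(2, 3)
print(Sum(a, np.array([0, 1]), False))  # one-element tuple holding 15
print(Sum(a, 1, True))                  # one-element tuple holding [[3], [12]]
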
def emulate_mouse(self, key_code, x_val, y_val, data):
"""Emulate the ev codes using the data Windows has given us.
Note that by default in Windows, to recognise a double click,
        you just notice two clicks in a row within a reasonably
short time period.
However, if the application developer sets the application
window's class style to CS_DBLCLKS, the operating system will
notice the four button events (down, up, down, up), intercept
them and then send a single key code instead.
There are no such special double click codes on other
platforms, so not obvious what to do with them. It might be
best to just convert them back to four events.
Currently we do nothing.
((0x0203, 'WM_LBUTTONDBLCLK'),
(0x0206, 'WM_RBUTTONDBLCLK'),
(0x0209, 'WM_MBUTTONDBLCLK'),
(0x020D, 'WM_XBUTTONDBLCLK'))
"""
# Once again ignore Windows' relative time (since system
# startup) and use the absolute time (since epoch i.e. 1st Jan
# 1970).
self.update_timeval()
events = []
if key_code == 0x0200:
# We have a mouse move alone.
# So just pass through to below
pass
elif key_code == 0x020A:
# We have a vertical mouse wheel turn
events.append(self.emulate_wheel(data, 'y', self.timeval))
elif key_code == 0x020E:
# We have a horizontal mouse wheel turn
# https://msdn.microsoft.com/en-us/library/windows/desktop/
# ms645614%28v=vs.85%29.aspx
events.append(self.emulate_wheel(data, 'x', self.timeval))
else:
# We have a button press.
# Distinguish the second extra button
if key_code == 0x020B and data == 2:
key_code = 0x020B2
elif key_code == 0x020C and data == 2:
key_code = 0x020C2
# Get the mouse codes
code, value, scan_code = self.mouse_codes[key_code]
# Add in the press events
scan_event, key_event = self.emulate_press(
code, scan_code, value, self.timeval)
events.append(scan_event)
events.append(key_event)
# Add in the absolute position of the mouse cursor
x_event, y_event = self.emulate_abs(x_val, y_val, self.timeval)
events.append(x_event)
events.append(y_event)
# End with a sync marker
events.append(self.sync_marker(self.timeval))
# We are done
self.write_to_pipe(events) | [
"def",
"emulate_mouse",
"(",
"self",
",",
"key_code",
",",
"x_val",
",",
"y_val",
",",
"data",
")",
":",
"# Once again ignore Windows' relative time (since system",
"# startup) and use the absolute time (since epoch i.e. 1st Jan",
"# 1970).",
"self",
".",
"update_timeval",
"(",
")",
"events",
"=",
"[",
"]",
"if",
"key_code",
"==",
"0x0200",
":",
"# We have a mouse move alone.",
"# So just pass through to below",
"pass",
"elif",
"key_code",
"==",
"0x020A",
":",
"# We have a vertical mouse wheel turn",
"events",
".",
"append",
"(",
"self",
".",
"emulate_wheel",
"(",
"data",
",",
"'y'",
",",
"self",
".",
"timeval",
")",
")",
"elif",
"key_code",
"==",
"0x020E",
":",
"# We have a horizontal mouse wheel turn",
"# https://msdn.microsoft.com/en-us/library/windows/desktop/",
"# ms645614%28v=vs.85%29.aspx",
"events",
".",
"append",
"(",
"self",
".",
"emulate_wheel",
"(",
"data",
",",
"'x'",
",",
"self",
".",
"timeval",
")",
")",
"else",
":",
"# We have a button press.",
"# Distinguish the second extra button",
"if",
"key_code",
"==",
"0x020B",
"and",
"data",
"==",
"2",
":",
"key_code",
"=",
"0x020B2",
"elif",
"key_code",
"==",
"0x020C",
"and",
"data",
"==",
"2",
":",
"key_code",
"=",
"0x020C2",
"# Get the mouse codes",
"code",
",",
"value",
",",
"scan_code",
"=",
"self",
".",
"mouse_codes",
"[",
"key_code",
"]",
"# Add in the press events",
"scan_event",
",",
"key_event",
"=",
"self",
".",
"emulate_press",
"(",
"code",
",",
"scan_code",
",",
"value",
",",
"self",
".",
"timeval",
")",
"events",
".",
"append",
"(",
"scan_event",
")",
"events",
".",
"append",
"(",
"key_event",
")",
"# Add in the absolute position of the mouse cursor",
"x_event",
",",
"y_event",
"=",
"self",
".",
"emulate_abs",
"(",
"x_val",
",",
"y_val",
",",
"self",
".",
"timeval",
")",
"events",
".",
"append",
"(",
"x_event",
")",
"events",
".",
"append",
"(",
"y_event",
")",
"# End with a sync marker",
"events",
".",
"append",
"(",
"self",
".",
"sync_marker",
"(",
"self",
".",
"timeval",
")",
")",
"# We are done",
"self",
".",
"write_to_pipe",
"(",
"events",
")"
]
| 36.671429 | 18.628571 |
def status_job(self, fn=None, name=None, timeout=3):
"""Decorator that invokes `add_status_job`.
::
@app.status_job
def postgresql():
# query/ping postgres
@app.status_job(name="Active Directory")
def active_directory():
# query active directory
@app.status_job(timeout=5)
def paypal():
# query paypal, timeout after 5 seconds
"""
if fn is None:
def decorator(fn):
self.add_status_job(fn, name, timeout)
return decorator
else:
self.add_status_job(fn, name, timeout) | [
"def",
"status_job",
"(",
"self",
",",
"fn",
"=",
"None",
",",
"name",
"=",
"None",
",",
"timeout",
"=",
"3",
")",
":",
"if",
"fn",
"is",
"None",
":",
"def",
"decorator",
"(",
"fn",
")",
":",
"self",
".",
"add_status_job",
"(",
"fn",
",",
"name",
",",
"timeout",
")",
"return",
"decorator",
"else",
":",
"self",
".",
"add_status_job",
"(",
"fn",
",",
"name",
",",
"timeout",
")"
]
| 27.458333 | 17.041667 |
def split_sentences(text):
"""
Utility function to return a list of sentences.
    @param text The text that must be split into sentences.
"""
sentence_delimiters = re.compile(u'[\\[\\]\n.!?,;:\t\\-\\"\\(\\)\\\'\u2019\u2013]')
sentences = sentence_delimiters.split(text)
return sentences | [
"def",
"split_sentences",
"(",
"text",
")",
":",
"sentence_delimiters",
"=",
"re",
".",
"compile",
"(",
"u'[\\\\[\\\\]\\n.!?,;:\\t\\\\-\\\\\"\\\\(\\\\)\\\\\\'\\u2019\\u2013]'",
")",
"sentences",
"=",
"sentence_delimiters",
".",
"split",
"(",
"text",
")",
"return",
"sentences"
]
| 38.125 | 14.875 |
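
A minimal usage sketch; the sample text is illustrative only.

import re

def split_sentences(text):
    sentence_delimiters = re.compile(u'[\\[\\]\n.!?,;:\t\\-\\"\\(\\)\\\'\u2019\u2013]')
    return sentence_delimiters.split(text)

print(split_sentences("Split on punctuation. Short phrases, like this one!"))
# ['Split on punctuation', ' Short phrases', ' like this one', '']
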
def execute_notebook_with_engine(self, engine_name, nb, kernel_name, **kwargs):
"""Fetch a named engine and execute the nb object against it."""
return self.get_engine(engine_name).execute_notebook(nb, kernel_name, **kwargs) | [
"def",
"execute_notebook_with_engine",
"(",
"self",
",",
"engine_name",
",",
"nb",
",",
"kernel_name",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"get_engine",
"(",
"engine_name",
")",
".",
"execute_notebook",
"(",
"nb",
",",
"kernel_name",
",",
"*",
"*",
"kwargs",
")"
]
| 79.333333 | 28.666667 |
def post_execute(self):
"""Cache the modification times of any modules imported in this execution
"""
newly_loaded_modules = set(sys.modules) - self.loaded_modules
for modname in newly_loaded_modules:
_, pymtime = self._reloader.filename_and_mtime(sys.modules[modname])
if pymtime is not None:
self._reloader.modules_mtimes[modname] = pymtime
self.loaded_modules.update(newly_loaded_modules) | [
"def",
"post_execute",
"(",
"self",
")",
":",
"newly_loaded_modules",
"=",
"set",
"(",
"sys",
".",
"modules",
")",
"-",
"self",
".",
"loaded_modules",
"for",
"modname",
"in",
"newly_loaded_modules",
":",
"_",
",",
"pymtime",
"=",
"self",
".",
"_reloader",
".",
"filename_and_mtime",
"(",
"sys",
".",
"modules",
"[",
"modname",
"]",
")",
"if",
"pymtime",
"is",
"not",
"None",
":",
"self",
".",
"_reloader",
".",
"modules_mtimes",
"[",
"modname",
"]",
"=",
"pymtime",
"self",
".",
"loaded_modules",
".",
"update",
"(",
"newly_loaded_modules",
")"
]
| 46.3 | 17.5 |
def load_data(filename):
"""
:rtype : numpy matrix
"""
data = pandas.read_csv(filename, header=None, delimiter='\t', skiprows=9)
return data.as_matrix() | [
"def",
"load_data",
"(",
"filename",
")",
":",
"data",
"=",
"pandas",
".",
"read_csv",
"(",
"filename",
",",
"header",
"=",
"None",
",",
"delimiter",
"=",
"'\\t'",
",",
"skiprows",
"=",
"9",
")",
"return",
"data",
".",
"as_matrix",
"(",
")"
]
| 27.833333 | 13.5 |
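
A brief sketch of an equivalent loader for recent pandas releases, where DataFrame.as_matrix() has been removed in favour of to_numpy(); the file name is an assumption.

import pandas

def load_data(filename):
    # Tab-separated file with a 9-line header block, as in the entry above.
    data = pandas.read_csv(filename, header=None, delimiter='\t', skiprows=9)
    return data.to_numpy()

# matrix = load_data("measurement.tsv")
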
def _read_as_tiledir(
self,
out_tile=None,
td_crs=None,
tiles_paths=None,
profile=None,
validity_check=False,
indexes=None,
resampling=None,
dst_nodata=None,
gdal_opts=None,
**kwargs
):
"""
Read reprojected & resampled input data.
Parameters
----------
validity_check : bool
vector file: also run checks if reprojected geometry is valid,
otherwise throw RuntimeError (default: True)
indexes : list or int
raster file: a list of band numbers; None will read all.
dst_nodata : int or float, optional
raster file: if not set, the nodata value from the source dataset
will be used
gdal_opts : dict
raster file: GDAL options passed on to rasterio.Env()
Returns
-------
data : list for vector files or numpy array for raster files
"""
return _read_as_tiledir(
data_type=self.METADATA["data_type"],
out_tile=out_tile,
td_crs=td_crs,
tiles_paths=tiles_paths,
profile=profile,
validity_check=validity_check,
indexes=indexes,
resampling=resampling,
dst_nodata=dst_nodata,
gdal_opts=gdal_opts,
**{k: v for k, v in kwargs.items() if k != "data_type"}
) | [
"def",
"_read_as_tiledir",
"(",
"self",
",",
"out_tile",
"=",
"None",
",",
"td_crs",
"=",
"None",
",",
"tiles_paths",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"validity_check",
"=",
"False",
",",
"indexes",
"=",
"None",
",",
"resampling",
"=",
"None",
",",
"dst_nodata",
"=",
"None",
",",
"gdal_opts",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_read_as_tiledir",
"(",
"data_type",
"=",
"self",
".",
"METADATA",
"[",
"\"data_type\"",
"]",
",",
"out_tile",
"=",
"out_tile",
",",
"td_crs",
"=",
"td_crs",
",",
"tiles_paths",
"=",
"tiles_paths",
",",
"profile",
"=",
"profile",
",",
"validity_check",
"=",
"validity_check",
",",
"indexes",
"=",
"indexes",
",",
"resampling",
"=",
"resampling",
",",
"dst_nodata",
"=",
"dst_nodata",
",",
"gdal_opts",
"=",
"gdal_opts",
",",
"*",
"*",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"k",
"!=",
"\"data_type\"",
"}",
")"
]
| 29.893617 | 18.106383 |
def _process_container_metric(self, type, metric_name, metric, scraper_config):
"""
Takes a simple metric about a container, reports it as a rate or gauge.
If several series are found for a given container, values are summed before submission.
"""
if metric.type not in METRIC_TYPES:
self.log.error("Metric type %s unsupported for metric %s" % (metric.type, metric.name))
return
samples = self._sum_values_by_context(metric, self._get_entity_id_if_container_metric)
for c_id, sample in iteritems(samples):
pod_uid = self._get_pod_uid(sample[self.SAMPLE_LABELS])
if self.pod_list_utils.is_excluded(c_id, pod_uid):
continue
tags = tagger.tag(c_id, tagger.HIGH)
tags += scraper_config['custom_tags']
# FIXME we are forced to do that because the Kubelet PodList isn't updated
# for static pods, see https://github.com/kubernetes/kubernetes/pull/59948
pod = self._get_pod_by_metric_label(sample[self.SAMPLE_LABELS])
if pod is not None and is_static_pending_pod(pod):
tags += tagger.tag('kubernetes_pod://%s' % pod["metadata"]["uid"], tagger.HIGH)
tags += self._get_kube_container_name(sample[self.SAMPLE_LABELS])
tags = list(set(tags))
val = sample[self.SAMPLE_VALUE]
if "rate" == type:
self.rate(metric_name, val, tags)
elif "gauge" == type:
self.gauge(metric_name, val, tags) | [
"def",
"_process_container_metric",
"(",
"self",
",",
"type",
",",
"metric_name",
",",
"metric",
",",
"scraper_config",
")",
":",
"if",
"metric",
".",
"type",
"not",
"in",
"METRIC_TYPES",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Metric type %s unsupported for metric %s\"",
"%",
"(",
"metric",
".",
"type",
",",
"metric",
".",
"name",
")",
")",
"return",
"samples",
"=",
"self",
".",
"_sum_values_by_context",
"(",
"metric",
",",
"self",
".",
"_get_entity_id_if_container_metric",
")",
"for",
"c_id",
",",
"sample",
"in",
"iteritems",
"(",
"samples",
")",
":",
"pod_uid",
"=",
"self",
".",
"_get_pod_uid",
"(",
"sample",
"[",
"self",
".",
"SAMPLE_LABELS",
"]",
")",
"if",
"self",
".",
"pod_list_utils",
".",
"is_excluded",
"(",
"c_id",
",",
"pod_uid",
")",
":",
"continue",
"tags",
"=",
"tagger",
".",
"tag",
"(",
"c_id",
",",
"tagger",
".",
"HIGH",
")",
"tags",
"+=",
"scraper_config",
"[",
"'custom_tags'",
"]",
"# FIXME we are forced to do that because the Kubelet PodList isn't updated",
"# for static pods, see https://github.com/kubernetes/kubernetes/pull/59948",
"pod",
"=",
"self",
".",
"_get_pod_by_metric_label",
"(",
"sample",
"[",
"self",
".",
"SAMPLE_LABELS",
"]",
")",
"if",
"pod",
"is",
"not",
"None",
"and",
"is_static_pending_pod",
"(",
"pod",
")",
":",
"tags",
"+=",
"tagger",
".",
"tag",
"(",
"'kubernetes_pod://%s'",
"%",
"pod",
"[",
"\"metadata\"",
"]",
"[",
"\"uid\"",
"]",
",",
"tagger",
".",
"HIGH",
")",
"tags",
"+=",
"self",
".",
"_get_kube_container_name",
"(",
"sample",
"[",
"self",
".",
"SAMPLE_LABELS",
"]",
")",
"tags",
"=",
"list",
"(",
"set",
"(",
"tags",
")",
")",
"val",
"=",
"sample",
"[",
"self",
".",
"SAMPLE_VALUE",
"]",
"if",
"\"rate\"",
"==",
"type",
":",
"self",
".",
"rate",
"(",
"metric_name",
",",
"val",
",",
"tags",
")",
"elif",
"\"gauge\"",
"==",
"type",
":",
"self",
".",
"gauge",
"(",
"metric_name",
",",
"val",
",",
"tags",
")"
]
| 48.5625 | 26.4375 |
def _get_tns_search_results(
self):
"""
        *query the tns and return the response*
"""
self.log.info('starting the ``_get_tns_search_results`` method')
try:
response = requests.get(
url="http://wis-tns.weizmann.ac.il/search",
params={
"page": self.page,
"ra": self.ra,
"decl": self.dec,
"radius": self.radiusArcsec,
"name": self.name,
"internal_name": self.internal_name,
"date_start[date]": self.start,
"date_end[date]": self.end,
"num_page": self.batchSize,
"display[redshift]": "1",
"display[hostname]": "1",
"display[host_redshift]": "1",
"display[source_group_name]": "1",
"display[internal_name]": "1",
"display[spectra_count]": "1",
"display[discoverymag]": "1",
"display[discmagfilter]": "1",
"display[discoverydate]": "1",
"display[discoverer]": "1",
"display[sources]": "1",
"display[bibcode]": "1",
},
)
except requests.exceptions.RequestException:
print('HTTP Request failed')
self.log.info('completed the ``_get_tns_search_results`` method')
return response.status_code, response.content, response.url | [
"def",
"_get_tns_search_results",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'starting the ``_get_tns_search_results`` method'",
")",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
"=",
"\"http://wis-tns.weizmann.ac.il/search\"",
",",
"params",
"=",
"{",
"\"page\"",
":",
"self",
".",
"page",
",",
"\"ra\"",
":",
"self",
".",
"ra",
",",
"\"decl\"",
":",
"self",
".",
"dec",
",",
"\"radius\"",
":",
"self",
".",
"radiusArcsec",
",",
"\"name\"",
":",
"self",
".",
"name",
",",
"\"internal_name\"",
":",
"self",
".",
"internal_name",
",",
"\"date_start[date]\"",
":",
"self",
".",
"start",
",",
"\"date_end[date]\"",
":",
"self",
".",
"end",
",",
"\"num_page\"",
":",
"self",
".",
"batchSize",
",",
"\"display[redshift]\"",
":",
"\"1\"",
",",
"\"display[hostname]\"",
":",
"\"1\"",
",",
"\"display[host_redshift]\"",
":",
"\"1\"",
",",
"\"display[source_group_name]\"",
":",
"\"1\"",
",",
"\"display[internal_name]\"",
":",
"\"1\"",
",",
"\"display[spectra_count]\"",
":",
"\"1\"",
",",
"\"display[discoverymag]\"",
":",
"\"1\"",
",",
"\"display[discmagfilter]\"",
":",
"\"1\"",
",",
"\"display[discoverydate]\"",
":",
"\"1\"",
",",
"\"display[discoverer]\"",
":",
"\"1\"",
",",
"\"display[sources]\"",
":",
"\"1\"",
",",
"\"display[bibcode]\"",
":",
"\"1\"",
",",
"}",
",",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
":",
"print",
"(",
"'HTTP Request failed'",
")",
"self",
".",
"log",
".",
"info",
"(",
"'completed the ``_get_tns_search_results`` method'",
")",
"return",
"response",
".",
"status_code",
",",
"response",
".",
"content",
",",
"response",
".",
"url"
]
| 38.875 | 13.525 |
def throttle(self, key, amount=1, rate=None, capacity=None,
exc_class=Throttled, **kwargs):
"""Consume an amount for a given key, or raise a Throttled exception."""
if not self.consume(key, amount, rate, capacity, **kwargs):
raise exc_class("Request of %d unit for %s exceeds capacity."
% (amount, key)) | [
"def",
"throttle",
"(",
"self",
",",
"key",
",",
"amount",
"=",
"1",
",",
"rate",
"=",
"None",
",",
"capacity",
"=",
"None",
",",
"exc_class",
"=",
"Throttled",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"consume",
"(",
"key",
",",
"amount",
",",
"rate",
",",
"capacity",
",",
"*",
"*",
"kwargs",
")",
":",
"raise",
"exc_class",
"(",
"\"Request of %d unit for %s exceeds capacity.\"",
"%",
"(",
"amount",
",",
"key",
")",
")"
]
| 51.142857 | 18 |
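The throttle() wrapper above only makes sense next to the consume() primitive it calls. The sketch below pairs it with a minimal in-memory token bucket so the failure path can be exercised end to end; the Bucket class, its rate/capacity values and the example key are assumptions made for illustration, not the original backend.

import time


class Throttled(Exception):
    pass


class Bucket(object):
    """Toy token bucket: `rate` tokens per second, bursting up to `capacity`."""

    def __init__(self, rate, capacity):
        self.rate = rate
        self.capacity = capacity
        self.tokens = float(capacity)
        self.updated = time.monotonic()

    def consume(self, key, amount=1, rate=None, capacity=None, **kwargs):
        # Refill based on elapsed time, then try to take `amount` tokens.
        now = time.monotonic()
        self.tokens = min(self.capacity,
                          self.tokens + (now - self.updated) * self.rate)
        self.updated = now
        if self.tokens >= amount:
            self.tokens -= amount
            return True
        return False

    def throttle(self, key, amount=1, rate=None, capacity=None,
                 exc_class=Throttled, **kwargs):
        if not self.consume(key, amount, rate, capacity, **kwargs):
            raise exc_class("Request of %d unit for %s exceeds capacity."
                            % (amount, key))


bucket = Bucket(rate=5, capacity=10)
bucket.throttle("api:user:42")              # consumes one token
try:
    bucket.throttle("api:user:42", amount=100)
except Throttled as exc:
    print(exc)                              # capacity exceeded -> back off / HTTP 429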
def update_config(file_name, yaml_contents):
'''
Update master config with
``yaml_contents``.
Writes ``yaml_contents`` to a file named
``file_name.conf`` under the folder
specified by ``default_include``.
This folder is named ``master.d`` by
default. Please look at
:conf_master:`include-configuration`
for more information.
Example low data:
.. code-block:: python
data = {
'username': 'salt',
'password': 'salt',
'fun': 'config.update_config',
'file_name': 'gui',
'yaml_contents': {'id': 1},
'client': 'wheel',
'eauth': 'pam',
}
'''
file_name = '{0}{1}'.format(file_name, '.conf')
dir_path = os.path.join(__opts__['config_dir'],
os.path.dirname(__opts__['default_include']))
try:
yaml_out = salt.utils.yaml.safe_dump(yaml_contents, default_flow_style=False)
if not os.path.exists(dir_path):
log.debug('Creating directory %s', dir_path)
os.makedirs(dir_path, 0o755)
file_path = os.path.join(dir_path, file_name)
with salt.utils.files.fopen(file_path, 'w') as fp_:
fp_.write(yaml_out)
return 'Wrote {0}'.format(file_name)
except (IOError, OSError, salt.utils.yaml.YAMLError, ValueError) as err:
return six.text_type(err) | [
"def",
"update_config",
"(",
"file_name",
",",
"yaml_contents",
")",
":",
"file_name",
"=",
"'{0}{1}'",
".",
"format",
"(",
"file_name",
",",
"'.conf'",
")",
"dir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"__opts__",
"[",
"'config_dir'",
"]",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"__opts__",
"[",
"'default_include'",
"]",
")",
")",
"try",
":",
"yaml_out",
"=",
"salt",
".",
"utils",
".",
"yaml",
".",
"safe_dump",
"(",
"yaml_contents",
",",
"default_flow_style",
"=",
"False",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dir_path",
")",
":",
"log",
".",
"debug",
"(",
"'Creating directory %s'",
",",
"dir_path",
")",
"os",
".",
"makedirs",
"(",
"dir_path",
",",
"0o755",
")",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"file_name",
")",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"file_path",
",",
"'w'",
")",
"as",
"fp_",
":",
"fp_",
".",
"write",
"(",
"yaml_out",
")",
"return",
"'Wrote {0}'",
".",
"format",
"(",
"file_name",
")",
"except",
"(",
"IOError",
",",
"OSError",
",",
"salt",
".",
"utils",
".",
"yaml",
".",
"YAMLError",
",",
"ValueError",
")",
"as",
"err",
":",
"return",
"six",
".",
"text_type",
"(",
"err",
")"
]
| 31 | 18 |
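The docstring above gives the low data for calling this wheel function through the API. A complementary, hedged sketch of invoking it directly from Python on the master follows; it assumes a standard master config at /etc/salt/master and Salt's WheelClient being available, and the file name and contents are placeholders.

import salt.config
import salt.wheel

# Load the master configuration and build a wheel client against it.
opts = salt.config.master_config('/etc/salt/master')
wheel_client = salt.wheel.WheelClient(opts)

# Writes gui.conf containing "id: 1" under the master's default_include
# directory (master.d by default), as described in the docstring above.
result = wheel_client.cmd('config.update_config',
                          kwarg={'file_name': 'gui',
                                 'yaml_contents': {'id': 1}})
print(result)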
def unlock(name,
zk_hosts=None, # in case you need to unlock without having run lock (failed execution for example)
identifier=None,
max_concurrency=1,
ephemeral_lease=False,
profile=None,
scheme=None,
username=None,
password=None,
default_acl=None):
'''
Remove lease from semaphore.
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
conn_kwargs = {'profile': profile, 'scheme': scheme,
'username': username, 'password': password, 'default_acl': default_acl}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Released lock if it is here'
return ret
if identifier is None:
identifier = __grains__['id']
unlocked = __salt__['zk_concurrency.unlock'](name,
zk_hosts=zk_hosts,
identifier=identifier,
max_concurrency=max_concurrency,
ephemeral_lease=ephemeral_lease,
**conn_kwargs)
if unlocked:
ret['result'] = True
else:
ret['comment'] = 'Unable to find lease for path {0}'.format(name)
return ret | [
"def",
"unlock",
"(",
"name",
",",
"zk_hosts",
"=",
"None",
",",
"# in case you need to unlock without having run lock (failed execution for example)",
"identifier",
"=",
"None",
",",
"max_concurrency",
"=",
"1",
",",
"ephemeral_lease",
"=",
"False",
",",
"profile",
"=",
"None",
",",
"scheme",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"default_acl",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"False",
",",
"'comment'",
":",
"''",
"}",
"conn_kwargs",
"=",
"{",
"'profile'",
":",
"profile",
",",
"'scheme'",
":",
"scheme",
",",
"'username'",
":",
"username",
",",
"'password'",
":",
"password",
",",
"'default_acl'",
":",
"default_acl",
"}",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"'Released lock if it is here'",
"return",
"ret",
"if",
"identifier",
"is",
"None",
":",
"identifier",
"=",
"__grains__",
"[",
"'id'",
"]",
"unlocked",
"=",
"__salt__",
"[",
"'zk_concurrency.unlock'",
"]",
"(",
"name",
",",
"zk_hosts",
"=",
"zk_hosts",
",",
"identifier",
"=",
"identifier",
",",
"max_concurrency",
"=",
"max_concurrency",
",",
"ephemeral_lease",
"=",
"ephemeral_lease",
",",
"*",
"*",
"conn_kwargs",
")",
"if",
"unlocked",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Unable to find lease for path {0}'",
".",
"format",
"(",
"name",
")",
"return",
"ret"
]
| 33.609756 | 23.95122 |
def get_evernote_notes(self, evernote_filter):
"""
        get the notes matching the given filter
        :param evernote_filter: the note filter to apply to the search
        :return: list of dicts with the title, date, link and cleaned content of each note
"""
data = []
note_store = self.client.get_note_store()
our_note_list = note_store.findNotesMetadata(self.token, evernote_filter, 0, 100,
EvernoteMgr.set_evernote_spec())
for note in our_note_list.notes:
whole_note = note_store.getNote(self.token, note.guid, True, True, False, False)
content = self._cleaning_content(whole_note.content)
data.append({'title': note.title, 'my_date': arrow.get(note.created),
'link': whole_note.attributes.sourceURL, 'content': content})
return data | [
"def",
"get_evernote_notes",
"(",
"self",
",",
"evernote_filter",
")",
":",
"data",
"=",
"[",
"]",
"note_store",
"=",
"self",
".",
"client",
".",
"get_note_store",
"(",
")",
"our_note_list",
"=",
"note_store",
".",
"findNotesMetadata",
"(",
"self",
".",
"token",
",",
"evernote_filter",
",",
"0",
",",
"100",
",",
"EvernoteMgr",
".",
"set_evernote_spec",
"(",
")",
")",
"for",
"note",
"in",
"our_note_list",
".",
"notes",
":",
"whole_note",
"=",
"note_store",
".",
"getNote",
"(",
"self",
".",
"token",
",",
"note",
".",
"guid",
",",
"True",
",",
"True",
",",
"False",
",",
"False",
")",
"content",
"=",
"self",
".",
"_cleaning_content",
"(",
"whole_note",
".",
"content",
")",
"data",
".",
"append",
"(",
"{",
"'title'",
":",
"note",
".",
"title",
",",
"'my_date'",
":",
"arrow",
".",
"get",
"(",
"note",
".",
"created",
")",
",",
"'link'",
":",
"whole_note",
".",
"attributes",
".",
"sourceURL",
",",
"'content'",
":",
"content",
"}",
")",
"return",
"data"
]
| 42.526316 | 24.315789 |
def from_form(self, param_name, field):
"""
A decorator that converts a request form into a function parameter based on the specified field.
:param str param_name: The parameter which receives the argument.
:param Field field: The field class or instance used to deserialize the request form to a Python object.
:return: A function
"""
return self.__from_source(param_name, field, lambda: request.form, 'form') | [
"def",
"from_form",
"(",
"self",
",",
"param_name",
",",
"field",
")",
":",
"return",
"self",
".",
"__from_source",
"(",
"param_name",
",",
"field",
",",
"lambda",
":",
"request",
".",
"form",
",",
"'form'",
")"
]
| 51 | 29.444444 |
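A hedged usage sketch for the decorator above: it assumes the Flask-IO style of extension this method belongs to, so the flask_io package name, the FlaskIO class and the re-exported fields module are assumptions that may need adjusting; only from_form() itself is taken from the row above.

from flask import Flask
from flask_io import FlaskIO, fields   # assumed package layout

app = Flask(__name__)
io = FlaskIO()
io.init_app(app)

@app.route('/login', methods=['POST'])
@io.from_form('username', fields.String(required=True))
@io.from_form('password', fields.String(required=True))
def login(username, password):
    # Both arguments arrive already deserialized from the POSTed form data.
    return 'ok'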