repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (list) | docstring (string, 1-46.9k chars) | docstring_tokens (list) | language (string, 1 distinct value) | partition (string, 3 distinct values)
---|---|---|---|---|---|---|---|---|
balloob/pychromecast
|
pychromecast/controllers/multizone.py
|
https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/controllers/multizone.py#L135-L137
|
def deregister_listener(self, member_uuid, listener):
""" Deregister listener for audio group changes of cast uuid."""
self._casts[str(member_uuid)]['listeners'].remove(listener)
|
[
"def",
"deregister_listener",
"(",
"self",
",",
"member_uuid",
",",
"listener",
")",
":",
"self",
".",
"_casts",
"[",
"str",
"(",
"member_uuid",
")",
"]",
"[",
"'listeners'",
"]",
".",
"remove",
"(",
"listener",
")"
] |
Deregister listener for audio group changes of cast uuid.
|
[
"Deregister",
"listener",
"for",
"audio",
"group",
"changes",
"of",
"cast",
"uuid",
"."
] |
python
|
train
|
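For orientation, the row above maps onto the column schema as follows. This is a hypothetical sketch only: the record is written out as a plain Python dict with values copied from that first row (the long `code_tokens` list is included in full here because it is short), and it is not tied to any particular dataset-loading library. `code_tokens` is the lexer-level split of `code`, and `docstring_tokens` is the word/punctuation split of `docstring`.

```python
# Hypothetical example record illustrating the schema above; values copied from
# the first table row. Not an API of any specific loader.
row = {
    "repo": "balloob/pychromecast",
    "path": "pychromecast/controllers/multizone.py",
    "url": (
        "https://github.com/balloob/pychromecast/blob/"
        "831b09c4fed185a7bffe0ea330b7849d5f4e36b6/"
        "pychromecast/controllers/multizone.py#L135-L137"
    ),
    # `code` holds the full function source; `code_tokens` is its token-level split.
    "code": (
        "def deregister_listener(self, member_uuid, listener):\n"
        '    """ Deregister listener for audio group changes of cast uuid."""\n'
        "    self._casts[str(member_uuid)]['listeners'].remove(listener)\n"
    ),
    "code_tokens": [
        "def", "deregister_listener", "(", "self", ",", "member_uuid", ",",
        "listener", ")", ":", "self", ".", "_casts", "[", "str", "(",
        "member_uuid", ")", "]", "[", "'listeners'", "]", ".", "remove",
        "(", "listener", ")",
    ],
    # `docstring` is the documentation extracted from the function; its word split
    # is what code-search models typically use as the natural-language side.
    "docstring": "Deregister listener for audio group changes of cast uuid.",
    "docstring_tokens": [
        "Deregister", "listener", "for", "audio", "group",
        "changes", "of", "cast", "uuid", ".",
    ],
    "language": "python",
    "partition": "train",
}

# Sanity check: the docstring tokens are just the whitespace/punctuation split.
assert row["docstring_tokens"][0] == "Deregister"
```

The remaining rows follow the same layout, differing only in which repository, function, and partition (train/valid/test) they come from.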
connectordb/connectordb-python
|
connectordb/_connection.py
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connection.py#L129-L134
|
def create(self, path, data=None):
"""Send a POST CRUD API request to the given path using the given data which will be converted
to json"""
return self.handleresult(self.r.post(urljoin(self.url + CRUD_PATH,
path),
data=json.dumps(data)))
|
[
"def",
"create",
"(",
"self",
",",
"path",
",",
"data",
"=",
"None",
")",
":",
"return",
"self",
".",
"handleresult",
"(",
"self",
".",
"r",
".",
"post",
"(",
"urljoin",
"(",
"self",
".",
"url",
"+",
"CRUD_PATH",
",",
"path",
")",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
")",
")"
] |
Send a POST CRUD API request to the given path using the given data which will be converted
to json
|
[
"Send",
"a",
"POST",
"CRUD",
"API",
"request",
"to",
"the",
"given",
"path",
"using",
"the",
"given",
"data",
"which",
"will",
"be",
"converted",
"to",
"json"
] |
python
|
test
|
casastorta/python-sar
|
sar/parser.py
|
https://github.com/casastorta/python-sar/blob/e6d8bb86524102d677f37e985302fad34e3297c1/sar/parser.py#L310-L403
|
def __split_info(self, info_part, patternsname, patterns):
"""
Splits info from SAR parts into logical stuff :-)
:param info_part: Part of SAR output we want to split into usable data
:param patternsname: ???
:param patterns: ???
:return: ``List``-style info from SAR files, now finally \
completely parsed into meaningful data for further processing
"""
pattern = patterns['PATTERN']
if pattern == '':
return False
return_dict = {}
pattern_re = re.compile(pattern)
for part_line in info_part.split('\n'):
if part_line.strip() != '' and not pattern_re.search(part_line):
# Take care of AM/PM timestamps in SAR file
is_24hr = True
is_AM = False
if part_line[9:11] == 'AM':
is_24hr = False
is_AM = True
elif part_line[9:11] == 'PM':
is_24hr = False
is_AM = False
if is_24hr is False:
part_line = ('%s_%s XX %s' % (part_line[:8], part_line[9:11], part_line[12:]))
# Line is not empty, nor it's header.
# let's hit the road Jack!
elems = part_line.split()
full_time = elems[0].strip()
if full_time != "Average:":
# Convert time to 24hr format if needed
if is_24hr is False:
full_time = full_time[:-3]
# 12 is a bitch in AM/PM notation
if full_time[:2] == '12':
if is_AM is True:
full_time = ('%s:%s' % ('00', full_time[3:]))
is_AM = not is_AM
if is_AM is False and full_time[0:2] != '00':
hours = int(full_time[:2]) + 12
hours = ('%02d' % (hours,))
full_time = ('%s:%s' % (hours, full_time[3:]))
try:
blah = return_dict[full_time]
del blah
except KeyError:
return_dict[full_time] = {}
fields = self.__fields[patternsname]
pairs = patterns["PAIRS"]
for sectionname in pairs.iterkeys():
value = elems[fields[pairs[sectionname]]]
if sectionname == 'membuffer' or \
sectionname == 'memcache' or \
sectionname == 'memfree' or \
sectionname == 'memused' or \
sectionname == 'swapfree' or \
sectionname == 'swapused':
value = int(value)
else:
value = float(value)
if patternsname == 'CPU':
cpuid = elems[(1 if is_24hr is True else 2)]
try:
blah = return_dict[full_time][cpuid]
del blah
except KeyError:
return_dict[full_time][cpuid] = {}
return_dict[full_time][cpuid][sectionname] = \
value
else:
return_dict[full_time][sectionname] = value
return return_dict
|
[
"def",
"__split_info",
"(",
"self",
",",
"info_part",
",",
"patternsname",
",",
"patterns",
")",
":",
"pattern",
"=",
"patterns",
"[",
"'PATTERN'",
"]",
"if",
"pattern",
"==",
"''",
":",
"return",
"False",
"return_dict",
"=",
"{",
"}",
"pattern_re",
"=",
"re",
".",
"compile",
"(",
"pattern",
")",
"for",
"part_line",
"in",
"info_part",
".",
"split",
"(",
"'\\n'",
")",
":",
"if",
"part_line",
".",
"strip",
"(",
")",
"!=",
"''",
"and",
"not",
"pattern_re",
".",
"search",
"(",
"part_line",
")",
":",
"# Take care of AM/PM timestamps in SAR file",
"is_24hr",
"=",
"True",
"is_AM",
"=",
"False",
"if",
"part_line",
"[",
"9",
":",
"11",
"]",
"==",
"'AM'",
":",
"is_24hr",
"=",
"False",
"is_AM",
"=",
"True",
"elif",
"part_line",
"[",
"9",
":",
"11",
"]",
"==",
"'PM'",
":",
"is_24hr",
"=",
"False",
"is_AM",
"=",
"False",
"if",
"is_24hr",
"is",
"False",
":",
"part_line",
"=",
"(",
"'%s_%s XX %s'",
"%",
"(",
"part_line",
"[",
":",
"8",
"]",
",",
"part_line",
"[",
"9",
":",
"11",
"]",
",",
"part_line",
"[",
"12",
":",
"]",
")",
")",
"# Line is not empty, nor it's header.",
"# let's hit the road Jack!",
"elems",
"=",
"part_line",
".",
"split",
"(",
")",
"full_time",
"=",
"elems",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"if",
"full_time",
"!=",
"\"Average:\"",
":",
"# Convert time to 24hr format if needed",
"if",
"is_24hr",
"is",
"False",
":",
"full_time",
"=",
"full_time",
"[",
":",
"-",
"3",
"]",
"# 12 is a bitch in AM/PM notation",
"if",
"full_time",
"[",
":",
"2",
"]",
"==",
"'12'",
":",
"if",
"is_AM",
"is",
"True",
":",
"full_time",
"=",
"(",
"'%s:%s'",
"%",
"(",
"'00'",
",",
"full_time",
"[",
"3",
":",
"]",
")",
")",
"is_AM",
"=",
"not",
"is_AM",
"if",
"is_AM",
"is",
"False",
"and",
"full_time",
"[",
"0",
":",
"2",
"]",
"!=",
"'00'",
":",
"hours",
"=",
"int",
"(",
"full_time",
"[",
":",
"2",
"]",
")",
"+",
"12",
"hours",
"=",
"(",
"'%02d'",
"%",
"(",
"hours",
",",
")",
")",
"full_time",
"=",
"(",
"'%s:%s'",
"%",
"(",
"hours",
",",
"full_time",
"[",
"3",
":",
"]",
")",
")",
"try",
":",
"blah",
"=",
"return_dict",
"[",
"full_time",
"]",
"del",
"blah",
"except",
"KeyError",
":",
"return_dict",
"[",
"full_time",
"]",
"=",
"{",
"}",
"fields",
"=",
"self",
".",
"__fields",
"[",
"patternsname",
"]",
"pairs",
"=",
"patterns",
"[",
"\"PAIRS\"",
"]",
"for",
"sectionname",
"in",
"pairs",
".",
"iterkeys",
"(",
")",
":",
"value",
"=",
"elems",
"[",
"fields",
"[",
"pairs",
"[",
"sectionname",
"]",
"]",
"]",
"if",
"sectionname",
"==",
"'membuffer'",
"or",
"sectionname",
"==",
"'memcache'",
"or",
"sectionname",
"==",
"'memfree'",
"or",
"sectionname",
"==",
"'memused'",
"or",
"sectionname",
"==",
"'swapfree'",
"or",
"sectionname",
"==",
"'swapused'",
":",
"value",
"=",
"int",
"(",
"value",
")",
"else",
":",
"value",
"=",
"float",
"(",
"value",
")",
"if",
"patternsname",
"==",
"'CPU'",
":",
"cpuid",
"=",
"elems",
"[",
"(",
"1",
"if",
"is_24hr",
"is",
"True",
"else",
"2",
")",
"]",
"try",
":",
"blah",
"=",
"return_dict",
"[",
"full_time",
"]",
"[",
"cpuid",
"]",
"del",
"blah",
"except",
"KeyError",
":",
"return_dict",
"[",
"full_time",
"]",
"[",
"cpuid",
"]",
"=",
"{",
"}",
"return_dict",
"[",
"full_time",
"]",
"[",
"cpuid",
"]",
"[",
"sectionname",
"]",
"=",
"value",
"else",
":",
"return_dict",
"[",
"full_time",
"]",
"[",
"sectionname",
"]",
"=",
"value",
"return",
"return_dict"
] |
Splits info from SAR parts into logical stuff :-)
:param info_part: Part of SAR output we want to split into usable data
:param patternsname: ???
:param patterns: ???
:return: ``List``-style info from SAR files, now finally \
completely parsed into meaningful data for further processing
|
[
"Splits",
"info",
"from",
"SAR",
"parts",
"into",
"logical",
"stuff",
":",
"-",
")",
":",
"param",
"info_part",
":",
"Part",
"of",
"SAR",
"output",
"we",
"want",
"to",
"split",
"into",
"usable",
"data",
":",
"param",
"patternsname",
":",
"???",
":",
"param",
"patterns",
":",
"???",
":",
"return",
":",
"List",
"-",
"style",
"info",
"from",
"SAR",
"files",
"now",
"finally",
"\\",
"completely",
"parsed",
"into",
"meaningful",
"data",
"for",
"further",
"processing"
] |
python
|
train
|
HiPERCAM/hcam_widgets
|
hcam_widgets/misc.py
|
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/misc.py#L535-L554
|
def getFrameNumber(g):
"""
Polls the data server to find the current frame number.
Throws an exceotion if it cannot determine it.
"""
if not g.cpars['hcam_server_on']:
raise DriverError('getRunNumber error: servers are not active')
url = g.cpars['hipercam_server'] + 'status/DET.FRAM2.NO'
response = urllib.request.urlopen(url, timeout=2)
rs = ReadServer(response.read(), status_msg=False)
try:
msg = rs.msg
except:
raise DriverError('getFrameNumber error: no message found')
try:
frame_no = int(msg.split()[1])
except:
raise DriverError('getFrameNumber error: invalid msg ' + msg)
return frame_no
|
[
"def",
"getFrameNumber",
"(",
"g",
")",
":",
"if",
"not",
"g",
".",
"cpars",
"[",
"'hcam_server_on'",
"]",
":",
"raise",
"DriverError",
"(",
"'getRunNumber error: servers are not active'",
")",
"url",
"=",
"g",
".",
"cpars",
"[",
"'hipercam_server'",
"]",
"+",
"'status/DET.FRAM2.NO'",
"response",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"url",
",",
"timeout",
"=",
"2",
")",
"rs",
"=",
"ReadServer",
"(",
"response",
".",
"read",
"(",
")",
",",
"status_msg",
"=",
"False",
")",
"try",
":",
"msg",
"=",
"rs",
".",
"msg",
"except",
":",
"raise",
"DriverError",
"(",
"'getFrameNumber error: no message found'",
")",
"try",
":",
"frame_no",
"=",
"int",
"(",
"msg",
".",
"split",
"(",
")",
"[",
"1",
"]",
")",
"except",
":",
"raise",
"DriverError",
"(",
"'getFrameNumber error: invalid msg '",
"+",
"msg",
")",
"return",
"frame_no"
] |
Polls the data server to find the current frame number.
Throws an exceotion if it cannot determine it.
|
[
"Polls",
"the",
"data",
"server",
"to",
"find",
"the",
"current",
"frame",
"number",
"."
] |
python
|
train
|
sandwichcloud/ingredients.db
|
ingredients_db/database.py
|
https://github.com/sandwichcloud/ingredients.db/blob/e91602fbece74290e051016439346fd4a3f1524e/ingredients_db/database.py#L53-L76
|
def _add_process_guards(self, engine):
"""Add multiprocessing guards.
Forces a connection to be reconnected if it is detected
as having been shared to a sub-process.
"""
@sqlalchemy.event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_record):
connection_record.info['pid'] = os.getpid()
@sqlalchemy.event.listens_for(engine, "checkout")
def checkout(dbapi_connection, connection_record, connection_proxy):
pid = os.getpid()
if connection_record.info['pid'] != pid:
self.logger.debug(
"Parent process %(orig)s forked (%(newproc)s) with an open database connection, "
"which is being discarded and recreated." % {"newproc": pid, "orig": connection_record.info['pid']})
connection_record.connection = connection_proxy.connection = None
raise exc.DisconnectionError(
"Connection record belongs to pid %s, attempting to check out in pid %s" % (
connection_record.info['pid'], pid)
)
|
[
"def",
"_add_process_guards",
"(",
"self",
",",
"engine",
")",
":",
"@",
"sqlalchemy",
".",
"event",
".",
"listens_for",
"(",
"engine",
",",
"\"connect\"",
")",
"def",
"connect",
"(",
"dbapi_connection",
",",
"connection_record",
")",
":",
"connection_record",
".",
"info",
"[",
"'pid'",
"]",
"=",
"os",
".",
"getpid",
"(",
")",
"@",
"sqlalchemy",
".",
"event",
".",
"listens_for",
"(",
"engine",
",",
"\"checkout\"",
")",
"def",
"checkout",
"(",
"dbapi_connection",
",",
"connection_record",
",",
"connection_proxy",
")",
":",
"pid",
"=",
"os",
".",
"getpid",
"(",
")",
"if",
"connection_record",
".",
"info",
"[",
"'pid'",
"]",
"!=",
"pid",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Parent process %(orig)s forked (%(newproc)s) with an open database connection, \"",
"\"which is being discarded and recreated.\"",
"%",
"{",
"\"newproc\"",
":",
"pid",
",",
"\"orig\"",
":",
"connection_record",
".",
"info",
"[",
"'pid'",
"]",
"}",
")",
"connection_record",
".",
"connection",
"=",
"connection_proxy",
".",
"connection",
"=",
"None",
"raise",
"exc",
".",
"DisconnectionError",
"(",
"\"Connection record belongs to pid %s, attempting to check out in pid %s\"",
"%",
"(",
"connection_record",
".",
"info",
"[",
"'pid'",
"]",
",",
"pid",
")",
")"
] |
Add multiprocessing guards.
Forces a connection to be reconnected if it is detected
as having been shared to a sub-process.
|
[
"Add",
"multiprocessing",
"guards",
"."
] |
python
|
train
|
bpython/curtsies
|
curtsies/formatstring.py
|
https://github.com/bpython/curtsies/blob/223e42b97fbf6c86b479ed4f0963a067333c5a63/curtsies/formatstring.py#L557-L576
|
def width_aware_slice(self, index):
"""Slice based on the number of columns it would take to display the substring."""
if wcswidth(self.s) == -1:
raise ValueError('bad values for width aware slicing')
index = normalize_slice(self.width, index)
counter = 0
parts = []
for chunk in self.chunks:
if index.start < counter + chunk.width and index.stop > counter:
start = max(0, index.start - counter)
end = min(index.stop - counter, chunk.width)
if end - start == chunk.width:
parts.append(chunk)
else:
s_part = width_aware_slice(chunk.s, max(0, index.start - counter), index.stop - counter)
parts.append(Chunk(s_part, chunk.atts))
counter += chunk.width
if index.stop < counter:
break
return FmtStr(*parts) if parts else fmtstr('')
|
[
"def",
"width_aware_slice",
"(",
"self",
",",
"index",
")",
":",
"if",
"wcswidth",
"(",
"self",
".",
"s",
")",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"'bad values for width aware slicing'",
")",
"index",
"=",
"normalize_slice",
"(",
"self",
".",
"width",
",",
"index",
")",
"counter",
"=",
"0",
"parts",
"=",
"[",
"]",
"for",
"chunk",
"in",
"self",
".",
"chunks",
":",
"if",
"index",
".",
"start",
"<",
"counter",
"+",
"chunk",
".",
"width",
"and",
"index",
".",
"stop",
">",
"counter",
":",
"start",
"=",
"max",
"(",
"0",
",",
"index",
".",
"start",
"-",
"counter",
")",
"end",
"=",
"min",
"(",
"index",
".",
"stop",
"-",
"counter",
",",
"chunk",
".",
"width",
")",
"if",
"end",
"-",
"start",
"==",
"chunk",
".",
"width",
":",
"parts",
".",
"append",
"(",
"chunk",
")",
"else",
":",
"s_part",
"=",
"width_aware_slice",
"(",
"chunk",
".",
"s",
",",
"max",
"(",
"0",
",",
"index",
".",
"start",
"-",
"counter",
")",
",",
"index",
".",
"stop",
"-",
"counter",
")",
"parts",
".",
"append",
"(",
"Chunk",
"(",
"s_part",
",",
"chunk",
".",
"atts",
")",
")",
"counter",
"+=",
"chunk",
".",
"width",
"if",
"index",
".",
"stop",
"<",
"counter",
":",
"break",
"return",
"FmtStr",
"(",
"*",
"parts",
")",
"if",
"parts",
"else",
"fmtstr",
"(",
"''",
")"
] |
Slice based on the number of columns it would take to display the substring.
|
[
"Slice",
"based",
"on",
"the",
"number",
"of",
"columns",
"it",
"would",
"take",
"to",
"display",
"the",
"substring",
"."
] |
python
|
train
|
AndrewAnnex/SpiceyPy
|
spiceypy/utils/support_types.py
|
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/utils/support_types.py#L154-L168
|
def cVectorToPython(x):
"""
Convert the c vector data into the correct python data type
(numpy arrays or strings)
:param x:
:return:
"""
if isinstance(x[0], bool):
return numpy.frombuffer(x, dtype=numpy.bool).copy()
elif isinstance(x[0], int):
return numpy.frombuffer(x, dtype=numpy.int32).copy()
elif isinstance(x[0], float):
return numpy.frombuffer(x, dtype=numpy.float64).copy()
elif isinstance(x[0].value, bytes):
return [toPythonString(y) for y in x]
|
[
"def",
"cVectorToPython",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
"[",
"0",
"]",
",",
"bool",
")",
":",
"return",
"numpy",
".",
"frombuffer",
"(",
"x",
",",
"dtype",
"=",
"numpy",
".",
"bool",
")",
".",
"copy",
"(",
")",
"elif",
"isinstance",
"(",
"x",
"[",
"0",
"]",
",",
"int",
")",
":",
"return",
"numpy",
".",
"frombuffer",
"(",
"x",
",",
"dtype",
"=",
"numpy",
".",
"int32",
")",
".",
"copy",
"(",
")",
"elif",
"isinstance",
"(",
"x",
"[",
"0",
"]",
",",
"float",
")",
":",
"return",
"numpy",
".",
"frombuffer",
"(",
"x",
",",
"dtype",
"=",
"numpy",
".",
"float64",
")",
".",
"copy",
"(",
")",
"elif",
"isinstance",
"(",
"x",
"[",
"0",
"]",
".",
"value",
",",
"bytes",
")",
":",
"return",
"[",
"toPythonString",
"(",
"y",
")",
"for",
"y",
"in",
"x",
"]"
] |
Convert the c vector data into the correct python data type
(numpy arrays or strings)
:param x:
:return:
|
[
"Convert",
"the",
"c",
"vector",
"data",
"into",
"the",
"correct",
"python",
"data",
"type",
"(",
"numpy",
"arrays",
"or",
"strings",
")",
":",
"param",
"x",
":",
":",
"return",
":"
] |
python
|
train
|
buriburisuri/sugartensor
|
sugartensor/sg_main.py
|
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_main.py#L360-L443
|
def sg_rnn_layer_func(func):
r"""Decorates function as sg_rnn_layer functions.
Args:
func: function to decorate
"""
@wraps(func)
def wrapper(tensor, **kwargs):
r"""Manages arguments of `tf.sg_opt`.
Args:
tensor: automatically passed by decorator
kwargs:
in_dim: An integer. The size of input dimension, which is set to the last one by default.
dim: An integer. The size of output dimension. Has the same value as in_dim by default.
ln: Boolean. If True, layer normalization is applied.
bias: Boolean. If True, biases are added. As a default, it is set to True
name: A name for the layer. As a default, the function name is assigned.
reuse: `True` or `None`; if `True`, we go into reuse mode for this `layer` scope
as well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
summary: If True, summaries are added. The default is True.
"""
# kwargs parsing
opt = tf.sg_opt(kwargs) + sg_get_context()
# set default argument
try:
shape = tensor.get_shape().as_list()
# dropout off
opt += tf.sg_opt(shape=shape, in_dim=shape[-1], dim=shape[-1], dout=0, summary=True)
# disable bias when normalization on
opt += tf.sg_opt(bias=not opt.ln)
finally:
pass
# automatic layer naming
if opt.name is None:
# layer function name will be used as layer name
opt.name = func.__name__.replace('sg_', '')
# find existing layer names
exist_layers = []
for t in tf.global_variables():
scope_name = tf.get_variable_scope().name
prefix = scope_name + '/' if len(scope_name) > 0 else ''
i = t.name.rfind(prefix + opt.name)
if i >= 0:
exist_layers.append(t.name[i:].split('/')[-2])
exist_layers = list(set(exist_layers))
# layer name numbering
if len(exist_layers) == 0:
opt.name += '_1'
else:
opt.name += '_%d' % (max([int(n.split('_')[-1]) for n in exist_layers]) + 1)
with tf.variable_scope(opt.name, reuse=opt.reuse) as scope:
# call layer function
out = func(tensor, opt)
# apply dropout
if opt.dout:
out = tf.cond(_phase,
lambda: tf.nn.dropout(out, 1 - opt.dout),
lambda: out)
# rename tensor
out = tf.identity(out, 'out')
# add final output summary
if opt.summary:
tf.sg_summary_activation(out)
# save node info for reuse
out._sugar = tf.sg_opt(func=func, arg=tf.sg_opt(kwargs) + sg_get_context(),
prev=tensor, is_layer=True, name=opt.name)
# inject reuse function
out.sg_reuse = types.MethodType(sg_reuse, out)
return out
return wrapper
|
[
"def",
"sg_rnn_layer_func",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"tensor",
",",
"*",
"*",
"kwargs",
")",
":",
"r\"\"\"Manages arguments of `tf.sg_opt`.\n\n Args:\n tensor: automatically passed by decorator\n kwargs:\n in_dim: An integer. The size of input dimension, which is set to the last one by default.\n dim: An integer. The size of output dimension. Has the same value as in_dim by default.\n ln: Boolean. If True, layer normalization is applied.\n bias: Boolean. If True, biases are added. As a default, it is set to True\n name: A name for the layer. As a default, the function name is assigned.\n reuse: `True` or `None`; if `True`, we go into reuse mode for this `layer` scope\n as well as all sub-scopes; if `None`, we just inherit the parent scope reuse.\n summary: If True, summaries are added. The default is True.\n \"\"\"",
"# kwargs parsing",
"opt",
"=",
"tf",
".",
"sg_opt",
"(",
"kwargs",
")",
"+",
"sg_get_context",
"(",
")",
"# set default argument",
"try",
":",
"shape",
"=",
"tensor",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"# dropout off",
"opt",
"+=",
"tf",
".",
"sg_opt",
"(",
"shape",
"=",
"shape",
",",
"in_dim",
"=",
"shape",
"[",
"-",
"1",
"]",
",",
"dim",
"=",
"shape",
"[",
"-",
"1",
"]",
",",
"dout",
"=",
"0",
",",
"summary",
"=",
"True",
")",
"# disable bias when normalization on",
"opt",
"+=",
"tf",
".",
"sg_opt",
"(",
"bias",
"=",
"not",
"opt",
".",
"ln",
")",
"finally",
":",
"pass",
"# automatic layer naming",
"if",
"opt",
".",
"name",
"is",
"None",
":",
"# layer function name will be used as layer name",
"opt",
".",
"name",
"=",
"func",
".",
"__name__",
".",
"replace",
"(",
"'sg_'",
",",
"''",
")",
"# find existing layer names",
"exist_layers",
"=",
"[",
"]",
"for",
"t",
"in",
"tf",
".",
"global_variables",
"(",
")",
":",
"scope_name",
"=",
"tf",
".",
"get_variable_scope",
"(",
")",
".",
"name",
"prefix",
"=",
"scope_name",
"+",
"'/'",
"if",
"len",
"(",
"scope_name",
")",
">",
"0",
"else",
"''",
"i",
"=",
"t",
".",
"name",
".",
"rfind",
"(",
"prefix",
"+",
"opt",
".",
"name",
")",
"if",
"i",
">=",
"0",
":",
"exist_layers",
".",
"append",
"(",
"t",
".",
"name",
"[",
"i",
":",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"2",
"]",
")",
"exist_layers",
"=",
"list",
"(",
"set",
"(",
"exist_layers",
")",
")",
"# layer name numbering",
"if",
"len",
"(",
"exist_layers",
")",
"==",
"0",
":",
"opt",
".",
"name",
"+=",
"'_1'",
"else",
":",
"opt",
".",
"name",
"+=",
"'_%d'",
"%",
"(",
"max",
"(",
"[",
"int",
"(",
"n",
".",
"split",
"(",
"'_'",
")",
"[",
"-",
"1",
"]",
")",
"for",
"n",
"in",
"exist_layers",
"]",
")",
"+",
"1",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"opt",
".",
"name",
",",
"reuse",
"=",
"opt",
".",
"reuse",
")",
"as",
"scope",
":",
"# call layer function",
"out",
"=",
"func",
"(",
"tensor",
",",
"opt",
")",
"# apply dropout",
"if",
"opt",
".",
"dout",
":",
"out",
"=",
"tf",
".",
"cond",
"(",
"_phase",
",",
"lambda",
":",
"tf",
".",
"nn",
".",
"dropout",
"(",
"out",
",",
"1",
"-",
"opt",
".",
"dout",
")",
",",
"lambda",
":",
"out",
")",
"# rename tensor",
"out",
"=",
"tf",
".",
"identity",
"(",
"out",
",",
"'out'",
")",
"# add final output summary",
"if",
"opt",
".",
"summary",
":",
"tf",
".",
"sg_summary_activation",
"(",
"out",
")",
"# save node info for reuse",
"out",
".",
"_sugar",
"=",
"tf",
".",
"sg_opt",
"(",
"func",
"=",
"func",
",",
"arg",
"=",
"tf",
".",
"sg_opt",
"(",
"kwargs",
")",
"+",
"sg_get_context",
"(",
")",
",",
"prev",
"=",
"tensor",
",",
"is_layer",
"=",
"True",
",",
"name",
"=",
"opt",
".",
"name",
")",
"# inject reuse function",
"out",
".",
"sg_reuse",
"=",
"types",
".",
"MethodType",
"(",
"sg_reuse",
",",
"out",
")",
"return",
"out",
"return",
"wrapper"
] |
r"""Decorates function as sg_rnn_layer functions.
Args:
func: function to decorate
|
[
"r",
"Decorates",
"function",
"as",
"sg_rnn_layer",
"functions",
".",
"Args",
":",
"func",
":",
"function",
"to",
"decorate"
] |
python
|
train
|
jim-easterbrook/pywws
|
src/pywws/weatherstation.py
|
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/weatherstation.py#L713-L723
|
def current_pos(self):
"""Get circular buffer location where current data is being written."""
new_ptr = _decode(
self._read_fixed_block(0x0020), self.lo_fix_format['current_pos'])
if new_ptr == self._current_ptr:
return self._current_ptr
if self._current_ptr and new_ptr != self.inc_ptr(self._current_ptr):
logger.error(
'unexpected ptr change %06x -> %06x', self._current_ptr, new_ptr)
self._current_ptr = new_ptr
return self._current_ptr
|
[
"def",
"current_pos",
"(",
"self",
")",
":",
"new_ptr",
"=",
"_decode",
"(",
"self",
".",
"_read_fixed_block",
"(",
"0x0020",
")",
",",
"self",
".",
"lo_fix_format",
"[",
"'current_pos'",
"]",
")",
"if",
"new_ptr",
"==",
"self",
".",
"_current_ptr",
":",
"return",
"self",
".",
"_current_ptr",
"if",
"self",
".",
"_current_ptr",
"and",
"new_ptr",
"!=",
"self",
".",
"inc_ptr",
"(",
"self",
".",
"_current_ptr",
")",
":",
"logger",
".",
"error",
"(",
"'unexpected ptr change %06x -> %06x'",
",",
"self",
".",
"_current_ptr",
",",
"new_ptr",
")",
"self",
".",
"_current_ptr",
"=",
"new_ptr",
"return",
"self",
".",
"_current_ptr"
] |
Get circular buffer location where current data is being written.
|
[
"Get",
"circular",
"buffer",
"location",
"where",
"current",
"data",
"is",
"being",
"written",
"."
] |
python
|
train
|
pennlabs/penn-sdk-python
|
penn/studyspaces.py
|
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/studyspaces.py#L268-L286
|
def get_room_info(self, room_ids):
"""Gets room information for a given list of ids.
:param room_ids: a room id or a list of room ids (comma separated).
:type room_ids: string
"""
try:
resp = self._request("GET", "/1.1/space/item/{}".format(room_ids))
rooms = resp.json()
for room in rooms:
if not room["image"].startswith("http"):
room["image"] = "https:" + room["image"]
if "description" in room:
description = room["description"].replace(u'\xa0', u' ')
room["description"] = BeautifulSoup(description, "html.parser").text.strip()
except resp.exceptions.HTTPError as error:
raise APIError("Server Error: {}".format(error))
return rooms
|
[
"def",
"get_room_info",
"(",
"self",
",",
"room_ids",
")",
":",
"try",
":",
"resp",
"=",
"self",
".",
"_request",
"(",
"\"GET\"",
",",
"\"/1.1/space/item/{}\"",
".",
"format",
"(",
"room_ids",
")",
")",
"rooms",
"=",
"resp",
".",
"json",
"(",
")",
"for",
"room",
"in",
"rooms",
":",
"if",
"not",
"room",
"[",
"\"image\"",
"]",
".",
"startswith",
"(",
"\"http\"",
")",
":",
"room",
"[",
"\"image\"",
"]",
"=",
"\"https:\"",
"+",
"room",
"[",
"\"image\"",
"]",
"if",
"\"description\"",
"in",
"room",
":",
"description",
"=",
"room",
"[",
"\"description\"",
"]",
".",
"replace",
"(",
"u'\\xa0'",
",",
"u' '",
")",
"room",
"[",
"\"description\"",
"]",
"=",
"BeautifulSoup",
"(",
"description",
",",
"\"html.parser\"",
")",
".",
"text",
".",
"strip",
"(",
")",
"except",
"resp",
".",
"exceptions",
".",
"HTTPError",
"as",
"error",
":",
"raise",
"APIError",
"(",
"\"Server Error: {}\"",
".",
"format",
"(",
"error",
")",
")",
"return",
"rooms"
] |
Gets room information for a given list of ids.
:param room_ids: a room id or a list of room ids (comma separated).
:type room_ids: string
|
[
"Gets",
"room",
"information",
"for",
"a",
"given",
"list",
"of",
"ids",
"."
] |
python
|
train
|
onicagroup/runway
|
runway/commands/runway/gen_sample.py
|
https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/commands/runway/gen_sample.py#L279-L296
|
def execute(self):
"""Run selected module generator."""
if self._cli_arguments['cfn']:
generate_sample_cfn_module(self.env_root)
elif self._cli_arguments['sls']:
generate_sample_sls_module(self.env_root)
elif self._cli_arguments['sls-tsc']:
generate_sample_sls_tsc_module(self.env_root)
elif self._cli_arguments['stacker']:
generate_sample_stacker_module(self.env_root)
elif self._cli_arguments['tf']:
generate_sample_tf_module(self.env_root)
elif self._cli_arguments['cdk-tsc']:
generate_sample_cdk_tsc_module(self.env_root)
elif self._cli_arguments['cdk-py']:
generate_sample_cdk_py_module(self.env_root)
elif self._cli_arguments['cdk-csharp']:
generate_sample_cdk_cs_module(self.env_root)
|
[
"def",
"execute",
"(",
"self",
")",
":",
"if",
"self",
".",
"_cli_arguments",
"[",
"'cfn'",
"]",
":",
"generate_sample_cfn_module",
"(",
"self",
".",
"env_root",
")",
"elif",
"self",
".",
"_cli_arguments",
"[",
"'sls'",
"]",
":",
"generate_sample_sls_module",
"(",
"self",
".",
"env_root",
")",
"elif",
"self",
".",
"_cli_arguments",
"[",
"'sls-tsc'",
"]",
":",
"generate_sample_sls_tsc_module",
"(",
"self",
".",
"env_root",
")",
"elif",
"self",
".",
"_cli_arguments",
"[",
"'stacker'",
"]",
":",
"generate_sample_stacker_module",
"(",
"self",
".",
"env_root",
")",
"elif",
"self",
".",
"_cli_arguments",
"[",
"'tf'",
"]",
":",
"generate_sample_tf_module",
"(",
"self",
".",
"env_root",
")",
"elif",
"self",
".",
"_cli_arguments",
"[",
"'cdk-tsc'",
"]",
":",
"generate_sample_cdk_tsc_module",
"(",
"self",
".",
"env_root",
")",
"elif",
"self",
".",
"_cli_arguments",
"[",
"'cdk-py'",
"]",
":",
"generate_sample_cdk_py_module",
"(",
"self",
".",
"env_root",
")",
"elif",
"self",
".",
"_cli_arguments",
"[",
"'cdk-csharp'",
"]",
":",
"generate_sample_cdk_cs_module",
"(",
"self",
".",
"env_root",
")"
] |
Run selected module generator.
|
[
"Run",
"selected",
"module",
"generator",
"."
] |
python
|
train
|
ramrod-project/database-brain
|
schema/brain/controller/verification.py
|
https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/controller/verification.py#L9-L25
|
def verify_port_map(plugin):
"""
extra validation /
PB2 sees empty lists the same as non-existant lists
:param plugin:
:return:
"""
result = True
ex_ports = plugin.get(EX_PORTS_KEY)
result &= isinstance(ex_ports, list) and verify_ports(ex_ports)
in_ports = plugin.get(IN_PORTS_KEY)
result &= isinstance(in_ports, list) and verify_ports(in_ports)
env_list = plugin.get(ENV_KEY)
result &= isinstance(env_list, list) and verify_environment(env_list)
if result:
result &= len(ex_ports) == len(in_ports)
return result
|
[
"def",
"verify_port_map",
"(",
"plugin",
")",
":",
"result",
"=",
"True",
"ex_ports",
"=",
"plugin",
".",
"get",
"(",
"EX_PORTS_KEY",
")",
"result",
"&=",
"isinstance",
"(",
"ex_ports",
",",
"list",
")",
"and",
"verify_ports",
"(",
"ex_ports",
")",
"in_ports",
"=",
"plugin",
".",
"get",
"(",
"IN_PORTS_KEY",
")",
"result",
"&=",
"isinstance",
"(",
"in_ports",
",",
"list",
")",
"and",
"verify_ports",
"(",
"in_ports",
")",
"env_list",
"=",
"plugin",
".",
"get",
"(",
"ENV_KEY",
")",
"result",
"&=",
"isinstance",
"(",
"env_list",
",",
"list",
")",
"and",
"verify_environment",
"(",
"env_list",
")",
"if",
"result",
":",
"result",
"&=",
"len",
"(",
"ex_ports",
")",
"==",
"len",
"(",
"in_ports",
")",
"return",
"result"
] |
extra validation /
PB2 sees empty lists the same as non-existant lists
:param plugin:
:return:
|
[
"extra",
"validation",
"/",
"PB2",
"sees",
"empty",
"lists",
"the",
"same",
"as",
"non",
"-",
"existant",
"lists",
":",
"param",
"plugin",
":",
":",
"return",
":"
] |
python
|
train
|
googleapis/google-cloud-python
|
dlp/google/cloud/dlp_v2/gapic/dlp_service_client.py
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/dlp/google/cloud/dlp_v2/gapic/dlp_service_client.py#L151-L157
|
def organization_stored_info_type_path(cls, organization, stored_info_type):
"""Return a fully-qualified organization_stored_info_type string."""
return google.api_core.path_template.expand(
"organizations/{organization}/storedInfoTypes/{stored_info_type}",
organization=organization,
stored_info_type=stored_info_type,
)
|
[
"def",
"organization_stored_info_type_path",
"(",
"cls",
",",
"organization",
",",
"stored_info_type",
")",
":",
"return",
"google",
".",
"api_core",
".",
"path_template",
".",
"expand",
"(",
"\"organizations/{organization}/storedInfoTypes/{stored_info_type}\"",
",",
"organization",
"=",
"organization",
",",
"stored_info_type",
"=",
"stored_info_type",
",",
")"
] |
Return a fully-qualified organization_stored_info_type string.
|
[
"Return",
"a",
"fully",
"-",
"qualified",
"organization_stored_info_type",
"string",
"."
] |
python
|
train
|
blink1073/oct2py
|
oct2py/core.py
|
https://github.com/blink1073/oct2py/blob/bfc69d2168ae3d98258f95bbc55a858c21836b58/oct2py/core.py#L695-L698
|
def _get_user_class(self, name):
"""Get or create a user class of the given type."""
self._user_classes.setdefault(name, _make_user_class(self, name))
return self._user_classes[name]
|
[
"def",
"_get_user_class",
"(",
"self",
",",
"name",
")",
":",
"self",
".",
"_user_classes",
".",
"setdefault",
"(",
"name",
",",
"_make_user_class",
"(",
"self",
",",
"name",
")",
")",
"return",
"self",
".",
"_user_classes",
"[",
"name",
"]"
] |
Get or create a user class of the given type.
|
[
"Get",
"or",
"create",
"a",
"user",
"class",
"of",
"the",
"given",
"type",
"."
] |
python
|
valid
|
globocom/GloboNetworkAPI-client-python
|
networkapiclient/ApiInterface.py
|
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiInterface.py#L187-L195
|
def create_channel(self, channel):
"""
Method to create a channel.
:param channel: List containing channel's desired to be created on database.
:return: Id.
"""
data = {'channels': channel}
return super(ApiInterfaceRequest, self).post('api/v3/channel/', data)
|
[
"def",
"create_channel",
"(",
"self",
",",
"channel",
")",
":",
"data",
"=",
"{",
"'channels'",
":",
"channel",
"}",
"return",
"super",
"(",
"ApiInterfaceRequest",
",",
"self",
")",
".",
"post",
"(",
"'api/v3/channel/'",
",",
"data",
")"
] |
Method to create a channel.
:param channel: List containing channel's desired to be created on database.
:return: Id.
|
[
"Method",
"to",
"create",
"a",
"channel",
".",
":",
"param",
"channel",
":",
"List",
"containing",
"channel",
"s",
"desired",
"to",
"be",
"created",
"on",
"database",
".",
":",
"return",
":",
"Id",
"."
] |
python
|
train
|
rrwen/search_google
|
search_google/api.py
|
https://github.com/rrwen/search_google/blob/e647868ba5da2803e787a3c06b32e09452068736/search_google/api.py#L134-L172
|
def preview(self, n=10, k='items', kheader='displayLink', klink='link', kdescription='snippet'):
"""Print a preview of the search results.
Args:
n (int):
Maximum number of search results to preview
k (str):
Key in :class:`api.results`.metadata to preview
kheader (str):
Key in :class:`api.results`.metadata[``k``] to use as the header
klink (str):
Key in :class:`api.results`.metadata[``k``] to use as the link if image search
kdescription (str):
Key in :class:`api.results`.metadata[``k``] to use as the description
"""
if 'searchType' in self.cseargs:
searchType = self.cseargs['searchType']
else:
searchType = None
items = self.metadata[k]
# (cse_print) Print results
for i, kv in enumerate(items[:n]):
if 'start' in self.cseargs:
i += int(self.cseargs['start'])
# (print_header) Print result header
header = '\n[' + str(i) + '] ' + kv[kheader]
print(header)
print('=' * len(header))
# (print_image) Print result image file
if searchType == 'image':
link = '\n' + path.basename(kv[klink])
print(link)
# (print_description) Print result snippet
description = '\n' + kv[kdescription]
print(description)
|
[
"def",
"preview",
"(",
"self",
",",
"n",
"=",
"10",
",",
"k",
"=",
"'items'",
",",
"kheader",
"=",
"'displayLink'",
",",
"klink",
"=",
"'link'",
",",
"kdescription",
"=",
"'snippet'",
")",
":",
"if",
"'searchType'",
"in",
"self",
".",
"cseargs",
":",
"searchType",
"=",
"self",
".",
"cseargs",
"[",
"'searchType'",
"]",
"else",
":",
"searchType",
"=",
"None",
"items",
"=",
"self",
".",
"metadata",
"[",
"k",
"]",
"# (cse_print) Print results",
"for",
"i",
",",
"kv",
"in",
"enumerate",
"(",
"items",
"[",
":",
"n",
"]",
")",
":",
"if",
"'start'",
"in",
"self",
".",
"cseargs",
":",
"i",
"+=",
"int",
"(",
"self",
".",
"cseargs",
"[",
"'start'",
"]",
")",
"# (print_header) Print result header",
"header",
"=",
"'\\n['",
"+",
"str",
"(",
"i",
")",
"+",
"'] '",
"+",
"kv",
"[",
"kheader",
"]",
"print",
"(",
"header",
")",
"print",
"(",
"'='",
"*",
"len",
"(",
"header",
")",
")",
"# (print_image) Print result image file",
"if",
"searchType",
"==",
"'image'",
":",
"link",
"=",
"'\\n'",
"+",
"path",
".",
"basename",
"(",
"kv",
"[",
"klink",
"]",
")",
"print",
"(",
"link",
")",
"# (print_description) Print result snippet",
"description",
"=",
"'\\n'",
"+",
"kv",
"[",
"kdescription",
"]",
"print",
"(",
"description",
")"
] |
Print a preview of the search results.
Args:
n (int):
Maximum number of search results to preview
k (str):
Key in :class:`api.results`.metadata to preview
kheader (str):
Key in :class:`api.results`.metadata[``k``] to use as the header
klink (str):
Key in :class:`api.results`.metadata[``k``] to use as the link if image search
kdescription (str):
Key in :class:`api.results`.metadata[``k``] to use as the description
|
[
"Print",
"a",
"preview",
"of",
"the",
"search",
"results",
".",
"Args",
":",
"n",
"(",
"int",
")",
":",
"Maximum",
"number",
"of",
"search",
"results",
"to",
"preview",
"k",
"(",
"str",
")",
":",
"Key",
"in",
":",
"class",
":",
"api",
".",
"results",
".",
"metadata",
"to",
"preview",
"kheader",
"(",
"str",
")",
":",
"Key",
"in",
":",
"class",
":",
"api",
".",
"results",
".",
"metadata",
"[",
"k",
"]",
"to",
"use",
"as",
"the",
"header",
"klink",
"(",
"str",
")",
":",
"Key",
"in",
":",
"class",
":",
"api",
".",
"results",
".",
"metadata",
"[",
"k",
"]",
"to",
"use",
"as",
"the",
"link",
"if",
"image",
"search",
"kdescription",
"(",
"str",
")",
":",
"Key",
"in",
":",
"class",
":",
"api",
".",
"results",
".",
"metadata",
"[",
"k",
"]",
"to",
"use",
"as",
"the",
"description"
] |
python
|
train
|
brocade/pynos
|
pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py
|
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py#L706-L720
|
def get_interface_detail_input_request_type_get_next_request_last_rcvd_interface_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_interface_detail = ET.Element("get_interface_detail")
config = get_interface_detail
input = ET.SubElement(get_interface_detail, "input")
request_type = ET.SubElement(input, "request-type")
get_next_request = ET.SubElement(request_type, "get-next-request")
last_rcvd_interface = ET.SubElement(get_next_request, "last-rcvd-interface")
interface_name = ET.SubElement(last_rcvd_interface, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
[
"def",
"get_interface_detail_input_request_type_get_next_request_last_rcvd_interface_interface_name",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_interface_detail",
"=",
"ET",
".",
"Element",
"(",
"\"get_interface_detail\"",
")",
"config",
"=",
"get_interface_detail",
"input",
"=",
"ET",
".",
"SubElement",
"(",
"get_interface_detail",
",",
"\"input\"",
")",
"request_type",
"=",
"ET",
".",
"SubElement",
"(",
"input",
",",
"\"request-type\"",
")",
"get_next_request",
"=",
"ET",
".",
"SubElement",
"(",
"request_type",
",",
"\"get-next-request\"",
")",
"last_rcvd_interface",
"=",
"ET",
".",
"SubElement",
"(",
"get_next_request",
",",
"\"last-rcvd-interface\"",
")",
"interface_name",
"=",
"ET",
".",
"SubElement",
"(",
"last_rcvd_interface",
",",
"\"interface-name\"",
")",
"interface_name",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'interface_name'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] |
Auto Generated Code
|
[
"Auto",
"Generated",
"Code"
] |
python
|
train
|
user-cont/conu
|
conu/backend/nspawn/image.py
|
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/nspawn/image.py#L345-L403
|
def run_via_binary(self, command=None, foreground=False, volumes=None,
additional_opts=None, default_options=None, name=None, *args, **kwargs):
"""
Create new instance NspawnContianer in case of not running at foreground, in case foreground run, return process
object
:param command: list - command to run
:param foreground: bool - run process at foreground
:param volumes: list - put additional bind mounts
:param additional_opts: list of more boot options for systemd-nspawn command
:param default_options: default boot option (-b)
:param name: str - name of running instance
:param args: pass thru params to subprocess.Popen
:param kwargs: pass thru params to subprocess.Popen
:return: process or NspawnContianer instance
"""
command = deepcopy(command) or []
volumes = deepcopy(volumes) or []
additional_opts = deepcopy(additional_opts) or []
internalkw = deepcopy(kwargs) or {}
inernalargs = deepcopy(args) or []
if default_options is None:
default_options = ["-b"]
# TODO: reconsile parameters (changed from API definition)
logger.info("run container via binary in background")
machine_name = constants.CONU_ARTIFACT_TAG
if name:
machine_name += name
else:
machine_name += random_str()
if not foreground:
# WARN: avoid to run boot without stderr and stdout to terminal, it breaks terminal,
# it systemd-nspawn does some magic with console
# TODO: is able to avoid this behaviour in better way?
internalkw["stdout"] = subprocess.PIPE
internalkw["stderr"] = subprocess.PIPE
additional_opts += default_options
if volumes:
additional_opts += self.get_volume_options(volumes=volumes)
logger.debug("starting NSPAWN")
systemd_command = [
"systemd-nspawn",
"--machine",
machine_name,
"-i",
self.local_location] + additional_opts + command
logger.debug("Start command: %s" % " ".join(systemd_command))
callback_method = (subprocess.Popen, systemd_command, inernalargs, internalkw)
self.container_process = NspawnContainer.internal_run_container(
name=machine_name,
callback_method=callback_method,
foreground=foreground
)
if foreground:
return self.container_process
else:
return NspawnContainer(self, None, name=machine_name,
start_process=self.container_process, start_action=callback_method)
|
[
"def",
"run_via_binary",
"(",
"self",
",",
"command",
"=",
"None",
",",
"foreground",
"=",
"False",
",",
"volumes",
"=",
"None",
",",
"additional_opts",
"=",
"None",
",",
"default_options",
"=",
"None",
",",
"name",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"command",
"=",
"deepcopy",
"(",
"command",
")",
"or",
"[",
"]",
"volumes",
"=",
"deepcopy",
"(",
"volumes",
")",
"or",
"[",
"]",
"additional_opts",
"=",
"deepcopy",
"(",
"additional_opts",
")",
"or",
"[",
"]",
"internalkw",
"=",
"deepcopy",
"(",
"kwargs",
")",
"or",
"{",
"}",
"inernalargs",
"=",
"deepcopy",
"(",
"args",
")",
"or",
"[",
"]",
"if",
"default_options",
"is",
"None",
":",
"default_options",
"=",
"[",
"\"-b\"",
"]",
"# TODO: reconsile parameters (changed from API definition)",
"logger",
".",
"info",
"(",
"\"run container via binary in background\"",
")",
"machine_name",
"=",
"constants",
".",
"CONU_ARTIFACT_TAG",
"if",
"name",
":",
"machine_name",
"+=",
"name",
"else",
":",
"machine_name",
"+=",
"random_str",
"(",
")",
"if",
"not",
"foreground",
":",
"# WARN: avoid to run boot without stderr and stdout to terminal, it breaks terminal,",
"# it systemd-nspawn does some magic with console",
"# TODO: is able to avoid this behaviour in better way?",
"internalkw",
"[",
"\"stdout\"",
"]",
"=",
"subprocess",
".",
"PIPE",
"internalkw",
"[",
"\"stderr\"",
"]",
"=",
"subprocess",
".",
"PIPE",
"additional_opts",
"+=",
"default_options",
"if",
"volumes",
":",
"additional_opts",
"+=",
"self",
".",
"get_volume_options",
"(",
"volumes",
"=",
"volumes",
")",
"logger",
".",
"debug",
"(",
"\"starting NSPAWN\"",
")",
"systemd_command",
"=",
"[",
"\"systemd-nspawn\"",
",",
"\"--machine\"",
",",
"machine_name",
",",
"\"-i\"",
",",
"self",
".",
"local_location",
"]",
"+",
"additional_opts",
"+",
"command",
"logger",
".",
"debug",
"(",
"\"Start command: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"systemd_command",
")",
")",
"callback_method",
"=",
"(",
"subprocess",
".",
"Popen",
",",
"systemd_command",
",",
"inernalargs",
",",
"internalkw",
")",
"self",
".",
"container_process",
"=",
"NspawnContainer",
".",
"internal_run_container",
"(",
"name",
"=",
"machine_name",
",",
"callback_method",
"=",
"callback_method",
",",
"foreground",
"=",
"foreground",
")",
"if",
"foreground",
":",
"return",
"self",
".",
"container_process",
"else",
":",
"return",
"NspawnContainer",
"(",
"self",
",",
"None",
",",
"name",
"=",
"machine_name",
",",
"start_process",
"=",
"self",
".",
"container_process",
",",
"start_action",
"=",
"callback_method",
")"
] |
Create new instance NspawnContianer in case of not running at foreground, in case foreground run, return process
object
:param command: list - command to run
:param foreground: bool - run process at foreground
:param volumes: list - put additional bind mounts
:param additional_opts: list of more boot options for systemd-nspawn command
:param default_options: default boot option (-b)
:param name: str - name of running instance
:param args: pass thru params to subprocess.Popen
:param kwargs: pass thru params to subprocess.Popen
:return: process or NspawnContianer instance
|
[
"Create",
"new",
"instance",
"NspawnContianer",
"in",
"case",
"of",
"not",
"running",
"at",
"foreground",
"in",
"case",
"foreground",
"run",
"return",
"process",
"object"
] |
python
|
train
|
crossbario/txaio-etcd
|
txaioetcd/_client_tx.py
|
https://github.com/crossbario/txaio-etcd/blob/c9aebff7f288a0b219bffc9d2579d22cf543baa5/txaioetcd/_client_tx.py#L237-L253
|
def status(self, timeout=None):
"""
Get etcd status.
:param timeout: Request timeout in seconds.
:type timeout: int
:returns: The current etcd cluster status.
:rtype: instance of :class:`txaioetcd.Status`
"""
assembler = commons.StatusRequestAssembler(self._url)
obj = yield self._post(assembler.url, assembler.data, timeout)
status = Status._parse(obj)
returnValue(status)
|
[
"def",
"status",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"assembler",
"=",
"commons",
".",
"StatusRequestAssembler",
"(",
"self",
".",
"_url",
")",
"obj",
"=",
"yield",
"self",
".",
"_post",
"(",
"assembler",
".",
"url",
",",
"assembler",
".",
"data",
",",
"timeout",
")",
"status",
"=",
"Status",
".",
"_parse",
"(",
"obj",
")",
"returnValue",
"(",
"status",
")"
] |
Get etcd status.
:param timeout: Request timeout in seconds.
:type timeout: int
:returns: The current etcd cluster status.
:rtype: instance of :class:`txaioetcd.Status`
|
[
"Get",
"etcd",
"status",
"."
] |
python
|
train
|
tjomasc/snekbol
|
snekbol/document.py
|
https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L460-L481
|
def _extend_module_definitions(self, graph):
"""
Using collected module definitions extend linkages
"""
for mod_id in self._modules:
mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
modules = []
for mod in graph.triples((mod_identity, SBOL.module, None)):
md = self._get_rdf_identified(graph, mod[2])
definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
md['definition'] = self._modules[definition_id]
maps_to = []
for m in graph.triples((mod[2], SBOL.mapsTo, None)):
mt = self._get_rdf_identified(graph, m[2])
mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
local_id = self._get_triplet_value(graph, m[2], SBOL.local)
remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
mt['local'] = self._functional_component_store[local_id]
mt['remote'] = self._functional_component_store[remote_id]
maps_to.append(MapsTo(**mt))
modules.append(Module(maps_to=maps_to, **md))
self._modules[mod_id].modules = modules
|
[
"def",
"_extend_module_definitions",
"(",
"self",
",",
"graph",
")",
":",
"for",
"mod_id",
"in",
"self",
".",
"_modules",
":",
"mod_identity",
"=",
"self",
".",
"_get_triplet_value",
"(",
"graph",
",",
"URIRef",
"(",
"mod_id",
")",
",",
"SBOL",
".",
"module",
")",
"modules",
"=",
"[",
"]",
"for",
"mod",
"in",
"graph",
".",
"triples",
"(",
"(",
"mod_identity",
",",
"SBOL",
".",
"module",
",",
"None",
")",
")",
":",
"md",
"=",
"self",
".",
"_get_rdf_identified",
"(",
"graph",
",",
"mod",
"[",
"2",
"]",
")",
"definition_id",
"=",
"self",
".",
"_get_triplet_value",
"(",
"graph",
",",
"mod",
"[",
"2",
"]",
",",
"SBOL",
".",
"definition",
")",
"md",
"[",
"'definition'",
"]",
"=",
"self",
".",
"_modules",
"[",
"definition_id",
"]",
"maps_to",
"=",
"[",
"]",
"for",
"m",
"in",
"graph",
".",
"triples",
"(",
"(",
"mod",
"[",
"2",
"]",
",",
"SBOL",
".",
"mapsTo",
",",
"None",
")",
")",
":",
"mt",
"=",
"self",
".",
"_get_rdf_identified",
"(",
"graph",
",",
"m",
"[",
"2",
"]",
")",
"mt",
"[",
"'refinement'",
"]",
"=",
"self",
".",
"_get_triplet_value",
"(",
"graph",
",",
"m",
"[",
"2",
"]",
",",
"SBOL",
".",
"refinement",
")",
"local_id",
"=",
"self",
".",
"_get_triplet_value",
"(",
"graph",
",",
"m",
"[",
"2",
"]",
",",
"SBOL",
".",
"local",
")",
"remote_id",
"=",
"self",
".",
"_get_triplet_value",
"(",
"graph",
",",
"m",
"[",
"2",
"]",
",",
"SBOL",
".",
"remote",
")",
"mt",
"[",
"'local'",
"]",
"=",
"self",
".",
"_functional_component_store",
"[",
"local_id",
"]",
"mt",
"[",
"'remote'",
"]",
"=",
"self",
".",
"_functional_component_store",
"[",
"remote_id",
"]",
"maps_to",
".",
"append",
"(",
"MapsTo",
"(",
"*",
"*",
"mt",
")",
")",
"modules",
".",
"append",
"(",
"Module",
"(",
"maps_to",
"=",
"maps_to",
",",
"*",
"*",
"md",
")",
")",
"self",
".",
"_modules",
"[",
"mod_id",
"]",
".",
"modules",
"=",
"modules"
] |
Using collected module definitions extend linkages
|
[
"Using",
"collected",
"module",
"definitions",
"extend",
"linkages"
] |
python
|
train
|
lmcinnes/umap
|
umap/umap_.py
|
https://github.com/lmcinnes/umap/blob/bbb01c03ba49f7bff8f77fd662d00e50d6686c77/umap/umap_.py#L535-L559
|
def reset_local_connectivity(simplicial_set):
"""Reset the local connectivity requirement -- each data sample should
have complete confidence in at least one 1-simplex in the simplicial set.
We can enforce this by locally rescaling confidences, and then remerging the
different local simplicial sets together.
Parameters
----------
simplicial_set: sparse matrix
The simplicial set for which to recalculate with respect to local
connectivity.
Returns
-------
simplicial_set: sparse_matrix
The recalculated simplicial set, now with the local connectivity
assumption restored.
"""
simplicial_set = normalize(simplicial_set, norm="max")
transpose = simplicial_set.transpose()
prod_matrix = simplicial_set.multiply(transpose)
simplicial_set = simplicial_set + transpose - prod_matrix
simplicial_set.eliminate_zeros()
return simplicial_set
|
[
"def",
"reset_local_connectivity",
"(",
"simplicial_set",
")",
":",
"simplicial_set",
"=",
"normalize",
"(",
"simplicial_set",
",",
"norm",
"=",
"\"max\"",
")",
"transpose",
"=",
"simplicial_set",
".",
"transpose",
"(",
")",
"prod_matrix",
"=",
"simplicial_set",
".",
"multiply",
"(",
"transpose",
")",
"simplicial_set",
"=",
"simplicial_set",
"+",
"transpose",
"-",
"prod_matrix",
"simplicial_set",
".",
"eliminate_zeros",
"(",
")",
"return",
"simplicial_set"
] |
Reset the local connectivity requirement -- each data sample should
have complete confidence in at least one 1-simplex in the simplicial set.
We can enforce this by locally rescaling confidences, and then remerging the
different local simplicial sets together.
Parameters
----------
simplicial_set: sparse matrix
The simplicial set for which to recalculate with respect to local
connectivity.
Returns
-------
simplicial_set: sparse_matrix
The recalculated simplicial set, now with the local connectivity
assumption restored.
|
[
"Reset",
"the",
"local",
"connectivity",
"requirement",
"--",
"each",
"data",
"sample",
"should",
"have",
"complete",
"confidence",
"in",
"at",
"least",
"one",
"1",
"-",
"simplex",
"in",
"the",
"simplicial",
"set",
".",
"We",
"can",
"enforce",
"this",
"by",
"locally",
"rescaling",
"confidences",
"and",
"then",
"remerging",
"the",
"different",
"local",
"simplicial",
"sets",
"together",
"."
] |
python
|
train
|
hbldh/sudokuextract
|
sudokuextract/imgproc/geometry.py
|
https://github.com/hbldh/sudokuextract/blob/0dff3b46b9896a8bedfc474c61a089e7901f720c/sudokuextract/imgproc/geometry.py#L28-L56
|
def warp_image_by_corner_points_projection(corner_points, image):
"""Given corner points of a Sudoku, warps original selection to a square image.
:param corner_points:
:type: corner_points: list
:param image:
:type image:
:return:
:rtype:
"""
# Clarify by storing in named variables.
top_left, top_right, bottom_left, bottom_right = np.array(corner_points)
top_edge = np.linalg.norm(top_right - top_left)
bottom_edge = np.linalg.norm(bottom_right - bottom_left)
left_edge = np.linalg.norm(top_left - bottom_left)
right_edge = np.linalg.norm(top_right - bottom_right)
L = int(np.ceil(max([top_edge, bottom_edge, left_edge, right_edge])))
src = np.array([top_left, top_right, bottom_left, bottom_right])
dst = np.array([[0, 0], [L - 1, 0], [0, L - 1], [L - 1, L - 1]])
tr = ProjectiveTransform()
tr.estimate(dst, src)
warped_image = warp(image, tr, output_shape=(L, L))
out = resize(warped_image, (500, 500))
return out
|
[
"def",
"warp_image_by_corner_points_projection",
"(",
"corner_points",
",",
"image",
")",
":",
"# Clarify by storing in named variables.",
"top_left",
",",
"top_right",
",",
"bottom_left",
",",
"bottom_right",
"=",
"np",
".",
"array",
"(",
"corner_points",
")",
"top_edge",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"top_right",
"-",
"top_left",
")",
"bottom_edge",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"bottom_right",
"-",
"bottom_left",
")",
"left_edge",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"top_left",
"-",
"bottom_left",
")",
"right_edge",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"top_right",
"-",
"bottom_right",
")",
"L",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"max",
"(",
"[",
"top_edge",
",",
"bottom_edge",
",",
"left_edge",
",",
"right_edge",
"]",
")",
")",
")",
"src",
"=",
"np",
".",
"array",
"(",
"[",
"top_left",
",",
"top_right",
",",
"bottom_left",
",",
"bottom_right",
"]",
")",
"dst",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"L",
"-",
"1",
",",
"0",
"]",
",",
"[",
"0",
",",
"L",
"-",
"1",
"]",
",",
"[",
"L",
"-",
"1",
",",
"L",
"-",
"1",
"]",
"]",
")",
"tr",
"=",
"ProjectiveTransform",
"(",
")",
"tr",
".",
"estimate",
"(",
"dst",
",",
"src",
")",
"warped_image",
"=",
"warp",
"(",
"image",
",",
"tr",
",",
"output_shape",
"=",
"(",
"L",
",",
"L",
")",
")",
"out",
"=",
"resize",
"(",
"warped_image",
",",
"(",
"500",
",",
"500",
")",
")",
"return",
"out"
] |
Given corner points of a Sudoku, warps original selection to a square image.
:param corner_points:
:type: corner_points: list
:param image:
:type image:
:return:
:rtype:
|
[
"Given",
"corner",
"points",
"of",
"a",
"Sudoku",
"warps",
"original",
"selection",
"to",
"a",
"square",
"image",
"."
] |
python
|
train
|
cackharot/suds-py3
|
suds/xsd/schema.py
|
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/xsd/schema.py#L60-L74
|
def add(self, schema):
"""
Add a schema node to the collection. Schema(s) within the same target
namespace are consolidated.
@param schema: A schema object.
@type schema: (L{Schema})
"""
key = schema.tns[1]
existing = self.namespaces.get(key)
if existing is None:
self.children.append(schema)
self.namespaces[key] = schema
else:
existing.root.children += schema.root.children
existing.root.nsprefixes.update(schema.root.nsprefixes)
|
[
"def",
"add",
"(",
"self",
",",
"schema",
")",
":",
"key",
"=",
"schema",
".",
"tns",
"[",
"1",
"]",
"existing",
"=",
"self",
".",
"namespaces",
".",
"get",
"(",
"key",
")",
"if",
"existing",
"is",
"None",
":",
"self",
".",
"children",
".",
"append",
"(",
"schema",
")",
"self",
".",
"namespaces",
"[",
"key",
"]",
"=",
"schema",
"else",
":",
"existing",
".",
"root",
".",
"children",
"+=",
"schema",
".",
"root",
".",
"children",
"existing",
".",
"root",
".",
"nsprefixes",
".",
"update",
"(",
"schema",
".",
"root",
".",
"nsprefixes",
")"
] |
Add a schema node to the collection. Schema(s) within the same target
namespace are consolidated.
@param schema: A schema object.
@type schema: (L{Schema})
|
[
"Add",
"a",
"schema",
"node",
"to",
"the",
"collection",
".",
"Schema",
"(",
"s",
")",
"within",
"the",
"same",
"target",
"namespace",
"are",
"consolidated",
"."
] |
python
|
train
|
dhylands/rshell
|
rshell/main.py
|
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L2390-L2427
|
def repl_serial_to_stdout(self, dev):
"""Runs as a thread which has a sole purpose of readding bytes from
the serial port and writing them to stdout. Used by do_repl.
"""
with self.serial_reader_running:
try:
save_timeout = dev.timeout
# Set a timeout so that the read returns periodically with no data
# and allows us to check whether the main thread wants us to quit.
dev.timeout = 1
while not self.quit_serial_reader:
try:
char = dev.read(1)
except serial.serialutil.SerialException:
# This happens if the pyboard reboots, or a USB port
# goes away.
return
except TypeError:
# This is a bug in serialposix.py starting with python 3.3
# which causes a TypeError during the handling of the
# select.error. So we treat this the same as
# serial.serialutil.SerialException:
return
except ConnectionResetError:
# This happens over a telnet session, if it resets
return
if not char:
# This means that the read timed out. We'll check the quit
# flag and return if needed
if self.quit_when_no_output:
break
continue
self.stdout.write(char)
self.stdout.flush()
dev.timeout = save_timeout
except DeviceError:
# The device is no longer present.
return
|
[
"def",
"repl_serial_to_stdout",
"(",
"self",
",",
"dev",
")",
":",
"with",
"self",
".",
"serial_reader_running",
":",
"try",
":",
"save_timeout",
"=",
"dev",
".",
"timeout",
"# Set a timeout so that the read returns periodically with no data",
"# and allows us to check whether the main thread wants us to quit.",
"dev",
".",
"timeout",
"=",
"1",
"while",
"not",
"self",
".",
"quit_serial_reader",
":",
"try",
":",
"char",
"=",
"dev",
".",
"read",
"(",
"1",
")",
"except",
"serial",
".",
"serialutil",
".",
"SerialException",
":",
"# This happens if the pyboard reboots, or a USB port",
"# goes away.",
"return",
"except",
"TypeError",
":",
"# This is a bug in serialposix.py starting with python 3.3",
"# which causes a TypeError during the handling of the",
"# select.error. So we treat this the same as",
"# serial.serialutil.SerialException:",
"return",
"except",
"ConnectionResetError",
":",
"# This happens over a telnet session, if it resets",
"return",
"if",
"not",
"char",
":",
"# This means that the read timed out. We'll check the quit",
"# flag and return if needed",
"if",
"self",
".",
"quit_when_no_output",
":",
"break",
"continue",
"self",
".",
"stdout",
".",
"write",
"(",
"char",
")",
"self",
".",
"stdout",
".",
"flush",
"(",
")",
"dev",
".",
"timeout",
"=",
"save_timeout",
"except",
"DeviceError",
":",
"# The device is no longer present.",
"return"
] |
Runs as a thread which has a sole purpose of reading bytes from
the serial port and writing them to stdout. Used by do_repl.
|
[
"Runs",
"as",
"a",
"thread",
"which",
"has",
"a",
"sole",
"purpose",
"of",
"readding",
"bytes",
"from",
"the",
"serial",
"port",
"and",
"writing",
"them",
"to",
"stdout",
".",
"Used",
"by",
"do_repl",
"."
] |
python
|
train
|
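A stripped-down sketch of the pattern used in the record above: a background thread that polls a serial device with a short read timeout and mirrors the bytes to stdout. The port name and baud rate are hypothetical, and rshell's device-specific error handling is omitted.

# Illustrative sketch only; requires pyserial, port/baudrate are placeholders.
import sys
import threading
import serial

stop_reader = threading.Event()

def serial_to_stdout(port="/dev/ttyUSB0", baudrate=115200):
    dev = serial.Serial(port, baudrate=baudrate, timeout=1)   # 1 s timeout keeps the loop responsive
    try:
        while not stop_reader.is_set():
            data = dev.read(1)                # returns b"" when the read times out
            if data:
                sys.stdout.buffer.write(data)
                sys.stdout.flush()
    except serial.SerialException:
        return                                # device rebooted or the USB port went away
    finally:
        dev.close()

reader = threading.Thread(target=serial_to_stdout, daemon=True)
reader.start()
# ... later: stop_reader.set(); reader.join()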
henzk/ape
|
ape/__init__.py
|
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/__init__.py#L145-L161
|
def print_task_help(self, task, name):
"""
Prints the help for the passed task with the passed name.
:param task: the task function object
:param name: the name of the module.
:return: None
"""
TerminalColor.set('GREEN')
print(get_signature(name, task))
# TODO: print the location does not work properly and sometimes returns None
# print(' => defined in: {}'.format(inspect.getsourcefile(task)))
help_msg = inspect.getdoc(task) or ''
TerminalColor.reset()
print(' ' + help_msg.replace('\n', '\n '))
TerminalColor.reset()
print()
|
[
"def",
"print_task_help",
"(",
"self",
",",
"task",
",",
"name",
")",
":",
"TerminalColor",
".",
"set",
"(",
"'GREEN'",
")",
"print",
"(",
"get_signature",
"(",
"name",
",",
"task",
")",
")",
"# TODO: print the location does not work properly and sometimes returns None",
"# print(' => defined in: {}'.format(inspect.getsourcefile(task)))",
"help_msg",
"=",
"inspect",
".",
"getdoc",
"(",
"task",
")",
"or",
"''",
"TerminalColor",
".",
"reset",
"(",
")",
"print",
"(",
"' '",
"+",
"help_msg",
".",
"replace",
"(",
"'\\n'",
",",
"'\\n '",
")",
")",
"TerminalColor",
".",
"reset",
"(",
")",
"print",
"(",
")"
] |
Prints the help for the passed task with the passed name.
:param task: the task function object
:param name: the name of the module.
:return: None
|
[
"Prints",
"the",
"help",
"for",
"the",
"passed",
"task",
"with",
"the",
"passed",
"name",
".",
":",
"param",
"task",
":",
"the",
"task",
"function",
"object",
":",
"param",
"name",
":",
"the",
"name",
"of",
"the",
"module",
".",
":",
"return",
":",
"None"
] |
python
|
train
|
bitshares/python-bitshares
|
bitshares/bitshares.py
|
https://github.com/bitshares/python-bitshares/blob/8a3b5954a6abcaaff7c6a5c41d910e58eea3142f/bitshares/bitshares.py#L989-L1062
|
def publish_price_feed(
self, symbol, settlement_price, cer=None, mssr=110, mcr=200, account=None
):
""" Publish a price feed for a market-pegged asset
:param str symbol: Symbol of the asset to publish feed for
:param bitshares.price.Price settlement_price: Price for settlement
:param bitshares.price.Price cer: Core exchange Rate (default
``settlement_price + 5%``)
:param float mssr: Percentage for max short squeeze ratio (default:
110%)
:param float mcr: Percentage for maintenance collateral ratio
(default: 200%)
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
.. note:: The ``account`` needs to be allowed to produce a
price feed for ``symbol``. For witness produced
feeds this means ``account`` is a witness account!
"""
assert mcr > 100
assert mssr > 100
assert isinstance(
settlement_price, Price
), "settlement_price needs to be instance of `bitshares.price.Price`!"
assert cer is None or isinstance(
cer, Price
), "cer needs to be instance of `bitshares.price.Price`!"
if not account:
if "default_account" in self.config:
account = self.config["default_account"]
if not account:
raise ValueError("You need to provide an account")
account = Account(account, blockchain_instance=self)
asset = Asset(symbol, blockchain_instance=self, full=True)
backing_asset = asset["bitasset_data"]["options"]["short_backing_asset"]
assert (
asset["id"] == settlement_price["base"]["asset"]["id"]
or asset["id"] == settlement_price["quote"]["asset"]["id"]
), "Price needs to contain the asset of the symbol you'd like to produce a feed for!"
assert asset.is_bitasset, "Symbol needs to be a bitasset!"
assert (
settlement_price["base"]["asset"]["id"] == backing_asset
or settlement_price["quote"]["asset"]["id"] == backing_asset
), "The Price needs to be relative to the backing collateral!"
settlement_price = settlement_price.as_base(symbol)
if cer:
cer = cer.as_base(symbol)
if cer["quote"]["asset"]["id"] != "1.3.0":
raise ValueError("CER must be defined against core asset '1.3.0'")
else:
if settlement_price["quote"]["asset"]["id"] != "1.3.0":
raise ValueError(
"CER must be manually provided because it relates to core asset '1.3.0'"
)
cer = settlement_price.as_quote(symbol) * 0.95
op = operations.Asset_publish_feed(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"publisher": account["id"],
"asset_id": asset["id"],
"feed": {
"settlement_price": settlement_price.as_base(symbol).json(),
"core_exchange_rate": cer.as_base(symbol).json(),
"maximum_short_squeeze_ratio": int(mssr * 10),
"maintenance_collateral_ratio": int(mcr * 10),
},
"prefix": self.prefix,
}
)
return self.finalizeOp(op, account["name"], "active")
|
[
"def",
"publish_price_feed",
"(",
"self",
",",
"symbol",
",",
"settlement_price",
",",
"cer",
"=",
"None",
",",
"mssr",
"=",
"110",
",",
"mcr",
"=",
"200",
",",
"account",
"=",
"None",
")",
":",
"assert",
"mcr",
">",
"100",
"assert",
"mssr",
">",
"100",
"assert",
"isinstance",
"(",
"settlement_price",
",",
"Price",
")",
",",
"\"settlement_price needs to be instance of `bitshares.price.Price`!\"",
"assert",
"cer",
"is",
"None",
"or",
"isinstance",
"(",
"cer",
",",
"Price",
")",
",",
"\"cer needs to be instance of `bitshares.price.Price`!\"",
"if",
"not",
"account",
":",
"if",
"\"default_account\"",
"in",
"self",
".",
"config",
":",
"account",
"=",
"self",
".",
"config",
"[",
"\"default_account\"",
"]",
"if",
"not",
"account",
":",
"raise",
"ValueError",
"(",
"\"You need to provide an account\"",
")",
"account",
"=",
"Account",
"(",
"account",
",",
"blockchain_instance",
"=",
"self",
")",
"asset",
"=",
"Asset",
"(",
"symbol",
",",
"blockchain_instance",
"=",
"self",
",",
"full",
"=",
"True",
")",
"backing_asset",
"=",
"asset",
"[",
"\"bitasset_data\"",
"]",
"[",
"\"options\"",
"]",
"[",
"\"short_backing_asset\"",
"]",
"assert",
"(",
"asset",
"[",
"\"id\"",
"]",
"==",
"settlement_price",
"[",
"\"base\"",
"]",
"[",
"\"asset\"",
"]",
"[",
"\"id\"",
"]",
"or",
"asset",
"[",
"\"id\"",
"]",
"==",
"settlement_price",
"[",
"\"quote\"",
"]",
"[",
"\"asset\"",
"]",
"[",
"\"id\"",
"]",
")",
",",
"\"Price needs to contain the asset of the symbol you'd like to produce a feed for!\"",
"assert",
"asset",
".",
"is_bitasset",
",",
"\"Symbol needs to be a bitasset!\"",
"assert",
"(",
"settlement_price",
"[",
"\"base\"",
"]",
"[",
"\"asset\"",
"]",
"[",
"\"id\"",
"]",
"==",
"backing_asset",
"or",
"settlement_price",
"[",
"\"quote\"",
"]",
"[",
"\"asset\"",
"]",
"[",
"\"id\"",
"]",
"==",
"backing_asset",
")",
",",
"\"The Price needs to be relative to the backing collateral!\"",
"settlement_price",
"=",
"settlement_price",
".",
"as_base",
"(",
"symbol",
")",
"if",
"cer",
":",
"cer",
"=",
"cer",
".",
"as_base",
"(",
"symbol",
")",
"if",
"cer",
"[",
"\"quote\"",
"]",
"[",
"\"asset\"",
"]",
"[",
"\"id\"",
"]",
"!=",
"\"1.3.0\"",
":",
"raise",
"ValueError",
"(",
"\"CER must be defined against core asset '1.3.0'\"",
")",
"else",
":",
"if",
"settlement_price",
"[",
"\"quote\"",
"]",
"[",
"\"asset\"",
"]",
"[",
"\"id\"",
"]",
"!=",
"\"1.3.0\"",
":",
"raise",
"ValueError",
"(",
"\"CER must be manually provided because it relates to core asset '1.3.0'\"",
")",
"cer",
"=",
"settlement_price",
".",
"as_quote",
"(",
"symbol",
")",
"*",
"0.95",
"op",
"=",
"operations",
".",
"Asset_publish_feed",
"(",
"*",
"*",
"{",
"\"fee\"",
":",
"{",
"\"amount\"",
":",
"0",
",",
"\"asset_id\"",
":",
"\"1.3.0\"",
"}",
",",
"\"publisher\"",
":",
"account",
"[",
"\"id\"",
"]",
",",
"\"asset_id\"",
":",
"asset",
"[",
"\"id\"",
"]",
",",
"\"feed\"",
":",
"{",
"\"settlement_price\"",
":",
"settlement_price",
".",
"as_base",
"(",
"symbol",
")",
".",
"json",
"(",
")",
",",
"\"core_exchange_rate\"",
":",
"cer",
".",
"as_base",
"(",
"symbol",
")",
".",
"json",
"(",
")",
",",
"\"maximum_short_squeeze_ratio\"",
":",
"int",
"(",
"mssr",
"*",
"10",
")",
",",
"\"maintenance_collateral_ratio\"",
":",
"int",
"(",
"mcr",
"*",
"10",
")",
",",
"}",
",",
"\"prefix\"",
":",
"self",
".",
"prefix",
",",
"}",
")",
"return",
"self",
".",
"finalizeOp",
"(",
"op",
",",
"account",
"[",
"\"name\"",
"]",
",",
"\"active\"",
")"
] |
Publish a price feed for a market-pegged asset
:param str symbol: Symbol of the asset to publish feed for
:param bitshares.price.Price settlement_price: Price for settlement
:param bitshares.price.Price cer: Core exchange Rate (default
``settlement_price + 5%``)
:param float mssr: Percentage for max short squeeze ratio (default:
110%)
:param float mcr: Percentage for maintenance collateral ratio
(default: 200%)
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
.. note:: The ``account`` needs to be allowed to produce a
price feed for ``symbol``. For witness produced
feeds this means ``account`` is a witness account!
|
[
"Publish",
"a",
"price",
"feed",
"for",
"a",
"market",
"-",
"pegged",
"asset"
] |
python
|
train
|
saltstack/salt
|
salt/modules/useradd.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/useradd.py#L96-L120
|
def _update_gecos(name, key, value, root=None):
'''
Common code to change a user's GECOS information
'''
if value is None:
value = ''
elif not isinstance(value, six.string_types):
value = six.text_type(value)
else:
value = salt.utils.stringutils.to_unicode(value)
pre_info = _get_gecos(name, root=root)
if not pre_info:
return False
if value == pre_info[key]:
return True
gecos_data = copy.deepcopy(pre_info)
gecos_data[key] = value
cmd = ['usermod']
if root is not None and __grains__['kernel'] != 'AIX':
cmd.extend(('-R', root))
cmd.extend(('-c', _build_gecos(gecos_data), name))
__salt__['cmd.run'](cmd, python_shell=False)
return _get_gecos(name, root=root).get(key) == value
|
[
"def",
"_update_gecos",
"(",
"name",
",",
"key",
",",
"value",
",",
"root",
"=",
"None",
")",
":",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"''",
"elif",
"not",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"value",
"=",
"six",
".",
"text_type",
"(",
"value",
")",
"else",
":",
"value",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"value",
")",
"pre_info",
"=",
"_get_gecos",
"(",
"name",
",",
"root",
"=",
"root",
")",
"if",
"not",
"pre_info",
":",
"return",
"False",
"if",
"value",
"==",
"pre_info",
"[",
"key",
"]",
":",
"return",
"True",
"gecos_data",
"=",
"copy",
".",
"deepcopy",
"(",
"pre_info",
")",
"gecos_data",
"[",
"key",
"]",
"=",
"value",
"cmd",
"=",
"[",
"'usermod'",
"]",
"if",
"root",
"is",
"not",
"None",
"and",
"__grains__",
"[",
"'kernel'",
"]",
"!=",
"'AIX'",
":",
"cmd",
".",
"extend",
"(",
"(",
"'-R'",
",",
"root",
")",
")",
"cmd",
".",
"extend",
"(",
"(",
"'-c'",
",",
"_build_gecos",
"(",
"gecos_data",
")",
",",
"name",
")",
")",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
")",
"return",
"_get_gecos",
"(",
"name",
",",
"root",
"=",
"root",
")",
".",
"get",
"(",
"key",
")",
"==",
"value"
] |
Common code to change a user's GECOS information
|
[
"Common",
"code",
"to",
"change",
"a",
"user",
"s",
"GECOS",
"information"
] |
python
|
train
|
google/apitools
|
apitools/base/protorpclite/protojson.py
|
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/protojson.py#L178-L193
|
def encode_message(self, message):
"""Encode Message instance to JSON string.
Args:
Message instance to encode in to JSON string.
Returns:
String encoding of Message instance in protocol JSON format.
Raises:
messages.ValidationError if message is not initialized.
"""
message.check_initialized()
return json.dumps(message, cls=MessageJSONEncoder,
protojson_protocol=self)
|
[
"def",
"encode_message",
"(",
"self",
",",
"message",
")",
":",
"message",
".",
"check_initialized",
"(",
")",
"return",
"json",
".",
"dumps",
"(",
"message",
",",
"cls",
"=",
"MessageJSONEncoder",
",",
"protojson_protocol",
"=",
"self",
")"
] |
Encode Message instance to JSON string.
Args:
Message instance to encode in to JSON string.
Returns:
String encoding of Message instance in protocol JSON format.
Raises:
messages.ValidationError if message is not initialized.
|
[
"Encode",
"Message",
"instance",
"to",
"JSON",
"string",
"."
] |
python
|
train
|
apple/turicreate
|
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
|
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L409-L415
|
def _VarintBytes(value):
"""Encode the given integer as a varint and return the bytes. This is only
called at startup time so it doesn't need to be fast."""
pieces = []
_EncodeVarint(pieces.append, value)
return b"".join(pieces)
|
[
"def",
"_VarintBytes",
"(",
"value",
")",
":",
"pieces",
"=",
"[",
"]",
"_EncodeVarint",
"(",
"pieces",
".",
"append",
",",
"value",
")",
"return",
"b\"\"",
".",
"join",
"(",
"pieces",
")"
] |
Encode the given integer as a varint and return the bytes. This is only
called at startup time so it doesn't need to be fast.
|
[
"Encode",
"the",
"given",
"integer",
"as",
"a",
"varint",
"and",
"return",
"the",
"bytes",
".",
"This",
"is",
"only",
"called",
"at",
"startup",
"time",
"so",
"it",
"doesn",
"t",
"need",
"to",
"be",
"fast",
"."
] |
python
|
train
|
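For context on what _VarintBytes produces, here is a self-contained sketch of unsigned base-128 varint encoding; it is an illustrative re-implementation, not protobuf's internal _EncodeVarint.

def encode_varint(value):
    # 7 payload bits per byte, least-significant group first; MSB set on every byte except the last.
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)   # continuation bit: more bytes follow
        else:
            out.append(byte)
            return bytes(out)

assert encode_varint(1) == b"\x01"
assert encode_varint(300) == b"\xac\x02"   # the classic protobuf wire-format example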
ulule/django-linguist
|
linguist/utils.py
|
https://github.com/ulule/django-linguist/blob/d2b95a6ab921039d56d5eeb352badfe5be9e8f77/linguist/utils.py#L185-L212
|
def get_model_string(model_name):
"""
Returns the model string notation Django uses for lazily loaded ForeignKeys
(eg 'auth.User') to prevent circular imports.
This is needed to allow our crazy custom model usage.
"""
setting_name = "LINGUIST_%s_MODEL" % model_name.upper().replace("_", "")
class_path = getattr(settings, setting_name, None)
if not class_path:
return "linguist.%s" % model_name
elif isinstance(class_path, basestring):
parts = class_path.split(".")
try:
index = parts.index("models") - 1
except ValueError:
raise exceptions.ImproperlyConfigured(
CLASS_PATH_ERROR % (setting_name, setting_name)
)
app_label, model_name = parts[index], parts[-1]
else:
try:
class_path, app_label = class_path
model_name = class_path.split(".")[-1]
except:
raise exceptions.ImproperlyConfigured(
CLASS_PATH_ERROR % (setting_name, setting_name)
)
return "%s.%s" % (app_label, model_name)
|
[
"def",
"get_model_string",
"(",
"model_name",
")",
":",
"setting_name",
"=",
"\"LINGUIST_%s_MODEL\"",
"%",
"model_name",
".",
"upper",
"(",
")",
".",
"replace",
"(",
"\"_\"",
",",
"\"\"",
")",
"class_path",
"=",
"getattr",
"(",
"settings",
",",
"setting_name",
",",
"None",
")",
"if",
"not",
"class_path",
":",
"return",
"\"linguist.%s\"",
"%",
"model_name",
"elif",
"isinstance",
"(",
"class_path",
",",
"basestring",
")",
":",
"parts",
"=",
"class_path",
".",
"split",
"(",
"\".\"",
")",
"try",
":",
"index",
"=",
"parts",
".",
"index",
"(",
"\"models\"",
")",
"-",
"1",
"except",
"ValueError",
":",
"raise",
"exceptions",
".",
"ImproperlyConfigured",
"(",
"CLASS_PATH_ERROR",
"%",
"(",
"setting_name",
",",
"setting_name",
")",
")",
"app_label",
",",
"model_name",
"=",
"parts",
"[",
"index",
"]",
",",
"parts",
"[",
"-",
"1",
"]",
"else",
":",
"try",
":",
"class_path",
",",
"app_label",
"=",
"class_path",
"model_name",
"=",
"class_path",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"1",
"]",
"except",
":",
"raise",
"exceptions",
".",
"ImproperlyConfigured",
"(",
"CLASS_PATH_ERROR",
"%",
"(",
"setting_name",
",",
"setting_name",
")",
")",
"return",
"\"%s.%s\"",
"%",
"(",
"app_label",
",",
"model_name",
")"
] |
Returns the model string notation Django uses for lazily loaded ForeignKeys
(eg 'auth.User') to prevent circular imports.
This is needed to allow our crazy custom model usage.
|
[
"Returns",
"the",
"model",
"string",
"notation",
"Django",
"uses",
"for",
"lazily",
"loaded",
"ForeignKeys",
"(",
"eg",
"auth",
".",
"User",
")",
"to",
"prevent",
"circular",
"imports",
".",
"This",
"is",
"needed",
"to",
"allow",
"our",
"crazy",
"custom",
"model",
"usage",
"."
] |
python
|
train
|
mikekatz04/BOWIE
|
snr_calculator_folder/gwsnrcalc/utils/sensitivity.py
|
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/sensitivity.py#L61-L136
|
def _prep_noise_interpolants(self):
"""Construct interpolated sensitivity curves
This will construct the interpolated sensitivity curves
using scipy.interpolate.interp1d. It will add wd noise
if that is requested.
Raises:
ValueError: ``len(noise_type_in) != len(sensitivity_curves)``
ValueError: Issue with sensitivity curve type provided.
"""
noise_lists = {}
self.noise_interpolants = {}
if isinstance(self.sensitivity_curves, str):
self.sensitivity_curves = [self.sensitivity_curves]
if isinstance(self.noise_type_in, list):
if len(self.noise_type_in) != len(self.sensitivity_curves):
raise ValueError('noise_type_in must have same shape as sensitivity_curves if it is'
+ 'provided as a list.'
+ 'If all curves are of the same type, provide a string.')
else:
assert isinstance(self.noise_type_in, str)
self.noise_type_in = [self.noise_type_in for _ in self.sensitivity_curves]
if isinstance(self.signal_type, str):
self.signal_type = [self.signal_type]
# read in all the noise curves
for num, sc in enumerate(self.sensitivity_curves):
if isinstance(sc, str):
f, h_n = read_noise_curve(sc, noise_type_in=self.noise_type_in[num],
noise_type_out='char_strain')
if sc[-4:] == '.txt':
key = sc.split('.')[0].split('/')[-1]
else:
key = sc
elif isinstance(sc, list):
            # TODO: add to docs if inputting special noise curve, make sure it's char_strain
f, h_n = sc
key = str(num)
else:
raise ValueError('Sensitivity curves must either be string'
+ 'or list containing f_n and asd_n.')
noise_lists[key] = [f, h_n]
# add wd noise
if str(self.add_wd_noise).lower() in ['true', 'both', 'yes']:
if isinstance(self.wd_noise, str):
f_n_wd, h_n_wd = read_noise_curve(self.wd_noise,
noise_type_in=self.wd_noise_type_in,
noise_type_out='char_strain')
        elif isinstance(self.wd_noise, list):
f_n_wd, h_n_wd = self.wd_noise
trans_dict = {}
for sc in noise_lists.keys():
f_n, h_n = noise_lists[sc]
if self.add_wd_noise.lower() == 'both':
trans_dict[sc] = [f_n, h_n]
f_n, h_n = combine_with_wd_noise(f_n, h_n, f_n_wd, h_n_wd)
trans_dict[sc + '_wd'] = [f_n, h_n]
noise_lists = trans_dict
# interpolate
for sc in noise_lists:
f_n, h_n = noise_lists[sc]
self.noise_interpolants[sc] = (interpolate.interp1d(f_n, h_n,
bounds_error=False, fill_value=1e30))
return
|
[
"def",
"_prep_noise_interpolants",
"(",
"self",
")",
":",
"noise_lists",
"=",
"{",
"}",
"self",
".",
"noise_interpolants",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"self",
".",
"sensitivity_curves",
",",
"str",
")",
":",
"self",
".",
"sensitivity_curves",
"=",
"[",
"self",
".",
"sensitivity_curves",
"]",
"if",
"isinstance",
"(",
"self",
".",
"noise_type_in",
",",
"list",
")",
":",
"if",
"len",
"(",
"self",
".",
"noise_type_in",
")",
"!=",
"len",
"(",
"self",
".",
"sensitivity_curves",
")",
":",
"raise",
"ValueError",
"(",
"'noise_type_in must have same shape as sensitivity_curves if it is'",
"+",
"'provided as a list.'",
"+",
"'If all curves are of the same type, provide a string.'",
")",
"else",
":",
"assert",
"isinstance",
"(",
"self",
".",
"noise_type_in",
",",
"str",
")",
"self",
".",
"noise_type_in",
"=",
"[",
"self",
".",
"noise_type_in",
"for",
"_",
"in",
"self",
".",
"sensitivity_curves",
"]",
"if",
"isinstance",
"(",
"self",
".",
"signal_type",
",",
"str",
")",
":",
"self",
".",
"signal_type",
"=",
"[",
"self",
".",
"signal_type",
"]",
"# read in all the noise curves",
"for",
"num",
",",
"sc",
"in",
"enumerate",
"(",
"self",
".",
"sensitivity_curves",
")",
":",
"if",
"isinstance",
"(",
"sc",
",",
"str",
")",
":",
"f",
",",
"h_n",
"=",
"read_noise_curve",
"(",
"sc",
",",
"noise_type_in",
"=",
"self",
".",
"noise_type_in",
"[",
"num",
"]",
",",
"noise_type_out",
"=",
"'char_strain'",
")",
"if",
"sc",
"[",
"-",
"4",
":",
"]",
"==",
"'.txt'",
":",
"key",
"=",
"sc",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"else",
":",
"key",
"=",
"sc",
"elif",
"isinstance",
"(",
"sc",
",",
"list",
")",
":",
"# TODO: add to docs if inputing special noise curve, make sure its char_strain",
"f",
",",
"h_n",
"=",
"sc",
"key",
"=",
"str",
"(",
"num",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Sensitivity curves must either be string'",
"+",
"'or list containing f_n and asd_n.'",
")",
"noise_lists",
"[",
"key",
"]",
"=",
"[",
"f",
",",
"h_n",
"]",
"# add wd noise",
"if",
"str",
"(",
"self",
".",
"add_wd_noise",
")",
".",
"lower",
"(",
")",
"in",
"[",
"'true'",
",",
"'both'",
",",
"'yes'",
"]",
":",
"if",
"isinstance",
"(",
"self",
".",
"wd_noise",
",",
"str",
")",
":",
"f_n_wd",
",",
"h_n_wd",
"=",
"read_noise_curve",
"(",
"self",
".",
"wd_noise",
",",
"noise_type_in",
"=",
"self",
".",
"wd_noise_type_in",
",",
"noise_type_out",
"=",
"'char_strain'",
")",
"elif",
"isinstance",
"(",
"self",
",",
"wd_noise",
",",
"list",
")",
":",
"f_n_wd",
",",
"h_n_wd",
"=",
"self",
".",
"wd_noise",
"trans_dict",
"=",
"{",
"}",
"for",
"sc",
"in",
"noise_lists",
".",
"keys",
"(",
")",
":",
"f_n",
",",
"h_n",
"=",
"noise_lists",
"[",
"sc",
"]",
"if",
"self",
".",
"add_wd_noise",
".",
"lower",
"(",
")",
"==",
"'both'",
":",
"trans_dict",
"[",
"sc",
"]",
"=",
"[",
"f_n",
",",
"h_n",
"]",
"f_n",
",",
"h_n",
"=",
"combine_with_wd_noise",
"(",
"f_n",
",",
"h_n",
",",
"f_n_wd",
",",
"h_n_wd",
")",
"trans_dict",
"[",
"sc",
"+",
"'_wd'",
"]",
"=",
"[",
"f_n",
",",
"h_n",
"]",
"noise_lists",
"=",
"trans_dict",
"# interpolate",
"for",
"sc",
"in",
"noise_lists",
":",
"f_n",
",",
"h_n",
"=",
"noise_lists",
"[",
"sc",
"]",
"self",
".",
"noise_interpolants",
"[",
"sc",
"]",
"=",
"(",
"interpolate",
".",
"interp1d",
"(",
"f_n",
",",
"h_n",
",",
"bounds_error",
"=",
"False",
",",
"fill_value",
"=",
"1e30",
")",
")",
"return"
] |
Construct interpolated sensitivity curves
This will construct the interpolated sensitivity curves
using scipy.interpolate.interp1d. It will add wd noise
if that is requested.
Raises:
ValueError: ``len(noise_type_in) != len(sensitivity_curves)``
ValueError: Issue with sensitivity curve type provided.
|
[
"Construct",
"interpolated",
"sensitivity",
"curves"
] |
python
|
train
|
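A small sketch of the interpolation step this record relies on: scipy's interp1d with bounds_error=False and a huge fill_value, so frequencies outside the tabulated band return an effectively infinite noise level. The frequency grid and strain values below are hypothetical placeholders.

# Illustrative sketch only; f_n / h_n are made-up placeholder data.
import numpy as np
from scipy import interpolate

f_n = np.logspace(-4, 0, 200)                  # hypothetical frequency grid [Hz]
h_n = 1e-20 * (f_n / 1e-2) ** -0.5             # hypothetical characteristic-strain curve

noise = interpolate.interp1d(f_n, h_n, bounds_error=False, fill_value=1e30)

print(noise(1e-2))    # interpolated inside the tabulated band
print(noise(10.0))    # outside the band -> 1e30, i.e. effectively infinite noise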
pandas-dev/pandas
|
pandas/core/indexes/base.py
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L153-L163
|
def _new_Index(cls, d):
"""
This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__.
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
|
[
"def",
"_new_Index",
"(",
"cls",
",",
"d",
")",
":",
"# required for backward compat, because PI can't be instantiated with",
"# ordinals through __new__ GH #13277",
"if",
"issubclass",
"(",
"cls",
",",
"ABCPeriodIndex",
")",
":",
"from",
"pandas",
".",
"core",
".",
"indexes",
".",
"period",
"import",
"_new_PeriodIndex",
"return",
"_new_PeriodIndex",
"(",
"cls",
",",
"*",
"*",
"d",
")",
"return",
"cls",
".",
"__new__",
"(",
"cls",
",",
"*",
"*",
"d",
")"
] |
This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__.
|
[
"This",
"is",
"called",
"upon",
"unpickling",
"rather",
"than",
"the",
"default",
"which",
"doesn",
"t",
"have",
"arguments",
"and",
"breaks",
"__new__",
"."
] |
python
|
train
|
djaodjin/djaodjin-deployutils
|
src/djd.py
|
https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/src/djd.py#L100-L103
|
def pub_upload(args, location=None, prefix=""):
"""Upload theme packages to the stage *location*.
"""
upload(location, remotes=args, prefix=prefix)
|
[
"def",
"pub_upload",
"(",
"args",
",",
"location",
"=",
"None",
",",
"prefix",
"=",
"\"\"",
")",
":",
"upload",
"(",
"location",
",",
"remotes",
"=",
"args",
",",
"prefix",
"=",
"prefix",
")"
] |
Upload theme packages to the stage *location*.
|
[
"Upload",
"theme",
"packages",
"to",
"the",
"stage",
"*",
"location",
"*",
"."
] |
python
|
train
|
osrg/ryu
|
ryu/services/protocols/bgp/core_managers/table_manager.py
|
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/core_managers/table_manager.py#L433-L449
|
def re_install_net_ctrl_paths(self, vrf_table):
"""Re-installs paths from NC with current BGP policy.
Iterates over known paths from NC installed in `vrf4_table` and
adds new path with path attributes as per current VRF configuration.
"""
assert vrf_table
for dest in vrf_table.values():
for path in dest.known_path_list:
if path.source is None:
vrf_table.insert_vrf_path(
nlri=path.nlri,
next_hop=path.nexthop,
gen_lbl=True
)
LOG.debug('Re-installed NC paths with current policy for table %s.',
vrf_table)
|
[
"def",
"re_install_net_ctrl_paths",
"(",
"self",
",",
"vrf_table",
")",
":",
"assert",
"vrf_table",
"for",
"dest",
"in",
"vrf_table",
".",
"values",
"(",
")",
":",
"for",
"path",
"in",
"dest",
".",
"known_path_list",
":",
"if",
"path",
".",
"source",
"is",
"None",
":",
"vrf_table",
".",
"insert_vrf_path",
"(",
"nlri",
"=",
"path",
".",
"nlri",
",",
"next_hop",
"=",
"path",
".",
"nexthop",
",",
"gen_lbl",
"=",
"True",
")",
"LOG",
".",
"debug",
"(",
"'Re-installed NC paths with current policy for table %s.'",
",",
"vrf_table",
")"
] |
Re-installs paths from NC with current BGP policy.
Iterates over known paths from NC installed in `vrf4_table` and
adds new path with path attributes as per current VRF configuration.
|
[
"Re",
"-",
"installs",
"paths",
"from",
"NC",
"with",
"current",
"BGP",
"policy",
"."
] |
python
|
train
|
BerkeleyAutomation/perception
|
perception/video_recorder.py
|
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/video_recorder.py#L142-L153
|
def stop(self):
""" Stop the camera process. """
if not self._started:
raise Exception("Cannot stop a video recorder before starting it!")
self._started = False
if self._actual_camera.is_running:
self._actual_camera.stop()
if self._camera is not None:
try:
self._camera.terminate()
except:
pass
|
[
"def",
"stop",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_started",
":",
"raise",
"Exception",
"(",
"\"Cannot stop a video recorder before starting it!\"",
")",
"self",
".",
"_started",
"=",
"False",
"if",
"self",
".",
"_actual_camera",
".",
"is_running",
":",
"self",
".",
"_actual_camera",
".",
"stop",
"(",
")",
"if",
"self",
".",
"_camera",
"is",
"not",
"None",
":",
"try",
":",
"self",
".",
"_camera",
".",
"terminate",
"(",
")",
"except",
":",
"pass"
] |
Stop the camera process.
|
[
"Stop",
"the",
"camera",
"process",
"."
] |
python
|
train
|
kurtbrose/faststat
|
faststat/faststat.py
|
https://github.com/kurtbrose/faststat/blob/5060c0e10acaafd4a48de3f16869bfccc1deb44a/faststat/faststat.py#L325-L347
|
def pformat(self, prefix=()):
'''
Makes a pretty ASCII format of the data, suitable for
displaying in a console or saving to a text file.
Returns a list of lines.
'''
nan = float("nan")
def sformat(segment, stat):
FMT = "n={0}, mean={1}, p50/95={2}/{3}, max={4}"
line_segs = [segment]
for s in [stat]:
p = s.get_percentiles()
p50, p95 = p.get(0.50, nan), p.get(0.95, nan)
line_segs.append(FMT.format(s.n, s.mean, p50, p95, s.max))
return '{0}: {1}'.format(*line_segs)
lines = []
for path in sorted(self.path_stats.keys()):
lines.append('=====================')
for seg, stat in zip(path, self.path_stats[path]):
lines.append(sformat(seg, stat))
return lines
|
[
"def",
"pformat",
"(",
"self",
",",
"prefix",
"=",
"(",
")",
")",
":",
"nan",
"=",
"float",
"(",
"\"nan\"",
")",
"def",
"sformat",
"(",
"segment",
",",
"stat",
")",
":",
"FMT",
"=",
"\"n={0}, mean={1}, p50/95={2}/{3}, max={4}\"",
"line_segs",
"=",
"[",
"segment",
"]",
"for",
"s",
"in",
"[",
"stat",
"]",
":",
"p",
"=",
"s",
".",
"get_percentiles",
"(",
")",
"p50",
",",
"p95",
"=",
"p",
".",
"get",
"(",
"0.50",
",",
"nan",
")",
",",
"p",
".",
"get",
"(",
"0.95",
",",
"nan",
")",
"line_segs",
".",
"append",
"(",
"FMT",
".",
"format",
"(",
"s",
".",
"n",
",",
"s",
".",
"mean",
",",
"p50",
",",
"p95",
",",
"s",
".",
"max",
")",
")",
"return",
"'{0}: {1}'",
".",
"format",
"(",
"*",
"line_segs",
")",
"lines",
"=",
"[",
"]",
"for",
"path",
"in",
"sorted",
"(",
"self",
".",
"path_stats",
".",
"keys",
"(",
")",
")",
":",
"lines",
".",
"append",
"(",
"'====================='",
")",
"for",
"seg",
",",
"stat",
"in",
"zip",
"(",
"path",
",",
"self",
".",
"path_stats",
"[",
"path",
"]",
")",
":",
"lines",
".",
"append",
"(",
"sformat",
"(",
"seg",
",",
"stat",
")",
")",
"return",
"lines"
] |
Makes a pretty ASCII format of the data, suitable for
displaying in a console or saving to a text file.
Returns a list of lines.
|
[
"Makes",
"a",
"pretty",
"ASCII",
"format",
"of",
"the",
"data",
"suitable",
"for",
"displaying",
"in",
"a",
"console",
"or",
"saving",
"to",
"a",
"text",
"file",
".",
"Returns",
"a",
"list",
"of",
"lines",
"."
] |
python
|
train
|
openstack/python-monascaclient
|
monascaclient/v2_0/shell.py
|
https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/v2_0/shell.py#L1014-L1047
|
def do_alarm_definition_patch(mc, args):
'''Patch the alarm definition.'''
fields = {}
fields['alarm_id'] = args.id
if args.name:
fields['name'] = args.name
if args.description:
fields['description'] = args.description
if args.expression:
fields['expression'] = args.expression
if args.alarm_actions:
fields['alarm_actions'] = _arg_split_patch_update(args.alarm_actions, patch=True)
if args.ok_actions:
fields['ok_actions'] = _arg_split_patch_update(args.ok_actions, patch=True)
if args.undetermined_actions:
fields['undetermined_actions'] = _arg_split_patch_update(args.undetermined_actions,
patch=True)
if args.actions_enabled:
if args.actions_enabled not in enabled_types:
errmsg = ('Invalid value, not one of [' +
', '.join(enabled_types) + ']')
print(errmsg)
return
fields['actions_enabled'] = args.actions_enabled in ['true', 'True']
if args.severity:
if not _validate_severity(args.severity):
return
fields['severity'] = args.severity
try:
alarm = mc.alarm_definitions.patch(**fields)
except (osc_exc.ClientException, k_exc.HttpError) as he:
raise osc_exc.CommandError('%s\n%s' % (he.message, he.details))
else:
print(jsonutils.dumps(alarm, indent=2))
|
[
"def",
"do_alarm_definition_patch",
"(",
"mc",
",",
"args",
")",
":",
"fields",
"=",
"{",
"}",
"fields",
"[",
"'alarm_id'",
"]",
"=",
"args",
".",
"id",
"if",
"args",
".",
"name",
":",
"fields",
"[",
"'name'",
"]",
"=",
"args",
".",
"name",
"if",
"args",
".",
"description",
":",
"fields",
"[",
"'description'",
"]",
"=",
"args",
".",
"description",
"if",
"args",
".",
"expression",
":",
"fields",
"[",
"'expression'",
"]",
"=",
"args",
".",
"expression",
"if",
"args",
".",
"alarm_actions",
":",
"fields",
"[",
"'alarm_actions'",
"]",
"=",
"_arg_split_patch_update",
"(",
"args",
".",
"alarm_actions",
",",
"patch",
"=",
"True",
")",
"if",
"args",
".",
"ok_actions",
":",
"fields",
"[",
"'ok_actions'",
"]",
"=",
"_arg_split_patch_update",
"(",
"args",
".",
"ok_actions",
",",
"patch",
"=",
"True",
")",
"if",
"args",
".",
"undetermined_actions",
":",
"fields",
"[",
"'undetermined_actions'",
"]",
"=",
"_arg_split_patch_update",
"(",
"args",
".",
"undetermined_actions",
",",
"patch",
"=",
"True",
")",
"if",
"args",
".",
"actions_enabled",
":",
"if",
"args",
".",
"actions_enabled",
"not",
"in",
"enabled_types",
":",
"errmsg",
"=",
"(",
"'Invalid value, not one of ['",
"+",
"', '",
".",
"join",
"(",
"enabled_types",
")",
"+",
"']'",
")",
"print",
"(",
"errmsg",
")",
"return",
"fields",
"[",
"'actions_enabled'",
"]",
"=",
"args",
".",
"actions_enabled",
"in",
"[",
"'true'",
",",
"'True'",
"]",
"if",
"args",
".",
"severity",
":",
"if",
"not",
"_validate_severity",
"(",
"args",
".",
"severity",
")",
":",
"return",
"fields",
"[",
"'severity'",
"]",
"=",
"args",
".",
"severity",
"try",
":",
"alarm",
"=",
"mc",
".",
"alarm_definitions",
".",
"patch",
"(",
"*",
"*",
"fields",
")",
"except",
"(",
"osc_exc",
".",
"ClientException",
",",
"k_exc",
".",
"HttpError",
")",
"as",
"he",
":",
"raise",
"osc_exc",
".",
"CommandError",
"(",
"'%s\\n%s'",
"%",
"(",
"he",
".",
"message",
",",
"he",
".",
"details",
")",
")",
"else",
":",
"print",
"(",
"jsonutils",
".",
"dumps",
"(",
"alarm",
",",
"indent",
"=",
"2",
")",
")"
] |
Patch the alarm definition.
|
[
"Patch",
"the",
"alarm",
"definition",
"."
] |
python
|
train
|
adafruit/Adafruit_Python_GPIO
|
Adafruit_GPIO/I2C.py
|
https://github.com/adafruit/Adafruit_Python_GPIO/blob/a92a23d6b5869663b2bc1ccf78bb11585076a9c4/Adafruit_GPIO/I2C.py#L68-L82
|
def require_repeated_start():
"""Enable repeated start conditions for I2C register reads. This is the
normal behavior for I2C, however on some platforms like the Raspberry Pi
there are bugs which disable repeated starts unless explicitly enabled with
this function. See this thread for more details:
http://www.raspberrypi.org/forums/viewtopic.php?f=44&t=15840
"""
plat = Platform.platform_detect()
if plat == Platform.RASPBERRY_PI and os.path.exists('/sys/module/i2c_bcm2708/parameters/combined'):
# On the Raspberry Pi there is a bug where register reads don't send a
# repeated start condition like the kernel smbus I2C driver functions
# define. As a workaround this bit in the BCM2708 driver sysfs tree can
# be changed to enable I2C repeated starts.
subprocess.check_call('chmod 666 /sys/module/i2c_bcm2708/parameters/combined', shell=True)
subprocess.check_call('echo -n 1 > /sys/module/i2c_bcm2708/parameters/combined', shell=True)
|
[
"def",
"require_repeated_start",
"(",
")",
":",
"plat",
"=",
"Platform",
".",
"platform_detect",
"(",
")",
"if",
"plat",
"==",
"Platform",
".",
"RASPBERRY_PI",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"'/sys/module/i2c_bcm2708/parameters/combined'",
")",
":",
"# On the Raspberry Pi there is a bug where register reads don't send a",
"# repeated start condition like the kernel smbus I2C driver functions",
"# define. As a workaround this bit in the BCM2708 driver sysfs tree can",
"# be changed to enable I2C repeated starts.",
"subprocess",
".",
"check_call",
"(",
"'chmod 666 /sys/module/i2c_bcm2708/parameters/combined'",
",",
"shell",
"=",
"True",
")",
"subprocess",
".",
"check_call",
"(",
"'echo -n 1 > /sys/module/i2c_bcm2708/parameters/combined'",
",",
"shell",
"=",
"True",
")"
] |
Enable repeated start conditions for I2C register reads. This is the
normal behavior for I2C, however on some platforms like the Raspberry Pi
there are bugs which disable repeated starts unless explicitly enabled with
this function. See this thread for more details:
http://www.raspberrypi.org/forums/viewtopic.php?f=44&t=15840
|
[
"Enable",
"repeated",
"start",
"conditions",
"for",
"I2C",
"register",
"reads",
".",
"This",
"is",
"the",
"normal",
"behavior",
"for",
"I2C",
"however",
"on",
"some",
"platforms",
"like",
"the",
"Raspberry",
"Pi",
"there",
"are",
"bugs",
"which",
"disable",
"repeated",
"starts",
"unless",
"explicitly",
"enabled",
"with",
"this",
"function",
".",
"See",
"this",
"thread",
"for",
"more",
"details",
":",
"http",
":",
"//",
"www",
".",
"raspberrypi",
".",
"org",
"/",
"forums",
"/",
"viewtopic",
".",
"php?f",
"=",
"44&t",
"=",
"15840"
] |
python
|
valid
|
sirfoga/pyhal
|
hal/files/models/system.py
|
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/models/system.py#L201-L213
|
def ls_dir(path, include_hidden=False):
"""Finds content of folder
:param path: directory to get list of files and folders
:param include_hidden: True iff include hidden files in list
:return: List of paths in given directory
"""
lst = []
for file in os.listdir(path):
hidden_file = FileSystem(file).is_hidden()
if (hidden_file and include_hidden) or (not hidden_file):
lst.append(os.path.join(path, file))
return list(set(lst))
|
[
"def",
"ls_dir",
"(",
"path",
",",
"include_hidden",
"=",
"False",
")",
":",
"lst",
"=",
"[",
"]",
"for",
"file",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
":",
"hidden_file",
"=",
"FileSystem",
"(",
"file",
")",
".",
"is_hidden",
"(",
")",
"if",
"(",
"hidden_file",
"and",
"include_hidden",
")",
"or",
"(",
"not",
"hidden_file",
")",
":",
"lst",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"file",
")",
")",
"return",
"list",
"(",
"set",
"(",
"lst",
")",
")"
] |
Finds content of folder
:param path: directory to get list of files and folders
:param include_hidden: True iff include hidden files in list
:return: List of paths in given directory
|
[
"Finds",
"content",
"of",
"folder"
] |
python
|
train
|
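A simplified sketch of the same directory-listing idea, using a plain dot-file check in place of pyhal's FileSystem(file).is_hidden() helper.

# Illustrative sketch only; treats names starting with '.' as hidden (POSIX convention).
import os

def ls_dir(path, include_hidden=False):
    entries = []
    for name in os.listdir(path):
        if include_hidden or not name.startswith("."):
            entries.append(os.path.join(path, name))
    return entries

# e.g. ls_dir("/tmp") -> ['/tmp/foo.txt', ...] with dot-files filtered out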
kevinconway/venvctrl
|
venvctrl/cli/relocate.py
|
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/cli/relocate.py#L31-L54
|
def main():
"""Relocate a virtual environment."""
parser = argparse.ArgumentParser(
description='Relocate a virtual environment.'
)
parser.add_argument(
'--source',
help='The existing virtual environment.',
required=True,
)
parser.add_argument(
'--destination',
help='The location for which to configure the virtual environment.',
required=True,
)
parser.add_argument(
'--move',
help='Move the virtual environment to the destination.',
default=False,
action='store_true',
)
args = parser.parse_args()
relocate(args.source, args.destination, args.move)
|
[
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Relocate a virtual environment.'",
")",
"parser",
".",
"add_argument",
"(",
"'--source'",
",",
"help",
"=",
"'The existing virtual environment.'",
",",
"required",
"=",
"True",
",",
")",
"parser",
".",
"add_argument",
"(",
"'--destination'",
",",
"help",
"=",
"'The location for which to configure the virtual environment.'",
",",
"required",
"=",
"True",
",",
")",
"parser",
".",
"add_argument",
"(",
"'--move'",
",",
"help",
"=",
"'Move the virtual environment to the destination.'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"'store_true'",
",",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"relocate",
"(",
"args",
".",
"source",
",",
"args",
".",
"destination",
",",
"args",
".",
"move",
")"
] |
Relocate a virtual environment.
|
[
"Relocate",
"a",
"virtual",
"environment",
"."
] |
python
|
train
|
AndrewAnnex/SpiceyPy
|
spiceypy/spiceypy.py
|
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L9162-L9182
|
def orderd(array, ndim=None):
"""
Determine the order of elements in a double precision array.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/orderd_c.html
:param array: Input array.
:type array: Array of floats
:param ndim: Optional Length of input array
:type ndim: int
:return: Order vector for array.
:rtype: array of ints
"""
if ndim is None:
ndim = ctypes.c_int(len(array))
else:
ndim = ctypes.c_int(ndim)
array = stypes.toDoubleVector(array)
iorder = stypes.emptyIntVector(ndim)
libspice.orderd_c(array, ndim, iorder)
return stypes.cVectorToPython(iorder)
|
[
"def",
"orderd",
"(",
"array",
",",
"ndim",
"=",
"None",
")",
":",
"if",
"ndim",
"is",
"None",
":",
"ndim",
"=",
"ctypes",
".",
"c_int",
"(",
"len",
"(",
"array",
")",
")",
"else",
":",
"ndim",
"=",
"ctypes",
".",
"c_int",
"(",
"ndim",
")",
"array",
"=",
"stypes",
".",
"toDoubleVector",
"(",
"array",
")",
"iorder",
"=",
"stypes",
".",
"emptyIntVector",
"(",
"ndim",
")",
"libspice",
".",
"orderd_c",
"(",
"array",
",",
"ndim",
",",
"iorder",
")",
"return",
"stypes",
".",
"cVectorToPython",
"(",
"iorder",
")"
] |
Determine the order of elements in a double precision array.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/orderd_c.html
:param array: Input array.
:type array: Array of floats
:param ndim: Optional Length of input array
:type ndim: int
:return: Order vector for array.
:rtype: array of ints
|
[
"Determine",
"the",
"order",
"of",
"elements",
"in",
"a",
"double",
"precision",
"array",
"."
] |
python
|
train
|
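To illustrate what an order vector is, here is the NumPy analogue of the call above: argsort returns the (0-based) index sequence that visits the array in ascending order, roughly what orderd returns from CSPICE's orderd_c (indexing conventions aside).

import numpy as np

array = np.array([3.1, 0.5, 2.2, 1.7])
iorder = np.argsort(array, kind="stable")
print(iorder)           # [1 3 2 0]
print(array[iorder])    # [0.5 1.7 2.2 3.1] -- the array visited in ascending order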
scidash/sciunit
|
sciunit/models/runnable.py
|
https://github.com/scidash/sciunit/blob/41b2e38c45c0776727ab1f281a572b65be19cea1/sciunit/models/runnable.py#L33-L64
|
def set_backend(self, backend):
"""Set the simulation backend."""
if isinstance(backend, str):
name = backend
args = []
kwargs = {}
elif isinstance(backend, (tuple, list)):
name = ''
args = []
kwargs = {}
for i in range(len(backend)):
if i == 0:
name = backend[i]
else:
if isinstance(backend[i], dict):
kwargs.update(backend[i])
else:
args += backend[i]
else:
raise TypeError("Backend must be string, tuple, or list")
if name in available_backends:
self.backend = name
self._backend = available_backends[name]()
elif name is None:
# The base class should not be called.
raise Exception(("A backend (e.g. 'jNeuroML' or 'NEURON') "
"must be selected"))
else:
raise Exception("Backend %s not found in backends.py"
% name)
self._backend.model = self
self._backend.init_backend(*args, **kwargs)
|
[
"def",
"set_backend",
"(",
"self",
",",
"backend",
")",
":",
"if",
"isinstance",
"(",
"backend",
",",
"str",
")",
":",
"name",
"=",
"backend",
"args",
"=",
"[",
"]",
"kwargs",
"=",
"{",
"}",
"elif",
"isinstance",
"(",
"backend",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"name",
"=",
"''",
"args",
"=",
"[",
"]",
"kwargs",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"backend",
")",
")",
":",
"if",
"i",
"==",
"0",
":",
"name",
"=",
"backend",
"[",
"i",
"]",
"else",
":",
"if",
"isinstance",
"(",
"backend",
"[",
"i",
"]",
",",
"dict",
")",
":",
"kwargs",
".",
"update",
"(",
"backend",
"[",
"i",
"]",
")",
"else",
":",
"args",
"+=",
"backend",
"[",
"i",
"]",
"else",
":",
"raise",
"TypeError",
"(",
"\"Backend must be string, tuple, or list\"",
")",
"if",
"name",
"in",
"available_backends",
":",
"self",
".",
"backend",
"=",
"name",
"self",
".",
"_backend",
"=",
"available_backends",
"[",
"name",
"]",
"(",
")",
"elif",
"name",
"is",
"None",
":",
"# The base class should not be called.",
"raise",
"Exception",
"(",
"(",
"\"A backend (e.g. 'jNeuroML' or 'NEURON') \"",
"\"must be selected\"",
")",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Backend %s not found in backends.py\"",
"%",
"name",
")",
"self",
".",
"_backend",
".",
"model",
"=",
"self",
"self",
".",
"_backend",
".",
"init_backend",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Set the simulation backend.
|
[
"Set",
"the",
"simulation",
"backend",
"."
] |
python
|
train
|
MostAwesomeDude/gentleman
|
gentleman/base.py
|
https://github.com/MostAwesomeDude/gentleman/blob/17fb8ffb922aa4af9d8bcab85e452c9311d41805/gentleman/base.py#L405-L422
|
def DeleteInstanceTags(r, instance, tags, dry_run=False):
"""
Deletes tags from an instance.
@type instance: str
@param instance: instance to delete tags from
@type tags: list of str
@param tags: tags to delete
@type dry_run: bool
@param dry_run: whether to perform a dry run
"""
query = {
"tag": tags,
"dry-run": dry_run,
}
return r.request("delete", "/2/instances/%s/tags" % instance, query=query)
|
[
"def",
"DeleteInstanceTags",
"(",
"r",
",",
"instance",
",",
"tags",
",",
"dry_run",
"=",
"False",
")",
":",
"query",
"=",
"{",
"\"tag\"",
":",
"tags",
",",
"\"dry-run\"",
":",
"dry_run",
",",
"}",
"return",
"r",
".",
"request",
"(",
"\"delete\"",
",",
"\"/2/instances/%s/tags\"",
"%",
"instance",
",",
"query",
"=",
"query",
")"
] |
Deletes tags from an instance.
@type instance: str
@param instance: instance to delete tags from
@type tags: list of str
@param tags: tags to delete
@type dry_run: bool
@param dry_run: whether to perform a dry run
|
[
"Deletes",
"tags",
"from",
"an",
"instance",
"."
] |
python
|
train
|
fermiPy/fermipy
|
fermipy/jobs/analysis_utils.py
|
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/analysis_utils.py#L67-L127
|
def add_source_get_correlated(gta, name, src_dict, correl_thresh=0.25, non_null_src=False):
"""Add a source and get the set of correlated sources
Parameters
----------
gta : `fermipy.gtaanalysis.GTAnalysis`
The analysis object
name : str
Name of the source we are adding
src_dict : dict
Dictionary of the source parameters
correl_thresh : float
Threshold for considering a source to be correlated
non_null_src : bool
If True, don't zero the source
Returns
-------
cdict : dict
Dictionary with names and correlation factors of correlated sources
    test_src_name : str
Name of the test source
"""
if gta.roi.has_source(name):
gta.zero_source(name)
gta.update_source(name)
test_src_name = "%s_test" % name
else:
test_src_name = name
gta.add_source(test_src_name, src_dict)
gta.free_norm(test_src_name)
gta.free_shape(test_src_name, free=False)
fit_result = gta.fit(covar=True)
mask = fit_result['is_norm']
src_names = np.array(fit_result['src_names'])[mask]
idx = (src_names == test_src_name).argmax()
correl_vals = fit_result['correlation'][idx][mask]
cdict = {}
for src_name, correl_val in zip(src_names, correl_vals):
if src_name == name:
continue
        if np.fabs(correl_val) > correl_thresh:
cdict[src_name] = correl_val
if not non_null_src:
gta.zero_source(test_src_name)
gta.fit(covar=True)
return cdict, test_src_name
|
[
"def",
"add_source_get_correlated",
"(",
"gta",
",",
"name",
",",
"src_dict",
",",
"correl_thresh",
"=",
"0.25",
",",
"non_null_src",
"=",
"False",
")",
":",
"if",
"gta",
".",
"roi",
".",
"has_source",
"(",
"name",
")",
":",
"gta",
".",
"zero_source",
"(",
"name",
")",
"gta",
".",
"update_source",
"(",
"name",
")",
"test_src_name",
"=",
"\"%s_test\"",
"%",
"name",
"else",
":",
"test_src_name",
"=",
"name",
"gta",
".",
"add_source",
"(",
"test_src_name",
",",
"src_dict",
")",
"gta",
".",
"free_norm",
"(",
"test_src_name",
")",
"gta",
".",
"free_shape",
"(",
"test_src_name",
",",
"free",
"=",
"False",
")",
"fit_result",
"=",
"gta",
".",
"fit",
"(",
"covar",
"=",
"True",
")",
"mask",
"=",
"fit_result",
"[",
"'is_norm'",
"]",
"src_names",
"=",
"np",
".",
"array",
"(",
"fit_result",
"[",
"'src_names'",
"]",
")",
"[",
"mask",
"]",
"idx",
"=",
"(",
"src_names",
"==",
"test_src_name",
")",
".",
"argmax",
"(",
")",
"correl_vals",
"=",
"fit_result",
"[",
"'correlation'",
"]",
"[",
"idx",
"]",
"[",
"mask",
"]",
"cdict",
"=",
"{",
"}",
"for",
"src_name",
",",
"correl_val",
"in",
"zip",
"(",
"src_names",
",",
"correl_vals",
")",
":",
"if",
"src_name",
"==",
"name",
":",
"continue",
"if",
"np",
".",
"fabs",
"(",
"correl_val",
")",
">",
"0.25",
":",
"cdict",
"[",
"src_name",
"]",
"=",
"correl_val",
"if",
"not",
"non_null_src",
":",
"gta",
".",
"zero_source",
"(",
"test_src_name",
")",
"gta",
".",
"fit",
"(",
"covar",
"=",
"True",
")",
"return",
"cdict",
",",
"test_src_name"
] |
Add a source and get the set of correlated sources
Parameters
----------
gta : `fermipy.gtaanalysis.GTAnalysis`
The analysis object
name : str
Name of the source we are adding
src_dict : dict
Dictionary of the source parameters
correl_thresh : float
Threshold for considering a source to be correlated
non_null_src : bool
If True, don't zero the source
Returns
-------
cdict : dict
Dictionary with names and correlation factors of correlated sources
test_src_name : str
Name of the test source
|
[
"Add",
"a",
"source",
"and",
"get",
"the",
"set",
"of",
"correlated",
"sources"
] |
python
|
train
|
rbarrois/confutils
|
confutils/configfile.py
|
https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/configfile.py#L448-L460
|
def remove_line(self, section, line):
"""Remove all instances of a line.
Returns:
int: the number of lines removed
"""
try:
s = self._get_section(section, create=False)
except KeyError:
# No such section, skip.
return 0
return s.remove(line)
|
[
"def",
"remove_line",
"(",
"self",
",",
"section",
",",
"line",
")",
":",
"try",
":",
"s",
"=",
"self",
".",
"_get_section",
"(",
"section",
",",
"create",
"=",
"False",
")",
"except",
"KeyError",
":",
"# No such section, skip.",
"return",
"0",
"return",
"s",
".",
"remove",
"(",
"line",
")"
] |
Remove all instances of a line.
Returns:
int: the number of lines removed
|
[
"Remove",
"all",
"instances",
"of",
"a",
"line",
"."
] |
python
|
train
|
pandas-dev/pandas
|
pandas/core/groupby/ops.py
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/ops.py#L257-L264
|
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby)
|
[
"def",
"groups",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"groupings",
")",
"==",
"1",
":",
"return",
"self",
".",
"groupings",
"[",
"0",
"]",
".",
"groups",
"else",
":",
"to_groupby",
"=",
"lzip",
"(",
"*",
"(",
"ping",
".",
"grouper",
"for",
"ping",
"in",
"self",
".",
"groupings",
")",
")",
"to_groupby",
"=",
"Index",
"(",
"to_groupby",
")",
"return",
"self",
".",
"axis",
".",
"groupby",
"(",
"to_groupby",
")"
] |
dict {group name -> group labels}
|
[
"dict",
"{",
"group",
"name",
"-",
">",
"group",
"labels",
"}"
] |
python
|
train
|
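A tiny usage sketch of the .groups mapping described above, for the single-grouping case:

import pandas as pd

df = pd.DataFrame({"key": ["a", "b", "a", "b"], "val": [1, 2, 3, 4]})
print(df.groupby("key").groups)
# {'a': [0, 2], 'b': [1, 3]}   (values are Index objects of the row labels in each group)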
budacom/trading-bots
|
trading_bots/core/management/__init__.py
|
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/core/management/__init__.py#L89-L94
|
def abort(bot, config, settings):
"""Run the abort command of a specified BOT by label e.g. 'MyBot'"""
print_options(bot, config, settings)
click.echo()
bot_task = BotTask(bot, config)
bot_task.abort()
|
[
"def",
"abort",
"(",
"bot",
",",
"config",
",",
"settings",
")",
":",
"print_options",
"(",
"bot",
",",
"config",
",",
"settings",
")",
"click",
".",
"echo",
"(",
")",
"bot_task",
"=",
"BotTask",
"(",
"bot",
",",
"config",
")",
"bot_task",
".",
"abort",
"(",
")"
] |
Run the abort command of a specified BOT by label e.g. 'MyBot'
|
[
"Run",
"the",
"abort",
"command",
"of",
"a",
"specified",
"BOT",
"by",
"label",
"e",
".",
"g",
".",
"MyBot"
] |
python
|
train
|
googleapis/google-cloud-python
|
spanner/google/cloud/spanner_v1/snapshot.py
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/snapshot.py#L308-L381
|
def partition_query(
self,
sql,
params=None,
param_types=None,
partition_size_bytes=None,
max_partitions=None,
):
"""Perform a ``ParitionQuery`` API request.
:type sql: str
:param sql: SQL query statement
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``sql``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:type partition_size_bytes: int
:param partition_size_bytes:
(Optional) desired size for each partition generated. The service
uses this as a hint, the actual partition size may differ.
:type max_partitions: int
:param max_partitions:
(Optional) desired maximum number of partitions generated. The
service uses this as a hint, the actual number of partitions may
differ.
:rtype: iterable of bytes
:returns: a sequence of partition tokens
:raises ValueError:
for single-use snapshots, or if a transaction ID is
            already associated with the snapshot.
"""
if not self._multi_use:
raise ValueError("Cannot use single-use snapshot.")
if self._transaction_id is None:
raise ValueError("Transaction not started.")
if params is not None:
if param_types is None:
raise ValueError("Specify 'param_types' when passing 'params'.")
params_pb = Struct(
fields={key: _make_value_pb(value) for key, value in params.items()}
)
else:
params_pb = None
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
transaction = self._make_txn_selector()
partition_options = PartitionOptions(
partition_size_bytes=partition_size_bytes, max_partitions=max_partitions
)
response = api.partition_query(
session=self._session.name,
sql=sql,
transaction=transaction,
params=params_pb,
param_types=param_types,
partition_options=partition_options,
metadata=metadata,
)
return [partition.partition_token for partition in response.partitions]
|
[
"def",
"partition_query",
"(",
"self",
",",
"sql",
",",
"params",
"=",
"None",
",",
"param_types",
"=",
"None",
",",
"partition_size_bytes",
"=",
"None",
",",
"max_partitions",
"=",
"None",
",",
")",
":",
"if",
"not",
"self",
".",
"_multi_use",
":",
"raise",
"ValueError",
"(",
"\"Cannot use single-use snapshot.\"",
")",
"if",
"self",
".",
"_transaction_id",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Transaction not started.\"",
")",
"if",
"params",
"is",
"not",
"None",
":",
"if",
"param_types",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Specify 'param_types' when passing 'params'.\"",
")",
"params_pb",
"=",
"Struct",
"(",
"fields",
"=",
"{",
"key",
":",
"_make_value_pb",
"(",
"value",
")",
"for",
"key",
",",
"value",
"in",
"params",
".",
"items",
"(",
")",
"}",
")",
"else",
":",
"params_pb",
"=",
"None",
"database",
"=",
"self",
".",
"_session",
".",
"_database",
"api",
"=",
"database",
".",
"spanner_api",
"metadata",
"=",
"_metadata_with_prefix",
"(",
"database",
".",
"name",
")",
"transaction",
"=",
"self",
".",
"_make_txn_selector",
"(",
")",
"partition_options",
"=",
"PartitionOptions",
"(",
"partition_size_bytes",
"=",
"partition_size_bytes",
",",
"max_partitions",
"=",
"max_partitions",
")",
"response",
"=",
"api",
".",
"partition_query",
"(",
"session",
"=",
"self",
".",
"_session",
".",
"name",
",",
"sql",
"=",
"sql",
",",
"transaction",
"=",
"transaction",
",",
"params",
"=",
"params_pb",
",",
"param_types",
"=",
"param_types",
",",
"partition_options",
"=",
"partition_options",
",",
"metadata",
"=",
"metadata",
",",
")",
"return",
"[",
"partition",
".",
"partition_token",
"for",
"partition",
"in",
"response",
".",
"partitions",
"]"
] |
Perform a ``PartitionQuery`` API request.
:type sql: str
:param sql: SQL query statement
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``sql``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:type partition_size_bytes: int
:param partition_size_bytes:
(Optional) desired size for each partition generated. The service
uses this as a hint, the actual partition size may differ.
:type max_partitions: int
:param max_partitions:
(Optional) desired maximum number of partitions generated. The
service uses this as a hint, the actual number of partitions may
differ.
:rtype: iterable of bytes
:returns: a sequence of partition tokens
:raises ValueError:
for single-use snapshots, or if a transaction ID is
already associated with the snapshot.
|
[
"Perform",
"a",
"ParitionQuery",
"API",
"request",
"."
] |
python
|
train
|
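A hedged usage sketch for the partition_query record above; the instance/database names, the table and columns, and the multi-use snapshot setup are assumptions for illustration, not part of the record.

# Hedged sketch only: obtain a multi-use snapshot, then fan the opaque partition tokens out to workers.
from google.cloud import spanner
from google.cloud.spanner_v1 import param_types

client = spanner.Client()                                        # project/credentials from environment (assumed)
database = client.instance("my-instance").database("my-db")      # names are illustrative
with database.snapshot(multi_use=True) as snapshot:              # the method requires a multi-use snapshot
    snapshot.begin()                                             # a transaction ID must exist before partitioning
    tokens = snapshot.partition_query(
        sql="SELECT id, name FROM Person WHERE age > @age",      # table/columns are illustrative
        params={"age": 30},
        param_types={"age": param_types.INT64},                  # required whenever params are given
        partition_size_bytes=100 << 20,                          # hint only; actual partition sizes may differ
        max_partitions=8,
    )
print(len(tokens), "partition tokens")                           # each entry is opaque bytes
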
fedora-infra/fedora-messaging
|
fedora_messaging/message.py
|
https://github.com/fedora-infra/fedora-messaging/blob/be3e88534e2b15d579bcd24f9c4b7e795cb7e0b7/fedora_messaging/message.py#L646-L659
|
def _dump(self):
"""
Dump message attributes.
Returns:
dict: A dictionary of message attributes.
"""
return {
"topic": self.topic,
"headers": self._headers,
"id": self.id,
"body": self.body,
"queue": self.queue,
}
|
[
"def",
"_dump",
"(",
"self",
")",
":",
"return",
"{",
"\"topic\"",
":",
"self",
".",
"topic",
",",
"\"headers\"",
":",
"self",
".",
"_headers",
",",
"\"id\"",
":",
"self",
".",
"id",
",",
"\"body\"",
":",
"self",
".",
"body",
",",
"\"queue\"",
":",
"self",
".",
"queue",
",",
"}"
] |
Dump message attributes.
Returns:
dict: A dictionary of message attributes.
|
[
"Dump",
"message",
"attributes",
"."
] |
python
|
train
|
RedFantom/ttkwidgets
|
ttkwidgets/font/sizedropdown.py
|
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/font/sizedropdown.py#L38-L45
|
def _on_click(self, event):
"""
Function bound to event of selection in the Combobox, calls callback if callable
:param event: Tkinter event
"""
if callable(self.__callback):
self.__callback(self.selection)
|
[
"def",
"_on_click",
"(",
"self",
",",
"event",
")",
":",
"if",
"callable",
"(",
"self",
".",
"__callback",
")",
":",
"self",
".",
"__callback",
"(",
"self",
".",
"selection",
")"
] |
Function bound to event of selection in the Combobox, calls callback if callable
:param event: Tkinter event
|
[
"Function",
"bound",
"to",
"event",
"of",
"selection",
"in",
"the",
"Combobox",
"calls",
"callback",
"if",
"callable",
":",
"param",
"event",
":",
"Tkinter",
"event"
] |
python
|
train
|
saltstack/salt
|
salt/modules/boto3_sns.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto3_sns.py#L220-L240
|
def list_subscriptions_by_topic(TopicArn, region=None, key=None, keyid=None, profile=None):
'''
Returns a list of the subscriptions to a specific topic
CLI example::
salt myminion boto3_sns.list_subscriptions_by_topic mytopic region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
NextToken = ''
res = []
try:
while NextToken is not None:
ret = conn.list_subscriptions_by_topic(TopicArn=TopicArn, NextToken=NextToken)
NextToken = ret.get('NextToken', None)
subs = ret.get('Subscriptions', [])
res += subs
except botocore.exceptions.ClientError as e:
log.error('Failed to list subscriptions for SNS topic %s: %s', TopicArn, e)
return None
return res
|
[
"def",
"list_subscriptions_by_topic",
"(",
"TopicArn",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"NextToken",
"=",
"''",
"res",
"=",
"[",
"]",
"try",
":",
"while",
"NextToken",
"is",
"not",
"None",
":",
"ret",
"=",
"conn",
".",
"list_subscriptions_by_topic",
"(",
"TopicArn",
"=",
"TopicArn",
",",
"NextToken",
"=",
"NextToken",
")",
"NextToken",
"=",
"ret",
".",
"get",
"(",
"'NextToken'",
",",
"None",
")",
"subs",
"=",
"ret",
".",
"get",
"(",
"'Subscriptions'",
",",
"[",
"]",
")",
"res",
"+=",
"subs",
"except",
"botocore",
".",
"exceptions",
".",
"ClientError",
"as",
"e",
":",
"log",
".",
"error",
"(",
"'Failed to list subscriptions for SNS topic %s: %s'",
",",
"TopicArn",
",",
"e",
")",
"return",
"None",
"return",
"res"
] |
Returns a list of the subscriptions to a specific topic
CLI example::
salt myminion boto3_sns.list_subscriptions_by_topic mytopic region=us-east-1
|
[
"Returns",
"a",
"list",
"of",
"the",
"subscriptions",
"to",
"a",
"specific",
"topic"
] |
python
|
train
|
bspaans/python-mingus
|
mingus/midi/midi_file_out.py
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/midi_file_out.py#L71-L86
|
def write_Note(file, note, bpm=120, repeat=0, verbose=False):
"""Expect a Note object from mingus.containers and save it into a MIDI
file, specified in file.
You can set the velocity and channel in Note.velocity and Note.channel.
"""
m = MidiFile()
t = MidiTrack(bpm)
m.tracks = [t]
while repeat >= 0:
t.set_deltatime('\x00')
t.play_Note(note)
t.set_deltatime("\x48")
t.stop_Note(note)
repeat -= 1
return m.write_file(file, verbose)
|
[
"def",
"write_Note",
"(",
"file",
",",
"note",
",",
"bpm",
"=",
"120",
",",
"repeat",
"=",
"0",
",",
"verbose",
"=",
"False",
")",
":",
"m",
"=",
"MidiFile",
"(",
")",
"t",
"=",
"MidiTrack",
"(",
"bpm",
")",
"m",
".",
"tracks",
"=",
"[",
"t",
"]",
"while",
"repeat",
">=",
"0",
":",
"t",
".",
"set_deltatime",
"(",
"'\\x00'",
")",
"t",
".",
"play_Note",
"(",
"note",
")",
"t",
".",
"set_deltatime",
"(",
"\"\\x48\"",
")",
"t",
".",
"stop_Note",
"(",
"note",
")",
"repeat",
"-=",
"1",
"return",
"m",
".",
"write_file",
"(",
"file",
",",
"verbose",
")"
] |
Expect a Note object from mingus.containers and save it into a MIDI
file, specified in file.
You can set the velocity and channel in Note.velocity and Note.channel.
|
[
"Expect",
"a",
"Note",
"object",
"from",
"mingus",
".",
"containers",
"and",
"save",
"it",
"into",
"a",
"MIDI",
"file",
"specified",
"in",
"file",
"."
] |
python
|
train
|
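A hedged usage sketch for the write_Note record above; the note, velocity, and output path are illustrative.

# repeat counts down to 0 inside write_Note, so repeat=3 writes the note 4 times.
from mingus.containers import Note
from mingus.midi.midi_file_out import write_Note

n = Note("C-5")
n.velocity = 90                                  # velocity/channel are read from the Note itself
write_Note("c_note.mid", n, bpm=100, repeat=3)
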
fhamborg/news-please
|
newsplease/__init__.py
|
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/__init__.py#L36-L70
|
def from_html(html, url=None, download_date=None):
"""
Extracts relevant information from an HTML page given as a string. This function does not invoke scrapy but only
uses the article extractor. If you have the original URL make sure to provide it as this helps NewsPlease
to extract the publishing date and title.
:param html:
:param url:
:return:
"""
extractor = article_extractor.Extractor(
['newspaper_extractor', 'readability_extractor', 'date_extractor', 'lang_detect_extractor'])
title_encoded = ''.encode()
if not url:
url = ''
# if an url was given, we can use that as the filename
filename = urllib.parse.quote_plus(url) + '.json'
item = NewscrawlerItem()
item['spider_response'] = DotMap()
item['spider_response'].body = html
item['url'] = url
item['source_domain'] = urllib.parse.urlparse(url).hostname.encode() if url != '' else ''.encode()
item['html_title'] = title_encoded
item['rss_title'] = title_encoded
item['local_path'] = None
item['filename'] = filename
item['download_date'] = download_date
item['modified_date'] = None
item = extractor.extract(item)
tmp_article = ExtractedInformationStorage.extract_relevant_info(item)
final_article = ExtractedInformationStorage.convert_to_class(tmp_article)
return final_article
|
[
"def",
"from_html",
"(",
"html",
",",
"url",
"=",
"None",
",",
"download_date",
"=",
"None",
")",
":",
"extractor",
"=",
"article_extractor",
".",
"Extractor",
"(",
"[",
"'newspaper_extractor'",
",",
"'readability_extractor'",
",",
"'date_extractor'",
",",
"'lang_detect_extractor'",
"]",
")",
"title_encoded",
"=",
"''",
".",
"encode",
"(",
")",
"if",
"not",
"url",
":",
"url",
"=",
"''",
"# if an url was given, we can use that as the filename",
"filename",
"=",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"url",
")",
"+",
"'.json'",
"item",
"=",
"NewscrawlerItem",
"(",
")",
"item",
"[",
"'spider_response'",
"]",
"=",
"DotMap",
"(",
")",
"item",
"[",
"'spider_response'",
"]",
".",
"body",
"=",
"html",
"item",
"[",
"'url'",
"]",
"=",
"url",
"item",
"[",
"'source_domain'",
"]",
"=",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"url",
")",
".",
"hostname",
".",
"encode",
"(",
")",
"if",
"url",
"!=",
"''",
"else",
"''",
".",
"encode",
"(",
")",
"item",
"[",
"'html_title'",
"]",
"=",
"title_encoded",
"item",
"[",
"'rss_title'",
"]",
"=",
"title_encoded",
"item",
"[",
"'local_path'",
"]",
"=",
"None",
"item",
"[",
"'filename'",
"]",
"=",
"filename",
"item",
"[",
"'download_date'",
"]",
"=",
"download_date",
"item",
"[",
"'modified_date'",
"]",
"=",
"None",
"item",
"=",
"extractor",
".",
"extract",
"(",
"item",
")",
"tmp_article",
"=",
"ExtractedInformationStorage",
".",
"extract_relevant_info",
"(",
"item",
")",
"final_article",
"=",
"ExtractedInformationStorage",
".",
"convert_to_class",
"(",
"tmp_article",
")",
"return",
"final_article"
] |
Extracts relevant information from an HTML page given as a string. This function does not invoke scrapy but only
uses the article extractor. If you have the original URL make sure to provide it as this helps NewsPlease
to extract the publishing date and title.
:param html:
:param url:
:return:
|
[
"Extracts",
"relevant",
"information",
"from",
"an",
"HTML",
"page",
"given",
"as",
"a",
"string",
".",
"This",
"function",
"does",
"not",
"invoke",
"scrapy",
"but",
"only",
"uses",
"the",
"article",
"extractor",
".",
"If",
"you",
"have",
"the",
"original",
"URL",
"make",
"sure",
"to",
"provide",
"it",
"as",
"this",
"helps",
"NewsPlease",
"to",
"extract",
"the",
"publishing",
"date",
"and",
"title",
".",
":",
"param",
"html",
":",
":",
"param",
"url",
":",
":",
"return",
":"
] |
python
|
train
|
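A hedged sketch of calling the from_html record above on a fetched page; the URL and the use of requests are assumptions.

import requests
from newsplease import NewsPlease

url = "https://example.com/some-article"
article = NewsPlease.from_html(requests.get(url).text, url=url)  # passing the URL helps date/title extraction
print(article.title)                                             # extracted title, if the extractors found one
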
Azure/azure-uamqp-python
|
uamqp/address.py
|
https://github.com/Azure/azure-uamqp-python/blob/b67e4fcaf2e8a337636947523570239c10a58ae2/uamqp/address.py#L134-L146
|
def _validate_address(self, address):
"""Confirm that supplied address is a valid URL and
has an `amqp` or `amqps` scheme.
:param address: The endpoint URL.
:type address: str
:rtype: ~urllib.parse.ParseResult
"""
parsed = compat.urlparse(address)
if not parsed.path:
raise ValueError("Invalid {} address: {}".format(
self.__class__.__name__, parsed))
return parsed
|
[
"def",
"_validate_address",
"(",
"self",
",",
"address",
")",
":",
"parsed",
"=",
"compat",
".",
"urlparse",
"(",
"address",
")",
"if",
"not",
"parsed",
".",
"path",
":",
"raise",
"ValueError",
"(",
"\"Invalid {} address: {}\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"parsed",
")",
")",
"return",
"parsed"
] |
Confirm that supplied address is a valid URL and
has an `amqp` or `amqps` scheme.
:param address: The endpoint URL.
:type address: str
:rtype: ~urllib.parse.ParseResult
|
[
"Confirm",
"that",
"supplied",
"address",
"is",
"a",
"valid",
"URL",
"and",
"has",
"an",
"amqp",
"or",
"amqps",
"scheme",
"."
] |
python
|
train
|
Gscorreia89/pyChemometrics
|
pyChemometrics/ChemometricsScaler.py
|
https://github.com/Gscorreia89/pyChemometrics/blob/539f5cd719795685271faa7fb1c6d53d7dd4de19/pyChemometrics/ChemometricsScaler.py#L138-L169
|
def transform(self, X, y=None, copy=None):
"""
Perform standardization by centering and scaling using the parameters.
:param X: Data matrix to scale.
:type X: numpy.ndarray, shape [n_samples, n_features]
:param y: Passthrough for scikit-learn ``Pipeline`` compatibility.
:type y: None
:param bool copy: Copy the X matrix.
:return: Scaled version of the X data matrix.
:rtype: numpy.ndarray, shape [n_samples, n_features]
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
|
[
"def",
"transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"copy",
"=",
"None",
")",
":",
"check_is_fitted",
"(",
"self",
",",
"'scale_'",
")",
"copy",
"=",
"copy",
"if",
"copy",
"is",
"not",
"None",
"else",
"self",
".",
"copy",
"X",
"=",
"check_array",
"(",
"X",
",",
"accept_sparse",
"=",
"'csr'",
",",
"copy",
"=",
"copy",
",",
"warn_on_dtype",
"=",
"True",
",",
"estimator",
"=",
"self",
",",
"dtype",
"=",
"FLOAT_DTYPES",
")",
"if",
"sparse",
".",
"issparse",
"(",
"X",
")",
":",
"if",
"self",
".",
"with_mean",
":",
"raise",
"ValueError",
"(",
"\"Cannot center sparse matrices: pass `with_mean=False` \"",
"\"instead. See docstring for motivation and alternatives.\"",
")",
"if",
"self",
".",
"scale_",
"is",
"not",
"None",
":",
"inplace_column_scale",
"(",
"X",
",",
"1",
"/",
"self",
".",
"scale_",
")",
"else",
":",
"if",
"self",
".",
"with_mean",
":",
"X",
"-=",
"self",
".",
"mean_",
"if",
"self",
".",
"with_std",
":",
"X",
"/=",
"self",
".",
"scale_",
"return",
"X"
] |
Perform standardization by centering and scaling using the parameters.
:param X: Data matrix to scale.
:type X: numpy.ndarray, shape [n_samples, n_features]
:param y: Passthrough for scikit-learn ``Pipeline`` compatibility.
:type y: None
:param bool copy: Copy the X matrix.
:return: Scaled version of the X data matrix.
:rtype: numpy.ndarray, shape [n_samples, n_features]
|
[
"Perform",
"standardization",
"by",
"centering",
"and",
"scaling",
"using",
"the",
"parameters",
"."
] |
python
|
train
|
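A hedged sketch of the fit/transform flow for the transform record above; the data shape and the no-argument constructor are assumptions about the class API.

import numpy as np
from pyChemometrics.ChemometricsScaler import ChemometricsScaler

X = np.random.normal(size=(50, 10))
scaler = ChemometricsScaler()            # default centring/scaling options assumed
scaler.fit(X)                            # fit must run first: transform checks for scale_
X_scaled = scaler.transform(X)
print(X_scaled.mean(axis=0).round(6))    # approximately zero per column after centring
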
reingart/gui2py
|
gui/doc/ext/autosummary/__init__.py
|
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/doc/ext/autosummary/__init__.py#L342-L375
|
def mangle_signature(sig, max_chars=30):
"""Reformat a function signature to a more compact form."""
s = re.sub(r"^\((.*)\)$", r"\1", sig).strip()
# Strip strings (which can contain things that confuse the code below)
s = re.sub(r"\\\\", "", s)
s = re.sub(r"\\'", "", s)
s = re.sub(r"'[^']*'", "", s)
# Parse the signature to arguments + options
args = []
opts = []
opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=")
while s:
m = opt_re.search(s)
if not m:
# The rest are arguments
args = s.split(', ')
break
opts.insert(0, m.group(2))
s = m.group(1)[:-2]
# Produce a more compact signature
sig = limited_join(", ", args, max_chars=max_chars-2)
if opts:
if not sig:
sig = "[%s]" % limited_join(", ", opts, max_chars=max_chars-4)
elif len(sig) < max_chars - 4 - 2 - 3:
sig += "[, %s]" % limited_join(", ", opts,
max_chars=max_chars-len(sig)-4-2)
return u"(%s)" % sig
|
[
"def",
"mangle_signature",
"(",
"sig",
",",
"max_chars",
"=",
"30",
")",
":",
"s",
"=",
"re",
".",
"sub",
"(",
"r\"^\\((.*)\\)$\"",
",",
"r\"\\1\"",
",",
"sig",
")",
".",
"strip",
"(",
")",
"# Strip strings (which can contain things that confuse the code below)",
"s",
"=",
"re",
".",
"sub",
"(",
"r\"\\\\\\\\\"",
",",
"\"\"",
",",
"s",
")",
"s",
"=",
"re",
".",
"sub",
"(",
"r\"\\\\'\"",
",",
"\"\"",
",",
"s",
")",
"s",
"=",
"re",
".",
"sub",
"(",
"r\"'[^']*'\"",
",",
"\"\"",
",",
"s",
")",
"# Parse the signature to arguments + options",
"args",
"=",
"[",
"]",
"opts",
"=",
"[",
"]",
"opt_re",
"=",
"re",
".",
"compile",
"(",
"r\"^(.*, |)([a-zA-Z0-9_*]+)=\"",
")",
"while",
"s",
":",
"m",
"=",
"opt_re",
".",
"search",
"(",
"s",
")",
"if",
"not",
"m",
":",
"# The rest are arguments",
"args",
"=",
"s",
".",
"split",
"(",
"', '",
")",
"break",
"opts",
".",
"insert",
"(",
"0",
",",
"m",
".",
"group",
"(",
"2",
")",
")",
"s",
"=",
"m",
".",
"group",
"(",
"1",
")",
"[",
":",
"-",
"2",
"]",
"# Produce a more compact signature",
"sig",
"=",
"limited_join",
"(",
"\", \"",
",",
"args",
",",
"max_chars",
"=",
"max_chars",
"-",
"2",
")",
"if",
"opts",
":",
"if",
"not",
"sig",
":",
"sig",
"=",
"\"[%s]\"",
"%",
"limited_join",
"(",
"\", \"",
",",
"opts",
",",
"max_chars",
"=",
"max_chars",
"-",
"4",
")",
"elif",
"len",
"(",
"sig",
")",
"<",
"max_chars",
"-",
"4",
"-",
"2",
"-",
"3",
":",
"sig",
"+=",
"\"[, %s]\"",
"%",
"limited_join",
"(",
"\", \"",
",",
"opts",
",",
"max_chars",
"=",
"max_chars",
"-",
"len",
"(",
"sig",
")",
"-",
"4",
"-",
"2",
")",
"return",
"u\"(%s)\"",
"%",
"sig"
] |
Reformat a function signature to a more compact form.
|
[
"Reformat",
"a",
"function",
"signature",
"to",
"a",
"more",
"compact",
"form",
"."
] |
python
|
test
|
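A hedged example of the mangle_signature record above; the signature string is illustrative, and the exact compaction depends on limited_join, assuming the function is in scope.

sig = "(self, name, color='blue', size=10, verbose=False)"
print(mangle_signature(sig, max_chars=25))
# keyword arguments are folded into a bracketed "[, ...]" tail when space runs out
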
Carreau/Love
|
love/flit.py
|
https://github.com/Carreau/Love/blob/a85d1139b32ee926b3bee73447e32e89b86983ba/love/flit.py#L11-L28
|
def modify_config(path):
"""
Context manager to modify a flit config file.
Will read the config file, validate the config, yield the config object,
validate and write back the config to the file on exit
"""
if isinstance(path, str):
path = Path(path)
config = _read_pkg_ini(path)
_validate_config(config, path)
# don't catch exception, we won't write the new config.
yield config
_validate_config(config, path)
with path.open('w') as f:
config.write(f)
|
[
"def",
"modify_config",
"(",
"path",
")",
":",
"if",
"isinstance",
"(",
"path",
",",
"str",
")",
":",
"path",
"=",
"Path",
"(",
"path",
")",
"config",
"=",
"_read_pkg_ini",
"(",
"path",
")",
"_validate_config",
"(",
"config",
",",
"path",
")",
"# don't catch exception, we won't write the new config.",
"yield",
"config",
"_validate_config",
"(",
"config",
",",
"path",
")",
"with",
"path",
".",
"open",
"(",
"'w'",
")",
"as",
"f",
":",
"config",
".",
"write",
"(",
"f",
")"
] |
Context manager to modify a flit config file.
Will read the config file, validate the config, yield the config object,
validate and write back the config to the file on exit
|
[
"Context",
"manager",
"to",
"modify",
"a",
"flit",
"config",
"file",
"."
] |
python
|
train
|
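A hedged usage sketch of the modify_config context manager above; the file name, section, and option are illustrative, and the yielded config is assumed to be configparser-like.

from love.flit import modify_config

with modify_config("flit.ini") as config:
    config.set("metadata", "author", "Jane Doe")   # validated again and written back on exit
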
globocom/GloboNetworkAPI-client-python
|
networkapiclient/Ip.py
|
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Ip.py#L275-L304
|
def get_available_ip6(self, id_network6):
"""
Get an available IP in Network ipv6
:param id_network6: Network ipv6 identifier. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{'ip6': {'ip6': < available_ip6 >}}
:raise IpNotAvailableError: Network doesn't have an available IP.
:raise NetworkIPv4NotFoundError: Network was not found.
:raise UserNotAuthorizedError: User doesn't have permission to get an available IP.
:raise InvalidParameterError: Network ipv6 identifier is none or invalid.
:raise XMLError: Networkapi failed to generate the XML response.
:raise DataBaseError: Networkapi failed to access the database.
"""
if not is_valid_int_param(id_network6):
raise InvalidParameterError(
u'Network ipv6 identifier is invalid or was not informed.')
url = 'ip/availableip6/' + str(id_network6) + "/"
code, xml = self.submit(None, 'GET', url)
return self.response(code, xml)
|
[
"def",
"get_available_ip6",
"(",
"self",
",",
"id_network6",
")",
":",
"if",
"not",
"is_valid_int_param",
"(",
"id_network6",
")",
":",
"raise",
"InvalidParameterError",
"(",
"u'Network ipv6 identifier is invalid or was not informed.'",
")",
"url",
"=",
"'ip/availableip6/'",
"+",
"str",
"(",
"id_network6",
")",
"+",
"\"/\"",
"code",
",",
"xml",
"=",
"self",
".",
"submit",
"(",
"None",
",",
"'GET'",
",",
"url",
")",
"return",
"self",
".",
"response",
"(",
"code",
",",
"xml",
")"
] |
Get an available IP in Network ipv6
:param id_network6: Network ipv6 identifier. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{'ip6': {'ip6': < available_ip6 >}}
:raise IpNotAvailableError: Network doesn't have an available IP.
:raise NetworkIPv4NotFoundError: Network was not found.
:raise UserNotAuthorizedError: User doesn't have permission to get an available IP.
:raise InvalidParameterError: Network ipv6 identifier is none or invalid.
:raise XMLError: Networkapi failed to generate the XML response.
:raise DataBaseError: Networkapi failed to access the database.
|
[
"Get",
"a",
"available",
"IP",
"in",
"Network",
"ipv6"
] |
python
|
train
|
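A hedged sketch of calling the get_available_ip6 record above; the client constructor arguments and the network identifier are assumptions.

from networkapiclient.Ip import Ip

ip_client = Ip("https://networkapi.example.com/", "user", "password")   # constructor signature assumed
answer = ip_client.get_available_ip6(42)                                # id must be a positive integer
print(answer["ip6"]["ip6"])                                             # structure taken from the docstring above
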
NLeSC/scriptcwl
|
scriptcwl/library.py
|
https://github.com/NLeSC/scriptcwl/blob/33bb847a875379da3a5702c7a98dfa585306b960/scriptcwl/library.py#L83-L131
|
def load_steps(working_dir=None, steps_dir=None, step_file=None,
step_list=None):
"""Return a dictionary containing Steps read from file.
Args:
steps_dir (str, optional): path to directory containing CWL files.
step_file (str, optional): path or http(s) url to a single CWL file.
step_list (list, optional): a list of directories, urls or local file
paths to CWL files or directories containing CWL files.
Return:
dict containing (name, Step) entries.
"""
if steps_dir is not None:
step_files = glob.glob(os.path.join(steps_dir, '*.cwl'))
elif step_file is not None:
step_files = [step_file]
elif step_list is not None:
step_files = []
for path in step_list:
if os.path.isdir(path):
step_files += glob.glob(os.path.join(path, '*.cwl'))
else:
step_files.append(path)
else:
step_files = []
if working_dir is not None:
step_files = sort_loading_order(step_files)
steps = {}
for f in step_files:
if working_dir is not None:
# Copy file to working_dir
if not working_dir == os.path.dirname(f) and not is_url(f):
copied_file = os.path.join(working_dir, os.path.basename(f))
shutil.copy2(f, copied_file)
f = copied_file
# Create steps
try:
s = Step(f)
steps[s.name] = s
except (NotImplementedError, ValidationException,
PackedWorkflowException) as e:
logger.warning(e)
return steps
|
[
"def",
"load_steps",
"(",
"working_dir",
"=",
"None",
",",
"steps_dir",
"=",
"None",
",",
"step_file",
"=",
"None",
",",
"step_list",
"=",
"None",
")",
":",
"if",
"steps_dir",
"is",
"not",
"None",
":",
"step_files",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"steps_dir",
",",
"'*.cwl'",
")",
")",
"elif",
"step_file",
"is",
"not",
"None",
":",
"step_files",
"=",
"[",
"step_file",
"]",
"elif",
"step_list",
"is",
"not",
"None",
":",
"step_files",
"=",
"[",
"]",
"for",
"path",
"in",
"step_list",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"step_files",
"+=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'*.cwl'",
")",
")",
"else",
":",
"step_files",
".",
"append",
"(",
"path",
")",
"else",
":",
"step_files",
"=",
"[",
"]",
"if",
"working_dir",
"is",
"not",
"None",
":",
"step_files",
"=",
"sort_loading_order",
"(",
"step_files",
")",
"steps",
"=",
"{",
"}",
"for",
"f",
"in",
"step_files",
":",
"if",
"working_dir",
"is",
"not",
"None",
":",
"# Copy file to working_dir",
"if",
"not",
"working_dir",
"==",
"os",
".",
"path",
".",
"dirname",
"(",
"f",
")",
"and",
"not",
"is_url",
"(",
"f",
")",
":",
"copied_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"working_dir",
",",
"os",
".",
"path",
".",
"basename",
"(",
"f",
")",
")",
"shutil",
".",
"copy2",
"(",
"f",
",",
"copied_file",
")",
"f",
"=",
"copied_file",
"# Create steps",
"try",
":",
"s",
"=",
"Step",
"(",
"f",
")",
"steps",
"[",
"s",
".",
"name",
"]",
"=",
"s",
"except",
"(",
"NotImplementedError",
",",
"ValidationException",
",",
"PackedWorkflowException",
")",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"e",
")",
"return",
"steps"
] |
Return a dictionary containing Steps read from file.
Args:
steps_dir (str, optional): path to directory containing CWL files.
step_file (str, optional): path or http(s) url to a single CWL file.
step_list (list, optional): a list of directories, urls or local file
paths to CWL files or directories containing CWL files.
Return:
dict containing (name, Step) entries.
|
[
"Return",
"a",
"dictionary",
"containing",
"Steps",
"read",
"from",
"file",
"."
] |
python
|
train
|
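A hedged sketch of the load_steps record above; the directory path is illustrative.

from scriptcwl.library import load_steps

steps = load_steps(steps_dir="cwl-steps/")
for name in sorted(steps):
    print(name)                       # one entry per successfully parsed .cwl step
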
google/grr
|
grr/server/grr_response_server/artifact_registry.py
|
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/artifact_registry.py#L760-L774
|
def GetArtifactParserDependencies(rdf_artifact):
"""Return the set of knowledgebase path dependencies required by the parser.
Args:
rdf_artifact: RDF artifact object.
Returns:
A set of strings for the required kb objects e.g.
["users.appdata", "systemroot"]
"""
deps = set()
processors = parser.Parser.GetClassesByArtifact(rdf_artifact.name)
for p in processors:
deps.update(p.knowledgebase_dependencies)
return deps
|
[
"def",
"GetArtifactParserDependencies",
"(",
"rdf_artifact",
")",
":",
"deps",
"=",
"set",
"(",
")",
"processors",
"=",
"parser",
".",
"Parser",
".",
"GetClassesByArtifact",
"(",
"rdf_artifact",
".",
"name",
")",
"for",
"p",
"in",
"processors",
":",
"deps",
".",
"update",
"(",
"p",
".",
"knowledgebase_dependencies",
")",
"return",
"deps"
] |
Return the set of knowledgebase path dependencies required by the parser.
Args:
rdf_artifact: RDF artifact object.
Returns:
A set of strings for the required kb objects e.g.
["users.appdata", "systemroot"]
|
[
"Return",
"the",
"set",
"of",
"knowledgebase",
"path",
"dependencies",
"required",
"by",
"the",
"parser",
"."
] |
python
|
train
|
dls-controls/pymalcolm
|
malcolm/core/future.py
|
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/core/future.py#L50-L68
|
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
if self._state == self.RUNNING:
self._context.wait_all_futures([self], timeout)
return self._exception
|
[
"def",
"exception",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"self",
".",
"_state",
"==",
"self",
".",
"RUNNING",
":",
"self",
".",
"_context",
".",
"wait_all_futures",
"(",
"[",
"self",
"]",
",",
"timeout",
")",
"return",
"self",
".",
"_exception"
] |
Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
TimeoutError: If the future didn't finish executing before the given
timeout.
|
[
"Return",
"the",
"exception",
"raised",
"by",
"the",
"call",
"that",
"the",
"future",
"represents",
"."
] |
python
|
train
|
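A hedged sketch of polling the exception record above; `future` is assumed to come from an asynchronous malcolm call made elsewhere.

exc = future.exception(timeout=5.0)   # blocks via wait_all_futures while the future is still RUNNING
if exc is None:
    print("completed without raising")
else:
    print("raised:", exc)
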
exa-analytics/exa
|
exa/core/numerical.py
|
https://github.com/exa-analytics/exa/blob/40fb3c22b531d460dbc51e603de75b856cc28f0d/exa/core/numerical.py#L371-L391
|
def check_key(data_object, key, cardinal=False):
"""
Update the value of an index key by matching values or getting positionals.
"""
itype = (int, np.int32, np.int64)
if not isinstance(key, itype + (slice, tuple, list, np.ndarray)):
raise KeyError("Unknown key type {} for key {}".format(type(key), key))
keys = data_object.index.values
if cardinal and data_object._cardinal is not None:
keys = data_object[data_object._cardinal[0]].unique()
elif isinstance(key, itype) and key in keys:
key = list(sorted(data_object.index.values[key]))
elif isinstance(key, itype) and key < 0:
key = list(sorted(data_object.index.values[key]))
elif isinstance(key, itype):
key = [key]
elif isinstance(key, slice):
key = list(sorted(data_object.index.values[key]))
elif isinstance(key, (tuple, list, pd.Index)) and not np.all(k in keys for k in key):
key = list(sorted(data_object.index.values[key]))
return key
|
[
"def",
"check_key",
"(",
"data_object",
",",
"key",
",",
"cardinal",
"=",
"False",
")",
":",
"itype",
"=",
"(",
"int",
",",
"np",
".",
"int32",
",",
"np",
".",
"int64",
")",
"if",
"not",
"isinstance",
"(",
"key",
",",
"itype",
"+",
"(",
"slice",
",",
"tuple",
",",
"list",
",",
"np",
".",
"ndarray",
")",
")",
":",
"raise",
"KeyError",
"(",
"\"Unknown key type {} for key {}\"",
".",
"format",
"(",
"type",
"(",
"key",
")",
",",
"key",
")",
")",
"keys",
"=",
"data_object",
".",
"index",
".",
"values",
"if",
"cardinal",
"and",
"data_object",
".",
"_cardinal",
"is",
"not",
"None",
":",
"keys",
"=",
"data_object",
"[",
"data_object",
".",
"_cardinal",
"[",
"0",
"]",
"]",
".",
"unique",
"(",
")",
"elif",
"isinstance",
"(",
"key",
",",
"itype",
")",
"and",
"key",
"in",
"keys",
":",
"key",
"=",
"list",
"(",
"sorted",
"(",
"data_object",
".",
"index",
".",
"values",
"[",
"key",
"]",
")",
")",
"elif",
"isinstance",
"(",
"key",
",",
"itype",
")",
"and",
"key",
"<",
"0",
":",
"key",
"=",
"list",
"(",
"sorted",
"(",
"data_object",
".",
"index",
".",
"values",
"[",
"key",
"]",
")",
")",
"elif",
"isinstance",
"(",
"key",
",",
"itype",
")",
":",
"key",
"=",
"[",
"key",
"]",
"elif",
"isinstance",
"(",
"key",
",",
"slice",
")",
":",
"key",
"=",
"list",
"(",
"sorted",
"(",
"data_object",
".",
"index",
".",
"values",
"[",
"key",
"]",
")",
")",
"elif",
"isinstance",
"(",
"key",
",",
"(",
"tuple",
",",
"list",
",",
"pd",
".",
"Index",
")",
")",
"and",
"not",
"np",
".",
"all",
"(",
"k",
"in",
"keys",
"for",
"k",
"in",
"key",
")",
":",
"key",
"=",
"list",
"(",
"sorted",
"(",
"data_object",
".",
"index",
".",
"values",
"[",
"key",
"]",
")",
")",
"return",
"key"
] |
Update the value of an index key by matching values or getting positionals.
|
[
"Update",
"the",
"value",
"of",
"an",
"index",
"key",
"by",
"matching",
"values",
"or",
"getting",
"positionals",
"."
] |
python
|
train
|
flowersteam/explauto
|
explauto/interest_model/tree.py
|
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/interest_model/tree.py#L736-L743
|
def fold_up(self, f_inter, f_leaf):
"""
Apply recursively the function f_inter from leaves to root, beginning with function f_leaf on leaves.
"""
return f_leaf(self) if self.leafnode else f_inter(self,
self.lower.fold_up(f_inter, f_leaf),
self.greater.fold_up(f_inter, f_leaf))
|
[
"def",
"fold_up",
"(",
"self",
",",
"f_inter",
",",
"f_leaf",
")",
":",
"return",
"f_leaf",
"(",
"self",
")",
"if",
"self",
".",
"leafnode",
"else",
"f_inter",
"(",
"self",
",",
"self",
".",
"lower",
".",
"fold_up",
"(",
"f_inter",
",",
"f_leaf",
")",
",",
"self",
".",
"greater",
".",
"fold_up",
"(",
"f_inter",
",",
"f_leaf",
")",
")"
] |
Apply recursively the function f_inter from leaves to root, beginning with function f_leaf on leaves.
|
[
"Apply",
"recursively",
"the",
"function",
"f_inter",
"from",
"leaves",
"to",
"root",
"begining",
"with",
"function",
"f_leaf",
"on",
"leaves",
"."
] |
python
|
train
|
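A hedged sketch of the fold_up record above: a fold that counts leaves, assuming `tree` is an interest-model tree node built elsewhere.

n_leaves = tree.fold_up(
    lambda node, lower, greater: lower + greater,  # combine the two child results at an inner node
    lambda leaf: 1,                                # each leaf contributes one
)
print(n_leaves)
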
openstack/python-scciclient
|
scciclient/irmc/ipmi.py
|
https://github.com/openstack/python-scciclient/blob/4585ce2f76853b9773fb190ca0cfff0aa04a7cf8/scciclient/irmc/ipmi.py#L75-L109
|
def get_tpm_status(d_info):
"""Get the TPM support status.
Get the TPM support status of the node.
:param d_info: the list of ipmitool parameters for accessing a node.
:returns: TPM support status
"""
# note:
# Get TPM support status : ipmi cmd '0xF5', valid flags '0xC0'
#
# $ ipmitool raw 0x2E 0xF5 0x80 0x28 0x00 0x81 0xC0
#
# Raw response:
# 80 28 00 C0 C0: True
# 80 28 00 -- --: False (other values than 'C0 C0')
ipmicmd = ipmi_command.Command(bmc=d_info['irmc_address'],
userid=d_info['irmc_username'],
password=d_info['irmc_password'])
try:
response = _send_raw_command(ipmicmd, GET_TPM_STATUS)
if response['code'] != 0:
raise IPMIFailure(
"IPMI operation '%(operation)s' failed: %(error)s" %
{'operation': "GET TMP status",
'error': response.get('error')})
out = ' '.join('{:02X}'.format(x) for x in response['data'])
return out is not None and out[-5:] == 'C0 C0'
except ipmi_exception.IpmiException as e:
raise IPMIFailure(
"IPMI operation '%(operation)s' failed: %(error)s" %
{'operation': "GET TMP status", 'error': e})
|
[
"def",
"get_tpm_status",
"(",
"d_info",
")",
":",
"# note:",
"# Get TPM support status : ipmi cmd '0xF5', valid flags '0xC0'",
"#",
"# $ ipmitool raw 0x2E 0xF5 0x80 0x28 0x00 0x81 0xC0",
"#",
"# Raw response:",
"# 80 28 00 C0 C0: True",
"# 80 28 00 -- --: False (other values than 'C0 C0')",
"ipmicmd",
"=",
"ipmi_command",
".",
"Command",
"(",
"bmc",
"=",
"d_info",
"[",
"'irmc_address'",
"]",
",",
"userid",
"=",
"d_info",
"[",
"'irmc_username'",
"]",
",",
"password",
"=",
"d_info",
"[",
"'irmc_password'",
"]",
")",
"try",
":",
"response",
"=",
"_send_raw_command",
"(",
"ipmicmd",
",",
"GET_TPM_STATUS",
")",
"if",
"response",
"[",
"'code'",
"]",
"!=",
"0",
":",
"raise",
"IPMIFailure",
"(",
"\"IPMI operation '%(operation)s' failed: %(error)s\"",
"%",
"{",
"'operation'",
":",
"\"GET TMP status\"",
",",
"'error'",
":",
"response",
".",
"get",
"(",
"'error'",
")",
"}",
")",
"out",
"=",
"' '",
".",
"join",
"(",
"'{:02X}'",
".",
"format",
"(",
"x",
")",
"for",
"x",
"in",
"response",
"[",
"'data'",
"]",
")",
"return",
"out",
"is",
"not",
"None",
"and",
"out",
"[",
"-",
"5",
":",
"]",
"==",
"'C0 C0'",
"except",
"ipmi_exception",
".",
"IpmiException",
"as",
"e",
":",
"raise",
"IPMIFailure",
"(",
"\"IPMI operation '%(operation)s' failed: %(error)s\"",
"%",
"{",
"'operation'",
":",
"\"GET TMP status\"",
",",
"'error'",
":",
"e",
"}",
")"
] |
Get the TPM support status.
Get the TPM support status of the node.
:param d_info: the list of ipmitool parameters for accessing a node.
:returns: TPM support status
|
[
"Get",
"the",
"TPM",
"support",
"status",
"."
] |
python
|
train
|
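A hedged sketch of the get_tpm_status record above; the d_info keys mirror the ones read in the function body, and the address/credentials are illustrative.

from scciclient.irmc.ipmi import get_tpm_status

d_info = {
    "irmc_address": "192.0.2.10",
    "irmc_username": "admin",
    "irmc_password": "secret",
}
print(get_tpm_status(d_info))   # True only when the raw response ends with 'C0 C0'
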
bunq/sdk_python
|
bunq/sdk/security.py
|
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/security.py#L147-L163
|
def encrypt(api_context, request_bytes, custom_headers):
"""
:type api_context: bunq.sdk.context.ApiContext
:type request_bytes: bytes
:type custom_headers: dict[str, str]
:rtype: bytes
"""
key = Random.get_random_bytes(_AES_KEY_SIZE)
iv = Random.get_random_bytes(_BLOCK_SIZE)
_add_header_client_encryption_key(api_context, key, custom_headers)
_add_header_client_encryption_iv(iv, custom_headers)
request_bytes = _encrypt_request_bytes(request_bytes, key, iv)
_add_header_client_encryption_hmac(request_bytes, key, iv, custom_headers)
return request_bytes
|
[
"def",
"encrypt",
"(",
"api_context",
",",
"request_bytes",
",",
"custom_headers",
")",
":",
"key",
"=",
"Random",
".",
"get_random_bytes",
"(",
"_AES_KEY_SIZE",
")",
"iv",
"=",
"Random",
".",
"get_random_bytes",
"(",
"_BLOCK_SIZE",
")",
"_add_header_client_encryption_key",
"(",
"api_context",
",",
"key",
",",
"custom_headers",
")",
"_add_header_client_encryption_iv",
"(",
"iv",
",",
"custom_headers",
")",
"request_bytes",
"=",
"_encrypt_request_bytes",
"(",
"request_bytes",
",",
"key",
",",
"iv",
")",
"_add_header_client_encryption_hmac",
"(",
"request_bytes",
",",
"key",
",",
"iv",
",",
"custom_headers",
")",
"return",
"request_bytes"
] |
:type api_context: bunq.sdk.context.ApiContext
:type request_bytes: bytes
:type custom_headers: dict[str, str]
:rtype: bytes
|
[
":",
"type",
"api_context",
":",
"bunq",
".",
"sdk",
".",
"context",
".",
"ApiContext",
":",
"type",
"request_bytes",
":",
"bytes",
":",
"type",
"custom_headers",
":",
"dict",
"[",
"str",
"str",
"]"
] |
python
|
train
|
phaethon/kamene
|
kamene/contrib/gsm_um.py
|
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L868-L882
|
def pagingRequestType2(MobileId_presence=0):
"""PAGING REQUEST TYPE 2 Section 9.1.23"""
a = L2PseudoLength()
b = TpPd(pd=0x6)
c = MessageType(mesType=0x22) # 00100010
d = PageModeAndChannelNeeded()
f = MobileId()
g = MobileId()
packet = a / b / c / d / f / g
if MobileId_presence is 1:
h = MobileIdHdr(ieiMI=0x17, eightBitMI=0x0)
packet = packet / h
i = P2RestOctets()
packet = packet / i
return packet
|
[
"def",
"pagingRequestType2",
"(",
"MobileId_presence",
"=",
"0",
")",
":",
"a",
"=",
"L2PseudoLength",
"(",
")",
"b",
"=",
"TpPd",
"(",
"pd",
"=",
"0x6",
")",
"c",
"=",
"MessageType",
"(",
"mesType",
"=",
"0x22",
")",
"# 00100010",
"d",
"=",
"PageModeAndChannelNeeded",
"(",
")",
"f",
"=",
"MobileId",
"(",
")",
"g",
"=",
"MobileId",
"(",
")",
"packet",
"=",
"a",
"/",
"b",
"/",
"c",
"/",
"d",
"/",
"f",
"/",
"g",
"if",
"MobileId_presence",
"is",
"1",
":",
"h",
"=",
"MobileIdHdr",
"(",
"ieiMI",
"=",
"0x17",
",",
"eightBitMI",
"=",
"0x0",
")",
"packet",
"=",
"packet",
"/",
"h",
"i",
"=",
"P2RestOctets",
"(",
")",
"packet",
"=",
"packet",
"/",
"i",
"return",
"packet"
] |
PAGING REQUEST TYPE 2 Section 9.1.23
|
[
"PAGING",
"REQUEST",
"TYPE",
"2",
"Section",
"9",
".",
"1",
".",
"23"
] |
python
|
train
|
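A hedged sketch of the pagingRequestType2 record above, with and without the optional third Mobile ID.

from kamene.contrib.gsm_um import pagingRequestType2

pkt_basic = pagingRequestType2()                    # two mandatory MobileId IEs only
pkt_full = pagingRequestType2(MobileId_presence=1)  # appends the optional MobileIdHdr IE
pkt_full.show()                                     # kamene packets support show()
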
theelous3/asks
|
asks/request_object.py
|
https://github.com/theelous3/asks/blob/ea522ea971ecb031d488a6301dc2718516cadcd6/asks/request_object.py#L641-L657
|
async def _auth_handler_post_get_auth(self):
'''
If the user supplied auth does rely on a response
(is a PostResponseAuth object) then we call the auth's __call__
returning a dict to update the request's headers with, as long
as there is an appropriate 401'd response object to calculate auth
details from.
'''
# pylint: disable=not-callable
if isinstance(self.auth, PostResponseAuth):
if self.history_objects:
authable_resp = self.history_objects[-1]
if authable_resp.status_code == 401:
if not self.auth.auth_attempted:
self.auth.auth_attempted = True
return await self.auth(authable_resp, self)
return {}
|
[
"async",
"def",
"_auth_handler_post_get_auth",
"(",
"self",
")",
":",
"# pylint: disable=not-callable",
"if",
"isinstance",
"(",
"self",
".",
"auth",
",",
"PostResponseAuth",
")",
":",
"if",
"self",
".",
"history_objects",
":",
"authable_resp",
"=",
"self",
".",
"history_objects",
"[",
"-",
"1",
"]",
"if",
"authable_resp",
".",
"status_code",
"==",
"401",
":",
"if",
"not",
"self",
".",
"auth",
".",
"auth_attempted",
":",
"self",
".",
"auth",
".",
"auth_attempted",
"=",
"True",
"return",
"await",
"self",
".",
"auth",
"(",
"authable_resp",
",",
"self",
")",
"return",
"{",
"}"
] |
If the user supplied auth does rely on a response
(is a PostResponseAuth object) then we call the auth's __call__
returning a dict to update the request's headers with, as long
as there is an appropriate 401'd response object to calculate auth
details from.
|
[
"If",
"the",
"user",
"supplied",
"auth",
"does",
"rely",
"on",
"a",
"response",
"(",
"is",
"a",
"PostResponseAuth",
"object",
")",
"then",
"we",
"call",
"the",
"auth",
"s",
"__call__",
"returning",
"a",
"dict",
"to",
"update",
"the",
"request",
"s",
"headers",
"with",
"as",
"long",
"as",
"there",
"is",
"an",
"appropriate",
"401",
"d",
"response",
"object",
"to",
"calculate",
"auth",
"details",
"from",
"."
] |
python
|
train
|
dcramer/logan
|
logan/runner.py
|
https://github.com/dcramer/logan/blob/8b18456802d631a822e2823bf9a4e9810a15a20e/logan/runner.py#L63-L141
|
def configure_app(config_path=None, project=None, default_config_path=None,
default_settings=None, settings_initializer=None,
settings_envvar=None, initializer=None, allow_extras=True,
config_module_name=None, runner_name=None, on_configure=None):
"""
:param project: should represent the canonical name for the project, generally
the same name it assigned in distutils.
:param default_config_path: the default location for the configuration file.
:param default_settings: default settings to load (think inheritance).
:param settings_initializer: a callback function which should return a string
representing the default settings template to generate.
:param initializer: a callback function which will be executed before the command
is executed. It is passed a dictionary of various configuration attributes.
"""
global __configured
project_filename = sanitize_name(project)
if default_config_path is None:
default_config_path = '~/%s/%s.conf.py' % (project_filename, project_filename)
if settings_envvar is None:
settings_envvar = project_filename.upper() + '_CONF'
if config_module_name is None:
config_module_name = project_filename + '_config'
# normalize path
if settings_envvar in os.environ:
default_config_path = os.environ.get(settings_envvar)
else:
default_config_path = os.path.normpath(os.path.abspath(os.path.expanduser(default_config_path)))
if not config_path:
config_path = default_config_path
config_path = os.path.expanduser(config_path)
if not os.path.exists(config_path):
if runner_name:
raise ValueError("Configuration file does not exist. Use '%s init' to initialize the file." % (runner_name,))
raise ValueError("Configuration file does not exist at %r" % (config_path,))
os.environ['DJANGO_SETTINGS_MODULE'] = config_module_name
def settings_callback(settings):
if initializer is None:
return
try:
initializer({
'project': project,
'config_path': config_path,
'settings': settings,
})
except Exception:
# XXX: Django doesn't like various errors in this path
import sys
import traceback
traceback.print_exc()
sys.exit(1)
importer.install(
config_module_name, config_path, default_settings,
allow_extras=allow_extras, callback=settings_callback)
__configured = True
# HACK(dcramer): we need to force access of django.conf.settings to
# ensure we don't hit any import-driven recursive behavior
from django.conf import settings
hasattr(settings, 'INSTALLED_APPS')
if on_configure:
on_configure({
'project': project,
'config_path': config_path,
'settings': settings,
})
|
[
"def",
"configure_app",
"(",
"config_path",
"=",
"None",
",",
"project",
"=",
"None",
",",
"default_config_path",
"=",
"None",
",",
"default_settings",
"=",
"None",
",",
"settings_initializer",
"=",
"None",
",",
"settings_envvar",
"=",
"None",
",",
"initializer",
"=",
"None",
",",
"allow_extras",
"=",
"True",
",",
"config_module_name",
"=",
"None",
",",
"runner_name",
"=",
"None",
",",
"on_configure",
"=",
"None",
")",
":",
"global",
"__configured",
"project_filename",
"=",
"sanitize_name",
"(",
"project",
")",
"if",
"default_config_path",
"is",
"None",
":",
"default_config_path",
"=",
"'~/%s/%s.conf.py'",
"%",
"(",
"project_filename",
",",
"project_filename",
")",
"if",
"settings_envvar",
"is",
"None",
":",
"settings_envvar",
"=",
"project_filename",
".",
"upper",
"(",
")",
"+",
"'_CONF'",
"if",
"config_module_name",
"is",
"None",
":",
"config_module_name",
"=",
"project_filename",
"+",
"'_config'",
"# normalize path",
"if",
"settings_envvar",
"in",
"os",
".",
"environ",
":",
"default_config_path",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"settings_envvar",
")",
"else",
":",
"default_config_path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"default_config_path",
")",
")",
")",
"if",
"not",
"config_path",
":",
"config_path",
"=",
"default_config_path",
"config_path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"config_path",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"config_path",
")",
":",
"if",
"runner_name",
":",
"raise",
"ValueError",
"(",
"\"Configuration file does not exist. Use '%s init' to initialize the file.\"",
"%",
"(",
"runner_name",
",",
")",
")",
"raise",
"ValueError",
"(",
"\"Configuration file does not exist at %r\"",
"%",
"(",
"config_path",
",",
")",
")",
"os",
".",
"environ",
"[",
"'DJANGO_SETTINGS_MODULE'",
"]",
"=",
"config_module_name",
"def",
"settings_callback",
"(",
"settings",
")",
":",
"if",
"initializer",
"is",
"None",
":",
"return",
"try",
":",
"initializer",
"(",
"{",
"'project'",
":",
"project",
",",
"'config_path'",
":",
"config_path",
",",
"'settings'",
":",
"settings",
",",
"}",
")",
"except",
"Exception",
":",
"# XXX: Django doesn't like various errors in this path",
"import",
"sys",
"import",
"traceback",
"traceback",
".",
"print_exc",
"(",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"importer",
".",
"install",
"(",
"config_module_name",
",",
"config_path",
",",
"default_settings",
",",
"allow_extras",
"=",
"allow_extras",
",",
"callback",
"=",
"settings_callback",
")",
"__configured",
"=",
"True",
"# HACK(dcramer): we need to force access of django.conf.settings to",
"# ensure we don't hit any import-driven recursive behavior",
"from",
"django",
".",
"conf",
"import",
"settings",
"hasattr",
"(",
"settings",
",",
"'INSTALLED_APPS'",
")",
"if",
"on_configure",
":",
"on_configure",
"(",
"{",
"'project'",
":",
"project",
",",
"'config_path'",
":",
"config_path",
",",
"'settings'",
":",
"settings",
",",
"}",
")"
] |
:param project: should represent the canonical name for the project, generally
the same name it assigned in distutils.
:param default_config_path: the default location for the configuration file.
:param default_settings: default settings to load (think inheritance).
:param settings_initializer: a callback function which should return a string
representing the default settings template to generate.
:param initializer: a callback function which will be executed before the command
is executed. It is passed a dictionary of various configuration attributes.
|
[
":",
"param",
"project",
":",
"should",
"represent",
"the",
"canonical",
"name",
"for",
"the",
"project",
"generally",
"the",
"same",
"name",
"it",
"assigned",
"in",
"distutils",
".",
":",
"param",
"default_config_path",
":",
"the",
"default",
"location",
"for",
"the",
"configuration",
"file",
".",
":",
"param",
"default_settings",
":",
"default",
"settings",
"to",
"load",
"(",
"think",
"inheritence",
")",
".",
":",
"param",
"settings_initializer",
":",
"a",
"callback",
"function",
"which",
"should",
"return",
"a",
"string",
"representing",
"the",
"default",
"settings",
"template",
"to",
"generate",
".",
":",
"param",
"initializer",
":",
"a",
"callback",
"function",
"which",
"will",
"be",
"executed",
"before",
"the",
"command",
"is",
"executed",
".",
"It",
"is",
"passed",
"a",
"dictionary",
"of",
"various",
"configuration",
"attributes",
"."
] |
python
|
train
|
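A hedged sketch of wiring the configure_app record above into a project's command-line runner; all names and paths are illustrative.

from logan.runner import configure_app

configure_app(
    project="myproject",
    default_config_path="~/.myproject/myproject.conf.py",
    default_settings="myproject.conf.defaults",
    config_module_name="myproject_config",
    runner_name="myproject",
)   # raises ValueError if the configuration file has not been initialized yet
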
tensorflow/tensor2tensor
|
tensor2tensor/models/video/base_vae.py
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/base_vae.py#L97-L118
|
def construct_latent_tower(self, images, time_axis):
"""Create the latent tower."""
# No latent in the first phase
first_phase = tf.less(
self.get_iteration_num(), self.hparams.num_iterations_1st_stage)
# use all frames by default but this allows more
# predicted frames at inference time
latent_num_frames = self.hparams.latent_num_frames
tf.logging.info("Creating latent tower with %d frames." % latent_num_frames)
if latent_num_frames > 0:
images = images[:, :latent_num_frames]
return common_video.conv_latent_tower(
images=images,
time_axis=time_axis,
latent_channels=self.hparams.latent_channels,
min_logvar=self.hparams.latent_std_min,
is_training=self.is_training,
random_latent=first_phase,
tiny_mode=self.hparams.tiny_mode,
small_mode=self.hparams.small_mode)
|
[
"def",
"construct_latent_tower",
"(",
"self",
",",
"images",
",",
"time_axis",
")",
":",
"# No latent in the first phase",
"first_phase",
"=",
"tf",
".",
"less",
"(",
"self",
".",
"get_iteration_num",
"(",
")",
",",
"self",
".",
"hparams",
".",
"num_iterations_1st_stage",
")",
"# use all frames by default but this allows more",
"# predicted frames at inference time",
"latent_num_frames",
"=",
"self",
".",
"hparams",
".",
"latent_num_frames",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Creating latent tower with %d frames.\"",
"%",
"latent_num_frames",
")",
"if",
"latent_num_frames",
">",
"0",
":",
"images",
"=",
"images",
"[",
":",
",",
":",
"latent_num_frames",
"]",
"return",
"common_video",
".",
"conv_latent_tower",
"(",
"images",
"=",
"images",
",",
"time_axis",
"=",
"time_axis",
",",
"latent_channels",
"=",
"self",
".",
"hparams",
".",
"latent_channels",
",",
"min_logvar",
"=",
"self",
".",
"hparams",
".",
"latent_std_min",
",",
"is_training",
"=",
"self",
".",
"is_training",
",",
"random_latent",
"=",
"first_phase",
",",
"tiny_mode",
"=",
"self",
".",
"hparams",
".",
"tiny_mode",
",",
"small_mode",
"=",
"self",
".",
"hparams",
".",
"small_mode",
")"
] |
Create the latent tower.
|
[
"Create",
"the",
"latent",
"tower",
"."
] |
python
|
train
|
opencobra/memote
|
memote/support/annotation.py
|
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/annotation.py#L125-L143
|
def find_components_without_annotation(model, components):
"""
Find model components with empty annotation attributes.
Parameters
----------
model : cobra.Model
A cobrapy metabolic model.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
list
The components without any annotation.
"""
return [elem for elem in getattr(model, components) if
elem.annotation is None or len(elem.annotation) == 0]
|
[
"def",
"find_components_without_annotation",
"(",
"model",
",",
"components",
")",
":",
"return",
"[",
"elem",
"for",
"elem",
"in",
"getattr",
"(",
"model",
",",
"components",
")",
"if",
"elem",
".",
"annotation",
"is",
"None",
"or",
"len",
"(",
"elem",
".",
"annotation",
")",
"==",
"0",
"]"
] |
Find model components with empty annotation attributes.
Parameters
----------
model : cobra.Model
A cobrapy metabolic model.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
list
The components without any annotation.
|
[
"Find",
"model",
"components",
"with",
"empty",
"annotation",
"attributes",
"."
] |
python
|
train
|
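A hedged sketch of the find_components_without_annotation record above; `model` is assumed to be a cobra.Model loaded elsewhere.

from memote.support.annotation import find_components_without_annotation

missing = find_components_without_annotation(model, "metabolites")
print(len(missing), "metabolites carry no annotation at all")
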
bwohlberg/sporco
|
sporco/dictlrn/cbpdndl.py
|
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/dictlrn/cbpdndl.py#L43-L57
|
def ConvBPDNOptionsDefaults(method='admm'):
"""Get defaults dict for the ConvBPDN class specified by the ``method``
parameter.
"""
dflt = copy.deepcopy(cbpdn_class_label_lookup(method).Options.defaults)
if method == 'admm':
dflt.update({'MaxMainIter': 1, 'AutoRho':
{'Period': 10, 'AutoScaling': False,
'RsdlRatio': 10.0, 'Scaling': 2.0,
'RsdlTarget': 1.0}})
else:
dflt.update({'MaxMainIter': 1, 'BackTrack':
{'gamma_u': 1.2, 'MaxIter': 50}})
return dflt
|
[
"def",
"ConvBPDNOptionsDefaults",
"(",
"method",
"=",
"'admm'",
")",
":",
"dflt",
"=",
"copy",
".",
"deepcopy",
"(",
"cbpdn_class_label_lookup",
"(",
"method",
")",
".",
"Options",
".",
"defaults",
")",
"if",
"method",
"==",
"'admm'",
":",
"dflt",
".",
"update",
"(",
"{",
"'MaxMainIter'",
":",
"1",
",",
"'AutoRho'",
":",
"{",
"'Period'",
":",
"10",
",",
"'AutoScaling'",
":",
"False",
",",
"'RsdlRatio'",
":",
"10.0",
",",
"'Scaling'",
":",
"2.0",
",",
"'RsdlTarget'",
":",
"1.0",
"}",
"}",
")",
"else",
":",
"dflt",
".",
"update",
"(",
"{",
"'MaxMainIter'",
":",
"1",
",",
"'BackTrack'",
":",
"{",
"'gamma_u'",
":",
"1.2",
",",
"'MaxIter'",
":",
"50",
"}",
"}",
")",
"return",
"dflt"
] |
Get defaults dict for the ConvBPDN class specified by the ``method``
parameter.
|
[
"Get",
"defaults",
"dict",
"for",
"the",
"ConvBPDN",
"class",
"specified",
"by",
"the",
"method",
"parameter",
"."
] |
python
|
train
|
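A hedged sketch of the ConvBPDNOptionsDefaults record above, reading back two of the ADMM-specific overrides set in the code.

from sporco.dictlrn.cbpdndl import ConvBPDNOptionsDefaults

opts = ConvBPDNOptionsDefaults("admm")
print(opts["MaxMainIter"], opts["AutoRho"]["Period"])   # -> 1 10, per the update above
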
SolutionsCloud/apidoc
|
apidoc/service/template.py
|
https://github.com/SolutionsCloud/apidoc/blob/1ee25d886a5bea11dc744c2f3d0abb0b55d942e1/apidoc/service/template.py#L19-L84
|
def render(self, sources, config, out=sys.stdout):
"""Render the documentation as defined in config Object
"""
logger = logging.getLogger()
template = self.env.get_template(self.input)
output = template.render(sources=sources, layout=config["output"]["layout"], config=config["output"])
if self.output == "stdout":
out.write(output)
else:
dir = os.path.dirname(self.output)
if dir and not os.path.exists(dir):
try:
os.makedirs(dir)
except IOError as ioerror:
logger.error('Error on creating dir "{}": {}'.format(dir, str(ioerror)))
return
if config["output"]["template"] == "default":
if config["output"]["componants"] == "local":
for template_dir in self.env.loader.searchpath:
files = (
os.path.join(template_dir, "resource", "js", "combined.js"),
os.path.join(template_dir, "resource", "css", "combined.css"),
os.path.join(template_dir, "resource", "font", "apidoc.eot"),
os.path.join(template_dir, "resource", "font", "apidoc.woff"),
os.path.join(template_dir, "resource", "font", "apidoc.ttf"),
os.path.join(template_dir, "resource", "font", "source-code-pro.eot"),
os.path.join(template_dir, "resource", "font", "source-code-pro.woff"),
os.path.join(template_dir, "resource", "font", "source-code-pro.ttf"),
)
for file in files:
filename = os.path.basename(file)
dirname = os.path.basename(os.path.dirname(file))
if not os.path.exists(os.path.join(dir, dirname)):
os.makedirs(os.path.join(dir, dirname))
if os.path.exists(file):
shutil.copyfile(file, os.path.join(dir, dirname, filename))
else:
logger.warn('Missing resource file "%s". If you run apidoc in virtualenv, run "%s"' % (filename, "python setup.py resources"))
if config["output"]["componants"] == "remote":
for template_dir in self.env.loader.searchpath:
files = (
os.path.join(template_dir, "resource", "js", "combined.js"),
os.path.join(template_dir, "resource", "css", "combined-embedded.css"),
os.path.join(template_dir, "resource", "font", "apidoc.eot"),
os.path.join(template_dir, "resource", "font", "apidoc.woff"),
os.path.join(template_dir, "resource", "font", "apidoc.ttf"),
os.path.join(template_dir, "resource", "font", "source-code-pro.eot"),
os.path.join(template_dir, "resource", "font", "source-code-pro.woff"),
os.path.join(template_dir, "resource", "font", "source-code-pro.ttf"),
)
for file in files:
filename = os.path.basename(file)
dirname = os.path.basename(os.path.dirname(file))
if not os.path.exists(os.path.join(dir, dirname)):
os.makedirs(os.path.join(dir, dirname))
if os.path.exists(file):
shutil.copyfile(file, os.path.join(dir, dirname, filename))
else:
logger.warn('Missing resource file "%s". If you run apidoc in virtualenv, run "%s"' % (filename, "python setup.py resources"))
open(self.output, "w").write(output)
|
[
"def",
"render",
"(",
"self",
",",
"sources",
",",
"config",
",",
"out",
"=",
"sys",
".",
"stdout",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
")",
"template",
"=",
"self",
".",
"env",
".",
"get_template",
"(",
"self",
".",
"input",
")",
"output",
"=",
"template",
".",
"render",
"(",
"sources",
"=",
"sources",
",",
"layout",
"=",
"config",
"[",
"\"output\"",
"]",
"[",
"\"layout\"",
"]",
",",
"config",
"=",
"config",
"[",
"\"output\"",
"]",
")",
"if",
"self",
".",
"output",
"==",
"\"stdout\"",
":",
"out",
".",
"write",
"(",
"output",
")",
"else",
":",
"dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"output",
")",
"if",
"dir",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dir",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"dir",
")",
"except",
"IOError",
"as",
"ioerror",
":",
"logger",
".",
"error",
"(",
"'Error on creating dir \"{}\": {}'",
".",
"format",
"(",
"dir",
",",
"str",
"(",
"ioerror",
")",
")",
")",
"return",
"if",
"config",
"[",
"\"output\"",
"]",
"[",
"\"template\"",
"]",
"==",
"\"default\"",
":",
"if",
"config",
"[",
"\"output\"",
"]",
"[",
"\"componants\"",
"]",
"==",
"\"local\"",
":",
"for",
"template_dir",
"in",
"self",
".",
"env",
".",
"loader",
".",
"searchpath",
":",
"files",
"=",
"(",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"\"resource\"",
",",
"\"js\"",
",",
"\"combined.js\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"\"resource\"",
",",
"\"css\"",
",",
"\"combined.css\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"\"resource\"",
",",
"\"font\"",
",",
"\"apidoc.eot\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"\"resource\"",
",",
"\"font\"",
",",
"\"apidoc.woff\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"\"resource\"",
",",
"\"font\"",
",",
"\"apidoc.ttf\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"\"resource\"",
",",
"\"font\"",
",",
"\"source-code-pro.eot\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"\"resource\"",
",",
"\"font\"",
",",
"\"source-code-pro.woff\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"\"resource\"",
",",
"\"font\"",
",",
"\"source-code-pro.ttf\"",
")",
",",
")",
"for",
"file",
"in",
"files",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"file",
")",
"dirname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"file",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"dirname",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"dirname",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file",
")",
":",
"shutil",
".",
"copyfile",
"(",
"file",
",",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"dirname",
",",
"filename",
")",
")",
"else",
":",
"logger",
".",
"warn",
"(",
"'Missing resource file \"%s\". If you run apidoc in virtualenv, run \"%s\"'",
"%",
"(",
"filename",
",",
"\"python setup.py resources\"",
")",
")",
"if",
"config",
"[",
"\"output\"",
"]",
"[",
"\"componants\"",
"]",
"==",
"\"remote\"",
":",
"for",
"template_dir",
"in",
"self",
".",
"env",
".",
"loader",
".",
"searchpath",
":",
"files",
"=",
"(",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"\"resource\"",
",",
"\"js\"",
",",
"\"combined.js\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"\"resource\"",
",",
"\"css\"",
",",
"\"combined-embedded.css\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"\"resource\"",
",",
"\"font\"",
",",
"\"apidoc.eot\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"\"resource\"",
",",
"\"font\"",
",",
"\"apidoc.woff\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"\"resource\"",
",",
"\"font\"",
",",
"\"apidoc.ttf\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"\"resource\"",
",",
"\"font\"",
",",
"\"source-code-pro.eot\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"\"resource\"",
",",
"\"font\"",
",",
"\"source-code-pro.woff\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"\"resource\"",
",",
"\"font\"",
",",
"\"source-code-pro.ttf\"",
")",
",",
")",
"for",
"file",
"in",
"files",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"file",
")",
"dirname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"file",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"dirname",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"dirname",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file",
")",
":",
"shutil",
".",
"copyfile",
"(",
"file",
",",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"dirname",
",",
"filename",
")",
")",
"else",
":",
"logger",
".",
"warn",
"(",
"'Missing resource file \"%s\". If you run apidoc in virtualenv, run \"%s\"'",
"%",
"(",
"filename",
",",
"\"python setup.py resources\"",
")",
")",
"open",
"(",
"self",
".",
"output",
",",
"\"w\"",
")",
".",
"write",
"(",
"output",
")"
] |
Render the documentation as defined in config Object
|
[
"Render",
"the",
"documentation",
"as",
"defined",
"in",
"config",
"Object"
] |
python
|
train
|
mosdef-hub/mbuild
|
mbuild/coordinate_transform.py
|
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/coordinate_transform.py#L256-L262
|
def angle(u, v, w=None):
"""Returns the angle in radians between two vectors. """
if w is not None:
u = u - v
v = w - v
c = np.dot(u, v) / norm(u) / norm(v)
return np.arccos(np.clip(c, -1, 1))
|
[
"def",
"angle",
"(",
"u",
",",
"v",
",",
"w",
"=",
"None",
")",
":",
"if",
"w",
"is",
"not",
"None",
":",
"u",
"=",
"u",
"-",
"v",
"v",
"=",
"w",
"-",
"v",
"c",
"=",
"np",
".",
"dot",
"(",
"u",
",",
"v",
")",
"/",
"norm",
"(",
"u",
")",
"/",
"norm",
"(",
"v",
")",
"return",
"np",
".",
"arccos",
"(",
"np",
".",
"clip",
"(",
"c",
",",
"-",
"1",
",",
"1",
")",
")"
] |
Returns the angle in radians between two vectors.
|
[
"Returns",
"the",
"angle",
"in",
"radians",
"between",
"two",
"vectors",
"."
] |
python
|
train
|
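A minimal worked example for the angle helper in the record above; the function is redefined locally so the snippet runs standalone, and norm is assumed to be numpy.linalg.norm as in the original mbuild module.
import numpy as np
from numpy.linalg import norm

def angle(u, v, w=None):
    """Returns the angle in radians between two vectors. """
    if w is not None:
        u = u - v
        v = w - v
    c = np.dot(u, v) / norm(u) / norm(v)
    return np.arccos(np.clip(c, -1, 1))

# two orthogonal vectors give pi/2 ...
print(angle(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])))  # ~1.5708
# ... and with a vertex w supplied, three collinear points give pi
print(angle(np.array([1.0, 0.0, 0.0]),
            np.array([0.0, 0.0, 0.0]),
            np.array([-1.0, 0.0, 0.0])))  # ~3.1416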
atztogo/phono3py
|
phono3py/phonon3/displacement_fc3.py
|
https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/displacement_fc3.py#L52-L148
|
def get_third_order_displacements(cell,
symmetry,
is_plusminus='auto',
is_diagonal=False):
"""Create dispalcement dataset
Note
----
Atoms 1, 2, and 3 are defined as follows:
Atom 1: The first displaced atom. Third order force constant
between Atoms 1, 2, and 3 is calculated.
Atom 2: The second displaced atom. Second order force constant
between Atoms 2 and 3 is calculated.
Atom 3: Force is measured on this atom.
Parameters
----------
cell : PhonopyAtoms
Supercell
symmetry : Symmetry
Symmetry of supercell
is_plusminus : str or bool, optional
Type of displacements, plus only (False), always plus and minus (True),
and plus and minus depending on site symmetry ('auto').
is_diagonal : bool, optional
Whether allow diagonal displacements of Atom 2 or not
Returns
-------
dict
Data structure is like:
{'natom': 64,
'cutoff_distance': 4.000000,
'first_atoms':
[{'number': atom1,
'displacement': [0.03, 0., 0.],
'second_atoms': [ {'number': atom2,
'displacement': [0., -0.03, 0.],
'distance': 2.353},
{'number': ... }, ... ] },
{'number': atom1, ... } ]}
"""
positions = cell.get_scaled_positions()
lattice = cell.get_cell().T
# Least displacements of first atoms (Atom 1) are searched by
# using respective site symmetries of the original crystal.
# 'is_diagonal=False' below is made intentionally to expect
# better accuracy.
disps_first = get_least_displacements(symmetry,
is_plusminus=is_plusminus,
is_diagonal=False)
symprec = symmetry.get_symmetry_tolerance()
dds = []
for disp in disps_first:
atom1 = disp[0]
disp1 = disp[1:4]
site_sym = symmetry.get_site_symmetry(atom1)
dds_atom1 = {'number': atom1,
'direction': disp1,
'second_atoms': []}
# Reduced site symmetry at the first atom with respect to
# the displacement of the first atoms.
reduced_site_sym = get_reduced_site_symmetry(site_sym, disp1, symprec)
# Searching orbits (second atoms) with respect to
# the first atom and its reduced site symmetry.
second_atoms = get_least_orbits(atom1,
cell,
reduced_site_sym,
symprec)
for atom2 in second_atoms:
dds_atom2 = get_next_displacements(atom1,
atom2,
reduced_site_sym,
lattice,
positions,
symprec,
is_diagonal)
min_vec = get_equivalent_smallest_vectors(atom1,
atom2,
cell,
symprec)[0]
min_distance = np.linalg.norm(np.dot(lattice, min_vec))
dds_atom2['distance'] = min_distance
dds_atom1['second_atoms'].append(dds_atom2)
dds.append(dds_atom1)
return dds
|
[
"def",
"get_third_order_displacements",
"(",
"cell",
",",
"symmetry",
",",
"is_plusminus",
"=",
"'auto'",
",",
"is_diagonal",
"=",
"False",
")",
":",
"positions",
"=",
"cell",
".",
"get_scaled_positions",
"(",
")",
"lattice",
"=",
"cell",
".",
"get_cell",
"(",
")",
".",
"T",
"# Least displacements of first atoms (Atom 1) are searched by",
"# using respective site symmetries of the original crystal.",
"# 'is_diagonal=False' below is made intentionally to expect",
"# better accuracy.",
"disps_first",
"=",
"get_least_displacements",
"(",
"symmetry",
",",
"is_plusminus",
"=",
"is_plusminus",
",",
"is_diagonal",
"=",
"False",
")",
"symprec",
"=",
"symmetry",
".",
"get_symmetry_tolerance",
"(",
")",
"dds",
"=",
"[",
"]",
"for",
"disp",
"in",
"disps_first",
":",
"atom1",
"=",
"disp",
"[",
"0",
"]",
"disp1",
"=",
"disp",
"[",
"1",
":",
"4",
"]",
"site_sym",
"=",
"symmetry",
".",
"get_site_symmetry",
"(",
"atom1",
")",
"dds_atom1",
"=",
"{",
"'number'",
":",
"atom1",
",",
"'direction'",
":",
"disp1",
",",
"'second_atoms'",
":",
"[",
"]",
"}",
"# Reduced site symmetry at the first atom with respect to",
"# the displacement of the first atoms.",
"reduced_site_sym",
"=",
"get_reduced_site_symmetry",
"(",
"site_sym",
",",
"disp1",
",",
"symprec",
")",
"# Searching orbits (second atoms) with respect to",
"# the first atom and its reduced site symmetry.",
"second_atoms",
"=",
"get_least_orbits",
"(",
"atom1",
",",
"cell",
",",
"reduced_site_sym",
",",
"symprec",
")",
"for",
"atom2",
"in",
"second_atoms",
":",
"dds_atom2",
"=",
"get_next_displacements",
"(",
"atom1",
",",
"atom2",
",",
"reduced_site_sym",
",",
"lattice",
",",
"positions",
",",
"symprec",
",",
"is_diagonal",
")",
"min_vec",
"=",
"get_equivalent_smallest_vectors",
"(",
"atom1",
",",
"atom2",
",",
"cell",
",",
"symprec",
")",
"[",
"0",
"]",
"min_distance",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"np",
".",
"dot",
"(",
"lattice",
",",
"min_vec",
")",
")",
"dds_atom2",
"[",
"'distance'",
"]",
"=",
"min_distance",
"dds_atom1",
"[",
"'second_atoms'",
"]",
".",
"append",
"(",
"dds_atom2",
")",
"dds",
".",
"append",
"(",
"dds_atom1",
")",
"return",
"dds"
] |
Create displacement dataset
Note
----
Atoms 1, 2, and 3 are defined as follows:
Atom 1: The first displaced atom. Third order force constant
between Atoms 1, 2, and 3 is calculated.
Atom 2: The second displaced atom. Second order force constant
between Atoms 2 and 3 is calculated.
Atom 3: Force is measured on this atom.
Parameters
----------
cell : PhonopyAtoms
Supercell
symmetry : Symmetry
Symmetry of supercell
is_plusminus : str or bool, optional
Type of displacements, plus only (False), always plus and minus (True),
and plus and minus depending on site symmetry ('auto').
is_diagonal : bool, optional
Whether allow diagonal displacements of Atom 2 or not
Returns
-------
dict
Data structure is like:
{'natom': 64,
'cutoff_distance': 4.000000,
'first_atoms':
[{'number': atom1,
'displacement': [0.03, 0., 0.],
'second_atoms': [ {'number': atom2,
'displacement': [0., -0.03, 0.],
'distance': 2.353},
{'number': ... }, ... ] },
{'number': atom1, ... } ]}
|
[
"Create",
"dispalcement",
"dataset"
] |
python
|
train
|
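An illustrative walk over the dataset layout described in the Returns section of the docstring above; the dataset dict is hand-written to match that shape with hypothetical numbers, not output from a real phono3py run.
dataset = {
    'natom': 64,
    'cutoff_distance': 4.0,
    'first_atoms': [
        {'number': 0,
         'displacement': [0.03, 0.0, 0.0],
         'second_atoms': [{'number': 1,
                           'displacement': [0.0, -0.03, 0.0],
                           'distance': 2.353}]},
    ],
}

# each (first atom, second atom) pair corresponds to one doubly displaced supercell
for first in dataset['first_atoms']:
    for second in first['second_atoms']:
        print(first['number'], second['number'], second['distance'])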
gem/oq-engine
|
openquake/hazardlib/geo/mesh.py
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/mesh.py#L395-L455
|
def _get_proj_enclosing_polygon(self):
"""
See :meth:`Mesh._get_proj_enclosing_polygon`.
:class:`RectangularMesh` contains information about the relative
positions of points, so it allows defining the minimum polygon
containing the projection of the mesh, which doesn't necessarily
have to be convex (in contrast to the :class:`Mesh` implementation).
:returns:
Same structure as :meth:`Mesh._get_proj_convex_hull`.
"""
if self.lons.size < 4:
# the mesh doesn't contain even a single cell
return self._get_proj_convex_hull()
proj = geo_utils.OrthographicProjection(
*geo_utils.get_spherical_bounding_box(self.lons, self.lats))
if len(self.lons.shape) == 1: # 1D mesh
lons = self.lons.reshape(len(self.lons), 1)
lats = self.lats.reshape(len(self.lats), 1)
else: # 2D mesh
lons = self.lons.T
lats = self.lats.T
mesh2d = numpy.array(proj(lons, lats)).T
lines = iter(mesh2d)
# we iterate over horizontal stripes, keeping the "previous"
# line of points. we keep it reversed, such that together
# with the current line they define the sequence of points
# around the stripe.
prev_line = next(lines)[::-1]
polygons = []
for i, line in enumerate(lines):
coords = numpy.concatenate((prev_line, line, prev_line[0:1]))
# create the shapely polygon object from the stripe
# coordinates and simplify it (remove redundant points,
# if there are any lying on the straight line).
stripe = shapely.geometry.LineString(coords) \
.simplify(self.DIST_TOLERANCE) \
.buffer(self.DIST_TOLERANCE, 2)
polygons.append(shapely.geometry.Polygon(stripe.exterior))
prev_line = line[::-1]
try:
# create a final polygon as the union of all the stripe ones
polygon = shapely.ops.cascaded_union(polygons) \
.simplify(self.DIST_TOLERANCE)
except ValueError:
# NOTE(larsbutler): In some rare cases, we've observed ValueErrors
# ("No Shapely geometry can be created from null value") with very
# specific sets of polygons such that there are two unique
# and many duplicates of one.
# This bug is very difficult to reproduce consistently (except on
# specific platforms) so the work around here is to remove the
# duplicate polygons. In fact, we only observed this error on our
# CI/build machine. None of our dev environments or production
# machines has encountered this error, at least consistently. >:(
polygons = [shapely.wkt.loads(x) for x in
list(set(p.wkt for p in polygons))]
polygon = shapely.ops.cascaded_union(polygons) \
.simplify(self.DIST_TOLERANCE)
return proj, polygon
|
[
"def",
"_get_proj_enclosing_polygon",
"(",
"self",
")",
":",
"if",
"self",
".",
"lons",
".",
"size",
"<",
"4",
":",
"# the mesh doesn't contain even a single cell",
"return",
"self",
".",
"_get_proj_convex_hull",
"(",
")",
"proj",
"=",
"geo_utils",
".",
"OrthographicProjection",
"(",
"*",
"geo_utils",
".",
"get_spherical_bounding_box",
"(",
"self",
".",
"lons",
",",
"self",
".",
"lats",
")",
")",
"if",
"len",
"(",
"self",
".",
"lons",
".",
"shape",
")",
"==",
"1",
":",
"# 1D mesh",
"lons",
"=",
"self",
".",
"lons",
".",
"reshape",
"(",
"len",
"(",
"self",
".",
"lons",
")",
",",
"1",
")",
"lats",
"=",
"self",
".",
"lats",
".",
"reshape",
"(",
"len",
"(",
"self",
".",
"lats",
")",
",",
"1",
")",
"else",
":",
"# 2D mesh",
"lons",
"=",
"self",
".",
"lons",
".",
"T",
"lats",
"=",
"self",
".",
"lats",
".",
"T",
"mesh2d",
"=",
"numpy",
".",
"array",
"(",
"proj",
"(",
"lons",
",",
"lats",
")",
")",
".",
"T",
"lines",
"=",
"iter",
"(",
"mesh2d",
")",
"# we iterate over horizontal stripes, keeping the \"previous\"",
"# line of points. we keep it reversed, such that together",
"# with the current line they define the sequence of points",
"# around the stripe.",
"prev_line",
"=",
"next",
"(",
"lines",
")",
"[",
":",
":",
"-",
"1",
"]",
"polygons",
"=",
"[",
"]",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"coords",
"=",
"numpy",
".",
"concatenate",
"(",
"(",
"prev_line",
",",
"line",
",",
"prev_line",
"[",
"0",
":",
"1",
"]",
")",
")",
"# create the shapely polygon object from the stripe",
"# coordinates and simplify it (remove redundant points,",
"# if there are any lying on the straight line).",
"stripe",
"=",
"shapely",
".",
"geometry",
".",
"LineString",
"(",
"coords",
")",
".",
"simplify",
"(",
"self",
".",
"DIST_TOLERANCE",
")",
".",
"buffer",
"(",
"self",
".",
"DIST_TOLERANCE",
",",
"2",
")",
"polygons",
".",
"append",
"(",
"shapely",
".",
"geometry",
".",
"Polygon",
"(",
"stripe",
".",
"exterior",
")",
")",
"prev_line",
"=",
"line",
"[",
":",
":",
"-",
"1",
"]",
"try",
":",
"# create a final polygon as the union of all the stripe ones",
"polygon",
"=",
"shapely",
".",
"ops",
".",
"cascaded_union",
"(",
"polygons",
")",
".",
"simplify",
"(",
"self",
".",
"DIST_TOLERANCE",
")",
"except",
"ValueError",
":",
"# NOTE(larsbutler): In some rare cases, we've observed ValueErrors",
"# (\"No Shapely geometry can be created from null value\") with very",
"# specific sets of polygons such that there are two unique",
"# and many duplicates of one.",
"# This bug is very difficult to reproduce consistently (except on",
"# specific platforms) so the work around here is to remove the",
"# duplicate polygons. In fact, we only observed this error on our",
"# CI/build machine. None of our dev environments or production",
"# machines has encountered this error, at least consistently. >:(",
"polygons",
"=",
"[",
"shapely",
".",
"wkt",
".",
"loads",
"(",
"x",
")",
"for",
"x",
"in",
"list",
"(",
"set",
"(",
"p",
".",
"wkt",
"for",
"p",
"in",
"polygons",
")",
")",
"]",
"polygon",
"=",
"shapely",
".",
"ops",
".",
"cascaded_union",
"(",
"polygons",
")",
".",
"simplify",
"(",
"self",
".",
"DIST_TOLERANCE",
")",
"return",
"proj",
",",
"polygon"
] |
See :meth:`Mesh._get_proj_enclosing_polygon`.
:class:`RectangularMesh` contains information about the relative
positions of points, so it allows defining the minimum polygon
containing the projection of the mesh, which doesn't necessarily
have to be convex (in contrast to the :class:`Mesh` implementation).
:returns:
Same structure as :meth:`Mesh._get_proj_convex_hull`.
|
[
"See",
":",
"meth",
":",
"Mesh",
".",
"_get_proj_enclosing_polygon",
"."
] |
python
|
train
|
linuxsoftware/ls.joyous
|
ls/joyous/models/calendar.py
|
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/calendar.py#L462-L465
|
def _getPastEvents(self, request):
"""Return the past events in this site."""
home = request.site.root_page
return getAllPastEvents(request, home=home)
|
[
"def",
"_getPastEvents",
"(",
"self",
",",
"request",
")",
":",
"home",
"=",
"request",
".",
"site",
".",
"root_page",
"return",
"getAllPastEvents",
"(",
"request",
",",
"home",
"=",
"home",
")"
] |
Return the past events in this site.
|
[
"Return",
"the",
"past",
"events",
"in",
"this",
"site",
"."
] |
python
|
train
|
mmerickel/subparse
|
src/subparse/__init__.py
|
https://github.com/mmerickel/subparse/blob/a4bb94b709c0776ccf81f6dcb47922fa5910c19f/src/subparse/__init__.py#L256-L271
|
def parse_docstring(docstring):
"""
Parse a PEP-257 docstring.
SHORT -> blank line -> LONG
"""
short_desc = long_desc = ''
if docstring:
docstring = trim(docstring.lstrip('\n'))
lines = docstring.split('\n\n', 1)
short_desc = lines[0].strip().replace('\n', ' ')
if len(lines) > 1:
long_desc = lines[1].strip()
return short_desc, long_desc
|
[
"def",
"parse_docstring",
"(",
"docstring",
")",
":",
"short_desc",
"=",
"long_desc",
"=",
"''",
"if",
"docstring",
":",
"docstring",
"=",
"trim",
"(",
"docstring",
".",
"lstrip",
"(",
"'\\n'",
")",
")",
"lines",
"=",
"docstring",
".",
"split",
"(",
"'\\n\\n'",
",",
"1",
")",
"short_desc",
"=",
"lines",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
"if",
"len",
"(",
"lines",
")",
">",
"1",
":",
"long_desc",
"=",
"lines",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"return",
"short_desc",
",",
"long_desc"
] |
Parse a PEP-257 docstring.
SHORT -> blank line -> LONG
|
[
"Parse",
"a",
"PEP",
"-",
"257",
"docstring",
"."
] |
python
|
train
|
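A short usage sketch for parse_docstring, assuming it is importable from the subparse package (the PEP-257 trim() helper it calls lives in the same module):
from subparse import parse_docstring

def create(args):
    """Create a new widget.

    The long description starts after the first blank line and may
    span several lines; it is returned trimmed but otherwise unchanged.
    """

short_desc, long_desc = parse_docstring(create.__doc__)
print(short_desc)  # 'Create a new widget.'
print(long_desc)   # the remaining paragraph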
ejeschke/ginga
|
ginga/rv/plugins/PixTable.py
|
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/PixTable.py#L711-L717
|
def set_mode_cb(self, mode, tf):
"""Called when one of the Move/Draw/Edit radio buttons is selected."""
if tf:
self.canvas.set_draw_mode(mode)
if mode == 'edit':
self.edit_select_mark()
return True
|
[
"def",
"set_mode_cb",
"(",
"self",
",",
"mode",
",",
"tf",
")",
":",
"if",
"tf",
":",
"self",
".",
"canvas",
".",
"set_draw_mode",
"(",
"mode",
")",
"if",
"mode",
"==",
"'edit'",
":",
"self",
".",
"edit_select_mark",
"(",
")",
"return",
"True"
] |
Called when one of the Move/Draw/Edit radio buttons is selected.
|
[
"Called",
"when",
"one",
"of",
"the",
"Move",
"/",
"Draw",
"/",
"Edit",
"radio",
"buttons",
"is",
"selected",
"."
] |
python
|
train
|
gwpy/gwpy
|
gwpy/signal/spectral/_lal.py
|
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_lal.py#L89-L133
|
def generate_window(length, window=None, dtype='float64'):
"""Generate a time-domain window for use in a LAL FFT
Parameters
----------
length : `int`
length of window in samples.
window : `str`, `tuple`
name of window to generate, default: ``('kaiser', 24)``. Give
`str` for simple windows, or tuple of ``(name, *args)`` for
complicated windows
dtype : :class:`numpy.dtype`
numeric type of window, default `numpy.dtype(numpy.float64)`
Returns
-------
`window` : `REAL8Window` or similar
time-domain window to use for FFT
"""
from ...utils.lal import (find_typed_function, to_lal_type_str)
if window is None:
window = ('kaiser', 24)
# generate key for caching window
laltype = to_lal_type_str(dtype)
key = (length, str(window), laltype)
# find existing window
try:
return LAL_WINDOWS[key]
# or create one
except KeyError:
# parse window as name and arguments, e.g. ('kaiser', 24)
if isinstance(window, (list, tuple)):
window, beta = window
else:
beta = 0
window = canonical_name(window)
# create window
create = find_typed_function(dtype, 'CreateNamed', 'Window')
LAL_WINDOWS[key] = create(window, beta, length)
return LAL_WINDOWS[key]
|
[
"def",
"generate_window",
"(",
"length",
",",
"window",
"=",
"None",
",",
"dtype",
"=",
"'float64'",
")",
":",
"from",
".",
".",
".",
"utils",
".",
"lal",
"import",
"(",
"find_typed_function",
",",
"to_lal_type_str",
")",
"if",
"window",
"is",
"None",
":",
"window",
"=",
"(",
"'kaiser'",
",",
"24",
")",
"# generate key for caching window",
"laltype",
"=",
"to_lal_type_str",
"(",
"dtype",
")",
"key",
"=",
"(",
"length",
",",
"str",
"(",
"window",
")",
",",
"laltype",
")",
"# find existing window",
"try",
":",
"return",
"LAL_WINDOWS",
"[",
"key",
"]",
"# or create one",
"except",
"KeyError",
":",
"# parse window as name and arguments, e.g. ('kaiser', 24)",
"if",
"isinstance",
"(",
"window",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"window",
",",
"beta",
"=",
"window",
"else",
":",
"beta",
"=",
"0",
"window",
"=",
"canonical_name",
"(",
"window",
")",
"# create window",
"create",
"=",
"find_typed_function",
"(",
"dtype",
",",
"'CreateNamed'",
",",
"'Window'",
")",
"LAL_WINDOWS",
"[",
"key",
"]",
"=",
"create",
"(",
"window",
",",
"beta",
",",
"length",
")",
"return",
"LAL_WINDOWS",
"[",
"key",
"]"
] |
Generate a time-domain window for use in a LAL FFT
Parameters
----------
length : `int`
length of window in samples.
window : `str`, `tuple`
name of window to generate, default: ``('kaiser', 24)``. Give
`str` for simple windows, or tuple of ``(name, *args)`` for
complicated windows
dtype : :class:`numpy.dtype`
numeric type of window, default `numpy.dtype(numpy.float64)`
Returns
-------
`window` : `REAL8Window` or similar
time-domain window to use for FFT
|
[
"Generate",
"a",
"time",
"-",
"domain",
"window",
"for",
"use",
"in",
"a",
"LAL",
"FFT"
] |
python
|
train
|
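A hedged usage sketch for generate_window; it assumes lalsuite is installed (the helper builds LAL window objects) and imports from the internal gwpy.signal.spectral._lal module shown above:
from gwpy.signal.spectral._lal import generate_window

win = generate_window(4096)                     # default ('kaiser', 24), float64
hann = generate_window(4096, window='hann', dtype='float32')

# repeated calls with the same (length, window, dtype) return the cached object
assert generate_window(4096) is win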
ulule/django-badgify
|
badgify/recipe.py
|
https://github.com/ulule/django-badgify/blob/1bf233ffeb6293ee659454de7b3794682128b6ca/badgify/recipe.py#L200-L216
|
def get_obsolete_user_ids(self, db_read=None):
"""
Returns obsolete user IDs to unaward.
"""
db_read = db_read or self.db_read
already_awarded_ids = self.get_already_awarded_user_ids(db_read=db_read, show_log=False)
current_ids = self.get_current_user_ids(db_read=db_read)
obsolete_ids = list(set(already_awarded_ids) - set(current_ids))
obsolete_ids_count = len(obsolete_ids)
logger.debug(
'→ Badge %s: %d users need to be unawarded',
self.slug,
obsolete_ids_count)
return (obsolete_ids, obsolete_ids_count)
|
[
"def",
"get_obsolete_user_ids",
"(",
"self",
",",
"db_read",
"=",
"None",
")",
":",
"db_read",
"=",
"db_read",
"or",
"self",
".",
"db_read",
"already_awarded_ids",
"=",
"self",
".",
"get_already_awarded_user_ids",
"(",
"db_read",
"=",
"db_read",
",",
"show_log",
"=",
"False",
")",
"current_ids",
"=",
"self",
".",
"get_current_user_ids",
"(",
"db_read",
"=",
"db_read",
")",
"obsolete_ids",
"=",
"list",
"(",
"set",
"(",
"already_awarded_ids",
")",
"-",
"set",
"(",
"current_ids",
")",
")",
"obsolete_ids_count",
"=",
"len",
"(",
"obsolete_ids",
")",
"logger",
".",
"debug",
"(",
"'→ Badge %s: %d users need to be unawarded',",
"",
"self",
".",
"slug",
",",
"obsolete_ids_count",
")",
"return",
"(",
"obsolete_ids",
",",
"obsolete_ids_count",
")"
] |
Returns obsolete user IDs to unaward.
|
[
"Returns",
"obsolete",
"users",
"IDs",
"to",
"unaward",
"."
] |
python
|
train
|
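The unaward computation above is a plain set difference; a tiny standalone illustration with made-up IDs of what the helper returns as (obsolete_ids, obsolete_ids_count):
already_awarded_ids = [1, 2, 3, 5, 8]   # users currently holding the badge
current_ids = [2, 3, 8]                 # users who still satisfy the recipe
obsolete_ids = list(set(already_awarded_ids) - set(current_ids))
print(sorted(obsolete_ids), len(obsolete_ids))  # [1, 5] 2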
has2k1/plotnine
|
plotnine/facets/facet_wrap.py
|
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet_wrap.py#L217-L233
|
def draw_label(self, layout_info, ax):
"""
Draw facet label onto the axes.
This function will only draw labels if they are needed.
Parameters
----------
layout_info : dict-like
facet information
ax : axes
Axes to label
"""
label_info = layout_info[list(self.vars)]
label_info._meta = {'dimension': 'cols'}
label_info = self.labeller(label_info)
self.draw_strip_text(label_info, 'top', ax)
|
[
"def",
"draw_label",
"(",
"self",
",",
"layout_info",
",",
"ax",
")",
":",
"label_info",
"=",
"layout_info",
"[",
"list",
"(",
"self",
".",
"vars",
")",
"]",
"label_info",
".",
"_meta",
"=",
"{",
"'dimension'",
":",
"'cols'",
"}",
"label_info",
"=",
"self",
".",
"labeller",
"(",
"label_info",
")",
"self",
".",
"draw_strip_text",
"(",
"label_info",
",",
"'top'",
",",
"ax",
")"
] |
Draw facet label onto the axes.
This function will only draw labels if they are needed.
Parameters
----------
layout_info : dict-like
facet information
ax : axes
Axes to label
|
[
"Draw",
"facet",
"label",
"onto",
"the",
"axes",
"."
] |
python
|
train
|
SeabornGames/RequestClient
|
seaborn/request_client/endpoint.py
|
https://github.com/SeabornGames/RequestClient/blob/21aeb951ddfdb6ee453ad0edc896ff224e06425d/seaborn/request_client/endpoint.py#L126-L146
|
def _parse_function_return_types_from_doc(cls, doc):
"""
This will extract the return type for list of lists so that the
repr can display the header.
:param doc: str of the function doc
:return dict of {func.__name__:{'api_type':'type','col_name':[],
'col_type':[],'repr_type':None}}
"""
data = dict(name='', col_types=[], col_names=[], _type=None)
if doc:
return_doc = doc.split(':return')[-1].strip()
data['name'] = return_doc.split(':')[0]
if data['name'].startswith('list of'):
if data['name'].endswith('LIST'):
data['_type'] = 'list_list'
for row in return_doc.split('\n')[3:]:
index, col_type, col_name = row.split(None, 2)
assert (index == str(index))
data['col_types'].append(col_type)
data['col_names'].append(col_name.split()[0])
return data
|
[
"def",
"_parse_function_return_types_from_doc",
"(",
"cls",
",",
"doc",
")",
":",
"data",
"=",
"dict",
"(",
"name",
"=",
"''",
",",
"col_types",
"=",
"[",
"]",
",",
"col_names",
"=",
"[",
"]",
",",
"_type",
"=",
"None",
")",
"if",
"doc",
":",
"return_doc",
"=",
"__doc__",
".",
"split",
"(",
"':return'",
")",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
"data",
"[",
"'name'",
"]",
"=",
"return_doc",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
"if",
"data",
"[",
"'name'",
"]",
".",
"startswith",
"(",
"'list of'",
")",
":",
"if",
"data",
"[",
"'name'",
"]",
".",
"endswith",
"(",
"'LIST'",
")",
":",
"data",
"[",
"'_type'",
"]",
"=",
"'list_list'",
"for",
"row",
"in",
"return_doc",
".",
"split",
"(",
"'\\n'",
")",
"[",
"3",
":",
"]",
":",
"index",
",",
"col_type",
",",
"col_name",
"=",
"row",
".",
"split",
"(",
"None",
",",
"2",
")",
"assert",
"(",
"index",
"==",
"str",
"(",
"index",
")",
")",
"data",
"[",
"'col_types'",
"]",
".",
"append",
"(",
"col_type",
")",
"data",
"[",
"'col_names'",
"]",
".",
"append",
"(",
"col_name",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"return",
"data"
] |
This will extract the return type for list of lists so that the
repr can display the header.
:param doc: str of the function doc
:return dict of {func.__name__:{'api_type':'type','col_name':[],
'col_type':[],'repr_type':None}}
|
[
"This",
"will",
"extract",
"the",
"return",
"type",
"for",
"list",
"of",
"lists",
"so",
"that",
"the",
"repr",
"can",
"display",
"the",
"header",
".",
":",
"param",
"doc",
":",
"str",
"of",
"the",
"function",
"doc",
":",
"return",
"dict",
"of",
"{",
"func",
".",
"__name__",
":",
"{",
"api_type",
":",
"type",
"col_name",
":",
"[]",
"col_type",
":",
"[]",
"repr_type",
":",
"None",
"}}"
] |
python
|
train
|
Fantomas42/django-blog-zinnia
|
zinnia/views/trackback.py
|
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/views/trackback.py#L27-L31
|
def dispatch(self, *args, **kwargs):
"""
Decorate the view dispatcher with csrf_exempt.
"""
return super(EntryTrackback, self).dispatch(*args, **kwargs)
|
[
"def",
"dispatch",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"super",
"(",
"EntryTrackback",
",",
"self",
")",
".",
"dispatch",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Decorate the view dispatcher with csrf_exempt.
|
[
"Decorate",
"the",
"view",
"dispatcher",
"with",
"csrf_exempt",
"."
] |
python
|
train
|
adafruit/Adafruit_Python_BluefruitLE
|
Adafruit_BluefruitLE/corebluetooth/metadata.py
|
https://github.com/adafruit/Adafruit_Python_BluefruitLE/blob/34fc6f596371b961628369d78ce836950514062f/Adafruit_BluefruitLE/corebluetooth/metadata.py#L81-L86
|
def remove(self, cbobject):
"""Remove any metadata associated with the provided CoreBluetooth object.
"""
with self._lock:
if cbobject in self._metadata:
del self._metadata[cbobject]
|
[
"def",
"remove",
"(",
"self",
",",
"cbobject",
")",
":",
"with",
"self",
".",
"_lock",
":",
"if",
"cbobject",
"in",
"self",
".",
"_metadata",
":",
"del",
"self",
".",
"_metadata",
"[",
"cbobject",
"]"
] |
Remove any metadata associated with the provided CoreBluetooth object.
|
[
"Remove",
"any",
"metadata",
"associated",
"with",
"the",
"provided",
"CoreBluetooth",
"object",
"."
] |
python
|
valid
|
saltstack/salt
|
salt/modules/config.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/config.py#L446-L463
|
def gather_bootstrap_script(bootstrap=None):
'''
Download the salt-bootstrap script, and return its location
bootstrap
URL of alternate bootstrap script
CLI Example:
.. code-block:: bash
salt '*' config.gather_bootstrap_script
'''
if not HAS_CLOUD:
return False, 'config.gather_bootstrap_script is unavailable'
ret = salt.utils.cloud.update_bootstrap(__opts__, url=bootstrap)
if 'Success' in ret and ret['Success']['Files updated']:
return ret['Success']['Files updated'][0]
|
[
"def",
"gather_bootstrap_script",
"(",
"bootstrap",
"=",
"None",
")",
":",
"if",
"not",
"HAS_CLOUD",
":",
"return",
"False",
",",
"'config.gather_bootstrap_script is unavailable'",
"ret",
"=",
"salt",
".",
"utils",
".",
"cloud",
".",
"update_bootstrap",
"(",
"__opts__",
",",
"url",
"=",
"bootstrap",
")",
"if",
"'Success'",
"in",
"ret",
"and",
"ret",
"[",
"'Success'",
"]",
"[",
"'Files updated'",
"]",
":",
"return",
"ret",
"[",
"'Success'",
"]",
"[",
"'Files updated'",
"]",
"[",
"0",
"]"
] |
Download the salt-bootstrap script, and return its location
bootstrap
URL of alternate bootstrap script
CLI Example:
.. code-block:: bash
salt '*' config.gather_bootstrap_script
|
[
"Download",
"the",
"salt",
"-",
"bootstrap",
"script",
"and",
"return",
"its",
"location"
] |
python
|
train
|
lowandrew/OLCTools
|
databasesetup/database_setup.py
|
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/databasesetup/database_setup.py#L103-L112
|
def sipprverse_targets(self, databasepath, database_name='sipprverse', download_id='13699538'):
"""
Download OLC-specific sipprverse targets
:param databasepath: path to use to save the database
:param database_name: name of current database
:param download_id: figshare identifier of .tar.gz file
"""
self.custom_databases(databasepath=databasepath,
database_name=database_name,
download_id=download_id)
|
[
"def",
"sipprverse_targets",
"(",
"self",
",",
"databasepath",
",",
"database_name",
"=",
"'sipprverse'",
",",
"download_id",
"=",
"'13699538'",
")",
":",
"self",
".",
"custom_databases",
"(",
"databasepath",
"=",
"databasepath",
",",
"database_name",
"=",
"database_name",
",",
"download_id",
"=",
"download_id",
")"
] |
Download OLC-specific sipprverse targets
:param databasepath: path to use to save the database
:param database_name: name of current database
:param download_id: figshare identifier of .tar.gz file
|
[
"Download",
"OLC",
"-",
"specific",
"sipprverse",
"targets",
":",
"param",
"databasepath",
":",
"path",
"to",
"use",
"to",
"save",
"the",
"database",
":",
"param",
"database_name",
":",
"name",
"of",
"current",
"database",
":",
"param",
"download_id",
":",
"figshare",
"identifier",
"of",
".",
"tar",
".",
"gz",
"file"
] |
python
|
train
|
contentful/contentful.py
|
contentful/errors.py
|
https://github.com/contentful/contentful.py/blob/73fe01d6ae5a1f8818880da65199107b584681dd/contentful/errors.py#L203-L221
|
def get_error(response):
"""Gets Error by HTTP Status Code"""
errors = {
400: BadRequestError,
401: UnauthorizedError,
403: AccessDeniedError,
404: NotFoundError,
429: RateLimitExceededError,
500: ServerError,
502: BadGatewayError,
503: ServiceUnavailableError
}
error_class = HTTPError
if response.status_code in errors:
error_class = errors[response.status_code]
return error_class(response)
|
[
"def",
"get_error",
"(",
"response",
")",
":",
"errors",
"=",
"{",
"400",
":",
"BadRequestError",
",",
"401",
":",
"UnauthorizedError",
",",
"403",
":",
"AccessDeniedError",
",",
"404",
":",
"NotFoundError",
",",
"429",
":",
"RateLimitExceededError",
",",
"500",
":",
"ServerError",
",",
"502",
":",
"BadGatewayError",
",",
"503",
":",
"ServiceUnavailableError",
"}",
"error_class",
"=",
"HTTPError",
"if",
"response",
".",
"status_code",
"in",
"errors",
":",
"error_class",
"=",
"errors",
"[",
"response",
".",
"status_code",
"]",
"return",
"error_class",
"(",
"response",
")"
] |
Gets Error by HTTP Status Code
|
[
"Gets",
"Error",
"by",
"HTTP",
"Status",
"Code"
] |
python
|
train
|
jtwhite79/pyemu
|
pyemu/utils/helpers.py
|
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/helpers.py#L151-L286
|
def geostatistical_draws(pst, struct_dict,num_reals=100,sigma_range=4,verbose=True):
""" a helper function to construct a parameter ensenble from a full prior covariance matrix
implied by the geostatistical structure(s) in struct_dict. This function is much more efficient
for problems with lots of pars (>200K).
Parameters
----------
pst : pyemu.Pst
a control file (or the name of control file)
struct_dict : dict
a python dict of GeoStruct (or structure file), and list of pp tpl files pairs
If the values in the dict are pd.DataFrames, then they must have an
'x','y', and 'parnme' column. If the filename ends in '.csv',
then a pd.DataFrame is loaded, otherwise a pilot points file is loaded.
num_reals : int
number of realizations to draw. Default is 100
sigma_range : float
a float representing the number of standard deviations implied by parameter bounds.
Default is 4.0, which implies 95% confidence parameter bounds.
verbose : bool
flag for stdout.
Returns
-------
par_ens : pyemu.ParameterEnsemble
Example
-------
``>>>import pyemu``
``>>>pst = pyemu.Pst("pest.pst")``
``>>>sd = {"struct.dat":["hkpp.dat.tpl","vka.dat.tpl"]}``
``>>>pe = pyemu.helpers.geostatistical_draws(pst,struct_dict=sd,num_reals=100)``
``>>>pe.to_csv("par_ensemble.csv")``
"""
if isinstance(pst,str):
pst = pyemu.Pst(pst)
assert isinstance(pst,pyemu.Pst),"pst arg must be a Pst instance, not {0}".\
format(type(pst))
if verbose: print("building diagonal cov")
full_cov = pyemu.Cov.from_parameter_data(pst, sigma_range=sigma_range)
full_cov_dict = {n: float(v) for n, v in zip(full_cov.col_names, full_cov.x)}
# par_org = pst.parameter_data.copy # not sure about the need or function of this line? (BH)
par = pst.parameter_data
par_ens = []
pars_in_cov = set()
for gs,items in struct_dict.items():
if verbose: print("processing ",gs)
if isinstance(gs,str):
gss = pyemu.geostats.read_struct_file(gs)
if isinstance(gss,list):
warnings.warn("using first geostat structure in file {0}".\
format(gs),PyemuWarning)
gs = gss[0]
else:
gs = gss
if not isinstance(items,list):
items = [items]
for item in items:
if isinstance(item,str):
assert os.path.exists(item),"file {0} not found".\
format(item)
if item.lower().endswith(".tpl"):
df = pyemu.pp_utils.pp_tpl_to_dataframe(item)
elif item.lower().endswith(".csv"):
df = pd.read_csv(item)
else:
df = item
if 'pargp' in df.columns:
if verbose: print("working on pargroups {0}".format(df.pargp.unique().tolist()))
for req in ['x','y','parnme']:
if req not in df.columns:
raise Exception("{0} is not in the columns".format(req))
missing = df.loc[df.parnme.apply(
lambda x : x not in par.parnme),"parnme"]
if len(missing) > 0:
warnings.warn("the following parameters are not " + \
"in the control file: {0}".\
format(','.join(missing)),PyemuWarning)
df = df.loc[df.parnme.apply(lambda x: x not in missing)]
if "zone" not in df.columns:
df.loc[:,"zone"] = 1
zones = df.zone.unique()
aset = set(pst.adj_par_names)
for zone in zones:
df_zone = df.loc[df.zone==zone,:].copy()
df_zone = df_zone.loc[df_zone.parnme.apply(lambda x: x in aset),:]
if df_zone.shape[0] == 0:
warnings.warn("all parameters in zone {0} tied and/or fixed, skipping...".format(zone),PyemuWarning)
continue
#df_zone.sort_values(by="parnme",inplace=True)
df_zone.sort_index(inplace=True)
if verbose: print("build cov matrix")
cov = gs.covariance_matrix(df_zone.x,df_zone.y,df_zone.parnme)
if verbose: print("done")
if verbose: print("getting diag var cov",df_zone.shape[0])
#tpl_var = np.diag(full_cov.get(list(df_zone.parnme)).x).max()
tpl_var = max([full_cov_dict[pn] for pn in df_zone.parnme])
if verbose: print("scaling full cov by diag var cov")
#cov.x *= tpl_var
for i in range(cov.shape[0]):
cov.x[i,:] *= tpl_var
# no fixed values here
pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst=pst,cov=cov,num_reals=num_reals,
group_chunks=False,fill_fixed=False)
#df = pe.iloc[:,:]
par_ens.append(pd.DataFrame(pe))
pars_in_cov.update(set(pe.columns))
if verbose: print("adding remaining parameters to diagonal")
fset = set(full_cov.row_names)
diff = list(fset.difference(pars_in_cov))
if (len(diff) > 0):
name_dict = {name:i for i,name in enumerate(full_cov.row_names)}
vec = np.atleast_2d(np.array([full_cov.x[name_dict[d]] for d in diff]))
cov = pyemu.Cov(x=vec,names=diff,isdiagonal=True)
#cov = full_cov.get(diff,diff)
# here we fill in the fixed values
pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst,cov,num_reals=num_reals,
fill_fixed=True)
par_ens.append(pd.DataFrame(pe))
par_ens = pd.concat(par_ens,axis=1)
par_ens = pyemu.ParameterEnsemble.from_dataframe(df=par_ens,pst=pst)
return par_ens
|
[
"def",
"geostatistical_draws",
"(",
"pst",
",",
"struct_dict",
",",
"num_reals",
"=",
"100",
",",
"sigma_range",
"=",
"4",
",",
"verbose",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"pst",
",",
"str",
")",
":",
"pst",
"=",
"pyemu",
".",
"Pst",
"(",
"pst",
")",
"assert",
"isinstance",
"(",
"pst",
",",
"pyemu",
".",
"Pst",
")",
",",
"\"pst arg must be a Pst instance, not {0}\"",
".",
"format",
"(",
"type",
"(",
"pst",
")",
")",
"if",
"verbose",
":",
"print",
"(",
"\"building diagonal cov\"",
")",
"full_cov",
"=",
"pyemu",
".",
"Cov",
".",
"from_parameter_data",
"(",
"pst",
",",
"sigma_range",
"=",
"sigma_range",
")",
"full_cov_dict",
"=",
"{",
"n",
":",
"float",
"(",
"v",
")",
"for",
"n",
",",
"v",
"in",
"zip",
"(",
"full_cov",
".",
"col_names",
",",
"full_cov",
".",
"x",
")",
"}",
"# par_org = pst.parameter_data.copy # not sure about the need or function of this line? (BH)",
"par",
"=",
"pst",
".",
"parameter_data",
"par_ens",
"=",
"[",
"]",
"pars_in_cov",
"=",
"set",
"(",
")",
"for",
"gs",
",",
"items",
"in",
"struct_dict",
".",
"items",
"(",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"\"processing \"",
",",
"gs",
")",
"if",
"isinstance",
"(",
"gs",
",",
"str",
")",
":",
"gss",
"=",
"pyemu",
".",
"geostats",
".",
"read_struct_file",
"(",
"gs",
")",
"if",
"isinstance",
"(",
"gss",
",",
"list",
")",
":",
"warnings",
".",
"warn",
"(",
"\"using first geostat structure in file {0}\"",
".",
"format",
"(",
"gs",
")",
",",
"PyemuWarning",
")",
"gs",
"=",
"gss",
"[",
"0",
"]",
"else",
":",
"gs",
"=",
"gss",
"if",
"not",
"isinstance",
"(",
"items",
",",
"list",
")",
":",
"items",
"=",
"[",
"items",
"]",
"for",
"item",
"in",
"items",
":",
"if",
"isinstance",
"(",
"item",
",",
"str",
")",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"item",
")",
",",
"\"file {0} not found\"",
".",
"format",
"(",
"item",
")",
"if",
"item",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\".tpl\"",
")",
":",
"df",
"=",
"pyemu",
".",
"pp_utils",
".",
"pp_tpl_to_dataframe",
"(",
"item",
")",
"elif",
"item",
".",
"lower",
".",
"endswith",
"(",
"\".csv\"",
")",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"item",
")",
"else",
":",
"df",
"=",
"item",
"if",
"df",
".",
"columns",
".",
"contains",
"(",
"'pargp'",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"\"working on pargroups {0}\"",
".",
"format",
"(",
"df",
".",
"pargp",
".",
"unique",
"(",
")",
".",
"tolist",
"(",
")",
")",
")",
"for",
"req",
"in",
"[",
"'x'",
",",
"'y'",
",",
"'parnme'",
"]",
":",
"if",
"req",
"not",
"in",
"df",
".",
"columns",
":",
"raise",
"Exception",
"(",
"\"{0} is not in the columns\"",
".",
"format",
"(",
"req",
")",
")",
"missing",
"=",
"df",
".",
"loc",
"[",
"df",
".",
"parnme",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
"not",
"in",
"par",
".",
"parnme",
")",
",",
"\"parnme\"",
"]",
"if",
"len",
"(",
"missing",
")",
">",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"the following parameters are not \"",
"+",
"\"in the control file: {0}\"",
".",
"format",
"(",
"','",
".",
"join",
"(",
"missing",
")",
")",
",",
"PyemuWarning",
")",
"df",
"=",
"df",
".",
"loc",
"[",
"df",
".",
"parnme",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
"not",
"in",
"missing",
")",
"]",
"if",
"\"zone\"",
"not",
"in",
"df",
".",
"columns",
":",
"df",
".",
"loc",
"[",
":",
",",
"\"zone\"",
"]",
"=",
"1",
"zones",
"=",
"df",
".",
"zone",
".",
"unique",
"(",
")",
"aset",
"=",
"set",
"(",
"pst",
".",
"adj_par_names",
")",
"for",
"zone",
"in",
"zones",
":",
"df_zone",
"=",
"df",
".",
"loc",
"[",
"df",
".",
"zone",
"==",
"zone",
",",
":",
"]",
".",
"copy",
"(",
")",
"df_zone",
"=",
"df_zone",
".",
"loc",
"[",
"df_zone",
".",
"parnme",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
"in",
"aset",
")",
",",
":",
"]",
"if",
"df_zone",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"all parameters in zone {0} tied and/or fixed, skipping...\"",
".",
"format",
"(",
"zone",
")",
",",
"PyemuWarning",
")",
"continue",
"#df_zone.sort_values(by=\"parnme\",inplace=True)",
"df_zone",
".",
"sort_index",
"(",
"inplace",
"=",
"True",
")",
"if",
"verbose",
":",
"print",
"(",
"\"build cov matrix\"",
")",
"cov",
"=",
"gs",
".",
"covariance_matrix",
"(",
"df_zone",
".",
"x",
",",
"df_zone",
".",
"y",
",",
"df_zone",
".",
"parnme",
")",
"if",
"verbose",
":",
"print",
"(",
"\"done\"",
")",
"if",
"verbose",
":",
"print",
"(",
"\"getting diag var cov\"",
",",
"df_zone",
".",
"shape",
"[",
"0",
"]",
")",
"#tpl_var = np.diag(full_cov.get(list(df_zone.parnme)).x).max()",
"tpl_var",
"=",
"max",
"(",
"[",
"full_cov_dict",
"[",
"pn",
"]",
"for",
"pn",
"in",
"df_zone",
".",
"parnme",
"]",
")",
"if",
"verbose",
":",
"print",
"(",
"\"scaling full cov by diag var cov\"",
")",
"#cov.x *= tpl_var",
"for",
"i",
"in",
"range",
"(",
"cov",
".",
"shape",
"[",
"0",
"]",
")",
":",
"cov",
".",
"x",
"[",
"i",
",",
":",
"]",
"*=",
"tpl_var",
"# no fixed values here",
"pe",
"=",
"pyemu",
".",
"ParameterEnsemble",
".",
"from_gaussian_draw",
"(",
"pst",
"=",
"pst",
",",
"cov",
"=",
"cov",
",",
"num_reals",
"=",
"num_reals",
",",
"group_chunks",
"=",
"False",
",",
"fill_fixed",
"=",
"False",
")",
"#df = pe.iloc[:,:]",
"par_ens",
".",
"append",
"(",
"pd",
".",
"DataFrame",
"(",
"pe",
")",
")",
"pars_in_cov",
".",
"update",
"(",
"set",
"(",
"pe",
".",
"columns",
")",
")",
"if",
"verbose",
":",
"print",
"(",
"\"adding remaining parameters to diagonal\"",
")",
"fset",
"=",
"set",
"(",
"full_cov",
".",
"row_names",
")",
"diff",
"=",
"list",
"(",
"fset",
".",
"difference",
"(",
"pars_in_cov",
")",
")",
"if",
"(",
"len",
"(",
"diff",
")",
">",
"0",
")",
":",
"name_dict",
"=",
"{",
"name",
":",
"i",
"for",
"i",
",",
"name",
"in",
"enumerate",
"(",
"full_cov",
".",
"row_names",
")",
"}",
"vec",
"=",
"np",
".",
"atleast_2d",
"(",
"np",
".",
"array",
"(",
"[",
"full_cov",
".",
"x",
"[",
"name_dict",
"[",
"d",
"]",
"]",
"for",
"d",
"in",
"diff",
"]",
")",
")",
"cov",
"=",
"pyemu",
".",
"Cov",
"(",
"x",
"=",
"vec",
",",
"names",
"=",
"diff",
",",
"isdiagonal",
"=",
"True",
")",
"#cov = full_cov.get(diff,diff)",
"# here we fill in the fixed values",
"pe",
"=",
"pyemu",
".",
"ParameterEnsemble",
".",
"from_gaussian_draw",
"(",
"pst",
",",
"cov",
",",
"num_reals",
"=",
"num_reals",
",",
"fill_fixed",
"=",
"True",
")",
"par_ens",
".",
"append",
"(",
"pd",
".",
"DataFrame",
"(",
"pe",
")",
")",
"par_ens",
"=",
"pd",
".",
"concat",
"(",
"par_ens",
",",
"axis",
"=",
"1",
")",
"par_ens",
"=",
"pyemu",
".",
"ParameterEnsemble",
".",
"from_dataframe",
"(",
"df",
"=",
"par_ens",
",",
"pst",
"=",
"pst",
")",
"return",
"par_ens"
] |
a helper function to construct a parameter ensemble from a full prior covariance matrix
implied by the geostatistical structure(s) in struct_dict. This function is much more efficient
for problems with lots of pars (>200K).
Parameters
----------
pst : pyemu.Pst
a control file (or the name of control file)
struct_dict : dict
a python dict of GeoStruct (or structure file), and list of pp tpl files pairs
If the values in the dict are pd.DataFrames, then they must have an
'x','y', and 'parnme' column. If the filename ends in '.csv',
then a pd.DataFrame is loaded, otherwise a pilot points file is loaded.
num_reals : int
number of realizations to draw. Default is 100
sigma_range : float
a float representing the number of standard deviations implied by parameter bounds.
Default is 4.0, which implies 95% confidence parameter bounds.
verbose : bool
flag for stdout.
Returns
-------
par_ens : pyemu.ParameterEnsemble
Example
-------
``>>>import pyemu``
``>>>pst = pyemu.Pst("pest.pst")``
``>>>sd = {"struct.dat":["hkpp.dat.tpl","vka.dat.tpl"]}``
``>>>pe = pyemu.helpers.geostatistical_draws(pst,struct_dict=sd,num_reals=100)``
``>>>pe.to_csv("par_ensemble.csv")``
|
[
"a",
"helper",
"function",
"to",
"construct",
"a",
"parameter",
"ensenble",
"from",
"a",
"full",
"prior",
"covariance",
"matrix",
"implied",
"by",
"the",
"geostatistical",
"structure",
"(",
"s",
")",
"in",
"struct_dict",
".",
"This",
"function",
"is",
"much",
"more",
"efficient",
"for",
"problems",
"with",
"lots",
"of",
"pars",
"(",
">",
"200K",
")",
"."
] |
python
|
train
|
wavycloud/pyboto3
|
pyboto3/codedeploy.py
|
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/codedeploy.py#L821-L1019
|
def create_deployment_group(applicationName=None, deploymentGroupName=None, deploymentConfigName=None, ec2TagFilters=None, onPremisesInstanceTagFilters=None, autoScalingGroups=None, serviceRoleArn=None, triggerConfigurations=None, alarmConfiguration=None, autoRollbackConfiguration=None, deploymentStyle=None, blueGreenDeploymentConfiguration=None, loadBalancerInfo=None):
"""
Creates a deployment group to which application revisions will be deployed.
See also: AWS API Documentation
:example: response = client.create_deployment_group(
applicationName='string',
deploymentGroupName='string',
deploymentConfigName='string',
ec2TagFilters=[
{
'Key': 'string',
'Value': 'string',
'Type': 'KEY_ONLY'|'VALUE_ONLY'|'KEY_AND_VALUE'
},
],
onPremisesInstanceTagFilters=[
{
'Key': 'string',
'Value': 'string',
'Type': 'KEY_ONLY'|'VALUE_ONLY'|'KEY_AND_VALUE'
},
],
autoScalingGroups=[
'string',
],
serviceRoleArn='string',
triggerConfigurations=[
{
'triggerName': 'string',
'triggerTargetArn': 'string',
'triggerEvents': [
'DeploymentStart'|'DeploymentSuccess'|'DeploymentFailure'|'DeploymentStop'|'DeploymentRollback'|'DeploymentReady'|'InstanceStart'|'InstanceSuccess'|'InstanceFailure'|'InstanceReady',
]
},
],
alarmConfiguration={
'enabled': True|False,
'ignorePollAlarmFailure': True|False,
'alarms': [
{
'name': 'string'
},
]
},
autoRollbackConfiguration={
'enabled': True|False,
'events': [
'DEPLOYMENT_FAILURE'|'DEPLOYMENT_STOP_ON_ALARM'|'DEPLOYMENT_STOP_ON_REQUEST',
]
},
deploymentStyle={
'deploymentType': 'IN_PLACE'|'BLUE_GREEN',
'deploymentOption': 'WITH_TRAFFIC_CONTROL'|'WITHOUT_TRAFFIC_CONTROL'
},
blueGreenDeploymentConfiguration={
'terminateBlueInstancesOnDeploymentSuccess': {
'action': 'TERMINATE'|'KEEP_ALIVE',
'terminationWaitTimeInMinutes': 123
},
'deploymentReadyOption': {
'actionOnTimeout': 'CONTINUE_DEPLOYMENT'|'STOP_DEPLOYMENT',
'waitTimeInMinutes': 123
},
'greenFleetProvisioningOption': {
'action': 'DISCOVER_EXISTING'|'COPY_AUTO_SCALING_GROUP'
}
},
loadBalancerInfo={
'elbInfoList': [
{
'name': 'string'
},
]
}
)
:type applicationName: string
:param applicationName: [REQUIRED]
The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.
:type deploymentGroupName: string
:param deploymentGroupName: [REQUIRED]
The name of a new deployment group for the specified application.
:type deploymentConfigName: string
:param deploymentConfigName: If specified, the deployment configuration name can be either one of the predefined configurations provided with AWS CodeDeploy or a custom deployment configuration that you create by calling the create deployment configuration operation.
CodeDeployDefault.OneAtATime is the default deployment configuration. It is used if a configuration isn't specified for the deployment or the deployment group.
For more information about the predefined deployment configurations in AWS CodeDeploy, see Working with Deployment Groups in AWS CodeDeploy in the AWS CodeDeploy User Guide.
:type ec2TagFilters: list
:param ec2TagFilters: The Amazon EC2 tags on which to filter. The deployment group will include EC2 instances with any of the specified tags.
(dict) --Information about an EC2 tag filter.
Key (string) --The tag filter key.
Value (string) --The tag filter value.
Type (string) --The tag filter type:
KEY_ONLY: Key only.
VALUE_ONLY: Value only.
KEY_AND_VALUE: Key and value.
:type onPremisesInstanceTagFilters: list
:param onPremisesInstanceTagFilters: The on-premises instance tags on which to filter. The deployment group will include on-premises instances with any of the specified tags.
(dict) --Information about an on-premises instance tag filter.
Key (string) --The on-premises instance tag filter key.
Value (string) --The on-premises instance tag filter value.
Type (string) --The on-premises instance tag filter type:
KEY_ONLY: Key only.
VALUE_ONLY: Value only.
KEY_AND_VALUE: Key and value.
:type autoScalingGroups: list
:param autoScalingGroups: A list of associated Auto Scaling groups.
(string) --
:type serviceRoleArn: string
:param serviceRoleArn: [REQUIRED]
A service role ARN that allows AWS CodeDeploy to act on the user's behalf when interacting with AWS services.
:type triggerConfigurations: list
:param triggerConfigurations: Information about triggers to create when the deployment group is created. For examples, see Create a Trigger for an AWS CodeDeploy Event in the AWS CodeDeploy User Guide.
(dict) --Information about notification triggers for the deployment group.
triggerName (string) --The name of the notification trigger.
triggerTargetArn (string) --The ARN of the Amazon Simple Notification Service topic through which notifications about deployment or instance events are sent.
triggerEvents (list) --The event type or types for which notifications are triggered.
(string) --
:type alarmConfiguration: dict
:param alarmConfiguration: Information to add about Amazon CloudWatch alarms when the deployment group is created.
enabled (boolean) --Indicates whether the alarm configuration is enabled.
ignorePollAlarmFailure (boolean) --Indicates whether a deployment should continue if information about the current state of alarms cannot be retrieved from Amazon CloudWatch. The default value is false.
true: The deployment will proceed even if alarm status information can't be retrieved from Amazon CloudWatch.
false: The deployment will stop if alarm status information can't be retrieved from Amazon CloudWatch.
alarms (list) --A list of alarms configured for the deployment group. A maximum of 10 alarms can be added to a deployment group.
(dict) --Information about an alarm.
name (string) --The name of the alarm. Maximum length is 255 characters. Each alarm name can be used only once in a list of alarms.
:type autoRollbackConfiguration: dict
:param autoRollbackConfiguration: Configuration information for an automatic rollback that is added when a deployment group is created.
enabled (boolean) --Indicates whether a defined automatic rollback configuration is currently enabled.
events (list) --The event type or types that trigger a rollback.
(string) --
:type deploymentStyle: dict
:param deploymentStyle: Information about the type of deployment, in-place or blue/green, that you want to run and whether to route deployment traffic behind a load balancer.
deploymentType (string) --Indicates whether to run an in-place deployment or a blue/green deployment.
deploymentOption (string) --Indicates whether to route deployment traffic behind a load balancer.
:type blueGreenDeploymentConfiguration: dict
:param blueGreenDeploymentConfiguration: Information about blue/green deployment options for a deployment group.
terminateBlueInstancesOnDeploymentSuccess (dict) --Information about whether to terminate instances in the original fleet during a blue/green deployment.
action (string) --The action to take on instances in the original environment after a successful blue/green deployment.
TERMINATE: Instances are terminated after a specified wait time.
KEEP_ALIVE: Instances are left running after they are deregistered from the load balancer and removed from the deployment group.
terminationWaitTimeInMinutes (integer) --The number of minutes to wait after a successful blue/green deployment before terminating instances from the original environment.
deploymentReadyOption (dict) --Information about the action to take when newly provisioned instances are ready to receive traffic in a blue/green deployment.
actionOnTimeout (string) --Information about when to reroute traffic from an original environment to a replacement environment in a blue/green deployment.
CONTINUE_DEPLOYMENT: Register new instances with the load balancer immediately after the new application revision is installed on the instances in the replacement environment.
STOP_DEPLOYMENT: Do not register new instances with load balancer unless traffic is rerouted manually. If traffic is not rerouted manually before the end of the specified wait period, the deployment status is changed to Stopped.
waitTimeInMinutes (integer) --The number of minutes to wait before the status of a blue/green deployment changed to Stopped if rerouting is not started manually. Applies only to the STOP_DEPLOYMENT option for actionOnTimeout
greenFleetProvisioningOption (dict) --Information about how instances are provisioned for a replacement environment in a blue/green deployment.
action (string) --The method used to add instances to a replacement environment.
DISCOVER_EXISTING: Use instances that already exist or will be created manually.
COPY_AUTO_SCALING_GROUP: Use settings from a specified Auto Scaling group to define and create instances in a new Auto Scaling group.
:type loadBalancerInfo: dict
:param loadBalancerInfo: Information about the load balancer used in a deployment.
elbInfoList (list) --An array containing information about the load balancer in Elastic Load Balancing to use in a deployment.
(dict) --Information about a load balancer in Elastic Load Balancing to use in a deployment.
name (string) --For blue/green deployments, the name of the load balancer that will be used to route traffic from original instances to replacement instances in a blue/green deployment. For in-place deployments, the name of the load balancer that instances are deregistered from so they are not serving traffic during a deployment, and then re-registered with after the deployment completes.
:rtype: dict
:return: {
'deploymentGroupId': 'string'
}
"""
pass
|
[
"def",
"create_deployment_group",
"(",
"applicationName",
"=",
"None",
",",
"deploymentGroupName",
"=",
"None",
",",
"deploymentConfigName",
"=",
"None",
",",
"ec2TagFilters",
"=",
"None",
",",
"onPremisesInstanceTagFilters",
"=",
"None",
",",
"autoScalingGroups",
"=",
"None",
",",
"serviceRoleArn",
"=",
"None",
",",
"triggerConfigurations",
"=",
"None",
",",
"alarmConfiguration",
"=",
"None",
",",
"autoRollbackConfiguration",
"=",
"None",
",",
"deploymentStyle",
"=",
"None",
",",
"blueGreenDeploymentConfiguration",
"=",
"None",
",",
"loadBalancerInfo",
"=",
"None",
")",
":",
"pass"
] |
Creates a deployment group to which application revisions will be deployed.
See also: AWS API Documentation
:example: response = client.create_deployment_group(
applicationName='string',
deploymentGroupName='string',
deploymentConfigName='string',
ec2TagFilters=[
{
'Key': 'string',
'Value': 'string',
'Type': 'KEY_ONLY'|'VALUE_ONLY'|'KEY_AND_VALUE'
},
],
onPremisesInstanceTagFilters=[
{
'Key': 'string',
'Value': 'string',
'Type': 'KEY_ONLY'|'VALUE_ONLY'|'KEY_AND_VALUE'
},
],
autoScalingGroups=[
'string',
],
serviceRoleArn='string',
triggerConfigurations=[
{
'triggerName': 'string',
'triggerTargetArn': 'string',
'triggerEvents': [
'DeploymentStart'|'DeploymentSuccess'|'DeploymentFailure'|'DeploymentStop'|'DeploymentRollback'|'DeploymentReady'|'InstanceStart'|'InstanceSuccess'|'InstanceFailure'|'InstanceReady',
]
},
],
alarmConfiguration={
'enabled': True|False,
'ignorePollAlarmFailure': True|False,
'alarms': [
{
'name': 'string'
},
]
},
autoRollbackConfiguration={
'enabled': True|False,
'events': [
'DEPLOYMENT_FAILURE'|'DEPLOYMENT_STOP_ON_ALARM'|'DEPLOYMENT_STOP_ON_REQUEST',
]
},
deploymentStyle={
'deploymentType': 'IN_PLACE'|'BLUE_GREEN',
'deploymentOption': 'WITH_TRAFFIC_CONTROL'|'WITHOUT_TRAFFIC_CONTROL'
},
blueGreenDeploymentConfiguration={
'terminateBlueInstancesOnDeploymentSuccess': {
'action': 'TERMINATE'|'KEEP_ALIVE',
'terminationWaitTimeInMinutes': 123
},
'deploymentReadyOption': {
'actionOnTimeout': 'CONTINUE_DEPLOYMENT'|'STOP_DEPLOYMENT',
'waitTimeInMinutes': 123
},
'greenFleetProvisioningOption': {
'action': 'DISCOVER_EXISTING'|'COPY_AUTO_SCALING_GROUP'
}
},
loadBalancerInfo={
'elbInfoList': [
{
'name': 'string'
},
]
}
)
:type applicationName: string
:param applicationName: [REQUIRED]
The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.
:type deploymentGroupName: string
:param deploymentGroupName: [REQUIRED]
The name of a new deployment group for the specified application.
:type deploymentConfigName: string
:param deploymentConfigName: If specified, the deployment configuration name can be either one of the predefined configurations provided with AWS CodeDeploy or a custom deployment configuration that you create by calling the create deployment configuration operation.
CodeDeployDefault.OneAtATime is the default deployment configuration. It is used if a configuration isn't specified for the deployment or the deployment group.
For more information about the predefined deployment configurations in AWS CodeDeploy, see Working with Deployment Groups in AWS CodeDeploy in the AWS CodeDeploy User Guide.
:type ec2TagFilters: list
:param ec2TagFilters: The Amazon EC2 tags on which to filter. The deployment group will include EC2 instances with any of the specified tags.
(dict) --Information about an EC2 tag filter.
Key (string) --The tag filter key.
Value (string) --The tag filter value.
Type (string) --The tag filter type:
KEY_ONLY: Key only.
VALUE_ONLY: Value only.
KEY_AND_VALUE: Key and value.
:type onPremisesInstanceTagFilters: list
:param onPremisesInstanceTagFilters: The on-premises instance tags on which to filter. The deployment group will include on-premises instances with any of the specified tags.
(dict) --Information about an on-premises instance tag filter.
Key (string) --The on-premises instance tag filter key.
Value (string) --The on-premises instance tag filter value.
Type (string) --The on-premises instance tag filter type:
KEY_ONLY: Key only.
VALUE_ONLY: Value only.
KEY_AND_VALUE: Key and value.
:type autoScalingGroups: list
:param autoScalingGroups: A list of associated Auto Scaling groups.
(string) --
:type serviceRoleArn: string
:param serviceRoleArn: [REQUIRED]
A service role ARN that allows AWS CodeDeploy to act on the user's behalf when interacting with AWS services.
:type triggerConfigurations: list
:param triggerConfigurations: Information about triggers to create when the deployment group is created. For examples, see Create a Trigger for an AWS CodeDeploy Event in the AWS CodeDeploy User Guide.
(dict) --Information about notification triggers for the deployment group.
triggerName (string) --The name of the notification trigger.
triggerTargetArn (string) --The ARN of the Amazon Simple Notification Service topic through which notifications about deployment or instance events are sent.
triggerEvents (list) --The event type or types for which notifications are triggered.
(string) --
:type alarmConfiguration: dict
:param alarmConfiguration: Information to add about Amazon CloudWatch alarms when the deployment group is created.
enabled (boolean) --Indicates whether the alarm configuration is enabled.
ignorePollAlarmFailure (boolean) --Indicates whether a deployment should continue if information about the current state of alarms cannot be retrieved from Amazon CloudWatch. The default value is false.
true: The deployment will proceed even if alarm status information can't be retrieved from Amazon CloudWatch.
false: The deployment will stop if alarm status information can't be retrieved from Amazon CloudWatch.
alarms (list) --A list of alarms configured for the deployment group. A maximum of 10 alarms can be added to a deployment group.
(dict) --Information about an alarm.
name (string) --The name of the alarm. Maximum length is 255 characters. Each alarm name can be used only once in a list of alarms.
:type autoRollbackConfiguration: dict
:param autoRollbackConfiguration: Configuration information for an automatic rollback that is added when a deployment group is created.
enabled (boolean) --Indicates whether a defined automatic rollback configuration is currently enabled.
events (list) --The event type or types that trigger a rollback.
(string) --
:type deploymentStyle: dict
:param deploymentStyle: Information about the type of deployment, in-place or blue/green, that you want to run and whether to route deployment traffic behind a load balancer.
deploymentType (string) --Indicates whether to run an in-place deployment or a blue/green deployment.
deploymentOption (string) --Indicates whether to route deployment traffic behind a load balancer.
:type blueGreenDeploymentConfiguration: dict
:param blueGreenDeploymentConfiguration: Information about blue/green deployment options for a deployment group.
terminateBlueInstancesOnDeploymentSuccess (dict) --Information about whether to terminate instances in the original fleet during a blue/green deployment.
action (string) --The action to take on instances in the original environment after a successful blue/green deployment.
TERMINATE: Instances are terminated after a specified wait time.
KEEP_ALIVE: Instances are left running after they are deregistered from the load balancer and removed from the deployment group.
terminationWaitTimeInMinutes (integer) --The number of minutes to wait after a successful blue/green deployment before terminating instances from the original environment.
deploymentReadyOption (dict) --Information about the action to take when newly provisioned instances are ready to receive traffic in a blue/green deployment.
actionOnTimeout (string) --Information about when to reroute traffic from an original environment to a replacement environment in a blue/green deployment.
CONTINUE_DEPLOYMENT: Register new instances with the load balancer immediately after the new application revision is installed on the instances in the replacement environment.
STOP_DEPLOYMENT: Do not register new instances with load balancer unless traffic is rerouted manually. If traffic is not rerouted manually before the end of the specified wait period, the deployment status is changed to Stopped.
waitTimeInMinutes (integer) --The number of minutes to wait before the status of a blue/green deployment changed to Stopped if rerouting is not started manually. Applies only to the STOP_DEPLOYMENT option for actionOnTimeout
greenFleetProvisioningOption (dict) --Information about how instances are provisioned for a replacement environment in a blue/green deployment.
action (string) --The method used to add instances to a replacement environment.
DISCOVER_EXISTING: Use instances that already exist or will be created manually.
COPY_AUTO_SCALING_GROUP: Use settings from a specified Auto Scaling group to define and create instances in a new Auto Scaling group.
:type loadBalancerInfo: dict
:param loadBalancerInfo: Information about the load balancer used in a deployment.
elbInfoList (list) --An array containing information about the load balancer in Elastic Load Balancing to use in a deployment.
(dict) --Information about a load balancer in Elastic Load Balancing to use in a deployment.
name (string) --For blue/green deployments, the name of the load balancer that will be used to route traffic from original instances to replacement instances in a blue/green deployment. For in-place deployments, the name of the load balancer that instances are deregistered from so they are not serving traffic during a deployment, and then re-registered with after the deployment completes.
:rtype: dict
:return: {
'deploymentGroupId': 'string'
}
|
[
"Creates",
"a",
"deployment",
"group",
"to",
"which",
"application",
"revisions",
"will",
"be",
"deployed",
".",
"See",
"also",
":",
"AWS",
"API",
"Documentation",
":",
"example",
":",
"response",
"=",
"client",
".",
"create_deployment_group",
"(",
"applicationName",
"=",
"string",
"deploymentGroupName",
"=",
"string",
"deploymentConfigName",
"=",
"string",
"ec2TagFilters",
"=",
"[",
"{",
"Key",
":",
"string",
"Value",
":",
"string",
"Type",
":",
"KEY_ONLY",
"|",
"VALUE_ONLY",
"|",
"KEY_AND_VALUE",
"}",
"]",
"onPremisesInstanceTagFilters",
"=",
"[",
"{",
"Key",
":",
"string",
"Value",
":",
"string",
"Type",
":",
"KEY_ONLY",
"|",
"VALUE_ONLY",
"|",
"KEY_AND_VALUE",
"}",
"]",
"autoScalingGroups",
"=",
"[",
"string",
"]",
"serviceRoleArn",
"=",
"string",
"triggerConfigurations",
"=",
"[",
"{",
"triggerName",
":",
"string",
"triggerTargetArn",
":",
"string",
"triggerEvents",
":",
"[",
"DeploymentStart",
"|",
"DeploymentSuccess",
"|",
"DeploymentFailure",
"|",
"DeploymentStop",
"|",
"DeploymentRollback",
"|",
"DeploymentReady",
"|",
"InstanceStart",
"|",
"InstanceSuccess",
"|",
"InstanceFailure",
"|",
"InstanceReady",
"]",
"}",
"]",
"alarmConfiguration",
"=",
"{",
"enabled",
":",
"True|False",
"ignorePollAlarmFailure",
":",
"True|False",
"alarms",
":",
"[",
"{",
"name",
":",
"string",
"}",
"]",
"}",
"autoRollbackConfiguration",
"=",
"{",
"enabled",
":",
"True|False",
"events",
":",
"[",
"DEPLOYMENT_FAILURE",
"|",
"DEPLOYMENT_STOP_ON_ALARM",
"|",
"DEPLOYMENT_STOP_ON_REQUEST",
"]",
"}",
"deploymentStyle",
"=",
"{",
"deploymentType",
":",
"IN_PLACE",
"|",
"BLUE_GREEN",
"deploymentOption",
":",
"WITH_TRAFFIC_CONTROL",
"|",
"WITHOUT_TRAFFIC_CONTROL",
"}",
"blueGreenDeploymentConfiguration",
"=",
"{",
"terminateBlueInstancesOnDeploymentSuccess",
":",
"{",
"action",
":",
"TERMINATE",
"|",
"KEEP_ALIVE",
"terminationWaitTimeInMinutes",
":",
"123",
"}",
"deploymentReadyOption",
":",
"{",
"actionOnTimeout",
":",
"CONTINUE_DEPLOYMENT",
"|",
"STOP_DEPLOYMENT",
"waitTimeInMinutes",
":",
"123",
"}",
"greenFleetProvisioningOption",
":",
"{",
"action",
":",
"DISCOVER_EXISTING",
"|",
"COPY_AUTO_SCALING_GROUP",
"}",
"}",
"loadBalancerInfo",
"=",
"{",
"elbInfoList",
":",
"[",
"{",
"name",
":",
"string",
"}",
"]",
"}",
")",
":",
"type",
"applicationName",
":",
"string",
":",
"param",
"applicationName",
":",
"[",
"REQUIRED",
"]",
"The",
"name",
"of",
"an",
"AWS",
"CodeDeploy",
"application",
"associated",
"with",
"the",
"applicable",
"IAM",
"user",
"or",
"AWS",
"account",
"."
] |
python
|
train
|
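The create_deployment_group documentation above lists applicationName, deploymentGroupName, and serviceRoleArn as the only required parameters; below is a minimal sketch of such a call with boto3, assuming credentials and region are already configured — the application, group, and role names are placeholders, not values from the source.
import boto3

# Minimal call using only the required parameters documented above,
# plus the default predefined deployment configuration.
client = boto3.client('codedeploy')
response = client.create_deployment_group(
    applicationName='my-app',                # placeholder application name
    deploymentGroupName='my-app-staging',    # placeholder deployment group name
    serviceRoleArn='arn:aws:iam::123456789012:role/CodeDeployServiceRole',  # placeholder role ARN
    deploymentConfigName='CodeDeployDefault.OneAtATime',
)
print(response['deploymentGroupId'])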
TUT-ARG/sed_eval
|
sed_eval/audio_tag.py
|
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/audio_tag.py#L597-L656
|
def results_class_wise_average_metrics(self):
"""Class-wise averaged metrics
Returns
-------
dict
results in a dictionary format
"""
class_wise_results = self.results_class_wise_metrics()
class_wise_eer = []
class_wise_fmeasure = []
class_wise_precision = []
class_wise_recall = []
for class_label in class_wise_results:
if class_wise_results[class_label]['eer']['eer'] is not None:
class_wise_eer.append(class_wise_results[class_label]['eer']['eer'])
if class_wise_results[class_label]['f_measure']['f_measure'] is not None:
class_wise_fmeasure.append(class_wise_results[class_label]['f_measure']['f_measure'])
class_wise_precision.append(class_wise_results[class_label]['f_measure']['precision'])
class_wise_recall.append(class_wise_results[class_label]['f_measure']['recall'])
if class_wise_eer:
eer = float(numpy.nanmean(class_wise_eer))
else:
eer = None
if class_wise_fmeasure:
f_measure = float(numpy.nanmean(class_wise_fmeasure))
else:
f_measure = None
if class_wise_precision:
precision = float(numpy.nanmean(class_wise_precision))
else:
precision = None
if class_wise_recall:
recall = float(numpy.nanmean(class_wise_recall))
else:
recall = None
return {
'eer': {
'eer': eer
},
'f_measure': {
'f_measure': f_measure,
'precision': precision,
'recall': recall,
}
}
|
[
"def",
"results_class_wise_average_metrics",
"(",
"self",
")",
":",
"class_wise_results",
"=",
"self",
".",
"results_class_wise_metrics",
"(",
")",
"class_wise_eer",
"=",
"[",
"]",
"class_wise_fmeasure",
"=",
"[",
"]",
"class_wise_precision",
"=",
"[",
"]",
"class_wise_recall",
"=",
"[",
"]",
"for",
"class_label",
"in",
"class_wise_results",
":",
"if",
"class_wise_results",
"[",
"class_label",
"]",
"[",
"'eer'",
"]",
"[",
"'eer'",
"]",
"is",
"not",
"None",
":",
"class_wise_eer",
".",
"append",
"(",
"class_wise_results",
"[",
"class_label",
"]",
"[",
"'eer'",
"]",
"[",
"'eer'",
"]",
")",
"if",
"class_wise_results",
"[",
"class_label",
"]",
"[",
"'f_measure'",
"]",
"[",
"'f_measure'",
"]",
"is",
"not",
"None",
":",
"class_wise_fmeasure",
".",
"append",
"(",
"class_wise_results",
"[",
"class_label",
"]",
"[",
"'f_measure'",
"]",
"[",
"'f_measure'",
"]",
")",
"class_wise_precision",
".",
"append",
"(",
"class_wise_results",
"[",
"class_label",
"]",
"[",
"'f_measure'",
"]",
"[",
"'precision'",
"]",
")",
"class_wise_recall",
".",
"append",
"(",
"class_wise_results",
"[",
"class_label",
"]",
"[",
"'f_measure'",
"]",
"[",
"'recall'",
"]",
")",
"if",
"class_wise_eer",
":",
"eer",
"=",
"float",
"(",
"numpy",
".",
"nanmean",
"(",
"class_wise_eer",
")",
")",
"else",
":",
"eer",
"=",
"None",
"if",
"class_wise_fmeasure",
":",
"f_measure",
"=",
"float",
"(",
"numpy",
".",
"nanmean",
"(",
"class_wise_fmeasure",
")",
")",
"else",
":",
"f_measure",
"=",
"None",
"if",
"class_wise_precision",
":",
"precision",
"=",
"float",
"(",
"numpy",
".",
"nanmean",
"(",
"class_wise_precision",
")",
")",
"else",
":",
"precision",
"=",
"None",
"if",
"class_wise_recall",
":",
"recall",
"=",
"float",
"(",
"numpy",
".",
"nanmean",
"(",
"class_wise_recall",
")",
")",
"else",
":",
"recall",
"=",
"None",
"return",
"{",
"'eer'",
":",
"{",
"'eer'",
":",
"eer",
"}",
",",
"'f_measure'",
":",
"{",
"'f_measure'",
":",
"f_measure",
",",
"'precision'",
":",
"precision",
",",
"'recall'",
":",
"recall",
",",
"}",
"}"
] |
Class-wise averaged metrics
Returns
-------
dict
results in a dictionary format
|
[
"Class",
"-",
"wise",
"averaged",
"metrics"
] |
python
|
train
|
numenta/nupic
|
src/nupic/frameworks/opf/helpers.py
|
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/helpers.py#L64-L75
|
def getExperimentDescriptionInterfaceFromModule(module):
"""
:param module: imported description.py module
:returns: (:class:`nupic.frameworks.opf.exp_description_api.DescriptionIface`)
represents the experiment description
"""
result = module.descriptionInterface
assert isinstance(result, exp_description_api.DescriptionIface), \
"expected DescriptionIface-based instance, but got %s" % type(result)
return result
|
[
"def",
"getExperimentDescriptionInterfaceFromModule",
"(",
"module",
")",
":",
"result",
"=",
"module",
".",
"descriptionInterface",
"assert",
"isinstance",
"(",
"result",
",",
"exp_description_api",
".",
"DescriptionIface",
")",
",",
"\"expected DescriptionIface-based instance, but got %s\"",
"%",
"type",
"(",
"result",
")",
"return",
"result"
] |
:param module: imported description.py module
:returns: (:class:`nupic.frameworks.opf.exp_description_api.DescriptionIface`)
represents the experiment description
|
[
":",
"param",
"module",
":",
"imported",
"description",
".",
"py",
"module"
] |
python
|
valid
|
ejeschke/ginga
|
ginga/util/io_asdf.py
|
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/io_asdf.py#L69-L86
|
def loader(filepath, logger=None, **kwargs):
"""
Load an object from an ASDF file.
See :func:`ginga.util.loader` for more info.
TODO: kwargs may contain info about what part of the file to load
"""
# see ginga.util.loader module
# TODO: return an AstroTable if loading a table, etc.
# for now, assume always an image
from ginga import AstroImage
image = AstroImage.AstroImage(logger=logger)
with asdf.open(filepath) as asdf_f:
#image.load_asdf(asdf_f, **kwargs)
image.load_asdf(asdf_f)
return image
|
[
"def",
"loader",
"(",
"filepath",
",",
"logger",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# see ginga.util.loader module",
"# TODO: return an AstroTable if loading a table, etc.",
"# for now, assume always an image",
"from",
"ginga",
"import",
"AstroImage",
"image",
"=",
"AstroImage",
".",
"AstroImage",
"(",
"logger",
"=",
"logger",
")",
"with",
"asdf",
".",
"open",
"(",
"filepath",
")",
"as",
"asdf_f",
":",
"#image.load_asdf(asdf_f, **kwargs)",
"image",
".",
"load_asdf",
"(",
"asdf_f",
")",
"return",
"image"
] |
Load an object from an ASDF file.
See :func:`ginga.util.loader` for more info.
TODO: kwargs may contain info about what part of the file to load
|
[
"Load",
"an",
"object",
"from",
"an",
"ASDF",
"file",
".",
"See",
":",
"func",
":",
"ginga",
".",
"util",
".",
"loader",
"for",
"more",
"info",
"."
] |
python
|
train
|
aewallin/allantools
|
allantools/allantools.py
|
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L515-L566
|
def calc_hdev_phase(phase, rate, mj, stride):
""" main calculation fungtion for HDEV and OHDEV
Parameters
----------
phase: np.array
Phase data in seconds.
rate: float
The sampling rate for phase or frequency, in Hz
mj: int
M index value for stride
stride: int
Size of stride
Returns
-------
(dev, deverr, n): tuple
Array of computed values.
Notes
-----
http://www.leapsecond.com/tools/adev_lib.c
1 N-3
s2y(t) = --------------- sum [x(i+3) - 3x(i+2) + 3x(i+1) - x(i) ]^2
6*tau^2 (N-3m) i=1
N=M+1 phase measurements
m is averaging factor
NIST SP 1065 eqn (18) and (20) pages 20 and 21
"""
tau0 = 1.0 / float(rate)
mj = int(mj)
stride = int(stride)
d3 = phase[3 * mj::stride]
d2 = phase[2 * mj::stride]
d1 = phase[1 * mj::stride]
d0 = phase[::stride]
n = min(len(d0), len(d1), len(d2), len(d3))
v_arr = d3[:n] - 3 * d2[:n] + 3 * d1[:n] - d0[:n]
s = np.sum(v_arr * v_arr)
if n == 0:
n = 1
h = np.sqrt(s / 6.0 / float(n)) / float(tau0 * mj)
e = h / np.sqrt(n)
return h, e, n
|
[
"def",
"calc_hdev_phase",
"(",
"phase",
",",
"rate",
",",
"mj",
",",
"stride",
")",
":",
"tau0",
"=",
"1.0",
"/",
"float",
"(",
"rate",
")",
"mj",
"=",
"int",
"(",
"mj",
")",
"stride",
"=",
"int",
"(",
"stride",
")",
"d3",
"=",
"phase",
"[",
"3",
"*",
"mj",
":",
":",
"stride",
"]",
"d2",
"=",
"phase",
"[",
"2",
"*",
"mj",
":",
":",
"stride",
"]",
"d1",
"=",
"phase",
"[",
"1",
"*",
"mj",
":",
":",
"stride",
"]",
"d0",
"=",
"phase",
"[",
":",
":",
"stride",
"]",
"n",
"=",
"min",
"(",
"len",
"(",
"d0",
")",
",",
"len",
"(",
"d1",
")",
",",
"len",
"(",
"d2",
")",
",",
"len",
"(",
"d3",
")",
")",
"v_arr",
"=",
"d3",
"[",
":",
"n",
"]",
"-",
"3",
"*",
"d2",
"[",
":",
"n",
"]",
"+",
"3",
"*",
"d1",
"[",
":",
"n",
"]",
"-",
"d0",
"[",
":",
"n",
"]",
"s",
"=",
"np",
".",
"sum",
"(",
"v_arr",
"*",
"v_arr",
")",
"if",
"n",
"==",
"0",
":",
"n",
"=",
"1",
"h",
"=",
"np",
".",
"sqrt",
"(",
"s",
"/",
"6.0",
"/",
"float",
"(",
"n",
")",
")",
"/",
"float",
"(",
"tau0",
"*",
"mj",
")",
"e",
"=",
"h",
"/",
"np",
".",
"sqrt",
"(",
"n",
")",
"return",
"h",
",",
"e",
",",
"n"
] |
main calculation function for HDEV and OHDEV
Parameters
----------
phase: np.array
Phase data in seconds.
rate: float
The sampling rate for phase or frequency, in Hz
mj: int
M index value for stride
stride: int
Size of stride
Returns
-------
(dev, deverr, n): tuple
Array of computed values.
Notes
-----
http://www.leapsecond.com/tools/adev_lib.c
s2y(t) = 1 / (6*tau^2 * (N-3m)) * sum_{i=1}^{N-3} [ x(i+3) - 3x(i+2) + 3x(i+1) - x(i) ]^2
N=M+1 phase measurements
m is averaging factor
NIST SP 1065 eqn (18) and (20) pages 20 and 21
|
[
"main",
"calculation",
"fungtion",
"for",
"HDEV",
"and",
"OHDEV"
] |
python
|
train
|
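The calc_hdev_phase entry above documents the NIST SP 1065 Hadamard-deviation expression; the standalone sketch below is a hypothetical reimplementation for illustration (not part of allantools) that evaluates the same formula on synthetic phase data with numpy, using stride 1.
import numpy as np

def hdev_point(phase, rate, m):
    # Hadamard deviation at averaging factor m, following the form documented above
    tau0 = 1.0 / rate
    d3, d2, d1, d0 = phase[3 * m:], phase[2 * m:], phase[m:], phase
    n = min(len(d0), len(d1), len(d2), len(d3))
    v = d3[:n] - 3 * d2[:n] + 3 * d1[:n] - d0[:n]
    return np.sqrt(np.sum(v * v) / (6.0 * n)) / (tau0 * m)

rng = np.random.default_rng(0)
phase = np.cumsum(rng.normal(scale=1e-9, size=1000))  # synthetic phase data in seconds
print(hdev_point(phase, rate=1.0, m=1))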
apache/incubator-mxnet
|
python/mxnet/metric.py
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/metric.py#L112-L133
|
def update_dict(self, label, pred):
"""Update the internal evaluation with named label and pred
Parameters
----------
labels : OrderedDict of str -> NDArray
name to array mapping for labels.
preds : OrderedDict of str -> NDArray
name to array mapping of predicted outputs.
"""
if self.output_names is not None:
pred = [pred[name] for name in self.output_names]
else:
pred = list(pred.values())
if self.label_names is not None:
label = [label[name] for name in self.label_names]
else:
label = list(label.values())
self.update(label, pred)
|
[
"def",
"update_dict",
"(",
"self",
",",
"label",
",",
"pred",
")",
":",
"if",
"self",
".",
"output_names",
"is",
"not",
"None",
":",
"pred",
"=",
"[",
"pred",
"[",
"name",
"]",
"for",
"name",
"in",
"self",
".",
"output_names",
"]",
"else",
":",
"pred",
"=",
"list",
"(",
"pred",
".",
"values",
"(",
")",
")",
"if",
"self",
".",
"label_names",
"is",
"not",
"None",
":",
"label",
"=",
"[",
"label",
"[",
"name",
"]",
"for",
"name",
"in",
"self",
".",
"label_names",
"]",
"else",
":",
"label",
"=",
"list",
"(",
"label",
".",
"values",
"(",
")",
")",
"self",
".",
"update",
"(",
"label",
",",
"pred",
")"
] |
Update the internal evaluation with named label and pred
Parameters
----------
labels : OrderedDict of str -> NDArray
name to array mapping for labels.
preds : OrderedDict of str -> NDArray
name to array mapping of predicted outputs.
|
[
"Update",
"the",
"internal",
"evaluation",
"with",
"named",
"label",
"and",
"pred"
] |
python
|
train
|
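A brief usage sketch for update_dict, assuming the stock mx.metric.Accuracy class; with output_names and label_names left unset, the method simply consumes the dict values in order.
from collections import OrderedDict
import mxnet as mx

acc = mx.metric.Accuracy()
labels = OrderedDict([('softmax_label', mx.nd.array([0, 1, 1]))])
preds = OrderedDict([('softmax_output',
                      mx.nd.array([[0.7, 0.3], [0.2, 0.8], [0.4, 0.6]]))])
acc.update_dict(labels, preds)  # falls back to list(dict.values()) when names are unset
print(acc.get())                # ('accuracy', 1.0)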
benedictpaten/sonLib
|
tree.py
|
https://github.com/benedictpaten/sonLib/blob/1decb75bb439b70721ec776f685ce98e25217d26/tree.py#L543-L554
|
def annotateTree(bT, fn):
"""
annotate a tree in an external array using the given function
"""
l = [None]*bT.traversalID.midEnd
def fn2(bT):
l[bT.traversalID.mid] = fn(bT)
if bT.internal:
fn2(bT.left)
fn2(bT.right)
fn2(bT)
return l
|
[
"def",
"annotateTree",
"(",
"bT",
",",
"fn",
")",
":",
"l",
"=",
"[",
"None",
"]",
"*",
"bT",
".",
"traversalID",
".",
"midEnd",
"def",
"fn2",
"(",
"bT",
")",
":",
"l",
"[",
"bT",
".",
"traversalID",
".",
"mid",
"]",
"=",
"fn",
"(",
"bT",
")",
"if",
"bT",
".",
"internal",
":",
"fn2",
"(",
"bT",
".",
"left",
")",
"fn2",
"(",
"bT",
".",
"right",
")",
"fn2",
"(",
"bT",
")",
"return",
"l"
] |
annotate a tree in an external array using the given function
|
[
"annotate",
"a",
"tree",
"in",
"an",
"external",
"array",
"using",
"the",
"given",
"function"
] |
python
|
train
|
pyfca/pyfca
|
pyfca/implications.py
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L170-L191
|
def omega(imps):
"""
Calculates a measure for the size of the implication basis: \sum |U||V|
"""
if isinstance(imps,v_Us_dict):
return sum([omega(V) for U,V in imps.items()])#|V|=1
if isinstance(imps,list):
return sum([omega(x) for x in imps])
if isinstance(imps,str):
#imps = due[-1]
try:
U,V = imps.split("->")
Us = U.split(",") if "," in U else U.split()
Vs = V.split(",") if "," in V else V.split()
res = len(Us)*len(Vs)
return res
except:
return 0
if isinstance(imps,int):
b=bin(imps)[2:]
res = len([x for x in b if x=='1'])
return res
|
[
"def",
"omega",
"(",
"imps",
")",
":",
"if",
"isinstance",
"(",
"imps",
",",
"v_Us_dict",
")",
":",
"return",
"sum",
"(",
"[",
"omega",
"(",
"V",
")",
"for",
"U",
",",
"V",
"in",
"imps",
".",
"items",
"(",
")",
"]",
")",
"#|V|=1",
"if",
"isinstance",
"(",
"imps",
",",
"list",
")",
":",
"return",
"sum",
"(",
"[",
"omega",
"(",
"x",
")",
"for",
"x",
"in",
"imps",
"]",
")",
"if",
"isinstance",
"(",
"imps",
",",
"str",
")",
":",
"#imps = due[-1]",
"try",
":",
"U",
",",
"V",
"=",
"imps",
".",
"split",
"(",
"\"->\"",
")",
"Us",
"=",
"U",
".",
"split",
"(",
"\",\"",
")",
"if",
"\",\"",
"in",
"U",
"else",
"U",
".",
"split",
"(",
")",
"Vs",
"=",
"V",
".",
"split",
"(",
"\",\"",
")",
"if",
"\",\"",
"in",
"V",
"else",
"V",
".",
"split",
"(",
")",
"res",
"=",
"len",
"(",
"Us",
")",
"*",
"len",
"(",
"Vs",
")",
"return",
"res",
"except",
":",
"return",
"0",
"if",
"isinstance",
"(",
"imps",
",",
"int",
")",
":",
"b",
"=",
"bin",
"(",
"imps",
")",
"[",
"2",
":",
"]",
"res",
"=",
"len",
"(",
"[",
"x",
"for",
"x",
"in",
"b",
"if",
"x",
"==",
"'1'",
"]",
")",
"return",
"res"
] |
Calculates a measure for the size of the implication basis: \sum |U||V|
|
[
"Calculates",
"a",
"measure",
"for",
"the",
"size",
"of",
"the",
"implication",
"basis",
":",
"\\",
"sum",
"|U||V|"
] |
python
|
train
|
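A quick worked example of the \sum |U||V| measure computed by omega above, with the omega function defined in that row assumed to be in scope; the attribute names are hypothetical.
imps = ["a,b->c", "d->e,f"]   # two implications over hypothetical attributes
print(omega(imps))            # 2*1 + 1*2 = 4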
regebro/hovercraft
|
hovercraft/position.py
|
https://github.com/regebro/hovercraft/blob/d9f63bfdfe1519c4d7a81697ee066e49dc26a30b/hovercraft/position.py#L239-L244
|
def position_slides(tree):
"""Position the slides in the tree"""
positions = gather_positions(tree)
positions = calculate_positions(positions)
update_positions(tree, positions)
|
[
"def",
"position_slides",
"(",
"tree",
")",
":",
"positions",
"=",
"gather_positions",
"(",
"tree",
")",
"positions",
"=",
"calculate_positions",
"(",
"positions",
")",
"update_positions",
"(",
"tree",
",",
"positions",
")"
] |
Position the slides in the tree
|
[
"Position",
"the",
"slides",
"in",
"the",
"tree"
] |
python
|
train
|
gem/oq-engine
|
openquake/hazardlib/gsim/bindi_2011.py
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/bindi_2011.py#L200-L207
|
def _get_mechanism(self, rup, C):
"""
Compute the fifth term of the equation 1 described on paragraph :
Get fault type dummy variables, see Table 1
"""
U, SS, NS, RS = self._get_fault_type_dummy_variables(rup)
return C['f1'] * NS + C['f2'] * RS + C['f3'] * SS
|
[
"def",
"_get_mechanism",
"(",
"self",
",",
"rup",
",",
"C",
")",
":",
"U",
",",
"SS",
",",
"NS",
",",
"RS",
"=",
"self",
".",
"_get_fault_type_dummy_variables",
"(",
"rup",
")",
"return",
"C",
"[",
"'f1'",
"]",
"*",
"NS",
"+",
"C",
"[",
"'f2'",
"]",
"*",
"RS",
"+",
"C",
"[",
"'f3'",
"]",
"*",
"SS"
] |
Compute the fifth term of the equation 1 described on paragraph :
Get fault type dummy variables, see Table 1
|
[
"Compute",
"the",
"fifth",
"term",
"of",
"the",
"equation",
"1",
"described",
"on",
"paragraph",
":",
"Get",
"fault",
"type",
"dummy",
"variables",
"see",
"Table",
"1"
] |
python
|
train
|
letuananh/chirptext
|
chirptext/texttaglib.py
|
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/texttaglib.py#L588-L598
|
def get(self, sent_id, **kwargs):
''' If sent_id exists, remove and return the associated sentence object else return default.
If no default is provided, KeyError will be raised.'''
if sent_id is not None and not isinstance(sent_id, int):
sent_id = int(sent_id)
if sent_id is None or not self.has_id(sent_id):
if 'default' in kwargs:
return kwargs['default']
else:
raise KeyError("Invalid sentence ID ({})".format(sent_id))
return self.__sent_map[sent_id]
|
[
"def",
"get",
"(",
"self",
",",
"sent_id",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"sent_id",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"sent_id",
",",
"int",
")",
":",
"sent_id",
"=",
"int",
"(",
"sent_id",
")",
"if",
"sent_id",
"is",
"None",
"or",
"not",
"self",
".",
"has_id",
"(",
"sent_id",
")",
":",
"if",
"'default'",
"in",
"kwargs",
":",
"return",
"kwargs",
"[",
"'default'",
"]",
"else",
":",
"raise",
"KeyError",
"(",
"\"Invalid sentence ID ({})\"",
".",
"format",
"(",
"sent_id",
")",
")",
"return",
"self",
".",
"__sent_map",
"[",
"sent_id",
"]"
] |
If sent_id exists, remove and return the associated sentence object else return default.
If no default is provided, KeyError will be raised.
|
[
"If",
"sent_id",
"exists",
"remove",
"and",
"return",
"the",
"associated",
"sentence",
"object",
"else",
"return",
"default",
".",
"If",
"no",
"default",
"is",
"provided",
"KeyError",
"will",
"be",
"raised",
"."
] |
python
|
train
|
flukso/tmpo-py
|
tmpo/__init__.py
|
https://github.com/flukso/tmpo-py/blob/3c99e3d123bc985a6fba2558922b29430d2a0a94/tmpo/__init__.py#L399-L424
|
def first_timestamp(self, sid, epoch=False):
"""
Get the first available timestamp for a sensor
Parameters
----------
sid : str
SensorID
epoch : bool
default False
If True return as epoch
If False return as pd.Timestamp
Returns
-------
pd.Timestamp | int
"""
first_block = self.dbcur.execute(SQL_TMPO_FIRST, (sid,)).fetchone()
if first_block is None:
return None
timestamp = first_block[2]
if not epoch:
timestamp = pd.Timestamp.utcfromtimestamp(timestamp)
timestamp = timestamp.tz_localize('UTC')
return timestamp
|
[
"def",
"first_timestamp",
"(",
"self",
",",
"sid",
",",
"epoch",
"=",
"False",
")",
":",
"first_block",
"=",
"self",
".",
"dbcur",
".",
"execute",
"(",
"SQL_TMPO_FIRST",
",",
"(",
"sid",
",",
")",
")",
".",
"fetchone",
"(",
")",
"if",
"first_block",
"is",
"None",
":",
"return",
"None",
"timestamp",
"=",
"first_block",
"[",
"2",
"]",
"if",
"not",
"epoch",
":",
"timestamp",
"=",
"pd",
".",
"Timestamp",
".",
"utcfromtimestamp",
"(",
"timestamp",
")",
"timestamp",
"=",
"timestamp",
".",
"tz_localize",
"(",
"'UTC'",
")",
"return",
"timestamp"
] |
Get the first available timestamp for a sensor
Parameters
----------
sid : str
SensorID
epoch : bool
default False
If True return as epoch
If False return as pd.Timestamp
Returns
-------
pd.Timestamp | int
|
[
"Get",
"the",
"first",
"available",
"timestamp",
"for",
"a",
"sensor"
] |
python
|
train
|