text (string, lengths 89–104k) | code_tokens (list) | avg_line_len (float64, 7.91–980) | score (float64, 0–630) |
---|---|---|---|
def p_CommentOrEmptyLineList(p):
    '''
    CommentOrEmptyLineList :
                           | CommentOrEmptyLine
                           | CommentOrEmptyLineList CommentOrEmptyLine
    '''
    if len(p) <= 1:
        p[0] = CommentOrEmptyLineList(None, None)
    elif len(p) <= 2:
        p[0] = CommentOrEmptyLineList(None, p[1])
    else:
        p[0] = CommentOrEmptyLineList(p[1], p[2])
|
[
"def",
"p_CommentOrEmptyLineList",
"(",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"<=",
"1",
":",
"p",
"[",
"0",
"]",
"=",
"CommentOrEmptyLineList",
"(",
"None",
",",
"None",
")",
"elif",
"len",
"(",
"p",
")",
"<=",
"2",
":",
"p",
"[",
"0",
"]",
"=",
"CommentOrEmptyLineList",
"(",
"None",
",",
"p",
"[",
"1",
"]",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"CommentOrEmptyLineList",
"(",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"2",
"]",
")"
] | 32.333333 | 18.5 |
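A note on the row above: PLY (Python Lex-Yacc) treats each `p_*` function's docstring as a grammar production and exposes the matched symbols through the `p` sequence, which is why the body branches on `len(p)`. Below is a self-contained toy sketch of the same convention; the token set and grammar are invented for illustration and are not part of the dataset row.

import ply.lex as lex
import ply.yacc as yacc

tokens = ('NUMBER',)   # token names PLY scans for
t_ignore = ' '         # skip spaces in the lexer

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)

def p_expr(p):
    '''expr : NUMBER
            | expr NUMBER'''
    # p[0] is the production's value; p[1], p[2] are the matched symbols.
    p[0] = p[1] if len(p) == 2 else p[1] + p[2]

def p_error(p):
    pass

lexer = lex.lex()
parser = yacc.yacc()          # builds LALR tables from the p_* docstrings
print(parser.parse('1 2 3'))  # -> 6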
def cancelar_ultima_venda(self, chave_cfe, dados_cancelamento):
    """Overrides :meth:`~satcfe.base.FuncoesSAT.cancelar_ultima_venda`.

    :return: A SAT response specialized for ``CancelarUltimaVenda``.
    :rtype: satcfe.resposta.cancelarultimavenda.RespostaCancelarUltimaVenda
    """
    resp = self._http_post(
        'cancelarultimavenda',
        chave_cfe=chave_cfe,
        dados_cancelamento=dados_cancelamento.documento())
    conteudo = resp.json()
    return RespostaCancelarUltimaVenda.analisar(conteudo.get('retorno'))
|
[
"def",
"cancelar_ultima_venda",
"(",
"self",
",",
"chave_cfe",
",",
"dados_cancelamento",
")",
":",
"resp",
"=",
"self",
".",
"_http_post",
"(",
"'cancelarultimavenda'",
",",
"chave_cfe",
"=",
"chave_cfe",
",",
"dados_cancelamento",
"=",
"dados_cancelamento",
".",
"documento",
"(",
")",
")",
"conteudo",
"=",
"resp",
".",
"json",
"(",
")",
"return",
"RespostaCancelarUltimaVenda",
".",
"analisar",
"(",
"conteudo",
".",
"get",
"(",
"'retorno'",
")",
")"
] | 51.181818 | 20.545455 |
def get(self, key, value):
    """Get a single app by id or name.

    Supports the resource cache.

    Keyword Args:
        id (str): Full app id
        name (str): App name

    Returns:
        App: Corresponding App resource instance

    Raises:
        TypeError: No or multiple keyword arguments provided
        ValueError: No matching app found on server
    """
    if key == 'id':
        # Server returns 204 instead of 404 for a non-existent app id
        response = self._swimlane.request('get', 'app/{}'.format(value))
        if response.status_code == 204:
            raise ValueError('No app with id "{}"'.format(value))
        return App(
            self._swimlane,
            response.json()
        )
    else:
        # Workaround for lack of support for get by name
        # Holdover from previous driver support, to be fixed as part of 3.x
        for app in self.list():
            if value and value == app.name:
                return app

        # No matching app found
        raise ValueError('No app with name "{}"'.format(value))
|
[
"def",
"get",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"if",
"key",
"==",
"'id'",
":",
"# Server returns 204 instead of 404 for a non-existent app id",
"response",
"=",
"self",
".",
"_swimlane",
".",
"request",
"(",
"'get'",
",",
"'app/{}'",
".",
"format",
"(",
"value",
")",
")",
"if",
"response",
".",
"status_code",
"==",
"204",
":",
"raise",
"ValueError",
"(",
"'No app with id \"{}\"'",
".",
"format",
"(",
"value",
")",
")",
"return",
"App",
"(",
"self",
".",
"_swimlane",
",",
"response",
".",
"json",
"(",
")",
")",
"else",
":",
"# Workaround for lack of support for get by name",
"# Holdover from previous driver support, to be fixed as part of 3.x",
"for",
"app",
"in",
"self",
".",
"list",
"(",
")",
":",
"if",
"value",
"and",
"value",
"==",
"app",
".",
"name",
":",
"return",
"app",
"# No matching app found",
"raise",
"ValueError",
"(",
"'No app with name \"{}\"'",
".",
"format",
"(",
"value",
")",
")"
] | 32.857143 | 20.485714 |
def parse(cls, querydict):
    """Parse querydict data.

    The expected arguments are:
    distinct, fields, filter, include, page, sort

    Parameters
    ----------
    querydict : django.http.request.QueryDict
        MultiValueDict with query arguments.

    Returns
    -------
    result : dict
        Dictionary in format {key: value}.

    Raises
    ------
    ValueError
        If the arguments contain an unknown key.
    """
    for key in querydict.keys():
        if not any((key in JSONAPIQueryDict._fields,
                    cls.RE_FIELDS.match(key))):
            msg = "Query parameter {} is not known".format(key)
            raise ValueError(msg)

    result = JSONAPIQueryDict(
        distinct=cls.prepare_values(querydict.getlist('distinct')),
        fields=cls.parse_fields(querydict),
        filter=querydict.getlist('filter'),
        include=cls.prepare_values(querydict.getlist('include')),
        page=int(querydict.get('page')) if querydict.get('page') else None,
        sort=cls.prepare_values(querydict.getlist('sort'))
    )
    return result
|
[
"def",
"parse",
"(",
"cls",
",",
"querydict",
")",
":",
"for",
"key",
"in",
"querydict",
".",
"keys",
"(",
")",
":",
"if",
"not",
"any",
"(",
"(",
"key",
"in",
"JSONAPIQueryDict",
".",
"_fields",
",",
"cls",
".",
"RE_FIELDS",
".",
"match",
"(",
"key",
")",
")",
")",
":",
"msg",
"=",
"\"Query parameter {} is not known\"",
".",
"format",
"(",
"key",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"result",
"=",
"JSONAPIQueryDict",
"(",
"distinct",
"=",
"cls",
".",
"prepare_values",
"(",
"querydict",
".",
"getlist",
"(",
"'distinct'",
")",
")",
",",
"fields",
"=",
"cls",
".",
"parse_fields",
"(",
"querydict",
")",
",",
"filter",
"=",
"querydict",
".",
"getlist",
"(",
"'filter'",
")",
",",
"include",
"=",
"cls",
".",
"prepare_values",
"(",
"querydict",
".",
"getlist",
"(",
"'include'",
")",
")",
",",
"page",
"=",
"int",
"(",
"querydict",
".",
"get",
"(",
"'page'",
")",
")",
"if",
"querydict",
".",
"get",
"(",
"'page'",
")",
"else",
"None",
",",
"sort",
"=",
"cls",
".",
"prepare_values",
"(",
"querydict",
".",
"getlist",
"(",
"'sort'",
")",
")",
")",
"return",
"result"
] | 30 | 21.025641 |
def get_execution_engine(name):
    """Get the execution engine by name."""
    manager = driver.DriverManager(
        namespace='cosmic_ray.execution_engines',
        name=name,
        invoke_on_load=True,
        on_load_failure_callback=_log_extension_loading_failure,
    )
    return manager.driver
|
[
"def",
"get_execution_engine",
"(",
"name",
")",
":",
"manager",
"=",
"driver",
".",
"DriverManager",
"(",
"namespace",
"=",
"'cosmic_ray.execution_engines'",
",",
"name",
"=",
"name",
",",
"invoke_on_load",
"=",
"True",
",",
"on_load_failure_callback",
"=",
"_log_extension_loading_failure",
",",
")",
"return",
"manager",
".",
"driver"
] | 29.8 | 17.1 |
def uid(self):
    """Return the user id that the process will run as.

    :rtype: int

    """
    if not self._uid:
        if self.config.daemon.user:
            self._uid = pwd.getpwnam(self.config.daemon.user).pw_uid
        else:
            self._uid = os.getuid()
    return self._uid
|
[
"def",
"uid",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_uid",
":",
"if",
"self",
".",
"config",
".",
"daemon",
".",
"user",
":",
"self",
".",
"_uid",
"=",
"pwd",
".",
"getpwnam",
"(",
"self",
".",
"config",
".",
"daemon",
".",
"user",
")",
".",
"pw_uid",
"else",
":",
"self",
".",
"_uid",
"=",
"os",
".",
"getuid",
"(",
")",
"return",
"self",
".",
"_uid"
] | 26.5 | 17.916667 |
def names(self):
    """The names referenced in this code object.

    Names come from instructions like LOAD_GLOBAL or STORE_ATTR,
    where the name of the global or attribute is needed at runtime.
    """
    # We must sort to preserve the order between calls.
    # The set comprehension is to drop the duplicates.
    return tuple(sorted({
        instr.arg for instr in self.instrs if instr.uses_name
    }))
|
[
"def",
"names",
"(",
"self",
")",
":",
"# We must sort to preserve the order between calls.",
"# The set comprehension is to drop the duplicates.",
"return",
"tuple",
"(",
"sorted",
"(",
"{",
"instr",
".",
"arg",
"for",
"instr",
"in",
"self",
".",
"instrs",
"if",
"instr",
".",
"uses_name",
"}",
")",
")"
] | 39.909091 | 20.363636 |
def list(self, request, *args, **kwargs):
    """
    Available request parameters:

    - ?type=type_of_statistics_objects (required; one of: 'customer', 'project')
    - ?from=timestamp (default: now - 30 days, for example: 1415910025)
    - ?to=timestamp (default: now, for example: 1415912625)
    - ?datapoints=how many data points the answer should contain (default: 6)

    The answer will be a list of datapoints (dictionaries).
    Each datapoint will contain the fields 'to', 'from' and 'value'.
    'value' is the count of objects created between the 'from' and 'to' dates.

    Example:

    .. code-block:: javascript

        [
            {"to": 471970877, "from": 1, "value": 5},
            {"to": 943941753, "from": 471970877, "value": 0},
            {"to": 1415912629, "from": 943941753, "value": 3}
        ]
    """
    return super(CreationTimeStatsView, self).list(request, *args, **kwargs)
|
[
"def",
"list",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"super",
"(",
"CreationTimeStatsView",
",",
"self",
")",
".",
"list",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 40.208333 | 26.541667 |
def mel(sr, n_fft, n_mels=128, fmin=0.0, fmax=None, htk=False,
        norm=1, dtype=np.float32):
    """Create a Filterbank matrix to combine FFT bins into Mel-frequency bins

    Parameters
    ----------
    sr : number > 0 [scalar]
        sampling rate of the incoming signal

    n_fft : int > 0 [scalar]
        number of FFT components

    n_mels : int > 0 [scalar]
        number of Mel bands to generate

    fmin : float >= 0 [scalar]
        lowest frequency (in Hz)

    fmax : float >= 0 [scalar]
        highest frequency (in Hz).
        If `None`, use `fmax = sr / 2.0`

    htk : bool [scalar]
        use HTK formula instead of Slaney

    norm : {None, 1, np.inf} [scalar]
        if 1, divide the triangular mel weights by the width of the mel band
        (area normalization). Otherwise, leave all the triangles aiming for
        a peak value of 1.0

    dtype : np.dtype
        The data type of the output basis.
        By default, uses 32-bit (single-precision) floating point.

    Returns
    -------
    M : np.ndarray [shape=(n_mels, 1 + n_fft/2)]
        Mel transform matrix

    Notes
    -----
    This function caches at level 10.

    Examples
    --------
    >>> melfb = librosa.filters.mel(22050, 2048)
    >>> melfb
    array([[ 0.   ,  0.016, ...,  0.   ,  0.   ],
           [ 0.   ,  0.   , ...,  0.   ,  0.   ],
           ...,
           [ 0.   ,  0.   , ...,  0.   ,  0.   ],
           [ 0.   ,  0.   , ...,  0.   ,  0.   ]])

    Clip the maximum frequency to 8KHz

    >>> librosa.filters.mel(22050, 2048, fmax=8000)
    array([[ 0.  ,  0.02, ...,  0.  ,  0.  ],
           [ 0.  ,  0.  , ...,  0.  ,  0.  ],
           ...,
           [ 0.  ,  0.  , ...,  0.  ,  0.  ],
           [ 0.  ,  0.  , ...,  0.  ,  0.  ]])

    >>> import matplotlib.pyplot as plt
    >>> plt.figure()
    >>> librosa.display.specshow(melfb, x_axis='linear')
    >>> plt.ylabel('Mel filter')
    >>> plt.title('Mel filter bank')
    >>> plt.colorbar()
    >>> plt.tight_layout()
    """
    if fmax is None:
        fmax = float(sr) / 2

    if norm is not None and norm != 1 and norm != np.inf:
        raise ParameterError('Unsupported norm: {}'.format(repr(norm)))

    # Initialize the weights
    n_mels = int(n_mels)
    weights = np.zeros((n_mels, int(1 + n_fft // 2)), dtype=dtype)

    # Center freqs of each FFT bin
    fftfreqs = fft_frequencies(sr=sr, n_fft=n_fft)

    # 'Center freqs' of mel bands - uniformly spaced between limits
    mel_f = mel_frequencies(n_mels + 2, fmin=fmin, fmax=fmax, htk=htk)

    fdiff = np.diff(mel_f)
    ramps = np.subtract.outer(mel_f, fftfreqs)

    for i in range(n_mels):
        # lower and upper slopes for all bins
        lower = -ramps[i] / fdiff[i]
        upper = ramps[i+2] / fdiff[i+1]

        # .. then intersect them with each other and zero
        weights[i] = np.maximum(0, np.minimum(lower, upper))

    if norm == 1:
        # Slaney-style mel is scaled to be approx constant energy per channel
        enorm = 2.0 / (mel_f[2:n_mels+2] - mel_f[:n_mels])
        weights *= enorm[:, np.newaxis]

    # Only check weights if f_mel[0] is positive
    if not np.all((mel_f[:-2] == 0) | (weights.max(axis=1) > 0)):
        # This means we have an empty channel somewhere
        warnings.warn('Empty filters detected in mel frequency basis. '
                      'Some channels will produce empty responses. '
                      'Try increasing your sampling rate (and fmax) or '
                      'reducing n_mels.')

    return weights
|
[
"def",
"mel",
"(",
"sr",
",",
"n_fft",
",",
"n_mels",
"=",
"128",
",",
"fmin",
"=",
"0.0",
",",
"fmax",
"=",
"None",
",",
"htk",
"=",
"False",
",",
"norm",
"=",
"1",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
":",
"if",
"fmax",
"is",
"None",
":",
"fmax",
"=",
"float",
"(",
"sr",
")",
"/",
"2",
"if",
"norm",
"is",
"not",
"None",
"and",
"norm",
"!=",
"1",
"and",
"norm",
"!=",
"np",
".",
"inf",
":",
"raise",
"ParameterError",
"(",
"'Unsupported norm: {}'",
".",
"format",
"(",
"repr",
"(",
"norm",
")",
")",
")",
"# Initialize the weights",
"n_mels",
"=",
"int",
"(",
"n_mels",
")",
"weights",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_mels",
",",
"int",
"(",
"1",
"+",
"n_fft",
"//",
"2",
")",
")",
",",
"dtype",
"=",
"dtype",
")",
"# Center freqs of each FFT bin",
"fftfreqs",
"=",
"fft_frequencies",
"(",
"sr",
"=",
"sr",
",",
"n_fft",
"=",
"n_fft",
")",
"# 'Center freqs' of mel bands - uniformly spaced between limits",
"mel_f",
"=",
"mel_frequencies",
"(",
"n_mels",
"+",
"2",
",",
"fmin",
"=",
"fmin",
",",
"fmax",
"=",
"fmax",
",",
"htk",
"=",
"htk",
")",
"fdiff",
"=",
"np",
".",
"diff",
"(",
"mel_f",
")",
"ramps",
"=",
"np",
".",
"subtract",
".",
"outer",
"(",
"mel_f",
",",
"fftfreqs",
")",
"for",
"i",
"in",
"range",
"(",
"n_mels",
")",
":",
"# lower and upper slopes for all bins",
"lower",
"=",
"-",
"ramps",
"[",
"i",
"]",
"/",
"fdiff",
"[",
"i",
"]",
"upper",
"=",
"ramps",
"[",
"i",
"+",
"2",
"]",
"/",
"fdiff",
"[",
"i",
"+",
"1",
"]",
"# .. then intersect them with each other and zero",
"weights",
"[",
"i",
"]",
"=",
"np",
".",
"maximum",
"(",
"0",
",",
"np",
".",
"minimum",
"(",
"lower",
",",
"upper",
")",
")",
"if",
"norm",
"==",
"1",
":",
"# Slaney-style mel is scaled to be approx constant energy per channel",
"enorm",
"=",
"2.0",
"/",
"(",
"mel_f",
"[",
"2",
":",
"n_mels",
"+",
"2",
"]",
"-",
"mel_f",
"[",
":",
"n_mels",
"]",
")",
"weights",
"*=",
"enorm",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"# Only check weights if f_mel[0] is positive",
"if",
"not",
"np",
".",
"all",
"(",
"(",
"mel_f",
"[",
":",
"-",
"2",
"]",
"==",
"0",
")",
"|",
"(",
"weights",
".",
"max",
"(",
"axis",
"=",
"1",
")",
">",
"0",
")",
")",
":",
"# This means we have an empty channel somewhere",
"warnings",
".",
"warn",
"(",
"'Empty filters detected in mel frequency basis. '",
"'Some channels will produce empty responses. '",
"'Try increasing your sampling rate (and fmax) or '",
"'reducing n_mels.'",
")",
"return",
"weights"
] | 30.403509 | 20.508772 |
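A short note on the triangle construction in the `mel` row above: with band edge frequencies $f_0, \dots, f_{n_{\text{mels}}+1}$ returned by `mel_frequencies`, the `lower`/`upper` ramps evaluate, for band $i$ at FFT bin frequency $f$,

$$w_i(f) = \max\!\left(0,\ \min\!\left(\frac{f - f_i}{f_{i+1} - f_i},\ \frac{f_{i+2} - f}{f_{i+2} - f_{i+1}}\right)\right),$$

a triangle that rises from $f_i$, peaks at $f_{i+1}$, and falls back to zero at $f_{i+2}$. The optional `norm == 1` branch then rescales each triangle by $2 / (f_{i+2} - f_i)$ for approximately constant energy per band.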
def proto_01_12_steps025(abf=exampleABF):
    """IC steps. Use to determine gain function."""
    swhlab.ap.detect(abf)
    standard_groupingForInj(abf, 200)

    for feature in ['freq', 'downslope']:
        swhlab.ap.plot_values(abf, feature, continuous=False)  # plot AP info
        swhlab.plot.save(abf, tag='A_' + feature)

    swhlab.plot.gain(abf)  # easy way to do a gain function!
    swhlab.plot.save(abf, tag='05-gain')
|
[
"def",
"proto_01_12_steps025",
"(",
"abf",
"=",
"exampleABF",
")",
":",
"swhlab",
".",
"ap",
".",
"detect",
"(",
"abf",
")",
"standard_groupingForInj",
"(",
"abf",
",",
"200",
")",
"for",
"feature",
"in",
"[",
"'freq'",
",",
"'downslope'",
"]",
":",
"swhlab",
".",
"ap",
".",
"plot_values",
"(",
"abf",
",",
"feature",
",",
"continuous",
"=",
"False",
")",
"#plot AP info",
"swhlab",
".",
"plot",
".",
"save",
"(",
"abf",
",",
"tag",
"=",
"'A_'",
"+",
"feature",
")",
"swhlab",
".",
"plot",
".",
"gain",
"(",
"abf",
")",
"#easy way to do a gain function!",
"swhlab",
".",
"plot",
".",
"save",
"(",
"abf",
",",
"tag",
"=",
"'05-gain'",
")"
] | 37.181818 | 14.363636 |
def build_key_bundle(key_conf, kid_template=""):
"""
Builds a :py:class:`oidcmsg.key_bundle.KeyBundle` instance based on a key
specification.
An example of such a specification::
keys = [
{"type": "RSA", "key": "cp_keys/key.pem", "use": ["enc", "sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"], "kid": "ec.1"},
{"type": "EC", "crv": "P-256", "use": ["enc"], "kid": "ec.2"}
]
Keys in this specification are:
type
The type of key. Presently only 'rsa' and 'ec' supported.
key
A name of a file where a key can be found. Only works with PEM encoded
RSA keys
use
What the key should be used for
crv
The elliptic curve that should be used. Only applies to elliptic curve
keys :-)
kid
Key ID, can only be used with one usage type is specified. If there
are more the one usage type specified 'kid' will just be ignored.
:param key_conf: The key configuration
:param kid_template: A template by which to build the key IDs. If no
kid_template is given then the built-in function add_kid() will be used.
:return: A KeyBundle instance
"""
kid = 0
tot_kb = KeyBundle()
for spec in key_conf:
typ = spec["type"].upper()
if typ == "RSA":
if "key" in spec:
error_to_catch = (OSError, IOError,
DeSerializationNotPossible)
try:
kb = KeyBundle(source="file://%s" % spec["key"],
fileformat="der",
keytype=typ, keyusage=spec["use"])
except error_to_catch:
kb = rsa_init(spec)
except Exception:
raise
else:
kb = rsa_init(spec)
elif typ == "EC":
kb = ec_init(spec)
else:
continue
if 'kid' in spec and len(kb) == 1:
ks = kb.keys()
ks[0].kid = spec['kid']
else:
for k in kb.keys():
if kid_template:
k.kid = kid_template % kid
kid += 1
else:
k.add_kid()
tot_kb.extend(kb.keys())
return tot_kb
|
[
"def",
"build_key_bundle",
"(",
"key_conf",
",",
"kid_template",
"=",
"\"\"",
")",
":",
"kid",
"=",
"0",
"tot_kb",
"=",
"KeyBundle",
"(",
")",
"for",
"spec",
"in",
"key_conf",
":",
"typ",
"=",
"spec",
"[",
"\"type\"",
"]",
".",
"upper",
"(",
")",
"if",
"typ",
"==",
"\"RSA\"",
":",
"if",
"\"key\"",
"in",
"spec",
":",
"error_to_catch",
"=",
"(",
"OSError",
",",
"IOError",
",",
"DeSerializationNotPossible",
")",
"try",
":",
"kb",
"=",
"KeyBundle",
"(",
"source",
"=",
"\"file://%s\"",
"%",
"spec",
"[",
"\"key\"",
"]",
",",
"fileformat",
"=",
"\"der\"",
",",
"keytype",
"=",
"typ",
",",
"keyusage",
"=",
"spec",
"[",
"\"use\"",
"]",
")",
"except",
"error_to_catch",
":",
"kb",
"=",
"rsa_init",
"(",
"spec",
")",
"except",
"Exception",
":",
"raise",
"else",
":",
"kb",
"=",
"rsa_init",
"(",
"spec",
")",
"elif",
"typ",
"==",
"\"EC\"",
":",
"kb",
"=",
"ec_init",
"(",
"spec",
")",
"else",
":",
"continue",
"if",
"'kid'",
"in",
"spec",
"and",
"len",
"(",
"kb",
")",
"==",
"1",
":",
"ks",
"=",
"kb",
".",
"keys",
"(",
")",
"ks",
"[",
"0",
"]",
".",
"kid",
"=",
"spec",
"[",
"'kid'",
"]",
"else",
":",
"for",
"k",
"in",
"kb",
".",
"keys",
"(",
")",
":",
"if",
"kid_template",
":",
"k",
".",
"kid",
"=",
"kid_template",
"%",
"kid",
"kid",
"+=",
"1",
"else",
":",
"k",
".",
"add_kid",
"(",
")",
"tot_kb",
".",
"extend",
"(",
"kb",
".",
"keys",
"(",
")",
")",
"return",
"tot_kb"
] | 29.24359 | 22.75641 |
def stop_all_tensorboards():
    """Terminate all TensorBoard instances."""
    for process in Process.instances:
        print("Process '%s', running %d" % (process.command[0],
                                            process.is_running()))
        if process.is_running() and process.command[0] == "tensorboard":
            process.terminate()
|
[
"def",
"stop_all_tensorboards",
"(",
")",
":",
"for",
"process",
"in",
"Process",
".",
"instances",
":",
"print",
"(",
"\"Process '%s', running %d\"",
"%",
"(",
"process",
".",
"command",
"[",
"0",
"]",
",",
"process",
".",
"is_running",
"(",
")",
")",
")",
"if",
"process",
".",
"is_running",
"(",
")",
"and",
"process",
".",
"command",
"[",
"0",
"]",
"==",
"\"tensorboard\"",
":",
"process",
".",
"terminate",
"(",
")"
] | 49 | 15 |
def handle_sap(q):
    question_votes = votes = Answer.objects.filter(question=q)
    users = q.get_users_voted()
    num_users_votes = {u.id: votes.filter(user=u).count() for u in users}
    user_scale = {u.id: (1 / num_users_votes[u.id]) for u in users}
    choices = []
    for c in q.choice_set.all().order_by("num"):
        votes = question_votes.filter(choice=c)
        vote_users = set([v.user for v in votes])
        choice = {
            "choice": c,
            "votes": {
                "total": {
                    "all": len(vote_users),
                    "all_percent": perc(len(vote_users), users.count()),
                    "male": fmt(sum([v.user.is_male * user_scale[v.user.id] for v in votes])),
                    "female": fmt(sum([v.user.is_female * user_scale[v.user.id] for v in votes]))
                }
            },
            "users": [v.user for v in votes]
        }
        for yr in range(9, 14):
            yr_votes = [v.user if v.user.grade and v.user.grade.number == yr else None for v in votes]
            yr_votes = list(filter(None, yr_votes))
            choice["votes"][yr] = {
                "all": len(set(yr_votes)),
                "male": fmt(sum([u.is_male * user_scale[u.id] for u in yr_votes])),
                "female": fmt(sum([u.is_female * user_scale[u.id] for u in yr_votes])),
            }
        choices.append(choice)

    # Clear vote
    votes = question_votes.filter(clear_vote=True)
    clr_users = set([v.user for v in votes])
    choice = {
        "choice": "Clear vote",
        "votes": {
            "total": {
                "all": len(clr_users),
                "all_percent": perc(len(clr_users), users.count()),
                "male": fmt(sum([v.user.is_male * user_scale[v.user.id] for v in votes])),
                "female": fmt(sum([v.user.is_female * user_scale[v.user.id] for v in votes]))
            }
        },
        "users": clr_users
    }
    for yr in range(9, 14):
        yr_votes = [v.user if v.user.grade and v.user.grade.number == yr else None for v in votes]
        yr_votes = list(filter(None, yr_votes))
        choice["votes"][yr] = {
            "all": len(yr_votes),
            "male": fmt(sum([u.is_male * user_scale[u.id] for u in yr_votes])),
            "female": fmt(sum([u.is_female * user_scale[u.id] for u in yr_votes]))
        }
    choices.append(choice)

    choice = {
        "choice": "Total",
        "votes": {
            "total": {
                "all": users.count(),
                "votes_all": question_votes.count(),
                "all_percent": perc(users.count(), users.count()),
                "male": users.filter(gender=True).count(),
                "female": users.filter(gender__isnull=False, gender=False).count()
            }
        }
    }
    for yr in range(9, 14):
        yr_votes = [u if u.grade and u.grade.number == yr else None for u in users]
        yr_votes = list(filter(None, yr_votes))
        choice["votes"][yr] = {
            "all": len(set(yr_votes)),
            "male": fmt(sum([u.is_male * user_scale[u.id] for u in yr_votes])),
            "female": fmt(sum([u.is_female * user_scale[u.id] for u in yr_votes]))
        }
    choices.append(choice)

    return {"question": q, "choices": choices, "user_scale": user_scale}
|
[
"def",
"handle_sap",
"(",
"q",
")",
":",
"question_votes",
"=",
"votes",
"=",
"Answer",
".",
"objects",
".",
"filter",
"(",
"question",
"=",
"q",
")",
"users",
"=",
"q",
".",
"get_users_voted",
"(",
")",
"num_users_votes",
"=",
"{",
"u",
".",
"id",
":",
"votes",
".",
"filter",
"(",
"user",
"=",
"u",
")",
".",
"count",
"(",
")",
"for",
"u",
"in",
"users",
"}",
"user_scale",
"=",
"{",
"u",
".",
"id",
":",
"(",
"1",
"/",
"num_users_votes",
"[",
"u",
".",
"id",
"]",
")",
"for",
"u",
"in",
"users",
"}",
"choices",
"=",
"[",
"]",
"for",
"c",
"in",
"q",
".",
"choice_set",
".",
"all",
"(",
")",
".",
"order_by",
"(",
"\"num\"",
")",
":",
"votes",
"=",
"question_votes",
".",
"filter",
"(",
"choice",
"=",
"c",
")",
"vote_users",
"=",
"set",
"(",
"[",
"v",
".",
"user",
"for",
"v",
"in",
"votes",
"]",
")",
"choice",
"=",
"{",
"\"choice\"",
":",
"c",
",",
"\"votes\"",
":",
"{",
"\"total\"",
":",
"{",
"\"all\"",
":",
"len",
"(",
"vote_users",
")",
",",
"\"all_percent\"",
":",
"perc",
"(",
"len",
"(",
"vote_users",
")",
",",
"users",
".",
"count",
"(",
")",
")",
",",
"\"male\"",
":",
"fmt",
"(",
"sum",
"(",
"[",
"v",
".",
"user",
".",
"is_male",
"*",
"user_scale",
"[",
"v",
".",
"user",
".",
"id",
"]",
"for",
"v",
"in",
"votes",
"]",
")",
")",
",",
"\"female\"",
":",
"fmt",
"(",
"sum",
"(",
"[",
"v",
".",
"user",
".",
"is_female",
"*",
"user_scale",
"[",
"v",
".",
"user",
".",
"id",
"]",
"for",
"v",
"in",
"votes",
"]",
")",
")",
"}",
"}",
",",
"\"users\"",
":",
"[",
"v",
".",
"user",
"for",
"v",
"in",
"votes",
"]",
"}",
"for",
"yr",
"in",
"range",
"(",
"9",
",",
"14",
")",
":",
"yr_votes",
"=",
"[",
"v",
".",
"user",
"if",
"v",
".",
"user",
".",
"grade",
"and",
"v",
".",
"user",
".",
"grade",
".",
"number",
"==",
"yr",
"else",
"None",
"for",
"v",
"in",
"votes",
"]",
"yr_votes",
"=",
"list",
"(",
"filter",
"(",
"None",
",",
"yr_votes",
")",
")",
"choice",
"[",
"\"votes\"",
"]",
"[",
"yr",
"]",
"=",
"{",
"\"all\"",
":",
"len",
"(",
"set",
"(",
"yr_votes",
")",
")",
",",
"\"male\"",
":",
"fmt",
"(",
"sum",
"(",
"[",
"u",
".",
"is_male",
"*",
"user_scale",
"[",
"u",
".",
"id",
"]",
"for",
"u",
"in",
"yr_votes",
"]",
")",
")",
",",
"\"female\"",
":",
"fmt",
"(",
"sum",
"(",
"[",
"u",
".",
"is_female",
"*",
"user_scale",
"[",
"u",
".",
"id",
"]",
"for",
"u",
"in",
"yr_votes",
"]",
")",
")",
",",
"}",
"choices",
".",
"append",
"(",
"choice",
")",
"votes",
"=",
"question_votes",
".",
"filter",
"(",
"clear_vote",
"=",
"True",
")",
"clr_users",
"=",
"set",
"(",
"[",
"v",
".",
"user",
"for",
"v",
"in",
"votes",
"]",
")",
"choice",
"=",
"{",
"\"choice\"",
":",
"\"Clear vote\"",
",",
"\"votes\"",
":",
"{",
"\"total\"",
":",
"{",
"\"all\"",
":",
"len",
"(",
"clr_users",
")",
",",
"\"all_percent\"",
":",
"perc",
"(",
"len",
"(",
"clr_users",
")",
",",
"users",
".",
"count",
"(",
")",
")",
",",
"\"male\"",
":",
"fmt",
"(",
"sum",
"(",
"[",
"v",
".",
"user",
".",
"is_male",
"*",
"user_scale",
"[",
"v",
".",
"user",
".",
"id",
"]",
"for",
"v",
"in",
"votes",
"]",
")",
")",
",",
"\"female\"",
":",
"fmt",
"(",
"sum",
"(",
"[",
"v",
".",
"user",
".",
"is_female",
"*",
"user_scale",
"[",
"v",
".",
"user",
".",
"id",
"]",
"for",
"v",
"in",
"votes",
"]",
")",
")",
"}",
"}",
",",
"\"users\"",
":",
"clr_users",
"}",
"for",
"yr",
"in",
"range",
"(",
"9",
",",
"14",
")",
":",
"yr_votes",
"=",
"[",
"v",
".",
"user",
"if",
"v",
".",
"user",
".",
"grade",
"and",
"v",
".",
"user",
".",
"grade",
".",
"number",
"==",
"yr",
"else",
"None",
"for",
"v",
"in",
"votes",
"]",
"yr_votes",
"=",
"list",
"(",
"filter",
"(",
"None",
",",
"yr_votes",
")",
")",
"choice",
"[",
"\"votes\"",
"]",
"[",
"yr",
"]",
"=",
"{",
"\"all\"",
":",
"len",
"(",
"yr_votes",
")",
",",
"\"male\"",
":",
"fmt",
"(",
"sum",
"(",
"[",
"u",
".",
"is_male",
"*",
"user_scale",
"[",
"u",
".",
"id",
"]",
"for",
"u",
"in",
"yr_votes",
"]",
")",
")",
",",
"\"female\"",
":",
"fmt",
"(",
"sum",
"(",
"[",
"u",
".",
"is_female",
"*",
"user_scale",
"[",
"u",
".",
"id",
"]",
"for",
"u",
"in",
"yr_votes",
"]",
")",
")",
"}",
"choices",
".",
"append",
"(",
"choice",
")",
"choice",
"=",
"{",
"\"choice\"",
":",
"\"Total\"",
",",
"\"votes\"",
":",
"{",
"\"total\"",
":",
"{",
"\"all\"",
":",
"users",
".",
"count",
"(",
")",
",",
"\"votes_all\"",
":",
"question_votes",
".",
"count",
"(",
")",
",",
"\"all_percent\"",
":",
"perc",
"(",
"users",
".",
"count",
"(",
")",
",",
"users",
".",
"count",
"(",
")",
")",
",",
"\"male\"",
":",
"users",
".",
"filter",
"(",
"gender",
"=",
"True",
")",
".",
"count",
"(",
")",
",",
"\"female\"",
":",
"users",
".",
"filter",
"(",
"gender__isnull",
"=",
"False",
",",
"gender",
"=",
"False",
")",
".",
"count",
"(",
")",
"}",
"}",
"}",
"for",
"yr",
"in",
"range",
"(",
"9",
",",
"14",
")",
":",
"yr_votes",
"=",
"[",
"u",
"if",
"u",
".",
"grade",
"and",
"u",
".",
"grade",
".",
"number",
"==",
"yr",
"else",
"None",
"for",
"u",
"in",
"users",
"]",
"yr_votes",
"=",
"list",
"(",
"filter",
"(",
"None",
",",
"yr_votes",
")",
")",
"choice",
"[",
"\"votes\"",
"]",
"[",
"yr",
"]",
"=",
"{",
"\"all\"",
":",
"len",
"(",
"set",
"(",
"yr_votes",
")",
")",
",",
"\"male\"",
":",
"fmt",
"(",
"sum",
"(",
"[",
"u",
".",
"is_male",
"*",
"user_scale",
"[",
"u",
".",
"id",
"]",
"for",
"u",
"in",
"yr_votes",
"]",
")",
")",
",",
"\"female\"",
":",
"fmt",
"(",
"sum",
"(",
"[",
"u",
".",
"is_female",
"*",
"user_scale",
"[",
"u",
".",
"id",
"]",
"for",
"u",
"in",
"yr_votes",
"]",
")",
")",
"}",
"choices",
".",
"append",
"(",
"choice",
")",
"return",
"{",
"\"question\"",
":",
"q",
",",
"\"choices\"",
":",
"choices",
",",
"\"user_scale\"",
":",
"user_scale",
"}"
] | 39.950617 | 23.82716 |
def _setup_crontab():
    """Sets up the crontab if it hasn't already been setup."""
    from crontab import CronTab
    # Since CI works out of a virtualenv anyway, the `ci.py` script will be
    # installed in the bin already, so we can call it explicitly.
    command = '/bin/bash -c "source ~/.cron_profile; workon {}; ci.py -cron"'.format(settings.venv)
    user = _get_real_user()
    if args["nolive"]:
        vms("Skipping cron tab configuration because 'nolive' enabled.")
        return

    cron = CronTab(user=user)
    # We need to see if the cron has already been created for this command.
    existing = False
    possible = cron.find_comment("pyci_cron")
    if len(list(possible)) > 0:
        if args["rollback"]:
            vms("Removing {} from cron tab.".format(command))
            cron.remove_all(command)
            cron.write()
            db["cron"] = False
            _save_db()
        else:
            existing = True

    if not existing and not args["rollback"]:
        job = cron.new(command=command, comment="pyci_cron")
        # Run the cron every minute of every hour every day.
        if args["cronfreq"] == 1:
            vms("New cron tab configured *minutely* for {}".format(command))
            job.setall("* * * * *")
        else:
            vms("New cron tab configured every {} minutes for {}.".format(args["cronfreq"], command))
            job.setall("*/{} * * * *".format(args["cronfreq"]))
        cron.write()
        db["cron"] = True
        _save_db()
|
[
"def",
"_setup_crontab",
"(",
")",
":",
"from",
"crontab",
"import",
"CronTab",
"#Since CI works out of a virtualenv anyway, the `ci.py` script will be",
"#installed in the bin already, so we can call it explicitly.",
"command",
"=",
"'/bin/bash -c \"source ~/.cron_profile; workon {}; ci.py -cron\"'",
".",
"format",
"(",
"settings",
".",
"venv",
")",
"user",
"=",
"_get_real_user",
"(",
")",
"if",
"args",
"[",
"\"nolive\"",
"]",
":",
"vms",
"(",
"\"Skipping cron tab configuration because 'nolive' enabled.\"",
")",
"return",
"cron",
"=",
"CronTab",
"(",
"user",
"=",
"user",
")",
"#We need to see if the cron has already been created for this command.",
"existing",
"=",
"False",
"possible",
"=",
"cron",
".",
"find_comment",
"(",
"\"pyci_cron\"",
")",
"if",
"len",
"(",
"list",
"(",
"possible",
")",
")",
">",
"0",
":",
"if",
"args",
"[",
"\"rollback\"",
"]",
":",
"vms",
"(",
"\"Removing {} from cron tab.\"",
".",
"format",
"(",
"command",
")",
")",
"cron",
".",
"remove_all",
"(",
"command",
")",
"cron",
".",
"write",
"(",
")",
"db",
"[",
"\"cron\"",
"]",
"=",
"False",
"_save_db",
"(",
")",
"else",
":",
"existing",
"=",
"True",
"if",
"not",
"existing",
"and",
"not",
"args",
"[",
"\"rollback\"",
"]",
":",
"job",
"=",
"cron",
".",
"new",
"(",
"command",
"=",
"command",
",",
"comment",
"=",
"\"pyci_cron\"",
")",
"#Run the cron every minute of every hour every day.",
"if",
"args",
"[",
"\"cronfreq\"",
"]",
"==",
"1",
":",
"vms",
"(",
"\"New cron tab configured *minutely* for {}\"",
".",
"format",
"(",
"command",
")",
")",
"job",
".",
"setall",
"(",
"\"* * * * *\"",
")",
"else",
":",
"vms",
"(",
"\"New cron tab configured every {} minutes for {}.\"",
".",
"format",
"(",
"args",
"[",
"\"cronfreq\"",
"]",
",",
"command",
")",
")",
"job",
".",
"setall",
"(",
"\"*/{} * * * *\"",
".",
"format",
"(",
"args",
"[",
"\"cronfreq\"",
"]",
")",
")",
"cron",
".",
"write",
"(",
")",
"db",
"[",
"\"cron\"",
"]",
"=",
"True",
"_save_db",
"(",
")"
] | 40.054054 | 20.702703 |
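For context on the python-crontab calls in `_setup_crontab` above, here is a minimal usage sketch; the command string and the 'demo_cron' comment are placeholders, not values from the original code.

from crontab import CronTab

cron = CronTab(user=True)                    # current user's crontab
job = cron.new(command='echo hi', comment='demo_cron')
job.setall('*/5 * * * *')                    # every five minutes
cron.write()                                 # persist the new entry
# later: cron.remove_all(comment='demo_cron'); cron.write()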
def _already_in(self, option):
    """
    Check if an option is already in the message.

    :type option: Option
    :param option: the option to be checked
    :return: True if already present, False otherwise
    """
    for opt in self._options:
        if option.number == opt.number:
            return True
    return False
|
[
"def",
"_already_in",
"(",
"self",
",",
"option",
")",
":",
"for",
"opt",
"in",
"self",
".",
"_options",
":",
"if",
"option",
".",
"number",
"==",
"opt",
".",
"number",
":",
"return",
"True",
"return",
"False"
] | 30 | 11.833333 |
def enable_disable(self):
    """
    Enable or disable this endpoint. If enabled, it will be disabled
    and vice versa.

    :return: None
    """
    if self.enabled:
        self.data['enabled'] = False
    else:
        self.data['enabled'] = True
    self.update()
|
[
"def",
"enable_disable",
"(",
"self",
")",
":",
"if",
"self",
".",
"enabled",
":",
"self",
".",
"data",
"[",
"'enabled'",
"]",
"=",
"False",
"else",
":",
"self",
".",
"data",
"[",
"'enabled'",
"]",
"=",
"True",
"self",
".",
"update",
"(",
")"
] | 25 | 15.5 |
def _GetSubFileEntries(self):
    """Retrieves sub file entries.

    Yields:
        ZipFileEntry: a sub file entry.
    """
    if self._directory is None:
        self._directory = self._GetDirectory()

    zip_file = self._file_system.GetZipFile()
    if self._directory and zip_file:
        for path_spec in self._directory.entries:
            location = getattr(path_spec, 'location', None)
            if location is None:
                continue

            kwargs = {}
            try:
                kwargs['zip_info'] = zip_file.getinfo(location[1:])
            except KeyError:
                kwargs['is_virtual'] = True

            yield ZipFileEntry(
                self._resolver_context, self._file_system, path_spec, **kwargs)
|
[
"def",
"_GetSubFileEntries",
"(",
"self",
")",
":",
"if",
"self",
".",
"_directory",
"is",
"None",
":",
"self",
".",
"_directory",
"=",
"self",
".",
"_GetDirectory",
"(",
")",
"zip_file",
"=",
"self",
".",
"_file_system",
".",
"GetZipFile",
"(",
")",
"if",
"self",
".",
"_directory",
"and",
"zip_file",
":",
"for",
"path_spec",
"in",
"self",
".",
"_directory",
".",
"entries",
":",
"location",
"=",
"getattr",
"(",
"path_spec",
",",
"'location'",
",",
"None",
")",
"if",
"location",
"is",
"None",
":",
"continue",
"kwargs",
"=",
"{",
"}",
"try",
":",
"kwargs",
"[",
"'zip_info'",
"]",
"=",
"zip_file",
".",
"getinfo",
"(",
"location",
"[",
"1",
":",
"]",
")",
"except",
"KeyError",
":",
"kwargs",
"[",
"'is_virtual'",
"]",
"=",
"True",
"yield",
"ZipFileEntry",
"(",
"self",
".",
"_resolver_context",
",",
"self",
".",
"_file_system",
",",
"path_spec",
",",
"*",
"*",
"kwargs",
")"
] | 28.208333 | 17.416667 |
def svd(a, compute_uv=True, rcond=None):
    """ svd decomposition of matrix ``a`` containing |GVar|\s.

    Args:
        a: Two-dimensional matrix/array of numbers
            and/or :class:`gvar.GVar`\s.
        compute_uv (bool): If ``True`` (default), returns
            tuple ``(u,s,vT)`` where matrix ``a = u @ np.diag(s) @ vT``
            where matrices ``u`` and ``vT`` satisfy ``u.T @ u = 1``
            and ``vT @ vT.T = 1``, and ``s`` is the list of singular
            values. Only ``s`` is returned if ``compute_uv=False``.
        rcond (float): Singular values whose difference is smaller than
            ``rcond`` times their sum are assumed to be degenerate for
            calculating variances for ``u`` and ``vT``.
            Default (``rcond=None``) is ``max(M,N)`` times machine precision.

    Returns:
        Tuple ``(u,s,vT)`` where matrix ``a = u @ np.diag(s) @ vT``
        where matrices ``u`` and ``vT`` satisfy ``u.T @ u = 1``
        and ``vT @ vT.T = 1``, and ``s`` is the list of singular
        values. If ``a.shape=(N,M)``, then ``u.shape=(N,K)``
        and ``vT.shape=(K,M)`` where ``K`` is the number of
        nonzero singular values (``len(s)==K``).
        If ``compute_uv==False`` only ``s`` is returned.

    Raises:
        ValueError: If matrix is not two-dimensional.
    """
    a = numpy.asarray(a)
    if a.dtype != object:
        return numpy.linalg.svd(a, compute_uv=compute_uv)
    amean = gvar.mean(a)
    if amean.ndim != 2:
        raise ValueError(
            'matrix must have dimension 2: actual shape = ' + str(a.shape)
            )
    if rcond is None:
        rcond = numpy.finfo(float).eps * max(a.shape)
    da = a - amean
    u0, s0, v0T = numpy.linalg.svd(amean, compute_uv=True, full_matrices=True)
    k = min(a.shape)
    s = s0 + [
        u0[:, i].dot(da.dot(v0T[i, :])) for i in range(k)
        ]
    if compute_uv:
        u = numpy.array(u0, dtype=object)
        vT = numpy.array(v0T, dtype=object)
        # u first
        daaT = da.dot(a.T) + a.dot(da.T)
        s02 = numpy.zeros(daaT.shape[0], float)
        s02[:len(s0)] = s0 ** 2
        for j in range(s02.shape[0]):
            for i in range(k):
                if i == j:
                    continue
                ds2 = s02[i] - s02[j]
                if abs(ds2) < rcond * abs(s02[i] + s02[j]) or ds2 == 0:
                    continue
                u[:, i] += u0[:, j] * u0[:, j].dot(daaT.dot(u0[:, i])) / ds2
        # v next
        daTa = da.T.dot(a) + a.T.dot(da)
        s02 = numpy.zeros(daTa.shape[0], float)
        s02[:len(s0)] = s0 ** 2
        for j in range(s02.shape[0]):
            for i in range(k):
                if i == j:
                    continue
                ds2 = s02[i] - s02[j]
                if abs(ds2) < rcond * abs(s02[i] + s02[j]) or ds2 == 0:
                    continue
                vT[i, :] += v0T[j, :] * v0T[j, :].dot(daTa.dot(v0T[i, :])) / ds2
        return u[:, :k], s, vT[:k, :]
    else:
        return s
|
[
"def",
"svd",
"(",
"a",
",",
"compute_uv",
"=",
"True",
",",
"rcond",
"=",
"None",
")",
":",
"a",
"=",
"numpy",
".",
"asarray",
"(",
"a",
")",
"if",
"a",
".",
"dtype",
"!=",
"object",
":",
"return",
"numpy",
".",
"linalg",
".",
"svd",
"(",
"a",
",",
"compute_uv",
"=",
"compute_uv",
")",
"amean",
"=",
"gvar",
".",
"mean",
"(",
"a",
")",
"if",
"amean",
".",
"ndim",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'matrix must have dimension 2: actual shape = '",
"+",
"str",
"(",
"a",
".",
"shape",
")",
")",
"if",
"rcond",
"is",
"None",
":",
"rcond",
"=",
"numpy",
".",
"finfo",
"(",
"float",
")",
".",
"eps",
"*",
"max",
"(",
"a",
".",
"shape",
")",
"da",
"=",
"a",
"-",
"amean",
"u0",
",",
"s0",
",",
"v0T",
"=",
"numpy",
".",
"linalg",
".",
"svd",
"(",
"amean",
",",
"compute_uv",
"=",
"True",
",",
"full_matrices",
"=",
"True",
")",
"k",
"=",
"min",
"(",
"a",
".",
"shape",
")",
"s",
"=",
"s0",
"+",
"[",
"u0",
"[",
":",
",",
"i",
"]",
".",
"dot",
"(",
"da",
".",
"dot",
"(",
"v0T",
"[",
"i",
",",
":",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"k",
")",
"]",
"if",
"compute_uv",
":",
"u",
"=",
"numpy",
".",
"array",
"(",
"u0",
",",
"dtype",
"=",
"object",
")",
"vT",
"=",
"numpy",
".",
"array",
"(",
"v0T",
",",
"dtype",
"=",
"object",
")",
"# u first",
"daaT",
"=",
"da",
".",
"dot",
"(",
"a",
".",
"T",
")",
"+",
"a",
".",
"dot",
"(",
"da",
".",
"T",
")",
"s02",
"=",
"numpy",
".",
"zeros",
"(",
"daaT",
".",
"shape",
"[",
"0",
"]",
",",
"float",
")",
"s02",
"[",
":",
"len",
"(",
"s0",
")",
"]",
"=",
"s0",
"**",
"2",
"for",
"j",
"in",
"range",
"(",
"s02",
".",
"shape",
"[",
"0",
"]",
")",
":",
"for",
"i",
"in",
"range",
"(",
"k",
")",
":",
"if",
"i",
"==",
"j",
":",
"continue",
"ds2",
"=",
"s02",
"[",
"i",
"]",
"-",
"s02",
"[",
"j",
"]",
"if",
"abs",
"(",
"ds2",
")",
"<",
"rcond",
"*",
"abs",
"(",
"s02",
"[",
"i",
"]",
"+",
"s02",
"[",
"j",
"]",
")",
"or",
"ds2",
"==",
"0",
":",
"continue",
"u",
"[",
":",
",",
"i",
"]",
"+=",
"u0",
"[",
":",
",",
"j",
"]",
"*",
"u0",
"[",
":",
",",
"j",
"]",
".",
"dot",
"(",
"daaT",
".",
"dot",
"(",
"u0",
"[",
":",
",",
"i",
"]",
")",
")",
"/",
"ds2",
"# v next",
"daTa",
"=",
"da",
".",
"T",
".",
"dot",
"(",
"a",
")",
"+",
"a",
".",
"T",
".",
"dot",
"(",
"da",
")",
"s02",
"=",
"numpy",
".",
"zeros",
"(",
"daTa",
".",
"shape",
"[",
"0",
"]",
",",
"float",
")",
"s02",
"[",
":",
"len",
"(",
"s0",
")",
"]",
"=",
"s0",
"**",
"2",
"for",
"j",
"in",
"range",
"(",
"s02",
".",
"shape",
"[",
"0",
"]",
")",
":",
"for",
"i",
"in",
"range",
"(",
"k",
")",
":",
"if",
"i",
"==",
"j",
":",
"continue",
"ds2",
"=",
"s02",
"[",
"i",
"]",
"-",
"s02",
"[",
"j",
"]",
"if",
"abs",
"(",
"ds2",
")",
"<",
"rcond",
"*",
"abs",
"(",
"s02",
"[",
"i",
"]",
"+",
"s02",
"[",
"j",
"]",
")",
"or",
"ds2",
"==",
"0",
":",
"continue",
"vT",
"[",
"i",
",",
":",
"]",
"+=",
"v0T",
"[",
"j",
",",
":",
"]",
"*",
"v0T",
"[",
"j",
",",
":",
"]",
".",
"dot",
"(",
"daTa",
".",
"dot",
"(",
"v0T",
"[",
"i",
",",
":",
"]",
")",
")",
"/",
"ds2",
"return",
"u",
"[",
":",
",",
":",
"k",
"]",
",",
"s",
",",
"vT",
"[",
":",
"k",
",",
":",
"]",
"else",
":",
"return",
"s"
] | 39.797297 | 18.432432 |
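The `svd` row above propagates |GVar| uncertainties with first-order perturbation theory. Writing $A = \bar A + \delta A$ with SVD $\bar A = \sum_i s_i\, u_i v_i^{\mathsf T}$, the singular values shift as

$$\delta s_i = u_i^{\mathsf T}\, \delta A\, v_i,$$

which is the `u0[:, i].dot(da.dot(v0T[i, :]))` correction in the code, and for non-degenerate pairs the left singular vectors mix as

$$\delta u_i = \sum_{j \neq i} \frac{u_j^{\mathsf T}\,(\delta A\, A^{\mathsf T} + A\, \delta A^{\mathsf T})\, u_i}{s_i^2 - s_j^2}\; u_j,$$

which is the `daaT` double loop (the `daTa` loop is the analogous update for the right vectors). The `rcond` test simply skips near-degenerate pairs where the denominator makes this expansion unreliable.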
def convert_from_binary(self, binvalue, type, **kwargs):
    """
    Convert binary data to type 'type'.

    'type' must have a convert_binary function. If 'type'
    supports size checking, the size function is called to ensure
    that binvalue is the correct size for deserialization.
    """
    size = self.get_type_size(type)
    if size > 0 and len(binvalue) != size:
        raise ArgumentError("Could not convert type from binary since the data was not the correct size", required_size=size, actual_size=len(binvalue), type=type)

    typeobj = self.get_type(type)
    if not hasattr(typeobj, 'convert_binary'):
        raise ArgumentError("Type does not support conversion from binary", type=type)

    return typeobj.convert_binary(binvalue, **kwargs)
|
[
"def",
"convert_from_binary",
"(",
"self",
",",
"binvalue",
",",
"type",
",",
"*",
"*",
"kwargs",
")",
":",
"size",
"=",
"self",
".",
"get_type_size",
"(",
"type",
")",
"if",
"size",
">",
"0",
"and",
"len",
"(",
"binvalue",
")",
"!=",
"size",
":",
"raise",
"ArgumentError",
"(",
"\"Could not convert type from binary since the data was not the correct size\"",
",",
"required_size",
"=",
"size",
",",
"actual_size",
"=",
"len",
"(",
"binvalue",
")",
",",
"type",
"=",
"type",
")",
"typeobj",
"=",
"self",
".",
"get_type",
"(",
"type",
")",
"if",
"not",
"hasattr",
"(",
"typeobj",
",",
"'convert_binary'",
")",
":",
"raise",
"ArgumentError",
"(",
"\"Type does not support conversion from binary\"",
",",
"type",
"=",
"type",
")",
"return",
"typeobj",
".",
"convert_binary",
"(",
"binvalue",
",",
"*",
"*",
"kwargs",
")"
] | 42.052632 | 26.578947 |
def edges(inputtiles, parsenames):
    """
    For a stream of [<x>, <y>, <z>] tiles, return only those tiles that are on the edge.
    """
    try:
        inputtiles = click.open_file(inputtiles).readlines()
    except IOError:
        inputtiles = [inputtiles]

    # parse the input stream into an array
    tiles = edge_finder.findedges(inputtiles, parsenames)

    for t in tiles:
        click.echo(t.tolist())
|
[
"def",
"edges",
"(",
"inputtiles",
",",
"parsenames",
")",
":",
"try",
":",
"inputtiles",
"=",
"click",
".",
"open_file",
"(",
"inputtiles",
")",
".",
"readlines",
"(",
")",
"except",
"IOError",
":",
"inputtiles",
"=",
"[",
"inputtiles",
"]",
"# parse the input stream into an array",
"tiles",
"=",
"edge_finder",
".",
"findedges",
"(",
"inputtiles",
",",
"parsenames",
")",
"for",
"t",
"in",
"tiles",
":",
"click",
".",
"echo",
"(",
"t",
".",
"tolist",
"(",
")",
")"
] | 28.857143 | 18.857143 |
def GetClientConfig(self, context, validate=True, deploy_timestamp=True):
    """Generates the client config file for inclusion in deployable binaries."""
    with utils.TempDirectory() as tmp_dir:
        # Make sure we write the file in yaml format.
        filename = os.path.join(
            tmp_dir,
            config.CONFIG.Get("ClientBuilder.config_filename", context=context))

        new_config = config.CONFIG.MakeNewConfig()
        new_config.Initialize(reset=True, data="")
        new_config.SetWriteBack(filename)

        # Only copy certain sections to the client. We enumerate all
        # defined options and then resolve those from the config in the
        # client's context. The result is the raw option as if the
        # client read our config file.
        client_context = context[:]
        while contexts.CLIENT_BUILD_CONTEXT in client_context:
            client_context.remove(contexts.CLIENT_BUILD_CONTEXT)
        for descriptor in sorted(config.CONFIG.type_infos, key=lambda x: x.name):
            if descriptor.name in self.SKIP_OPTION_LIST:
                continue

            if descriptor.section in self.CONFIG_SECTIONS:
                value = config.CONFIG.GetRaw(
                    descriptor.name, context=client_context, default=None)

                if value is not None:
                    logging.debug("Copying config option to client: %s",
                                  descriptor.name)

                    new_config.SetRaw(descriptor.name, value)

        if deploy_timestamp:
            deploy_time_string = str(rdfvalue.RDFDatetime.Now())
            new_config.Set("Client.deploy_time", deploy_time_string)
        new_config.Write()

        if validate:
            self.ValidateEndConfig(new_config)

        private_validator = config.CONFIG.Get(
            "ClientBuilder.private_config_validator_class", context=context)
        if private_validator:
            try:
                validator = config_validator_base.PrivateConfigValidator.classes[
                    private_validator]()
            except KeyError:
                logging.error(
                    "Couldn't find config validator class %s, "
                    "you probably need to copy it into lib/local", private_validator)
                raise
            validator.ValidateEndConfig(new_config, self.context)

        return io.open(filename, "r").read()
|
[
"def",
"GetClientConfig",
"(",
"self",
",",
"context",
",",
"validate",
"=",
"True",
",",
"deploy_timestamp",
"=",
"True",
")",
":",
"with",
"utils",
".",
"TempDirectory",
"(",
")",
"as",
"tmp_dir",
":",
"# Make sure we write the file in yaml format.",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"config",
".",
"CONFIG",
".",
"Get",
"(",
"\"ClientBuilder.config_filename\"",
",",
"context",
"=",
"context",
")",
")",
"new_config",
"=",
"config",
".",
"CONFIG",
".",
"MakeNewConfig",
"(",
")",
"new_config",
".",
"Initialize",
"(",
"reset",
"=",
"True",
",",
"data",
"=",
"\"\"",
")",
"new_config",
".",
"SetWriteBack",
"(",
"filename",
")",
"# Only copy certain sections to the client. We enumerate all",
"# defined options and then resolve those from the config in the",
"# client's context. The result is the raw option as if the",
"# client read our config file.",
"client_context",
"=",
"context",
"[",
":",
"]",
"while",
"contexts",
".",
"CLIENT_BUILD_CONTEXT",
"in",
"client_context",
":",
"client_context",
".",
"remove",
"(",
"contexts",
".",
"CLIENT_BUILD_CONTEXT",
")",
"for",
"descriptor",
"in",
"sorted",
"(",
"config",
".",
"CONFIG",
".",
"type_infos",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"name",
")",
":",
"if",
"descriptor",
".",
"name",
"in",
"self",
".",
"SKIP_OPTION_LIST",
":",
"continue",
"if",
"descriptor",
".",
"section",
"in",
"self",
".",
"CONFIG_SECTIONS",
":",
"value",
"=",
"config",
".",
"CONFIG",
".",
"GetRaw",
"(",
"descriptor",
".",
"name",
",",
"context",
"=",
"client_context",
",",
"default",
"=",
"None",
")",
"if",
"value",
"is",
"not",
"None",
":",
"logging",
".",
"debug",
"(",
"\"Copying config option to client: %s\"",
",",
"descriptor",
".",
"name",
")",
"new_config",
".",
"SetRaw",
"(",
"descriptor",
".",
"name",
",",
"value",
")",
"if",
"deploy_timestamp",
":",
"deploy_time_string",
"=",
"str",
"(",
"rdfvalue",
".",
"RDFDatetime",
".",
"Now",
"(",
")",
")",
"new_config",
".",
"Set",
"(",
"\"Client.deploy_time\"",
",",
"deploy_time_string",
")",
"new_config",
".",
"Write",
"(",
")",
"if",
"validate",
":",
"self",
".",
"ValidateEndConfig",
"(",
"new_config",
")",
"private_validator",
"=",
"config",
".",
"CONFIG",
".",
"Get",
"(",
"\"ClientBuilder.private_config_validator_class\"",
",",
"context",
"=",
"context",
")",
"if",
"private_validator",
":",
"try",
":",
"validator",
"=",
"config_validator_base",
".",
"PrivateConfigValidator",
".",
"classes",
"[",
"private_validator",
"]",
"(",
")",
"except",
"KeyError",
":",
"logging",
".",
"error",
"(",
"\"Couldn't find config validator class %s, \"",
"\"you probably need to copy it into lib/local\"",
",",
"private_validator",
")",
"raise",
"validator",
".",
"ValidateEndConfig",
"(",
"new_config",
",",
"self",
".",
"context",
")",
"return",
"io",
".",
"open",
"(",
"filename",
",",
"\"r\"",
")",
".",
"read",
"(",
")"
] | 39.945455 | 20.745455 |
def set_profiling_level(self, level, slow_ms=None, session=None):
    """Set the database's profiling level.

    :Parameters:
      - `level`: Specifies a profiling level, see list of possible values
        below.
      - `slow_ms`: Optionally modify the threshold for the profile to
        consider a query or operation. Even if the profiler is off queries
        slower than the `slow_ms` level will get written to the logs.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.

    Possible `level` values:

    +----------------------------+------------------------------------+
    | Level                      | Setting                            |
    +============================+====================================+
    | :data:`~pymongo.OFF`       | Off. No profiling.                 |
    +----------------------------+------------------------------------+
    | :data:`~pymongo.SLOW_ONLY` | On. Only includes slow operations. |
    +----------------------------+------------------------------------+
    | :data:`~pymongo.ALL`       | On. Includes all operations.       |
    +----------------------------+------------------------------------+

    Raises :class:`ValueError` if level is not one of
    (:data:`~pymongo.OFF`, :data:`~pymongo.SLOW_ONLY`,
    :data:`~pymongo.ALL`).

    .. versionchanged:: 3.6
       Added ``session`` parameter.

    .. mongodoc:: profiling
    """
    if not isinstance(level, int) or level < 0 or level > 2:
        raise ValueError("level must be one of (OFF, SLOW_ONLY, ALL)")

    if slow_ms is not None and not isinstance(slow_ms, int):
        raise TypeError("slow_ms must be an integer")

    if slow_ms is not None:
        self.command("profile", level, slowms=slow_ms, session=session)
    else:
        self.command("profile", level, session=session)
|
[
"def",
"set_profiling_level",
"(",
"self",
",",
"level",
",",
"slow_ms",
"=",
"None",
",",
"session",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"level",
",",
"int",
")",
"or",
"level",
"<",
"0",
"or",
"level",
">",
"2",
":",
"raise",
"ValueError",
"(",
"\"level must be one of (OFF, SLOW_ONLY, ALL)\"",
")",
"if",
"slow_ms",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"slow_ms",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"slow_ms must be an integer\"",
")",
"if",
"slow_ms",
"is",
"not",
"None",
":",
"self",
".",
"command",
"(",
"\"profile\"",
",",
"level",
",",
"slowms",
"=",
"slow_ms",
",",
"session",
"=",
"session",
")",
"else",
":",
"self",
".",
"command",
"(",
"\"profile\"",
",",
"level",
",",
"session",
"=",
"session",
")"
] | 45.255814 | 26.255814 |
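A brief usage sketch for the profiling helper above; it assumes a reachable local mongod and a PyMongo version that still ships `Database.set_profiling_level` (the method was removed in PyMongo 4.x). The connection string and thresholds are illustrative only.

from pymongo import MongoClient, OFF, SLOW_ONLY

db = MongoClient('mongodb://localhost:27017').test
db.set_profiling_level(SLOW_ONLY, slow_ms=200)  # log operations slower than 200 ms
db.set_profiling_level(OFF)                     # turn profiling back off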
def garbage_cycle(index):
    """Get reference cycle details."""
    graph = _compute_garbage_graphs()[int(index)]
    graph.reduce_to_cycles()
    objects = graph.metadata
    objects.sort(key=lambda x: -x.size)
    return dict(objects=objects, index=index)
|
[
"def",
"garbage_cycle",
"(",
"index",
")",
":",
"graph",
"=",
"_compute_garbage_graphs",
"(",
")",
"[",
"int",
"(",
"index",
")",
"]",
"graph",
".",
"reduce_to_cycles",
"(",
")",
"objects",
"=",
"graph",
".",
"metadata",
"objects",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"-",
"x",
".",
"size",
")",
"return",
"dict",
"(",
"objects",
"=",
"objects",
",",
"index",
"=",
"index",
")"
] | 36 | 7.714286 |
def parse_afterqc_log(self, f):
    """ Parse the JSON output from AfterQC and save the summary statistics """
    try:
        parsed_json = json.load(f['f'])
    except:
        log.warn("Could not parse AfterQC JSON: '{}'".format(f['fn']))
        return None

    # AfterQC changed the name of their summary key at some point
    if 'summary' in parsed_json:
        summaryk = 'summary'
    elif 'afterqc_main_summary' in parsed_json:
        summaryk = 'afterqc_main_summary'
    else:
        log.warn("AfterQC JSON did not have a 'summary' or 'afterqc_main_summary' key, skipping: '{}'".format(f['fn']))
        return None

    s_name = f['s_name']
    self.add_data_source(f, s_name)
    self.afterqc_data[s_name] = {}
    for k in parsed_json[summaryk]:
        try:
            self.afterqc_data[s_name][k] = float(parsed_json[summaryk][k])
        except ValueError:
            self.afterqc_data[s_name][k] = parsed_json[summaryk][k]
    try:
        self.afterqc_data[s_name]['pct_good_bases'] = (self.afterqc_data[s_name]['good_bases'] / self.afterqc_data[s_name]['total_bases']) * 100.0
    except KeyError:
        pass
|
[
"def",
"parse_afterqc_log",
"(",
"self",
",",
"f",
")",
":",
"try",
":",
"parsed_json",
"=",
"json",
".",
"load",
"(",
"f",
"[",
"'f'",
"]",
")",
"except",
":",
"log",
".",
"warn",
"(",
"\"Could not parse AfterQC JSON: '{}'\"",
".",
"format",
"(",
"f",
"[",
"'fn'",
"]",
")",
")",
"return",
"None",
"# AfterQC changed the name of their summary key at some point",
"if",
"'summary'",
"in",
"parsed_json",
":",
"summaryk",
"=",
"'summary'",
"elif",
"'afterqc_main_summary'",
"in",
"parsed_json",
":",
"summaryk",
"=",
"'afterqc_main_summary'",
"else",
":",
"log",
".",
"warn",
"(",
"\"AfterQC JSON did not have a 'summary' or 'afterqc_main_summary' key, skipping: '{}'\"",
".",
"format",
"(",
"f",
"[",
"'fn'",
"]",
")",
")",
"return",
"None",
"s_name",
"=",
"f",
"[",
"'s_name'",
"]",
"self",
".",
"add_data_source",
"(",
"f",
",",
"s_name",
")",
"self",
".",
"afterqc_data",
"[",
"s_name",
"]",
"=",
"{",
"}",
"for",
"k",
"in",
"parsed_json",
"[",
"summaryk",
"]",
":",
"try",
":",
"self",
".",
"afterqc_data",
"[",
"s_name",
"]",
"[",
"k",
"]",
"=",
"float",
"(",
"parsed_json",
"[",
"summaryk",
"]",
"[",
"k",
"]",
")",
"except",
"ValueError",
":",
"self",
".",
"afterqc_data",
"[",
"s_name",
"]",
"[",
"k",
"]",
"=",
"parsed_json",
"[",
"summaryk",
"]",
"[",
"k",
"]",
"try",
":",
"self",
".",
"afterqc_data",
"[",
"s_name",
"]",
"[",
"'pct_good_bases'",
"]",
"=",
"(",
"self",
".",
"afterqc_data",
"[",
"s_name",
"]",
"[",
"'good_bases'",
"]",
"/",
"self",
".",
"afterqc_data",
"[",
"s_name",
"]",
"[",
"'total_bases'",
"]",
")",
"*",
"100.0",
"except",
"KeyError",
":",
"pass"
] | 41.827586 | 23.344828 |
def orthorhombic(a: float, b: float, c: float):
    """
    Convenience constructor for an orthorhombic lattice.

    Args:
        a (float): *a* lattice parameter of the orthorhombic cell.
        b (float): *b* lattice parameter of the orthorhombic cell.
        c (float): *c* lattice parameter of the orthorhombic cell.

    Returns:
        Orthorhombic lattice of dimensions a x b x c.
    """
    return Lattice.from_parameters(a, b, c, 90, 90, 90)
|
[
"def",
"orthorhombic",
"(",
"a",
":",
"float",
",",
"b",
":",
"float",
",",
"c",
":",
"float",
")",
":",
"return",
"Lattice",
".",
"from_parameters",
"(",
"a",
",",
"b",
",",
"c",
",",
"90",
",",
"90",
",",
"90",
")"
] | 37.230769 | 21.846154 |
def download(self, local_port_path, key_names):  # pragma: no cover
    """
    Download all files from a user's account location.

    :param local_port_path: the local path that the data is downloaded to
    :param key_names: each name can start with self.prefix, or is taken as
        relative to the prefix.

    Example:
        local_port_path = /home/user/myworkflow/input_images/ (sync all data in this folder)
        s3_folder = myworkflow/input_images/ (location on s3 that will be synced to local path)
    """
    if not os.path.isdir(local_port_path):
        raise ValueError("Download path does not exist: %s" % local_port_path)
    if not isinstance(key_names, list):
        key_names = [key_names]
    for key_name in key_names:
        is_folder = key_name.endswith('/')
        # strip leading and trailing slashes
        key_name = key_name.lstrip('/').rstrip('/')
        key_parts = key_name.split('/')
        # Key names from the list function will include the account prefix
        # and any folder namespace.
        if key_parts[0] == self.prefix:
            path = os.path.join(local_port_path, *key_parts[1:])
            if not is_folder:
                folder_path = os.path.join(local_port_path, *key_parts[1:-1])
            get_key_name = key_name
        else:
            path = os.path.join(local_port_path, *key_parts)
            if not is_folder:
                folder_path = os.path.join(local_port_path, *key_parts[:-1])
            get_key_name = '%s/%s' % (self.prefix, key_name)
        if is_folder and not os.path.isdir(path):
            # A directory that doesn't exist
            os.makedirs(path)
        else:
            if not os.path.isdir(folder_path):
                os.makedirs(folder_path)
            # Assume it is a file
            self.__download_file(path, get_key_name)
|
[
"def",
"download",
"(",
"self",
",",
"local_port_path",
",",
"key_names",
")",
":",
"# pragma: no cover",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"local_port_path",
")",
":",
"raise",
"ValueError",
"(",
"\"Download path does not exist: %s\"",
"%",
"local_port_path",
")",
"if",
"not",
"isinstance",
"(",
"key_names",
",",
"list",
")",
":",
"key_names",
"=",
"[",
"key_names",
"]",
"for",
"key_name",
"in",
"key_names",
":",
"is_folder",
"=",
"key_name",
".",
"endswith",
"(",
"'/'",
")",
"# strip leading and trailing slashes",
"key_name",
"=",
"key_name",
".",
"lstrip",
"(",
"'/'",
")",
".",
"rstrip",
"(",
"'/'",
")",
"key_parts",
"=",
"key_name",
".",
"split",
"(",
"'/'",
")",
"# Key names from the list function will include the account prefix",
"# and any folder namespace.",
"if",
"key_parts",
"[",
"0",
"]",
"==",
"self",
".",
"prefix",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"local_port_path",
",",
"*",
"key_parts",
"[",
"1",
":",
"]",
")",
"if",
"not",
"is_folder",
":",
"folder_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"local_port_path",
",",
"*",
"key_parts",
"[",
"1",
":",
"-",
"1",
"]",
")",
"get_key_name",
"=",
"key_name",
"else",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"local_port_path",
",",
"*",
"key_parts",
")",
"if",
"not",
"is_folder",
":",
"folder_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"local_port_path",
",",
"*",
"key_parts",
"[",
":",
"-",
"1",
"]",
")",
"get_key_name",
"=",
"'%s/%s'",
"%",
"(",
"self",
".",
"prefix",
",",
"key_name",
")",
"if",
"is_folder",
"and",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"# A directory that doesn't exist",
"os",
".",
"makedirs",
"(",
"path",
")",
"else",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"folder_path",
")",
":",
"os",
".",
"makedirs",
"(",
"folder_path",
")",
"# Assume it is a file",
"self",
".",
"__download_file",
"(",
"path",
",",
"get_key_name",
")"
] | 42.822222 | 21.311111 |
def tracebacks_from_file(fileobj, reverse=False):
    """Generator that yields tracebacks found in a file object

    With reverse=True, searches backwards from the end of the file.
    """
    if reverse:
        lines = deque()
        for line in BackwardsReader(fileobj):
            lines.appendleft(line)
            if tb_head in line:
                yield next(tracebacks_from_lines(lines))
                lines.clear()
    else:
        for traceback in tracebacks_from_lines(fileobj):
            yield traceback
|
[
"def",
"tracebacks_from_file",
"(",
"fileobj",
",",
"reverse",
"=",
"False",
")",
":",
"if",
"reverse",
":",
"lines",
"=",
"deque",
"(",
")",
"for",
"line",
"in",
"BackwardsReader",
"(",
"fileobj",
")",
":",
"lines",
".",
"appendleft",
"(",
"line",
")",
"if",
"tb_head",
"in",
"line",
":",
"yield",
"next",
"(",
"tracebacks_from_lines",
"(",
"lines",
")",
")",
"lines",
".",
"clear",
"(",
")",
"else",
":",
"for",
"traceback",
"in",
"tracebacks_from_lines",
"(",
"fileobj",
")",
":",
"yield",
"traceback"
] | 30 | 17.941176 |
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
|
[
"def",
"add_license",
"(",
"service_instance",
",",
"key",
",",
"description",
",",
"license_manager",
"=",
"None",
")",
":",
"if",
"not",
"license_manager",
":",
"license_manager",
"=",
"get_license_manager",
"(",
"service_instance",
")",
"label",
"=",
"vim",
".",
"KeyValue",
"(",
")",
"label",
".",
"key",
"=",
"'VpxClientLicenseLabel'",
"label",
".",
"value",
"=",
"description",
"log",
".",
"debug",
"(",
"'Adding license \\'%s\\''",
",",
"description",
")",
"try",
":",
"vmware_license",
"=",
"license_manager",
".",
"AddLicense",
"(",
"key",
",",
"[",
"label",
"]",
")",
"except",
"vim",
".",
"fault",
".",
"NoPermission",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareApiError",
"(",
"'Not enough permissions. Required privilege: '",
"'{0}'",
".",
"format",
"(",
"exc",
".",
"privilegeId",
")",
")",
"except",
"vim",
".",
"fault",
".",
"VimFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareApiError",
"(",
"exc",
".",
"msg",
")",
"except",
"vmodl",
".",
"RuntimeFault",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"exc",
")",
"raise",
"salt",
".",
"exceptions",
".",
"VMwareRuntimeError",
"(",
"exc",
".",
"msg",
")",
"return",
"vmware_license"
] | 31.702703 | 18.72973 |
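A hedged usage sketch: si stands for an authenticated pyVmomi service instance obtained elsewhere, and the key below is a placeholder, not a real license:

# Raises salt.exceptions.VMwareApiError on permission problems.
lic = add_license(si, '00000-00000-00000-00000-00000',
                  'vCenter evaluation key')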
def local_attention1d_spatial_decoder(x, kv_dim, heads_dim,
feedforward_dim, hparams):
"""Image Transformer decoder with local1D spatial layers."""
batch_dim, length_dim, model_dim = x.shape.dims
blocks_w_dim = mtf.Dimension("blocksw", hparams.block_length)
num_w_blocks_dim = mtf.Dimension("num_wblocks",
length_dim.size // blocks_w_dim.size)
x = mtf.reshape(
x, mtf.Shape([batch_dim, num_w_blocks_dim, blocks_w_dim, model_dim]))
# [ self attention - ffn - residual + dropout] x n
for layer in range(hparams.num_decoder_layers):
layer_name = "decoder_layer_%d" % layer
with tf.variable_scope(layer_name):
# Self attention layer
x += layer_prepostprocess_dropout(
mtf.layers.local_self_attention_spatial_blocks(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"),
kv_dim,
heads_dim,
memory_w_dim=blocks_w_dim,
mask_right=True,
name="self_att"), hparams)
# ffn layer
x += layer_prepostprocess_dropout(
mtf.layers.dense_relu_dense(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"),
feedforward_dim,
hparams.dropout,
dropout_broadcast_dims=[length_dim]), hparams)
output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm")
return output
|
[
"def",
"local_attention1d_spatial_decoder",
"(",
"x",
",",
"kv_dim",
",",
"heads_dim",
",",
"feedforward_dim",
",",
"hparams",
")",
":",
"batch_dim",
",",
"length_dim",
",",
"model_dim",
"=",
"x",
".",
"shape",
".",
"dims",
"blocks_w_dim",
"=",
"mtf",
".",
"Dimension",
"(",
"\"blocksw\"",
",",
"hparams",
".",
"block_length",
")",
"num_w_blocks_dim",
"=",
"mtf",
".",
"Dimension",
"(",
"\"num_wblocks\"",
",",
"length_dim",
".",
"size",
"//",
"blocks_w_dim",
".",
"size",
")",
"x",
"=",
"mtf",
".",
"reshape",
"(",
"x",
",",
"mtf",
".",
"Shape",
"(",
"[",
"batch_dim",
",",
"num_w_blocks_dim",
",",
"blocks_w_dim",
",",
"model_dim",
"]",
")",
")",
"# [ self attention - ffn - residual + dropout] x n",
"for",
"layer",
"in",
"range",
"(",
"hparams",
".",
"num_decoder_layers",
")",
":",
"layer_name",
"=",
"\"decoder_layer_%d\"",
"%",
"layer",
"with",
"tf",
".",
"variable_scope",
"(",
"layer_name",
")",
":",
"# Self attention layer",
"x",
"+=",
"layer_prepostprocess_dropout",
"(",
"mtf",
".",
"layers",
".",
"local_self_attention_spatial_blocks",
"(",
"mtf",
".",
"layers",
".",
"layer_norm",
"(",
"x",
",",
"model_dim",
",",
"name",
"=",
"\"layer_norm_att\"",
")",
",",
"kv_dim",
",",
"heads_dim",
",",
"memory_w_dim",
"=",
"blocks_w_dim",
",",
"mask_right",
"=",
"True",
",",
"name",
"=",
"\"self_att\"",
")",
",",
"hparams",
")",
"# ffn layer",
"x",
"+=",
"layer_prepostprocess_dropout",
"(",
"mtf",
".",
"layers",
".",
"dense_relu_dense",
"(",
"mtf",
".",
"layers",
".",
"layer_norm",
"(",
"x",
",",
"model_dim",
",",
"name",
"=",
"\"layer_norm_ffn\"",
")",
",",
"feedforward_dim",
",",
"hparams",
".",
"dropout",
",",
"dropout_broadcast_dims",
"=",
"[",
"length_dim",
"]",
")",
",",
"hparams",
")",
"output",
"=",
"mtf",
".",
"layers",
".",
"layer_norm",
"(",
"x",
",",
"model_dim",
",",
"name",
"=",
"\"final_layer_norm\"",
")",
"return",
"output"
] | 44.40625 | 15.59375 |
def psd(data, dt, ndivide=1, window=hanning, overlap_half=False):
"""Calculate power spectrum density of data.
Args:
data (np.ndarray): Input data.
dt (float): Time between each data.
ndivide (int): Do averaging (split data into ndivide, get psd of each, and average them).
        window (callable): Window function applied to each segment (default: hanning; None for a rectangular window).
        overlap_half (bool): Split data into half-overlapped regions.
Returns:
vk (np.ndarray): Frequency.
        psd (np.ndarray): Power spectral density.
"""
logger = getLogger('decode.utils.ndarray.psd')
if overlap_half:
step = int(len(data) / (ndivide + 1))
size = step * 2
else:
step = int(len(data) / ndivide)
size = step
if bin(len(data)).count('1') != 1:
logger.warning('warning: length of data is not power of 2: {}'.format(len(data)))
size = int(len(data) / ndivide)
    if bin(size).count('1') != 1:
if overlap_half:
logger.warning('warning: ((length of data) / (ndivide+1)) * 2 is not power of 2: {}'.format(size))
else:
logger.warning('warning: (length of data) / ndivide is not power of 2: {}'.format(size))
psd = np.zeros(size)
T = (size - 1) * dt
vs = 1 / dt
vk_ = fftfreq(size, dt)
vk = vk_[np.where(vk_ >= 0)]
for i in range(ndivide):
d = data[i * step:i * step + size]
if window is None:
w = np.ones(size)
corr = 1.0
else:
w = window(size)
corr = np.mean(w**2)
psd = psd + 2 * (np.abs(fft(d * w)))**2 / size * dt / corr
return vk, psd[:len(vk)] / ndivide
|
[
"def",
"psd",
"(",
"data",
",",
"dt",
",",
"ndivide",
"=",
"1",
",",
"window",
"=",
"hanning",
",",
"overlap_half",
"=",
"False",
")",
":",
"logger",
"=",
"getLogger",
"(",
"'decode.utils.ndarray.psd'",
")",
"if",
"overlap_half",
":",
"step",
"=",
"int",
"(",
"len",
"(",
"data",
")",
"/",
"(",
"ndivide",
"+",
"1",
")",
")",
"size",
"=",
"step",
"*",
"2",
"else",
":",
"step",
"=",
"int",
"(",
"len",
"(",
"data",
")",
"/",
"ndivide",
")",
"size",
"=",
"step",
"if",
"bin",
"(",
"len",
"(",
"data",
")",
")",
".",
"count",
"(",
"'1'",
")",
"!=",
"1",
":",
"logger",
".",
"warning",
"(",
"'warning: length of data is not power of 2: {}'",
".",
"format",
"(",
"len",
"(",
"data",
")",
")",
")",
"size",
"=",
"int",
"(",
"len",
"(",
"data",
")",
"/",
"ndivide",
")",
"if",
"bin",
"(",
"size",
")",
".",
"count",
"(",
"'1'",
")",
"!=",
"1.",
":",
"if",
"overlap_half",
":",
"logger",
".",
"warning",
"(",
"'warning: ((length of data) / (ndivide+1)) * 2 is not power of 2: {}'",
".",
"format",
"(",
"size",
")",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'warning: (length of data) / ndivide is not power of 2: {}'",
".",
"format",
"(",
"size",
")",
")",
"psd",
"=",
"np",
".",
"zeros",
"(",
"size",
")",
"T",
"=",
"(",
"size",
"-",
"1",
")",
"*",
"dt",
"vs",
"=",
"1",
"/",
"dt",
"vk_",
"=",
"fftfreq",
"(",
"size",
",",
"dt",
")",
"vk",
"=",
"vk_",
"[",
"np",
".",
"where",
"(",
"vk_",
">=",
"0",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"ndivide",
")",
":",
"d",
"=",
"data",
"[",
"i",
"*",
"step",
":",
"i",
"*",
"step",
"+",
"size",
"]",
"if",
"window",
"is",
"None",
":",
"w",
"=",
"np",
".",
"ones",
"(",
"size",
")",
"corr",
"=",
"1.0",
"else",
":",
"w",
"=",
"window",
"(",
"size",
")",
"corr",
"=",
"np",
".",
"mean",
"(",
"w",
"**",
"2",
")",
"psd",
"=",
"psd",
"+",
"2",
"*",
"(",
"np",
".",
"abs",
"(",
"fft",
"(",
"d",
"*",
"w",
")",
")",
")",
"**",
"2",
"/",
"size",
"*",
"dt",
"/",
"corr",
"return",
"vk",
",",
"psd",
"[",
":",
"len",
"(",
"vk",
")",
"]",
"/",
"ndivide"
] | 33.795918 | 20.265306 |
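A usage sketch with synthetic data, assuming psd is importable alongside the helpers it relies on (fft, fftfreq, hanning, getLogger):

import numpy as np

# 10 Hz sine sampled at 256 Hz for 8 s: 2048 samples, a power of
# two, so no segment-length warning is logged.
dt = 1 / 256
t = np.arange(0, 8, dt)
data = np.sin(2 * np.pi * 10 * t) + 0.1 * np.random.randn(len(t))

vk, p = psd(data, dt, ndivide=4)  # average the PSD over 4 segments
print(vk[np.argmax(p)])           # expect a peak near 10 Hz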
def set_conditions(self, variables, constraints):
"""Problem provided data.
variables = {variable-name: list-of-domain-values}
constraints = [(constraint_function, variable-names, default-variable-values)]
"""
self._vars, self._constraints = variables, []
# build constraint objects
for func, variables, values in constraints:
c = Constraint(func, variables, values, self._compute_search_spaces(variables))
self._constraints.append(c)
# sort into most constraining first
self._constraints.sort()
|
[
"def",
"set_conditions",
"(",
"self",
",",
"variables",
",",
"constraints",
")",
":",
"self",
".",
"_vars",
",",
"self",
".",
"_constraints",
"=",
"variables",
",",
"[",
"]",
"# build constraint objects",
"for",
"func",
",",
"variables",
",",
"values",
"in",
"constraints",
":",
"c",
"=",
"Constraint",
"(",
"func",
",",
"variables",
",",
"values",
",",
"self",
".",
"_compute_search_spaces",
"(",
"variables",
")",
")",
"self",
".",
"_constraints",
".",
"append",
"(",
"c",
")",
"# sort into most constraining first",
"self",
".",
"_constraints",
".",
"sort",
"(",
")"
] | 44.615385 | 15.846154 |
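A hedged sketch of the expected input shapes; the exact calling convention for the constraint function depends on the Constraint class, which is not shown, so the lambda below is illustrative only:

variables = {'x': [1, 2, 3], 'y': [1, 2, 3]}
constraints = [
    # (constraint_function, variable-names, default-variable-values)
    (lambda x, y: x < y, ('x', 'y'), None),
]
solver.set_conditions(variables, constraints)  # `solver` is assumed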
def _collapse_edge_passing_predicates(graph: BELGraph, edge_predicates: EdgePredicates = None) -> None:
"""Collapse all edges passing the given edge predicates."""
for u, v, _ in filter_edges(graph, edge_predicates=edge_predicates):
collapse_pair(graph, survivor=u, victim=v)
|
[
"def",
"_collapse_edge_passing_predicates",
"(",
"graph",
":",
"BELGraph",
",",
"edge_predicates",
":",
"EdgePredicates",
"=",
"None",
")",
"->",
"None",
":",
"for",
"u",
",",
"v",
",",
"_",
"in",
"filter_edges",
"(",
"graph",
",",
"edge_predicates",
"=",
"edge_predicates",
")",
":",
"collapse_pair",
"(",
"graph",
",",
"survivor",
"=",
"u",
",",
"victim",
"=",
"v",
")"
] | 72 | 26.25 |
def is_in_intervall(value, min_value, max_value, name='variable'):
"""
Raise an exception if value is not in an interval.
Parameters
----------
value : orderable
min_value : orderable
max_value : orderable
name : str
Name of the variable to print in exception.
"""
if not (min_value <= value <= max_value):
raise ValueError('{}={} is not in [{}, {}]'
.format(name, value, min_value, max_value))
|
[
"def",
"is_in_intervall",
"(",
"value",
",",
"min_value",
",",
"max_value",
",",
"name",
"=",
"'variable'",
")",
":",
"if",
"not",
"(",
"min_value",
"<=",
"value",
"<=",
"max_value",
")",
":",
"raise",
"ValueError",
"(",
"'{}={} is not in [{}, {}]'",
".",
"format",
"(",
"name",
",",
"value",
",",
"min_value",
",",
"max_value",
")",
")"
] | 30.8 | 17.466667 |
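Two quick calls illustrating the behaviour:

is_in_intervall(0.5, 0.0, 1.0, name='alpha')      # passes silently
try:
    is_in_intervall(1.5, 0.0, 1.0, name='alpha')
except ValueError as err:
    print(err)  # alpha=1.5 is not in [0.0, 1.0]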
def which_roles_can(self, name):
"""Which role can SendMail? """
targetPermissionRecords = AuthPermission.objects(creator=self.client, name=name).first()
return [{'role': group.role} for group in targetPermissionRecords.groups]
|
[
"def",
"which_roles_can",
"(",
"self",
",",
"name",
")",
":",
"targetPermissionRecords",
"=",
"AuthPermission",
".",
"objects",
"(",
"creator",
"=",
"self",
".",
"client",
",",
"name",
"=",
"name",
")",
".",
"first",
"(",
")",
"return",
"[",
"{",
"'role'",
":",
"group",
".",
"role",
"}",
"for",
"group",
"in",
"targetPermissionRecords",
".",
"groups",
"]"
] | 62 | 26.25 |
def check_label_shape(self, label_shape):
"""Checks if the new label shape is valid"""
if not len(label_shape) == 2:
raise ValueError('label_shape should have length 2')
if label_shape[0] < self.label_shape[0]:
msg = 'Attempts to reduce label count from %d to %d, not allowed.' \
% (self.label_shape[0], label_shape[0])
raise ValueError(msg)
if label_shape[1] != self.provide_label[0][1][2]:
msg = 'label_shape object width inconsistent: %d vs %d.' \
% (self.provide_label[0][1][2], label_shape[1])
raise ValueError(msg)
|
[
"def",
"check_label_shape",
"(",
"self",
",",
"label_shape",
")",
":",
"if",
"not",
"len",
"(",
"label_shape",
")",
"==",
"2",
":",
"raise",
"ValueError",
"(",
"'label_shape should have length 2'",
")",
"if",
"label_shape",
"[",
"0",
"]",
"<",
"self",
".",
"label_shape",
"[",
"0",
"]",
":",
"msg",
"=",
"'Attempts to reduce label count from %d to %d, not allowed.'",
"%",
"(",
"self",
".",
"label_shape",
"[",
"0",
"]",
",",
"label_shape",
"[",
"0",
"]",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"if",
"label_shape",
"[",
"1",
"]",
"!=",
"self",
".",
"provide_label",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"2",
"]",
":",
"msg",
"=",
"'label_shape object width inconsistent: %d vs %d.'",
"%",
"(",
"self",
".",
"provide_label",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"2",
"]",
",",
"label_shape",
"[",
"1",
"]",
")",
"raise",
"ValueError",
"(",
"msg",
")"
] | 52.75 | 14.583333 |
def read_and_redirect(request, notification_id):
"""
Marks the supplied notification as read and then redirects
to the supplied URL from the ``next`` URL parameter.
    **IMPORTANT**: This is a CSRF-unsafe method.
    Only use it if it's okay for you to mark notifications \
    as read without a robust check.
:param request: HTTP request context.
    :param notification_id: ID of the notification to be marked as read.
:returns: Redirect response to a valid target url.
"""
notification_page = reverse('notifications:all')
next_page = request.GET.get('next', notification_page)
if is_safe_url(next_page):
target = next_page
else:
target = notification_page
try:
user_nf = request.user.notifications.get(pk=notification_id)
if not user_nf.read:
user_nf.mark_as_read()
except Notification.DoesNotExist:
pass
return HttpResponseRedirect(target)
|
[
"def",
"read_and_redirect",
"(",
"request",
",",
"notification_id",
")",
":",
"notification_page",
"=",
"reverse",
"(",
"'notifications:all'",
")",
"next_page",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'next'",
",",
"notification_page",
")",
"if",
"is_safe_url",
"(",
"next_page",
")",
":",
"target",
"=",
"next_page",
"else",
":",
"target",
"=",
"notification_page",
"try",
":",
"user_nf",
"=",
"request",
".",
"user",
".",
"notifications",
".",
"get",
"(",
"pk",
"=",
"notification_id",
")",
"if",
"not",
"user_nf",
".",
"read",
":",
"user_nf",
".",
"mark_as_read",
"(",
")",
"except",
"Notification",
".",
"DoesNotExist",
":",
"pass",
"return",
"HttpResponseRedirect",
"(",
"target",
")"
] | 31.827586 | 18.103448 |
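A hedged URLconf sketch showing how the view might be wired up; the route and name are illustrative, and Django >= 2.0 path() syntax is assumed:

from django.urls import path

urlpatterns = [
    path('mark-read/<int:notification_id>/', read_and_redirect,
         name='read_and_redirect'),
]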
def get_query(query_name):
"""Find file matching query_name, read and return query object
"""
query_file_match = list(
filter(lambda i: query_name == i.stem, FLAT_QUERIES))
if not query_file_match:
return None
# TODO: Log warning if more than one match
query_file = query_file_match[0]
with open(query_file) as f:
metadata, query_body = frontmatter.parse(f.read())
result_mod = query_file.suffix.strip('.')
query_obj = SimpleNamespace(
name=query_name,
metadata=metadata,
path=query_file,
result_mod=result_mod,
body=query_body,
error=False,
executed=datetime.utcnow().isoformat())
return query_obj
|
[
"def",
"get_query",
"(",
"query_name",
")",
":",
"query_file_match",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"i",
":",
"query_name",
"==",
"i",
".",
"stem",
",",
"FLAT_QUERIES",
")",
")",
"if",
"not",
"query_file_match",
":",
"return",
"None",
"# TODO: Log warning if more than one match",
"query_file",
"=",
"query_file_match",
"[",
"0",
"]",
"with",
"open",
"(",
"query_file",
")",
"as",
"f",
":",
"metadata",
",",
"query_body",
"=",
"frontmatter",
".",
"parse",
"(",
"f",
".",
"read",
"(",
")",
")",
"result_mod",
"=",
"query_file",
".",
"suffix",
".",
"strip",
"(",
"'.'",
")",
"query_obj",
"=",
"SimpleNamespace",
"(",
"name",
"=",
"query_name",
",",
"metadata",
"=",
"metadata",
",",
"path",
"=",
"query_file",
",",
"result_mod",
"=",
"result_mod",
",",
"body",
"=",
"query_body",
",",
"error",
"=",
"False",
",",
"executed",
"=",
"datetime",
".",
"utcnow",
"(",
")",
".",
"isoformat",
"(",
")",
")",
"return",
"query_obj"
] | 33.238095 | 11.857143 |
def jenkins(self):
"""Generate jenkins job details."""
job_name = self.format['jenkins_job_name'].format(**self.data)
job = {'name': job_name}
return job
|
[
"def",
"jenkins",
"(",
"self",
")",
":",
"job_name",
"=",
"self",
".",
"format",
"[",
"'jenkins_job_name'",
"]",
".",
"format",
"(",
"*",
"*",
"self",
".",
"data",
")",
"job",
"=",
"{",
"'name'",
":",
"job_name",
"}",
"return",
"job"
] | 30.166667 | 20.333333 |
def _get_elements(complex_type, root):
"""Get attribute elements
"""
found_elements = []
element = findall(root, '{%s}complexType' % XS_NAMESPACE,
attribute_name='name', attribute_value=complex_type)[0]
found_elements = findall(element, '{%s}element' % XS_NAMESPACE)
return found_elements
|
[
"def",
"_get_elements",
"(",
"complex_type",
",",
"root",
")",
":",
"found_elements",
"=",
"[",
"]",
"element",
"=",
"findall",
"(",
"root",
",",
"'{%s}complexType'",
"%",
"XS_NAMESPACE",
",",
"attribute_name",
"=",
"'name'",
",",
"attribute_value",
"=",
"complex_type",
")",
"[",
"0",
"]",
"found_elements",
"=",
"findall",
"(",
"element",
",",
"'{%s}element'",
"%",
"XS_NAMESPACE",
")",
"return",
"found_elements"
] | 32.8 | 20 |
def data_filler_user_agent(self, number_of_rows, db):
'''creates and fills the table with user agent data
'''
try:
user_agent = db
data_list = list()
for i in range(0, number_of_rows):
post_uo_reg = {
"id": rnd_id_generator(self),
"ip": self.faker.ipv4(),
"countrycode": self.faker.country_code(),
"useragent": self.faker.user_agent()
}
user_agent.save(post_uo_reg)
logger.warning(
'user_agent Commits are successful after write job!',
extra=d)
except Exception as e:
logger.error(e, extra=d)
|
[
"def",
"data_filler_user_agent",
"(",
"self",
",",
"number_of_rows",
",",
"db",
")",
":",
"try",
":",
"user_agent",
"=",
"db",
"data_list",
"=",
"list",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"number_of_rows",
")",
":",
"post_uo_reg",
"=",
"{",
"\"id\"",
":",
"rnd_id_generator",
"(",
"self",
")",
",",
"\"ip\"",
":",
"self",
".",
"faker",
".",
"ipv4",
"(",
")",
",",
"\"countrycode\"",
":",
"self",
".",
"faker",
".",
"country_code",
"(",
")",
",",
"\"useragent\"",
":",
"self",
".",
"faker",
".",
"user_agent",
"(",
")",
"}",
"user_agent",
".",
"save",
"(",
"post_uo_reg",
")",
"logger",
".",
"warning",
"(",
"'user_agent Commits are successful after write job!'",
",",
"extra",
"=",
"d",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"e",
",",
"extra",
"=",
"d",
")"
] | 34.571429 | 16.952381 |
def exists(self):
"""Check whether the AppProfile already exists.
:rtype: bool
:returns: True if the AppProfile exists, else False.
"""
try:
self.instance_admin_client.get_app_profile(self.name)
return True
# NOTE: There could be other exceptions that are returned to the user.
except NotFound:
return False
|
[
"def",
"exists",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"instance_admin_client",
".",
"get_app_profile",
"(",
"self",
".",
"name",
")",
"return",
"True",
"# NOTE: There could be other exceptions that are returned to the user.",
"except",
"NotFound",
":",
"return",
"False"
] | 32.416667 | 20.25 |
def list(self, **params):
"""
Retrieve all notes
Returns all notes available to the user, according to the parameters provided
:calls: ``get /notes``
:param dict params: (optional) Search options.
    :return: List of dictionaries that support attribute-style access, which represent a collection of Notes.
:rtype: list
"""
_, _, notes = self.http_client.get("/notes", params=params)
return notes
|
[
"def",
"list",
"(",
"self",
",",
"*",
"*",
"params",
")",
":",
"_",
",",
"_",
",",
"notes",
"=",
"self",
".",
"http_client",
".",
"get",
"(",
"\"/notes\"",
",",
"params",
"=",
"params",
")",
"return",
"notes"
] | 32.857143 | 25.428571 |
def __frontend_limit_rules_descriptor(self, api_info):
"""Builds a frontend limit rules descriptor from API info.
Args:
api_info: An _ApiInfo object.
Returns:
A list of dictionaries with frontend limit rules information.
"""
if not api_info.frontend_limits.rules:
return None
rules = []
for rule in api_info.frontend_limits.rules:
descriptor = {}
for propname, descname in (('match', 'match'),
('qps', 'qps'),
('user_qps', 'userQps'),
('daily', 'daily'),
('analytics_id', 'analyticsId')):
if getattr(rule, propname) is not None:
descriptor[descname] = getattr(rule, propname)
if descriptor:
rules.append(descriptor)
return rules
|
[
"def",
"__frontend_limit_rules_descriptor",
"(",
"self",
",",
"api_info",
")",
":",
"if",
"not",
"api_info",
".",
"frontend_limits",
".",
"rules",
":",
"return",
"None",
"rules",
"=",
"[",
"]",
"for",
"rule",
"in",
"api_info",
".",
"frontend_limits",
".",
"rules",
":",
"descriptor",
"=",
"{",
"}",
"for",
"propname",
",",
"descname",
"in",
"(",
"(",
"'match'",
",",
"'match'",
")",
",",
"(",
"'qps'",
",",
"'qps'",
")",
",",
"(",
"'user_qps'",
",",
"'userQps'",
")",
",",
"(",
"'daily'",
",",
"'daily'",
")",
",",
"(",
"'analytics_id'",
",",
"'analyticsId'",
")",
")",
":",
"if",
"getattr",
"(",
"rule",
",",
"propname",
")",
"is",
"not",
"None",
":",
"descriptor",
"[",
"descname",
"]",
"=",
"getattr",
"(",
"rule",
",",
"propname",
")",
"if",
"descriptor",
":",
"rules",
".",
"append",
"(",
"descriptor",
")",
"return",
"rules"
] | 32.038462 | 18.923077 |
def install(name=None,
refresh=False,
sysupgrade=None,
pkgs=None,
sources=None,
**kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any pacman commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
    Install (``pacman -S``) the specified package(s). Add ``refresh=True`` to
install with ``-y``, add ``sysupgrade=True`` to install with ``-u``.
name
The name of the package to be installed. Note that this parameter is
ignored if either ``pkgs`` or ``sources`` is passed. Additionally,
please note that this option can only be used to install packages from
a software repository. To install a package file manually, use the
``sources`` option.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
refresh
Whether or not to refresh the package database before installing.
sysupgrade
Whether or not to upgrade the system packages before installing.
If refresh is set to ``True`` but sysupgrade is not specified, ``-u`` will be
applied
Multiple Package Installation Options:
pkgs
A list of packages to install from a software repository. Must be
passed as a python list. A specific version number can be specified
by using a single-element dict representing the package and its
version. As with the ``version`` parameter above, comparison operators
can be used to target a specific version of a package.
CLI Examples:
.. code-block:: bash
salt '*' pkg.install pkgs='["foo", "bar"]'
salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3-4"}]'
salt '*' pkg.install pkgs='["foo", {"bar": "<1.2.3-4"}]'
sources
A list of packages to install. Must be passed as a list of dicts,
with the keys being package names, and the values being the source URI
or local path to the package.
CLI Example:
.. code-block:: bash
salt '*' pkg.install \
sources='[{"foo": "salt://foo.pkg.tar.xz"}, \
{"bar": "salt://bar.pkg.tar.xz"}]'
Returns a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
'''
try:
pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](
name, pkgs, sources, **kwargs
)
except MinionError as exc:
raise CommandExecutionError(exc)
if not pkg_params:
return {}
if 'root' in kwargs:
pkg_params['-r'] = kwargs['root']
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.append('pacman')
targets = []
errors = []
if pkg_type == 'file':
cmd.extend(['-U', '--noprogressbar', '--noconfirm'])
cmd.extend(pkg_params)
elif pkg_type == 'repository':
cmd.append('-S')
if refresh is True:
cmd.append('-y')
if sysupgrade is True or (sysupgrade is None and refresh is True):
cmd.append('-u')
cmd.extend(['--noprogressbar', '--noconfirm', '--needed'])
wildcards = []
for param, version_num in six.iteritems(pkg_params):
if version_num is None:
targets.append(param)
else:
prefix, verstr = salt.utils.pkg.split_comparison(version_num)
if not prefix:
prefix = '='
if '*' in verstr:
if prefix == '=':
wildcards.append((param, verstr))
else:
errors.append(
'Invalid wildcard for {0}{1}{2}'.format(
param, prefix, verstr
)
)
continue
targets.append('{0}{1}{2}'.format(param, prefix, verstr))
if wildcards:
# Resolve wildcard matches
_available = list_repo_pkgs(*[x[0] for x in wildcards], refresh=refresh)
for pkgname, verstr in wildcards:
candidates = _available.get(pkgname, [])
match = salt.utils.itertools.fnmatch_multiple(candidates, verstr)
if match is not None:
targets.append('='.join((pkgname, match)))
else:
errors.append(
'No version matching \'{0}\' found for package \'{1}\' '
'(available: {2})'.format(
verstr,
pkgname,
', '.join(candidates) if candidates else 'none'
)
)
if refresh:
try:
# Prevent a second refresh when we run the install command
cmd.remove('-y')
except ValueError:
# Shouldn't happen since we only add -y when refresh is True,
# but just in case that code above is inadvertently changed,
# don't let this result in a traceback.
pass
if not errors:
cmd.extend(targets)
old = list_pkgs()
out = __salt__['cmd.run_all'](
cmd,
output_loglevel='trace',
python_shell=False
)
if out['retcode'] != 0 and out['stderr']:
errors = [out['stderr']]
else:
errors = []
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
try:
changes = ret
except UnboundLocalError:
# We ran into errors before we attempted to install anything, so
# there are no changes.
changes = {}
raise CommandExecutionError(
'Problem encountered installing package(s)',
info={'errors': errors, 'changes': changes}
)
return ret
|
[
"def",
"install",
"(",
"name",
"=",
"None",
",",
"refresh",
"=",
"False",
",",
"sysupgrade",
"=",
"None",
",",
"pkgs",
"=",
"None",
",",
"sources",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"pkg_params",
",",
"pkg_type",
"=",
"__salt__",
"[",
"'pkg_resource.parse_targets'",
"]",
"(",
"name",
",",
"pkgs",
",",
"sources",
",",
"*",
"*",
"kwargs",
")",
"except",
"MinionError",
"as",
"exc",
":",
"raise",
"CommandExecutionError",
"(",
"exc",
")",
"if",
"not",
"pkg_params",
":",
"return",
"{",
"}",
"if",
"'root'",
"in",
"kwargs",
":",
"pkg_params",
"[",
"'-r'",
"]",
"=",
"kwargs",
"[",
"'root'",
"]",
"cmd",
"=",
"[",
"]",
"if",
"salt",
".",
"utils",
".",
"systemd",
".",
"has_scope",
"(",
"__context__",
")",
"and",
"__salt__",
"[",
"'config.get'",
"]",
"(",
"'systemd.scope'",
",",
"True",
")",
":",
"cmd",
".",
"extend",
"(",
"[",
"'systemd-run'",
",",
"'--scope'",
"]",
")",
"cmd",
".",
"append",
"(",
"'pacman'",
")",
"targets",
"=",
"[",
"]",
"errors",
"=",
"[",
"]",
"targets",
"=",
"[",
"]",
"if",
"pkg_type",
"==",
"'file'",
":",
"cmd",
".",
"extend",
"(",
"[",
"'-U'",
",",
"'--noprogressbar'",
",",
"'--noconfirm'",
"]",
")",
"cmd",
".",
"extend",
"(",
"pkg_params",
")",
"elif",
"pkg_type",
"==",
"'repository'",
":",
"cmd",
".",
"append",
"(",
"'-S'",
")",
"if",
"refresh",
"is",
"True",
":",
"cmd",
".",
"append",
"(",
"'-y'",
")",
"if",
"sysupgrade",
"is",
"True",
"or",
"(",
"sysupgrade",
"is",
"None",
"and",
"refresh",
"is",
"True",
")",
":",
"cmd",
".",
"append",
"(",
"'-u'",
")",
"cmd",
".",
"extend",
"(",
"[",
"'--noprogressbar'",
",",
"'--noconfirm'",
",",
"'--needed'",
"]",
")",
"wildcards",
"=",
"[",
"]",
"for",
"param",
",",
"version_num",
"in",
"six",
".",
"iteritems",
"(",
"pkg_params",
")",
":",
"if",
"version_num",
"is",
"None",
":",
"targets",
".",
"append",
"(",
"param",
")",
"else",
":",
"prefix",
",",
"verstr",
"=",
"salt",
".",
"utils",
".",
"pkg",
".",
"split_comparison",
"(",
"version_num",
")",
"if",
"not",
"prefix",
":",
"prefix",
"=",
"'='",
"if",
"'*'",
"in",
"verstr",
":",
"if",
"prefix",
"==",
"'='",
":",
"wildcards",
".",
"append",
"(",
"(",
"param",
",",
"verstr",
")",
")",
"else",
":",
"errors",
".",
"append",
"(",
"'Invalid wildcard for {0}{1}{2}'",
".",
"format",
"(",
"param",
",",
"prefix",
",",
"verstr",
")",
")",
"continue",
"targets",
".",
"append",
"(",
"'{0}{1}{2}'",
".",
"format",
"(",
"param",
",",
"prefix",
",",
"verstr",
")",
")",
"if",
"wildcards",
":",
"# Resolve wildcard matches",
"_available",
"=",
"list_repo_pkgs",
"(",
"*",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"wildcards",
"]",
",",
"refresh",
"=",
"refresh",
")",
"for",
"pkgname",
",",
"verstr",
"in",
"wildcards",
":",
"candidates",
"=",
"_available",
".",
"get",
"(",
"pkgname",
",",
"[",
"]",
")",
"match",
"=",
"salt",
".",
"utils",
".",
"itertools",
".",
"fnmatch_multiple",
"(",
"candidates",
",",
"verstr",
")",
"if",
"match",
"is",
"not",
"None",
":",
"targets",
".",
"append",
"(",
"'='",
".",
"join",
"(",
"(",
"pkgname",
",",
"match",
")",
")",
")",
"else",
":",
"errors",
".",
"append",
"(",
"'No version matching \\'{0}\\' found for package \\'{1}\\' '",
"'(available: {2})'",
".",
"format",
"(",
"verstr",
",",
"pkgname",
",",
"', '",
".",
"join",
"(",
"candidates",
")",
"if",
"candidates",
"else",
"'none'",
")",
")",
"if",
"refresh",
":",
"try",
":",
"# Prevent a second refresh when we run the install command",
"cmd",
".",
"remove",
"(",
"'-y'",
")",
"except",
"ValueError",
":",
"# Shouldn't happen since we only add -y when refresh is True,",
"# but just in case that code above is inadvertently changed,",
"# don't let this result in a traceback.",
"pass",
"if",
"not",
"errors",
":",
"cmd",
".",
"extend",
"(",
"targets",
")",
"old",
"=",
"list_pkgs",
"(",
")",
"out",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"cmd",
",",
"output_loglevel",
"=",
"'trace'",
",",
"python_shell",
"=",
"False",
")",
"if",
"out",
"[",
"'retcode'",
"]",
"!=",
"0",
"and",
"out",
"[",
"'stderr'",
"]",
":",
"errors",
"=",
"[",
"out",
"[",
"'stderr'",
"]",
"]",
"else",
":",
"errors",
"=",
"[",
"]",
"__context__",
".",
"pop",
"(",
"'pkg.list_pkgs'",
",",
"None",
")",
"new",
"=",
"list_pkgs",
"(",
")",
"ret",
"=",
"salt",
".",
"utils",
".",
"data",
".",
"compare_dicts",
"(",
"old",
",",
"new",
")",
"if",
"errors",
":",
"try",
":",
"changes",
"=",
"ret",
"except",
"UnboundLocalError",
":",
"# We ran into errors before we attempted to install anything, so",
"# there are no changes.",
"changes",
"=",
"{",
"}",
"raise",
"CommandExecutionError",
"(",
"'Problem encountered installing package(s)'",
",",
"info",
"=",
"{",
"'errors'",
":",
"errors",
",",
"'changes'",
":",
"changes",
"}",
")",
"return",
"ret"
] | 35.609375 | 23.317708 |
def shift_by_n_processors(self, x, mesh_axis, offset, wrap):
"""Receive the slice from processor pcoord - offset.
Args:
x: a LaidOutTensor
mesh_axis: an integer
offset: an integer
wrap: a boolean. If True, then wrap around. Otherwise, pad with zeros.
"""
n = self.shape[mesh_axis].size
source_pcoord = []
for i in xrange(n):
c = i - offset
if c != c % n:
if wrap:
c = c % n
else:
c = None
source_pcoord.append(c)
return self.receive(x, mesh_axis, source_pcoord)
|
[
"def",
"shift_by_n_processors",
"(",
"self",
",",
"x",
",",
"mesh_axis",
",",
"offset",
",",
"wrap",
")",
":",
"n",
"=",
"self",
".",
"shape",
"[",
"mesh_axis",
"]",
".",
"size",
"source_pcoord",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"n",
")",
":",
"c",
"=",
"i",
"-",
"offset",
"if",
"c",
"!=",
"c",
"%",
"n",
":",
"if",
"wrap",
":",
"c",
"=",
"c",
"%",
"n",
"else",
":",
"c",
"=",
"None",
"source_pcoord",
".",
"append",
"(",
"c",
")",
"return",
"self",
".",
"receive",
"(",
"x",
",",
"mesh_axis",
",",
"source_pcoord",
")"
] | 27.45 | 18.5 |
def get_http_method_arg_name(self):
"""
Return the HTTP function to call and the params/data argument name
"""
if self.method == 'get':
arg_name = 'params'
else:
arg_name = 'data'
return getattr(requests, self.method), arg_name
|
[
"def",
"get_http_method_arg_name",
"(",
"self",
")",
":",
"if",
"self",
".",
"method",
"==",
"'get'",
":",
"arg_name",
"=",
"'params'",
"else",
":",
"arg_name",
"=",
"'data'",
"return",
"getattr",
"(",
"requests",
",",
"self",
".",
"method",
")",
",",
"arg_name"
] | 32.333333 | 12.111111 |
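A sketch of how a caller might consume the returned pair; client (an instance exposing this method) and the URL are assumptions:

# requests.get expects params=..., requests.post expects data=...;
# arg_name picks the right keyword automatically.
http_func, arg_name = client.get_http_method_arg_name()
response = http_func('https://example.com/api', **{arg_name: {'q': 'x'}})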
def update_job_libraries(
logger,
job_list,
match,
new_library_path,
token,
host,
):
"""
update libraries on jobs using same major version
Parameters
----------
logger: logging object
configured in cli_commands.py
job_list: list of strings
output of get_job_list
match: FilenameMatch object
match object with suffix
new_library_path: string
path to library in dbfs (including uri)
token: string
Databricks API key with admin permissions
host: string
Databricks account url
(e.g. https://fake-organization.cloud.databricks.com)
Side Effects
------------
jobs now require updated version of library
"""
for job in job_list:
get_res = requests.get(
host + '/api/2.0/jobs/get?job_id={}'.format(job['job_id']),
auth=('token', token),
)
if get_res.status_code == 200:
job_specs = get_res.json() # copy current job specs
settings = job_specs['settings']
job_specs.pop('settings')
new_libraries = []
for lib in settings['libraries']:
if (
match.suffix in lib.keys()
and lib[match.suffix] == job['library_path']
):
# replace entry for old library path with new one
new_libraries.append({match.suffix: new_library_path})
else:
new_libraries.append(lib)
settings['libraries'] = new_libraries
job_specs['new_settings'] = settings
post_res = requests.post(
host + '/api/2.0/jobs/reset',
auth=('token', token),
data=json.dumps(job_specs)
)
if post_res.status_code != 200:
raise APIError(post_res)
else:
raise APIError(get_res)
|
[
"def",
"update_job_libraries",
"(",
"logger",
",",
"job_list",
",",
"match",
",",
"new_library_path",
",",
"token",
",",
"host",
",",
")",
":",
"for",
"job",
"in",
"job_list",
":",
"get_res",
"=",
"requests",
".",
"get",
"(",
"host",
"+",
"'/api/2.0/jobs/get?job_id={}'",
".",
"format",
"(",
"job",
"[",
"'job_id'",
"]",
")",
",",
"auth",
"=",
"(",
"'token'",
",",
"token",
")",
",",
")",
"if",
"get_res",
".",
"status_code",
"==",
"200",
":",
"job_specs",
"=",
"get_res",
".",
"json",
"(",
")",
"# copy current job specs",
"settings",
"=",
"job_specs",
"[",
"'settings'",
"]",
"job_specs",
".",
"pop",
"(",
"'settings'",
")",
"new_libraries",
"=",
"[",
"]",
"for",
"lib",
"in",
"settings",
"[",
"'libraries'",
"]",
":",
"if",
"(",
"match",
".",
"suffix",
"in",
"lib",
".",
"keys",
"(",
")",
"and",
"lib",
"[",
"match",
".",
"suffix",
"]",
"==",
"job",
"[",
"'library_path'",
"]",
")",
":",
"# replace entry for old library path with new one",
"new_libraries",
".",
"append",
"(",
"{",
"match",
".",
"suffix",
":",
"new_library_path",
"}",
")",
"else",
":",
"new_libraries",
".",
"append",
"(",
"lib",
")",
"settings",
"[",
"'libraries'",
"]",
"=",
"new_libraries",
"job_specs",
"[",
"'new_settings'",
"]",
"=",
"settings",
"post_res",
"=",
"requests",
".",
"post",
"(",
"host",
"+",
"'/api/2.0/jobs/reset'",
",",
"auth",
"=",
"(",
"'token'",
",",
"token",
")",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"job_specs",
")",
")",
"if",
"post_res",
".",
"status_code",
"!=",
"200",
":",
"raise",
"APIError",
"(",
"post_res",
")",
"else",
":",
"raise",
"APIError",
"(",
"get_res",
")"
] | 30.564516 | 16.306452 |
def chunks(raw):
"""Yield successive EVENT_SIZE sized chunks from raw."""
for i in range(0, len(raw), EVENT_SIZE):
yield struct.unpack(EVENT_FORMAT, raw[i:i+EVENT_SIZE])
|
[
"def",
"chunks",
"(",
"raw",
")",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"raw",
")",
",",
"EVENT_SIZE",
")",
":",
"yield",
"struct",
".",
"unpack",
"(",
"EVENT_FORMAT",
",",
"raw",
"[",
"i",
":",
"i",
"+",
"EVENT_SIZE",
"]",
")"
] | 45.5 | 12.5 |
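A usage sketch; EVENT_FORMAT and EVENT_SIZE are module constants not shown above, so the values below are assumptions matching the Linux input_event layout on many 64-bit systems:

import struct

EVENT_FORMAT = 'llHHi'                      # assumed, not from the source
EVENT_SIZE = struct.calcsize(EVENT_FORMAT)  # 24 bytes with this layout

with open('/dev/input/event0', 'rb') as dev:
    raw = dev.read(EVENT_SIZE * 4)          # four raw events
    for sec, usec, ev_type, code, value in chunks(raw):
        print(sec, usec, ev_type, code, value)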
def identification_field_factory(label, error_required):
"""
    A simple identification field factory which enables you to set the label.
:param label:
String containing the label for this field.
:param error_required:
String containing the error message if the field is left empty.
"""
return forms.CharField(label=label,
widget=forms.TextInput(attrs=attrs_dict),
max_length=75,
error_messages={'required': error_required})
|
[
"def",
"identification_field_factory",
"(",
"label",
",",
"error_required",
")",
":",
"return",
"forms",
".",
"CharField",
"(",
"label",
"=",
"label",
",",
"widget",
"=",
"forms",
".",
"TextInput",
"(",
"attrs",
"=",
"attrs_dict",
")",
",",
"max_length",
"=",
"75",
",",
"error_messages",
"=",
"{",
"'required'",
":",
"error_required",
"}",
")"
] | 35.333333 | 20.8 |
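A hedged form sketch; attrs_dict is assumed to be defined in the factory's module, and the labels/messages are illustrative:

from django import forms

class SigninForm(forms.Form):
    identification = identification_field_factory(
        'Email or username',
        'Please enter your email or username.')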
def training_data(self):
""" Returns data dictionary from training.pkl """
data = pickle.load(open(os.path.join(self.repopath, 'training.pkl')))
return data.keys(), data.values()
|
[
"def",
"training_data",
"(",
"self",
")",
":",
"data",
"=",
"pickle",
".",
"load",
"(",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"repopath",
",",
"'training.pkl'",
")",
")",
")",
"return",
"data",
".",
"keys",
"(",
")",
",",
"data",
".",
"values",
"(",
")"
] | 39.8 | 18.8 |
def newDocTextLen(self, content, len):
"""Creation of a new text node with an extra content length
           parameter. The text node pertains to a given document. """
ret = libxml2mod.xmlNewDocTextLen(self._o, content, len)
if ret is None:raise treeError('xmlNewDocTextLen() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
|
[
"def",
"newDocTextLen",
"(",
"self",
",",
"content",
",",
"len",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlNewDocTextLen",
"(",
"self",
".",
"_o",
",",
"content",
",",
"len",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlNewDocTextLen() failed'",
")",
"__tmp",
"=",
"xmlNode",
"(",
"_obj",
"=",
"ret",
")",
"return",
"__tmp"
] | 51 | 11.428571 |
def ekm_log(logstr, priority=3):
""" Send string to module level log
Args:
logstr (str): string to print.
priority (int): priority, supports 3 (default) and 4 (special).
"""
if priority <= ekmmeters_log_level:
dt = datetime.datetime
stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M.%f")
ekmmeters_log_func("[EKM Meter Debug Message: " + stamp + "] -> " + logstr)
pass
|
[
"def",
"ekm_log",
"(",
"logstr",
",",
"priority",
"=",
"3",
")",
":",
"if",
"priority",
"<=",
"ekmmeters_log_level",
":",
"dt",
"=",
"datetime",
".",
"datetime",
"stamp",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M.%f\"",
")",
"ekmmeters_log_func",
"(",
"\"[EKM Meter Debug Message: \"",
"+",
"stamp",
"+",
"\"] -> \"",
"+",
"logstr",
")",
"pass"
] | 35.416667 | 18.916667 |
def print_stat(x, message=None):
""" A simple print Op that might be easier to use than :meth:`tf.Print`.
Use it like: ``x = print_stat(x, message='This is x')``.
"""
if message is None:
message = x.op.name
lst = [tf.shape(x), tf.reduce_mean(x)]
if x.dtype.is_floating:
lst.append(rms(x))
return tf.Print(x, lst + [x], summarize=20,
message=message, name='print_' + x.op.name)
|
[
"def",
"print_stat",
"(",
"x",
",",
"message",
"=",
"None",
")",
":",
"if",
"message",
"is",
"None",
":",
"message",
"=",
"x",
".",
"op",
".",
"name",
"lst",
"=",
"[",
"tf",
".",
"shape",
"(",
"x",
")",
",",
"tf",
".",
"reduce_mean",
"(",
"x",
")",
"]",
"if",
"x",
".",
"dtype",
".",
"is_floating",
":",
"lst",
".",
"append",
"(",
"rms",
"(",
"x",
")",
")",
"return",
"tf",
".",
"Print",
"(",
"x",
",",
"lst",
"+",
"[",
"x",
"]",
",",
"summarize",
"=",
"20",
",",
"message",
"=",
"message",
",",
"name",
"=",
"'print_'",
"+",
"x",
".",
"op",
".",
"name",
")"
] | 39.454545 | 11 |
def encode_params(self, data=None, **kwargs):
"""
Build the body for a text/plain request.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
charset = kwargs.get("charset", self.charset)
collection_format = kwargs.get("collection_format", self.collection_format)
output_str = kwargs.get("output_str", self.output_str)
if data is None:
return "", self.get_content_type(charset)
elif isinstance(data, (str, bytes)):
return data, self.get_content_type(charset)
elif hasattr(data, 'read'):
return data, self.get_content_type(charset)
elif collection_format == 'multi' and hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
result.append(b"=".join([self._encode(k, charset), self._encode(v, charset, output_str)]))
return b'\n'.join(result), self.get_content_type(charset)
elif collection_format == 'plain' and hasattr(data, '__iter__'):
results = []
for k, vs in to_key_val_dict(data).items():
results.append(b"=".join([self._encode(k, charset), self._encode(vs, charset, output_str)]))
return b'\n'.join(results), self.get_content_type(charset)
elif hasattr(data, '__iter__'):
results = []
for k, vs in to_key_val_dict(data).items():
if isinstance(vs, list):
v = self.COLLECTION_SEPARATORS[collection_format].join(e for e in vs)
key = k + '[]'
else:
v = vs
key = k
results.append(b"=".join([self._encode(key, charset), self._encode(v, charset, output_str)]))
return b"\n".join(results), self.get_content_type(charset)
else:
return str(data).encode(charset) if charset else str(data), self.get_content_type(charset)
|
[
"def",
"encode_params",
"(",
"self",
",",
"data",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"charset",
"=",
"kwargs",
".",
"get",
"(",
"\"charset\"",
",",
"self",
".",
"charset",
")",
"collection_format",
"=",
"kwargs",
".",
"get",
"(",
"\"collection_format\"",
",",
"self",
".",
"collection_format",
")",
"output_str",
"=",
"kwargs",
".",
"get",
"(",
"\"output_str\"",
",",
"self",
".",
"output_str",
")",
"if",
"data",
"is",
"None",
":",
"return",
"\"\"",
",",
"self",
".",
"get_content_type",
"(",
"charset",
")",
"elif",
"isinstance",
"(",
"data",
",",
"(",
"str",
",",
"bytes",
")",
")",
":",
"return",
"data",
",",
"self",
".",
"get_content_type",
"(",
"charset",
")",
"elif",
"hasattr",
"(",
"data",
",",
"'read'",
")",
":",
"return",
"data",
",",
"self",
".",
"get_content_type",
"(",
"charset",
")",
"elif",
"collection_format",
"==",
"'multi'",
"and",
"hasattr",
"(",
"data",
",",
"'__iter__'",
")",
":",
"result",
"=",
"[",
"]",
"for",
"k",
",",
"vs",
"in",
"to_key_val_list",
"(",
"data",
")",
":",
"if",
"isinstance",
"(",
"vs",
",",
"basestring",
")",
"or",
"not",
"hasattr",
"(",
"vs",
",",
"'__iter__'",
")",
":",
"vs",
"=",
"[",
"vs",
"]",
"for",
"v",
"in",
"vs",
":",
"result",
".",
"append",
"(",
"b\"=\"",
".",
"join",
"(",
"[",
"self",
".",
"_encode",
"(",
"k",
",",
"charset",
")",
",",
"self",
".",
"_encode",
"(",
"v",
",",
"charset",
",",
"output_str",
")",
"]",
")",
")",
"return",
"b'\\n'",
".",
"join",
"(",
"result",
")",
",",
"self",
".",
"get_content_type",
"(",
"charset",
")",
"elif",
"collection_format",
"==",
"'plain'",
"and",
"hasattr",
"(",
"data",
",",
"'__iter__'",
")",
":",
"results",
"=",
"[",
"]",
"for",
"k",
",",
"vs",
"in",
"to_key_val_dict",
"(",
"data",
")",
".",
"items",
"(",
")",
":",
"results",
".",
"append",
"(",
"b\"=\"",
".",
"join",
"(",
"[",
"self",
".",
"_encode",
"(",
"k",
",",
"charset",
")",
",",
"self",
".",
"_encode",
"(",
"vs",
",",
"charset",
",",
"output_str",
")",
"]",
")",
")",
"return",
"b'\\n'",
".",
"join",
"(",
"results",
")",
",",
"self",
".",
"get_content_type",
"(",
"charset",
")",
"elif",
"hasattr",
"(",
"data",
",",
"'__iter__'",
")",
":",
"results",
"=",
"[",
"]",
"for",
"k",
",",
"vs",
"in",
"to_key_val_dict",
"(",
"data",
")",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"vs",
",",
"list",
")",
":",
"v",
"=",
"self",
".",
"COLLECTION_SEPARATORS",
"[",
"collection_format",
"]",
".",
"join",
"(",
"e",
"for",
"e",
"in",
"vs",
")",
"key",
"=",
"k",
"+",
"'[]'",
"else",
":",
"v",
"=",
"vs",
"key",
"=",
"k",
"results",
".",
"append",
"(",
"b\"=\"",
".",
"join",
"(",
"[",
"self",
".",
"_encode",
"(",
"key",
",",
"charset",
")",
",",
"self",
".",
"_encode",
"(",
"v",
",",
"charset",
",",
"output_str",
")",
"]",
")",
")",
"return",
"b\"\\n\"",
".",
"join",
"(",
"results",
")",
",",
"self",
".",
"get_content_type",
"(",
"charset",
")",
"else",
":",
"return",
"str",
"(",
"data",
")",
".",
"encode",
"(",
"charset",
")",
"if",
"charset",
"else",
"str",
"(",
"data",
")",
",",
"self",
".",
"get_content_type",
"(",
"charset",
")"
] | 49.2 | 23.511111 |
def get_favorite_radio_stations(self, *args, **kwargs):
"""Convenience method for `get_music_library_information`
with ``search_type='radio_stations'``. For details of other arguments,
see `that method
<#soco.music_library.MusicLibrary.get_music_library_information>`_.
"""
args = tuple(['radio_stations'] + list(args))
return self.get_music_library_information(*args, **kwargs)
|
[
"def",
"get_favorite_radio_stations",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"tuple",
"(",
"[",
"'radio_stations'",
"]",
"+",
"list",
"(",
"args",
")",
")",
"return",
"self",
".",
"get_music_library_information",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 53.375 | 17.875 |
def calc_multi_exp_unc(sys_unc, n, mean, std, dof, confidence=0.95):
"""Calculate expanded uncertainty using values from multiple runs.
Note that this function assumes the statistic is a mean value, therefore
the combined standard deviation is divided by `sqrt(N)`.
Parameters
----------
sys_unc : numpy array of systematic uncertainties
n : numpy array of numbers of samples per set
std : numpy array of sample standard deviations
dof : numpy array of degrees of freedom
confidence : Confidence interval for t-statistic
"""
sys_unc = sys_unc.mean()
std_combined = combine_std(n, mean, std)
std_combined /= np.sqrt(n.sum())
std_unc_combined = np.sqrt(std_combined**2 + sys_unc**2)
dof = dof.sum()
t_combined = scipy.stats.t.interval(alpha=confidence, df=dof)[-1]
exp_unc_combined = t_combined*std_unc_combined
return exp_unc_combined
|
[
"def",
"calc_multi_exp_unc",
"(",
"sys_unc",
",",
"n",
",",
"mean",
",",
"std",
",",
"dof",
",",
"confidence",
"=",
"0.95",
")",
":",
"sys_unc",
"=",
"sys_unc",
".",
"mean",
"(",
")",
"std_combined",
"=",
"combine_std",
"(",
"n",
",",
"mean",
",",
"std",
")",
"std_combined",
"/=",
"np",
".",
"sqrt",
"(",
"n",
".",
"sum",
"(",
")",
")",
"std_unc_combined",
"=",
"np",
".",
"sqrt",
"(",
"std_combined",
"**",
"2",
"+",
"sys_unc",
"**",
"2",
")",
"dof",
"=",
"dof",
".",
"sum",
"(",
")",
"t_combined",
"=",
"scipy",
".",
"stats",
".",
"t",
".",
"interval",
"(",
"alpha",
"=",
"confidence",
",",
"df",
"=",
"dof",
")",
"[",
"-",
"1",
"]",
"exp_unc_combined",
"=",
"t_combined",
"*",
"std_unc_combined",
"return",
"exp_unc_combined"
] | 41.409091 | 17.363636 |
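A usage sketch with made-up run statistics; combine_std (called internally) is assumed to be importable from the same module:

import numpy as np

n = np.array([30, 30, 25])             # samples per run
mean = np.array([1.02, 0.98, 1.00])    # per-run means
std = np.array([0.05, 0.06, 0.055])    # per-run sample standard deviations
dof = n - 1
sys_unc = np.array([0.01, 0.01, 0.01]) # per-run systematic uncertainty

exp_unc = calc_multi_exp_unc(sys_unc, n, mean, std, dof)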
def send_capabilties_request(self, vehicle, name, m):
'''An alias for send_capabilities_request.
The word "capabilities" was misspelled in previous versions of this code. This is simply
an alias to send_capabilities_request using the legacy name.
'''
return self.send_capabilities_request(vehicle, name, m)
|
[
"def",
"send_capabilties_request",
"(",
"self",
",",
"vehicle",
",",
"name",
",",
"m",
")",
":",
"return",
"self",
".",
"send_capabilities_request",
"(",
"vehicle",
",",
"name",
",",
"m",
")"
] | 48.714286 | 28.428571 |
def _extract_units(self, obj, value):
''' Internal helper for dealing with units associated units properties
when setting values on |UnitsSpec| properties.
When ``value`` is a dict, this function may mutate the value of the
associated units property.
Args:
obj (HasProps) : instance to update units spec property value for
value (obj) : new value to set for the property
Returns:
copy of ``value``, with 'units' key and value removed when
applicable
'''
if isinstance(value, dict):
if 'units' in value:
value = copy(value) # so we can modify it
units = value.pop("units", None)
if units:
self.units_prop.__set__(obj, units)
return value
|
[
"def",
"_extract_units",
"(",
"self",
",",
"obj",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"if",
"'units'",
"in",
"value",
":",
"value",
"=",
"copy",
"(",
"value",
")",
"# so we can modify it",
"units",
"=",
"value",
".",
"pop",
"(",
"\"units\"",
",",
"None",
")",
"if",
"units",
":",
"self",
".",
"units_prop",
".",
"__set__",
"(",
"obj",
",",
"units",
")",
"return",
"value"
] | 35.043478 | 22.782609 |
def _select_ftdi_channel(channel):
"""Select multiplexer channel. Currently uses a FTDI chip via pylibftdi"""
if channel < 0 or channel > 8:
raise ArgumentError("FTDI-selected multiplexer only has channels 0-7 valid, "
"make sure you specify channel with -c channel=number", channel=channel)
from pylibftdi import BitBangDevice
bb = BitBangDevice(auto_detach=False)
bb.direction = 0b111
bb.port = channel
|
[
"def",
"_select_ftdi_channel",
"(",
"channel",
")",
":",
"if",
"channel",
"<",
"0",
"or",
"channel",
">",
"8",
":",
"raise",
"ArgumentError",
"(",
"\"FTDI-selected multiplexer only has channels 0-7 valid, \"",
"\"make sure you specify channel with -c channel=number\"",
",",
"channel",
"=",
"channel",
")",
"from",
"pylibftdi",
"import",
"BitBangDevice",
"bb",
"=",
"BitBangDevice",
"(",
"auto_detach",
"=",
"False",
")",
"bb",
".",
"direction",
"=",
"0b111",
"bb",
".",
"port",
"=",
"channel"
] | 50.666667 | 17.111111 |
def _mse_converged(self):
"""Check convergence based on mean squared error
Returns
-------
converged : boolean
Whether the parameter estimation converged.
mse : float
Mean squared error between prior and posterior.
"""
mse = mean_squared_error(self.local_prior, self.local_posterior_,
multioutput='uniform_average')
if mse > self.threshold:
return False, mse
else:
return True, mse
|
[
"def",
"_mse_converged",
"(",
"self",
")",
":",
"mse",
"=",
"mean_squared_error",
"(",
"self",
".",
"local_prior",
",",
"self",
".",
"local_posterior_",
",",
"multioutput",
"=",
"'uniform_average'",
")",
"if",
"mse",
">",
"self",
".",
"threshold",
":",
"return",
"False",
",",
"mse",
"else",
":",
"return",
"True",
",",
"mse"
] | 26 | 22.35 |
def _get_ami_dict(json_url):
"""Get ami from a web url.
Args:
region (str): AWS Region to find AMI ID.
Returns:
dict: Contents in dictionary format.
"""
LOG.info("Getting AMI from %s", json_url)
response = requests.get(json_url)
assert response.ok, "Error getting ami info from {}".format(json_url)
ami_dict = response.json()
LOG.debug('AMI json contents: %s', ami_dict)
return ami_dict
|
[
"def",
"_get_ami_dict",
"(",
"json_url",
")",
":",
"LOG",
".",
"info",
"(",
"\"Getting AMI from %s\"",
",",
"json_url",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"json_url",
")",
"assert",
"response",
".",
"ok",
",",
"\"Error getting ami info from {}\"",
".",
"format",
"(",
"json_url",
")",
"ami_dict",
"=",
"response",
".",
"json",
"(",
")",
"LOG",
".",
"debug",
"(",
"'AMI json contents: %s'",
",",
"ami_dict",
")",
"return",
"ami_dict"
] | 26.875 | 17.6875 |
def off_coordinator(self, year):
"""Returns the coach ID for the team's OC in a given year.
:year: An int representing the year.
:returns: A string containing the coach ID of the OC.
"""
try:
oc_anchor = self._year_info_pq(year, 'Offensive Coordinator')('a')
if oc_anchor:
return oc_anchor.attr['href']
except ValueError:
return None
|
[
"def",
"off_coordinator",
"(",
"self",
",",
"year",
")",
":",
"try",
":",
"oc_anchor",
"=",
"self",
".",
"_year_info_pq",
"(",
"year",
",",
"'Offensive Coordinator'",
")",
"(",
"'a'",
")",
"if",
"oc_anchor",
":",
"return",
"oc_anchor",
".",
"attr",
"[",
"'href'",
"]",
"except",
"ValueError",
":",
"return",
"None"
] | 35.25 | 15.833333 |
def order_queryset(self, queryset):
"""
Orders the passed in queryset, returning a new queryset in response. By default uses the _order query
parameter.
"""
order = self.derive_ordering()
# if we get our order from the request
# make sure it is a valid field in the list
if '_order' in self.request.GET:
if order.lstrip('-') not in self.derive_fields():
order = None
if order:
# if our order is a single string, convert to a simple list
if isinstance(order, str):
order = (order,)
queryset = queryset.order_by(*order)
return queryset
|
[
"def",
"order_queryset",
"(",
"self",
",",
"queryset",
")",
":",
"order",
"=",
"self",
".",
"derive_ordering",
"(",
")",
"# if we get our order from the request",
"# make sure it is a valid field in the list",
"if",
"'_order'",
"in",
"self",
".",
"request",
".",
"GET",
":",
"if",
"order",
".",
"lstrip",
"(",
"'-'",
")",
"not",
"in",
"self",
".",
"derive_fields",
"(",
")",
":",
"order",
"=",
"None",
"if",
"order",
":",
"# if our order is a single string, convert to a simple list",
"if",
"isinstance",
"(",
"order",
",",
"str",
")",
":",
"order",
"=",
"(",
"order",
",",
")",
"queryset",
"=",
"queryset",
".",
"order_by",
"(",
"*",
"order",
")",
"return",
"queryset"
] | 32.285714 | 18.952381 |
def start(self):
"""Start websocket connection."""
if self.state != STATE_RUNNING:
conn = self.loop.create_connection(
lambda: self, self.host, self.port)
task = self.loop.create_task(conn)
task.add_done_callback(self.init_done)
self.state = STATE_STARTING
|
[
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"state",
"!=",
"STATE_RUNNING",
":",
"conn",
"=",
"self",
".",
"loop",
".",
"create_connection",
"(",
"lambda",
":",
"self",
",",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
"task",
"=",
"self",
".",
"loop",
".",
"create_task",
"(",
"conn",
")",
"task",
".",
"add_done_callback",
"(",
"self",
".",
"init_done",
")",
"self",
".",
"state",
"=",
"STATE_STARTING"
] | 41.125 | 7.5 |
def has_printout(
state, index, not_printed_msg=None, pre_code=None, name=None, copy=False
):
"""Check if the right printouts happened.
    ``has_printout()`` will look for the printout in the solution code that you specified with ``index``, rerun the ``print()`` call in
the solution process, capture its output, and verify whether the output is present in the output of the student.
    This is more robust than ``Ex().check_function('print')``-initiated chains, as students can use as many
printouts as they want, as long as they do the correct one somewhere.
Args:
index (int): index of the ``print()`` call in the solution whose output you want to search for in the student output.
not_printed_msg (str): if specified, this overrides the default message that is generated when the output
is not found in the student output.
pre_code (str): Python code as a string that is executed before running the targeted student call.
This is the ideal place to set a random seed, for example.
copy (bool): whether to try to deep copy objects in the environment, such as lists, that could
accidentally be mutated. Disabled by default, which speeds up SCTs.
state (State): state as passed by the SCT chain. Don't specify this explicitly.
:Example:
Suppose you want somebody to print out 4: ::
print(1, 2, 3, 4)
The following SCT would check that: ::
Ex().has_printout(0)
    All of the following student submissions would pass: ::
print(1, 2, 3, 4)
print('1 2 3 4')
print(1, 2, '3 4')
print("random"); print(1, 2, 3, 4)
:Example:
Watch out: ``has_printout()`` will effectively **rerun** the ``print()`` call in the solution process after the entire solution script was executed.
If your solution script updates the value of `x` after executing it, ``has_printout()`` will not work.
Suppose you have the following solution: ::
x = 4
print(x)
x = 6
The following SCT will not work: ::
Ex().has_printout(0)
Why? When the ``print(x)`` call is executed, the value of ``x`` will be 6, and pythonwhat will look for the output `'6`' in the output the student generated.
In cases like these, ``has_printout()`` cannot be used.
:Example:
    Using ``has_printout()`` inside a for loop
Suppose you have the following solution: ::
for i in range(5):
print(i)
The following SCT will not work: ::
Ex().check_for_loop().check_body().has_printout(0)
    The reason is that ``has_printout()`` can only be called from the root state, ``Ex()``.
If you want to check printouts done in e.g. a for loop, you have to use a `check_function('print')` chain instead: ::
Ex().check_for_loop().check_body().\\
set_context(0).check_function('print').\\
check_args(0).has_equal_value()
"""
extra_msg = "If you want to check printouts done in e.g. a for loop, you have to use a `check_function('print')` chain instead."
state.assert_root("has_printout", extra_msg=extra_msg)
if not_printed_msg is None:
not_printed_msg = (
"Have you used `{{sol_call}}` to do the appropriate printouts?"
)
try:
sol_call_ast = state.ast_dispatcher("function_calls", state.solution_ast)[
"print"
][index]["node"]
except (KeyError, IndexError):
raise InstructorError(
"`has_printout({})` couldn't find the {} print call in your solution.".format(
index, utils.get_ord(index + 1)
)
)
out_sol, str_sol = getOutputInProcess(
tree=sol_call_ast,
process=state.solution_process,
context=state.solution_context,
env=state.solution_env,
pre_code=pre_code,
copy=copy,
)
sol_call_str = state.solution_ast_tokens.get_text(sol_call_ast)
if isinstance(str_sol, Exception):
raise InstructorError(
"Evaluating the solution expression {} raised error in solution process."
"Error: {} - {}".format(sol_call_str, type(out_sol), str_sol)
)
_msg = state.build_message(not_printed_msg, {"sol_call": sol_call_str})
has_output(state, out_sol.strip(), pattern=False, no_output_msg=_msg)
return state
|
[
"def",
"has_printout",
"(",
"state",
",",
"index",
",",
"not_printed_msg",
"=",
"None",
",",
"pre_code",
"=",
"None",
",",
"name",
"=",
"None",
",",
"copy",
"=",
"False",
")",
":",
"extra_msg",
"=",
"\"If you want to check printouts done in e.g. a for loop, you have to use a `check_function('print')` chain instead.\"",
"state",
".",
"assert_root",
"(",
"\"has_printout\"",
",",
"extra_msg",
"=",
"extra_msg",
")",
"if",
"not_printed_msg",
"is",
"None",
":",
"not_printed_msg",
"=",
"(",
"\"Have you used `{{sol_call}}` to do the appropriate printouts?\"",
")",
"try",
":",
"sol_call_ast",
"=",
"state",
".",
"ast_dispatcher",
"(",
"\"function_calls\"",
",",
"state",
".",
"solution_ast",
")",
"[",
"\"print\"",
"]",
"[",
"index",
"]",
"[",
"\"node\"",
"]",
"except",
"(",
"KeyError",
",",
"IndexError",
")",
":",
"raise",
"InstructorError",
"(",
"\"`has_printout({})` couldn't find the {} print call in your solution.\"",
".",
"format",
"(",
"index",
",",
"utils",
".",
"get_ord",
"(",
"index",
"+",
"1",
")",
")",
")",
"out_sol",
",",
"str_sol",
"=",
"getOutputInProcess",
"(",
"tree",
"=",
"sol_call_ast",
",",
"process",
"=",
"state",
".",
"solution_process",
",",
"context",
"=",
"state",
".",
"solution_context",
",",
"env",
"=",
"state",
".",
"solution_env",
",",
"pre_code",
"=",
"pre_code",
",",
"copy",
"=",
"copy",
",",
")",
"sol_call_str",
"=",
"state",
".",
"solution_ast_tokens",
".",
"get_text",
"(",
"sol_call_ast",
")",
"if",
"isinstance",
"(",
"str_sol",
",",
"Exception",
")",
":",
"raise",
"InstructorError",
"(",
"\"Evaluating the solution expression {} raised error in solution process.\"",
"\"Error: {} - {}\"",
".",
"format",
"(",
"sol_call_str",
",",
"type",
"(",
"out_sol",
")",
",",
"str_sol",
")",
")",
"_msg",
"=",
"state",
".",
"build_message",
"(",
"not_printed_msg",
",",
"{",
"\"sol_call\"",
":",
"sol_call_str",
"}",
")",
"has_output",
"(",
"state",
",",
"out_sol",
".",
"strip",
"(",
")",
",",
"pattern",
"=",
"False",
",",
"no_output_msg",
"=",
"_msg",
")",
"return",
"state"
] | 36.840336 | 32.10084 |
def _get_request_content(self, message=None):
'''Updates message with default message parameters.
:param message: Postmark message data
:type message: `dict`
:rtype: JSON encoded `unicode`
'''
message = self._cast_message(message=message)
return message.json()
|
[
"def",
"_get_request_content",
"(",
"self",
",",
"message",
"=",
"None",
")",
":",
"message",
"=",
"self",
".",
"_cast_message",
"(",
"message",
"=",
"message",
")",
"return",
"message",
".",
"json",
"(",
")"
] | 34.333333 | 15 |
def _execute_and_seal_error(method, arg, method_name):
"""Execute method with arg and return the result.
If the method fails, return a RayTaskError so it can be sealed in the
resultOID and retried by the user.
"""
try:
return method(arg)
except Exception:
return ray.worker.RayTaskError(method_name, traceback.format_exc())
|
[
"def",
"_execute_and_seal_error",
"(",
"method",
",",
"arg",
",",
"method_name",
")",
":",
"try",
":",
"return",
"method",
"(",
"arg",
")",
"except",
"Exception",
":",
"return",
"ray",
".",
"worker",
".",
"RayTaskError",
"(",
"method_name",
",",
"traceback",
".",
"format_exc",
"(",
")",
")"
] | 35.1 | 19.3 |
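A minimal usage sketch for the helper above; it assumes the function and its `ray`/`traceback` imports are in scope, and the inputs are made up:

ok = _execute_and_seal_error(int, "42", "int")     # -> 42
bad = _execute_and_seal_error(int, "oops", "int")  # -> a RayTaskError value; no exception is raised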
def set_exception(self, exception):
"""Sets the exception on the future."""
if not self.done():
raise TransferNotDoneError(
'set_exception can only be called once the transfer is '
'complete.')
self._coordinator.set_exception(exception, override=True)
|
[
"def",
"set_exception",
"(",
"self",
",",
"exception",
")",
":",
"if",
"not",
"self",
".",
"done",
"(",
")",
":",
"raise",
"TransferNotDoneError",
"(",
"'set_exception can only be called once the transfer is '",
"'complete.'",
")",
"self",
".",
"_coordinator",
".",
"set_exception",
"(",
"exception",
",",
"override",
"=",
"True",
")"
] | 44.714286 | 12.571429 |
def hasColumn(self, column, recurse=True, flags=0):
"""
Returns whether or not this column exists within the list of columns
for this schema.
:return <bool>
"""
return column in self.columns(recurse=recurse, flags=flags)
|
[
"def",
"hasColumn",
"(",
"self",
",",
"column",
",",
"recurse",
"=",
"True",
",",
"flags",
"=",
"0",
")",
":",
"return",
"column",
"in",
"self",
".",
"columns",
"(",
"recurse",
"=",
"recurse",
",",
"flags",
"=",
"flags",
")"
] | 34.25 | 17 |
def cee_map_remap_lossless_priority_lossless_remapped_priority(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
cee_map = ET.SubElement(config, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map")
name_key = ET.SubElement(cee_map, "name")
name_key.text = kwargs.pop('name')
remap = ET.SubElement(cee_map, "remap")
lossless_priority = ET.SubElement(remap, "lossless-priority")
lossless_remapped_priority = ET.SubElement(lossless_priority, "lossless-remapped-priority")
lossless_remapped_priority.text = kwargs.pop('lossless_remapped_priority')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
[
"def",
"cee_map_remap_lossless_priority_lossless_remapped_priority",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"cee_map",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"cee-map\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-cee-map\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"cee_map",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"remap",
"=",
"ET",
".",
"SubElement",
"(",
"cee_map",
",",
"\"remap\"",
")",
"lossless_priority",
"=",
"ET",
".",
"SubElement",
"(",
"remap",
",",
"\"lossless-priority\"",
")",
"lossless_remapped_priority",
"=",
"ET",
".",
"SubElement",
"(",
"lossless_priority",
",",
"\"lossless-remapped-priority\"",
")",
"lossless_remapped_priority",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'lossless_remapped_priority'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 52.071429 | 22.285714 |
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
super(HTTPSConnectionPool, self)._validate_conn(conn)
# Force connect early to allow us to validate the connection.
if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
conn.connect()
if not conn.is_verified:
warnings.warn((
'Unverified HTTPS request is being made. '
'Adding certificate verification is strongly advised. See: '
'https://urllib3.readthedocs.org/en/latest/security.html'),
InsecureRequestWarning)
|
[
"def",
"_validate_conn",
"(",
"self",
",",
"conn",
")",
":",
"super",
"(",
"HTTPSConnectionPool",
",",
"self",
")",
".",
"_validate_conn",
"(",
"conn",
")",
"# Force connect early to allow us to validate the connection.",
"if",
"not",
"getattr",
"(",
"conn",
",",
"'sock'",
",",
"None",
")",
":",
"# AppEngine might not have `.sock`",
"conn",
".",
"connect",
"(",
")",
"if",
"not",
"conn",
".",
"is_verified",
":",
"warnings",
".",
"warn",
"(",
"(",
"'Unverified HTTPS request is being made. '",
"'Adding certificate verification is strongly advised. See: '",
"'https://urllib3.readthedocs.org/en/latest/security.html'",
")",
",",
"InsecureRequestWarning",
")"
] | 41.9375 | 21.1875 |
def get(self,dimlist):
'''
get dimensions
:parameter dimlist: list of dimensions
'''
out=()
for i,d in enumerate(dimlist):
out+=(super(dimStr, self).get(d,None),)
return out
|
[
"def",
"get",
"(",
"self",
",",
"dimlist",
")",
":",
"out",
"=",
"(",
")",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"dimlist",
")",
":",
"out",
"+=",
"(",
"super",
"(",
"dimStr",
",",
"self",
")",
".",
"get",
"(",
"d",
",",
"None",
")",
",",
")",
"return",
"out"
] | 25.1 | 18.7 |
def start(debug=False, host='127.0.0.1'):
""" starts a nago agent (daemon) process """
if debug:
debug = True
nago.protocols.httpserver.app.run(debug=debug, host=host)
|
[
"def",
"start",
"(",
"debug",
"=",
"False",
",",
"host",
"=",
"'127.0.0.1'",
")",
":",
"if",
"debug",
":",
"debug",
"=",
"True",
"nago",
".",
"protocols",
".",
"httpserver",
".",
"app",
".",
"run",
"(",
"debug",
"=",
"debug",
",",
"host",
"=",
"host",
")"
] | 36.6 | 13.8 |
def Residual(*layers, **kwargs):
"""Constructs a residual version of layers, summing input to layers output."""
shortcut = kwargs.get('shortcut', Identity()) # pylint: disable=no-value-for-parameter
if len(layers) > 1:
return Serial(
Branch(), # pylint: disable=no-value-for-parameter
Parallel(Serial(*layers), shortcut),
SumBranches() # pylint: disable=no-value-for-parameter
)
elif len(layers) == 1:
return Serial(
Branch(), # pylint: disable=no-value-for-parameter
Parallel(layers[0], shortcut),
SumBranches() # pylint: disable=no-value-for-parameter
)
else:
raise ValueError('Empty residual combinator.')
|
[
"def",
"Residual",
"(",
"*",
"layers",
",",
"*",
"*",
"kwargs",
")",
":",
"shortcut",
"=",
"kwargs",
".",
"get",
"(",
"'shortcut'",
",",
"Identity",
"(",
")",
")",
"# pylint: disable=no-value-for-parameter",
"if",
"len",
"(",
"layers",
")",
">",
"1",
":",
"return",
"Serial",
"(",
"Branch",
"(",
")",
",",
"# pylint: disable=no-value-for-parameter",
"Parallel",
"(",
"Serial",
"(",
"*",
"layers",
")",
",",
"shortcut",
")",
",",
"SumBranches",
"(",
")",
"# pylint: disable=no-value-for-parameter",
")",
"elif",
"len",
"(",
"layers",
")",
"==",
"1",
":",
"return",
"Serial",
"(",
"Branch",
"(",
")",
",",
"# pylint: disable=no-value-for-parameter",
"Parallel",
"(",
"layers",
"[",
"0",
"]",
",",
"shortcut",
")",
",",
"SumBranches",
"(",
")",
"# pylint: disable=no-value-for-parameter",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Empty residual combinator.'",
")"
] | 39.705882 | 19.941176 |
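A hedged reading of the wiring this combinator produces; ``Dense`` and ``Relu`` are placeholder layer constructors, not taken from the snippet:

# block = Residual(Dense(64), Relu())
# expands to:
# Serial(Branch(),                             # duplicate the input
#        Parallel(Serial(Dense(64), Relu()),   # main path F(x)
#                 Identity()),                 # shortcut x
#        SumBranches())                        # output = F(x) + x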
def protocolise(url):
"""
Given a URL, check to see if there is an associated protocol.
If not, set the protocol to HTTP and return the protocolised URL
"""
# Use the regex to match http//localhost/something
protore = re.compile(r'https?:{0,1}/{1,2}')
parsed = urlparse.urlparse(url)
if not parsed.scheme and not protore.search(url):
url = 'http://{0}'.format(url)
return url
|
[
"def",
"protocolise",
"(",
"url",
")",
":",
"# Use the regex to match http//localhost/something",
"protore",
"=",
"re",
".",
"compile",
"(",
"r'https?:{0,1}/{1,2}'",
")",
"parsed",
"=",
"urlparse",
".",
"urlparse",
"(",
"url",
")",
"if",
"not",
"parsed",
".",
"scheme",
"and",
"not",
"protore",
".",
"search",
"(",
"url",
")",
":",
"url",
"=",
"'http://{0}'",
".",
"format",
"(",
"url",
")",
"return",
"url"
] | 34.083333 | 14.916667 |
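A quick behaviour sketch, assuming `protocolise` and its `re`/`urlparse` imports (Python 2) are in scope; the URLs are arbitrary examples:

print(protocolise('example.com/path'))     # -> 'http://example.com/path'
print(protocolise('https://example.com'))  # -> 'https://example.com' (scheme present, unchanged)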
def WriteVcard(filename, vcard, fopen=codecs.open):
"""Writes a vCard into the given filename."""
if os.access(filename, os.F_OK):
logger.warning('File exists at "{}", skipping.'.format(filename))
return False
try:
with fopen(filename, 'w', encoding='utf-8') as f:
logger.debug('Writing {}:\n{}'.format(filename, u(vcard.serialize())))
f.write(u(vcard.serialize()))
except OSError:
logger.error('Error writing to file "{}", skipping.'.format(filename))
return False
return True
|
[
"def",
"WriteVcard",
"(",
"filename",
",",
"vcard",
",",
"fopen",
"=",
"codecs",
".",
"open",
")",
":",
"if",
"os",
".",
"access",
"(",
"filename",
",",
"os",
".",
"F_OK",
")",
":",
"logger",
".",
"warning",
"(",
"'File exists at \"{}\", skipping.'",
".",
"format",
"(",
"filename",
")",
")",
"return",
"False",
"try",
":",
"with",
"fopen",
"(",
"filename",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"logger",
".",
"debug",
"(",
"'Writing {}:\\n{}'",
".",
"format",
"(",
"filename",
",",
"u",
"(",
"vcard",
".",
"serialize",
"(",
")",
")",
")",
")",
"f",
".",
"write",
"(",
"u",
"(",
"vcard",
".",
"serialize",
"(",
")",
")",
")",
"except",
"OSError",
":",
"logger",
".",
"error",
"(",
"'Error writing to file \"{}\", skipping.'",
".",
"format",
"(",
"filename",
")",
")",
"return",
"False",
"return",
"True"
] | 42.230769 | 20.307692 |
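A hedged usage sketch with the vobject library, which is assumed (not confirmed by the snippet) to supply the vCard objects; it also assumes the helper above and its module-level imports (os, codecs, logger, and the u() text helper) are in scope, and the path and name are placeholders:

import vobject

card = vobject.vCard()
card.add('fn').value = 'Ada Lovelace'
card.add('n').value = vobject.vcard.Name(family='Lovelace', given='Ada')
WriteVcard('/tmp/ada.vcf', card)  # True on success; False (plus a log entry) if the file exists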
def start(self):
"""
Starts the TCP server.
:return: Method success.
:rtype: bool
"""
if self.__online:
raise foundations.exceptions.ServerOperationError(
"{0} | '{1}' TCP Server is already online!".format(self.__class__.__name__, self))
try:
self.__server = SocketServer.TCPServer((self.__address, self.__port), self.__handler)
self.__worker = threading.Thread(target=self.__server.serve_forever)
self.__worker.setDaemon(True)
self.__worker.start()
self.__online = True
LOGGER.info(
"{0} | TCP Server successfully started with '{1}' address on '{2}' port using '{3}' requests handler!".format(
self.__class__.__name__, self.__address, self.__port, self.__handler.__name__))
return True
except socket.error as error:
if error.errno in (errno.EADDRINUSE, errno.EADDRNOTAVAIL):
LOGGER.warning(
"!> {0} | Cannot start TCP Server, address is already in use on port '{1}'!".format(
self.__class__.__name__, self.__port))
else:
raise error
|
[
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"__online",
":",
"raise",
"foundations",
".",
"exceptions",
".",
"ServerOperationError",
"(",
"\"{0} | '{1}' TCP Server is already online!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
")",
")",
"try",
":",
"self",
".",
"__server",
"=",
"SocketServer",
".",
"TCPServer",
"(",
"(",
"self",
".",
"__address",
",",
"self",
".",
"__port",
")",
",",
"self",
".",
"__handler",
")",
"self",
".",
"__worker",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"__server",
".",
"serve_forever",
")",
"self",
".",
"__worker",
".",
"setDaemon",
"(",
"True",
")",
"self",
".",
"__worker",
".",
"start",
"(",
")",
"self",
".",
"__online",
"=",
"True",
"LOGGER",
".",
"info",
"(",
"\"{0} | TCP Server successfully started with '{1}' address on '{2}' port using '{3}' requests handler!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"__address",
",",
"self",
".",
"__port",
",",
"self",
".",
"__handler",
".",
"__name__",
")",
")",
"return",
"True",
"except",
"socket",
".",
"error",
"as",
"error",
":",
"if",
"error",
".",
"errno",
"in",
"(",
"errno",
".",
"EADDRINUSE",
",",
"errno",
".",
"EADDRNOTAVAIL",
")",
":",
"LOGGER",
".",
"warning",
"(",
"\"!> {0} | Cannot start TCP Server, address is already in use on port '{1}'!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"__port",
")",
")",
"else",
":",
"raise",
"error"
] | 42.068966 | 26.206897 |
def _name_to_index(self, channels):
"""
Return the channel indices for the specified channel names.
Integers contained in `channel` are returned unmodified, if they
are within the range of ``self.channels``.
Parameters
----------
channels : int or str or list of int or list of str
Name(s) of the channel(s) of interest.
Returns
-------
int or list of int
Numerical index(ces) of the specified channels.
"""
# Check if list, then run recursively
if hasattr(channels, '__iter__') \
and not isinstance(channels, six.string_types):
return [self._name_to_index(ch) for ch in channels]
if isinstance(channels, six.string_types):
# channels is a string containing a channel name
if channels in self.channels:
return self.channels.index(channels)
else:
raise ValueError("{} is not a valid channel name."
.format(channels))
if isinstance(channels, int):
if (channels < len(self.channels)
and channels >= -len(self.channels)):
return channels
else:
raise ValueError("index out of range")
else:
raise TypeError("input argument should be an integer, string or "
"list of integers or strings")
|
[
"def",
"_name_to_index",
"(",
"self",
",",
"channels",
")",
":",
"# Check if list, then run recursively",
"if",
"hasattr",
"(",
"channels",
",",
"'__iter__'",
")",
"and",
"not",
"isinstance",
"(",
"channels",
",",
"six",
".",
"string_types",
")",
":",
"return",
"[",
"self",
".",
"_name_to_index",
"(",
"ch",
")",
"for",
"ch",
"in",
"channels",
"]",
"if",
"isinstance",
"(",
"channels",
",",
"six",
".",
"string_types",
")",
":",
"# channels is a string containing a channel name",
"if",
"channels",
"in",
"self",
".",
"channels",
":",
"return",
"self",
".",
"channels",
".",
"index",
"(",
"channels",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"{} is not a valid channel name.\"",
".",
"format",
"(",
"channels",
")",
")",
"if",
"isinstance",
"(",
"channels",
",",
"int",
")",
":",
"if",
"(",
"channels",
"<",
"len",
"(",
"self",
".",
"channels",
")",
"and",
"channels",
">=",
"-",
"len",
"(",
"self",
".",
"channels",
")",
")",
":",
"return",
"channels",
"else",
":",
"raise",
"ValueError",
"(",
"\"index out of range\"",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"input argument should be an integer, string or \"",
"\"list of integers or strings\"",
")"
] | 34.634146 | 19.463415 |
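Because the method only reads `self.channels`, a stub object is enough to exercise it; this sketch assumes the function above and `six` are importable, and the channel names are invented:

class _Stub(object):
    channels = ['FSC-A', 'SSC-A', 'FL1-A']  # made-up channel names
    _name_to_index = _name_to_index         # borrow the function above as a method

s = _Stub()
print(s._name_to_index('SSC-A'))        # -> 1
print(s._name_to_index(['FSC-A', 2]))   # -> [0, 2] (ints pass through after a range check)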
def get_backbuffer_size(self):
"""Get the width and height of the backbuffer as a Vector2."""
vec = Vector2()
vec.X = self.backbuffer.get_width()
vec.Y = self.backbuffer.get_height()
return vec
|
[
"def",
"get_backbuffer_size",
"(",
"self",
")",
":",
"vec",
"=",
"Vector2",
"(",
")",
"vec",
".",
"X",
"=",
"self",
".",
"backbuffer",
".",
"get_width",
"(",
")",
"vec",
".",
"Y",
"=",
"self",
".",
"backbuffer",
".",
"get_height",
"(",
")",
"return",
"vec"
] | 38 | 9.333333 |
def from_json(self, json_data):
"""
Load JSON data into this Task
"""
try:
data = json_data.decode()
except Exception:
data = json_data
self.__dict__ = json.loads(data)
|
[
"def",
"from_json",
"(",
"self",
",",
"json_data",
")",
":",
"try",
":",
"data",
"=",
"json_data",
".",
"decode",
"(",
")",
"except",
"Exception",
":",
"data",
"=",
"json_data",
"self",
".",
"__dict__",
"=",
"json",
".",
"loads",
"(",
"data",
")"
] | 25.777778 | 7.777778 |
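Since the loader simply replaces `__dict__`, any plain object can host it; a minimal sketch assuming the method above is in scope (the `Task` host class here is hypothetical):

import json

class Task(object):
    from_json = from_json  # reuse the method above

t = Task()
t.from_json(json.dumps({"name": "demo", "done": False}).encode())
print(t.name, t.done)  # -> demo False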
def use_federated_objective_bank_view(self):
"""Pass through to provider ObjectiveLookupSession.use_federated_objective_bank_view"""
self._objective_bank_view = FEDERATED
# self._get_provider_session('objective_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_federated_objective_bank_view()
except AttributeError:
pass
|
[
"def",
"use_federated_objective_bank_view",
"(",
"self",
")",
":",
"self",
".",
"_objective_bank_view",
"=",
"FEDERATED",
"# self._get_provider_session('objective_lookup_session') # To make sure the session is tracked",
"for",
"session",
"in",
"self",
".",
"_get_provider_sessions",
"(",
")",
":",
"try",
":",
"session",
".",
"use_federated_objective_bank_view",
"(",
")",
"except",
"AttributeError",
":",
"pass"
] | 52 | 17 |
def add_intersecting(self, division, intersection=None, symm=True):
"""
Adds paired relationships between intersecting divisions.
Optional intersection represents the portion of the area of the related
division intersecting this division. You can only specify an
intersection on one side of the relationship when adding a peer.
"""
relationship, created = IntersectRelationship.objects.update_or_create(
from_division=self,
to_division=division,
defaults={"intersection": intersection},
)
if symm:
division.add_intersecting(self, None, False)
return relationship
|
[
"def",
"add_intersecting",
"(",
"self",
",",
"division",
",",
"intersection",
"=",
"None",
",",
"symm",
"=",
"True",
")",
":",
"relationship",
",",
"created",
"=",
"IntersectRelationship",
".",
"objects",
".",
"update_or_create",
"(",
"from_division",
"=",
"self",
",",
"to_division",
"=",
"division",
",",
"defaults",
"=",
"{",
"\"intersection\"",
":",
"intersection",
"}",
",",
")",
"if",
"symm",
":",
"division",
".",
"add_intersecting",
"(",
"self",
",",
"None",
",",
"False",
")",
"return",
"relationship"
] | 42.25 | 21.375 |
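A hypothetical Django shell sketch; `a` and `b` stand for two saved `Division` instances and are not defined in the snippet:

# rel = a.add_intersecting(b, intersection=0.25)
# rel.intersection   # -> 0.25
# the reverse relationship on b was created too (with intersection=None, symm=False)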
def init_structure(self, total_num_bonds, total_num_atoms,
total_num_groups, total_num_chains, total_num_models,
structure_id):
"""Initialise the structure object.
:param total_num_bonds: the number of bonds in the structure
:param total_num_atoms: the number of atoms in the structure
:param total_num_groups: the number of groups in the structure
:param total_num_chains: the number of chains in the structure
:param total_num_models: the number of models in the structure
:param structure_id: the id of the structure (e.g. PDB id)
"""
self.mmtf_version = constants.MMTF_VERSION
self.mmtf_producer = constants.PRODUCER
self.num_atoms = total_num_atoms
self.num_bonds = total_num_bonds
self.num_groups = total_num_groups
self.num_chains = total_num_chains
self.num_models = total_num_models
self.structure_id = structure_id
# initialise the arrays
self.x_coord_list = []
self.y_coord_list = []
self.z_coord_list = []
self.group_type_list = []
self.entity_list = []
self.b_factor_list = []
self.occupancy_list = []
self.atom_id_list = []
self.alt_loc_list = []
self.ins_code_list = []
self.group_id_list = []
self.sequence_index_list = []
self.group_list = []
self.chain_name_list = []
self.chain_id_list = []
self.bond_atom_list = []
self.bond_order_list = []
self.sec_struct_list = []
self.chains_per_model = []
self.groups_per_chain = []
self.current_group = None
self.bio_assembly = []
|
[
"def",
"init_structure",
"(",
"self",
",",
"total_num_bonds",
",",
"total_num_atoms",
",",
"total_num_groups",
",",
"total_num_chains",
",",
"total_num_models",
",",
"structure_id",
")",
":",
"self",
".",
"mmtf_version",
"=",
"constants",
".",
"MMTF_VERSION",
"self",
".",
"mmtf_producer",
"=",
"constants",
".",
"PRODUCER",
"self",
".",
"num_atoms",
"=",
"total_num_atoms",
"self",
".",
"num_bonds",
"=",
"total_num_bonds",
"self",
".",
"num_groups",
"=",
"total_num_groups",
"self",
".",
"num_chains",
"=",
"total_num_chains",
"self",
".",
"num_models",
"=",
"total_num_models",
"self",
".",
"structure_id",
"=",
"structure_id",
"# initialise the arrays",
"self",
".",
"x_coord_list",
"=",
"[",
"]",
"self",
".",
"y_coord_list",
"=",
"[",
"]",
"self",
".",
"z_coord_list",
"=",
"[",
"]",
"self",
".",
"group_type_list",
"=",
"[",
"]",
"self",
".",
"entity_list",
"=",
"[",
"]",
"self",
".",
"b_factor_list",
"=",
"[",
"]",
"self",
".",
"occupancy_list",
"=",
"[",
"]",
"self",
".",
"atom_id_list",
"=",
"[",
"]",
"self",
".",
"alt_loc_list",
"=",
"[",
"]",
"self",
".",
"ins_code_list",
"=",
"[",
"]",
"self",
".",
"group_id_list",
"=",
"[",
"]",
"self",
".",
"sequence_index_list",
"=",
"[",
"]",
"self",
".",
"group_list",
"=",
"[",
"]",
"self",
".",
"chain_name_list",
"=",
"[",
"]",
"self",
".",
"chain_id_list",
"=",
"[",
"]",
"self",
".",
"bond_atom_list",
"=",
"[",
"]",
"self",
".",
"bond_order_list",
"=",
"[",
"]",
"self",
".",
"sec_struct_list",
"=",
"[",
"]",
"self",
".",
"chains_per_model",
"=",
"[",
"]",
"self",
".",
"groups_per_chain",
"=",
"[",
"]",
"self",
".",
"current_group",
"=",
"None",
"self",
".",
"bio_assembly",
"=",
"[",
"]"
] | 40.619048 | 10.619048 |
def cget(self, key):
"""
Query widget option.
:param key: option name
:type key: str
:return: value of the option
To get the list of options for this widget, call the method :meth:`~Balloon.keys`.
"""
if key == "headertext":
return self.__headertext
elif key == "text":
return self.__text
elif key == "width":
return self.__width
elif key == "timeout":
return self._timeout
elif key == "background":
return self.__background
else:
return ttk.Frame.cget(self, key)
|
[
"def",
"cget",
"(",
"self",
",",
"key",
")",
":",
"if",
"key",
"==",
"\"headertext\"",
":",
"return",
"self",
".",
"__headertext",
"elif",
"key",
"==",
"\"text\"",
":",
"return",
"self",
".",
"__text",
"elif",
"key",
"==",
"\"width\"",
":",
"return",
"self",
".",
"__width",
"elif",
"key",
"==",
"\"timeout\"",
":",
"return",
"self",
".",
"_timeout",
"elif",
"key",
"==",
"\"background\"",
":",
"return",
"self",
".",
"__background",
"else",
":",
"return",
"ttk",
".",
"Frame",
".",
"cget",
"(",
"self",
",",
"key",
")"
] | 28.181818 | 14.090909 |
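A hypothetical interaction with the widget, assuming a `Balloon` instance was built on some parent; option names and values are invented:

# balloon = Balloon(root, headertext="Help", text="Click to submit", timeout=1.0)
# balloon.cget("headertext")   # -> "Help"
# balloon.cget("borderwidth")  # unknown keys fall through to ttk.Frame.cget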
def ensure_berksfile_cookbooks_are_installed():
"""Run 'berks vendor' to berksfile cookbooks directory"""
msg = "Vendoring cookbooks from Berksfile {0} to directory {1}..."
print(msg.format(env.berksfile, env.berksfile_cookbooks_directory))
run_vendor = True
cookbooks_dir = env.berksfile_cookbooks_directory
berksfile_lock_path = cookbooks_dir+'/Berksfile.lock'
berksfile_lock_exists = os.path.isfile(berksfile_lock_path)
cookbooks_dir_exists = os.path.isdir(cookbooks_dir)
if cookbooks_dir_exists and berksfile_lock_exists:
berksfile_mtime = os.stat('Berksfile').st_mtime
cookbooks_mtime = os.stat(berksfile_lock_path).st_mtime
run_vendor = berksfile_mtime > cookbooks_mtime
if run_vendor:
if cookbooks_dir_exists:
shutil.rmtree(env.berksfile_cookbooks_directory)
p = subprocess.Popen(['berks', 'vendor', env.berksfile_cookbooks_directory],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if env.verbose or p.returncode:
print stdout, stderr
|
[
"def",
"ensure_berksfile_cookbooks_are_installed",
"(",
")",
":",
"msg",
"=",
"\"Vendoring cookbooks from Berksfile {0} to directory {1}...\"",
"print",
"(",
"msg",
".",
"format",
"(",
"env",
".",
"berksfile",
",",
"env",
".",
"berksfile_cookbooks_directory",
")",
")",
"run_vendor",
"=",
"True",
"cookbooks_dir",
"=",
"env",
".",
"berksfile_cookbooks_directory",
"berksfile_lock_path",
"=",
"cookbooks_dir",
"+",
"'/Berksfile.lock'",
"berksfile_lock_exists",
"=",
"os",
".",
"path",
".",
"isfile",
"(",
"berksfile_lock_path",
")",
"cookbooks_dir_exists",
"=",
"os",
".",
"path",
".",
"isdir",
"(",
"cookbooks_dir",
")",
"if",
"cookbooks_dir_exists",
"and",
"berksfile_lock_exists",
":",
"berksfile_mtime",
"=",
"os",
".",
"stat",
"(",
"'Berksfile'",
")",
".",
"st_mtime",
"cookbooks_mtime",
"=",
"os",
".",
"stat",
"(",
"berksfile_lock_path",
")",
".",
"st_mtime",
"run_vendor",
"=",
"berksfile_mtime",
">",
"cookbooks_mtime",
"if",
"run_vendor",
":",
"if",
"cookbooks_dir_exists",
":",
"shutil",
".",
"rmtree",
"(",
"env",
".",
"berksfile_cookbooks_directory",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"'berks'",
",",
"'vendor'",
",",
"env",
".",
"berksfile_cookbooks_directory",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"stdout",
",",
"stderr",
"=",
"p",
".",
"communicate",
"(",
")",
"if",
"env",
".",
"verbose",
"or",
"p",
".",
"returncode",
":",
"print",
"stdout",
",",
"stderr"
] | 41.962963 | 20.296296 |
def _process_fields(self):
"""Default info massage to appropiate format/style.
This processing is called on preprocess and postprocess, AKA
before and after conversion of fields to appropiate
format/style.
Perfect example: custom fields on certain objects is a mess
(IMHO) when retrieved from Mambu, so some easiness is
implemented here to access them. See some of this objects
modules and pydocs for further info.
Tasks done here:
- Each custom field is given a 'name' key that holds the field
name, and for each keyed name, the value of the custom field is
assigned. Each pair of custom field name/value is entered as a
new property on the main dictionary, allowing an easy access to
them, not nested inside a pretty dark 'customInformation/Values'
list.
- Every item on the attrs dictionary gets stripped from trailing
spaces (useful when users make typos).
PLEASE REMEMBER! whenever you call postprocess on inherited
classes you should call this method too, or else you lose the
effect of the tasks done here.
"""
try:
try:
if self.has_key(self.customFieldName):
self[self.customFieldName] = [ c for c in self[self.customFieldName] if c['customField']['state']!="DEACTIVATED" ]
for custom in self[self.customFieldName]:
field_name = custom['customField']['name']
field_id = custom['customField']['id']
if custom['customFieldSetGroupIndex'] != -1:
field_name += '_'+str(custom['customFieldSetGroupIndex'])
field_id += '_'+str(custom['customFieldSetGroupIndex'])
custom['name'] = field_name
custom['id'] = field_id
try:
self[field_name] = custom['value']
self[field_id] = custom['value']
except KeyError:
self[field_name] = custom['linkedEntityKeyValue']
self[field_id] = custom['linkedEntityKeyValue']
custom['value'] = custom['linkedEntityKeyValue']
# in case you don't have any customFieldName, don't do anything here
except (AttributeError, TypeError):
pass
for k,v in self.items():
try:
self[k] = v.strip()
except Exception:
pass
except NotImplementedError:
pass
|
[
"def",
"_process_fields",
"(",
"self",
")",
":",
"try",
":",
"try",
":",
"if",
"self",
".",
"has_key",
"(",
"self",
".",
"customFieldName",
")",
":",
"self",
"[",
"self",
".",
"customFieldName",
"]",
"=",
"[",
"c",
"for",
"c",
"in",
"self",
"[",
"self",
".",
"customFieldName",
"]",
"if",
"c",
"[",
"'customField'",
"]",
"[",
"'state'",
"]",
"!=",
"\"DEACTIVATED\"",
"]",
"for",
"custom",
"in",
"self",
"[",
"self",
".",
"customFieldName",
"]",
":",
"field_name",
"=",
"custom",
"[",
"'customField'",
"]",
"[",
"'name'",
"]",
"field_id",
"=",
"custom",
"[",
"'customField'",
"]",
"[",
"'id'",
"]",
"if",
"custom",
"[",
"'customFieldSetGroupIndex'",
"]",
"!=",
"-",
"1",
":",
"field_name",
"+=",
"'_'",
"+",
"str",
"(",
"custom",
"[",
"'customFieldSetGroupIndex'",
"]",
")",
"field_id",
"+=",
"'_'",
"+",
"str",
"(",
"custom",
"[",
"'customFieldSetGroupIndex'",
"]",
")",
"custom",
"[",
"'name'",
"]",
"=",
"field_name",
"custom",
"[",
"'id'",
"]",
"=",
"field_id",
"try",
":",
"self",
"[",
"field_name",
"]",
"=",
"custom",
"[",
"'value'",
"]",
"self",
"[",
"field_id",
"]",
"=",
"custom",
"[",
"'value'",
"]",
"except",
"KeyError",
":",
"self",
"[",
"field_name",
"]",
"=",
"custom",
"[",
"'linkedEntityKeyValue'",
"]",
"self",
"[",
"field_id",
"]",
"=",
"custom",
"[",
"'linkedEntityKeyValue'",
"]",
"custom",
"[",
"'value'",
"]",
"=",
"custom",
"[",
"'linkedEntityKeyValue'",
"]",
"# in case you don't have any customFieldName, don't do anything here",
"except",
"(",
"AttributeError",
",",
"TypeError",
")",
":",
"pass",
"for",
"k",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
":",
"try",
":",
"self",
"[",
"k",
"]",
"=",
"v",
".",
"strip",
"(",
")",
"except",
"Exception",
":",
"pass",
"except",
"NotImplementedError",
":",
"pass"
] | 46.275862 | 24.275862 |
def _make_request_data(self, teststep_dict, entry_json):
""" parse HAR entry request data, and make teststep request data
Args:
entry_json (dict):
{
"request": {
"method": "POST",
"postData": {
"mimeType": "application/x-www-form-urlencoded; charset=utf-8",
"params": [
{"name": "a", "value": 1},
{"name": "b", "value": "2"}
}
},
},
"response": {...}
}
Returns:
{
"request": {
"method": "POST",
"data": {"v": "1", "w": "2"}
}
}
"""
method = entry_json["request"].get("method")
if method in ["POST", "PUT", "PATCH"]:
postData = entry_json["request"].get("postData", {})
mimeType = postData.get("mimeType")
# Note that text and params fields are mutually exclusive.
if "text" in postData:
post_data = postData.get("text")
else:
params = postData.get("params", [])
post_data = utils.convert_list_to_dict(params)
request_data_key = "data"
if not mimeType:
pass
elif mimeType.startswith("application/json"):
try:
post_data = json.loads(post_data)
request_data_key = "json"
except JSONDecodeError:
pass
elif mimeType.startswith("application/x-www-form-urlencoded"):
post_data = utils.convert_x_www_form_urlencoded_to_dict(post_data)
else:
# TODO: make compatible with more mimeType
pass
teststep_dict["request"][request_data_key] = post_data
|
[
"def",
"_make_request_data",
"(",
"self",
",",
"teststep_dict",
",",
"entry_json",
")",
":",
"method",
"=",
"entry_json",
"[",
"\"request\"",
"]",
".",
"get",
"(",
"\"method\"",
")",
"if",
"method",
"in",
"[",
"\"POST\"",
",",
"\"PUT\"",
",",
"\"PATCH\"",
"]",
":",
"postData",
"=",
"entry_json",
"[",
"\"request\"",
"]",
".",
"get",
"(",
"\"postData\"",
",",
"{",
"}",
")",
"mimeType",
"=",
"postData",
".",
"get",
"(",
"\"mimeType\"",
")",
"# Note that text and params fields are mutually exclusive.",
"if",
"\"text\"",
"in",
"postData",
":",
"post_data",
"=",
"postData",
".",
"get",
"(",
"\"text\"",
")",
"else",
":",
"params",
"=",
"postData",
".",
"get",
"(",
"\"params\"",
",",
"[",
"]",
")",
"post_data",
"=",
"utils",
".",
"convert_list_to_dict",
"(",
"params",
")",
"request_data_key",
"=",
"\"data\"",
"if",
"not",
"mimeType",
":",
"pass",
"elif",
"mimeType",
".",
"startswith",
"(",
"\"application/json\"",
")",
":",
"try",
":",
"post_data",
"=",
"json",
".",
"loads",
"(",
"post_data",
")",
"request_data_key",
"=",
"\"json\"",
"except",
"JSONDecodeError",
":",
"pass",
"elif",
"mimeType",
".",
"startswith",
"(",
"\"application/x-www-form-urlencoded\"",
")",
":",
"post_data",
"=",
"utils",
".",
"convert_x_www_form_urlencoded_to_dict",
"(",
"post_data",
")",
"else",
":",
"# TODO: make compatible with more mimeType",
"pass",
"teststep_dict",
"[",
"\"request\"",
"]",
"[",
"request_data_key",
"]",
"=",
"post_data"
] | 34.912281 | 18.754386 |
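A small sketch exercising the parser with a hand-written HAR entry; `parser` stands for an instance of the surrounding class and is an assumption:

entry = {
    "request": {
        "method": "POST",
        "postData": {"mimeType": "application/json", "text": '{"a": 1}'},
    },
    "response": {},
}
teststep = {"request": {}}
parser._make_request_data(teststep, entry)
print(teststep)  # -> {'request': {'json': {'a': 1}}}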
def azimintpix(data, dataerr, bcx, bcy, mask=None, Ntheta=100, pixmin=0,
pixmax=np.inf, returnmask=False, errorpropagation=2):
"""Azimuthal integration (averaging) on the detector plane
Inputs:
data: scattering pattern matrix (np.ndarray, dtype: np.double)
dataerr: error matrix (np.ndarray, dtype: np.double; or None)
bcx, bcy: beam position, counting from 1
mask: mask matrix (np.ndarray, dtype: np.uint8)
Ntheta: Number of points in the abscissa (azimuth angle)
pixmin: smallest distance from the origin in pixels
pixmax: largest distance from the origin in pixels
returnmask: if the effective mask matrix is to be returned
Outputs: theta, Intensity, [Error], Area, [mask]
Error is only returned if dataerr is not None
mask is only returned if returnmask is True
Relies heavily (completely) on azimint().
"""
if isinstance(data, np.ndarray):
data = data.astype(np.double)
if isinstance(dataerr, np.ndarray):
dataerr = dataerr.astype(np.double)
if isinstance(mask, np.ndarray):
mask = mask.astype(np.uint8)
return azimint(data, dataerr, -1, -1,
- 1, bcx, bcy, mask, Ntheta, pixmin,
pixmax, returnmask, errorpropagation)
|
[
"def",
"azimintpix",
"(",
"data",
",",
"dataerr",
",",
"bcx",
",",
"bcy",
",",
"mask",
"=",
"None",
",",
"Ntheta",
"=",
"100",
",",
"pixmin",
"=",
"0",
",",
"pixmax",
"=",
"np",
".",
"inf",
",",
"returnmask",
"=",
"False",
",",
"errorpropagation",
"=",
"2",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
":",
"data",
"=",
"data",
".",
"astype",
"(",
"np",
".",
"double",
")",
"if",
"isinstance",
"(",
"dataerr",
",",
"np",
".",
"ndarray",
")",
":",
"dataerr",
"=",
"dataerr",
".",
"astype",
"(",
"np",
".",
"double",
")",
"if",
"isinstance",
"(",
"mask",
",",
"np",
".",
"ndarray",
")",
":",
"mask",
"=",
"mask",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
"return",
"azimint",
"(",
"data",
",",
"dataerr",
",",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"bcx",
",",
"bcy",
",",
"mask",
",",
"Ntheta",
",",
"pixmin",
",",
"pixmax",
",",
"returnmask",
",",
"errorpropagation",
")"
] | 44.448276 | 16.206897 |
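A hedged call sketch with synthetic data; it assumes `numpy` and the underlying `azimint()` are available, and the four-value return follows the docstring for a non-None `dataerr`:

import numpy as np

frame = np.random.rand(64, 64)   # fake scattering pattern
err = 0.1 * np.ones_like(frame)  # fake uniform errors
theta, intensity, error, area = azimintpix(frame, err, bcx=32.0, bcy=32.0,
                                           Ntheta=36, pixmax=30)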
def fix_flags(self, flags):
"""Fixes standard TensorBoard CLI flags to parser."""
FlagsError = base_plugin.FlagsError
if flags.version_tb:
pass
elif flags.inspect:
if flags.logdir and flags.event_file:
raise FlagsError(
'Must specify either --logdir or --event_file, but not both.')
if not (flags.logdir or flags.event_file):
raise FlagsError('Must specify either --logdir or --event_file.')
elif not flags.db and not flags.logdir:
raise FlagsError('A logdir or db must be specified. '
'For example `tensorboard --logdir mylogdir` '
'or `tensorboard --db sqlite:~/.tensorboard.db`. '
'Run `tensorboard --helpfull` for details and examples.')
if flags.path_prefix.endswith('/'):
flags.path_prefix = flags.path_prefix[:-1]
|
[
"def",
"fix_flags",
"(",
"self",
",",
"flags",
")",
":",
"FlagsError",
"=",
"base_plugin",
".",
"FlagsError",
"if",
"flags",
".",
"version_tb",
":",
"pass",
"elif",
"flags",
".",
"inspect",
":",
"if",
"flags",
".",
"logdir",
"and",
"flags",
".",
"event_file",
":",
"raise",
"FlagsError",
"(",
"'Must specify either --logdir or --event_file, but not both.'",
")",
"if",
"not",
"(",
"flags",
".",
"logdir",
"or",
"flags",
".",
"event_file",
")",
":",
"raise",
"FlagsError",
"(",
"'Must specify either --logdir or --event_file.'",
")",
"elif",
"not",
"flags",
".",
"db",
"and",
"not",
"flags",
".",
"logdir",
":",
"raise",
"FlagsError",
"(",
"'A logdir or db must be specified. '",
"'For example `tensorboard --logdir mylogdir` '",
"'or `tensorboard --db sqlite:~/.tensorboard.db`. '",
"'Run `tensorboard --helpfull` for details and examples.'",
")",
"if",
"flags",
".",
"path_prefix",
".",
"endswith",
"(",
"'/'",
")",
":",
"flags",
".",
"path_prefix",
"=",
"flags",
".",
"path_prefix",
"[",
":",
"-",
"1",
"]"
] | 44.947368 | 18.052632 |
def set_inasafe_default_value_qsetting(
qsetting, category, inasafe_field_key, value):
"""Helper method to set inasafe default value to qsetting.
:param qsetting: QSettings.
:type qsetting: QSettings
:param category: Category of the default value. It can be global or
recent. Global means the global setting for default value. Recent
means the last set custom for default value from the user.
:type category: str
:param inasafe_field_key: Key for the field.
:type inasafe_field_key: str
:param value: Value of the inasafe_default_value.
:type value: float, int
"""
key = 'inasafe/default_value/%s/%s' % (category, inasafe_field_key)
qsetting.setValue(key, value)
|
[
"def",
"set_inasafe_default_value_qsetting",
"(",
"qsetting",
",",
"category",
",",
"inasafe_field_key",
",",
"value",
")",
":",
"key",
"=",
"'inasafe/default_value/%s/%s'",
"%",
"(",
"category",
",",
"inasafe_field_key",
")",
"qsetting",
".",
"setValue",
"(",
"key",
",",
"value",
")"
] | 35.95 | 19.1 |
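A minimal sketch; the PyQt5 import, the organization/application names, and the field key are assumptions, not taken from the snippet:

from PyQt5.QtCore import QSettings

qs = QSettings('my_org', 'my_app')  # hypothetical organization/application
set_inasafe_default_value_qsetting(qs, 'global', 'female_ratio_field', 0.5)
print(qs.value('inasafe/default_value/global/female_ratio_field'))  # -> 0.5 (possibly as a string, depending on the backend)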
def listDatasetArray(self, **kwargs):
"""
API to list datasets in DBS.
:param dataset: list of datasets [dataset1,dataset2,..,dataset n] (Required if dataset_id is not present), Max length 1000.
:type dataset: list
:param dataset_id: list of dataset_ids that are the primary keys of datasets table: [dataset_id1,dataset_id2,..,dataset_idn] (Required if dataset is not present), Max length 1000.
:type dataset_id: list
:param dataset_access_type: List only datasets with that dataset access type (Optional)
:type dataset_access_type: str
:param detail: brief list or detailed list 1/0
:type detail: bool
:returns: List of dictionaries containing the following keys (dataset). If the detail option is used. The dictionary contains the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts
"""
validParameters = ['dataset', 'dataset_access_type', 'detail', 'dataset_id']
requiredParameters = {'multiple': ['dataset', 'dataset_id']}
checkInputParameter(method="listDatasetArray", parameters=kwargs.keys(), validParameters=validParameters,
requiredParameters=requiredParameters)
#set defaults
if 'detail' not in kwargs.keys():
kwargs['detail'] = False
return self.__callServer("datasetlist", data=kwargs, callmethod='POST')
|
[
"def",
"listDatasetArray",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"validParameters",
"=",
"[",
"'dataset'",
",",
"'dataset_access_type'",
",",
"'detail'",
",",
"'dataset_id'",
"]",
"requiredParameters",
"=",
"{",
"'multiple'",
":",
"[",
"'dataset'",
",",
"'dataset_id'",
"]",
"}",
"checkInputParameter",
"(",
"method",
"=",
"\"listDatasetArray\"",
",",
"parameters",
"=",
"kwargs",
".",
"keys",
"(",
")",
",",
"validParameters",
"=",
"validParameters",
",",
"requiredParameters",
"=",
"requiredParameters",
")",
"#set defaults",
"if",
"'detail'",
"not",
"in",
"kwargs",
".",
"keys",
"(",
")",
":",
"kwargs",
"[",
"'detail'",
"]",
"=",
"False",
"return",
"self",
".",
"__callServer",
"(",
"\"datasetlist\"",
",",
"data",
"=",
"kwargs",
",",
"callmethod",
"=",
"'POST'",
")"
] | 60.481481 | 43.592593 |
def disable_ap_port(self, apid, port):
"""临时关闭接入点端口
临时关闭接入点端口,仅对公网域名,公网ip有效。
Args:
- apid: 接入点ID
- port: 要设置的端口号
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息
"""
url = '{0}/v3/aps/{1}/{2}/disable'.format(self.host, apid, port)
return self.__post(url)
|
[
"def",
"disable_ap_port",
"(",
"self",
",",
"apid",
",",
"port",
")",
":",
"url",
"=",
"'{0}/v3/aps/{1}/{2}/disable'",
".",
"format",
"(",
"self",
".",
"host",
",",
"apid",
",",
"port",
")",
"return",
"self",
".",
"__post",
"(",
"url",
")"
] | 28.5 | 18.8125 |
def _periodicfeatures_worker(task):
'''
This is a parallel worker for the drivers below.
'''
pfpickle, lcbasedir, outdir, starfeatures, kwargs = task
try:
return get_periodicfeatures(pfpickle,
lcbasedir,
outdir,
starfeatures=starfeatures,
**kwargs)
except Exception as e:
LOGEXCEPTION('failed to get periodicfeatures for %s' % pfpickle)
|
[
"def",
"_periodicfeatures_worker",
"(",
"task",
")",
":",
"pfpickle",
",",
"lcbasedir",
",",
"outdir",
",",
"starfeatures",
",",
"kwargs",
"=",
"task",
"try",
":",
"return",
"get_periodicfeatures",
"(",
"pfpickle",
",",
"lcbasedir",
",",
"outdir",
",",
"starfeatures",
"=",
"starfeatures",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'failed to get periodicfeatures for %s'",
"%",
"pfpickle",
")"
] | 26.736842 | 24.315789 |
def add_directory(self, relativePath, info=None):
"""
Adds a directory in the repository and creates its
attribute in the Repository with utc timestamp.
It ensures adding all the missing directories in the path.
:Parameters:
#. relativePath (string): The relative to the repository path of the directory to add in the repository.
#. info (None, string, pickable object): Any random info about the folder.
:Returns:
#. info (dict): The directory info dict.
"""
path = os.path.normpath(relativePath)
# create directories
currentDir = self.path
currentDict = self
if path in ("","."):
return currentDict
save = False
for dir in path.split(os.sep):
dirPath = os.path.join(currentDir, dir)
# create directory
if not os.path.exists(dirPath):
os.mkdir(dirPath)
# create dictionary key
currentDict = dict.__getitem__(currentDict, "directories")
if currentDict.get(dir, None) is None:
save = True
currentDict[dir] = {"directories":{}, "files":{},
"timestamp":datetime.utcnow(),
"id":str(uuid.uuid1()),
"info": info} # INFO MUST BE SET ONLY FOR THE LAST DIRECTORY
currentDict = currentDict[dir]
currentDir = dirPath
# save repository
if save:
self.save()
# return currentDict
return currentDict
|
[
"def",
"add_directory",
"(",
"self",
",",
"relativePath",
",",
"info",
"=",
"None",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"relativePath",
")",
"# create directories",
"currentDir",
"=",
"self",
".",
"path",
"currentDict",
"=",
"self",
"if",
"path",
"in",
"(",
"\"\"",
",",
"\".\"",
")",
":",
"return",
"currentDict",
"save",
"=",
"False",
"for",
"dir",
"in",
"path",
".",
"split",
"(",
"os",
".",
"sep",
")",
":",
"dirPath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"currentDir",
",",
"dir",
")",
"# create directory",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dirPath",
")",
":",
"os",
".",
"mkdir",
"(",
"dirPath",
")",
"# create dictionary key",
"currentDict",
"=",
"dict",
".",
"__getitem__",
"(",
"currentDict",
",",
"\"directories\"",
")",
"if",
"currentDict",
".",
"get",
"(",
"dir",
",",
"None",
")",
"is",
"None",
":",
"save",
"=",
"True",
"currentDict",
"[",
"dir",
"]",
"=",
"{",
"\"directories\"",
":",
"{",
"}",
",",
"\"files\"",
":",
"{",
"}",
",",
"\"timestamp\"",
":",
"datetime",
".",
"utcnow",
"(",
")",
",",
"\"id\"",
":",
"str",
"(",
"uuid",
".",
"uuid1",
"(",
")",
")",
",",
"\"info\"",
":",
"info",
"}",
"# INFO MUST BE SET ONLY FOR THE LAST DIRECTORY",
"currentDict",
"=",
"currentDict",
"[",
"dir",
"]",
"currentDir",
"=",
"dirPath",
"# save repository",
"if",
"save",
":",
"self",
".",
"save",
"(",
")",
"# return currentDict",
"return",
"currentDict"
] | 39.170732 | 18.390244 |
def get_instance(self, payload):
"""
Build an instance of ReservationInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.worker.reservation.ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.worker.reservation.ReservationInstance
"""
return ReservationInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
worker_sid=self._solution['worker_sid'],
)
|
[
"def",
"get_instance",
"(",
"self",
",",
"payload",
")",
":",
"return",
"ReservationInstance",
"(",
"self",
".",
"_version",
",",
"payload",
",",
"workspace_sid",
"=",
"self",
".",
"_solution",
"[",
"'workspace_sid'",
"]",
",",
"worker_sid",
"=",
"self",
".",
"_solution",
"[",
"'worker_sid'",
"]",
",",
")"
] | 36.133333 | 21.066667 |
def scheduled_times(self, earliest_time='now', latest_time='+1h'):
"""Returns the times when this search is scheduled to run.
By default this method returns the times in the next hour. For different
time ranges, set *earliest_time* and *latest_time*. For example,
for all times in the last day use "earliest_time=-1d" and
"latest_time=now".
:param earliest_time: The earliest time.
:type earliest_time: ``string``
:param latest_time: The latest time.
:type latest_time: ``string``
:return: The list of search times.
"""
response = self.get("scheduled_times",
earliest_time=earliest_time,
latest_time=latest_time)
data = self._load_atom_entry(response)
rec = _parse_atom_entry(data)
times = [datetime.fromtimestamp(int(t))
for t in rec.content.scheduled_times]
return times
|
[
"def",
"scheduled_times",
"(",
"self",
",",
"earliest_time",
"=",
"'now'",
",",
"latest_time",
"=",
"'+1h'",
")",
":",
"response",
"=",
"self",
".",
"get",
"(",
"\"scheduled_times\"",
",",
"earliest_time",
"=",
"earliest_time",
",",
"latest_time",
"=",
"latest_time",
")",
"data",
"=",
"self",
".",
"_load_atom_entry",
"(",
"response",
")",
"rec",
"=",
"_parse_atom_entry",
"(",
"data",
")",
"times",
"=",
"[",
"datetime",
".",
"fromtimestamp",
"(",
"int",
"(",
"t",
")",
")",
"for",
"t",
"in",
"rec",
".",
"content",
".",
"scheduled_times",
"]",
"return",
"times"
] | 41.478261 | 15.608696 |
def pop(self,
num_items: int,
type_hint: str) -> Union[int, bytes, Tuple[Union[int, bytes], ...]]:
"""
Pop an item off the stack.
Note: This function is optimized for speed over readability.
"""
try:
if num_items == 1:
return next(self._pop(num_items, type_hint))
else:
return tuple(self._pop(num_items, type_hint))
except IndexError:
raise InsufficientStack("No stack items")
|
[
"def",
"pop",
"(",
"self",
",",
"num_items",
":",
"int",
",",
"type_hint",
":",
"str",
")",
"->",
"Union",
"[",
"int",
",",
"bytes",
",",
"Tuple",
"[",
"Union",
"[",
"int",
",",
"bytes",
"]",
",",
"...",
"]",
"]",
":",
"try",
":",
"if",
"num_items",
"==",
"1",
":",
"return",
"next",
"(",
"self",
".",
"_pop",
"(",
"num_items",
",",
"type_hint",
")",
")",
"else",
":",
"return",
"tuple",
"(",
"self",
".",
"_pop",
"(",
"num_items",
",",
"type_hint",
")",
")",
"except",
"IndexError",
":",
"raise",
"InsufficientStack",
"(",
"\"No stack items\"",
")"
] | 33.533333 | 18.866667 |
def est_kl_divergence(self, other, kernel=None, delta=1e-2):
"""
Finds the KL divergence between this and another particle
distribution by using a kernel density estimator to smooth over the
other distribution's particles.
:param SMCUpdater other:
"""
return self._kl_divergence(
other.particle_locations,
other.particle_weights,
kernel, delta
)
|
[
"def",
"est_kl_divergence",
"(",
"self",
",",
"other",
",",
"kernel",
"=",
"None",
",",
"delta",
"=",
"1e-2",
")",
":",
"return",
"self",
".",
"_kl_divergence",
"(",
"other",
".",
"particle_locations",
",",
"other",
".",
"particle_weights",
",",
"kernel",
",",
"delta",
")"
] | 33.384615 | 14.461538 |
def insert_query_m(data, table, conn, columns=None, db_type='mysql'):
""" Insert python list of tuples into SQL table
Args:
data (list): List of tuples
table (str): Name of database table
conn (connection object): database connection object
columns (str): String of column names to use; if not assigned, all columns are presumed to be used [Optional]
db_type (str): Either "sqlite" or "mysql"
"""
# if length of data is very large we need to break into chunks the insert_query_m is then used recursively until
# all data has been inserted
if len(data) > 10000:
_chunk_query(data, 10000, columns, conn, table, db_type)
else:
# sqlite and mysql have type string (? or %s) reference to use
if db_type == 'sqlite':
type_sign = '?'
else:
type_sign = '%s'
# create a string of types for the insertion string (e.g. ?,?,? if inserting 3 columns of data)
type_com = type_sign + ", "
type = type_com * (len(data[0]) - 1)
type = type + type_sign
# if using specific columns to insert data
if columns:
stmt = "INSERT INTO " + table + "( " + columns + ") VALUES (" + type + ")"
else:
stmt = "INSERT INTO " + table + " VALUES (" + type + ")"
# execute query
cursor = conn.cursor()
cursor.executemany(stmt, data)
conn.commit()
|
[
"def",
"insert_query_m",
"(",
"data",
",",
"table",
",",
"conn",
",",
"columns",
"=",
"None",
",",
"db_type",
"=",
"'mysql'",
")",
":",
"# if length of data is very large we need to break into chunks the insert_query_m is then used recursively untill",
"# all data has been inserted",
"if",
"len",
"(",
"data",
")",
">",
"10000",
":",
"_chunk_query",
"(",
"data",
",",
"10000",
",",
"columns",
",",
"conn",
",",
"table",
",",
"db_type",
")",
"else",
":",
"# sqlite and mysql have type string (? or %s) reference to use",
"if",
"db_type",
"==",
"'sqlite'",
":",
"type_sign",
"=",
"'?'",
"else",
":",
"type_sign",
"=",
"'%s'",
"# create a string of types for the insertion string (e.g. ?,?,? if inserting 3 columns of data)",
"type_com",
"=",
"type_sign",
"+",
"\", \"",
"type",
"=",
"type_com",
"*",
"(",
"len",
"(",
"data",
"[",
"0",
"]",
")",
"-",
"1",
")",
"type",
"=",
"type",
"+",
"type_sign",
"# if using specific columns to insert data",
"if",
"columns",
":",
"stmt",
"=",
"\"INSERT INTO \"",
"+",
"table",
"+",
"\"( \"",
"+",
"columns",
"+",
"\") VALUES (\"",
"+",
"type",
"+",
"\")\"",
"else",
":",
"stmt",
"=",
"\"INSERT INTO \"",
"+",
"table",
"+",
"\" VALUES (\"",
"+",
"type",
"+",
"\")\"",
"# execute query",
"cursor",
"=",
"conn",
".",
"cursor",
"(",
")",
"cursor",
".",
"executemany",
"(",
"stmt",
",",
"data",
")",
"conn",
".",
"commit",
"(",
")"
] | 39.333333 | 23.333333 |
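A self-contained sketch against an in-memory SQLite database; the table and rows are invented, and `insert_query_m` (plus `_chunk_query` for large inputs) is assumed to be in scope:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE points (x INTEGER, y INTEGER)")
insert_query_m([(1, 2), (3, 4), (5, 6)], "points", conn,
               columns="x, y", db_type="sqlite")
print(conn.execute("SELECT COUNT(*) FROM points").fetchone())  # -> (3,)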
def _process_status(self, status):
""" Process latest status update. """
self._screen_id = status.get(ATTR_SCREEN_ID)
self.status_update_event.set()
|
[
"def",
"_process_status",
"(",
"self",
",",
"status",
")",
":",
"self",
".",
"_screen_id",
"=",
"status",
".",
"get",
"(",
"ATTR_SCREEN_ID",
")",
"self",
".",
"status_update_event",
".",
"set",
"(",
")"
] | 42.25 | 5 |