Dataset columns: text (string, 89-104k chars) | code_tokens (list) | avg_line_len (float64, 7.91-980) | score (float64, 0-630)
def spectral_density(self, two_pi=True, res=1200):
    r"""
    Compute the spectral density function. The spectral density is
    the discrete-time Fourier transform of the autocovariance
    function. In particular,

    .. math::

        f(w) = \sum_k \gamma(k) \exp(-ikw)

    where :math:`\gamma` is the autocovariance function and the sum is
    over the set of all integers.

    Parameters
    ----------
    two_pi : bool, optional
        Compute the spectral density function over :math:`[0, \pi]` if
        two_pi is False and :math:`[0, 2 \pi]` otherwise. Default value
        is True.
    res : scalar or array_like(int), optional(default=1200)
        If res is a scalar then the spectral density is computed at
        `res` frequencies evenly spaced around the unit circle, but
        if res is an array then the function computes the response
        at the frequencies given by the array.

    Returns
    -------
    w : array_like(float)
        The normalized frequencies at which h was computed, in
        radians/sample
    spect : array_like(float)
        The frequency response

    """
    from scipy.signal import freqz
    from numpy import conj  # conj is not otherwise in scope in this snippet

    w, h = freqz(self.ma_poly, self.ar_poly, worN=res, whole=two_pi)
    spect = h * conj(h) * self.sigma**2
    return w, spect
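# Usage sketch: the attributes used above (ma_poly, ar_poly, sigma) match
# quantecon's ARMA class; the import path and constructor below are
# assumptions based on that library, not part of the original snippet.
from quantecon import ARMA

lp = ARMA(phi=0.5, theta=0.0, sigma=1.0)  # an AR(1) process with coefficient 0.5
w, spect = lp.spectral_density(res=120)   # 120 frequencies on [0, 2*pi)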
def Spring(
    startPoint=(0, 0, 0),
    endPoint=(1, 0, 0),
    coils=20,
    r=0.1,
    r2=None,
    thickness=None,
    c="grey",
    alpha=1,
):
    """
    Build a spring of specified number of `coils` between `startPoint` and `endPoint`.

    :param int coils: number of coils
    :param float r: radius at start point
    :param float r2: radius at end point
    :param float thickness: thickness of the coil section

    .. hint:: |aspring| |aspring.py|_
    """
    diff = endPoint - np.array(startPoint)
    length = np.linalg.norm(diff)
    if not length:
        return None
    if not r:
        r = length / 20
    trange = np.linspace(0, length, num=50 * coils)
    om = 6.283 * (coils - 0.5) / length  # 6.283 ~ 2*pi: angular frequency of the helix
    if not r2:
        r2 = r
    pts = []
    for t in trange:
        f = (length - t) / length
        rd = r * f + r2 * (1 - f)  # radius interpolated linearly from r to r2
        pts.append([rd * np.cos(om * t), rd * np.sin(om * t), t])
    pts = [[0, 0, 0]] + pts + [[0, 0, length]]
    diff = diff / length
    theta = np.arccos(diff[2])
    phi = np.arctan2(diff[1], diff[0])
    sp = Line(pts).polydata(False)
    t = vtk.vtkTransform()
    t.RotateZ(phi * 57.3)    # 57.3 ~ 180/pi: radians to degrees
    t.RotateY(theta * 57.3)
    tf = vtk.vtkTransformPolyDataFilter()
    tf.SetInputData(sp)
    tf.SetTransform(t)
    tf.Update()
    tuf = vtk.vtkTubeFilter()
    tuf.SetNumberOfSides(12)
    tuf.CappingOn()
    tuf.SetInputData(tf.GetOutput())
    if not thickness:
        thickness = r / 10
    tuf.SetRadius(thickness)
    tuf.Update()
    poly = tuf.GetOutput()
    actor = Actor(poly, c, alpha)
    actor.GetProperty().SetInterpolationToPhong()
    actor.SetPosition(startPoint)
    actor.base = np.array(startPoint)
    actor.top = np.array(endPoint)
    settings.collectable_actors.append(actor)
    return actor
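# Usage sketch: Actor, Line and settings above match vtkplotter's shapes
# module; the import paths below are assumptions based on that library.
from vtkplotter import show
from vtkplotter.shapes import Spring

s = Spring((0, 0, 0), (1, 1, 1), coils=15, r=0.2, thickness=0.02, c="tomato")
show(s)  # renders the helix in an interactive window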
def parse_named_unicode(self, i):
    """Parse named Unicode."""

    value = ord(_unicodedata.lookup(self.get_named_unicode(i)))
    single = self.get_single_stack()
    if self.span_stack:
        text = self.convert_case(chr(value), self.span_stack[-1])
        value = ord(self.convert_case(text, single)) if single is not None else ord(text)
    elif single:
        value = ord(self.convert_case(chr(value), single))
    if self.use_format and value in _CURLY_BRACKETS_ORD:
        self.handle_format(chr(value), i)
    elif value <= 0xFF:
        self.result.append('\\%03o' % value)
    else:
        self.result.append(chr(value))
def get_hosting_devices_for_agent(self, context):
    """Get a list of hosting devices assigned to this agent."""
    cctxt = self.client.prepare()
    return cctxt.call(context,
                      'get_hosting_devices_for_agent',
                      host=self.host)
def update_function_config(FunctionName, Role=None, Handler=None,
                           Description=None, Timeout=None, MemorySize=None,
                           region=None, key=None, keyid=None, profile=None,
                           VpcConfig=None, WaitForRole=False, RoleRetries=5,
                           Environment=None):
    '''
    .. versionadded:: 2017.7.0

    Update the configuration of the named Lambda function.

    Environment
        The parent object that contains your environment's configuration
        settings. This is a dictionary of the form:

        .. code-block:: python

            {
                'Variables': {
                    'VariableName': 'VariableValue'
                }
            }

    Returns ``{'updated': True}`` if the function was updated, and
    ``{'updated': False}`` if the function was not updated.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_lambda.update_function_config my_function my_role my_file.my_function "my lambda function"

    '''
    args = dict(FunctionName=FunctionName)
    options = {'Handler': Handler,
               'Description': Description,
               'Timeout': Timeout,
               'MemorySize': MemorySize,
               'VpcConfig': VpcConfig,
               'Environment': Environment}
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    for val, var in six.iteritems(options):
        if var:
            args[val] = var
    if Role:
        args['Role'] = _get_role_arn(Role, region, key, keyid, profile)
    if VpcConfig:
        args['VpcConfig'] = _resolve_vpcconfig(VpcConfig, region=region, key=key, keyid=keyid, profile=profile)
    try:
        if WaitForRole:
            retrycount = RoleRetries
        else:
            retrycount = 1
        for retry in range(retrycount, 0, -1):
            try:
                r = conn.update_function_configuration(**args)
            except ClientError as e:
                if retry > 1 and e.response.get('Error', {}).get('Code') == 'InvalidParameterValueException':
                    log.info(
                        'Function not updated but IAM role may not have propagated, will retry')
                    # exponential backoff
                    time.sleep((2 ** (RoleRetries - retry)) +
                               (random.randint(0, 1000) / 1000))
                    continue
                else:
                    raise
            else:
                break
        if r:
            keys = ('FunctionName', 'Runtime', 'Role', 'Handler', 'CodeSha256',
                    'CodeSize', 'Description', 'Timeout', 'MemorySize',
                    'FunctionArn', 'LastModified', 'VpcConfig', 'Environment')
            return {'updated': True, 'function': dict([(k, r.get(k)) for k in keys])}
        else:
            log.warning('Function was not updated')
            return {'updated': False}
    except ClientError as e:
        return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
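# Worked example of the retry schedule above: with the default RoleRetries=5,
# retry counts down 5, 4, 3, 2, 1 and each failed attempt (except the last,
# which re-raises) sleeps 2 ** (RoleRetries - retry) seconds plus jitter.
for retry in range(5, 1, -1):
    print(retry, 2 ** (5 - retry))  # -> delays of 1 s, 2 s, 4 s, 8 s before retries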
def access_time(self):
    """dfdatetime.DateTimeValues: access time or None if not available."""
    timestamp = self._fsapfs_file_entry.get_access_time_as_integer()
    return dfdatetime_apfs_time.APFSTime(timestamp=timestamp)
def report(self):
    """
    Report usage of training parameters.
    """
    if self.logger:
        self.logger.info("accessed parameters:")
        for key in self.used_parameters:
            self.logger.info(" - %s %s" % (key, "(undefined)" if key in self.undefined_parameters else ""))
def kde(data, npoints=_npoints):
    """
    Identify peak using Gaussian kernel density estimator.

    Parameters
    ----------
    data : The 1d data sample
    npoints : The number of kde points to evaluate
    """
    # Clipping of severe outliers to concentrate more KDE samples in the parameter range of interest
    mad = np.median(np.fabs(np.median(data) - data))
    cut = (data > np.median(data) - 5. * mad) & (data < np.median(data) + 5. * mad)
    x = data[cut]
    kde = scipy.stats.gaussian_kde(x)
    # No penalty for using a finer sampling for KDE evaluation except computation time
    values = np.linspace(np.min(x), np.max(x), npoints)
    kde_values = kde.evaluate(values)
    peak = values[np.argmax(kde_values)]
    return peak, kde.evaluate(peak)
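# Usage sketch with synthetic data; assumes numpy and scipy.stats are the
# module-level imports the function body already relies on.
import numpy as np

data = np.random.normal(loc=2.0, scale=0.5, size=5000)
peak, density = kde(data, npoints=1000)  # peak should land near 2.0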
def check_version_consistency(self):
    """
    Determine if any releasers have inconsistent versions
    """
    version = None
    releaser_name = None
    for releaser in self.releasers:
        try:
            next_version = releaser.determine_current_version()
        except NotImplementedError:
            continue
        if next_version and version and version != next_version:
            raise Exception('Inconsistent versions, {} is at {} but {} is at {}.'.format(
                releaser_name, version, releaser.name, next_version))
        version = next_version
        releaser_name = releaser.name
def set_inlets(self, pores=[], overwrite=False):
    r"""
    Parameters
    ----------
    pores : array_like
        The list of inlet pores from which the Phase can enter the Network
    """
    if overwrite:
        self['pore.invasion_sequence'] = -1
    self['pore.invasion_sequence'][pores] = 0
    # Perform initial analysis on input pores
    Ts = self.project.network.find_neighbor_throats(pores=pores)
    self.queue = []
    for T in self['throat.order'][Ts]:
        hq.heappush(self.queue, T)
def _format_value(value):
    """Returns `value` in a format parseable by `parse_value`, or `None`.

    Simply put, this function ensures that when it returns a string value, the
    following will hold:

        parse_value(_format_value(value)) == value

    Args:
        value: The value to format.

    Returns:
        A string representation of `value` when `value` is literally representable,
        or `None`.
    """
    literal = repr(value)
    try:
        if parse_value(literal) == value:
            return literal
    except SyntaxError:
        pass
    return None
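# Illustration of the round-trip guarantee; parse_value is assumed to
# evaluate Python literals, so these results are indicative only.
_format_value(3)         # -> '3'
_format_value([1, 'a'])  # -> "[1, 'a']"
_format_value(object())  # -> None; a repr like '<object ...>' is not parseable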
def _get_requested_spec(self, obj, spec_name):
    """Helper to translate user specifications to needed objects."""
    requested = self._specs_in[spec_name]
    if isinstance(requested, str):
        return _get_attr_by_tag(obj, requested, spec_name)
    else:
        return requested
def p_expr_add_term(self, args):
    ' expr ::= expr ADD_OP term '
    op = 'add' if args[1].attr == '+' else 'subtract'
    return AST(op, [args[0], args[2]])
def channel_is_opened(
        self,
        participant1: Address,
        participant2: Address,
        block_identifier: BlockSpecification,
        channel_identifier: ChannelID,
) -> bool:
    """ Returns true if the channel is in an open state, false otherwise. """
    try:
        channel_state = self._get_channel_state(
            participant1=participant1,
            participant2=participant2,
            block_identifier=block_identifier,
            channel_identifier=channel_identifier,
        )
    except RaidenRecoverableError:
        return False
    return channel_state == ChannelState.OPENED
def explore_batch(traj, batch):
    """Chooses exploration according to `batch`"""
    explore_dict = {}
    explore_dict['sigma'] = np.arange(10.0 * batch, 10.0 * (batch + 1), 1.0).tolist()
    # for batch = 0 explores sigma in [0.0, 1.0, 2.0, ..., 9.0],
    # for batch = 1 explores sigma in [10.0, 11.0, 12.0, ..., 19.0]
    # and so on
    traj.f_explore(explore_dict)
def create_sparse_instance(cls, values, max_values, classname="weka.core.SparseInstance", weight=1.0):
    """
    Creates a new sparse instance.

    :param values: the list of tuples (0-based index and internal format float). The indices of the
                   tuples must be in ascending order and "max_values" must be set to the maximum
                   number of attributes in the dataset.
    :type values: list
    :param max_values: the maximum number of attributes
    :type max_values: int
    :param classname: the classname of the instance (eg weka.core.SparseInstance).
    :type classname: str
    :param weight: the weight of the instance
    :type weight: float
    """
    jni_classname = classname.replace(".", "/")
    indices = []
    vals = []
    for (i, v) in values:
        indices.append(i)
        vals.append(float(v))
    indices = numpy.array(indices, dtype=numpy.int32)
    vals = numpy.array(vals)
    return Instance(
        javabridge.make_instance(
            jni_classname, "(D[D[II)V",
            weight, javabridge.get_env().make_double_array(vals),
            javabridge.get_env().make_int_array(indices), max_values))
def seconds_to_time(x):
    """Convert a number of seconds into a time"""
    t = int(x * 10**6)
    ms = t % 10**6       # residual microseconds (time()'s 4th argument)
    t = t // 10**6
    s = t % 60
    t = t // 60
    m = t % 60
    t = t // 60
    h = t
    return time(h, m, s, ms)
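# Worked example (time here is datetime.time): 3661.5 s is
# 1 h + 1 min + 1 s + 500000 us.
seconds_to_time(3661.5)  # -> datetime.time(1, 1, 1, 500000)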
def create_output(decoder_output, rows, cols, targets, hparams):
    """Creates output from decoder output and vars.

    Args:
        decoder_output: Tensor of shape [batch, ...], where ... can be any rank such
            that the number of elements is batch * rows * cols * hparams.hidden_size.
        rows: Integer representing number of rows in a 2-D data point.
        cols: Integer representing number of columns in a 2-D data point.
        targets: Tensor of shape [batch, hparams.img_len, hparams.img_len,
            hparams.num_channels].
        hparams: HParams set.

    Returns:
        Tensor of shape [batch, hparams.img_len, hparams.img_len,
        hparams.num_mixtures * 10] if hparams.likelihood is DMOL, otherwise
        [batch, hparams.img_len, hparams.img_len, hparams.num_channels, 256].
        In the special case of predict mode, it is a Tensor of rank 5.
    """
    del targets  # unused arg
    decoded_image = postprocess_image(decoder_output, rows, cols, hparams)
    batch = common_layers.shape_list(decoded_image)[0]
    depth = common_layers.shape_list(decoded_image)[-1]
    likelihood = getattr(hparams, "likelihood", DistributionType.CAT)
    if hparams.mode == tf.estimator.ModeKeys.PREDICT:
        y = tf.reshape(decoded_image, [batch, -1, 1, 1, depth])
        output = y[:, :rows, :, :, :]
    elif likelihood == DistributionType.CAT:
        # Unpack the cols dimension of the Categorical.
        channels = hparams.num_channels
        output = tf.reshape(decoded_image,
                            [batch, rows, cols // channels, channels, depth])
    else:
        output = decoded_image
    return output
def send(self, jsonstr):
    """
    Send jsonstr to the UDP collector

    >>> logger = UDPLogger()
    >>> logger.send('{"key": "value"}')
    """
    # socket, AF_INET and SOCK_DGRAM are presumably module-level imports
    udp_sock = socket(AF_INET, SOCK_DGRAM)
    udp_sock.sendto(jsonstr.encode('utf-8'), self.addr)
def get_item(item, **kwargs):
    """
    API versioning for each OpenStack service is independent. Generically capture
    the public members (non-routine and non-private) of the OpenStack SDK objects.

    Note the lack of the modify_output decorator. Preserving the field naming allows
    us to reconstruct objects and orchestrate from stored items.
    """
    _item = {}
    for k, v in inspect.getmembers(item, lambda a: not inspect.isroutine(a)):
        if not k.startswith('_') and k not in ignore_list:
            _item[k] = v
    return sub_dict(_item)
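# Illustration of the getmembers pattern on a stand-in object (ignore_list
# and sub_dict belong to the surrounding module and are not shown here).
import inspect

class Server:                # stand-in for an OpenStack SDK object
    name = 'web-1'
    status = 'ACTIVE'
    def reboot(self): pass   # routines are filtered out by the predicate

{k: v for k, v in inspect.getmembers(Server(), lambda a: not inspect.isroutine(a))
 if not k.startswith('_')}
# -> {'name': 'web-1', 'status': 'ACTIVE'}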
def _send_scp(self, x, y, p, *args, **kwargs):
    """Determine the best connection to use to send an SCP packet and use
    it to transmit.

    This internal version of the method is identical to send_scp except it
    has positional arguments for x, y and p.

    See the arguments for
    :py:meth:`~rig.machine_control.scp_connection.SCPConnection` for
    details.
    """
    # Determine the size of packet we expect in return, this is usually the
    # size that we are informed we should expect by SCAMP/SARK or else is
    # the default.
    if self._scp_data_length is None:
        length = consts.SCP_SVER_RECEIVE_LENGTH_MAX
    else:
        length = self._scp_data_length

    connection = self._get_connection(x, y)
    return connection.send_scp(length, x, y, p, *args, **kwargs)
def to_code(self, context: Context = None):
    """
    Generate the code and return it as a string.
    """
    # Do not override this method!
    context = context or Context()
    for imp in self.imports:
        if imp not in context.imports:
            context.imports.append(imp)
    counter = Counter()
    lines = list(self.to_lines(context=context, counter=counter))
    if counter.num_indented_non_doc_blocks == 0:
        if self.expects_body_or_pass:
            lines.append("    pass")
        elif self.closed_by:
            lines[-1] += self.closed_by
    else:
        if self.closed_by:
            lines.append(self.closed_by)
    return join_lines(*lines) + self._suffix
def add(self, session):
    """ Add session to the container.

    @param session: Session object
    """
    self._items[session.session_id] = session
    if session.expiry is not None:
        self._queue.push(session)
def _correct_build_location(self):
    """Move self._temp_build_dir to self._ideal_build_dir/self.req.name

    For some requirements (e.g. a path to a directory), the name of the
    package is not available until we run egg_info, so the build_location
    will return a temporary directory and store the _ideal_build_dir.

    This is only called by self.egg_info_path to fix the temporary build
    directory.
    """
    if self.source_dir is not None:
        return
    assert self.req is not None
    assert self._temp_build_dir
    assert self._ideal_build_dir
    old_location = self._temp_build_dir
    self._temp_build_dir = None
    new_location = self.build_location(self._ideal_build_dir)
    if os.path.exists(new_location):
        raise InstallationError(
            'A package already exists in %s; please remove it to continue'
            % display_path(new_location))
    logger.debug(
        'Moving package %s from %s to new location %s',
        self, display_path(old_location), display_path(new_location),
    )
    shutil.move(old_location, new_location)
    self._temp_build_dir = new_location
    self._ideal_build_dir = None
    self.source_dir = new_location
    self._egg_info_path = None
async def _handle_home(self, request: Request) -> Response:
    """Home page request handler."""
    if self.description:
        title = f'{self.name} - {self.description}'
    else:
        title = self.name
    text = dedent(
        f'''<!DOCTYPE html>
        <html>
          <head>
            <title>{title}</title>
          </head>
          <body>
            <h1>{title}</h1>
            <p>
              Metrics are exported at the
              <a href="/metrics">/metrics</a> endpoint.
            </p>
          </body>
        </html>
        ''')
    return Response(content_type='text/html', text=text)
def get_num_features(estimator):
    """ Return size of a feature vector estimator expects as an input. """
    if hasattr(estimator, 'coef_'):  # linear models
        if len(estimator.coef_.shape) == 0:
            return 1
        return estimator.coef_.shape[-1]
    elif hasattr(estimator, 'feature_importances_'):  # ensembles
        return estimator.feature_importances_.shape[-1]
    elif hasattr(estimator, 'feature_count_'):  # naive bayes
        return estimator.feature_count_.shape[-1]
    elif hasattr(estimator, 'theta_'):
        return estimator.theta_.shape[-1]
    elif hasattr(estimator, 'estimators_') and len(estimator.estimators_):
        # OvR
        return get_num_features(estimator.estimators_[0])
    else:
        raise ValueError("Can't figure out feature vector size for %s" %
                         estimator)
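# Sanity check with scikit-learn, whose fitted-estimator attribute names
# match the ones probed above.
import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.random.rand(20, 3)
y = np.random.randint(0, 2, size=20)
clf = LogisticRegression().fit(X, y)
get_num_features(clf)  # -> 3, since clf.coef_.shape == (1, 3)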
def is_type_target(self):
    """Returns the CustomType instance if this executable is an embedded
    procedure in a custom type declaration; else False.
    """
    if self._is_type_target is None:
        # All we need to do is search through the custom types in the parent
        # module and see if any of their executables points to this method.
        self._is_type_target = False
        for tkey in self.module.types:
            custype = self.module.types[tkey]
            for execkey, execinst in custype.executables.items():
                if execinst.target is self:
                    self._is_type_target = custype
                    break
            if self._is_type_target:
                break

    return self._is_type_target
def create_html_link(urlbase, urlargd, link_label, linkattrd=None,
                     escape_urlargd=True, escape_linkattrd=True,
                     urlhash=None):
    """Creates a W3C compliant link.

    @param urlbase: base url (e.g. config.CFG_SITE_URL/search)
    @param urlargd: dictionary of parameters. (e.g. p={'recid':3, 'of'='hb'})
    @param link_label: text displayed in a browser (has to be already escaped)
    @param linkattrd: dictionary of attributes (e.g. a={'class': 'img'})
    @param escape_urlargd: boolean indicating if the function should escape
                           arguments (e.g. < becomes &lt; or " becomes &quot;)
    @param escape_linkattrd: boolean indicating if the function should escape
                             attributes (e.g. < becomes &lt; or " becomes &quot;)
    @param urlhash: hash string to add at the end of the link
    """
    attributes_separator = ' '
    output = '<a href="' + \
             create_url(urlbase, urlargd, escape_urlargd, urlhash) + '"'
    if linkattrd:
        output += ' '
        if escape_linkattrd:
            attributes = [escape(str(key), quote=True) + '="' +
                          escape(str(linkattrd[key]), quote=True) + '"'
                          for key in linkattrd.keys()]
        else:
            attributes = [str(key) + '="' + str(linkattrd[key]) + '"'
                          for key in linkattrd.keys()]
        output += attributes_separator.join(attributes)
    output = wash_for_utf8(output)
    output += '>' + wash_for_utf8(link_label) + '</a>'
    return output
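# Illustrative call; the exact query-string escaping is delegated to
# create_url, so the output shown is approximate.
create_html_link('https://example.org/search', {'p': 'quantum', 'of': 'hb'},
                 'Search', linkattrd={'class': 'navlink'})
# -> roughly '<a href="https://example.org/search?p=quantum&of=hb" class="navlink">Search</a>'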
def dpll(clauses, symbols, model):
    "See if the clauses are true in a partial model."
    unknown_clauses = []  ## clauses with an unknown truth value
    for c in clauses:
        val = pl_true(c, model)
        if val == False:
            return False
        if val != True:
            unknown_clauses.append(c)
    if not unknown_clauses:
        return model
    P, value = find_pure_symbol(symbols, unknown_clauses)
    if P:
        return dpll(clauses, removeall(P, symbols), extend(model, P, value))
    P, value = find_unit_clause(clauses, model)
    if P:
        return dpll(clauses, removeall(P, symbols), extend(model, P, value))
    P, symbols = symbols[0], symbols[1:]
    return (dpll(clauses, symbols, extend(model, P, True)) or
            dpll(clauses, symbols, extend(model, P, False)))
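# Usage sketch in the style of the AIMA logic module this resembles; the
# helper names (expr, to_cnf, conjuncts, prop_symbols) are assumptions
# based on that codebase.
s = to_cnf(expr('(A | B) & (A | ~B) & (~A | B)'))
model = dpll(conjuncts(s), prop_symbols(s), {})
# -> a satisfying assignment such as {A: True, B: True}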
def _clause_formatter(self, cond):
    '''Formats conditions

    args is a list of ['field', 'operator', 'value']
    '''
    if len(cond) == 2:
        cond = ' '.join(cond)
        return cond

    if 'in' in cond[1].lower():
        if not isinstance(cond[2], (tuple, list)):
            raise TypeError('("{0}") must be of type <type tuple> or <type list>'.format(cond[2]))
        if 'select' not in cond[2][0].lower():
            cond[2] = "({0})".format(','.join(map(str, ["'{0}'".format(e) for e in cond[2]])))
        else:
            cond[2] = "({0})".format(','.join(map(str, ["{0}".format(e) for e in cond[2]])))
        cond = " ".join(cond)
    else:
        #if isinstance(cond[2], str):
        #    var = re.match('^@(\w+)$', cond[2])
        #else:
        #    var = None
        #if var :
        if isinstance(cond[2], str) and cond[2].startswith('@'):
            cond[2] = "{0}".format(cond[2])
        else:
            cond[2] = "'{0}'".format(cond[2])
        cond = ' '.join(cond)
    return cond
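# Worked examples of both branches, called on the owning query-builder
# instance; note the IN branch calls .lower() on cond[2][0], so sequence
# elements are assumed to be strings.
self._clause_formatter(['status', '=', 'active'])         # -> "status = 'active'"
self._clause_formatter(['name', 'IN', ['alice', 'bob']])  # -> "name IN ('alice','bob')"
self._clause_formatter(['id', '=', '@last_id'])           # -> "id = @last_id" (@-values pass through unquoted)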
def write_doc(doc: MetapackDoc, mt_file=None):
    """
    Write a Metatab doc to a CSV file, and update the Modified time

    :param doc:
    :param mt_file:
    :return:
    """
    from rowgenerators import parse_app_url

    if not mt_file:
        mt_file = doc.ref

    add_giturl(doc)

    u = parse_app_url(mt_file)

    if u.scheme == 'file':
        doc.write(mt_file)
        return True
    else:
        return False
def from_signed_raw(cls: Type[MembershipType], signed_raw: str) -> MembershipType:
    """
    Return Membership instance from signed raw format

    :param signed_raw: Signed raw format string
    :return:
    """
    lines = signed_raw.splitlines(True)
    n = 0

    version = int(Membership.parse_field("Version", lines[n]))
    n += 1

    Membership.parse_field("Type", lines[n])
    n += 1

    currency = Membership.parse_field("Currency", lines[n])
    n += 1

    issuer = Membership.parse_field("Issuer", lines[n])
    n += 1

    membership_ts = BlockUID.from_str(Membership.parse_field("Block", lines[n]))
    n += 1

    membership_type = Membership.parse_field("Membership", lines[n])
    n += 1

    uid = Membership.parse_field("UserID", lines[n])
    n += 1

    identity_ts = BlockUID.from_str(Membership.parse_field("CertTS", lines[n]))
    n += 1

    signature = Membership.parse_field("Signature", lines[n])
    n += 1

    return cls(version, currency, issuer, membership_ts,
               membership_type, uid, identity_ts, signature)
def imread(filename, *args, **kwargs):
    """Return image data from TIFF file as numpy array.

    The first image series is returned if no arguments are provided.

    Parameters
    ----------
    key : int, slice, or sequence of page indices
        Defines which pages to return as array.
    series : int
        Defines which series of pages to return as array.

    Examples
    --------
    >>> image = imread('test.tif', 0)

    """
    with TIFFfile(filename) as tif:
        return tif.asarray(*args, **kwargs)
def parse(self, fp, parser=None, context=None):
    """
    Parse an HTML or XML document and
    return the extracted object following the Parsley rules given at instantiation.

    :param fp: file-like object containing an HTML or XML document, or URL or filename
    :param parser: *lxml.etree._FeedParser* instance (optional); defaults to lxml.etree.HTMLParser()
    :param context: user-supplied context that will be passed to custom XPath extensions (as first argument)
    :rtype: Python :class:`dict` object with mapped extracted content
    :raises: :class:`.NonMatchingNonOptionalKey`

    To parse from a string, use the :meth:`~base.Parselet.parse_fromstring` method instead.

    Note that the fp parameter is passed directly
    to `lxml.etree.parse <http://lxml.de/api/lxml.etree-module.html#parse>`_,
    so you can also give it an URL, and lxml will download it for you.
    (Also see `<http://lxml.de/tutorial.html#the-parse-function>`_.)
    """
    if parser is None:
        parser = lxml.etree.HTMLParser()
    doc = lxml.etree.parse(fp, parser=parser).getroot()
    return self.extract(doc, context=context)
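# Usage sketch, assuming this is parslepy's Parselet (whose constructor
# takes the extraction rules as a dict); the rule syntax is indicative only.
from parslepy import Parselet

rules = {'title': 'h1', 'links': ['a @href']}
extracted = Parselet(rules).parse('http://example.org/')
# -> {'title': ..., 'links': [...]}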
def load_with_scipy(file, data_name):
    """
    Loads data from a netcdf file.

    Parameters
    ----------
    file : string or file-like
        The name of the netcdf file to open.
    data_name : string
        The name of the data to extract from the netcdf file.

    Returns
    -------
    data : ndarray
        The desired data from the netcdf file as ndarray with nan for missing values.
    """
    # import moved below the docstring so the docstring stays attached to the function
    import scipy.io

    logger.debug('Loading data {} of netcdf file {} with scipy.io.'.format(data_name, file))
    f = scipy.io.netcdf.netcdf_file(file, 'r')
    data_netcdf = f.variables[data_name]
    data = np.array(data_netcdf.data, copy=True)
    data[data == data_netcdf.missing_value] = np.nan
    f.close()
    return data
async def load_tracks(self, query) -> LoadResult:
    """
    Executes a loadtracks request. Only works on Lavalink V3.

    Parameters
    ----------
    query : str

    Returns
    -------
    LoadResult
    """
    self.__check_node_ready()
    url = self._uri + quote(str(query))

    data = await self._get(url)
    if isinstance(data, dict):
        return LoadResult(data)
    elif isinstance(data, list):
        modified_data = {
            "loadType": LoadType.V2_COMPAT,
            "tracks": data
        }
        return LoadResult(modified_data)
def add_paths_to_os(self, key=None, update=None):
    ''' Add the paths in tree environ into the os environ

    This code goes through the tree environ and checks
    for existence in the os environ, then adds them

    Parameters:
        key (str):
            The section name to check against / add
        update (bool):
            If True, overwrites existing tree environment variables in your
            local environment. Default is False.
    '''
    if key is not None:
        allpaths = key if isinstance(key, list) else [key]
    else:
        allpaths = [k for k in self.environ.keys() if 'default' not in k]

    for key in allpaths:
        paths = self.get_paths(key)
        self.check_paths(paths, update=update)
async def connect(dsn=None, *,
host=None, port=None,
user=None, password=None, passfile=None,
database=None,
loop=None,
timeout=60,
statement_cache_size=100,
max_cached_statement_lifetime=300,
max_cacheable_statement_size=1024 * 15,
command_timeout=None,
ssl=None,
connection_class=Connection,
server_settings=None):
r"""A coroutine to establish a connection to a PostgreSQL server.
The connection parameters may be specified either as a connection
URI in *dsn*, or as specific keyword arguments, or both.
If both *dsn* and keyword arguments are specified, the latter
override the corresponding values parsed from the connection URI.
The default values for the majority of arguments can be specified
using `environment variables <postgres envvars>`_.
Returns a new :class:`~asyncpg.connection.Connection` object.
:param dsn:
Connection arguments specified using as a single string in the
`libpq connection URI format`_:
``postgres://user:password@host:port/database?option=value``.
The following options are recognized by asyncpg: host, port,
user, database (or dbname), password, passfile, sslmode.
Unlike libpq, asyncpg will treat unrecognized options
as `server settings`_ to be used for the connection.
:param host:
Database host address as one of the following:
- an IP address or a domain name;
- an absolute path to the directory containing the database
server Unix-domain socket (not supported on Windows);
- a sequence of any of the above, in which case the addresses
will be tried in order, and the first successful connection
will be returned.
If not specified, asyncpg will try the following, in order:
- host address(es) parsed from the *dsn* argument,
- the value of the ``PGHOST`` environment variable,
- on Unix, common directories used for PostgreSQL Unix-domain
sockets: ``"/run/postgresql"``, ``"/var/run/postgresl"``,
``"/var/pgsql_socket"``, ``"/private/tmp"``, and ``"/tmp"``,
- ``"localhost"``.
:param port:
Port number to connect to at the server host
(or Unix-domain socket file extension). If multiple host
addresses were specified, this parameter may specify a
sequence of port numbers of the same length as the host sequence,
or it may specify a single port number to be used for all host
addresses.
If not specified, the value parsed from the *dsn* argument is used,
or the value of the ``PGPORT`` environment variable, or ``5432`` if
neither is specified.
:param user:
The name of the database role used for authentication.
If not specified, the value parsed from the *dsn* argument is used,
or the value of the ``PGUSER`` environment variable, or the
operating system name of the user running the application.
:param database:
The name of the database to connect to.
If not specified, the value parsed from the *dsn* argument is used,
or the value of the ``PGDATABASE`` environment variable, or the
operating system name of the user running the application.
:param password:
Password to be used for authentication, if the server requires
one. If not specified, the value parsed from the *dsn* argument
is used, or the value of the ``PGPASSWORD`` environment variable.
Note that the use of the environment variable is discouraged as
other users and applications may be able to read it without needing
specific privileges. It is recommended to use *passfile* instead.
:param passfile:
The name of the file used to store passwords
(defaults to ``~/.pgpass``, or ``%APPDATA%\postgresql\pgpass.conf``
on Windows).
:param loop:
An asyncio event loop instance. If ``None``, the default
event loop will be used.
:param float timeout:
Connection timeout in seconds.
:param int statement_cache_size:
The size of prepared statement LRU cache. Pass ``0`` to
disable the cache.
:param int max_cached_statement_lifetime:
The maximum time in seconds a prepared statement will stay
        in the cache. Pass ``0`` to allow statements to be cached
        indefinitely.
:param int max_cacheable_statement_size:
The maximum size of a statement that can be cached (15KiB by
default). Pass ``0`` to allow all statements to be cached
regardless of their size.
:param float command_timeout:
The default timeout for operations on this connection
(the default is ``None``: no timeout).
:param ssl:
Pass ``True`` or an `ssl.SSLContext <SSLContext_>`_ instance to
require an SSL connection. If ``True``, a default SSL context
returned by `ssl.create_default_context() <create_default_context_>`_
will be used.
:param dict server_settings:
An optional dict of server runtime parameters. Refer to
PostgreSQL documentation for
a `list of supported options <server settings>`_.
:param Connection connection_class:
Class of the returned connection object. Must be a subclass of
:class:`~asyncpg.connection.Connection`.
:return: A :class:`~asyncpg.connection.Connection` instance.
Example:
.. code-block:: pycon
>>> import asyncpg
>>> import asyncio
>>> async def run():
... con = await asyncpg.connect(user='postgres')
... types = await con.fetch('SELECT * FROM pg_type')
... print(types)
...
>>> asyncio.get_event_loop().run_until_complete(run())
[<Record typname='bool' typnamespace=11 ...
.. versionadded:: 0.10.0
       Added ``max_cached_statement_lifetime`` parameter.
.. versionchanged:: 0.11.0
Removed ability to pass arbitrary keyword arguments to set
server settings. Added a dedicated parameter ``server_settings``
for that.
.. versionadded:: 0.11.0
Added ``connection_class`` parameter.
.. versionadded:: 0.16.0
Added ``passfile`` parameter
(and support for password files in general).
.. versionadded:: 0.18.0
Added ability to specify multiple hosts in the *dsn*
and *host* arguments.
.. _SSLContext: https://docs.python.org/3/library/ssl.html#ssl.SSLContext
.. _create_default_context:
https://docs.python.org/3/library/ssl.html#ssl.create_default_context
.. _server settings:
https://www.postgresql.org/docs/current/static/runtime-config.html
.. _postgres envvars:
https://www.postgresql.org/docs/current/static/libpq-envars.html
.. _libpq connection URI format:
https://www.postgresql.org/docs/current/static/\
libpq-connect.html#LIBPQ-CONNSTRING
"""
if not issubclass(connection_class, Connection):
raise TypeError(
'connection_class is expected to be a subclass of '
'asyncpg.Connection, got {!r}'.format(connection_class))
if loop is None:
loop = asyncio.get_event_loop()
return await connect_utils._connect(
loop=loop, timeout=timeout, connection_class=connection_class,
dsn=dsn, host=host, port=port, user=user,
password=password, passfile=passfile,
ssl=ssl, database=database,
server_settings=server_settings,
command_timeout=command_timeout,
statement_cache_size=statement_cache_size,
max_cached_statement_lifetime=max_cached_statement_lifetime,
max_cacheable_statement_size=max_cacheable_statement_size)
|
[
"async",
"def",
"connect",
"(",
"dsn",
"=",
"None",
",",
"*",
",",
"host",
"=",
"None",
",",
"port",
"=",
"None",
",",
"user",
"=",
"None",
",",
"password",
"=",
"None",
",",
"passfile",
"=",
"None",
",",
"database",
"=",
"None",
",",
"loop",
"=",
"None",
",",
"timeout",
"=",
"60",
",",
"statement_cache_size",
"=",
"100",
",",
"max_cached_statement_lifetime",
"=",
"300",
",",
"max_cacheable_statement_size",
"=",
"1024",
"*",
"15",
",",
"command_timeout",
"=",
"None",
",",
"ssl",
"=",
"None",
",",
"connection_class",
"=",
"Connection",
",",
"server_settings",
"=",
"None",
")",
":",
"if",
"not",
"issubclass",
"(",
"connection_class",
",",
"Connection",
")",
":",
"raise",
"TypeError",
"(",
"'connection_class is expected to be a subclass of '",
"'asyncpg.Connection, got {!r}'",
".",
"format",
"(",
"connection_class",
")",
")",
"if",
"loop",
"is",
"None",
":",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"return",
"await",
"connect_utils",
".",
"_connect",
"(",
"loop",
"=",
"loop",
",",
"timeout",
"=",
"timeout",
",",
"connection_class",
"=",
"connection_class",
",",
"dsn",
"=",
"dsn",
",",
"host",
"=",
"host",
",",
"port",
"=",
"port",
",",
"user",
"=",
"user",
",",
"password",
"=",
"password",
",",
"passfile",
"=",
"passfile",
",",
"ssl",
"=",
"ssl",
",",
"database",
"=",
"database",
",",
"server_settings",
"=",
"server_settings",
",",
"command_timeout",
"=",
"command_timeout",
",",
"statement_cache_size",
"=",
"statement_cache_size",
",",
"max_cached_statement_lifetime",
"=",
"max_cached_statement_lifetime",
",",
"max_cacheable_statement_size",
"=",
"max_cacheable_statement_size",
")"
] | 39.984615 | 22.569231 |
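A runnable connection sketch building on the docstring example; the DSN below points at a placeholder local database, and `fetchval` is the standard asyncpg helper for single-value queries.

```python
import asyncio
import asyncpg

async def run():
    # Placeholder DSN for a local test database.
    con = await asyncpg.connect('postgres://postgres@localhost:5432/postgres',
                                timeout=10)
    try:
        print(await con.fetchval('SELECT 2 ^ 10'))  # -> 1024.0
    finally:
        await con.close()

asyncio.get_event_loop().run_until_complete(run())
```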
def deserialize(self, value, attr=None, data=None, **kwargs):
"""Deserialize ``value``.
:param value: The value to be deserialized.
:param str attr: The attribute/key in `data` to be deserialized.
:param dict data: The raw input data passed to the `Schema.load`.
    :param dict kwargs: Field-specific keyword arguments.
:raise ValidationError: If an invalid value is passed or if a required value
is missing.
"""
# Validate required fields, deserialize, then validate
# deserialized value
self._validate_missing(value)
if value is missing_:
_miss = self.missing
return _miss() if callable(_miss) else _miss
if getattr(self, 'allow_none', False) is True and value is None:
return None
output = self._deserialize(value, attr, data, **kwargs)
self._validate(output)
return output
|
[
"def",
"deserialize",
"(",
"self",
",",
"value",
",",
"attr",
"=",
"None",
",",
"data",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Validate required fields, deserialize, then validate",
"# deserialized value",
"self",
".",
"_validate_missing",
"(",
"value",
")",
"if",
"value",
"is",
"missing_",
":",
"_miss",
"=",
"self",
".",
"missing",
"return",
"_miss",
"(",
")",
"if",
"callable",
"(",
"_miss",
")",
"else",
"_miss",
"if",
"getattr",
"(",
"self",
",",
"'allow_none'",
",",
"False",
")",
"is",
"True",
"and",
"value",
"is",
"None",
":",
"return",
"None",
"output",
"=",
"self",
".",
"_deserialize",
"(",
"value",
",",
"attr",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_validate",
"(",
"output",
")",
"return",
"output"
] | 43.952381 | 18.714286 |
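Since this is the marshmallow `Field.deserialize` entry point, a short sketch of both the success and the failure path (assuming the public marshmallow API):

```python
from marshmallow import fields, ValidationError

field = fields.Integer()
print(field.deserialize('42'))  # -> 42

try:
    field.deserialize('not-a-number')
except ValidationError as err:
    print(err.messages)  # -> ['Not a valid integer.']
```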
def write_additional(self, productversion, channel):
"""Write the additional information to the MAR header.
Args:
productversion (str): product and version string
channel (str): channel string
"""
self.fileobj.seek(self.additional_offset)
extras = extras_header.build(dict(
count=1,
sections=[dict(
channel=six.u(channel),
productversion=six.u(productversion),
size=len(channel) + len(productversion) + 2 + 8,
padding=b'',
)],
))
self.fileobj.write(extras)
self.last_offset = self.fileobj.tell()
|
[
"def",
"write_additional",
"(",
"self",
",",
"productversion",
",",
"channel",
")",
":",
"self",
".",
"fileobj",
".",
"seek",
"(",
"self",
".",
"additional_offset",
")",
"extras",
"=",
"extras_header",
".",
"build",
"(",
"dict",
"(",
"count",
"=",
"1",
",",
"sections",
"=",
"[",
"dict",
"(",
"channel",
"=",
"six",
".",
"u",
"(",
"channel",
")",
",",
"productversion",
"=",
"six",
".",
"u",
"(",
"productversion",
")",
",",
"size",
"=",
"len",
"(",
"channel",
")",
"+",
"len",
"(",
"productversion",
")",
"+",
"2",
"+",
"8",
",",
"padding",
"=",
"b''",
",",
")",
"]",
",",
")",
")",
"self",
".",
"fileobj",
".",
"write",
"(",
"extras",
")",
"self",
".",
"last_offset",
"=",
"self",
".",
"fileobj",
".",
"tell",
"(",
")"
] | 31.714286 | 16.238095 |
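The `size` expression is worth unpacking: a plausible reading is that the `+ 2` covers the two string NUL terminators and the `+ 8` the block size/id fields that precede them. That interpretation is an assumption from the struct layout, not stated in the code.

```python
# Worked size example; the meaning of +2 and +8 is an assumption (see above).
channel = 'release'
productversion = 'Firefox 62.0'
size = len(channel) + len(productversion) + 2 + 8
print(size)  # 7 + 12 + 2 + 8 = 29
```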
def split_coords_2d(seq):
"""
:param seq: a flat list with lons and lats
:returns: a validated list of pairs (lon, lat)
>>> split_coords_2d([1.1, 2.1, 2.2, 2.3])
[(1.1, 2.1), (2.2, 2.3)]
"""
lons, lats = [], []
for i, el in enumerate(seq):
if i % 2 == 0:
lons.append(valid.longitude(el))
elif i % 2 == 1:
lats.append(valid.latitude(el))
return list(zip(lons, lats))
|
[
"def",
"split_coords_2d",
"(",
"seq",
")",
":",
"lons",
",",
"lats",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"i",
",",
"el",
"in",
"enumerate",
"(",
"seq",
")",
":",
"if",
"i",
"%",
"2",
"==",
"0",
":",
"lons",
".",
"append",
"(",
"valid",
".",
"longitude",
"(",
"el",
")",
")",
"elif",
"i",
"%",
"2",
"==",
"1",
":",
"lats",
".",
"append",
"(",
"valid",
".",
"latitude",
"(",
"el",
")",
")",
"return",
"list",
"(",
"zip",
"(",
"lons",
",",
"lats",
")",
")"
] | 28.533333 | 10.8 |
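A self-contained sketch of the same routine; the real function delegates range checks to `valid.longitude`/`valid.latitude`, replaced here with plain assertions:

```python
def split_coords_2d(seq):
    lons, lats = [], []
    for i, el in enumerate(seq):
        if i % 2 == 0:
            assert -180.0 <= el <= 180.0, el   # stand-in for valid.longitude
            lons.append(el)
        else:
            assert -90.0 <= el <= 90.0, el     # stand-in for valid.latitude
            lats.append(el)
    return list(zip(lons, lats))

print(split_coords_2d([1.1, 2.1, 2.2, 2.3]))   # [(1.1, 2.1), (2.2, 2.3)]
```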
def update_currency_by_id(cls, currency_id, currency, **kwargs):
"""Update Currency
Update attributes of Currency
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_currency_by_id(currency_id, currency, async=True)
>>> result = thread.get()
:param async bool
:param str currency_id: ID of currency to update. (required)
:param Currency currency: Attributes of currency to update. (required)
:return: Currency
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_currency_by_id_with_http_info(currency_id, currency, **kwargs)
else:
(data) = cls._update_currency_by_id_with_http_info(currency_id, currency, **kwargs)
return data
|
[
"def",
"update_currency_by_id",
"(",
"cls",
",",
"currency_id",
",",
"currency",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_update_currency_by_id_with_http_info",
"(",
"currency_id",
",",
"currency",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"cls",
".",
"_update_currency_by_id_with_http_info",
"(",
"currency_id",
",",
"currency",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | 44.636364 | 22.318182 |
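A hedged usage sketch for this generated client method; the `Currency` fields and the owning class name are assumptions. Note that `async` became a reserved word in Python 3.7, so the keyword argument from the docstring has to be passed via dict expansion there:

```python
# Illustrative only; Currency fields and the client class are assumptions.
currency = Currency(name='Euro', code='EUR')
updated = CurrencyApi.update_currency_by_id('EUR', currency)

# Asynchronous variant ('async' is a keyword in Python 3.7+):
thread = CurrencyApi.update_currency_by_id('EUR', currency, **{'async': True})
result = thread.get()
```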
def to_gremlin(self):
"""Return a unicode object with the Gremlin representation of this expression."""
self.validate()
edge_direction, edge_name = self.fold_scope_location.get_first_folded_edge()
validate_safe_string(edge_name)
inverse_direction_table = {
'out': 'in',
'in': 'out',
}
inverse_direction = inverse_direction_table[edge_direction]
base_location_name, _ = self.fold_scope_location.base_location.get_location_name()
validate_safe_string(base_location_name)
_, field_name = self.fold_scope_location.get_location_name()
validate_safe_string(field_name)
if not self.folded_ir_blocks:
# There is no filtering nor type coercions applied to this @fold scope.
#
# This template generates code like:
# (
# (m.base.in_Animal_ParentOf == null) ?
# [] : (
# m.base.in_Animal_ParentOf.collect{entry -> entry.outV.next().uuid}
# )
# )
template = (
u'((m.{base_location_name}.{direction}_{edge_name} == null) ? [] : ('
u'm.{base_location_name}.{direction}_{edge_name}.collect{{'
u'entry -> entry.{inverse_direction}V.next().{field_name}{maybe_format}'
u'}}'
u'))'
)
filter_and_traverse_data = ''
else:
# There is filtering or type coercions in this @fold scope.
#
# This template generates code like:
# (
# (m.base.in_Animal_ParentOf == null) ?
# [] : (
# m.base.in_Animal_ParentOf
# .collect{entry -> entry.outV.next()}
# .findAll{it.alias.contains($wanted)}
# .collect{it.uuid}
# )
# )
template = (
u'((m.{base_location_name}.{direction}_{edge_name} == null) ? [] : ('
u'm.{base_location_name}.{direction}_{edge_name}.collect{{'
u'entry -> entry.{inverse_direction}V.next()'
u'}}'
u'.{filters_and_traverses}'
u'.collect{{entry -> entry.{field_name}{maybe_format}}}'
u'))'
)
filter_and_traverse_data = u'.'.join(block.to_gremlin()
for block in self.folded_ir_blocks)
maybe_format = ''
inner_type = strip_non_null_from_type(self.field_type.of_type)
if GraphQLDate.is_same_type(inner_type):
maybe_format = '.format("' + STANDARD_DATE_FORMAT + '")'
elif GraphQLDateTime.is_same_type(inner_type):
maybe_format = '.format("' + STANDARD_DATETIME_FORMAT + '")'
template_data = {
'base_location_name': base_location_name,
'direction': edge_direction,
'edge_name': edge_name,
'field_name': field_name,
'inverse_direction': inverse_direction,
'maybe_format': maybe_format,
'filters_and_traverses': filter_and_traverse_data,
}
return template.format(**template_data)
|
[
"def",
"to_gremlin",
"(",
"self",
")",
":",
"self",
".",
"validate",
"(",
")",
"edge_direction",
",",
"edge_name",
"=",
"self",
".",
"fold_scope_location",
".",
"get_first_folded_edge",
"(",
")",
"validate_safe_string",
"(",
"edge_name",
")",
"inverse_direction_table",
"=",
"{",
"'out'",
":",
"'in'",
",",
"'in'",
":",
"'out'",
",",
"}",
"inverse_direction",
"=",
"inverse_direction_table",
"[",
"edge_direction",
"]",
"base_location_name",
",",
"_",
"=",
"self",
".",
"fold_scope_location",
".",
"base_location",
".",
"get_location_name",
"(",
")",
"validate_safe_string",
"(",
"base_location_name",
")",
"_",
",",
"field_name",
"=",
"self",
".",
"fold_scope_location",
".",
"get_location_name",
"(",
")",
"validate_safe_string",
"(",
"field_name",
")",
"if",
"not",
"self",
".",
"folded_ir_blocks",
":",
"# There is no filtering nor type coercions applied to this @fold scope.",
"#",
"# This template generates code like:",
"# (",
"# (m.base.in_Animal_ParentOf == null) ?",
"# [] : (",
"# m.base.in_Animal_ParentOf.collect{entry -> entry.outV.next().uuid}",
"# )",
"# )",
"template",
"=",
"(",
"u'((m.{base_location_name}.{direction}_{edge_name} == null) ? [] : ('",
"u'm.{base_location_name}.{direction}_{edge_name}.collect{{'",
"u'entry -> entry.{inverse_direction}V.next().{field_name}{maybe_format}'",
"u'}}'",
"u'))'",
")",
"filter_and_traverse_data",
"=",
"''",
"else",
":",
"# There is filtering or type coercions in this @fold scope.",
"#",
"# This template generates code like:",
"# (",
"# (m.base.in_Animal_ParentOf == null) ?",
"# [] : (",
"# m.base.in_Animal_ParentOf",
"# .collect{entry -> entry.outV.next()}",
"# .findAll{it.alias.contains($wanted)}",
"# .collect{it.uuid}",
"# )",
"# )",
"template",
"=",
"(",
"u'((m.{base_location_name}.{direction}_{edge_name} == null) ? [] : ('",
"u'm.{base_location_name}.{direction}_{edge_name}.collect{{'",
"u'entry -> entry.{inverse_direction}V.next()'",
"u'}}'",
"u'.{filters_and_traverses}'",
"u'.collect{{entry -> entry.{field_name}{maybe_format}}}'",
"u'))'",
")",
"filter_and_traverse_data",
"=",
"u'.'",
".",
"join",
"(",
"block",
".",
"to_gremlin",
"(",
")",
"for",
"block",
"in",
"self",
".",
"folded_ir_blocks",
")",
"maybe_format",
"=",
"''",
"inner_type",
"=",
"strip_non_null_from_type",
"(",
"self",
".",
"field_type",
".",
"of_type",
")",
"if",
"GraphQLDate",
".",
"is_same_type",
"(",
"inner_type",
")",
":",
"maybe_format",
"=",
"'.format(\"'",
"+",
"STANDARD_DATE_FORMAT",
"+",
"'\")'",
"elif",
"GraphQLDateTime",
".",
"is_same_type",
"(",
"inner_type",
")",
":",
"maybe_format",
"=",
"'.format(\"'",
"+",
"STANDARD_DATETIME_FORMAT",
"+",
"'\")'",
"template_data",
"=",
"{",
"'base_location_name'",
":",
"base_location_name",
",",
"'direction'",
":",
"edge_direction",
",",
"'edge_name'",
":",
"edge_name",
",",
"'field_name'",
":",
"field_name",
",",
"'inverse_direction'",
":",
"inverse_direction",
",",
"'maybe_format'",
":",
"maybe_format",
",",
"'filters_and_traverses'",
":",
"filter_and_traverse_data",
",",
"}",
"return",
"template",
".",
"format",
"(",
"*",
"*",
"template_data",
")"
] | 41.115385 | 21.871795 |
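Rendering the no-filter template by hand shows the Gremlin this produces; the values below mirror the comment in the code:

```python
template = (
    u'((m.{base_location_name}.{direction}_{edge_name} == null) ? [] : ('
    u'm.{base_location_name}.{direction}_{edge_name}.collect{{'
    u'entry -> entry.{inverse_direction}V.next().{field_name}{maybe_format}'
    u'}}))'
)
print(template.format(
    base_location_name='base', direction='in', edge_name='Animal_ParentOf',
    inverse_direction='out', field_name='uuid', maybe_format=''))
# ((m.base.in_Animal_ParentOf == null) ? [] :
#  (m.base.in_Animal_ParentOf.collect{entry -> entry.outV.next().uuid}))
```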
def _jks_keystream(iv, password):
"""Helper keystream generator for _jks_pkey_decrypt"""
cur = iv
while 1:
xhash = hashlib.sha1(bytes(password + cur)) # hashlib.sha1 in python 2.6 does not accept a bytearray argument
cur = bytearray(xhash.digest()) # make sure we iterate over ints in both Py2 and Py3
for byte in cur:
yield byte
|
[
"def",
"_jks_keystream",
"(",
"iv",
",",
"password",
")",
":",
"cur",
"=",
"iv",
"while",
"1",
":",
"xhash",
"=",
"hashlib",
".",
"sha1",
"(",
"bytes",
"(",
"password",
"+",
"cur",
")",
")",
"# hashlib.sha1 in python 2.6 does not accept a bytearray argument",
"cur",
"=",
"bytearray",
"(",
"xhash",
".",
"digest",
"(",
")",
")",
"# make sure we iterate over ints in both Py2 and Py3",
"for",
"byte",
"in",
"cur",
":",
"yield",
"byte"
] | 46.25 | 28.25 |
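A consumption sketch: pyjks-style decryption XORs this keystream against the ciphertext byte-for-byte. The bytes below are dummies, and the UTF-16-BE password encoding is an assumption based on the JKS convention:

```python
iv = bytearray(b'\x00' * 20)                          # SHA-1 digests are 20 bytes
password = bytearray('changeit'.encode('utf-16-be'))  # assumed JKS password encoding
ciphertext = bytearray(b'\xde\xad\xbe\xef')           # dummy payload

plaintext = bytearray(c ^ k
                      for c, k in zip(ciphertext, _jks_keystream(iv, password)))
```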
def change_name(self, new_name):
"""Change the name of the shell, possibly updating the maximum name
length"""
if not new_name:
name = self.hostname
else:
name = new_name.decode()
self.display_name = display_names.change(
self.display_name, name)
|
[
"def",
"change_name",
"(",
"self",
",",
"new_name",
")",
":",
"if",
"not",
"new_name",
":",
"name",
"=",
"self",
".",
"hostname",
"else",
":",
"name",
"=",
"new_name",
".",
"decode",
"(",
")",
"self",
".",
"display_name",
"=",
"display_names",
".",
"change",
"(",
"self",
".",
"display_name",
",",
"name",
")"
] | 34.888889 | 8.444444 |
def install_trigger_function(connection: connection, overwrite: bool=False) -> None:
"""Install the psycopg2-pgevents trigger function against the database.
Parameters
----------
connection: psycopg2.extensions.connection
        Active connection to a PostgreSQL database.
overwrite: bool
Whether or not to overwrite existing installation of psycopg2-pgevents
trigger function, if existing installation is found.
Returns
-------
None
"""
prior_install = False
if not overwrite:
prior_install = trigger_function_installed(connection)
if not prior_install:
log('Installing trigger function...', logger_name=_LOGGER_NAME)
execute(connection, INSTALL_TRIGGER_FUNCTION_STATEMENT)
else:
log('Trigger function already installed; skipping...', logger_name=_LOGGER_NAME)
|
[
"def",
"install_trigger_function",
"(",
"connection",
":",
"connection",
",",
"overwrite",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"prior_install",
"=",
"False",
"if",
"not",
"overwrite",
":",
"prior_install",
"=",
"trigger_function_installed",
"(",
"connection",
")",
"if",
"not",
"prior_install",
":",
"log",
"(",
"'Installing trigger function...'",
",",
"logger_name",
"=",
"_LOGGER_NAME",
")",
"execute",
"(",
"connection",
",",
"INSTALL_TRIGGER_FUNCTION_STATEMENT",
")",
"else",
":",
"log",
"(",
"'Trigger function already installed; skipping...'",
",",
"logger_name",
"=",
"_LOGGER_NAME",
")"
] | 31.185185 | 26.888889 |
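A usage sketch, assuming a local database and that psycopg2 is installed alongside psycopg2-pgevents; the connection parameters are placeholders:

```python
import psycopg2

connection = psycopg2.connect(host='localhost', dbname='app', user='app')
connection.autocommit = True   # avoid leaving the DDL inside an open transaction
install_trigger_function(connection, overwrite=False)
```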
def line_plot(data, atts=None, percent=100.0, seed=1, title=None, outfile=None, wait=True):
"""
Uses the internal format to plot the dataset, one line per instance.
:param data: the dataset
:type data: Instances
:param atts: the list of 0-based attribute indices of attributes to plot
:type atts: list
:param percent: the percentage of the dataset to use for plotting
:type percent: float
:param seed: the seed value to use for subsampling
:type seed: int
:param title: an optional title
:type title: str
:param outfile: the (optional) file to save the generated plot to. The extension determines the file format.
:type outfile: str
:param wait: whether to wait for the user to close the plot
:type wait: bool
"""
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
# create subsample
data = plot.create_subsample(data, percent=percent, seed=seed)
fig = plt.figure()
if atts is None:
x = []
for i in range(data.num_attributes):
x.append(i)
else:
x = atts
ax = fig.add_subplot(111)
ax.set_xlabel("attributes")
ax.set_ylabel("value")
ax.grid(True)
for index_y in range(data.num_instances):
y = []
for index_x in x:
y.append(data.get_instance(index_y).get_value(index_x))
ax.plot(x, y, "o-", alpha=0.5)
if title is None:
title = data.relationname
if percent != 100:
title += " (%0.1f%%)" % percent
fig.canvas.set_window_title(title)
plt.draw()
if outfile is not None:
plt.savefig(outfile)
if wait:
plt.show()
|
[
"def",
"line_plot",
"(",
"data",
",",
"atts",
"=",
"None",
",",
"percent",
"=",
"100.0",
",",
"seed",
"=",
"1",
",",
"title",
"=",
"None",
",",
"outfile",
"=",
"None",
",",
"wait",
"=",
"True",
")",
":",
"if",
"not",
"plot",
".",
"matplotlib_available",
":",
"logger",
".",
"error",
"(",
"\"Matplotlib is not installed, plotting unavailable!\"",
")",
"return",
"# create subsample",
"data",
"=",
"plot",
".",
"create_subsample",
"(",
"data",
",",
"percent",
"=",
"percent",
",",
"seed",
"=",
"seed",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"if",
"atts",
"is",
"None",
":",
"x",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"data",
".",
"num_attributes",
")",
":",
"x",
".",
"append",
"(",
"i",
")",
"else",
":",
"x",
"=",
"atts",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"ax",
".",
"set_xlabel",
"(",
"\"attributes\"",
")",
"ax",
".",
"set_ylabel",
"(",
"\"value\"",
")",
"ax",
".",
"grid",
"(",
"True",
")",
"for",
"index_y",
"in",
"range",
"(",
"data",
".",
"num_instances",
")",
":",
"y",
"=",
"[",
"]",
"for",
"index_x",
"in",
"x",
":",
"y",
".",
"append",
"(",
"data",
".",
"get_instance",
"(",
"index_y",
")",
".",
"get_value",
"(",
"index_x",
")",
")",
"ax",
".",
"plot",
"(",
"x",
",",
"y",
",",
"\"o-\"",
",",
"alpha",
"=",
"0.5",
")",
"if",
"title",
"is",
"None",
":",
"title",
"=",
"data",
".",
"relationname",
"if",
"percent",
"!=",
"100",
":",
"title",
"+=",
"\" (%0.1f%%)\"",
"%",
"percent",
"fig",
".",
"canvas",
".",
"set_window_title",
"(",
"title",
")",
"plt",
".",
"draw",
"(",
")",
"if",
"outfile",
"is",
"not",
"None",
":",
"plt",
".",
"savefig",
"(",
"outfile",
")",
"if",
"wait",
":",
"plt",
".",
"show",
"(",
")"
] | 30.907407 | 20.944444 |
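A usage sketch for python-weka-wrapper; the ARFF path is a placeholder, and the JVM must be running before any `Instances` can be loaded:

```python
import weka.core.jvm as jvm
from weka.core.converters import Loader

jvm.start()
try:
    loader = Loader(classname="weka.core.converters.ArffLoader")
    data = loader.load_file("/path/to/iris.arff")   # placeholder path
    line_plot(data, percent=50.0, seed=42, title="iris (50% sample)")
finally:
    jvm.stop()
```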
def freeze(self):
"""Make the SchemaElement's connections immutable."""
self.in_connections = frozenset(self.in_connections)
self.out_connections = frozenset(self.out_connections)
|
[
"def",
"freeze",
"(",
"self",
")",
":",
"self",
".",
"in_connections",
"=",
"frozenset",
"(",
"self",
".",
"in_connections",
")",
"self",
".",
"out_connections",
"=",
"frozenset",
"(",
"self",
".",
"out_connections",
")"
] | 50 | 16.25 |
def main():
"""Main entry point for `dddp` command."""
parser = argparse.ArgumentParser(description=__doc__)
django = parser.add_argument_group('Django Options')
django.add_argument(
'--verbosity', '-v', metavar='VERBOSITY', dest='verbosity', type=int,
default=1,
)
django.add_argument(
'--debug-port', metavar='DEBUG_PORT', dest='debug_port', type=int,
default=0,
)
django.add_argument(
'--settings', metavar='SETTINGS', dest='settings',
help="The Python path to a settings module, e.g. "
"\"myproject.settings.main\". If this isn't provided, the "
"DJANGO_SETTINGS_MODULE environment variable will be used.",
)
http = parser.add_argument_group('HTTP Options')
http.add_argument(
'listen', metavar='address[:port]', nargs='*', type=addr,
help='Listening address for HTTP(s) server.',
)
ssl = parser.add_argument_group('SSL Options')
ssl.add_argument('--ssl-version', metavar='SSL_VERSION', dest='ssl_version',
help="SSL version to use (see stdlib ssl module's) [3]",
choices=['1', '2', '3'], default='3')
ssl.add_argument('--certfile', metavar='FILE', dest='certfile',
help="SSL certificate file [None]")
ssl.add_argument('--ciphers', metavar='CIPHERS', dest='ciphers',
help="Ciphers to use (see stdlib ssl module's) [TLSv1]")
ssl.add_argument('--ca-certs', metavar='FILE', dest='ca_certs',
help="CA certificates file [None]")
ssl.add_argument('--keyfile', metavar='FILE', dest='keyfile',
help="SSL key file [None]")
namespace = parser.parse_args()
if namespace.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = namespace.settings
serve(
namespace.listen or [Addr('localhost', 8000)],
debug_port=namespace.debug_port,
keyfile=namespace.keyfile,
certfile=namespace.certfile,
verbosity=namespace.verbosity,
)
|
[
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"__doc__",
")",
"django",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Django Options'",
")",
"django",
".",
"add_argument",
"(",
"'--verbosity'",
",",
"'-v'",
",",
"metavar",
"=",
"'VERBOSITY'",
",",
"dest",
"=",
"'verbosity'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
")",
"django",
".",
"add_argument",
"(",
"'--debug-port'",
",",
"metavar",
"=",
"'DEBUG_PORT'",
",",
"dest",
"=",
"'debug_port'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
")",
"django",
".",
"add_argument",
"(",
"'--settings'",
",",
"metavar",
"=",
"'SETTINGS'",
",",
"dest",
"=",
"'settings'",
",",
"help",
"=",
"\"The Python path to a settings module, e.g. \"",
"\"\\\"myproject.settings.main\\\". If this isn't provided, the \"",
"\"DJANGO_SETTINGS_MODULE environment variable will be used.\"",
",",
")",
"http",
"=",
"parser",
".",
"add_argument_group",
"(",
"'HTTP Options'",
")",
"http",
".",
"add_argument",
"(",
"'listen'",
",",
"metavar",
"=",
"'address[:port]'",
",",
"nargs",
"=",
"'*'",
",",
"type",
"=",
"addr",
",",
"help",
"=",
"'Listening address for HTTP(s) server.'",
",",
")",
"ssl",
"=",
"parser",
".",
"add_argument_group",
"(",
"'SSL Options'",
")",
"ssl",
".",
"add_argument",
"(",
"'--ssl-version'",
",",
"metavar",
"=",
"'SSL_VERSION'",
",",
"dest",
"=",
"'ssl_version'",
",",
"help",
"=",
"\"SSL version to use (see stdlib ssl module's) [3]\"",
",",
"choices",
"=",
"[",
"'1'",
",",
"'2'",
",",
"'3'",
"]",
",",
"default",
"=",
"'3'",
")",
"ssl",
".",
"add_argument",
"(",
"'--certfile'",
",",
"metavar",
"=",
"'FILE'",
",",
"dest",
"=",
"'certfile'",
",",
"help",
"=",
"\"SSL certificate file [None]\"",
")",
"ssl",
".",
"add_argument",
"(",
"'--ciphers'",
",",
"metavar",
"=",
"'CIPHERS'",
",",
"dest",
"=",
"'ciphers'",
",",
"help",
"=",
"\"Ciphers to use (see stdlib ssl module's) [TLSv1]\"",
")",
"ssl",
".",
"add_argument",
"(",
"'--ca-certs'",
",",
"metavar",
"=",
"'FILE'",
",",
"dest",
"=",
"'ca_certs'",
",",
"help",
"=",
"\"CA certificates file [None]\"",
")",
"ssl",
".",
"add_argument",
"(",
"'--keyfile'",
",",
"metavar",
"=",
"'FILE'",
",",
"dest",
"=",
"'keyfile'",
",",
"help",
"=",
"\"SSL key file [None]\"",
")",
"namespace",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"namespace",
".",
"settings",
":",
"os",
".",
"environ",
"[",
"'DJANGO_SETTINGS_MODULE'",
"]",
"=",
"namespace",
".",
"settings",
"serve",
"(",
"namespace",
".",
"listen",
"or",
"[",
"Addr",
"(",
"'localhost'",
",",
"8000",
")",
"]",
",",
"debug_port",
"=",
"namespace",
".",
"debug_port",
",",
"keyfile",
"=",
"namespace",
".",
"keyfile",
",",
"certfile",
"=",
"namespace",
".",
"certfile",
",",
"verbosity",
"=",
"namespace",
".",
"verbosity",
",",
")"
] | 44.533333 | 21.066667 |
def unc_wrapper_args(*covariance_keys):
"""
Wrap function, calculate its Jacobian and calculate the covariance of the
outputs given the covariance of the specified inputs.
:param covariance_keys: indices and names of arguments corresponding to
covariance
:return: wrapped function bound to specified covariance keys
This is the outer uncertainty wrapper that allows you to specify the
arguments in the original function that correspond to the covariance. The
inner wrapper takes the original function to be wrapped. ::
def f(a, b, c, d, kw1='foo', *args, **kwargs):
pass
# arguments a, c, d and kw1 correspond to the covariance matrix
f_wrapped = unc_wrapper_args(0, 2, 3, 'kw1')(f)
    cov = np.array([[0.0001, 0., 0., 0.], [0., 0.0001, 0., 0.],
                    [0., 0., 0.0001, 0.], [0., 0., 0., 0.0001]])
y, cov, jac = f_wrapped(a, b, c, d, kw1='bar', __covariance__=cov)
The covariance keys can be indices of positional arguments or the names of
    keyword arguments used in calling the function. If no covariance keys are
    specified, then the arguments that correspond to the covariance should be
    grouped into a sequence. If ``None`` is anywhere in ``covariance_keys``, then
all of the arguments will be used to calculate the Jacobian.
The covariance matrix must be a symmetrical matrix with positive numbers on
the diagonal that correspond to the square of the standard deviation, second
    moment around the mean or root-mean-square (RMS) of the function with respect
to the arguments specified as covariance keys. The other elements are the
covariances corresponding to the arguments intersecting at that element.
Pass the covariance matrix with the keyword ``__covariance__`` and it will
be popped from the dictionary of keyword arguments provided to the wrapped
function.
The wrapped function will return the evaluation of the original function,
its Jacobian, which is the sensitivity of the return output to each
argument specified as a covariance key and the covariance propagated using
the first order terms of a Taylor series expansion around the arguments.
An optional keyword argument ``__method__`` can also be passed to the
wrapped function (not the wrapper) that specifies the method used to
calculate the dot product. The default method is ``'loop'``. The other
methods are ``'dense'``, ``'sparse'`` and ``'pool'``.
If the arguments specified as covariance keys are arrays, they should all be
the same size. These dimensions will be considered as separate observations.
Another argument, not in the covariance keys, may also create observations.
The resulting Jacobian will have dimensions of number of observations (nobs)
by number of return output (nf) by number of covariance keys (nargs). The
resulting covariance will be nobs x nf x nf.
"""
def wrapper(f):
@wraps(f)
def wrapped_function(*args, **kwargs):
cov = kwargs.pop('__covariance__', None) # pop covariance
            method = kwargs.pop('__method__', 'loop')  # pop method
# covariance keys cannot be defaults, they must be in args or kwargs
cov_keys = covariance_keys
# convert args to kwargs by index
kwargs.update({n: v for n, v in enumerate(args)})
args = () # empty args
if None in cov_keys:
# use all keys
cov_keys = kwargs.keys()
# group covariance keys
if len(cov_keys) > 0:
# uses specified keys
x = [np.atleast_1d(kwargs.pop(k)) for k in cov_keys]
else:
# arguments already grouped
x = kwargs.pop(0) # use first argument
# remaining args
args_dict = {}
def args_from_kwargs(kwargs_):
"""unpack positional arguments from keyword arguments"""
# create mapping of positional arguments by index
args_ = [(n, v) for n, v in kwargs_.iteritems()
if not isinstance(n, basestring)]
# sort positional arguments by index
idx, args_ = zip(*sorted(args_, key=lambda m: m[0]))
# remove args_ and their indices from kwargs_
args_dict_ = {n: kwargs_.pop(n) for n in idx}
return args_, args_dict_
if kwargs:
args, args_dict = args_from_kwargs(kwargs)
def f_(x_, *args_, **kwargs_):
"""call original function with independent variables grouped"""
args_dict_ = args_dict
if cov_keys:
kwargs_.update(zip(cov_keys, x_), **args_dict_)
if kwargs_:
args_, _ = args_from_kwargs(kwargs_)
return np.array(f(*args_, **kwargs_))
# assumes independent variables already grouped
return f(x_, *args_, **kwargs_)
# evaluate function and Jacobian
avg = f_(x, *args, **kwargs)
# number of returns and observations
if avg.ndim > 1:
nf, nobs = avg.shape
else:
nf, nobs = avg.size, 1
jac = jacobian(f_, x, nf, nobs, *args, **kwargs)
# calculate covariance
if cov is not None:
# covariance must account for all observations
# scale covariances by x squared in each direction
if cov.ndim == 3:
x = np.array([np.repeat(y, nobs) if len(y)==1
else y for y in x])
LOGGER.debug('x:\n%r', x)
cov = np.array([c * y * np.row_stack(y)
for c, y in zip(cov, x.T)])
else: # x are all only one dimension
x = np.asarray(x)
cov = cov * x * x.T
assert jac.size / nf / nobs == cov.size / len(x)
cov = np.tile(cov, (nobs, 1, 1))
# propagate uncertainty using different methods
if method.lower() == 'dense':
j, c = jflatten(jac), jflatten(cov)
cov = prop_unc((j, c))
# sparse
elif method.lower() == 'sparse':
j, c = jtosparse(jac), jtosparse(cov)
cov = j.dot(c).dot(j.transpose())
cov = cov.todense()
# pool
elif method.lower() == 'pool':
try:
p = Pool()
cov = np.array(p.map(prop_unc, zip(jac, cov)))
finally:
p.terminate()
# loop is the default
else:
cov = np.array([prop_unc((jac[o], cov[o]))
for o in xrange(nobs)])
            # dense and sparse are flattened, unravel them into a 3-D list of
            # observations
if method.lower() in ['dense', 'sparse']:
cov = np.array([
cov[(nf * o):(nf * (o + 1)), (nf * o):(nf * (o + 1))]
for o in xrange(nobs)
])
# unpack returns for original function with ungrouped arguments
if None in cov_keys or len(cov_keys) > 0:
return tuple(avg.tolist() + [cov, jac])
# independent variables were already grouped
return avg, cov, jac
return wrapped_function
return wrapper
|
[
"def",
"unc_wrapper_args",
"(",
"*",
"covariance_keys",
")",
":",
"def",
"wrapper",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapped_function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cov",
"=",
"kwargs",
".",
"pop",
"(",
"'__covariance__'",
",",
"None",
")",
"# pop covariance\r",
"method",
"=",
"kwargs",
".",
"pop",
"(",
"'__method__'",
",",
"'loop'",
")",
"# pop covariance\r",
"# covariance keys cannot be defaults, they must be in args or kwargs\r",
"cov_keys",
"=",
"covariance_keys",
"# convert args to kwargs by index\r",
"kwargs",
".",
"update",
"(",
"{",
"n",
":",
"v",
"for",
"n",
",",
"v",
"in",
"enumerate",
"(",
"args",
")",
"}",
")",
"args",
"=",
"(",
")",
"# empty args\r",
"if",
"None",
"in",
"cov_keys",
":",
"# use all keys\r",
"cov_keys",
"=",
"kwargs",
".",
"keys",
"(",
")",
"# group covariance keys\r",
"if",
"len",
"(",
"cov_keys",
")",
">",
"0",
":",
"# uses specified keys\r",
"x",
"=",
"[",
"np",
".",
"atleast_1d",
"(",
"kwargs",
".",
"pop",
"(",
"k",
")",
")",
"for",
"k",
"in",
"cov_keys",
"]",
"else",
":",
"# arguments already grouped\r",
"x",
"=",
"kwargs",
".",
"pop",
"(",
"0",
")",
"# use first argument\r",
"# remaining args\r",
"args_dict",
"=",
"{",
"}",
"def",
"args_from_kwargs",
"(",
"kwargs_",
")",
":",
"\"\"\"unpack positional arguments from keyword arguments\"\"\"",
"# create mapping of positional arguments by index\r",
"args_",
"=",
"[",
"(",
"n",
",",
"v",
")",
"for",
"n",
",",
"v",
"in",
"kwargs_",
".",
"iteritems",
"(",
")",
"if",
"not",
"isinstance",
"(",
"n",
",",
"basestring",
")",
"]",
"# sort positional arguments by index\r",
"idx",
",",
"args_",
"=",
"zip",
"(",
"*",
"sorted",
"(",
"args_",
",",
"key",
"=",
"lambda",
"m",
":",
"m",
"[",
"0",
"]",
")",
")",
"# remove args_ and their indices from kwargs_\r",
"args_dict_",
"=",
"{",
"n",
":",
"kwargs_",
".",
"pop",
"(",
"n",
")",
"for",
"n",
"in",
"idx",
"}",
"return",
"args_",
",",
"args_dict_",
"if",
"kwargs",
":",
"args",
",",
"args_dict",
"=",
"args_from_kwargs",
"(",
"kwargs",
")",
"def",
"f_",
"(",
"x_",
",",
"*",
"args_",
",",
"*",
"*",
"kwargs_",
")",
":",
"\"\"\"call original function with independent variables grouped\"\"\"",
"args_dict_",
"=",
"args_dict",
"if",
"cov_keys",
":",
"kwargs_",
".",
"update",
"(",
"zip",
"(",
"cov_keys",
",",
"x_",
")",
",",
"*",
"*",
"args_dict_",
")",
"if",
"kwargs_",
":",
"args_",
",",
"_",
"=",
"args_from_kwargs",
"(",
"kwargs_",
")",
"return",
"np",
".",
"array",
"(",
"f",
"(",
"*",
"args_",
",",
"*",
"*",
"kwargs_",
")",
")",
"# assumes independent variables already grouped\r",
"return",
"f",
"(",
"x_",
",",
"*",
"args_",
",",
"*",
"*",
"kwargs_",
")",
"# evaluate function and Jacobian\r",
"avg",
"=",
"f_",
"(",
"x",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# number of returns and observations\r",
"if",
"avg",
".",
"ndim",
">",
"1",
":",
"nf",
",",
"nobs",
"=",
"avg",
".",
"shape",
"else",
":",
"nf",
",",
"nobs",
"=",
"avg",
".",
"size",
",",
"1",
"jac",
"=",
"jacobian",
"(",
"f_",
",",
"x",
",",
"nf",
",",
"nobs",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# calculate covariance\r",
"if",
"cov",
"is",
"not",
"None",
":",
"# covariance must account for all observations\r",
"# scale covariances by x squared in each direction\r",
"if",
"cov",
".",
"ndim",
"==",
"3",
":",
"x",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"repeat",
"(",
"y",
",",
"nobs",
")",
"if",
"len",
"(",
"y",
")",
"==",
"1",
"else",
"y",
"for",
"y",
"in",
"x",
"]",
")",
"LOGGER",
".",
"debug",
"(",
"'x:\\n%r'",
",",
"x",
")",
"cov",
"=",
"np",
".",
"array",
"(",
"[",
"c",
"*",
"y",
"*",
"np",
".",
"row_stack",
"(",
"y",
")",
"for",
"c",
",",
"y",
"in",
"zip",
"(",
"cov",
",",
"x",
".",
"T",
")",
"]",
")",
"else",
":",
"# x are all only one dimension\r",
"x",
"=",
"np",
".",
"asarray",
"(",
"x",
")",
"cov",
"=",
"cov",
"*",
"x",
"*",
"x",
".",
"T",
"assert",
"jac",
".",
"size",
"/",
"nf",
"/",
"nobs",
"==",
"cov",
".",
"size",
"/",
"len",
"(",
"x",
")",
"cov",
"=",
"np",
".",
"tile",
"(",
"cov",
",",
"(",
"nobs",
",",
"1",
",",
"1",
")",
")",
"# propagate uncertainty using different methods\r",
"if",
"method",
".",
"lower",
"(",
")",
"==",
"'dense'",
":",
"j",
",",
"c",
"=",
"jflatten",
"(",
"jac",
")",
",",
"jflatten",
"(",
"cov",
")",
"cov",
"=",
"prop_unc",
"(",
"(",
"j",
",",
"c",
")",
")",
"# sparse\r",
"elif",
"method",
".",
"lower",
"(",
")",
"==",
"'sparse'",
":",
"j",
",",
"c",
"=",
"jtosparse",
"(",
"jac",
")",
",",
"jtosparse",
"(",
"cov",
")",
"cov",
"=",
"j",
".",
"dot",
"(",
"c",
")",
".",
"dot",
"(",
"j",
".",
"transpose",
"(",
")",
")",
"cov",
"=",
"cov",
".",
"todense",
"(",
")",
"# pool\r",
"elif",
"method",
".",
"lower",
"(",
")",
"==",
"'pool'",
":",
"try",
":",
"p",
"=",
"Pool",
"(",
")",
"cov",
"=",
"np",
".",
"array",
"(",
"p",
".",
"map",
"(",
"prop_unc",
",",
"zip",
"(",
"jac",
",",
"cov",
")",
")",
")",
"finally",
":",
"p",
".",
"terminate",
"(",
")",
"# loop is the default\r",
"else",
":",
"cov",
"=",
"np",
".",
"array",
"(",
"[",
"prop_unc",
"(",
"(",
"jac",
"[",
"o",
"]",
",",
"cov",
"[",
"o",
"]",
")",
")",
"for",
"o",
"in",
"xrange",
"(",
"nobs",
")",
"]",
")",
"# dense and spares are flattened, unravel them into 3-D list of\r",
"# observations\r",
"if",
"method",
".",
"lower",
"(",
")",
"in",
"[",
"'dense'",
",",
"'sparse'",
"]",
":",
"cov",
"=",
"np",
".",
"array",
"(",
"[",
"cov",
"[",
"(",
"nf",
"*",
"o",
")",
":",
"(",
"nf",
"*",
"(",
"o",
"+",
"1",
")",
")",
",",
"(",
"nf",
"*",
"o",
")",
":",
"(",
"nf",
"*",
"(",
"o",
"+",
"1",
")",
")",
"]",
"for",
"o",
"in",
"xrange",
"(",
"nobs",
")",
"]",
")",
"# unpack returns for original function with ungrouped arguments\r",
"if",
"None",
"in",
"cov_keys",
"or",
"len",
"(",
"cov_keys",
")",
">",
"0",
":",
"return",
"tuple",
"(",
"avg",
".",
"tolist",
"(",
")",
"+",
"[",
"cov",
",",
"jac",
"]",
")",
"# independent variables were already grouped\r",
"return",
"avg",
",",
"cov",
",",
"jac",
"return",
"wrapped_function",
"return",
"wrapper"
] | 48.5625 | 20.84375 |
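A minimal sketch following the docstring pattern, with a single scalar argument. Two caveats: the module above is Python-2 era code (`iteritems`, `basestring`, `xrange`), and the wrapper scales the input covariance by `x` squared before propagating, so the input is effectively a relative covariance. That second point is our reading of the code, not a documented guarantee.

```python
import numpy as np

@unc_wrapper_args(0)            # covariance corresponds to the first argument
def square(x):
    return x ** 2

x = np.array([2.0])
cov = np.array([[0.01]])        # relative variance of x (see note above)
avg, cov_out, jac = square(x, __covariance__=cov)
# cov_out is propagated as J.C.J^T, with J the numerical Jacobian of x**2
```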
def basic_pool(features, max_area_width, max_area_height=1, height=1,
fn=tf.reduce_max, name=None):
"""Pools for each area based on a given pooling function (fn).
Args:
features: a Tensor in a shape of [batch_size, height * width, depth].
max_area_width: the max width allowed for an area.
max_area_height: the max height allowed for an area.
height: the height of the image.
fn: the TF function for the pooling.
name: the namescope.
Returns:
pool_results: A Tensor of shape [batch_size, num_areas, depth]
area_heights: A Tensor of shape [batch_size, num_areas, 1]
area_widths: A Tensor of shape [batch_size, num_areas, 1]
"""
with tf.name_scope(name, default_name="basic_pool"):
feature_shape = common_layers.shape_list(features)
batch_size = feature_shape[0]
length = feature_shape[-2]
depth = feature_shape[-1]
width = length // height
features_2d = tf.reshape(features, [batch_size, height, width, depth])
height_list = []
width_list = []
pool_list = []
size_tensor = tf.ones_like(features_2d[:, :, :, 0], dtype=tf.int32)
for area_height in range(max_area_height):
for area_width in range(max_area_width):
pool_tensor = _pool_one_shape(features_2d,
area_width=area_width + 1,
area_height=area_height + 1,
batch_size=batch_size,
width=width,
height=height,
depth=depth,
fn=fn)
pool_list.append(
tf.reshape(pool_tensor, [batch_size, -1, depth]))
height_list.append(
tf.reshape(
size_tensor[:, area_height:, area_width:] *\
(area_height + 1), [batch_size, -1]))
width_list.append(
tf.reshape(
size_tensor[:, area_height:, area_width:] *\
(area_width + 1), [batch_size, -1]))
pool_results = tf.concat(pool_list, axis=1)
area_heights = tf.expand_dims(tf.concat(height_list, axis=1), 2)
area_widths = tf.expand_dims(tf.concat(width_list, axis=1), 2)
return pool_results, area_heights, area_widths
|
[
"def",
"basic_pool",
"(",
"features",
",",
"max_area_width",
",",
"max_area_height",
"=",
"1",
",",
"height",
"=",
"1",
",",
"fn",
"=",
"tf",
".",
"reduce_max",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
",",
"default_name",
"=",
"\"basic_pool\"",
")",
":",
"feature_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"features",
")",
"batch_size",
"=",
"feature_shape",
"[",
"0",
"]",
"length",
"=",
"feature_shape",
"[",
"-",
"2",
"]",
"depth",
"=",
"feature_shape",
"[",
"-",
"1",
"]",
"width",
"=",
"length",
"//",
"height",
"features_2d",
"=",
"tf",
".",
"reshape",
"(",
"features",
",",
"[",
"batch_size",
",",
"height",
",",
"width",
",",
"depth",
"]",
")",
"height_list",
"=",
"[",
"]",
"width_list",
"=",
"[",
"]",
"pool_list",
"=",
"[",
"]",
"size_tensor",
"=",
"tf",
".",
"ones_like",
"(",
"features_2d",
"[",
":",
",",
":",
",",
":",
",",
"0",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"for",
"area_height",
"in",
"range",
"(",
"max_area_height",
")",
":",
"for",
"area_width",
"in",
"range",
"(",
"max_area_width",
")",
":",
"pool_tensor",
"=",
"_pool_one_shape",
"(",
"features_2d",
",",
"area_width",
"=",
"area_width",
"+",
"1",
",",
"area_height",
"=",
"area_height",
"+",
"1",
",",
"batch_size",
"=",
"batch_size",
",",
"width",
"=",
"width",
",",
"height",
"=",
"height",
",",
"depth",
"=",
"depth",
",",
"fn",
"=",
"fn",
")",
"pool_list",
".",
"append",
"(",
"tf",
".",
"reshape",
"(",
"pool_tensor",
",",
"[",
"batch_size",
",",
"-",
"1",
",",
"depth",
"]",
")",
")",
"height_list",
".",
"append",
"(",
"tf",
".",
"reshape",
"(",
"size_tensor",
"[",
":",
",",
"area_height",
":",
",",
"area_width",
":",
"]",
"*",
"(",
"area_height",
"+",
"1",
")",
",",
"[",
"batch_size",
",",
"-",
"1",
"]",
")",
")",
"width_list",
".",
"append",
"(",
"tf",
".",
"reshape",
"(",
"size_tensor",
"[",
":",
",",
"area_height",
":",
",",
"area_width",
":",
"]",
"*",
"(",
"area_width",
"+",
"1",
")",
",",
"[",
"batch_size",
",",
"-",
"1",
"]",
")",
")",
"pool_results",
"=",
"tf",
".",
"concat",
"(",
"pool_list",
",",
"axis",
"=",
"1",
")",
"area_heights",
"=",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"concat",
"(",
"height_list",
",",
"axis",
"=",
"1",
")",
",",
"2",
")",
"area_widths",
"=",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"concat",
"(",
"width_list",
",",
"axis",
"=",
"1",
")",
",",
"2",
")",
"return",
"pool_results",
",",
"area_heights",
",",
"area_widths"
] | 44.470588 | 16.529412 |
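A NumPy analogue for a 1-D sequence (`height=1`), to make the "one pooled value per (start, area width)" layout concrete; shapes only, no TensorFlow required:

```python
import numpy as np

features = np.array([1.0, 3.0, 2.0])
max_area_width = 2
pools, widths = [], []
for w in range(1, max_area_width + 1):          # area widths 1..max
    for start in range(len(features) - w + 1):  # all start positions
        pools.append(features[start:start + w].max())
        widths.append(w)
print(pools)    # [1.0, 3.0, 2.0, 3.0, 3.0]
print(widths)   # [1, 1, 1, 2, 2]
```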
def percent(args=None):
'''
Return partition information for volumes mounted on this minion
CLI Example:
.. code-block:: bash
salt '*' disk.percent /var
'''
if __grains__['kernel'] == 'Linux':
cmd = 'df -P'
elif __grains__['kernel'] == 'OpenBSD' or __grains__['kernel'] == 'AIX':
cmd = 'df -kP'
else:
cmd = 'df'
ret = {}
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
for line in out:
if not line:
continue
if line.startswith('Filesystem'):
continue
comps = line.split()
while len(comps) >= 2 and not comps[1].isdigit():
comps[0] = '{0} {1}'.format(comps[0], comps[1])
comps.pop(1)
if len(comps) < 2:
continue
try:
if __grains__['kernel'] == 'Darwin':
ret[comps[8]] = comps[4]
else:
ret[comps[5]] = comps[4]
except IndexError:
log.error('Problem parsing disk usage information')
ret = {}
if args and args not in ret:
log.error(
'Problem parsing disk usage information: Partition \'%s\' '
'does not exist!', args
)
ret = {}
elif args:
return ret[args]
return ret
|
[
"def",
"percent",
"(",
"args",
"=",
"None",
")",
":",
"if",
"__grains__",
"[",
"'kernel'",
"]",
"==",
"'Linux'",
":",
"cmd",
"=",
"'df -P'",
"elif",
"__grains__",
"[",
"'kernel'",
"]",
"==",
"'OpenBSD'",
"or",
"__grains__",
"[",
"'kernel'",
"]",
"==",
"'AIX'",
":",
"cmd",
"=",
"'df -kP'",
"else",
":",
"cmd",
"=",
"'df'",
"ret",
"=",
"{",
"}",
"out",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
")",
".",
"splitlines",
"(",
")",
"for",
"line",
"in",
"out",
":",
"if",
"not",
"line",
":",
"continue",
"if",
"line",
".",
"startswith",
"(",
"'Filesystem'",
")",
":",
"continue",
"comps",
"=",
"line",
".",
"split",
"(",
")",
"while",
"len",
"(",
"comps",
")",
">=",
"2",
"and",
"not",
"comps",
"[",
"1",
"]",
".",
"isdigit",
"(",
")",
":",
"comps",
"[",
"0",
"]",
"=",
"'{0} {1}'",
".",
"format",
"(",
"comps",
"[",
"0",
"]",
",",
"comps",
"[",
"1",
"]",
")",
"comps",
".",
"pop",
"(",
"1",
")",
"if",
"len",
"(",
"comps",
")",
"<",
"2",
":",
"continue",
"try",
":",
"if",
"__grains__",
"[",
"'kernel'",
"]",
"==",
"'Darwin'",
":",
"ret",
"[",
"comps",
"[",
"8",
"]",
"]",
"=",
"comps",
"[",
"4",
"]",
"else",
":",
"ret",
"[",
"comps",
"[",
"5",
"]",
"]",
"=",
"comps",
"[",
"4",
"]",
"except",
"IndexError",
":",
"log",
".",
"error",
"(",
"'Problem parsing disk usage information'",
")",
"ret",
"=",
"{",
"}",
"if",
"args",
"and",
"args",
"not",
"in",
"ret",
":",
"log",
".",
"error",
"(",
"'Problem parsing disk usage information: Partition \\'%s\\' '",
"'does not exist!'",
",",
"args",
")",
"ret",
"=",
"{",
"}",
"elif",
"args",
":",
"return",
"ret",
"[",
"args",
"]",
"return",
"ret"
] | 27.148936 | 20.893617 |
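The parsing loop can be exercised standalone against canned `df -P` output; the real module obtains it via `__salt__['cmd.run']`, and the multi-word filesystem fix-up is omitted here for brevity:

```python
out = '''Filesystem 1024-blocks Used Available Capacity Mounted on
/dev/sda1 41152832 8124416 30915264 21% /
tmpfs 4019020 0 4019020 0% /dev/shm'''.splitlines()

ret = {}
for line in out:
    if not line or line.startswith('Filesystem'):
        continue
    comps = line.split()
    ret[comps[5]] = comps[4]   # mount point -> used percentage
print(ret)  # {'/': '21%', '/dev/shm': '0%'}
```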
def rank(self, dim, pct=False, keep_attrs=None):
"""Ranks the data.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that
set. Ranks begin at 1, not 0. If pct, computes percentage ranks.
NaNs in the input array are returned as NaNs.
The `bottleneck` library is required.
Parameters
----------
dim : str
Dimension over which to compute rank.
pct : bool, optional
If True, compute percentage ranks, otherwise compute integer ranks.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
Returns
-------
ranked : DataArray
DataArray with the same coordinates and dtype 'float64'.
Examples
--------
>>> arr = xr.DataArray([5, 6, 7], dims='x')
>>> arr.rank('x')
<xarray.DataArray (x: 3)>
array([ 1., 2., 3.])
Dimensions without coordinates: x
"""
ds = self._to_temp_dataset().rank(dim, pct=pct, keep_attrs=keep_attrs)
return self._from_temp_dataset(ds)
|
[
"def",
"rank",
"(",
"self",
",",
"dim",
",",
"pct",
"=",
"False",
",",
"keep_attrs",
"=",
"None",
")",
":",
"ds",
"=",
"self",
".",
"_to_temp_dataset",
"(",
")",
".",
"rank",
"(",
"dim",
",",
"pct",
"=",
"pct",
",",
"keep_attrs",
"=",
"keep_attrs",
")",
"return",
"self",
".",
"_from_temp_dataset",
"(",
"ds",
")"
] | 33.769231 | 22.871795 |
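Extending the docstring example to percentage ranks (this requires the bottleneck library, per the docstring):

```python
import xarray as xr

arr = xr.DataArray([5, 6, 7], dims='x')
print(arr.rank('x', pct=True).values)   # approx. [0.333 0.667 1.0]
```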
def ready(self):
"""
Finalizes application configuration.
"""
import wagtailplus.wagtailrelations.signals.handlers
self.add_relationship_panels()
self.add_relationship_methods()
super(WagtailRelationsAppConfig, self).ready()
|
[
"def",
"ready",
"(",
"self",
")",
":",
"import",
"wagtailplus",
".",
"wagtailrelations",
".",
"signals",
".",
"handlers",
"self",
".",
"add_relationship_panels",
"(",
")",
"self",
".",
"add_relationship_methods",
"(",
")",
"super",
"(",
"WagtailRelationsAppConfig",
",",
"self",
")",
".",
"ready",
"(",
")"
] | 31.222222 | 11.444444 |
def valid_file(cls, filename):
""" Check if the provided file is a valid file for this plugin.
:arg filename: the path to the file to check.
"""
return not os.path.isdir(filename) \
and os.path.basename(filename).startswith('Session ') \
and filename.endswith('.mqo')
|
[
"def",
"valid_file",
"(",
"cls",
",",
"filename",
")",
":",
"return",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"filename",
")",
"and",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
".",
"startswith",
"(",
"'Session '",
")",
"and",
"filename",
".",
"endswith",
"(",
"'.mqo'",
")"
] | 35.222222 | 15 |
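A quick check of the predicate with illustrative paths; the owning plugin class name is an assumption:

```python
# Hypothetical class name; paths are illustrative.
print(MqoPlugin.valid_file('/captures/Session 1.mqo'))   # True
print(MqoPlugin.valid_file('/captures/notes.txt'))       # False
```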
def sanitize(self):
'''
Check if the current settings conform to the LISP specifications and
fix them where possible.
'''
        # We override the MapRegisterMessage sanitize() method
super(InfoMessage, self).sanitize()
# R: R bit indicates this is a reply to an Info-Request (Info-
# Reply). R bit is set to 0 in an Info-Request. When R bit is set
# to 0, the AFI field (following the EID-prefix field) must be set
# to 0. When R bit is set to 1, the packet contents follow the
# format for an Info-Reply as described below.
if not isinstance(self.is_reply, bool):
raise ValueError('Is-reply flag must be a boolean')
# Nonce: An 8-byte random value created by the sender of the Info-
# Request. This nonce will be returned in the Info-Reply. The
# nonce SHOULD be generated by a properly seeded pseudo-random (or
# strong random) source.
if len(bytes(self.nonce)) != 8:
raise ValueError('Invalid nonce')
# Key ID: A configured ID to find the configured Message
# Authentication Code (MAC) algorithm and key value used for the
# authentication function. See Section 14.4 for codepoint
# assignments.
if self.key_id not in (KEY_ID_NONE, KEY_ID_HMAC_SHA_1_96,
KEY_ID_HMAC_SHA_256_128):
raise ValueError('Invalid Key ID')
# Authentication Data: The message digest used from the output of the
# Message Authentication Code (MAC) algorithm. The entire Map-
# Register payload is authenticated with this field preset to 0.
# After the MAC is computed, it is placed in this field.
# Implementations of this specification MUST include support for
# HMAC-SHA-1-96 [RFC2404] and support for HMAC-SHA-256-128 [RFC6234]
# is RECOMMENDED.
if not isinstance(self.authentication_data, bytes):
raise ValueError('Invalid authentication data')
# TTL: The time in minutes the recipient of the Info-Reply will
# store the RTR Information.
if not isinstance(self.ttl, numbers.Integral) \
or self.ttl < 0 or self.ttl > 0xffffffff:
raise ValueError('Invalid TTL')
# EID-prefix: 4 octets if an IPv4 address-family, 16 octets if an IPv6
# address-family.
if not isinstance(self.eid_prefix, (IPv4Network, IPv6Network)):
raise ValueError('EID prefix must be IPv4 or IPv6')
# When a Map-Server receives an Info-Request message, it responds with
# an Info-Reply message. The Info-Reply message source port is 4342,
# and destination port is taken from the source port of the triggering
# Info-Request. Map-Server fills the NAT LCAF (LCAF Type = 7) fields
# according to their description. The Map-Server uses AFI=0 for the
# Private ETR RLOC Address field in the NAT LCAF.
if self.is_reply:
if not isinstance(self.reply, LCAFNATTraversalAddress):
raise ValueError("An InfoMessage which is an Info-Reply must contain an LCAFNATTraversalAddress")
else:
if self.reply is not None:
raise ValueError("An InfoMessage which is an Info-Request can not contain a reply")
|
[
"def",
"sanitize",
"(",
"self",
")",
":",
"# We override the MapRegisterMessage sa",
"super",
"(",
"InfoMessage",
",",
"self",
")",
".",
"sanitize",
"(",
")",
"# R: R bit indicates this is a reply to an Info-Request (Info-",
"# Reply). R bit is set to 0 in an Info-Request. When R bit is set",
"# to 0, the AFI field (following the EID-prefix field) must be set",
"# to 0. When R bit is set to 1, the packet contents follow the",
"# format for an Info-Reply as described below.",
"if",
"not",
"isinstance",
"(",
"self",
".",
"is_reply",
",",
"bool",
")",
":",
"raise",
"ValueError",
"(",
"'Is-reply flag must be a boolean'",
")",
"# Nonce: An 8-byte random value created by the sender of the Info-",
"# Request. This nonce will be returned in the Info-Reply. The",
"# nonce SHOULD be generated by a properly seeded pseudo-random (or",
"# strong random) source.",
"if",
"len",
"(",
"bytes",
"(",
"self",
".",
"nonce",
")",
")",
"!=",
"8",
":",
"raise",
"ValueError",
"(",
"'Invalid nonce'",
")",
"# Key ID: A configured ID to find the configured Message",
"# Authentication Code (MAC) algorithm and key value used for the",
"# authentication function. See Section 14.4 for codepoint",
"# assignments.",
"if",
"self",
".",
"key_id",
"not",
"in",
"(",
"KEY_ID_NONE",
",",
"KEY_ID_HMAC_SHA_1_96",
",",
"KEY_ID_HMAC_SHA_256_128",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid Key ID'",
")",
"# Authentication Data: The message digest used from the output of the",
"# Message Authentication Code (MAC) algorithm. The entire Map-",
"# Register payload is authenticated with this field preset to 0.",
"# After the MAC is computed, it is placed in this field.",
"# Implementations of this specification MUST include support for",
"# HMAC-SHA-1-96 [RFC2404] and support for HMAC-SHA-256-128 [RFC6234]",
"# is RECOMMENDED.",
"if",
"not",
"isinstance",
"(",
"self",
".",
"authentication_data",
",",
"bytes",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid authentication data'",
")",
"# TTL: The time in minutes the recipient of the Info-Reply will",
"# store the RTR Information.",
"if",
"not",
"isinstance",
"(",
"self",
".",
"ttl",
",",
"numbers",
".",
"Integral",
")",
"or",
"self",
".",
"ttl",
"<",
"0",
"or",
"self",
".",
"ttl",
">",
"0xffffffff",
":",
"raise",
"ValueError",
"(",
"'Invalid TTL'",
")",
"# EID-prefix: 4 octets if an IPv4 address-family, 16 octets if an IPv6",
"# address-family.",
"if",
"not",
"isinstance",
"(",
"self",
".",
"eid_prefix",
",",
"(",
"IPv4Network",
",",
"IPv6Network",
")",
")",
":",
"raise",
"ValueError",
"(",
"'EID prefix must be IPv4 or IPv6'",
")",
"# When a Map-Server receives an Info-Request message, it responds with",
"# an Info-Reply message. The Info-Reply message source port is 4342,",
"# and destination port is taken from the source port of the triggering",
"# Info-Request. Map-Server fills the NAT LCAF (LCAF Type = 7) fields",
"# according to their description. The Map-Server uses AFI=0 for the",
"# Private ETR RLOC Address field in the NAT LCAF.",
"if",
"self",
".",
"is_reply",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"reply",
",",
"LCAFNATTraversalAddress",
")",
":",
"raise",
"ValueError",
"(",
"\"An InfoMessage which is an Info-Reply must contain an LCAFNATTraversalAddress\"",
")",
"else",
":",
"if",
"self",
".",
"reply",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"An InfoMessage which is an Info-Request can not contain a reply\"",
")"
] | 51.34375 | 26.09375 |
def configure_login(app):
    """Configure login authentication
Uses `Flask-Login <https://flask-login.readthedocs.org>`_
"""
from heman.auth import login_manager, login
login_manager.init_app(app)
@app.teardown_request
def force_logout(*args, **kwargs):
login.logout_user()
|
[
"def",
"configure_login",
"(",
"app",
")",
":",
"from",
"heman",
".",
"auth",
"import",
"login_manager",
",",
"login",
"login_manager",
".",
"init_app",
"(",
"app",
")",
"@",
"app",
".",
"teardown_request",
"def",
"force_logout",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"login",
".",
"logout_user",
"(",
")"
] | 27.272727 | 14.727273 |
def find_day_by_weekday_offset(year, month, weekday, offset):
"""Get the day number based on a date and offset
:param year: date year
:type year: int
:param month: date month
:type month: int
:param weekday: date week day
:type weekday: int
:param offset: offset (-1 is last, 1 is first etc)
:type offset: int
:return: day number in the month
:rtype: int
>>> find_day_by_weekday_offset(2010, 7, 1, -1)
27
"""
# thanks calendar :)
cal = calendar.monthcalendar(year, month)
# If we ask for a -1 day, just reverse cal
if offset < 0:
offset = abs(offset)
cal.reverse()
# ok go for it
nb_found = 0
try:
for i in range(0, offset + 1):
# in cal 0 mean "there are no day here :)"
if cal[i][weekday] != 0:
nb_found += 1
if nb_found == offset:
return cal[i][weekday]
return None
    except IndexError:
return None
|
[
"def",
"find_day_by_weekday_offset",
"(",
"year",
",",
"month",
",",
"weekday",
",",
"offset",
")",
":",
"# thanks calendar :)",
"cal",
"=",
"calendar",
".",
"monthcalendar",
"(",
"year",
",",
"month",
")",
"# If we ask for a -1 day, just reverse cal",
"if",
"offset",
"<",
"0",
":",
"offset",
"=",
"abs",
"(",
"offset",
")",
"cal",
".",
"reverse",
"(",
")",
"# ok go for it",
"nb_found",
"=",
"0",
"try",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"offset",
"+",
"1",
")",
":",
"# in cal 0 mean \"there are no day here :)\"",
"if",
"cal",
"[",
"i",
"]",
"[",
"weekday",
"]",
"!=",
"0",
":",
"nb_found",
"+=",
"1",
"if",
"nb_found",
"==",
"offset",
":",
"return",
"cal",
"[",
"i",
"]",
"[",
"weekday",
"]",
"return",
"None",
"except",
"KeyError",
":",
"return",
"None"
] | 25.972973 | 17.243243 |
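The function leans on calendar.monthcalendar, which returns one row per week and pads days outside the month with 0. A self-contained check of the doctest's answer:

import calendar

weeks = calendar.monthcalendar(2010, 7)   # one list per week, 0 = padding
# Last Tuesday (weekday index 1) of July 2010:
last_tuesday = next(week[1] for week in reversed(weeks) if week[1] != 0)
print(last_tuesday)  # 27, matching the doctest above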
def deploy(self, initial_instance_count, instance_type, accelerator_type=None, endpoint_name=None,
update_endpoint=False, tags=None, kms_key=None):
"""Deploy this ``Model`` to an ``Endpoint`` and optionally return a ``Predictor``.
Create a SageMaker ``Model`` and ``EndpointConfig``, and deploy an ``Endpoint`` from this ``Model``.
        If ``self.predictor_cls`` is not None, this method returns the result of invoking
``self.predictor_cls`` on the created endpoint name.
        The name of the created model is accessible in the ``name`` field of this ``Model`` after deploy returns.
The name of the created endpoint is accessible in the ``endpoint_name``
field of this ``Model`` after deploy returns.
Args:
instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'.
initial_instance_count (int): The initial number of instances to run in the
``Endpoint`` created from this ``Model``.
accelerator_type (str): Type of Elastic Inference accelerator to deploy this model for model loading
and inference, for example, 'ml.eia1.medium'. If not specified, no Elastic Inference accelerator
will be attached to the endpoint.
For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html
endpoint_name (str): The name of the endpoint to create (default: None).
If not specified, a unique endpoint name will be created.
update_endpoint (bool): Flag to update the model in an existing Amazon SageMaker endpoint.
If True, this will deploy a new EndpointConfig to an already existing endpoint and delete resources
corresponding to the previous EndpointConfig. If False, a new endpoint will be created. Default: False
tags(List[dict[str, str]]): The list of tags to attach to this specific endpoint.
kms_key (str): The ARN of the KMS key that is used to encrypt the data on the
storage volume attached to the instance hosting the endpoint.
Returns:
callable[string, sagemaker.session.Session] or None: Invocation of ``self.predictor_cls`` on
the created endpoint name, if ``self.predictor_cls`` is not None. Otherwise, return None.
"""
if not self.sagemaker_session:
if instance_type in ('local', 'local_gpu'):
self.sagemaker_session = local.LocalSession()
else:
self.sagemaker_session = session.Session()
if self.role is None:
raise ValueError("Role can not be null for deploying a model")
compiled_model_suffix = '-'.join(instance_type.split('.')[:-1])
if self._is_compiled_model:
self.name += compiled_model_suffix
self._create_sagemaker_model(instance_type, accelerator_type, tags)
production_variant = sagemaker.production_variant(self.name, instance_type, initial_instance_count,
accelerator_type=accelerator_type)
if endpoint_name:
self.endpoint_name = endpoint_name
else:
self.endpoint_name = self.name
if self._is_compiled_model and not self.endpoint_name.endswith(compiled_model_suffix):
self.endpoint_name += compiled_model_suffix
if update_endpoint:
endpoint_config_name = self.sagemaker_session.create_endpoint_config(
name=self.name,
model_name=self.name,
initial_instance_count=initial_instance_count,
instance_type=instance_type,
accelerator_type=accelerator_type,
tags=tags,
kms_key=kms_key)
self.sagemaker_session.update_endpoint(self.endpoint_name, endpoint_config_name)
else:
self.sagemaker_session.endpoint_from_production_variants(self.endpoint_name, [production_variant],
tags, kms_key)
if self.predictor_cls:
return self.predictor_cls(self.endpoint_name, self.sagemaker_session)
|
[
"def",
"deploy",
"(",
"self",
",",
"initial_instance_count",
",",
"instance_type",
",",
"accelerator_type",
"=",
"None",
",",
"endpoint_name",
"=",
"None",
",",
"update_endpoint",
"=",
"False",
",",
"tags",
"=",
"None",
",",
"kms_key",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"sagemaker_session",
":",
"if",
"instance_type",
"in",
"(",
"'local'",
",",
"'local_gpu'",
")",
":",
"self",
".",
"sagemaker_session",
"=",
"local",
".",
"LocalSession",
"(",
")",
"else",
":",
"self",
".",
"sagemaker_session",
"=",
"session",
".",
"Session",
"(",
")",
"if",
"self",
".",
"role",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Role can not be null for deploying a model\"",
")",
"compiled_model_suffix",
"=",
"'-'",
".",
"join",
"(",
"instance_type",
".",
"split",
"(",
"'.'",
")",
"[",
":",
"-",
"1",
"]",
")",
"if",
"self",
".",
"_is_compiled_model",
":",
"self",
".",
"name",
"+=",
"compiled_model_suffix",
"self",
".",
"_create_sagemaker_model",
"(",
"instance_type",
",",
"accelerator_type",
",",
"tags",
")",
"production_variant",
"=",
"sagemaker",
".",
"production_variant",
"(",
"self",
".",
"name",
",",
"instance_type",
",",
"initial_instance_count",
",",
"accelerator_type",
"=",
"accelerator_type",
")",
"if",
"endpoint_name",
":",
"self",
".",
"endpoint_name",
"=",
"endpoint_name",
"else",
":",
"self",
".",
"endpoint_name",
"=",
"self",
".",
"name",
"if",
"self",
".",
"_is_compiled_model",
"and",
"not",
"self",
".",
"endpoint_name",
".",
"endswith",
"(",
"compiled_model_suffix",
")",
":",
"self",
".",
"endpoint_name",
"+=",
"compiled_model_suffix",
"if",
"update_endpoint",
":",
"endpoint_config_name",
"=",
"self",
".",
"sagemaker_session",
".",
"create_endpoint_config",
"(",
"name",
"=",
"self",
".",
"name",
",",
"model_name",
"=",
"self",
".",
"name",
",",
"initial_instance_count",
"=",
"initial_instance_count",
",",
"instance_type",
"=",
"instance_type",
",",
"accelerator_type",
"=",
"accelerator_type",
",",
"tags",
"=",
"tags",
",",
"kms_key",
"=",
"kms_key",
")",
"self",
".",
"sagemaker_session",
".",
"update_endpoint",
"(",
"self",
".",
"endpoint_name",
",",
"endpoint_config_name",
")",
"else",
":",
"self",
".",
"sagemaker_session",
".",
"endpoint_from_production_variants",
"(",
"self",
".",
"endpoint_name",
",",
"[",
"production_variant",
"]",
",",
"tags",
",",
"kms_key",
")",
"if",
"self",
".",
"predictor_cls",
":",
"return",
"self",
".",
"predictor_cls",
"(",
"self",
".",
"endpoint_name",
",",
"self",
".",
"sagemaker_session",
")"
] | 57.69863 | 34.342466 |
def get_eip_address_info(addresses=None, allocation_ids=None, region=None, key=None,
keyid=None, profile=None):
'''
    Get 'interesting' info about some or all EIPs associated with the current account.
addresses
(list) - Optional list of addresses. If provided, only the addresses
associated with those in the list will be returned.
allocation_ids
(list) - Optional list of allocation IDs. If provided, only the
addresses associated with the given allocation IDs will be returned.
returns
(list of dicts) - A list of dicts, each containing the info for one of the requested EIPs.
CLI Example:
.. code-block:: bash
salt-call boto_ec2.get_eip_address_info addresses=52.4.2.15
.. versionadded:: 2016.3.0
'''
if type(addresses) == (type('string')):
addresses = [addresses]
if type(allocation_ids) == (type('string')):
allocation_ids = [allocation_ids]
ret = _get_all_eip_addresses(addresses=addresses, allocation_ids=allocation_ids,
region=region, key=key, keyid=keyid, profile=profile)
interesting = ['allocation_id', 'association_id', 'domain', 'instance_id',
'network_interface_id', 'network_interface_owner_id', 'public_ip',
'private_ip_address']
return [dict([(x, getattr(address, x)) for x in interesting]) for address in ret]
|
[
"def",
"get_eip_address_info",
"(",
"addresses",
"=",
"None",
",",
"allocation_ids",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"if",
"type",
"(",
"addresses",
")",
"==",
"(",
"type",
"(",
"'string'",
")",
")",
":",
"addresses",
"=",
"[",
"addresses",
"]",
"if",
"type",
"(",
"allocation_ids",
")",
"==",
"(",
"type",
"(",
"'string'",
")",
")",
":",
"allocation_ids",
"=",
"[",
"allocation_ids",
"]",
"ret",
"=",
"_get_all_eip_addresses",
"(",
"addresses",
"=",
"addresses",
",",
"allocation_ids",
"=",
"allocation_ids",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"interesting",
"=",
"[",
"'allocation_id'",
",",
"'association_id'",
",",
"'domain'",
",",
"'instance_id'",
",",
"'network_interface_id'",
",",
"'network_interface_owner_id'",
",",
"'public_ip'",
",",
"'private_ip_address'",
"]",
"return",
"[",
"dict",
"(",
"[",
"(",
"x",
",",
"getattr",
"(",
"address",
",",
"x",
")",
")",
"for",
"x",
"in",
"interesting",
"]",
")",
"for",
"address",
"in",
"ret",
"]"
] | 39.111111 | 30.388889 |
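The type(addresses) == (type('string')) guard above is a fragile way to promote a bare string to a list; an isinstance-based helper is the usual idiom (a suggestion, not part of the original module):

def _listify(value):
    # Promote a bare string to a one-element list; pass lists/None through.
    if isinstance(value, str):
        return [value]
    return value

assert _listify('52.4.2.15') == ['52.4.2.15']
assert _listify(['a', 'b']) == ['a', 'b']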
def post(self, repo):
"""
Post to the metadata server
Parameters
----------
repo
"""
datapackage = repo.package
url = self.url
token = self.token
headers = {
'Authorization': 'Token {}'.format(token),
'Content-Type': 'application/json'
}
try:
r = requests.post(url,
data = json.dumps(datapackage),
headers=headers)
return r
except Exception as e:
#print(e)
#traceback.print_exc()
raise NetworkError()
return ""
|
[
"def",
"post",
"(",
"self",
",",
"repo",
")",
":",
"datapackage",
"=",
"repo",
".",
"package",
"url",
"=",
"self",
".",
"url",
"token",
"=",
"self",
".",
"token",
"headers",
"=",
"{",
"'Authorization'",
":",
"'Token {}'",
".",
"format",
"(",
"token",
")",
",",
"'Content-Type'",
":",
"'application/json'",
"}",
"try",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"datapackage",
")",
",",
"headers",
"=",
"headers",
")",
"return",
"r",
"except",
"Exception",
"as",
"e",
":",
"#print(e)",
"#traceback.print_exc()",
"raise",
"NetworkError",
"(",
")",
"return",
"\"\""
] | 21.466667 | 19.733333 |
def mousePressEvent( self, event ):
"""
Make sure on a mouse release event that we have a current item. If
no item is current, then our edit item will become current.
:param event | <QMouseReleaseEvent>
"""
item = self.itemAt(event.pos())
# set the tag creation item as active
if item is None:
create_item = self.createItem()
if create_item:
self.setCurrentItem(create_item)
self.editItem(create_item)
# check to see if we're removing a tag
else:
rect = self.visualItemRect(item)
if ( rect.right() - 14 < event.pos().x() ):
# make sure the item is allowed to be removed via the widget
if ( self.itemsRemovable() ):
self.takeItem(self.row(item))
# emit the removed signal
if ( not self.signalsBlocked() ):
self.tagRemoved.emit(item.text())
event.ignore()
return
super(XMultiTagEdit, self).mousePressEvent(event)
|
[
"def",
"mousePressEvent",
"(",
"self",
",",
"event",
")",
":",
"item",
"=",
"self",
".",
"itemAt",
"(",
"event",
".",
"pos",
"(",
")",
")",
"# set the tag creation item as active\r",
"if",
"item",
"is",
"None",
":",
"create_item",
"=",
"self",
".",
"createItem",
"(",
")",
"if",
"create_item",
":",
"self",
".",
"setCurrentItem",
"(",
"create_item",
")",
"self",
".",
"editItem",
"(",
"create_item",
")",
"# check to see if we're removing a tag\r",
"else",
":",
"rect",
"=",
"self",
".",
"visualItemRect",
"(",
"item",
")",
"if",
"(",
"rect",
".",
"right",
"(",
")",
"-",
"14",
"<",
"event",
".",
"pos",
"(",
")",
".",
"x",
"(",
")",
")",
":",
"# make sure the item is allowed to be removed via the widget\r",
"if",
"(",
"self",
".",
"itemsRemovable",
"(",
")",
")",
":",
"self",
".",
"takeItem",
"(",
"self",
".",
"row",
"(",
"item",
")",
")",
"# emit the removed signal\r",
"if",
"(",
"not",
"self",
".",
"signalsBlocked",
"(",
")",
")",
":",
"self",
".",
"tagRemoved",
".",
"emit",
"(",
"item",
".",
"text",
"(",
")",
")",
"event",
".",
"ignore",
"(",
")",
"return",
"super",
"(",
"XMultiTagEdit",
",",
"self",
")",
".",
"mousePressEvent",
"(",
"event",
")"
] | 37.21875 | 14.78125 |
def preprocess_na(sent, label_type):
"""Preprocess Na sentences
Args:
sent: A sentence
label_type: The type of label provided
"""
if label_type == "phonemes_and_tones":
phonemes = True
tones = True
tgm = True
elif label_type == "phonemes_and_tones_no_tgm":
phonemes = True
tones = True
tgm = False
elif label_type == "phonemes":
phonemes = True
tones = False
tgm = False
elif label_type == "tones":
phonemes = False
tones = True
tgm = True
elif label_type == "tones_notgm":
phonemes = False
tones = True
tgm = False
else:
raise ValueError("Unrecognized label type: %s" % label_type)
def pop_phoneme(sentence):
"""Pop phonemes off a sentence one at a time"""
# TODO desperately needs refactoring
# Treating fillers as single tokens; normalizing to əəə and mmm
if phonemes:
if sentence[:4] in ["əəə…", "mmm…"]:
return sentence[:4], sentence[4:]
if sentence.startswith("ə…"):
return "əəə…", sentence[2:]
if sentence.startswith("m…"):
return "mmm…", sentence[2:]
if sentence.startswith("mm…"):
return "mmm…", sentence[3:]
# Normalizing some stuff
if sentence[:3] == "wæ̃":
if phonemes:
return "w̃æ", sentence[3:]
else:
return None, sentence[3:]
if sentence[:3] == "ṽ̩":
if phonemes:
return "ṽ̩", sentence[3:]
else:
return None, sentence[3:]
if sentence[:3] in TRI_PHNS:
if phonemes:
return sentence[:3], sentence[3:]
else:
return None, sentence[3:]
if sentence[:2] in BI_PHNS:
if phonemes:
return sentence[:2], sentence[2:]
else:
return None, sentence[2:]
if sentence[:2] == "˧̩":
return "˧", sentence[2:]
if sentence[:2] == "˧̍":
return "˧", sentence[2:]
if sentence[0] in UNI_PHNS:
if phonemes:
return sentence[0], sentence[1:]
else:
return None, sentence[1:]
if sentence[:2] in BI_TONES:
if tones:
return sentence[:2], sentence[2:]
else:
return None, sentence[2:]
if sentence[0] in UNI_TONES:
if tones:
return sentence[0], sentence[1:]
else:
return None, sentence[1:]
if sentence[0] in MISC_SYMBOLS:
# We assume these symbols cannot be captured.
return None, sentence[1:]
if sentence[0] in BAD_NA_SYMBOLS:
return None, sentence[1:]
if sentence[0] in PUNC_SYMBOLS:
return None, sentence[1:]
if sentence[0] in ["-", "ʰ", "/"]:
return None, sentence[1:]
if sentence[0] in set(["<", ">"]):
# We keep everything literal, thus including what is in <>
# brackets; so we just remove these tokens"
return None, sentence[1:]
if sentence[0] == "[":
# It's an opening square bracket, so ignore everything until we
# find a closing one.
if sentence.find("]") == len(sentence)-1:
# If the closing bracket is the last char
return None, ""
else:
return None, sentence[sentence.find("]")+1:]
if sentence[0] in set([" ", "\t", "\n"]):
# Return a space char so that it can be identified in word segmentation
# processing.
return " ", sentence[1:]
if sentence[0] == "|" or sentence[0] == "ǀ" or sentence[0] == "◊":
# TODO Address extrametrical span symbol ◊ differently. For now,
# treating it as a tone group boundary marker for consistency with
# previous work.
if tgm:
return "|", sentence[1:]
else:
return None, sentence[1:]
if sentence[0] in "()":
return None, sentence[1:]
print("***" + sentence)
raise ValueError("Next character not recognized: " + sentence[:1])
def filter_for_phonemes(sentence):
""" Returns a sequence of phonemes and pipes (word delimiters). Tones,
syllable boundaries, whitespace are all removed."""
filtered_sentence = []
while sentence != "":
phoneme, sentence = pop_phoneme(sentence)
if phoneme != " ":
filtered_sentence.append(phoneme)
filtered_sentence = [item for item in filtered_sentence if item != None]
return " ".join(filtered_sentence)
# Filter utterances with certain words
if "BEGAIEMENT" in sent:
return ""
sent = filter_for_phonemes(sent)
return sent
|
[
"def",
"preprocess_na",
"(",
"sent",
",",
"label_type",
")",
":",
"if",
"label_type",
"==",
"\"phonemes_and_tones\"",
":",
"phonemes",
"=",
"True",
"tones",
"=",
"True",
"tgm",
"=",
"True",
"elif",
"label_type",
"==",
"\"phonemes_and_tones_no_tgm\"",
":",
"phonemes",
"=",
"True",
"tones",
"=",
"True",
"tgm",
"=",
"False",
"elif",
"label_type",
"==",
"\"phonemes\"",
":",
"phonemes",
"=",
"True",
"tones",
"=",
"False",
"tgm",
"=",
"False",
"elif",
"label_type",
"==",
"\"tones\"",
":",
"phonemes",
"=",
"False",
"tones",
"=",
"True",
"tgm",
"=",
"True",
"elif",
"label_type",
"==",
"\"tones_notgm\"",
":",
"phonemes",
"=",
"False",
"tones",
"=",
"True",
"tgm",
"=",
"False",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized label type: %s\"",
"%",
"label_type",
")",
"def",
"pop_phoneme",
"(",
"sentence",
")",
":",
"\"\"\"Pop phonemes off a sentence one at a time\"\"\"",
"# TODO desperately needs refactoring",
"# Treating fillers as single tokens; normalizing to əəə and mmm",
"if",
"phonemes",
":",
"if",
"sentence",
"[",
":",
"4",
"]",
"in",
"[",
"\"əəə…\", \"mm",
"m",
"\"]:",
"",
"",
"return",
"sentence",
"[",
":",
"4",
"]",
",",
"sentence",
"[",
"4",
":",
"]",
"if",
"sentence",
".",
"startswith",
"(",
"\"ə…\"):",
"",
"",
"return",
"\"əəə…\", sen",
"t",
"nce[2:]",
"",
"",
"",
"",
"if",
"sentence",
".",
"startswith",
"(",
"\"m…\"):",
"",
"",
"return",
"\"mmm…\", ",
"s",
"ntence[2",
":",
"]",
"",
"",
"if",
"sentence",
".",
"startswith",
"(",
"\"mm…\"):",
"",
"",
"return",
"\"mmm…\", ",
"s",
"ntence[3",
":",
"]",
"",
"",
"# Normalizing some stuff",
"if",
"sentence",
"[",
":",
"3",
"]",
"==",
"\"wæ̃\":",
"",
"if",
"phonemes",
":",
"return",
"\"w̃æ\", ",
"s",
"ntence[3",
":",
"]",
"",
"",
"else",
":",
"return",
"None",
",",
"sentence",
"[",
"3",
":",
"]",
"if",
"sentence",
"[",
":",
"3",
"]",
"==",
"\"ṽ̩\":",
"",
"if",
"phonemes",
":",
"return",
"\"ṽ̩\", ",
"s",
"ntence[3",
":",
"]",
"",
"",
"else",
":",
"return",
"None",
",",
"sentence",
"[",
"3",
":",
"]",
"if",
"sentence",
"[",
":",
"3",
"]",
"in",
"TRI_PHNS",
":",
"if",
"phonemes",
":",
"return",
"sentence",
"[",
":",
"3",
"]",
",",
"sentence",
"[",
"3",
":",
"]",
"else",
":",
"return",
"None",
",",
"sentence",
"[",
"3",
":",
"]",
"if",
"sentence",
"[",
":",
"2",
"]",
"in",
"BI_PHNS",
":",
"if",
"phonemes",
":",
"return",
"sentence",
"[",
":",
"2",
"]",
",",
"sentence",
"[",
"2",
":",
"]",
"else",
":",
"return",
"None",
",",
"sentence",
"[",
"2",
":",
"]",
"if",
"sentence",
"[",
":",
"2",
"]",
"==",
"\"˧̩\":",
"",
"return",
"\"˧\",",
" ",
"entence[",
"2",
":",
"]",
"",
"if",
"sentence",
"[",
":",
"2",
"]",
"==",
"\"˧̍\":",
"",
"return",
"\"˧\",",
" ",
"entence[",
"2",
":",
"]",
"",
"if",
"sentence",
"[",
"0",
"]",
"in",
"UNI_PHNS",
":",
"if",
"phonemes",
":",
"return",
"sentence",
"[",
"0",
"]",
",",
"sentence",
"[",
"1",
":",
"]",
"else",
":",
"return",
"None",
",",
"sentence",
"[",
"1",
":",
"]",
"if",
"sentence",
"[",
":",
"2",
"]",
"in",
"BI_TONES",
":",
"if",
"tones",
":",
"return",
"sentence",
"[",
":",
"2",
"]",
",",
"sentence",
"[",
"2",
":",
"]",
"else",
":",
"return",
"None",
",",
"sentence",
"[",
"2",
":",
"]",
"if",
"sentence",
"[",
"0",
"]",
"in",
"UNI_TONES",
":",
"if",
"tones",
":",
"return",
"sentence",
"[",
"0",
"]",
",",
"sentence",
"[",
"1",
":",
"]",
"else",
":",
"return",
"None",
",",
"sentence",
"[",
"1",
":",
"]",
"if",
"sentence",
"[",
"0",
"]",
"in",
"MISC_SYMBOLS",
":",
"# We assume these symbols cannot be captured.",
"return",
"None",
",",
"sentence",
"[",
"1",
":",
"]",
"if",
"sentence",
"[",
"0",
"]",
"in",
"BAD_NA_SYMBOLS",
":",
"return",
"None",
",",
"sentence",
"[",
"1",
":",
"]",
"if",
"sentence",
"[",
"0",
"]",
"in",
"PUNC_SYMBOLS",
":",
"return",
"None",
",",
"sentence",
"[",
"1",
":",
"]",
"if",
"sentence",
"[",
"0",
"]",
"in",
"[",
"\"-\"",
",",
"\"ʰ\",",
" ",
"/\"]",
":",
"",
"return",
"None",
",",
"sentence",
"[",
"1",
":",
"]",
"if",
"sentence",
"[",
"0",
"]",
"in",
"set",
"(",
"[",
"\"<\"",
",",
"\">\"",
"]",
")",
":",
"# We keep everything literal, thus including what is in <>",
"# brackets; so we just remove these tokens\"",
"return",
"None",
",",
"sentence",
"[",
"1",
":",
"]",
"if",
"sentence",
"[",
"0",
"]",
"==",
"\"[\"",
":",
"# It's an opening square bracket, so ignore everything until we",
"# find a closing one.",
"if",
"sentence",
".",
"find",
"(",
"\"]\"",
")",
"==",
"len",
"(",
"sentence",
")",
"-",
"1",
":",
"# If the closing bracket is the last char",
"return",
"None",
",",
"\"\"",
"else",
":",
"return",
"None",
",",
"sentence",
"[",
"sentence",
".",
"find",
"(",
"\"]\"",
")",
"+",
"1",
":",
"]",
"if",
"sentence",
"[",
"0",
"]",
"in",
"set",
"(",
"[",
"\" \"",
",",
"\"\\t\"",
",",
"\"\\n\"",
"]",
")",
":",
"# Return a space char so that it can be identified in word segmentation",
"# processing.",
"return",
"\" \"",
",",
"sentence",
"[",
"1",
":",
"]",
"if",
"sentence",
"[",
"0",
"]",
"==",
"\"|\"",
"or",
"sentence",
"[",
"0",
"]",
"==",
"\"ǀ\" ",
"r ",
"entence[",
"0",
"]",
" ",
"= ",
"◊\":",
"",
"# TODO Address extrametrical span symbol ◊ differently. For now,",
"# treating it as a tone group boundary marker for consistency with",
"# previous work.",
"if",
"tgm",
":",
"return",
"\"|\"",
",",
"sentence",
"[",
"1",
":",
"]",
"else",
":",
"return",
"None",
",",
"sentence",
"[",
"1",
":",
"]",
"if",
"sentence",
"[",
"0",
"]",
"in",
"\"()\"",
":",
"return",
"None",
",",
"sentence",
"[",
"1",
":",
"]",
"print",
"(",
"\"***\"",
"+",
"sentence",
")",
"raise",
"ValueError",
"(",
"\"Next character not recognized: \"",
"+",
"sentence",
"[",
":",
"1",
"]",
")",
"def",
"filter_for_phonemes",
"(",
"sentence",
")",
":",
"\"\"\" Returns a sequence of phonemes and pipes (word delimiters). Tones,\n syllable boundaries, whitespace are all removed.\"\"\"",
"filtered_sentence",
"=",
"[",
"]",
"while",
"sentence",
"!=",
"\"\"",
":",
"phoneme",
",",
"sentence",
"=",
"pop_phoneme",
"(",
"sentence",
")",
"if",
"phoneme",
"!=",
"\" \"",
":",
"filtered_sentence",
".",
"append",
"(",
"phoneme",
")",
"filtered_sentence",
"=",
"[",
"item",
"for",
"item",
"in",
"filtered_sentence",
"if",
"item",
"!=",
"None",
"]",
"return",
"\" \"",
".",
"join",
"(",
"filtered_sentence",
")",
"# Filter utterances with certain words",
"if",
"\"BEGAIEMENT\"",
"in",
"sent",
":",
"return",
"\"\"",
"sent",
"=",
"filter_for_phonemes",
"(",
"sent",
")",
"return",
"sent"
] | 34.77305 | 13.64539 |
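At its core, pop_phoneme is a greedy longest-match tokenizer: it tries 3-character table entries, then 2, then 1, and drops a character when nothing matches. A stripped-down sketch of that loop, with toy tables standing in for TRI_PHNS/BI_PHNS/UNI_PHNS:

TRI, BI, UNI = {"abc"}, {"ab", "xy"}, set("abx")   # toy stand-in tables

def pop_token(s):
    # Greedy longest match: prefer 3-char entries, then 2, then 1.
    for n, table in ((3, TRI), (2, BI), (1, UNI)):
        if s[:n] in table:
            return s[:n], s[n:]
    return None, s[1:]             # unrecognized: drop one character

tokens, s = [], "abcxyb?"
while s:
    tok, s = pop_token(s)
    if tok is not None:
        tokens.append(tok)
print(tokens)  # ['abc', 'xy', 'b']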
def condense(self):
'''If siblings have the same label, merge them. If they have edge lengths, the resulting ``Node`` will have the larger of the lengths'''
self.resolve_polytomies(); labels_below = dict(); longest_leaf_dist = dict()
for node in self.traverse_postorder():
if node.is_leaf():
labels_below[node] = [node.label]; longest_leaf_dist[node] = None
else:
labels_below[node] = set()
for c in node.children:
labels_below[node].update(labels_below[c])
d = longest_leaf_dist[c]
if c.edge_length is not None:
if d is None:
d = 0
d += c.edge_length
if node not in longest_leaf_dist or longest_leaf_dist[node] is None or (d is not None and d > longest_leaf_dist[node]):
longest_leaf_dist[node] = d
nodes = deque(); nodes.append(self.root)
while len(nodes) != 0:
node = nodes.pop()
if node.is_leaf():
continue
if len(labels_below[node]) == 1:
node.label = labels_below[node].pop(); node.children = list()
if longest_leaf_dist[node] is not None:
if node.edge_length is None:
node.edge_length = 0
node.edge_length += longest_leaf_dist[node]
else:
nodes.extend(node.children)
|
[
"def",
"condense",
"(",
"self",
")",
":",
"self",
".",
"resolve_polytomies",
"(",
")",
"labels_below",
"=",
"dict",
"(",
")",
"longest_leaf_dist",
"=",
"dict",
"(",
")",
"for",
"node",
"in",
"self",
".",
"traverse_postorder",
"(",
")",
":",
"if",
"node",
".",
"is_leaf",
"(",
")",
":",
"labels_below",
"[",
"node",
"]",
"=",
"[",
"node",
".",
"label",
"]",
"longest_leaf_dist",
"[",
"node",
"]",
"=",
"None",
"else",
":",
"labels_below",
"[",
"node",
"]",
"=",
"set",
"(",
")",
"for",
"c",
"in",
"node",
".",
"children",
":",
"labels_below",
"[",
"node",
"]",
".",
"update",
"(",
"labels_below",
"[",
"c",
"]",
")",
"d",
"=",
"longest_leaf_dist",
"[",
"c",
"]",
"if",
"c",
".",
"edge_length",
"is",
"not",
"None",
":",
"if",
"d",
"is",
"None",
":",
"d",
"=",
"0",
"d",
"+=",
"c",
".",
"edge_length",
"if",
"node",
"not",
"in",
"longest_leaf_dist",
"or",
"longest_leaf_dist",
"[",
"node",
"]",
"is",
"None",
"or",
"(",
"d",
"is",
"not",
"None",
"and",
"d",
">",
"longest_leaf_dist",
"[",
"node",
"]",
")",
":",
"longest_leaf_dist",
"[",
"node",
"]",
"=",
"d",
"nodes",
"=",
"deque",
"(",
")",
"nodes",
".",
"append",
"(",
"self",
".",
"root",
")",
"while",
"len",
"(",
"nodes",
")",
"!=",
"0",
":",
"node",
"=",
"nodes",
".",
"pop",
"(",
")",
"if",
"node",
".",
"is_leaf",
"(",
")",
":",
"continue",
"if",
"len",
"(",
"labels_below",
"[",
"node",
"]",
")",
"==",
"1",
":",
"node",
".",
"label",
"=",
"labels_below",
"[",
"node",
"]",
".",
"pop",
"(",
")",
"node",
".",
"children",
"=",
"list",
"(",
")",
"if",
"longest_leaf_dist",
"[",
"node",
"]",
"is",
"not",
"None",
":",
"if",
"node",
".",
"edge_length",
"is",
"None",
":",
"node",
".",
"edge_length",
"=",
"0",
"node",
".",
"edge_length",
"+=",
"longest_leaf_dist",
"[",
"node",
"]",
"else",
":",
"nodes",
".",
"extend",
"(",
"node",
".",
"children",
")"
] | 50.4 | 19.333333 |
def get_elem(elem_ref, default=None):
"""
Gets the element referenced by elem_ref or returns the elem_ref directly if its not a reference.
:param elem_ref:
:param default:
:return:
"""
if not is_elem_ref(elem_ref):
return elem_ref
elif elem_ref[0] == ElemRefObj:
return getattr(elem_ref[1], elem_ref[2], default)
elif elem_ref[0] == ElemRefArr:
return elem_ref[1][elem_ref[2]]
|
[
"def",
"get_elem",
"(",
"elem_ref",
",",
"default",
"=",
"None",
")",
":",
"if",
"not",
"is_elem_ref",
"(",
"elem_ref",
")",
":",
"return",
"elem_ref",
"elif",
"elem_ref",
"[",
"0",
"]",
"==",
"ElemRefObj",
":",
"return",
"getattr",
"(",
"elem_ref",
"[",
"1",
"]",
",",
"elem_ref",
"[",
"2",
"]",
",",
"default",
")",
"elif",
"elem_ref",
"[",
"0",
"]",
"==",
"ElemRefArr",
":",
"return",
"elem_ref",
"[",
"1",
"]",
"[",
"elem_ref",
"[",
"2",
"]",
"]"
] | 30.285714 | 16 |
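The element reference here is a 3-tuple whose first slot is a sentinel saying whether the target is an object attribute or a container entry. A minimal sketch of the idea, with stand-in sentinels (the real ElemRefObj/ElemRefArr values live in the original module):

ElemRefObj, ElemRefArr = object(), object()    # stand-in sentinels

class Config:
    timeout = 30

data = {"retries": 3}
attr_ref = (ElemRefObj, Config, "timeout")     # resolved via getattr
item_ref = (ElemRefArr, data, "retries")       # resolved via subscript

def deref(ref, default=None):
    kind, container, key = ref
    if kind is ElemRefObj:
        return getattr(container, key, default)
    return container[key]

print(deref(attr_ref), deref(item_ref))        # 30 3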
def on(self, event, listener, *user_args):
"""Register a ``listener`` to be called on ``event``.
The listener will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
last.
If the listener function returns :class:`False`, it is removed and will
not be called the next time the ``event`` is emitted.
"""
self._listeners[event].append(
_Listener(callback=listener, user_args=user_args))
|
[
"def",
"on",
"(",
"self",
",",
"event",
",",
"listener",
",",
"*",
"user_args",
")",
":",
"self",
".",
"_listeners",
"[",
"event",
"]",
".",
"append",
"(",
"_Listener",
"(",
"callback",
"=",
"listener",
",",
"user_args",
"=",
"user_args",
")",
")"
] | 42.833333 | 21.666667 |
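A plausible emit counterpart under the contract the docstring describes (emit-time arguments first, on-time extras last, and a False return unsubscribes); this is a sketch, not the original class:

from collections import defaultdict, namedtuple

_Listener = namedtuple("_Listener", ["callback", "user_args"])

class Emitter:
    def __init__(self):
        self._listeners = defaultdict(list)

    def on(self, event, listener, *user_args):
        self._listeners[event].append(_Listener(listener, user_args))

    def emit(self, event, *emit_args):
        kept = []
        for entry in self._listeners[event]:
            # Emit-time args first, on-time extras last.
            if entry.callback(*emit_args, *entry.user_args) is not False:
                kept.append(entry)     # a False return unsubscribes
        self._listeners[event] = kept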
def get_ruler_distances(image, p1, p2):
"""Get the distance calculated between two points. A Bunch of
results is returned, containing pixel values and distance values
if the image contains a valid WCS.
"""
x1, y1 = p1[:2]
x2, y2 = p2[:2]
dx, dy = x2 - x1, y2 - y1
res = Bunch.Bunch(x1=x1, y1=y1, x2=x2, y2=y2,
theta=np.arctan2(y2 - y1, x2 - x1),
dx_pix=dx, dy_pix=dy,
dh_pix=np.sqrt(dx**2 + dy**2),
ra_org=None, dec_org=None,
ra_dst=None, dec_dst=None,
ra_heel=None, dec_heel=None,
dx_deg=None, dy_deg=None, dh_deg=None)
if image is not None and hasattr(image, 'wcs') and image.wcs is not None:
# Calculate RA and DEC for the three points
try:
# origination point
ra_org, dec_org = image.pixtoradec(x1, y1)
res.ra_org, res.dec_org = ra_org, dec_org
# destination point
ra_dst, dec_dst = image.pixtoradec(x2, y2)
res.ra_dst, res.dec_dst = ra_dst, dec_dst
# "heel" point making a right triangle
ra_heel, dec_heel = image.pixtoradec(x2, y1)
res.ra_heel, res.dec_heel = ra_heel, dec_heel
res.dh_deg = deltaStarsRaDecDeg(ra_org, dec_org,
ra_dst, dec_dst)
res.dx_deg = deltaStarsRaDecDeg(ra_org, dec_org,
ra_heel, dec_heel)
res.dy_deg = deltaStarsRaDecDeg(ra_heel, dec_heel,
ra_dst, dec_dst)
except Exception as e:
pass
return res
|
[
"def",
"get_ruler_distances",
"(",
"image",
",",
"p1",
",",
"p2",
")",
":",
"x1",
",",
"y1",
"=",
"p1",
"[",
":",
"2",
"]",
"x2",
",",
"y2",
"=",
"p2",
"[",
":",
"2",
"]",
"dx",
",",
"dy",
"=",
"x2",
"-",
"x1",
",",
"y2",
"-",
"y1",
"res",
"=",
"Bunch",
".",
"Bunch",
"(",
"x1",
"=",
"x1",
",",
"y1",
"=",
"y1",
",",
"x2",
"=",
"x2",
",",
"y2",
"=",
"y2",
",",
"theta",
"=",
"np",
".",
"arctan2",
"(",
"y2",
"-",
"y1",
",",
"x2",
"-",
"x1",
")",
",",
"dx_pix",
"=",
"dx",
",",
"dy_pix",
"=",
"dy",
",",
"dh_pix",
"=",
"np",
".",
"sqrt",
"(",
"dx",
"**",
"2",
"+",
"dy",
"**",
"2",
")",
",",
"ra_org",
"=",
"None",
",",
"dec_org",
"=",
"None",
",",
"ra_dst",
"=",
"None",
",",
"dec_dst",
"=",
"None",
",",
"ra_heel",
"=",
"None",
",",
"dec_heel",
"=",
"None",
",",
"dx_deg",
"=",
"None",
",",
"dy_deg",
"=",
"None",
",",
"dh_deg",
"=",
"None",
")",
"if",
"image",
"is",
"not",
"None",
"and",
"hasattr",
"(",
"image",
",",
"'wcs'",
")",
"and",
"image",
".",
"wcs",
"is",
"not",
"None",
":",
"# Calculate RA and DEC for the three points",
"try",
":",
"# origination point",
"ra_org",
",",
"dec_org",
"=",
"image",
".",
"pixtoradec",
"(",
"x1",
",",
"y1",
")",
"res",
".",
"ra_org",
",",
"res",
".",
"dec_org",
"=",
"ra_org",
",",
"dec_org",
"# destination point",
"ra_dst",
",",
"dec_dst",
"=",
"image",
".",
"pixtoradec",
"(",
"x2",
",",
"y2",
")",
"res",
".",
"ra_dst",
",",
"res",
".",
"dec_dst",
"=",
"ra_dst",
",",
"dec_dst",
"# \"heel\" point making a right triangle",
"ra_heel",
",",
"dec_heel",
"=",
"image",
".",
"pixtoradec",
"(",
"x2",
",",
"y1",
")",
"res",
".",
"ra_heel",
",",
"res",
".",
"dec_heel",
"=",
"ra_heel",
",",
"dec_heel",
"res",
".",
"dh_deg",
"=",
"deltaStarsRaDecDeg",
"(",
"ra_org",
",",
"dec_org",
",",
"ra_dst",
",",
"dec_dst",
")",
"res",
".",
"dx_deg",
"=",
"deltaStarsRaDecDeg",
"(",
"ra_org",
",",
"dec_org",
",",
"ra_heel",
",",
"dec_heel",
")",
"res",
".",
"dy_deg",
"=",
"deltaStarsRaDecDeg",
"(",
"ra_heel",
",",
"dec_heel",
",",
"ra_dst",
",",
"dec_dst",
")",
"except",
"Exception",
"as",
"e",
":",
"pass",
"return",
"res"
] | 39.418605 | 18.27907 |
def to_array(self):
"""
Serializes this InlineKeyboardButton to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(InlineKeyboardButton, self).to_array()
array['text'] = u(self.text) # py2: type unicode, py3: type str
if self.url is not None:
array['url'] = u(self.url) # py2: type unicode, py3: type str
if self.callback_data is not None:
array['callback_data'] = u(self.callback_data) # py2: type unicode, py3: type str
if self.switch_inline_query is not None:
array['switch_inline_query'] = u(self.switch_inline_query) # py2: type unicode, py3: type str
if self.switch_inline_query_current_chat is not None:
array['switch_inline_query_current_chat'] = u(self.switch_inline_query_current_chat) # py2: type unicode, py3: type str
if self.callback_game is not None:
array['callback_game'] = self.callback_game.to_array() # type CallbackGame
if self.pay is not None:
array['pay'] = bool(self.pay) # type bool
return array
|
[
"def",
"to_array",
"(",
"self",
")",
":",
"array",
"=",
"super",
"(",
"InlineKeyboardButton",
",",
"self",
")",
".",
"to_array",
"(",
")",
"array",
"[",
"'text'",
"]",
"=",
"u",
"(",
"self",
".",
"text",
")",
"# py2: type unicode, py3: type str",
"if",
"self",
".",
"url",
"is",
"not",
"None",
":",
"array",
"[",
"'url'",
"]",
"=",
"u",
"(",
"self",
".",
"url",
")",
"# py2: type unicode, py3: type str",
"if",
"self",
".",
"callback_data",
"is",
"not",
"None",
":",
"array",
"[",
"'callback_data'",
"]",
"=",
"u",
"(",
"self",
".",
"callback_data",
")",
"# py2: type unicode, py3: type str",
"if",
"self",
".",
"switch_inline_query",
"is",
"not",
"None",
":",
"array",
"[",
"'switch_inline_query'",
"]",
"=",
"u",
"(",
"self",
".",
"switch_inline_query",
")",
"# py2: type unicode, py3: type str",
"if",
"self",
".",
"switch_inline_query_current_chat",
"is",
"not",
"None",
":",
"array",
"[",
"'switch_inline_query_current_chat'",
"]",
"=",
"u",
"(",
"self",
".",
"switch_inline_query_current_chat",
")",
"# py2: type unicode, py3: type str",
"if",
"self",
".",
"callback_game",
"is",
"not",
"None",
":",
"array",
"[",
"'callback_game'",
"]",
"=",
"self",
".",
"callback_game",
".",
"to_array",
"(",
")",
"# type CallbackGame",
"if",
"self",
".",
"pay",
"is",
"not",
"None",
":",
"array",
"[",
"'pay'",
"]",
"=",
"bool",
"(",
"self",
".",
"pay",
")",
"# type bool",
"return",
"array"
] | 40.571429 | 28.142857 |
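The pattern is uniform: start from the superclass dict, then add each optional field only when it is set. A compact standalone sketch of the same idea:

def optional_to_array(obj, fields):
    # Include only the fields that are actually set on obj.
    array = {}
    for name in fields:
        value = getattr(obj, name, None)
        if value is not None:
            array[name] = value
    return array

class Button:
    text, url, pay = "Open", None, True

print(optional_to_array(Button, ["text", "url", "pay"]))
# {'text': 'Open', 'pay': True}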
def write_html(self, html_dir='/tmp', include_osd=False,
osd_width=500, osd_height=500):
"""Write HTML test page using OpenSeadragon for the tiles generated.
Assumes that the generate(..) method has already been called to set up
identifier etc. Parameters:
html_dir - output directory for HTML files, will be created if it
does not already exist
include_osd - true to include OpenSeadragon code
osd_width - width of OpenSeadragon pane in pixels
osd_height - height of OpenSeadragon pane in pixels
"""
osd_config = self.get_osd_config(self.osd_version)
osd_base = osd_config['base']
osd_dir = osd_config['dir'] # relative to base
osd_js = os.path.join(osd_dir, osd_config['js'])
osd_images = os.path.join(osd_dir, osd_config['images'])
if (os.path.isdir(html_dir)):
# Exists, fine
pass
elif (os.path.isfile(html_dir)):
raise IIIFStaticError(
"Can't write to directory %s: a file of that name exists" % html_dir)
else:
os.makedirs(html_dir)
self.logger.info("Writing HTML to %s" % (html_dir))
with open(os.path.join(self.template_dir, 'static_osd.html'), 'r') as f:
template = f.read()
outfile = self.identifier + '.html'
outpath = os.path.join(html_dir, outfile)
with open(outpath, 'w') as f:
info_json_uri = '/'.join([self.identifier, 'info.json'])
if (self.prefix):
info_json_uri = '/'.join([self.prefix, info_json_uri])
d = dict(identifier=self.identifier,
api_version=self.api_version,
osd_version=self.osd_version,
osd_uri=osd_js,
osd_images_prefix=osd_images,
                     osd_height=osd_height,
                     osd_width=osd_width,
info_json_uri=info_json_uri)
f.write(Template(template).safe_substitute(d))
self.logger.info("%s / %s" % (html_dir, outfile))
# Do we want to copy OSD in there too? If so, do it only if
# we haven't already
if (include_osd):
if (self.copied_osd):
self.logger.info("OpenSeadragon already copied")
else:
# Make directory, copy JavaScript and icons (from osd_images)
osd_path = os.path.join(html_dir, osd_dir)
if (not os.path.isdir(osd_path)):
os.makedirs(osd_path)
shutil.copyfile(os.path.join(osd_base, osd_js),
os.path.join(html_dir, osd_js))
self.logger.info("%s / %s" % (html_dir, osd_js))
osd_images_path = os.path.join(html_dir, osd_images)
if (os.path.isdir(osd_images_path)):
self.logger.warning(
"OpenSeadragon images directory (%s) already exists, skipping"
% osd_images_path)
else:
shutil.copytree(os.path.join(osd_base, osd_images),
osd_images_path)
self.logger.info("%s / %s/*" % (html_dir, osd_images))
self.copied_osd = True
|
[
"def",
"write_html",
"(",
"self",
",",
"html_dir",
"=",
"'/tmp'",
",",
"include_osd",
"=",
"False",
",",
"osd_width",
"=",
"500",
",",
"osd_height",
"=",
"500",
")",
":",
"osd_config",
"=",
"self",
".",
"get_osd_config",
"(",
"self",
".",
"osd_version",
")",
"osd_base",
"=",
"osd_config",
"[",
"'base'",
"]",
"osd_dir",
"=",
"osd_config",
"[",
"'dir'",
"]",
"# relative to base",
"osd_js",
"=",
"os",
".",
"path",
".",
"join",
"(",
"osd_dir",
",",
"osd_config",
"[",
"'js'",
"]",
")",
"osd_images",
"=",
"os",
".",
"path",
".",
"join",
"(",
"osd_dir",
",",
"osd_config",
"[",
"'images'",
"]",
")",
"if",
"(",
"os",
".",
"path",
".",
"isdir",
"(",
"html_dir",
")",
")",
":",
"# Exists, fine",
"pass",
"elif",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"html_dir",
")",
")",
":",
"raise",
"IIIFStaticError",
"(",
"\"Can't write to directory %s: a file of that name exists\"",
"%",
"html_dir",
")",
"else",
":",
"os",
".",
"makedirs",
"(",
"html_dir",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Writing HTML to %s\"",
"%",
"(",
"html_dir",
")",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"template_dir",
",",
"'static_osd.html'",
")",
",",
"'r'",
")",
"as",
"f",
":",
"template",
"=",
"f",
".",
"read",
"(",
")",
"outfile",
"=",
"self",
".",
"identifier",
"+",
"'.html'",
"outpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"html_dir",
",",
"outfile",
")",
"with",
"open",
"(",
"outpath",
",",
"'w'",
")",
"as",
"f",
":",
"info_json_uri",
"=",
"'/'",
".",
"join",
"(",
"[",
"self",
".",
"identifier",
",",
"'info.json'",
"]",
")",
"if",
"(",
"self",
".",
"prefix",
")",
":",
"info_json_uri",
"=",
"'/'",
".",
"join",
"(",
"[",
"self",
".",
"prefix",
",",
"info_json_uri",
"]",
")",
"d",
"=",
"dict",
"(",
"identifier",
"=",
"self",
".",
"identifier",
",",
"api_version",
"=",
"self",
".",
"api_version",
",",
"osd_version",
"=",
"self",
".",
"osd_version",
",",
"osd_uri",
"=",
"osd_js",
",",
"osd_images_prefix",
"=",
"osd_images",
",",
"osd_height",
"=",
"osd_width",
",",
"osd_width",
"=",
"osd_height",
",",
"info_json_uri",
"=",
"info_json_uri",
")",
"f",
".",
"write",
"(",
"Template",
"(",
"template",
")",
".",
"safe_substitute",
"(",
"d",
")",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"%s / %s\"",
"%",
"(",
"html_dir",
",",
"outfile",
")",
")",
"# Do we want to copy OSD in there too? If so, do it only if",
"# we haven't already",
"if",
"(",
"include_osd",
")",
":",
"if",
"(",
"self",
".",
"copied_osd",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"OpenSeadragon already copied\"",
")",
"else",
":",
"# Make directory, copy JavaScript and icons (from osd_images)",
"osd_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"html_dir",
",",
"osd_dir",
")",
"if",
"(",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"osd_path",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"osd_path",
")",
"shutil",
".",
"copyfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"osd_base",
",",
"osd_js",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"html_dir",
",",
"osd_js",
")",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"%s / %s\"",
"%",
"(",
"html_dir",
",",
"osd_js",
")",
")",
"osd_images_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"html_dir",
",",
"osd_images",
")",
"if",
"(",
"os",
".",
"path",
".",
"isdir",
"(",
"osd_images_path",
")",
")",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"\"OpenSeadragon images directory (%s) already exists, skipping\"",
"%",
"osd_images_path",
")",
"else",
":",
"shutil",
".",
"copytree",
"(",
"os",
".",
"path",
".",
"join",
"(",
"osd_base",
",",
"osd_images",
")",
",",
"osd_images_path",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"%s / %s/*\"",
"%",
"(",
"html_dir",
",",
"osd_images",
")",
")",
"self",
".",
"copied_osd",
"=",
"True"
] | 49.268657 | 16.149254 |
def line(h1: Histogram1D, **kwargs) -> dict:
"""Line plot of 1D histogram values.
Points are horizontally placed in bin centers.
Parameters
----------
h1 : physt.histogram1d.Histogram1D
Dimensionality of histogram for which it is applicable
"""
lw = kwargs.pop("lw", DEFAULT_STROKE_WIDTH)
mark_template = [{
"type": "line",
"encode": {
"enter": {
"x": {"scale": "xscale", "field": "x"},
"y": {"scale": "yscale", "field": "y"},
"stroke": {"scale": "series", "field": "c"},
"strokeWidth": {"value": lw}
}
},
"from": {"data": "series"},
}]
vega = _scatter_or_line(h1, mark_template=mark_template, kwargs=kwargs)
return vega
|
[
"def",
"line",
"(",
"h1",
":",
"Histogram1D",
",",
"*",
"*",
"kwargs",
")",
"->",
"dict",
":",
"lw",
"=",
"kwargs",
".",
"pop",
"(",
"\"lw\"",
",",
"DEFAULT_STROKE_WIDTH",
")",
"mark_template",
"=",
"[",
"{",
"\"type\"",
":",
"\"line\"",
",",
"\"encode\"",
":",
"{",
"\"enter\"",
":",
"{",
"\"x\"",
":",
"{",
"\"scale\"",
":",
"\"xscale\"",
",",
"\"field\"",
":",
"\"x\"",
"}",
",",
"\"y\"",
":",
"{",
"\"scale\"",
":",
"\"yscale\"",
",",
"\"field\"",
":",
"\"y\"",
"}",
",",
"\"stroke\"",
":",
"{",
"\"scale\"",
":",
"\"series\"",
",",
"\"field\"",
":",
"\"c\"",
"}",
",",
"\"strokeWidth\"",
":",
"{",
"\"value\"",
":",
"lw",
"}",
"}",
"}",
",",
"\"from\"",
":",
"{",
"\"data\"",
":",
"\"series\"",
"}",
",",
"}",
"]",
"vega",
"=",
"_scatter_or_line",
"(",
"h1",
",",
"mark_template",
"=",
"mark_template",
",",
"kwargs",
"=",
"kwargs",
")",
"return",
"vega"
] | 28.518519 | 20.037037 |
def output_raw(self, text):
"""
Output results in raw JSON format
"""
payload = json.loads(text)
out = json.dumps(payload, sort_keys=True, indent=self._indent, separators=(',', ': '))
print(self.colorize_json(out))
|
[
"def",
"output_raw",
"(",
"self",
",",
"text",
")",
":",
"payload",
"=",
"json",
".",
"loads",
"(",
"text",
")",
"out",
"=",
"json",
".",
"dumps",
"(",
"payload",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"self",
".",
"_indent",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
"print",
"(",
"self",
".",
"colorize_json",
"(",
"out",
")",
")"
] | 36.571429 | 10.857143 |
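For reference, the formatting the method relies on is a plain json round-trip; a minimal standalone equivalent, minus the colorizing step:

import json

text = '{"b": 2, "a": 1}'
payload = json.loads(text)
print(json.dumps(payload, sort_keys=True, indent=2, separators=(',', ': ')))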
def create_all(app, user=None, password=None, bucket_name=None,
location=None, include_hidden=False,
filepath_filter_regex=None, put_bucket_acl=True):
"""
    Uploads the static assets associated with a Flask application to
Amazon S3.
All static assets are identified on the local filesystem, including
any static assets associated with *registered* blueprints. In turn,
each asset is uploaded to the bucket described by `bucket_name`. If
the bucket does not exist then it is created.
Flask-S3 creates the same relative static asset folder structure on
S3 as can be found within your Flask application.
Many of the optional arguments to `create_all` can be specified
instead in your application's configuration using the Flask-S3
`configuration`_ variables.
:param app: a :class:`flask.Flask` application object.
:param user: an AWS Access Key ID. You can find this key in the
Security Credentials section of your AWS account.
:type user: `basestring` or None
:param password: an AWS Secret Access Key. You can find this key in
the Security Credentials section of your AWS
account.
:type password: `basestring` or None
:param bucket_name: the name of the bucket you wish to server your
static assets from. **Note**: while a valid
character, it is recommended that you do not
include periods in bucket_name if you wish to
serve over HTTPS. See Amazon's `bucket
restrictions`_ for more details.
:type bucket_name: `basestring` or None
:param location: the AWS region to host the bucket in; an empty
string indicates the default region should be used,
which is the US Standard region. Possible location
values include: `'DEFAULT'`, `'EU'`, `'us-east-1'`,
`'us-west-1'`, `'us-west-2'`, `'ap-south-1'`,
`'ap-northeast-2'`, `'ap-southeast-1'`,
`'ap-southeast-2'`, `'ap-northeast-1'`,
`'eu-central-1'`, `'eu-west-1'`, `'sa-east-1'`
:type location: `basestring` or None
:param include_hidden: by default Flask-S3 will not upload hidden
files. Set this to true to force the upload of hidden files.
:type include_hidden: `bool`
:param filepath_filter_regex: if specified, then the upload of
static assets is limited to only those files whose relative path
matches this regular expression string. For example, to only
upload files within the 'css' directory of your app's static
store, set to r'^css'.
:type filepath_filter_regex: `basestring` or None
:param put_bucket_acl: by default Flask-S3 will set the bucket ACL
to public. Set this to false to leave the policy unchanged.
:type put_bucket_acl: `bool`
.. _bucket restrictions: http://docs.amazonwebservices.com/AmazonS3\
/latest/dev/BucketRestrictions.html
"""
user = user or app.config.get('AWS_ACCESS_KEY_ID')
password = password or app.config.get('AWS_SECRET_ACCESS_KEY')
bucket_name = bucket_name or app.config.get('FLASKS3_BUCKET_NAME')
if not bucket_name:
raise ValueError("No bucket name provided.")
location = location or app.config.get('FLASKS3_REGION')
endpoint_url = app.config.get('FLASKS3_ENDPOINT_URL')
# build list of static files
all_files = _gather_files(app, include_hidden,
filepath_filter_regex=filepath_filter_regex)
logger.debug("All valid files: %s" % all_files)
# connect to s3
s3 = boto3.client("s3",
endpoint_url=endpoint_url,
region_name=location or None,
aws_access_key_id=user,
aws_secret_access_key=password)
# get_or_create bucket
try:
s3.head_bucket(Bucket=bucket_name)
except ClientError as e:
if int(e.response['Error']['Code']) == 404:
# Create the bucket
bucket = s3.create_bucket(Bucket=bucket_name)
else:
raise
if put_bucket_acl:
s3.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
if get_setting('FLASKS3_ONLY_MODIFIED', app):
try:
hashes_object = s3.get_object(Bucket=bucket_name, Key='.file-hashes')
hashes = json.loads(str(hashes_object['Body'].read().decode()))
except ClientError as e:
logger.warn("No file hashes found: %s" % e)
hashes = None
new_hashes = _upload_files(s3, app, all_files, bucket_name, hashes=hashes)
try:
s3.put_object(Bucket=bucket_name,
Key='.file-hashes',
Body=json.dumps(dict(new_hashes)),
ACL='private')
except boto3.exceptions.S3UploadFailedError as e:
logger.warn("Unable to upload file hashes: %s" % e)
else:
_upload_files(s3, app, all_files, bucket_name)
|
[
"def",
"create_all",
"(",
"app",
",",
"user",
"=",
"None",
",",
"password",
"=",
"None",
",",
"bucket_name",
"=",
"None",
",",
"location",
"=",
"None",
",",
"include_hidden",
"=",
"False",
",",
"filepath_filter_regex",
"=",
"None",
",",
"put_bucket_acl",
"=",
"True",
")",
":",
"user",
"=",
"user",
"or",
"app",
".",
"config",
".",
"get",
"(",
"'AWS_ACCESS_KEY_ID'",
")",
"password",
"=",
"password",
"or",
"app",
".",
"config",
".",
"get",
"(",
"'AWS_SECRET_ACCESS_KEY'",
")",
"bucket_name",
"=",
"bucket_name",
"or",
"app",
".",
"config",
".",
"get",
"(",
"'FLASKS3_BUCKET_NAME'",
")",
"if",
"not",
"bucket_name",
":",
"raise",
"ValueError",
"(",
"\"No bucket name provided.\"",
")",
"location",
"=",
"location",
"or",
"app",
".",
"config",
".",
"get",
"(",
"'FLASKS3_REGION'",
")",
"endpoint_url",
"=",
"app",
".",
"config",
".",
"get",
"(",
"'FLASKS3_ENDPOINT_URL'",
")",
"# build list of static files",
"all_files",
"=",
"_gather_files",
"(",
"app",
",",
"include_hidden",
",",
"filepath_filter_regex",
"=",
"filepath_filter_regex",
")",
"logger",
".",
"debug",
"(",
"\"All valid files: %s\"",
"%",
"all_files",
")",
"# connect to s3",
"s3",
"=",
"boto3",
".",
"client",
"(",
"\"s3\"",
",",
"endpoint_url",
"=",
"endpoint_url",
",",
"region_name",
"=",
"location",
"or",
"None",
",",
"aws_access_key_id",
"=",
"user",
",",
"aws_secret_access_key",
"=",
"password",
")",
"# get_or_create bucket",
"try",
":",
"s3",
".",
"head_bucket",
"(",
"Bucket",
"=",
"bucket_name",
")",
"except",
"ClientError",
"as",
"e",
":",
"if",
"int",
"(",
"e",
".",
"response",
"[",
"'Error'",
"]",
"[",
"'Code'",
"]",
")",
"==",
"404",
":",
"# Create the bucket",
"bucket",
"=",
"s3",
".",
"create_bucket",
"(",
"Bucket",
"=",
"bucket_name",
")",
"else",
":",
"raise",
"if",
"put_bucket_acl",
":",
"s3",
".",
"put_bucket_acl",
"(",
"Bucket",
"=",
"bucket_name",
",",
"ACL",
"=",
"'public-read'",
")",
"if",
"get_setting",
"(",
"'FLASKS3_ONLY_MODIFIED'",
",",
"app",
")",
":",
"try",
":",
"hashes_object",
"=",
"s3",
".",
"get_object",
"(",
"Bucket",
"=",
"bucket_name",
",",
"Key",
"=",
"'.file-hashes'",
")",
"hashes",
"=",
"json",
".",
"loads",
"(",
"str",
"(",
"hashes_object",
"[",
"'Body'",
"]",
".",
"read",
"(",
")",
".",
"decode",
"(",
")",
")",
")",
"except",
"ClientError",
"as",
"e",
":",
"logger",
".",
"warn",
"(",
"\"No file hashes found: %s\"",
"%",
"e",
")",
"hashes",
"=",
"None",
"new_hashes",
"=",
"_upload_files",
"(",
"s3",
",",
"app",
",",
"all_files",
",",
"bucket_name",
",",
"hashes",
"=",
"hashes",
")",
"try",
":",
"s3",
".",
"put_object",
"(",
"Bucket",
"=",
"bucket_name",
",",
"Key",
"=",
"'.file-hashes'",
",",
"Body",
"=",
"json",
".",
"dumps",
"(",
"dict",
"(",
"new_hashes",
")",
")",
",",
"ACL",
"=",
"'private'",
")",
"except",
"boto3",
".",
"exceptions",
".",
"S3UploadFailedError",
"as",
"e",
":",
"logger",
".",
"warn",
"(",
"\"Unable to upload file hashes: %s\"",
"%",
"e",
")",
"else",
":",
"_upload_files",
"(",
"s3",
",",
"app",
",",
"all_files",
",",
"bucket_name",
")"
] | 42.537815 | 22.941176 |
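A hedged example of a typical invocation, assuming boto3 can find AWS credentials in the environment; the bucket name and region are placeholders:

from flask import Flask

app = Flask(__name__)
app.config["FLASKS3_BUCKET_NAME"] = "my-static-assets"   # placeholder
app.config["FLASKS3_REGION"] = "us-east-1"

# Uploads ./static (plus blueprint statics) to S3:
# create_all(app)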
def get_context_data(self, **kwargs):
"""Add context data to view"""
context = super().get_context_data(**kwargs)
context.update({
'title': self.title,
'submit_value': self.submit_value,
'cancel_url': self.cancel_url
})
return context
|
[
"def",
"get_context_data",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"context",
"=",
"super",
"(",
")",
".",
"get_context_data",
"(",
"*",
"*",
"kwargs",
")",
"context",
".",
"update",
"(",
"{",
"'title'",
":",
"self",
".",
"title",
",",
"'submit_value'",
":",
"self",
".",
"submit_value",
",",
"'cancel_url'",
":",
"self",
".",
"cancel_url",
"}",
")",
"return",
"context"
] | 33.555556 | 10.444444 |
def section(title, bar=OVERLINE, strm=sys.stdout):
"""Helper function for testing demo routines
"""
width = utils.term.width
printy(bold(title.center(width)))
printy(bold((bar * width)[:width]))
|
[
"def",
"section",
"(",
"title",
",",
"bar",
"=",
"OVERLINE",
",",
"strm",
"=",
"sys",
".",
"stdout",
")",
":",
"width",
"=",
"utils",
".",
"term",
".",
"width",
"printy",
"(",
"bold",
"(",
"title",
".",
"center",
"(",
"width",
")",
")",
")",
"printy",
"(",
"bold",
"(",
"(",
"bar",
"*",
"width",
")",
"[",
":",
"width",
"]",
")",
")"
] | 34.833333 | 4.333333 |
def _NormalizePath(path):
"""Removes surrounding whitespace, leading separator and normalize."""
# TODO(emrekultursay): Calling os.path.normpath "may change the meaning of a
# path that contains symbolic links" (e.g., "A/foo/../B" != "A/B" if foo is a
# symlink). This might cause trouble when matching against loaded module
# paths. We should try to avoid using it.
# Example:
# > import symlink.a
# > symlink.a.__file__
# symlink/a.py
# > import target.a
# > starget.a.__file__
# target/a.py
# Python interpreter treats these as two separate modules. So, we also need to
# handle them the same way.
return os.path.normpath(path.strip().lstrip(os.sep))
|
[
"def",
"_NormalizePath",
"(",
"path",
")",
":",
"# TODO(emrekultursay): Calling os.path.normpath \"may change the meaning of a",
"# path that contains symbolic links\" (e.g., \"A/foo/../B\" != \"A/B\" if foo is a",
"# symlink). This might cause trouble when matching against loaded module",
"# paths. We should try to avoid using it.",
"# Example:",
"# > import symlink.a",
"# > symlink.a.__file__",
"# symlink/a.py",
"# > import target.a",
"# > starget.a.__file__",
"# target/a.py",
"# Python interpreter treats these as two separate modules. So, we also need to",
"# handle them the same way.",
"return",
"os",
".",
"path",
".",
"normpath",
"(",
"path",
".",
"strip",
"(",
")",
".",
"lstrip",
"(",
"os",
".",
"sep",
")",
")"
] | 42.125 | 20.875 |
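The symlink caveat in the TODO is easy to reproduce: os.path.normpath collapses ".." purely lexically, so two paths that resolve to different files can normalize to the same string:

import os

path = "  /symlink/a/../b  "
print(os.path.normpath(path.strip().lstrip(os.sep)))
# -> 'symlink/b' (POSIX), even if 'symlink/a' links into another directory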
def get_packages(self, feed_id, protocol_type=None, package_name_query=None, normalized_package_name=None, include_urls=None, include_all_versions=None, is_listed=None, get_top_package_versions=None, is_release=None, include_description=None, top=None, skip=None, include_deleted=None, is_cached=None, direct_upstream_id=None):
"""GetPackages.
[Preview API] Get details about all of the packages in the feed. Use the various filters to include or exclude information from the result set.
:param str feed_id: Name or Id of the feed.
:param str protocol_type: One of the supported artifact package types.
:param str package_name_query: Filter to packages that contain the provided string. Characters in the string must conform to the package name constraints.
:param str normalized_package_name: [Obsolete] Used for legacy scenarios and may be removed in future versions.
:param bool include_urls: True to return REST Urls with the response. Default is True.
:param bool include_all_versions: True to return all versions of the package in the response. Default is false (latest version only).
:param bool is_listed: Only applicable for NuGet packages, setting it for other package types will result in a 404. If false, delisted package versions will be returned. Use this to filter the response when includeAllVersions is set to true. Default is unset (do not return delisted packages).
:param bool get_top_package_versions: Changes the behavior of $top and $skip to return all versions of each package up to $top. Must be used in conjunction with includeAllVersions=true
:param bool is_release: Only applicable for Nuget packages. Use this to filter the response when includeAllVersions is set to true. Default is True (only return packages without prerelease versioning).
:param bool include_description: Return the description for every version of each package in the response. Default is False.
:param int top: Get the top N packages (or package versions where getTopPackageVersions=true)
:param int skip: Skip the first N packages (or package versions where getTopPackageVersions=true)
:param bool include_deleted: Return deleted or unpublished versions of packages in the response. Default is False.
:param bool is_cached: [Obsolete] Used for legacy scenarios and may be removed in future versions.
:param str direct_upstream_id: Filter results to return packages from a specific upstream.
:rtype: [Package]
"""
route_values = {}
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
query_parameters = {}
if protocol_type is not None:
query_parameters['protocolType'] = self._serialize.query('protocol_type', protocol_type, 'str')
if package_name_query is not None:
query_parameters['packageNameQuery'] = self._serialize.query('package_name_query', package_name_query, 'str')
if normalized_package_name is not None:
query_parameters['normalizedPackageName'] = self._serialize.query('normalized_package_name', normalized_package_name, 'str')
if include_urls is not None:
query_parameters['includeUrls'] = self._serialize.query('include_urls', include_urls, 'bool')
if include_all_versions is not None:
query_parameters['includeAllVersions'] = self._serialize.query('include_all_versions', include_all_versions, 'bool')
if is_listed is not None:
query_parameters['isListed'] = self._serialize.query('is_listed', is_listed, 'bool')
if get_top_package_versions is not None:
query_parameters['getTopPackageVersions'] = self._serialize.query('get_top_package_versions', get_top_package_versions, 'bool')
if is_release is not None:
query_parameters['isRelease'] = self._serialize.query('is_release', is_release, 'bool')
if include_description is not None:
query_parameters['includeDescription'] = self._serialize.query('include_description', include_description, 'bool')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if include_deleted is not None:
query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
if is_cached is not None:
query_parameters['isCached'] = self._serialize.query('is_cached', is_cached, 'bool')
if direct_upstream_id is not None:
query_parameters['directUpstreamId'] = self._serialize.query('direct_upstream_id', direct_upstream_id, 'str')
response = self._send(http_method='GET',
location_id='7a20d846-c929-4acc-9ea2-0d5a7df1b197',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[Package]', self._unwrap_collection(response))
|
[
"def",
"get_packages",
"(",
"self",
",",
"feed_id",
",",
"protocol_type",
"=",
"None",
",",
"package_name_query",
"=",
"None",
",",
"normalized_package_name",
"=",
"None",
",",
"include_urls",
"=",
"None",
",",
"include_all_versions",
"=",
"None",
",",
"is_listed",
"=",
"None",
",",
"get_top_package_versions",
"=",
"None",
",",
"is_release",
"=",
"None",
",",
"include_description",
"=",
"None",
",",
"top",
"=",
"None",
",",
"skip",
"=",
"None",
",",
"include_deleted",
"=",
"None",
",",
"is_cached",
"=",
"None",
",",
"direct_upstream_id",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"feed_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'feedId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'feed_id'",
",",
"feed_id",
",",
"'str'",
")",
"query_parameters",
"=",
"{",
"}",
"if",
"protocol_type",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'protocolType'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'protocol_type'",
",",
"protocol_type",
",",
"'str'",
")",
"if",
"package_name_query",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'packageNameQuery'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'package_name_query'",
",",
"package_name_query",
",",
"'str'",
")",
"if",
"normalized_package_name",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'normalizedPackageName'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'normalized_package_name'",
",",
"normalized_package_name",
",",
"'str'",
")",
"if",
"include_urls",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'includeUrls'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'include_urls'",
",",
"include_urls",
",",
"'bool'",
")",
"if",
"include_all_versions",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'includeAllVersions'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'include_all_versions'",
",",
"include_all_versions",
",",
"'bool'",
")",
"if",
"is_listed",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'isListed'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'is_listed'",
",",
"is_listed",
",",
"'bool'",
")",
"if",
"get_top_package_versions",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'getTopPackageVersions'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'get_top_package_versions'",
",",
"get_top_package_versions",
",",
"'bool'",
")",
"if",
"is_release",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'isRelease'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'is_release'",
",",
"is_release",
",",
"'bool'",
")",
"if",
"include_description",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'includeDescription'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'include_description'",
",",
"include_description",
",",
"'bool'",
")",
"if",
"top",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'$top'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'top'",
",",
"top",
",",
"'int'",
")",
"if",
"skip",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'$skip'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'skip'",
",",
"skip",
",",
"'int'",
")",
"if",
"include_deleted",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'includeDeleted'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'include_deleted'",
",",
"include_deleted",
",",
"'bool'",
")",
"if",
"is_cached",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'isCached'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'is_cached'",
",",
"is_cached",
",",
"'bool'",
")",
"if",
"direct_upstream_id",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'directUpstreamId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'direct_upstream_id'",
",",
"direct_upstream_id",
",",
"'str'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'7a20d846-c929-4acc-9ea2-0d5a7df1b197'",
",",
"version",
"=",
"'5.0-preview.1'",
",",
"route_values",
"=",
"route_values",
",",
"query_parameters",
"=",
"query_parameters",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'[Package]'",
",",
"self",
".",
"_unwrap_collection",
"(",
"response",
")",
")"
] | 89.5 | 53.844828 |
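For context, the function above is a generated REST client method for an Azure DevOps feed endpoint. A hedged usage sketch follows; the connection setup mirrors the azure-devops package conventions, but the accessor name, organization URL, token, and feed id are illustrative assumptions, not verified values.
from msrest.authentication import BasicAuthentication  # assumes azure-devops is installed
from azure.devops.connection import Connection

credentials = BasicAuthentication('', 'personal-access-token')  # hypothetical PAT
connection = Connection(base_url='https://dev.azure.com/your-org', creds=credentials)
feed_client = connection.clients.get_feed_client()  # accessor name is an assumption

# List up to 10 packages from a feed, with descriptions and URLs included.
packages = feed_client.get_packages('my-feed-id', top=10,
                                    include_description=True, include_urls=True)
for package in packages:
    print(package.name)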
def _get_conversion_type(self, convert_to=None):
        '''A helper function to return the conversion type based on user
preference and input recipe.
Parameters
==========
convert_to: a string either docker or singularity (default None)
'''
acceptable = ['singularity', 'docker']
# Default is to convert to opposite kind
conversion = "singularity"
if self.name == "singularity":
conversion = "docker"
# Unless the user asks for a specific type
if convert_to is not None and convert_to in acceptable:
conversion = convert_to
return conversion
|
[
"def",
"_get_conversion_type",
"(",
"self",
",",
"convert_to",
"=",
"None",
")",
":",
"acceptable",
"=",
"[",
"'singularity'",
",",
"'docker'",
"]",
"# Default is to convert to opposite kind",
"conversion",
"=",
"\"singularity\"",
"if",
"self",
".",
"name",
"==",
"\"singularity\"",
":",
"conversion",
"=",
"\"docker\"",
"# Unless the user asks for a specific type",
"if",
"convert_to",
"is",
"not",
"None",
"and",
"convert_to",
"in",
"acceptable",
":",
"conversion",
"=",
"convert_to",
"return",
"conversion"
] | 33 | 19.3 |
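To make the fallback behaviour above concrete, here is a standalone sketch of the same logic outside the class (the `self.name` attribute is replaced by a plain parameter):
def get_conversion_type(name, convert_to=None):
    # Default to the opposite container kind; honour an explicit, valid request.
    acceptable = ['singularity', 'docker']
    conversion = 'docker' if name == 'singularity' else 'singularity'
    if convert_to is not None and convert_to in acceptable:
        conversion = convert_to
    return conversion

assert get_conversion_type('singularity') == 'docker'
assert get_conversion_type('docker') == 'singularity'
assert get_conversion_type('docker', convert_to='docker') == 'docker'
assert get_conversion_type('docker', convert_to='invalid') == 'singularity'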
def split_input(img):
"""
img: an RGB image of shape (s, 2s, 3).
:return: [input, output]
"""
# split the image into left + right pairs
s = img.shape[0]
assert img.shape[1] == 2 * s
input, output = img[:, :s, :], img[:, s:, :]
if args.mode == 'BtoA':
input, output = output, input
if IN_CH == 1:
input = cv2.cvtColor(input, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis]
if OUT_CH == 1:
output = cv2.cvtColor(output, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis]
return [input, output]
|
[
"def",
"split_input",
"(",
"img",
")",
":",
"# split the image into left + right pairs",
"s",
"=",
"img",
".",
"shape",
"[",
"0",
"]",
"assert",
"img",
".",
"shape",
"[",
"1",
"]",
"==",
"2",
"*",
"s",
"input",
",",
"output",
"=",
"img",
"[",
":",
",",
":",
"s",
",",
":",
"]",
",",
"img",
"[",
":",
",",
"s",
":",
",",
":",
"]",
"if",
"args",
".",
"mode",
"==",
"'BtoA'",
":",
"input",
",",
"output",
"=",
"output",
",",
"input",
"if",
"IN_CH",
"==",
"1",
":",
"input",
"=",
"cv2",
".",
"cvtColor",
"(",
"input",
",",
"cv2",
".",
"COLOR_RGB2GRAY",
")",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"if",
"OUT_CH",
"==",
"1",
":",
"output",
"=",
"cv2",
".",
"cvtColor",
"(",
"output",
",",
"cv2",
".",
"COLOR_RGB2GRAY",
")",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"return",
"[",
"input",
",",
"output",
"]"
] | 32.8125 | 13.4375 |
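A minimal sketch of the slicing step above with a synthetic array (no OpenCV needed, so the grayscale branches are omitted):
import numpy as np

# Fake 4x8 side-by-side RGB pair: left half black, right half white.
img = np.zeros((4, 8, 3), dtype=np.uint8)
img[:, 4:, :] = 255
s = img.shape[0]
assert img.shape[1] == 2 * s
left, right = img[:, :s, :], img[:, s:, :]
assert left.shape == right.shape == (4, 4, 3)
assert left.max() == 0 and right.min() == 255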
def ytdl_progress_hook(self, d):
"""Called when youtube-dl updates progress"""
if d['status'] == 'downloading':
self.play_empty()
if "elapsed" in d:
if d["elapsed"] > self.current_download_elapsed + 4:
self.current_download_elapsed = d["elapsed"]
current_download = 0
current_download_total = 0
current_download_eta = 0
if "total_bytes" in d and d["total_bytes"] > 0:
current_download_total = d["total_bytes"]
elif "total_bytes_estimate" in d and d["total_bytes_estimate"] > 0:
current_download_total = d["total_bytes_estimate"]
if "downloaded_bytes" in d and d["downloaded_bytes"] > 0:
current_download = d["downloaded_bytes"]
if "eta" in d and d["eta"] > 0:
current_download_eta = d["eta"]
if current_download_total > 0:
percent = round(100 * (current_download / current_download_total))
if percent > 100:
percent = 100
elif percent < 0:
percent = 0
seconds = str(round(current_download_eta)) if current_download_eta > 0 else ""
eta = " ({} {} remaining)".format(seconds, "seconds" if seconds != 1 else "second")
downloading = "Downloading song: {}%{}".format(percent, eta)
if self.prev_time != downloading:
self.timelog.debug(downloading)
self.prev_time = downloading
if d['status'] == 'error':
self.statuslog.error("Error downloading song")
elif d['status'] == 'finished':
self.statuslog.info("Downloaded song")
downloading = "Downloading song: {}%".format(100)
if self.prev_time != downloading:
self.timelog.debug(downloading)
self.prev_time = downloading
if "elapsed" in d:
download_time = "{} {}".format(d["elapsed"] if d["elapsed"] > 0 else "<1",
"seconds" if d["elapsed"] != 1 else "second")
self.logger.debug("Downloaded song in {}".format(download_time))
# Create an FFmpeg player
future = asyncio.run_coroutine_threadsafe(self.create_ffmpeg_player(d['filename']), client.loop)
try:
future.result()
except Exception as e:
logger.exception(e)
return
|
[
"def",
"ytdl_progress_hook",
"(",
"self",
",",
"d",
")",
":",
"if",
"d",
"[",
"'status'",
"]",
"==",
"'downloading'",
":",
"self",
".",
"play_empty",
"(",
")",
"if",
"\"elapsed\"",
"in",
"d",
":",
"if",
"d",
"[",
"\"elapsed\"",
"]",
">",
"self",
".",
"current_download_elapsed",
"+",
"4",
":",
"self",
".",
"current_download_elapsed",
"=",
"d",
"[",
"\"elapsed\"",
"]",
"current_download",
"=",
"0",
"current_download_total",
"=",
"0",
"current_download_eta",
"=",
"0",
"if",
"\"total_bytes\"",
"in",
"d",
"and",
"d",
"[",
"\"total_bytes\"",
"]",
">",
"0",
":",
"current_download_total",
"=",
"d",
"[",
"\"total_bytes\"",
"]",
"elif",
"\"total_bytes_estimate\"",
"in",
"d",
"and",
"d",
"[",
"\"total_bytes_estimate\"",
"]",
">",
"0",
":",
"current_download_total",
"=",
"d",
"[",
"\"total_bytes_estimate\"",
"]",
"if",
"\"downloaded_bytes\"",
"in",
"d",
"and",
"d",
"[",
"\"downloaded_bytes\"",
"]",
">",
"0",
":",
"current_download",
"=",
"d",
"[",
"\"downloaded_bytes\"",
"]",
"if",
"\"eta\"",
"in",
"d",
"and",
"d",
"[",
"\"eta\"",
"]",
">",
"0",
":",
"current_download_eta",
"=",
"d",
"[",
"\"eta\"",
"]",
"if",
"current_download_total",
">",
"0",
":",
"percent",
"=",
"round",
"(",
"100",
"*",
"(",
"current_download",
"/",
"current_download_total",
")",
")",
"if",
"percent",
">",
"100",
":",
"percent",
"=",
"100",
"elif",
"percent",
"<",
"0",
":",
"percent",
"=",
"0",
"seconds",
"=",
"str",
"(",
"round",
"(",
"current_download_eta",
")",
")",
"if",
"current_download_eta",
">",
"0",
"else",
"\"\"",
"eta",
"=",
"\" ({} {} remaining)\"",
".",
"format",
"(",
"seconds",
",",
"\"seconds\"",
"if",
"seconds",
"!=",
"1",
"else",
"\"second\"",
")",
"downloading",
"=",
"\"Downloading song: {}%{}\"",
".",
"format",
"(",
"percent",
",",
"eta",
")",
"if",
"self",
".",
"prev_time",
"!=",
"downloading",
":",
"self",
".",
"timelog",
".",
"debug",
"(",
"downloading",
")",
"self",
".",
"prev_time",
"=",
"downloading",
"if",
"d",
"[",
"'status'",
"]",
"==",
"'error'",
":",
"self",
".",
"statuslog",
".",
"error",
"(",
"\"Error downloading song\"",
")",
"elif",
"d",
"[",
"'status'",
"]",
"==",
"'finished'",
":",
"self",
".",
"statuslog",
".",
"info",
"(",
"\"Downloaded song\"",
")",
"downloading",
"=",
"\"Downloading song: {}%\"",
".",
"format",
"(",
"100",
")",
"if",
"self",
".",
"prev_time",
"!=",
"downloading",
":",
"self",
".",
"timelog",
".",
"debug",
"(",
"downloading",
")",
"self",
".",
"prev_time",
"=",
"downloading",
"if",
"\"elapsed\"",
"in",
"d",
":",
"download_time",
"=",
"\"{} {}\"",
".",
"format",
"(",
"d",
"[",
"\"elapsed\"",
"]",
"if",
"d",
"[",
"\"elapsed\"",
"]",
">",
"0",
"else",
"\"<1\"",
",",
"\"seconds\"",
"if",
"d",
"[",
"\"elapsed\"",
"]",
"!=",
"1",
"else",
"\"second\"",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Downloaded song in {}\"",
".",
"format",
"(",
"download_time",
")",
")",
"# Create an FFmpeg player",
"future",
"=",
"asyncio",
".",
"run_coroutine_threadsafe",
"(",
"self",
".",
"create_ffmpeg_player",
"(",
"d",
"[",
"'filename'",
"]",
")",
",",
"client",
".",
"loop",
")",
"try",
":",
"future",
".",
"result",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"e",
")",
"return"
] | 48.321429 | 22.089286 |
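The method above is meant to be registered through youtube-dl's `progress_hooks` option, which calls each hook with a status dict like the one inspected above. A minimal sketch (the URL is a placeholder):
import youtube_dl  # assumes the youtube-dl package is installed

def hook(d):
    # youtube-dl reports 'downloading', 'error', and 'finished' statuses.
    if d['status'] == 'downloading':
        print('downloading, eta:', d.get('eta'))
    elif d['status'] == 'finished':
        print('downloaded:', d['filename'])

ydl_opts = {'progress_hooks': [hook]}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://example.com/some-video'])  # placeholder URL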
def _create_or_reuse_item(local_file, parent_folder_id, reuse_existing=False):
"""
Create an item from the local file in the Midas Server folder corresponding
to the parent folder id.
:param local_file: full path to a file on the local file system
:type local_file: string
:param parent_folder_id: id of parent folder on the Midas Server instance,
where the item will be added
:type parent_folder_id: int | long
:param reuse_existing: (optional) whether to accept an existing item of the
same name in the same location, or create a new one instead
:type reuse_existing: bool
"""
local_item_name = os.path.basename(local_file)
item_id = None
if reuse_existing:
# check by name to see if the item already exists in the folder
children = session.communicator.folder_children(
session.token, parent_folder_id)
items = children['items']
for item in items:
if item['name'] == local_item_name:
item_id = item['item_id']
break
if item_id is None:
# create the item for the subdir
new_item = session.communicator.create_item(
session.token, local_item_name, parent_folder_id)
item_id = new_item['item_id']
return item_id
|
[
"def",
"_create_or_reuse_item",
"(",
"local_file",
",",
"parent_folder_id",
",",
"reuse_existing",
"=",
"False",
")",
":",
"local_item_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"local_file",
")",
"item_id",
"=",
"None",
"if",
"reuse_existing",
":",
"# check by name to see if the item already exists in the folder",
"children",
"=",
"session",
".",
"communicator",
".",
"folder_children",
"(",
"session",
".",
"token",
",",
"parent_folder_id",
")",
"items",
"=",
"children",
"[",
"'items'",
"]",
"for",
"item",
"in",
"items",
":",
"if",
"item",
"[",
"'name'",
"]",
"==",
"local_item_name",
":",
"item_id",
"=",
"item",
"[",
"'item_id'",
"]",
"break",
"if",
"item_id",
"is",
"None",
":",
"# create the item for the subdir",
"new_item",
"=",
"session",
".",
"communicator",
".",
"create_item",
"(",
"session",
".",
"token",
",",
"local_item_name",
",",
"parent_folder_id",
")",
"item_id",
"=",
"new_item",
"[",
"'item_id'",
"]",
"return",
"item_id"
] | 37.705882 | 18.588235 |
def _validate_args(self, args):
''' validate the user arguments '''
assert(args.bucket)
if args.subscribers:
for _subscriber in args.subscribers:
assert(isinstance(_subscriber, AsperaBaseSubscriber))
if (args.transfer_config):
assert(isinstance(args.transfer_config, AsperaConfig))
        # number of sessions requested can't be greater than max ascps
if args.transfer_config.multi_session > self._config.ascp_max_concurrent:
raise ValueError("Max sessions is %d" % self._config.ascp_max_concurrent)
for _pair in args.file_pair_list:
if not _pair.key or not _pair.fileobj:
raise ValueError("Invalid file pair")
|
[
"def",
"_validate_args",
"(",
"self",
",",
"args",
")",
":",
"assert",
"(",
"args",
".",
"bucket",
")",
"if",
"args",
".",
"subscribers",
":",
"for",
"_subscriber",
"in",
"args",
".",
"subscribers",
":",
"assert",
"(",
"isinstance",
"(",
"_subscriber",
",",
"AsperaBaseSubscriber",
")",
")",
"if",
"(",
"args",
".",
"transfer_config",
")",
":",
"assert",
"(",
"isinstance",
"(",
"args",
".",
"transfer_config",
",",
"AsperaConfig",
")",
")",
"# number of sessions requested cant be greater than max ascps",
"if",
"args",
".",
"transfer_config",
".",
"multi_session",
">",
"self",
".",
"_config",
".",
"ascp_max_concurrent",
":",
"raise",
"ValueError",
"(",
"\"Max sessions is %d\"",
"%",
"self",
".",
"_config",
".",
"ascp_max_concurrent",
")",
"for",
"_pair",
"in",
"args",
".",
"file_pair_list",
":",
"if",
"not",
"_pair",
".",
"key",
"or",
"not",
"_pair",
".",
"fileobj",
":",
"raise",
"ValueError",
"(",
"\"Invalid file pair\"",
")"
] | 40.944444 | 23.166667 |
def execute(self, eopatch):
""" Execute method takes EOPatch and changes the specified feature
"""
feature_type, feature_name = next(self.feature(eopatch))
eopatch[feature_type][feature_name] = self.process(eopatch[feature_type][feature_name])
return eopatch
|
[
"def",
"execute",
"(",
"self",
",",
"eopatch",
")",
":",
"feature_type",
",",
"feature_name",
"=",
"next",
"(",
"self",
".",
"feature",
"(",
"eopatch",
")",
")",
"eopatch",
"[",
"feature_type",
"]",
"[",
"feature_name",
"]",
"=",
"self",
".",
"process",
"(",
"eopatch",
"[",
"feature_type",
"]",
"[",
"feature_name",
"]",
")",
"return",
"eopatch"
] | 37.5 | 23.625 |
def ValidateLanguageCode(lang, column_name=None, problems=None):
"""
Validates a non-required language code value using IsValidLanguageCode():
- if invalid adds InvalidValue error (if problems accumulator is provided)
- an empty language code is regarded as valid! Otherwise we might end up
with many duplicate errors because of the required field checks.
"""
if IsEmpty(lang) or IsValidLanguageCode(lang):
return True
else:
if problems:
problems.InvalidValue(column_name, lang)
return False
|
[
"def",
"ValidateLanguageCode",
"(",
"lang",
",",
"column_name",
"=",
"None",
",",
"problems",
"=",
"None",
")",
":",
"if",
"IsEmpty",
"(",
"lang",
")",
"or",
"IsValidLanguageCode",
"(",
"lang",
")",
":",
"return",
"True",
"else",
":",
"if",
"problems",
":",
"problems",
".",
"InvalidValue",
"(",
"column_name",
",",
"lang",
")",
"return",
"False"
] | 40.076923 | 21.769231 |
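A self-contained sketch of the same validate-or-report pattern, with stand-in helpers (the real `IsValidLanguageCode` checks against full language-code data; a toy whitelist is used here, and a plain list stands in for the problems accumulator):
def is_empty(value):
    return value is None or value == ''

def is_valid_language_code(lang):
    return lang in {'en', 'es', 'fr'}  # toy whitelist for illustration

def validate_language_code(lang, column_name=None, problems=None):
    if is_empty(lang) or is_valid_language_code(lang):
        return True
    if problems is not None:
        problems.append((column_name, lang))
    return False

errors = []
assert validate_language_code('')                      # empty is valid
assert not validate_language_code('xx', 'lang', errors)
assert errors == [('lang', 'xx')]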
def cmd_nc(host, port, family, ssl_enable, crlf, source_ip, source_port, protocol):
"""Some kind of netcat/ncat replacement.
    The execution emulates the feel of these popular tools.
Example:
\b
$ habu.nc --crlf www.portantier.com 80
Connected to 45.77.113.133 80
HEAD / HTTP/1.0
\b
HTTP/1.0 301 Moved Permanently
Date: Thu, 26 Jul 2018 21:10:51 GMT
Server: OpenBSD httpd
Connection: close
Content-Type: text/html
Content-Length: 443
Location: https://www.portantier.com/
"""
resolved = socket.getaddrinfo(host, port)
families = {
'4' : [ socket.AF_INET ],
'6' : [ socket.AF_INET6 ],
'46': [ socket.AF_INET, socket.AF_INET6]
}
address = None
for r in resolved:
if r[0] in families[family]:
address = r # (<AddressFamily.AF_INET6: 10>, <SocketType.SOCK_STREAM: 1>, 6, '', ('2606:2800:220:1:248:1893:25c8:1946', 80, 0, 0))
if not address:
print('Could not resolve {} to the ip address family selected ({})'.format(host, family), file=sys.stderr)
sys.exit(1)
to_send = b''
if not source_ip:
source_ip = which_source_for(address[4][0])
if protocol == 'tcp':
s = socket.socket(address[0], socket.SOCK_STREAM)
else:
s = socket.socket(address[0], socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((source_ip, source_port))
if ssl_enable:
ssl_context = ssl.SSLContext()
s = ssl_context.wrap_socket(s, server_side=False)
try:
s.connect((address[4][0], port))
print('Connected to', address[4][0], port, file=sys.stderr)
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
while True:
iready, oready, eready = select.select([sys.stdin, s], [], [s])
for i in iready:
if i == sys.stdin:
if crlf:
to_send += i.readline().replace('\n', '\r\n').encode()
else:
to_send += i.readline().encode()
else:
received = s.recv(4096)
if not received:
sys.exit(1)
os.write(sys.stdout.fileno(), received)
iready, oready, eready = select.select([], [s], [s])
for o in oready:
if to_send:
o.send(to_send)
to_send = b''
s.close()
|
[
"def",
"cmd_nc",
"(",
"host",
",",
"port",
",",
"family",
",",
"ssl_enable",
",",
"crlf",
",",
"source_ip",
",",
"source_port",
",",
"protocol",
")",
":",
"resolved",
"=",
"socket",
".",
"getaddrinfo",
"(",
"host",
",",
"port",
")",
"families",
"=",
"{",
"'4'",
":",
"[",
"socket",
".",
"AF_INET",
"]",
",",
"'6'",
":",
"[",
"socket",
".",
"AF_INET6",
"]",
",",
"'46'",
":",
"[",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"AF_INET6",
"]",
"}",
"address",
"=",
"None",
"for",
"r",
"in",
"resolved",
":",
"if",
"r",
"[",
"0",
"]",
"in",
"families",
"[",
"family",
"]",
":",
"address",
"=",
"r",
"# (<AddressFamily.AF_INET6: 10>, <SocketType.SOCK_STREAM: 1>, 6, '', ('2606:2800:220:1:248:1893:25c8:1946', 80, 0, 0))",
"if",
"not",
"address",
":",
"print",
"(",
"'Could not resolve {} to the ip address family selected ({})'",
".",
"format",
"(",
"host",
",",
"family",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"to_send",
"=",
"b''",
"if",
"not",
"source_ip",
":",
"source_ip",
"=",
"which_source_for",
"(",
"address",
"[",
"4",
"]",
"[",
"0",
"]",
")",
"if",
"protocol",
"==",
"'tcp'",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"address",
"[",
"0",
"]",
",",
"socket",
".",
"SOCK_STREAM",
")",
"else",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"address",
"[",
"0",
"]",
",",
"socket",
".",
"SOCK_DGRAM",
")",
"s",
".",
"setsockopt",
"(",
"socket",
".",
"SOL_SOCKET",
",",
"socket",
".",
"SO_REUSEADDR",
",",
"1",
")",
"s",
".",
"bind",
"(",
"(",
"source_ip",
",",
"source_port",
")",
")",
"if",
"ssl_enable",
":",
"ssl_context",
"=",
"ssl",
".",
"SSLContext",
"(",
")",
"s",
"=",
"ssl_context",
".",
"wrap_socket",
"(",
"s",
",",
"server_side",
"=",
"False",
")",
"try",
":",
"s",
".",
"connect",
"(",
"(",
"address",
"[",
"4",
"]",
"[",
"0",
"]",
",",
"port",
")",
")",
"print",
"(",
"'Connected to'",
",",
"address",
"[",
"4",
"]",
"[",
"0",
"]",
",",
"port",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"while",
"True",
":",
"iready",
",",
"oready",
",",
"eready",
"=",
"select",
".",
"select",
"(",
"[",
"sys",
".",
"stdin",
",",
"s",
"]",
",",
"[",
"]",
",",
"[",
"s",
"]",
")",
"for",
"i",
"in",
"iready",
":",
"if",
"i",
"==",
"sys",
".",
"stdin",
":",
"if",
"crlf",
":",
"to_send",
"+=",
"i",
".",
"readline",
"(",
")",
".",
"replace",
"(",
"'\\n'",
",",
"'\\r\\n'",
")",
".",
"encode",
"(",
")",
"else",
":",
"to_send",
"+=",
"i",
".",
"readline",
"(",
")",
".",
"encode",
"(",
")",
"else",
":",
"received",
"=",
"s",
".",
"recv",
"(",
"4096",
")",
"if",
"not",
"received",
":",
"sys",
".",
"exit",
"(",
"1",
")",
"os",
".",
"write",
"(",
"sys",
".",
"stdout",
".",
"fileno",
"(",
")",
",",
"received",
")",
"iready",
",",
"oready",
",",
"eready",
"=",
"select",
".",
"select",
"(",
"[",
"]",
",",
"[",
"s",
"]",
",",
"[",
"s",
"]",
")",
"for",
"o",
"in",
"oready",
":",
"if",
"to_send",
":",
"o",
".",
"send",
"(",
"to_send",
")",
"to_send",
"=",
"b''",
"s",
".",
"close",
"(",
")"
] | 26.730337 | 23.640449 |
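The address-family filtering at the top of `cmd_nc` can be exercised on its own; this sketch resolves localhost and keeps only IPv4 results:
import socket

families = {'4': [socket.AF_INET], '6': [socket.AF_INET6],
            '46': [socket.AF_INET, socket.AF_INET6]}
resolved = socket.getaddrinfo('localhost', 80)
# Keep the first result whose family matches the requested filter.
address = next((r for r in resolved if r[0] in families['4']), None)
if address:
    print(address[4][0])  # sockaddr tuple: the IP is the first element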
def get_tasks(self):
"""Returns an ordered dictionary {task_name: task} of all tasks within this workflow.
    :return: Ordered dictionary with keys being task names (str) and values being the corresponding task
        instances from this workflow
:rtype: OrderedDict
"""
tasks = collections.OrderedDict()
for dep in self.ordered_dependencies:
tasks[dep.name] = dep.task
return tasks
|
[
"def",
"get_tasks",
"(",
"self",
")",
":",
"tasks",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"dep",
"in",
"self",
".",
"ordered_dependencies",
":",
"tasks",
"[",
"dep",
".",
"name",
"]",
"=",
"dep",
".",
"task",
"return",
"tasks"
] | 36.5 | 19.833333 |
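The same ordered name-to-task mapping with plain stand-in objects, to show how the insertion order of `ordered_dependencies` is preserved:
import collections

class Dep:
    def __init__(self, name, task):
        self.name, self.task = name, task

ordered_dependencies = [Dep('load', 'load-task'), Dep('save', 'save-task')]
tasks = collections.OrderedDict((d.name, d.task) for d in ordered_dependencies)
assert list(tasks) == ['load', 'save']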
def do_over():
'''Calls :py:func:`os.exec` with executable and args derived from sys.'''
path = sys.executable
args = [path] + sys.argv
# And the rest, after a sudden wet thud, was silence.
os.execv(path, args)
|
[
"def",
"do_over",
"(",
")",
":",
"path",
"=",
"sys",
".",
"executable",
"args",
"=",
"[",
"path",
"]",
"+",
"sys",
".",
"argv",
"# And the rest, after a sudden wet thud, was silence.",
"os",
".",
"execv",
"(",
"path",
",",
"args",
")"
] | 32.142857 | 23.285714 |
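Because `os.execv` replaces the running process and never returns on success, a sketch of `do_over` has to be guarded to stay inert:
import os
import sys

# Flip the guard to True only if you really want the interpreter restarted
# in place; execution never returns past os.execv on success.
RESTART = False
if RESTART:
    os.execv(sys.executable, [sys.executable] + sys.argv)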
def calculate_row_format(columns, keys=None):
"""
Calculate row format.
Args:
columns (dict): the keys are the column name and the value the max length.
keys (list): optional list of keys to order columns as well as to filter for them.
Returns:
str: format for table row
"""
row_format = ''
if keys is None:
keys = columns.keys()
else:
keys = [key for key in keys if key in columns]
for key in keys:
if len(row_format) > 0:
row_format += "|"
row_format += "%%(%s)-%ds" % (key, columns[key])
return '|' + row_format + '|'
|
[
"def",
"calculate_row_format",
"(",
"columns",
",",
"keys",
"=",
"None",
")",
":",
"row_format",
"=",
"''",
"if",
"keys",
"is",
"None",
":",
"keys",
"=",
"columns",
".",
"keys",
"(",
")",
"else",
":",
"keys",
"=",
"[",
"key",
"for",
"key",
"in",
"keys",
"if",
"key",
"in",
"columns",
"]",
"for",
"key",
"in",
"keys",
":",
"if",
"len",
"(",
"row_format",
")",
">",
"0",
":",
"row_format",
"+=",
"\"|\"",
"row_format",
"+=",
"\"%%(%s)-%ds\"",
"%",
"(",
"key",
",",
"columns",
"[",
"key",
"]",
")",
"return",
"'|'",
"+",
"row_format",
"+",
"'|'"
] | 26.521739 | 21.652174 |
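Assuming `calculate_row_format` above is in scope, a short usage sketch showing the %-style mapping format it produces:
columns = {'name': 10, 'port': 5}
fmt = calculate_row_format(columns, keys=['name', 'port'])
assert fmt == '|%(name)-10s|%(port)-5s|'
print(fmt % {'name': 'web', 'port': 80})  # |web       |80   |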
def confirm_execution(self):
"""Confirm from your if proposed-plan be executed."""
permit = ''
while permit.lower() not in ('yes', 'no'):
permit = input('Execute Proposed Plan? [yes/no] ')
if permit.lower() == 'yes':
return True
else:
return False
|
[
"def",
"confirm_execution",
"(",
"self",
")",
":",
"permit",
"=",
"''",
"while",
"permit",
".",
"lower",
"(",
")",
"not",
"in",
"(",
"'yes'",
",",
"'no'",
")",
":",
"permit",
"=",
"input",
"(",
"'Execute Proposed Plan? [yes/no] '",
")",
"if",
"permit",
".",
"lower",
"(",
")",
"==",
"'yes'",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | 35 | 14.444444 |
def avail_locations(call=None):
'''
List all available locations
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
ret = {}
conn = get_conn()
response = conn.getCreateObjectOptions()
#return response
for datacenter in response['datacenters']:
#return data center
ret[datacenter['template']['datacenter']['name']] = {
'name': datacenter['template']['datacenter']['name'],
}
return ret
|
[
"def",
"avail_locations",
"(",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The avail_locations function must be called with '",
"'-f or --function, or with the --list-locations option'",
")",
"ret",
"=",
"{",
"}",
"conn",
"=",
"get_conn",
"(",
")",
"response",
"=",
"conn",
".",
"getCreateObjectOptions",
"(",
")",
"#return response",
"for",
"datacenter",
"in",
"response",
"[",
"'datacenters'",
"]",
":",
"#return data center",
"ret",
"[",
"datacenter",
"[",
"'template'",
"]",
"[",
"'datacenter'",
"]",
"[",
"'name'",
"]",
"]",
"=",
"{",
"'name'",
":",
"datacenter",
"[",
"'template'",
"]",
"[",
"'datacenter'",
"]",
"[",
"'name'",
"]",
",",
"}",
"return",
"ret"
] | 29.65 | 20.95 |
def list_properties(type):
"""
:param type: a Python GObject instance or type that the signal is associated with
:type type: :obj:`GObject.Object`
:returns: a list of :obj:`GObject.ParamSpec`
:rtype: [:obj:`GObject.ParamSpec`]
Takes a GObject/GInterface subclass or a GType and returns a list of
GParamSpecs for all properties of `type`.
"""
if isinstance(type, PGType):
type = type.pytype
from pgi.obj import Object, InterfaceBase
if not issubclass(type, (Object, InterfaceBase)):
raise TypeError("Must be a subclass of %s or %s" %
(Object.__name__, InterfaceBase.__name__))
gparams = []
for key in dir(type.props):
if not key.startswith("_"):
gparams.append(getattr(type.props, key))
return gparams
|
[
"def",
"list_properties",
"(",
"type",
")",
":",
"if",
"isinstance",
"(",
"type",
",",
"PGType",
")",
":",
"type",
"=",
"type",
".",
"pytype",
"from",
"pgi",
".",
"obj",
"import",
"Object",
",",
"InterfaceBase",
"if",
"not",
"issubclass",
"(",
"type",
",",
"(",
"Object",
",",
"InterfaceBase",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Must be a subclass of %s or %s\"",
"%",
"(",
"Object",
".",
"__name__",
",",
"InterfaceBase",
".",
"__name__",
")",
")",
"gparams",
"=",
"[",
"]",
"for",
"key",
"in",
"dir",
"(",
"type",
".",
"props",
")",
":",
"if",
"not",
"key",
".",
"startswith",
"(",
"\"_\"",
")",
":",
"gparams",
".",
"append",
"(",
"getattr",
"(",
"type",
".",
"props",
",",
"key",
")",
")",
"return",
"gparams"
] | 30.653846 | 19.423077 |
def start_listener(self):
'''start listening for packets'''
if self.sock is not None:
self.sock.close()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(('', self.asterix_settings.port))
self.sock.setblocking(False)
print("Started on port %u" % self.asterix_settings.port)
|
[
"def",
"start_listener",
"(",
"self",
")",
":",
"if",
"self",
".",
"sock",
"is",
"not",
"None",
":",
"self",
".",
"sock",
".",
"close",
"(",
")",
"self",
".",
"sock",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_DGRAM",
",",
"socket",
".",
"IPPROTO_UDP",
")",
"self",
".",
"sock",
".",
"setsockopt",
"(",
"socket",
".",
"SOL_SOCKET",
",",
"socket",
".",
"SO_REUSEADDR",
",",
"1",
")",
"self",
".",
"sock",
".",
"bind",
"(",
"(",
"''",
",",
"self",
".",
"asterix_settings",
".",
"port",
")",
")",
"self",
".",
"sock",
".",
"setblocking",
"(",
"False",
")",
"print",
"(",
"\"Started on port %u\"",
"%",
"self",
".",
"asterix_settings",
".",
"port",
")"
] | 49.222222 | 17.444444 |
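A standalone version of the same non-blocking UDP setup, bound to port 0 so the OS picks a free port and the sketch cannot collide with a real service:
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', 0))          # port 0: let the OS choose
sock.setblocking(False)
print('Started on port %u' % sock.getsockname()[1])
sock.close()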
def _set_column_names(dep, exp):
""" rename the columns in the dependent peptides table from
the raw file to the corresponding {experiment}_{fraction}.
:param dep: dependent peptides table.
:param exp: experimental design table.
"""
colnames = exp['Experiment'].astype(str) + '_' + exp['Fraction'].astype(str)
file2col = dict(zip(exp['Raw file'], colnames))
_dep = dep.rename(columns=file2col)
_dep.columns.name = 'Column Name'
return _dep
|
[
"def",
"_set_column_names",
"(",
"dep",
",",
"exp",
")",
":",
"colnames",
"=",
"exp",
"[",
"'Experiment'",
"]",
".",
"astype",
"(",
"str",
")",
"+",
"'_'",
"+",
"exp",
"[",
"'Fraction'",
"]",
".",
"astype",
"(",
"str",
")",
"file2col",
"=",
"dict",
"(",
"zip",
"(",
"exp",
"[",
"'Raw file'",
"]",
",",
"colnames",
")",
")",
"_dep",
"=",
"dep",
".",
"rename",
"(",
"columns",
"=",
"file2col",
")",
"_dep",
".",
"columns",
".",
"name",
"=",
"'Column Name'",
"return",
"_dep"
] | 42.636364 | 10.272727 |
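A toy pandas round trip of the renaming above (column names and values are made up):
import pandas as pd

dep = pd.DataFrame({'run1.raw': [1], 'run2.raw': [2]})
exp = pd.DataFrame({'Raw file': ['run1.raw', 'run2.raw'],
                    'Experiment': ['A', 'A'],
                    'Fraction': [1, 2]})
colnames = exp['Experiment'].astype(str) + '_' + exp['Fraction'].astype(str)
file2col = dict(zip(exp['Raw file'], colnames))
print(dep.rename(columns=file2col).columns.tolist())  # ['A_1', 'A_2']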
def data_two_freqs(N=200):
"""A simple test example with two close frequencies
"""
nn = arange(N)
xx = cos(0.257*pi*nn) + sin(0.2*pi*nn) + 0.01*randn(nn.size)
return xx
|
[
"def",
"data_two_freqs",
"(",
"N",
"=",
"200",
")",
":",
"nn",
"=",
"arange",
"(",
"N",
")",
"xx",
"=",
"cos",
"(",
"0.257",
"*",
"pi",
"*",
"nn",
")",
"+",
"sin",
"(",
"0.2",
"*",
"pi",
"*",
"nn",
")",
"+",
"0.01",
"*",
"randn",
"(",
"nn",
".",
"size",
")",
"return",
"xx"
] | 26.142857 | 18.142857 |
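The helper above relies on star-imported NumPy names (`arange`, `cos`, `sin`, `pi`, `randn`); here is the same two-tone test signal spelled out with explicit imports:
import numpy as np

N = 200
nn = np.arange(N)
xx = (np.cos(0.257 * np.pi * nn) + np.sin(0.2 * np.pi * nn)
      + 0.01 * np.random.randn(nn.size))
assert xx.shape == (200,)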
def reconnect(self, old_node, new_node):
"""
Disconnect old_node and connect new_node copying over any properties on the original relationship.
        Useful for preventing cardinality violations.
:param old_node:
:param new_node:
:return: None
"""
self._check_node(old_node)
self._check_node(new_node)
if old_node.id == new_node.id:
return
old_rel = _rel_helper(lhs='us', rhs='old', ident='r', **self.definition)
# get list of properties on the existing rel
result, meta = self.source.cypher(
"MATCH (us), (old) WHERE id(us)={self} and id(old)={old} "
"MATCH " + old_rel + " RETURN r", {'old': old_node.id})
if result:
node_properties = _get_node_properties(result[0][0])
existing_properties = node_properties.keys()
else:
raise NotConnected('reconnect', self.source, old_node)
# remove old relationship and create new one
new_rel = _rel_helper(lhs='us', rhs='new', ident='r2', **self.definition)
q = "MATCH (us), (old), (new) " \
"WHERE id(us)={self} and id(old)={old} and id(new)={new} " \
"MATCH " + old_rel
q += " CREATE UNIQUE" + new_rel
# copy over properties if we have
for p in existing_properties:
q += " SET r2.{0} = r.{1}".format(p, p)
q += " WITH r DELETE r"
self.source.cypher(q, {'old': old_node.id, 'new': new_node.id})
|
[
"def",
"reconnect",
"(",
"self",
",",
"old_node",
",",
"new_node",
")",
":",
"self",
".",
"_check_node",
"(",
"old_node",
")",
"self",
".",
"_check_node",
"(",
"new_node",
")",
"if",
"old_node",
".",
"id",
"==",
"new_node",
".",
"id",
":",
"return",
"old_rel",
"=",
"_rel_helper",
"(",
"lhs",
"=",
"'us'",
",",
"rhs",
"=",
"'old'",
",",
"ident",
"=",
"'r'",
",",
"*",
"*",
"self",
".",
"definition",
")",
"# get list of properties on the existing rel",
"result",
",",
"meta",
"=",
"self",
".",
"source",
".",
"cypher",
"(",
"\"MATCH (us), (old) WHERE id(us)={self} and id(old)={old} \"",
"\"MATCH \"",
"+",
"old_rel",
"+",
"\" RETURN r\"",
",",
"{",
"'old'",
":",
"old_node",
".",
"id",
"}",
")",
"if",
"result",
":",
"node_properties",
"=",
"_get_node_properties",
"(",
"result",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"existing_properties",
"=",
"node_properties",
".",
"keys",
"(",
")",
"else",
":",
"raise",
"NotConnected",
"(",
"'reconnect'",
",",
"self",
".",
"source",
",",
"old_node",
")",
"# remove old relationship and create new one",
"new_rel",
"=",
"_rel_helper",
"(",
"lhs",
"=",
"'us'",
",",
"rhs",
"=",
"'new'",
",",
"ident",
"=",
"'r2'",
",",
"*",
"*",
"self",
".",
"definition",
")",
"q",
"=",
"\"MATCH (us), (old), (new) \"",
"\"WHERE id(us)={self} and id(old)={old} and id(new)={new} \"",
"\"MATCH \"",
"+",
"old_rel",
"q",
"+=",
"\" CREATE UNIQUE\"",
"+",
"new_rel",
"# copy over properties if we have",
"for",
"p",
"in",
"existing_properties",
":",
"q",
"+=",
"\" SET r2.{0} = r.{1}\"",
".",
"format",
"(",
"p",
",",
"p",
")",
"q",
"+=",
"\" WITH r DELETE r\"",
"self",
".",
"source",
".",
"cypher",
"(",
"q",
",",
"{",
"'old'",
":",
"old_node",
".",
"id",
",",
"'new'",
":",
"new_node",
".",
"id",
"}",
")"
] | 37.175 | 20.575 |
def load_local_config(filename):
"""Loads the pylint.config.py file.
Args:
filename (str): The python file containing the local configuration.
Returns:
module: The loaded Python module.
"""
if not filename:
return imp.new_module('local_pylint_config')
module = imp.load_source('local_pylint_config', filename)
return module
|
[
"def",
"load_local_config",
"(",
"filename",
")",
":",
"if",
"not",
"filename",
":",
"return",
"imp",
".",
"new_module",
"(",
"'local_pylint_config'",
")",
"module",
"=",
"imp",
".",
"load_source",
"(",
"'local_pylint_config'",
",",
"filename",
")",
"return",
"module"
] | 28.076923 | 19.923077 |
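`imp` has been deprecated since Python 3.4; below is an equivalent sketch of the loader using `importlib`, with behaviour matched on a best-effort basis:
import importlib.util

def load_local_config(filename):
    if not filename:
        spec = importlib.util.spec_from_loader('local_pylint_config', loader=None)
        return importlib.util.module_from_spec(spec)  # empty placeholder module
    spec = importlib.util.spec_from_file_location('local_pylint_config', filename)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)  # executes the config file's code
    return module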
def get_media_uri(self, item_id):
"""Get a streaming URI for an item.
Note:
You should not need to use this directly. It is used by the Sonos
players (not the controllers) to obtain the uri of the media
stream. If you want to have a player play a media item,
you should add add it to the queue using its id and let the
player work out where to get the stream from (see `On Demand
Playback <http://musicpartners.sonos.com/node/421>`_ and
`Programmed Radio <http://musicpartners.sonos.com/node/422>`_)
Args:
item_id (str): The item for which the URI is required
Returns:
str: The item's streaming URI.
"""
response = self.soap_client.call(
'getMediaURI',
[('id', item_id)])
return response.get('getMediaURIResult', None)
|
[
"def",
"get_media_uri",
"(",
"self",
",",
"item_id",
")",
":",
"response",
"=",
"self",
".",
"soap_client",
".",
"call",
"(",
"'getMediaURI'",
",",
"[",
"(",
"'id'",
",",
"item_id",
")",
"]",
")",
"return",
"response",
".",
"get",
"(",
"'getMediaURIResult'",
",",
"None",
")"
] | 40.045455 | 22.045455 |
def int_input(message, low, high, show_range = True):
'''
    Ask the user for an int input between two values
args:
message (str): Prompt for user
low (int): Low value, user entered value must be > this value to be accepted
high (int): High value, user entered value must be < this value to be accepted
show_range (boolean, Default True): Print hint to user the range
returns:
int_in (int): Input integer
'''
int_in = low - 1
while (int_in < low) or (int_in > high):
if show_range:
suffix = ' (integer between ' + str(low) + ' and ' + str(high) + ')'
else:
suffix = ''
inp = input('Enter a ' + message + suffix + ': ')
if re.match('^-?[0-9]+$', inp) is not None:
int_in = int(inp)
else:
print(colored('Must be an integer, try again!', 'red'))
return int_in
|
[
"def",
"int_input",
"(",
"message",
",",
"low",
",",
"high",
",",
"show_range",
"=",
"True",
")",
":",
"int_in",
"=",
"low",
"-",
"1",
"while",
"(",
"int_in",
"<",
"low",
")",
"or",
"(",
"int_in",
">",
"high",
")",
":",
"if",
"show_range",
":",
"suffix",
"=",
"' (integer between '",
"+",
"str",
"(",
"low",
")",
"+",
"' and '",
"+",
"str",
"(",
"high",
")",
"+",
"')'",
"else",
":",
"suffix",
"=",
"''",
"inp",
"=",
"input",
"(",
"'Enter a '",
"+",
"message",
"+",
"suffix",
"+",
"': '",
")",
"if",
"re",
".",
"match",
"(",
"'^-?[0-9]+$'",
",",
"inp",
")",
"is",
"not",
"None",
":",
"int_in",
"=",
"int",
"(",
"inp",
")",
"else",
":",
"print",
"(",
"colored",
"(",
"'Must be an integer, try again!'",
",",
"'red'",
")",
")",
"return",
"int_in"
] | 34.153846 | 24.538462 |
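The integer check in `int_input` isolated into a quick demonstration — a full-string match of an optional minus sign followed by digits:
import re

for raw in ('42', '-7', '3.5', 'abc', ''):
    print(repr(raw), bool(re.match('^-?[0-9]+$', raw)))
# '42' True, '-7' True, '3.5' False, 'abc' False, '' False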
def findRepl( self,
text,
repl,
caseSensitive = False,
replaceAll = False ):
"""
        Looks for the inputted text and replaces it with the given replacement \
text.
:param text | <str>
repl | <str>
caseSensitive | <bool>
replaceAll | <bool>
        :return <int> number of items replaced
"""
# make sure something is selected
if ( not text ):
return 0
# make sure we have some text selected to replace
if ( self.selectedText() != text ):
found = self.findNext( text,
False,
caseSensitive,
False,
True )
if ( not found ):
return 0
sel = self.getSelection()
alltext = self.text()
# replace all instances
if ( replaceAll ):
sensitivity = Qt.CaseInsensitive
if ( caseSensitive ):
sensitivity = Qt.CaseSensitive
count = alltext.count(text, sensitivity)
alltext.replace(text, repl, sensitivity)
else:
count = 1
startpos = self.positionFromLineIndex(sel[0], sel[1])
alltext.replace(startpos, len(text), repl)
self.setText(alltext)
if ( count == 1 ):
sel = list(sel)
sel[3] += len(repl) - len(text)
self.setSelection(*sel)
else:
self.findNext( repl,
False,
caseSensitive,
False,
True )
return count
|
[
"def",
"findRepl",
"(",
"self",
",",
"text",
",",
"repl",
",",
"caseSensitive",
"=",
"False",
",",
"replaceAll",
"=",
"False",
")",
":",
"# make sure something is selected",
"if",
"(",
"not",
"text",
")",
":",
"return",
"0",
"# make sure we have some text selected to replace\r",
"if",
"(",
"self",
".",
"selectedText",
"(",
")",
"!=",
"text",
")",
":",
"found",
"=",
"self",
".",
"findNext",
"(",
"text",
",",
"False",
",",
"caseSensitive",
",",
"False",
",",
"True",
")",
"if",
"(",
"not",
"found",
")",
":",
"return",
"0",
"sel",
"=",
"self",
".",
"getSelection",
"(",
")",
"alltext",
"=",
"self",
".",
"text",
"(",
")",
"# replace all instances\r",
"if",
"(",
"replaceAll",
")",
":",
"sensitivity",
"=",
"Qt",
".",
"CaseInsensitive",
"if",
"(",
"caseSensitive",
")",
":",
"sensitivity",
"=",
"Qt",
".",
"CaseSensitive",
"count",
"=",
"alltext",
".",
"count",
"(",
"text",
",",
"sensitivity",
")",
"alltext",
".",
"replace",
"(",
"text",
",",
"repl",
",",
"sensitivity",
")",
"else",
":",
"count",
"=",
"1",
"startpos",
"=",
"self",
".",
"positionFromLineIndex",
"(",
"sel",
"[",
"0",
"]",
",",
"sel",
"[",
"1",
"]",
")",
"alltext",
".",
"replace",
"(",
"startpos",
",",
"len",
"(",
"text",
")",
",",
"repl",
")",
"self",
".",
"setText",
"(",
"alltext",
")",
"if",
"(",
"count",
"==",
"1",
")",
":",
"sel",
"=",
"list",
"(",
"sel",
")",
"sel",
"[",
"3",
"]",
"+=",
"len",
"(",
"repl",
")",
"-",
"len",
"(",
"text",
")",
"self",
".",
"setSelection",
"(",
"*",
"sel",
")",
"else",
":",
"self",
".",
"findNext",
"(",
"repl",
",",
"False",
",",
"caseSensitive",
",",
"False",
",",
"True",
")",
"return",
"count"
] | 31.492063 | 13.857143 |
def event_schedule_difference(old_schedule, new_schedule):
"""Compute the difference between two schedules from an event perspective
Parameters
----------
old_schedule : list or tuple
of :py:class:`resources.ScheduledItem` objects
new_schedule : list or tuple
of :py:class:`resources.ScheduledItem` objects
Returns
-------
list
A list of :py:class:`resources.ChangedEventScheduledItem` objects
Example
-------
>>> from conference_scheduler.resources import Event, Slot, ScheduledItem
>>> from conference_scheduler.scheduler import event_schedule_difference
>>> events = [Event(f'event_{i}', 30, 0) for i in range(5)]
>>> slots = [Slot(f'venue_{i}', '', 30, 100, None) for i in range(5)]
>>> old_schedule = (
... ScheduledItem(events[0], slots[0]),
... ScheduledItem(events[1], slots[1]),
... ScheduledItem(events[2], slots[2]))
>>> new_schedule = (
... ScheduledItem(events[0], slots[0]),
... ScheduledItem(events[1], slots[2]),
... ScheduledItem(events[2], slots[3]),
... ScheduledItem(events[3], slots[4]))
>>> diff = (event_schedule_difference(old_schedule, new_schedule))
>>> print([item.event.name for item in diff])
['event_1', 'event_2', 'event_3']
"""
old = {item.event.name: item for item in old_schedule}
new = {item.event.name: item for item in new_schedule}
common_events = set(old.keys()).intersection(new.keys())
added_events = new.keys() - old.keys()
removed_events = old.keys() - new.keys()
changed = [
ChangedEventScheduledItem(
old[event].event, old[event].slot, new[event].slot)
for event in common_events
if old[event].slot != new[event].slot
]
added = [
ChangedEventScheduledItem(new[event].event, None, new[event].slot)
for event in added_events
]
removed = [
ChangedEventScheduledItem(old[event].event, old[event].slot, None)
for event in removed_events
]
return sorted(changed + added + removed, key=lambda item: item.event.name)
|
[
"def",
"event_schedule_difference",
"(",
"old_schedule",
",",
"new_schedule",
")",
":",
"old",
"=",
"{",
"item",
".",
"event",
".",
"name",
":",
"item",
"for",
"item",
"in",
"old_schedule",
"}",
"new",
"=",
"{",
"item",
".",
"event",
".",
"name",
":",
"item",
"for",
"item",
"in",
"new_schedule",
"}",
"common_events",
"=",
"set",
"(",
"old",
".",
"keys",
"(",
")",
")",
".",
"intersection",
"(",
"new",
".",
"keys",
"(",
")",
")",
"added_events",
"=",
"new",
".",
"keys",
"(",
")",
"-",
"old",
".",
"keys",
"(",
")",
"removed_events",
"=",
"old",
".",
"keys",
"(",
")",
"-",
"new",
".",
"keys",
"(",
")",
"changed",
"=",
"[",
"ChangedEventScheduledItem",
"(",
"old",
"[",
"event",
"]",
".",
"event",
",",
"old",
"[",
"event",
"]",
".",
"slot",
",",
"new",
"[",
"event",
"]",
".",
"slot",
")",
"for",
"event",
"in",
"common_events",
"if",
"old",
"[",
"event",
"]",
".",
"slot",
"!=",
"new",
"[",
"event",
"]",
".",
"slot",
"]",
"added",
"=",
"[",
"ChangedEventScheduledItem",
"(",
"new",
"[",
"event",
"]",
".",
"event",
",",
"None",
",",
"new",
"[",
"event",
"]",
".",
"slot",
")",
"for",
"event",
"in",
"added_events",
"]",
"removed",
"=",
"[",
"ChangedEventScheduledItem",
"(",
"old",
"[",
"event",
"]",
".",
"event",
",",
"old",
"[",
"event",
"]",
".",
"slot",
",",
"None",
")",
"for",
"event",
"in",
"removed_events",
"]",
"return",
"sorted",
"(",
"changed",
"+",
"added",
"+",
"removed",
",",
"key",
"=",
"lambda",
"item",
":",
"item",
".",
"event",
".",
"name",
")"
] | 35.844828 | 21.189655 |
def normalVectorRDD(sc, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the standard normal distribution.
:param sc: SparkContext used to create the RDD.
:param numRows: Number of Vectors in the RDD.
:param numCols: Number of elements in each Vector.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of Vector with vectors containing i.i.d. samples ~ `N(0.0, 1.0)`.
>>> import numpy as np
>>> mat = np.matrix(RandomRDDs.normalVectorRDD(sc, 100, 100, seed=1).collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - 0.0) < 0.1
True
>>> abs(mat.std() - 1.0) < 0.1
True
"""
return callMLlibFunc("normalVectorRDD", sc._jsc, numRows, numCols, numPartitions, seed)
|
[
"def",
"normalVectorRDD",
"(",
"sc",
",",
"numRows",
",",
"numCols",
",",
"numPartitions",
"=",
"None",
",",
"seed",
"=",
"None",
")",
":",
"return",
"callMLlibFunc",
"(",
"\"normalVectorRDD\"",
",",
"sc",
".",
"_jsc",
",",
"numRows",
",",
"numCols",
",",
"numPartitions",
",",
"seed",
")"
] | 44.772727 | 24.681818 |