text (stringlengths 89–104k) | code_tokens (list) | avg_line_len (float64 7.91–980) | score (float64 0–630)
---|---|---|---|
def preloop(self):
''' Executed before the command loop starts. '''
script_dir = os.path.dirname(os.path.realpath(__file__))
help_dir = os.path.join(script_dir, HELP_DIR_NAME)
self.load_forth_commands(help_dir)
    self.load_shell_commands(help_dir)
| avg_line_len: 46.666667 | score: 14 |
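A minimal sketch of how such a preloop hook is driven, assuming a cmd.Cmd-based shell; the ForthShell class, HELP_DIR_NAME value, and do_quit command are illustrative stand-ins, not taken from the sample above:

import cmd
import os

HELP_DIR_NAME = "help"  # hypothetical constant mirroring the sample

class ForthShell(cmd.Cmd):
    prompt = "forth> "

    def preloop(self):
        # Runs exactly once, before cmdloop() starts reading commands.
        script_dir = os.path.dirname(os.path.realpath(__file__))
        self.help_dir = os.path.join(script_dir, HELP_DIR_NAME)

    def do_quit(self, arg):
        """Exit the shell."""
        return True

if __name__ == "__main__":
    ForthShell().cmdloop()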
def find_TPs_and_DUPs(self, percent=5., makefig=False):
"""
Function which finds TPs and uses the calc_DUP_parameter
function. To calculate DUP parameter evolution dependent of
the star or core mass.
Parameters
----------
fig : integer
Figure number to plot.
t0_model : integer
First he-shell lum peak.
percent : float
dredge-up is defined as when the mass dredged up is a certain
percent of the total mass dredged up during that event, which
is set by the user in this variable.
The default is 5.
makefig :
do you want a figure to be made?
Returns
-------
TPmods : array
model numbers at the peak of each thermal pulse
DUPmods : array
model numbers at the dredge-up, where dredge-up is defined as
when the mass dredged up is a certain percent of the total mass
dredged up during that event, which is set by the user
TPend : array
model numbers at the end of the PDCZ for each TP
lambda : array
DUP efficiency for each pulse
"""
t0_model=self.find_first_TP()
t0_idx=(t0_model-self.get("model_number")[0])
first_TP_he_lum=10**(self.get("log_LHe")[t0_idx])
he_lum=10**(self.get("log_LHe")[t0_idx:])
h_lum=10**(self.get("log_LH")[t0_idx:])
model=self.get("model_number")[t0_idx:]
    try:
        h1_bndry = self.get("h1_boundary_mass")[t0_idx:]
    except Exception:
        try:
            h1_bndry = self.get('he_core_mass')[t0_idx:]
        except Exception:
            pass
# SJ find TPs by finding local maxima in He-burning luminosity and
# checking that the he_lum is greater than the h_lum:
maxima=[0]
for i in range(2,len(model)-1):
if he_lum[i] > he_lum[i-1] and he_lum[i] > he_lum[i+1]:
if he_lum[i-1] > he_lum[i-2] and he_lum[i+1] > he_lum[i+2]:
if he_lum[i] > h_lum[i]:
maxima.append(i)
# find DUPs when h-boundary first decreases by more than XX% of the total DUP
# depth:
DUPs=[]
TPend=[]
maxDUPs=[]
for i in range(len(maxima)):
idx1=maxima[i]
try:
idx2=maxima[i+1]
except IndexError:
idx2=-1
bound=h1_bndry[idx1:idx2]
bound0=bound[0]
        if bound0 <= min(bound):  # then no DUP
DUP=idx1
DUPs.append(DUP)
maxDUPs.append(DUP)
else:
maxDUPs.append(idx1+bound.argmin()) # model number of deepest extend of 3DUP
maxDUP=bound0-min(bound) # total mass dredged up in DUP
db=bound - bound[0]
db_maxDUP = old_div(db, maxDUP)
DUP=np.where(db_maxDUP <= old_div(-float(percent),100.))[0][0]
DUPs.append(DUP+idx1)
# # Alternative definition, where envelope reaches mass coordinate
# # where top of PDCZ had resided during the TP:
# top=self.get('mx2_top')[idx1]
# DUP=np.abs(bound-top).argmin()
# DUPs.append(DUP+idx1)
# find end of PDCZ by seeking from TP peak and checking mx2_bot:
mx2b=self.get('mx2_bot')[t0_idx:][idx1:idx2]
        for j in range(len(mx2b)):  # j avoids shadowing the outer loop's i
            if mx2b[j] == 0.:
                endTP = j + idx1
                TPend.append(endTP)
                break
# 3DUP efficiency:
lambd=[0.]
for i in range(1,len(maxima)):
dmenv = h1_bndry[maxima[i]] - h1_bndry[maxDUPs[i-1]]
dmdredge = h1_bndry[maxima[i]] - h1_bndry[maxDUPs[i]]
lambd.append(old_div(dmdredge,dmenv))
    TPmods = np.array(maxima) + t0_idx
    DUPmods = np.array(DUPs) + t0_idx
    TPend = np.array(TPend) + t0_idx
    return TPmods, DUPmods, TPend, lambd
| avg_line_len: 37.149533 | score: 18.738318 |
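For reference, the efficiency computed in the final loop above is lambda = dmdredge / dmenv; a standalone sketch of that step with illustrative core-mass values (not from any real stellar model):

def dredge_up_efficiency(m_core_tp, m_core_prev_dup, m_core_this_dup):
    # dmenv: core growth from the previous deepest dredge-up to this TP.
    # dmdredge: core mass removed by the dredge-up following this TP.
    dmenv = m_core_tp - m_core_prev_dup
    dmdredge = m_core_tp - m_core_this_dup
    return dmdredge / dmenv

print(dredge_up_efficiency(0.600, 0.595, 0.597))  # ~0.6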
def orchestration_restore(self, saved_artifact_info, custom_params=None):
"""Orchestration restore
:param saved_artifact_info: json with all required data to restore configuration on the device
:param custom_params: custom parameters
"""
if saved_artifact_info is None or saved_artifact_info == '':
raise Exception('ConfigurationOperations', 'saved_artifact_info is None or empty')
saved_artifact_info = JsonRequestDeserializer(jsonpickle.decode(saved_artifact_info))
if not hasattr(saved_artifact_info, 'saved_artifacts_info'):
raise Exception('ConfigurationOperations', 'Saved_artifacts_info is missing')
saved_config = saved_artifact_info.saved_artifacts_info
params = None
if custom_params:
params = JsonRequestDeserializer(jsonpickle.decode(custom_params))
_validate_custom_params(params)
self._validate_artifact_info(saved_config)
if saved_config.restore_rules.requires_same_resource \
and saved_config.resource_name.lower() != self.resource_config.name.lower():
raise Exception('ConfigurationOperations', 'Incompatible resource, expected {}'.format(self.resource_config.name))
restore_params = {'configuration_type': 'running',
'restore_method': 'override',
'vrf_management_name': None,
'path': '{}:{}'.format(saved_config.saved_artifact.artifact_type,
saved_config.saved_artifact.identifier)}
if hasattr(params, 'custom_params'):
if hasattr(params.custom_params, 'restore_method'):
restore_params['restore_method'] = params.custom_params.restore_method
if hasattr(params.custom_params, 'configuration_type'):
restore_params['configuration_type'] = params.custom_params.configuration_type
if hasattr(params.custom_params, 'vrf_management_name'):
restore_params['vrf_management_name'] = params.custom_params.vrf_management_name
if 'startup' in saved_config.saved_artifact.identifier.split('/')[-1]:
restore_params['configuration_type'] = 'startup'
    self.restore(**restore_params)
| avg_line_len: 50.622222 | score: 32.6 |
def to_xml(self):
'''
Returns a DOM representation of the line.
@return: Element
'''
for n, v in {"name": self.name, "quantity": self.quantity,
"unit_price": self.unit_price}.items():
if is_empty_or_none(v):
raise LineError("'%s' attribute cannot be empty or None." %
n)
doc = Document()
root = doc.createElement("line")
super(Line, self).to_xml(root)
self._create_text_node(root, "date", self.date)
self._create_text_node(root, "name", self.name, True)
self._create_text_node(root, "description", self.description, True)
self._create_text_node(root, "quantity", self.quantity)
self._create_text_node(root, "unitPrice", self.unit_price)
self._create_text_node(root, "unit", self.unit)
self._create_text_node(root, "gin", self.gin)
self._create_text_node(root, "gtin", self.gtin)
self._create_text_node(root, "sscc", self.sscc)
if len(self.__discounts):
discounts = root.ownerDocument.createElement("discounts")
root.appendChild(discounts)
for discount in self.__discounts:
if not issubclass(discount.__class__, Discount):
raise LineError('discount of type %s is not an ' \
'instance or a subclass of %s' %
(discount.__class__.__name__,
Discount.__name__))
discounts.appendChild(discount.to_xml())
if len(self.__taxes):
taxes = root.ownerDocument.createElement("taxes")
root.appendChild(taxes)
for tax in self.__taxes:
if not issubclass(tax.__class__, Tax):
raise LineError('tax of type %s is not an instance ' \
'or a subclass of %s' %
(tax.__class__.__name__, Tax.__name__))
taxes.appendChild(tax.to_xml())
    return root
| avg_line_len: 45.391304 | score: 19.782609 |
def dp_from_p(p, ps, p_top=0., p_bot=1.1e5):
"""Get level thickness of pressure data, incorporating surface pressure.
Level edges are defined as halfway between the levels, as well as the user-
specified uppermost and lowermost values. The dp of levels whose bottom
pressure is less than the surface pressure is not changed by ps, since they
don't intersect the surface. If ps is in between a level's top and bottom
pressures, then its dp becomes the pressure difference between its top and
ps. If ps is less than a level's top and bottom pressures, then that level
is underground and its values are masked.
    Note that postprocessing routines (e.g. at GFDL) typically mask out data
    wherever the surface pressure is less than the level's given value, not
    the level's upper edge. This masks out more levels than the method
    used here.
    """
p_str = get_dim_name(p, (internal_names.PLEVEL_STR, 'plev'))
p_vals = to_pascal(p.values.copy())
# Layer edges are halfway between the given pressure levels.
p_edges_interior = 0.5*(p_vals[:-1] + p_vals[1:])
p_edges = np.concatenate(([p_bot], p_edges_interior, [p_top]))
p_edge_above = p_edges[1:]
p_edge_below = p_edges[:-1]
dp = p_edge_below - p_edge_above
    if not np.all(dp > 0):
        raise ValueError("dp array not all > 0 : {}".format(dp))
# Pressure difference between ps and the upper edge of each pressure level.
p_edge_above_xr = xr.DataArray(p_edge_above, dims=p.dims, coords=p.coords)
dp_to_sfc = ps - p_edge_above_xr
# Find the level adjacent to the masked, under-ground levels.
change = xr.DataArray(np.zeros(dp_to_sfc.shape), dims=dp_to_sfc.dims,
coords=dp_to_sfc.coords)
change[{p_str: slice(1, None)}] = np.diff(
np.sign(ps - to_pascal(p.copy()))
)
dp_combined = xr.DataArray(np.where(change, dp_to_sfc, dp),
dims=dp_to_sfc.dims, coords=dp_to_sfc.coords)
# Mask levels that are under ground.
above_ground = ps > to_pascal(p.copy())
above_ground[p_str] = p[p_str]
dp_with_ps = dp_combined.where(above_ground)
# Revert to original dim order.
possible_dim_orders = [
(internal_names.TIME_STR, p_str, internal_names.LAT_STR,
internal_names.LON_STR),
(internal_names.TIME_STR, p_str, internal_names.LAT_STR),
(internal_names.TIME_STR, p_str, internal_names.LON_STR),
(internal_names.TIME_STR, p_str),
(p_str, internal_names.LAT_STR, internal_names.LON_STR),
(p_str, internal_names.LAT_STR),
(p_str, internal_names.LON_STR),
(p_str,),
]
for dim_order in possible_dim_orders:
try:
return dp_with_ps.transpose(*dim_order)
except ValueError:
logging.debug("Failed transpose to dims: {}".format(dim_order))
else:
logging.debug("No transpose was successful.")
    return dp_with_ps
| avg_line_len: 46.709677 | score: 19.951613 |
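A minimal 1-D sketch of the edge/thickness construction used above, without the xarray bookkeeping; the pressure values are illustrative:

import numpy as np

p = np.array([1.0e5, 8.5e4, 7.0e4, 5.0e4])  # level centres [Pa], decreasing upward
p_bot, p_top = 1.1e5, 0.0
p_edges_interior = 0.5 * (p[:-1] + p[1:])
p_edges = np.concatenate(([p_bot], p_edges_interior, [p_top]))
dp = p_edges[:-1] - p_edges[1:]  # per-layer thickness, all positive
print(dp)  # [17500. 15000. 17500. 60000.]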
async def close(self, event):
"""Close the PLM device connection and don't try to reconnect."""
_LOGGER.info('Closing connection to Insteon Modem')
self._closing = True
self._auto_reconnect = False
await self.protocol.close()
if self.protocol.transport:
self.protocol.transport.close()
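    # Note: the explicit loop argument to asyncio.sleep below was removed in
    # Python 3.10, so this code targets older asyncio releases.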
await asyncio.sleep(0, loop=self._loop)
    _LOGGER.info('Insteon Modem connection closed')
| avg_line_len: 44 | score: 8.1 |
def limit_value_string_length(value):
"""This method limits the string representation of the value to MAX_VALUE_LABEL_TEXT_LENGTH + 3 characters.
:param value: Value to limit string representation
:return: String holding the value with a maximum length of MAX_VALUE_LABEL_TEXT_LENGTH + 3
"""
if isinstance(value, string_types) and len(value) > constants.MAX_VALUE_LABEL_TEXT_LENGTH:
value = value[:constants.MAX_VALUE_LABEL_TEXT_LENGTH] + "..."
final_string = " " + value + " "
elif isinstance(value, (dict, list)) and len(str(value)) > constants.MAX_VALUE_LABEL_TEXT_LENGTH:
value_text = str(value)[:constants.MAX_VALUE_LABEL_TEXT_LENGTH] + "..."
final_string = " " + value_text + " "
else:
final_string = " " + str(value) + " "
    return final_string
| avg_line_len: 50.5 | score: 24.5 |
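A quick usage sketch, assuming a hypothetical constants.MAX_VALUE_LABEL_TEXT_LENGTH of 10:

limit_value_string_length("short")          # ' short '
limit_value_string_length("a" * 25)         # ' aaaaaaaaaa... '
limit_value_string_length(list(range(50)))  # ' [0, 1, 2, ... '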
def listed(self):
"""Print blacklist packages
"""
print("\nPackages in the blacklist:\n")
for black in self.get_black():
if black:
print("{0}{1}{2}".format(self.meta.color["GREEN"], black,
self.meta.color["ENDC"]))
self.quit = True
if self.quit:
print("") | [
"def",
"listed",
"(",
"self",
")",
":",
"print",
"(",
"\"\\nPackages in the blacklist:\\n\"",
")",
"for",
"black",
"in",
"self",
".",
"get_black",
"(",
")",
":",
"if",
"black",
":",
"print",
"(",
"\"{0}{1}{2}\"",
".",
"format",
"(",
"self",
".",
"meta",
".",
"color",
"[",
"\"GREEN\"",
"]",
",",
"black",
",",
"self",
".",
"meta",
".",
"color",
"[",
"\"ENDC\"",
"]",
")",
")",
"self",
".",
"quit",
"=",
"True",
"if",
"self",
".",
"quit",
":",
"print",
"(",
"\"\"",
")"
]
| avg_line_len: 34.727273 | score: 14.181818 |
def to_dictionary(self):
"""Serialize an object into dictionary form. Useful if you have to
serialize an array of objects into JSON. Otherwise, if you call the
:meth:`to_json` method on each object in the list and then try to
dump the array, you end up with an array with one string."""
j = {}
for p in self.properties:
j[p] = getattr(self, p)
    return j
| avg_line_len: 37.636364 | score: 20.636364 |
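The motivating case from the docstring as a self-contained sketch; the Point class and its properties tuple are illustrative:

import json

class Point:
    properties = ("x", "y")

    def __init__(self, x, y):
        self.x, self.y = x, y

    def to_dictionary(self):
        return {p: getattr(self, p) for p in self.properties}

# One clean JSON array rather than an array of JSON strings:
print(json.dumps([Point(1, 2).to_dictionary(), Point(3, 4).to_dictionary()]))
# [{"x": 1, "y": 2}, {"x": 3, "y": 4}]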
def _parse(self, source, name, filename):
"""Internal parsing function used by `parse` and `compile`."""
    return Parser(self, source, name, _encode_filename(filename)).parse()
| avg_line_len: 62.666667 | score: 12.666667 |
def append_rows(self, rows, between, refresh_presision):
"""Transform the rows of data to Measurements.
Keyword arguments:
rows -- an array of arrays [datetime, integral_measurement]
between -- time between integral_measurements in seconds
refresh_presision -- time between sensor values that compose the integral_measurements
"""
for r in rows:
        Measurement.register_or_check(finish=r[0], mean=r[1]/between, between=between, refresh_presision=refresh_presision, configuration=self)
| avg_line_len: 49.5 | score: 28.7 |
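An illustrative call, assuming integral measurements logged once a minute; the values and the config object are hypothetical:

import datetime

rows = [
    (datetime.datetime(2024, 1, 1, 0, 1), 120.0),  # mean = 120 / 60 = 2.0
    (datetime.datetime(2024, 1, 1, 0, 2), 180.0),  # mean = 180 / 60 = 3.0
]
# config.append_rows(rows, between=60, refresh_presision=1)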
def migrate(action):
" Migration utils [create, run, undo, redo]. "
from flaskext.evolution import Evolution
from flask import current_app
evolution = Evolution(current_app)
    evolution.manager(action)
| avg_line_len: 35.666667 | score: 9 |
def cmd_delete(args):
"""Deletes a node"""
major = args.get(0)
minor = args.get(1)
if major is not None:
if major in penStore.data:
if minor is None:
if len(penStore.data[major]) > 0:
if raw_input("are you sure (y/n)? ") not in ['y', 'Y', 'yes', 'Yes']:
return ExitStatus.ABORT
penStore.deleteList(major)
puts("list deleted")
elif minor in penStore.data[major]:
penStore.deleteNote(major, minor)
puts("note deleted")
else:
puts("no such note, sorry! (%s)" % minor)
else:
puts("no such list, sorry! (%s)" % major)
else:
print """
- pen: delete help ------------------------------------------------------------
pen delete <list> deletes list and all of its notes
pen delete <list> <note> deletes note
""" | [
"def",
"cmd_delete",
"(",
"args",
")",
":",
"major",
"=",
"args",
".",
"get",
"(",
"0",
")",
"minor",
"=",
"args",
".",
"get",
"(",
"1",
")",
"if",
"major",
"is",
"not",
"None",
":",
"if",
"major",
"in",
"penStore",
".",
"data",
":",
"if",
"minor",
"is",
"None",
":",
"if",
"len",
"(",
"penStore",
".",
"data",
"[",
"major",
"]",
")",
">",
"0",
":",
"if",
"raw_input",
"(",
"\"are you sure (y/n)? \"",
")",
"not",
"in",
"[",
"'y'",
",",
"'Y'",
",",
"'yes'",
",",
"'Yes'",
"]",
":",
"return",
"ExitStatus",
".",
"ABORT",
"penStore",
".",
"deleteList",
"(",
"major",
")",
"puts",
"(",
"\"list deleted\"",
")",
"elif",
"minor",
"in",
"penStore",
".",
"data",
"[",
"major",
"]",
":",
"penStore",
".",
"deleteNote",
"(",
"major",
",",
"minor",
")",
"puts",
"(",
"\"note deleted\"",
")",
"else",
":",
"puts",
"(",
"\"no such note, sorry! (%s)\"",
"%",
"minor",
")",
"else",
":",
"puts",
"(",
"\"no such list, sorry! (%s)\"",
"%",
"major",
")",
"else",
":",
"print",
"\"\"\"\n- pen: delete help ------------------------------------------------------------\n\npen delete <list> deletes list and all of its notes\npen delete <list> <note> deletes note\n\"\"\""
]
| avg_line_len: 35.961538 | score: 15.346154 |
def total_sparse_pixels_from_mask(mask, unmasked_sparse_grid_pixel_centres):
"""Given the full (i.e. without removing pixels which are outside the regular-masks) pixelization grid's pixel centers
and the regular-masks, compute the total number of pixels which are within the regular-masks and thus used by the
pixelization grid.
Parameters
-----------
mask : ccd.masks.Mask
The regular-masks within which pixelization pixels must be inside
unmasked_sparse_grid_pixel_centres : ndarray
The centres of the unmasked pixelization grid pixels.
"""
total_sparse_pixels = 0
for unmasked_sparse_pixel_index in range(unmasked_sparse_grid_pixel_centres.shape[0]):
y = unmasked_sparse_grid_pixel_centres[unmasked_sparse_pixel_index, 0]
x = unmasked_sparse_grid_pixel_centres[unmasked_sparse_pixel_index, 1]
if not mask[y,x]:
total_sparse_pixels += 1
    return total_sparse_pixels
| avg_line_len: 39.333333 | score: 27.791667 |
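A small concrete check of the counting logic, where True marks masked pixels; the arrays are illustrative:

import numpy as np

mask = np.array([[True,  True,  True],
                 [True, False,  True],
                 [True,  True,  True]])
centres = np.array([[1, 1], [0, 0]])
# Only the centre at (1, 1) lands on an unmasked pixel, so the count is 1.
print(sum(1 for y, x in centres if not mask[y, x]))  # 1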
def get_pushes(self, project, **params):
"""
Gets pushes from project, filtered by parameters
By default this method will just return the latest 10 pushes (if they exist)
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results
"""
    return self._get_json_list(self.PUSH_ENDPOINT, project, **params)
| avg_line_len: 40 | score: 21.8 |
def submarine(space, smooth=True, taper=20.0):
"""Return a 'submarine' phantom consisting in an ellipsoid and a box.
Parameters
----------
space : `DiscreteLp`
Discretized space in which the phantom is supposed to be created.
smooth : bool, optional
If ``True``, the boundaries are smoothed out. Otherwise, the
function steps from 0 to 1 at the boundaries.
taper : float, optional
Tapering parameter for the boundary smoothing. Larger values
mean faster taper, i.e. sharper boundaries.
Returns
-------
phantom : ``space`` element
The submarine phantom in ``space``.
"""
if space.ndim == 2:
if smooth:
return _submarine_2d_smooth(space, taper)
else:
return _submarine_2d_nonsmooth(space)
else:
raise ValueError('phantom only defined in 2 dimensions, got {}'
                         ''.format(space.ndim))
| avg_line_len: 34.222222 | score: 19.259259 |
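A hedged usage sketch, assuming this is the ODL-style phantom API (the uniform_discr arguments are illustrative):

# import odl
# space = odl.uniform_discr([-1, -1], [1, 1], (100, 100))
# phantom = submarine(space, smooth=True, taper=20.0)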
def run(self, args):
"""
    Process command line arguments and run the given command
(start, stop, restart).
"""
prog = args[0]
if len(args) < 2:
self.usage(prog)
return 1
command = args[1]
if command == "start":
self.start()
elif command == "stop":
self.stop()
elif command == "restart":
self.stop()
self.start()
else:
self.usage(prog)
return 1
    return 0
| avg_line_len: 24.136364 | score: 16.136364 |
def from_xml(self,xmlnode):
"""Initialize Delay object from an XML node.
:Parameters:
- `xmlnode`: the jabber:x:delay XML element.
:Types:
- `xmlnode`: `libxml2.xmlNode`"""
if xmlnode.type!="element":
raise ValueError("XML node is not a jabber:x:delay element (not an element)")
ns=get_node_ns_uri(xmlnode)
if ns and ns!=DELAY_NS or xmlnode.name!="x":
raise ValueError("XML node is not a jabber:x:delay element")
stamp=xmlnode.prop("stamp")
if stamp.endswith("Z"):
stamp=stamp[:-1]
if "-" in stamp:
stamp=stamp.split("-",1)[0]
try:
tm = time.strptime(stamp, "%Y%m%dT%H:%M:%S")
except ValueError:
raise BadRequestProtocolError("Bad timestamp")
tm=tm[0:8]+(0,)
self.timestamp=datetime.datetime.fromtimestamp(time.mktime(tm))
delay_from=from_utf8(xmlnode.prop("from"))
if delay_from:
try:
self.delay_from = JID(delay_from)
except JIDError:
raise JIDMalformedProtocolError("Bad JID in the jabber:x:delay 'from' attribute")
else:
self.delay_from = None
    self.reason = from_utf8(xmlnode.getContent())
| avg_line_len: 39.46875 | score: 17.5 |
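The legacy XEP-0091 stamp format parsed above is CCYYMMDDThh:mm:ss in UTC; a standalone check of the parsing step:

import time
import datetime

tm = time.strptime("20240101T12:30:00", "%Y%m%dT%H:%M:%S")
print(datetime.datetime(*tm[:6]))  # 2024-01-01 12:30:00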
def _bprop_wrap(name, reqtype, doc):
"""
Helper function to generate properties
:param name: The name of the subfield in the JSON dictionary
:param reqtype: The compound query type the query
list should be coerced into
:param doc: Documentation for the field
:return: the property.
"""
def fget(self):
return self._subqueries.get(name)
def fset(self, value):
if value is None:
if name in self._subqueries:
del self._subqueries[name]
elif isinstance(value, reqtype):
self._subqueries[name] = value
elif isinstance(value, Query):
self._subqueries[name] = reqtype(value)
else:
try:
it = iter(value)
            except TypeError:  # iter() raises TypeError for non-iterables
raise TypeError('Value must be instance of Query')
l = []
for q in it:
if not isinstance(q, Query):
raise TypeError('Item is not a query!', q)
l.append(q)
self._subqueries[name] = reqtype(*l)
def fdel(self):
setattr(self, name, None)
    return property(fget, fset, fdel, doc)
| avg_line_len: 31.108108 | score: 13.594595 |
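A self-contained usage sketch with stand-in Query and AndQuery types; all names here are illustrative, not from the source library:

class Query(object):
    pass

class AndQuery(Query):
    def __init__(self, *queries):
        self.queries = queries

class BooleanQuery(object):
    def __init__(self):
        self._subqueries = {}
    must = _bprop_wrap("must", AndQuery, "Queries that must all match")

bq = BooleanQuery()
bq.must = [Query(), Query()]  # an iterable of queries is coerced into AndQuery
del bq.must                   # removes the entry from _subqueries again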
def _loadThreePartSource(self, sourceFName, sourceLines):
"""is a helper for _loadOneSource.
"""
lineno = 1
for ln in sourceLines:
lineno += 1
try:
stem, pubType, source = ln.split("\t", 2)
stem = stem.strip()[-9:]
self._addPub(stem, source)
if pubType=="C":
self.confstems[stem] = 1
except ValueError:
            sys.stderr.write("sourcematchers.py: %s (%d): skipping source line: %s"%(sourceFName,lineno,ln))
| avg_line_len: 39.571429 | score: 14.5 |
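Each source line is stem<TAB>type<TAB>name; a quick illustration of why maxsplit=2 matters (the sample line is made up):

line = "ApJ......\tJ\tThe Astrophysical Journal"
stem, pub_type, source = line.split("\t", 2)
# maxsplit=2 keeps any further tabs inside the free-text source name.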
def latrec(radius, longitude, latitude):
"""
Convert from latitudinal coordinates to rectangular coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/latrec_c.html
:param radius: Distance of a point from the origin.
:type radius: float
:param longitude: Longitude of point in radians.
:type longitude: float
:param latitude: Latitude of point in radians.
:type latitude: float
:return: Rectangular coordinates of the point.
:rtype: 3-Element Array of floats
"""
radius = ctypes.c_double(radius)
longitude = ctypes.c_double(longitude)
latitude = ctypes.c_double(latitude)
rectan = stypes.emptyDoubleVector(3)
libspice.latrec_c(radius, longitude, latitude, rectan)
    return stypes.cVectorToPython(rectan)
| avg_line_len: 36.666667 | score: 12.47619 |
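The mapping implemented by the underlying CSPICE routine is x = r cos(lat) cos(lon), y = r cos(lat) sin(lon), z = r sin(lat); a pure-Python cross-check:

import math

def latrec_py(radius, longitude, latitude):
    x = radius * math.cos(latitude) * math.cos(longitude)
    y = radius * math.cos(latitude) * math.sin(longitude)
    z = radius * math.sin(latitude)
    return [x, y, z]

print(latrec_py(1.0, 0.0, 0.0))  # [1.0, 0.0, 0.0]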
def generate(self, num_to_generate, starting_place):
"""Generate data based on some initial position."""
res = []
activ = starting_place[None, :]
    index = getattr(activ, self.argfunc)(1)
item = self.weights[index]
for x in range(num_to_generate):
activ = self.forward(item, prev_activation=activ)[0]
        index = getattr(activ, self.argfunc)(1)
res.append(index)
item = self.weights[index]
    return res
| avg_line_len: 38.692308 | score: 13.538462 |
def get_xdg_env(env_name, fallback):
""" Used for XDG_* env variables to return fallback if unset *or* empty """
env = os.environ.get(env_name)
    return env if env else fallback
| avg_line_len: 46 | score: 3.75 |
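A typical call site; per the XDG Base Directory spec an unset or empty variable falls back to the default:

import os

config_home = get_xdg_env("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))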
def execute(self):
"""Execute Main Loop"""
try:
logging.debug("Entering IOLoop")
self.loop.start()
logging.debug("Leaving IOLoop")
except KeyboardInterrupt:
logging.debug("Leaving IOLoop by KeyboardInterrupt")
finally:
        self.hw_communication.disconnect()
| avg_line_len: 33.6 | score: 12.9 |
def tabulate(tabular_data, headers=(), tablefmt="simple",
floatfmt=_DEFAULT_FLOATFMT, numalign="decimal", stralign="left",
missingval=_DEFAULT_MISSINGVAL, showindex="default",
disable_numparse=False):
"""Format a fixed width table for pretty printing.
"""
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(
tabular_data, headers, showindex=showindex)
# empty values in the first column of RST tables should be escaped (issue #82)
# "" should be escaped as "\\ " or ".."
if tablefmt == 'rst':
list_of_lists, headers = _rst_escape_first_column(
list_of_lists, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\t'.join(['\t'.join(map(_text_type, headers))] +
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
if tablefmt in multiline_formats and _is_multiline(plain_text):
tablefmt = multiline_formats.get(tablefmt, tablefmt)
is_multiline = True
else:
is_multiline = False
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
# format rows and columns, convert numeric values to strings
cols = list(izip_longest(*list_of_lists))
numparses = _expand_numparse(disable_numparse, len(cols))
coltypes = [_column_type(col, numparse=np) for col, np in
zip(cols, numparses)]
if isinstance(floatfmt, basestring): # old version
# just duplicate the string to use in each column
float_formats = len(cols) * [floatfmt]
else: # if floatfmt is list, tuple etc we have one per column
float_formats = list(floatfmt)
if len(float_formats) < len(cols):
float_formats.extend(
(len(cols) - len(float_formats)) * [_DEFAULT_FLOATFMT])
if isinstance(missingval, basestring):
missing_vals = len(cols) * [missingval]
else:
missing_vals = list(missingval)
if len(missing_vals) < len(cols):
missing_vals.extend(
(len(cols) - len(missing_vals)) * [_DEFAULT_MISSINGVAL])
cols = [[_format(v, ct, fl_fmt, miss_v, has_invisible) for v in c]
for c, ct, fl_fmt, miss_v in zip(cols, coltypes, float_formats, missing_vals)]
# align columns
aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
minwidths = [
width_fn(h) + MIN_PADDING for h in headers] if headers else [0] * len(cols)
cols = [_align_column(c, a, minw, has_invisible, enable_widechars, is_multiline)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, max(width_fn(cl) for cl in c))
for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw, width_fn(h), is_multiline, width_fn)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [max(width_fn(cl) for cl in c) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows,
                         minwidths, aligns, is_multiline)
| avg_line_len: 45.594937 | score: 20.949367 |
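A typical call against this function, using its documented defaults (output shown approximately, "simple" format):

rows = [["spam", 42], ["eggs", 451]]
print(tabulate(rows, headers=["item", "qty"]))
# item      qty
# ------  -----
# spam       42
# eggs      451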
def consume(self, routingKey, msg):
"""
Consumer for this (CaptureData) class. Gets the data sent from yieldMetricsValue and
sends it to the storage backends.
"""
build_data = msg['build_data']
builder_info = yield self.master.data.get(("builders", build_data['builderid']))
if self._builder_name_matches(builder_info) and self._data_name == msg['data_name']:
try:
ret_val = self._callback(msg['post_data'])
except Exception as e:
raise CaptureCallbackError("CaptureData failed for build %s of builder %s."
" Exception generated: %s with message %s"
% (build_data['number'], builder_info['name'],
type(e).__name__, str(e)))
post_data = ret_val
series_name = '%s-%s' % (builder_info['name'], self._data_name)
context = self._defaultContext(build_data, builder_info['name'])
        yield self._store(post_data, series_name, context)
| avg_line_len: 54.85 | score: 26.35 |
def setup(self, port):
"""Connects to an Arduino UNO on serial port `port`.
@throw RuntimeError can't connect to Arduino
"""
port = str(port)
# timeout is used by all I/O operations
self._serial = serial.Serial(port, 115200, timeout=2)
time.sleep(2) # time to Arduino reset
if not self._serial.is_open:
raise RuntimeError('Could not connect to Arduino')
self._serial.write(b'\x01')
if self._serial.read() != b'\x06':
raise RuntimeError('Could not connect to Arduino')
ps = [p for p in self.available_pins() if p['digital']['output']]
for pin in ps:
        self._set_pin_direction(pin['id'], ahio.Direction.Output)
| avg_line_len: 34.47619 | score: 19.761905 |
def certificateOptionsFromPEMs(pemObjects, **kw):
# type: (List[AbstractPEMObject], **Any) -> ssl.CerticateOptions
"""
Load a CertificateOptions from the given collection of PEM objects
(already-loaded private keys and certificates).
In those PEM objects, identify one private key and its corresponding
certificate to use as the primary certificate. Then use the rest of the
certificates found as chain certificates. Raise a ValueError if no
certificate matching a private key is found.
:return: A TLS context factory using *pemObjects*
:rtype: `twisted.internet.ssl.CertificateOptions`_
.. _`twisted.internet.ssl.CertificateOptions`: \
https://twistedmatrix.com/documents/current/api/\
twisted.internet.ssl.CertificateOptions.html
"""
keys = [key for key in pemObjects if isinstance(key, Key)]
if not len(keys):
raise ValueError("Supplied PEM file(s) does *not* contain a key.")
if len(keys) > 1:
raise ValueError("Supplied PEM file(s) contains *more* than one key.")
privateKey = ssl.KeyPair.load(str(keys[0]), FILETYPE_PEM)
certs = [cert for cert in pemObjects if isinstance(cert, Certificate)]
if not len(certs):
raise ValueError("*At least one* certificate is required.")
certificates = [ssl.Certificate.loadPEM(str(certPEM)) for certPEM in certs]
certificatesByFingerprint = dict(
[
(certificate.getPublicKey().keyHash(), certificate)
for certificate in certificates
]
)
if privateKey.keyHash() not in certificatesByFingerprint:
raise ValueError(
"No certificate matching {fingerprint} found.".format(
fingerprint=privateKey.keyHash()
)
)
primaryCertificate = certificatesByFingerprint.pop(privateKey.keyHash())
if "dhParameters" in kw:
raise TypeError(
"Passing DH parameters as a keyword argument instead of a "
"PEM object is not supported anymore."
)
dhparams = [o for o in pemObjects if isinstance(o, DHParameters)]
if len(dhparams) > 1:
raise ValueError(
"Supplied PEM file(s) contain(s) *more* than one set of DH "
"parameters."
)
elif len(dhparams) == 1:
kw["dhParameters"] = ssl.DiffieHellmanParameters(str(dhparams[0]))
ctxFactory = ssl.CertificateOptions(
privateKey=privateKey.original,
certificate=primaryCertificate.original,
extraCertChain=[
chain.original for chain in certificatesByFingerprint.values()
],
**kw
)
return ctxFactory | [
"def",
"certificateOptionsFromPEMs",
"(",
"pemObjects",
",",
"*",
"*",
"kw",
")",
":",
"# type: (List[AbstractPEMObject], **Any) -> ssl.CerticateOptions",
"keys",
"=",
"[",
"key",
"for",
"key",
"in",
"pemObjects",
"if",
"isinstance",
"(",
"key",
",",
"Key",
")",
"]",
"if",
"not",
"len",
"(",
"keys",
")",
":",
"raise",
"ValueError",
"(",
"\"Supplied PEM file(s) does *not* contain a key.\"",
")",
"if",
"len",
"(",
"keys",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Supplied PEM file(s) contains *more* than one key.\"",
")",
"privateKey",
"=",
"ssl",
".",
"KeyPair",
".",
"load",
"(",
"str",
"(",
"keys",
"[",
"0",
"]",
")",
",",
"FILETYPE_PEM",
")",
"certs",
"=",
"[",
"cert",
"for",
"cert",
"in",
"pemObjects",
"if",
"isinstance",
"(",
"cert",
",",
"Certificate",
")",
"]",
"if",
"not",
"len",
"(",
"certs",
")",
":",
"raise",
"ValueError",
"(",
"\"*At least one* certificate is required.\"",
")",
"certificates",
"=",
"[",
"ssl",
".",
"Certificate",
".",
"loadPEM",
"(",
"str",
"(",
"certPEM",
")",
")",
"for",
"certPEM",
"in",
"certs",
"]",
"certificatesByFingerprint",
"=",
"dict",
"(",
"[",
"(",
"certificate",
".",
"getPublicKey",
"(",
")",
".",
"keyHash",
"(",
")",
",",
"certificate",
")",
"for",
"certificate",
"in",
"certificates",
"]",
")",
"if",
"privateKey",
".",
"keyHash",
"(",
")",
"not",
"in",
"certificatesByFingerprint",
":",
"raise",
"ValueError",
"(",
"\"No certificate matching {fingerprint} found.\"",
".",
"format",
"(",
"fingerprint",
"=",
"privateKey",
".",
"keyHash",
"(",
")",
")",
")",
"primaryCertificate",
"=",
"certificatesByFingerprint",
".",
"pop",
"(",
"privateKey",
".",
"keyHash",
"(",
")",
")",
"if",
"\"dhParameters\"",
"in",
"kw",
":",
"raise",
"TypeError",
"(",
"\"Passing DH parameters as a keyword argument instead of a \"",
"\"PEM object is not supported anymore.\"",
")",
"dhparams",
"=",
"[",
"o",
"for",
"o",
"in",
"pemObjects",
"if",
"isinstance",
"(",
"o",
",",
"DHParameters",
")",
"]",
"if",
"len",
"(",
"dhparams",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Supplied PEM file(s) contain(s) *more* than one set of DH \"",
"\"parameters.\"",
")",
"elif",
"len",
"(",
"dhparams",
")",
"==",
"1",
":",
"kw",
"[",
"\"dhParameters\"",
"]",
"=",
"ssl",
".",
"DiffieHellmanParameters",
"(",
"str",
"(",
"dhparams",
"[",
"0",
"]",
")",
")",
"ctxFactory",
"=",
"ssl",
".",
"CertificateOptions",
"(",
"privateKey",
"=",
"privateKey",
".",
"original",
",",
"certificate",
"=",
"primaryCertificate",
".",
"original",
",",
"extraCertChain",
"=",
"[",
"chain",
".",
"original",
"for",
"chain",
"in",
"certificatesByFingerprint",
".",
"values",
"(",
")",
"]",
",",
"*",
"*",
"kw",
")",
"return",
"ctxFactory"
]
| 36 | 24.277778 |
def calc_effective_diffusivity(self, inlets=None, outlets=None,
domain_area=None, domain_length=None):
r"""
This calculates the effective diffusivity in this linear transport
algorithm.
Parameters
----------
inlets : array_like
The pores where the inlet composition boundary conditions were
applied. If not given an attempt is made to infer them from the
algorithm.
outlets : array_like
The pores where the outlet composition boundary conditions were
applied. If not given an attempt is made to infer them from the
algorithm.
domain_area : scalar, optional
The area of the inlet (and outlet) boundary faces. If not given
then an attempt is made to estimate it, but it is usually
underestimated.
domain_length : scalar, optional
The length of the domain between the inlet and outlet boundary
faces. If not given then an attempt is made to estimate it, but it
is usually underestimated.
Notes
-----
The area and length of the domain are found using the bounding box
around the inlet and outlet pores which do not necessarily lie on the
edge of the domain, resulting in underestimation of sizes.
"""
return self._calc_eff_prop(inlets=inlets, outlets=outlets,
domain_area=domain_area,
domain_length=domain_length) | [
"def",
"calc_effective_diffusivity",
"(",
"self",
",",
"inlets",
"=",
"None",
",",
"outlets",
"=",
"None",
",",
"domain_area",
"=",
"None",
",",
"domain_length",
"=",
"None",
")",
":",
"return",
"self",
".",
"_calc_eff_prop",
"(",
"inlets",
"=",
"inlets",
",",
"outlets",
"=",
"outlets",
",",
"domain_area",
"=",
"domain_area",
",",
"domain_length",
"=",
"domain_length",
")"
]
| 42.135135 | 25.189189 |
def _ip_route_linux():
'''
Return ip routing information for Linux distros
(netstat is deprecated and may not be available)
'''
# table main closest to old netstat inet output
ret = []
cmd = 'ip -4 route show table main'
out = __salt__['cmd.run'](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
# need to fake similar output to that provided by netstat
# to maintain output format
if comps[0] == "unreachable":
continue
if comps[0] == "default":
ip_interface = ''
if comps[3] == "dev":
ip_interface = comps[4]
ret.append({
'addr_family': 'inet',
'destination': '0.0.0.0',
'gateway': comps[2],
'netmask': '0.0.0.0',
'flags': 'UG',
'interface': ip_interface})
else:
address_mask = convert_cidr(comps[0])
ip_interface = ''
if comps[1] == "dev":
ip_interface = comps[2]
ret.append({
'addr_family': 'inet',
'destination': address_mask['network'],
'gateway': '0.0.0.0',
'netmask': address_mask['netmask'],
'flags': 'U',
'interface': ip_interface})
# table all closest to old netstat inet6 output
cmd = 'ip -6 route show table all'
out = __salt__['cmd.run'](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
# need to fake similar output to that provided by netstat
# to maintain output format
if comps[0] == "unreachable":
continue
if comps[0] == "default":
ip_interface = ''
if comps[3] == "dev":
ip_interface = comps[4]
ret.append({
'addr_family': 'inet6',
'destination': '::/0',
'gateway': comps[2],
'netmask': '',
'flags': 'UG',
'interface': ip_interface})
elif comps[0] == "local":
ip_interface = ''
if comps[2] == "dev":
ip_interface = comps[3]
local_address = comps[1] + "/128"
ret.append({
'addr_family': 'inet6',
'destination': local_address,
'gateway': '::',
'netmask': '',
'flags': 'U',
'interface': ip_interface})
else:
address_mask = convert_cidr(comps[0])
ip_interface = ''
if comps[1] == "dev":
ip_interface = comps[2]
ret.append({
'addr_family': 'inet6',
'destination': comps[0],
'gateway': '::',
'netmask': '',
'flags': 'U',
'interface': ip_interface})
return ret | [
"def",
"_ip_route_linux",
"(",
")",
":",
"# table main closest to old netstat inet output",
"ret",
"=",
"[",
"]",
"cmd",
"=",
"'ip -4 route show table main'",
"out",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"True",
")",
"for",
"line",
"in",
"out",
".",
"splitlines",
"(",
")",
":",
"comps",
"=",
"line",
".",
"split",
"(",
")",
"# need to fake similar output to that provided by netstat",
"# to maintain output format",
"if",
"comps",
"[",
"0",
"]",
"==",
"\"unreachable\"",
":",
"continue",
"if",
"comps",
"[",
"0",
"]",
"==",
"\"default\"",
":",
"ip_interface",
"=",
"''",
"if",
"comps",
"[",
"3",
"]",
"==",
"\"dev\"",
":",
"ip_interface",
"=",
"comps",
"[",
"4",
"]",
"ret",
".",
"append",
"(",
"{",
"'addr_family'",
":",
"'inet'",
",",
"'destination'",
":",
"'0.0.0.0'",
",",
"'gateway'",
":",
"comps",
"[",
"2",
"]",
",",
"'netmask'",
":",
"'0.0.0.0'",
",",
"'flags'",
":",
"'UG'",
",",
"'interface'",
":",
"ip_interface",
"}",
")",
"else",
":",
"address_mask",
"=",
"convert_cidr",
"(",
"comps",
"[",
"0",
"]",
")",
"ip_interface",
"=",
"''",
"if",
"comps",
"[",
"1",
"]",
"==",
"\"dev\"",
":",
"ip_interface",
"=",
"comps",
"[",
"2",
"]",
"ret",
".",
"append",
"(",
"{",
"'addr_family'",
":",
"'inet'",
",",
"'destination'",
":",
"address_mask",
"[",
"'network'",
"]",
",",
"'gateway'",
":",
"'0.0.0.0'",
",",
"'netmask'",
":",
"address_mask",
"[",
"'netmask'",
"]",
",",
"'flags'",
":",
"'U'",
",",
"'interface'",
":",
"ip_interface",
"}",
")",
"# table all closest to old netstat inet6 output",
"cmd",
"=",
"'ip -6 route show table all'",
"out",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"True",
")",
"for",
"line",
"in",
"out",
".",
"splitlines",
"(",
")",
":",
"comps",
"=",
"line",
".",
"split",
"(",
")",
"# need to fake similar output to that provided by netstat",
"# to maintain output format",
"if",
"comps",
"[",
"0",
"]",
"==",
"\"unreachable\"",
":",
"continue",
"if",
"comps",
"[",
"0",
"]",
"==",
"\"default\"",
":",
"ip_interface",
"=",
"''",
"if",
"comps",
"[",
"3",
"]",
"==",
"\"dev\"",
":",
"ip_interface",
"=",
"comps",
"[",
"4",
"]",
"ret",
".",
"append",
"(",
"{",
"'addr_family'",
":",
"'inet6'",
",",
"'destination'",
":",
"'::/0'",
",",
"'gateway'",
":",
"comps",
"[",
"2",
"]",
",",
"'netmask'",
":",
"''",
",",
"'flags'",
":",
"'UG'",
",",
"'interface'",
":",
"ip_interface",
"}",
")",
"elif",
"comps",
"[",
"0",
"]",
"==",
"\"local\"",
":",
"ip_interface",
"=",
"''",
"if",
"comps",
"[",
"2",
"]",
"==",
"\"dev\"",
":",
"ip_interface",
"=",
"comps",
"[",
"3",
"]",
"local_address",
"=",
"comps",
"[",
"1",
"]",
"+",
"\"/128\"",
"ret",
".",
"append",
"(",
"{",
"'addr_family'",
":",
"'inet6'",
",",
"'destination'",
":",
"local_address",
",",
"'gateway'",
":",
"'::'",
",",
"'netmask'",
":",
"''",
",",
"'flags'",
":",
"'U'",
",",
"'interface'",
":",
"ip_interface",
"}",
")",
"else",
":",
"address_mask",
"=",
"convert_cidr",
"(",
"comps",
"[",
"0",
"]",
")",
"ip_interface",
"=",
"''",
"if",
"comps",
"[",
"1",
"]",
"==",
"\"dev\"",
":",
"ip_interface",
"=",
"comps",
"[",
"2",
"]",
"ret",
".",
"append",
"(",
"{",
"'addr_family'",
":",
"'inet6'",
",",
"'destination'",
":",
"comps",
"[",
"0",
"]",
",",
"'gateway'",
":",
"'::'",
",",
"'netmask'",
":",
"''",
",",
"'flags'",
":",
"'U'",
",",
"'interface'",
":",
"ip_interface",
"}",
")",
"return",
"ret"
]
| 30.893617 | 13.170213 |
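A stand-alone sketch of the default-route branch in `_ip_route_linux` above, run on a canned line instead of calling `ip` through salt's `__salt__['cmd.run']`. The sample route line is hypothetical.

```python
# One line of `ip -4 route show table main` output, hard-coded for the demo.
line = "default via 192.168.1.1 dev eth0 proto dhcp metric 100"
comps = line.split()

route = {}
if comps[0] == "default":
    ip_interface = ""
    if comps[3] == "dev":
        ip_interface = comps[4]
    # fake netstat-style output, exactly as the function above does
    route = {
        "addr_family": "inet",
        "destination": "0.0.0.0",
        "gateway": comps[2],
        "netmask": "0.0.0.0",
        "flags": "UG",
        "interface": ip_interface,
    }

print(route)  # gateway '192.168.1.1', interface 'eth0'
```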
def parse_json(target, json, create_sections = False, create_options = False):
"""Given a confmanager object and a dictionary object, import the values from the dictionary into the object, optionally adding sections and options as it goes."""
is_dict = isinstance(json, dict)
for o in json:
if is_dict:
section = o
else:
section = o[0]
if not target.has_section(section):
if create_sections:
target.add_section(section)
else:
continue
for k, v in (json[o].items() if is_dict else o[1]):
if target.has_option(section, k) or create_options:
target.set(section, k, v) # Don't add if it shouldn't be there.
return target | [
"def",
"parse_json",
"(",
"target",
",",
"json",
",",
"create_sections",
"=",
"False",
",",
"create_options",
"=",
"False",
")",
":",
"is_dict",
"=",
"isinstance",
"(",
"json",
",",
"dict",
")",
"for",
"o",
"in",
"json",
":",
"if",
"is_dict",
":",
"section",
"=",
"o",
"else",
":",
"section",
"=",
"o",
"[",
"0",
"]",
"if",
"not",
"target",
".",
"has_section",
"(",
"section",
")",
":",
"if",
"create_sections",
":",
"target",
".",
"add_section",
"(",
"section",
")",
"else",
":",
"continue",
"for",
"k",
",",
"v",
"in",
"(",
"json",
"[",
"o",
"]",
".",
"items",
"(",
")",
"if",
"is_dict",
"else",
"o",
"[",
"1",
"]",
")",
":",
"if",
"target",
".",
"has_option",
"(",
"section",
",",
"k",
")",
"or",
"create_options",
":",
"target",
".",
"set",
"(",
"section",
",",
"k",
",",
"v",
")",
"# Don't add if it shouldn't be there.\r",
"return",
"target"
]
| 38.529412 | 20.117647 |
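A minimal usage sketch for `parse_json` above, assuming the function is in scope. `parse_json` only requires `has_section`/`add_section`/`has_option`/`set` on `target`, so a stock `ConfigParser` works as a stand-in (its values must be strings).

```python
from configparser import ConfigParser

cfg = ConfigParser()
data = {"server": {"host": "127.0.0.1", "port": "8080"}}

# create_sections/create_options let the dict define new entries
parse_json(cfg, data, create_sections=True, create_options=True)
print(cfg.get("server", "port"))  # -> 8080
```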
def strace_data_store_event(self, operation, address, address_range=0):
"""Sets an event to trigger trace logic when data write access is made.
Args:
self (JLink): the ``JLink`` instance.
operation (int): one of the operations in ``JLinkStraceOperation``.
address (int): the address of the store data.
address_range (int): optional range of address to trigger event on.
Returns:
An integer specifying the trace event handle. This handle should be
retained in order to clear the event at a later time.
Raises:
JLinkException: on error.
"""
cmd = enums.JLinkStraceCommand.TRACE_EVENT_SET
event_info = structs.JLinkStraceEventInfo()
event_info.Type = enums.JLinkStraceEvent.DATA_STORE
event_info.Op = operation
event_info.Addr = int(address)
event_info.AddrRangeSize = int(address_range)
handle = self._dll.JLINK_STRACE_Control(cmd, ctypes.byref(event_info))
if handle < 0:
raise errors.JLinkException(handle)
return handle | [
"def",
"strace_data_store_event",
"(",
"self",
",",
"operation",
",",
"address",
",",
"address_range",
"=",
"0",
")",
":",
"cmd",
"=",
"enums",
".",
"JLinkStraceCommand",
".",
"TRACE_EVENT_SET",
"event_info",
"=",
"structs",
".",
"JLinkStraceEventInfo",
"(",
")",
"event_info",
".",
"Type",
"=",
"enums",
".",
"JLinkStraceEvent",
".",
"DATA_STORE",
"event_info",
".",
"Op",
"=",
"operation",
"event_info",
".",
"Addr",
"=",
"int",
"(",
"address",
")",
"event_info",
".",
"AddrRangeSize",
"=",
"int",
"(",
"address_range",
")",
"handle",
"=",
"self",
".",
"_dll",
".",
"JLINK_STRACE_Control",
"(",
"cmd",
",",
"ctypes",
".",
"byref",
"(",
"event_info",
")",
")",
"if",
"handle",
"<",
"0",
":",
"raise",
"errors",
".",
"JLinkException",
"(",
"handle",
")",
"return",
"handle"
]
| 40.481481 | 21.37037 |
def __get_doc_block_lines(self):
"""
        Returns the start and end line of the DocBlock of the stored routine code.
"""
line1 = None
line2 = None
i = 0
for line in self._routine_source_code_lines:
if re.match(r'\s*/\*\*', line):
line1 = i
if re.match(r'\s*\*/', line):
line2 = i
if self._is_start_of_stored_routine(line):
break
i += 1
return line1, line2 | [
"def",
"__get_doc_block_lines",
"(",
"self",
")",
":",
"line1",
"=",
"None",
"line2",
"=",
"None",
"i",
"=",
"0",
"for",
"line",
"in",
"self",
".",
"_routine_source_code_lines",
":",
"if",
"re",
".",
"match",
"(",
"r'\\s*/\\*\\*'",
",",
"line",
")",
":",
"line1",
"=",
"i",
"if",
"re",
".",
"match",
"(",
"r'\\s*\\*/'",
",",
"line",
")",
":",
"line2",
"=",
"i",
"if",
"self",
".",
"_is_start_of_stored_routine",
"(",
"line",
")",
":",
"break",
"i",
"+=",
"1",
"return",
"line1",
",",
"line2"
]
| 23.571429 | 20.52381 |
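A self-contained sketch of the scan performed by `__get_doc_block_lines` above: locate the lines bracketing a `/** ... */` DocBlock before the routine header. The routine-start test is a stand-in for the class's `_is_start_of_stored_routine`.

```python
import re

lines = [
    "/**",
    " * Returns the number of users.",
    " */",
    "CREATE PROCEDURE user_count()",
    "BEGIN",
]

line1 = line2 = None
for i, line in enumerate(lines):
    if re.match(r'\s*/\*\*', line):   # opening of the DocBlock
        line1 = i
    if re.match(r'\s*\*/', line):     # closing of the DocBlock
        line2 = i
    if line.startswith("CREATE"):     # stand-in routine-header check
        break

print(line1, line2)  # -> 0 2
```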
def get_strain_state_dict(strains, stresses, eq_stress=None,
tol=1e-10, add_eq=True, sort=True):
"""
Creates a dictionary of voigt-notation stress-strain sets
keyed by "strain state", i. e. a tuple corresponding to
the non-zero entries in ratios to the lowest nonzero value,
e.g. [0, 0.1, 0, 0.2, 0, 0] -> (0,1,0,2,0,0)
This allows strains to be collected in stencils as to
evaluate parameterized finite difference derivatives
Args:
strains (Nx3x3 array-like): strain matrices
stresses (Nx3x3 array-like): stress matrices
eq_stress (Nx3x3 array-like): equilibrium stress
tol (float): tolerance for sorting strain states
add_eq (bool): flag for whether to add eq_strain
to stress-strain sets for each strain state
sort (bool): flag for whether to sort strain states
Returns:
OrderedDict with strain state keys and dictionaries
with stress-strain data corresponding to strain state
"""
# Recast stress/strains
vstrains = np.array([Strain(s).zeroed(tol).voigt for s in strains])
vstresses = np.array([Stress(s).zeroed(tol).voigt for s in stresses])
# Collect independent strain states:
independent = set([tuple(np.nonzero(vstrain)[0].tolist())
for vstrain in vstrains])
strain_state_dict = OrderedDict()
if add_eq:
if eq_stress is not None:
veq_stress = Stress(eq_stress).voigt
else:
veq_stress = find_eq_stress(strains, stresses).voigt
for n, ind in enumerate(independent):
# match strains with templates
template = np.zeros(6, dtype=bool)
np.put(template, ind, True)
template = np.tile(template, [vstresses.shape[0], 1])
mode = (template == (np.abs(vstrains) > 1e-10)).all(axis=1)
mstresses = vstresses[mode]
mstrains = vstrains[mode]
# Get "strain state", i.e. ratio of each value to minimum strain
min_nonzero_ind = np.argmin(np.abs(np.take(mstrains[-1], ind)))
min_nonzero_val = np.take(mstrains[-1], ind)[min_nonzero_ind]
strain_state = mstrains[-1] / min_nonzero_val
strain_state = tuple(strain_state)
if add_eq:
# add zero strain state
mstrains = np.vstack([mstrains, np.zeros(6)])
mstresses = np.vstack([mstresses, veq_stress])
# sort strains/stresses by strain values
if sort:
mstresses = mstresses[mstrains[:, ind[0]].argsort()]
mstrains = mstrains[mstrains[:, ind[0]].argsort()]
strain_state_dict[strain_state] = {"strains": mstrains,
"stresses": mstresses}
return strain_state_dict | [
"def",
"get_strain_state_dict",
"(",
"strains",
",",
"stresses",
",",
"eq_stress",
"=",
"None",
",",
"tol",
"=",
"1e-10",
",",
"add_eq",
"=",
"True",
",",
"sort",
"=",
"True",
")",
":",
"# Recast stress/strains",
"vstrains",
"=",
"np",
".",
"array",
"(",
"[",
"Strain",
"(",
"s",
")",
".",
"zeroed",
"(",
"tol",
")",
".",
"voigt",
"for",
"s",
"in",
"strains",
"]",
")",
"vstresses",
"=",
"np",
".",
"array",
"(",
"[",
"Stress",
"(",
"s",
")",
".",
"zeroed",
"(",
"tol",
")",
".",
"voigt",
"for",
"s",
"in",
"stresses",
"]",
")",
"# Collect independent strain states:",
"independent",
"=",
"set",
"(",
"[",
"tuple",
"(",
"np",
".",
"nonzero",
"(",
"vstrain",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
")",
"for",
"vstrain",
"in",
"vstrains",
"]",
")",
"strain_state_dict",
"=",
"OrderedDict",
"(",
")",
"if",
"add_eq",
":",
"if",
"eq_stress",
"is",
"not",
"None",
":",
"veq_stress",
"=",
"Stress",
"(",
"eq_stress",
")",
".",
"voigt",
"else",
":",
"veq_stress",
"=",
"find_eq_stress",
"(",
"strains",
",",
"stresses",
")",
".",
"voigt",
"for",
"n",
",",
"ind",
"in",
"enumerate",
"(",
"independent",
")",
":",
"# match strains with templates",
"template",
"=",
"np",
".",
"zeros",
"(",
"6",
",",
"dtype",
"=",
"bool",
")",
"np",
".",
"put",
"(",
"template",
",",
"ind",
",",
"True",
")",
"template",
"=",
"np",
".",
"tile",
"(",
"template",
",",
"[",
"vstresses",
".",
"shape",
"[",
"0",
"]",
",",
"1",
"]",
")",
"mode",
"=",
"(",
"template",
"==",
"(",
"np",
".",
"abs",
"(",
"vstrains",
")",
">",
"1e-10",
")",
")",
".",
"all",
"(",
"axis",
"=",
"1",
")",
"mstresses",
"=",
"vstresses",
"[",
"mode",
"]",
"mstrains",
"=",
"vstrains",
"[",
"mode",
"]",
"# Get \"strain state\", i.e. ratio of each value to minimum strain",
"min_nonzero_ind",
"=",
"np",
".",
"argmin",
"(",
"np",
".",
"abs",
"(",
"np",
".",
"take",
"(",
"mstrains",
"[",
"-",
"1",
"]",
",",
"ind",
")",
")",
")",
"min_nonzero_val",
"=",
"np",
".",
"take",
"(",
"mstrains",
"[",
"-",
"1",
"]",
",",
"ind",
")",
"[",
"min_nonzero_ind",
"]",
"strain_state",
"=",
"mstrains",
"[",
"-",
"1",
"]",
"/",
"min_nonzero_val",
"strain_state",
"=",
"tuple",
"(",
"strain_state",
")",
"if",
"add_eq",
":",
"# add zero strain state",
"mstrains",
"=",
"np",
".",
"vstack",
"(",
"[",
"mstrains",
",",
"np",
".",
"zeros",
"(",
"6",
")",
"]",
")",
"mstresses",
"=",
"np",
".",
"vstack",
"(",
"[",
"mstresses",
",",
"veq_stress",
"]",
")",
"# sort strains/stresses by strain values",
"if",
"sort",
":",
"mstresses",
"=",
"mstresses",
"[",
"mstrains",
"[",
":",
",",
"ind",
"[",
"0",
"]",
"]",
".",
"argsort",
"(",
")",
"]",
"mstrains",
"=",
"mstrains",
"[",
"mstrains",
"[",
":",
",",
"ind",
"[",
"0",
"]",
"]",
".",
"argsort",
"(",
")",
"]",
"strain_state_dict",
"[",
"strain_state",
"]",
"=",
"{",
"\"strains\"",
":",
"mstrains",
",",
"\"stresses\"",
":",
"mstresses",
"}",
"return",
"strain_state_dict"
]
| 44.377049 | 17.819672 |
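A numpy-only sketch of the "strain state" normalization used by `get_strain_state_dict` above: divide a voigt strain vector by its smallest-magnitude nonzero entry, reproducing the docstring's example.

```python
import numpy as np

vstrain = np.array([0.0, 0.1, 0.0, 0.2, 0.0, 0.0])

ind = np.nonzero(vstrain)[0]                    # indices of nonzero entries
min_nonzero_ind = np.argmin(np.abs(vstrain[ind]))
min_nonzero_val = vstrain[ind][min_nonzero_ind]
strain_state = tuple(vstrain / min_nonzero_val)

print(strain_state)  # -> (0.0, 1.0, 0.0, 2.0, 0.0, 0.0)
```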
def update_policy(self, id, policy):
""" Create policy.
https://www.nomadproject.io/api/acl-policies.html
arguments:
- name
- policy
returns: request.Response
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request("policy", id, json=policy, method="post") | [
"def",
"update_policy",
"(",
"self",
",",
"id",
",",
"policy",
")",
":",
"return",
"self",
".",
"request",
"(",
"\"policy\"",
",",
"id",
",",
"json",
"=",
"policy",
",",
"method",
"=",
"\"post\"",
")"
]
| 29.6 | 19.133333 |
def to_array(self):
"""
Serializes this InvoiceMessage to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(InvoiceMessage, self).to_array()
array['title'] = u(self.title) # py2: type unicode, py3: type str
array['description'] = u(self.description) # py2: type unicode, py3: type str
array['payload'] = u(self.payload) # py2: type unicode, py3: type str
array['provider_token'] = u(self.provider_token) # py2: type unicode, py3: type str
array['start_parameter'] = u(self.start_parameter) # py2: type unicode, py3: type str
array['currency'] = u(self.currency) # py2: type unicode, py3: type str
array['prices'] = self._as_array(self.prices) # type list of LabeledPrice
        if self.receiver is not None:
            if isinstance(self.receiver, None):
                array['chat_id'] = None(self.receiver)  # type None
            elif isinstance(self.receiver, str):
                array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
            elif isinstance(self.receiver, int):
                array['chat_id'] = int(self.receiver)  # type int
            else:
                raise TypeError('Unknown type, must be one of None, str, int.')
            # end if
        if self.reply_id is not None:
            if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
                array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
            elif isinstance(self.reply_id, int):
                array['reply_to_message_id'] = int(self.reply_id)  # type int
            else:
                raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.')
# end if
if self.provider_data is not None:
array['provider_data'] = u(self.provider_data) # py2: type unicode, py3: type str
if self.photo_url is not None:
array['photo_url'] = u(self.photo_url) # py2: type unicode, py3: type str
if self.photo_size is not None:
array['photo_size'] = int(self.photo_size) # type int
if self.photo_width is not None:
array['photo_width'] = int(self.photo_width) # type int
if self.photo_height is not None:
array['photo_height'] = int(self.photo_height) # type int
if self.need_name is not None:
array['need_name'] = bool(self.need_name) # type bool
if self.need_phone_number is not None:
array['need_phone_number'] = bool(self.need_phone_number) # type bool
if self.need_email is not None:
array['need_email'] = bool(self.need_email) # type bool
if self.need_shipping_address is not None:
array['need_shipping_address'] = bool(self.need_shipping_address) # type bool
if self.send_phone_number_to_provider is not None:
array['send_phone_number_to_provider'] = bool(self.send_phone_number_to_provider) # type bool
if self.send_email_to_provider is not None:
array['send_email_to_provider'] = bool(self.send_email_to_provider) # type bool
if self.is_flexible is not None:
array['is_flexible'] = bool(self.is_flexible) # type bool
if self.disable_notification is not None:
array['disable_notification'] = bool(self.disable_notification) # type bool
if self.reply_markup is not None:
array['reply_markup'] = self.reply_markup.to_array() # type InlineKeyboardMarkup
return array | [
"def",
"to_array",
"(",
"self",
")",
":",
"array",
"=",
"super",
"(",
"InvoiceMessage",
",",
"self",
")",
".",
"to_array",
"(",
")",
"array",
"[",
"'title'",
"]",
"=",
"u",
"(",
"self",
".",
"title",
")",
"# py2: type unicode, py3: type str",
"array",
"[",
"'description'",
"]",
"=",
"u",
"(",
"self",
".",
"description",
")",
"# py2: type unicode, py3: type str",
"array",
"[",
"'payload'",
"]",
"=",
"u",
"(",
"self",
".",
"payload",
")",
"# py2: type unicode, py3: type str",
"array",
"[",
"'provider_token'",
"]",
"=",
"u",
"(",
"self",
".",
"provider_token",
")",
"# py2: type unicode, py3: type str",
"array",
"[",
"'start_parameter'",
"]",
"=",
"u",
"(",
"self",
".",
"start_parameter",
")",
"# py2: type unicode, py3: type str",
"array",
"[",
"'currency'",
"]",
"=",
"u",
"(",
"self",
".",
"currency",
")",
"# py2: type unicode, py3: type str",
"array",
"[",
"'prices'",
"]",
"=",
"self",
".",
"_as_array",
"(",
"self",
".",
"prices",
")",
"# type list of LabeledPrice",
"if",
"self",
".",
"receiver",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"self",
".",
"receiver",
",",
"None",
")",
":",
"array",
"[",
"'chat_id'",
"]",
"=",
"None",
"(",
"self",
".",
"receiver",
")",
"# type Noneelif isinstance(self.receiver, str):",
"array",
"[",
"'chat_id'",
"]",
"=",
"u",
"(",
"self",
".",
"receiver",
")",
"# py2: type unicode, py3: type str",
"elif",
"isinstance",
"(",
"self",
".",
"receiver",
",",
"int",
")",
":",
"array",
"[",
"'chat_id'",
"]",
"=",
"int",
"(",
"self",
".",
"receiver",
")",
"# type intelse:",
"raise",
"TypeError",
"(",
"'Unknown type, must be one of None, str, int.'",
")",
"# end if",
"if",
"self",
".",
"reply_id",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"self",
".",
"reply_id",
",",
"DEFAULT_MESSAGE_ID",
")",
":",
"array",
"[",
"'reply_to_message_id'",
"]",
"=",
"DEFAULT_MESSAGE_ID",
"(",
"self",
".",
"reply_id",
")",
"# type DEFAULT_MESSAGE_IDelif isinstance(self.reply_id, int):",
"array",
"[",
"'reply_to_message_id'",
"]",
"=",
"int",
"(",
"self",
".",
"reply_id",
")",
"# type intelse:",
"raise",
"TypeError",
"(",
"'Unknown type, must be one of DEFAULT_MESSAGE_ID, int.'",
")",
"# end if",
"if",
"self",
".",
"provider_data",
"is",
"not",
"None",
":",
"array",
"[",
"'provider_data'",
"]",
"=",
"u",
"(",
"self",
".",
"provider_data",
")",
"# py2: type unicode, py3: type str",
"if",
"self",
".",
"photo_url",
"is",
"not",
"None",
":",
"array",
"[",
"'photo_url'",
"]",
"=",
"u",
"(",
"self",
".",
"photo_url",
")",
"# py2: type unicode, py3: type str",
"if",
"self",
".",
"photo_size",
"is",
"not",
"None",
":",
"array",
"[",
"'photo_size'",
"]",
"=",
"int",
"(",
"self",
".",
"photo_size",
")",
"# type int",
"if",
"self",
".",
"photo_width",
"is",
"not",
"None",
":",
"array",
"[",
"'photo_width'",
"]",
"=",
"int",
"(",
"self",
".",
"photo_width",
")",
"# type int",
"if",
"self",
".",
"photo_height",
"is",
"not",
"None",
":",
"array",
"[",
"'photo_height'",
"]",
"=",
"int",
"(",
"self",
".",
"photo_height",
")",
"# type int",
"if",
"self",
".",
"need_name",
"is",
"not",
"None",
":",
"array",
"[",
"'need_name'",
"]",
"=",
"bool",
"(",
"self",
".",
"need_name",
")",
"# type bool",
"if",
"self",
".",
"need_phone_number",
"is",
"not",
"None",
":",
"array",
"[",
"'need_phone_number'",
"]",
"=",
"bool",
"(",
"self",
".",
"need_phone_number",
")",
"# type bool",
"if",
"self",
".",
"need_email",
"is",
"not",
"None",
":",
"array",
"[",
"'need_email'",
"]",
"=",
"bool",
"(",
"self",
".",
"need_email",
")",
"# type bool",
"if",
"self",
".",
"need_shipping_address",
"is",
"not",
"None",
":",
"array",
"[",
"'need_shipping_address'",
"]",
"=",
"bool",
"(",
"self",
".",
"need_shipping_address",
")",
"# type bool",
"if",
"self",
".",
"send_phone_number_to_provider",
"is",
"not",
"None",
":",
"array",
"[",
"'send_phone_number_to_provider'",
"]",
"=",
"bool",
"(",
"self",
".",
"send_phone_number_to_provider",
")",
"# type bool",
"if",
"self",
".",
"send_email_to_provider",
"is",
"not",
"None",
":",
"array",
"[",
"'send_email_to_provider'",
"]",
"=",
"bool",
"(",
"self",
".",
"send_email_to_provider",
")",
"# type bool",
"if",
"self",
".",
"is_flexible",
"is",
"not",
"None",
":",
"array",
"[",
"'is_flexible'",
"]",
"=",
"bool",
"(",
"self",
".",
"is_flexible",
")",
"# type bool",
"if",
"self",
".",
"disable_notification",
"is",
"not",
"None",
":",
"array",
"[",
"'disable_notification'",
"]",
"=",
"bool",
"(",
"self",
".",
"disable_notification",
")",
"# type bool",
"if",
"self",
".",
"reply_markup",
"is",
"not",
"None",
":",
"array",
"[",
"'reply_markup'",
"]",
"=",
"self",
".",
"reply_markup",
".",
"to_array",
"(",
")",
"# type InlineKeyboardMarkup",
"return",
"array"
]
| 50.1 | 29.014286 |
def m2i(self, pkt, s):
"""
ASN1F_SEQUENCE behaves transparently, with nested ASN1_objects being
dissected one by one. Because we use obj.dissect (see loop below)
instead of obj.m2i (as we trust dissect to do the appropriate set_vals)
we do not directly retrieve the list of nested objects.
Thus m2i returns an empty list (along with the proper remainder).
It is discarded by dissect() and should not be missed elsewhere.
"""
diff_tag, s = BER_tagging_dec(s, hidden_tag=self.ASN1_tag,
implicit_tag=self.implicit_tag,
explicit_tag=self.explicit_tag,
safe=self.flexible_tag)
if diff_tag is not None:
if self.implicit_tag is not None:
self.implicit_tag = diff_tag
elif self.explicit_tag is not None:
self.explicit_tag = diff_tag
codec = self.ASN1_tag.get_codec(pkt.ASN1_codec)
i, s, remain = codec.check_type_check_len(s)
if len(s) == 0:
for obj in self.seq:
obj.set_val(pkt, None)
else:
for obj in self.seq:
try:
s = obj.dissect(pkt, s)
except ASN1F_badsequence:
break
if len(s) > 0:
raise BER_Decoding_Error("unexpected remainder", remaining=s)
return [], remain | [
"def",
"m2i",
"(",
"self",
",",
"pkt",
",",
"s",
")",
":",
"diff_tag",
",",
"s",
"=",
"BER_tagging_dec",
"(",
"s",
",",
"hidden_tag",
"=",
"self",
".",
"ASN1_tag",
",",
"implicit_tag",
"=",
"self",
".",
"implicit_tag",
",",
"explicit_tag",
"=",
"self",
".",
"explicit_tag",
",",
"safe",
"=",
"self",
".",
"flexible_tag",
")",
"if",
"diff_tag",
"is",
"not",
"None",
":",
"if",
"self",
".",
"implicit_tag",
"is",
"not",
"None",
":",
"self",
".",
"implicit_tag",
"=",
"diff_tag",
"elif",
"self",
".",
"explicit_tag",
"is",
"not",
"None",
":",
"self",
".",
"explicit_tag",
"=",
"diff_tag",
"codec",
"=",
"self",
".",
"ASN1_tag",
".",
"get_codec",
"(",
"pkt",
".",
"ASN1_codec",
")",
"i",
",",
"s",
",",
"remain",
"=",
"codec",
".",
"check_type_check_len",
"(",
"s",
")",
"if",
"len",
"(",
"s",
")",
"==",
"0",
":",
"for",
"obj",
"in",
"self",
".",
"seq",
":",
"obj",
".",
"set_val",
"(",
"pkt",
",",
"None",
")",
"else",
":",
"for",
"obj",
"in",
"self",
".",
"seq",
":",
"try",
":",
"s",
"=",
"obj",
".",
"dissect",
"(",
"pkt",
",",
"s",
")",
"except",
"ASN1F_badsequence",
":",
"break",
"if",
"len",
"(",
"s",
")",
">",
"0",
":",
"raise",
"BER_Decoding_Error",
"(",
"\"unexpected remainder\"",
",",
"remaining",
"=",
"s",
")",
"return",
"[",
"]",
",",
"remain"
]
| 45.59375 | 16.90625 |
def from_dict(cls, data, read_only=False):
'''Recreate a feature collection from a dictionary.
The dictionary is of the format dumped by :meth:`to_dict`.
Additional information, such as whether the feature collection
should be read-only, is not included in this dictionary, and
is instead passed as parameters to this function.
'''
fc = cls(read_only=read_only)
fc._features = {}
fc._from_dict_update(data)
return fc | [
"def",
"from_dict",
"(",
"cls",
",",
"data",
",",
"read_only",
"=",
"False",
")",
":",
"fc",
"=",
"cls",
"(",
"read_only",
"=",
"read_only",
")",
"fc",
".",
"_features",
"=",
"{",
"}",
"fc",
".",
"_from_dict_update",
"(",
"data",
")",
"return",
"fc"
]
| 37.384615 | 21.384615 |
def get_tracks(self):
"""Returns the list of Tracks on this album."""
return _extract_tracks(
self._request(self.ws_prefix + ".getInfo", cacheable=True), self.network
) | [
"def",
"get_tracks",
"(",
"self",
")",
":",
"return",
"_extract_tracks",
"(",
"self",
".",
"_request",
"(",
"self",
".",
"ws_prefix",
"+",
"\".getInfo\"",
",",
"cacheable",
"=",
"True",
")",
",",
"self",
".",
"network",
")"
]
| 33.333333 | 23.833333 |
def snapshot(domain, name=None, suffix=None, **kwargs):
'''
Create a snapshot of a VM.
:param domain: domain name
    :param name: Name of the snapshot. If the name is omitted, the original domain
                 name with an ISO 8601 timestamp as a suffix will be used.
:param suffix: Add suffix for the new name. Useful in states, where such snapshots
can be distinguished from manually created.
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' virt.snapshot <domain>
'''
if name and name.lower() == domain.lower():
raise CommandExecutionError('Virtual Machine {name} is already defined. '
'Please choose another name for the snapshot'.format(name=name))
if not name:
name = "{domain}-{tsnap}".format(domain=domain, tsnap=time.strftime('%Y%m%d-%H%M%S', time.localtime()))
if suffix:
name = "{name}-{suffix}".format(name=name, suffix=suffix)
doc = ElementTree.Element('domainsnapshot')
n_name = ElementTree.SubElement(doc, 'name')
n_name.text = name
conn = __get_conn(**kwargs)
_get_domain(conn, domain).snapshotCreateXML(
salt.utils.stringutils.to_str(ElementTree.tostring(doc))
)
conn.close()
return {'name': name} | [
"def",
"snapshot",
"(",
"domain",
",",
"name",
"=",
"None",
",",
"suffix",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"name",
"and",
"name",
".",
"lower",
"(",
")",
"==",
"domain",
".",
"lower",
"(",
")",
":",
"raise",
"CommandExecutionError",
"(",
"'Virtual Machine {name} is already defined. '",
"'Please choose another name for the snapshot'",
".",
"format",
"(",
"name",
"=",
"name",
")",
")",
"if",
"not",
"name",
":",
"name",
"=",
"\"{domain}-{tsnap}\"",
".",
"format",
"(",
"domain",
"=",
"domain",
",",
"tsnap",
"=",
"time",
".",
"strftime",
"(",
"'%Y%m%d-%H%M%S'",
",",
"time",
".",
"localtime",
"(",
")",
")",
")",
"if",
"suffix",
":",
"name",
"=",
"\"{name}-{suffix}\"",
".",
"format",
"(",
"name",
"=",
"name",
",",
"suffix",
"=",
"suffix",
")",
"doc",
"=",
"ElementTree",
".",
"Element",
"(",
"'domainsnapshot'",
")",
"n_name",
"=",
"ElementTree",
".",
"SubElement",
"(",
"doc",
",",
"'name'",
")",
"n_name",
".",
"text",
"=",
"name",
"conn",
"=",
"__get_conn",
"(",
"*",
"*",
"kwargs",
")",
"_get_domain",
"(",
"conn",
",",
"domain",
")",
".",
"snapshotCreateXML",
"(",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_str",
"(",
"ElementTree",
".",
"tostring",
"(",
"doc",
")",
")",
")",
"conn",
".",
"close",
"(",
")",
"return",
"{",
"'name'",
":",
"name",
"}"
]
| 32.8125 | 27.229167 |
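The default snapshot name built by `snapshot` above is `<domain>-<YYYYmmdd-HHMMSS>`, plus an optional suffix; the naming logic is reproduced here without libvirt. The domain and suffix values are hypothetical.

```python
import time

domain, suffix = "web01", "manual"
name = "{domain}-{tsnap}".format(
    domain=domain, tsnap=time.strftime('%Y%m%d-%H%M%S', time.localtime()))
if suffix:
    name = "{name}-{suffix}".format(name=name, suffix=suffix)

print(name)  # e.g. web01-20190201-153000-manual
```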
def parse_color(v, color_range=1):
'''Receives a colour definition and returns a (r,g,b,a) tuple.
Accepts:
- v
- (v)
- (v,a)
- (r,g,b)
- (r,g,b,a)
- #RRGGBB
- RRGGBB
- #RRGGBBAA
- RRGGBBAA
Returns a (red, green, blue, alpha) tuple, with values ranging from
0 to 1.
The 'color_range' parameter sets the colour range in which the
colour data values are specified (except in hexstrings).
'''
# unpack one-element tuples, they show up sometimes
while isinstance(v, (tuple, list)) and len(v) == 1:
v = v[0]
if isinstance(v, (int, float)):
red = green = blue = v / color_range
alpha = 1.
elif isinstance(v, data.Color):
red, green, blue, alpha = v
elif isinstance(v, (tuple, list)):
# normalise values according to the supplied colour range
# for this we make a list with the normalised data
color = []
for index in range(0, len(v)):
color.append(v[index] / color_range)
if len(color) == 1:
red = green = blue = alpha = color[0]
elif len(color) == 2:
red = green = blue = color[0]
alpha = color[1]
elif len(color) == 3:
red = color[0]
green = color[1]
blue = color[2]
alpha = 1.
elif len(color) == 4:
red = color[0]
green = color[1]
blue = color[2]
alpha = color[3]
elif isinstance(v, basestring):
# got a hexstring: first remove hash character, if any
v = v.strip('#')
        if len(v) == 6:
# RRGGBB
red = hex2dec(v[0:2]) / 255.
green = hex2dec(v[2:4]) / 255.
blue = hex2dec(v[4:6]) / 255.
alpha = 1.
elif len(v) == 8:
red = hex2dec(v[0:2]) / 255.
green = hex2dec(v[2:4]) / 255.
blue = hex2dec(v[4:6]) / 255.
alpha = hex2dec(v[6:8]) / 255.
return red, green, blue, alpha | [
"def",
"parse_color",
"(",
"v",
",",
"color_range",
"=",
"1",
")",
":",
"# unpack one-element tuples, they show up sometimes",
"while",
"isinstance",
"(",
"v",
",",
"(",
"tuple",
",",
"list",
")",
")",
"and",
"len",
"(",
"v",
")",
"==",
"1",
":",
"v",
"=",
"v",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"v",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"red",
"=",
"green",
"=",
"blue",
"=",
"v",
"/",
"color_range",
"alpha",
"=",
"1.",
"elif",
"isinstance",
"(",
"v",
",",
"data",
".",
"Color",
")",
":",
"red",
",",
"green",
",",
"blue",
",",
"alpha",
"=",
"v",
"elif",
"isinstance",
"(",
"v",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"# normalise values according to the supplied colour range",
"# for this we make a list with the normalised data",
"color",
"=",
"[",
"]",
"for",
"index",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"v",
")",
")",
":",
"color",
".",
"append",
"(",
"v",
"[",
"index",
"]",
"/",
"color_range",
")",
"if",
"len",
"(",
"color",
")",
"==",
"1",
":",
"red",
"=",
"green",
"=",
"blue",
"=",
"alpha",
"=",
"color",
"[",
"0",
"]",
"elif",
"len",
"(",
"color",
")",
"==",
"2",
":",
"red",
"=",
"green",
"=",
"blue",
"=",
"color",
"[",
"0",
"]",
"alpha",
"=",
"color",
"[",
"1",
"]",
"elif",
"len",
"(",
"color",
")",
"==",
"3",
":",
"red",
"=",
"color",
"[",
"0",
"]",
"green",
"=",
"color",
"[",
"1",
"]",
"blue",
"=",
"color",
"[",
"2",
"]",
"alpha",
"=",
"1.",
"elif",
"len",
"(",
"color",
")",
"==",
"4",
":",
"red",
"=",
"color",
"[",
"0",
"]",
"green",
"=",
"color",
"[",
"1",
"]",
"blue",
"=",
"color",
"[",
"2",
"]",
"alpha",
"=",
"color",
"[",
"3",
"]",
"elif",
"isinstance",
"(",
"v",
",",
"basestring",
")",
":",
"# got a hexstring: first remove hash character, if any",
"v",
"=",
"v",
".",
"strip",
"(",
"'#'",
")",
"if",
"len",
"(",
"data",
")",
"==",
"6",
":",
"# RRGGBB",
"red",
"=",
"hex2dec",
"(",
"v",
"[",
"0",
":",
"2",
"]",
")",
"/",
"255.",
"green",
"=",
"hex2dec",
"(",
"v",
"[",
"2",
":",
"4",
"]",
")",
"/",
"255.",
"blue",
"=",
"hex2dec",
"(",
"v",
"[",
"4",
":",
"6",
"]",
")",
"/",
"255.",
"alpha",
"=",
"1.",
"elif",
"len",
"(",
"v",
")",
"==",
"8",
":",
"red",
"=",
"hex2dec",
"(",
"v",
"[",
"0",
":",
"2",
"]",
")",
"/",
"255.",
"green",
"=",
"hex2dec",
"(",
"v",
"[",
"2",
":",
"4",
"]",
")",
"/",
"255.",
"blue",
"=",
"hex2dec",
"(",
"v",
"[",
"4",
":",
"6",
"]",
")",
"/",
"255.",
"alpha",
"=",
"hex2dec",
"(",
"v",
"[",
"6",
":",
"8",
"]",
")",
"/",
"255.",
"return",
"red",
",",
"green",
",",
"blue",
",",
"alpha"
]
| 27.802817 | 18.619718 |
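A stand-alone check of the hexstring branch of `parse_color` above. `hex2dec` comes from the function's own module; a plausible stand-in is `int(s, 16)`, which is assumed here.

```python
v = "FF8000"  # hash already stripped, as in the function above

red = int(v[0:2], 16) / 255.
green = int(v[2:4], 16) / 255.
blue = int(v[4:6], 16) / 255.

print(red, green, blue)  # -> 1.0 0.5019... 0.0, alpha defaults to 1.
```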
def point_to_line(point, segment_start, segment_end):
"""Given a point and a line segment, return the vector from the point to
the closest point on the segment.
"""
# TODO: Needs unittests.
segment_vec = segment_end - segment_start
# t is distance along line
t = -(segment_start - point).dot(segment_vec) / (
segment_vec.length_squared())
closest_point = segment_start + scale_v3(segment_vec, t)
return point - closest_point | [
"def",
"point_to_line",
"(",
"point",
",",
"segment_start",
",",
"segment_end",
")",
":",
"# TODO: Needs unittests.",
"segment_vec",
"=",
"segment_end",
"-",
"segment_start",
"# t is distance along line",
"t",
"=",
"-",
"(",
"segment_start",
"-",
"point",
")",
".",
"dot",
"(",
"segment_vec",
")",
"/",
"(",
"segment_vec",
".",
"length_squared",
"(",
")",
")",
"closest_point",
"=",
"segment_start",
"+",
"scale_v3",
"(",
"segment_vec",
",",
"t",
")",
"return",
"point",
"-",
"closest_point"
]
| 35.384615 | 12.692308 |
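`point_to_line` above assumes a vector type supporting `-`, unary negation, `+`, `.dot()` and `.length_squared()`, plus a `scale_v3` helper; minimal 2-D stand-ins are sketched here so the function can be exercised on its own.

```python
class Vec:
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __sub__(self, o):
        return Vec(self.x - o.x, self.y - o.y)
    def __add__(self, o):
        return Vec(self.x + o.x, self.y + o.y)
    def __neg__(self):
        return Vec(-self.x, -self.y)
    def dot(self, o):
        return self.x * o.x + self.y * o.y
    def length_squared(self):
        return self.dot(self)

def scale_v3(v, t):
    return Vec(v.x * t, v.y * t)

# point above the midpoint of a horizontal segment
d = point_to_line(Vec(1, 1), Vec(0, 0), Vec(2, 0))
print(d.x, d.y)  # -> 0.0 1 (offset between the point and its projection)
```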
def _iter_frequencies(self):
"""Iterate over the frequencies of this `QPlane`
"""
# work out how many frequencies we need
minf, maxf = self.frange
fcum_mismatch = log(maxf / minf) * (2 + self.q**2)**(1/2.) / 2.
nfreq = int(max(1, ceil(fcum_mismatch / self.deltam)))
fstep = fcum_mismatch / nfreq
fstepmin = 1 / self.duration
# for each frequency, yield a QTile
for i in xrange(nfreq):
yield (minf *
exp(2 / (2 + self.q**2)**(1/2.) * (i + .5) * fstep) //
fstepmin * fstepmin) | [
"def",
"_iter_frequencies",
"(",
"self",
")",
":",
"# work out how many frequencies we need",
"minf",
",",
"maxf",
"=",
"self",
".",
"frange",
"fcum_mismatch",
"=",
"log",
"(",
"maxf",
"/",
"minf",
")",
"*",
"(",
"2",
"+",
"self",
".",
"q",
"**",
"2",
")",
"**",
"(",
"1",
"/",
"2.",
")",
"/",
"2.",
"nfreq",
"=",
"int",
"(",
"max",
"(",
"1",
",",
"ceil",
"(",
"fcum_mismatch",
"/",
"self",
".",
"deltam",
")",
")",
")",
"fstep",
"=",
"fcum_mismatch",
"/",
"nfreq",
"fstepmin",
"=",
"1",
"/",
"self",
".",
"duration",
"# for each frequency, yield a QTile",
"for",
"i",
"in",
"xrange",
"(",
"nfreq",
")",
":",
"yield",
"(",
"minf",
"*",
"exp",
"(",
"2",
"/",
"(",
"2",
"+",
"self",
".",
"q",
"**",
"2",
")",
"**",
"(",
"1",
"/",
"2.",
")",
"*",
"(",
"i",
"+",
".5",
")",
"*",
"fstep",
")",
"//",
"fstepmin",
"*",
"fstepmin",
")"
]
| 42.214286 | 10.571429 |
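A stand-alone version of the frequency ladder computed by `_iter_frequencies` above, with assumed example parameters (a typical Q value, a 10-100 Hz range, a 32 s duration).

```python
from math import log, exp, ceil

q, frange, duration, deltam = 11.3137, (10.0, 100.0), 32.0, 0.2

minf, maxf = frange
fcum_mismatch = log(maxf / minf) * (2 + q**2)**(1/2.) / 2.
nfreq = int(max(1, ceil(fcum_mismatch / deltam)))
fstep = fcum_mismatch / nfreq
fstepmin = 1 / duration  # frequencies are snapped to multiples of this

freqs = [minf * exp(2 / (2 + q**2)**(1/2.) * (i + .5) * fstep)
         // fstepmin * fstepmin for i in range(nfreq)]
print(len(freqs), freqs[0], freqs[-1])  # e.g. 66 tiles from ~10 Hz to ~98 Hz
```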
def get_side_length_of_resize_handle(view, item):
"""Calculate the side length of a resize handle
:param rafcon.gui.mygaphas.view.ExtendedGtkView view: View
:param rafcon.gui.mygaphas.items.state.StateView item: StateView
:return: side length
:rtype: float
"""
from rafcon.gui.mygaphas.items.state import StateView, NameView
if isinstance(item, StateView):
return item.border_width * view.get_zoom_factor() / 1.5
elif isinstance(item, NameView):
return item.parent.border_width * view.get_zoom_factor() / 2.5
return 0 | [
"def",
"get_side_length_of_resize_handle",
"(",
"view",
",",
"item",
")",
":",
"from",
"rafcon",
".",
"gui",
".",
"mygaphas",
".",
"items",
".",
"state",
"import",
"StateView",
",",
"NameView",
"if",
"isinstance",
"(",
"item",
",",
"StateView",
")",
":",
"return",
"item",
".",
"border_width",
"*",
"view",
".",
"get_zoom_factor",
"(",
")",
"/",
"1.5",
"elif",
"isinstance",
"(",
"item",
",",
"NameView",
")",
":",
"return",
"item",
".",
"parent",
".",
"border_width",
"*",
"view",
".",
"get_zoom_factor",
"(",
")",
"/",
"2.5",
"return",
"0"
]
| 40.071429 | 18.214286 |
def annual_event_counts(kind='all'):
"""
Returns a QuerySet of dicts, each one with these keys:
* year - a date object representing the year
* total - the number of events of `kind` that year
kind - The Event `kind`, or 'all' for all kinds (default).
"""
qs = Event.objects
if kind != 'all':
qs = qs.filter(kind=kind)
qs = qs.annotate(year=TruncYear('date')) \
.values('year') \
.annotate(total=Count('id')) \
.order_by('year')
return qs | [
"def",
"annual_event_counts",
"(",
"kind",
"=",
"'all'",
")",
":",
"qs",
"=",
"Event",
".",
"objects",
"if",
"kind",
"!=",
"'all'",
":",
"qs",
"=",
"qs",
".",
"filter",
"(",
"kind",
"=",
"kind",
")",
"qs",
"=",
"qs",
".",
"annotate",
"(",
"year",
"=",
"TruncYear",
"(",
"'date'",
")",
")",
".",
"values",
"(",
"'year'",
")",
".",
"annotate",
"(",
"total",
"=",
"Count",
"(",
"'id'",
")",
")",
".",
"order_by",
"(",
"'year'",
")",
"return",
"qs"
]
| 25.75 | 18.75 |
async def kick(self, user_id: base.Integer,
until_date: typing.Union[base.Integer, None] = None):
"""
Use this method to kick a user from a group, a supergroup or a channel.
In the case of supergroups and channels, the user will not be able to return to the group
on their own using invite links, etc., unless unbanned first.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’ setting
is off in the target group.
Otherwise members may only be removed by the group's creator or by the member that added them.
Source: https://core.telegram.org/bots/api#kickchatmember
:param user_id: Unique identifier of the target user
:type user_id: :obj:`base.Integer`
:param until_date: Date when the user will be unbanned, unix time.
:type until_date: :obj:`typing.Union[base.Integer, None]`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
return await self.bot.kick_chat_member(self.id, user_id=user_id, until_date=until_date) | [
"async",
"def",
"kick",
"(",
"self",
",",
"user_id",
":",
"base",
".",
"Integer",
",",
"until_date",
":",
"typing",
".",
"Union",
"[",
"base",
".",
"Integer",
",",
"None",
"]",
"=",
"None",
")",
":",
"return",
"await",
"self",
".",
"bot",
".",
"kick_chat_member",
"(",
"self",
".",
"id",
",",
"user_id",
"=",
"user_id",
",",
"until_date",
"=",
"until_date",
")"
]
| 53.304348 | 30.608696 |
def _GenerateNonImplementedMethod(self, method):
"""Generates and returns a method that can be set for a service methods.
Args:
method: Descriptor of the service method for which a method is to be
generated.
Returns:
A method that can be added to the service class.
"""
return lambda inst, rpc_controller, request, callback: (
self._NonImplementedMethod(method.name, rpc_controller, callback)) | [
"def",
"_GenerateNonImplementedMethod",
"(",
"self",
",",
"method",
")",
":",
"return",
"lambda",
"inst",
",",
"rpc_controller",
",",
"request",
",",
"callback",
":",
"(",
"self",
".",
"_NonImplementedMethod",
"(",
"method",
".",
"name",
",",
"rpc_controller",
",",
"callback",
")",
")"
]
| 36 | 22.583333 |
def num_cols(x):
"""Returns number of cols in a given `Tensor`."""
if tf.compat.dimension_value(x.shape[-1]) is not None:
return tf.compat.dimension_value(x.shape[-1])
return tf.shape(input=x)[-1] | [
"def",
"num_cols",
"(",
"x",
")",
":",
"if",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"x",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"is",
"not",
"None",
":",
"return",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"x",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"return",
"tf",
".",
"shape",
"(",
"input",
"=",
"x",
")",
"[",
"-",
"1",
"]"
]
| 40.4 | 11.8 |
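A quick sketch of the static-shape path of `num_cols` above, assuming TensorFlow is installed and the function is in scope; when the last dimension is statically known, a plain Python int comes back without running the graph.

```python
import tensorflow as tf

x = tf.zeros([3, 4])       # last dimension statically known
print(num_cols(x))         # -> 4, from x.shape, no tf.shape op needed
```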
def denoise(data, pon, work_dir):
"""Normalize read counts using panel of normal background or GC/mappability
"""
std_file = os.path.join(work_dir, "%s-crstandardized.tsv" % dd.get_sample_name(data))
denoise_file = os.path.join(work_dir, "%s-crdenoised.tsv" % dd.get_sample_name(data))
if not utils.file_exists(std_file):
with file_transaction(data, std_file, denoise_file) as (tx_std_file, tx_denoise_file):
params = ["-T", "DenoiseReadCounts",
"-I", tz.get_in(["depth", "bins", "target"], data),
"--standardized-copy-ratios", tx_std_file,
"--denoised-copy-ratios", tx_denoise_file]
if pon:
params += ["--count-panel-of-normals", pon]
else:
params += ["--annotated-intervals", tz.get_in(["regions", "bins", "gcannotated"], data)]
_run_with_memory_scaling(params, tx_std_file, data)
return denoise_file if pon else std_file | [
"def",
"denoise",
"(",
"data",
",",
"pon",
",",
"work_dir",
")",
":",
"std_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"%s-crstandardized.tsv\"",
"%",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
")",
"denoise_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"%s-crdenoised.tsv\"",
"%",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
")",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"std_file",
")",
":",
"with",
"file_transaction",
"(",
"data",
",",
"std_file",
",",
"denoise_file",
")",
"as",
"(",
"tx_std_file",
",",
"tx_denoise_file",
")",
":",
"params",
"=",
"[",
"\"-T\"",
",",
"\"DenoiseReadCounts\"",
",",
"\"-I\"",
",",
"tz",
".",
"get_in",
"(",
"[",
"\"depth\"",
",",
"\"bins\"",
",",
"\"target\"",
"]",
",",
"data",
")",
",",
"\"--standardized-copy-ratios\"",
",",
"tx_std_file",
",",
"\"--denoised-copy-ratios\"",
",",
"tx_denoise_file",
"]",
"if",
"pon",
":",
"params",
"+=",
"[",
"\"--count-panel-of-normals\"",
",",
"pon",
"]",
"else",
":",
"params",
"+=",
"[",
"\"--annotated-intervals\"",
",",
"tz",
".",
"get_in",
"(",
"[",
"\"regions\"",
",",
"\"bins\"",
",",
"\"gcannotated\"",
"]",
",",
"data",
")",
"]",
"_run_with_memory_scaling",
"(",
"params",
",",
"tx_std_file",
",",
"data",
")",
"return",
"denoise_file",
"if",
"pon",
"else",
"std_file"
]
| 57.941176 | 23.705882 |
def setSparseOutput(outputs, name, value):
"""
Set region sparse output value.
The region output memory is owned by the c++ caller and cannot be changed
directly from python. Use this method to update the sparse output fields in
the "outputs" array so it can be resized from the c++ code.
:param outputs: (dict) of numpy arrays. This is the original outputs dict
owned by the C++ caller, passed to region via the compute method to
be updated.
:param name: (string) name of an existing output to modify
:param value: (list) list of UInt32 indices of all the nonzero entries
representing the sparse array to be set
"""
# The region output memory is owned by the c++ and cannot be changed from
  # python. We use a special attribute named "__{name}_len__" to pass
# the sparse array length back to c++
lenAttr = "__{}_len__".format(name)
if lenAttr not in outputs:
raise Exception("Output {} is not a valid sparse output".format(name))
if outputs[name].size < value.size:
raise Exception(
"Output {} must be less than {}. Given value size is {}".format(
name, outputs[name].size, value.size))
outputs[lenAttr][0] = value.size
outputs[name][:value.size] = value | [
"def",
"setSparseOutput",
"(",
"outputs",
",",
"name",
",",
"value",
")",
":",
"# The region output memory is owned by the c++ and cannot be changed from",
"# python. We use a special attribule named \"__{name}_len__\" to pass",
"# the sparse array length back to c++",
"lenAttr",
"=",
"\"__{}_len__\"",
".",
"format",
"(",
"name",
")",
"if",
"lenAttr",
"not",
"in",
"outputs",
":",
"raise",
"Exception",
"(",
"\"Output {} is not a valid sparse output\"",
".",
"format",
"(",
"name",
")",
")",
"if",
"outputs",
"[",
"name",
"]",
".",
"size",
"<",
"value",
".",
"size",
":",
"raise",
"Exception",
"(",
"\"Output {} must be less than {}. Given value size is {}\"",
".",
"format",
"(",
"name",
",",
"outputs",
"[",
"name",
"]",
".",
"size",
",",
"value",
".",
"size",
")",
")",
"outputs",
"[",
"lenAttr",
"]",
"[",
"0",
"]",
"=",
"value",
".",
"size",
"outputs",
"[",
"name",
"]",
"[",
":",
"value",
".",
"size",
"]",
"=",
"value"
]
| 44.137931 | 20.827586 |
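A toy call of `setSparseOutput` above, with an `outputs` dict shaped like the one the C++ caller would hand in: a fixed-size buffer plus the special `__{name}_len__` length slot. The output name is hypothetical.

```python
import numpy as np

outputs = {
    "activeCells": np.zeros(10, dtype=np.uint32),       # fixed-size buffer
    "__activeCells_len__": np.zeros(1, dtype=np.uint32) # length slot for C++
}

setSparseOutput(outputs, "activeCells",
                np.array([2, 5, 7], dtype=np.uint32))

print(outputs["__activeCells_len__"][0])  # -> 3
print(outputs["activeCells"][:3])         # -> [2 5 7]
```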
def merge_dict(to_update: dict, other_dict: dict):
""" merges b into a """
for key, value in other_dict.items():
has_map = (
isinstance(value, collections.Mapping) and
isinstance(to_update.get(key, None), collections.Mapping)
)
if has_map:
merge_dict(to_update[key], value)
else:
to_update[key] = value | [
"def",
"merge_dict",
"(",
"to_update",
":",
"dict",
",",
"other_dict",
":",
"dict",
")",
":",
"for",
"key",
",",
"value",
"in",
"other_dict",
".",
"items",
"(",
")",
":",
"has_map",
"=",
"(",
"isinstance",
"(",
"value",
",",
"collections",
".",
"Mapping",
")",
"and",
"isinstance",
"(",
"to_update",
".",
"get",
"(",
"key",
",",
"None",
")",
",",
"collections",
".",
"Mapping",
")",
")",
"if",
"has_map",
":",
"merge_dict",
"(",
"to_update",
"[",
"key",
"]",
",",
"value",
")",
"else",
":",
"to_update",
"[",
"key",
"]",
"=",
"value"
]
| 31.666667 | 17.083333 |
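A small usage sketch for `merge_dict` above. The function as written uses `collections.Mapping`, which Python 3.10+ only exposes as `collections.abc.Mapping`, so an alias is set first on modern interpreters.

```python
import collections
import collections.abc

if not hasattr(collections, "Mapping"):
    collections.Mapping = collections.abc.Mapping  # py3.10+ compatibility shim

a = {"db": {"host": "localhost", "port": 5432}, "debug": False}
b = {"db": {"port": 6432}, "debug": True}

merge_dict(a, b)  # nested mappings are merged, scalars overwritten
print(a)  # {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}
```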
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the ProtocolVersion struct and decode it into
its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if either the major or minor protocol versions
are missing from the encoding.
"""
super(ProtocolVersion, self).read(
input_stream,
kmip_version=kmip_version
)
local_stream = utils.BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.PROTOCOL_VERSION_MAJOR, local_stream):
self._major = primitives.Integer(
tag=enums.Tags.PROTOCOL_VERSION_MAJOR
)
self._major.read(local_stream, kmip_version=kmip_version)
else:
raise ValueError(
"Invalid encoding missing the major protocol version number."
)
if self.is_tag_next(enums.Tags.PROTOCOL_VERSION_MINOR, local_stream):
self._minor = primitives.Integer(
tag=enums.Tags.PROTOCOL_VERSION_MINOR
)
self._minor.read(local_stream, kmip_version=kmip_version)
else:
raise ValueError(
"Invalid encoding missing the minor protocol version number."
)
self.is_oversized(local_stream) | [
"def",
"read",
"(",
"self",
",",
"input_stream",
",",
"kmip_version",
"=",
"enums",
".",
"KMIPVersion",
".",
"KMIP_1_0",
")",
":",
"super",
"(",
"ProtocolVersion",
",",
"self",
")",
".",
"read",
"(",
"input_stream",
",",
"kmip_version",
"=",
"kmip_version",
")",
"local_stream",
"=",
"utils",
".",
"BytearrayStream",
"(",
"input_stream",
".",
"read",
"(",
"self",
".",
"length",
")",
")",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"PROTOCOL_VERSION_MAJOR",
",",
"local_stream",
")",
":",
"self",
".",
"_major",
"=",
"primitives",
".",
"Integer",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"PROTOCOL_VERSION_MAJOR",
")",
"self",
".",
"_major",
".",
"read",
"(",
"local_stream",
",",
"kmip_version",
"=",
"kmip_version",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid encoding missing the major protocol version number.\"",
")",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"PROTOCOL_VERSION_MINOR",
",",
"local_stream",
")",
":",
"self",
".",
"_minor",
"=",
"primitives",
".",
"Integer",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"PROTOCOL_VERSION_MINOR",
")",
"self",
".",
"_minor",
".",
"read",
"(",
"local_stream",
",",
"kmip_version",
"=",
"kmip_version",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid encoding missing the minor protocol version number.\"",
")",
"self",
".",
"is_oversized",
"(",
"local_stream",
")"
]
| 38.772727 | 23.545455 |
def get_all(self, include_archived=False):
"""Get all the conversations.
Args:
include_archived (bool): (optional) Whether to include archived
conversations. Defaults to ``False``.
Returns:
List of all :class:`.Conversation` objects.
"""
return [conv for conv in self._conv_dict.values()
if not conv.is_archived or include_archived] | [
"def",
"get_all",
"(",
"self",
",",
"include_archived",
"=",
"False",
")",
":",
"return",
"[",
"conv",
"for",
"conv",
"in",
"self",
".",
"_conv_dict",
".",
"values",
"(",
")",
"if",
"not",
"conv",
".",
"is_archived",
"or",
"include_archived",
"]"
]
| 34.916667 | 19.416667 |
def pprint(self, index=False, delimiter='-'):
"""Pretty-print the binary tree.
:param index: If set to True (default: False), display level-order_
indexes using the format: ``{index}{delimiter}{value}``.
:type index: bool
:param delimiter: Delimiter character between the node index and
the node value (default: '-').
:type delimiter: str | unicode
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1) # index: 0, value: 1
>>> root.left = Node(2) # index: 1, value: 2
>>> root.right = Node(3) # index: 2, value: 3
>>> root.left.right = Node(4) # index: 4, value: 4
>>>
>>> root.pprint()
<BLANKLINE>
__1
/ \\
2 3
\\
4
<BLANKLINE>
>>> root.pprint(index=True) # Format: {index}-{value}
<BLANKLINE>
_____0-1_
/ \\
1-2_ 2-3
\\
4-4
<BLANKLINE>
.. note::
If you do not need level-order_ indexes in the output string, use
:func:`binarytree.Node.__str__` instead.
.. _level-order:
https://en.wikipedia.org/wiki/Tree_traversal#Breadth-first_search
"""
lines = _build_tree_string(self, 0, index, delimiter)[0]
print('\n' + '\n'.join((line.rstrip() for line in lines))) | [
"def",
"pprint",
"(",
"self",
",",
"index",
"=",
"False",
",",
"delimiter",
"=",
"'-'",
")",
":",
"lines",
"=",
"_build_tree_string",
"(",
"self",
",",
"0",
",",
"index",
",",
"delimiter",
")",
"[",
"0",
"]",
"print",
"(",
"'\\n'",
"+",
"'\\n'",
".",
"join",
"(",
"(",
"line",
".",
"rstrip",
"(",
")",
"for",
"line",
"in",
"lines",
")",
")",
")"
]
| 33 | 21.957447 |
def inv_slots_preferred(self):
"""
List of all available inventory slots in the preferred search order.
Does not include the additional slots from the open window.
1. active slot
2. remainder of the hotbar
3. remainder of the persistent inventory
"""
slots = [self.active_slot]
slots.extend(slot for slot in self.window.hotbar_slots
if slot != self.active_slot)
slots.extend(self.window.inventory_slots)
return slots | [
"def",
"inv_slots_preferred",
"(",
"self",
")",
":",
"slots",
"=",
"[",
"self",
".",
"active_slot",
"]",
"slots",
".",
"extend",
"(",
"slot",
"for",
"slot",
"in",
"self",
".",
"window",
".",
"hotbar_slots",
"if",
"slot",
"!=",
"self",
".",
"active_slot",
")",
"slots",
".",
"extend",
"(",
"self",
".",
"window",
".",
"inventory_slots",
")",
"return",
"slots"
]
| 36.642857 | 15.071429 |
def log_analyzer(path):
"""This procedure replaces every line which can't be parsed
    with the special object MalformedLogEntry.
"""
with handle(MalformedLogEntryError,
lambda (c):
invoke_restart('use_value',
MalformedLogEntry(c.text))):
for filename in find_all_logs(path):
analyze_log(filename) | [
"def",
"log_analyzer",
"(",
"path",
")",
":",
"with",
"handle",
"(",
"MalformedLogEntryError",
",",
"lambda",
"(",
"c",
")",
":",
"invoke_restart",
"(",
"'use_value'",
",",
"MalformedLogEntry",
"(",
"c",
".",
"text",
")",
")",
")",
":",
"for",
"filename",
"in",
"find_all_logs",
"(",
"path",
")",
":",
"analyze_log",
"(",
"filename",
")"
]
| 39.4 | 7.6 |
def sample_indexes(segyfile, t0=0.0, dt_override=None):
"""
Creates a list of values representing the samples in a trace at depth or time.
    The list starts at *t0* and is incremented by *dt* for the number of samples.
If a *dt_override* is not provided it will try to find a *dt* in the file.
Parameters
----------
segyfile : segyio.SegyFile
t0 : float
initial sample, or delay-recording-time
dt_override : float or None
Returns
-------
samples : array_like of float
Notes
-----
.. versionadded:: 1.1
"""
if dt_override is None:
dt_override = dt(segyfile)
return [t0 + t * dt_override for t in range(len(segyfile.samples))] | [
"def",
"sample_indexes",
"(",
"segyfile",
",",
"t0",
"=",
"0.0",
",",
"dt_override",
"=",
"None",
")",
":",
"if",
"dt_override",
"is",
"None",
":",
"dt_override",
"=",
"dt",
"(",
"segyfile",
")",
"return",
"[",
"t0",
"+",
"t",
"*",
"dt_override",
"for",
"t",
"in",
"range",
"(",
"len",
"(",
"segyfile",
".",
"samples",
")",
")",
"]"
]
| 24.821429 | 25.535714 |
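As a quick sanity check of the comprehension above, assuming a trace of five samples, a 4 ms sample interval, and zero delay-recording-time:

t0, dt_val, n_samples = 0.0, 4.0, 5  # assumed values for illustration
samples = [t0 + t * dt_val for t in range(n_samples)]
assert samples == [0.0, 4.0, 8.0, 12.0, 16.0]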
def files_write(self, path, file, offset=0, create=False, truncate=False,
count=None, **kwargs):
"""Writes to a mutable file in the MFS.
.. code-block:: python
>>> c.files_write("/test/file", io.BytesIO(b"hi"), create=True)
b''
Parameters
----------
path : str
Filepath within the MFS
file : io.RawIOBase
IO stream object with data that should be written
offset : int
Byte offset at which to begin writing at
create : bool
Create the file if it does not exist
truncate : bool
Truncate the file to size zero before writing
count : int
Maximum number of bytes to read from the source ``file``
"""
opts = {"offset": offset, "create": create, "truncate": truncate}
if count is not None:
opts["count"] = count
kwargs.setdefault("opts", opts)
args = (path,)
body, headers = multipart.stream_files(file, self.chunk_size)
return self._client.request('/files/write', args,
data=body, headers=headers, **kwargs) | [
"def",
"files_write",
"(",
"self",
",",
"path",
",",
"file",
",",
"offset",
"=",
"0",
",",
"create",
"=",
"False",
",",
"truncate",
"=",
"False",
",",
"count",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"opts",
"=",
"{",
"\"offset\"",
":",
"offset",
",",
"\"create\"",
":",
"create",
",",
"\"truncate\"",
":",
"truncate",
"}",
"if",
"count",
"is",
"not",
"None",
":",
"opts",
"[",
"\"count\"",
"]",
"=",
"count",
"kwargs",
".",
"setdefault",
"(",
"\"opts\"",
",",
"opts",
")",
"args",
"=",
"(",
"path",
",",
")",
"body",
",",
"headers",
"=",
"multipart",
".",
"stream_files",
"(",
"file",
",",
"self",
".",
"chunk_size",
")",
"return",
"self",
".",
"_client",
".",
"request",
"(",
"'/files/write'",
",",
"args",
",",
"data",
"=",
"body",
",",
"headers",
"=",
"headers",
",",
"*",
"*",
"kwargs",
")"
]
| 35.545455 | 20.030303 |
def reconfigure_log_level(self):
"""
Returns a new standard logger instance
"""
if Global.LOGGER:
Global.LOGGER.debug('reconfiguring logger level')
stream_handlers = filter(lambda x: type(x) is logging.StreamHandler,
self._logger_instance.handlers)
for x in stream_handlers:
x.level = Global.CONFIG_MANAGER.log_level
return self.get_logger() | [
"def",
"reconfigure_log_level",
"(",
"self",
")",
":",
"if",
"Global",
".",
"LOGGER",
":",
"Global",
".",
"LOGGER",
".",
"debug",
"(",
"'reconfiguring logger level'",
")",
"stream_handlers",
"=",
"filter",
"(",
"lambda",
"x",
":",
"type",
"(",
"x",
")",
"is",
"logging",
".",
"StreamHandler",
",",
"self",
".",
"_logger_instance",
".",
"handlers",
")",
"for",
"x",
"in",
"stream_handlers",
":",
"x",
".",
"level",
"=",
"Global",
".",
"CONFIG_MANAGER",
".",
"log_level",
"return",
"self",
".",
"get_logger",
"(",
")"
]
| 34.153846 | 16.769231 |
def _set_tab_width(self, tab_width):
""" Sets the width (in terms of space characters) for tab characters.
"""
font_metrics = QtGui.QFontMetrics(self.font)
self._control.setTabStopWidth(tab_width * font_metrics.width(' '))
self._tab_width = tab_width | [
"def",
"_set_tab_width",
"(",
"self",
",",
"tab_width",
")",
":",
"font_metrics",
"=",
"QtGui",
".",
"QFontMetrics",
"(",
"self",
".",
"font",
")",
"self",
".",
"_control",
".",
"setTabStopWidth",
"(",
"tab_width",
"*",
"font_metrics",
".",
"width",
"(",
"' '",
")",
")",
"self",
".",
"_tab_width",
"=",
"tab_width"
]
| 40.714286 | 13.571429 |
def limit(self, limit):
"""
Sets the limit of this ListEmployeeWagesRequest.
Maximum number of Employee Wages to return per page. Can range between 1 and 200. The default is the maximum at 200.
:param limit: The limit of this ListEmployeeWagesRequest.
:type: int
"""
if limit is None:
raise ValueError("Invalid value for `limit`, must not be `None`")
if limit > 200:
raise ValueError("Invalid value for `limit`, must be a value less than or equal to `200`")
if limit < 1:
raise ValueError("Invalid value for `limit`, must be a value greater than or equal to `1`")
self._limit = limit | [
"def",
"limit",
"(",
"self",
",",
"limit",
")",
":",
"if",
"limit",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `limit`, must not be `None`\"",
")",
"if",
"limit",
">",
"200",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `limit`, must be a value less than or equal to `200`\"",
")",
"if",
"limit",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `limit`, must be a value greater than or equal to `1`\"",
")",
"self",
".",
"_limit",
"=",
"limit"
]
| 40.352941 | 30 |
def config_control(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-argument
'''
Will check if the configuration was changed.
If differences found, will try to commit.
In case commit unsuccessful, will try to rollback.
:return: A tuple with a boolean that specifies if the config was changed/committed/rollbacked on the device.\
And a string that provides more details of the reason why the configuration was not committed properly.
CLI Example:
.. code-block:: bash
salt '*' net.config_control
'''
result = True
comment = ''
changed, not_changed_rsn = config_changed(inherit_napalm_device=napalm_device) # pylint: disable=undefined-variable
if not changed:
return (changed, not_changed_rsn)
# config changed, thus let's try to commit
try_commit = commit()
if not try_commit.get('result'):
result = False
comment = 'Unable to commit the changes: {reason}.\n\
Will try to rollback now!'.format(
reason=try_commit.get('comment')
)
try_rollback = rollback()
if not try_rollback.get('result'):
comment += '\nCannot rollback! {reason}'.format(
reason=try_rollback.get('comment')
)
return result, comment | [
"def",
"config_control",
"(",
"inherit_napalm_device",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=unused-argument",
"result",
"=",
"True",
"comment",
"=",
"''",
"changed",
",",
"not_changed_rsn",
"=",
"config_changed",
"(",
"inherit_napalm_device",
"=",
"napalm_device",
")",
"# pylint: disable=undefined-variable",
"if",
"not",
"changed",
":",
"return",
"(",
"changed",
",",
"not_changed_rsn",
")",
"# config changed, thus let's try to commit",
"try_commit",
"=",
"commit",
"(",
")",
"if",
"not",
"try_commit",
".",
"get",
"(",
"'result'",
")",
":",
"result",
"=",
"False",
"comment",
"=",
"'Unable to commit the changes: {reason}.\\n\\\n Will try to rollback now!'",
".",
"format",
"(",
"reason",
"=",
"try_commit",
".",
"get",
"(",
"'comment'",
")",
")",
"try_rollback",
"=",
"rollback",
"(",
")",
"if",
"not",
"try_rollback",
".",
"get",
"(",
"'result'",
")",
":",
"comment",
"+=",
"'\\nCannot rollback! {reason}'",
".",
"format",
"(",
"reason",
"=",
"try_rollback",
".",
"get",
"(",
"'comment'",
")",
")",
"return",
"result",
",",
"comment"
]
| 32.564103 | 26.153846 |
def library(repo):
"""Load packages from slpkg library and from local
"""
pkg_list, packages = [], ""
if repo == "sbo":
if (os.path.isfile(
_meta_.lib_path + "{0}_repo/SLACKBUILDS.TXT".format(repo))):
packages = Utils().read_file(_meta_.lib_path + "{0}_repo/"
"SLACKBUILDS.TXT".format(repo))
else:
if (os.path.isfile(
_meta_.lib_path + "{0}_repo/PACKAGES.TXT".format(repo))):
packages = Utils().read_file(_meta_.lib_path + "{0}_repo/"
"PACKAGES.TXT".format(repo))
for line in packages.splitlines():
if repo == "sbo":
if line.startswith("SLACKBUILD NAME: "):
pkg_list.append(line[17:].strip())
elif "local" not in repo:
if line.startswith("PACKAGE NAME: "):
pkg_list.append(line[15:].strip())
if repo == "local":
pkg_list = find_package("", _meta_.pkg_path)
return pkg_list | [
"def",
"library",
"(",
"repo",
")",
":",
"pkg_list",
",",
"packages",
"=",
"[",
"]",
",",
"\"\"",
"if",
"repo",
"==",
"\"sbo\"",
":",
"if",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"_meta_",
".",
"lib_path",
"+",
"\"{0}_repo/SLACKBUILDS.TXT\"",
".",
"format",
"(",
"repo",
")",
")",
")",
":",
"packages",
"=",
"Utils",
"(",
")",
".",
"read_file",
"(",
"_meta_",
".",
"lib_path",
"+",
"\"{0}_repo/\"",
"\"SLACKBUILDS.TXT\"",
".",
"format",
"(",
"repo",
")",
")",
"else",
":",
"if",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"_meta_",
".",
"lib_path",
"+",
"\"{0}_repo/PACKAGES.TXT\"",
".",
"format",
"(",
"repo",
")",
")",
")",
":",
"packages",
"=",
"Utils",
"(",
")",
".",
"read_file",
"(",
"_meta_",
".",
"lib_path",
"+",
"\"{0}_repo/\"",
"\"PACKAGES.TXT\"",
".",
"format",
"(",
"repo",
")",
")",
"for",
"line",
"in",
"packages",
".",
"splitlines",
"(",
")",
":",
"if",
"repo",
"==",
"\"sbo\"",
":",
"if",
"line",
".",
"startswith",
"(",
"\"SLACKBUILD NAME: \"",
")",
":",
"pkg_list",
".",
"append",
"(",
"line",
"[",
"17",
":",
"]",
".",
"strip",
"(",
")",
")",
"elif",
"\"local\"",
"not",
"in",
"repo",
":",
"if",
"line",
".",
"startswith",
"(",
"\"PACKAGE NAME: \"",
")",
":",
"pkg_list",
".",
"append",
"(",
"line",
"[",
"15",
":",
"]",
".",
"strip",
"(",
")",
")",
"if",
"repo",
"==",
"\"local\"",
":",
"pkg_list",
"=",
"find_package",
"(",
"\"\"",
",",
"_meta_",
".",
"pkg_path",
")",
"return",
"pkg_list"
]
| 42.291667 | 17.166667 |
def deltafmt(delta, decimals = None):
"""
Returns a human readable representation of a time with the format:
[[[Ih]Jm]K[.L]s
For example: 6h5m23s
If "decimals" is specified, the seconds will be output with that many decimal places.
If not, there will be two places for times less than 1 minute, one place for times
less than 10 minutes, and zero places otherwise
"""
try:
delta = float(delta)
except:
return '(bad delta: %s)' % (str(delta),)
if delta < 60:
if decimals is None:
decimals = 2
return ("{0:."+str(decimals)+"f}s").format(delta)
mins = int(delta/60)
secs = delta - mins*60
if delta < 600:
if decimals is None:
decimals = 1
return ("{0:d}m{1:."+str(decimals)+"f}s").format(mins, secs)
if decimals is None:
decimals = 0
hours = int(mins/60)
mins -= hours*60
if delta < 3600:
return "{0:d}m{1:.0f}s".format(mins, secs)
else:
return ("{0:d}h{1:d}m{2:."+str(decimals)+"f}s").format(hours, mins, secs) | [
"def",
"deltafmt",
"(",
"delta",
",",
"decimals",
"=",
"None",
")",
":",
"try",
":",
"delta",
"=",
"float",
"(",
"delta",
")",
"except",
":",
"return",
"'(bad delta: %s)'",
"%",
"(",
"str",
"(",
"delta",
")",
",",
")",
"if",
"delta",
"<",
"60",
":",
"if",
"decimals",
"is",
"None",
":",
"decimals",
"=",
"2",
"return",
"(",
"\"{0:.\"",
"+",
"str",
"(",
"decimals",
")",
"+",
"\"f}s\"",
")",
".",
"format",
"(",
"delta",
")",
"mins",
"=",
"int",
"(",
"delta",
"/",
"60",
")",
"secs",
"=",
"delta",
"-",
"mins",
"*",
"60",
"if",
"delta",
"<",
"600",
":",
"if",
"decimals",
"is",
"None",
":",
"decimals",
"=",
"1",
"return",
"(",
"\"{0:d}m{1:.\"",
"+",
"str",
"(",
"decimals",
")",
"+",
"\"f}s\"",
")",
".",
"format",
"(",
"mins",
",",
"secs",
")",
"if",
"decimals",
"is",
"None",
":",
"decimals",
"=",
"0",
"hours",
"=",
"int",
"(",
"mins",
"/",
"60",
")",
"mins",
"-=",
"hours",
"*",
"60",
"if",
"delta",
"<",
"3600",
":",
"return",
"\"{0:d}m{1:.0f}s\"",
".",
"format",
"(",
"mins",
",",
"secs",
")",
"else",
":",
"return",
"(",
"\"{0:d}h{1:d}m{2:.\"",
"+",
"str",
"(",
"decimals",
")",
"+",
"\"f}s\"",
")",
".",
"format",
"(",
"hours",
",",
"mins",
",",
"secs",
")"
]
| 30.147059 | 20.970588 |
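A few hand-checked renderings of the precision rules above, assuming deltafmt from this sample is in scope:

assert deltafmt(45.678) == "45.68s"  # under 1 minute: two decimal places
assert deltafmt(125) == "2m5.0s"     # under 10 minutes: one decimal place
assert deltafmt(3723) == "1h2m3s"    # an hour or more: zero decimal places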
def transfer_session_cookies_to_driver(self, domain=None):
"""Copies the Session's cookies into the webdriver
Using the 'domain' parameter we choose the cookies we wish to transfer, we only
transfer the cookies which belong to that domain. The domain defaults to our last visited
site if not provided.
"""
if not domain and self._last_requests_url:
domain = tldextract.extract(self._last_requests_url).registered_domain
elif not domain and not self._last_requests_url:
raise Exception('Trying to transfer cookies to selenium without specifying a domain '
'and without having visited any page in the current session')
# Transfer cookies
for c in [c for c in self.cookies if domain in c.domain]:
self.driver.ensure_add_cookie({'name': c.name, 'value': c.value, 'path': c.path,
'expiry': c.expires, 'domain': c.domain}) | [
"def",
"transfer_session_cookies_to_driver",
"(",
"self",
",",
"domain",
"=",
"None",
")",
":",
"if",
"not",
"domain",
"and",
"self",
".",
"_last_requests_url",
":",
"domain",
"=",
"tldextract",
".",
"extract",
"(",
"self",
".",
"_last_requests_url",
")",
".",
"registered_domain",
"elif",
"not",
"domain",
"and",
"not",
"self",
".",
"_last_requests_url",
":",
"raise",
"Exception",
"(",
"'Trying to transfer cookies to selenium without specifying a domain '",
"'and without having visited any page in the current session'",
")",
"# Transfer cookies",
"for",
"c",
"in",
"[",
"c",
"for",
"c",
"in",
"self",
".",
"cookies",
"if",
"domain",
"in",
"c",
".",
"domain",
"]",
":",
"self",
".",
"driver",
".",
"ensure_add_cookie",
"(",
"{",
"'name'",
":",
"c",
".",
"name",
",",
"'value'",
":",
"c",
".",
"value",
",",
"'path'",
":",
"c",
".",
"path",
",",
"'expiry'",
":",
"c",
".",
"expires",
",",
"'domain'",
":",
"c",
".",
"domain",
"}",
")"
]
| 57.705882 | 30.705882 |
def update_stats_history(self):
"""Update stats history."""
# If the plugin data is a dict, the dict's key should be used
if self.get_key() is None:
item_name = ''
else:
item_name = self.get_key()
# Build the history
if self.get_export() and self.history_enable():
for i in self.get_items_history_list():
if isinstance(self.get_export(), list):
# Stats is a list of data
                    # Iterate through it (for example, iterate through network
# interface)
for l in self.get_export():
self.stats_history.add(
nativestr(l[item_name]) + '_' + nativestr(i['name']),
l[i['name']],
description=i['description'],
history_max_size=self._limits['history_size'])
else:
# Stats is not a list
# Add the item to the history directly
self.stats_history.add(nativestr(i['name']),
self.get_export()[i['name']],
description=i['description'],
history_max_size=self._limits['history_size']) | [
"def",
"update_stats_history",
"(",
"self",
")",
":",
"# If the plugin data is a dict, the dict's key should be used",
"if",
"self",
".",
"get_key",
"(",
")",
"is",
"None",
":",
"item_name",
"=",
"''",
"else",
":",
"item_name",
"=",
"self",
".",
"get_key",
"(",
")",
"# Build the history",
"if",
"self",
".",
"get_export",
"(",
")",
"and",
"self",
".",
"history_enable",
"(",
")",
":",
"for",
"i",
"in",
"self",
".",
"get_items_history_list",
"(",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"get_export",
"(",
")",
",",
"list",
")",
":",
"# Stats is a list of data",
"# Iter throught it (for exemple, iter throught network",
"# interface)",
"for",
"l",
"in",
"self",
".",
"get_export",
"(",
")",
":",
"self",
".",
"stats_history",
".",
"add",
"(",
"nativestr",
"(",
"l",
"[",
"item_name",
"]",
")",
"+",
"'_'",
"+",
"nativestr",
"(",
"i",
"[",
"'name'",
"]",
")",
",",
"l",
"[",
"i",
"[",
"'name'",
"]",
"]",
",",
"description",
"=",
"i",
"[",
"'description'",
"]",
",",
"history_max_size",
"=",
"self",
".",
"_limits",
"[",
"'history_size'",
"]",
")",
"else",
":",
"# Stats is not a list",
"# Add the item to the history directly",
"self",
".",
"stats_history",
".",
"add",
"(",
"nativestr",
"(",
"i",
"[",
"'name'",
"]",
")",
",",
"self",
".",
"get_export",
"(",
")",
"[",
"i",
"[",
"'name'",
"]",
"]",
",",
"description",
"=",
"i",
"[",
"'description'",
"]",
",",
"history_max_size",
"=",
"self",
".",
"_limits",
"[",
"'history_size'",
"]",
")"
]
| 49.962963 | 17.407407 |
def index():
""" Display productpage with normal user and test user buttons"""
global productpage
table = json2html.convert(json = json.dumps(productpage),
table_attributes="class=\"table table-condensed table-bordered table-hover\"")
return render_template('index.html', serviceTable=table) | [
"def",
"index",
"(",
")",
":",
"global",
"productpage",
"table",
"=",
"json2html",
".",
"convert",
"(",
"json",
"=",
"json",
".",
"dumps",
"(",
"productpage",
")",
",",
"table_attributes",
"=",
"\"class=\\\"table table-condensed table-bordered table-hover\\\"\"",
")",
"return",
"render_template",
"(",
"'index.html'",
",",
"serviceTable",
"=",
"table",
")"
]
| 41.5 | 29.375 |
def CompileReport(self, mediator):
"""Compiles an analysis report.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
Returns:
AnalysisReport: analysis report.
"""
report_text = [
'Sessionize plugin identified {0:d} sessions and '
'applied {1:d} tags.'.format(
len(self._events_per_session), self._number_of_event_tags)]
for session, event_count in enumerate(self._events_per_session):
report_text.append('\tSession {0:d}: {1:d} events'.format(
session, event_count))
report_text = '\n'.join(report_text)
return reports.AnalysisReport(plugin_name=self.NAME, text=report_text) | [
"def",
"CompileReport",
"(",
"self",
",",
"mediator",
")",
":",
"report_text",
"=",
"[",
"'Sessionize plugin identified {0:d} sessions and '",
"'applied {1:d} tags.'",
".",
"format",
"(",
"len",
"(",
"self",
".",
"_events_per_session",
")",
",",
"self",
".",
"_number_of_event_tags",
")",
"]",
"for",
"session",
",",
"event_count",
"in",
"enumerate",
"(",
"self",
".",
"_events_per_session",
")",
":",
"report_text",
".",
"append",
"(",
"'\\tSession {0:d}: {1:d} events'",
".",
"format",
"(",
"session",
",",
"event_count",
")",
")",
"report_text",
"=",
"'\\n'",
".",
"join",
"(",
"report_text",
")",
"return",
"reports",
".",
"AnalysisReport",
"(",
"plugin_name",
"=",
"self",
".",
"NAME",
",",
"text",
"=",
"report_text",
")"
]
| 38.789474 | 19.631579 |
def current_iteration(self):
"""Get the index of the current iteration.
Returns
-------
cur_iter : int
The index of the current iteration.
"""
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value | [
"def",
"current_iteration",
"(",
"self",
")",
":",
"out_cur_iter",
"=",
"ctypes",
".",
"c_int",
"(",
"0",
")",
"_safe_call",
"(",
"_LIB",
".",
"LGBM_BoosterGetCurrentIteration",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"out_cur_iter",
")",
")",
")",
"return",
"out_cur_iter",
".",
"value"
]
| 29.153846 | 12.923077 |
def write_h5ad(
self,
filename: Optional[PathLike] = None,
compression: Optional[str] = None,
compression_opts: Union[int, Any] = None,
force_dense: Optional[bool] = None
):
"""Write ``.h5ad``-formatted hdf5 file.
.. note::
Setting compression to ``'gzip'`` can save disk space but
will slow down writing and subsequent reading. Prior to
v0.6.16, this was the default for parameter
``compression``.
Generally, if you have sparse data that are stored as a dense
matrix, you can dramatically improve performance and reduce
disk space by converting to a :class:`~scipy.sparse.csr_matrix`::
from scipy.sparse import csr_matrix
adata.X = csr_matrix(adata.X)
Parameters
----------
filename
Filename of data file. Defaults to backing file.
compression : ``None``, {``'gzip'``, ``'lzf'``} (default: ``None``)
See the h5py :ref:`dataset_compression`.
compression_opts
See the h5py :ref:`dataset_compression`.
force_dense
Write sparse data as a dense matrix. Defaults to ``True`` if object is
backed, otherwise to ``False``.
"""
from .readwrite.write import _write_h5ad
if filename is None and not self.isbacked:
raise ValueError('Provide a filename!')
if filename is None:
filename = self.filename
if force_dense is None:
force_dense = self.isbacked
_write_h5ad(filename, self, compression=compression,
compression_opts=compression_opts, force_dense=force_dense)
if self.isbacked:
self.file.close() | [
"def",
"write_h5ad",
"(",
"self",
",",
"filename",
":",
"Optional",
"[",
"PathLike",
"]",
"=",
"None",
",",
"compression",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"compression_opts",
":",
"Union",
"[",
"int",
",",
"Any",
"]",
"=",
"None",
",",
"force_dense",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
")",
":",
"from",
".",
"readwrite",
".",
"write",
"import",
"_write_h5ad",
"if",
"filename",
"is",
"None",
"and",
"not",
"self",
".",
"isbacked",
":",
"raise",
"ValueError",
"(",
"'Provide a filename!'",
")",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"self",
".",
"filename",
"if",
"force_dense",
"is",
"None",
":",
"force_dense",
"=",
"self",
".",
"isbacked",
"_write_h5ad",
"(",
"filename",
",",
"self",
",",
"compression",
"=",
"compression",
",",
"compression_opts",
"=",
"compression_opts",
",",
"force_dense",
"=",
"force_dense",
")",
"if",
"self",
".",
"isbacked",
":",
"self",
".",
"file",
".",
"close",
"(",
")"
]
| 35.469388 | 20.326531 |
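The sparse-conversion tip from the docstring above, as a runnable sketch (the toy matrix is illustrative):

import numpy as np
from scipy.sparse import csr_matrix

dense = np.eye(4)           # a mostly-zero matrix stored densely
sparse = csr_matrix(dense)  # CSR keeps only the 4 nonzero entries
assert sparse.nnz == 4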
def create_new_account(data_dir, password, **geth_kwargs):
"""Creates a new Ethereum account on geth.
This is useful for testing when you want to stress
interaction (transfers) between Ethereum accounts.
    This function communicates with the ``geth`` command over
    terminal interaction. It creates the keystore folder and a new
    account there.
This function only works against offline geth processes,
because geth builds an account cache when starting up.
    If the geth process is already running you can create new
    accounts using the
`web3.personal.newAccount()
<https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console#personalnewaccount>_`
RPC API.
Example py.test fixture for tests:
.. code-block:: python
import os
from geth.wrapper import DEFAULT_PASSWORD_PATH
from geth.accounts import create_new_account
@pytest.fixture
def target_account() -> str:
'''Create a new Ethereum account on a running Geth node.
The account can be used as a withdrawal target for tests.
:return: 0x address of the account
'''
# We store keystore files in the current working directory
# of the test run
data_dir = os.getcwd()
# Use the default password "this-is-not-a-secure-password"
# as supplied in geth/default_blockchain_password file.
# The supplied password must be bytes, not string,
# as we only want ASCII characters and do not want to
# deal encoding problems with passwords
account = create_new_account(data_dir, DEFAULT_PASSWORD_PATH)
return account
    :param data_dir: Geth data dir path - where to keep "keystore" folder
    :param password: Path to a file containing the password
    for the newly created account
    :param geth_kwargs: Extra command line arguments passed to geth
:return: Account as 0x prefixed hex string
"""
if os.path.exists(password):
geth_kwargs['password'] = password
command, proc = spawn_geth(dict(
data_dir=data_dir,
suffix_args=['account', 'new'],
**geth_kwargs
))
if os.path.exists(password):
stdoutdata, stderrdata = proc.communicate()
else:
stdoutdata, stderrdata = proc.communicate(b"\n".join((password, password)))
if proc.returncode:
raise ValueError(format_error_message(
"Error trying to create a new account",
command,
proc.returncode,
stdoutdata,
stderrdata,
))
match = account_regex.search(stdoutdata)
if not match:
raise ValueError(format_error_message(
"Did not find an address in process output",
command,
proc.returncode,
stdoutdata,
stderrdata,
))
return b'0x' + match.groups()[0] | [
"def",
"create_new_account",
"(",
"data_dir",
",",
"password",
",",
"*",
"*",
"geth_kwargs",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"password",
")",
":",
"geth_kwargs",
"[",
"'password'",
"]",
"=",
"password",
"command",
",",
"proc",
"=",
"spawn_geth",
"(",
"dict",
"(",
"data_dir",
"=",
"data_dir",
",",
"suffix_args",
"=",
"[",
"'account'",
",",
"'new'",
"]",
",",
"*",
"*",
"geth_kwargs",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"password",
")",
":",
"stdoutdata",
",",
"stderrdata",
"=",
"proc",
".",
"communicate",
"(",
")",
"else",
":",
"stdoutdata",
",",
"stderrdata",
"=",
"proc",
".",
"communicate",
"(",
"b\"\\n\"",
".",
"join",
"(",
"(",
"password",
",",
"password",
")",
")",
")",
"if",
"proc",
".",
"returncode",
":",
"raise",
"ValueError",
"(",
"format_error_message",
"(",
"\"Error trying to create a new account\"",
",",
"command",
",",
"proc",
".",
"returncode",
",",
"stdoutdata",
",",
"stderrdata",
",",
")",
")",
"match",
"=",
"account_regex",
".",
"search",
"(",
"stdoutdata",
")",
"if",
"not",
"match",
":",
"raise",
"ValueError",
"(",
"format_error_message",
"(",
"\"Did not find an address in process output\"",
",",
"command",
",",
"proc",
".",
"returncode",
",",
"stdoutdata",
",",
"stderrdata",
",",
")",
")",
"return",
"b'0x'",
"+",
"match",
".",
"groups",
"(",
")",
"[",
"0",
"]"
]
| 31.722222 | 22.222222 |
def owner_search_fields(self):
"""
Returns all the fields that are CharFields except for password from the
User model. For the built-in User model, that means username,
first_name, last_name, and email.
"""
try:
from django.contrib.auth import get_user_model
except ImportError: # Django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
return [
field.name for field in User._meta.fields
if isinstance(field, models.CharField) and field.name != 'password'
] | [
"def",
"owner_search_fields",
"(",
"self",
")",
":",
"try",
":",
"from",
"django",
".",
"contrib",
".",
"auth",
"import",
"get_user_model",
"except",
"ImportError",
":",
"# Django < 1.5",
"from",
"django",
".",
"contrib",
".",
"auth",
".",
"models",
"import",
"User",
"else",
":",
"User",
"=",
"get_user_model",
"(",
")",
"return",
"[",
"field",
".",
"name",
"for",
"field",
"in",
"User",
".",
"_meta",
".",
"fields",
"if",
"isinstance",
"(",
"field",
",",
"models",
".",
"CharField",
")",
"and",
"field",
".",
"name",
"!=",
"'password'",
"]"
]
| 38.4375 | 17.6875 |
def filter(self, scored_list):
'''
Filtering with top-n ranking.
Args:
            scored_list:    The list of scored items.
        Returns:
            The list of filtered results.
'''
top_n_key = -1 * self.top_n
top_n_list = sorted(scored_list, key=lambda x: x[1])[top_n_key:]
result_list = sorted(top_n_list, key=lambda x: x[0])
return result_list | [
"def",
"filter",
"(",
"self",
",",
"scored_list",
")",
":",
"top_n_key",
"=",
"-",
"1",
"*",
"self",
".",
"top_n",
"top_n_list",
"=",
"sorted",
"(",
"scored_list",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
")",
"[",
"top_n_key",
":",
"]",
"result_list",
"=",
"sorted",
"(",
"top_n_list",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
"return",
"result_list"
]
| 26.6 | 21.4 |
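A hand-check of the top-n logic above, with top_n pinned to 2 and hypothetical (index, score) pairs:

scored_list = [(0, 0.5), (1, 0.9), (2, 0.1)]
top_n = 2
top_n_list = sorted(scored_list, key=lambda x: x[1])[-top_n:]  # two best scores
result_list = sorted(top_n_list, key=lambda x: x[0])           # restore input order
assert result_list == [(0, 0.5), (1, 0.9)]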
def state_id_generator(size=STATE_ID_LENGTH, chars=string.ascii_uppercase, used_state_ids=None):
""" Create a new and unique state id
Generates an id for a state. It randomly samples from random ascii uppercase letters size times
and concatenates them. If the id already exists it draws a new one.
:param size: the length of the generated keys
:param chars: the set of characters a sample draws from
:param list used_state_ids: Handed list of ids already in use
:rtype: str
:return: new_state_id
"""
new_state_id = ''.join(random.choice(chars) for x in range(size))
while used_state_ids is not None and new_state_id in used_state_ids:
new_state_id = ''.join(random.choice(chars) for x in range(size))
return new_state_id | [
"def",
"state_id_generator",
"(",
"size",
"=",
"STATE_ID_LENGTH",
",",
"chars",
"=",
"string",
".",
"ascii_uppercase",
",",
"used_state_ids",
"=",
"None",
")",
":",
"new_state_id",
"=",
"''",
".",
"join",
"(",
"random",
".",
"choice",
"(",
"chars",
")",
"for",
"x",
"in",
"range",
"(",
"size",
")",
")",
"while",
"used_state_ids",
"is",
"not",
"None",
"and",
"new_state_id",
"in",
"used_state_ids",
":",
"new_state_id",
"=",
"''",
".",
"join",
"(",
"random",
".",
"choice",
"(",
"chars",
")",
"for",
"x",
"in",
"range",
"(",
"size",
")",
")",
"return",
"new_state_id"
]
| 47.6875 | 26.875 |
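One draw of the same sampler, with the size pinned to 6 for illustration:

import random
import string

new_state_id = ''.join(random.choice(string.ascii_uppercase) for _ in range(6))
assert len(new_state_id) == 6 and new_state_id.isupper()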
def build_attrs(self, *args, **kwargs):
"""Add select2's tag attributes."""
self.attrs.setdefault('data-minimum-input-length', 1)
self.attrs.setdefault('data-tags', 'true')
self.attrs.setdefault('data-token-separators', '[",", " "]')
return super(Select2TagMixin, self).build_attrs(*args, **kwargs) | [
"def",
"build_attrs",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"attrs",
".",
"setdefault",
"(",
"'data-minimum-input-length'",
",",
"1",
")",
"self",
".",
"attrs",
".",
"setdefault",
"(",
"'data-tags'",
",",
"'true'",
")",
"self",
".",
"attrs",
".",
"setdefault",
"(",
"'data-token-separators'",
",",
"'[\",\", \" \"]'",
")",
"return",
"super",
"(",
"Select2TagMixin",
",",
"self",
")",
".",
"build_attrs",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| 55.5 | 15.333333 |
def multi_packages(self, logins=None, platform=None, package_type=None,
type_=None, access=None):
"""Return the private packages for a given set of usernames/logins."""
logger.debug('')
method = self._multi_packages
new_client = True
try:
# Only the newer versions have extra keywords like `access`
self._anaconda_client_api.user_packages(access='private')
except Exception:
new_client = False
return self._create_worker(method, logins=logins,
platform=platform,
package_type=package_type,
type_=type_, access=access,
new_client=new_client) | [
"def",
"multi_packages",
"(",
"self",
",",
"logins",
"=",
"None",
",",
"platform",
"=",
"None",
",",
"package_type",
"=",
"None",
",",
"type_",
"=",
"None",
",",
"access",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"''",
")",
"method",
"=",
"self",
".",
"_multi_packages",
"new_client",
"=",
"True",
"try",
":",
"# Only the newer versions have extra keywords like `access`",
"self",
".",
"_anaconda_client_api",
".",
"user_packages",
"(",
"access",
"=",
"'private'",
")",
"except",
"Exception",
":",
"new_client",
"=",
"False",
"return",
"self",
".",
"_create_worker",
"(",
"method",
",",
"logins",
"=",
"logins",
",",
"platform",
"=",
"platform",
",",
"package_type",
"=",
"package_type",
",",
"type_",
"=",
"type_",
",",
"access",
"=",
"access",
",",
"new_client",
"=",
"new_client",
")"
]
| 43.333333 | 19.777778 |
async def get_eventhub_info_async(self):
"""
Get details on the specified EventHub async.
:rtype: dict
"""
alt_creds = {
"username": self._auth_config.get("iot_username"),
"password":self._auth_config.get("iot_password")}
try:
mgmt_auth = self._create_auth(**alt_creds)
mgmt_client = AMQPClientAsync(self.mgmt_target, auth=mgmt_auth, debug=self.debug)
await mgmt_client.open_async()
mgmt_msg = Message(application_properties={'name': self.eh_name})
response = await mgmt_client.mgmt_request_async(
mgmt_msg,
constants.READ_OPERATION,
op_type=b'com.microsoft:eventhub',
status_code_field=b'status-code',
description_fields=b'status-description')
eh_info = response.get_data()
output = {}
if eh_info:
output['name'] = eh_info[b'name'].decode('utf-8')
output['type'] = eh_info[b'type'].decode('utf-8')
output['created_at'] = datetime.datetime.fromtimestamp(float(eh_info[b'created_at'])/1000)
output['partition_count'] = eh_info[b'partition_count']
output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']]
return output
finally:
await mgmt_client.close_async() | [
"async",
"def",
"get_eventhub_info_async",
"(",
"self",
")",
":",
"alt_creds",
"=",
"{",
"\"username\"",
":",
"self",
".",
"_auth_config",
".",
"get",
"(",
"\"iot_username\"",
")",
",",
"\"password\"",
":",
"self",
".",
"_auth_config",
".",
"get",
"(",
"\"iot_password\"",
")",
"}",
"try",
":",
"mgmt_auth",
"=",
"self",
".",
"_create_auth",
"(",
"*",
"*",
"alt_creds",
")",
"mgmt_client",
"=",
"AMQPClientAsync",
"(",
"self",
".",
"mgmt_target",
",",
"auth",
"=",
"mgmt_auth",
",",
"debug",
"=",
"self",
".",
"debug",
")",
"await",
"mgmt_client",
".",
"open_async",
"(",
")",
"mgmt_msg",
"=",
"Message",
"(",
"application_properties",
"=",
"{",
"'name'",
":",
"self",
".",
"eh_name",
"}",
")",
"response",
"=",
"await",
"mgmt_client",
".",
"mgmt_request_async",
"(",
"mgmt_msg",
",",
"constants",
".",
"READ_OPERATION",
",",
"op_type",
"=",
"b'com.microsoft:eventhub'",
",",
"status_code_field",
"=",
"b'status-code'",
",",
"description_fields",
"=",
"b'status-description'",
")",
"eh_info",
"=",
"response",
".",
"get_data",
"(",
")",
"output",
"=",
"{",
"}",
"if",
"eh_info",
":",
"output",
"[",
"'name'",
"]",
"=",
"eh_info",
"[",
"b'name'",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
"output",
"[",
"'type'",
"]",
"=",
"eh_info",
"[",
"b'type'",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
"output",
"[",
"'created_at'",
"]",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"float",
"(",
"eh_info",
"[",
"b'created_at'",
"]",
")",
"/",
"1000",
")",
"output",
"[",
"'partition_count'",
"]",
"=",
"eh_info",
"[",
"b'partition_count'",
"]",
"output",
"[",
"'partition_ids'",
"]",
"=",
"[",
"p",
".",
"decode",
"(",
"'utf-8'",
")",
"for",
"p",
"in",
"eh_info",
"[",
"b'partition_ids'",
"]",
"]",
"return",
"output",
"finally",
":",
"await",
"mgmt_client",
".",
"close_async",
"(",
")"
]
| 45.548387 | 20 |
def camelcase_to_lowercase(camelcase_input, python_input=None):
'''
a function to recursively convert data with camelcase key names into lowercase keys
:param camelcase_input: list or dictionary with camelcase keys
:param python_input: [optional] list or dictionary with default lowercase keys in output
:return: dictionary with lowercase key names
'''
if python_input:
if camelcase_input.__class__ != python_input.__class__:
raise ValueError('python_input type %s does not match camelcase_input type %s' % (python_input.__class__, camelcase_input.__class__))
if isinstance(camelcase_input, dict):
return _to_python_dict(camelcase_input, python_input)
elif isinstance(camelcase_input, list):
return _ingest_list(camelcase_input, _to_python_dict, python_input)
else:
return camelcase_input | [
"def",
"camelcase_to_lowercase",
"(",
"camelcase_input",
",",
"python_input",
"=",
"None",
")",
":",
"if",
"python_input",
":",
"if",
"camelcase_input",
".",
"__class__",
"!=",
"python_input",
".",
"__class__",
":",
"raise",
"ValueError",
"(",
"'python_input type %s does not match camelcase_input type %s'",
"%",
"(",
"python_input",
".",
"__class__",
",",
"camelcase_input",
".",
"__class__",
")",
")",
"if",
"isinstance",
"(",
"camelcase_input",
",",
"dict",
")",
":",
"return",
"_to_python_dict",
"(",
"camelcase_input",
",",
"python_input",
")",
"elif",
"isinstance",
"(",
"camelcase_input",
",",
"list",
")",
":",
"return",
"_ingest_list",
"(",
"camelcase_input",
",",
"_to_python_dict",
",",
"python_input",
")",
"else",
":",
"return",
"camelcase_input"
]
| 46.263158 | 30.578947 |
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
:type filename: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
        raise Exception("Cannot use algorithm of type " + str(type(algorithm)))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c | [
"def",
"cluster",
"(",
"self",
",",
"algorithm",
"=",
"None",
")",
":",
"import",
"sklearn",
".",
"base",
"if",
"algorithm",
"is",
"None",
":",
"import",
"sklearn",
".",
"cluster",
"algorithm",
"=",
"sklearn",
".",
"cluster",
".",
"KMeans",
"(",
")",
"elif",
"not",
"isinstance",
"(",
"algorithm",
",",
"sklearn",
".",
"base",
".",
"ClusterMixin",
")",
":",
"raise",
"Exception",
"(",
"\"Cannot use algorithm of type \"",
"+",
"type",
"(",
"algorithm",
")",
")",
"original_shape",
"=",
"self",
".",
"codebook",
".",
"shape",
"self",
".",
"codebook",
".",
"shape",
"=",
"(",
"self",
".",
"_n_columns",
"*",
"self",
".",
"_n_rows",
",",
"self",
".",
"n_dim",
")",
"linear_clusters",
"=",
"algorithm",
".",
"fit_predict",
"(",
"self",
".",
"codebook",
")",
"self",
".",
"codebook",
".",
"shape",
"=",
"original_shape",
"self",
".",
"clusters",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"_n_rows",
",",
"self",
".",
"_n_columns",
")",
",",
"dtype",
"=",
"int",
")",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"linear_clusters",
")",
":",
"self",
".",
"clusters",
"[",
"i",
"//",
"self",
".",
"_n_columns",
",",
"i",
"%",
"self",
".",
"_n_columns",
"]",
"=",
"c"
]
| 51.115385 | 19.846154 |
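The reshape / cluster / restore pattern above, on a toy 2x3 map with an 8-dimensional codebook (all sizes are arbitrary):

import numpy as np
from sklearn.cluster import KMeans

n_rows, n_cols, n_dim = 2, 3, 8
codebook = np.random.rand(n_rows, n_cols, n_dim)
flat = codebook.reshape(n_rows * n_cols, n_dim)             # one row per map node
labels = KMeans(n_clusters=2, n_init=10).fit_predict(flat)  # cluster the nodes
clusters = labels.reshape(n_rows, n_cols)                   # back to map coordinates
assert clusters.shape == (n_rows, n_cols)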
def move_into(self, destination_folder):
    # type: (Folder) -> Folder
"""Move the Folder into a different folder.
This makes the Folder provided a child folder of the destination_folder.
Raises:
AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token.
Args:
destination_folder: A :class:`Folder <pyOutlook.core.folder.Folder>` that should become the parent
Returns:
A new :class:`Folder <pyOutlook.core.folder.Folder>` that is now
inside of the destination_folder.
"""
headers = self.headers
endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/' + self.id + '/move'
payload = '{ "DestinationId": "' + destination_folder.id + '"}'
r = requests.post(endpoint, headers=headers, data=payload)
if check_response(r):
return_folder = r.json()
return self._json_to_folder(self.account, return_folder) | [
"def",
"move_into",
"(",
"self",
",",
"destination_folder",
")",
":",
"# type: (Folder) -> None",
"headers",
"=",
"self",
".",
"headers",
"endpoint",
"=",
"'https://outlook.office.com/api/v2.0/me/MailFolders/'",
"+",
"self",
".",
"id",
"+",
"'/move'",
"payload",
"=",
"'{ \"DestinationId\": \"'",
"+",
"destination_folder",
".",
"id",
"+",
"'\"}'",
"r",
"=",
"requests",
".",
"post",
"(",
"endpoint",
",",
"headers",
"=",
"headers",
",",
"data",
"=",
"payload",
")",
"if",
"check_response",
"(",
"r",
")",
":",
"return_folder",
"=",
"r",
".",
"json",
"(",
")",
"return",
"self",
".",
"_json_to_folder",
"(",
"self",
".",
"account",
",",
"return_folder",
")"
]
| 38.153846 | 28.769231 |
def list(payment):
"""
List all the refunds for a payment.
:param payment: The payment object or the payment id
:type payment: resources.Payment|string
:return: A collection of refunds
:rtype resources.APIResourceCollection
"""
if isinstance(payment, resources.Payment):
payment = payment.id
http_client = HttpClient()
response, _ = http_client.get(routes.url(routes.REFUND_RESOURCE, payment_id=payment))
return resources.APIResourceCollection(resources.Refund, **response) | [
"def",
"list",
"(",
"payment",
")",
":",
"if",
"isinstance",
"(",
"payment",
",",
"resources",
".",
"Payment",
")",
":",
"payment",
"=",
"payment",
".",
"id",
"http_client",
"=",
"HttpClient",
"(",
")",
"response",
",",
"_",
"=",
"http_client",
".",
"get",
"(",
"routes",
".",
"url",
"(",
"routes",
".",
"REFUND_RESOURCE",
",",
"payment_id",
"=",
"payment",
")",
")",
"return",
"resources",
".",
"APIResourceCollection",
"(",
"resources",
".",
"Refund",
",",
"*",
"*",
"response",
")"
]
| 35.0625 | 18.1875 |
def _ep_need_close(self):
"""Peer has closed its end of the session."""
LOG.debug("Session %s close requested - closing...",
self._name)
links = self._links.copy() # may modify _links
for link in links:
link._session_closed() | [
"def",
"_ep_need_close",
"(",
"self",
")",
":",
"LOG",
".",
"debug",
"(",
"\"Session %s close requested - closing...\"",
",",
"self",
".",
"_name",
")",
"links",
"=",
"self",
".",
"_links",
".",
"copy",
"(",
")",
"# may modify _links",
"for",
"link",
"in",
"links",
":",
"link",
".",
"_session_closed",
"(",
")"
]
| 40.285714 | 11.571429 |
def save(self, content):
"""
Save any given content to the instance file.
:param content: (str or bytes)
:return: (None)
"""
# backup existing file if needed
if os.path.exists(self.file_path) and not self.assume_yes:
message = "Overwrite existing {}? (y/n) "
if not confirm(message.format(self.filename)):
self.backup()
# write file
self.output("Saving " + self.filename)
with open(self.file_path, "wb") as handler:
if not isinstance(content, bytes):
content = bytes(content, "utf-8")
handler.write(content)
self.yeah("Done!") | [
"def",
"save",
"(",
"self",
",",
"content",
")",
":",
"# backup existing file if needed",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"file_path",
")",
"and",
"not",
"self",
".",
"assume_yes",
":",
"message",
"=",
"\"Overwrite existing {}? (y/n) \"",
"if",
"not",
"confirm",
"(",
"message",
".",
"format",
"(",
"self",
".",
"filename",
")",
")",
":",
"self",
".",
"backup",
"(",
")",
"# write file",
"self",
".",
"output",
"(",
"\"Saving \"",
"+",
"self",
".",
"filename",
")",
"with",
"open",
"(",
"self",
".",
"file_path",
",",
"\"wb\"",
")",
"as",
"handler",
":",
"if",
"not",
"isinstance",
"(",
"content",
",",
"bytes",
")",
":",
"content",
"=",
"bytes",
"(",
"content",
",",
"\"utf-8\"",
")",
"handler",
".",
"write",
"(",
"content",
")",
"self",
".",
"yeah",
"(",
"\"Done!\"",
")"
]
| 35.631579 | 11.947368 |
def _outliers(self,x):
"""
Compute number of outliers
"""
outliers = self._tukey(x, threshold = 1.5)
return np.size(outliers) | [
"def",
"_outliers",
"(",
"self",
",",
"x",
")",
":",
"outliers",
"=",
"self",
".",
"_tukey",
"(",
"x",
",",
"threshold",
"=",
"1.5",
")",
"return",
"np",
".",
"size",
"(",
"outliers",
")"
]
| 26.666667 | 7 |
def mtf_bitransformer_all_layers_tiny():
"""Test out all the layers on local CPU."""
hparams = mtf_bitransformer_tiny()
hparams.moe_num_experts = 4
hparams.moe_expert_x = 4
hparams.moe_expert_y = 4
hparams.moe_hidden_size = 512
hparams.encoder_layers = [
"self_att", "local_self_att", "moe_1d", "moe_2d", "drd"]
hparams.decoder_layers = [
"self_att", "local_self_att", "enc_att", "moe_1d", "moe_2d", "drd"]
return hparams | [
"def",
"mtf_bitransformer_all_layers_tiny",
"(",
")",
":",
"hparams",
"=",
"mtf_bitransformer_tiny",
"(",
")",
"hparams",
".",
"moe_num_experts",
"=",
"4",
"hparams",
".",
"moe_expert_x",
"=",
"4",
"hparams",
".",
"moe_expert_y",
"=",
"4",
"hparams",
".",
"moe_hidden_size",
"=",
"512",
"hparams",
".",
"encoder_layers",
"=",
"[",
"\"self_att\"",
",",
"\"local_self_att\"",
",",
"\"moe_1d\"",
",",
"\"moe_2d\"",
",",
"\"drd\"",
"]",
"hparams",
".",
"decoder_layers",
"=",
"[",
"\"self_att\"",
",",
"\"local_self_att\"",
",",
"\"enc_att\"",
",",
"\"moe_1d\"",
",",
"\"moe_2d\"",
",",
"\"drd\"",
"]",
"return",
"hparams"
]
| 36.666667 | 12.916667 |
def rvalues(self):
"""
in reversed order
"""
tmp = self
while tmp is not None:
yield tmp.data
tmp = tmp.prev | [
"def",
"rvalues",
"(",
"self",
")",
":",
"tmp",
"=",
"self",
"while",
"tmp",
"is",
"not",
"None",
":",
"yield",
"tmp",
".",
"data",
"tmp",
"=",
"tmp",
".",
"prev"
]
| 20.75 | 12.125 |
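A minimal doubly linked node is enough to exercise the reverse iteration above (the Node class here is a hypothetical container):

class Node:
    def __init__(self, data, prev=None):
        self.data = data
        self.prev = prev

    def rvalues(self):  # same generator as the sample above
        tmp = self
        while tmp is not None:
            yield tmp.data
            tmp = tmp.prev

tail = Node(3, prev=Node(2, prev=Node(1)))
assert list(tail.rvalues()) == [3, 2, 1]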
def read_cache(self):
"""Reads the cached contents into memory.
"""
if os.path.exists(self._cache_file):
self._cache = _read_cache_file(self._cache_file)
else:
self._cache = {} | [
"def",
"read_cache",
"(",
"self",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"_cache_file",
")",
":",
"self",
".",
"_cache",
"=",
"_read_cache_file",
"(",
"self",
".",
"_cache_file",
")",
"else",
":",
"self",
".",
"_cache",
"=",
"{",
"}"
]
| 32.285714 | 11.714286 |
def reweight(self, weight, edges=None, copy=False):
'''Replaces existing edge weights. weight may be a scalar or 1d array.
edges is a mask or index array that specifies a subset of edges to modify'''
if not self.is_weighted():
warnings.warn('Cannot supply weights for unweighted graph; '
'ignoring call to reweight')
return self
if edges is None:
return self._update_edges(weight, copy=copy)
ii, jj = self.pairs()[edges].T
return self.add_edges(ii, jj, weight=weight, symmetric=False, copy=copy) | [
"def",
"reweight",
"(",
"self",
",",
"weight",
",",
"edges",
"=",
"None",
",",
"copy",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"is_weighted",
"(",
")",
":",
"warnings",
".",
"warn",
"(",
"'Cannot supply weights for unweighted graph; '",
"'ignoring call to reweight'",
")",
"return",
"self",
"if",
"edges",
"is",
"None",
":",
"return",
"self",
".",
"_update_edges",
"(",
"weight",
",",
"copy",
"=",
"copy",
")",
"ii",
",",
"jj",
"=",
"self",
".",
"pairs",
"(",
")",
"[",
"edges",
"]",
".",
"T",
"return",
"self",
".",
"add_edges",
"(",
"ii",
",",
"jj",
",",
"weight",
"=",
"weight",
",",
"symmetric",
"=",
"False",
",",
"copy",
"=",
"copy",
")"
]
| 49.727273 | 20.272727 |
def add_library(self, name):
"""Add a library to the database
This method is for adding a library by name (eg: "BuiltIn")
rather than by a file.
"""
libdoc = LibraryDocumentation(name)
if len(libdoc.keywords) > 0:
# FIXME: figure out the path to the library file
collection_id = self.add_collection(None, libdoc.name, libdoc.type,
libdoc.doc, libdoc.version,
libdoc.scope, libdoc.named_args,
libdoc.doc_format)
self._load_keywords(collection_id, libdoc=libdoc) | [
"def",
"add_library",
"(",
"self",
",",
"name",
")",
":",
"libdoc",
"=",
"LibraryDocumentation",
"(",
"name",
")",
"if",
"len",
"(",
"libdoc",
".",
"keywords",
")",
">",
"0",
":",
"# FIXME: figure out the path to the library file",
"collection_id",
"=",
"self",
".",
"add_collection",
"(",
"None",
",",
"libdoc",
".",
"name",
",",
"libdoc",
".",
"type",
",",
"libdoc",
".",
"doc",
",",
"libdoc",
".",
"version",
",",
"libdoc",
".",
"scope",
",",
"libdoc",
".",
"named_args",
",",
"libdoc",
".",
"doc_format",
")",
"self",
".",
"_load_keywords",
"(",
"collection_id",
",",
"libdoc",
"=",
"libdoc",
")"
]
| 48.285714 | 19.785714 |
def intersection(self, other):
""" Calculates the intersection of the two underlying bitarrays and returns
a new bloom filter object."""
if self.capacity != other.capacity or \
self.error_rate != other.error_rate:
raise ValueError("Intersecting filters requires both filters to \
have equal capacity and error rate")
new_bloom = self.copy()
new_bloom.bitarray = new_bloom.bitarray & other.bitarray
return new_bloom | [
"def",
"intersection",
"(",
"self",
",",
"other",
")",
":",
"if",
"self",
".",
"capacity",
"!=",
"other",
".",
"capacity",
"or",
"self",
".",
"error_rate",
"!=",
"other",
".",
"error_rate",
":",
"raise",
"ValueError",
"(",
"\"Intersecting filters requires both filters to \\\nhave equal capacity and error rate\"",
")",
"new_bloom",
"=",
"self",
".",
"copy",
"(",
")",
"new_bloom",
".",
"bitarray",
"=",
"new_bloom",
".",
"bitarray",
"&",
"other",
".",
"bitarray",
"return",
"new_bloom"
]
| 47.7 | 11.5 |
def get_agents(self, addr=True, agent_cls=None, as_coro=False):
"""Get agents from the managed environment.
This is a managing function for the
:py:meth:`~creamas.environment.Environment.get_agents`. Returned
agent list excludes the environment's manager agent (this agent) by
design.
"""
return self.env.get_agents(addr=addr, agent_cls=agent_cls) | [
"def",
"get_agents",
"(",
"self",
",",
"addr",
"=",
"True",
",",
"agent_cls",
"=",
"None",
",",
"as_coro",
"=",
"False",
")",
":",
"return",
"self",
".",
"env",
".",
"get_agents",
"(",
"addr",
"=",
"addr",
",",
"agent_cls",
"=",
"agent_cls",
")"
]
| 44 | 20.444444 |
def encode_offset_fetch_request(cls, group, payloads, from_kafka=False):
"""
Encode an OffsetFetchRequest struct. The request is encoded using
version 0 if from_kafka is false, indicating a request for Zookeeper
offsets. It is encoded using version 1 otherwise, indicating a request
for Kafka offsets.
Arguments:
group: string, the consumer group you are fetching offsets for
payloads: list of OffsetFetchRequestPayload
from_kafka: bool, default False, set True for Kafka-committed offsets
"""
version = 1 if from_kafka else 0
return kafka.protocol.commit.OffsetFetchRequest[version](
consumer_group=group,
topics=[(
topic,
list(topic_payloads.keys()))
for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))]) | [
"def",
"encode_offset_fetch_request",
"(",
"cls",
",",
"group",
",",
"payloads",
",",
"from_kafka",
"=",
"False",
")",
":",
"version",
"=",
"1",
"if",
"from_kafka",
"else",
"0",
"return",
"kafka",
".",
"protocol",
".",
"commit",
".",
"OffsetFetchRequest",
"[",
"version",
"]",
"(",
"consumer_group",
"=",
"group",
",",
"topics",
"=",
"[",
"(",
"topic",
",",
"list",
"(",
"topic_payloads",
".",
"keys",
"(",
")",
")",
")",
"for",
"topic",
",",
"topic_payloads",
"in",
"six",
".",
"iteritems",
"(",
"group_by_topic_and_partition",
"(",
"payloads",
")",
")",
"]",
")"
]
| 47.157895 | 22.842105 |
def _save(self):
""" save a JSON file representation of Tetrad Class for checkpoint"""
## save each attribute as dict
fulldict = copy.deepcopy(self.__dict__)
for i, j in fulldict.items():
if isinstance(j, Params):
fulldict[i] = j.__dict__
fulldumps = json.dumps(fulldict,
sort_keys=False,
indent=4,
separators=(",", ":"),
)
## save to file, make dir if it wasn't made earlier
assemblypath = os.path.join(self.dirs, self.name+".tet.json")
if not os.path.exists(self.dirs):
os.mkdir(self.dirs)
## protect save from interruption
done = 0
while not done:
try:
with open(assemblypath, 'w') as jout:
jout.write(fulldumps)
done = 1
except (KeyboardInterrupt, SystemExit):
print('.')
continue | [
"def",
"_save",
"(",
"self",
")",
":",
"## save each attribute as dict",
"fulldict",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"__dict__",
")",
"for",
"i",
",",
"j",
"in",
"fulldict",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"j",
",",
"Params",
")",
":",
"fulldict",
"[",
"i",
"]",
"=",
"j",
".",
"__dict__",
"fulldumps",
"=",
"json",
".",
"dumps",
"(",
"fulldict",
",",
"sort_keys",
"=",
"False",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"\",\"",
",",
"\":\"",
")",
",",
")",
"## save to file, make dir if it wasn't made earlier",
"assemblypath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dirs",
",",
"self",
".",
"name",
"+",
"\".tet.json\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"dirs",
")",
":",
"os",
".",
"mkdir",
"(",
"self",
".",
"dirs",
")",
"## protect save from interruption",
"done",
"=",
"0",
"while",
"not",
"done",
":",
"try",
":",
"with",
"open",
"(",
"assemblypath",
",",
"'w'",
")",
"as",
"jout",
":",
"jout",
".",
"write",
"(",
"fulldumps",
")",
"done",
"=",
"1",
"except",
"(",
"KeyboardInterrupt",
",",
"SystemExit",
")",
":",
"print",
"(",
"'.'",
")",
"continue"
]
| 35.37931 | 13.137931 |
def AddKeys(self, key_list):
"""Mark additional columns as being part of the superkey.
Supplements the Keys already extracted from the FSM template.
Useful when adding new columns to existing tables.
Note: This will impact attempts to further 'extend' the table as the
superkey must be common between tables for successful extension.
Args:
key_list: list of header entries to be included in the superkey.
Raises:
KeyError: If any entry in list is not a valid header entry.
"""
for keyname in key_list:
if keyname not in self.header:
raise KeyError("'%s'" % keyname)
self._keys = self._keys.union(set(key_list)) | [
"def",
"AddKeys",
"(",
"self",
",",
"key_list",
")",
":",
"for",
"keyname",
"in",
"key_list",
":",
"if",
"keyname",
"not",
"in",
"self",
".",
"header",
":",
"raise",
"KeyError",
"(",
"\"'%s'\"",
"%",
"keyname",
")",
"self",
".",
"_keys",
"=",
"self",
".",
"_keys",
".",
"union",
"(",
"set",
"(",
"key_list",
")",
")"
]
| 40.470588 | 19.764706 |
def rectwidth(self):
"""Calculate :ref:`pysynphot-formula-rectw`.
Returns
-------
ans : float
Bandpass rectangular width.
"""
mywaveunits = self.waveunits.name
self.convert('angstroms')
wave = self.wave
thru = self.throughput
self.convert(mywaveunits)
num = self.trapezoidIntegration(wave, thru)
den = thru.max()
if 0.0 in (num, den):
return 0.0
else:
return num/den | [
"def",
"rectwidth",
"(",
"self",
")",
":",
"mywaveunits",
"=",
"self",
".",
"waveunits",
".",
"name",
"self",
".",
"convert",
"(",
"'angstroms'",
")",
"wave",
"=",
"self",
".",
"wave",
"thru",
"=",
"self",
".",
"throughput",
"self",
".",
"convert",
"(",
"mywaveunits",
")",
"num",
"=",
"self",
".",
"trapezoidIntegration",
"(",
"wave",
",",
"thru",
")",
"den",
"=",
"thru",
".",
"max",
"(",
")",
"if",
"0.0",
"in",
"(",
"num",
",",
"den",
")",
":",
"return",
"0.0",
"else",
":",
"return",
"num",
"/",
"den"
]
| 21.608696 | 18.695652 |
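Numerically, the rectangular width is the integrated throughput divided by the peak throughput. A sketch on a toy top-hat bandpass, with np.trapz (np.trapezoid on NumPy >= 2.0) standing in for trapezoidIntegration:

import numpy as np

wave = np.array([1000.0, 1100.0, 1200.0, 1300.0])  # angstroms
thru = np.array([0.0, 1.0, 1.0, 0.0])
rectw = np.trapz(thru, wave) / thru.max()  # integral / peak
assert rectw == 200.0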
def _send(key, value, metric_type):
"""Send the specified value to the statsd daemon via UDP without a
direct socket connection.
    :param str key: The metric key to report under
    :param str value: The properly formatted statsd metric value
    :param str metric_type: The statsd metric type identifier
"""
if STATSD_PREFIX:
key = '.'.join([STATSD_PREFIX, key])
try:
STATSD_SOCKET.sendto('{0}:{1}|{2}'.format(key,
value,
metric_type).encode(),
STATSD_ADDR)
except socket.error:
LOGGER.exception(SOCKET_ERROR) | [
"def",
"_send",
"(",
"key",
",",
"value",
",",
"metric_type",
")",
":",
"if",
"STATSD_PREFIX",
":",
"key",
"=",
"'.'",
".",
"join",
"(",
"[",
"STATSD_PREFIX",
",",
"key",
"]",
")",
"try",
":",
"STATSD_SOCKET",
".",
"sendto",
"(",
"'{0}:{1}|{2}'",
".",
"format",
"(",
"key",
",",
"value",
",",
"metric_type",
")",
".",
"encode",
"(",
")",
",",
"STATSD_ADDR",
")",
"except",
"socket",
".",
"error",
":",
"LOGGER",
".",
"exception",
"(",
"SOCKET_ERROR",
")"
]
| 35.25 | 16.0625 |
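The payload assembled above is the plain statsd text protocol, key:value|type. For a counter increment (names assumed):

key, value, metric_type = "myapp.requests", 1, "c"
payload = '{0}:{1}|{2}'.format(key, value, metric_type).encode()
assert payload == b"myapp.requests:1|c"  # this is what sendto() ships over UDP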
def save_data(self, filename=None):
"""Save data"""
if filename is None:
filename = self.filename
if filename is None:
filename = getcwd_or_home()
filename, _selfilter = getsavefilename(self, _("Save data"),
filename,
iofunctions.save_filters)
if filename:
self.filename = filename
else:
return False
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QApplication.processEvents()
error_message = self.shellwidget.save_namespace(self.filename)
self.shellwidget._kernel_reply = None
QApplication.restoreOverrideCursor()
QApplication.processEvents()
if error_message is not None:
if 'Some objects could not be saved:' in error_message:
save_data_message = (
_('<b>Some objects could not be saved:</b>')
+ '<br><br><code>{obj_list}</code>'.format(
obj_list=error_message.split(': ')[1]))
else:
save_data_message = _(
'<b>Unable to save current workspace</b>'
'<br><br>Error message:<br>') + error_message
QMessageBox.critical(self, _("Save data"), save_data_message)
self.save_button.setEnabled(self.filename is not None) | [
"def",
"save_data",
"(",
"self",
",",
"filename",
"=",
"None",
")",
":",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"self",
".",
"filename",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"getcwd_or_home",
"(",
")",
"filename",
",",
"_selfilter",
"=",
"getsavefilename",
"(",
"self",
",",
"_",
"(",
"\"Save data\"",
")",
",",
"filename",
",",
"iofunctions",
".",
"save_filters",
")",
"if",
"filename",
":",
"self",
".",
"filename",
"=",
"filename",
"else",
":",
"return",
"False",
"QApplication",
".",
"setOverrideCursor",
"(",
"QCursor",
"(",
"Qt",
".",
"WaitCursor",
")",
")",
"QApplication",
".",
"processEvents",
"(",
")",
"error_message",
"=",
"self",
".",
"shellwidget",
".",
"save_namespace",
"(",
"self",
".",
"filename",
")",
"self",
".",
"shellwidget",
".",
"_kernel_reply",
"=",
"None",
"QApplication",
".",
"restoreOverrideCursor",
"(",
")",
"QApplication",
".",
"processEvents",
"(",
")",
"if",
"error_message",
"is",
"not",
"None",
":",
"if",
"'Some objects could not be saved:'",
"in",
"error_message",
":",
"save_data_message",
"=",
"(",
"_",
"(",
"'<b>Some objects could not be saved:</b>'",
")",
"+",
"'<br><br><code>{obj_list}</code>'",
".",
"format",
"(",
"obj_list",
"=",
"error_message",
".",
"split",
"(",
"': '",
")",
"[",
"1",
"]",
")",
")",
"else",
":",
"save_data_message",
"=",
"_",
"(",
"'<b>Unable to save current workspace</b>'",
"'<br><br>Error message:<br>'",
")",
"+",
"error_message",
"QMessageBox",
".",
"critical",
"(",
"self",
",",
"_",
"(",
"\"Save data\"",
")",
",",
"save_data_message",
")",
"self",
".",
"save_button",
".",
"setEnabled",
"(",
"self",
".",
"filename",
"is",
"not",
"None",
")"
]
| 45.030303 | 16.666667 |
def get_image_tags(self):
"""
Fetches image labels (repository / tags) from Docker.
:return: A dictionary, with image name and tags as the key and the image id as value.
:rtype: dict
"""
current_images = self.images()
tags = {tag: i['Id'] for i in current_images for tag in i['RepoTags']}
return tags | [
"def",
"get_image_tags",
"(",
"self",
")",
":",
"current_images",
"=",
"self",
".",
"images",
"(",
")",
"tags",
"=",
"{",
"tag",
":",
"i",
"[",
"'Id'",
"]",
"for",
"i",
"in",
"current_images",
"for",
"tag",
"in",
"i",
"[",
"'RepoTags'",
"]",
"}",
"return",
"tags"
]
| 35.6 | 21 |
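The flattening comprehension above, run against a stubbed images() payload:

current_images = [{'Id': 'sha256:abc', 'RepoTags': ['app:latest', 'app:1.0']}]
tags = {tag: i['Id'] for i in current_images for tag in i['RepoTags']}
assert tags == {'app:latest': 'sha256:abc', 'app:1.0': 'sha256:abc'}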
def blow_out(self, location=None):
"""
Force any remaining liquid to dispense, by moving
this pipette's plunger to the calibrated `blow_out` position
Notes
-----
If no `location` is passed, the pipette will blow_out
    from its current position.
Parameters
----------
location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`)
The :any:`Placeable` (:any:`Well`) to perform the blow_out.
Can also be a tuple with first item :any:`Placeable`,
second item relative :any:`Vector`
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
>>> p300.aspirate(50).dispense().blow_out() # doctest: +SKIP
"""
if not self.tip_attached:
log.warning("Cannot 'blow out' without a tip attached.")
self.move_to(location)
self.instrument_actuator.set_active_current(self._plunger_current)
self.robot.poses = self.instrument_actuator.move(
self.robot.poses,
x=self._get_plunger_position('blow_out')
)
self.current_volume = 0
return self | [
"def",
"blow_out",
"(",
"self",
",",
"location",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"tip_attached",
":",
"log",
".",
"warning",
"(",
"\"Cannot 'blow out' without a tip attached.\"",
")",
"self",
".",
"move_to",
"(",
"location",
")",
"self",
".",
"instrument_actuator",
".",
"set_active_current",
"(",
"self",
".",
"_plunger_current",
")",
"self",
".",
"robot",
".",
"poses",
"=",
"self",
".",
"instrument_actuator",
".",
"move",
"(",
"self",
".",
"robot",
".",
"poses",
",",
"x",
"=",
"self",
".",
"_get_plunger_position",
"(",
"'blow_out'",
")",
")",
"self",
".",
"current_volume",
"=",
"0",
"return",
"self"
]
| 32.52381 | 22.761905 |
def aes_b64_encrypt(value, secret, block_size=AES.block_size):
""" AES encrypt @value with @secret using the |CFB| mode of AES
with a cryptographically secure initialization vector.
-> (#str) AES encrypted @value
..
from vital.security import aes_encrypt, aes_decrypt
aes_encrypt("Hello, world",
"aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
# -> 'zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw='
aes_decrypt(
"zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw=",
"aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
# -> 'Hello, world'
..
"""
# iv = randstr(block_size * 2, rng=random)
iv = randstr(block_size * 2)
cipher = AES.new(secret[:32], AES.MODE_CFB, iv[:block_size].encode())
return iv + b64encode(cipher.encrypt(
uniorbytes(value, bytes))).decode('utf-8') | [
"def",
"aes_b64_encrypt",
"(",
"value",
",",
"secret",
",",
"block_size",
"=",
"AES",
".",
"block_size",
")",
":",
"# iv = randstr(block_size * 2, rng=random)",
"iv",
"=",
"randstr",
"(",
"block_size",
"*",
"2",
")",
"cipher",
"=",
"AES",
".",
"new",
"(",
"secret",
"[",
":",
"32",
"]",
",",
"AES",
".",
"MODE_CFB",
",",
"iv",
"[",
":",
"block_size",
"]",
".",
"encode",
"(",
")",
")",
"return",
"iv",
"+",
"b64encode",
"(",
"cipher",
".",
"encrypt",
"(",
"uniorbytes",
"(",
"value",
",",
"bytes",
")",
")",
")",
".",
"decode",
"(",
"'utf-8'",
")"
]
| 41.045455 | 17.318182 |