def _create_update_from_cfg(mode='create', uuid=None, vmcfg=None):
    '''
    Create or update a VM from the given configuration.
    '''
    ret = {}
    # write the configuration to a temporary json file
    vmadm_json_file = __salt__['temp.file'](prefix='vmadm-')
    with salt.utils.files.fopen(vmadm_json_file, 'w') as vmadm_json:
        salt.utils.json.dump(vmcfg, vmadm_json)
    # vmadm validate create|update [-f <filename>]
    cmd = 'vmadm validate {mode} {brand} -f {vmadm_json_file}'.format(
        mode=mode,
        brand=get(uuid)['brand'] if uuid is not None else '',
        vmadm_json_file=vmadm_json_file
    )
    res = __salt__['cmd.run_all'](cmd, python_shell=True)
    retcode = res['retcode']
    if retcode != 0:
        ret['Error'] = _exit_status(retcode)
        if 'stderr' in res:
            if res['stderr'][0] == '{':
                ret['Error'] = salt.utils.json.loads(res['stderr'])
            else:
                ret['Error'] = res['stderr']
        return ret
    # vmadm create|update [-f <filename>]
    cmd = 'vmadm {mode} {uuid} -f {vmadm_json_file}'.format(
        mode=mode,
        uuid=uuid if uuid is not None else '',
        vmadm_json_file=vmadm_json_file
    )
    res = __salt__['cmd.run_all'](cmd, python_shell=True)
    retcode = res['retcode']
    if retcode != 0:
        ret['Error'] = _exit_status(retcode)
        if 'stderr' in res:
            if res['stderr'][0] == '{':
                ret['Error'] = salt.utils.json.loads(res['stderr'])
            else:
                ret['Error'] = res['stderr']
        return ret
    else:
        # clean up the json file (only when successful, to help troubleshooting)
        salt.utils.files.safe_rm(vmadm_json_file)
        # return the uuid reported by vmadm
        if res['stderr'].startswith('Successfully created VM'):
            return res['stderr'][24:]
    return True
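
The pattern above (serialize a config to a temporary JSON file, run a CLI validator against it, and parse JSON-or-text from stderr) is easy to reproduce outside of Salt. A minimal standalone sketch using only the standard library, assuming a vmadm-style validator CLI is on the PATH:

import json
import os
import subprocess
import tempfile

def validate_via_tempfile(cfg, validator_cmd=('vmadm', 'validate', 'create')):
    """Write cfg to a temp JSON file and run a validator CLI against it."""
    fd, path = tempfile.mkstemp(prefix='vmadm-', suffix='.json')
    try:
        with os.fdopen(fd, 'w') as fh:
            json.dump(cfg, fh)
        proc = subprocess.run(
            list(validator_cmd) + ['-f', path],
            capture_output=True, text=True
        )
        if proc.returncode != 0:
            # the tool may emit structured JSON on stderr; fall back to raw text
            err = proc.stderr.strip()
            return json.loads(err) if err.startswith('{') else err
        return True
    finally:
        os.remove(path)
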
def add_handles(self, model, forward_handle, backward_handle):
    """
    Recursively register forward/backward hooks on every non-container
    layer in the model, descending into container modules.
    """
    handles_list = []
    for child in model.children():
        if 'nn.modules.container' in str(type(child)):
            handles_list.extend(self.add_handles(child, forward_handle, backward_handle))
        else:
            handles_list.append(child.register_forward_hook(forward_handle))
            handles_list.append(child.register_backward_hook(backward_handle))
    return handles_list
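
A usage sketch of the same hook-registration pattern on a plain torch.nn.Sequential model (one container level, so no recursion is needed here). Note that register_backward_hook is deprecated in recent PyTorch in favor of register_full_backward_hook:

import torch
import torch.nn as nn

def fwd(module, inputs, output):
    print('forward:', type(module).__name__, tuple(output.shape))

def bwd(module, grad_input, grad_output):
    print('backward:', type(module).__name__)

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
handles = []
for child in model.children():  # mirrors the traversal above, one level deep
    handles.append(child.register_forward_hook(fwd))
    handles.append(child.register_backward_hook(bwd))

model(torch.randn(2, 4)).sum().backward()
for h in handles:  # hooks should be removed once no longer needed
    h.remove()
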
def com_google_fonts_check_os2_metrics_match_hhea(ttFont):
    """Checking OS/2 Metrics match hhea Metrics.
    OS/2 and hhea vertical metric values should match. This will produce
    the same linespacing on Mac, GNU+Linux and Windows.
    Mac OS X uses the hhea values.
    Windows uses OS/2 or Win, depending on the OS or fsSelection bit value.
    """
    # OS/2 sTypoAscender and sTypoDescender match hhea ascent and descent
    if ttFont["OS/2"].sTypoAscender != ttFont["hhea"].ascent:
        yield FAIL, Message("ascender",
                            "OS/2 sTypoAscender and hhea ascent must be equal.")
    elif ttFont["OS/2"].sTypoDescender != ttFont["hhea"].descent:
        yield FAIL, Message("descender",
                            "OS/2 sTypoDescender and hhea descent must be equal.")
    else:
        yield PASS, ("OS/2.sTypoAscender/Descender values"
                     " match hhea.ascent/descent.")
def mark_confirmation_as_clear(self, confirmation_id):
    """
    Mark a confirmation as clear.
    :param confirmation_id: the confirmation id
    :return: Response
    """
    return self._create_put_request(
        resource=CONFIRMATIONS,
        billomat_id=confirmation_id,
        command=CLEAR,
    )
def create_response_adu(self, meta_data, response_pdu):
    """ Build response ADU from meta data and response PDU and return it.
    :param meta_data: A dict with meta data.
    :param response_pdu: A bytearray containing the response PDU.
    :return: A bytearray containing the response ADU.
    """
    response_mbap = pack_mbap(
        transaction_id=meta_data['transaction_id'],
        protocol_id=meta_data['protocol_id'],
        length=len(response_pdu) + 1,
        unit_id=meta_data['unit_id']
    )
    return response_mbap + response_pdu
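
pack_mbap is not shown here; for Modbus/TCP it presumably packs the standard 7-byte MBAP header (big-endian transaction id, protocol id, length, unit id), which explains the `length=len(response_pdu) + 1` above: the length field counts the PDU plus the one-byte unit id. A sketch under that assumption:

import struct

def pack_mbap(transaction_id, protocol_id, length, unit_id):
    """Pack a 7-byte Modbus/TCP MBAP header (all fields big-endian)."""
    return struct.pack('>HHHB', transaction_id, protocol_id, length, unit_id)

pdu = b'\x03\x02\x00\x2a'                       # e.g. a Read Holding Registers response
adu = pack_mbap(1, 0, len(pdu) + 1, unit_id=1) + pdu
assert len(adu) == 7 + len(pdu)
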
def freq_mag(magnitudes, completeness, max_mag, binsize=0.2, **kwargs):
    """
    Plot a frequency-magnitude histogram and cumulative density plot.
    Currently this will compute a b-value, for a given completeness.
    B-value is computed by linear fitting to section of curve between
    completeness and max_mag.
    :type magnitudes: list
    :param magnitudes: list of float of magnitudes
    :type completeness: float
    :param completeness: Level to compute the b-value above
    :type max_mag: float
    :param max_mag: Maximum magnitude to try and fit a b-value to
    :type binsize: float
    :param binsize: Width of histogram bins, defaults to 0.2
    :returns: :class:`matplotlib.figure.Figure`
    .. Note::
        See :func:`eqcorrscan.utils.mag_calc.calc_b_value` for a least-squares
        method of estimating completeness and b-value. For estimating maximum
        curvature see :func:`eqcorrscan.utils.mag_calc.calc_max_curv`.
    .. rubric:: Example
    >>> from obspy.clients.fdsn import Client
    >>> from obspy import UTCDateTime
    >>> from eqcorrscan.utils.plotting import freq_mag
    >>> client = Client('IRIS')
    >>> t1 = UTCDateTime('2012-03-26T00:00:00')
    >>> t2 = t1 + (3 * 86400)
    >>> catalog = client.get_events(starttime=t1, endtime=t2, minmagnitude=3)
    >>> magnitudes = [event.preferred_magnitude().mag for event in catalog]
    >>> freq_mag(magnitudes, completeness=4, max_mag=7)  # doctest: +SKIP
    .. plot::
        from obspy.clients.fdsn import Client
        from obspy import UTCDateTime
        from eqcorrscan.utils.plotting import freq_mag
        client = Client('IRIS')
        t1 = UTCDateTime('2012-03-26T00:00:00')
        t2 = t1 + (3 * 86400)
        catalog = client.get_events(starttime=t1, endtime=t2, minmagnitude=3)
        magnitudes = [event.preferred_magnitude().mag for event in catalog]
        freq_mag(magnitudes, completeness=4, max_mag=7)
    """
    import matplotlib.pyplot as plt
    # Ensure magnitudes are sorted
    magnitudes.sort()
    # Check that there are no nans or infs
    if np.isnan(magnitudes).any():
        warnings.warn('Found nan values, removing them')
        magnitudes = [mag for mag in magnitudes if not np.isnan(mag)]
    if np.isinf(magnitudes).any():
        warnings.warn('Found inf values, removing them')
        magnitudes = [mag for mag in magnitudes if not np.isinf(mag)]
    fig, ax1 = plt.subplots()
    # Set up the bins; the bin-size could be a variable
    bins = np.arange(int(min(magnitudes) - 1), int(max(magnitudes) + 1),
                     binsize)
    n, bins, patches = ax1.hist(magnitudes, bins, facecolor='Black',
                                alpha=0.5, label='Magnitudes')
    ax1.set_ylabel('Frequency')
    ax1.set_ylim([0, max(n) + 0.5 * max(n)])
    plt.xlabel('Magnitude')
    # Now make the cumulative density function
    counts = Counter(magnitudes)
    cdf = np.zeros(len(counts))
    mag_steps = np.zeros(len(counts))
    for i, magnitude in enumerate(sorted(counts.keys(), reverse=True)):
        mag_steps[i] = magnitude
        if i > 0:
            cdf[i] = cdf[i - 1] + counts[magnitude]
        else:
            cdf[i] = counts[magnitude]
    ax2 = ax1.twinx()
    ax2.scatter(mag_steps, np.log10(cdf), c='k', marker='+', s=20, lw=2,
                label='Magnitude cumulative density')
    # Now we want to calculate the b-value and plot the fit
    x = []
    y = []
    for i, magnitude in enumerate(mag_steps):
        if completeness <= magnitude <= max_mag:
            x.append(magnitude)
            y.append(cdf[i])
    fit = np.polyfit(x, np.log10(y), 1)
    fit_fn = np.poly1d(fit)
    ax2.plot(magnitudes, fit_fn(magnitudes), '--k',
             label='GR trend, b-value = ' + str(abs(fit[0]))[0:4] +
                   '\n $M_C$ = ' + str(completeness))
    ax2.set_ylabel('$Log_{10}$ of cumulative density')
    plt.xlim([min(magnitudes) - 0.1, max(magnitudes) + 0.2])
    plt.ylim([min(np.log10(cdf)) - 0.5, max(np.log10(cdf)) + 1.0])
    plt.legend(loc=2)
    fig = _finalise_figure(fig=fig, **kwargs)  # pragma: no cover
    return fig
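
The core of the b-value computation is just a linear fit of log10(cumulative count) against magnitude over the [completeness, max_mag] window. A self-contained sketch on a synthetic Gutenberg-Richter catalogue (b = 1 by construction):

import numpy as np

# Synthetic catalogue following Gutenberg-Richter: log10 N(>=M) = a - b*M
rng = np.random.default_rng(42)
b_true = 1.0
mags = np.round(rng.exponential(1.0 / (b_true * np.log(10)), 5000) + 3.0, 1)

# Cumulative counts above each magnitude step, then a linear fit in log10
steps = np.unique(mags)
counts = np.array([(mags >= m).sum() for m in steps])
mask = (steps >= 3.5) & (steps <= 6.0)          # completeness .. max_mag window
slope, intercept = np.polyfit(steps[mask], np.log10(counts[mask]), 1)
print('recovered b-value: %.2f' % abs(slope))   # close to 1.0
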
def restart_service(service_name, minimum_running_time=None):
    '''
    Restart an OpenStack service immediately, or only if it has been running
    longer than the specified value.
    CLI Example:
    .. code-block:: bash
        salt '*' openstack_mng.restart_service neutron
        salt '*' openstack_mng.restart_service neutron minimum_running_time=600
    '''
    if minimum_running_time:
        ret_code = False
        # get the list of system services backing this openstack service
        services = __salt__['cmd.run'](['/usr/bin/openstack-service', 'list', service_name]).split('\n')
        for service in services:
            service_info = __salt__['service.show'](service)
            with salt.utils.files.fopen('/proc/uptime') as rfh:
                boot_time = float(
                    salt.utils.stringutils.to_unicode(
                        rfh.read()
                    ).split(' ')[0]
                )
            expr_time = int(service_info.get('ExecMainStartTimestampMonotonic', 0)) / 1000000 < boot_time - minimum_running_time
            expr_active = service_info.get('ActiveState') == "active"
            if expr_time or not expr_active:
                # restart this specific system service
                ret = __salt__['service.restart'](service)
                if ret:
                    ret_code = True
        return ret_code
    else:
        # just restart
        os_cmd = ['/usr/bin/openstack-service', 'restart', service_name]
        return __salt__['cmd.retcode'](os_cmd) == 0
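
The time test above compares systemd's ExecMainStartTimestampMonotonic (microseconds on the monotonic clock) against the first field of /proc/uptime (seconds since boot, on the same clock). Isolated as a sketch:

# Sketch of the "running long enough?" test: a service started earlier than
# (uptime - minimum_running_time) seconds ago has run long enough.
def running_longer_than(start_usec_monotonic, minimum_running_time):
    with open('/proc/uptime') as fh:
        uptime = float(fh.read().split()[0])          # seconds since boot
    started = start_usec_monotonic / 1_000_000        # convert usec -> sec
    return started < uptime - minimum_running_time

# e.g. a service started 50s after boot, on a host up for 700s,
# has been running 650s:
print(running_longer_than(50_000_000, 600))           # True
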
def syllabify(language, word) :
    '''Syllabifies the word, given a language configuration loaded with loadLanguage.
    word is either a string of phonemes from the CMU pronouncing dictionary set
    (with optional stress numbers after vowels), or a Python list of phonemes,
    e.g. "B AE1 T" or ["B", "AE1", "T"]'''
    if type(word) == str :
        word = word.split()
    syllables = [] # This is the returned data structure.
    internuclei = [] # This maintains a list of phonemes between nuclei.
    for phoneme in word :
        phoneme = phoneme.strip()
        if phoneme == "" :
            continue
        stress = None
        if phoneme[-1].isdigit() :
            stress = int(phoneme[-1])
            phoneme = phoneme[0:-1]
        if phoneme in language["vowels"] :
            # Split the consonants seen since the last nucleus into coda and onset.
            coda = None
            onset = None
            # If there is a period in the input, split there.
            if "." in internuclei :
                period = internuclei.index(".")
                coda = internuclei[:period]
                onset = internuclei[period+1:]
            else :
                # Make the largest onset we can. The 'split' variable marks the break point.
                for split in range(0, len(internuclei)+1) :
                    coda = internuclei[:split]
                    onset = internuclei[split:]
                    # If we are looking at a valid onset, or if we're at the start of the word
                    # (in which case an invalid onset is better than a coda that doesn't follow
                    # a nucleus), or if we've gone through all of the onsets and we didn't find
                    # any that are valid, then split the nonvowels we've seen at this location.
                    if " ".join(onset) in language["onsets"] \
                            or len(syllables) == 0 \
                            or len(onset) == 0 :
                        break
            # Tack the coda onto the coda of the last syllable. Can't do it if this
            # is the first syllable.
            if len(syllables) > 0 :
                syllables[-1][3].extend(coda)
            # Make a new syllable out of the onset and nucleus.
            syllables.append( (stress, onset, [phoneme], []) )
            # At this point we've processed the internuclei list.
            internuclei = []
        elif phoneme not in language["consonants"] and phoneme != "." :
            raise ValueError("Invalid phoneme: " + phoneme)
        else : # a consonant
            internuclei.append(phoneme)
    # Done looping through phonemes. We may have consonants left at the end.
    # We may have even not found a nucleus.
    if len(internuclei) > 0 :
        if len(syllables) == 0 :
            syllables.append( (None, internuclei, [], []) )
        else :
            syllables[-1][3].extend(internuclei)
    return syllables
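
A usage sketch with a hypothetical minimal language config (the real one comes from loadLanguage); only the keys syllabify() reads are filled in, and each returned syllable is a (stress, onset, nucleus, coda) tuple:

english_like = {
    "vowels": {"AE", "IH", "AH"},
    "consonants": {"B", "T", "S", "P", "L", "N", "G"},
    "onsets": {"B", "T", "S", "S P", "S P L", "P L"},
}

print(syllabify(english_like, "B AE1 T"))
# -> [(1, ['B'], ['AE'], ['T'])]
print(syllabify(english_like, ["S", "P", "L", "AE1", "S", "IH0", "N", "G"]))
# -> [(1, ['S', 'P', 'L'], ['AE'], []), (0, ['S'], ['IH'], ['N', 'G'])]
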
def group_remove(groupname,
                 user=None,
                 host=None,
                 port=None,
                 maintenance_db=None,
                 password=None,
                 runas=None):
    '''
    Removes a group from the Postgres server.
    CLI Example:
    .. code-block:: bash
        salt '*' postgres.group_remove 'groupname'
    '''
    return _role_remove(groupname,
                        user=user,
                        host=host,
                        port=port,
                        maintenance_db=maintenance_db,
                        password=password,
                        runas=runas)
def read_metadata_by_name(self, name, metadata_key, caster=None):
    """Read process metadata using a named identity.
    :param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
    :param string metadata_key: The metadata key (e.g. 'pid').
    :param func caster: A casting callable to apply to the read value (e.g. `int`).
    """
    file_path = self._metadata_file_path(name, metadata_key)
    try:
        metadata = read_file(file_path).strip()
        return self._maybe_cast(metadata, caster)
    except (IOError, OSError):
        return None
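
read_file and _maybe_cast are not shown above; presumably _maybe_cast swallows cast failures rather than raising. A minimal sketch of both helpers under that assumption:

def read_file(path):
    with open(path) as fh:
        return fh.read()

def maybe_cast(value, caster=None):
    """Apply caster, falling back to the raw string on failure."""
    if caster is None:
        return value
    try:
        return caster(value)
    except (TypeError, ValueError):
        return value

# e.g. a pid file containing "1234\n":
# maybe_cast(read_file('/tmp/pantsd/pid').strip(), int) -> 1234
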
def delta_E( reactants, products, check_balance=True ):
    """
    Calculate the change in energy for reactants --> products.
    Args:
        reactants (list(vasppy.Calculation)): A list of vasppy.Calculation objects. The initial state.
        products (list(vasppy.Calculation)): A list of vasppy.Calculation objects. The final state.
        check_balance (bool:optional): Check that the reaction stoichiometry is balanced. Default: True.
    Returns:
        (float) The change in energy.
    """
    if check_balance:
        if delta_stoichiometry( reactants, products ) != {}:
            raise ValueError( "reaction is not balanced: {}".format( delta_stoichiometry( reactants, products) ) )
    return sum( [ r.energy for r in products ] ) - sum( [ r.energy for r in reactants ] )
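A usage sketch with stand-in objects: only the .energy attribute matters for the arithmetic, and the balance check is skipped because the mocks carry no stoichiometry (the energies are illustrative, not real DFT output):

from types import SimpleNamespace

h2  = SimpleNamespace(energy=-6.77)   # stand-ins for vasppy.Calculation
o2  = SimpleNamespace(energy=-9.86)
h2o = SimpleNamespace(energy=-14.22)

# 2 H2 + O2 -> 2 H2O, expressed as repeated entries in each list
dE = sum(p.energy for p in [h2o, h2o]) - sum(r.energy for r in [h2, h2, o2])
print(round(dE, 2))   # -5.04, the change in energy for the reaction
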
def get_nbytes(self):
    """ Compute and return the object size in bytes (i.e.: octets)
    A flat dict containing all the object's attributes is first created.
    The size of each attribute is then estimated with np.asarray().nbytes
    Note :
        if the attribute is a tofu object, get_nbytes() is recursive
    Returns
    -------
    total : int
        The total object estimated size, in bytes
    dsize : dict
        A dictionary giving the size of each attribute
    """
    dd = self.to_dict()
    dsize = dd.fromkeys(dd.keys(), 0)
    total = 0
    for k, v in dd.items():
        if issubclass(v.__class__, ToFuObjectBase):
            dsize[k] = v.get_nbytes()[0]
        else:
            dsize[k] = np.asarray(v).nbytes
        total += dsize[k]
    return total, dsize
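
The np.asarray(v).nbytes trick handles heterogeneous attribute types uniformly; a quick demonstration:

import numpy as np

attrs = {'grid': np.zeros((100, 100)), 'label': 'plasma', 'ids': [1, 2, 3]}
dsize = {k: np.asarray(v).nbytes for k, v in attrs.items()}
print(dsize)                  # {'grid': 80000, 'label': 24, 'ids': 24}
print(sum(dsize.values()))    # total estimated size in bytes
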
def support_autoupload_param_password(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    support = ET.SubElement(config, "support", xmlns="urn:brocade.com:mgmt:brocade-ras")
    autoupload_param = ET.SubElement(support, "autoupload-param")
    password = ET.SubElement(autoupload_param, "password")
    password.text = kwargs.pop('password')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
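
The same payload can be built and inspected stand-alone with the standard library's ElementTree (output wrapped here for readability):

import xml.etree.ElementTree as ET

config = ET.Element("config")
support = ET.SubElement(config, "support", xmlns="urn:brocade.com:mgmt:brocade-ras")
autoupload_param = ET.SubElement(support, "autoupload-param")
password = ET.SubElement(autoupload_param, "password")
password.text = "s3cret"
print(ET.tostring(config, encoding="unicode"))
# <config><support xmlns="urn:brocade.com:mgmt:brocade-ras">
#   <autoupload-param><password>s3cret</password></autoupload-param></support></config>
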
def list_tickets(self, open_status=True, closed_status=True):
    """List all tickets.
    :param boolean open_status: include open tickets
    :param boolean closed_status: include closed tickets
    """
    mask = """mask[id, title, assignedUser[firstName, lastName], priority,
           createDate, lastEditDate, accountId, status, updateCount]"""
    call = 'getTickets'
    if not all([open_status, closed_status]):
        if open_status:
            call = 'getOpenTickets'
        elif closed_status:
            call = 'getClosedTickets'
        else:
            raise ValueError("open_status and closed_status cannot both be False")
    return self.client.call('Account', call, mask=mask, iter=True)
def get_int(self, key, default=UndefinedKey):
    """Return int representation of value found at key
    :param key: key to use (dot separated). E.g., a.b.c
    :type key: basestring
    :param default: default value if key not found
    :type default: int
    :return: int value
    :rtype: int
    """
    value = self.get(key, default)
    try:
        return int(value) if value is not None else None
    except (TypeError, ValueError):
        raise ConfigException(
            u"{key} has type '{type}' rather than 'int'".format(key=key, type=type(value).__name__))
def where(cls, **kwargs):
    """
    Returns a generator which yields instances matching the given query
    arguments.
    For example, this would yield all :py:class:`.Project`::
        Project.where()
    And this would yield all launch approved :py:class:`.Project`::
        Project.where(launch_approved=True)
    """
    _id = kwargs.pop('id', '')
    return cls.paginated_results(*cls.http_get(_id, params=kwargs))
def _get_val(other):
    """ Given a Number, a Numeric Constant or a python number return its value
    """
    assert isinstance(other, (numbers.Number, SymbolNUMBER, SymbolCONST))
    if isinstance(other, SymbolNUMBER):
        return other.value
    if isinstance(other, SymbolCONST):
        return other.expr.value
    return other
def add_link(self, link):
    """
    Register a link, keyed by its id, unless a link with the same id
    already exists.
    """
    if link.id_ not in self.__links:
        self.__links[link.id_] = link
def snapshot_name_to_id(name, snap_name, strict=False, runas=None):
    '''
    Attempt to convert a snapshot name to a snapshot ID. If the name is not
    found an empty string is returned. If multiple snapshots share the same
    name, a list will be returned
    :param str name:
        Name/ID of VM whose snapshots are inspected
    :param str snap_name:
        Name of the snapshot
    :param bool strict:
        Raise an exception if multiple snapshot IDs are found
    :param str runas:
        The user that the prlctl command will be run as
    CLI Example:
    .. code-block:: bash
        salt '*' parallels.snapshot_name_to_id macvm original runas=macdev
    '''
    # Validate VM and snapshot names
    name = salt.utils.data.decode(name)
    snap_name = salt.utils.data.decode(snap_name)
    # Get a multiline string containing all the snapshot GUIDs
    info = prlctl('snapshot-list', name, runas=runas)
    # Get a set of all snapshot GUIDs in the string
    snap_ids = _find_guids(info)
    # Try to match the snapshot name to an ID
    named_ids = []
    for snap_id in snap_ids:
        if snapshot_id_to_name(name, snap_id, runas=runas) == snap_name:
            named_ids.append(snap_id)
    # Return one or more IDs having snap_name or raise an error upon
    # non-singular names
    if not named_ids:
        raise SaltInvocationError(
            'No snapshots for VM "{0}" have name "{1}"'.format(name, snap_name)
        )
    elif len(named_ids) == 1:
        return named_ids[0]
    else:
        multi_msg = ('Multiple snapshots for VM "{0}" have name '
                     '"{1}"'.format(name, snap_name))
        if strict:
            raise SaltInvocationError(multi_msg)
        else:
            log.warning(multi_msg)
        return named_ids
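
_find_guids is not shown above; presumably it scans the prlctl output for GUID-shaped substrings. A hypothetical regex-based sketch:

import re

def find_guids(text):
    """Pull unique GUIDs out of multiline CLI output such as
    `prlctl snapshot-list`, with or without surrounding braces."""
    guid_pattern = re.compile(
        r'\{?([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}'
        r'-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})\}?'
    )
    return sorted(set(guid_pattern.findall(text)))

sample = ('PARENT_SNAPSHOT_ID                      SNAPSHOT_ID\n'
          '                                       *{a5b8999f-5d95-4aff-82de-e515b0101b66}')
print(find_guids(sample))  # ['a5b8999f-5d95-4aff-82de-e515b0101b66']
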
def retrieveVals(self):
    """Retrieve values for graphs."""
    fs = FSinfo(self._fshost, self._fsport, self._fspass)
    if self.hasGraph('fs_calls'):
        count = fs.getCallCount()
        self.setGraphVal('fs_calls', 'calls', count)
    if self.hasGraph('fs_channels'):
        count = fs.getChannelCount()
        self.setGraphVal('fs_channels', 'channels', count)
def loadfile(args):
    '''load a log file (path given by arg)'''
    mestate.console.write("Loading %s...\n" % args)
    t0 = time.time()
    mlog = mavutil.mavlink_connection(args, notimestamps=False,
                                      zero_time_base=False,
                                      progress_callback=progress_bar)
    mestate.filename = args
    mestate.mlog = mlog
    mestate.status.msgs = mlog.messages
    t1 = time.time()
    mestate.console.write("\ndone (%u messages in %.1fs)\n" % (mestate.mlog._count, t1-t0))
    global flightmodes
    flightmodes = mlog.flightmode_list()
    load_graphs()
    setup_menus()
def update_properties(self, new_properties):
    """ Update config properties values
    Property name must be equal to 'Section_option' of config property
    :param new_properties: dict with new properties values
    """
    for section in self.sections():
        for option in self.options(section):
            self._update_property_from_dict(section, option, new_properties)
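
A stand-alone sketch of the 'Section_option' naming convention with configparser; update_property_from_dict here is a hypothetical stand-in for the private helper:

import configparser

def update_property_from_dict(cfg, section, option, new_properties):
    # the combined 'Section_option' key selects which values to overwrite
    key = '{}_{}'.format(section, option)
    if key in new_properties:
        cfg.set(section, option, str(new_properties[key]))

cfg = configparser.ConfigParser()
cfg.read_string('[Server]\nhost = localhost\nport = 8000\n')
for section in cfg.sections():
    for option in cfg.options(section):
        update_property_from_dict(cfg, section, option, {'Server_port': 9000})
print(cfg.get('Server', 'port'))  # 9000
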
def get_argument_parser():
    """Function to obtain the argument parser.
    Returns
    -------
    A fully configured `argparse.ArgumentParser` object.
    Notes
    -----
    This function is used by the `sphinx-argparse` extension for sphinx.
    """
    file_mv = cli.file_mv
    desc = 'Find all runs (SRR..) associated with an SRA experiment (SRX...).'
    parser = cli.get_argument_parser(desc=desc)
    parser.add_argument(
        '-e', '--experiment-file', type=str, required=True, metavar=file_mv,
        help='File with SRA experiment IDs (starting with "SRX").'
    )
    parser.add_argument(
        '-o', '--output-file', type=str, required=True, metavar=file_mv,
        help='The output file.'
    )
    cli.add_reporting_args(parser)
    return parser
def lhd(
    dist=None,
    size=None,
    dims=1,
    form="randomized",
    iterations=100,
    showcorrelations=False,
):
    """
    Create a Latin-Hypercube sample design based on distributions defined in the
    `scipy.stats` module
    Parameters
    ----------
    dist: array_like
        frozen scipy.stats.rv_continuous or rv_discrete distribution objects
        that are defined previous to calling LHD
    size: int
        integer value for the number of samples to generate for each
        distribution object
    dims: int, optional
        if dist is a single distribution object, and dims > 1, the one
        distribution will be used to generate a size-by-dims sampled design
    form: str, optional (non-functional at the moment)
        determines how the sampling is to occur, with the following optional
        values:
        - 'randomized' - completely randomized sampling
        - 'spacefilling' - space-filling sampling (generally gives a more
          accurate sampling of the design when the number of sample points
          is small)
        - 'orthogonal' - balanced space-filling sampling (experimental)
        The 'spacefilling' and 'orthogonal' forms require some iterations to
        determine the optimal sampling pattern.
    iterations: int, optional (non-functional at the moment)
        used to control the number of allowable search iterations for generating
        'spacefilling' and 'orthogonal' designs
    Returns
    -------
    out: 2d-array,
        A 2d-array where each column corresponds to each input distribution and
        each row is a sample in the design
    Examples
    --------
    Single distribution:
    - uniform distribution, low = -1, width = 2
    >>> import scipy.stats as ss
    >>> d0 = ss.uniform(loc=-1, scale=2)
    >>> print(lhd(dist=d0, size=5))
    [[ 0.51031081]
     [-0.28961427]
     [-0.68342107]
     [ 0.69784371]
     [ 0.12248842]]
    Single distribution for multiple variables:
    - normal distribution, mean = 0, stdev = 1
    >>> d1 = ss.norm(loc=0, scale=1)
    >>> print(lhd(dist=d1, size=7, dims=5))
    [[-0.8612785   0.23034412  0.21808001]
     [ 0.0455778   0.07001606  0.31586419]
     [-0.978553    0.30394663  0.78483995]
     [-0.26415983  0.15235896  0.51462024]
     [ 0.80805686  0.38891031  0.02076505]
     [ 1.63028931  0.52104917  1.48016008]]
    Multiple distributions:
    - beta distribution, alpha = 2, beta = 5
    - exponential distribution, lambda = 1.5
    >>> d2 = ss.beta(2, 5)
    >>> d3 = ss.expon(scale=1/1.5)
    >>> print(lhd(dist=(d1, d2, d3), size=6))
    [[-0.8612785   0.23034412  0.21808001]
     [ 0.0455778   0.07001606  0.31586419]
     [-0.978553    0.30394663  0.78483995]
     [-0.26415983  0.15235896  0.51462024]
     [ 0.80805686  0.38891031  0.02076505]
     [ 1.63028931  0.52104917  1.48016008]]
    """
    assert dims > 0, 'kwarg "dims" must be at least 1'
    if not size or not dist:
        return None

    def _lhs(x, samples=20):
        """
        _lhs(x) returns a latin-hypercube matrix (each row is a different
        set of sample inputs) using a default sample size of 20 for each column
        of X. X must be a 2xN matrix that contains the lower and upper bounds of
        each column. The lower bound(s) should be in the first row and the upper
        bound(s) should be in the second row.
        _lhs(x,samples=N) uses the sample size of N instead of the default (20).
        Example:
        >>> x = np.array([[0,-1,3],[1,2,6]])
        >>> print(x)
        [[ 0 -1  3]
         [ 1  2  6]]
        >>> print(_lhs(x))
        [[ 0.02989122 -0.93918734  3.14432618]
         [ 0.08869833 -0.82140706  3.19875152]
         [ 0.10627442 -0.66999234  3.33814979]
         [ 0.15202861 -0.44157763  3.57036894]
         [ 0.2067089  -0.34845384  3.66930908]
         [ 0.26542056 -0.23706445  3.76361414]
         [ 0.34201421 -0.00779306  3.90818257]
         [ 0.37891646  0.15458423  4.15031708]
         [ 0.43501575  0.23561118  4.20320064]
         [ 0.4865449   0.36350601  4.45792314]
         [ 0.54804367  0.56069855  4.60911539]
         [ 0.59400712  0.7468415   4.69923486]
         [ 0.63708876  0.9159176   4.83611204]
         [ 0.68819855  0.98596354  4.97659182]
         [ 0.7368695   1.18923511  5.11135111]
         [ 0.78885724  1.28369441  5.2900157 ]
         [ 0.80966513  1.47415703  5.4081971 ]
         [ 0.86196731  1.57844205  5.61067689]
         [ 0.94784517  1.71823504  5.78021164]
         [ 0.96739728  1.94169017  5.88604772]]
        >>> print(_lhs(x, samples=5))
        [[ 0.1949127  -0.54124725  3.49238369]
         [ 0.21128576 -0.13439798  3.65652016]
         [ 0.47516308  0.39957406  4.5797308 ]
         [ 0.64400392  0.90890999  4.92379431]
         [ 0.96279472  1.79415307  5.52028238]]
        """
        # determine the segment size
        segmentSize = 1.0 / samples
        # get the number of dimensions to sample (number of columns)
        numVars = x.shape[1]
        # populate each dimension
        out = np.zeros((samples, numVars))
        pointValue = np.zeros(samples)
        for n in range(numVars):
            for i in range(samples):
                segmentMin = i * segmentSize
                point = segmentMin + (np.random.random() * segmentSize)
                pointValue[i] = (point * (x[1, n] - x[0, n])) + x[0, n]
            out[:, n] = pointValue
        # now randomly arrange the different segments
        return _mix(out)

    def _mix(data, dim="cols"):
        """
        Takes a data matrix and mixes up the values along dim (either "rows" or
        "cols"). In other words, if dim='rows', then each row's data is mixed
        ONLY WITHIN ITSELF. Likewise, if dim='cols', then each column's data is
        mixed ONLY WITHIN ITSELF.
        """
        data = np.atleast_2d(data)
        n = data.shape[0]
        if dim == "rows":
            data = data.T
        data_rank = list(range(n))
        for i in range(data.shape[1]):
            new_data_rank = np.random.permutation(data_rank)
            vals, order = np.unique(
                np.hstack((data_rank, new_data_rank)), return_inverse=True
            )
            old_order = order[:n]
            new_order = order[-n:]
            tmp = data[np.argsort(old_order), i][new_order]
            data[:, i] = tmp[:]
        if dim == "rows":
            data = data.T
        return data

    if form == "randomized":
        if hasattr(dist, "__getitem__"):  # if multiple distributions were input
            nvars = len(dist)
            x = np.vstack((np.zeros(nvars), np.ones(nvars)))
            unif_data = _lhs(x, samples=size)
            dist_data = np.empty_like(unif_data)
            for i, d in enumerate(dist):
                dist_data[:, i] = d.ppf(unif_data[:, i])
        else:  # if a single distribution was input
            nvars = dims
            x = np.vstack((np.zeros(nvars), np.ones(nvars)))
            unif_data = _lhs(x, samples=size)
            dist_data = np.empty_like(unif_data)
            for i in range(nvars):
                dist_data[:, i] = dist.ppf(unif_data[:, i])
    elif form == "spacefilling":

        def euclid_distance(arr):
            n = arr.shape[0]
            ans = 0.0
            for i in range(n - 1):
                for j in range(i + 1, n):
                    d = np.sqrt(
                        np.sum(
                            [(arr[i, k] - arr[j, k]) ** 2 for k in range(arr.shape[1])]
                        )
                    )
                    ans += 1.0 / d ** 2
            return ans

        def fill_space(data):
            best = 1e8
            for it in range(iterations):
                d = euclid_distance(data)
                if d < best:
                    best = d  # track the best (lowest) distance metric seen so far
                    d_opt = d
                    data_opt = data.copy()
                data = _mix(data)
            print("Optimized Distance:", d_opt)
            return data_opt

        if hasattr(dist, "__getitem__"):  # if multiple distributions were input
            nvars = len(dist)
            x = np.vstack((np.zeros(nvars), np.ones(nvars)))
            unif_data = fill_space(_lhs(x, samples=size))
            dist_data = np.empty_like(unif_data)
            for i, d in enumerate(dist):
                dist_data[:, i] = d.ppf(unif_data[:, i])
        else:  # if a single distribution was input
            nvars = dims
            x = np.vstack((np.zeros(nvars), np.ones(nvars)))
            unif_data = fill_space(_lhs(x, samples=size))
            dist_data = np.empty_like(unif_data)
            for i in range(nvars):
                dist_data[:, i] = dist.ppf(unif_data[:, i])
    elif form == "orthogonal":
        raise NotImplementedError(
            "Sorry. The orthogonal space-filling algorithm hasn't been implemented yet."
        )
    else:
        raise ValueError('Invalid "form" value: %s' % (form))

    if dist_data.shape[1] > 1:
        cor_matrix = np.zeros((nvars, nvars))
        for i in range(nvars):
            for j in range(nvars):
                x_data = dist_data[:, i].copy()
                y_data = dist_data[:, j].copy()
                x_mean = x_data.mean()
                y_mean = y_data.mean()
                num = np.sum((x_data - x_mean) * (y_data - y_mean))
                den = np.sqrt(
                    np.sum((x_data - x_mean) ** 2) * np.sum((y_data - y_mean) ** 2)
                )
                cor_matrix[i, j] = num / den
                cor_matrix[j, i] = num / den
        inv_cor_matrix = np.linalg.pinv(cor_matrix)
        VIF = np.max(np.diag(inv_cor_matrix))
        if showcorrelations:
            print("Correlation Matrix:\n", cor_matrix)
            print("Inverted Correlation Matrix:\n", inv_cor_matrix)
            print("Variance Inflation Factor (VIF):", VIF)
    return dist_data
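
The essence of the 'randomized' form is: draw one uniform sample per equal-probability stratum in each column, shuffle each column independently, then map through each distribution's inverse CDF (ppf). A compact self-contained sketch:

import numpy as np
from scipy import stats

def lhs_unit(samples, nvars, rng=None):
    """One uniform draw per equal-width stratum, shuffled per column."""
    rng = rng or np.random.default_rng(0)
    u = (np.arange(samples)[:, None] + rng.random((samples, nvars))) / samples
    for j in range(nvars):
        rng.shuffle(u[:, j])   # break row-wise correlation between columns
    return u

# Map the stratified uniforms through inverse CDFs, as lhd() does
u = lhs_unit(samples=5, nvars=2)
design = np.column_stack([stats.norm(0, 1).ppf(u[:, 0]),
                          stats.beta(2, 5).ppf(u[:, 1])])
print(design)   # 5 samples x 2 variables, one per stratum in each column
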
|
[
"def",
"lhd",
"(",
"dist",
"=",
"None",
",",
"size",
"=",
"None",
",",
"dims",
"=",
"1",
",",
"form",
"=",
"\"randomized\"",
",",
"iterations",
"=",
"100",
",",
"showcorrelations",
"=",
"False",
",",
")",
":",
"assert",
"dims",
">",
"0",
",",
"'kwarg \"dims\" must be at least 1'",
"if",
"not",
"size",
"or",
"not",
"dist",
":",
"return",
"None",
"def",
"_lhs",
"(",
"x",
",",
"samples",
"=",
"20",
")",
":",
"\"\"\"\n _lhs(x) returns a latin-hypercube matrix (each row is a different\n set of sample inputs) using a default sample size of 20 for each column\n of X. X must be a 2xN matrix that contains the lower and upper bounds of\n each column. The lower bound(s) should be in the first row and the upper\n bound(s) should be in the second row.\n \n _lhs(x,samples=N) uses the sample size of N instead of the default (20).\n \n Example:\n >>> x = np.array([[0,-1,3],[1,2,6]])\n >>> print 'x:'; print x\n x:\n [[ 0 -1 3]\n [ 1 2 6]]\n\n >>> print 'lhs(x):'; print _lhs(x)\n lhs(x):\n [[ 0.02989122 -0.93918734 3.14432618]\n [ 0.08869833 -0.82140706 3.19875152]\n [ 0.10627442 -0.66999234 3.33814979]\n [ 0.15202861 -0.44157763 3.57036894]\n [ 0.2067089 -0.34845384 3.66930908]\n [ 0.26542056 -0.23706445 3.76361414]\n [ 0.34201421 -0.00779306 3.90818257]\n [ 0.37891646 0.15458423 4.15031708]\n [ 0.43501575 0.23561118 4.20320064]\n [ 0.4865449 0.36350601 4.45792314]\n [ 0.54804367 0.56069855 4.60911539]\n [ 0.59400712 0.7468415 4.69923486]\n [ 0.63708876 0.9159176 4.83611204]\n [ 0.68819855 0.98596354 4.97659182]\n [ 0.7368695 1.18923511 5.11135111]\n [ 0.78885724 1.28369441 5.2900157 ]\n [ 0.80966513 1.47415703 5.4081971 ]\n [ 0.86196731 1.57844205 5.61067689]\n [ 0.94784517 1.71823504 5.78021164]\n [ 0.96739728 1.94169017 5.88604772]]\n\n >>> print 'lhs(x,samples=5):'; print _lhs(x,samples=5)\n lhs(x,samples=5):\n [[ 0.1949127 -0.54124725 3.49238369]\n [ 0.21128576 -0.13439798 3.65652016]\n [ 0.47516308 0.39957406 4.5797308 ]\n [ 0.64400392 0.90890999 4.92379431]\n [ 0.96279472 1.79415307 5.52028238]] \n \"\"\"",
"# determine the segment size",
"segmentSize",
"=",
"1.0",
"/",
"samples",
"# get the number of dimensions to sample (number of columns)",
"numVars",
"=",
"x",
".",
"shape",
"[",
"1",
"]",
"# populate each dimension",
"out",
"=",
"np",
".",
"zeros",
"(",
"(",
"samples",
",",
"numVars",
")",
")",
"pointValue",
"=",
"np",
".",
"zeros",
"(",
"samples",
")",
"for",
"n",
"in",
"range",
"(",
"numVars",
")",
":",
"for",
"i",
"in",
"range",
"(",
"samples",
")",
":",
"segmentMin",
"=",
"i",
"*",
"segmentSize",
"point",
"=",
"segmentMin",
"+",
"(",
"np",
".",
"random",
".",
"random",
"(",
")",
"*",
"segmentSize",
")",
"pointValue",
"[",
"i",
"]",
"=",
"(",
"point",
"*",
"(",
"x",
"[",
"1",
",",
"n",
"]",
"-",
"x",
"[",
"0",
",",
"n",
"]",
")",
")",
"+",
"x",
"[",
"0",
",",
"n",
"]",
"out",
"[",
":",
",",
"n",
"]",
"=",
"pointValue",
"# now randomly arrange the different segments",
"return",
"_mix",
"(",
"out",
")",
"def",
"_mix",
"(",
"data",
",",
"dim",
"=",
"\"cols\"",
")",
":",
"\"\"\"\n Takes a data matrix and mixes up the values along dim (either \"rows\" or \n \"cols\"). In other words, if dim='rows', then each row's data is mixed\n ONLY WITHIN ITSELF. Likewise, if dim='cols', then each column's data is\n mixed ONLY WITHIN ITSELF.\n \"\"\"",
"data",
"=",
"np",
".",
"atleast_2d",
"(",
"data",
")",
"n",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
"if",
"dim",
"==",
"\"rows\"",
":",
"data",
"=",
"data",
".",
"T",
"data_rank",
"=",
"list",
"(",
"range",
"(",
"n",
")",
")",
"for",
"i",
"in",
"range",
"(",
"data",
".",
"shape",
"[",
"1",
"]",
")",
":",
"new_data_rank",
"=",
"np",
".",
"random",
".",
"permutation",
"(",
"data_rank",
")",
"vals",
",",
"order",
"=",
"np",
".",
"unique",
"(",
"np",
".",
"hstack",
"(",
"(",
"data_rank",
",",
"new_data_rank",
")",
")",
",",
"return_inverse",
"=",
"True",
")",
"old_order",
"=",
"order",
"[",
":",
"n",
"]",
"new_order",
"=",
"order",
"[",
"-",
"n",
":",
"]",
"tmp",
"=",
"data",
"[",
"np",
".",
"argsort",
"(",
"old_order",
")",
",",
"i",
"]",
"[",
"new_order",
"]",
"data",
"[",
":",
",",
"i",
"]",
"=",
"tmp",
"[",
":",
"]",
"if",
"dim",
"==",
"\"rows\"",
":",
"data",
"=",
"data",
".",
"T",
"return",
"data",
"if",
"form",
"is",
"\"randomized\"",
":",
"if",
"hasattr",
"(",
"dist",
",",
"\"__getitem__\"",
")",
":",
"# if multiple distributions were input",
"nvars",
"=",
"len",
"(",
"dist",
")",
"x",
"=",
"np",
".",
"vstack",
"(",
"(",
"np",
".",
"zeros",
"(",
"nvars",
")",
",",
"np",
".",
"ones",
"(",
"nvars",
")",
")",
")",
"unif_data",
"=",
"_lhs",
"(",
"x",
",",
"samples",
"=",
"size",
")",
"dist_data",
"=",
"np",
".",
"empty_like",
"(",
"unif_data",
")",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"dist",
")",
":",
"dist_data",
"[",
":",
",",
"i",
"]",
"=",
"d",
".",
"ppf",
"(",
"unif_data",
"[",
":",
",",
"i",
"]",
")",
"else",
":",
"# if a single distribution was input",
"nvars",
"=",
"dims",
"x",
"=",
"np",
".",
"vstack",
"(",
"(",
"np",
".",
"zeros",
"(",
"nvars",
")",
",",
"np",
".",
"ones",
"(",
"nvars",
")",
")",
")",
"unif_data",
"=",
"_lhs",
"(",
"x",
",",
"samples",
"=",
"size",
")",
"dist_data",
"=",
"np",
".",
"empty_like",
"(",
"unif_data",
")",
"for",
"i",
"in",
"range",
"(",
"nvars",
")",
":",
"dist_data",
"[",
":",
",",
"i",
"]",
"=",
"dist",
".",
"ppf",
"(",
"unif_data",
"[",
":",
",",
"i",
"]",
")",
"elif",
"form",
"is",
"\"spacefilling\"",
":",
"def",
"euclid_distance",
"(",
"arr",
")",
":",
"n",
"=",
"arr",
".",
"shape",
"[",
"0",
"]",
"ans",
"=",
"0.0",
"for",
"i",
"in",
"range",
"(",
"n",
"-",
"1",
")",
":",
"for",
"j",
"in",
"range",
"(",
"i",
"+",
"1",
",",
"n",
")",
":",
"d",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"[",
"(",
"arr",
"[",
"i",
",",
"k",
"]",
"-",
"arr",
"[",
"j",
",",
"k",
"]",
")",
"**",
"2",
"for",
"k",
"in",
"range",
"(",
"arr",
".",
"shape",
"[",
"1",
"]",
")",
"]",
")",
")",
"ans",
"+=",
"1.0",
"/",
"d",
"**",
"2",
"return",
"ans",
"def",
"fill_space",
"(",
"data",
")",
":",
"best",
"=",
"1e8",
"for",
"it",
"in",
"range",
"(",
"iterations",
")",
":",
"d",
"=",
"euclid_distance",
"(",
"data",
")",
"if",
"d",
"<",
"best",
":",
"d_opt",
"=",
"d",
"data_opt",
"=",
"data",
".",
"copy",
"(",
")",
"data",
"=",
"_mix",
"(",
"data",
")",
"print",
"(",
"\"Optimized Distance:\"",
",",
"d_opt",
")",
"return",
"data_opt",
"if",
"hasattr",
"(",
"dist",
",",
"\"__getitem__\"",
")",
":",
"# if multiple distributions were input",
"nvars",
"=",
"len",
"(",
"dist",
")",
"x",
"=",
"np",
".",
"vstack",
"(",
"(",
"np",
".",
"zeros",
"(",
"nvars",
")",
",",
"np",
".",
"ones",
"(",
"nvars",
")",
")",
")",
"unif_data",
"=",
"fill_space",
"(",
"_lhs",
"(",
"x",
",",
"samples",
"=",
"size",
")",
")",
"dist_data",
"=",
"np",
".",
"empty_like",
"(",
"unif_data",
")",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"dist",
")",
":",
"dist_data",
"[",
":",
",",
"i",
"]",
"=",
"d",
".",
"ppf",
"(",
"unif_data",
"[",
":",
",",
"i",
"]",
")",
"else",
":",
"# if a single distribution was input",
"nvars",
"=",
"dims",
"x",
"=",
"np",
".",
"vstack",
"(",
"(",
"np",
".",
"zeros",
"(",
"nvars",
")",
",",
"np",
".",
"ones",
"(",
"nvars",
")",
")",
")",
"unif_data",
"=",
"fill_space",
"(",
"_lhs",
"(",
"x",
",",
"samples",
"=",
"size",
")",
")",
"dist_data",
"=",
"np",
".",
"empty_like",
"(",
"unif_data",
")",
"for",
"i",
"in",
"range",
"(",
"nvars",
")",
":",
"dist_data",
"[",
":",
",",
"i",
"]",
"=",
"dist",
".",
"ppf",
"(",
"unif_data",
"[",
":",
",",
"i",
"]",
")",
"elif",
"form",
"is",
"\"orthogonal\"",
":",
"raise",
"NotImplementedError",
"(",
"\"Sorry. The orthogonal space-filling algorithm hasn't been implemented yet.\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid \"form\" value: %s'",
"%",
"(",
"form",
")",
")",
"if",
"dist_data",
".",
"shape",
"[",
"1",
"]",
">",
"1",
":",
"cor_matrix",
"=",
"np",
".",
"zeros",
"(",
"(",
"nvars",
",",
"nvars",
")",
")",
"for",
"i",
"in",
"range",
"(",
"nvars",
")",
":",
"for",
"j",
"in",
"range",
"(",
"nvars",
")",
":",
"x_data",
"=",
"dist_data",
"[",
":",
",",
"i",
"]",
".",
"copy",
"(",
")",
"y_data",
"=",
"dist_data",
"[",
":",
",",
"j",
"]",
".",
"copy",
"(",
")",
"x_mean",
"=",
"x_data",
".",
"mean",
"(",
")",
"y_mean",
"=",
"y_data",
".",
"mean",
"(",
")",
"num",
"=",
"np",
".",
"sum",
"(",
"(",
"x_data",
"-",
"x_mean",
")",
"*",
"(",
"y_data",
"-",
"y_mean",
")",
")",
"den",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"(",
"x_data",
"-",
"x_mean",
")",
"**",
"2",
")",
"*",
"np",
".",
"sum",
"(",
"(",
"y_data",
"-",
"y_mean",
")",
"**",
"2",
")",
")",
"cor_matrix",
"[",
"i",
",",
"j",
"]",
"=",
"num",
"/",
"den",
"cor_matrix",
"[",
"j",
",",
"i",
"]",
"=",
"num",
"/",
"den",
"inv_cor_matrix",
"=",
"np",
".",
"linalg",
".",
"pinv",
"(",
"cor_matrix",
")",
"VIF",
"=",
"np",
".",
"max",
"(",
"np",
".",
"diag",
"(",
"inv_cor_matrix",
")",
")",
"if",
"showcorrelations",
":",
"print",
"(",
"\"Correlation Matrix:\\n\"",
",",
"cor_matrix",
")",
"print",
"(",
"\"Inverted Correlation Matrix:\\n\"",
",",
"inv_cor_matrix",
")",
"print",
"(",
"\"Variance Inflation Factor (VIF):\"",
",",
"VIF",
")",
"return",
"dist_data"
] | 35.375887 | 18.29078 |
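
The token stream above reconstructs to a Latin-hypercube sampling routine whose final step maps uniform draws through each distribution's inverse CDF (`ppf`). A minimal, self-contained sketch of that inverse-transform step, using scipy frozen distributions as illustrative stand-ins for the `dist` argument:

import numpy as np
from scipy import stats

# Stand-in for the unit-hypercube draws produced by _lhs(); the RNG seed
# and shape are illustrative only.
unif_data = np.random.default_rng(0).uniform(size=(5, 2))

# One frozen distribution per column, as in the multiple-distribution branch.
dist = [stats.norm(0, 1), stats.uniform(10, 5)]

dist_data = np.empty_like(unif_data)
for i, d in enumerate(dist):
    dist_data[:, i] = d.ppf(unif_data[:, i])   # inverse-CDF transform
print(dist_data)
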
def running(name,
restart=False,
update=False,
user=None,
conf_file=None,
bin_env=None,
**kwargs):
'''
Ensure the named service is running.
name
Service name as defined in the supervisor configuration file
restart
Whether to force a restart
update
Whether to update the supervisor configuration.
user
Name of the user to run the supervisorctl command
.. versionadded:: 0.17.0
conf_file
path to supervisorctl config file
bin_env
path to supervisorctl bin or path to virtualenv with supervisor
installed
'''
if name.endswith(':*'):
name = name[:-1]
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if 'supervisord.status' not in __salt__:
ret['result'] = False
ret['comment'] = 'Supervisord module not activated. Do you need to install supervisord?'
return ret
all_processes = __salt__['supervisord.status'](
user=user,
conf_file=conf_file,
bin_env=bin_env
)
# parse process groups
process_groups = set()
for proc in all_processes:
if ':' in proc:
process_groups.add(proc[:proc.index(':') + 1])
process_groups = sorted(process_groups)
matches = {}
if name in all_processes:
matches[name] = (all_processes[name]['state'].lower() == 'running')
elif name in process_groups:
for process in (x for x in all_processes if x.startswith(name)):
matches[process] = (
all_processes[process]['state'].lower() == 'running'
)
to_add = not bool(matches)
if __opts__['test']:
if not to_add:
# Process/group already present, check if any need to be started
to_start = [x for x, y in six.iteritems(matches) if y is False]
if to_start:
ret['result'] = None
if name.endswith(':'):
# Process group
if len(to_start) == len(matches):
ret['comment'] = (
'All services in group \'{0}\' will be started'
.format(name)
)
else:
ret['comment'] = (
'The following services will be started: {0}'
.format(' '.join(to_start))
)
else:
# Single program
ret['comment'] = 'Service {0} will be started'.format(name)
else:
if name.endswith(':'):
# Process group
ret['comment'] = (
'All services in group \'{0}\' are already running'
.format(name)
)
else:
ret['comment'] = ('Service {0} is already running'
.format(name))
else:
ret['result'] = None
# Process/group needs to be added
if name.endswith(':'):
_type = 'Group \'{0}\''.format(name)
else:
_type = 'Service {0}'.format(name)
ret['comment'] = '{0} will be added and started'.format(_type)
return ret
changes = []
just_updated = False
if update:
# If the state explicitly asks to update, we don't care if the process
# is being added or not, since it'll take care of this for us,
# so give this condition priority in order
#
# That is, unless `to_add` somehow manages to contain processes
# we don't want running, in which case adding them may be a mistake
comment = 'Updating supervisor'
result = __salt__['supervisord.update'](
user=user,
conf_file=conf_file,
bin_env=bin_env
)
ret.update(_check_error(result, comment))
log.debug(comment)
if '{0}: updated'.format(name) in result:
just_updated = True
elif to_add:
# Not sure if this condition is precise enough.
comment = 'Adding service: {0}'.format(name)
__salt__['supervisord.reread'](
user=user,
conf_file=conf_file,
bin_env=bin_env
)
# Causes supervisorctl to throw `ERROR: process group already active`
# if process group exists. At this moment, I'm not sure how to handle
# this outside of grepping out the expected string in `_check_error`.
result = __salt__['supervisord.add'](
name,
user=user,
conf_file=conf_file,
bin_env=bin_env
)
ret.update(_check_error(result, comment))
changes.append(comment)
log.debug(comment)
is_stopped = None
process_type = None
if name in process_groups:
process_type = 'group'
# check if any processes in this group are stopped
is_stopped = False
for proc in all_processes:
if proc.startswith(name) \
and _is_stopped_state(all_processes[proc]['state']):
is_stopped = True
break
elif name in all_processes:
process_type = 'service'
if _is_stopped_state(all_processes[name]['state']):
is_stopped = True
else:
is_stopped = False
if is_stopped is False:
if restart and not just_updated:
comment = 'Restarting{0}: {1}'.format(
process_type is not None and ' {0}'.format(process_type) or '',
name
)
log.debug(comment)
result = __salt__['supervisord.restart'](
name,
user=user,
conf_file=conf_file,
bin_env=bin_env
)
ret.update(_check_error(result, comment))
changes.append(comment)
elif just_updated:
comment = 'Not starting updated{0}: {1}'.format(
process_type is not None and ' {0}'.format(process_type) or '',
name
)
result = comment
ret.update({'comment': comment})
else:
comment = 'Not starting already running{0}: {1}'.format(
process_type is not None and ' {0}'.format(process_type) or '',
name
)
result = comment
ret.update({'comment': comment})
elif not just_updated:
comment = 'Starting{0}: {1}'.format(
process_type is not None and ' {0}'.format(process_type) or '',
name
)
changes.append(comment)
log.debug(comment)
result = __salt__['supervisord.start'](
name,
user=user,
conf_file=conf_file,
bin_env=bin_env
)
ret.update(_check_error(result, comment))
log.debug(six.text_type(result))
if ret['result'] and changes:
ret['changes'][name] = ' '.join(changes)
return ret
|
[
"def",
"running",
"(",
"name",
",",
"restart",
"=",
"False",
",",
"update",
"=",
"False",
",",
"user",
"=",
"None",
",",
"conf_file",
"=",
"None",
",",
"bin_env",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"name",
".",
"endswith",
"(",
"':*'",
")",
":",
"name",
"=",
"name",
"[",
":",
"-",
"1",
"]",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"if",
"'supervisord.status'",
"not",
"in",
"__salt__",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Supervisord module not activated. Do you need to install supervisord?'",
"return",
"ret",
"all_processes",
"=",
"__salt__",
"[",
"'supervisord.status'",
"]",
"(",
"user",
"=",
"user",
",",
"conf_file",
"=",
"conf_file",
",",
"bin_env",
"=",
"bin_env",
")",
"# parse process groups",
"process_groups",
"=",
"set",
"(",
")",
"for",
"proc",
"in",
"all_processes",
":",
"if",
"':'",
"in",
"proc",
":",
"process_groups",
".",
"add",
"(",
"proc",
"[",
":",
"proc",
".",
"index",
"(",
"':'",
")",
"+",
"1",
"]",
")",
"process_groups",
"=",
"sorted",
"(",
"process_groups",
")",
"matches",
"=",
"{",
"}",
"if",
"name",
"in",
"all_processes",
":",
"matches",
"[",
"name",
"]",
"=",
"(",
"all_processes",
"[",
"name",
"]",
"[",
"'state'",
"]",
".",
"lower",
"(",
")",
"==",
"'running'",
")",
"elif",
"name",
"in",
"process_groups",
":",
"for",
"process",
"in",
"(",
"x",
"for",
"x",
"in",
"all_processes",
"if",
"x",
".",
"startswith",
"(",
"name",
")",
")",
":",
"matches",
"[",
"process",
"]",
"=",
"(",
"all_processes",
"[",
"process",
"]",
"[",
"'state'",
"]",
".",
"lower",
"(",
")",
"==",
"'running'",
")",
"to_add",
"=",
"not",
"bool",
"(",
"matches",
")",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"if",
"not",
"to_add",
":",
"# Process/group already present, check if any need to be started",
"to_start",
"=",
"[",
"x",
"for",
"x",
",",
"y",
"in",
"six",
".",
"iteritems",
"(",
"matches",
")",
"if",
"y",
"is",
"False",
"]",
"if",
"to_start",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"if",
"name",
".",
"endswith",
"(",
"':'",
")",
":",
"# Process group",
"if",
"len",
"(",
"to_start",
")",
"==",
"len",
"(",
"matches",
")",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'All services in group \\'{0}\\' will be started'",
".",
"format",
"(",
"name",
")",
")",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'The following services will be started: {0}'",
".",
"format",
"(",
"' '",
".",
"join",
"(",
"to_start",
")",
")",
")",
"else",
":",
"# Single program",
"ret",
"[",
"'comment'",
"]",
"=",
"'Service {0} will be started'",
".",
"format",
"(",
"name",
")",
"else",
":",
"if",
"name",
".",
"endswith",
"(",
"':'",
")",
":",
"# Process group",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'All services in group \\'{0}\\' are already running'",
".",
"format",
"(",
"name",
")",
")",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'Service {0} is already running'",
".",
"format",
"(",
"name",
")",
")",
"else",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"# Process/group needs to be added",
"if",
"name",
".",
"endswith",
"(",
"':'",
")",
":",
"_type",
"=",
"'Group \\'{0}\\''",
".",
"format",
"(",
"name",
")",
"else",
":",
"_type",
"=",
"'Service {0}'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"'{0} will be added and started'",
".",
"format",
"(",
"_type",
")",
"return",
"ret",
"changes",
"=",
"[",
"]",
"just_updated",
"=",
"False",
"if",
"update",
":",
"# If the state explicitly asks to update, we don't care if the process",
"# is being added or not, since it'll take care of this for us,",
"# so give this condition priority in order",
"#",
"# That is, unless `to_add` somehow manages to contain processes",
"# we don't want running, in which case adding them may be a mistake",
"comment",
"=",
"'Updating supervisor'",
"result",
"=",
"__salt__",
"[",
"'supervisord.update'",
"]",
"(",
"user",
"=",
"user",
",",
"conf_file",
"=",
"conf_file",
",",
"bin_env",
"=",
"bin_env",
")",
"ret",
".",
"update",
"(",
"_check_error",
"(",
"result",
",",
"comment",
")",
")",
"log",
".",
"debug",
"(",
"comment",
")",
"if",
"'{0}: updated'",
".",
"format",
"(",
"name",
")",
"in",
"result",
":",
"just_updated",
"=",
"True",
"elif",
"to_add",
":",
"# Not sure if this condition is precise enough.",
"comment",
"=",
"'Adding service: {0}'",
".",
"format",
"(",
"name",
")",
"__salt__",
"[",
"'supervisord.reread'",
"]",
"(",
"user",
"=",
"user",
",",
"conf_file",
"=",
"conf_file",
",",
"bin_env",
"=",
"bin_env",
")",
"# Causes supervisorctl to throw `ERROR: process group already active`",
"# if process group exists. At this moment, I'm not sure how to handle",
"# this outside of grepping out the expected string in `_check_error`.",
"result",
"=",
"__salt__",
"[",
"'supervisord.add'",
"]",
"(",
"name",
",",
"user",
"=",
"user",
",",
"conf_file",
"=",
"conf_file",
",",
"bin_env",
"=",
"bin_env",
")",
"ret",
".",
"update",
"(",
"_check_error",
"(",
"result",
",",
"comment",
")",
")",
"changes",
".",
"append",
"(",
"comment",
")",
"log",
".",
"debug",
"(",
"comment",
")",
"is_stopped",
"=",
"None",
"process_type",
"=",
"None",
"if",
"name",
"in",
"process_groups",
":",
"process_type",
"=",
"'group'",
"# check if any processes in this group are stopped",
"is_stopped",
"=",
"False",
"for",
"proc",
"in",
"all_processes",
":",
"if",
"proc",
".",
"startswith",
"(",
"name",
")",
"and",
"_is_stopped_state",
"(",
"all_processes",
"[",
"proc",
"]",
"[",
"'state'",
"]",
")",
":",
"is_stopped",
"=",
"True",
"break",
"elif",
"name",
"in",
"all_processes",
":",
"process_type",
"=",
"'service'",
"if",
"_is_stopped_state",
"(",
"all_processes",
"[",
"name",
"]",
"[",
"'state'",
"]",
")",
":",
"is_stopped",
"=",
"True",
"else",
":",
"is_stopped",
"=",
"False",
"if",
"is_stopped",
"is",
"False",
":",
"if",
"restart",
"and",
"not",
"just_updated",
":",
"comment",
"=",
"'Restarting{0}: {1}'",
".",
"format",
"(",
"process_type",
"is",
"not",
"None",
"and",
"' {0}'",
".",
"format",
"(",
"process_type",
")",
"or",
"''",
",",
"name",
")",
"log",
".",
"debug",
"(",
"comment",
")",
"result",
"=",
"__salt__",
"[",
"'supervisord.restart'",
"]",
"(",
"name",
",",
"user",
"=",
"user",
",",
"conf_file",
"=",
"conf_file",
",",
"bin_env",
"=",
"bin_env",
")",
"ret",
".",
"update",
"(",
"_check_error",
"(",
"result",
",",
"comment",
")",
")",
"changes",
".",
"append",
"(",
"comment",
")",
"elif",
"just_updated",
":",
"comment",
"=",
"'Not starting updated{0}: {1}'",
".",
"format",
"(",
"process_type",
"is",
"not",
"None",
"and",
"' {0}'",
".",
"format",
"(",
"process_type",
")",
"or",
"''",
",",
"name",
")",
"result",
"=",
"comment",
"ret",
".",
"update",
"(",
"{",
"'comment'",
":",
"comment",
"}",
")",
"else",
":",
"comment",
"=",
"'Not starting already running{0}: {1}'",
".",
"format",
"(",
"process_type",
"is",
"not",
"None",
"and",
"' {0}'",
".",
"format",
"(",
"process_type",
")",
"or",
"''",
",",
"name",
")",
"result",
"=",
"comment",
"ret",
".",
"update",
"(",
"{",
"'comment'",
":",
"comment",
"}",
")",
"elif",
"not",
"just_updated",
":",
"comment",
"=",
"'Starting{0}: {1}'",
".",
"format",
"(",
"process_type",
"is",
"not",
"None",
"and",
"' {0}'",
".",
"format",
"(",
"process_type",
")",
"or",
"''",
",",
"name",
")",
"changes",
".",
"append",
"(",
"comment",
")",
"log",
".",
"debug",
"(",
"comment",
")",
"result",
"=",
"__salt__",
"[",
"'supervisord.start'",
"]",
"(",
"name",
",",
"user",
"=",
"user",
",",
"conf_file",
"=",
"conf_file",
",",
"bin_env",
"=",
"bin_env",
")",
"ret",
".",
"update",
"(",
"_check_error",
"(",
"result",
",",
"comment",
")",
")",
"log",
".",
"debug",
"(",
"six",
".",
"text_type",
"(",
"result",
")",
")",
"if",
"ret",
"[",
"'result'",
"]",
"and",
"changes",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"name",
"]",
"=",
"' '",
".",
"join",
"(",
"changes",
")",
"return",
"ret"
] | 31.710407 | 20.244344 |
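
A standalone sketch of the name normalization and group matching performed at the top of `running` (the process names and states below are illustrative):

all_processes = {'web:web_0': {'state': 'RUNNING'},
                 'web:web_1': {'state': 'STOPPED'},
                 'db': {'state': 'RUNNING'}}

name = 'web:*'
if name.endswith(':*'):
    name = name[:-1]                       # 'web:*' -> 'web:'

# group names keep the trailing colon, e.g. 'web:'
process_groups = {p[:p.index(':') + 1] for p in all_processes if ':' in p}

if name in process_groups:
    matches = {p: all_processes[p]['state'].lower() == 'running'
               for p in all_processes if p.startswith(name)}
else:
    matches = {}
print(matches)   # {'web:web_0': True, 'web:web_1': False}
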
def cheby_op(G, c, signal, **kwargs):
r"""
Chebyshev polynomial of graph Laplacian applied to vector.
Parameters
----------
G : Graph
c : ndarray or list of ndarrays
Chebyshev coefficients for a Filter or a Filterbank
signal : ndarray
Signal to filter
Returns
-------
r : ndarray
Result of the filtering
"""
# Handle if we do not have a list of filters but only a simple filter in cheby_coeff.
if not isinstance(c, np.ndarray):
c = np.array(c)
c = np.atleast_2d(c)
Nscales, M = c.shape
if M < 2:
raise TypeError("The coefficients have an invalid shape")
# thanks to that, we can also have 1d signal.
try:
Nv = np.shape(signal)[1]
r = np.zeros((G.N * Nscales, Nv))
except IndexError:
r = np.zeros((G.N * Nscales))
a_arange = [0, G.lmax]
a1 = float(a_arange[1] - a_arange[0]) / 2.
a2 = float(a_arange[1] + a_arange[0]) / 2.
twf_old = signal
twf_cur = (G.L.dot(signal) - a2 * signal) / a1
tmpN = np.arange(G.N, dtype=int)
for i in range(Nscales):
r[tmpN + G.N*i] = 0.5 * c[i, 0] * twf_old + c[i, 1] * twf_cur
factor = 2/a1 * (G.L - a2 * sparse.eye(G.N))
for k in range(2, M):
twf_new = factor.dot(twf_cur) - twf_old
for i in range(Nscales):
r[tmpN + G.N*i] += c[i, k] * twf_new
twf_old = twf_cur
twf_cur = twf_new
return r
|
[
"def",
"cheby_op",
"(",
"G",
",",
"c",
",",
"signal",
",",
"*",
"*",
"kwargs",
")",
":",
"# Handle if we do not have a list of filters but only a simple filter in cheby_coeff.",
"if",
"not",
"isinstance",
"(",
"c",
",",
"np",
".",
"ndarray",
")",
":",
"c",
"=",
"np",
".",
"array",
"(",
"c",
")",
"c",
"=",
"np",
".",
"atleast_2d",
"(",
"c",
")",
"Nscales",
",",
"M",
"=",
"c",
".",
"shape",
"if",
"M",
"<",
"2",
":",
"raise",
"TypeError",
"(",
"\"The coefficients have an invalid shape\"",
")",
"# thanks to that, we can also have 1d signal.",
"try",
":",
"Nv",
"=",
"np",
".",
"shape",
"(",
"signal",
")",
"[",
"1",
"]",
"r",
"=",
"np",
".",
"zeros",
"(",
"(",
"G",
".",
"N",
"*",
"Nscales",
",",
"Nv",
")",
")",
"except",
"IndexError",
":",
"r",
"=",
"np",
".",
"zeros",
"(",
"(",
"G",
".",
"N",
"*",
"Nscales",
")",
")",
"a_arange",
"=",
"[",
"0",
",",
"G",
".",
"lmax",
"]",
"a1",
"=",
"float",
"(",
"a_arange",
"[",
"1",
"]",
"-",
"a_arange",
"[",
"0",
"]",
")",
"/",
"2.",
"a2",
"=",
"float",
"(",
"a_arange",
"[",
"1",
"]",
"+",
"a_arange",
"[",
"0",
"]",
")",
"/",
"2.",
"twf_old",
"=",
"signal",
"twf_cur",
"=",
"(",
"G",
".",
"L",
".",
"dot",
"(",
"signal",
")",
"-",
"a2",
"*",
"signal",
")",
"/",
"a1",
"tmpN",
"=",
"np",
".",
"arange",
"(",
"G",
".",
"N",
",",
"dtype",
"=",
"int",
")",
"for",
"i",
"in",
"range",
"(",
"Nscales",
")",
":",
"r",
"[",
"tmpN",
"+",
"G",
".",
"N",
"*",
"i",
"]",
"=",
"0.5",
"*",
"c",
"[",
"i",
",",
"0",
"]",
"*",
"twf_old",
"+",
"c",
"[",
"i",
",",
"1",
"]",
"*",
"twf_cur",
"factor",
"=",
"2",
"/",
"a1",
"*",
"(",
"G",
".",
"L",
"-",
"a2",
"*",
"sparse",
".",
"eye",
"(",
"G",
".",
"N",
")",
")",
"for",
"k",
"in",
"range",
"(",
"2",
",",
"M",
")",
":",
"twf_new",
"=",
"factor",
".",
"dot",
"(",
"twf_cur",
")",
"-",
"twf_old",
"for",
"i",
"in",
"range",
"(",
"Nscales",
")",
":",
"r",
"[",
"tmpN",
"+",
"G",
".",
"N",
"*",
"i",
"]",
"+=",
"c",
"[",
"i",
",",
"k",
"]",
"*",
"twf_new",
"twf_old",
"=",
"twf_cur",
"twf_cur",
"=",
"twf_new",
"return",
"r"
] | 24.701754 | 21.140351 |
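
A self-contained sketch of the same three-term Chebyshev recurrence on a tiny path-graph Laplacian (the graph, the `lmax` bound, and the coefficients are illustrative stand-ins, not pygsp's own objects):

import numpy as np
from scipy import sparse

N = 4
L = sparse.csr_matrix(np.array([[ 1., -1.,  0.,  0.],
                                [-1.,  2., -1.,  0.],
                                [ 0., -1.,  2., -1.],
                                [ 0.,  0., -1.,  1.]]))
lmax = 4.0                         # assumed upper bound on the spectrum
c = np.array([[1.0, 0.5, 0.25]])   # one filter, three coefficients
signal = np.ones(N)

a1 = a2 = lmax / 2.                # maps [0, lmax] onto [-1, 1]
twf_old = signal
twf_cur = (L.dot(signal) - a2 * signal) / a1
r = 0.5 * c[0, 0] * twf_old + c[0, 1] * twf_cur
factor = 2 / a1 * (L - a2 * sparse.eye(N))
for k in range(2, c.shape[1]):
    twf_new = factor.dot(twf_cur) - twf_old
    r += c[0, k] * twf_new
    twf_old, twf_cur = twf_cur, twf_new
print(r)
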
def receive(self,arg_formats=None):
"""
        Receive commands coming off the serial port.
        arg_formats is an optional keyword that specifies the formats to use to
        parse incoming arguments. If specified here, arg_formats supersedes
the formats specified on initialization.
"""
# Read serial input until a command separator or empty character is
# reached
msg = [[]]
raw_msg = []
escaped = False
command_sep_found = False
while True:
tmp = self.board.read()
raw_msg.append(tmp)
if escaped:
# Either drop the escape character or, if this wasn't really
# an escape, keep previous escape character and new character
if tmp in self._escaped_characters:
msg[-1].append(tmp)
escaped = False
else:
msg[-1].append(self._byte_escape_sep)
msg[-1].append(tmp)
escaped = False
else:
# look for escape character
if tmp == self._byte_escape_sep:
escaped = True
# or field separator
elif tmp == self._byte_field_sep:
msg.append([])
# or command separator
elif tmp == self._byte_command_sep:
command_sep_found = True
break
                # or any empty character
elif tmp == b'':
break
# okay, must be something
else:
msg[-1].append(tmp)
# No message received given timeouts
if len(msg) == 1 and len(msg[0]) == 0:
return None
# Make sure the message terminated properly
if not command_sep_found:
# empty message (likely from line endings being included)
joined_raw = b''.join(raw_msg)
if joined_raw.strip() == b'':
return None
err = "Incomplete message ({})".format(joined_raw.decode())
raise EOFError(err)
# Turn message into fields
fields = [b''.join(m) for m in msg]
# Get the command name.
cmd = fields[0].strip().decode()
try:
cmd_name = self._int_to_cmd_name[int(cmd)]
except (ValueError,IndexError):
if self.give_warnings:
cmd_name = "unknown"
w = "Recieved unrecognized command ({}).".format(cmd)
warnings.warn(w,Warning)
# Figure out what formats to use for each argument.
arg_format_list = []
if arg_formats != None:
# The user specified formats
arg_format_list = list(arg_formats)
else:
try:
# See if class was initialized with a format for arguments to this
# command
arg_format_list = self._cmd_name_to_format[cmd_name]
except KeyError:
# if not, guess for all arguments
arg_format_list = ["g" for i in range(len(fields[1:]))]
# Deal with "*" format
arg_format_list = self._treat_star_format(arg_format_list,fields[1:])
if len(fields[1:]) > 0:
if len(arg_format_list) != len(fields[1:]):
err = "Number of argument formats must match the number of recieved arguments."
raise ValueError(err)
received = []
for i, f in enumerate(fields[1:]):
received.append(self._recv_methods[arg_format_list[i]](f))
# Record the time the message arrived
message_time = time.time()
return cmd_name, received, message_time
|
[
"def",
"receive",
"(",
"self",
",",
"arg_formats",
"=",
"None",
")",
":",
"# Read serial input until a command separator or empty character is",
"# reached ",
"msg",
"=",
"[",
"[",
"]",
"]",
"raw_msg",
"=",
"[",
"]",
"escaped",
"=",
"False",
"command_sep_found",
"=",
"False",
"while",
"True",
":",
"tmp",
"=",
"self",
".",
"board",
".",
"read",
"(",
")",
"raw_msg",
".",
"append",
"(",
"tmp",
")",
"if",
"escaped",
":",
"# Either drop the escape character or, if this wasn't really",
"# an escape, keep previous escape character and new character",
"if",
"tmp",
"in",
"self",
".",
"_escaped_characters",
":",
"msg",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"tmp",
")",
"escaped",
"=",
"False",
"else",
":",
"msg",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"self",
".",
"_byte_escape_sep",
")",
"msg",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"tmp",
")",
"escaped",
"=",
"False",
"else",
":",
"# look for escape character",
"if",
"tmp",
"==",
"self",
".",
"_byte_escape_sep",
":",
"escaped",
"=",
"True",
"# or field separator",
"elif",
"tmp",
"==",
"self",
".",
"_byte_field_sep",
":",
"msg",
".",
"append",
"(",
"[",
"]",
")",
"# or command separator",
"elif",
"tmp",
"==",
"self",
".",
"_byte_command_sep",
":",
"command_sep_found",
"=",
"True",
"break",
"# or any empty characater ",
"elif",
"tmp",
"==",
"b''",
":",
"break",
"# okay, must be something",
"else",
":",
"msg",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"tmp",
")",
"# No message received given timeouts",
"if",
"len",
"(",
"msg",
")",
"==",
"1",
"and",
"len",
"(",
"msg",
"[",
"0",
"]",
")",
"==",
"0",
":",
"return",
"None",
"# Make sure the message terminated properly",
"if",
"not",
"command_sep_found",
":",
"# empty message (likely from line endings being included) ",
"joined_raw",
"=",
"b''",
".",
"join",
"(",
"raw_msg",
")",
"if",
"joined_raw",
".",
"strip",
"(",
")",
"==",
"b''",
":",
"return",
"None",
"err",
"=",
"\"Incomplete message ({})\"",
".",
"format",
"(",
"joined_raw",
".",
"decode",
"(",
")",
")",
"raise",
"EOFError",
"(",
"err",
")",
"# Turn message into fields",
"fields",
"=",
"[",
"b''",
".",
"join",
"(",
"m",
")",
"for",
"m",
"in",
"msg",
"]",
"# Get the command name.",
"cmd",
"=",
"fields",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"decode",
"(",
")",
"try",
":",
"cmd_name",
"=",
"self",
".",
"_int_to_cmd_name",
"[",
"int",
"(",
"cmd",
")",
"]",
"except",
"(",
"ValueError",
",",
"IndexError",
")",
":",
"if",
"self",
".",
"give_warnings",
":",
"cmd_name",
"=",
"\"unknown\"",
"w",
"=",
"\"Recieved unrecognized command ({}).\"",
".",
"format",
"(",
"cmd",
")",
"warnings",
".",
"warn",
"(",
"w",
",",
"Warning",
")",
"# Figure out what formats to use for each argument. ",
"arg_format_list",
"=",
"[",
"]",
"if",
"arg_formats",
"!=",
"None",
":",
"# The user specified formats",
"arg_format_list",
"=",
"list",
"(",
"arg_formats",
")",
"else",
":",
"try",
":",
"# See if class was initialized with a format for arguments to this",
"# command",
"arg_format_list",
"=",
"self",
".",
"_cmd_name_to_format",
"[",
"cmd_name",
"]",
"except",
"KeyError",
":",
"# if not, guess for all arguments",
"arg_format_list",
"=",
"[",
"\"g\"",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"fields",
"[",
"1",
":",
"]",
")",
")",
"]",
"# Deal with \"*\" format ",
"arg_format_list",
"=",
"self",
".",
"_treat_star_format",
"(",
"arg_format_list",
",",
"fields",
"[",
"1",
":",
"]",
")",
"if",
"len",
"(",
"fields",
"[",
"1",
":",
"]",
")",
">",
"0",
":",
"if",
"len",
"(",
"arg_format_list",
")",
"!=",
"len",
"(",
"fields",
"[",
"1",
":",
"]",
")",
":",
"err",
"=",
"\"Number of argument formats must match the number of recieved arguments.\"",
"raise",
"ValueError",
"(",
"err",
")",
"received",
"=",
"[",
"]",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"fields",
"[",
"1",
":",
"]",
")",
":",
"received",
".",
"append",
"(",
"self",
".",
"_recv_methods",
"[",
"arg_format_list",
"[",
"i",
"]",
"]",
"(",
"f",
")",
")",
"# Record the time the message arrived",
"message_time",
"=",
"time",
".",
"time",
"(",
")",
"return",
"cmd_name",
",",
"received",
",",
"message_time"
] | 32.652174 | 18.878261 |
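
A self-contained sketch of the escape/field/command parsing loop, assuming ',' as the field separator, ';' as the command separator, and '/' as the escape byte (those defaults are an assumption, not confirmed by this excerpt):

import io

ESC, FIELD, CMD = b'/', b',', b';'
ESCAPED = {ESC, FIELD, CMD}           # bytes that may be escaped

buf = io.BytesIO(b'5,hello/,world;')  # stand-in for the serial board
msg, escaped = [[]], False
while True:
    tmp = buf.read(1)
    if escaped:
        if tmp in ESCAPED:
            msg[-1].append(tmp)       # drop the escape byte
        else:
            msg[-1].extend([ESC, tmp])
        escaped = False
    elif tmp == ESC:
        escaped = True
    elif tmp == FIELD:
        msg.append([])
    elif tmp in (CMD, b''):
        break
    else:
        msg[-1].append(tmp)

fields = [b''.join(m) for m in msg]
print(fields)   # [b'5', b'hello,world']
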
def upload(self, local_path, remote_url):
"""Copy a local file to an S3 location."""
bucket, key = _parse_url(remote_url)
with open(local_path, 'rb') as fp:
return self.call("PutObject", bucket=bucket, key=key, body=fp)
|
[
"def",
"upload",
"(",
"self",
",",
"local_path",
",",
"remote_url",
")",
":",
"bucket",
",",
"key",
"=",
"_parse_url",
"(",
"remote_url",
")",
"with",
"open",
"(",
"local_path",
",",
"'rb'",
")",
"as",
"fp",
":",
"return",
"self",
".",
"call",
"(",
"\"PutObject\"",
",",
"bucket",
"=",
"bucket",
",",
"key",
"=",
"key",
",",
"body",
"=",
"fp",
")"
] | 41.833333 | 13.5 |
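
`_parse_url` is not shown in this excerpt; a plausible stand-in that splits an s3:// URL into bucket and key (the helper name and behavior here are assumptions):

from urllib.parse import urlparse

def parse_s3_url(url):
    parts = urlparse(url)              # e.g. s3://my-bucket/path/to/key
    return parts.netloc, parts.path.lstrip('/')

bucket, key = parse_s3_url('s3://my-bucket/data/report.csv')
print(bucket, key)   # my-bucket data/report.csv
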
def register_wait(self, task, event_details=None):
""" :meth:`.WSimpleTrackerStorage.register_wait` method implementation
"""
if self.record_wait() is True:
record_type = WTrackerEvents.wait
record = WSimpleTrackerStorage.Record(record_type, task, event_details=event_details)
self.__store_record(record)
|
[
"def",
"register_wait",
"(",
"self",
",",
"task",
",",
"event_details",
"=",
"None",
")",
":",
"if",
"self",
".",
"record_wait",
"(",
")",
"is",
"True",
":",
"record_type",
"=",
"WTrackerEvents",
".",
"wait",
"record",
"=",
"WSimpleTrackerStorage",
".",
"Record",
"(",
"record_type",
",",
"task",
",",
"event_details",
"=",
"event_details",
")",
"self",
".",
"__store_record",
"(",
"record",
")"
] | 44.714286 | 11.428571 |
def count_selfintersections(self):
""" Get the number of self-intersections of this polygonal chain."""
# This can be solved more efficiently with sweep line
counter = 0
for i, j in itertools.combinations(range(len(self.lineSegments)), 2):
inters = get_segments_intersections(self.lineSegments[i],
self.lineSegments[j])
if abs(i-j) > 1 and len(inters) > 0:
counter += 1
return counter
|
[
"def",
"count_selfintersections",
"(",
"self",
")",
":",
"# This can be solved more efficiently with sweep line",
"counter",
"=",
"0",
"for",
"i",
",",
"j",
"in",
"itertools",
".",
"combinations",
"(",
"range",
"(",
"len",
"(",
"self",
".",
"lineSegments",
")",
")",
",",
"2",
")",
":",
"inters",
"=",
"get_segments_intersections",
"(",
"self",
".",
"lineSegments",
"[",
"i",
"]",
",",
"self",
".",
"lineSegments",
"[",
"j",
"]",
")",
"if",
"abs",
"(",
"i",
"-",
"j",
")",
">",
"1",
"and",
"len",
"(",
"inters",
")",
">",
"0",
":",
"counter",
"+=",
"1",
"return",
"counter"
] | 50.3 | 18.1 |
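
`get_segments_intersections` is defined elsewhere; a self-contained sketch of the same counting idea using a standard orientation test (helper names and the sample polyline are illustrative):

import itertools

def ccw(a, b, c):
    return (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])

def segments_cross(p1, p2, p3, p4):
    # strict crossing of the open segments (p1, p2) and (p3, p4)
    return (ccw(p1, p2, p3) * ccw(p1, p2, p4) < 0 and
            ccw(p3, p4, p1) * ccw(p3, p4, p2) < 0)

points = [(0, 0), (2, 2), (2, 0), (0, 2)]        # a 'Z' that crosses itself
segs = list(zip(points, points[1:]))
count = sum(1 for i, j in itertools.combinations(range(len(segs)), 2)
            if abs(i - j) > 1 and segments_cross(*segs[i], *segs[j]))
print(count)   # 1
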
def execute_locally(self):
"""Runs the equivalent command locally in a blocking way."""
# Make script file #
self.make_script()
# Do it #
with open(self.kwargs['out_file'], 'w') as handle:
sh.python(self.script_path, _out=handle, _err=handle)
|
[
"def",
"execute_locally",
"(",
"self",
")",
":",
"# Make script file #",
"self",
".",
"make_script",
"(",
")",
"# Do it #",
"with",
"open",
"(",
"self",
".",
"kwargs",
"[",
"'out_file'",
"]",
",",
"'w'",
")",
"as",
"handle",
":",
"sh",
".",
"python",
"(",
"self",
".",
"script_path",
",",
"_out",
"=",
"handle",
",",
"_err",
"=",
"handle",
")"
] | 41.142857 | 15.142857 |
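
A minimal sketch of the blocking `sh` call pattern used above (the script path and log name are illustrative):

import sh

with open('run.log', 'w') as handle:
    sh.python('myscript.py', _out=handle, _err=handle)   # blocks until done
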
def cleaned_selector(html):
""" Clean parsel.selector.
"""
import parsel
try:
tree = _cleaned_html_tree(html)
sel = parsel.Selector(root=tree, type='html')
except (lxml.etree.XMLSyntaxError,
lxml.etree.ParseError,
lxml.etree.ParserError,
UnicodeEncodeError):
# likely plain text
sel = parsel.Selector(html)
return sel
|
[
"def",
"cleaned_selector",
"(",
"html",
")",
":",
"import",
"parsel",
"try",
":",
"tree",
"=",
"_cleaned_html_tree",
"(",
"html",
")",
"sel",
"=",
"parsel",
".",
"Selector",
"(",
"root",
"=",
"tree",
",",
"type",
"=",
"'html'",
")",
"except",
"(",
"lxml",
".",
"etree",
".",
"XMLSyntaxError",
",",
"lxml",
".",
"etree",
".",
"ParseError",
",",
"lxml",
".",
"etree",
".",
"ParserError",
",",
"UnicodeEncodeError",
")",
":",
"# likely plain text",
"sel",
"=",
"parsel",
".",
"Selector",
"(",
"html",
")",
"return",
"sel"
] | 28.285714 | 10.5 |
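
For reference, a minimal `parsel` round trip of the kind this helper builds on (the markup is illustrative):

import parsel

sel = parsel.Selector(text='<body><p>hello</p></body>')
print(sel.css('p::text').get())   # hello
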
def OnUndo(self, event):
"""Calls the grid undo method"""
statustext = undo.stack().undotext()
undo.stack().undo()
# Update content changed state
try:
post_command_event(self.grid.main_window,
self.grid.ContentChangedMsg)
except TypeError:
# The main window does not exist any more
pass
self.grid.code_array.result_cache.clear()
post_command_event(self.grid.main_window, self.grid.TableChangedMsg,
updated_cell=True)
# Reset row heights and column widths by zooming
self.grid.actions.zoom()
# Change grid table dimensions
self.grid.GetTable().ResetView()
# Update TableChoiceIntCtrl
shape = self.grid.code_array.shape
post_command_event(self.main_window, self.grid.ResizeGridMsg,
shape=shape)
# Update toolbars
self.grid.update_entry_line()
self.grid.update_attribute_toolbar()
post_command_event(self.grid.main_window, self.grid.StatusBarMsg,
text=statustext)
|
[
"def",
"OnUndo",
"(",
"self",
",",
"event",
")",
":",
"statustext",
"=",
"undo",
".",
"stack",
"(",
")",
".",
"undotext",
"(",
")",
"undo",
".",
"stack",
"(",
")",
".",
"undo",
"(",
")",
"# Update content changed state",
"try",
":",
"post_command_event",
"(",
"self",
".",
"grid",
".",
"main_window",
",",
"self",
".",
"grid",
".",
"ContentChangedMsg",
")",
"except",
"TypeError",
":",
"# The main window does not exist any more",
"pass",
"self",
".",
"grid",
".",
"code_array",
".",
"result_cache",
".",
"clear",
"(",
")",
"post_command_event",
"(",
"self",
".",
"grid",
".",
"main_window",
",",
"self",
".",
"grid",
".",
"TableChangedMsg",
",",
"updated_cell",
"=",
"True",
")",
"# Reset row heights and column widths by zooming",
"self",
".",
"grid",
".",
"actions",
".",
"zoom",
"(",
")",
"# Change grid table dimensions",
"self",
".",
"grid",
".",
"GetTable",
"(",
")",
".",
"ResetView",
"(",
")",
"# Update TableChoiceIntCtrl",
"shape",
"=",
"self",
".",
"grid",
".",
"code_array",
".",
"shape",
"post_command_event",
"(",
"self",
".",
"main_window",
",",
"self",
".",
"grid",
".",
"ResizeGridMsg",
",",
"shape",
"=",
"shape",
")",
"# Update toolbars",
"self",
".",
"grid",
".",
"update_entry_line",
"(",
")",
"self",
".",
"grid",
".",
"update_attribute_toolbar",
"(",
")",
"post_command_event",
"(",
"self",
".",
"grid",
".",
"main_window",
",",
"self",
".",
"grid",
".",
"StatusBarMsg",
",",
"text",
"=",
"statustext",
")"
] | 31.5 | 18.833333 |
def menu_item_remove_libraries_or_root_clicked(self, menu_item):
"""Removes library from hard drive after request second confirmation"""
menu_item_text = self.get_menu_item_text(menu_item)
logger.info("Delete item '{0}' pressed.".format(menu_item_text))
model, path = self.view.get_selection().get_selected()
if path:
# Second confirmation to delete library
tree_m_row = self.tree_store[path]
library_os_path, library_path, library_name, item_key = self.extract_library_properties_from_selected_row()
# assert isinstance(tree_m_row[self.ITEM_STORAGE_ID], str)
library_file_system_path = library_os_path
if "root" in menu_item_text:
                button_texts = [menu_item_text + " from tree and config", "Cancel"]
partial_message = "This will remove the library root from your configuration (config.yaml)."
else:
button_texts = [menu_item_text, "Cancel"]
partial_message = "This folder will be removed from hard drive! You really wanna do that?"
message_string = "You choose to {2} with " \
"\n\nlibrary tree path: {0}" \
"\n\nphysical path: {1}.\n\n\n"\
"{3}" \
"".format(os.path.join(self.convert_if_human_readable(tree_m_row[self.LIB_PATH_STORAGE_ID]),
item_key),
library_file_system_path,
menu_item_text.lower(),
partial_message)
width = 8*len("physical path: " + library_file_system_path)
dialog = RAFCONButtonDialog(message_string, button_texts, message_type=Gtk.MessageType.QUESTION,
parent=self.get_root_window(), width=min(width, 1400))
response_id = dialog.run()
dialog.destroy()
if response_id == 1:
if "root" in menu_item_text:
logger.info("Remove library root key '{0}' from config.".format(item_key))
from rafcon.gui.singleton import global_config
library_paths = global_config.get_config_value('LIBRARY_PATHS')
del library_paths[tree_m_row[self.LIB_KEY_STORAGE_ID]]
global_config.save_configuration()
self.model.library_manager.refresh_libraries()
elif "libraries" in menu_item_text:
logger.debug("Remove of all libraries in {} is triggered.".format(library_os_path))
import shutil
shutil.rmtree(library_os_path)
self.model.library_manager.refresh_libraries()
else:
logger.debug("Remove of Library {} is triggered.".format(library_os_path))
self.model.library_manager.remove_library_from_file_system(library_path,
library_name)
elif response_id in [2, -4]:
pass
else:
logger.warning("Response id: {} is not considered".format(response_id))
return True
return False
|
[
"def",
"menu_item_remove_libraries_or_root_clicked",
"(",
"self",
",",
"menu_item",
")",
":",
"menu_item_text",
"=",
"self",
".",
"get_menu_item_text",
"(",
"menu_item",
")",
"logger",
".",
"info",
"(",
"\"Delete item '{0}' pressed.\"",
".",
"format",
"(",
"menu_item_text",
")",
")",
"model",
",",
"path",
"=",
"self",
".",
"view",
".",
"get_selection",
"(",
")",
".",
"get_selected",
"(",
")",
"if",
"path",
":",
"# Second confirmation to delete library",
"tree_m_row",
"=",
"self",
".",
"tree_store",
"[",
"path",
"]",
"library_os_path",
",",
"library_path",
",",
"library_name",
",",
"item_key",
"=",
"self",
".",
"extract_library_properties_from_selected_row",
"(",
")",
"# assert isinstance(tree_m_row[self.ITEM_STORAGE_ID], str)",
"library_file_system_path",
"=",
"library_os_path",
"if",
"\"root\"",
"in",
"menu_item_text",
":",
"button_texts",
"=",
"[",
"menu_item_text",
"+",
"\"from tree and config\"",
",",
"\"Cancel\"",
"]",
"partial_message",
"=",
"\"This will remove the library root from your configuration (config.yaml).\"",
"else",
":",
"button_texts",
"=",
"[",
"menu_item_text",
",",
"\"Cancel\"",
"]",
"partial_message",
"=",
"\"This folder will be removed from hard drive! You really wanna do that?\"",
"message_string",
"=",
"\"You choose to {2} with \"",
"\"\\n\\nlibrary tree path: {0}\"",
"\"\\n\\nphysical path: {1}.\\n\\n\\n\"",
"\"{3}\"",
"\"\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"convert_if_human_readable",
"(",
"tree_m_row",
"[",
"self",
".",
"LIB_PATH_STORAGE_ID",
"]",
")",
",",
"item_key",
")",
",",
"library_file_system_path",
",",
"menu_item_text",
".",
"lower",
"(",
")",
",",
"partial_message",
")",
"width",
"=",
"8",
"*",
"len",
"(",
"\"physical path: \"",
"+",
"library_file_system_path",
")",
"dialog",
"=",
"RAFCONButtonDialog",
"(",
"message_string",
",",
"button_texts",
",",
"message_type",
"=",
"Gtk",
".",
"MessageType",
".",
"QUESTION",
",",
"parent",
"=",
"self",
".",
"get_root_window",
"(",
")",
",",
"width",
"=",
"min",
"(",
"width",
",",
"1400",
")",
")",
"response_id",
"=",
"dialog",
".",
"run",
"(",
")",
"dialog",
".",
"destroy",
"(",
")",
"if",
"response_id",
"==",
"1",
":",
"if",
"\"root\"",
"in",
"menu_item_text",
":",
"logger",
".",
"info",
"(",
"\"Remove library root key '{0}' from config.\"",
".",
"format",
"(",
"item_key",
")",
")",
"from",
"rafcon",
".",
"gui",
".",
"singleton",
"import",
"global_config",
"library_paths",
"=",
"global_config",
".",
"get_config_value",
"(",
"'LIBRARY_PATHS'",
")",
"del",
"library_paths",
"[",
"tree_m_row",
"[",
"self",
".",
"LIB_KEY_STORAGE_ID",
"]",
"]",
"global_config",
".",
"save_configuration",
"(",
")",
"self",
".",
"model",
".",
"library_manager",
".",
"refresh_libraries",
"(",
")",
"elif",
"\"libraries\"",
"in",
"menu_item_text",
":",
"logger",
".",
"debug",
"(",
"\"Remove of all libraries in {} is triggered.\"",
".",
"format",
"(",
"library_os_path",
")",
")",
"import",
"shutil",
"shutil",
".",
"rmtree",
"(",
"library_os_path",
")",
"self",
".",
"model",
".",
"library_manager",
".",
"refresh_libraries",
"(",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Remove of Library {} is triggered.\"",
".",
"format",
"(",
"library_os_path",
")",
")",
"self",
".",
"model",
".",
"library_manager",
".",
"remove_library_from_file_system",
"(",
"library_path",
",",
"library_name",
")",
"elif",
"response_id",
"in",
"[",
"2",
",",
"-",
"4",
"]",
":",
"pass",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Response id: {} is not considered\"",
".",
"format",
"(",
"response_id",
")",
")",
"return",
"True",
"return",
"False"
] | 55 | 29.409836 |
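
A minimal template shape that satisfies the structural checks above (the resource name, type, and properties are illustrative):

sam_template = {
    'Resources': {
        'MyFunction': {
            'Type': 'AWS::Serverless::Function',
            'Properties': {'Handler': 'app.handler', 'Runtime': 'python3.9'},
        }
    }
}

# mirrors the checks _validate applies before schema validation
assert isinstance(sam_template['Resources'], dict) and sam_template['Resources']
assert all(isinstance(r, dict) and isinstance(r.get('Properties', {}), dict)
           for r in sam_template['Resources'].values())
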
def _validate(self, sam_template, parameter_values):
""" Validates the template and parameter values and raises exceptions if there's an issue
:param dict sam_template: SAM template
:param dict parameter_values: Dictionary of parameter values provided by the user
"""
if parameter_values is None:
raise ValueError("`parameter_values` argument is required")
if ("Resources" not in sam_template or not isinstance(sam_template["Resources"], dict) or not
sam_template["Resources"]):
raise InvalidDocumentException(
[InvalidTemplateException("'Resources' section is required")])
if (not all(isinstance(sam_resource, dict) for sam_resource in sam_template["Resources"].values())):
raise InvalidDocumentException(
[InvalidTemplateException(
"All 'Resources' must be Objects. If you're using YAML, this may be an "
"indentation issue."
)])
sam_template_instance = SamTemplate(sam_template)
for resource_logical_id, sam_resource in sam_template_instance.iterate():
# NOTE: Properties isn't required for SimpleTable, so we can't check
# `not isinstance(sam_resources.get("Properties"), dict)` as this would be a breaking change.
# sam_resource.properties defaults to {} in SamTemplate init
if (not isinstance(sam_resource.properties, dict)):
raise InvalidDocumentException(
[InvalidResourceException(resource_logical_id,
"All 'Resources' must be Objects and have a 'Properties' Object. If "
"you're using YAML, this may be an indentation issue."
)])
SamTemplateValidator.validate(sam_template)
|
[
"def",
"_validate",
"(",
"self",
",",
"sam_template",
",",
"parameter_values",
")",
":",
"if",
"parameter_values",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"`parameter_values` argument is required\"",
")",
"if",
"(",
"\"Resources\"",
"not",
"in",
"sam_template",
"or",
"not",
"isinstance",
"(",
"sam_template",
"[",
"\"Resources\"",
"]",
",",
"dict",
")",
"or",
"not",
"sam_template",
"[",
"\"Resources\"",
"]",
")",
":",
"raise",
"InvalidDocumentException",
"(",
"[",
"InvalidTemplateException",
"(",
"\"'Resources' section is required\"",
")",
"]",
")",
"if",
"(",
"not",
"all",
"(",
"isinstance",
"(",
"sam_resource",
",",
"dict",
")",
"for",
"sam_resource",
"in",
"sam_template",
"[",
"\"Resources\"",
"]",
".",
"values",
"(",
")",
")",
")",
":",
"raise",
"InvalidDocumentException",
"(",
"[",
"InvalidTemplateException",
"(",
"\"All 'Resources' must be Objects. If you're using YAML, this may be an \"",
"\"indentation issue.\"",
")",
"]",
")",
"sam_template_instance",
"=",
"SamTemplate",
"(",
"sam_template",
")",
"for",
"resource_logical_id",
",",
"sam_resource",
"in",
"sam_template_instance",
".",
"iterate",
"(",
")",
":",
"# NOTE: Properties isn't required for SimpleTable, so we can't check",
"# `not isinstance(sam_resources.get(\"Properties\"), dict)` as this would be a breaking change.",
"# sam_resource.properties defaults to {} in SamTemplate init",
"if",
"(",
"not",
"isinstance",
"(",
"sam_resource",
".",
"properties",
",",
"dict",
")",
")",
":",
"raise",
"InvalidDocumentException",
"(",
"[",
"InvalidResourceException",
"(",
"resource_logical_id",
",",
"\"All 'Resources' must be Objects and have a 'Properties' Object. If \"",
"\"you're using YAML, this may be an indentation issue.\"",
")",
"]",
")",
"SamTemplateValidator",
".",
"validate",
"(",
"sam_template",
")"
] | 54.2 | 28.542857 |
def create_aggregator(self, subordinates):
"""Creates an aggregator event source, collecting events from multiple sources.
This way a single listener can listen for events coming from multiple sources,
using a single blocking :py:func:`get_event` on the returned aggregator.
in subordinates of type :class:`IEventSource`
Subordinate event source this one aggregates.
return result of type :class:`IEventSource`
Event source aggregating passed sources.
"""
if not isinstance(subordinates, list):
raise TypeError("subordinates can only be an instance of type list")
for a in subordinates[:10]:
if not isinstance(a, IEventSource):
raise TypeError(
"array can only contain objects of type IEventSource")
result = self._call("createAggregator",
in_p=[subordinates])
result = IEventSource(result)
return result
|
[
"def",
"create_aggregator",
"(",
"self",
",",
"subordinates",
")",
":",
"if",
"not",
"isinstance",
"(",
"subordinates",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"subordinates can only be an instance of type list\"",
")",
"for",
"a",
"in",
"subordinates",
"[",
":",
"10",
"]",
":",
"if",
"not",
"isinstance",
"(",
"a",
",",
"IEventSource",
")",
":",
"raise",
"TypeError",
"(",
"\"array can only contain objects of type IEventSource\"",
")",
"result",
"=",
"self",
".",
"_call",
"(",
"\"createAggregator\"",
",",
"in_p",
"=",
"[",
"subordinates",
"]",
")",
"result",
"=",
"IEventSource",
"(",
"result",
")",
"return",
"result"
] | 44.727273 | 18 |
def add_root_family(self, family_id):
"""Adds a root family.
arg: family_id (osid.id.Id): the ``Id`` of a family
raise: AlreadyExists - ``family_id`` is already in hierarchy
raise: NotFound - ``family_id`` not found
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.add_root_bin_template
if self._catalog_session is not None:
return self._catalog_session.add_root_catalog(catalog_id=family_id)
return self._hierarchy_session.add_root(id_=family_id)
|
[
"def",
"add_root_family",
"(",
"self",
",",
"family_id",
")",
":",
"# Implemented from template for",
"# osid.resource.BinHierarchyDesignSession.add_root_bin_template",
"if",
"self",
".",
"_catalog_session",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_catalog_session",
".",
"add_root_catalog",
"(",
"catalog_id",
"=",
"family_id",
")",
"return",
"self",
".",
"_hierarchy_session",
".",
"add_root",
"(",
"id_",
"=",
"family_id",
")"
] | 46.705882 | 18.882353 |
def chi_a(mass1, mass2, spin1z, spin2z):
""" Returns the aligned mass-weighted spin difference from mass1, mass2,
spin1z, and spin2z.
"""
return (spin2z * mass2 - spin1z * mass1) / (mass2 + mass1)
|
[
"def",
"chi_a",
"(",
"mass1",
",",
"mass2",
",",
"spin1z",
",",
"spin2z",
")",
":",
"return",
"(",
"spin2z",
"*",
"mass2",
"-",
"spin1z",
"*",
"mass1",
")",
"/",
"(",
"mass2",
"+",
"mass1",
")"
] | 41.6 | 7.8 |
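
A worked example (the masses and spins are illustrative; units cancel in the ratio):

mass1, mass2 = 30.0, 20.0
spin1z, spin2z = 0.4, -0.2
chi_a = (spin2z * mass2 - spin1z * mass1) / (mass2 + mass1)
print(chi_a)   # (-4.0 - 12.0) / 50.0 = -0.32
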
def mesh_other(mesh,
other,
samples=500,
scale=False,
icp_first=10,
icp_final=50):
"""
Align a mesh with another mesh or a PointCloud using
the principal axes of inertia as a starting point which
is refined by iterative closest point.
Parameters
------------
mesh : trimesh.Trimesh object
Mesh to align with other
other : trimesh.Trimesh or (n, 3) float
Mesh or points in space
samples : int
Number of samples from mesh surface to align
scale : bool
Allow scaling in transform
icp_first : int
How many ICP iterations for the 9 possible
combinations of sign flippage
icp_final : int
How many ICP iterations for the closest
candidate from the wider search
Returns
-----------
mesh_to_other : (4, 4) float
Transform to align mesh to the other object
cost : float
Average squared distance per point
"""
def key_points(m, count):
"""
Return a combination of mesh vertices and surface samples
with vertices chosen by likelihood to be important
        to registration.
"""
if len(m.vertices) < (count / 2):
return np.vstack((
m.vertices,
m.sample(count - len(m.vertices))))
else:
return m.sample(count)
if not util.is_instance_named(mesh, 'Trimesh'):
raise ValueError('mesh must be Trimesh object!')
inverse = True
search = mesh
# if both are meshes use the smaller one for searching
if util.is_instance_named(other, 'Trimesh'):
if len(mesh.vertices) > len(other.vertices):
# do the expensive tree construction on the
# smaller mesh and query the others points
search = other
inverse = False
points = key_points(m=mesh, count=samples)
points_mesh = mesh
else:
points_mesh = other
points = key_points(m=other, count=samples)
if points_mesh.is_volume:
points_PIT = points_mesh.principal_inertia_transform
else:
points_PIT = points_mesh.bounding_box_oriented.principal_inertia_transform
elif util.is_shape(other, (-1, 3)):
# case where other is just points
points = other
points_PIT = bounds.oriented_bounds(points)[0]
else:
raise ValueError('other must be mesh or (n, 3) points!')
# get the transform that aligns the search mesh principal
# axes of inertia with the XYZ axis at the origin
if search.is_volume:
search_PIT = search.principal_inertia_transform
else:
search_PIT = search.bounding_box_oriented.principal_inertia_transform
# transform that moves the principal axes of inertia
    # of the search mesh to be aligned with the best-guess
# principal axes of the points
search_to_points = np.dot(np.linalg.inv(points_PIT),
search_PIT)
# permutations of cube rotations
# the principal inertia transform has arbitrary sign
# along the 3 major axis so try all combinations of
# 180 degree rotations with a quick first ICP pass
cubes = np.array([np.eye(4) * np.append(diag, 1)
for diag in [[1, 1, 1],
[1, 1, -1],
[1, -1, 1],
[-1, 1, 1],
[-1, -1, 1],
[-1, 1, -1],
[1, -1, -1],
[-1, -1, -1]]])
# loop through permutations and run iterative closest point
costs = np.ones(len(cubes)) * np.inf
transforms = [None] * len(cubes)
centroid = search.centroid
for i, flip in enumerate(cubes):
# transform from points to search mesh
# flipped around the centroid of search
a_to_b = np.dot(
transformations.transform_around(flip, centroid),
np.linalg.inv(search_to_points))
# run first pass ICP
matrix, junk, cost = icp(a=points,
b=search,
initial=a_to_b,
max_iterations=int(icp_first),
scale=scale)
# save transform and costs from ICP
transforms[i] = matrix
costs[i] = cost
# run a final ICP refinement step
matrix, junk, cost = icp(a=points,
b=search,
initial=transforms[np.argmin(costs)],
max_iterations=int(icp_final),
scale=scale)
    # convert to per-point distance average
cost /= len(points)
# we picked the smaller mesh to construct the tree
# on so we may have calculated a transform backwards
# to save computation, so just invert matrix here
if inverse:
mesh_to_other = np.linalg.inv(matrix)
else:
mesh_to_other = matrix
return mesh_to_other, cost
|
[
"def",
"mesh_other",
"(",
"mesh",
",",
"other",
",",
"samples",
"=",
"500",
",",
"scale",
"=",
"False",
",",
"icp_first",
"=",
"10",
",",
"icp_final",
"=",
"50",
")",
":",
"def",
"key_points",
"(",
"m",
",",
"count",
")",
":",
"\"\"\"\n Return a combination of mesh vertices and surface samples\n with vertices chosen by likelihood to be important\n to registation.\n \"\"\"",
"if",
"len",
"(",
"m",
".",
"vertices",
")",
"<",
"(",
"count",
"/",
"2",
")",
":",
"return",
"np",
".",
"vstack",
"(",
"(",
"m",
".",
"vertices",
",",
"m",
".",
"sample",
"(",
"count",
"-",
"len",
"(",
"m",
".",
"vertices",
")",
")",
")",
")",
"else",
":",
"return",
"m",
".",
"sample",
"(",
"count",
")",
"if",
"not",
"util",
".",
"is_instance_named",
"(",
"mesh",
",",
"'Trimesh'",
")",
":",
"raise",
"ValueError",
"(",
"'mesh must be Trimesh object!'",
")",
"inverse",
"=",
"True",
"search",
"=",
"mesh",
"# if both are meshes use the smaller one for searching",
"if",
"util",
".",
"is_instance_named",
"(",
"other",
",",
"'Trimesh'",
")",
":",
"if",
"len",
"(",
"mesh",
".",
"vertices",
")",
">",
"len",
"(",
"other",
".",
"vertices",
")",
":",
"# do the expensive tree construction on the",
"# smaller mesh and query the others points",
"search",
"=",
"other",
"inverse",
"=",
"False",
"points",
"=",
"key_points",
"(",
"m",
"=",
"mesh",
",",
"count",
"=",
"samples",
")",
"points_mesh",
"=",
"mesh",
"else",
":",
"points_mesh",
"=",
"other",
"points",
"=",
"key_points",
"(",
"m",
"=",
"other",
",",
"count",
"=",
"samples",
")",
"if",
"points_mesh",
".",
"is_volume",
":",
"points_PIT",
"=",
"points_mesh",
".",
"principal_inertia_transform",
"else",
":",
"points_PIT",
"=",
"points_mesh",
".",
"bounding_box_oriented",
".",
"principal_inertia_transform",
"elif",
"util",
".",
"is_shape",
"(",
"other",
",",
"(",
"-",
"1",
",",
"3",
")",
")",
":",
"# case where other is just points",
"points",
"=",
"other",
"points_PIT",
"=",
"bounds",
".",
"oriented_bounds",
"(",
"points",
")",
"[",
"0",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'other must be mesh or (n, 3) points!'",
")",
"# get the transform that aligns the search mesh principal",
"# axes of inertia with the XYZ axis at the origin",
"if",
"search",
".",
"is_volume",
":",
"search_PIT",
"=",
"search",
".",
"principal_inertia_transform",
"else",
":",
"search_PIT",
"=",
"search",
".",
"bounding_box_oriented",
".",
"principal_inertia_transform",
"# transform that moves the principal axes of inertia",
"# of the search mesh to be aligned with the best- guess",
"# principal axes of the points",
"search_to_points",
"=",
"np",
".",
"dot",
"(",
"np",
".",
"linalg",
".",
"inv",
"(",
"points_PIT",
")",
",",
"search_PIT",
")",
"# permutations of cube rotations",
"# the principal inertia transform has arbitrary sign",
"# along the 3 major axis so try all combinations of",
"# 180 degree rotations with a quick first ICP pass",
"cubes",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"eye",
"(",
"4",
")",
"*",
"np",
".",
"append",
"(",
"diag",
",",
"1",
")",
"for",
"diag",
"in",
"[",
"[",
"1",
",",
"1",
",",
"1",
"]",
",",
"[",
"1",
",",
"1",
",",
"-",
"1",
"]",
",",
"[",
"1",
",",
"-",
"1",
",",
"1",
"]",
",",
"[",
"-",
"1",
",",
"1",
",",
"1",
"]",
",",
"[",
"-",
"1",
",",
"-",
"1",
",",
"1",
"]",
",",
"[",
"-",
"1",
",",
"1",
",",
"-",
"1",
"]",
",",
"[",
"1",
",",
"-",
"1",
",",
"-",
"1",
"]",
",",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
"]",
"]",
"]",
")",
"# loop through permutations and run iterative closest point",
"costs",
"=",
"np",
".",
"ones",
"(",
"len",
"(",
"cubes",
")",
")",
"*",
"np",
".",
"inf",
"transforms",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"cubes",
")",
"centroid",
"=",
"search",
".",
"centroid",
"for",
"i",
",",
"flip",
"in",
"enumerate",
"(",
"cubes",
")",
":",
"# transform from points to search mesh",
"# flipped around the centroid of search",
"a_to_b",
"=",
"np",
".",
"dot",
"(",
"transformations",
".",
"transform_around",
"(",
"flip",
",",
"centroid",
")",
",",
"np",
".",
"linalg",
".",
"inv",
"(",
"search_to_points",
")",
")",
"# run first pass ICP",
"matrix",
",",
"junk",
",",
"cost",
"=",
"icp",
"(",
"a",
"=",
"points",
",",
"b",
"=",
"search",
",",
"initial",
"=",
"a_to_b",
",",
"max_iterations",
"=",
"int",
"(",
"icp_first",
")",
",",
"scale",
"=",
"scale",
")",
"# save transform and costs from ICP",
"transforms",
"[",
"i",
"]",
"=",
"matrix",
"costs",
"[",
"i",
"]",
"=",
"cost",
"# run a final ICP refinement step",
"matrix",
",",
"junk",
",",
"cost",
"=",
"icp",
"(",
"a",
"=",
"points",
",",
"b",
"=",
"search",
",",
"initial",
"=",
"transforms",
"[",
"np",
".",
"argmin",
"(",
"costs",
")",
"]",
",",
"max_iterations",
"=",
"int",
"(",
"icp_final",
")",
",",
"scale",
"=",
"scale",
")",
"# convert to per- point distance average",
"cost",
"/=",
"len",
"(",
"points",
")",
"# we picked the smaller mesh to construct the tree",
"# on so we may have calculated a transform backwards",
"# to save computation, so just invert matrix here",
"if",
"inverse",
":",
"mesh_to_other",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"matrix",
")",
"else",
":",
"mesh_to_other",
"=",
"matrix",
"return",
"mesh_to_other",
",",
"cost"
] | 33.871622 | 16.236486 |
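
A hedged usage sketch with trimesh, assuming this function is exposed as `trimesh.registration.mesh_other` (the meshes and offset are illustrative):

import trimesh

mesh = trimesh.creation.box(extents=(1, 2, 3))
other = mesh.copy()
other.apply_translation([0.1, -0.2, 0.05])

matrix, cost = trimesh.registration.mesh_other(mesh, other, samples=200)
print(cost)   # small average squared distance once aligned
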
def predict_proba(self, X):
"""Predict probabilities on test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
proba : array, shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
if not hasattr(self, '_program'):
raise NotFittedError('SymbolicClassifier not fitted.')
X = check_array(X)
_, n_features = X.shape
if self.n_features_ != n_features:
raise ValueError('Number of features of the model must match the '
'input. Model n_features is %s and input '
'n_features is %s.'
% (self.n_features_, n_features))
scores = self._program.execute(X)
proba = self._transformer(scores)
proba = np.vstack([1 - proba, proba]).T
return proba
|
[
"def",
"predict_proba",
"(",
"self",
",",
"X",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_program'",
")",
":",
"raise",
"NotFittedError",
"(",
"'SymbolicClassifier not fitted.'",
")",
"X",
"=",
"check_array",
"(",
"X",
")",
"_",
",",
"n_features",
"=",
"X",
".",
"shape",
"if",
"self",
".",
"n_features_",
"!=",
"n_features",
":",
"raise",
"ValueError",
"(",
"'Number of features of the model must match the '",
"'input. Model n_features is %s and input '",
"'n_features is %s.'",
"%",
"(",
"self",
".",
"n_features_",
",",
"n_features",
")",
")",
"scores",
"=",
"self",
".",
"_program",
".",
"execute",
"(",
"X",
")",
"proba",
"=",
"self",
".",
"_transformer",
"(",
"scores",
")",
"proba",
"=",
"np",
".",
"vstack",
"(",
"[",
"1",
"-",
"proba",
",",
"proba",
"]",
")",
".",
"T",
"return",
"proba"
] | 36.741935 | 19.903226 |
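
A hedged usage sketch with gplearn's SymbolicClassifier on synthetic data (the hyperparameters are illustrative):

import numpy as np
from gplearn.genetic import SymbolicClassifier

rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, (200, 3))
y = (X[:, 0] + X[:, 1] > 0).astype(int)

clf = SymbolicClassifier(population_size=300, generations=5, random_state=0)
clf.fit(X, y)
proba = clf.predict_proba(X[:5])
print(proba.shape)   # (5, 2); columns follow clf.classes_
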
def add_widget(self, widget):
"""
Add a Widget as a managed child of this Widget.
The child will be
automatically positioned and sized to fill the entire space inside
this Widget (unless _update_child_widgets is redefined).
Parameters
----------
widget : instance of Widget
The widget to add.
Returns
-------
widget : instance of Widget
The widget.
"""
self._widgets.append(widget)
widget.parent = self
self._update_child_widgets()
return widget
|
[
"def",
"add_widget",
"(",
"self",
",",
"widget",
")",
":",
"self",
".",
"_widgets",
".",
"append",
"(",
"widget",
")",
"widget",
".",
"parent",
"=",
"self",
"self",
".",
"_update_child_widgets",
"(",
")",
"return",
"widget"
] | 26.318182 | 17.681818 |
def write_to_path(self,path,suffix='',format='png',overwrite=False):
"""
        Output the data in the dataframe's 'image' column to a directory structured by project->sample and named by frame
Args:
path (str): Where to write the directory of images
            suffix (str): for labeling the images you write
format (str): default 'png' format to write the file
overwrite (bool): default False. if true can overwrite files in the path
Modifies:
Creates path folder if necessary and writes images to path
"""
        if os.path.exists(path) and overwrite is False: raise ValueError("Error: use overwrite=True to overwrite images")
if not os.path.exists(path): os.makedirs(path)
for i,r in self.iterrows():
spath = os.path.join(path,r['project_name'],r['sample_name'])
if not os.path.exists(spath): os.makedirs(spath)
if suffix == '':
fname = os.path.join(spath,r['frame_name']+'.'+format)
else: fname = os.path.join(spath,r['frame_name']+'_'+suffix+'.'+format)
imageio.imwrite(fname, r['image'],format=format)
|
[
"def",
"write_to_path",
"(",
"self",
",",
"path",
",",
"suffix",
"=",
"''",
",",
"format",
"=",
"'png'",
",",
"overwrite",
"=",
"False",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
"and",
"overwrite",
"is",
"False",
":",
"raise",
"ValueError",
"(",
"\"Error: use ovewrite=True to overwrite images\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"for",
"i",
",",
"r",
"in",
"self",
".",
"iterrows",
"(",
")",
":",
"spath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"r",
"[",
"'project_name'",
"]",
",",
"r",
"[",
"'sample_name'",
"]",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"spath",
")",
":",
"os",
".",
"makedirs",
"(",
"spath",
")",
"if",
"suffix",
"==",
"''",
":",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"spath",
",",
"r",
"[",
"'frame_name'",
"]",
"+",
"'.'",
"+",
"format",
")",
"else",
":",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"spath",
",",
"r",
"[",
"'frame_name'",
"]",
"+",
"'_'",
"+",
"suffix",
"+",
"'.'",
"+",
"format",
")",
"imageio",
".",
"imwrite",
"(",
"fname",
",",
"r",
"[",
"'image'",
"]",
",",
"format",
"=",
"format",
")"
] | 52.772727 | 28.772727 |
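
A minimal sketch of the per-frame write performed above (paths and image contents are illustrative):

import os
import numpy as np
import imageio

img = np.zeros((16, 16), dtype=np.uint8)
spath = os.path.join('out', 'proj1', 'sampleA')
os.makedirs(spath, exist_ok=True)
imageio.imwrite(os.path.join(spath, 'frame001.png'), img, format='png')
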
def difference(self, other):
"""
Compute the difference between this and a given range.
>>> intrange(1, 10).difference(intrange(10, 15))
intrange([1,10))
>>> intrange(1, 10).difference(intrange(5, 10))
intrange([1,5))
>>> intrange(1, 5).difference(intrange(5, 10))
intrange([1,5))
>>> intrange(1, 5).difference(intrange(1, 10))
intrange(empty)
        The difference cannot be computed if the resulting range would be split
in two separate ranges. This happens when the given range is completely
within this range and does not start or end at the same value.
>>> intrange(1, 15).difference(intrange(5, 10))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: Other range must not be within this range
This does not modify the range in place.
This is the same as the ``-`` operator for two ranges in PostgreSQL.
:param other: Range to difference against.
:return: A new range that is the difference between this and `other`.
:raises ValueError: If the difference between this and `other` cannot be
    computed.
"""
if not self.is_valid_range(other):
msg = "Unsupported type to test for difference '{.__class__.__name__}'"
raise TypeError(msg.format(other))
# Consider empty ranges or no overlap
if not self or not other or not self.overlap(other):
return self
# If self is contained within other, the result is empty
elif self in other:
return self.empty()
elif other in self and not (self.startswith(other) or self.endswith(other)):
raise ValueError("Other range must not be within this range")
elif self.endsbefore(other):
return self.replace(upper=other.lower, upper_inc=not other.lower_inc)
elif self.startsafter(other):
return self.replace(lower=other.upper, lower_inc=not other.upper_inc)
else:
return self.empty()
|
[
"def",
"difference",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"self",
".",
"is_valid_range",
"(",
"other",
")",
":",
"msg",
"=",
"\"Unsupported type to test for difference '{.__class__.__name__}'\"",
"raise",
"TypeError",
"(",
"msg",
".",
"format",
"(",
"other",
")",
")",
"# Consider empty ranges or no overlap",
"if",
"not",
"self",
"or",
"not",
"other",
"or",
"not",
"self",
".",
"overlap",
"(",
"other",
")",
":",
"return",
"self",
"# If self is contained within other, the result is empty",
"elif",
"self",
"in",
"other",
":",
"return",
"self",
".",
"empty",
"(",
")",
"elif",
"other",
"in",
"self",
"and",
"not",
"(",
"self",
".",
"startswith",
"(",
"other",
")",
"or",
"self",
".",
"endswith",
"(",
"other",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Other range must not be within this range\"",
")",
"elif",
"self",
".",
"endsbefore",
"(",
"other",
")",
":",
"return",
"self",
".",
"replace",
"(",
"upper",
"=",
"other",
".",
"lower",
",",
"upper_inc",
"=",
"not",
"other",
".",
"lower_inc",
")",
"elif",
"self",
".",
"startsafter",
"(",
"other",
")",
":",
"return",
"self",
".",
"replace",
"(",
"lower",
"=",
"other",
".",
"upper",
",",
"lower_inc",
"=",
"not",
"other",
".",
"upper_inc",
")",
"else",
":",
"return",
"self",
".",
"empty",
"(",
")"
] | 42.54 | 22.42 |
def iter_statuses(self, number=-1, etag=None):
"""Iterate over the deployment statuses for this deployment.
:param int number: (optional), the number of statuses to return.
Default: -1, returns all statuses.
:param str etag: (optional), the ETag header value from the last time
you iterated over the statuses.
:returns: generator of :class:`DeploymentStatus`\ es
"""
i = self._iter(int(number), self.statuses_url, DeploymentStatus,
etag=etag)
i.headers = Deployment.CUSTOM_HEADERS
return i
|
[
"def",
"iter_statuses",
"(",
"self",
",",
"number",
"=",
"-",
"1",
",",
"etag",
"=",
"None",
")",
":",
"i",
"=",
"self",
".",
"_iter",
"(",
"int",
"(",
"number",
")",
",",
"self",
".",
"statuses_url",
",",
"DeploymentStatus",
",",
"etag",
"=",
"etag",
")",
"i",
".",
"headers",
"=",
"Deployment",
".",
"CUSTOM_HEADERS",
"return",
"i"
] | 45.307692 | 16.307692 |
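Illustrative iteration over the generator above, assuming a github3.py-style
`deployment` object obtained elsewhere:

for status in deployment.iter_statuses(number=5):
    print(status)  # at most five DeploymentStatus objects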
def _get_plugin_map(self, compiler, options_src, target):
"""Returns a map of plugin to args, for the given compiler.
Only plugins that must actually be activated will be present as keys in the map.
Plugins with no arguments will have an empty list as a value.
Active plugins and their args will be gathered from (in order of precedence):
- The <compiler>_plugins and <compiler>_plugin_args fields of the target, if it has them.
- The <compiler>_plugins and <compiler>_plugin_args options of this task, if it has them.
- The <compiler>_plugins and <compiler>_plugin_args fields of this task, if it has them.
Note that in-repo plugins will not be returned, even if requested, when building
themselves. Use published versions of those plugins for that.
See:
- examples/src/java/org/pantsbuild/example/javac/plugin/README.md.
- examples/src/scala/org/pantsbuild/example/scalac/plugin/README.md
:param compiler: one of 'javac', 'scalac'.
:param options_src: A JvmToolMixin instance providing plugin options.
:param target: The target whose plugins we compute.
"""
# Note that we get() options and getattr() target fields and task methods,
# so we're robust when those don't exist (or are None).
plugins_key = '{}_plugins'.format(compiler)
requested_plugins = (
tuple(getattr(self, plugins_key, []) or []) +
tuple(options_src.get_options().get(plugins_key, []) or []) +
tuple((getattr(target, plugins_key, []) or []))
)
# Allow multiple flags and also comma-separated values in a single flag.
requested_plugins = {p for val in requested_plugins for p in val.split(',')}
plugin_args_key = '{}_plugin_args'.format(compiler)
available_plugin_args = {}
available_plugin_args.update(getattr(self, plugin_args_key, {}) or {})
available_plugin_args.update(options_src.get_options().get(plugin_args_key, {}) or {})
available_plugin_args.update(getattr(target, plugin_args_key, {}) or {})
# From all available args, pluck just the ones for the selected plugins.
plugin_map = {}
for plugin in requested_plugins:
# Don't attempt to use a plugin while building that plugin.
# This avoids a bootstrapping problem. Note that you can still
# use published plugins on themselves, just not in-repo plugins.
if target not in self._plugin_targets(compiler).get(plugin, {}):
plugin_map[plugin] = available_plugin_args.get(plugin, [])
return plugin_map
|
[
"def",
"_get_plugin_map",
"(",
"self",
",",
"compiler",
",",
"options_src",
",",
"target",
")",
":",
"# Note that we get() options and getattr() target fields and task methods,",
"# so we're robust when those don't exist (or are None).",
"plugins_key",
"=",
"'{}_plugins'",
".",
"format",
"(",
"compiler",
")",
"requested_plugins",
"=",
"(",
"tuple",
"(",
"getattr",
"(",
"self",
",",
"plugins_key",
",",
"[",
"]",
")",
"or",
"[",
"]",
")",
"+",
"tuple",
"(",
"options_src",
".",
"get_options",
"(",
")",
".",
"get",
"(",
"plugins_key",
",",
"[",
"]",
")",
"or",
"[",
"]",
")",
"+",
"tuple",
"(",
"(",
"getattr",
"(",
"target",
",",
"plugins_key",
",",
"[",
"]",
")",
"or",
"[",
"]",
")",
")",
")",
"# Allow multiple flags and also comma-separated values in a single flag.",
"requested_plugins",
"=",
"{",
"p",
"for",
"val",
"in",
"requested_plugins",
"for",
"p",
"in",
"val",
".",
"split",
"(",
"','",
")",
"}",
"plugin_args_key",
"=",
"'{}_plugin_args'",
".",
"format",
"(",
"compiler",
")",
"available_plugin_args",
"=",
"{",
"}",
"available_plugin_args",
".",
"update",
"(",
"getattr",
"(",
"self",
",",
"plugin_args_key",
",",
"{",
"}",
")",
"or",
"{",
"}",
")",
"available_plugin_args",
".",
"update",
"(",
"options_src",
".",
"get_options",
"(",
")",
".",
"get",
"(",
"plugin_args_key",
",",
"{",
"}",
")",
"or",
"{",
"}",
")",
"available_plugin_args",
".",
"update",
"(",
"getattr",
"(",
"target",
",",
"plugin_args_key",
",",
"{",
"}",
")",
"or",
"{",
"}",
")",
"# From all available args, pluck just the ones for the selected plugins.",
"plugin_map",
"=",
"{",
"}",
"for",
"plugin",
"in",
"requested_plugins",
":",
"# Don't attempt to use a plugin while building that plugin.",
"# This avoids a bootstrapping problem. Note that you can still",
"# use published plugins on themselves, just not in-repo plugins.",
"if",
"target",
"not",
"in",
"self",
".",
"_plugin_targets",
"(",
"compiler",
")",
".",
"get",
"(",
"plugin",
",",
"{",
"}",
")",
":",
"plugin_map",
"[",
"plugin",
"]",
"=",
"available_plugin_args",
".",
"get",
"(",
"plugin",
",",
"[",
"]",
")",
"return",
"plugin_map"
] | 51.375 | 28.916667 |
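The precedence and merging rules above can be shown in isolation. This toy
sketch (all names hypothetical) mirrors the two passes: requested plugin names
are unioned across sources with comma-splitting, while the args dicts are
merged so later sources win per plugin:

task_plugins = ('macros',)
option_plugins = ('linter,macros',)  # one flag may carry comma-separated values
target_plugins = ('codegen',)

requested = {p for val in task_plugins + option_plugins + target_plugins
             for p in val.split(',')}
# requested == {'macros', 'linter', 'codegen'}

args = {}
args.update({'macros': ['-Averbose']})  # task-level defaults
args.update({'linter': ['-Astrict']})   # option-level entries override per key
plugin_map = {p: args.get(p, []) for p in requested}
# {'macros': ['-Averbose'], 'linter': ['-Astrict'], 'codegen': []}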
def _parse_response(header_data, ignore_bad_cookies=False,
ignore_bad_attributes=True):
"""Turn one or more lines of 'Set-Cookie:' header data into a list of dicts
mapping attribute names to attribute values (as plain strings).
"""
cookie_dicts = []
for line in Definitions.EOL.split(header_data.strip()):
if not line:
break
cookie_dict = parse_one_response(
line, ignore_bad_cookies=ignore_bad_cookies,
ignore_bad_attributes=ignore_bad_attributes)
if not cookie_dict:
continue
cookie_dicts.append(cookie_dict)
if not cookie_dicts:
if not ignore_bad_cookies:
raise InvalidCookieError(data=header_data)
_report_invalid_cookie(header_data)
return cookie_dicts
|
[
"def",
"_parse_response",
"(",
"header_data",
",",
"ignore_bad_cookies",
"=",
"False",
",",
"ignore_bad_attributes",
"=",
"True",
")",
":",
"cookie_dicts",
"=",
"[",
"]",
"for",
"line",
"in",
"Definitions",
".",
"EOL",
".",
"split",
"(",
"header_data",
".",
"strip",
"(",
")",
")",
":",
"if",
"not",
"line",
":",
"break",
"cookie_dict",
"=",
"parse_one_response",
"(",
"line",
",",
"ignore_bad_cookies",
"=",
"ignore_bad_cookies",
",",
"ignore_bad_attributes",
"=",
"ignore_bad_attributes",
")",
"if",
"not",
"cookie_dict",
":",
"continue",
"cookie_dicts",
".",
"append",
"(",
"cookie_dict",
")",
"if",
"not",
"cookie_dicts",
":",
"if",
"not",
"ignore_bad_cookies",
":",
"raise",
"InvalidCookieError",
"(",
"data",
"=",
"header_data",
")",
"_report_invalid_cookie",
"(",
"header_data",
")",
"return",
"cookie_dicts"
] | 39.7 | 12.8 |
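A minimal call sketch; the header value is invented, and Definitions plus
parse_one_response come from the surrounding cookies module:

cookie_dicts = _parse_response("sid=abc123; Path=/; HttpOnly")
# one dict per Set-Cookie line; the exact keys depend on parse_one_response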
def pack_paths(paths, sheet_size=None):
"""
Pack a list of Path2D objects into a rectangle.
Parameters
------------
paths: (n,) Path2D
  Geometry to be packed
sheet_size: None or (2,) float
  Size of the rectangular sheet to pack onto (optional)
Returns
------------
packed : trimesh.path.Path2D
Object containing input geometry
inserted : (m,) int
Indexes of paths inserted into result
"""
from .util import concatenate
if sheet_size is not None:
sheet_size = np.sort(sheet_size)[::-1]
quantity = []
for path in paths:
if 'quantity' in path.metadata:
quantity.append(path.metadata['quantity'])
else:
quantity.append(1)
# pack using exterior polygon (will use OBB)
polygons = [i.polygons_closed[i.root[0]] for i in paths]
# pack the polygons using rectangular bin packing
inserted, transforms = multipack(polygons=polygons,
quantity=quantity,
sheet_size=sheet_size)
multi = []
for i, T in zip(inserted, transforms):
multi.append(paths[i].copy())
multi[-1].apply_transform(T)
# append all packed paths into a single Path object
packed = concatenate(multi)
return packed, inserted
|
[
"def",
"pack_paths",
"(",
"paths",
",",
"sheet_size",
"=",
"None",
")",
":",
"from",
".",
"util",
"import",
"concatenate",
"if",
"sheet_size",
"is",
"not",
"None",
":",
"sheet_size",
"=",
"np",
".",
"sort",
"(",
"sheet_size",
")",
"[",
":",
":",
"-",
"1",
"]",
"quantity",
"=",
"[",
"]",
"for",
"path",
"in",
"paths",
":",
"if",
"'quantity'",
"in",
"path",
".",
"metadata",
":",
"quantity",
".",
"append",
"(",
"path",
".",
"metadata",
"[",
"'quantity'",
"]",
")",
"else",
":",
"quantity",
".",
"append",
"(",
"1",
")",
"# pack using exterior polygon (will OBB)",
"polygons",
"=",
"[",
"i",
".",
"polygons_closed",
"[",
"i",
".",
"root",
"[",
"0",
"]",
"]",
"for",
"i",
"in",
"paths",
"]",
"# pack the polygons using rectangular bin packing",
"inserted",
",",
"transforms",
"=",
"multipack",
"(",
"polygons",
"=",
"polygons",
",",
"quantity",
"=",
"quantity",
",",
"sheet_size",
"=",
"sheet_size",
")",
"multi",
"=",
"[",
"]",
"for",
"i",
",",
"T",
"in",
"zip",
"(",
"inserted",
",",
"transforms",
")",
":",
"multi",
".",
"append",
"(",
"paths",
"[",
"i",
"]",
".",
"copy",
"(",
")",
")",
"multi",
"[",
"-",
"1",
"]",
".",
"apply_transform",
"(",
"T",
")",
"# append all packed paths into a single Path object",
"packed",
"=",
"concatenate",
"(",
"multi",
")",
"return",
"packed",
",",
"inserted"
] | 26.622222 | 18 |
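Call pattern only -- building the Path2D inputs is outside this record, so
`paths` is assumed to already hold trimesh Path2D objects:

packed, inserted = pack_paths(paths, sheet_size=(200, 100))
# `packed` is a single Path2D holding every transformed copy that fit;
# `inserted` indexes which of the inputs were placed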
def build_url_request(self):
"""
Consults the authenticator and grant for HTTP request parameters and
headers to send with the access token request, builds the request using
the stored endpoint and returns it.
"""
params = {}
headers = {}
self._authenticator(params, headers)
self._grant(params)
return Request(self._endpoint, urlencode(params), headers)
|
[
"def",
"build_url_request",
"(",
"self",
")",
":",
"params",
"=",
"{",
"}",
"headers",
"=",
"{",
"}",
"self",
".",
"_authenticator",
"(",
"params",
",",
"headers",
")",
"self",
".",
"_grant",
"(",
"params",
")",
"return",
"Request",
"(",
"self",
".",
"_endpoint",
",",
"urlencode",
"(",
"params",
")",
",",
"headers",
")"
] | 38.545455 | 15.818182 |
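The authenticator and grant used above are plain callables that mutate the
request parameters in place; a minimal hand-rolled pair matching the call
signatures in the method (both class names are hypothetical):

class ClientSecretAuthenticator:
    def __init__(self, client_id, client_secret):
        self._id, self._secret = client_id, client_secret

    def __call__(self, params, headers):
        params['client_id'] = self._id
        params['client_secret'] = self._secret

class RefreshTokenGrant:
    def __init__(self, refresh_token):
        self._token = refresh_token

    def __call__(self, params):
        params['grant_type'] = 'refresh_token'
        params['refresh_token'] = self._token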
def mono(self):
"""
Return this instance summed to mono. If the instance is already mono,
this is a no-op.
"""
if self.channels == 1:
return self
x = self.sum(axis=1) * 0.5
return AudioSamples(x, self.samplerate)
|
[
"def",
"mono",
"(",
"self",
")",
":",
"if",
"self",
".",
"channels",
"==",
"1",
":",
"return",
"self",
"x",
"=",
"self",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"*",
"0.5",
"y",
"=",
"x",
"*",
"0.5",
"return",
"AudioSamples",
"(",
"y",
",",
"self",
".",
"samplerate",
")"
] | 29.2 | 14 |
def find_page_of_state_m(self, state_m):
"""Return the identifier and page of a given state model
:param state_m: The state model to be searched
:return: page containing the state and the state_identifier
"""
for state_identifier, page_info in list(self.tabs.items()):
if page_info['state_m'] is state_m:
return page_info['page'], state_identifier
return None, None
|
[
"def",
"find_page_of_state_m",
"(",
"self",
",",
"state_m",
")",
":",
"for",
"state_identifier",
",",
"page_info",
"in",
"list",
"(",
"self",
".",
"tabs",
".",
"items",
"(",
")",
")",
":",
"if",
"page_info",
"[",
"'state_m'",
"]",
"is",
"state_m",
":",
"return",
"page_info",
"[",
"'page'",
"]",
",",
"state_identifier",
"return",
"None",
",",
"None"
] | 43.3 | 14.8 |
def set_empty_symbol(self):
"""Resets the context, retaining the fields that make it a child of its container (``container``, ``queue``,
``depth``, ``whence``), and sets an empty ``pending_symbol``.
This is useful when an empty quoted symbol immediately follows a long string.
"""
self.field_name = None
self.annotations = None
self.ion_type = None
self.set_pending_symbol(CodePointArray())
return self
|
[
"def",
"set_empty_symbol",
"(",
"self",
")",
":",
"self",
".",
"field_name",
"=",
"None",
"self",
".",
"annotations",
"=",
"None",
"self",
".",
"ion_type",
"=",
"None",
"self",
".",
"set_pending_symbol",
"(",
"CodePointArray",
"(",
")",
")",
"return",
"self"
] | 42.272727 | 17.090909 |
def _get_point_rates(self, source, mmin, mmax=np.inf):
"""
Adds the rates for a point source
:param source:
Point source as instance of :class:
openquake.hazardlib.source.point.PointSource
:param float mmin:
Minimum Magnitude
:param float mmax:
Maximum Magnitude
"""
xloc, yloc = self._get_point_location(source.location)
if (xloc is None) or (yloc is None):
return
# Get annual rates
annual_rate = source.get_annual_occurrence_rates()
mags = np.array([val[0] for val in annual_rate])
annual_rate = np.array([val[1] for val in annual_rate])
idx = np.logical_and(mags >= mmin, mags < mmax)
annual_rate = np.sum(annual_rate[idx])
for hypo_depth in source.hypocenter_distribution.data:
zloc = int((hypo_depth[1] - self.zlim[0]) / self.zspc)
if (zloc < 0) or (zloc >= (self.nz - 1)):
continue
else:
self.rates[xloc, yloc, zloc] += float(hypo_depth[0]) * \
annual_rate
|
[
"def",
"_get_point_rates",
"(",
"self",
",",
"source",
",",
"mmin",
",",
"mmax",
"=",
"np",
".",
"inf",
")",
":",
"xloc",
",",
"yloc",
"=",
"self",
".",
"_get_point_location",
"(",
"source",
".",
"location",
")",
"if",
"(",
"xloc",
"is",
"None",
")",
"or",
"(",
"yloc",
"is",
"None",
")",
":",
"return",
"# Get annual rates",
"annual_rate",
"=",
"source",
".",
"get_annual_occurrence_rates",
"(",
")",
"mags",
"=",
"np",
".",
"array",
"(",
"[",
"val",
"[",
"0",
"]",
"for",
"val",
"in",
"annual_rate",
"]",
")",
"annual_rate",
"=",
"np",
".",
"array",
"(",
"[",
"val",
"[",
"1",
"]",
"for",
"val",
"in",
"annual_rate",
"]",
")",
"idx",
"=",
"np",
".",
"logical_and",
"(",
"mags",
">=",
"mmin",
",",
"mags",
"<",
"mmax",
")",
"annual_rate",
"=",
"np",
".",
"sum",
"(",
"annual_rate",
"[",
"idx",
"]",
")",
"for",
"hypo_depth",
"in",
"source",
".",
"hypocenter_distribution",
".",
"data",
":",
"zloc",
"=",
"int",
"(",
"(",
"hypo_depth",
"[",
"1",
"]",
"-",
"self",
".",
"zlim",
"[",
"0",
"]",
")",
"/",
"self",
".",
"zspc",
")",
"if",
"(",
"zloc",
"<",
"0",
")",
"or",
"(",
"zloc",
">=",
"(",
"self",
".",
"nz",
"-",
"1",
")",
")",
":",
"continue",
"else",
":",
"self",
".",
"rates",
"[",
"xloc",
",",
"yloc",
",",
"zloc",
"]",
"+=",
"float",
"(",
"hypo_depth",
"[",
"0",
"]",
")",
"*",
"annual_rate"
] | 39.464286 | 15.25 |
def has_basal_dendrite(neuron, min_number=1, treefun=_read_neurite_type):
'''Check if a neuron has basal dendrites
Arguments:
neuron(Neuron): The neuron object to test
min_number: minimum number of basal dendrites required
treefun: Optional function to calculate the tree type of neuron's
neurites
Returns:
CheckResult with result
'''
types = [treefun(n) for n in neuron.neurites]
return CheckResult(types.count(NeuriteType.basal_dendrite) >= min_number)
|
[
"def",
"has_basal_dendrite",
"(",
"neuron",
",",
"min_number",
"=",
"1",
",",
"treefun",
"=",
"_read_neurite_type",
")",
":",
"types",
"=",
"[",
"treefun",
"(",
"n",
")",
"for",
"n",
"in",
"neuron",
".",
"neurites",
"]",
"return",
"CheckResult",
"(",
"types",
".",
"count",
"(",
"NeuriteType",
".",
"basal_dendrite",
")",
">=",
"min_number",
")"
] | 36.214286 | 24.785714 |
def makeAggShkDstn(self):
'''
Creates the attributes TranShkAggDstn, PermShkAggDstn, and AggShkDstn.
Draws on attributes TranShkAggStd, PermShkAggStd, TranShkAggCount, PermShkAggCount.
Parameters
----------
None
Returns
-------
None
'''
self.TranShkAggDstn = approxMeanOneLognormal(sigma=self.TranShkAggStd,N=self.TranShkAggCount)
self.PermShkAggDstn = approxMeanOneLognormal(sigma=self.PermShkAggStd,N=self.PermShkAggCount)
self.AggShkDstn = combineIndepDstns(self.PermShkAggDstn,self.TranShkAggDstn)
|
[
"def",
"makeAggShkDstn",
"(",
"self",
")",
":",
"self",
".",
"TranShkAggDstn",
"=",
"approxMeanOneLognormal",
"(",
"sigma",
"=",
"self",
".",
"TranShkAggStd",
",",
"N",
"=",
"self",
".",
"TranShkAggCount",
")",
"self",
".",
"PermShkAggDstn",
"=",
"approxMeanOneLognormal",
"(",
"sigma",
"=",
"self",
".",
"PermShkAggStd",
",",
"N",
"=",
"self",
".",
"PermShkAggCount",
")",
"self",
".",
"AggShkDstn",
"=",
"combineIndepDstns",
"(",
"self",
".",
"PermShkAggDstn",
",",
"self",
".",
"TranShkAggDstn",
")"
] | 37 | 34.875 |
def call(self, task, decorators=None):
"""
Call given task on service layer.
:param task: task to be called. task will be decorated with
TaskDecorator's contained in 'decorators' list
:type task: instance of Task class
:param decorators: list of TaskDecorator's / TaskResultDecorator's
inherited classes
:type decorators: list
:return task_result: result of task call decorated with TaskResultDecorator's
contained in 'decorators' list
:rtype TaskResult instance
"""
if decorators is None:
decorators = []
task = self.apply_task_decorators(task, decorators)
data = task.get_data()
name = task.get_name()
result = self._inner_call(name, data)
task_result = RawTaskResult(task, result)
return self.apply_task_result_decorators(task_result, decorators)
|
[
"def",
"call",
"(",
"self",
",",
"task",
",",
"decorators",
"=",
"None",
")",
":",
"if",
"decorators",
"is",
"None",
":",
"decorators",
"=",
"[",
"]",
"task",
"=",
"self",
".",
"apply_task_decorators",
"(",
"task",
",",
"decorators",
")",
"data",
"=",
"task",
".",
"get_data",
"(",
")",
"name",
"=",
"task",
".",
"get_name",
"(",
")",
"result",
"=",
"self",
".",
"_inner_call",
"(",
"name",
",",
"data",
")",
"task_result",
"=",
"RawTaskResult",
"(",
"task",
",",
"result",
")",
"return",
"self",
".",
"apply_task_result_decorators",
"(",
"task_result",
",",
"decorators",
")"
] | 34.807692 | 17.961538 |
def select(self, idx):
""" Return a new DictArray containing only the indexed values
"""
data = {}
for k in self.data:
data[k] = self.data[k][idx]
return self._return(data=data)
|
[
"def",
"select",
"(",
"self",
",",
"idx",
")",
":",
"data",
"=",
"{",
"}",
"for",
"k",
"in",
"self",
".",
"data",
":",
"data",
"[",
"k",
"]",
"=",
"self",
".",
"data",
"[",
"k",
"]",
"[",
"idx",
"]",
"return",
"self",
".",
"_return",
"(",
"data",
"=",
"data",
")"
] | 31.857143 | 8.142857 |
def plot_envelope(M, C, mesh):
"""
plot_envelope(M,C,mesh)
plots the pointwise mean +/- sd envelope defined by M and C
along their base mesh.
:Arguments:
- `M`: A Gaussian process mean.
- `C`: A Gaussian process covariance
- `mesh`: The mesh on which to evaluate the mean and cov.
"""
try:
from pylab import fill, plot, clf, axis
x = concatenate((mesh, mesh[::-1]))
mean, var = point_eval(M, C, mesh)
sig = sqrt(var)
mean = M(mesh)
y = concatenate((mean - sig, (mean + sig)[::-1]))
# clf()
fill(x, y, facecolor='.8', edgecolor='1.')
plot(mesh, mean, 'k-.')
except ImportError:
print_("Matplotlib is not installed; plotting is disabled.")
|
[
"def",
"plot_envelope",
"(",
"M",
",",
"C",
",",
"mesh",
")",
":",
"try",
":",
"from",
"pylab",
"import",
"fill",
",",
"plot",
",",
"clf",
",",
"axis",
"x",
"=",
"concatenate",
"(",
"(",
"mesh",
",",
"mesh",
"[",
":",
":",
"-",
"1",
"]",
")",
")",
"mean",
",",
"var",
"=",
"point_eval",
"(",
"M",
",",
"C",
",",
"mesh",
")",
"sig",
"=",
"sqrt",
"(",
"var",
")",
"mean",
"=",
"M",
"(",
"mesh",
")",
"y",
"=",
"concatenate",
"(",
"(",
"mean",
"-",
"sig",
",",
"(",
"mean",
"+",
"sig",
")",
"[",
":",
":",
"-",
"1",
"]",
")",
")",
"# clf()",
"fill",
"(",
"x",
",",
"y",
",",
"facecolor",
"=",
"'.8'",
",",
"edgecolor",
"=",
"'1.'",
")",
"plot",
"(",
"mesh",
",",
"mean",
",",
"'k-.'",
")",
"except",
"ImportError",
":",
"print_",
"(",
"\"Matplotlib is not installed; plotting is disabled.\"",
")"
] | 25.266667 | 20.8 |
def _kraus_to_choi(data, input_dim, output_dim):
"""Transform Kraus representation to Choi representation."""
choi = 0
kraus_l, kraus_r = data
if kraus_r is None:
for i in kraus_l:
vec = i.ravel(order='F')
choi += np.outer(vec, vec.conj())
else:
for i, j in zip(kraus_l, kraus_r):
choi += np.outer(i.ravel(order='F'), j.ravel(order='F').conj())
return choi
|
[
"def",
"_kraus_to_choi",
"(",
"data",
",",
"input_dim",
",",
"output_dim",
")",
":",
"choi",
"=",
"0",
"kraus_l",
",",
"kraus_r",
"=",
"data",
"if",
"kraus_r",
"is",
"None",
":",
"for",
"i",
"in",
"kraus_l",
":",
"vec",
"=",
"i",
".",
"ravel",
"(",
"order",
"=",
"'F'",
")",
"choi",
"+=",
"np",
".",
"outer",
"(",
"vec",
",",
"vec",
".",
"conj",
"(",
")",
")",
"else",
":",
"for",
"i",
",",
"j",
"in",
"zip",
"(",
"kraus_l",
",",
"kraus_r",
")",
":",
"choi",
"+=",
"np",
".",
"outer",
"(",
"i",
".",
"ravel",
"(",
"order",
"=",
"'F'",
")",
",",
"j",
".",
"ravel",
"(",
"order",
"=",
"'F'",
")",
".",
"conj",
"(",
")",
")",
"return",
"choi"
] | 35.083333 | 15.25 |
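A quick numerical check of the construction: for the identity channel (a
single Kraus operator I), the Choi matrix is the outer product of the
column-stacked identity with itself, which is why the code ravels with
order='F':

import numpy as np

choi = _kraus_to_choi(([np.eye(2)], None), 2, 2)
vec = np.eye(2).ravel(order='F')  # |I>> in the column-stacking convention
assert np.allclose(choi, np.outer(vec, vec.conj()))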
def _threeDdot_simple(M,a):
"Return Ma, where M is a 3x3 transformation matrix, for each pixel"
result = np.empty(a.shape,dtype=a.dtype)
for i in range(a.shape[0]):
for j in range(a.shape[1]):
A = np.array([a[i,j,0],a[i,j,1],a[i,j,2]]).reshape((3,1))
L = np.dot(M,A)
result[i,j,0] = L[0]
result[i,j,1] = L[1]
result[i,j,2] = L[2]
return result
|
[
"def",
"_threeDdot_simple",
"(",
"M",
",",
"a",
")",
":",
"result",
"=",
"np",
".",
"empty",
"(",
"a",
".",
"shape",
",",
"dtype",
"=",
"a",
".",
"dtype",
")",
"for",
"i",
"in",
"range",
"(",
"a",
".",
"shape",
"[",
"0",
"]",
")",
":",
"for",
"j",
"in",
"range",
"(",
"a",
".",
"shape",
"[",
"1",
"]",
")",
":",
"A",
"=",
"np",
".",
"array",
"(",
"[",
"a",
"[",
"i",
",",
"j",
",",
"0",
"]",
",",
"a",
"[",
"i",
",",
"j",
",",
"1",
"]",
",",
"a",
"[",
"i",
",",
"j",
",",
"2",
"]",
"]",
")",
".",
"reshape",
"(",
"(",
"3",
",",
"1",
")",
")",
"L",
"=",
"np",
".",
"dot",
"(",
"M",
",",
"A",
")",
"result",
"[",
"i",
",",
"j",
",",
"0",
"]",
"=",
"L",
"[",
"0",
"]",
"result",
"[",
"i",
",",
"j",
",",
"1",
"]",
"=",
"L",
"[",
"1",
"]",
"result",
"[",
"i",
",",
"j",
",",
"2",
"]",
"=",
"L",
"[",
"2",
"]",
"return",
"result"
] | 29.785714 | 19.357143 |
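The per-pixel double loop is equivalent to a single einsum contraction over
the channel axis, which is much faster on large images; a drop-in sketch:

import numpy as np

def _threeDdot_einsum(M, a):
    # result[i, j, k] = sum_l M[k, l] * a[i, j, l], i.e. M @ a[i, j] per pixel
    return np.einsum('kl,ijl->ijk', M, a).astype(a.dtype)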
def format_title(self):
""" Takes a string and sanitizes it for Github's url name format """
def asciify(_title):
_title = unicodedata.normalize('NFD', unicode(_title))
ascii = True
out = []
ok = u"1234567890qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM- ',"
for ch in _title:
if ch in ok:
out.append(ch)
elif unicodedata.category(ch)[0] == ("L"): #a letter
out.append(hex(ord(ch)))
ascii = False
elif ch in u'\r\n\t':
out.append(u'-')
return (ascii, sub("[ ',-]+", '-', "".join(out)) )
""" Takes a string and sanitizes it for Github's url name format """
(ascii, _title) = asciify(self.meta.title)
if not ascii and self.meta.alternative_title:
(ascii, _title2) = asciify(self.meta.alternative_title)
if ascii:
_title = _title2
title_length = 99 - len(str(self.book_id)) - 1
if len(_title) > title_length:
# if the title was shortened, separate it from the book id with a double underscore
repo_title = "{0}__{1}".format(_title[:title_length], self.book_id)
else:
repo_title = "{0}_{1}".format(_title[:title_length], self.book_id)
logger.debug("%s %s" % (len(repo_title), repo_title))
self.meta.metadata['_repo'] = repo_title
return repo_title
|
[
"def",
"format_title",
"(",
"self",
")",
":",
"def",
"asciify",
"(",
"_title",
")",
":",
"_title",
"=",
"unicodedata",
".",
"normalize",
"(",
"'NFD'",
",",
"unicode",
"(",
"_title",
")",
")",
"ascii",
"=",
"True",
"out",
"=",
"[",
"]",
"ok",
"=",
"u\"1234567890qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM- ',\"",
"for",
"ch",
"in",
"_title",
":",
"if",
"ch",
"in",
"ok",
":",
"out",
".",
"append",
"(",
"ch",
")",
"elif",
"unicodedata",
".",
"category",
"(",
"ch",
")",
"[",
"0",
"]",
"==",
"(",
"\"L\"",
")",
":",
"#a letter",
"out",
".",
"append",
"(",
"hex",
"(",
"ord",
"(",
"ch",
")",
")",
")",
"ascii",
"=",
"False",
"elif",
"ch",
"in",
"u'\\r\\n\\t'",
":",
"out",
".",
"append",
"(",
"u'-'",
")",
"return",
"(",
"ascii",
",",
"sub",
"(",
"\"[ ',-]+\"",
",",
"'-'",
",",
"\"\"",
".",
"join",
"(",
"out",
")",
")",
")",
"(",
"ascii",
",",
"_title",
")",
"=",
"asciify",
"(",
"self",
".",
"meta",
".",
"title",
")",
"if",
"not",
"ascii",
"and",
"self",
".",
"meta",
".",
"alternative_title",
":",
"(",
"ascii",
",",
"_title2",
")",
"=",
"asciify",
"(",
"self",
".",
"meta",
".",
"alternative_title",
")",
"if",
"ascii",
":",
"_title",
"=",
"_title2",
"title_length",
"=",
"99",
"-",
"len",
"(",
"str",
"(",
"self",
".",
"book_id",
")",
")",
"-",
"1",
"if",
"len",
"(",
"_title",
")",
">",
"title_length",
":",
"# if the title was shortened, replace the trailing _ with an ellipsis",
"repo_title",
"=",
"\"{0}__{1}\"",
".",
"format",
"(",
"_title",
"[",
":",
"title_length",
"]",
",",
"self",
".",
"book_id",
")",
"else",
":",
"repo_title",
"=",
"\"{0}_{1}\"",
".",
"format",
"(",
"_title",
"[",
":",
"title_length",
"]",
",",
"self",
".",
"book_id",
")",
"logger",
".",
"debug",
"(",
"\"%s %s\"",
"%",
"(",
"len",
"(",
"repo_title",
")",
",",
"repo_title",
")",
")",
"self",
".",
"meta",
".",
"metadata",
"[",
"'_repo'",
"]",
"=",
"repo_title",
"return",
"repo_title"
] | 44.967742 | 17.935484 |
def _cleanup(lst):
'''
Return a list of non-empty dictionaries.
'''
clean = []
for ele in lst:
if ele and isinstance(ele, dict):
clean.append(ele)
return clean
|
[
"def",
"_cleanup",
"(",
"lst",
")",
":",
"clean",
"=",
"[",
"]",
"for",
"ele",
"in",
"lst",
":",
"if",
"ele",
"and",
"isinstance",
"(",
"ele",
",",
"dict",
")",
":",
"clean",
".",
"append",
"(",
"ele",
")",
"return",
"clean"
] | 21.666667 | 19.444444 |
def round(self, ndigits=0):
"""
Rounds the amount using the current ``Decimal`` rounding algorithm.
"""
if ndigits is None:
ndigits = 0
return self.__class__(
amount=self.amount.quantize(Decimal('1e' + str(-ndigits))),
currency=self.currency)
|
[
"def",
"round",
"(",
"self",
",",
"ndigits",
"=",
"0",
")",
":",
"if",
"ndigits",
"is",
"None",
":",
"ndigits",
"=",
"0",
"return",
"self",
".",
"__class__",
"(",
"amount",
"=",
"self",
".",
"amount",
".",
"quantize",
"(",
"Decimal",
"(",
"'1e'",
"+",
"str",
"(",
"-",
"ndigits",
")",
")",
")",
",",
"currency",
"=",
"self",
".",
"currency",
")"
] | 34.444444 | 13.777778 |
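How the quantize exponent behaves: ndigits=2 yields Decimal('1e-2'), so the
amount is rounded to two places under the active Decimal rounding mode,
which is half-even by default:

from decimal import Decimal

Decimal('1.005').quantize(Decimal('1e-2'))  # Decimal('1.00'), ties go to even
Decimal('1.015').quantize(Decimal('1e-2'))  # Decimal('1.02')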
def main(args=None):
"""Start application."""
parser = _parser()
# Python 2 will error 'too few arguments' if no subcommand is supplied.
# No such error occurs in Python 3, which makes it feasible to check
# whether a subcommand was provided (displaying a help message if not).
# argparse internals vary significantly over the major versions, so it's
# much easier to just override the args passed to it. In this case, print
# the usage message if there are no args.
if args is None and len(sys.argv) <= 1:
sys.argv.append('--help')
options = parser.parse_args(args)
# pass options to subcommand
options.func(options)
return 0
|
[
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"parser",
"=",
"_parser",
"(",
")",
"# Python 2 will error 'too few arguments' if no subcommand is supplied.",
"# No such error occurs in Python 3, which makes it feasible to check",
"# whether a subcommand was provided (displaying a help message if not).",
"# argparse internals vary significantly over the major versions, so it's",
"# much easier to just override the args passed to it. In this case, print",
"# the usage message if there are no args.",
"if",
"args",
"is",
"None",
"and",
"len",
"(",
"sys",
".",
"argv",
")",
"<=",
"1",
":",
"sys",
".",
"argv",
".",
"append",
"(",
"'--help'",
")",
"options",
"=",
"parser",
".",
"parse_args",
"(",
"args",
")",
"# pass options to subcommand",
"options",
".",
"func",
"(",
"options",
")",
"return",
"0"
] | 35.368421 | 23.263158 |
def __check_equals(self, query):
"""Check if the query results on the two databases are equals.
Returns
-------
bool
True if the results are the same
False otherwise
list
A list with the differences
"""
self.cur1.execute(query)
records1 = self.cur1.fetchall()
self.cur2.execute(query)
records2 = self.cur2.fetchall()
result = True
differences = []
d = difflib.Differ()
records1 = [str(x) for x in records1]
records2 = [str(x) for x in records2]
for line in d.compare(records1, records2):
if line[0] in ('-', '+'):
result = False
if self.verbose_level == 1:
differences.append(line[0:79])
elif self.verbose_level == 2:
differences.append(line)
return result, differences
|
[
"def",
"__check_equals",
"(",
"self",
",",
"query",
")",
":",
"self",
".",
"cur1",
".",
"execute",
"(",
"query",
")",
"records1",
"=",
"self",
".",
"cur1",
".",
"fetchall",
"(",
")",
"self",
".",
"cur2",
".",
"execute",
"(",
"query",
")",
"records2",
"=",
"self",
".",
"cur2",
".",
"fetchall",
"(",
")",
"result",
"=",
"True",
"differences",
"=",
"[",
"]",
"d",
"=",
"difflib",
".",
"Differ",
"(",
")",
"records1",
"=",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"records1",
"]",
"records2",
"=",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"records2",
"]",
"for",
"line",
"in",
"d",
".",
"compare",
"(",
"records1",
",",
"records2",
")",
":",
"if",
"line",
"[",
"0",
"]",
"in",
"(",
"'-'",
",",
"'+'",
")",
":",
"result",
"=",
"False",
"if",
"self",
".",
"verbose_level",
"==",
"1",
":",
"differences",
".",
"append",
"(",
"line",
"[",
"0",
":",
"79",
"]",
")",
"elif",
"self",
".",
"verbose_level",
"==",
"2",
":",
"differences",
".",
"append",
"(",
"line",
")",
"return",
"result",
",",
"differences"
] | 28.575758 | 14.666667 |
def get_current_frame_data(self):
"""
Get all date about the current execution frame
:return: current frame data
:rtype: dict
:raises AttributeError: if the debugger does hold any execution frame.
:raises IOError: if source code for the current execution frame is not accessible.
"""
filename = self.curframe.f_code.co_filename
lines, start_line = inspect.findsource(self.curframe)
if sys.version_info[0] == 2:
lines = [line.decode('utf-8') for line in lines]
return {
'dirname': os.path.dirname(os.path.abspath(filename)) + os.path.sep,
'filename': os.path.basename(filename),
'file_listing': ''.join(lines),
'current_line': self.curframe.f_lineno,
'breakpoints': self.get_file_breaks(filename),
'globals': self.get_globals(),
'locals': self.get_locals()
}
|
[
"def",
"get_current_frame_data",
"(",
"self",
")",
":",
"filename",
"=",
"self",
".",
"curframe",
".",
"f_code",
".",
"co_filename",
"lines",
",",
"start_line",
"=",
"inspect",
".",
"findsource",
"(",
"self",
".",
"curframe",
")",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"2",
":",
"lines",
"=",
"[",
"line",
".",
"decode",
"(",
"'utf-8'",
")",
"for",
"line",
"in",
"lines",
"]",
"return",
"{",
"'dirname'",
":",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"filename",
")",
")",
"+",
"os",
".",
"path",
".",
"sep",
",",
"'filename'",
":",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
",",
"'file_listing'",
":",
"''",
".",
"join",
"(",
"lines",
")",
",",
"'current_line'",
":",
"self",
".",
"curframe",
".",
"f_lineno",
",",
"'breakpoints'",
":",
"self",
".",
"get_file_breaks",
"(",
"filename",
")",
",",
"'globals'",
":",
"self",
".",
"get_globals",
"(",
")",
",",
"'locals'",
":",
"self",
".",
"get_locals",
"(",
")",
"}"
] | 42.227273 | 16.863636 |
def _g_a (self, x, a, b, s):
"""Asymmetric width term
x: frequency coordinate
a: peak position
b: half width
s: asymmetry parameter
"""
return 2*b/(1.0+np.exp(s*(x-a)))
|
[
"def",
"_g_a",
"(",
"self",
",",
"x",
",",
"a",
",",
"b",
",",
"s",
")",
":",
"return",
"2",
"*",
"b",
"/",
"(",
"1.0",
"+",
"np",
".",
"exp",
"(",
"s",
"*",
"(",
"x",
"-",
"a",
")",
")",
")"
] | 27.125 | 8.25 |
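In formula form, the term computed above is

g_a(x) = 2b / (1 + exp(s * (x - a)))

so for s > 0 it interpolates smoothly from 2b far below the peak to 0 far
above it, and it equals the symmetric half width b exactly at x = a.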
def _get_token(url, email, secret_key):
'''
retrieve the auth_token from nsot
:param url: str
:param email: str
:param secret_key: str
:return: str
'''
url = urlparse.urljoin(url, 'authenticate')
data_dict = {"email": email, "secret_key": secret_key}
query = salt.utils.http.query(url, data=data_dict, method='POST',
decode=True)
error = query.get('error')
if error:
log.error('Cannot obtain NSoT authentication token due to: %s.', error)
log.debug('Please verify NSoT URL %s is reachable and email %s is valid', url, email)
return False
else:
log.debug('successfully obtained token from nsot!')
return query['dict'].get('auth_token')
|
[
"def",
"_get_token",
"(",
"url",
",",
"email",
",",
"secret_key",
")",
":",
"url",
"=",
"urlparse",
".",
"urljoin",
"(",
"url",
",",
"'authenticate'",
")",
"data_dict",
"=",
"{",
"\"email\"",
":",
"email",
",",
"\"secret_key\"",
":",
"secret_key",
"}",
"query",
"=",
"salt",
".",
"utils",
".",
"http",
".",
"query",
"(",
"url",
",",
"data",
"=",
"data_dict",
",",
"method",
"=",
"'POST'",
",",
"decode",
"=",
"True",
")",
"error",
"=",
"query",
".",
"get",
"(",
"'error'",
")",
"if",
"error",
":",
"log",
".",
"error",
"(",
"'Cannot obtain NSoT authentication token due to: %s.'",
",",
"error",
")",
"log",
".",
"debug",
"(",
"'Please verify NSoT URL %s is reachable and email %s is valid'",
",",
"url",
",",
"email",
")",
"return",
"False",
"else",
":",
"log",
".",
"debug",
"(",
"'successfully obtained token from nsot!'",
")",
"return",
"query",
"[",
"'dict'",
"]",
".",
"get",
"(",
"'auth_token'",
")"
] | 35.285714 | 21.571429 |
def username(anon, obj, field, val):
"""
Generates a random username
"""
return anon.faker.user_name(field=field)
|
[
"def",
"username",
"(",
"anon",
",",
"obj",
",",
"field",
",",
"val",
")",
":",
"return",
"anon",
".",
"faker",
".",
"user_name",
"(",
"field",
"=",
"field",
")"
] | 25 | 3.4 |
def find(self, item):
"""
Return the smallest i such that i is the index of an
element that wholly contains item. Raises ValueError if no
such element exists. Does not require the segmentlist to
be coalesced.
"""
for i, seg in enumerate(self):
if item in seg:
return i
raise ValueError(item)
|
[
"def",
"find",
"(",
"self",
",",
"item",
")",
":",
"for",
"i",
",",
"seg",
"in",
"enumerate",
"(",
"self",
")",
":",
"if",
"item",
"in",
"seg",
":",
"return",
"i",
"raise",
"ValueError",
"(",
"item",
")"
] | 27.818182 | 15.636364 |
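Behavior sketch, assuming glue.segments-style segment and segmentlist types:

# segmentlist([segment(0, 5), segment(10, 20)]).find(12)  -> 1
# segmentlist([segment(0, 5), segment(10, 20)]).find(7)   -> raises ValueError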
def cleanup_custom_options(id, weakref=None):
"""
Cleans up unused custom trees if all objects referencing the
custom id have been garbage collected or tree is otherwise
unreferenced.
"""
try:
if Store._options_context:
return
weakrefs = Store._weakrefs.get(id, [])
if weakref in weakrefs:
weakrefs.remove(weakref)
refs = []
for wr in list(weakrefs):
r = wr()
if r is None or r.id != id:
weakrefs.remove(wr)
else:
refs.append(r)
if not refs:
for bk in Store.loaded_backends():
if id in Store._custom_options[bk]:
Store._custom_options[bk].pop(id)
if not weakrefs:
Store._weakrefs.pop(id, None)
except Exception as e:
raise Exception('Cleanup of custom options tree with id %s '
'failed with the following exception: %s, '
'an unreferenced orphan tree may persist in '
'memory' % (id, e))
|
[
"def",
"cleanup_custom_options",
"(",
"id",
",",
"weakref",
"=",
"None",
")",
":",
"try",
":",
"if",
"Store",
".",
"_options_context",
":",
"return",
"weakrefs",
"=",
"Store",
".",
"_weakrefs",
".",
"get",
"(",
"id",
",",
"[",
"]",
")",
"if",
"weakref",
"in",
"weakrefs",
":",
"weakrefs",
".",
"remove",
"(",
"weakref",
")",
"refs",
"=",
"[",
"]",
"for",
"wr",
"in",
"list",
"(",
"weakrefs",
")",
":",
"r",
"=",
"wr",
"(",
")",
"if",
"r",
"is",
"None",
"or",
"r",
".",
"id",
"!=",
"id",
":",
"weakrefs",
".",
"remove",
"(",
"wr",
")",
"else",
":",
"refs",
".",
"append",
"(",
"r",
")",
"if",
"not",
"refs",
":",
"for",
"bk",
"in",
"Store",
".",
"loaded_backends",
"(",
")",
":",
"if",
"id",
"in",
"Store",
".",
"_custom_options",
"[",
"bk",
"]",
":",
"Store",
".",
"_custom_options",
"[",
"bk",
"]",
".",
"pop",
"(",
"id",
")",
"if",
"not",
"weakrefs",
":",
"Store",
".",
"_weakrefs",
".",
"pop",
"(",
"id",
",",
"None",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"'Cleanup of custom options tree with id %s '",
"'failed with the following exception: %s, '",
"'an unreferenced orphan tree may persist in '",
"'memory'",
"%",
"(",
"e",
",",
"id",
")",
")"
] | 35.8 | 13.666667 |
def write(self, outfile, encoding):
u"""
Writes the feed to the specified file in the
specified encoding.
"""
cal = Calendar()
cal.add('version', '2.0')
cal.add('calscale', 'GREGORIAN')
for ifield, efield in FEED_FIELD_MAP:
val = self.feed.get(ifield)
if val is not None:
cal.add(efield, val)
self.write_items(cal)
to_ical = getattr(cal, 'as_string', None)
if not to_ical:
to_ical = cal.to_ical
outfile.write(to_ical())
|
[
"def",
"write",
"(",
"self",
",",
"outfile",
",",
"encoding",
")",
":",
"cal",
"=",
"Calendar",
"(",
")",
"cal",
".",
"add",
"(",
"'version'",
",",
"'2.0'",
")",
"cal",
".",
"add",
"(",
"'calscale'",
",",
"'GREGORIAN'",
")",
"for",
"ifield",
",",
"efield",
"in",
"FEED_FIELD_MAP",
":",
"val",
"=",
"self",
".",
"feed",
".",
"get",
"(",
"ifield",
")",
"if",
"val",
"is",
"not",
"None",
":",
"cal",
".",
"add",
"(",
"efield",
",",
"val",
")",
"self",
".",
"write_items",
"(",
"cal",
")",
"to_ical",
"=",
"getattr",
"(",
"cal",
",",
"'as_string'",
",",
"None",
")",
"if",
"not",
"to_ical",
":",
"to_ical",
"=",
"cal",
".",
"to_ical",
"outfile",
".",
"write",
"(",
"to_ical",
"(",
")",
")"
] | 27.55 | 12.2 |
def policy_definitions_list(hide_builtin=False, **kwargs):
'''
.. versionadded:: 2019.2.0
List all policy definitions for a subscription.
:param hide_builtin: Boolean which will filter out BuiltIn policy definitions from the result.
CLI Example:
.. code-block:: bash
salt-call azurearm_resource.policy_definitions_list
'''
result = {}
polconn = __utils__['azurearm.get_client']('policy', **kwargs)
try:
policy_defs = __utils__['azurearm.paged_object_to_list'](polconn.policy_definitions.list())
for policy in policy_defs:
if not (hide_builtin and policy['policy_type'] == 'BuiltIn'):
result[policy['name']] = policy
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
result = {'error': str(exc)}
return result
|
[
"def",
"policy_definitions_list",
"(",
"hide_builtin",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"{",
"}",
"polconn",
"=",
"__utils__",
"[",
"'azurearm.get_client'",
"]",
"(",
"'policy'",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"policy_defs",
"=",
"__utils__",
"[",
"'azurearm.paged_object_to_list'",
"]",
"(",
"polconn",
".",
"policy_definitions",
".",
"list",
"(",
")",
")",
"for",
"policy",
"in",
"policy_defs",
":",
"if",
"not",
"(",
"hide_builtin",
"and",
"policy",
"[",
"'policy_type'",
"]",
"==",
"'BuiltIn'",
")",
":",
"result",
"[",
"policy",
"[",
"'name'",
"]",
"]",
"=",
"policy",
"except",
"CloudError",
"as",
"exc",
":",
"__utils__",
"[",
"'azurearm.log_cloud_error'",
"]",
"(",
"'resource'",
",",
"str",
"(",
"exc",
")",
",",
"*",
"*",
"kwargs",
")",
"result",
"=",
"{",
"'error'",
":",
"str",
"(",
"exc",
")",
"}",
"return",
"result"
] | 30.392857 | 28.75 |
def optimizeAngle(angle):
"""
Because any rotation can be expressed within 360 degrees
of any given number, and since negative angles sometimes
are one character longer than the corresponding positive angle,
we shorten the number to one in the range [-90, 270[.
"""
# First, we put the new angle in the range ]-360, 360[.
# The modulo operator yields results with the sign of the
# divisor, so for negative dividends, we preserve the sign
# of the angle.
if angle < 0:
angle %= -360
else:
angle %= 360
# 720 degrees is unnecessary, as 360 covers all angles.
# As "-x" is shorter than "35x" and "-xxx" one character
# longer than positive angles <= 260, we constrain angle
# range to [-90, 270[ (or, equally valid: ]-100, 260]).
if angle >= 270:
angle -= 360
elif angle < -90:
angle += 360
return angle
|
[
"def",
"optimizeAngle",
"(",
"angle",
")",
":",
"# First, we put the new angle in the range ]-360, 360[.",
"# The modulo operator yields results with the sign of the",
"# divisor, so for negative dividends, we preserve the sign",
"# of the angle.",
"if",
"angle",
"<",
"0",
":",
"angle",
"%=",
"-",
"360",
"else",
":",
"angle",
"%=",
"360",
"# 720 degrees is unnecessary, as 360 covers all angles.",
"# As \"-x\" is shorter than \"35x\" and \"-xxx\" one character",
"# longer than positive angles <= 260, we constrain angle",
"# range to [-90, 270[ (or, equally valid: ]-100, 260]).",
"if",
"angle",
">=",
"270",
":",
"angle",
"-=",
"360",
"elif",
"angle",
"<",
"-",
"90",
":",
"angle",
"+=",
"360",
"return",
"angle"
] | 36.875 | 18.958333 |
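Worked values for the normalization above:

optimizeAngle(350)   # -> -10  (350 % 360 = 350, then 350 >= 270, so -360)
optimizeAngle(-120)  # -> 240  (-120 % -360 = -120, then < -90, so +360)
optimizeAngle(720)   # -> 0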
def convert_sbml_model(model):
"""Convert raw SBML model to extended model.
Args:
model: :class:`NativeModel` obtained from :class:`SBMLReader`.
"""
biomass_reactions = set()
for reaction in model.reactions:
# Extract limits
if reaction.id not in model.limits:
lower, upper = parse_flux_bounds(reaction)
if lower is not None or upper is not None:
model.limits[reaction.id] = reaction.id, lower, upper
# Detect objective
objective = parse_objective_coefficient(reaction)
if objective is not None and objective != 0:
biomass_reactions.add(reaction.id)
if len(biomass_reactions) == 1:
model.biomass_reaction = next(iter(biomass_reactions))
# Convert model to mutable entries
convert_model_entries(model)
# Detect extracellular compartment
if model.extracellular_compartment is None:
extracellular = detect_extracellular_compartment(model)
model.extracellular_compartment = extracellular
# Convert exchange reactions to exchange compounds
convert_exchange_to_compounds(model)
|
[
"def",
"convert_sbml_model",
"(",
"model",
")",
":",
"biomass_reactions",
"=",
"set",
"(",
")",
"for",
"reaction",
"in",
"model",
".",
"reactions",
":",
"# Extract limits",
"if",
"reaction",
".",
"id",
"not",
"in",
"model",
".",
"limits",
":",
"lower",
",",
"upper",
"=",
"parse_flux_bounds",
"(",
"reaction",
")",
"if",
"lower",
"is",
"not",
"None",
"or",
"upper",
"is",
"not",
"None",
":",
"model",
".",
"limits",
"[",
"reaction",
".",
"id",
"]",
"=",
"reaction",
".",
"id",
",",
"lower",
",",
"upper",
"# Detect objective",
"objective",
"=",
"parse_objective_coefficient",
"(",
"reaction",
")",
"if",
"objective",
"is",
"not",
"None",
"and",
"objective",
"!=",
"0",
":",
"biomass_reactions",
".",
"add",
"(",
"reaction",
".",
"id",
")",
"if",
"len",
"(",
"biomass_reactions",
")",
"==",
"1",
":",
"model",
".",
"biomass_reaction",
"=",
"next",
"(",
"iter",
"(",
"biomass_reactions",
")",
")",
"# Convert model to mutable entries",
"convert_model_entries",
"(",
"model",
")",
"# Detect extracelluar compartment",
"if",
"model",
".",
"extracellular_compartment",
"is",
"None",
":",
"extracellular",
"=",
"detect_extracellular_compartment",
"(",
"model",
")",
"model",
".",
"extracellular_compartment",
"=",
"extracellular",
"# Convert exchange reactions to exchange compounds",
"convert_exchange_to_compounds",
"(",
"model",
")"
] | 34.90625 | 17.1875 |
def set_alternative(self, experiment_name, alternative):
"""Explicitly set the alternative the user is enrolled in for the specified experiment.
This allows you to change a user between alternatives. The user and goal counts for the new
alternative will be increment, but those for the old one will not be decremented. The user will
be enrolled in the experiment even if the experiment would not normally accept this user."""
experiment = experiment_manager.get_experiment(experiment_name)
if experiment:
self._set_enrollment(experiment, alternative)
|
[
"def",
"set_alternative",
"(",
"self",
",",
"experiment_name",
",",
"alternative",
")",
":",
"experiment",
"=",
"experiment_manager",
".",
"get_experiment",
"(",
"experiment_name",
")",
"if",
"experiment",
":",
"self",
".",
"_set_enrollment",
"(",
"experiment",
",",
"alternative",
")"
] | 67 | 27.111111 |
def get_char(prompt=None):
"""
Read a line of text from standard input and return the equivalent char;
if text is not a single char, user is prompted to retry. If line can't
be read, return None.
"""
while True:
s = get_string(prompt)
if s is None:
return None
if len(s) == 1:
return s[0]
# Temporarily here for backwards compatibility
if prompt is None:
print("Retry: ", end="")
|
[
"def",
"get_char",
"(",
"prompt",
"=",
"None",
")",
":",
"while",
"True",
":",
"s",
"=",
"get_string",
"(",
"prompt",
")",
"if",
"s",
"is",
"None",
":",
"return",
"None",
"if",
"len",
"(",
"s",
")",
"==",
"1",
":",
"return",
"s",
"[",
"0",
"]",
"# Temporarily here for backwards compatibility",
"if",
"prompt",
"is",
"None",
":",
"print",
"(",
"\"Retry: \"",
",",
"end",
"=",
"\"\"",
")"
] | 29.0625 | 17.1875 |
def skipDryRun(logger, dryRun, level=logging.DEBUG):
""" Return logging function.
When logging function called, will return True if action should be skipped.
Log will indicate if skipped because of dry run.
"""
# This is an undocumented "feature" of logging module:
# logging.log() requires a numeric level
# logging.getLevelName() maps names to numbers
if not isinstance(level, int):
level = logging.getLevelName(level)
return (
functools.partial(_logDryRun, logger, level) if dryRun
else functools.partial(logger.log, level)
)
|
[
"def",
"skipDryRun",
"(",
"logger",
",",
"dryRun",
",",
"level",
"=",
"logging",
".",
"DEBUG",
")",
":",
"# This is an undocumented \"feature\" of logging module:",
"# logging.log() requires a numeric level",
"# logging.getLevelName() maps names to numbers",
"if",
"not",
"isinstance",
"(",
"level",
",",
"int",
")",
":",
"level",
"=",
"logging",
".",
"getLevelName",
"(",
"level",
")",
"return",
"(",
"functools",
".",
"partial",
"(",
"_logDryRun",
",",
"logger",
",",
"level",
")",
"if",
"dryRun",
"else",
"functools",
".",
"partial",
"(",
"logger",
".",
"log",
",",
"level",
")",
")"
] | 38.6 | 15.866667 |
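Usage pattern implied by the docstring: the returned callable always logs,
and its truth value says whether to skip the action (the dry-run branch
relies on _logDryRun, defined elsewhere, returning True):

log = skipDryRun(logger, dryRun)
if log("would remove %s", path):
    return       # dry run: message logged, action skipped
os.remove(path)  # real run: logger.log returned None, so we fall through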
def parse_form(self, req, name, field):
"""Pull a form value from the request.
.. note::
The request stream will be read and left at EOF.
"""
form = self._cache.get("form")
if form is None:
self._cache["form"] = form = parse_form_body(req)
return core.get_value(form, name, field)
|
[
"def",
"parse_form",
"(",
"self",
",",
"req",
",",
"name",
",",
"field",
")",
":",
"form",
"=",
"self",
".",
"_cache",
".",
"get",
"(",
"\"form\"",
")",
"if",
"form",
"is",
"None",
":",
"self",
".",
"_cache",
"[",
"\"form\"",
"]",
"=",
"form",
"=",
"parse_form_body",
"(",
"req",
")",
"return",
"core",
".",
"get_value",
"(",
"form",
",",
"name",
",",
"field",
")"
] | 31.272727 | 15.545455 |
def reftrack_element_data(rt, role):
"""Return the data for the element (e.g. the Asset or Shot)
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the element
:rtype: depending on role
:raises: None
"""
element = rt.get_element()
if element is None:
return
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
return element.name
|
[
"def",
"reftrack_element_data",
"(",
"rt",
",",
"role",
")",
":",
"element",
"=",
"rt",
".",
"get_element",
"(",
")",
"if",
"element",
"is",
"None",
":",
"return",
"if",
"role",
"==",
"QtCore",
".",
"Qt",
".",
"DisplayRole",
"or",
"role",
"==",
"QtCore",
".",
"Qt",
".",
"EditRole",
":",
"return",
"element",
".",
"name"
] | 33.75 | 14.5 |
def zlexcount(self, name, min, max):
"""
Return the number of items in the sorted set between the
lexicographical range ``min`` and ``max``.
:param name: str the name of the redis key
:param min: int or '-inf'
:param max: int or '+inf'
:return: Future()
"""
with self.pipe as pipe:
return pipe.zlexcount(self.redis_key(name), min, max)
|
[
"def",
"zlexcount",
"(",
"self",
",",
"name",
",",
"min",
",",
"max",
")",
":",
"with",
"self",
".",
"pipe",
"as",
"pipe",
":",
"return",
"pipe",
".",
"zlexcount",
"(",
"self",
".",
"redis_key",
"(",
"name",
")",
",",
"min",
",",
"max",
")"
] | 34.416667 | 12.916667 |
def gevent_monkey_patch_report(self):
"""
Report effective gevent monkey patching on the logs.
"""
try:
import gevent.socket
import socket
if gevent.socket.socket is socket.socket:
self.log("gevent monkey patching is active")
return True
else:
self.notify_user("gevent monkey patching failed.")
except ImportError:
self.notify_user("gevent is not installed, monkey patching failed.")
return False
|
[
"def",
"gevent_monkey_patch_report",
"(",
"self",
")",
":",
"try",
":",
"import",
"gevent",
".",
"socket",
"import",
"socket",
"if",
"gevent",
".",
"socket",
".",
"socket",
"is",
"socket",
".",
"socket",
":",
"self",
".",
"log",
"(",
"\"gevent monkey patching is active\"",
")",
"return",
"True",
"else",
":",
"self",
".",
"notify_user",
"(",
"\"gevent monkey patching failed.\"",
")",
"except",
"ImportError",
":",
"self",
".",
"notify_user",
"(",
"\"gevent is not installed, monkey patching failed.\"",
")",
"return",
"False"
] | 33.625 | 17.625 |
def draw_cross(self, position, color=(255, 0, 0), radius=4):
"""Draw a cross on the canvas.
:param position: (row, col) tuple
:param color: RGB tuple
:param radius: radius of the cross (int)
"""
y, x = position
for xmod in np.arange(-radius, radius+1, 1):
xpos = x + xmod
if xpos < 0:
continue # Negative indices will draw on the opposite side.
if xpos >= self.shape[1]:
continue # Out of bounds.
self[int(y), int(xpos)] = color
for ymod in np.arange(-radius, radius+1, 1):
ypos = y + ymod
if ypos < 0:
continue # Negative indices will draw on the opposite side.
if ypos >= self.shape[0]:
continue # Out of bounds.
self[int(ypos), int(x)] = color
|
[
"def",
"draw_cross",
"(",
"self",
",",
"position",
",",
"color",
"=",
"(",
"255",
",",
"0",
",",
"0",
")",
",",
"radius",
"=",
"4",
")",
":",
"y",
",",
"x",
"=",
"position",
"for",
"xmod",
"in",
"np",
".",
"arange",
"(",
"-",
"radius",
",",
"radius",
"+",
"1",
",",
"1",
")",
":",
"xpos",
"=",
"x",
"+",
"xmod",
"if",
"xpos",
"<",
"0",
":",
"continue",
"# Negative indices will draw on the opposite side.",
"if",
"xpos",
">=",
"self",
".",
"shape",
"[",
"1",
"]",
":",
"continue",
"# Out of bounds.",
"self",
"[",
"int",
"(",
"y",
")",
",",
"int",
"(",
"xpos",
")",
"]",
"=",
"color",
"for",
"ymod",
"in",
"np",
".",
"arange",
"(",
"-",
"radius",
",",
"radius",
"+",
"1",
",",
"1",
")",
":",
"ypos",
"=",
"y",
"+",
"ymod",
"if",
"ypos",
"<",
"0",
":",
"continue",
"# Negative indices will draw on the opposite side.",
"if",
"ypos",
">=",
"self",
".",
"shape",
"[",
"0",
"]",
":",
"continue",
"# Out of bounds.",
"self",
"[",
"int",
"(",
"ypos",
")",
",",
"int",
"(",
"x",
")",
"]",
"=",
"color"
] | 38.818182 | 12.045455 |
def org_describe(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /org-xxxx/describe API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2Fdescribe
"""
return DXHTTPRequest('/%s/describe' % object_id, input_params, always_retry=always_retry, **kwargs)
|
[
"def",
"org_describe",
"(",
"object_id",
",",
"input_params",
"=",
"{",
"}",
",",
"always_retry",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"DXHTTPRequest",
"(",
"'/%s/describe'",
"%",
"object_id",
",",
"input_params",
",",
"always_retry",
"=",
"always_retry",
",",
"*",
"*",
"kwargs",
")"
] | 51.714286 | 32.571429 |
def generate_json_schema(cls, schema, context=DEFAULT_DICT):
"""Generate a JSON Schema from a Marshmallow schema.
Args:
schema (marshmallow.Schema|str): The Marshmallow schema, or the
Python path to one, to create the JSON schema for.
Keyword Args:
context (dict, optional): Extra context made available to the schema
    instance while dumping.
Returns:
dict: The JSON schema in dictionary form.
"""
schema = cls._get_schema(schema)
# Generate the JSON Schema
return cls(context=context).dump(schema).data
|
[
"def",
"generate_json_schema",
"(",
"cls",
",",
"schema",
",",
"context",
"=",
"DEFAULT_DICT",
")",
":",
"schema",
"=",
"cls",
".",
"_get_schema",
"(",
"schema",
")",
"# Generate the JSON Schema",
"return",
"cls",
"(",
"context",
"=",
"context",
")",
".",
"dump",
"(",
"schema",
")",
".",
"data"
] | 36.473684 | 21.789474 |
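Hedged usage with a marshmallow schema; JSONSchema stands for the class this
classmethod is defined on (the name is assumed from context):

from marshmallow import Schema, fields

class UserSchema(Schema):
    name = fields.String(required=True)

json_schema = JSONSchema.generate_json_schema(UserSchema)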
def clear(self, asset_manager_id):
""" This method deletes all the data for an asset_manager_id.
It should be used with extreme caution. In production it
is almost always better to Inactivate rather than delete. """
self.logger.info('Clear Market Data - Asset Manager: %s', asset_manager_id)
url = '%s/clear/%s' % (self.endpoint, asset_manager_id)
response = self.session.delete(url)
if response.ok:
eod_price_count = response.json().get('eod_price_count', 'Unknown')
self.logger.info('Deleted %s EOD Prices.', eod_price_count)
fx_rate_count = response.json().get('fx_rate_count', 'Unknown')
self.logger.info('Deleted %s FX Rates.', fx_rate_count)
return response.json()
else:
self.logger.error(response.text)
response.raise_for_status()
|
[
"def",
"clear",
"(",
"self",
",",
"asset_manager_id",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Clear Market Data - Asset Manager: %s'",
",",
"asset_manager_id",
")",
"url",
"=",
"'%s/clear/%s'",
"%",
"(",
"self",
".",
"endpoint",
",",
"asset_manager_id",
")",
"response",
"=",
"self",
".",
"session",
".",
"delete",
"(",
"url",
")",
"if",
"response",
".",
"ok",
":",
"eod_price_count",
"=",
"response",
".",
"json",
"(",
")",
".",
"get",
"(",
"'eod_price_count'",
",",
"'Unknown'",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Deleted %s EOD Prices.'",
",",
"eod_price_count",
")",
"fx_rate_count",
"=",
"response",
".",
"json",
"(",
")",
".",
"get",
"(",
"'fx_rate_count'",
",",
"'Unknown'",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Deleted %s FX Rates.'",
",",
"fx_rate_count",
")",
"return",
"response",
".",
"json",
"(",
")",
"else",
":",
"self",
".",
"logger",
".",
"error",
"(",
"response",
".",
"text",
")",
"response",
".",
"raise_for_status",
"(",
")"
] | 54.9375 | 18.1875 |
def build_flat_msg(self,
id=None,
msg=None):
"""build_flat_msg
:param id: unique id for this message
:param msg: message dictionary to flatten
"""
flat_msg = {}
if not id:
log.error("Please pass in an id")
return None
if not msg:
log.error("Please pass in a msg")
return None
for k in msg["data"]:
if k == "ether":
flat_msg.update(self.process_ether_frame(
id=id,
msg=msg["data"][k]))
# end of ether
elif k == "ip":
flat_msg.update(self.process_ip_frame(
id=id,
msg=msg["data"][k]))
# end of ip
elif k == "ipv6":
flat_msg.update(self.process_ipvsix_frame(
id=id,
msg=msg["data"][k]))
# end of ipv6
elif k == "tcp":
flat_msg.update(self.process_tcp_frame(
id=id,
msg=msg["data"][k]))
# end of tcp
elif k == "udp":
flat_msg.update(self.process_udp_frame(
id=id,
msg=msg["data"][k]))
# end of udp
elif k == "dns":
flat_msg.update(self.process_dns_frame(
id=id,
msg=msg["data"][k]))
# end of dns
elif k == "icmp":
flat_msg.update(self.process_icmp_frame(
id=id,
msg=msg["data"][k]))
# end of icmp
elif k == "arp":
flat_msg.update(self.process_arp_frame(
id=id,
msg=msg["data"][k]))
# end of arp
elif k == "raw":
flat_msg.update(self.process_raw_frame(
id=id,
msg=msg["data"][k]))
# end of raw
elif k == "padding":
flat_msg.update(self.process_pad_frame(
id=id,
msg=msg["data"][k]))
# end of pad
else:
log.error(("Unsupported frame type={} "
"please file an issue to track this "
"with data={} msg={}")
.format(k,
ppj(msg["data"][k]),
msg["data"]))
# end of processing new message
return flat_msg
|
[
"def",
"build_flat_msg",
"(",
"self",
",",
"id",
"=",
"None",
",",
"msg",
"=",
"None",
")",
":",
"flat_msg",
"=",
"{",
"}",
"if",
"not",
"id",
":",
"log",
".",
"error",
"(",
"\"Please pass in an id\"",
")",
"return",
"None",
"if",
"not",
"msg",
":",
"log",
".",
"error",
"(",
"\"Please pass in a msg\"",
")",
"return",
"None",
"for",
"k",
"in",
"msg",
"[",
"\"data\"",
"]",
":",
"if",
"k",
"==",
"\"ether\"",
":",
"flat_msg",
".",
"update",
"(",
"self",
".",
"process_ether_frame",
"(",
"id",
"=",
"id",
",",
"msg",
"=",
"msg",
"[",
"\"data\"",
"]",
"[",
"k",
"]",
")",
")",
"# end of ether",
"elif",
"k",
"==",
"\"ip\"",
":",
"flat_msg",
".",
"update",
"(",
"self",
".",
"process_ip_frame",
"(",
"id",
"=",
"id",
",",
"msg",
"=",
"msg",
"[",
"\"data\"",
"]",
"[",
"k",
"]",
")",
")",
"# end of ip",
"elif",
"k",
"==",
"\"ipv6\"",
":",
"flat_msg",
".",
"update",
"(",
"self",
".",
"process_ipvsix_frame",
"(",
"id",
"=",
"id",
",",
"msg",
"=",
"msg",
"[",
"\"data\"",
"]",
"[",
"k",
"]",
")",
")",
"# end of ipv6",
"elif",
"k",
"==",
"\"tcp\"",
":",
"flat_msg",
".",
"update",
"(",
"self",
".",
"process_tcp_frame",
"(",
"id",
"=",
"id",
",",
"msg",
"=",
"msg",
"[",
"\"data\"",
"]",
"[",
"k",
"]",
")",
")",
"# end of tcp",
"elif",
"k",
"==",
"\"udp\"",
":",
"flat_msg",
".",
"update",
"(",
"self",
".",
"process_udp_frame",
"(",
"id",
"=",
"id",
",",
"msg",
"=",
"msg",
"[",
"\"data\"",
"]",
"[",
"k",
"]",
")",
")",
"# end of udp",
"elif",
"k",
"==",
"\"dns\"",
":",
"flat_msg",
".",
"update",
"(",
"self",
".",
"process_dns_frame",
"(",
"id",
"=",
"id",
",",
"msg",
"=",
"msg",
"[",
"\"data\"",
"]",
"[",
"k",
"]",
")",
")",
"# end of dns",
"elif",
"k",
"==",
"\"icmp\"",
":",
"flat_msg",
".",
"update",
"(",
"self",
".",
"process_icmp_frame",
"(",
"id",
"=",
"id",
",",
"msg",
"=",
"msg",
"[",
"\"data\"",
"]",
"[",
"k",
"]",
")",
")",
"# end of icmp",
"elif",
"k",
"==",
"\"arp\"",
":",
"flat_msg",
".",
"update",
"(",
"self",
".",
"process_arp_frame",
"(",
"id",
"=",
"id",
",",
"msg",
"=",
"msg",
"[",
"\"data\"",
"]",
"[",
"k",
"]",
")",
")",
"# end of arp",
"elif",
"k",
"==",
"\"raw\"",
":",
"flat_msg",
".",
"update",
"(",
"self",
".",
"process_raw_frame",
"(",
"id",
"=",
"id",
",",
"msg",
"=",
"msg",
"[",
"\"data\"",
"]",
"[",
"k",
"]",
")",
")",
"# end of raw",
"elif",
"k",
"==",
"\"padding\"",
":",
"flat_msg",
".",
"update",
"(",
"self",
".",
"process_pad_frame",
"(",
"id",
"=",
"id",
",",
"msg",
"=",
"msg",
"[",
"\"data\"",
"]",
"[",
"k",
"]",
")",
")",
"# end of pad",
"else",
":",
"log",
".",
"error",
"(",
"(",
"\"Unsupported frame type={} \"",
"\"please file an issue to track this \"",
"\"with data={} msg={}\"",
")",
".",
"format",
"(",
"k",
",",
"ppj",
"(",
"msg",
"[",
"\"data\"",
"]",
"[",
"k",
"]",
")",
",",
"msg",
"[",
"\"data\"",
"]",
")",
")",
"# end of processing new message",
"return",
"flat_msg"
] | 36.556962 | 13.721519 |
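The method above dispatches on the keys of msg["data"]; below is a sketch of the expected input shape, with an inline stand-in for the real process_*_frame helpers (which are not shown in the source).

msg = {
    "data": {
        "ether": {"src": "aa:bb:cc:dd:ee:ff", "dst": "11:22:33:44:55:66"},
        "ip": {"src": "10.0.0.1", "dst": "10.0.0.2"},
    }
}

flat_msg = {}
for frame_type, frame in msg["data"].items():
    # stand-in for flat_msg.update(self.process_<type>_frame(id=..., msg=frame))
    flat_msg.update({"%s_%s" % (frame_type, k): v for k, v in frame.items()})

print(flat_msg)  # {'ether_src': ..., 'ether_dst': ..., 'ip_src': ..., 'ip_dst': ...}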
def fisher_by_pol(data):
"""
input: as in dolnp (list of dictionaries with 'dec' and 'inc')
description: do fisher mean after splitting data into two polarity domains.
output: three dictionaries:
    'A' = polarity 'A'
    'B' = polarity 'B'
    'All' = switching polarity of 'B' directions, and calculating the fisher mean of all data
    code modified from eqarea_ell.py by rshaar 1/23/2014
"""
FisherByPoles = {}
DIblock, nameblock, locblock = [], [], []
for rec in data:
if 'dec' in list(rec.keys()) and 'inc' in list(rec.keys()):
# collect data for fisher calculation
DIblock.append([float(rec["dec"]), float(rec["inc"])])
else:
continue
if 'name' in list(rec.keys()):
nameblock.append(rec['name'])
else:
nameblock.append("")
if 'loc' in list(rec.keys()):
locblock.append(rec['loc'])
else:
locblock.append("")
ppars = doprinc(np.array(DIblock)) # get principal directions
    # choose the northerly declination principal component ("normal")
reference_DI = [ppars['dec'], ppars['inc']]
# make reference direction in northern hemisphere
if reference_DI[0] > 90 and reference_DI[0] < 270:
reference_DI[0] = (reference_DI[0] + 180.) % 360
reference_DI[1] = reference_DI[1] * -1.
nDIs, rDIs, all_DI, npars, rpars = [], [], [], [], []
nlist, rlist, alllist = "", "", ""
nloclist, rloclist, allloclist = "", "", ""
for k in range(len(DIblock)):
if angle([DIblock[k][0], DIblock[k][1]], reference_DI) > 90.:
rDIs.append(DIblock[k])
rlist = rlist + ":" + nameblock[k]
if locblock[k] not in rloclist:
rloclist = rloclist + ":" + locblock[k]
all_DI.append([(DIblock[k][0] + 180.) % 360., -1. * DIblock[k][1]])
alllist = alllist + ":" + nameblock[k]
if locblock[k] not in allloclist:
allloclist = allloclist + ":" + locblock[k]
else:
nDIs.append(DIblock[k])
nlist = nlist + ":" + nameblock[k]
if locblock[k] not in nloclist:
nloclist = nloclist + ":" + locblock[k]
all_DI.append(DIblock[k])
alllist = alllist + ":" + nameblock[k]
if locblock[k] not in allloclist:
allloclist = allloclist + ":" + locblock[k]
for mode in ['A', 'B', 'All']:
if mode == 'A' and len(nDIs) > 2:
fpars = fisher_mean(nDIs)
fpars['sites'] = nlist.strip(':')
fpars['locs'] = nloclist.strip(':')
FisherByPoles[mode] = fpars
elif mode == 'B' and len(rDIs) > 2:
fpars = fisher_mean(rDIs)
fpars['sites'] = rlist.strip(':')
fpars['locs'] = rloclist.strip(':')
FisherByPoles[mode] = fpars
elif mode == 'All' and len(all_DI) > 2:
fpars = fisher_mean(all_DI)
fpars['sites'] = alllist.strip(':')
fpars['locs'] = allloclist.strip(':')
FisherByPoles[mode] = fpars
return FisherByPoles
|
[
"def",
"fisher_by_pol",
"(",
"data",
")",
":",
"FisherByPoles",
"=",
"{",
"}",
"DIblock",
",",
"nameblock",
",",
"locblock",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"for",
"rec",
"in",
"data",
":",
"if",
"'dec'",
"in",
"list",
"(",
"rec",
".",
"keys",
"(",
")",
")",
"and",
"'inc'",
"in",
"list",
"(",
"rec",
".",
"keys",
"(",
")",
")",
":",
"# collect data for fisher calculation",
"DIblock",
".",
"append",
"(",
"[",
"float",
"(",
"rec",
"[",
"\"dec\"",
"]",
")",
",",
"float",
"(",
"rec",
"[",
"\"inc\"",
"]",
")",
"]",
")",
"else",
":",
"continue",
"if",
"'name'",
"in",
"list",
"(",
"rec",
".",
"keys",
"(",
")",
")",
":",
"nameblock",
".",
"append",
"(",
"rec",
"[",
"'name'",
"]",
")",
"else",
":",
"nameblock",
".",
"append",
"(",
"\"\"",
")",
"if",
"'loc'",
"in",
"list",
"(",
"rec",
".",
"keys",
"(",
")",
")",
":",
"locblock",
".",
"append",
"(",
"rec",
"[",
"'loc'",
"]",
")",
"else",
":",
"locblock",
".",
"append",
"(",
"\"\"",
")",
"ppars",
"=",
"doprinc",
"(",
"np",
".",
"array",
"(",
"DIblock",
")",
")",
"# get principal directions",
"# choose the northerly declination principe component (\"normal\")",
"reference_DI",
"=",
"[",
"ppars",
"[",
"'dec'",
"]",
",",
"ppars",
"[",
"'inc'",
"]",
"]",
"# make reference direction in northern hemisphere",
"if",
"reference_DI",
"[",
"0",
"]",
">",
"90",
"and",
"reference_DI",
"[",
"0",
"]",
"<",
"270",
":",
"reference_DI",
"[",
"0",
"]",
"=",
"(",
"reference_DI",
"[",
"0",
"]",
"+",
"180.",
")",
"%",
"360",
"reference_DI",
"[",
"1",
"]",
"=",
"reference_DI",
"[",
"1",
"]",
"*",
"-",
"1.",
"nDIs",
",",
"rDIs",
",",
"all_DI",
",",
"npars",
",",
"rpars",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"nlist",
",",
"rlist",
",",
"alllist",
"=",
"\"\"",
",",
"\"\"",
",",
"\"\"",
"nloclist",
",",
"rloclist",
",",
"allloclist",
"=",
"\"\"",
",",
"\"\"",
",",
"\"\"",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"DIblock",
")",
")",
":",
"if",
"angle",
"(",
"[",
"DIblock",
"[",
"k",
"]",
"[",
"0",
"]",
",",
"DIblock",
"[",
"k",
"]",
"[",
"1",
"]",
"]",
",",
"reference_DI",
")",
">",
"90.",
":",
"rDIs",
".",
"append",
"(",
"DIblock",
"[",
"k",
"]",
")",
"rlist",
"=",
"rlist",
"+",
"\":\"",
"+",
"nameblock",
"[",
"k",
"]",
"if",
"locblock",
"[",
"k",
"]",
"not",
"in",
"rloclist",
":",
"rloclist",
"=",
"rloclist",
"+",
"\":\"",
"+",
"locblock",
"[",
"k",
"]",
"all_DI",
".",
"append",
"(",
"[",
"(",
"DIblock",
"[",
"k",
"]",
"[",
"0",
"]",
"+",
"180.",
")",
"%",
"360.",
",",
"-",
"1.",
"*",
"DIblock",
"[",
"k",
"]",
"[",
"1",
"]",
"]",
")",
"alllist",
"=",
"alllist",
"+",
"\":\"",
"+",
"nameblock",
"[",
"k",
"]",
"if",
"locblock",
"[",
"k",
"]",
"not",
"in",
"allloclist",
":",
"allloclist",
"=",
"allloclist",
"+",
"\":\"",
"+",
"locblock",
"[",
"k",
"]",
"else",
":",
"nDIs",
".",
"append",
"(",
"DIblock",
"[",
"k",
"]",
")",
"nlist",
"=",
"nlist",
"+",
"\":\"",
"+",
"nameblock",
"[",
"k",
"]",
"if",
"locblock",
"[",
"k",
"]",
"not",
"in",
"nloclist",
":",
"nloclist",
"=",
"nloclist",
"+",
"\":\"",
"+",
"locblock",
"[",
"k",
"]",
"all_DI",
".",
"append",
"(",
"DIblock",
"[",
"k",
"]",
")",
"alllist",
"=",
"alllist",
"+",
"\":\"",
"+",
"nameblock",
"[",
"k",
"]",
"if",
"locblock",
"[",
"k",
"]",
"not",
"in",
"allloclist",
":",
"allloclist",
"=",
"allloclist",
"+",
"\":\"",
"+",
"locblock",
"[",
"k",
"]",
"for",
"mode",
"in",
"[",
"'A'",
",",
"'B'",
",",
"'All'",
"]",
":",
"if",
"mode",
"==",
"'A'",
"and",
"len",
"(",
"nDIs",
")",
">",
"2",
":",
"fpars",
"=",
"fisher_mean",
"(",
"nDIs",
")",
"fpars",
"[",
"'sites'",
"]",
"=",
"nlist",
".",
"strip",
"(",
"':'",
")",
"fpars",
"[",
"'locs'",
"]",
"=",
"nloclist",
".",
"strip",
"(",
"':'",
")",
"FisherByPoles",
"[",
"mode",
"]",
"=",
"fpars",
"elif",
"mode",
"==",
"'B'",
"and",
"len",
"(",
"rDIs",
")",
">",
"2",
":",
"fpars",
"=",
"fisher_mean",
"(",
"rDIs",
")",
"fpars",
"[",
"'sites'",
"]",
"=",
"rlist",
".",
"strip",
"(",
"':'",
")",
"fpars",
"[",
"'locs'",
"]",
"=",
"rloclist",
".",
"strip",
"(",
"':'",
")",
"FisherByPoles",
"[",
"mode",
"]",
"=",
"fpars",
"elif",
"mode",
"==",
"'All'",
"and",
"len",
"(",
"all_DI",
")",
">",
"2",
":",
"fpars",
"=",
"fisher_mean",
"(",
"all_DI",
")",
"fpars",
"[",
"'sites'",
"]",
"=",
"alllist",
".",
"strip",
"(",
"':'",
")",
"fpars",
"[",
"'locs'",
"]",
"=",
"allloclist",
".",
"strip",
"(",
"':'",
")",
"FisherByPoles",
"[",
"mode",
"]",
"=",
"fpars",
"return",
"FisherByPoles"
] | 41.716216 | 12.824324 |
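A hypothetical input for fisher_by_pol, assuming the function and its helpers (doprinc, fisher_mean, angle) are importable from the same module; values are illustrative. With three normal and three reversed directions, all of 'A', 'B' and 'All' get populated:

data = [
    {'dec': 350.0, 'inc': 55.0, 'name': 'site1', 'loc': 'locA'},
    {'dec': 10.0, 'inc': 60.0, 'name': 'site2', 'loc': 'locA'},
    {'dec': 5.0, 'inc': 52.0, 'name': 'site3', 'loc': 'locA'},
    {'dec': 175.0, 'inc': -58.0, 'name': 'site4', 'loc': 'locB'},
    {'dec': 185.0, 'inc': -54.0, 'name': 'site5', 'loc': 'locB'},
    {'dec': 180.0, 'inc': -61.0, 'name': 'site6', 'loc': 'locB'},
]
results = fisher_by_pol(data)
# results['A'], results['B'], results['All'] each hold Fisher statistics
# plus colon-separated 'sites' and 'locs' strings.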
def constrain_opts(self, constraint_dict, options):
""" Return result of constraints and options against a template """
constraints = {}
for constraint in constraint_dict:
if constraint != 'self':
if (constraint_dict[constraint] or
constraint_dict[constraint] == ''):
constraints[constraint] = constraint_dict[constraint]
results = self.constrained_sections(constraints=constraints,
options=options)
return results, self.template
|
[
"def",
"constrain_opts",
"(",
"self",
",",
"constraint_dict",
",",
"options",
")",
":",
"constraints",
"=",
"{",
"}",
"for",
"constraint",
"in",
"constraint_dict",
":",
"if",
"constraint",
"!=",
"'self'",
":",
"if",
"(",
"constraint_dict",
"[",
"constraint",
"]",
"or",
"constraint_dict",
"[",
"constraint",
"]",
"==",
"''",
")",
":",
"constraints",
"[",
"constraint",
"]",
"=",
"constraint_dict",
"[",
"constraint",
"]",
"results",
"=",
"self",
".",
"constrained_sections",
"(",
"constraints",
"=",
"constraints",
",",
"options",
"=",
"options",
")",
"return",
"results",
",",
"self",
".",
"template"
] | 52.272727 | 13.272727 |
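A sketch of the filtering rule constrain_opts applies to constraint_dict: 'self' is dropped, and a value survives only if it is truthy or an explicit empty string. Keys and values here are illustrative.

constraint_dict = {
    'self': object(),         # always ignored
    'enabled': 'yes',         # kept (truthy)
    'branch': '',             # kept (explicit empty string)
    'instance_number': None,  # dropped
}
constraints = {k: v for k, v in constraint_dict.items()
               if k != 'self' and (v or v == '')}
print(constraints)  # {'enabled': 'yes', 'branch': ''}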
def douglas_peucker(*args, df: pd.DataFrame=None, tolerance: float,
x='x', y='y', z=None, z_factor: float = 3.048,
lat=None, lon=None) -> np.ndarray:
"""Ramer-Douglas-Peucker algorithm for 2D/3D trajectories.
Simplify a trajectory by keeping the points further away from the straight
line.
Parameters:
df Optional a Pandas dataframe
tolerance float the threshold for cutting the
trajectory
z_factor float for ft/m conversion (default 3.048)
1km lateral, 100m vertical seems
like a good ratio
x, y, z str or ndarray[float] the column names if a dataframe is
given, otherwise a series of float
lat, lon str or ndarray[float] the column names if a dataframe is
given, otherwise a series of float.
x, y are built with a Lambert
Conformal projection
    Note that lat, lon have precedence over x, y
Returns:
a np.array of booleans serving as a mask on the dataframe or
on the numpy array
See also: https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm
"""
if df is None and (isinstance(x, str) or isinstance(y, str)):
raise ValueError("Provide a dataframe if x and y are column names")
if df is None and (isinstance(lon, str) or isinstance(lat, str)):
raise ValueError("Provide a dataframe if lat and lon are column names")
if tolerance < 0:
raise ValueError("tolerance must be a positive float")
if df is not None and isinstance(lat, str) and isinstance(lon, str):
lat, lon = df[lat].values, df[lon].values
if df is not None and lat is not None and lon is not None:
lat, lon = np.array(lat), np.array(lon)
x, y = pyproj.transform(
pyproj.Proj(init='EPSG:4326'),
pyproj.Proj(proj='lcc',
lat0=lat.mean(), lon0=lon.mean(),
lat1=lat.min(), lat2=lat.max(),
), lon, lat)
else:
if df is not None:
x, y = df[x].values, df[y].values
x, y = np.array(x), np.array(y)
if z is not None:
if df is not None:
z = df[z].values
z = z_factor * np.array(z)
mask = np.ones(len(x), dtype=bool)
if z is None:
_douglas_peucker_rec(x, y, mask, tolerance)
else:
_douglas_peucker_rec_3d(x, y, z, mask, tolerance)
return mask
|
[
"def",
"douglas_peucker",
"(",
"*",
"args",
",",
"df",
":",
"pd",
".",
"DataFrame",
"=",
"None",
",",
"tolerance",
":",
"float",
",",
"x",
"=",
"'x'",
",",
"y",
"=",
"'y'",
",",
"z",
"=",
"None",
",",
"z_factor",
":",
"float",
"=",
"3.048",
",",
"lat",
"=",
"None",
",",
"lon",
"=",
"None",
")",
"->",
"np",
".",
"ndarray",
":",
"if",
"df",
"is",
"None",
"and",
"(",
"isinstance",
"(",
"x",
",",
"str",
")",
"or",
"isinstance",
"(",
"y",
",",
"str",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Provide a dataframe if x and y are column names\"",
")",
"if",
"df",
"is",
"None",
"and",
"(",
"isinstance",
"(",
"lon",
",",
"str",
")",
"or",
"isinstance",
"(",
"lat",
",",
"str",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Provide a dataframe if lat and lon are column names\"",
")",
"if",
"tolerance",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"tolerance must be a positive float\"",
")",
"if",
"df",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"lat",
",",
"str",
")",
"and",
"isinstance",
"(",
"lon",
",",
"str",
")",
":",
"lat",
",",
"lon",
"=",
"df",
"[",
"lat",
"]",
".",
"values",
",",
"df",
"[",
"lon",
"]",
".",
"values",
"if",
"df",
"is",
"not",
"None",
"and",
"lat",
"is",
"not",
"None",
"and",
"lon",
"is",
"not",
"None",
":",
"lat",
",",
"lon",
"=",
"np",
".",
"array",
"(",
"lat",
")",
",",
"np",
".",
"array",
"(",
"lon",
")",
"x",
",",
"y",
"=",
"pyproj",
".",
"transform",
"(",
"pyproj",
".",
"Proj",
"(",
"init",
"=",
"'EPSG:4326'",
")",
",",
"pyproj",
".",
"Proj",
"(",
"proj",
"=",
"'lcc'",
",",
"lat0",
"=",
"lat",
".",
"mean",
"(",
")",
",",
"lon0",
"=",
"lon",
".",
"mean",
"(",
")",
",",
"lat1",
"=",
"lat",
".",
"min",
"(",
")",
",",
"lat2",
"=",
"lat",
".",
"max",
"(",
")",
",",
")",
",",
"lon",
",",
"lat",
")",
"else",
":",
"if",
"df",
"is",
"not",
"None",
":",
"x",
",",
"y",
"=",
"df",
"[",
"x",
"]",
".",
"values",
",",
"df",
"[",
"y",
"]",
".",
"values",
"x",
",",
"y",
"=",
"np",
".",
"array",
"(",
"x",
")",
",",
"np",
".",
"array",
"(",
"y",
")",
"if",
"z",
"is",
"not",
"None",
":",
"if",
"df",
"is",
"not",
"None",
":",
"z",
"=",
"df",
"[",
"z",
"]",
".",
"values",
"z",
"=",
"z_factor",
"*",
"np",
".",
"array",
"(",
"z",
")",
"mask",
"=",
"np",
".",
"ones",
"(",
"len",
"(",
"x",
")",
",",
"dtype",
"=",
"bool",
")",
"if",
"z",
"is",
"None",
":",
"_douglas_peucker_rec",
"(",
"x",
",",
"y",
",",
"mask",
",",
"tolerance",
")",
"else",
":",
"_douglas_peucker_rec_3d",
"(",
"x",
",",
"y",
",",
"z",
",",
"mask",
",",
"tolerance",
")",
"return",
"mask"
] | 41.121212 | 24.075758 |
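A short sketch of applying the returned mask to a 2D track, assuming the function's private recursive helpers are available via the same module import:

import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
y = np.array([0.0, 0.1, 5.0, 0.1, 0.0])

mask = douglas_peucker(x=x, y=y, tolerance=1.0)
# keeps the endpoints plus the point far off the straight line
print(x[mask], y[mask])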
def sql_fingerprint(query, hide_columns=True):
"""
Simplify a query, taking away exact values and fields selected.
Imperfect but better than super explicit, value-dependent queries.
"""
parsed_query = parse(query)[0]
sql_recursively_simplify(parsed_query, hide_columns=hide_columns)
return str(parsed_query)
|
[
"def",
"sql_fingerprint",
"(",
"query",
",",
"hide_columns",
"=",
"True",
")",
":",
"parsed_query",
"=",
"parse",
"(",
"query",
")",
"[",
"0",
"]",
"sql_recursively_simplify",
"(",
"parsed_query",
",",
"hide_columns",
"=",
"hide_columns",
")",
"return",
"str",
"(",
"parsed_query",
")"
] | 36.444444 | 16.666667 |
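A usage sketch; parse is presumably sqlparse.parse given the parse(query)[0] call above, and the exact output depends on sql_recursively_simplify. The fingerprint below is illustrative only.

query = "SELECT id, name FROM users WHERE id = 42"
print(sql_fingerprint(query))
# illustrative output shape: SELECT ... FROM users WHERE id = #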
def assertEqual(first, second, message=None):
"""
Assert that first equals second.
:param first: First part to evaluate
:param second: Second part to evaluate
:param message: Failure message
:raises: TestStepFail if not first == second
"""
if not first == second:
raise TestStepFail(
format_message(message) if message is not None else "Assert: %s != %s" % (str(first),
str(second)))
|
[
"def",
"assertEqual",
"(",
"first",
",",
"second",
",",
"message",
"=",
"None",
")",
":",
"if",
"not",
"first",
"==",
"second",
":",
"raise",
"TestStepFail",
"(",
"format_message",
"(",
"message",
")",
"if",
"message",
"is",
"not",
"None",
"else",
"\"Assert: %s != %s\"",
"%",
"(",
"str",
"(",
"first",
")",
",",
"str",
"(",
"second",
")",
")",
")"
] | 39.230769 | 15.846154 |
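Usage sketch, assuming assertEqual and TestStepFail are importable from the same test framework module:

assertEqual(2 + 2, 4)  # passes silently
try:
    assertEqual('abc', 'abd', message='strings differ')
except TestStepFail as error:
    print(error)  # the formatted 'strings differ' message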
def get(self, table='', start=0, limit=0, order=None, where=None):
"""
Get a list of stat items
:param table: str database table name
:param start: int
:param limit: int
:param order: list|tuple
:param where: list|tuple
:return:
"""
parameters = {}
args = ['start', 'limit', 'order', 'where']
for arg in args:
if arg in locals() and locals()[arg]:
parameters[arg] = locals()[arg]
response = self._client.session.get(
'{url}/{table}'.format(
url=self.endpoint_url, table=table
),
params=parameters
)
return self.process_response(response)
|
[
"def",
"get",
"(",
"self",
",",
"table",
"=",
"''",
",",
"start",
"=",
"0",
",",
"limit",
"=",
"0",
",",
"order",
"=",
"None",
",",
"where",
"=",
"None",
")",
":",
"parameters",
"=",
"{",
"}",
"args",
"=",
"[",
"'start'",
",",
"'limit'",
",",
"'order'",
",",
"'where'",
"]",
"for",
"arg",
"in",
"args",
":",
"if",
"arg",
"in",
"locals",
"(",
")",
"and",
"locals",
"(",
")",
"[",
"arg",
"]",
":",
"parameters",
"[",
"arg",
"]",
"=",
"locals",
"(",
")",
"[",
"arg",
"]",
"response",
"=",
"self",
".",
"_client",
".",
"session",
".",
"get",
"(",
"'{url}/{table}'",
".",
"format",
"(",
"url",
"=",
"self",
".",
"endpoint_url",
",",
"table",
"=",
"table",
")",
",",
"params",
"=",
"parameters",
")",
"return",
"self",
".",
"process_response",
"(",
"response",
")"
] | 28.64 | 15.28 |
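A hypothetical call against this endpoint (the client wiring and argument shapes are assumptions). Note that only truthy locals survive the filter, so the defaults start=0 and limit=0 send no parameter at all:

items = stats.get(table='visits', start=10, limit=25,
                  order=['date', 'DESC'], where=[['code', '=', 200]])
# GET {endpoint_url}/visits?start=10&limit=25&order=...&where=...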
def _handle_calls(self, service_obj, calls):
""" Performs method calls on service object """
for call in calls:
method = call.get('method')
args = call.get('args', [])
kwargs = call.get('kwargs', {})
_check_type('args', args, list)
_check_type('kwargs', kwargs, dict)
if method is None:
raise InvalidServiceConfiguration(
'Service call must define a method.'
)
new_args = self._replace_scalars_in_args(args)
new_kwargs = self._replace_scalars_in_kwargs(kwargs)
getattr(service_obj, method)(*new_args, **new_kwargs)
|
[
"def",
"_handle_calls",
"(",
"self",
",",
"service_obj",
",",
"calls",
")",
":",
"for",
"call",
"in",
"calls",
":",
"method",
"=",
"call",
".",
"get",
"(",
"'method'",
")",
"args",
"=",
"call",
".",
"get",
"(",
"'args'",
",",
"[",
"]",
")",
"kwargs",
"=",
"call",
".",
"get",
"(",
"'kwargs'",
",",
"{",
"}",
")",
"_check_type",
"(",
"'args'",
",",
"args",
",",
"list",
")",
"_check_type",
"(",
"'kwargs'",
",",
"kwargs",
",",
"dict",
")",
"if",
"method",
"is",
"None",
":",
"raise",
"InvalidServiceConfiguration",
"(",
"'Service call must define a method.'",
")",
"new_args",
"=",
"self",
".",
"_replace_scalars_in_args",
"(",
"args",
")",
"new_kwargs",
"=",
"self",
".",
"_replace_scalars_in_kwargs",
"(",
"kwargs",
")",
"getattr",
"(",
"service_obj",
",",
"method",
")",
"(",
"*",
"new_args",
",",
"*",
"*",
"new_kwargs",
")"
] | 35.578947 | 16.789474 |
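The shape of the 'calls' configuration this method consumes: a list of dicts, each naming a method with optional args/kwargs; scalar placeholders are resolved by the _replace_scalars_* helpers before dispatch. Method names and values here are illustrative.

calls = [
    {'method': 'connect', 'args': ['$host'], 'kwargs': {'timeout': 30}},
    {'method': 'subscribe', 'kwargs': {'channel': 'events'}},
]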
def visit_assign(self, node):
"""return an astroid.Assign node as string"""
lhs = " = ".join(n.accept(self) for n in node.targets)
return "%s = %s" % (lhs, node.value.accept(self))
|
[
"def",
"visit_assign",
"(",
"self",
",",
"node",
")",
":",
"lhs",
"=",
"\" = \"",
".",
"join",
"(",
"n",
".",
"accept",
"(",
"self",
")",
"for",
"n",
"in",
"node",
".",
"targets",
")",
"return",
"\"%s = %s\"",
"%",
"(",
"lhs",
",",
"node",
".",
"value",
".",
"accept",
"(",
"self",
")",
")"
] | 50.25 | 12.5 |
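The string this visitor produces for a chained assignment such as a = b = compute(), sketched without the astroid node objects:

targets = ['a', 'b']  # rendered node.targets
value = 'compute()'   # rendered node.value
print('%s = %s' % (' = '.join(targets), value))  # a = b = compute()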
def _resolve_plt(self, addr, irsb, indir_jump):
"""
Determine if the IRSB at the given address is a PLT stub. If it is, concretely execute the basic block to
resolve the jump target.
:param int addr: Address of the block.
:param irsb: The basic block.
:param IndirectJump indir_jump: The IndirectJump instance.
:return: True if the IRSB represents a PLT stub and we successfully resolved the target.
False otherwise.
:rtype: bool
"""
# is the address identified by CLE as a PLT stub?
if self.project.loader.all_elf_objects:
            # restrict this heuristic to ELF files only
if not any([ addr in obj.reverse_plt for obj in self.project.loader.all_elf_objects ]):
return False
# Make sure the IRSB has statements
if not irsb.has_statements:
irsb = self.project.factory.block(irsb.addr, size=irsb.size).vex
# try to resolve the jump target
simsucc = self.project.engines.default_engine.process(self._initial_state, irsb, force_addr=addr)
if len(simsucc.successors) == 1:
ip = simsucc.successors[0].ip
if ip._model_concrete is not ip:
target_addr = ip._model_concrete.value
if (self.project.loader.find_object_containing(target_addr, membership_check=False) is not
self.project.loader.main_object) \
or self.project.is_hooked(target_addr):
# resolved!
# Fill the IndirectJump object
indir_jump.resolved_targets.add(target_addr)
l.debug("Address %#x is resolved as a PLT entry, jumping to %#x", addr, target_addr)
return True
return False
|
[
"def",
"_resolve_plt",
"(",
"self",
",",
"addr",
",",
"irsb",
",",
"indir_jump",
")",
":",
"# is the address identified by CLE as a PLT stub?",
"if",
"self",
".",
"project",
".",
"loader",
".",
"all_elf_objects",
":",
"# restrict this heuristics to ELF files only",
"if",
"not",
"any",
"(",
"[",
"addr",
"in",
"obj",
".",
"reverse_plt",
"for",
"obj",
"in",
"self",
".",
"project",
".",
"loader",
".",
"all_elf_objects",
"]",
")",
":",
"return",
"False",
"# Make sure the IRSB has statements",
"if",
"not",
"irsb",
".",
"has_statements",
":",
"irsb",
"=",
"self",
".",
"project",
".",
"factory",
".",
"block",
"(",
"irsb",
".",
"addr",
",",
"size",
"=",
"irsb",
".",
"size",
")",
".",
"vex",
"# try to resolve the jump target",
"simsucc",
"=",
"self",
".",
"project",
".",
"engines",
".",
"default_engine",
".",
"process",
"(",
"self",
".",
"_initial_state",
",",
"irsb",
",",
"force_addr",
"=",
"addr",
")",
"if",
"len",
"(",
"simsucc",
".",
"successors",
")",
"==",
"1",
":",
"ip",
"=",
"simsucc",
".",
"successors",
"[",
"0",
"]",
".",
"ip",
"if",
"ip",
".",
"_model_concrete",
"is",
"not",
"ip",
":",
"target_addr",
"=",
"ip",
".",
"_model_concrete",
".",
"value",
"if",
"(",
"self",
".",
"project",
".",
"loader",
".",
"find_object_containing",
"(",
"target_addr",
",",
"membership_check",
"=",
"False",
")",
"is",
"not",
"self",
".",
"project",
".",
"loader",
".",
"main_object",
")",
"or",
"self",
".",
"project",
".",
"is_hooked",
"(",
"target_addr",
")",
":",
"# resolved!",
"# Fill the IndirectJump object",
"indir_jump",
".",
"resolved_targets",
".",
"add",
"(",
"target_addr",
")",
"l",
".",
"debug",
"(",
"\"Address %#x is resolved as a PLT entry, jumping to %#x\"",
",",
"addr",
",",
"target_addr",
")",
"return",
"True",
"return",
"False"
] | 48.923077 | 23.897436 |
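The first check above leans on CLE's reverse_plt mapping; a standalone sketch of that membership test (assuming angr is installed and 'binary' is an ELF with a PLT; the path and address are placeholders):

import angr

proj = angr.Project('binary', auto_load_libs=False)
addr = 0x400560  # candidate block address (placeholder)
is_plt_stub = any(addr in obj.reverse_plt
                  for obj in proj.loader.all_elf_objects)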
def heronian_mean(nums):
r"""Return Heronian mean.
The Heronian mean is:
:math:`\frac{\sum\limits_{i, j}\sqrt{{x_i \cdot x_j}}}
{|nums| \cdot \frac{|nums| + 1}{2}}`
for :math:`j \ge i`
Cf. https://en.wikipedia.org/wiki/Heronian_mean
Parameters
----------
nums : list
A series of numbers
Returns
-------
float
The Heronian mean of nums
Examples
--------
>>> heronian_mean([1, 2, 3, 4])
2.3888282852609093
>>> heronian_mean([1, 2])
1.4714045207910316
>>> heronian_mean([0, 5, 1000])
179.28511301977582
"""
mag = len(nums)
rolling_sum = 0
for i in range(mag):
for j in range(i, mag):
if nums[i] == nums[j]:
rolling_sum += nums[i]
else:
rolling_sum += (nums[i] * nums[j]) ** 0.5
return rolling_sum * 2 / (mag * (mag + 1))
|
[
"def",
"heronian_mean",
"(",
"nums",
")",
":",
"mag",
"=",
"len",
"(",
"nums",
")",
"rolling_sum",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"mag",
")",
":",
"for",
"j",
"in",
"range",
"(",
"i",
",",
"mag",
")",
":",
"if",
"nums",
"[",
"i",
"]",
"==",
"nums",
"[",
"j",
"]",
":",
"rolling_sum",
"+=",
"nums",
"[",
"i",
"]",
"else",
":",
"rolling_sum",
"+=",
"(",
"nums",
"[",
"i",
"]",
"*",
"nums",
"[",
"j",
"]",
")",
"**",
"0.5",
"return",
"rolling_sum",
"*",
"2",
"/",
"(",
"mag",
"*",
"(",
"mag",
"+",
"1",
")",
")"
] | 22.179487 | 19.358974 |
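A worked check of the second docstring example: for [1, 2] the pairs with j >= i contribute 1, sqrt(1*2) and 2, so the mean is (3 + sqrt(2)) * 2 / (2 * 3) = (3 + sqrt(2)) / 3.

print((3 + 2 ** 0.5) / 3)  # 1.4714045207910316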
def upload_file(self, container, file_or_path, obj_name=None,
content_type=None, etag=None, content_encoding=None, ttl=None,
content_length=None, return_none=False, headers=None,
metadata=None, extra_info=None):
"""
Uploads the specified file to the container. If no name is supplied,
the file's name will be used. Either a file path or an open file-like
object may be supplied. A StorageObject reference to the uploaded file
will be returned, unless 'return_none' is set to True.
You may optionally set the `content_type` and `content_encoding`
parameters; pyrax will create the appropriate headers when the object
is stored.
If the size of the file is known, it can be passed as `content_length`.
If you wish for the object to be temporary, specify the time it should
be stored in seconds in the `ttl` parameter. If this is specified, the
object will be deleted after that number of seconds.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more.
"""
return self.create_object(container, file_or_path=file_or_path,
obj_name=obj_name, content_type=content_type, etag=etag,
content_encoding=content_encoding, ttl=ttl, headers=headers,
metadata=metadata, return_none=return_none)
|
[
"def",
"upload_file",
"(",
"self",
",",
"container",
",",
"file_or_path",
",",
"obj_name",
"=",
"None",
",",
"content_type",
"=",
"None",
",",
"etag",
"=",
"None",
",",
"content_encoding",
"=",
"None",
",",
"ttl",
"=",
"None",
",",
"content_length",
"=",
"None",
",",
"return_none",
"=",
"False",
",",
"headers",
"=",
"None",
",",
"metadata",
"=",
"None",
",",
"extra_info",
"=",
"None",
")",
":",
"return",
"self",
".",
"create_object",
"(",
"container",
",",
"file_or_path",
"=",
"file_or_path",
",",
"obj_name",
"=",
"obj_name",
",",
"content_type",
"=",
"content_type",
",",
"etag",
"=",
"etag",
",",
"content_encoding",
"=",
"content_encoding",
",",
"ttl",
"=",
"ttl",
",",
"headers",
"=",
"headers",
",",
"metadata",
"=",
"metadata",
",",
"return_none",
"=",
"return_none",
")"
] | 53.75 | 28.821429 |
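A hedged usage sketch; the docstring above mentions pyrax, so this assumes pyrax's cloudfiles client, with placeholder credentials, container and path:

import pyrax

pyrax.set_credential_file('/path/to/credentials')  # placeholder path
cf = pyrax.cloudfiles
obj = cf.upload_file('my-container', '/tmp/report.pdf',
                     content_type='application/pdf', ttl=3600)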
def get_trade_fee(self, **params):
"""Get trade fee.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#trade-fee-user_data
:param symbol: optional
:type symbol: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"tradeFee": [
{
"symbol": "ADABNB",
"maker": 0.9000,
"taker": 1.0000
}, {
"symbol": "BNBBTC",
"maker": 0.3000,
"taker": 0.3000
}
],
"success": true
}
:raises: BinanceWithdrawException
"""
res = self._request_withdraw_api('get', 'tradeFee.html', True, data=params)
if not res['success']:
raise BinanceWithdrawException(res['msg'])
return res
|
[
"def",
"get_trade_fee",
"(",
"self",
",",
"*",
"*",
"params",
")",
":",
"res",
"=",
"self",
".",
"_request_withdraw_api",
"(",
"'get'",
",",
"'tradeFee.html'",
",",
"True",
",",
"data",
"=",
"params",
")",
"if",
"not",
"res",
"[",
"'success'",
"]",
":",
"raise",
"BinanceWithdrawException",
"(",
"res",
"[",
"'msg'",
"]",
")",
"return",
"res"
] | 29.055556 | 19.444444 |
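Usage sketch with python-binance (the library the linked wapi docs belong to); the key and secret are placeholders:

from binance.client import Client

client = Client('api_key', 'api_secret')
fees = client.get_trade_fee(symbol='BNBBTC')
for fee in fees['tradeFee']:
    print(fee['symbol'], fee['maker'], fee['taker'])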
def load_fits(self, filepath):
"""
Load a FITS file into the viewer.
"""
image = AstroImage.AstroImage(logger=self.logger)
image.load_file(filepath)
self.set_image(image)
|
[
"def",
"load_fits",
"(",
"self",
",",
"filepath",
")",
":",
"image",
"=",
"AstroImage",
".",
"AstroImage",
"(",
"logger",
"=",
"self",
".",
"logger",
")",
"image",
".",
"load_file",
"(",
"filepath",
")",
"self",
".",
"set_image",
"(",
"image",
")"
] | 26.5 | 10.75 |
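The same steps spelled out with ginga's AstroImage directly (assuming ginga is installed; the filename is a placeholder and 'viewer' stands in for the widget exposing this helper):

import logging
from ginga import AstroImage

logger = logging.getLogger('viewer')
image = AstroImage.AstroImage(logger=logger)
image.load_file('m31.fits')  # placeholder filename
viewer.set_image(image)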
def getaddrinfo_wrapper(host, port, family=socket.AF_INET, socktype=0, proto=0, flags=0):
"""Patched 'getaddrinfo' with default family IPv4 (enabled by settings IPV4_ONLY=True)"""
return orig_getaddrinfo(host, port, family, socktype, proto, flags)
|
[
"def",
"getaddrinfo_wrapper",
"(",
"host",
",",
"port",
",",
"family",
"=",
"socket",
".",
"AF_INET",
",",
"socktype",
"=",
"0",
",",
"proto",
"=",
"0",
",",
"flags",
"=",
"0",
")",
":",
"return",
"orig_getaddrinfo",
"(",
"host",
",",
"port",
",",
"family",
",",
"socktype",
",",
"proto",
",",
"flags",
")"
] | 84.333333 | 26.666667 |
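The wrapper only takes effect once it replaces socket.getaddrinfo; a sketch of the monkey-patch the surrounding module presumably performs (the save-then-patch order matters, since the wrapper calls orig_getaddrinfo):

import socket

orig_getaddrinfo = socket.getaddrinfo     # must be saved before patching
socket.getaddrinfo = getaddrinfo_wrapper  # lookups now default to AF_INET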